Mirror of https://github.com/zebrajr/pytorch.git
Summary:
```
This removes PyObjectFinalizer.

We were seeing SIGSEGV at exit in some programs that use multiprocessing. The backtrace pointed to StorageRef.__del__ being called from subtype_dealloc. My guess is that the Python interpreter was shut down before all C++ Storage objects were deallocated; deallocating the C++ Storage called the finalizer, which called back into Python after it was no longer safe to do so.

This change avoids any callback from C++ into Python during Storage finalization. Instead, dead Storage objects (expired weak references) are collected periodically once shared_cache exceeds a limit. The limit is scaled to 2x the number of live references, which places an upper bound on the amount of extra memory held by dead Storage objects. In practice, this should be very small.
```
Pull Request resolved: https://github.com/pytorch/pytorch/pull/10407
Differential Revision: D9272400
Pulled By: colesbury
fbshipit-source-id: ecb14d9c6d54ffc91e134c34a4e770a4d09048a2
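To make the culling scheme in the summary concrete, here is a rough C++ sketch of the pattern it describes: the cache holds only weak references, and expired entries are swept out whenever the cache grows past a limit derived from the live count. The SharedCache type, its set/cull methods, and the 128-entry floor are illustrative assumptions, not the actual torch.multiprocessing code (the real change lives on the Python side).

```
// Illustrative sketch only: a cache of weak references culled lazily,
// instead of relying on a per-object finalizer callback into Python.
#include <algorithm>
#include <cstdint>
#include <memory>
#include <unordered_map>

struct Storage {};  // stand-in for the real C++ storage type

class SharedCache {
 public:
  void set(int64_t key, const std::shared_ptr<Storage>& value) {
    cache_[key] = value;  // store a weak reference only
    if (cache_.size() > limit_) {
      cull();             // collect dead entries periodically, not per object
    }
  }

 private:
  void cull() {
    // Drop entries whose referent has already been deallocated.
    for (auto it = cache_.begin(); it != cache_.end();) {
      if (it->second.expired()) {
        it = cache_.erase(it);
      } else {
        ++it;
      }
    }
    // Scale the limit to 2x the live count, bounding the memory that
    // expired entries can hold between sweeps.
    limit_ = std::max<std::size_t>(128, 2 * cache_.size());
  }

  std::size_t limit_ = 128;  // illustrative floor, not the real constant
  std::unordered_map<int64_t, std::weak_ptr<Storage>> cache_;
};
```

Because culling is amortized over insertions, nothing runs when an individual Storage dies, which is what removes the unsafe C++-to-Python callback at interpreter shutdown.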
48 lines | 1.0 KiB | C++
#define __STDC_FORMAT_MACROS

#include "torch/csrc/python_headers.h"
#ifdef _MSC_VER
#include <Windows.h>
#endif
#include <structmember.h>

#define THP_HOST_HALF

#include <stdbool.h>
#include <TH/TH.h>
// See Note [TH abstraction violation]
// - Used to get at the allocator associated with a storage
#include <TH/THStorageFunctions.hpp>
#include <libshm.h>
#include "THP.h"
#include "copy_utils.h"
#include "DynamicTypes.h"

#ifdef USE_CUDA
#include <THC/THCStorage.hpp>
#endif

#include "generic/Storage.cpp"
#include <TH/THGenerateAllTypes.h>

#include "generic/Storage.cpp"
#include <TH/THGenerateHalfType.h>

// NB: If you ever divest libtorch of USE_CUDA, you'll have to virtualize
// the CUDA call.
template<>
void THPPointer<THStorage>::free() {
  if (ptr) {
    if (ptr->data_ptr().device().is_cpu()) {
      THStorage_free(ptr);
    } else {
      AT_ASSERT(ptr->data_ptr().device().is_cuda());
#ifdef USE_CUDA
      THStorage_free(ptr);
#else
      AT_ERROR("Cannot free THCStorage when not built with CUDA");
#endif
    }
  }
}
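The specialization at the end of the file plugs into a small smart-pointer template whose destructor defers to a per-type free(), so each wrapped C type supplies its own release call; that is also why the NB comment warns that dropping USE_CUDA from libtorch would mean virtualizing the CUDA branch rather than resolving it at compile time. The code below is a minimal sketch of that pattern, with a toy Buffer type and a simplified THPPointer standing in for the real torch/csrc definitions.

```
#include <cstdio>
#include <cstdlib>

// Sketch of the smart-pointer pattern used above: the destructor defers to a
// per-type free() specialization. Illustrative only, not the real THPPointer.
template <class T>
class THPPointer {
 public:
  explicit THPPointer(T* p = nullptr) : ptr(p) {}
  ~THPPointer() { free(); }     // release when the wrapper goes out of scope
  T* get() const { return ptr; }
  T* release() { T* p = ptr; ptr = nullptr; return p; }

 private:
  void free();                  // defined per wrapped type via specialization
  T* ptr;
};

// Toy stand-in for THStorage: a malloc'd buffer with its own release logic.
struct Buffer { char* data; };

template <>
void THPPointer<Buffer>::free() {
  if (ptr) {
    std::free(ptr->data);
    delete ptr;
  }
}

int main() {
  THPPointer<Buffer> buf(new Buffer{static_cast<char*>(std::malloc(16))});
  std::printf("buffer owned at %p\n", static_cast<void*>(buf.get()));
  return 0;
}  // buf's destructor invokes the Buffer specialization of free()
```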