mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-06 12:20:52 +01:00
Summary: Added on top of ezyang's https://github.com/pytorch/pytorch/pull/9278 Pull Request resolved: https://github.com/pytorch/pytorch/pull/9357 Reviewed By: ezyang Differential Revision: D8863934 Pulled By: cpuhrsch fbshipit-source-id: a45c955c0b1e9e0866749b3a7e8a36de931bdff1
49 lines
1.0 KiB
C++
#define __STDC_FORMAT_MACROS

#include "torch/csrc/python_headers.h"

#ifdef _MSC_VER
#include <Windows.h>
#endif

#include <structmember.h>

#define THP_HOST_HALF

#include <stdbool.h>
#include <TH/TH.h>
// See Note [TH abstraction violation]
// - Used to get at the allocator associated with a storage
#include <TH/THStorageFunctions.hpp>
#include <torch/csrc/finalizer.h>
#include <libshm.h>
#include "THP.h"
#include "copy_utils.h"
#include "DynamicTypes.h"

#ifdef USE_CUDA
#include <THC/THCStorage.hpp>
#endif

#include "generic/Storage.cpp"
#include <TH/THGenerateAllTypes.h>

#include "generic/Storage.cpp"
#include <TH/THGenerateHalfType.h>

// NB: If you ever divest libtorch of USE_CUDA, you'll have to virtualize
// the CUDA call.
template<>
|
|
void THPPointer<THStorage>::free() {
|
|
if (ptr) {
|
|
if (ptr->data_ptr.device().is_cpu()) {
|
|
THStorage_free(ptr);
|
|
} else {
|
|
AT_ASSERT(ptr->data_ptr.device().is_cuda());
|
|
#ifdef USE_CUDA
|
|
THStorage_free(ptr);
|
|
#else
|
|
AT_ERROR("Cannot free THCStorage when not built with CUDA");
|
|
#endif
|
|
}
|
|
}
|
|
}
|