NumPy versions 1.22 and 1.23 (including their respective bugfix releases) have a buggy implementation of the DLPack deleter that doesn't account for no-GIL contexts. Since we now release the GIL when deallocating tensors in `THPVariable_clear`, this leads to a failure of internal consistency checks when freeing a DLPack-backed tensor from NumPy. This PR adds a check for the buggy NumPy versions and overrides the `DLManagedTensor` deleter to reacquire the GIL before deallocation.

### Rationale for this implementation

The version check was added to `tensor_numpy.h/cpp`, as it seemed a more logical location for it than creating a new translation unit. Overriding the deleter was originally attempted by directly modifying `at::fromDLPack`, but the lack of a build dependency on the Python C API in A10 prevented that. So, I extended the A10 DLPack API instead to additionally accept a custom deleter functor.

Fixes #88082

Pull Request resolved: https://github.com/pytorch/pytorch/pull/89759
Approved by: https://github.com/albanD
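To make the deleter-wrapping idea concrete, here is a minimal sketch of the technique the PR describes: reacquire the GIL before running NumPy's original `DLManagedTensor` deleter. The function `import_dlpack_with_deleter` and the wrapper around it are hypothetical stand-ins, not the actual PyTorch API; only the general pattern (a custom deleter functor that takes the GIL via `PyGILState_Ensure` and then defers to the original deleter) follows the PR description.

```cpp
// Sketch only (not the PyTorch implementation): import a DLPack tensor so
// that the GIL is held while NumPy's own deleter runs.
#include <functional>

#include <ATen/core/Tensor.h>
#include <ATen/dlpack.h>
#include <torch/csrc/python_headers.h>

// Hypothetical entry point standing in for a DLPack import routine that
// accepts a custom deleter functor (the kind of extension the PR adds).
at::Tensor import_dlpack_with_deleter(
    DLManagedTensor* managed,
    std::function<void(DLManagedTensor*)> deleter);

at::Tensor tensor_from_numpy_dlpack_gil_safe(DLManagedTensor* managed) {
  return import_dlpack_with_deleter(managed, [](DLManagedTensor* t) {
    // Reacquire the GIL before deallocation so NumPy's internal consistency
    // checks in its (buggy) deleter run with the GIL held.
    PyGILState_STATE state = PyGILState_Ensure();
    if (t->deleter) {
      t->deleter(t);  // defer to the deleter NumPy installed
    }
    PyGILState_Release(state);
  });
}
```

`PyGILState_Ensure` is safe to call whether or not the calling thread already holds the GIL, which is why it is a natural fit for a deleter that may fire from a no-GIL deallocation path.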
`tensor_numpy.h` (27 lines, 703 B, C++):
```cpp
#pragma once

#include <ATen/core/Tensor.h>
#include <torch/csrc/python_headers.h>

namespace torch {
namespace utils {

// Conversions between at::Tensor and numpy.ndarray.
PyObject* tensor_to_numpy(const at::Tensor& tensor, bool force = false);
at::Tensor tensor_from_numpy(PyObject* obj, bool warn_if_not_writeable = true);

// Mapping between ATen scalar types and NumPy dtype numbers.
int aten_to_numpy_dtype(const at::ScalarType scalar_type);
at::ScalarType numpy_dtype_to_aten(int dtype);

// NumPy availability and type checks for Python objects.
bool is_numpy_available();
bool is_numpy_int(PyObject* obj);
bool is_numpy_scalar(PyObject* obj);

// Warns that a tensor was created from a non-writeable NumPy array.
void warn_numpy_not_writeable();
// Constructs a tensor from an object exposing __cuda_array_interface__.
at::Tensor tensor_from_cuda_array_interface(PyObject* obj);

// Detection of the NumPy 1.22/1.23 DLPack deleter bug (see #88082):
// validate_numpy_for_dlpack_deleter_bug() records whether the installed
// NumPy version is affected; is_numpy_dlpack_deleter_bugged() reports the
// recorded result.
void validate_numpy_for_dlpack_deleter_bug();
bool is_numpy_dlpack_deleter_bugged();

} // namespace utils
} // namespace torch
```
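For context, a hedged sketch of how the two new declarations might be consulted at a DLPack import call site. The helper below is hypothetical; only the two `torch::utils` functions come from the header above, and the exact point at which the version check runs in PyTorch is assumed, not shown here.

```cpp
// Hypothetical call-site sketch: only install the GIL-reacquiring deleter
// when the detected NumPy version is one of the affected ones (1.22.x/1.23.x).
#include <torch/csrc/utils/tensor_numpy.h>

bool should_wrap_dlpack_deleter() {
  // is_numpy_dlpack_deleter_bugged() is assumed to report the result of the
  // version check performed elsewhere (e.g. during NumPy initialization).
  return torch::utils::is_numpy_available() &&
      torch::utils::is_numpy_dlpack_deleter_bugged();
}
```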