mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-07 12:21:27 +01:00
Summary: This PR implements auto-conversion of GPU arrays that support the `__cuda_array_interface__` protocol (fixes #15601). If an object exposes the `__cuda_array_interface__` attribute, `torch.as_tensor()` and `torch.tensor()` will use the exposed device memory. #### Zero-copy When using `torch.as_tensor(...,device=D)` where `D` is the same device as the one used in `__cuda_array_interface__`. #### Implicit copy When using `torch.as_tensor(...,device=D)` where `D` is the CPU or another non-CUDA device. #### Explicit copy When using `torch.tensor()`. #### Exception When using `torch.as_tensor(...,device=D)` where `D` is a CUDA device not used in `__cuda_array_interface__`. #### Lifetime A tensor created via `torch.as_tensor(obj)` grabs a reference to `obj`, so that the lifetime of `obj` exceeds that of the tensor. Pull Request resolved: https://github.com/pytorch/pytorch/pull/20584 Differential Revision: D15435610 Pulled By: ezyang fbshipit-source-id: c423776ba2f2c073b902e0a0ce272d54e9005286
18 lines
389 B
C++
18 lines
389 B
C++
#pragma once

#include <torch/csrc/python_headers.h>

#include <ATen/ATen.h>

// Interop between ATen tensors and NumPy ndarrays / CUDA array objects.
// Declarations only; implementations live in the corresponding .cpp file.
namespace torch { namespace utils {

// Converts `tensor` to a NumPy ndarray (PyObject*).
// NOTE(review): ownership/aliasing semantics (new reference, memory
// sharing) are defined by the implementation — confirm against the .cpp.
PyObject* tensor_to_numpy(const at::Tensor& tensor);

// Creates an at::Tensor from a NumPy ndarray `obj`.
// NOTE(review): presumably shares memory with the ndarray where possible —
// verify against the implementation.
at::Tensor tensor_from_numpy(PyObject* obj);

// Maps a NumPy dtype type number to the corresponding ATen scalar type.
at::ScalarType numpy_dtype_to_aten(int dtype);

// Returns true if `obj` is a NumPy scalar object.
bool is_numpy_scalar(PyObject* obj);

// Creates an at::Tensor from an object exposing the
// `__cuda_array_interface__` protocol, using the exposed device memory
// (zero-copy when possible). Per the change description for this header,
// the tensor keeps a reference to `obj` so `obj` outlives the tensor.
at::Tensor tensor_from_cuda_array_interface(PyObject* obj);

}} // namespace torch::utils