**Reopened** to help with merge issues. See #59790 for full context. Fixes #20778. Helps #71688.

Finalizes @martinPasen's `force` argument for `Tensor.numpy()`. It is set to `False` by default. If it is set to `True`, then we:
1. detach the Tensor, if `requires_grad == True`
2. move it to CPU, if it is not on CPU already
3. call `.resolve_conj()`, if `.is_conj() == True`
4. call `.resolve_neg()`, if `.is_neg() == True`

cc @albanD

Pull Request resolved: https://github.com/pytorch/pytorch/pull/78564
Approved by: https://github.com/albanD
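In Python terms, the change makes the pattern below work; a minimal sketch, with illustrative variable names, of the behavior the list above describes:

```python
import torch

# A tensor that cannot be exported to NumPy directly: it requires grad.
t = torch.randn(3, requires_grad=True)

# t.numpy()  # raises RuntimeError: can't call numpy() on a Tensor that requires grad

# force=True detaches, moves to CPU, and resolves conj/neg bits first,
# roughly equivalent to: t.detach().cpu().resolve_conj().resolve_neg().numpy()
a = t.numpy(force=True)
print(a.shape)  # (3,)
```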
#pragma once

#include <torch/csrc/python_headers.h>

#include <ATen/core/Tensor.h>

namespace torch { namespace utils {

// Converts an at::Tensor to a NumPy ndarray. With force=true, the tensor is
// detached, moved to CPU, and has conj/neg bits resolved before conversion.
PyObject* tensor_to_numpy(const at::Tensor& tensor, bool force=false);

// Creates an at::Tensor that shares memory with a NumPy ndarray; optionally
// warns when the source array is not writeable.
at::Tensor tensor_from_numpy(PyObject* obj, bool warn_if_not_writeable=true);

// Maps between ATen scalar types and NumPy dtype type numbers.
int aten_to_numpy_dtype(const at::ScalarType scalar_type);
at::ScalarType numpy_dtype_to_aten(int dtype);

// True if the NumPy module could be imported.
bool is_numpy_available();

bool is_numpy_int(PyObject* obj);
bool is_numpy_scalar(PyObject* obj);

// Emits the warning used when converting a non-writeable NumPy array.
void warn_numpy_not_writeable();

// Creates an at::Tensor from an object implementing __cuda_array_interface__.
at::Tensor tensor_from_cuda_array_interface(PyObject* obj);

}} // namespace torch::utils
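For context, these declarations back the Python-level NumPy bridge. A small illustrative round-trip, assuming a standard PyTorch + NumPy install:

```python
import numpy as np
import torch

# tensor_from_numpy: torch.from_numpy shares memory with the source array.
a = np.arange(4, dtype=np.float32)
t = torch.from_numpy(a)
t[0] = 42.0
print(a[0])  # 42.0 -- same underlying buffer

# warn_numpy_not_writeable: converting a read-only array emits a UserWarning,
# because the resulting Tensor cannot honor NumPy's writeable flag.
b = np.arange(4, dtype=np.float32)
b.setflags(write=False)
t2 = torch.from_numpy(b)  # warns: "The given NumPy array is not writable ..."

# tensor_to_numpy: Tensor.numpy() goes the other way, again sharing memory.
back = t.numpy()
print(np.shares_memory(back, a))  # True
```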