Mirror of https://github.com/zebrajr/pytorch.git, synced 2025-12-06 12:20:52 +01:00
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/30486
Fixes: https://github.com/pytorch/pytorch/issues/29252

Incorrect code in the handling of parsed Python numbers led to issue #29252: when we allow a zero-dim numpy integer value to be interpreted as a scalar in PyTorch, we incorrectly parse the int as a float.

This PR also fixes the issue described in the "FIXME" here: https://github.com/pytorch/pytorch/pull/27628/files#diff-f539198dd366265fb8dc2d661bc5d5bcR1487

Test Plan: Added a unit test based on the example given in the issue.

Differential Revision: D18932520

Pulled By: nairbv

fbshipit-source-id: f6416f28dfd73ac72c1042042851d76beb5fcf65
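To make the failure mode concrete, here is a minimal sketch (not the actual patch) of the distinction the fix restores: a zero-dim numpy integer accepted as a Python number should be unpacked through the integer path rather than the floating-point one. It only relies on is_numpy_int(), declared in the header shown below; the function name, the include path torch/csrc/utils/tensor_numpy.h, and the unpacking calls are illustrative assumptions, not PyTorch's actual parsing code.

#include <torch/csrc/utils/tensor_numpy.h>  // assumed path of the header shown below

// Illustrative only: choose an integral or floating at::Scalar for a numpy
// scalar object (error handling for the CPython calls is elided).
static at::Scalar numpy_scalar_to_scalar_sketch(PyObject* obj) {
  if (torch::utils::is_numpy_int(obj)) {
    // Integral numpy scalar (e.g. np.int64(3)): keep it integral.
    return at::Scalar(static_cast<int64_t>(PyLong_AsLongLong(obj)));
  }
  // Otherwise fall back to a floating-point interpretation.
  return at::Scalar(PyFloat_AsDouble(obj));
}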
20 lines · 482 B · C++
#pragma once

#include <torch/csrc/python_headers.h>

#include <ATen/ATen.h>

namespace torch { namespace utils {

// Convert an at::Tensor to a numpy array and back. tensor_to_numpy returns a
// new reference; tensor_from_numpy shares memory with the array where possible.
PyObject* tensor_to_numpy(const at::Tensor& tensor);
at::Tensor tensor_from_numpy(PyObject* obj);

// Map between ATen scalar types and NumPy dtype numbers.
int aten_to_numpy_dtype(const at::ScalarType scalar_type);
at::ScalarType numpy_dtype_to_aten(int dtype);

// Type checks for numpy scalar objects: is_numpy_int matches numpy integer
// scalars, is_numpy_scalar matches the broader set of numpy scalars.
bool is_numpy_int(PyObject* obj);
bool is_numpy_scalar(PyObject* obj);

// Build a tensor from an object implementing the __cuda_array_interface__ protocol.
at::Tensor tensor_from_cuda_array_interface(PyObject* obj);

}} // namespace torch::utils
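As a usage note, the sketch below shows how a hypothetical caller might round-trip an object through these entry points. The helper name, the include path, and the surrounding setup are assumptions for illustration; the calls themselves are the ones declared above, and the GIL must be held as with any Python C API use.

#include <torch/csrc/utils/tensor_numpy.h>  // assumed install path of the header above

// Hypothetical helper: numpy array -> at::Tensor -> numpy array.
static PyObject* roundtrip_sketch(PyObject* array_obj) {
  // Wraps the numpy array's storage where possible; throws for non-array input.
  at::Tensor t = torch::utils::tensor_from_numpy(array_obj);

  // Map the tensor's scalar type into NumPy's dtype-number space and back,
  // e.g. to check that the conversion is lossless.
  int npy_type = torch::utils::aten_to_numpy_dtype(t.scalar_type());
  at::ScalarType round_tripped = torch::utils::numpy_dtype_to_aten(npy_type);
  TORCH_INTERNAL_ASSERT(round_tripped == t.scalar_type());

  // Produce a new numpy array object viewing the tensor's data.
  return torch::utils::tensor_to_numpy(t);
}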