[Exception] [6/N] Remove use of torch::TypeError (#117964)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/117964
Approved by: https://github.com/albanD

commit 87335fabae
parent 67300a11cb
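The whole patch applies one mechanical rewrite. A guard of the form "if (!ok) { throw torch::TypeError("... %s ...", arg); }" becomes "TORCH_CHECK_TYPE(ok, "... ", arg, ...)", stating the success condition directly and replacing printf-style format strings with streamed message parts; unconditional throws become TORCH_CHECK_TYPE(false, ...) or, where the message is a single literal, C10_THROW_ERROR(TypeError, "..."). A minimal sketch of the pattern, assuming only c10/util/Exception.h; require_positive is a hypothetical helper for illustration, not code from this commit:

    #include <c10/util/Exception.h>
    #include <cstdint>

    // Old style, removed by this series:
    //   if (n <= 0) {
    //     throw torch::TypeError("expected a positive value, but got %ld", (long)n);
    //   }
    // New style: the macro asserts the success condition and, on failure,
    // throws c10::TypeError with the trailing arguments streamed into the
    // message.
    void require_positive(int64_t n) {
      TORCH_CHECK_TYPE(n > 0, "expected a positive value, but got ", n);
    }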
@@ -39,6 +39,7 @@
 #include <ATen/core/Tensor.h>
 #include <ATen/FuncTorchTLS.h>
 #include "c10/util/Optional.h"
+#include "c10/util/Exception.h"
 #include "c10/core/Stream.h"

 #include <stdexcept>
@@ -370,9 +371,7 @@ static PyObject * THPVariable_index_scalar(PyObject* self, PyObject* args) {
   auto& self_ = THPVariable_Unpack(self);
   // TODO: change the condition to `self_.dim() != 0` once we expose scalars
   // in PyTorch.
-  if (!isIntegralType(self_.scalar_type(), /*includeBool=*/true) || self_.sym_numel() != 1) {
-    throw TypeError("only integer tensors of a single element can be converted to an index");
-  }
+  TORCH_CHECK_TYPE(isIntegralType(self_.scalar_type(), /*includeBool=*/true) && self_.sym_numel() == 1, "only integer tensors of a single element can be converted to an index");
   return wrap(dispatch_to<int64_t>(self_));
   END_HANDLE_TH_ERRORS
 }
@@ -389,9 +388,7 @@ static PyObject * THPVariable_invert(PyObject* self, PyObject* args) {
     return handle_torch_function(self, "__invert__", args);
   }
   auto& self_ = THPVariable_Unpack(self);
-  if (!isIntegralType(self_.scalar_type(), /*includeBool=*/true)) {
-    throw TypeError("~ (operator.invert) is only implemented on integer and Boolean-type tensors");
-  }
+  TORCH_CHECK_TYPE(isIntegralType(self_.scalar_type(), /*includeBool=*/true), "~ (operator.invert) is only implemented on integer and Boolean-type tensors");
   return THPVariable_Wrap(dispatch_invert(self_));
   END_HANDLE_TH_ERRORS
 }
@@ -1043,7 +1040,7 @@ static PyObject * THPVariable_type(PyObject* self, PyObject* args, PyObject* kwa
   } else if (THPDtype_Check(obj)) {
     is_dtype = true;
   } else {
-    throw TypeError("dtype must be a type, str, or dtype object");
+    C10_THROW_ERROR(TypeError, "dtype must be a type, str, or dtype object");
   }
   ScalarType scalar_type;
   Device device = self_.device();
@@ -142,9 +142,9 @@ PyObject* THPDevice_rc(PyObject* a, PyObject* b, int op) {
     case Py_LE:
     case Py_GT:
     case Py_GE:
-      throw torch::TypeError("comparison not implemented");
+      C10_THROW_ERROR(TypeError, "comparison not implemented");
     default:
-      throw torch::TypeError("unexpected comparison op");
+      C10_THROW_ERROR(TypeError, "unexpected comparison op");
   }
   END_HANDLE_TH_ERRORS
 }
@@ -105,11 +105,10 @@ static PyObject* THPGenerator_setState(PyObject* _self, PyObject* _new_state) {
   using namespace torch::autograd;

   HANDLE_TH_ERRORS
-  if (!THPVariable_Check(_new_state)) {
-    throw torch::TypeError(
-        "expected a torch.ByteTensor, but got %s",
-        Py_TYPE(_new_state)->tp_name);
-  }
+  TORCH_CHECK_TYPE(
+      THPVariable_Check(_new_state),
+      "expected a torch.ByteTensor, but got ",
+      Py_TYPE(_new_state)->tp_name);
   auto self = (THPGenerator*)_self;
   auto& gen = self->cdata;
   const auto& new_state_tensor = THPVariable_Unpack(_new_state);
@@ -29,7 +29,7 @@ inline Device py_object_to_device(py::object object) {
   if (THPDevice_Check(obj)) {
     return reinterpret_cast<THPDevice*>(obj)->device;
   }
-  throw TypeError("Expected device");
+  TORCH_CHECK_TYPE(false, "Expected device");
 }

 inline Dtype py_object_to_dtype(py::object object) {
@@ -37,7 +37,7 @@ inline Dtype py_object_to_dtype(py::object object) {
   if (THPDtype_Check(obj)) {
     return reinterpret_cast<THPDtype*>(obj)->scalar_type;
   }
-  throw TypeError("Expected dtype");
+  TORCH_CHECK_TYPE(false, "Expected dtype");
 }

 template <typename ModuleType>
@@ -768,15 +768,15 @@ static void _get_tensors_to_save(
         tensors_to_save.emplace_back(tensor);
       }
     } else {
-      if (is_executable) {
-        // TODO: We should really just ALWAYS throw an error here, but
-        // doing so will break some internal tests. We should fix those.
-        throw torch::TypeError(
-            "save_for_backward can only save variables, but argument %ld is of "
-            "type %s",
-            i,
-            Py_TYPE(obj)->tp_name);
-      }
+      // TODO: We should really just ALWAYS throw an error here, but
+      // doing so will break some internal tests. We should fix those.
+      TORCH_CHECK_TYPE(
+          !is_executable,
+          "save_for_backward can only save variables, but argument ",
+          i,
+          " is of "
+          "type ",
+          Py_TYPE(obj)->tp_name);
     }
   }
 }
@@ -57,11 +57,10 @@ static PyObject* THPVariable_pynew(
   TORCH_CHECK_VALUE(
       !is_volatile || !requires_grad,
       "Variable can't be volatile and require_grad at the same time!");
-  if (grad_fn && !THPFunction_Check(grad_fn)) {
-    throw TypeError(
-        "_grad_fn has to be a Function object or None, but got %s",
-        Py_TYPE(grad_fn)->tp_name);
-  }
+  TORCH_CHECK_TYPE(
+      !grad_fn || THPFunction_Check(grad_fn),
+      "_grad_fn has to be a Function object or None, but got ",
+      Py_TYPE(grad_fn)->tp_name);
   Variable var;
   if (!data || data == Py_None) {
     // For legacy serialization code, create an empty tensor. This is also used
@@ -75,8 +74,10 @@ static PyObject* THPVariable_pynew(
   } else if (THPVariable_Check(data)) {
     var = THPVariable_Unpack(data).detach();
   } else {
-    throw torch::TypeError(
-        "Variable data has to be a tensor, but got %s", Py_TYPE(data)->tp_name);
+    TORCH_CHECK_TYPE(
+        false,
+        "Variable data has to be a tensor, but got ",
+        Py_TYPE(data)->tp_name);
   }
   // We set `tensor`'s `allow_tensor_metadata_change` to true here, because we
   // want to allow the following use case for backward compatibility:
@@ -28,8 +28,6 @@
 #include <c10/util/irange.h>

-#include <c10/core/Layout.h>
-#include <tuple>
 #include <vector>

 using namespace at;
 using namespace torch::autograd::utils;
@@ -129,10 +127,12 @@ inline Variable valueToTensor(
   } else if (torch::is_symbool(value)) {
     scalar = Scalar(py::cast<c10::SymBool>(py::handle(value)));
   } else {
-    throw TypeError(
-        "can't assign a %s to a %s",
-        Py_TYPE(value)->tp_name,
-        torch::utils::options_to_string(options).c_str());
+    TORCH_CHECK_TYPE(
+        false,
+        "can't assign a ",
+        Py_TYPE(value)->tp_name,
+        " to a ",
+        torch::utils::options_to_string(options));
   }
   // lift_fresh is supposed to be used in situations where you are guaranteed to
   // get a plain Tensor which is not true for cpu device but not for non cpu
@@ -437,9 +437,7 @@ void dispatch_set_item(
 // indexing is needed, it calls C++ `at::indexing::dispatch_index_put_`.
 int THPVariable_setitem(PyObject* self, PyObject* index, PyObject* py_value) {
   HANDLE_TH_ERRORS
-  if (py_value == nullptr) {
-    throw TypeError("Tensor does not support deleting items");
-  }
+  TORCH_CHECK_TYPE(py_value, "Tensor does not support deleting items");
   if ((!THPVariable_CheckExact(self) && check_has_torch_function(self)) ||
       (!THPVariable_CheckExact(py_value) &&
        check_has_torch_function(py_value))) {
@@ -449,11 +447,11 @@ int THPVariable_setitem(PyObject* self, PyObject* index, PyObject* py_value) {
   }

   const auto& self_ = THPVariable_Unpack(self);
-  if (self_.layout() == kSparse || self_.layout() == kSparseCsr ||
-      self_.layout() == kSparseCsc || self_.layout() == kSparseBsr ||
-      self_.layout() == kSparseBsc) {
-    throw TypeError("Cannot assign to a sparse tensor");
-  }
+  TORCH_CHECK_TYPE(
+      self_.layout() != kSparse && self_.layout() != kSparseCsr &&
+          self_.layout() != kSparseCsc && self_.layout() != kSparseBsr &&
+          self_.layout() != kSparseBsc,
+      "Cannot assign to a sparse tensor");
   OptionalDeviceGuard device_guard(device_of(self_));
   at::Device self_device = self_.device();
   Variable value;
@@ -209,8 +209,8 @@ PyObject* THPModule_disable_torch_function(PyObject* self, PyObject* a) {
   } else if (PyTuple_Check(args)) {
     py_args = py::reinterpret_borrow<py::tuple>(args);
   } else {
-    throw torch::TypeError(
-        "expected List or Tuple (got %s)", Py_TYPE(args)->tp_name);
+    TORCH_CHECK_TYPE(
+        false, "expected List or Tuple (got ", Py_TYPE(args)->tp_name, ")");
   }

   // These are all C-API calls so no exceptions will be raised
@@ -243,8 +243,8 @@ PyObject* THPModule_disable_torch_dispatch(PyObject* self, PyObject* a) {
   } else if (PyTuple_Check(args)) {
     py_args = py::reinterpret_borrow<py::tuple>(args);
   } else {
-    throw torch::TypeError(
-        "expected List or Tuple (got %s)", Py_TYPE(args)->tp_name);
+    TORCH_CHECK_TYPE(
+        false, "expected List or Tuple (got ", Py_TYPE(args)->tp_name, ")");
   }

   // This implementation is not completely correct. The moral
@@ -12,6 +12,7 @@
 #include <ATen/ATen.h>
 #include <ATen/PythonTorchFunctionTLS.h>
 #include <ATen/TracerMode.h>
+#include <c10/util/Exception.h>
 #include <c10/util/irange.h>

 #include <sstream>
@@ -1307,20 +1308,28 @@ std::string FunctionSignature::toString() const {
   const auto max_pos_args = signature.max_pos_args;
   const auto min_args = signature.min_args;
   const long nargs_ = nargs;
-  if (min_args != max_pos_args) {
-    throw TypeError(
-        "%s() takes from %zu to %zu positional arguments but %ld were given",
-        signature.name.c_str(),
-        min_args,
-        max_pos_args,
-        nargs_);
-  }
-  throw TypeError(
-      "%s() takes %zu positional argument%s but %ld %s given",
-      signature.name.c_str(),
-      max_pos_args,
-      max_pos_args == 1 ? "" : "s",
-      nargs_,
-      nargs == 1 ? "was" : "were");
+  TORCH_CHECK_TYPE(
+      min_args == max_pos_args,
+      signature.name,
+      "() takes from ",
+      min_args,
+      " to ",
+      max_pos_args,
+      " positional arguments but ",
+      nargs_,
+      " were given");
+  TORCH_CHECK_TYPE(
+      false,
+      signature.name,
+      "() takes ",
+      max_pos_args,
+      " positional argument",
+      max_pos_args == 1 ? "" : "s",
+      " but ",
+      nargs_,
+      " ",
+      nargs == 1 ? "was" : "were",
+      " given");
 }
@@ -1340,13 +1349,15 @@ std::string FunctionSignature::toString() const {
       num_missing++;
     }
   }

-  throw TypeError(
-      "%s() missing %d required positional argument%s: %s",
-      signature.name.c_str(),
-      num_missing,
-      num_missing == 1 ? "s" : "",
-      ss.str().c_str());
+  TORCH_CHECK_TYPE(
+      false,
+      signature.name,
+      "() missing ",
+      num_missing,
+      " required positional argument",
+      num_missing == 1 ? "s" : "",
+      ": ",
+      ss.str());
 }

 static Py_ssize_t find_param(FunctionSignature& signature, PyObject* name) {
@@ -1372,28 +1383,26 @@ static Py_ssize_t find_param(FunctionSignature& signature, PyObject* name) {
   Py_ssize_t pos = 0;

   while (PyDict_Next(kwargs, &pos, &key, &value)) {
-    if (!THPUtils_checkString(key)) {
-      throw TypeError("keywords must be strings");
-    }
+    TORCH_CHECK_TYPE(THPUtils_checkString(key), "keywords must be strings");

     auto param_idx = find_param(signature, key);
-    if (param_idx < 0) {
-      throw TypeError(
-          "%s() got an unexpected keyword argument '%s'",
-          signature.name.c_str(),
-          THPUtils_unpackString(key).c_str());
-    }
+    TORCH_CHECK_TYPE(
+        param_idx >= 0,
+        signature.name,
+        "() got an unexpected keyword argument '",
+        THPUtils_unpackString(key),
+        "'");

-    if (param_idx < num_pos_args) {
-      throw TypeError(
-          "%s() got multiple values for argument '%s'",
-          signature.name.c_str(),
-          THPUtils_unpackString(key).c_str());
-    }
+    TORCH_CHECK_TYPE(
+        param_idx >= num_pos_args,
+        signature.name,
+        "() got multiple values for argument '",
+        THPUtils_unpackString(key),
+        "'");
   }

   // this should never be hit
-  throw TypeError("invalid keyword arguments");
+  TORCH_CHECK_TYPE(false, "invalid keyword arguments");
 }

 bool FunctionSignature::parse(
@@ -1476,42 +1485,51 @@ bool FunctionSignature::parse(
       arg_pos = nargs;
       continue;
     } else if (raise_exception) {
-      if (is_kwd) {
-        // foo(): argument 'other' must be str, not int
-        throw TypeError(
-            "%s(): argument '%s' must be %s, not %s",
-            name.c_str(),
-            param.name.c_str(),
-            param.type_name().c_str(),
-            Py_TYPE(obj)->tp_name);
-      } else {
-        // foo(): argument 'other' (position 2) must be str, not int
-        if (failed_idx != -1) {
-          if (!(PyTuple_Check(obj) || PyList_Check(obj))) {
-            TORCH_INTERNAL_ASSERT(varargs_eligible);
-            obj = args;
-          }
-          TORCH_INTERNAL_ASSERT(failed_idx < PySequence_Size(obj));
-          throw TypeError(
-              "%s(): argument '%s' (position %ld) must be %s, but found element of type %s at pos %ld",
-              name.c_str(),
-              param.name.c_str(),
-              static_cast<long>(arg_pos + 1),
-              param.type_name().c_str(),
-              Py_TYPE(py::reinterpret_steal<py::object>(
-                          PySequence_GetItem(obj, failed_idx))
-                          .ptr())
-                  ->tp_name,
-              static_cast<long>(failed_idx));
-        }
-        throw TypeError(
-            "%s(): argument '%s' (position %ld) must be %s, not %s",
-            name.c_str(),
-            param.name.c_str(),
-            static_cast<long>(arg_pos + 1),
-            param.type_name().c_str(),
-            Py_TYPE(obj)->tp_name);
-      }
+      // foo(): argument 'other' must be str, not int
+      TORCH_CHECK_TYPE(
+          !is_kwd,
+          name,
+          "(): argument '",
+          param.name,
+          "' must be ",
+          param.type_name(),
+          ", not ",
+          Py_TYPE(obj)->tp_name);
+      // foo(): argument 'other' (position 2) must be str, not int
+      if (failed_idx != -1) {
+        if (!(PyTuple_Check(obj) || PyList_Check(obj))) {
+          TORCH_INTERNAL_ASSERT(varargs_eligible);
+          obj = args;
+        }
+        TORCH_INTERNAL_ASSERT(failed_idx < PySequence_Size(obj));
+        TORCH_CHECK_TYPE(
+            false,
+            name,
+            "(): argument '",
+            param.name,
+            "' (position ",
+            arg_pos + 1,
+            ") must be ",
+            param.type_name(),
+            ", but found element of type ",
+            Py_TYPE(py::reinterpret_steal<py::object>(
+                        PySequence_GetItem(obj, failed_idx))
+                        .ptr())
+                ->tp_name,
+            " at pos ",
+            failed_idx);
+      }
+      TORCH_CHECK_TYPE(
+          false,
+          name,
+          "(): argument '",
+          param.name,
+          "' (position ",
+          arg_pos + 1,
+          ") must be ",
+          param.type_name(),
+          ", not ",
+          Py_TYPE(obj)->tp_name);
     } else {
       return false;
     }
@@ -1632,7 +1650,7 @@ void PythonArgParser::print_error(
   auto options = get_signatures();
   auto msg =
       torch::format_invalid_args(args, kwargs, function_name + "()", options);
-  throw TypeError("%s", msg.c_str());
+  TORCH_CHECK_TYPE(false, msg);
 }

 std::vector<std::string> PythonArgParser::get_signatures() const {
@@ -1699,8 +1717,12 @@ at::Tensor PythonArgs::tensor_slow(int i) {
     // a test for Py_None here; instead, you need to mark the argument
     // as *allowing none*; you can do this by writing 'Tensor?' instead
     // of 'Tensor' in the ATen metadata.
-    throw TypeError(
-        "expected Tensor as argument %d, but got %s", i, Py_TYPE(obj)->tp_name);
+    TORCH_CHECK_TYPE(
+        false,
+        "expected Tensor as argument ",
+        i,
+        ", but got ",
+        Py_TYPE(obj)->tp_name);
   }
   at::AutoDispatchBelowADInplaceOrView guard; // TODO: remove
   at::tracer::impl::NoTracerDispatchMode tracer_guard;
@@ -486,9 +486,8 @@ inline std::array<at::Tensor, N> PythonArgs::tensorlist_n(int i) {
   THPObjectPtr arg = six::maybeAsTuple(args[i]);
   // NOLINTNEXTLINE(bugprone-branch-clone)
   auto size = tuple ? PyTuple_GET_SIZE(arg.get()) : PyList_GET_SIZE(arg.get());
-  if (size != N) {
-    throw TypeError("expected tuple of %d elements but got %d", N, (int)size);
-  }
+  TORCH_CHECK_TYPE(
+      size == N, "expected tuple of ", N, " elements but got ", size);
   for (const auto idx : c10::irange(size)) {
     PyObject* obj = tuple ? PyTuple_GET_ITEM(arg.get(), idx)
                           : PyList_GET_ITEM(arg.get(), idx);
@@ -524,12 +523,16 @@ inline void throw_intlist_exception(
       ? e.what()
       : std::string("type must be ") + args->signature.params[i].type_name() +
           ",but got " + Py_TYPE(obj)->tp_name;
-  throw TypeError(
-      "%s(): argument '%s' failed to unpack the object at pos %zu with error \"%s\"",
-      args->signature.name.c_str(),
-      args->signature.params[i].name.c_str(),
-      idx + 1,
-      error.c_str());
+  TORCH_CHECK_TYPE(
+      false,
+      args->signature.name,
+      "(): argument '",
+      args->signature.params[i].name,
+      "' failed to unpack the object at pos ",
+      idx + 1,
+      " with error \"",
+      error,
+      "\"");
 }

 inline std::vector<c10::SymInt> PythonArgs::symintlist(int i) {
@@ -703,12 +706,16 @@ inline std::vector<double> PythonArgs::getDoublelist(int i) {
     try {
       res[idx] = THPUtils_unpackDouble(obj);
     } catch (const std::exception& e) {
-      throw TypeError(
-          "%s(): argument '%s' must be %s, but found element of type %s at pos %zu",
-          signature.name.c_str(),
-          signature.params[i].name.c_str(),
-          signature.params[i].type_name().c_str(),
-          Py_TYPE(obj)->tp_name,
-          idx + 1);
+      TORCH_CHECK_TYPE(
+          false,
+          signature.name,
+          "(): argument '",
+          signature.params[i].name,
+          "' must be ",
+          signature.params[i].type_name(),
+          ", but found element of type ",
+          Py_TYPE(obj)->tp_name,
+          " at pos ",
+          idx + 1);
     }
   }
@@ -1101,10 +1108,11 @@ inline c10::Stream PythonArgs::stream(int i) {
   if (!args[i])
     return c10::Stream(
         c10::Stream::Default::DEFAULT, c10::Device(c10::DeviceType::CPU, -1));
-  if (!THPStream_Check(args[i])) {
-    throw TypeError(
-        "expected Stream object. Got '%s'", Py_TYPE(args[i])->tp_name);
-  }
+  TORCH_CHECK_TYPE(
+      THPStream_Check(args[i]),
+      "expected Stream object. Got '",
+      Py_TYPE(args[i])->tp_name,
+      "'");
   return c10::Stream::unpack3(
       ((THPStream*)args[i])->stream_id,
       static_cast<c10::DeviceIndex>(((THPStream*)args[i])->device_index),
@@ -643,11 +643,13 @@ Tensor legacy_sparse_tensor_generic_ctor_new(
     // new(sequence) binds to this signature but should be treated differently
     // unless the sequences is a torch.Size
     if (ctor_or_new == CtorOrNew::CTOR) {
-      throw TypeError(
+      TORCH_CHECK_TYPE(
+          false,
           "torch.sparse.SparseTensor(sequence) only accepts sizes. Please use torch.sparse_coo_tensor() "
           "or construct a strided tensor and convert it to sparse via to_sparse.");
     } else {
-      throw TypeError(
+      TORCH_CHECK_TYPE(
+          false,
           "SparseTensor.new(sequence) only accepts sizes. Please use torch.sparse_coo_tensor() "
           "or construct a strided tensor and convert it to sparse via to_sparse.");
     }
@@ -107,9 +107,7 @@ static std::vector<int64_t> to_aten_shape(int ndim, npy_intp* values) {

 static std::vector<int64_t> seq_to_aten_shape(PyObject* py_seq) {
   int ndim = PySequence_Length(py_seq);
-  if (ndim == -1) {
-    throw TypeError("shape and strides must be sequences");
-  }
+  TORCH_CHECK_TYPE(ndim != -1, "shape and strides must be sequences");
   auto result = std::vector<int64_t>(ndim);
   for (const auto i : c10::irange(ndim)) {
     auto item = THPObjectPtr(PySequence_GetItem(py_seq, i));
@@ -301,7 +299,8 @@ int aten_to_numpy_dtype(const ScalarType scalar_type) {
     case kBool:
       return NPY_BOOL;
     default:
-      throw TypeError("Got unsupported ScalarType %s", toString(scalar_type));
+      TORCH_CHECK_TYPE(
+          false, "Got unsupported ScalarType ", toString(scalar_type));
   }
 }

@@ -353,10 +352,12 @@ ScalarType numpy_dtype_to_aten(int dtype) {
   auto pytype = THPObjectPtr(PyArray_TypeObjectFromType(dtype));
   if (!pytype)
     throw python_error();
-  throw TypeError(
-      "can't convert np.ndarray of type %s. The only supported types are: "
-      "float64, float32, float16, complex64, complex128, int64, int32, int16, int8, uint64, uint32, uint16, uint8, and bool.",
-      ((PyTypeObject*)pytype.get())->tp_name);
+  TORCH_CHECK_TYPE(
+      false,
+      "can't convert np.ndarray of type ",
+      ((PyTypeObject*)pytype.get())->tp_name,
+      ". The only supported types are: "
+      "float64, float32, float16, complex64, complex128, int64, int32, int16, int8, uint64, uint32, uint16, uint8, and bool.");
 }

 bool is_numpy_int(PyObject* obj) {
@@ -382,17 +383,15 @@ at::Tensor tensor_from_cuda_array_interface(PyObject* obj) {
       THPObjectPtr(PyObject_GetAttrString(obj, "__cuda_array_interface__"));
   TORCH_INTERNAL_ASSERT(cuda_dict);

-  if (!PyDict_Check(cuda_dict.get())) {
-    throw TypeError("`__cuda_array_interface__` must be a dict");
-  }
+  TORCH_CHECK_TYPE(
+      PyDict_Check(cuda_dict.get()),
+      "`__cuda_array_interface__` must be a dict");

   // Extract the `obj.__cuda_array_interface__['shape']` attribute
   std::vector<int64_t> sizes;
   {
     PyObject* py_shape = PyDict_GetItemString(cuda_dict, "shape");
-    if (py_shape == nullptr) {
-      throw TypeError("attribute `shape` must exist");
-    }
+    TORCH_CHECK_TYPE(py_shape, "attribute `shape` must exist");
     sizes = seq_to_aten_shape(py_shape);
   }

@@ -403,9 +402,7 @@ at::Tensor tensor_from_cuda_array_interface(PyObject* obj) {
   int dtype_size_in_bytes;
   {
     PyObject* py_typestr = PyDict_GetItemString(cuda_dict, "typestr");
-    if (py_typestr == nullptr) {
-      throw TypeError("attribute `typestr` must exist");
-    }
+    TORCH_CHECK_TYPE(py_typestr, "attribute `typestr` must exist");
     // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
     PyArray_Descr* descr;
     TORCH_CHECK_VALUE(
@@ -420,12 +417,10 @@ at::Tensor tensor_from_cuda_array_interface(PyObject* obj) {
   void* data_ptr;
   {
     PyObject* py_data = PyDict_GetItemString(cuda_dict, "data");
-    if (py_data == nullptr) {
-      throw TypeError("attribute `shape` data exist");
-    }
-    if (!PyTuple_Check(py_data) || PyTuple_GET_SIZE(py_data) != 2) {
-      throw TypeError("`data` must be a 2-tuple of (int, bool)");
-    }
+    TORCH_CHECK_TYPE(py_data, "attribute `shape` data exist");
+    TORCH_CHECK_TYPE(
+        PyTuple_Check(py_data) && PyTuple_GET_SIZE(py_data) == 2,
+        "`data` must be a 2-tuple of (int, bool)");
     data_ptr = PyLong_AsVoidPtr(PyTuple_GET_ITEM(py_data, 0));
     if (data_ptr == nullptr && PyErr_Occurred()) {
       throw python_error();
@@ -434,10 +429,9 @@ at::Tensor tensor_from_cuda_array_interface(PyObject* obj) {
     if (read_only == -1) {
      throw python_error();
     }
-    if (read_only) {
-      throw TypeError(
-          "the read only flag is not supported, should always be False");
-    }
+    TORCH_CHECK_TYPE(
+        !read_only,
+        "the read only flag is not supported, should always be False");
   }

   // Extract the `obj.__cuda_array_interface__['strides']` attribute
@@ -445,11 +439,11 @@ at::Tensor tensor_from_cuda_array_interface(PyObject* obj) {
   {
     PyObject* py_strides = PyDict_GetItemString(cuda_dict, "strides");
     if (py_strides != nullptr && py_strides != Py_None) {
-      if (PySequence_Length(py_strides) == -1 ||
-          static_cast<size_t>(PySequence_Length(py_strides)) != sizes.size()) {
-        throw TypeError(
-            "strides must be a sequence of the same length as shape");
-      }
+      TORCH_CHECK_TYPE(
+          PySequence_Length(py_strides) != -1 &&
+              static_cast<size_t>(PySequence_Length(py_strides)) ==
+                  sizes.size(),
+          "strides must be a sequence of the same length as shape");
       strides = seq_to_aten_shape(py_strides);

       // __cuda_array_interface__ strides use bytes. Torch strides use element
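
At the Python level this change is intended to be behavior-preserving: the bindings above run inside HANDLE_TH_ERRORS / END_HANDLE_TH_ERRORS from torch/csrc/Exceptions.h, whose catch blocks translate c10::TypeError (which TORCH_CHECK_TYPE and C10_THROW_ERROR(TypeError, ...) throw) into a Python TypeError, the same exception the removed torch::TypeError was converted to. A hedged sketch of that wrapper shape; THPExample_check is a hypothetical binding, not a function touched by this commit:

    #include <torch/csrc/Exceptions.h>
    #include <torch/csrc/autograd/python_variable.h>

    static PyObject* THPExample_check(PyObject* self, PyObject* arg) {
      HANDLE_TH_ERRORS
      // Throws c10::TypeError on failure; the END_HANDLE_TH_ERRORS catch
      // blocks convert it into a Python TypeError for the caller.
      TORCH_CHECK_TYPE(
          THPVariable_Check(arg),
          "expected a Tensor, but got ",
          Py_TYPE(arg)->tp_name);
      Py_RETURN_NONE;
      END_HANDLE_TH_ERRORS
    }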