pytorch/tools/autograd/templates/python_variable_methods.cpp
Adam Paszke 90e31f4896 Improve tracer warnings (#11545)
Summary:
Also, fix a performance bug in `ensureUnique`. Previously it formatted the warning string even though we weren't tracing, so all that work would *always* happen in the hot path and be for nothing.
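
A rough sketch of the pattern behind the `ensureUnique` fix (names such as `isTracing` and `emitTracerWarning` are illustrative placeholders, not the exact internals): check the tracing state first, and only then pay for formatting the message.
```cpp
#include <sstream>
#include <string>

bool isTracing();                                // assumed: cheap tracing-state check
void emitTracerWarning(const std::string& msg);  // assumed warning sink

void ensureUnique(const char* op_name, int live_refs) {
  if (!isTracing()) {
    return;  // hot path: no string formatting, no allocation
  }
  std::ostringstream ss;
  ss << "There are " << live_refs << " live references to the data region "
     << "being modified when tracing in-place operator " << op_name << ".";
  emitTracerWarning(ss.str());
}
```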

A sample of what the new warnings look like:
```
tmp.py:4: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!
  int(x)
tmp.py:5: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.
  torch.tensor([1.])
tmp.py:6: TracerWarning: There are 2 live references to the data region being modified when tracing in-place operator add_. This might cause the trace to be incorrect, because all other views that also reference this data will not reflect this change in the trace! On the other hand, if all other views use the same memory, but are disjoint (e.g. are outputs of torch.split), this might still be safe.
  torch.split(y, 2, dim=1)[0].add_(2)

```
Pull Request resolved: https://github.com/pytorch/pytorch/pull/11545

Differential Revision: D9782975

Pulled By: apaszke

fbshipit-source-id: 5b3abd31366e59c69e0b7ff278042b5563deb5a9
2018-09-11 22:10:32 -07:00

// ${generated_comment}

#include <Python.h>
#include "torch/csrc/DynamicTypes.h"
#include "torch/csrc/Exceptions.h"
#include "torch/csrc/Size.h"
#include "torch/csrc/autograd/python_variable.h"
#include "torch/csrc/autograd/utils/python_error_messages.h"
#include "torch/csrc/autograd/utils/wrap_outputs.h"
#include "torch/csrc/autograd/utils/python_arg_parsing.h"
#include "torch/csrc/jit/tracer.h"
#ifdef USE_CUDA
#include "torch/csrc/cuda/Stream.h"
#endif
#include "torch/csrc/utils/cuda_lazy_init.h"
#include "torch/csrc/utils/object_ptr.h"
#include "torch/csrc/utils/python_arg_parser.h"
#include "torch/csrc/utils/python_numbers.h"
#include "torch/csrc/utils/python_strings.h"
#include "torch/csrc/utils/python_tuples.h"
#include "torch/csrc/utils/tensor_apply.h"
#include "torch/csrc/utils/tensor_conversion_dispatch.h"
#include "torch/csrc/utils/tensor_list.h"
#include "torch/csrc/utils/tensor_new.h"
#include "torch/csrc/utils/tensor_numpy.h"
#include "torch/csrc/utils/tensor_types.h"
#include <ATen/ATen.h>
#include <ATen/core/optional.h>
#include "python_variable_methods_dispatch.h"
#include <stdexcept>

using at::DeviceGuard;
using at::Backend;
using at::Scalar;
using at::ScalarType;
using at::Tensor;
using namespace torch::autograd::utils;

namespace torch { namespace autograd {

static PyObject * THPVariable_apply_(PyObject* self, PyObject* arg)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  if (self_.requires_grad()) {
    throw std::runtime_error(
      "Can't call apply_() on Variable that requires grad. Use "
      "var.detach().apply_() instead.");
  }
  return THPVariable_Wrap(torch::utils::apply_(self_, arg));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_size(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "size(int64_t dim)",
    "size()",
  });
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  if (r.idx == 0) {
    if (jit::tracer::isTracing()) {
      return wrap(jit::tracer::getSizeOf(self_, r.toInt64(0)));
    } else {
      return wrap(self_.size(r.toInt64(0)));
    }
  } else if (r.idx == 1) {
    // we can't do the normal wrapping here because IntList maps to both
    // torch.Size and tuple in python.
    return THPSize_New(self_);
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_stride(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "stride(int64_t dim)",
    "stride()",
  });
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  if (r.idx == 0) {
    return wrap(self_.stride(r.toInt64(0)));
  } else if (r.idx == 1) {
    // yes, this is called strides in ATen.
    IntList strides = self_.strides();
    // we can't do the normal wrapping here because IntList maps to both
    // torch.Size and tuple in python
    return THPUtils_packInt64Array(strides.size(), strides.data());
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_dim(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  return THPUtils_packInt64(self_.dim());
  END_HANDLE_TH_ERRORS
}

static Tensor dispatch_contiguous(const Tensor & self) {
  AutoNoGIL no_gil;
  DeviceGuard device_guard(self);
  return self.contiguous();
}

static PyObject * THPVariable_contiguous(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  // avoids touching the GIL or current device if self is already contiguous
  if (self_.is_contiguous()) {
    Py_INCREF(self);
    return self;
  }
  return THPVariable_Wrap(dispatch_contiguous(self_));
  END_HANDLE_TH_ERRORS
}

static Tensor dispatch_copy_(Tensor & self, const Tensor & other, bool non_blocking) {
  AutoNoGIL no_gil;
  DeviceGuard device_guard(self);
  return self.copy_(other, non_blocking);
}

static PyObject * THPVariable_copy_(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "copy_(Tensor other, bool non_blocking=False)",
    "copy_(Tensor other, bool async=False)|deprecated"
  });
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  ParsedArgs<2> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  return THPVariable_Wrap(dispatch_copy_(self_, r.tensor(0), r.toBool(1)));
  END_HANDLE_TH_ERRORS
}
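
// Scalar extraction helpers: each one releases the GIL, switches to self's
// device, and only accepts single-element tensors.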
static double dispatch_to_CDouble(const Tensor & self) {
  AutoNoGIL no_gil;
  DeviceGuard device_guard(self);
  if (self.numel() != 1) {
    throw ValueError("only one element tensors can be converted to Python scalars");
  }
  return self.toCDouble();
}

static std::complex<double> dispatch_to_CComplexDouble(const Tensor & self) {
  AutoNoGIL no_gil;
  DeviceGuard device_guard(self);
  if (self.numel() != 1) {
    throw ValueError("only one element tensors can be converted to Python scalars");
  }
  return self.toCComplexDouble();
}

static int64_t dispatch_to_CLong(const Tensor & self) {
  AutoNoGIL no_gil;
  DeviceGuard device_guard(self);
  if (self.numel() != 1) {
    throw ValueError("only one element tensors can be converted to Python scalars");
  }
  return self.toCLong();
}

static PyObject * THPVariable_float_scalar(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  jit::tracer::warn("Converting a tensor to a Python float", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  return wrap(dispatch_to_CDouble(self_));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_integral_scalar(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  jit::tracer::warn("Converting a tensor to a Python integer", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  if (isFloatingType(self_.type().scalarType())) {
    // we can't dispatch to toCLong here because we want to avoid ATen overflow checks;
    // the python integral type (long in python2) can't overflow.
    return THPUtils_packDoubleAsInt(dispatch_to_CDouble(self_));
  } else {
    return wrap(dispatch_to_CLong(self_));
  }
  END_HANDLE_TH_ERRORS
}

// This is the __index__ function in Python which is similar to __int__, but
// called when used as a slice.
static PyObject * THPVariable_index_scalar(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  jit::tracer::warn("Converting a tensor to a Python index", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  // TODO: change the condition to `self_.dim() != 0` once we expose scalars
  // in PyTorch.
  if (!isIntegralType(self_.type().scalarType()) || self_.numel() != 1) {
    throw TypeError("only integer tensors of a single element can be converted to an index");
  }
  return wrap(dispatch_to_CLong(self_));
  END_HANDLE_TH_ERRORS
}
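
// operator.invert (~x) is only defined for byte (uint8) tensors and is
// computed arithmetically as (1 - self).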
static Tensor dispatch_invert(const Tensor & self) {
  AutoNoGIL no_gil;
  DeviceGuard device_guard(self);
  return 1 - self;
}

static PyObject * THPVariable_invert(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  if (self_.type().scalarType() != at::kByte) {
    throw TypeError("~ (operator.invert) is only implemented on byte tensors");
  }
  return THPVariable_Wrap(dispatch_invert(self_));
  END_HANDLE_TH_ERRORS
}
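
// dispatch_to overloads backing .cpu(), .cuda(), .to() and the dtype
// conversion methods below: device-only, dtype-only, and device + dtype.
// Each releases the GIL before calling into ATen.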
static Tensor dispatch_to(const Tensor & self, Device device, bool non_blocking) {
  AutoNoGIL no_gil;
  return self.to(device, non_blocking);
}

static Tensor dispatch_to(const Tensor & self, ScalarType dtype, bool non_blocking) {
  AutoNoGIL no_gil;
  return self.to(dtype, non_blocking);
}

static Tensor dispatch_to(const Tensor & self, Device device, ScalarType dtype, bool non_blocking) {
  AutoNoGIL no_gil;
  return self.to(device, dtype, non_blocking);
}

static PyObject * THPVariable_cpu(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  return THPVariable_Wrap(dispatch_to(self_, at::Device(at::DeviceType::CPU), false));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_cuda(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "cuda(Device? device=None, bool non_blocking=False)",
    "cuda(Device? device=None, bool async=False)|deprecated"
  });
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  ParsedArgs<2> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  auto device = r.isNone(0) ? at::Device(at::DeviceType::CUDA) : r.device(0);
  AT_CHECK(device.is_cuda(), "Invalid device, must be cuda device");
  torch::utils::cuda_lazy_init();
  return THPVariable_Wrap(dispatch_to(self_, device, r.toBool(1)));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_to_type(PyObject* self, ScalarType scalarType) {
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  return THPVariable_Wrap(dispatch_to(self_, scalarType, false));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_byte(PyObject* self, PyObject* args) {
  return THPVariable_to_type(self, ScalarType::Byte);
}

static PyObject * THPVariable_char(PyObject* self, PyObject* args) {
  return THPVariable_to_type(self, ScalarType::Char);
}

static PyObject * THPVariable_double(PyObject* self, PyObject* args) {
  return THPVariable_to_type(self, ScalarType::Double);
}

static PyObject * THPVariable_float(PyObject* self, PyObject* args) {
  return THPVariable_to_type(self, ScalarType::Float);
}

static PyObject * THPVariable_half(PyObject* self, PyObject* args) {
  return THPVariable_to_type(self, ScalarType::Half);
}

static PyObject * THPVariable_int(PyObject* self, PyObject* args) {
  return THPVariable_to_type(self, ScalarType::Int);
}

static PyObject * THPVariable_long(PyObject* self, PyObject* args) {
  return THPVariable_to_type(self, ScalarType::Long);
}

static PyObject * THPVariable_short(PyObject* self, PyObject* args) {
  return THPVariable_to_type(self, ScalarType::Short);
}

static PyObject * THPVariable_element_size(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  size_t element_size = self_.type().elementSizeInBytes();
  return THPUtils_packInt64(element_size);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_numpy(PyObject* self, PyObject* arg)
{
  HANDLE_TH_ERRORS
  jit::tracer::warn("Converting a tensor to a NumPy array", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  if (self_.requires_grad()) {
    throw std::runtime_error(
      "Can't call numpy() on Variable that requires grad. "
      "Use var.detach().numpy() instead.");
  }
  return torch::utils::tensor_to_numpy(self_.data());
  END_HANDLE_TH_ERRORS
}

// TODO: move this to ATen. We would need to expose Stream objects in ATen.
static PyObject * THPVariable_record_stream(PyObject* self, PyObject* arg)
{
  HANDLE_TH_ERRORS
#ifdef USE_CUDA
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  if (!THCPStream_Check(arg)) {
    return PyErr_Format(PyExc_TypeError, "expected Stream object");
  }
  void* data = self_.data_ptr();
  THCCachingAllocator_recordStream(data, ((THCPStream*)arg)->cdata);
  Py_RETURN_NONE;
#else
  throw std::runtime_error("PyTorch compiled without CUDA support");
#endif
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_requires_grad_(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "requires_grad_(bool requires_grad=True)",
  });
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  auto requires_grad = r.toBool(0);
  // should we throw if requires_grad is true? var.requires_grad = True throws here
  // but it's nice to let this be a no-op.
  if (!self_.is_leaf() && !requires_grad) {
    throw std::runtime_error(autograd::utils::requires_grad_leaf_error(requires_grad));
  }
  if (requires_grad && !self_.is_floating_point()) {
    throw std::runtime_error("only Tensors of floating point dtype can require gradients");
  }
  self_.set_requires_grad(requires_grad);
  return THPVariable_Wrap(self_);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_item(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  jit::tracer::warn("Converting a tensor to a Python number", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  if (self_.is_floating_point()) {
    return wrap(dispatch_to_CDouble(self_));
  } else if (self_.is_complex()) {
    return wrap(dispatch_to_CComplexDouble(self_));
  } else {
    return wrap(dispatch_to_CLong(self_));
  }
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_map_(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({ "map_(Tensor other, PyObject* callable)" });
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  ParsedArgs<2> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  Variable other = r.tensor(0);
  if (self_.requires_grad() || other.requires_grad()) {
    throw std::runtime_error(
      "Can't call map_() on Variable that requires grad. Use "
      "var.detach().map_() instead.");
  }
  return THPVariable_Wrap(torch::utils::map_(self_, other, r.pyobject(1)));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_map2_(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({ "map2_(Tensor x, Tensor y, PyObject* callable)" });
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  Variable x = r.tensor(0);
  Variable y = r.tensor(1);
  if (self_.requires_grad() || x.requires_grad() || y.requires_grad()) {
    throw std::runtime_error(
      "Can't call map2_() on Variable that requires grad. Use "
      "var.detach().map2_() instead.");
  }
  return THPVariable_Wrap(torch::utils::map2_(self_, x, y, r.pyobject(2)));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_new(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  DeviceGuard device_guard(self_);
  return THPVariable_Wrap(torch::utils::legacy_tensor_new(self_.type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_new_empty(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  DeviceGuard device_guard(self_);
  return THPVariable_Wrap(torch::utils::new_empty(self_.type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_new_full(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  DeviceGuard device_guard(self_);
  return THPVariable_Wrap(torch::utils::new_full(self_.type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_new_ones(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  DeviceGuard device_guard(self_);
  return THPVariable_Wrap(torch::utils::new_ones(self_.type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_new_tensor(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  DeviceGuard device_guard(self_);
  return THPVariable_Wrap(torch::utils::new_tensor(self_.type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_new_zeros(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  DeviceGuard device_guard(self_);
  return THPVariable_Wrap(torch::utils::new_zeros(self_.type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_storage(PyObject* self, PyObject* arg)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  return createPyObject(self_.storage());
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_storage_type(PyObject* self, PyObject* arg)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  auto storage = THPObjectPtr(createPyObject(self_.storage()));
  auto storage_type = (PyObject*)Py_TYPE(storage);
  Py_INCREF(storage_type);
  return storage_type;
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_to(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  auto parsed = parse_to_conversion(args, kwargs);
  auto& device = std::get<0>(parsed);
  auto& scalarType = std::get<1>(parsed);
  auto non_blocking = std::get<2>(parsed);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  if (device && device->is_cuda()) {
    torch::utils::cuda_lazy_init();
  }
  if (!device && !scalarType) {
    Py_INCREF(self);
    return self;
  } else if (!device) {
    return THPVariable_Wrap(dispatch_to(self_, *scalarType, non_blocking));
  } else if (!scalarType) {
    return THPVariable_Wrap(dispatch_to(self_, *device, non_blocking));
  } else {
    return THPVariable_Wrap(dispatch_to(self_, *device, *scalarType, non_blocking));
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_tolist(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  jit::tracer::warn("Converting a tensor to a Python list", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  return torch::utils::tensor_to_list(self_.data());
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_type(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "type(PyObject* dtype=None, bool non_blocking=False)",
    "type(PyObject* dtype=None, bool async=False)|deprecated"
  });
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  ParsedArgs<2> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  if (r.isNone(0)) {
    return THPUtils_packString(torch::utils::type_to_string(self_.type()));
  }
  auto obj = r.pyobject(0);
  std::string type_name;
  bool is_dtype = false;
  if (PyType_Check(obj)) {
    if (obj == THPVariableClass) {
      type_name = "torch.Tensor";
    } else {
      type_name = ((PyTypeObject*)obj)->tp_name;
    }
  } else if (THPUtils_checkString(obj)) {
    type_name = THPUtils_unpackString(obj);
  } else if (THPDtype_Check(obj)) {
    is_dtype = true;
  } else {
    throw TypeError("dtype must be a type, str, or dtype object");
  }
  auto self_device_type = torch::getDeviceType(self_.type());
  auto& type = is_dtype ? torch::getVariableType(r.scalartype(0), *torch::getLayout(self_.type().backend()), self_device_type) :
      torch::utils::type_from_string(type_name);
  return THPVariable_Wrap(torch::utils::dispatch_type_conversion(self_, type, at::nullopt, r.toBool(1)));
  END_HANDLE_TH_ERRORS
}

// generated methods start here
${py_methods}

static PyObject * THPVariable_bool(PyObject* self, PyObject* args) {
  jit::tracer::warn("Converting a tensor to a Python boolean", jit::tracer::WARN_PYTHON_DATAFLOW);
  return THPVariable_is_nonzero(self, args);
}
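
// Method table exposed on torch.Tensor. The handwritten bindings above cover
// special methods and utilities; ${py_method_defs} below is filled in by the
// autograd code generator with entries for the generated methods.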
PyMethodDef variable_methods[] = {
  {"__add__", (PyCFunction)THPVariable_add, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__radd__", (PyCFunction)THPVariable_add, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__iadd__", (PyCFunction)THPVariable_add_, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__rmul__", (PyCFunction)THPVariable_mul, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__mul__", (PyCFunction)THPVariable_mul, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__imul__", (PyCFunction)THPVariable_mul_, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__sub__", (PyCFunction)THPVariable_sub, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__isub__", (PyCFunction)THPVariable_sub_, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__div__", (PyCFunction)THPVariable_div, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__truediv__", (PyCFunction)THPVariable_div, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__idiv__", (PyCFunction)THPVariable_div_, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__mod__", (PyCFunction)THPVariable_remainder, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__bool__", (PyCFunction)THPVariable_bool, METH_NOARGS, NULL},
  {"__float__", (PyCFunction)THPVariable_float_scalar, METH_NOARGS, NULL},
  {"__int__", (PyCFunction)THPVariable_integral_scalar, METH_NOARGS, NULL},
  {"__long__", (PyCFunction)THPVariable_integral_scalar, METH_NOARGS, NULL},
  {"__index__", (PyCFunction)THPVariable_index_scalar, METH_NOARGS, NULL},
  {"__nonzero__", (PyCFunction)THPVariable_bool, METH_NOARGS, NULL},
  {"__invert__", (PyCFunction)THPVariable_invert, METH_NOARGS, NULL},
  {"__matmul__", (PyCFunction)THPVariable_matmul, METH_VARARGS | METH_KEYWORDS, NULL},
  {"apply_", (PyCFunction)THPVariable_apply_, METH_O, NULL},
  {"byte", (PyCFunction)THPVariable_byte, METH_NOARGS, NULL},
  {"char", (PyCFunction)THPVariable_char, METH_NOARGS, NULL},
  {"contiguous", (PyCFunction)THPVariable_contiguous, METH_NOARGS, NULL},
  {"copy_", (PyCFunction)THPVariable_copy_, METH_VARARGS | METH_KEYWORDS, NULL},
  {"cpu", (PyCFunction)THPVariable_cpu, METH_NOARGS, NULL},
  {"cuda", (PyCFunction)THPVariable_cuda, METH_VARARGS | METH_KEYWORDS, NULL},
  {"dim", (PyCFunction)THPVariable_dim, METH_NOARGS, NULL},
  {"double", (PyCFunction)THPVariable_double, METH_NOARGS, NULL},
  {"element_size", (PyCFunction)THPVariable_element_size, METH_NOARGS, NULL},
  {"float", (PyCFunction)THPVariable_float, METH_NOARGS, NULL},
  {"half", (PyCFunction)THPVariable_half, METH_NOARGS, NULL},
  {"int", (PyCFunction)THPVariable_int, METH_NOARGS, NULL},
  {"item", (PyCFunction)THPVariable_item, METH_NOARGS, NULL},
  {"long", (PyCFunction)THPVariable_long, METH_NOARGS, NULL},
  {"map_", (PyCFunction)THPVariable_map_, METH_VARARGS | METH_KEYWORDS, NULL},
  {"map2_", (PyCFunction)THPVariable_map2_, METH_VARARGS | METH_KEYWORDS, NULL},
  {"ndimension", (PyCFunction)THPVariable_dim, METH_NOARGS, NULL},
  {"nelement", (PyCFunction)THPVariable_numel, METH_NOARGS, NULL},
  {"new", (PyCFunction)THPVariable_new, METH_VARARGS | METH_KEYWORDS, NULL},
  {"new_empty", (PyCFunction)THPVariable_new_empty, METH_VARARGS | METH_KEYWORDS, NULL},
  {"new_full", (PyCFunction)THPVariable_new_full, METH_VARARGS | METH_KEYWORDS, NULL},
  {"new_ones", (PyCFunction)THPVariable_new_ones, METH_VARARGS | METH_KEYWORDS, NULL},
  {"new_tensor", (PyCFunction)THPVariable_new_tensor, METH_VARARGS | METH_KEYWORDS, NULL},
  {"new_zeros", (PyCFunction)THPVariable_new_zeros, METH_VARARGS | METH_KEYWORDS, NULL},
  {"numpy", (PyCFunction)THPVariable_numpy, METH_NOARGS, NULL},
  {"record_stream", (PyCFunction)THPVariable_record_stream, METH_O, NULL},
  {"requires_grad_", (PyCFunction)THPVariable_requires_grad_, METH_VARARGS | METH_KEYWORDS, NULL},
  {"short", (PyCFunction)THPVariable_short, METH_NOARGS, NULL},
  {"size", (PyCFunction)THPVariable_size, METH_VARARGS | METH_KEYWORDS, NULL},
  {"storage", (PyCFunction)THPVariable_storage, METH_NOARGS, NULL},
  {"storage_type", (PyCFunction)THPVariable_storage_type, METH_NOARGS, NULL},
  {"stride", (PyCFunction)THPVariable_stride, METH_VARARGS | METH_KEYWORDS, NULL},
  {"to", (PyCFunction)THPVariable_to, METH_VARARGS | METH_KEYWORDS, NULL},
  {"tolist", (PyCFunction)THPVariable_tolist, METH_NOARGS, NULL},
  {"type", (PyCFunction)THPVariable_type, METH_VARARGS | METH_KEYWORDS, NULL},
  ${py_method_defs}
  {NULL}
};

}} // namespace torch::autograd