Summary:

Commits:

1. Add autograd function `NotImplemented` (subclass of `Error`) so the Python `grad_fn` prints more nicely. Since `Error` is used in `DelayedError` to implement `once_differentiable`, I can't just change its name. cc colesbury
2. Add printing for sparse tensors. Fixes https://github.com/pytorch/pytorch/issues/9412. cc weiyangfb
3. Add tests for sparse printing.

Examples:

```diff
In [2]: x = torch.sparse.FloatTensor(torch.arange(4).view(2,2), torch.randn(2, 2), [10, 10, 2])

In [3]: x
Out[3]:
- torch.sparse.FloatTensor of size (10,10,2) with indices:
- tensor([[0, 1],
-         [2, 3]])
- and values:
- tensor([[-1.1832, -0.5927],
-         [ 0.0831,  0.2511]])
+ tensor(indices=tensor([[0, 1],
+                        [2, 3]]),
+        values=tensor([[ 1.5081,  0.3451],
+                       [-0.0392,  0.4776]]),
+        size=(10, 10, 2), nnz=2, layout=torch.sparse_coo)

In [4]: x.requires_grad_()
Out[4]:
- torch.sparse.FloatTensor of size (10,10,2) with indices:
- tensor([[0, 1],
-         [2, 3]], grad_fn=<Error>)
- and values:
- tensor([[-1.1832, -0.5927],
-         [ 0.0831,  0.2511]], grad_fn=<Error>)
+ tensor(indices=tensor([[0, 1],
+                        [2, 3]]),
+        values=tensor([[ 1.5081,  0.3451],
+                       [-0.0392,  0.4776]]),
+        size=(10, 10, 2), nnz=2, layout=torch.sparse_coo, requires_grad=True)

In [5]: x + x
Out[5]:
- torch.sparse.FloatTensor of size (10,10,2) with indices:
- tensor([[0, 1],
-         [2, 3]], grad_fn=<Error>)
- and values:
- tensor([[-2.3664, -1.1855],
-         [ 0.1662,  0.5021]], grad_fn=<Error>)
+ tensor(indices=tensor([[0, 1],
+                        [2, 3]]),
+        values=tensor([[ 3.0162,  0.6902],
+                       [-0.0785,  0.9553]]),
+        size=(10, 10, 2), nnz=2, layout=torch.sparse_coo, grad_fn=<AddBackward0>)

In [6]: x.double()
Out[6]:
- torch.sparse.DoubleTensor of size (10,10,2) with indices:
- tensor([[0, 1],
-         [2, 3]], grad_fn=<Error>)
- and values:
- tensor([[-1.1832, -0.5927],
-         [ 0.0831,  0.2511]], dtype=torch.float64, grad_fn=<Error>)
+ tensor(indices=tensor([[0, 1],
+                        [2, 3]]),
+        values=tensor([[ 1.5081,  0.3451],
+                       [-0.0392,  0.4776]]),
+        size=(10, 10, 2), nnz=2, dtype=torch.float64, layout=torch.sparse_coo,
+        grad_fn=<NotImplemented>)

In [7]: x = torch.sparse.FloatTensor(torch.ones(0, 2, dtype=torch.long), torch.randn(2, 0), [0])

In [8]: x
Out[8]:
- torch.sparse.FloatTensor of size (0,) with indices:
- tensor([], size=(0, 2), dtype=torch.int64)
- and values:
- tensor([], size=(2, 0))
+ tensor(indices=tensor([], size=(0, 2)),
+        values=tensor([], size=(2, 0)),
+        size=(0,), nnz=2, layout=torch.sparse_coo)

In [9]: x = torch.sparse.FloatTensor(torch.ones(0, 2, dtype=torch.long), torch.randn(2), [])

In [10]: x
Out[10]:
- torch.sparse.FloatTensor of size () with indices:
- tensor([], size=(0, 2), dtype=torch.int64)
- and values:
- tensor([-0.0064,  0.8518])
+ tensor(indices=tensor([], size=(0, 2)),
+        values=tensor([ 0.9800, -0.5978]),
+        size=(), nnz=2, layout=torch.sparse_coo)
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/10181
Differential Revision: D9139845
Pulled By: SsnL
fbshipit-source-id: 353eebd55fac4049ed9bf85f8b0ee2c1418a744e
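For a quick check of the new repr on a current build, the public `torch.sparse_coo_tensor` constructor can be used in place of the legacy `torch.sparse.FloatTensor` form shown above. This is a minimal sketch added here for illustration, not part of the original PR description:

```python
import torch

# Sketch: reproduce the new sparse repr (assumes a build containing this PR).
i = torch.tensor([[0, 1], [2, 3]])   # 2 sparse dims, nnz = 2
v = torch.randn(2, 2)                # one dense dim of size 2
x = torch.sparse_coo_tensor(i, v, (10, 10, 2), requires_grad=True)

print(x)           # ... layout=torch.sparse_coo, requires_grad=True
print(x + x)       # ... grad_fn=<AddBackward0>
print(x.double())  # ... grad_fn=<NotImplemented>  (the new Error subclass)
```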
206 lines
7.0 KiB
C++
#include "Size.h"
|
|
|
|
#include <string>
|
|
#include "torch/csrc/utils/object_ptr.h"
|
|
#include "torch/csrc/utils/python_strings.h"
|
|
#include "torch/csrc/utils/python_tuples.h"
|
|
|
|
#include "torch/csrc/autograd/python_variable.h"
|
|
#include "torch/csrc/jit/tracer.h"
|
|
|
|
struct THPSize {
  PyTupleObject tuple;
};

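// Builds a torch.Size for `var`. Outside of tracing this is simply the tuple
// of integer sizes; while tracing, each dimension is stored as a wrapped
// 0-dim size tensor so that uses of the shape are recorded in the trace.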
PyObject * THPSize_New(const torch::autograd::Variable& var)
{
  if (!torch::jit::tracer::isTracing()) {
    auto sizes = var.sizes();
    return THPSize_NewFromSizes(var.dim(), sizes.data());
  }
  auto self = THPObjectPtr(THPSizeType.tp_alloc(&THPSizeType, var.dim()));
  if (!self) throw python_error();

  for (int64_t i = 0; i < var.dim(); ++i) {
    PyObject *py_size_tensor = THPVariable_Wrap(torch::jit::tracer::getSizeOf(var, i));
    if (!py_size_tensor) throw python_error();
    PyTuple_SET_ITEM(self.get(), i, py_size_tensor);
  }

  return self.release();
}

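// Builds a torch.Size from a raw array of `dim` sizes.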
PyObject * THPSize_NewFromSizes(int dim, const int64_t *sizes)
{
  auto self = THPObjectPtr(THPSizeType.tp_alloc(&THPSizeType, dim));
  if (!self) throw python_error();
  THPUtils_packInt64Array(self, dim, sizes);
  return self.release();
}

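// Checks whether `item` is a 0-dim Variable that has a value recorded in the
// current trace (i.e. a size tensor produced by the tracer).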
static bool isTracedZeroDimVar(PyObject *item) {
  if (!THPVariable_Check(item)) return false;
  auto & var = reinterpret_cast<THPVariable*>(item)->cdata;
  return var.dim() == 0 && torch::jit::tracer::getValueTrace(var);
}

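// tp_new for torch.Size: defer to tuple's constructor, then require every
// item to be an int. Plain ints pass through, traced 0-dim size tensors are
// kept while tracing, and anything else must be convertible via __index__.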
static PyObject * THPSize_pynew(PyTypeObject *type, PyObject *args, PyObject *kwargs)
{
  THPObjectPtr self(PyTuple_Type.tp_new(type, args, kwargs));
  if (self) {
    for (Py_ssize_t i = 0; i < PyTuple_Size(self); ++i) {
      PyObject *item = PyTuple_GET_ITEM(self.get(), i);
      if (THPUtils_checkLong(item)) {
        continue;
      }
      if (torch::jit::tracer::isTracing() && isTracedZeroDimVar(item)) {
        continue;
      }
      // item.__index__() works with 0-dim tensors and tensors with one element
      THPObjectPtr number(PyNumber_Index(item));
      if (number && THPUtils_checkLong(number.get())) {
        Py_INCREF(number.get());
        auto status = PyTuple_SetItem(self, i, number.get());
        if (status != 0) {
          throw python_error();
        }
        continue;
      }
      return PyErr_Format(PyExc_TypeError,
                          "torch.Size() takes an iterable of 'int' (item %zd is '%s')",
                          i, Py_TYPE(item)->tp_name);
    }
  }
  return self.release();
}

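// tp_repr: formats the size as "torch.Size([d0, d1, ...])".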
static PyObject * THPSize_repr(THPSize *self)
{
  HANDLE_TH_ERRORS
  std::string repr("torch.Size([");
  for (Py_ssize_t i = 0; i < PyTuple_Size((PyObject*)self); ++i) {
    if (i != 0) {
      repr += ", ";
    }
    repr += std::to_string(PyLong_AsLong(PyTuple_GET_ITEM(self, i)));
  }
  repr += "])";
  return THPUtils_packString(repr);
  END_HANDLE_TH_ERRORS
}

extern PyTypeObject THPSizeType;

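// Wraps a tuple slot function so that tuple results are converted back into
// torch.Size; non-tuple results are passed through unchanged.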
template<typename FnType, FnType fn, typename ...Args>
static PyObject* wrap_tuple_fn(Args ... args)
{
  THPObjectPtr result((*fn)(std::forward<Args>(args)...));
  if (!result) return nullptr;
  if (PyTuple_Check(result.get())) {
    return PyObject_CallFunctionObjArgs((PyObject*)&THPSizeType, result.get(), nullptr);
  }
  return result.release();
}

// We use an anonymous namespace instead of static to work around
// (what @peterjc123 thinks is) a bug in Visual Studio
namespace {
  auto sq_concat = PyTuple_Type.tp_as_sequence->sq_concat;
  auto sq_repeat = PyTuple_Type.tp_as_sequence->sq_repeat;
#if PY_MAJOR_VERSION == 2
  auto sq_slice = PyTuple_Type.tp_as_sequence->sq_slice;
#endif
  binaryfunc mp_subscript = PyTuple_Type.tp_as_mapping->mp_subscript;
}

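// Sequence and mapping protocols inherit tuple behaviour, but the slots that
// return new tuples (concat, repeat, subscript/slice) go through
// wrap_tuple_fn so their results are torch.Size instances.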
static PySequenceMethods THPSize_as_sequence = {
  PyTuple_Type.tp_as_sequence->sq_length,
  wrap_tuple_fn<decltype(&sq_concat), &sq_concat>,
  wrap_tuple_fn<decltype(&sq_repeat), &sq_repeat>,
  PyTuple_Type.tp_as_sequence->sq_item,
#if PY_MAJOR_VERSION == 2
  wrap_tuple_fn<decltype(&sq_slice), &sq_slice>,
#else
  0,                                          /* sq_slice */
#endif
  0,                                          /* sq_ass_item */
  0,                                          /* sq_ass_slice */
  PyTuple_Type.tp_as_sequence->sq_contains
};

static PyMappingMethods THPSize_as_mapping = {
  PyTuple_Type.tp_as_mapping->mp_length,
  wrap_tuple_fn<decltype(&mp_subscript), &mp_subscript>,
  0
};

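// size.numel(): product of all dimensions, i.e. the number of elements of a
// tensor with this size.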
static PyObject *THPSize_numel(THPSize *self)
{
  HANDLE_TH_ERRORS
  int64_t numel = 1;
  for (Py_ssize_t i = 0; i < PyTuple_Size((PyObject*)self); ++i) {
    numel *= PyLong_AsLong(PyTuple_GET_ITEM(self, i));
  }
  return THPUtils_packInt64(numel);
  END_HANDLE_TH_ERRORS
}

static PyMethodDef THPSize_methods[] = {
  {"numel", (PyCFunction)THPSize_numel, METH_NOARGS, nullptr},
  {nullptr}
};

PyTypeObject THPSizeType = {
  PyVarObject_HEAD_INIT(nullptr, 0)
  "torch.Size",                          /* tp_name */
  sizeof(THPSize),                       /* tp_basicsize */
  0,                                     /* tp_itemsize */
  0,                                     /* tp_dealloc */
  0,                                     /* tp_print */
  0,                                     /* tp_getattr */
  0,                                     /* tp_setattr */
  0,                                     /* tp_reserved */
  (reprfunc)THPSize_repr,                /* tp_repr */
  0,                                     /* tp_as_number */
  &THPSize_as_sequence,                  /* tp_as_sequence */
  &THPSize_as_mapping,                   /* tp_as_mapping */
  0,                                     /* tp_hash */
  0,                                     /* tp_call */
  0,                                     /* tp_str */
  0,                                     /* tp_getattro */
  0,                                     /* tp_setattro */
  0,                                     /* tp_as_buffer */
  Py_TPFLAGS_DEFAULT,                    /* tp_flags */
  nullptr,                               /* tp_doc */
  0,                                     /* tp_traverse */
  0,                                     /* tp_clear */
  0,                                     /* tp_richcompare */
  0,                                     /* tp_weaklistoffset */
  0,                                     /* tp_iter */
  0,                                     /* tp_iternext */
  THPSize_methods,                       /* tp_methods */
  0,                                     /* tp_members */
  0,                                     /* tp_getset */
  &PyTuple_Type,                         /* tp_base */
  0,                                     /* tp_dict */
  0,                                     /* tp_descr_get */
  0,                                     /* tp_descr_set */
  0,                                     /* tp_dictoffset */
  0,                                     /* tp_init */
  0,                                     /* tp_alloc */
  THPSize_pynew,                         /* tp_new */
};

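// Readies the type and exposes it on the given module as torch.Size.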
void THPSize_init(PyObject *module)
{
  if (PyType_Ready(&THPSizeType) < 0) {
    throw python_error();
  }
  Py_INCREF(&THPSizeType);
  if (PyModule_AddObject(module, "Size", (PyObject*)&THPSizeType) < 0) {
    throw python_error();
  }
}