// pytorch/torch/csrc/generic/methods/Tensor.cwrap
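//
// Python method declarations for THPTensor. Each [[ ... ]] block is a
// YAML-like declaration consumed by the cwrap code generator (tools/cwrap),
// which emits a wrapper that parses the Python arguments and dispatches to
// the matching TH/THC call. A rough guide to the flags used below:
//   - only_register: True  -- no wrapper is generated; the hand-written
//     function next to the declaration is only added to the method table.
//   - with_stateless: True -- also expose the function on the stateless
//     (torch.*) interface; only_stateless: True exposes it only there.
//   - output: True         -- the argument is allocated by the wrapper and
//     returned, rather than taken from the caller.
//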
// TODO: check that there are no args
[[
  name: THPTensor_(elementSize)
  python_name: element_size
  only_register: True
]]
static PyObject * THPTensor_(elementSize)(THPTensor *self, PyObject *args)
{
  return PyLong_FromLong(THStorage_(elementSize)(LIBRARY_STATE_NOARGS));
}
// TODO: check that there are no args
[[
  name: THPTensor_(storage)
  python_name: storage
  only_register: True
]]
static PyObject * THPTensor_(storage)(THPTensor *self, PyObject *args)
{
  // TODO: memory leak on error
  THStorage *result = THTensor_(storage)(LIBRARY_STATE self->cdata);
  if (result == NULL)
    Py_RETURN_NONE;
  THStorage_(retain)(LIBRARY_STATE result);
  THStoragePtr _tmp = result;
  PyObject *ret = THPStorage_(New)(result);
  _tmp.release();
  return ret;
}
[[
  name: storageOffset
  python_name: storage_offset
  return: long
  arguments:
    - THTensor* self
]]
[[
  name: nDimension
  python_name: ndimension
  return: long
  arguments:
    - THTensor* self
]]
[[
  name: THPTensor_(nDimension)
  python_name: dim
  only_register: True
  method_flags: METH_KEYWORDS
]]
[[
  python_name: index
  name: THPTensor_(getValue)<true>
  only_register: True
  override_method_flags: METH_O
]]
[[
  python_name: _set_index
  name: THPTensor_(setIndex)
  only_register: True
]]
PyObject * THPTensor_(setIndex)(THPTensor *self, PyObject *args)
{
  THPUtils_assert(PyTuple_GET_SIZE(args) == 2, "set_index takes exactly two "
      "arguments (%d given)", (int)PyTuple_GET_SIZE(args));
  if (THPTensor_(setValue)<true>(self, PyTuple_GET_ITEM(args, 0), PyTuple_GET_ITEM(args, 1)) != 0)
    return NULL;
  Py_RETURN_NONE;
}
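// resize_, zeros and ones take their shape through a THSize* argument marked
// long_args: True, which appears to let the generated wrapper accept the
// sizes either spread out as integers or as a single size sequence, e.g.
// roughly x.resize_(2, 3) as well as x.resize_(torch.Size([2, 3])).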
[[
  name: resize_
  return: self
  cname: resize
  arguments:
    - THTensor* self
    - arg: THSize* size
      long_args: True
    - CONSTANT NULL
]]
[[
  name: zeros
  only_stateless: True
  return: argument 0
  arguments:
    - arg: THTensor* result
      output: True
    - arg: THSize* size
      long_args: True
]]
[[
  name: ones
  only_stateless: True
  return: argument 0
  arguments:
    - arg: THTensor* result
      output: True
    - arg: THSize* size
      long_args: True
]]
[[
  name: numel
  return: long
  with_stateless: True
  arguments:
    - THTensor* self
]]
[[
  name: THPTensor_(numel)
  python_name: nelement
  only_register: True
  method_flags: METH_KEYWORDS
]]
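// options: declares several C-level overloads behind a single Python method;
// the generated wrapper picks one based on the number and types of the
// arguments. For set_ this covers, roughly: set_(tensor), set_() (detach from
// any storage), set_(storage), and set_(storage, offset, size[, stride]).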
[[
  name: set_
  cname: set
  return: argument 0
  options:
    - cname: set
      arguments:
        - THTensor* self
        - THTensor* source
    - cname: setStorage
      arguments:
        - THTensor* self
        - CONSTANT NULL, 0, NULL, NULL
    - cname: setStorage
      before_call: THLongStoragePtr __storage_size = THLongStorage_newWithSize1(THStorage_(size)(LIBRARY_STATE ((THPStorage*)$arg1)->cdata));
      arguments:
        - THTensor* self
        - THStorage* storage
        - CONSTANT 0
        - CONSTANT __storage_size.get()
        - CONSTANT NULL
    - cname: setStorage
      arguments:
        - THTensor* self
        - THStorage* sourceStorage
        - long storage_offset
        - arg: THSize* size
          long_args: True
        - CONSTANT NULL
    - cname: setStorage
      arguments:
        - THTensor* self
        - THStorage* sourceStorage
        - long storage_offset
        - THSize* size
        - THStride* strides
]]
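// Hand-written because the return type depends on the input: select on a
// tensor with more than one dimension returns a narrowed sub-tensor, while
// select on a 1-D tensor returns a Python scalar (e.g. x.select(0, 2)).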
[[
  name: THPTensor_(select)
  python_name: select
  only_register: True
]]
static PyObject * THPTensor_(select)(THPTensor *self, PyObject *args)
{
  HANDLE_TH_ERRORS
  long dim, idx;
  if (!PyArg_ParseTuple(args, "ll", &dim, &idx))
    return NULL;
  int ndim = THTensor_(nDimension)(LIBRARY_STATE self->cdata);
  if (ndim > 1) {
    THTensorPtr selected = THTensor_(newWithTensor)(LIBRARY_STATE self->cdata);
    THTensor_(select)(LIBRARY_STATE selected.get(), NULL, dim, idx);
    return THPTensor_(New)(selected.release());
  }
  else {
    THArgCheck(ndim == 1, 1, "empty Tensor");
    return THPUtils_(newReal)(THTensor_(get1d)(LIBRARY_STATE self->cdata, idx));
  }
  END_HANDLE_TH_ERRORS
}
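// size() and stride() below share the same hand-written argument handling:
// called with no arguments they describe all dimensions at once (a torch.Size
// and a tuple, respectively); called with a single integer, positional or as
// the dim keyword, they return the value for that dimension, with negative
// dims counted from the end (e.g. x.size(0), x.stride(-1)).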
PyObject * THPTensor_(size)(PyObject *self, PyObject *args, PyObject *kwargs)
{
  HANDLE_TH_ERRORS
  THTensor* tensor = ((THPTensor*)self)->cdata;
  if (PyTuple_Size(args) == 0 && (!kwargs || PyDict_Size(kwargs) == 0)) {
    return THPSize_New(tensor->nDimension, tensor->size);
  }
  int tuplecount = args ? PyTuple_Size(args) : 0;
  int dictcount = kwargs ? PyDict_Size(kwargs) : 0;
  PyObject* pydim = NULL;
  if (tuplecount == 1 && dictcount == 0) {
    pydim = PyTuple_GET_ITEM(args, 0);
  } else if (dictcount == 1 && tuplecount == 0) {
    pydim = PyDict_GetItemString(kwargs, "dim");
  }
  if (pydim && THPUtils_checkLong(pydim)) {
    int dim = (int)THPUtils_unpackLong(pydim);
    if (dim < 0)
      dim += tensor->nDimension;
    return PyInt_FromLong(THTensor_(size)(LIBRARY_STATE tensor, dim));
  }
  THPUtils_invalidArguments(args, kwargs, "size", 2, "(int dim)", "no arguments");
  return NULL;
  END_HANDLE_TH_ERRORS
}
[[
  name: THPTensor_(size)
  python_name: size
  method_flags: METH_KEYWORDS
  only_register: True
]]
PyObject * THPTensor_(stride)(PyObject *self, PyObject *args, PyObject *kwargs)
{
  HANDLE_TH_ERRORS
  THTensor* tensor = ((THPTensor*)self)->cdata;
  if (PyTuple_Size(args) == 0 && (!kwargs || PyDict_Size(kwargs) == 0)) {
    PyObject* stride = PyTuple_New(tensor->nDimension);
    for (int i = 0; i != tensor->nDimension; ++i) {
      PyTuple_SET_ITEM(stride, i, PyLong_FromLong(tensor->stride[i]));
    }
    return stride;
  }
  int tuplecount = args ? PyTuple_Size(args) : 0;
  int dictcount = kwargs ? PyDict_Size(kwargs) : 0;
  PyObject* pydim = NULL;
  if (tuplecount == 1 && dictcount == 0) {
    pydim = PyTuple_GET_ITEM(args, 0);
  } else if (dictcount == 1 && tuplecount == 0) {
    pydim = PyDict_GetItemString(kwargs, "dim");
  }
  if (pydim && THPUtils_checkLong(pydim)) {
    int dim = (int)THPUtils_unpackLong(pydim);
    if (dim < 0)
      dim += tensor->nDimension;
    return PyInt_FromLong(THTensor_(stride)(LIBRARY_STATE tensor, dim));
  }
  THPUtils_invalidArguments(args, kwargs, "stride", 2, "(int dim)", "no arguments");
  return NULL;
  END_HANDLE_TH_ERRORS
}
[[
  name: THPTensor_(stride)
  python_name: stride
  method_flags: METH_KEYWORDS
  only_register: True
]]
[[
  name: fill_
  cname: fill
  return: self
  arguments:
    - THTensor* self
    - real value
]]
[[
  name: isSameSizeAs
  python_name: is_same_size
  return: bool
  arguments:
    - THTensor* self
    - THTensor* other
]]
[[
  name: isContiguous
  python_name: is_contiguous
  return: bool
  arguments:
    - THTensor* self
]]
[[
  name: isSetTo
  python_name: is_set_to
  return: bool
  arguments:
    - THTensor* self
    - THTensor* tensor
]]
[[
  name: maskedFill_
  cname: maskedFill
  python_name: masked_fill_
  return: self
  arguments:
    - THTensor* self
    - THBoolTensor* mask
    - real value
]]
[[
  name: maskedCopy_
  cname: maskedCopy
  python_name: masked_copy_
  return: self
  arguments:
    - THTensor* self
    - THBoolTensor* mask
    - THTensor* source
]]
[[
  name: maskedSelect
  python_name: masked_select
  with_stateless: True
  return: argument 0
  arguments:
    - arg: THTensor* result
      output: True
    - THTensor* self
    - THBoolTensor* mask
]]
[[
  name: transpose
  with_stateless: True
  cname: newTranspose
  return: THTensor*
  arguments:
    - THTensor* self
    - long dim0
    - long dim1
]]
[[
  name: transpose_
  cname: transpose
  return: self
  arguments:
    - THTensor* self
    - THTensor* self
    - long dim0
    - long dim1
]]
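// before_call: splices a snippet into the generated wrapper right before the
// TH call, with $argN / ${argN} referring to the parsed Python arguments.
// t() and t_() use it to insist on a 2-D tensor before delegating to
// (new)transpose with the constant dimensions 0 and 1.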
[[
  name: t
  with_stateless: True
  cname: newTranspose
  return: THTensor*
  before_call: |
    long ndim = ((THPTensor*)${arg0})->cdata->nDimension;
    THPUtils_assert(ndim == 2, "t() expects a 2D tensor, but self is %ldD", ndim);
  arguments:
    - THTensor* self
    - CONSTANT 0
    - CONSTANT 1
]]
[[
  name: t_
  cname: transpose
  return: self
  before_call: |
    long ndim = ((THPTensor*)${arg0})->cdata->nDimension;
    THPUtils_assert(ndim == 2, "t_() expects a 2D tensor, but self is %ldD", ndim);
  arguments:
    - THTensor* self
    - THTensor* self
    - CONSTANT 0
    - CONSTANT 1
]]
[[
  name: squeeze
  with_stateless: True
  return: argument 0
  options:
    - arguments:
        - arg: THTensor* result
          output: True
        - THTensor* self
    - cname: squeeze1d
      arguments:
        - arg: THTensor* result
          output: True
        - THTensor* self
        - long dim
]]
[[
  name: squeeze_
  return: self
  options:
    - cname: squeeze
      arguments:
        - THTensor* self
        - THTensor* self
    - cname: squeeze1d
      arguments:
        - THTensor* self
        - THTensor* self
        - long dim
]]
[[
  name: nonzero
  with_stateless: True
  return: argument 0
  arguments:
    - arg: THIndexTensor* result
      output: True
    - THTensor* self
]]
[[
  name: contiguous
  cname: newContiguous
  return: THTensor*
  arguments:
    - THTensor* self
]]
[[
  name: clone
  cname: newClone
  return: THTensor*
  arguments:
    - THTensor* self
]]
[[
  name: resizeAs_
  python_name: resize_as_
  cname: resizeAs
  return: self
  arguments:
    - THTensor* self
    - THTensor* template
]]
[[
  name: indexSelect
  python_name: index_select
  with_stateless: True
  return: argument 0
  arguments:
    - arg: THTensor* result
      output: True
    - THTensor* self
    - long dim
    - THIndexTensor* index
]]
[[
  name: indexCopy_
  python_name: index_copy_
  cname: indexCopy
  return: argument 0
  arguments:
    - THTensor* self
    - long dim
    - THIndexTensor* index
    - THTensor* source
]]
[[
  name: indexAdd_
  python_name: index_add_
  cname: indexAdd
  return: argument 0
  arguments:
    - THTensor* self
    - long dim
    - THIndexTensor* index
    - THTensor* source
]]
[[
  name: indexFill_
  python_name: index_fill_
  cname: indexFill
  return: argument 0
  arguments:
    - THTensor* self
    - long dim
    - THIndexTensor* index
    - real value
]]
[[
  name: narrow
  return: argument 0
  arguments:
    - arg: THTensor* result
      output: True
    - THTensor* self
    - long dimension
    - long start
    - long length
]]
[[
  name: unfold
  return: argument 0
  arguments:
    - arg: THTensor* result
      output: True
    - THTensor* self
    - long dimension
    - long size
    - long step
]]
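// defined_if: guards the declaration with a preprocessor condition, so range
// is only compiled for CPU types ("!IS_CUDA"); default: 1 makes the step
// argument optional, e.g. torch.range(0, 10) counts up in steps of 1.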
[[
  name: range
  only_stateless: True
  defined_if: "!IS_CUDA"
  return: argument 0
  arguments:
    - arg: THTensor* result
      output: True
    - accreal start
    - accreal end
    - arg: accreal step
      default: 1
]]
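// scatter_ dispatches between a tensor source and a scalar fill value through
// options:, as set_ does above; gather pre-sizes its output in before_call so
// the result matches the shape of the index tensor.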
[[
  name: scatter_
  return: argument 0
  options:
    - cname: scatter
      arguments:
        - THTensor* self
        - long dim
        - THIndexTensor* index
        - THTensor* src
    - cname: scatterFill
      arguments:
        - THTensor* self
        - long dim
        - THIndexTensor* index
        - real value
]]
[[
  name: gather
  with_stateless: True
  return: argument 0
  before_call: |
    THLongStoragePtr _size = THIndexTensor_(newSizeOf)(LIBRARY_STATE ((THPIndexTensor*)$arg3)->cdata);
    THTensor_(resize)(LIBRARY_STATE ((THPTensor*)$arg0)->cdata, _size, NULL);
  arguments:
    - arg: THTensor* result
      output: True
    - THTensor* self
    - long dim
    - THIndexTensor* index
]]
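// Hand-written so that cat can accept an arbitrary Python sequence of tensors
// plus an optional dimension (defaulting to 0), e.g. torch.cat([a, b], 1),
// which the fixed-arity [[ ... ]] declarations above don't express.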
[[
  name: THPTensor_stateless_(cat)
  python_name: cat
  only_register: True
  only_stateless: True
]]
static PyObject * THPTensor_stateless_(cat)(THPTensor *_unused, PyObject *args)
{
#if IS_CUDA && THCP_AUTO_GPU
  THCPAutoGPU __autogpu_guard = THCPAutoGPU(args);
#endif
  HANDLE_TH_ERRORS
  Py_ssize_t _argcount = args ? PyTuple_Size(args) : 0;
  std::vector<THPObjectPtr> items;
  std::vector<THTensor *> item_tensors;
  PyObject *sequence;
  Py_ssize_t seq_length;
  THPTensorPtr result;
  long dimension = -1;
  if (_argcount == 0 || _argcount > 2 || !PySequence_Check(PyTuple_GET_ITEM(args, 0)))
    goto invalid_arguments;
  sequence = PyTuple_GET_ITEM(args, 0);
  seq_length = PySequence_Length(sequence);
  if (seq_length <= 0)
    goto invalid_arguments;
  items.reserve(seq_length);
  item_tensors.reserve(seq_length);
  for (int i = 0; i < seq_length; i++) {
    items.push_back(PySequence_ITEM(sequence, i));
    if (!THPTensor_(Check)(items[i].get()))
      goto invalid_arguments;
    item_tensors.push_back(((THPTensor*)items[i].get())->cdata);
  }
  if (_argcount == 2) {
    if (!THPUtils_checkLong(PyTuple_GET_ITEM(args, 1)))
      goto invalid_arguments;
    dimension = THPUtils_unpackLong(PyTuple_GET_ITEM(args, 1));
  } else {
    dimension = 0;
  }
  result = (THPTensor *)THPTensor_(NewEmpty)();
  if (!result) return NULL;
  THTensor_(catArray)(LIBRARY_STATE result->cdata, item_tensors.data(),
      items.size(), dimension);
  return (PyObject*)result.release();
invalid_arguments:
  THPUtils_invalidArguments(args, NULL, "cat", 2,
      "(sequence[" THPTensorStr "] tensors)",
      "(sequence[" THPTensorStr "] tensors, int dim)");
  return NULL;
  END_HANDLE_TH_ERRORS
}
[[
  name: data_ptr
  return: void*
  cname: data
  arguments:
    - THTensor* self
]]
[[
  name: equal
  with_stateless: True
  return: bool
  arguments:
    - THTensor* self
    - THTensor* other
]]
[[
  python_name: copy_
  name: THPTensor_(copy_)
  method_flags: METH_KEYWORDS
  only_register: True
]]
PyObject * THPTensor_(copy_)(PyObject *self, PyObject *args, PyObject *kwargs)
{
  HANDLE_TH_ERRORS
  return THPCopyMethod(THTensor_(copy_functions), self, args, kwargs);
  END_HANDLE_TH_ERRORS
}