pytorch/torch/csrc/utils/tensor_apply.cpp
Edward Yang 4404762d7d Rename IntList to IntArrayRef. (#16751)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/16751

This was made more complicated by the fact that ivalue::IntList
is a thing.  So I had to fix all of the sites where we referring
to IValue post facto.

The following codemods were run, in this order:

```
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in IntList IntArrayRef
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in IntArrayRef::create IntList::create
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in ivalue::IntArrayRef ivalue::IntList
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in Tag::IntArrayRef Tag::IntList
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in isIntArrayRef isIntList
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in toIntArrayRef toIntList
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in 'Shared<IntArrayRef>' 'Shared<IntList>'
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in 'intrusive_ptr<IntArrayRef>' 'intrusive_ptr<IntList>'
```

Some manual fixups were done afterwards; they can be reviewed separately
at https://github.com/pytorch/pytorch/pull/16752

Reviewed By: dzhulgakov

Differential Revision: D13954363

fbshipit-source-id: b5c40aacba042402155a2f5a229fa6db7992ac64
2019-02-05 14:54:34 -08:00

100 lines
3.2 KiB
C++

#include <torch/csrc/utils/tensor_apply.h>
#include <ATen/TensorUtils.h>
#include <ATen/ExpandUtils.h>
#include <torch/csrc/Exceptions.h>
#include <torch/csrc/utils/python_numbers.h>
#include <torch/csrc/utils/python_scalars.h>
using namespace at;
namespace torch { namespace utils {
// Lightweight cursor over a tensor's raw storage used by recursive_apply:
// a mutable element pointer plus the stride table needed to advance it one
// element along any dimension.
struct StridedData {
  // NOTE: intentionally non-explicit — call sites construct
  // std::array<StridedData, N> directly from Tensors via `{{ self, ... }}`.
  StridedData(const Tensor & tensor)
    : data(tensor.data_ptr())
    , strides(tensor.strides())
    , elementSize(tensor.type().elementSizeInBytes()) {}

  void* data;           // address of the current element (advanced in place)
  IntArrayRef strides;  // per-dimension strides, in elements
  int64_t elementSize;  // bytes per element

  // Advance the data pointer by one element along dimension `dim`.
  void step(int dim) {
    // static_cast (not a C-style cast) for byte-wise pointer arithmetic.
    data = static_cast<char*>(data) + (strides[dim] * elementSize);
  }
};
// Walks all elements of N tensors in lockstep, calling the Python callable
// `fn` on each N-tuple of scalars and storing its result back into the
// corresponding element of the first tensor.
template<size_t N>
static void recursive_apply(IntArrayRef sizes, ScalarType scalarType, int64_t dim,
                            PyObject* fn, std::array<StridedData, N> strided_data) {
  const int64_t ndim = sizes.size();
  if (dim != ndim) {
    // Recurse over every index of the current dimension, stepping each
    // tensor's cursor afterwards. `strided_data` is passed by value, so
    // advancing our copy never disturbs the caller's pointers.
    const int64_t extent = sizes[dim];
    for (int64_t idx = 0; idx < extent; ++idx) {
      recursive_apply(sizes, scalarType, dim + 1, fn, strided_data);
      for (auto& cursor : strided_data) {
        cursor.step(dim);
      }
    }
    return;
  }

  // Base case: all dimensions consumed — invoke fn(scalar_0, ..., scalar_{N-1}).
  auto arg_tuple = THPObjectPtr(PyTuple_New(N));
  if (!arg_tuple) throw python_error();
  for (size_t i = 0; i < N; i++) {
    PyObject* scalar = load_scalar(strided_data[i].data, scalarType);
    if (!scalar) throw python_error();
    // PyTuple_SET_ITEM steals the reference to `scalar`.
    PyTuple_SET_ITEM(arg_tuple.get(), i, scalar);
  }
  auto result = THPObjectPtr(PyObject_CallObject(fn, arg_tuple.get()));
  if (!result) throw python_error();
  // The callable's result overwrites the first tensor's element in place.
  store_scalar(strided_data[0].data, scalarType, result.get());
}
// In-place `tensor.apply_(fn)`: replaces every element of `self` with the
// result of calling the Python callable `fn` on that element. CPU-only.
Tensor & apply_(Tensor & self, PyObject* fn) {
  if (self.type().backend() != Backend::CPU) {
    throw TypeError("apply_ is only implemented on CPU tensors");
  }
  recursive_apply<1>(self.sizes(), self.type().scalarType(), 0, fn, {{ self }});
  return self;
}
// In-place `self.map_(other, fn)`: self[i] = fn(self[i], other[i]), with
// `other` broadcast against self's shape. CPU-only; both tensors must have
// the same type.
Tensor & map_(Tensor & self, const Tensor & other_, PyObject* fn) {
  if (self.type().backend() != Backend::CPU) {
    throw TypeError("map_ is only implemented on CPU tensors");
  }
  if (other_.type() != self.type()) {
    throw TypeError("map_: expected %s for 'other' (got %s)",
        self.type().toString(), other_.type().toString());
  }
  // Broadcast `other_` to self's shape; self itself is never resized.
  Tensor broadcast_other;
  std::tie(broadcast_other) = expand_inplace(self, other_, "map_");
  const auto scalar_type = self.type().scalarType();
  recursive_apply<2>(self.sizes(), scalar_type, 0, fn, {{ self, broadcast_other }});
  return self;
}
// In-place `self.map2_(x, y, fn)`: self[i] = fn(self[i], x[i], y[i]), with
// `x` and `y` broadcast against self's shape. CPU-only; all three tensors
// must have the same type.
Tensor & map2_(Tensor & self, const Tensor & x_, const Tensor & y_, PyObject* fn) {
  const bool all_cpu = self.type().backend() == Backend::CPU
      && x_.type().backend() == Backend::CPU
      && y_.type().backend() == Backend::CPU;
  if (!all_cpu) {
    throw TypeError("map2_ is only implemented on CPU tensors");
  }
  if (x_.type() != self.type()) {
    throw TypeError("map2_: expected %s for argument 'x' (got %s)",
        self.type().toString(), x_.type().toString());
  }
  if (y_.type() != self.type()) {
    throw TypeError("map2_: expected %s for argument 'y' (got %s)",
        self.type().toString(), y_.type().toString());
  }
  // Broadcast both operands to self's shape; self itself is never resized.
  Tensor broadcast_x, broadcast_y;
  std::tie(broadcast_x, broadcast_y) = expand_inplace(self, x_, y_, "map2_");
  const auto scalar_type = self.type().scalarType();
  recursive_apply<3>(self.sizes(), scalar_type, 0, fn,
                     {{ self, broadcast_x, broadcast_y }});
  return self;
}
}} // namespace torch::utils