Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/10478

- Removed Backend constructor from Device, and fixed all use-sites to use DeviceType::CPU instead of kCPU, or use a new function backendToDeviceType to perform the conversion.
- New method device_type() on Type; it gives you the underlying device type, e.g., CPU for SparseCPU.
- We add backward compatibility for kCPU/kCUDA uses, by introducing a new special type which is implicitly convertible to both DeviceType and Backend. As long as you don't define a function that's overloaded on both DeviceType and Backend (but not on BackendOrDeviceType), the implicit conversions will ensure that uses of at::Device(at::kCPU) keep working. We fixed use-sites in the library, but did NOT fix sites in the test code, so that we can exercise this BC code.

Reviewed By: Yangqing
Differential Revision: D9301861
fbshipit-source-id: 9a9d88620500715c7b37e655b4fd761f6dd72716
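The backward-compatibility mechanism described above can be pictured with a short, self-contained sketch. The local enums and the name LegacyDeviceTypeTag are stand-ins for illustration; they do not match the actual ATen definitions from the PR.

// Sketch of the BC idea: kCPU/kCUDA become constants of a special type
// that converts implicitly to *both* DeviceType and Backend.
// All names here are hypothetical stand-ins, not the real ATen types.
enum class DeviceType { CPU, CUDA };
enum class Backend { CPU, CUDA, SparseCPU, SparseCUDA };

struct LegacyDeviceTypeTag {
  constexpr LegacyDeviceTypeTag(DeviceType d, Backend b) : d_(d), b_(b) {}
  constexpr operator DeviceType() const { return d_; }  // implicit to DeviceType
  constexpr operator Backend() const { return b_; }     // implicit to Backend
 private:
  DeviceType d_;
  Backend b_;
};

constexpr LegacyDeviceTypeTag kCPU{DeviceType::CPU, Backend::CPU};
constexpr LegacyDeviceTypeTag kCUDA{DeviceType::CUDA, Backend::CUDA};

// A Device whose constructor now takes DeviceType (as in the PR) still
// accepts kCPU, because the tag converts implicitly.
struct Device {
  explicit Device(DeviceType type) : type_(type) {}
  DeviceType type_;
};

int main() {
  Device d(kCPU);    // kCPU converts to DeviceType::CPU
  Backend b = kCUDA; // old Backend-based call sites also still compile
  // Note: a function overloaded on both DeviceType and Backend would be
  // ambiguous when called with kCPU -- exactly the caveat in the summary.
  (void)d; (void)b;
}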
100 lines
3.1 KiB
C++
#include "tensor_apply.h"
|
|
|
|
#include <ATen/TensorUtils.h>
|
|
#include <ATen/ExpandUtils.h>
|
|
|
|
#include "torch/csrc/Exceptions.h"
|
|
#include "torch/csrc/utils/python_numbers.h"
|
|
#include "torch/csrc/utils/python_scalars.h"
|
|
|
|
using namespace at;

namespace torch { namespace utils {
// Per-tensor cursor for the strided walk below: a raw data pointer plus
// the strides and element size needed to advance it along any dimension.
struct StridedData {
  StridedData(const Tensor & tensor)
    : data(tensor.data_ptr())
    , strides(tensor.strides())
    , elementSize(tensor.type().elementSizeInBytes()) {}

  void* data;
  IntList strides;
  int64_t elementSize;

  // Advance the pointer by one index along dimension `dim`.
  void step(int dim) {
    data = (char*)data + (strides[dim] * elementSize);
  }
};
// Recursively walk all N tensors in lockstep. At the innermost level,
// call `fn` with the current element of each tensor and store the result
// back into the first tensor's element.
template<size_t N>
static void recursive_apply(IntList sizes, ScalarType scalarType, int64_t dim,
                            PyObject* fn, std::array<StridedData, N> strided_data) {
  int64_t ndim = sizes.size();
  if (dim == ndim) {
    // Base case: box each element as a Python scalar and call fn(...).
    auto args = THPObjectPtr(PyTuple_New(N));
    if (!args) throw python_error();
    for (size_t i = 0; i < N; i++) {
      PyObject* arg = load_scalar(strided_data[i].data, scalarType);
      if (!arg) throw python_error();
      PyTuple_SET_ITEM(args.get(), i, arg);
    }
    auto ret = THPObjectPtr(PyObject_CallObject(fn, args.get()));
    if (!ret) throw python_error();
    store_scalar(strided_data[0].data, scalarType, ret.get());
    return;
  }

  // Recurse over dimension `dim`. strided_data is passed by value, so each
  // level steps its own copy of the cursors and needs no reset afterwards.
  auto n = sizes[dim];
  for (int64_t i = 0; i < n; i++) {
    recursive_apply(sizes, scalarType, dim + 1, fn, strided_data);
    for (auto& td : strided_data) {
      td.step(dim);
    }
  }
}
Tensor & apply_(Tensor & self, PyObject* fn) {
  if (self.type().backend() != Backend::CPU) {
    throw TypeError("apply_ is only implemented on CPU tensors");
  }
  auto scalarType = self.type().scalarType();
  recursive_apply<1>(self.sizes(), scalarType, 0, fn, {{ self }});
  return self;
}
Tensor & map_(Tensor & self, const Tensor & other_, PyObject* fn) {
  if (self.type().backend() != Backend::CPU) {
    throw TypeError("map_ is only implemented on CPU tensors");
  }
  if (other_.type() != self.type()) {
    throw TypeError("map_: expected %s for 'other' (got %s)",
        self.type().toString(), other_.type().toString());
  }
  Tensor other;
  std::tie(other) = expand_inplace(self, other_, "map_");
  auto scalarType = self.type().scalarType();
  recursive_apply<2>(self.sizes(), scalarType, 0, fn, {{ self, other }});
  return self;
}
Tensor & map2_(Tensor & self, const Tensor & x_, const Tensor & y_, PyObject* fn) {
  if (self.type().backend() != Backend::CPU || x_.type().backend() != Backend::CPU || y_.type().backend() != Backend::CPU) {
    throw TypeError("map2_ is only implemented on CPU tensors");
  }
  if (x_.type() != self.type()) {
    throw TypeError("map2_: expected %s for argument 'x' (got %s)",
        self.type().toString(), x_.type().toString());
  }
  if (y_.type() != self.type()) {
    throw TypeError("map2_: expected %s for argument 'y' (got %s)",
        self.type().toString(), y_.type().toString());
  }
  Tensor other1, other2;
  std::tie(other1, other2) = expand_inplace(self, x_, y_, "map2_");
  auto scalarType = self.type().scalarType();
  recursive_apply<3>(self.sizes(), scalarType, 0, fn, {{ self, other1, other2 }});
  return self;
}
}} // namespace torch::utils
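For reference, the traversal pattern above can be reproduced in a minimal, self-contained sketch with no ATen or Python dependencies. All names below are illustrative, and strides are counted in elements rather than bytes (the file above multiplies by elementSize because its strides are element counts applied to a raw byte pointer):

// Standalone illustration of the strided traversal used by recursive_apply:
// visit every element of an N-d tensor by stepping a raw pointer with
// per-dimension strides.
#include <cstdio>
#include <vector>

static void visit(const std::vector<long>& sizes,
                  const std::vector<long>& strides,  // in elements
                  const float* data, size_t dim) {
  if (dim == sizes.size()) {   // base case: a single element reached
    std::printf("%g ", *data);
    return;
  }
  for (long i = 0; i < sizes[dim]; i++) {
    visit(sizes, strides, data, dim + 1);
    data += strides[dim];      // same role as StridedData::step(dim)
  }
}

int main() {
  // 2x3 row-major buffer; strides {3, 1} in elements.
  float buf[6] = {0, 1, 2, 3, 4, 5};
  visit({2, 3}, {3, 1}, buf, 0);  // prints: 0 1 2 3 4 5
  std::printf("\n");
}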