Mirror of https://github.com/zebrajr/pytorch.git, synced 2025-12-07 00:21:07 +01:00
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/26719

This PR adds a pair of tests for fallback boxed dispatch, exercising two different ways you might use it: (1) to implement a "wrapper" tensor type (e.g., LazyTensor, NestedTensor), and (2) to implement a toggleable "mode" (e.g., Profiling, Tracing). Both are the most trivial possible implementations of their kind: they "wrap" a real tensor and simply forward along to the real implementation.

This PR also adds the necessary feature support for the toggleable mode, which is part of the original generic dispatch abstraction design but was not previously implemented. I had not originally intended to add this, but it turns out writing a new "mode" is a lot simpler than writing a "wrapper" type, so I ended up writing the mode version first.

General structure of the PR:

* Add two new testing tensor type ids, `TESTING_ONLY_GenericWrapperTensorId` and `TESTING_ONLY_GenericModeTensorId`, which our tests use. They may find use in other tests as well.
* Add support for toggling the availability of `TESTING_ONLY_GenericModeTensorId`. This introduces a new thread-local variable, accessible via `tls_local_tensor_type_set()`, which is taken into account during dispatch.
* The mode fallback is very simple: it increments a counter and then passes the call on to the underlying kernel by invoking the JIT.
* The wrapper fallback is more involved: it parses the arguments, unwrapping any wrapped tensor arguments, then invokes the JIT, and finally rewraps the outputs.

The examples here are somewhat simplistic; there are a number of engineering improvements that could be applied. We could save these for later (landing this patch to get immediate testing) or incorporate them into this patch:

* `getOperator` is horrible. Bram Wasti and I discussed a plan for how to make this easier, by simply refactoring the JIT interface.
* `GenericWrapperTensorImpl` doesn't populate all of its fields accurately. Most notably, size is not set up correctly.
* `generic_wrapper_fallback` should handle tensor lists in arguments and returns properly.

One pitfall: fallback dispatch only works with non-c10 code. That's why I test using `batch_norm`.

Signed-off-by: Edward Z. Yang <ezyang@fb.com>

Differential Revision: D17549624

Test Plan: Imported from OSS

Pulled By: ezyang

fbshipit-source-id: 57dbdd8d6812a66082aa6db2934c8edcda340ea6
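As a rough illustration of the toggleable-mode mechanism the summary describes (a minimal standalone sketch, not the actual c10 API: the names ModeGuard, mode_enabled, and mode_calls are hypothetical), the toggle can be pictured as a thread-local flag that dispatch would consult alongside the per-tensor type set, plus an RAII guard that enables it for a scope:

#include <cstdint>

namespace sketch {

// Hypothetical stand-ins for the thread-local state behind
// tls_local_tensor_type_set(): a per-thread toggle that a dispatcher
// would fold into the dispatch decision.
thread_local bool mode_enabled = false;
thread_local std::int64_t mode_calls = 0;  // the test's mode fallback only counts calls

// RAII guard: enable the mode for the current scope, restore on exit.
struct ModeGuard {
  bool prev_;
  ModeGuard() : prev_(mode_enabled) { mode_enabled = true; }
  ~ModeGuard() { mode_enabled = prev_; }
};

// A mode fallback in this sketch would bump mode_calls and then re-dispatch
// to the underlying kernel (the real test does this by invoking the JIT).

} // namespace sketch

In the real PR, the corresponding toggle lives in the thread-local set returned by `tls_local_tensor_type_set()`, and the mode fallback increments its counter before passing the call on through the JIT.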
228 lines
5.9 KiB
C++
#include <c10/core/TensorImpl.h>

#include <c10/core/Backend.h>
#include <c10/core/WrapDimMinimal.h>
#include <c10/core/impl/LocalTensorTypeSet.h>
#include <c10/util/Optional.h>

C10_DEFINE_bool(
    caffe2_keep_on_shrink,
    true,
    "If set, keeps memory when a tensor is shrinking its size.");

C10_DEFINE_int64(
    caffe2_max_keep_on_shrink_memory,
    LLONG_MAX,
    "The maximum memory in bytes to keep on shrink, if the difference between "
    "tensor sizes is bigger than this then tensor will be reset.");

namespace c10 {

const char * const TensorImpl::err_msg_tensor_metadata_change_not_allowed =
    "is not allowed on a Tensor created from .data or .detach().\n"
    "If your intent is to change the metadata of a Tensor (such as sizes / strides / storage / storage_offset)\n"
    "without autograd tracking the change, remove the .data / .detach() call and wrap the change in a `with torch.no_grad():` block.\n"
    "For example, change:\n"
    "    x.data.set_(y)\n"
    "to:\n"
    "    with torch.no_grad():\n"
    "        x.set_(y)";

at::Tensor& TensorImpl::grad() {
  if (autograd_meta()) {
    return autograd_meta()->grad();
  } else {
    AT_ERROR("grad is not implemented for Tensor");
  }
}

const at::Tensor& TensorImpl::grad() const {
  if (autograd_meta()) {
    return autograd_meta()->grad();
  } else {
    AT_ERROR("grad is not implemented for Tensor");
  }
}

TensorImpl::TensorImpl(Storage&& storage, TensorTypeSet type_set)
    : TensorImpl(std::move(storage), type_set, storage.dtype(), storage.device()) {}

TensorImpl::TensorImpl(TensorTypeSet type_set, const caffe2::TypeMeta& data_type, c10::optional<c10::Device> device_opt)
    : TensorImpl({}, type_set, data_type, std::move(device_opt)) {}

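// Delegated-to constructor: the freshly constructed tensor reports sizes {0},
// a single stride of 1, and numel_ == 0. VariableTensorId is always stripped
// from the supplied type set.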
TensorImpl::TensorImpl(Storage&& storage, TensorTypeSet type_set, const caffe2::TypeMeta& data_type,
                       c10::optional<c10::Device> device_opt)
    : storage_(std::move(storage)),
      sizes_{0},
      storage_offset_(0),
      numel_(0),
      data_type_(data_type),
      device_opt_(device_opt),
      type_set_(type_set.remove(TensorTypeId::VariableTensorId)) {
  if (!type_set.empty()) {
    AT_ASSERT(data_type.id() == caffe2::TypeIdentifier::uninitialized() ||
              device_opt_.has_value());
    // UndefinedTensorImpl is a singleton, so we skip logging it
    C10_LOG_API_USAGE_ONCE("tensor.create");
  }
  // we would also like to check that non-cpu devices have an index, but some Caffe2 operators create
  // Storages with default devices.
  strides_.push_back(1);
}

IntArrayRef TensorImpl::sizes() const {
  return sizes_;
}

IntArrayRef TensorImpl::strides() const {
  return strides_;
}

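// True when the strides describe a dense row-major (C-contiguous) layout.
// Dimensions of size 1 are skipped, since their stride does not matter.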
bool TensorImpl::compute_contiguous() const {
  bool is_contiguous = true;
  if (is_empty())
    return is_contiguous;
  int64_t z = 1;
  for (int64_t d = dim() - 1; d >= 0; d--) {
    if (size(d) != 1) {
      if (stride(d) == z) {
        z *= size(d);
      } else {
        is_contiguous = false;
        break;
      }
    }
  }
  return is_contiguous;
}

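// True when a 4-d tensor is densely laid out in channels-last (NHWC) order:
// dimensions are walked in memory order {C, W, H, N}, i.e. {1, 3, 2, 0}.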
bool TensorImpl::compute_channels_last_contiguous() const {
  if (dim() == 4) {
    int64_t expected = 1;
    for (auto& d : {1, 3, 2, 0}) {
      if (size(d) != 1) {
        if (stride(d) == expected) {
          expected *= size(d);
        } else {
          return false;
        }
      }
    }
    return true;
  }
  return false;
}

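// Weaker check than the one above: the strides only need to be increasing in
// the channels-last order {1, 3, 2, 0}, not exactly equal to the dense values.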
bool TensorImpl::compute_strides_like_channels_last() const {
  if (dim() == 4) {
    int64_t min = 0;
    for (auto& d : {1, 3, 2, 0}) {
      if (size(d) != 1) {
        if (stride(d) > min) {
          min = stride(d);
        } else {
          return false;
        }
      }
    }
    return true;
  }
  return false;
}

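// True when the tensor covers its elements exactly once with no gaps: after
// sorting dimensions by stride, each stride must equal the product of the
// sizes of all dimensions with smaller strides.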
bool TensorImpl::compute_non_overlapping_and_dense() const {
  if (dim() == 1) {
    return size(0) < 2 || stride(0) == 1;
  }
  SmallVector<int64_t,5> perm;
  perm.resize(dim());
  for (int64_t i = 0; i < dim(); i++) {
    perm[i] = i;
  }
  // Sort by strides, leaving 0 and 1 sized dims at the end of the array
  std::sort(perm.begin(), perm.end(), [&](int64_t a, int64_t b) {
    if (sizes_[a] < 2) {
      return false;
    }
    return strides_[a] < strides_[b];
  });
  auto require_stride = 1;
  for (int64_t i = 0; i < dim(); i++) {
    if (sizes_[perm[i]] < 2) {
      return true;
    }
    if (strides_[perm[i]] != require_stride) {
      return false;
    }
    require_stride *= sizes_[perm[i]];
  }
  return true;
}

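// Called by the intrusive_ptr machinery when the last strong reference goes
// away; releases autograd metadata and storage eagerly, possibly before the
// TensorImpl itself is deallocated (weak references may still be alive).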
void TensorImpl::release_resources() {
  autograd_meta_.reset();
  if (storage_) {
    storage_ = {};
  }
}

int64_t TensorImpl::dim() const {
  return sizes_.size();
}

int64_t TensorImpl::size(int64_t d) const {
  d = at::maybe_wrap_dim(d, dim(), false);
  return sizes_[d];
}

int64_t TensorImpl::stride(int64_t d) const {
  d = at::maybe_wrap_dim(d, dim(), false);
  return strides_[d];
}

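// Legacy helper: when the condition holds and the tensor has shape [1],
// collapse it to a 0-dimensional scalar.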
TensorImpl* TensorImpl::maybe_zero_dim(bool condition_when_zero_dim) {
  bool set_zero_dim = condition_when_zero_dim && this->sizes().size() == 1 && this->size(0) == 1;
  if (set_zero_dim) {
    resize_dim(0);
  }
  return this;
}

bool TensorImpl::has_storage() const {
  return storage_;
}

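// Returns the cached contiguity flag for the requested memory format; the
// DEBUG-only assert cross-checks the cache against a fresh computation.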
bool TensorImpl::is_contiguous(at::MemoryFormat memory_format) const {
#ifdef DEBUG
  AT_ASSERT(compute_contiguous() == is_contiguous_);
#endif
  if (memory_format == at::MemoryFormat::ChannelsLast) {
    return is_channels_last_contiguous_;
  }
  return is_contiguous_;
}

const Storage& TensorImpl::storage() const {
  return storage_;
}

static void deletePlacementDeleteContext(void* ptr) {
  delete static_cast<PlacementDeleteContext*>(ptr);
}

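// Wraps an existing DataPtr in a PlacementDeleteContext so that the placement
// destructor runs over the stored data before the underlying allocation is
// freed (used for dtypes with non-trivial destructors).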
at::DataPtr PlacementDeleteContext::makeDataPtr(
    at::DataPtr&& data_ptr,
    PlacementDtor placement_dtor,
    size_t size,
    at::Device device) {
  auto* ptr = data_ptr.get();
  return {ptr,
          new PlacementDeleteContext(std::move(data_ptr), placement_dtor, size),
          &deletePlacementDeleteContext,
          device};
}

AutogradMetaInterface::~AutogradMetaInterface() {}

} // namespace c10