pytorch/c10/core/UndefinedTensorImpl.cpp
PyTorch MergeBot, commit 846bb30e13: Revert "[1/N] Change #include <c10/util/Optional.h> to #include <optional> (#128301)"
2024-06-15 01:58:20 +00:00

This reverts commit bd72e28314.

Reverted https://github.com/pytorch/pytorch/pull/128301 on behalf of https://github.com/huydhn: "Sorry for reverting your change, but it fails the XLA build (bd72e28314). Please rebase your PR before relanding, because I think the failure is hidden by an unrelated broken-trunk XLA failure from your current base commit." ([comment](https://github.com/pytorch/pytorch/pull/128301#issuecomment-2169035822))
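For context, a minimal sketch of the kind of change #128301 made and that this commit reverts. The variable name is hypothetical, and the sketch assumes the usual c10::optional to std::optional migration, under which the change is mostly a mechanical respelling:

// Before #128301 (the state this revert restores):
#include <c10/util/Optional.h>
c10::optional<int64_t> maybe_dim = c10::nullopt;

// After #128301:
#include <optional>
std::optional<int64_t> maybe_dim = std::nullopt;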


#include <c10/core/UndefinedTensorImpl.h>
#include <c10/util/Exception.h>

namespace c10 {

// should this use the globalContext? Can it get a context passed in somehow?
UndefinedTensorImpl::UndefinedTensorImpl()
    : TensorImpl(DispatchKey::Undefined, caffe2::TypeMeta(), c10::nullopt) {
  set_storage_access_should_throw();
  // TODO: accessing the sizes of an undefined tensor is not meaningful
  // and should error too, but empirically it does not!
  set_custom_sizes_strides(SizesStridesPolicy::CustomStrides);
}

bool UndefinedTensorImpl::is_contiguous_custom(MemoryFormat format) const {
  return is_contiguous_default(format);
}

IntArrayRef UndefinedTensorImpl::strides_custom() const {
  TORCH_CHECK(false, "strides() called on an undefined Tensor");
}

SymIntArrayRef UndefinedTensorImpl::sym_strides_custom() const {
  TORCH_CHECK(false, "sym_strides() called on an undefined Tensor");
}

#ifdef DEBUG
bool UndefinedTensorImpl::has_storage() const {
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
      !storage_, "UndefinedTensorImpl assumes that storage_ is never set");
  return false;
}
#endif

void UndefinedTensorImpl::set_storage_offset(int64_t) {
  TORCH_CHECK(false, "set_storage_offset() called on an undefined Tensor");
}

const char* UndefinedTensorImpl::tensorimpl_type_name() const {
  return "UndefinedTensorImpl";
}

UndefinedTensorImpl UndefinedTensorImpl::_singleton;

} // namespace c10
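For reference, a minimal usage sketch, not part of this file, showing where the singleton surfaces. It assumes the standard ATen API, in which a default-constructed at::Tensor is backed by UndefinedTensorImpl::singleton():

#include <ATen/ATen.h>

int main() {
  at::Tensor t; // default-constructed: its impl is UndefinedTensorImpl::singleton()
  TORCH_CHECK(!t.defined()); // undefined tensors report defined() == false
  // t.strides() would throw "strides() called on an undefined Tensor"
  // via strides_custom() above; t.sizes() does not throw (see the TODO).
  return 0;
}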