diff --git a/aten/src/ATen/core/ATenDispatch.h b/aten/src/ATen/core/ATenDispatch.h
index 0fd0c79c782..ba2940feb30 100644
--- a/aten/src/ATen/core/ATenDispatch.h
+++ b/aten/src/ATen/core/ATenDispatch.h
@@ -1,12 +1,15 @@
 #pragma once
 
-#include
 #include
+#include
+#include
 #include
 #include
 #include
 #include
 
+// TODO: Rewrite this comment
+//
 // This dispatch class serves as a replacement for our previous dispatch
 // mechanism, in which all functions were members of a Type class. A derived
 // class existed for each backend (and Variable), and the vtable was used to
@@ -19,7 +22,7 @@ namespace at {
 namespace impl {
 
 // Take a TensorTypeSet for a Tensor, and combine it with the current thread
-// local set of inclusions and exclusions (not yet implemented, coming soon!)
+// local valid (implemented) and enabled (not implemented) TensorTypeSets
 // to determine what the actual dispatch TensorTypeId should be. Unlike
 // Tensor::type_set(), the value of this on a tensor can change depending
 // on TLS.
@@ -30,8 +33,7 @@ namespace impl {
 // question is whether or not we have access to all the relevant TLS at this
 // point.
 static inline TensorTypeId dispatchTypeId(TensorTypeSet ts) {
-  // TODO: Account for TLS!
-  return ts.highestPriorityTypeId();
+  return (ts - c10::impl::tls_excluded_tensor_type_set()).highestPriorityTypeId();
 }
 
 }
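(Aside for readers of this diff, not part of the patch: the hunk above is the behavioral core of the change. Below is a minimal standalone sketch of the rule it encodes, using stand-in ids and a toy bitset rather than the real c10 types; the id values and their relative priorities are made up for illustration. The point is that the thread-local exclusion set is subtracted from the tensor's type set before the highest-priority id is picked, which is how a tensor carrying VariableTensorId can be made to dispatch to its base kernel once autograd excludes that id via TLS.)

// Toy model only: stand-in enum values and priority order, not the real c10 ones.
#include <cstdint>
#include <iostream>

enum class TensorTypeId : uint8_t { UndefinedTensorId = 0, CPUTensorId, CUDATensorId, VariableTensorId };

struct TensorTypeSet {
  uint64_t repr;
  TensorTypeSet(uint64_t r = 0) : repr(r) {}
  TensorTypeSet add(TensorTypeId t) const { return TensorTypeSet(repr | (1ULL << static_cast<uint8_t>(t))); }
  // operator- drops every id present in the right-hand set (mirrors "ts - tls_excluded_tensor_type_set()").
  TensorTypeSet operator-(TensorTypeSet o) const { return TensorTypeSet(repr & ~o.repr); }
  TensorTypeId highestPriorityTypeId() const {  // in this toy, the highest set bit wins
    if (repr == 0) return TensorTypeId::UndefinedTensorId;
    int msb = 63;
    while (!(repr & (1ULL << msb))) --msb;
    return static_cast<TensorTypeId>(msb);
  }
};

// Stand-in for c10::impl::tls_excluded_tensor_type_set(): a thread-local exclusion set.
thread_local TensorTypeSet tls_excluded;

TensorTypeId dispatchTypeId(TensorTypeSet ts) {
  return (ts - tls_excluded).highestPriorityTypeId();
}

int main() {
  TensorTypeSet ts = TensorTypeSet().add(TensorTypeId::CPUTensorId).add(TensorTypeId::VariableTensorId);
  std::cout << static_cast<int>(dispatchTypeId(ts)) << "\n";  // 3 = VariableTensorId wins
  // Once autograd excludes VariableTensorId via TLS, the same tensor dispatches to CPU.
  tls_excluded = TensorTypeSet().add(TensorTypeId::VariableTensorId);
  std::cout << static_cast<int>(dispatchTypeId(ts)) << "\n";  // 1 = CPUTensorId
}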
@@ -44,70 +46,53 @@ class CAFFE2_API ATenOpTable {
     : schema_(std::move(schema)) {}
 
   template <class FuncType>
-  FuncType* getOp(TensorTypeSet ts, bool is_variable) const {
-    if (is_variable) {
-      return reinterpret_cast<FuncType*>(getVariableOp());
-    }
-    return reinterpret_cast<FuncType*>(getBaseOp(tensorTypeIdToBackend(impl::dispatchTypeId(ts))));
+  FuncType* getOp(TensorTypeSet ts) const {
+    return reinterpret_cast<FuncType*>(getOp(impl::dispatchTypeId(ts)));
   }
 
  private:
-  void registerOp(Backend backend, void* fn) {
-    TORCH_CHECK(function_table_[static_cast<int64_t>(backend)] == nullptr,
-      "Attempting to register variable function for schema ", schema_,
-      " and backend ", toString(backend),
+  void registerOp(TensorTypeId tid, void* fn) {
+    TORCH_CHECK(function_table_[static_cast<int64_t>(tid)] == nullptr,
+      "Attempting to register function for schema ", schema_,
+      " and tensor type ", toString(tid),
       " but there is already a function registered");
-    function_table_[static_cast<int64_t>(backend)] = fn;
+    function_table_[static_cast<int64_t>(tid)] = fn;
   }
 
-  void registerVariableOp(void* fn) {
-    TORCH_CHECK(variable_function_ == nullptr,
-      "Attempting to register variable function for schema ", schema_,
-      " but there is already a function registered");
-    variable_function_ = fn;
-  }
-
-  void* getBaseOp(Backend backend) const {
-    if (function_table_[static_cast<int64_t>(backend)] == nullptr) {
-      TORCH_CHECK(function_table_[static_cast<int64_t>(Backend::Undefined)] != nullptr,
-        "No function is registered for schema ", schema_, " on backend ", toString(backend));
-      return function_table_[static_cast<int64_t>(Backend::Undefined)];
+  void* getOp(TensorTypeId tid) const {
+    // You might think we can minorly optimize this further by maintaining a
+    // bitmask of registered operator keys, so we don't select dispatch ids
+    // which don't have implementations here. But the net effect is that if you
+    // get a Variable CPUTensor, if there is no variable registration, you'll
+    // fall back to the CPU implementation. Is this what you want? Unlikely...
+    if (function_table_[static_cast<int64_t>(tid)] == nullptr) {
+      TORCH_CHECK(function_table_[static_cast<int64_t>(TensorTypeId::UndefinedTensorId)] != nullptr,
+        "No function is registered for schema ", schema_, " on tensor type ", toString(tid));
+      return function_table_[static_cast<int64_t>(TensorTypeId::UndefinedTensorId)];
     }
-    return function_table_[static_cast<int64_t>(backend)];
-  }
-
-  void* getVariableOp() const {
-    TORCH_CHECK(variable_function_ != nullptr,
-      "No variable function registered for ", schema_);
-    return variable_function_;
+    return function_table_[static_cast<int64_t>(tid)];
  }
 
   friend class ATenDispatch;
 
   std::string schema_;
-  void* function_table_[static_cast<int64_t>(Backend::NumOptions)] = {nullptr};
-  void* variable_function_ = nullptr;
+  void* function_table_[static_cast<int64_t>(TensorTypeId::NumTensorIds)] = {nullptr};
 };
 
 class CAFFE2_API ATenDispatch {
  public:
   template <class FuncType>
-  ATenDispatch& registerOp(Backend backend, const char* schema, FuncType* fn) {
+  ATenDispatch& registerOp(TensorTypeId id, const char* schema, FuncType* fn) {
     std::lock_guard<std::mutex> lock(mutex_);
     if (op_tables_.find(schema) == op_tables_.end()) {
       op_tables_.insert(std::make_pair(schema, ATenOpTable(schema)));
     }
-    op_tables_.at(schema).registerOp(backend, reinterpret_cast<void*>(fn));
+    op_tables_.at(schema).registerOp(id, reinterpret_cast<void*>(fn));
     return *this;
   }
 
-  template <class FuncType>
-  ATenDispatch& registerVariableOp(const char* schema, FuncType* fn) {
-    std::lock_guard<std::mutex> lock(mutex_);
-    if (op_tables_.find(schema) == op_tables_.end()) {
-      op_tables_.insert(std::make_pair(schema, ATenOpTable(schema)));
-    }
-    op_tables_.at(schema).registerVariableOp(reinterpret_cast<void*>(fn));
-    return *this;
+  template <class FuncType>
+  ATenDispatch& registerOp(Backend b, const char* schema, FuncType* fn) {
+    return registerOp(backendToTensorTypeId(b), schema, fn);
   }
 
   const ATenOpTable* getOpTable(const char* schema) const {
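(Another aside, not part of the patch: with registerVariableOp/getVariableOp gone, there is a single flat table keyed by TensorTypeId, and UndefinedTensorId doubles as the catch-all slot, exactly as the comment in getOp above warns. Here is a hedged, self-contained sketch of that lookup structure; "OpTable" and the kernels are made up, only the registerOp/getOp/fallback shape follows the patch.)

// Toy single-table lookup mirroring the reworked ATenOpTable.
#include <array>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <stdexcept>

enum class TensorTypeId : uint8_t { UndefinedTensorId = 0, CPUTensorId, CUDATensorId, VariableTensorId, NumTensorIds };

struct OpTable {
  // One flat table indexed by TensorTypeId -- no separate variable_function_ slot any more.
  std::array<void*, static_cast<std::size_t>(TensorTypeId::NumTensorIds)> function_table{};

  void registerOp(TensorTypeId tid, void* fn) {
    if (function_table[static_cast<std::size_t>(tid)] != nullptr)
      throw std::runtime_error("there is already a function registered");
    function_table[static_cast<std::size_t>(tid)] = fn;
  }

  template <class FuncType>
  FuncType* getOp(TensorTypeId tid) const {
    void* fn = function_table[static_cast<std::size_t>(tid)];
    if (fn == nullptr)  // fall back to the catch-all UndefinedTensorId slot, as the patch does
      fn = function_table[static_cast<std::size_t>(TensorTypeId::UndefinedTensorId)];
    if (fn == nullptr) throw std::runtime_error("no function is registered");
    return reinterpret_cast<FuncType*>(fn);
  }
};

int cpu_add(int a, int b) { return a + b; }              // pretend CPU kernel
int fallback_add(int a, int b) { return a + b + 1000; }  // pretend catch-all kernel

int main() {
  OpTable t;
  t.registerOp(TensorTypeId::CPUTensorId, reinterpret_cast<void*>(&cpu_add));
  t.registerOp(TensorTypeId::UndefinedTensorId, reinterpret_cast<void*>(&fallback_add));
  std::cout << t.getOp<int (int, int)>(TensorTypeId::CPUTensorId)(1, 2) << "\n";   // 3: CPU entry hit
  std::cout << t.getOp<int (int, int)>(TensorTypeId::CUDATensorId)(1, 2) << "\n";  // 1003: fell back
}

Variable kernels simply become entries keyed by VariableTensorId in the same table, which is why getOp loses its is_variable flag; that is what the mechanical TensorMethods.h churn below is about.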
diff --git a/aten/src/ATen/core/LegacyTypeDispatch.h b/aten/src/ATen/core/LegacyTypeDispatch.h
index abd1031089c..f3dc9e457c6 100644
--- a/aten/src/ATen/core/LegacyTypeDispatch.h
+++ b/aten/src/ATen/core/LegacyTypeDispatch.h
@@ -20,11 +20,11 @@ namespace at {
 class CAFFE2_API LegacyTypeDispatch {
  public:
   void initForTensorTypeSet(TensorTypeSet ts) {
-    // TODO: When Variable gets turned on in TensorTypeSet, this
-    // will skip initialization when you initially process a
-    // Variable CUDA tensor, for example (because I'll get Variable
-    // and it's not gonna have any device type.) Is that OK?
-    auto b = tensorTypeIdToBackend(impl::dispatchTypeId(ts));
+    // TODO: Avoid use of legacyExtractTypeId here. The key
+    // problem is that you may get a TensorTypeSet with
+    // VariableTensorId set; should you initialize the "underlying"
+    // type in that case? Hard to say.
+    auto b = tensorTypeIdToBackend(legacyExtractTypeId(ts));
     auto p = backendToDeviceType(b);
     static std::once_flag cpu_once;
     static std::once_flag cuda_once;
diff --git a/aten/src/ATen/core/TensorBody.h b/aten/src/ATen/core/TensorBody.h
index 9b097628726..754a9f363a4 100644
--- a/aten/src/ATen/core/TensorBody.h
+++ b/aten/src/ATen/core/TensorBody.h
@@ -218,10 +218,7 @@ class CAFFE2_API Tensor {
   DeprecatedTypeProperties & type() const {
     return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
-        // TODO: When we build in Variable here, we need to change the
-        // signature of getDeprecatedTypeProperties to collapse backend
-        // and is_variable into TensorTypeSet
-        tensorTypeIdToBackend(type_set().highestPriorityTypeId()),
+        tensorTypeIdToBackend(legacyExtractTypeId(type_set())),
         scalar_type(),
         is_variable());
   }
@@ -932,14 +929,6 @@ inline TensorTypeSet infer_tensor_type_set(TensorList tl) {
   return tl[0].type_set();
 }
 
-inline bool infer_is_variable(const Tensor & t) {
-  TORCH_CHECK(t.defined(), "undefined Tensor");
-  return t.is_variable();
-}
-inline bool infer_is_variable(const TensorList & tl) {
-  TORCH_CHECK(tl.size() > 0, "expected a non-empty list of Tensors");
-  return tl[0].is_variable();
-}
 } // namespace detail
 
 static inline TensorTypeId legacyExtractTypeId(const Tensor& t) {
diff --git a/aten/src/ATen/core/TensorMethods.h b/aten/src/ATen/core/TensorMethods.h
index d3a31c83ed7..88224fb15ba 100644
--- a/aten/src/ATen/core/TensorMethods.h
+++ b/aten/src/ATen/core/TensorMethods.h
@@ -62,7 +62,7 @@ inline void Tensor::backward(const Tensor & gradient, bool keep_graph, bool crea
     TypeDefault::backward(const_cast(*this), gradient, keep_graph, create_graph);
 #else
     static auto table = globalATenDispatch().getOpTable("aten::backward(Tensor self, Tensor? gradient=None, bool keep_graph=False, bool create_graph=False) -> void");
-    return table->getOp(type_set(), is_variable())(const_cast(*this), gradient, keep_graph, create_graph);
+    return table->getOp(type_set())(const_cast(*this), gradient, keep_graph, create_graph);
 #endif
 }
 inline void Tensor::set_data(const Tensor & new_data) const {
@@ -70,7 +70,7 @@ inline void Tensor::set_data(const Tensor & new_data) const {
     TypeDefault::set_data(const_cast(*this), new_data);
 #else
     static auto table = globalATenDispatch().getOpTable("aten::set_data(Tensor(a!) self, Tensor new_data) -> void");
-    return table->getOp(type_set(), is_variable())(const_cast(*this), new_data);
+    return table->getOp(type_set())(const_cast(*this), new_data);
 #endif
 }
 #ifdef BUILD_NAMEDTENSOR
 inline Tensor & Tensor::names_(c10::optional names) const {
@@ -79,7 +79,7 @@ inline Tensor & Tensor::names_(c10::optional names) const {
     return TypeDefault::names_(const_cast(*this), names);
 #else
     static auto table = globalATenDispatch().getOpTable("aten::names_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)");
-    return table->getOp)>(type_set(), is_variable())(const_cast(*this), names);
+    return table->getOp)>(type_set())(const_cast(*this), names);
 #endif
 }
 #endif
 inline Tensor Tensor::renamed(c10::optional names) const {
@@ -89,7 +89,7 @@ inline Tensor Tensor::renamed(c10::optional names) const {
     return TypeDefault::renamed(const_cast(*this), names);
 #else
     static auto table = globalATenDispatch().getOpTable("aten::renamed(Tensor(a) self, Dimname[]?
names) -> Tensor(a)"); - return table->getOp)>(type_set(), is_variable())(const_cast(*this), names); + return table->getOp)>(type_set())(const_cast(*this), names); #endif } #endif @@ -99,7 +99,7 @@ inline Tensor Tensor::align_to(DimnameList names) const { return TypeDefault::align_to(const_cast(*this), names); #else static auto table = globalATenDispatch().getOpTable("aten::align_to(Tensor self, DimnameList names) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), names); + return table->getOp(type_set())(const_cast(*this), names); #endif } #endif @@ -108,7 +108,7 @@ inline Tensor Tensor::abs() const { return TypeDefault::abs(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::abs(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::abs_() const { @@ -122,7 +122,7 @@ inline Tensor & Tensor::abs_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::abs_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::acos() const { @@ -130,7 +130,7 @@ inline Tensor Tensor::acos() const { return TypeDefault::acos(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::acos(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::acos_() const { @@ -144,7 +144,7 @@ inline Tensor & Tensor::acos_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::acos_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::add(const Tensor & other, Scalar alpha) const { @@ -161,7 +161,7 @@ inline Tensor Tensor::add(const Tensor & other, Scalar alpha) const { } #else static auto table = globalATenDispatch().getOpTable("aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other, alpha); + return table->getOp(type_set())(const_cast(*this), other, alpha); #endif } inline Tensor & Tensor::add_(const Tensor & other, Scalar alpha) const { @@ -178,7 +178,7 @@ inline Tensor & Tensor::add_(const Tensor & other, Scalar alpha) const { } #else static auto table = globalATenDispatch().getOpTable("aten::add_.Tensor(Tensor(a!) 
self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other, alpha); + return table->getOp(type_set())(const_cast(*this), other, alpha); #endif } inline Tensor Tensor::add(Scalar other, Scalar alpha) const { @@ -186,7 +186,7 @@ inline Tensor Tensor::add(Scalar other, Scalar alpha) const { return TypeDefault::add(const_cast(*this), other, alpha); #else static auto table = globalATenDispatch().getOpTable("aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other, alpha); + return table->getOp(type_set())(const_cast(*this), other, alpha); #endif } inline Tensor & Tensor::add_(Scalar other, Scalar alpha) const { @@ -194,7 +194,7 @@ inline Tensor & Tensor::add_(Scalar other, Scalar alpha) const { return TypeDefault::add_(const_cast(*this), other, alpha); #else static auto table = globalATenDispatch().getOpTable("aten::add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other, alpha); + return table->getOp(type_set())(const_cast(*this), other, alpha); #endif } inline Tensor Tensor::addmv(const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) const { @@ -208,7 +208,7 @@ inline Tensor Tensor::addmv(const Tensor & mat, const Tensor & vec, Scalar beta, } #else static auto table = globalATenDispatch().getOpTable("aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), mat, vec, beta, alpha); + return table->getOp(type_set())(const_cast(*this), mat, vec, beta, alpha); #endif } inline Tensor & Tensor::addmv_(const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) const { @@ -222,7 +222,7 @@ inline Tensor & Tensor::addmv_(const Tensor & mat, const Tensor & vec, Scalar be } #else static auto table = globalATenDispatch().getOpTable("aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), mat, vec, beta, alpha); + return table->getOp(type_set())(const_cast(*this), mat, vec, beta, alpha); #endif } inline Tensor Tensor::addr(const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) const { @@ -230,7 +230,7 @@ inline Tensor Tensor::addr(const Tensor & vec1, const Tensor & vec2, Scalar beta return TypeDefault::addr(const_cast(*this), vec1, vec2, beta, alpha); #else static auto table = globalATenDispatch().getOpTable("aten::addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), vec1, vec2, beta, alpha); + return table->getOp(type_set())(const_cast(*this), vec1, vec2, beta, alpha); #endif } inline Tensor & Tensor::addr_(const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) const { @@ -238,7 +238,7 @@ inline Tensor & Tensor::addr_(const Tensor & vec1, const Tensor & vec2, Scalar b return TypeDefault::addr_(const_cast(*this), vec1, vec2, beta, alpha); #else static auto table = globalATenDispatch().getOpTable("aten::addr_(Tensor(a!) 
self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), vec1, vec2, beta, alpha); + return table->getOp(type_set())(const_cast(*this), vec1, vec2, beta, alpha); #endif } inline Tensor Tensor::all(int64_t dim, bool keepdim) const { @@ -246,7 +246,7 @@ inline Tensor Tensor::all(int64_t dim, bool keepdim) const { return TypeDefault::all(const_cast(*this), dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, keepdim); + return table->getOp(type_set())(const_cast(*this), dim, keepdim); #endif } inline bool Tensor::allclose(const Tensor & other, double rtol, double atol, bool equal_nan) const { @@ -254,7 +254,7 @@ inline bool Tensor::allclose(const Tensor & other, double rtol, double atol, boo return TypeDefault::allclose(const_cast(*this), other, rtol, atol, equal_nan); #else static auto table = globalATenDispatch().getOpTable("aten::allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other, rtol, atol, equal_nan); + return table->getOp(type_set())(const_cast(*this), other, rtol, atol, equal_nan); #endif } inline Tensor Tensor::any(int64_t dim, bool keepdim) const { @@ -262,7 +262,7 @@ inline Tensor Tensor::any(int64_t dim, bool keepdim) const { return TypeDefault::any(const_cast(*this), dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, keepdim); + return table->getOp(type_set())(const_cast(*this), dim, keepdim); #endif } inline Tensor Tensor::argmax(c10::optional dim, bool keepdim) const { @@ -270,7 +270,7 @@ inline Tensor Tensor::argmax(c10::optional dim, bool keepdim) const { return TypeDefault::argmax(const_cast(*this), dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor"); - return table->getOp, bool)>(type_set(), is_variable())(const_cast(*this), dim, keepdim); + return table->getOp, bool)>(type_set())(const_cast(*this), dim, keepdim); #endif } inline Tensor Tensor::argmin(c10::optional dim, bool keepdim) const { @@ -278,7 +278,7 @@ inline Tensor Tensor::argmin(c10::optional dim, bool keepdim) const { return TypeDefault::argmin(const_cast(*this), dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor"); - return table->getOp, bool)>(type_set(), is_variable())(const_cast(*this), dim, keepdim); + return table->getOp, bool)>(type_set())(const_cast(*this), dim, keepdim); #endif } inline Tensor Tensor::as_strided(IntArrayRef size, IntArrayRef stride, c10::optional storage_offset) const { @@ -295,7 +295,7 @@ inline Tensor Tensor::as_strided(IntArrayRef size, IntArrayRef stride, c10::opti } #else static auto table = globalATenDispatch().getOpTable("aten::as_strided(Tensor(a) self, int[] size, int[] stride, int? 
storage_offset=None) -> Tensor(a)"); - return table->getOp)>(type_set(), is_variable())(const_cast(*this), size, stride, storage_offset); + return table->getOp)>(type_set())(const_cast(*this), size, stride, storage_offset); #endif } inline Tensor & Tensor::as_strided_(IntArrayRef size, IntArrayRef stride, c10::optional storage_offset) const { @@ -303,7 +303,7 @@ inline Tensor & Tensor::as_strided_(IntArrayRef size, IntArrayRef stride, c10::o return TypeDefault::as_strided_(const_cast(*this), size, stride, storage_offset); #else static auto table = globalATenDispatch().getOpTable("aten::as_strided_(Tensor(a!) self, int[] size, int[] stride, int? storage_offset=None) -> Tensor(a!)"); - return table->getOp)>(type_set(), is_variable())(const_cast(*this), size, stride, storage_offset); + return table->getOp)>(type_set())(const_cast(*this), size, stride, storage_offset); #endif } inline Tensor Tensor::asin() const { @@ -311,7 +311,7 @@ inline Tensor Tensor::asin() const { return TypeDefault::asin(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::asin(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::asin_() const { @@ -325,7 +325,7 @@ inline Tensor & Tensor::asin_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::asin_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::atan() const { @@ -333,7 +333,7 @@ inline Tensor Tensor::atan() const { return TypeDefault::atan(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::atan(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::atan_() const { @@ -347,7 +347,7 @@ inline Tensor & Tensor::atan_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::atan_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::baddbmm(const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const { @@ -361,7 +361,7 @@ inline Tensor Tensor::baddbmm(const Tensor & batch1, const Tensor & batch2, Scal } #else static auto table = globalATenDispatch().getOpTable("aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), batch1, batch2, beta, alpha); + return table->getOp(type_set())(const_cast(*this), batch1, batch2, beta, alpha); #endif } inline Tensor & Tensor::baddbmm_(const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const { @@ -375,7 +375,7 @@ inline Tensor & Tensor::baddbmm_(const Tensor & batch1, const Tensor & batch2, S } #else static auto table = globalATenDispatch().getOpTable("aten::baddbmm_(Tensor(a!) 
self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), batch1, batch2, beta, alpha); + return table->getOp(type_set())(const_cast(*this), batch1, batch2, beta, alpha); #endif } inline Tensor Tensor::bernoulli(Generator * generator) const { @@ -383,7 +383,7 @@ inline Tensor Tensor::bernoulli(Generator * generator) const { return TypeDefault::bernoulli(const_cast(*this), generator); #else static auto table = globalATenDispatch().getOpTable("aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), generator); + return table->getOp(type_set())(const_cast(*this), generator); #endif } inline Tensor & Tensor::bernoulli_(const Tensor & p, Generator * generator) const { @@ -397,7 +397,7 @@ inline Tensor & Tensor::bernoulli_(const Tensor & p, Generator * generator) cons } #else static auto table = globalATenDispatch().getOpTable("aten::bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), p, generator); + return table->getOp(type_set())(const_cast(*this), p, generator); #endif } inline Tensor & Tensor::bernoulli_(double p, Generator * generator) const { @@ -411,7 +411,7 @@ inline Tensor & Tensor::bernoulli_(double p, Generator * generator) const { } #else static auto table = globalATenDispatch().getOpTable("aten::bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), p, generator); + return table->getOp(type_set())(const_cast(*this), p, generator); #endif } inline Tensor Tensor::bernoulli(double p, Generator * generator) const { @@ -419,7 +419,7 @@ inline Tensor Tensor::bernoulli(double p, Generator * generator) const { return TypeDefault::bernoulli(const_cast(*this), p, generator); #else static auto table = globalATenDispatch().getOpTable("aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), p, generator); + return table->getOp(type_set())(const_cast(*this), p, generator); #endif } inline Tensor Tensor::bincount(const Tensor & weights, int64_t minlength) const { @@ -433,7 +433,7 @@ inline Tensor Tensor::bincount(const Tensor & weights, int64_t minlength) const } #else static auto table = globalATenDispatch().getOpTable("aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), weights, minlength); + return table->getOp(type_set())(const_cast(*this), weights, minlength); #endif } inline Tensor Tensor::bitwise_not() const { @@ -441,7 +441,7 @@ inline Tensor Tensor::bitwise_not() const { return TypeDefault::bitwise_not(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::bitwise_not(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::bitwise_not_() const { @@ -449,7 +449,7 @@ inline Tensor & Tensor::bitwise_not_() const { return TypeDefault::bitwise_not_(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::bitwise_not_(Tensor(a!) 
self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::logical_not() const { @@ -457,7 +457,7 @@ inline Tensor Tensor::logical_not() const { return TypeDefault::logical_not(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::logical_not(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::logical_not_() const { @@ -465,7 +465,7 @@ inline Tensor & Tensor::logical_not_() const { return TypeDefault::logical_not_(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::logical_not_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::logical_xor(const Tensor & other) const { @@ -473,7 +473,7 @@ inline Tensor Tensor::logical_xor(const Tensor & other) const { return TypeDefault::logical_xor(const_cast(*this), other); #else static auto table = globalATenDispatch().getOpTable("aten::logical_xor(Tensor self, Tensor other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::logical_xor_(const Tensor & other) const { @@ -481,7 +481,7 @@ inline Tensor & Tensor::logical_xor_(const Tensor & other) const { return TypeDefault::logical_xor_(const_cast(*this), other); #else static auto table = globalATenDispatch().getOpTable("aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::bmm(const Tensor & mat2) const { @@ -495,7 +495,7 @@ inline Tensor Tensor::bmm(const Tensor & mat2) const { } #else static auto table = globalATenDispatch().getOpTable("aten::bmm(Tensor self, Tensor mat2) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), mat2); + return table->getOp(type_set())(const_cast(*this), mat2); #endif } inline Tensor Tensor::ceil() const { @@ -503,7 +503,7 @@ inline Tensor Tensor::ceil() const { return TypeDefault::ceil(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::ceil(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::ceil_() const { @@ -511,7 +511,7 @@ inline Tensor & Tensor::ceil_() const { return TypeDefault::ceil_(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::ceil_(Tensor(a!) 
self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline std::vector Tensor::chunk(int64_t chunks, int64_t dim) const { @@ -519,7 +519,7 @@ inline std::vector Tensor::chunk(int64_t chunks, int64_t dim) const { return TypeDefault::chunk(const_cast(*this), chunks, dim); #else static auto table = globalATenDispatch().getOpTable("aten::chunk(Tensor(a) self, int chunks, int dim=0) -> Tensor(a)[]"); - return table->getOp (const Tensor &, int64_t, int64_t)>(type_set(), is_variable())(const_cast(*this), chunks, dim); + return table->getOp (const Tensor &, int64_t, int64_t)>(type_set())(const_cast(*this), chunks, dim); #endif } inline Tensor Tensor::clamp(c10::optional min, c10::optional max) const { @@ -527,7 +527,7 @@ inline Tensor Tensor::clamp(c10::optional min, c10::optional max return TypeDefault::clamp(const_cast(*this), min, max); #else static auto table = globalATenDispatch().getOpTable("aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor"); - return table->getOp, c10::optional)>(type_set(), is_variable())(const_cast(*this), min, max); + return table->getOp, c10::optional)>(type_set())(const_cast(*this), min, max); #endif } inline Tensor & Tensor::clamp_(c10::optional min, c10::optional max) const { @@ -541,7 +541,7 @@ inline Tensor & Tensor::clamp_(c10::optional min, c10::optional } #else static auto table = globalATenDispatch().getOpTable("aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)"); - return table->getOp, c10::optional)>(type_set(), is_variable())(const_cast(*this), min, max); + return table->getOp, c10::optional)>(type_set())(const_cast(*this), min, max); #endif } inline Tensor Tensor::clamp_max(Scalar max) const { @@ -549,7 +549,7 @@ inline Tensor Tensor::clamp_max(Scalar max) const { return TypeDefault::clamp_max(const_cast(*this), max); #else static auto table = globalATenDispatch().getOpTable("aten::clamp_max(Tensor self, Scalar max) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), max); + return table->getOp(type_set())(const_cast(*this), max); #endif } inline Tensor & Tensor::clamp_max_(Scalar max) const { @@ -563,7 +563,7 @@ inline Tensor & Tensor::clamp_max_(Scalar max) const { } #else static auto table = globalATenDispatch().getOpTable("aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), max); + return table->getOp(type_set())(const_cast(*this), max); #endif } inline Tensor Tensor::clamp_min(Scalar min) const { @@ -571,7 +571,7 @@ inline Tensor Tensor::clamp_min(Scalar min) const { return TypeDefault::clamp_min(const_cast(*this), min); #else static auto table = globalATenDispatch().getOpTable("aten::clamp_min(Tensor self, Scalar min) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), min); + return table->getOp(type_set())(const_cast(*this), min); #endif } inline Tensor & Tensor::clamp_min_(Scalar min) const { @@ -585,7 +585,7 @@ inline Tensor & Tensor::clamp_min_(Scalar min) const { } #else static auto table = globalATenDispatch().getOpTable("aten::clamp_min_(Tensor(a!) 
self, Scalar min) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), min); + return table->getOp(type_set())(const_cast(*this), min); #endif } inline Tensor Tensor::contiguous(MemoryFormat memory_format) const { @@ -593,7 +593,7 @@ inline Tensor Tensor::contiguous(MemoryFormat memory_format) const { return TypeDefault::contiguous(const_cast(*this), memory_format); #else static auto table = globalATenDispatch().getOpTable("aten::contiguous(Tensor self, *, MemoryFormat memory_format=contiguous_format) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), memory_format); + return table->getOp(type_set())(const_cast(*this), memory_format); #endif } inline Tensor & Tensor::copy_(const Tensor & src, bool non_blocking) const { @@ -601,7 +601,7 @@ inline Tensor & Tensor::copy_(const Tensor & src, bool non_blocking) const { return TypeDefault::copy_(const_cast(*this), src, non_blocking); #else static auto table = globalATenDispatch().getOpTable("aten::copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), src, non_blocking); + return table->getOp(type_set())(const_cast(*this), src, non_blocking); #endif } inline Tensor Tensor::cos() const { @@ -609,7 +609,7 @@ inline Tensor Tensor::cos() const { return TypeDefault::cos(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::cos(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::cos_() const { @@ -623,7 +623,7 @@ inline Tensor & Tensor::cos_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::cos_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::cosh() const { @@ -631,7 +631,7 @@ inline Tensor Tensor::cosh() const { return TypeDefault::cosh(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::cosh(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::cosh_() const { @@ -645,7 +645,7 @@ inline Tensor & Tensor::cosh_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::cosh_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::cumsum(int64_t dim, c10::optional dtype) const { @@ -653,7 +653,7 @@ inline Tensor Tensor::cumsum(int64_t dim, c10::optional dtype) const return TypeDefault::cumsum(const_cast(*this), dim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor"); - return table->getOp)>(type_set(), is_variable())(const_cast(*this), dim, dtype); + return table->getOp)>(type_set())(const_cast(*this), dim, dtype); #endif } inline Tensor Tensor::cumprod(int64_t dim, c10::optional dtype) const { @@ -661,7 +661,7 @@ inline Tensor Tensor::cumprod(int64_t dim, c10::optional dtype) cons return TypeDefault::cumprod(const_cast(*this), dim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::cumprod(Tensor self, int dim, *, ScalarType? 
dtype=None) -> Tensor"); - return table->getOp)>(type_set(), is_variable())(const_cast(*this), dim, dtype); + return table->getOp)>(type_set())(const_cast(*this), dim, dtype); #endif } inline Tensor Tensor::det() const { @@ -669,7 +669,7 @@ inline Tensor Tensor::det() const { return TypeDefault::det(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::det(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::diag_embed(int64_t offset, int64_t dim1, int64_t dim2) const { @@ -677,7 +677,7 @@ inline Tensor Tensor::diag_embed(int64_t offset, int64_t dim1, int64_t dim2) con return TypeDefault::diag_embed(const_cast(*this), offset, dim1, dim2); #else static auto table = globalATenDispatch().getOpTable("aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), offset, dim1, dim2); + return table->getOp(type_set())(const_cast(*this), offset, dim1, dim2); #endif } inline Tensor Tensor::diagflat(int64_t offset) const { @@ -685,7 +685,7 @@ inline Tensor Tensor::diagflat(int64_t offset) const { return TypeDefault::diagflat(const_cast(*this), offset); #else static auto table = globalATenDispatch().getOpTable("aten::diagflat(Tensor self, int offset=0) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), offset); + return table->getOp(type_set())(const_cast(*this), offset); #endif } inline Tensor Tensor::diagonal(int64_t offset, int64_t dim1, int64_t dim2) const { @@ -693,7 +693,7 @@ inline Tensor Tensor::diagonal(int64_t offset, int64_t dim1, int64_t dim2) const return TypeDefault::diagonal(const_cast(*this), offset, dim1, dim2); #else static auto table = globalATenDispatch().getOpTable("aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), offset, dim1, dim2); + return table->getOp(type_set())(const_cast(*this), offset, dim1, dim2); #endif } inline Tensor & Tensor::fill_diagonal_(Scalar fill_value, bool wrap) const { @@ -701,7 +701,7 @@ inline Tensor & Tensor::fill_diagonal_(Scalar fill_value, bool wrap) const { return TypeDefault::fill_diagonal_(const_cast(*this), fill_value, wrap); #else static auto table = globalATenDispatch().getOpTable("aten::fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), fill_value, wrap); + return table->getOp(type_set())(const_cast(*this), fill_value, wrap); #endif } inline Tensor Tensor::div(const Tensor & other) const { @@ -709,7 +709,7 @@ inline Tensor Tensor::div(const Tensor & other) const { return TypeDefault::div(const_cast(*this), other); #else static auto table = globalATenDispatch().getOpTable("aten::div.Tensor(Tensor self, Tensor other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::div_(const Tensor & other) const { @@ -717,7 +717,7 @@ inline Tensor & Tensor::div_(const Tensor & other) const { return TypeDefault::div_(const_cast(*this), other); #else static auto table = globalATenDispatch().getOpTable("aten::div_.Tensor(Tensor(a!) 
self, Tensor other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::div(Scalar other) const { @@ -725,7 +725,7 @@ inline Tensor Tensor::div(Scalar other) const { return TypeDefault::div(const_cast(*this), other); #else static auto table = globalATenDispatch().getOpTable("aten::div.Scalar(Tensor self, Scalar other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::div_(Scalar other) const { @@ -733,7 +733,7 @@ inline Tensor & Tensor::div_(Scalar other) const { return TypeDefault::div_(const_cast(*this), other); #else static auto table = globalATenDispatch().getOpTable("aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::dot(const Tensor & tensor) const { @@ -747,7 +747,7 @@ inline Tensor Tensor::dot(const Tensor & tensor) const { } #else static auto table = globalATenDispatch().getOpTable("aten::dot(Tensor self, Tensor tensor) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), tensor); + return table->getOp(type_set())(const_cast(*this), tensor); #endif } inline Tensor Tensor::new_empty(IntArrayRef size, const TensorOptions & options) const { @@ -755,7 +755,7 @@ inline Tensor Tensor::new_empty(IntArrayRef size, const TensorOptions & options) return TypeDefault::new_empty(const_cast(*this), size, options); #else static auto table = globalATenDispatch().getOpTable("aten::new_empty(Tensor self, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), size, options); + return table->getOp(type_set())(const_cast(*this), size, options); #endif } inline Tensor Tensor::new_full(IntArrayRef size, Scalar fill_value, const TensorOptions & options) const { @@ -763,7 +763,7 @@ inline Tensor Tensor::new_full(IntArrayRef size, Scalar fill_value, const Tensor return TypeDefault::new_full(const_cast(*this), size, fill_value, options); #else static auto table = globalATenDispatch().getOpTable("aten::new_full(Tensor self, int[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), size, fill_value, options); + return table->getOp(type_set())(const_cast(*this), size, fill_value, options); #endif } inline Tensor & Tensor::resize_(IntArrayRef size) const { @@ -777,7 +777,7 @@ inline Tensor & Tensor::resize_(IntArrayRef size) const { } #else static auto table = globalATenDispatch().getOpTable("aten::resize_(Tensor(a!) 
self, int[] size) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), size); + return table->getOp(type_set())(const_cast(*this), size); #endif } inline Tensor Tensor::erf() const { @@ -785,7 +785,7 @@ inline Tensor Tensor::erf() const { return TypeDefault::erf(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::erf(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::erf_() const { @@ -799,7 +799,7 @@ inline Tensor & Tensor::erf_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::erf_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::erfc() const { @@ -807,7 +807,7 @@ inline Tensor Tensor::erfc() const { return TypeDefault::erfc(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::erfc(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::erfc_() const { @@ -821,7 +821,7 @@ inline Tensor & Tensor::erfc_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::erfc_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::exp() const { @@ -829,7 +829,7 @@ inline Tensor Tensor::exp() const { return TypeDefault::exp(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::exp(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::exp_() const { @@ -843,7 +843,7 @@ inline Tensor & Tensor::exp_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::exp_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::expm1() const { @@ -851,7 +851,7 @@ inline Tensor Tensor::expm1() const { return TypeDefault::expm1(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::expm1(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::expm1_() const { @@ -865,7 +865,7 @@ inline Tensor & Tensor::expm1_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::expm1_(Tensor(a!) 
self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::expand(IntArrayRef size, bool implicit) const { @@ -873,7 +873,7 @@ inline Tensor Tensor::expand(IntArrayRef size, bool implicit) const { return TypeDefault::expand(const_cast(*this), size, implicit); #else static auto table = globalATenDispatch().getOpTable("aten::expand(Tensor(a) self, int[] size, *, bool implicit=False) -> Tensor(a)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), size, implicit); + return table->getOp(type_set())(const_cast(*this), size, implicit); #endif } inline Tensor Tensor::expand_as(const Tensor & other) const { @@ -881,7 +881,7 @@ inline Tensor Tensor::expand_as(const Tensor & other) const { return TypeDefault::expand_as(const_cast(*this), other); #else static auto table = globalATenDispatch().getOpTable("aten::expand_as(Tensor self, Tensor other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::flatten(int64_t start_dim, int64_t end_dim) const { @@ -889,7 +889,7 @@ inline Tensor Tensor::flatten(int64_t start_dim, int64_t end_dim) const { return TypeDefault::flatten(const_cast(*this), start_dim, end_dim); #else static auto table = globalATenDispatch().getOpTable("aten::flatten(Tensor self, int start_dim=0, int end_dim=-1) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), start_dim, end_dim); + return table->getOp(type_set())(const_cast(*this), start_dim, end_dim); #endif } #ifdef BUILD_NAMEDTENSOR @@ -898,7 +898,7 @@ inline Tensor Tensor::flatten(int64_t start_dim, int64_t end_dim, Dimname out_di return TypeDefault::flatten(const_cast(*this), start_dim, end_dim, out_dim); #else static auto table = globalATenDispatch().getOpTable("aten::flatten(Tensor self, int start_dim, int end_dim, Dimname out_dim) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), start_dim, end_dim, out_dim); + return table->getOp(type_set())(const_cast(*this), start_dim, end_dim, out_dim); #endif } #endif @@ -908,7 +908,7 @@ inline Tensor Tensor::flatten(Dimname start_dim, Dimname end_dim, Dimname out_di return TypeDefault::flatten(const_cast(*this), start_dim, end_dim, out_dim); #else static auto table = globalATenDispatch().getOpTable("aten::flatten(Tensor self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), start_dim, end_dim, out_dim); + return table->getOp(type_set())(const_cast(*this), start_dim, end_dim, out_dim); #endif } #endif @@ -918,7 +918,7 @@ inline Tensor Tensor::flatten(DimnameList dims, Dimname out_dim) const { return TypeDefault::flatten(const_cast(*this), dims, out_dim); #else static auto table = globalATenDispatch().getOpTable("aten::flatten(Tensor self, DimnameList dims, Dimname out_dim) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dims, out_dim); + return table->getOp(type_set())(const_cast(*this), dims, out_dim); #endif } #endif @@ -927,7 +927,7 @@ inline Tensor & Tensor::fill_(Scalar value) const { return TypeDefault::fill_(const_cast(*this), value); #else static auto table = globalATenDispatch().getOpTable("aten::fill_.Scalar(Tensor(a!) 
self, Scalar value) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), value); + return table->getOp(type_set())(const_cast(*this), value); #endif } inline Tensor & Tensor::fill_(const Tensor & value) const { @@ -935,7 +935,7 @@ inline Tensor & Tensor::fill_(const Tensor & value) const { return TypeDefault::fill_(const_cast(*this), value); #else static auto table = globalATenDispatch().getOpTable("aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), value); + return table->getOp(type_set())(const_cast(*this), value); #endif } inline Tensor Tensor::floor() const { @@ -943,7 +943,7 @@ inline Tensor Tensor::floor() const { return TypeDefault::floor(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::floor(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::floor_() const { @@ -957,7 +957,7 @@ inline Tensor & Tensor::floor_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::floor_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::frac() const { @@ -965,7 +965,7 @@ inline Tensor Tensor::frac() const { return TypeDefault::frac(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::frac(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::frac_() const { @@ -979,7 +979,7 @@ inline Tensor & Tensor::frac_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::frac_(Tensor(a!) 
self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::ger(const Tensor & vec2) const { @@ -993,7 +993,7 @@ inline Tensor Tensor::ger(const Tensor & vec2) const { } #else static auto table = globalATenDispatch().getOpTable("aten::ger(Tensor self, Tensor vec2) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), vec2); + return table->getOp(type_set())(const_cast(*this), vec2); #endif } inline Tensor Tensor::fft(int64_t signal_ndim, bool normalized) const { @@ -1001,7 +1001,7 @@ inline Tensor Tensor::fft(int64_t signal_ndim, bool normalized) const { return TypeDefault::fft(const_cast(*this), signal_ndim, normalized); #else static auto table = globalATenDispatch().getOpTable("aten::fft(Tensor self, int signal_ndim, bool normalized=False) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), signal_ndim, normalized); + return table->getOp(type_set())(const_cast(*this), signal_ndim, normalized); #endif } inline Tensor Tensor::ifft(int64_t signal_ndim, bool normalized) const { @@ -1009,7 +1009,7 @@ inline Tensor Tensor::ifft(int64_t signal_ndim, bool normalized) const { return TypeDefault::ifft(const_cast(*this), signal_ndim, normalized); #else static auto table = globalATenDispatch().getOpTable("aten::ifft(Tensor self, int signal_ndim, bool normalized=False) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), signal_ndim, normalized); + return table->getOp(type_set())(const_cast(*this), signal_ndim, normalized); #endif } inline Tensor Tensor::rfft(int64_t signal_ndim, bool normalized, bool onesided) const { @@ -1017,7 +1017,7 @@ inline Tensor Tensor::rfft(int64_t signal_ndim, bool normalized, bool onesided) return TypeDefault::rfft(const_cast(*this), signal_ndim, normalized, onesided); #else static auto table = globalATenDispatch().getOpTable("aten::rfft(Tensor self, int signal_ndim, bool normalized=False, bool onesided=True) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), signal_ndim, normalized, onesided); + return table->getOp(type_set())(const_cast(*this), signal_ndim, normalized, onesided); #endif } inline Tensor Tensor::irfft(int64_t signal_ndim, bool normalized, bool onesided, IntArrayRef signal_sizes) const { @@ -1025,7 +1025,7 @@ inline Tensor Tensor::irfft(int64_t signal_ndim, bool normalized, bool onesided, return TypeDefault::irfft(const_cast(*this), signal_ndim, normalized, onesided, signal_sizes); #else static auto table = globalATenDispatch().getOpTable("aten::irfft(Tensor self, int signal_ndim, bool normalized=False, bool onesided=True, int[] signal_sizes=[]) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), signal_ndim, normalized, onesided, signal_sizes); + return table->getOp(type_set())(const_cast(*this), signal_ndim, normalized, onesided, signal_sizes); #endif } inline Tensor Tensor::index(TensorList indices) const { @@ -1033,7 +1033,7 @@ inline Tensor Tensor::index(TensorList indices) const { return TypeDefault::index(const_cast(*this), indices); #else static auto table = globalATenDispatch().getOpTable("aten::index(Tensor self, Tensor?[] indices) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), indices); + return table->getOp(type_set())(const_cast(*this), indices); #endif } inline Tensor & Tensor::index_copy_(int64_t dim, const Tensor & index, const Tensor & source) const { 
@@ -1041,7 +1041,7 @@ inline Tensor & Tensor::index_copy_(int64_t dim, const Tensor & index, const Ten return TypeDefault::index_copy_(const_cast(*this), dim, index, source); #else static auto table = globalATenDispatch().getOpTable("aten::index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, index, source); + return table->getOp(type_set())(const_cast(*this), dim, index, source); #endif } inline Tensor Tensor::index_copy(int64_t dim, const Tensor & index, const Tensor & source) const { @@ -1049,7 +1049,7 @@ inline Tensor Tensor::index_copy(int64_t dim, const Tensor & index, const Tensor return TypeDefault::index_copy(const_cast(*this), dim, index, source); #else static auto table = globalATenDispatch().getOpTable("aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, index, source); + return table->getOp(type_set())(const_cast(*this), dim, index, source); #endif } inline Tensor & Tensor::index_put_(TensorList indices, const Tensor & values, bool accumulate) const { @@ -1057,7 +1057,7 @@ inline Tensor & Tensor::index_put_(TensorList indices, const Tensor & values, bo return TypeDefault::index_put_(const_cast(*this), indices, values, accumulate); #else static auto table = globalATenDispatch().getOpTable("aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), indices, values, accumulate); + return table->getOp(type_set())(const_cast(*this), indices, values, accumulate); #endif } inline Tensor Tensor::index_put(TensorList indices, const Tensor & values, bool accumulate) const { @@ -1065,7 +1065,7 @@ inline Tensor Tensor::index_put(TensorList indices, const Tensor & values, bool return TypeDefault::index_put(const_cast(*this), indices, values, accumulate); #else static auto table = globalATenDispatch().getOpTable("aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), indices, values, accumulate); + return table->getOp(type_set())(const_cast(*this), indices, values, accumulate); #endif } inline Tensor Tensor::inverse() const { @@ -1073,7 +1073,7 @@ inline Tensor Tensor::inverse() const { return TypeDefault::inverse(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::inverse(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::isclose(const Tensor & other, double rtol, double atol, bool equal_nan) const { @@ -1081,7 +1081,7 @@ inline Tensor Tensor::isclose(const Tensor & other, double rtol, double atol, bo return TypeDefault::isclose(const_cast(*this), other, rtol, atol, equal_nan); #else static auto table = globalATenDispatch().getOpTable("aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other, rtol, atol, equal_nan); + return table->getOp(type_set())(const_cast(*this), other, rtol, atol, equal_nan); #endif } inline bool Tensor::is_distributed() const { @@ -1089,7 +1089,7 @@ inline bool Tensor::is_distributed() const { return TypeDefault::is_distributed(const_cast(*this)); #else 
static auto table = globalATenDispatch().getOpTable("aten::is_distributed(Tensor self) -> bool"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline bool Tensor::is_floating_point() const { @@ -1097,7 +1097,7 @@ inline bool Tensor::is_floating_point() const { return TypeDefault::is_floating_point(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::is_floating_point(Tensor self) -> bool"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline bool Tensor::is_complex() const { @@ -1105,7 +1105,7 @@ inline bool Tensor::is_complex() const { return TypeDefault::is_complex(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::is_complex(Tensor self) -> bool"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline bool Tensor::is_nonzero() const { @@ -1113,7 +1113,7 @@ inline bool Tensor::is_nonzero() const { return TypeDefault::is_nonzero(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::is_nonzero(Tensor self) -> bool"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline bool Tensor::is_same_size(const Tensor & other) const { @@ -1121,7 +1121,7 @@ inline bool Tensor::is_same_size(const Tensor & other) const { return TypeDefault::is_same_size(const_cast(*this), other); #else static auto table = globalATenDispatch().getOpTable("aten::is_same_size(Tensor self, Tensor other) -> bool"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline bool Tensor::is_signed() const { @@ -1129,7 +1129,7 @@ inline bool Tensor::is_signed() const { return TypeDefault::is_signed(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::is_signed(Tensor self) -> bool"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline std::tuple Tensor::kthvalue(int64_t k, int64_t dim, bool keepdim) const { @@ -1137,7 +1137,7 @@ inline std::tuple Tensor::kthvalue(int64_t k, int64_t dim, bool k return TypeDefault::kthvalue(const_cast(*this), k, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)"); - return table->getOp (const Tensor &, int64_t, int64_t, bool)>(type_set(), is_variable())(const_cast(*this), k, dim, keepdim); + return table->getOp (const Tensor &, int64_t, int64_t, bool)>(type_set())(const_cast(*this), k, dim, keepdim); #endif } inline Tensor Tensor::log() const { @@ -1145,7 +1145,7 @@ inline Tensor Tensor::log() const { return TypeDefault::log(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::log(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::log_() const { @@ -1159,7 +1159,7 @@ inline Tensor & Tensor::log_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::log_(Tensor(a!) 
self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::log10() const { @@ -1167,7 +1167,7 @@ inline Tensor Tensor::log10() const { return TypeDefault::log10(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::log10(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::log10_() const { @@ -1181,7 +1181,7 @@ inline Tensor & Tensor::log10_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::log10_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::log1p() const { @@ -1189,7 +1189,7 @@ inline Tensor Tensor::log1p() const { return TypeDefault::log1p(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::log1p(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::log1p_() const { @@ -1206,7 +1206,7 @@ inline Tensor & Tensor::log1p_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::log1p_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::log2() const { @@ -1214,7 +1214,7 @@ inline Tensor Tensor::log2() const { return TypeDefault::log2(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::log2(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::log2_() const { @@ -1228,7 +1228,7 @@ inline Tensor & Tensor::log2_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::log2_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::logdet() const { @@ -1236,7 +1236,7 @@ inline Tensor Tensor::logdet() const { return TypeDefault::logdet(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::logdet(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::log_softmax(int64_t dim, c10::optional dtype) const { @@ -1244,7 +1244,7 @@ inline Tensor Tensor::log_softmax(int64_t dim, c10::optional dtype) return TypeDefault::log_softmax(const_cast(*this), dim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::log_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor"); - return table->getOp)>(type_set(), is_variable())(const_cast(*this), dim, dtype); + return table->getOp)>(type_set())(const_cast(*this), dim, dtype); #endif } #ifdef BUILD_NAMEDTENSOR @@ -1253,7 +1253,7 @@ inline Tensor Tensor::log_softmax(Dimname dim, c10::optional dtype) return TypeDefault::log_softmax(const_cast(*this), dim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::log_softmax(Tensor self, Dimname dim, *, ScalarType? 
dtype=None) -> Tensor"); - return table->getOp)>(type_set(), is_variable())(const_cast(*this), dim, dtype); + return table->getOp)>(type_set())(const_cast(*this), dim, dtype); #endif } #endif @@ -1262,7 +1262,7 @@ inline Tensor Tensor::logsumexp(IntArrayRef dim, bool keepdim) const { return TypeDefault::logsumexp(const_cast(*this), dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, keepdim); + return table->getOp(type_set())(const_cast(*this), dim, keepdim); #endif } #ifdef BUILD_NAMEDTENSOR @@ -1271,7 +1271,7 @@ inline Tensor Tensor::logsumexp(DimnameList dim, bool keepdim) const { return TypeDefault::logsumexp(const_cast(*this), dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, keepdim); + return table->getOp(type_set())(const_cast(*this), dim, keepdim); #endif } #endif @@ -1280,7 +1280,7 @@ inline Tensor Tensor::matmul(const Tensor & other) const { return TypeDefault::matmul(const_cast(*this), other); #else static auto table = globalATenDispatch().getOpTable("aten::matmul(Tensor self, Tensor other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::matrix_power(int64_t n) const { @@ -1288,7 +1288,7 @@ inline Tensor Tensor::matrix_power(int64_t n) const { return TypeDefault::matrix_power(const_cast(*this), n); #else static auto table = globalATenDispatch().getOpTable("aten::matrix_power(Tensor self, int n) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), n); + return table->getOp(type_set())(const_cast(*this), n); #endif } inline std::tuple Tensor::max(int64_t dim, bool keepdim) const { @@ -1296,7 +1296,7 @@ inline std::tuple Tensor::max(int64_t dim, bool keepdim) const { return TypeDefault::max(const_cast(*this), dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)"); - return table->getOp (const Tensor &, int64_t, bool)>(type_set(), is_variable())(const_cast(*this), dim, keepdim); + return table->getOp (const Tensor &, int64_t, bool)>(type_set())(const_cast(*this), dim, keepdim); #endif } inline Tensor Tensor::max_values(IntArrayRef dim, bool keepdim) const { @@ -1304,7 +1304,7 @@ inline Tensor Tensor::max_values(IntArrayRef dim, bool keepdim) const { return TypeDefault::max_values(const_cast(*this), dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::max_values(Tensor self, int[1] dim, bool keepdim=False) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, keepdim); + return table->getOp(type_set())(const_cast(*this), dim, keepdim); #endif } #ifdef BUILD_NAMEDTENSOR @@ -1313,7 +1313,7 @@ inline std::tuple Tensor::max(Dimname dim, bool keepdim) const { return TypeDefault::max(const_cast(*this), dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)"); - return table->getOp (const Tensor &, Dimname, bool)>(type_set(), is_variable())(const_cast(*this), dim, keepdim); + return table->getOp (const 
Tensor &, Dimname, bool)>(type_set())(const_cast(*this), dim, keepdim); #endif } #endif @@ -1323,7 +1323,7 @@ inline Tensor Tensor::max_values(DimnameList dim, bool keepdim) const { return TypeDefault::max_values(const_cast(*this), dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::max_values.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, keepdim); + return table->getOp(type_set())(const_cast(*this), dim, keepdim); #endif } #endif @@ -1332,7 +1332,7 @@ inline Tensor Tensor::mean(c10::optional dtype) const { return TypeDefault::mean(const_cast(*this), dtype); #else static auto table = globalATenDispatch().getOpTable("aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor"); - return table->getOp)>(type_set(), is_variable())(const_cast(*this), dtype); + return table->getOp)>(type_set())(const_cast(*this), dtype); #endif } inline Tensor Tensor::mean(IntArrayRef dim, bool keepdim, c10::optional dtype) const { @@ -1340,7 +1340,7 @@ inline Tensor Tensor::mean(IntArrayRef dim, bool keepdim, c10::optional(*this), dim, keepdim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::mean.dim(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"); - return table->getOp)>(type_set(), is_variable())(const_cast(*this), dim, keepdim, dtype); + return table->getOp)>(type_set())(const_cast(*this), dim, keepdim, dtype); #endif } #ifdef BUILD_NAMEDTENSOR @@ -1349,7 +1349,7 @@ inline Tensor Tensor::mean(DimnameList dim, bool keepdim, c10::optional(*this), dim, keepdim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor"); - return table->getOp)>(type_set(), is_variable())(const_cast(*this), dim, keepdim, dtype); + return table->getOp)>(type_set())(const_cast(*this), dim, keepdim, dtype); #endif } #endif @@ -1358,7 +1358,7 @@ inline std::tuple Tensor::median(int64_t dim, bool keepdim) const return TypeDefault::median(const_cast(*this), dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)"); - return table->getOp (const Tensor &, int64_t, bool)>(type_set(), is_variable())(const_cast(*this), dim, keepdim); + return table->getOp (const Tensor &, int64_t, bool)>(type_set())(const_cast(*this), dim, keepdim); #endif } #ifdef BUILD_NAMEDTENSOR @@ -1367,7 +1367,7 @@ inline std::tuple Tensor::median(Dimname dim, bool keepdim) const return TypeDefault::median(const_cast(*this), dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)"); - return table->getOp (const Tensor &, Dimname, bool)>(type_set(), is_variable())(const_cast(*this), dim, keepdim); + return table->getOp (const Tensor &, Dimname, bool)>(type_set())(const_cast(*this), dim, keepdim); #endif } #endif @@ -1376,7 +1376,7 @@ inline std::tuple Tensor::min(int64_t dim, bool keepdim) const { return TypeDefault::min(const_cast(*this), dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)"); - return table->getOp (const Tensor &, int64_t, bool)>(type_set(), is_variable())(const_cast(*this), dim, keepdim); + return table->getOp (const Tensor &, int64_t, bool)>(type_set())(const_cast(*this), dim, keepdim); #endif } inline Tensor Tensor::min_values(IntArrayRef dim, bool keepdim) const { @@ -1384,7 +1384,7 @@ inline Tensor Tensor::min_values(IntArrayRef dim, bool keepdim) const { return TypeDefault::min_values(const_cast(*this), dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::min_values(Tensor self, int[1] dim, bool keepdim=False) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, keepdim); + return table->getOp(type_set())(const_cast(*this), dim, keepdim); #endif } #ifdef BUILD_NAMEDTENSOR @@ -1393,7 +1393,7 @@ inline std::tuple Tensor::min(Dimname dim, bool keepdim) const { return TypeDefault::min(const_cast(*this), dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)"); - return table->getOp (const Tensor &, Dimname, bool)>(type_set(), is_variable())(const_cast(*this), dim, keepdim); + return table->getOp (const Tensor &, Dimname, bool)>(type_set())(const_cast(*this), dim, keepdim); #endif } #endif @@ -1403,7 +1403,7 @@ inline Tensor Tensor::min_values(DimnameList dim, bool keepdim) const { return TypeDefault::min_values(const_cast(*this), dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::min_values.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, keepdim); + return table->getOp(type_set())(const_cast(*this), dim, keepdim); #endif } #endif @@ -1421,7 +1421,7 @@ inline Tensor Tensor::mm(const Tensor & mat2) const { } #else static auto table = globalATenDispatch().getOpTable("aten::mm(Tensor 
self, Tensor mat2) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), mat2); + return table->getOp(type_set())(const_cast(*this), mat2); #endif } inline std::tuple Tensor::mode(int64_t dim, bool keepdim) const { @@ -1429,7 +1429,7 @@ inline std::tuple Tensor::mode(int64_t dim, bool keepdim) const { return TypeDefault::mode(const_cast(*this), dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)"); - return table->getOp (const Tensor &, int64_t, bool)>(type_set(), is_variable())(const_cast(*this), dim, keepdim); + return table->getOp (const Tensor &, int64_t, bool)>(type_set())(const_cast(*this), dim, keepdim); #endif } inline Tensor Tensor::mul(const Tensor & other) const { @@ -1446,7 +1446,7 @@ inline Tensor Tensor::mul(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::mul.Tensor(Tensor self, Tensor other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::mul_(const Tensor & other) const { @@ -1463,7 +1463,7 @@ inline Tensor & Tensor::mul_(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::mul(Scalar other) const { @@ -1471,7 +1471,7 @@ inline Tensor Tensor::mul(Scalar other) const { return TypeDefault::mul(const_cast(*this), other); #else static auto table = globalATenDispatch().getOpTable("aten::mul.Scalar(Tensor self, Scalar other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::mul_(Scalar other) const { @@ -1479,7 +1479,7 @@ inline Tensor & Tensor::mul_(Scalar other) const { return TypeDefault::mul_(const_cast(*this), other); #else static auto table = globalATenDispatch().getOpTable("aten::mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::mv(const Tensor & vec) const { @@ -1493,7 +1493,7 @@ inline Tensor Tensor::mv(const Tensor & vec) const { } #else static auto table = globalATenDispatch().getOpTable("aten::mv(Tensor self, Tensor vec) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), vec); + return table->getOp(type_set())(const_cast(*this), vec); #endif } inline Tensor Tensor::mvlgamma(int64_t p) const { @@ -1501,7 +1501,7 @@ inline Tensor Tensor::mvlgamma(int64_t p) const { return TypeDefault::mvlgamma(const_cast(*this), p); #else static auto table = globalATenDispatch().getOpTable("aten::mvlgamma(Tensor self, int p) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), p); + return table->getOp(type_set())(const_cast(*this), p); #endif } inline Tensor & Tensor::mvlgamma_(int64_t p) const { @@ -1509,7 +1509,7 @@ inline Tensor & Tensor::mvlgamma_(int64_t p) const { return TypeDefault::mvlgamma_(const_cast(*this), p); #else static auto table = globalATenDispatch().getOpTable("aten::mvlgamma_(Tensor(a!) 
self, int p) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), p); + return table->getOp(type_set())(const_cast(*this), p); #endif } inline Tensor Tensor::narrow_copy(int64_t dim, int64_t start, int64_t length) const { @@ -1526,7 +1526,7 @@ inline Tensor Tensor::narrow_copy(int64_t dim, int64_t start, int64_t length) co } #else static auto table = globalATenDispatch().getOpTable("aten::narrow_copy(Tensor self, int dim, int start, int length) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, start, length); + return table->getOp(type_set())(const_cast(*this), dim, start, length); #endif } inline Tensor Tensor::narrow(int64_t dim, int64_t start, int64_t length) const { @@ -1534,7 +1534,7 @@ inline Tensor Tensor::narrow(int64_t dim, int64_t start, int64_t length) const { return TypeDefault::narrow(const_cast(*this), dim, start, length); #else static auto table = globalATenDispatch().getOpTable("aten::narrow(Tensor(a) self, int dim, int start, int length) -> Tensor(a)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, start, length); + return table->getOp(type_set())(const_cast(*this), dim, start, length); #endif } inline Tensor Tensor::permute(IntArrayRef dims) const { @@ -1542,7 +1542,7 @@ inline Tensor Tensor::permute(IntArrayRef dims) const { return TypeDefault::permute(const_cast(*this), dims); #else static auto table = globalATenDispatch().getOpTable("aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dims); + return table->getOp(type_set())(const_cast(*this), dims); #endif } inline Tensor Tensor::numpy_T() const { @@ -1550,7 +1550,7 @@ inline Tensor Tensor::numpy_T() const { return TypeDefault::numpy_T(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::numpy_T(Tensor(a) self) -> Tensor(a)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline bool Tensor::is_pinned() const { @@ -1558,7 +1558,7 @@ inline bool Tensor::is_pinned() const { return TypeDefault::is_pinned(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::is_pinned(Tensor self) -> bool"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::pin_memory() const { @@ -1566,7 +1566,7 @@ inline Tensor Tensor::pin_memory() const { return TypeDefault::pin_memory(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::pin_memory(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::pinverse(double rcond) const { @@ -1574,7 +1574,7 @@ inline Tensor Tensor::pinverse(double rcond) const { return TypeDefault::pinverse(const_cast(*this), rcond); #else static auto table = globalATenDispatch().getOpTable("aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), rcond); + return table->getOp(type_set())(const_cast(*this), rcond); #endif } inline Tensor Tensor::reciprocal() const { @@ -1582,7 +1582,7 @@ inline Tensor Tensor::reciprocal() const { return TypeDefault::reciprocal(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::reciprocal(Tensor self) -> Tensor"); - 
return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::reciprocal_() const { @@ -1596,7 +1596,7 @@ inline Tensor & Tensor::reciprocal_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::reciprocal_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::neg() const { @@ -1604,7 +1604,7 @@ inline Tensor Tensor::neg() const { return TypeDefault::neg(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::neg(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::neg_() const { @@ -1612,7 +1612,7 @@ inline Tensor & Tensor::neg_() const { return TypeDefault::neg_(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::neg_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::repeat(IntArrayRef repeats) const { @@ -1620,7 +1620,7 @@ inline Tensor Tensor::repeat(IntArrayRef repeats) const { return TypeDefault::repeat(const_cast(*this), repeats); #else static auto table = globalATenDispatch().getOpTable("aten::repeat(Tensor self, int[] repeats) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), repeats); + return table->getOp(type_set())(const_cast(*this), repeats); #endif } inline Tensor Tensor::repeat_interleave(const Tensor & repeats, c10::optional dim) const { @@ -1628,7 +1628,7 @@ inline Tensor Tensor::repeat_interleave(const Tensor & repeats, c10::optional(*this), repeats, dim); #else static auto table = globalATenDispatch().getOpTable("aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None) -> Tensor"); - return table->getOp)>(type_set(), is_variable())(const_cast(*this), repeats, dim); + return table->getOp)>(type_set())(const_cast(*this), repeats, dim); #endif } inline Tensor Tensor::repeat_interleave(int64_t repeats, c10::optional dim) const { @@ -1636,7 +1636,7 @@ inline Tensor Tensor::repeat_interleave(int64_t repeats, c10::optional return TypeDefault::repeat_interleave(const_cast(*this), repeats, dim); #else static auto table = globalATenDispatch().getOpTable("aten::repeat_interleave.self_int(Tensor self, int repeats, int? 
dim=None) -> Tensor"); - return table->getOp)>(type_set(), is_variable())(const_cast(*this), repeats, dim); + return table->getOp)>(type_set())(const_cast(*this), repeats, dim); #endif } inline Tensor Tensor::reshape(IntArrayRef shape) const { @@ -1644,7 +1644,7 @@ inline Tensor Tensor::reshape(IntArrayRef shape) const { return TypeDefault::reshape(const_cast(*this), shape); #else static auto table = globalATenDispatch().getOpTable("aten::reshape(Tensor self, int[] shape) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), shape); + return table->getOp(type_set())(const_cast(*this), shape); #endif } inline Tensor Tensor::reshape_as(const Tensor & other) const { @@ -1652,7 +1652,7 @@ inline Tensor Tensor::reshape_as(const Tensor & other) const { return TypeDefault::reshape_as(const_cast(*this), other); #else static auto table = globalATenDispatch().getOpTable("aten::reshape_as(Tensor self, Tensor other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::round() const { @@ -1660,7 +1660,7 @@ inline Tensor Tensor::round() const { return TypeDefault::round(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::round(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::round_() const { @@ -1674,7 +1674,7 @@ inline Tensor & Tensor::round_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::round_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::relu() const { @@ -1691,7 +1691,7 @@ inline Tensor Tensor::relu() const { } #else static auto table = globalATenDispatch().getOpTable("aten::relu(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::relu_() const { @@ -1708,7 +1708,7 @@ inline Tensor & Tensor::relu_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::relu_(Tensor(a!) 
self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::prelu(const Tensor & weight) const { @@ -1722,7 +1722,7 @@ inline Tensor Tensor::prelu(const Tensor & weight) const { } #else static auto table = globalATenDispatch().getOpTable("aten::prelu(Tensor self, Tensor weight) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), weight); + return table->getOp(type_set())(const_cast(*this), weight); #endif } inline std::tuple Tensor::prelu_backward(const Tensor & grad_output, const Tensor & weight) const { @@ -1736,7 +1736,7 @@ inline std::tuple Tensor::prelu_backward(const Tensor & grad_outp } #else static auto table = globalATenDispatch().getOpTable("aten::prelu_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor)"); - return table->getOp (const Tensor &, const Tensor &, const Tensor &)>(type_set(), is_variable())(grad_output, const_cast(*this), weight); + return table->getOp (const Tensor &, const Tensor &, const Tensor &)>(type_set())(grad_output, const_cast(*this), weight); #endif } inline Tensor Tensor::hardshrink(Scalar lambd) const { @@ -1750,7 +1750,7 @@ inline Tensor Tensor::hardshrink(Scalar lambd) const { } #else static auto table = globalATenDispatch().getOpTable("aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), lambd); + return table->getOp(type_set())(const_cast(*this), lambd); #endif } inline Tensor Tensor::hardshrink_backward(const Tensor & grad_out, Scalar lambd) const { @@ -1764,7 +1764,7 @@ inline Tensor Tensor::hardshrink_backward(const Tensor & grad_out, Scalar lambd) } #else static auto table = globalATenDispatch().getOpTable("aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor"); - return table->getOp(type_set(), is_variable())(grad_out, const_cast(*this), lambd); + return table->getOp(type_set())(grad_out, const_cast(*this), lambd); #endif } inline Tensor Tensor::rsqrt() const { @@ -1772,7 +1772,7 @@ inline Tensor Tensor::rsqrt() const { return TypeDefault::rsqrt(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::rsqrt(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::rsqrt_() const { @@ -1786,7 +1786,7 @@ inline Tensor & Tensor::rsqrt_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::rsqrt_(Tensor(a!) 
self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } #ifdef BUILD_NAMEDTENSOR @@ -1795,7 +1795,7 @@ inline Tensor Tensor::select(Dimname dim, int64_t index) const { return TypeDefault::select(const_cast(*this), dim, index); #else static auto table = globalATenDispatch().getOpTable("aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, index); + return table->getOp(type_set())(const_cast(*this), dim, index); #endif } #endif @@ -1804,7 +1804,7 @@ inline Tensor Tensor::select(int64_t dim, int64_t index) const { return TypeDefault::select(const_cast(*this), dim, index); #else static auto table = globalATenDispatch().getOpTable("aten::select.int(Tensor(a) self, int dim, int index) -> Tensor(a)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, index); + return table->getOp(type_set())(const_cast(*this), dim, index); #endif } inline Tensor Tensor::sigmoid() const { @@ -1818,7 +1818,7 @@ inline Tensor Tensor::sigmoid() const { } #else static auto table = globalATenDispatch().getOpTable("aten::sigmoid(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::sigmoid_() const { @@ -1832,7 +1832,7 @@ inline Tensor & Tensor::sigmoid_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::sigmoid_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::sin() const { @@ -1840,7 +1840,7 @@ inline Tensor Tensor::sin() const { return TypeDefault::sin(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::sin(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::sin_() const { @@ -1854,7 +1854,7 @@ inline Tensor & Tensor::sin_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::sin_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::sinh() const { @@ -1862,7 +1862,7 @@ inline Tensor Tensor::sinh() const { return TypeDefault::sinh(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::sinh(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::sinh_() const { @@ -1876,7 +1876,7 @@ inline Tensor & Tensor::sinh_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::sinh_(Tensor(a!) 
self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::detach() const { @@ -1884,7 +1884,7 @@ inline Tensor Tensor::detach() const { return TypeDefault::detach(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::detach(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::detach_() const { @@ -1892,7 +1892,7 @@ inline Tensor & Tensor::detach_() const { return TypeDefault::detach_(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::detach_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline int64_t Tensor::size(int64_t dim) const { @@ -1900,7 +1900,7 @@ inline int64_t Tensor::size(int64_t dim) const { return TypeDefault::size(const_cast(*this), dim); #else static auto table = globalATenDispatch().getOpTable("aten::size.int(Tensor self, int dim) -> int"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim); + return table->getOp(type_set())(const_cast(*this), dim); #endif } #ifdef BUILD_NAMEDTENSOR @@ -1909,7 +1909,7 @@ inline int64_t Tensor::size(Dimname dim) const { return TypeDefault::size(const_cast(*this), dim); #else static auto table = globalATenDispatch().getOpTable("aten::size.Dimname(Tensor self, Dimname dim) -> int"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim); + return table->getOp(type_set())(const_cast(*this), dim); #endif } #endif @@ -1918,7 +1918,7 @@ inline Tensor Tensor::slice(int64_t dim, int64_t start, int64_t end, int64_t ste return TypeDefault::slice(const_cast(*this), dim, start, end, step); #else static auto table = globalATenDispatch().getOpTable("aten::slice.Tensor(Tensor(a) self, int dim=0, int start=0, int end=9223372036854775807, int step=1) -> Tensor(a)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, start, end, step); + return table->getOp(type_set())(const_cast(*this), dim, start, end, step); #endif } inline std::tuple Tensor::slogdet() const { @@ -1926,7 +1926,7 @@ inline std::tuple Tensor::slogdet() const { return TypeDefault::slogdet(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)"); - return table->getOp (const Tensor &)>(type_set(), is_variable())(const_cast(*this)); + return table->getOp (const Tensor &)>(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::smm(const Tensor & mat2) const { @@ -1934,7 +1934,7 @@ inline Tensor Tensor::smm(const Tensor & mat2) const { return TypeDefault::smm(const_cast(*this), mat2); #else static auto table = globalATenDispatch().getOpTable("aten::smm(Tensor self, Tensor mat2) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), mat2); + return table->getOp(type_set())(const_cast(*this), mat2); #endif } inline Tensor Tensor::softmax(int64_t dim, c10::optional dtype) const { @@ -1942,7 +1942,7 @@ inline Tensor Tensor::softmax(int64_t dim, c10::optional dtype) cons return TypeDefault::softmax(const_cast(*this), dim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::softmax(Tensor self, int dim, ScalarType? 
dtype=None) -> Tensor"); - return table->getOp)>(type_set(), is_variable())(const_cast(*this), dim, dtype); + return table->getOp)>(type_set())(const_cast(*this), dim, dtype); #endif } #ifdef BUILD_NAMEDTENSOR @@ -1951,7 +1951,7 @@ inline Tensor Tensor::softmax(Dimname dim, c10::optional dtype) cons return TypeDefault::softmax(const_cast(*this), dim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::softmax(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor"); - return table->getOp)>(type_set(), is_variable())(const_cast(*this), dim, dtype); + return table->getOp)>(type_set())(const_cast(*this), dim, dtype); #endif } #endif @@ -1960,7 +1960,7 @@ inline std::vector Tensor::split(int64_t split_size, int64_t dim) const return TypeDefault::split(const_cast(*this), split_size, dim); #else static auto table = globalATenDispatch().getOpTable("aten::split.Tensor(Tensor(a) self, int split_size, int dim=0) -> Tensor(a)[]"); - return table->getOp (const Tensor &, int64_t, int64_t)>(type_set(), is_variable())(const_cast(*this), split_size, dim); + return table->getOp (const Tensor &, int64_t, int64_t)>(type_set())(const_cast(*this), split_size, dim); #endif } inline std::vector Tensor::split_with_sizes(IntArrayRef split_sizes, int64_t dim) const { @@ -1968,7 +1968,7 @@ inline std::vector Tensor::split_with_sizes(IntArrayRef split_sizes, int return TypeDefault::split_with_sizes(const_cast(*this), split_sizes, dim); #else static auto table = globalATenDispatch().getOpTable("aten::split_with_sizes(Tensor self, int[] split_sizes, int dim=0) -> Tensor[]"); - return table->getOp (const Tensor &, IntArrayRef, int64_t)>(type_set(), is_variable())(const_cast(*this), split_sizes, dim); + return table->getOp (const Tensor &, IntArrayRef, int64_t)>(type_set())(const_cast(*this), split_sizes, dim); #endif } inline Tensor Tensor::squeeze() const { @@ -1976,7 +1976,7 @@ inline Tensor Tensor::squeeze() const { return TypeDefault::squeeze(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::squeeze(Tensor(a) self) -> Tensor(a)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::squeeze(int64_t dim) const { @@ -1984,7 +1984,7 @@ inline Tensor Tensor::squeeze(int64_t dim) const { return TypeDefault::squeeze(const_cast(*this), dim); #else static auto table = globalATenDispatch().getOpTable("aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim); + return table->getOp(type_set())(const_cast(*this), dim); #endif } inline Tensor & Tensor::squeeze_() const { @@ -1992,7 +1992,7 @@ inline Tensor & Tensor::squeeze_() const { return TypeDefault::squeeze_(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::squeeze_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::squeeze_(int64_t dim) const { @@ -2000,7 +2000,7 @@ inline Tensor & Tensor::squeeze_(int64_t dim) const { return TypeDefault::squeeze_(const_cast(*this), dim); #else static auto table = globalATenDispatch().getOpTable("aten::squeeze_.dim(Tensor(a!) 
self, int dim) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim); + return table->getOp(type_set())(const_cast(*this), dim); #endif } inline Tensor Tensor::sspaddmm(const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) const { @@ -2008,7 +2008,7 @@ inline Tensor Tensor::sspaddmm(const Tensor & mat1, const Tensor & mat2, Scalar return TypeDefault::sspaddmm(const_cast(*this), mat1, mat2, beta, alpha); #else static auto table = globalATenDispatch().getOpTable("aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), mat1, mat2, beta, alpha); + return table->getOp(type_set())(const_cast(*this), mat1, mat2, beta, alpha); #endif } inline Tensor Tensor::stft(int64_t n_fft, c10::optional hop_length, c10::optional win_length, const Tensor & window, bool normalized, bool onesided) const { @@ -2016,7 +2016,7 @@ inline Tensor Tensor::stft(int64_t n_fft, c10::optional hop_length, c10 return TypeDefault::stft(const_cast(*this), n_fft, hop_length, win_length, window, normalized, onesided); #else static auto table = globalATenDispatch().getOpTable("aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool onesided=True) -> Tensor"); - return table->getOp, c10::optional, const Tensor &, bool, bool)>(type_set(), is_variable())(const_cast(*this), n_fft, hop_length, win_length, window, normalized, onesided); + return table->getOp, c10::optional, const Tensor &, bool, bool)>(type_set())(const_cast(*this), n_fft, hop_length, win_length, window, normalized, onesided); #endif } inline int64_t Tensor::stride(int64_t dim) const { @@ -2024,7 +2024,7 @@ inline int64_t Tensor::stride(int64_t dim) const { return TypeDefault::stride(const_cast(*this), dim); #else static auto table = globalATenDispatch().getOpTable("aten::stride.int(Tensor self, int dim) -> int"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim); + return table->getOp(type_set())(const_cast(*this), dim); #endif } #ifdef BUILD_NAMEDTENSOR @@ -2033,7 +2033,7 @@ inline int64_t Tensor::stride(Dimname dim) const { return TypeDefault::stride(const_cast(*this), dim); #else static auto table = globalATenDispatch().getOpTable("aten::stride.Dimname(Tensor self, Dimname dim) -> int"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim); + return table->getOp(type_set())(const_cast(*this), dim); #endif } #endif @@ -2042,7 +2042,7 @@ inline Tensor Tensor::sum(c10::optional dtype) const { return TypeDefault::sum(const_cast(*this), dtype); #else static auto table = globalATenDispatch().getOpTable("aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor"); - return table->getOp)>(type_set(), is_variable())(const_cast(*this), dtype); + return table->getOp)>(type_set())(const_cast(*this), dtype); #endif } inline Tensor Tensor::sum(IntArrayRef dim, bool keepdim, c10::optional dtype) const { @@ -2050,7 +2050,7 @@ inline Tensor Tensor::sum(IntArrayRef dim, bool keepdim, c10::optional(*this), dim, keepdim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::sum.dim_IntList(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor"); - return table->getOp)>(type_set(), is_variable())(const_cast(*this), dim, keepdim, dtype); + return table->getOp)>(type_set())(const_cast(*this), dim, keepdim, dtype); #endif } #ifdef BUILD_NAMEDTENSOR @@ -2059,7 +2059,7 @@ inline Tensor Tensor::sum(DimnameList dim, bool keepdim, c10::optional(*this), dim, keepdim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"); - return table->getOp)>(type_set(), is_variable())(const_cast(*this), dim, keepdim, dtype); + return table->getOp)>(type_set())(const_cast(*this), dim, keepdim, dtype); #endif } #endif @@ -2068,7 +2068,7 @@ inline Tensor Tensor::sum_to_size(IntArrayRef size) const { return TypeDefault::sum_to_size(const_cast(*this), size); #else static auto table = globalATenDispatch().getOpTable("aten::sum_to_size(Tensor self, int[] size) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), size); + return table->getOp(type_set())(const_cast(*this), size); #endif } inline Tensor Tensor::sqrt() const { @@ -2076,7 +2076,7 @@ inline Tensor Tensor::sqrt() const { return TypeDefault::sqrt(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::sqrt(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::sqrt_() const { @@ -2090,7 +2090,7 @@ inline Tensor & Tensor::sqrt_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::sqrt_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::std(bool unbiased) const { @@ -2098,7 +2098,7 @@ inline Tensor Tensor::std(bool unbiased) const { return TypeDefault::std(const_cast(*this), unbiased); #else static auto table = globalATenDispatch().getOpTable("aten::std(Tensor self, bool unbiased=True) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), unbiased); + return table->getOp(type_set())(const_cast(*this), unbiased); #endif } inline Tensor Tensor::std(IntArrayRef dim, bool unbiased, bool keepdim) const { @@ -2106,7 +2106,7 @@ inline Tensor Tensor::std(IntArrayRef dim, bool unbiased, bool keepdim) const { return TypeDefault::std(const_cast(*this), dim, unbiased, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::std.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, unbiased, keepdim); + return table->getOp(type_set())(const_cast(*this), dim, unbiased, keepdim); #endif } #ifdef BUILD_NAMEDTENSOR @@ -2115,7 +2115,7 @@ inline Tensor Tensor::std(DimnameList dim, bool unbiased, bool keepdim) const { return TypeDefault::std(const_cast(*this), dim, unbiased, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, unbiased, keepdim); + return table->getOp(type_set())(const_cast(*this), dim, unbiased, keepdim); #endif } #endif @@ -2124,7 +2124,7 @@ inline Tensor Tensor::prod(c10::optional dtype) const { return TypeDefault::prod(const_cast(*this), dtype); #else static auto table = 
globalATenDispatch().getOpTable("aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor"); - return table->getOp)>(type_set(), is_variable())(const_cast(*this), dtype); + return table->getOp)>(type_set())(const_cast(*this), dtype); #endif } inline Tensor Tensor::prod(int64_t dim, bool keepdim, c10::optional dtype) const { @@ -2132,7 +2132,7 @@ inline Tensor Tensor::prod(int64_t dim, bool keepdim, c10::optional return TypeDefault::prod(const_cast(*this), dim, keepdim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"); - return table->getOp)>(type_set(), is_variable())(const_cast(*this), dim, keepdim, dtype); + return table->getOp)>(type_set())(const_cast(*this), dim, keepdim, dtype); #endif } #ifdef BUILD_NAMEDTENSOR @@ -2141,7 +2141,7 @@ inline Tensor Tensor::prod(Dimname dim, bool keepdim, c10::optional return TypeDefault::prod(const_cast(*this), dim, keepdim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"); - return table->getOp)>(type_set(), is_variable())(const_cast(*this), dim, keepdim, dtype); + return table->getOp)>(type_set())(const_cast(*this), dim, keepdim, dtype); #endif } #endif @@ -2150,7 +2150,7 @@ inline Tensor Tensor::t() const { return TypeDefault::t(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::t(Tensor(a) self) -> Tensor(a)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::t_() const { @@ -2158,7 +2158,7 @@ inline Tensor & Tensor::t_() const { return TypeDefault::t_(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::t_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::tan() const { @@ -2166,7 +2166,7 @@ inline Tensor Tensor::tan() const { return TypeDefault::tan(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::tan(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::tan_() const { @@ -2180,7 +2180,7 @@ inline Tensor & Tensor::tan_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::tan_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::tanh() const { @@ -2188,7 +2188,7 @@ inline Tensor Tensor::tanh() const { return TypeDefault::tanh(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::tanh(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::tanh_() const { @@ -2202,7 +2202,7 @@ inline Tensor & Tensor::tanh_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::tanh_(Tensor(a!) 
self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::transpose(int64_t dim0, int64_t dim1) const { @@ -2210,7 +2210,7 @@ inline Tensor Tensor::transpose(int64_t dim0, int64_t dim1) const { return TypeDefault::transpose(const_cast(*this), dim0, dim1); #else static auto table = globalATenDispatch().getOpTable("aten::transpose(Tensor(a) self, int dim0, int dim1) -> Tensor(a)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim0, dim1); + return table->getOp(type_set())(const_cast(*this), dim0, dim1); #endif } #ifdef BUILD_NAMEDTENSOR @@ -2219,7 +2219,7 @@ inline Tensor Tensor::transpose(Dimname dim0, Dimname dim1) const { return TypeDefault::transpose(const_cast(*this), dim0, dim1); #else static auto table = globalATenDispatch().getOpTable("aten::transpose(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim0, dim1); + return table->getOp(type_set())(const_cast(*this), dim0, dim1); #endif } #endif @@ -2228,7 +2228,7 @@ inline Tensor & Tensor::transpose_(int64_t dim0, int64_t dim1) const { return TypeDefault::transpose_(const_cast(*this), dim0, dim1); #else static auto table = globalATenDispatch().getOpTable("aten::transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim0, dim1); + return table->getOp(type_set())(const_cast(*this), dim0, dim1); #endif } inline Tensor Tensor::flip(IntArrayRef dims) const { @@ -2242,7 +2242,7 @@ inline Tensor Tensor::flip(IntArrayRef dims) const { } #else static auto table = globalATenDispatch().getOpTable("aten::flip(Tensor self, int[] dims) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dims); + return table->getOp(type_set())(const_cast(*this), dims); #endif } inline Tensor Tensor::roll(IntArrayRef shifts, IntArrayRef dims) const { @@ -2256,7 +2256,7 @@ inline Tensor Tensor::roll(IntArrayRef shifts, IntArrayRef dims) const { } #else static auto table = globalATenDispatch().getOpTable("aten::roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), shifts, dims); + return table->getOp(type_set())(const_cast(*this), shifts, dims); #endif } inline Tensor Tensor::rot90(int64_t k, IntArrayRef dims) const { @@ -2264,7 +2264,7 @@ inline Tensor Tensor::rot90(int64_t k, IntArrayRef dims) const { return TypeDefault::rot90(const_cast(*this), k, dims); #else static auto table = globalATenDispatch().getOpTable("aten::rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), k, dims); + return table->getOp(type_set())(const_cast(*this), k, dims); #endif } inline Tensor Tensor::trunc() const { @@ -2272,7 +2272,7 @@ inline Tensor Tensor::trunc() const { return TypeDefault::trunc(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::trunc(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::trunc_() const { @@ -2286,7 +2286,7 @@ inline Tensor & Tensor::trunc_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::trunc_(Tensor(a!) 
self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::type_as(const Tensor & other) const { @@ -2294,7 +2294,7 @@ inline Tensor Tensor::type_as(const Tensor & other) const { return TypeDefault::type_as(const_cast(*this), other); #else static auto table = globalATenDispatch().getOpTable("aten::type_as(Tensor self, Tensor other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::unsqueeze(int64_t dim) const { @@ -2302,7 +2302,7 @@ inline Tensor Tensor::unsqueeze(int64_t dim) const { return TypeDefault::unsqueeze(const_cast(*this), dim); #else static auto table = globalATenDispatch().getOpTable("aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim); + return table->getOp(type_set())(const_cast(*this), dim); #endif } inline Tensor & Tensor::unsqueeze_(int64_t dim) const { @@ -2310,7 +2310,7 @@ inline Tensor & Tensor::unsqueeze_(int64_t dim) const { return TypeDefault::unsqueeze_(const_cast(*this), dim); #else static auto table = globalATenDispatch().getOpTable("aten::unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim); + return table->getOp(type_set())(const_cast(*this), dim); #endif } inline Tensor Tensor::var(bool unbiased) const { @@ -2318,7 +2318,7 @@ inline Tensor Tensor::var(bool unbiased) const { return TypeDefault::var(const_cast(*this), unbiased); #else static auto table = globalATenDispatch().getOpTable("aten::var(Tensor self, bool unbiased=True) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), unbiased); + return table->getOp(type_set())(const_cast(*this), unbiased); #endif } inline Tensor Tensor::var(IntArrayRef dim, bool unbiased, bool keepdim) const { @@ -2326,7 +2326,7 @@ inline Tensor Tensor::var(IntArrayRef dim, bool unbiased, bool keepdim) const { return TypeDefault::var(const_cast(*this), dim, unbiased, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::var.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, unbiased, keepdim); + return table->getOp(type_set())(const_cast(*this), dim, unbiased, keepdim); #endif } #ifdef BUILD_NAMEDTENSOR @@ -2335,7 +2335,7 @@ inline Tensor Tensor::var(DimnameList dim, bool unbiased, bool keepdim) const { return TypeDefault::var(const_cast(*this), dim, unbiased, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, unbiased, keepdim); + return table->getOp(type_set())(const_cast(*this), dim, unbiased, keepdim); #endif } #endif @@ -2344,7 +2344,7 @@ inline Tensor Tensor::view_as(const Tensor & other) const { return TypeDefault::view_as(const_cast(*this), other); #else static auto table = globalATenDispatch().getOpTable("aten::view_as(Tensor self, Tensor other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::where(const Tensor & condition, const Tensor & other) const { @@ 
-2352,7 +2352,7 @@ inline Tensor Tensor::where(const Tensor & condition, const Tensor & other) cons return TypeDefault::where(condition, const_cast(*this), other); #else static auto table = globalATenDispatch().getOpTable("aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor"); - return table->getOp(type_set(), is_variable())(condition, const_cast(*this), other); + return table->getOp(type_set())(condition, const_cast(*this), other); #endif } inline Tensor Tensor::norm(c10::optional p, ScalarType dtype) const { @@ -2360,7 +2360,7 @@ inline Tensor Tensor::norm(c10::optional p, ScalarType dtype) const { return TypeDefault::norm(const_cast(*this), p, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor"); - return table->getOp, ScalarType)>(type_set(), is_variable())(const_cast(*this), p, dtype); + return table->getOp, ScalarType)>(type_set())(const_cast(*this), p, dtype); #endif } inline Tensor Tensor::norm(Scalar p) const { @@ -2368,7 +2368,7 @@ inline Tensor Tensor::norm(Scalar p) const { return TypeDefault::norm(const_cast(*this), p); #else static auto table = globalATenDispatch().getOpTable("aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), p); + return table->getOp(type_set())(const_cast(*this), p); #endif } inline Tensor Tensor::norm(c10::optional p, IntArrayRef dim, bool keepdim, ScalarType dtype) const { @@ -2376,7 +2376,7 @@ inline Tensor Tensor::norm(c10::optional p, IntArrayRef dim, bool keepdi return TypeDefault::norm(const_cast(*this), p, dim, keepdim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor"); - return table->getOp, IntArrayRef, bool, ScalarType)>(type_set(), is_variable())(const_cast(*this), p, dim, keepdim, dtype); + return table->getOp, IntArrayRef, bool, ScalarType)>(type_set())(const_cast(*this), p, dim, keepdim, dtype); #endif } inline Tensor Tensor::norm(c10::optional p, IntArrayRef dim, bool keepdim) const { @@ -2384,7 +2384,7 @@ inline Tensor Tensor::norm(c10::optional p, IntArrayRef dim, bool keepdi return TypeDefault::norm(const_cast(*this), p, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor"); - return table->getOp, IntArrayRef, bool)>(type_set(), is_variable())(const_cast(*this), p, dim, keepdim); + return table->getOp, IntArrayRef, bool)>(type_set())(const_cast(*this), p, dim, keepdim); #endif } #ifdef BUILD_NAMEDTENSOR @@ -2393,7 +2393,7 @@ inline Tensor Tensor::norm(c10::optional p, DimnameList dim, bool keepdi return TypeDefault::norm(const_cast(*this), p, dim, keepdim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? 
p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor"); - return table->getOp, DimnameList, bool, ScalarType)>(type_set(), is_variable())(const_cast(*this), p, dim, keepdim, dtype); + return table->getOp, DimnameList, bool, ScalarType)>(type_set())(const_cast(*this), p, dim, keepdim, dtype); #endif } #endif @@ -2403,7 +2403,7 @@ inline Tensor Tensor::norm(c10::optional p, DimnameList dim, bool keepdi return TypeDefault::norm(const_cast(*this), p, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor"); - return table->getOp, DimnameList, bool)>(type_set(), is_variable())(const_cast(*this), p, dim, keepdim); + return table->getOp, DimnameList, bool)>(type_set())(const_cast(*this), p, dim, keepdim); #endif } #endif @@ -2424,7 +2424,7 @@ inline Tensor Tensor::clone() const { } #else static auto table = globalATenDispatch().getOpTable("aten::clone(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::resize_as_(const Tensor & the_template) const { @@ -2441,7 +2441,7 @@ inline Tensor & Tensor::resize_as_(const Tensor & the_template) const { } #else static auto table = globalATenDispatch().getOpTable("aten::resize_as_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), the_template); + return table->getOp(type_set())(const_cast(*this), the_template); #endif } inline Tensor Tensor::pow(Scalar exponent) const { @@ -2458,7 +2458,7 @@ inline Tensor Tensor::pow(Scalar exponent) const { } #else static auto table = globalATenDispatch().getOpTable("aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), exponent); + return table->getOp(type_set())(const_cast(*this), exponent); #endif } inline Tensor & Tensor::zero_() const { @@ -2475,7 +2475,7 @@ inline Tensor & Tensor::zero_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::zero_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::sub(const Tensor & other, Scalar alpha) const { @@ -2483,7 +2483,7 @@ inline Tensor Tensor::sub(const Tensor & other, Scalar alpha) const { return TypeDefault::sub(const_cast(*this), other, alpha); #else static auto table = globalATenDispatch().getOpTable("aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other, alpha); + return table->getOp(type_set())(const_cast(*this), other, alpha); #endif } inline Tensor & Tensor::sub_(const Tensor & other, Scalar alpha) const { @@ -2491,7 +2491,7 @@ inline Tensor & Tensor::sub_(const Tensor & other, Scalar alpha) const { return TypeDefault::sub_(const_cast(*this), other, alpha); #else static auto table = globalATenDispatch().getOpTable("aten::sub_.Tensor(Tensor(a!) 
self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other, alpha); + return table->getOp(type_set())(const_cast(*this), other, alpha); #endif } inline Tensor Tensor::sub(Scalar other, Scalar alpha) const { @@ -2499,7 +2499,7 @@ inline Tensor Tensor::sub(Scalar other, Scalar alpha) const { return TypeDefault::sub(const_cast(*this), other, alpha); #else static auto table = globalATenDispatch().getOpTable("aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other, alpha); + return table->getOp(type_set())(const_cast(*this), other, alpha); #endif } inline Tensor & Tensor::sub_(Scalar other, Scalar alpha) const { @@ -2507,7 +2507,7 @@ inline Tensor & Tensor::sub_(Scalar other, Scalar alpha) const { return TypeDefault::sub_(const_cast(*this), other, alpha); #else static auto table = globalATenDispatch().getOpTable("aten::sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other, alpha); + return table->getOp(type_set())(const_cast(*this), other, alpha); #endif } inline Tensor Tensor::addmm(const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) const { @@ -2515,7 +2515,7 @@ inline Tensor Tensor::addmm(const Tensor & mat1, const Tensor & mat2, Scalar bet return TypeDefault::addmm(const_cast(*this), mat1, mat2, beta, alpha); #else static auto table = globalATenDispatch().getOpTable("aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), mat1, mat2, beta, alpha); + return table->getOp(type_set())(const_cast(*this), mat1, mat2, beta, alpha); #endif } inline Tensor & Tensor::addmm_(const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) const { @@ -2523,7 +2523,7 @@ inline Tensor & Tensor::addmm_(const Tensor & mat1, const Tensor & mat2, Scalar return TypeDefault::addmm_(const_cast(*this), mat1, mat2, beta, alpha); #else static auto table = globalATenDispatch().getOpTable("aten::addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), mat1, mat2, beta, alpha); + return table->getOp(type_set())(const_cast(*this), mat1, mat2, beta, alpha); #endif } inline Tensor & Tensor::sparse_resize_(IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) const { @@ -2537,7 +2537,7 @@ inline Tensor & Tensor::sparse_resize_(IntArrayRef size, int64_t sparse_dim, int } #else static auto table = globalATenDispatch().getOpTable("aten::sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), size, sparse_dim, dense_dim); + return table->getOp(type_set())(const_cast(*this), size, sparse_dim, dense_dim); #endif } inline Tensor & Tensor::sparse_resize_and_clear_(IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) const { @@ -2551,7 +2551,7 @@ inline Tensor & Tensor::sparse_resize_and_clear_(IntArrayRef size, int64_t spars } #else static auto table = globalATenDispatch().getOpTable("aten::sparse_resize_and_clear_(Tensor(a!) 
self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), size, sparse_dim, dense_dim); + return table->getOp(type_set())(const_cast(*this), size, sparse_dim, dense_dim); #endif } inline Tensor Tensor::sparse_mask(const Tensor & mask) const { @@ -2565,7 +2565,7 @@ inline Tensor Tensor::sparse_mask(const Tensor & mask) const { } #else static auto table = globalATenDispatch().getOpTable("aten::sparse_mask(Tensor self, Tensor mask) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), mask); + return table->getOp(type_set())(const_cast(*this), mask); #endif } inline Tensor Tensor::to_dense() const { @@ -2579,7 +2579,7 @@ inline Tensor Tensor::to_dense() const { } #else static auto table = globalATenDispatch().getOpTable("aten::to_dense(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline int64_t Tensor::sparse_dim() const { @@ -2593,7 +2593,7 @@ inline int64_t Tensor::sparse_dim() const { } #else static auto table = globalATenDispatch().getOpTable("aten::sparse_dim(Tensor self) -> int"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline int64_t Tensor::_dimI() const { @@ -2607,7 +2607,7 @@ inline int64_t Tensor::_dimI() const { } #else static auto table = globalATenDispatch().getOpTable("aten::_dimI(Tensor self) -> int"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline int64_t Tensor::dense_dim() const { @@ -2621,7 +2621,7 @@ inline int64_t Tensor::dense_dim() const { } #else static auto table = globalATenDispatch().getOpTable("aten::dense_dim(Tensor self) -> int"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline int64_t Tensor::_dimV() const { @@ -2635,7 +2635,7 @@ inline int64_t Tensor::_dimV() const { } #else static auto table = globalATenDispatch().getOpTable("aten::_dimV(Tensor self) -> int"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline int64_t Tensor::_nnz() const { @@ -2649,7 +2649,7 @@ inline int64_t Tensor::_nnz() const { } #else static auto table = globalATenDispatch().getOpTable("aten::_nnz(Tensor self) -> int"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::coalesce() const { @@ -2663,7 +2663,7 @@ inline Tensor Tensor::coalesce() const { } #else static auto table = globalATenDispatch().getOpTable("aten::coalesce(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline bool Tensor::is_coalesced() const { @@ -2677,7 +2677,7 @@ inline bool Tensor::is_coalesced() const { } #else static auto table = globalATenDispatch().getOpTable("aten::is_coalesced(Tensor self) -> bool"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::_indices() const { @@ -2691,7 +2691,7 @@ inline Tensor Tensor::_indices() const { } #else static auto table = globalATenDispatch().getOpTable("aten::_indices(Tensor(a) self) -> 
Tensor(a)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::_values() const { @@ -2705,7 +2705,7 @@ inline Tensor Tensor::_values() const { } #else static auto table = globalATenDispatch().getOpTable("aten::_values(Tensor(a) self) -> Tensor(a)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::_coalesced_(bool coalesced) const { @@ -2719,7 +2719,7 @@ inline Tensor & Tensor::_coalesced_(bool coalesced) const { } #else static auto table = globalATenDispatch().getOpTable("aten::_coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), coalesced); + return table->getOp(type_set())(const_cast(*this), coalesced); #endif } inline Tensor Tensor::indices() const { @@ -2733,7 +2733,7 @@ inline Tensor Tensor::indices() const { } #else static auto table = globalATenDispatch().getOpTable("aten::indices(Tensor(a) self) -> Tensor(a)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::values() const { @@ -2747,7 +2747,7 @@ inline Tensor Tensor::values() const { } #else static auto table = globalATenDispatch().getOpTable("aten::values(Tensor(a) self) -> Tensor(a)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline int64_t Tensor::numel() const { @@ -2755,7 +2755,7 @@ inline int64_t Tensor::numel() const { return TypeDefault::numel(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::numel(Tensor self) -> int"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline std::vector Tensor::unbind(int64_t dim) const { @@ -2763,7 +2763,7 @@ inline std::vector Tensor::unbind(int64_t dim) const { return TypeDefault::unbind(const_cast(*this), dim); #else static auto table = globalATenDispatch().getOpTable("aten::unbind(Tensor(a) self, int dim=0) -> Tensor(a)[]"); - return table->getOp (const Tensor &, int64_t)>(type_set(), is_variable())(const_cast(*this), dim); + return table->getOp (const Tensor &, int64_t)>(type_set())(const_cast(*this), dim); #endif } #ifdef BUILD_NAMEDTENSOR @@ -2772,7 +2772,7 @@ inline std::vector Tensor::unbind(Dimname dim) const { return TypeDefault::unbind(const_cast(*this), dim); #else static auto table = globalATenDispatch().getOpTable("aten::unbind(Tensor(a) self, Dimname dim) -> Tensor(a)[]"); - return table->getOp (const Tensor &, Dimname)>(type_set(), is_variable())(const_cast(*this), dim); + return table->getOp (const Tensor &, Dimname)>(type_set())(const_cast(*this), dim); #endif } #endif @@ -2787,7 +2787,7 @@ inline Tensor Tensor::to_sparse(int64_t sparse_dim) const { } #else static auto table = globalATenDispatch().getOpTable("aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), sparse_dim); + return table->getOp(type_set())(const_cast(*this), sparse_dim); #endif } inline Tensor Tensor::to_sparse() const { @@ -2801,7 +2801,7 @@ inline Tensor Tensor::to_sparse() const { } #else static auto table = globalATenDispatch().getOpTable("aten::to_sparse(Tensor self) -> Tensor"); - return table->getOp(type_set(), 
is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::to_mkldnn() const { @@ -2815,7 +2815,7 @@ inline Tensor Tensor::to_mkldnn() const { } #else static auto table = globalATenDispatch().getOpTable("aten::to_mkldnn(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::dequantize() const { @@ -2829,7 +2829,7 @@ inline Tensor Tensor::dequantize() const { } #else static auto table = globalATenDispatch().getOpTable("aten::dequantize(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline double Tensor::q_scale() const { @@ -2843,7 +2843,7 @@ inline double Tensor::q_scale() const { } #else static auto table = globalATenDispatch().getOpTable("aten::q_scale(Tensor self) -> float"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline int64_t Tensor::q_zero_point() const { @@ -2857,7 +2857,7 @@ inline int64_t Tensor::q_zero_point() const { } #else static auto table = globalATenDispatch().getOpTable("aten::q_zero_point(Tensor self) -> int"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::q_per_channel_scales() const { @@ -2871,7 +2871,7 @@ inline Tensor Tensor::q_per_channel_scales() const { } #else static auto table = globalATenDispatch().getOpTable("aten::q_per_channel_scales(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::q_per_channel_zero_points() const { @@ -2885,7 +2885,7 @@ inline Tensor Tensor::q_per_channel_zero_points() const { } #else static auto table = globalATenDispatch().getOpTable("aten::q_per_channel_zero_points(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::int_repr() const { @@ -2899,7 +2899,7 @@ inline Tensor Tensor::int_repr() const { } #else static auto table = globalATenDispatch().getOpTable("aten::int_repr(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline QScheme Tensor::qscheme() const { @@ -2913,7 +2913,7 @@ inline QScheme Tensor::qscheme() const { } #else static auto table = globalATenDispatch().getOpTable("aten::qscheme(Tensor self) -> QScheme"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::to(const TensorOptions & options, bool non_blocking, bool copy) const { @@ -2921,7 +2921,7 @@ inline Tensor Tensor::to(const TensorOptions & options, bool non_blocking, bool return TypeDefault::to(const_cast(*this), options, non_blocking, copy); #else static auto table = globalATenDispatch().getOpTable("aten::to.dtype_layout(Tensor self, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False, bool non_blocking=False, bool copy=False) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), options, non_blocking, copy); + return table->getOp(type_set())(const_cast(*this), 
options, non_blocking, copy); #endif } inline Tensor Tensor::to(Device device, ScalarType dtype, bool non_blocking, bool copy) const { @@ -2929,7 +2929,7 @@ inline Tensor Tensor::to(Device device, ScalarType dtype, bool non_blocking, boo return TypeDefault::to(const_cast(*this), device, dtype, non_blocking, copy); #else static auto table = globalATenDispatch().getOpTable("aten::to.device(Tensor self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), device, dtype, non_blocking, copy); + return table->getOp(type_set())(const_cast(*this), device, dtype, non_blocking, copy); #endif } inline Tensor Tensor::to(ScalarType dtype, bool non_blocking, bool copy) const { @@ -2937,7 +2937,7 @@ inline Tensor Tensor::to(ScalarType dtype, bool non_blocking, bool copy) const { return TypeDefault::to(const_cast(*this), dtype, non_blocking, copy); #else static auto table = globalATenDispatch().getOpTable("aten::to.dtype(Tensor self, ScalarType dtype, bool non_blocking=False, bool copy=False) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dtype, non_blocking, copy); + return table->getOp(type_set())(const_cast(*this), dtype, non_blocking, copy); #endif } inline Tensor Tensor::to(const Tensor & other, bool non_blocking, bool copy) const { @@ -2945,7 +2945,7 @@ inline Tensor Tensor::to(const Tensor & other, bool non_blocking, bool copy) con return TypeDefault::to(const_cast(*this), other, non_blocking, copy); #else static auto table = globalATenDispatch().getOpTable("aten::to.other(Tensor self, Tensor other, bool non_blocking=False, bool copy=False) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other, non_blocking, copy); + return table->getOp(type_set())(const_cast(*this), other, non_blocking, copy); #endif } inline Scalar Tensor::item() const { @@ -2953,7 +2953,7 @@ inline Scalar Tensor::item() const { return TypeDefault::item(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::item(Tensor self) -> Scalar"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::set_(Storage source) const { @@ -2967,7 +2967,7 @@ inline Tensor & Tensor::set_(Storage source) const { } #else static auto table = globalATenDispatch().getOpTable("aten::set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), source); + return table->getOp(type_set())(const_cast(*this), source); #endif } inline Tensor & Tensor::set_(Storage source, int64_t storage_offset, IntArrayRef size, IntArrayRef stride) const { @@ -2984,7 +2984,7 @@ inline Tensor & Tensor::set_(Storage source, int64_t storage_offset, IntArrayRef } #else static auto table = globalATenDispatch().getOpTable("aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, int storage_offset, int[] size, int[] stride=[]) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), source, storage_offset, size, stride); + return table->getOp(type_set())(const_cast(*this), source, storage_offset, size, stride); #endif } inline Tensor & Tensor::set_(const Tensor & source) const { @@ -2998,7 +2998,7 @@ inline Tensor & Tensor::set_(const Tensor & source) const { } #else static auto table = globalATenDispatch().getOpTable("aten::set_.source_Tensor(Tensor(a!) 
self, Tensor source) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), source); + return table->getOp(type_set())(const_cast(*this), source); #endif } inline Tensor & Tensor::set_() const { @@ -3012,7 +3012,7 @@ inline Tensor & Tensor::set_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::set_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::set_quantizer_(ConstQuantizerPtr quantizer) const { @@ -3026,7 +3026,7 @@ inline Tensor & Tensor::set_quantizer_(ConstQuantizerPtr quantizer) const { } #else static auto table = globalATenDispatch().getOpTable("aten::set_quantizer_(Tensor(a!) self, ConstQuantizerPtr quantizer) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), quantizer); + return table->getOp(type_set())(const_cast(*this), quantizer); #endif } inline bool Tensor::is_set_to(const Tensor & tensor) const { @@ -3040,7 +3040,7 @@ inline bool Tensor::is_set_to(const Tensor & tensor) const { } #else static auto table = globalATenDispatch().getOpTable("aten::is_set_to(Tensor self, Tensor tensor) -> bool"); - return table->getOp(type_set(), is_variable())(const_cast(*this), tensor); + return table->getOp(type_set())(const_cast(*this), tensor); #endif } inline Tensor & Tensor::masked_fill_(const Tensor & mask, Scalar value) const { @@ -3054,7 +3054,7 @@ inline Tensor & Tensor::masked_fill_(const Tensor & mask, Scalar value) const { } #else static auto table = globalATenDispatch().getOpTable("aten::masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), mask, value); + return table->getOp(type_set())(const_cast(*this), mask, value); #endif } inline Tensor Tensor::masked_fill(const Tensor & mask, Scalar value) const { @@ -3062,7 +3062,7 @@ inline Tensor Tensor::masked_fill(const Tensor & mask, Scalar value) const { return TypeDefault::masked_fill(const_cast(*this), mask, value); #else static auto table = globalATenDispatch().getOpTable("aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), mask, value); + return table->getOp(type_set())(const_cast(*this), mask, value); #endif } inline Tensor & Tensor::masked_fill_(const Tensor & mask, const Tensor & value) const { @@ -3076,7 +3076,7 @@ inline Tensor & Tensor::masked_fill_(const Tensor & mask, const Tensor & value) } #else static auto table = globalATenDispatch().getOpTable("aten::masked_fill_.Tensor(Tensor(a!) 
self, Tensor mask, Tensor value) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), mask, value); + return table->getOp(type_set())(const_cast(*this), mask, value); #endif } inline Tensor Tensor::masked_fill(const Tensor & mask, const Tensor & value) const { @@ -3084,7 +3084,7 @@ inline Tensor Tensor::masked_fill(const Tensor & mask, const Tensor & value) con return TypeDefault::masked_fill(const_cast(*this), mask, value); #else static auto table = globalATenDispatch().getOpTable("aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), mask, value); + return table->getOp(type_set())(const_cast(*this), mask, value); #endif } inline Tensor & Tensor::masked_scatter_(const Tensor & mask, const Tensor & source) const { @@ -3098,7 +3098,7 @@ inline Tensor & Tensor::masked_scatter_(const Tensor & mask, const Tensor & sour } #else static auto table = globalATenDispatch().getOpTable("aten::masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), mask, source); + return table->getOp(type_set())(const_cast(*this), mask, source); #endif } inline Tensor Tensor::masked_scatter(const Tensor & mask, const Tensor & source) const { @@ -3106,7 +3106,7 @@ inline Tensor Tensor::masked_scatter(const Tensor & mask, const Tensor & source) return TypeDefault::masked_scatter(const_cast(*this), mask, source); #else static auto table = globalATenDispatch().getOpTable("aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), mask, source); + return table->getOp(type_set())(const_cast(*this), mask, source); #endif } inline Tensor Tensor::view(IntArrayRef size) const { @@ -3123,7 +3123,7 @@ inline Tensor Tensor::view(IntArrayRef size) const { } #else static auto table = globalATenDispatch().getOpTable("aten::view(Tensor(a) self, int[] size) -> Tensor(a)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), size); + return table->getOp(type_set())(const_cast(*this), size); #endif } inline Tensor & Tensor::put_(const Tensor & index, const Tensor & source, bool accumulate) const { @@ -3137,7 +3137,7 @@ inline Tensor & Tensor::put_(const Tensor & index, const Tensor & source, bool a } #else static auto table = globalATenDispatch().getOpTable("aten::put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), index, source, accumulate); + return table->getOp(type_set())(const_cast(*this), index, source, accumulate); #endif } inline Tensor & Tensor::index_add_(int64_t dim, const Tensor & index, const Tensor & source) const { @@ -3151,7 +3151,7 @@ inline Tensor & Tensor::index_add_(int64_t dim, const Tensor & index, const Tens } #else static auto table = globalATenDispatch().getOpTable("aten::index_add_(Tensor(a!) 
self, int dim, Tensor index, Tensor source) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, index, source); + return table->getOp(type_set())(const_cast(*this), dim, index, source); #endif } inline Tensor Tensor::index_add(int64_t dim, const Tensor & index, const Tensor & source) const { @@ -3159,7 +3159,7 @@ inline Tensor Tensor::index_add(int64_t dim, const Tensor & index, const Tensor return TypeDefault::index_add(const_cast(*this), dim, index, source); #else static auto table = globalATenDispatch().getOpTable("aten::index_add(Tensor self, int dim, Tensor index, Tensor source) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, index, source); + return table->getOp(type_set())(const_cast(*this), dim, index, source); #endif } inline Tensor & Tensor::index_fill_(int64_t dim, const Tensor & index, Scalar value) const { @@ -3173,7 +3173,7 @@ inline Tensor & Tensor::index_fill_(int64_t dim, const Tensor & index, Scalar va } #else static auto table = globalATenDispatch().getOpTable("aten::index_fill_.Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, index, value); + return table->getOp(type_set())(const_cast(*this), dim, index, value); #endif } inline Tensor Tensor::index_fill(int64_t dim, const Tensor & index, Scalar value) const { @@ -3181,7 +3181,7 @@ inline Tensor Tensor::index_fill(int64_t dim, const Tensor & index, Scalar value return TypeDefault::index_fill(const_cast(*this), dim, index, value); #else static auto table = globalATenDispatch().getOpTable("aten::index_fill.Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, index, value); + return table->getOp(type_set())(const_cast(*this), dim, index, value); #endif } inline Tensor & Tensor::index_fill_(int64_t dim, const Tensor & index, const Tensor & value) const { @@ -3195,7 +3195,7 @@ inline Tensor & Tensor::index_fill_(int64_t dim, const Tensor & index, const Ten } #else static auto table = globalATenDispatch().getOpTable("aten::index_fill_.Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, index, value); + return table->getOp(type_set())(const_cast(*this), dim, index, value); #endif } inline Tensor Tensor::index_fill(int64_t dim, const Tensor & index, const Tensor & value) const { @@ -3203,7 +3203,7 @@ inline Tensor Tensor::index_fill(int64_t dim, const Tensor & index, const Tensor return TypeDefault::index_fill(const_cast(*this), dim, index, value); #else static auto table = globalATenDispatch().getOpTable("aten::index_fill.Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, index, value); + return table->getOp(type_set())(const_cast(*this), dim, index, value); #endif } inline Tensor & Tensor::scatter_(int64_t dim, const Tensor & index, const Tensor & src) const { @@ -3217,7 +3217,7 @@ inline Tensor & Tensor::scatter_(int64_t dim, const Tensor & index, const Tensor } #else static auto table = globalATenDispatch().getOpTable("aten::scatter_.src(Tensor(a!) 
self, int dim, Tensor index, Tensor src) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, index, src); + return table->getOp(type_set())(const_cast(*this), dim, index, src); #endif } inline Tensor Tensor::scatter(int64_t dim, const Tensor & index, const Tensor & src) const { @@ -3225,7 +3225,7 @@ inline Tensor Tensor::scatter(int64_t dim, const Tensor & index, const Tensor & return TypeDefault::scatter(const_cast(*this), dim, index, src); #else static auto table = globalATenDispatch().getOpTable("aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, index, src); + return table->getOp(type_set())(const_cast(*this), dim, index, src); #endif } inline Tensor & Tensor::scatter_(int64_t dim, const Tensor & index, Scalar value) const { @@ -3239,7 +3239,7 @@ inline Tensor & Tensor::scatter_(int64_t dim, const Tensor & index, Scalar value } #else static auto table = globalATenDispatch().getOpTable("aten::scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, index, value); + return table->getOp(type_set())(const_cast(*this), dim, index, value); #endif } inline Tensor Tensor::scatter(int64_t dim, const Tensor & index, Scalar value) const { @@ -3247,7 +3247,7 @@ inline Tensor Tensor::scatter(int64_t dim, const Tensor & index, Scalar value) c return TypeDefault::scatter(const_cast(*this), dim, index, value); #else static auto table = globalATenDispatch().getOpTable("aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, index, value); + return table->getOp(type_set())(const_cast(*this), dim, index, value); #endif } inline Tensor & Tensor::scatter_add_(int64_t dim, const Tensor & index, const Tensor & src) const { @@ -3261,7 +3261,7 @@ inline Tensor & Tensor::scatter_add_(int64_t dim, const Tensor & index, const Te } #else static auto table = globalATenDispatch().getOpTable("aten::scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, index, src); + return table->getOp(type_set())(const_cast(*this), dim, index, src); #endif } inline Tensor Tensor::scatter_add(int64_t dim, const Tensor & index, const Tensor & src) const { @@ -3269,7 +3269,7 @@ inline Tensor Tensor::scatter_add(int64_t dim, const Tensor & index, const Tenso return TypeDefault::scatter_add(const_cast(*this), dim, index, src); #else static auto table = globalATenDispatch().getOpTable("aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, index, src); + return table->getOp(type_set())(const_cast(*this), dim, index, src); #endif } inline Tensor & Tensor::lt_(Scalar other) const { @@ -3283,7 +3283,7 @@ inline Tensor & Tensor::lt_(Scalar other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::lt_.Scalar(Tensor(a!) 
self, Scalar other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::lt_(const Tensor & other) const { @@ -3297,7 +3297,7 @@ inline Tensor & Tensor::lt_(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::gt_(Scalar other) const { @@ -3311,7 +3311,7 @@ inline Tensor & Tensor::gt_(Scalar other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::gt_(const Tensor & other) const { @@ -3325,7 +3325,7 @@ inline Tensor & Tensor::gt_(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::le_(Scalar other) const { @@ -3339,7 +3339,7 @@ inline Tensor & Tensor::le_(Scalar other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::le_(const Tensor & other) const { @@ -3353,7 +3353,7 @@ inline Tensor & Tensor::le_(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::ge_(Scalar other) const { @@ -3367,7 +3367,7 @@ inline Tensor & Tensor::ge_(Scalar other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::ge_(const Tensor & other) const { @@ -3381,7 +3381,7 @@ inline Tensor & Tensor::ge_(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::eq_(Scalar other) const { @@ -3395,7 +3395,7 @@ inline Tensor & Tensor::eq_(Scalar other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::eq_.Scalar(Tensor(a!) 
self, Scalar other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::eq_(const Tensor & other) const { @@ -3409,7 +3409,7 @@ inline Tensor & Tensor::eq_(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::ne_(Scalar other) const { @@ -3423,7 +3423,7 @@ inline Tensor & Tensor::ne_(Scalar other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::ne_(const Tensor & other) const { @@ -3437,7 +3437,7 @@ inline Tensor & Tensor::ne_(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::__and__(Scalar other) const { @@ -3451,7 +3451,7 @@ inline Tensor Tensor::__and__(Scalar other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::__and__(const Tensor & other) const { @@ -3465,7 +3465,7 @@ inline Tensor Tensor::__and__(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::__iand__(Scalar other) const { @@ -3479,7 +3479,7 @@ inline Tensor & Tensor::__iand__(Scalar other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::__iand__(const Tensor & other) const { @@ -3493,7 +3493,7 @@ inline Tensor & Tensor::__iand__(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::__iand__.Tensor(Tensor(a!) 
self, Tensor other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::__or__(Scalar other) const { @@ -3507,7 +3507,7 @@ inline Tensor Tensor::__or__(Scalar other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::__or__(const Tensor & other) const { @@ -3521,7 +3521,7 @@ inline Tensor Tensor::__or__(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::__ior__(Scalar other) const { @@ -3535,7 +3535,7 @@ inline Tensor & Tensor::__ior__(Scalar other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::__ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::__ior__(const Tensor & other) const { @@ -3549,7 +3549,7 @@ inline Tensor & Tensor::__ior__(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::__ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::__xor__(Scalar other) const { @@ -3563,7 +3563,7 @@ inline Tensor Tensor::__xor__(Scalar other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::__xor__(const Tensor & other) const { @@ -3577,7 +3577,7 @@ inline Tensor Tensor::__xor__(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::__ixor__(Scalar other) const { @@ -3591,7 +3591,7 @@ inline Tensor & Tensor::__ixor__(Scalar other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::__ixor__(const Tensor & other) const { @@ -3605,7 +3605,7 @@ inline Tensor & Tensor::__ixor__(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::__ixor__.Tensor(Tensor(a!) 
self, Tensor other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::__lshift__(Scalar other) const { @@ -3619,7 +3619,7 @@ inline Tensor Tensor::__lshift__(Scalar other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::__lshift__(const Tensor & other) const { @@ -3633,7 +3633,7 @@ inline Tensor Tensor::__lshift__(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::__ilshift__(Scalar other) const { @@ -3647,7 +3647,7 @@ inline Tensor & Tensor::__ilshift__(Scalar other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::__ilshift__(const Tensor & other) const { @@ -3661,7 +3661,7 @@ inline Tensor & Tensor::__ilshift__(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::__rshift__(Scalar other) const { @@ -3675,7 +3675,7 @@ inline Tensor Tensor::__rshift__(Scalar other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::__rshift__(const Tensor & other) const { @@ -3689,7 +3689,7 @@ inline Tensor Tensor::__rshift__(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::__irshift__(Scalar other) const { @@ -3703,7 +3703,7 @@ inline Tensor & Tensor::__irshift__(Scalar other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::__irshift__(const Tensor & other) const { @@ -3717,7 +3717,7 @@ inline Tensor & Tensor::__irshift__(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::__irshift__.Tensor(Tensor(a!) 
self, Tensor other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::lgamma_() const { @@ -3731,7 +3731,7 @@ inline Tensor & Tensor::lgamma_() const { } #else static auto table = globalATenDispatch().getOpTable("aten::lgamma_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::atan2_(const Tensor & other) const { @@ -3739,7 +3739,7 @@ inline Tensor & Tensor::atan2_(const Tensor & other) const { return TypeDefault::atan2_(const_cast(*this), other); #else static auto table = globalATenDispatch().getOpTable("aten::atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::tril_(int64_t diagonal) const { @@ -3753,7 +3753,7 @@ inline Tensor & Tensor::tril_(int64_t diagonal) const { } #else static auto table = globalATenDispatch().getOpTable("aten::tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), diagonal); + return table->getOp(type_set())(const_cast(*this), diagonal); #endif } inline Tensor & Tensor::triu_(int64_t diagonal) const { @@ -3767,7 +3767,7 @@ inline Tensor & Tensor::triu_(int64_t diagonal) const { } #else static auto table = globalATenDispatch().getOpTable("aten::triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), diagonal); + return table->getOp(type_set())(const_cast(*this), diagonal); #endif } inline Tensor & Tensor::digamma_() const { @@ -3775,7 +3775,7 @@ inline Tensor & Tensor::digamma_() const { return TypeDefault::digamma_(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::digamma_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::polygamma_(int64_t n) const { @@ -3783,7 +3783,7 @@ inline Tensor & Tensor::polygamma_(int64_t n) const { return TypeDefault::polygamma_(const_cast(*this), n); #else static auto table = globalATenDispatch().getOpTable("aten::polygamma_(Tensor(a!) self, int n) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), n); + return table->getOp(type_set())(const_cast(*this), n); #endif } inline Tensor & Tensor::renorm_(Scalar p, int64_t dim, Scalar maxnorm) const { @@ -3797,7 +3797,7 @@ inline Tensor & Tensor::renorm_(Scalar p, int64_t dim, Scalar maxnorm) const { } #else static auto table = globalATenDispatch().getOpTable("aten::renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), p, dim, maxnorm); + return table->getOp(type_set())(const_cast(*this), p, dim, maxnorm); #endif } inline Tensor & Tensor::pow_(Scalar exponent) const { @@ -3811,7 +3811,7 @@ inline Tensor & Tensor::pow_(Scalar exponent) const { } #else static auto table = globalATenDispatch().getOpTable("aten::pow_.Scalar(Tensor(a!) 
self, Scalar exponent) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), exponent); + return table->getOp(type_set())(const_cast(*this), exponent); #endif } inline Tensor & Tensor::pow_(const Tensor & exponent) const { @@ -3825,7 +3825,7 @@ inline Tensor & Tensor::pow_(const Tensor & exponent) const { } #else static auto table = globalATenDispatch().getOpTable("aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), exponent); + return table->getOp(type_set())(const_cast(*this), exponent); #endif } inline Tensor & Tensor::lerp_(const Tensor & end, Scalar weight) const { @@ -3839,7 +3839,7 @@ inline Tensor & Tensor::lerp_(const Tensor & end, Scalar weight) const { } #else static auto table = globalATenDispatch().getOpTable("aten::lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), end, weight); + return table->getOp(type_set())(const_cast(*this), end, weight); #endif } inline Tensor & Tensor::lerp_(const Tensor & end, const Tensor & weight) const { @@ -3853,7 +3853,7 @@ inline Tensor & Tensor::lerp_(const Tensor & end, const Tensor & weight) const { } #else static auto table = globalATenDispatch().getOpTable("aten::lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), end, weight); + return table->getOp(type_set())(const_cast(*this), end, weight); #endif } inline Tensor & Tensor::fmod_(Scalar other) const { @@ -3867,7 +3867,7 @@ inline Tensor & Tensor::fmod_(Scalar other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::fmod_(const Tensor & other) const { @@ -3881,7 +3881,7 @@ inline Tensor & Tensor::fmod_(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::remainder_(Scalar other) const { @@ -3895,7 +3895,7 @@ inline Tensor & Tensor::remainder_(Scalar other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::remainder_(const Tensor & other) const { @@ -3909,7 +3909,7 @@ inline Tensor & Tensor::remainder_(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor & Tensor::addbmm_(const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const { @@ -3923,7 +3923,7 @@ inline Tensor & Tensor::addbmm_(const Tensor & batch1, const Tensor & batch2, Sc } #else static auto table = globalATenDispatch().getOpTable("aten::addbmm_(Tensor(a!) 
self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), batch1, batch2, beta, alpha); + return table->getOp(type_set())(const_cast(*this), batch1, batch2, beta, alpha); #endif } inline Tensor Tensor::addbmm(const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const { @@ -3937,7 +3937,7 @@ inline Tensor Tensor::addbmm(const Tensor & batch1, const Tensor & batch2, Scala } #else static auto table = globalATenDispatch().getOpTable("aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), batch1, batch2, beta, alpha); + return table->getOp(type_set())(const_cast(*this), batch1, batch2, beta, alpha); #endif } inline Tensor & Tensor::addcdiv_(const Tensor & tensor1, const Tensor & tensor2, Scalar value) const { @@ -3945,7 +3945,7 @@ inline Tensor & Tensor::addcdiv_(const Tensor & tensor1, const Tensor & tensor2, return TypeDefault::addcdiv_(const_cast(*this), tensor1, tensor2, value); #else static auto table = globalATenDispatch().getOpTable("aten::addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), tensor1, tensor2, value); + return table->getOp(type_set())(const_cast(*this), tensor1, tensor2, value); #endif } inline Tensor & Tensor::random_(int64_t from, int64_t to, Generator * generator) const { @@ -3959,7 +3959,7 @@ inline Tensor & Tensor::random_(int64_t from, int64_t to, Generator * generator) } #else static auto table = globalATenDispatch().getOpTable("aten::random_.from(Tensor(a!) self, int from, int to, *, Generator? generator=None) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), from, to, generator); + return table->getOp(type_set())(const_cast(*this), from, to, generator); #endif } inline Tensor & Tensor::random_(int64_t to, Generator * generator) const { @@ -3973,7 +3973,7 @@ inline Tensor & Tensor::random_(int64_t to, Generator * generator) const { } #else static auto table = globalATenDispatch().getOpTable("aten::random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), to, generator); + return table->getOp(type_set())(const_cast(*this), to, generator); #endif } inline Tensor & Tensor::random_(Generator * generator) const { @@ -3987,7 +3987,7 @@ inline Tensor & Tensor::random_(Generator * generator) const { } #else static auto table = globalATenDispatch().getOpTable("aten::random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), generator); + return table->getOp(type_set())(const_cast(*this), generator); #endif } inline Tensor & Tensor::uniform_(double from, double to, Generator * generator) const { @@ -4001,7 +4001,7 @@ inline Tensor & Tensor::uniform_(double from, double to, Generator * generator) } #else static auto table = globalATenDispatch().getOpTable("aten::uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? 
generator=None) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), from, to, generator); + return table->getOp(type_set())(const_cast(*this), from, to, generator); #endif } inline Tensor & Tensor::normal_(double mean, double std, Generator * generator) const { @@ -4015,7 +4015,7 @@ inline Tensor & Tensor::normal_(double mean, double std, Generator * generator) } #else static auto table = globalATenDispatch().getOpTable("aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), mean, std, generator); + return table->getOp(type_set())(const_cast(*this), mean, std, generator); #endif } inline Tensor & Tensor::cauchy_(double median, double sigma, Generator * generator) const { @@ -4029,7 +4029,7 @@ inline Tensor & Tensor::cauchy_(double median, double sigma, Generator * generat } #else static auto table = globalATenDispatch().getOpTable("aten::cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), median, sigma, generator); + return table->getOp(type_set())(const_cast(*this), median, sigma, generator); #endif } inline Tensor & Tensor::log_normal_(double mean, double std, Generator * generator) const { @@ -4043,7 +4043,7 @@ inline Tensor & Tensor::log_normal_(double mean, double std, Generator * generat } #else static auto table = globalATenDispatch().getOpTable("aten::log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), mean, std, generator); + return table->getOp(type_set())(const_cast(*this), mean, std, generator); #endif } inline Tensor & Tensor::exponential_(double lambd, Generator * generator) const { @@ -4057,7 +4057,7 @@ inline Tensor & Tensor::exponential_(double lambd, Generator * generator) const } #else static auto table = globalATenDispatch().getOpTable("aten::exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), lambd, generator); + return table->getOp(type_set())(const_cast(*this), lambd, generator); #endif } inline Tensor & Tensor::geometric_(double p, Generator * generator) const { @@ -4071,7 +4071,7 @@ inline Tensor & Tensor::geometric_(double p, Generator * generator) const { } #else static auto table = globalATenDispatch().getOpTable("aten::geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), p, generator); + return table->getOp(type_set())(const_cast(*this), p, generator); #endif } inline Tensor Tensor::diag(int64_t diagonal) const { @@ -4085,7 +4085,7 @@ inline Tensor Tensor::diag(int64_t diagonal) const { } #else static auto table = globalATenDispatch().getOpTable("aten::diag(Tensor self, int diagonal=0) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), diagonal); + return table->getOp(type_set())(const_cast(*this), diagonal); #endif } inline Tensor Tensor::cross(const Tensor & other, c10::optional dim) const { @@ -4093,7 +4093,7 @@ inline Tensor Tensor::cross(const Tensor & other, c10::optional dim) co return TypeDefault::cross(const_cast(*this), other, dim); #else static auto table = globalATenDispatch().getOpTable("aten::cross(Tensor self, Tensor other, int? 
dim=None) -> Tensor"); - return table->getOp)>(type_set(), is_variable())(const_cast(*this), other, dim); + return table->getOp)>(type_set())(const_cast(*this), other, dim); #endif } inline Tensor Tensor::triu(int64_t diagonal) const { @@ -4101,7 +4101,7 @@ inline Tensor Tensor::triu(int64_t diagonal) const { return TypeDefault::triu(const_cast(*this), diagonal); #else static auto table = globalATenDispatch().getOpTable("aten::triu(Tensor self, int diagonal=0) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), diagonal); + return table->getOp(type_set())(const_cast(*this), diagonal); #endif } inline Tensor Tensor::tril(int64_t diagonal) const { @@ -4109,7 +4109,7 @@ inline Tensor Tensor::tril(int64_t diagonal) const { return TypeDefault::tril(const_cast(*this), diagonal); #else static auto table = globalATenDispatch().getOpTable("aten::tril(Tensor self, int diagonal=0) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), diagonal); + return table->getOp(type_set())(const_cast(*this), diagonal); #endif } inline Tensor Tensor::trace() const { @@ -4123,7 +4123,7 @@ inline Tensor Tensor::trace() const { } #else static auto table = globalATenDispatch().getOpTable("aten::trace(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::ne(Scalar other) const { @@ -4140,7 +4140,7 @@ inline Tensor Tensor::ne(Scalar other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::ne.Scalar(Tensor self, Scalar other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::ne(const Tensor & other) const { @@ -4157,7 +4157,7 @@ inline Tensor Tensor::ne(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::ne.Tensor(Tensor self, Tensor other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::eq(Scalar other) const { @@ -4174,7 +4174,7 @@ inline Tensor Tensor::eq(Scalar other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::eq.Scalar(Tensor self, Scalar other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::eq(const Tensor & other) const { @@ -4191,7 +4191,7 @@ inline Tensor Tensor::eq(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::eq.Tensor(Tensor self, Tensor other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::ge(Scalar other) const { @@ -4208,7 +4208,7 @@ inline Tensor Tensor::ge(Scalar other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::ge.Scalar(Tensor self, Scalar other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::ge(const Tensor & other) const { @@ -4225,7 +4225,7 @@ inline Tensor Tensor::ge(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::ge.Tensor(Tensor self, 
Tensor other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::le(Scalar other) const { @@ -4242,7 +4242,7 @@ inline Tensor Tensor::le(Scalar other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::le.Scalar(Tensor self, Scalar other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::le(const Tensor & other) const { @@ -4259,7 +4259,7 @@ inline Tensor Tensor::le(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::le.Tensor(Tensor self, Tensor other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::gt(Scalar other) const { @@ -4276,7 +4276,7 @@ inline Tensor Tensor::gt(Scalar other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::gt.Scalar(Tensor self, Scalar other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::gt(const Tensor & other) const { @@ -4293,7 +4293,7 @@ inline Tensor Tensor::gt(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::gt.Tensor(Tensor self, Tensor other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::lt(Scalar other) const { @@ -4310,7 +4310,7 @@ inline Tensor Tensor::lt(Scalar other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::lt.Scalar(Tensor self, Scalar other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::lt(const Tensor & other) const { @@ -4327,7 +4327,7 @@ inline Tensor Tensor::lt(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::lt.Tensor(Tensor self, Tensor other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::take(const Tensor & index) const { @@ -4341,7 +4341,7 @@ inline Tensor Tensor::take(const Tensor & index) const { } #else static auto table = globalATenDispatch().getOpTable("aten::take(Tensor self, Tensor index) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), index); + return table->getOp(type_set())(const_cast(*this), index); #endif } inline Tensor Tensor::index_select(int64_t dim, const Tensor & index) const { @@ -4358,7 +4358,7 @@ inline Tensor Tensor::index_select(int64_t dim, const Tensor & index) const { } #else static auto table = globalATenDispatch().getOpTable("aten::index_select(Tensor self, int dim, Tensor index) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, index); + return table->getOp(type_set())(const_cast(*this), dim, index); #endif } inline Tensor Tensor::masked_select(const Tensor & mask) const { @@ -4372,7 +4372,7 @@ inline Tensor Tensor::masked_select(const Tensor & mask) const { } #else static auto table = 
globalATenDispatch().getOpTable("aten::masked_select(Tensor self, Tensor mask) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), mask); + return table->getOp(type_set())(const_cast(*this), mask); #endif } inline Tensor Tensor::nonzero() const { @@ -4386,7 +4386,7 @@ inline Tensor Tensor::nonzero() const { } #else static auto table = globalATenDispatch().getOpTable("aten::nonzero(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline std::vector Tensor::nonzero_numpy() const { @@ -4394,7 +4394,7 @@ inline std::vector Tensor::nonzero_numpy() const { return TypeDefault::nonzero_numpy(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::nonzero_numpy(Tensor self) -> Tensor[]"); - return table->getOp (const Tensor &)>(type_set(), is_variable())(const_cast(*this)); + return table->getOp (const Tensor &)>(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::gather(int64_t dim, const Tensor & index, bool sparse_grad) const { @@ -4408,7 +4408,7 @@ inline Tensor Tensor::gather(int64_t dim, const Tensor & index, bool sparse_grad } #else static auto table = globalATenDispatch().getOpTable("aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, index, sparse_grad); + return table->getOp(type_set())(const_cast(*this), dim, index, sparse_grad); #endif } inline Tensor Tensor::addcmul(const Tensor & tensor1, const Tensor & tensor2, Scalar value) const { @@ -4416,7 +4416,7 @@ inline Tensor Tensor::addcmul(const Tensor & tensor1, const Tensor & tensor2, Sc return TypeDefault::addcmul(const_cast(*this), tensor1, tensor2, value); #else static auto table = globalATenDispatch().getOpTable("aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), tensor1, tensor2, value); + return table->getOp(type_set())(const_cast(*this), tensor1, tensor2, value); #endif } inline Tensor & Tensor::addcmul_(const Tensor & tensor1, const Tensor & tensor2, Scalar value) const { @@ -4424,7 +4424,7 @@ inline Tensor & Tensor::addcmul_(const Tensor & tensor1, const Tensor & tensor2, return TypeDefault::addcmul_(const_cast(*this), tensor1, tensor2, value); #else static auto table = globalATenDispatch().getOpTable("aten::addcmul_(Tensor(a!) 
self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), tensor1, tensor2, value); + return table->getOp(type_set())(const_cast(*this), tensor1, tensor2, value); #endif } inline Tensor Tensor::addcdiv(const Tensor & tensor1, const Tensor & tensor2, Scalar value) const { @@ -4432,7 +4432,7 @@ inline Tensor Tensor::addcdiv(const Tensor & tensor1, const Tensor & tensor2, Sc return TypeDefault::addcdiv(const_cast(*this), tensor1, tensor2, value); #else static auto table = globalATenDispatch().getOpTable("aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), tensor1, tensor2, value); + return table->getOp(type_set())(const_cast(*this), tensor1, tensor2, value); #endif } inline std::tuple Tensor::lstsq(const Tensor & A) const { @@ -4446,7 +4446,7 @@ inline std::tuple Tensor::lstsq(const Tensor & A) const { } #else static auto table = globalATenDispatch().getOpTable("aten::lstsq(Tensor self, Tensor A) -> (Tensor solution, Tensor QR)"); - return table->getOp (const Tensor &, const Tensor &)>(type_set(), is_variable())(const_cast(*this), A); + return table->getOp (const Tensor &, const Tensor &)>(type_set())(const_cast(*this), A); #endif } inline std::tuple Tensor::triangular_solve(const Tensor & A, bool upper, bool transpose, bool unitriangular) const { @@ -4454,7 +4454,7 @@ inline std::tuple Tensor::triangular_solve(const Tensor & A, bool return TypeDefault::triangular_solve(const_cast(*this), A, upper, transpose, unitriangular); #else static auto table = globalATenDispatch().getOpTable("aten::triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient)"); - return table->getOp (const Tensor &, const Tensor &, bool, bool, bool)>(type_set(), is_variable())(const_cast(*this), A, upper, transpose, unitriangular); + return table->getOp (const Tensor &, const Tensor &, bool, bool, bool)>(type_set())(const_cast(*this), A, upper, transpose, unitriangular); #endif } inline std::tuple Tensor::symeig(bool eigenvectors, bool upper) const { @@ -4462,7 +4462,7 @@ inline std::tuple Tensor::symeig(bool eigenvectors, bool upper) c return TypeDefault::symeig(const_cast(*this), eigenvectors, upper); #else static auto table = globalATenDispatch().getOpTable("aten::symeig(Tensor self, bool eigenvectors=False, bool upper=True) -> (Tensor eigenvalues, Tensor eigenvectors)"); - return table->getOp (const Tensor &, bool, bool)>(type_set(), is_variable())(const_cast(*this), eigenvectors, upper); + return table->getOp (const Tensor &, bool, bool)>(type_set())(const_cast(*this), eigenvectors, upper); #endif } inline std::tuple Tensor::eig(bool eigenvectors) const { @@ -4476,7 +4476,7 @@ inline std::tuple Tensor::eig(bool eigenvectors) const { } #else static auto table = globalATenDispatch().getOpTable("aten::eig(Tensor self, bool eigenvectors=False) -> (Tensor eigenvalues, Tensor eigenvectors)"); - return table->getOp (const Tensor &, bool)>(type_set(), is_variable())(const_cast(*this), eigenvectors); + return table->getOp (const Tensor &, bool)>(type_set())(const_cast(*this), eigenvectors); #endif } inline std::tuple Tensor::svd(bool some, bool compute_uv) const { @@ -4484,7 +4484,7 @@ inline std::tuple Tensor::svd(bool some, bool compute_uv) return TypeDefault::svd(const_cast(*this), some, compute_uv); #else static auto table = 
globalATenDispatch().getOpTable("aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)"); - return table->getOp (const Tensor &, bool, bool)>(type_set(), is_variable())(const_cast(*this), some, compute_uv); + return table->getOp (const Tensor &, bool, bool)>(type_set())(const_cast(*this), some, compute_uv); #endif } inline Tensor Tensor::cholesky(bool upper) const { @@ -4492,7 +4492,7 @@ inline Tensor Tensor::cholesky(bool upper) const { return TypeDefault::cholesky(const_cast(*this), upper); #else static auto table = globalATenDispatch().getOpTable("aten::cholesky(Tensor self, bool upper=False) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), upper); + return table->getOp(type_set())(const_cast(*this), upper); #endif } inline Tensor Tensor::cholesky_solve(const Tensor & input2, bool upper) const { @@ -4500,7 +4500,7 @@ inline Tensor Tensor::cholesky_solve(const Tensor & input2, bool upper) const { return TypeDefault::cholesky_solve(const_cast(*this), input2, upper); #else static auto table = globalATenDispatch().getOpTable("aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), input2, upper); + return table->getOp(type_set())(const_cast(*this), input2, upper); #endif } inline std::tuple Tensor::solve(const Tensor & A) const { @@ -4508,7 +4508,7 @@ inline std::tuple Tensor::solve(const Tensor & A) const { return TypeDefault::solve(const_cast(*this), A); #else static auto table = globalATenDispatch().getOpTable("aten::solve(Tensor self, Tensor A) -> (Tensor solution, Tensor LU)"); - return table->getOp (const Tensor &, const Tensor &)>(type_set(), is_variable())(const_cast(*this), A); + return table->getOp (const Tensor &, const Tensor &)>(type_set())(const_cast(*this), A); #endif } inline Tensor Tensor::cholesky_inverse(bool upper) const { @@ -4522,7 +4522,7 @@ inline Tensor Tensor::cholesky_inverse(bool upper) const { } #else static auto table = globalATenDispatch().getOpTable("aten::cholesky_inverse(Tensor self, bool upper=False) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), upper); + return table->getOp(type_set())(const_cast(*this), upper); #endif } inline std::tuple Tensor::qr(bool some) const { @@ -4530,7 +4530,7 @@ inline std::tuple Tensor::qr(bool some) const { return TypeDefault::qr(const_cast(*this), some); #else static auto table = globalATenDispatch().getOpTable("aten::qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R)"); - return table->getOp (const Tensor &, bool)>(type_set(), is_variable())(const_cast(*this), some); + return table->getOp (const Tensor &, bool)>(type_set())(const_cast(*this), some); #endif } inline std::tuple Tensor::geqrf() const { @@ -4544,7 +4544,7 @@ inline std::tuple Tensor::geqrf() const { } #else static auto table = globalATenDispatch().getOpTable("aten::geqrf(Tensor self) -> (Tensor a, Tensor tau)"); - return table->getOp (const Tensor &)>(type_set(), is_variable())(const_cast(*this)); + return table->getOp (const Tensor &)>(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::orgqr(const Tensor & input2) const { @@ -4558,7 +4558,7 @@ inline Tensor Tensor::orgqr(const Tensor & input2) const { } #else static auto table = globalATenDispatch().getOpTable("aten::orgqr(Tensor self, Tensor input2) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), input2); + return table->getOp(type_set())(const_cast(*this), 
input2); #endif } inline Tensor Tensor::ormqr(const Tensor & input2, const Tensor & input3, bool left, bool transpose) const { @@ -4572,7 +4572,7 @@ inline Tensor Tensor::ormqr(const Tensor & input2, const Tensor & input3, bool l } #else static auto table = globalATenDispatch().getOpTable("aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), input2, input3, left, transpose); + return table->getOp(type_set())(const_cast(*this), input2, input3, left, transpose); #endif } inline Tensor Tensor::lu_solve(const Tensor & LU_data, const Tensor & LU_pivots) const { @@ -4580,7 +4580,7 @@ inline Tensor Tensor::lu_solve(const Tensor & LU_data, const Tensor & LU_pivots) return TypeDefault::lu_solve(const_cast(*this), LU_data, LU_pivots); #else static auto table = globalATenDispatch().getOpTable("aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), LU_data, LU_pivots); + return table->getOp(type_set())(const_cast(*this), LU_data, LU_pivots); #endif } inline Tensor Tensor::multinomial(int64_t num_samples, bool replacement, Generator * generator) const { @@ -4594,7 +4594,7 @@ inline Tensor Tensor::multinomial(int64_t num_samples, bool replacement, Generat } #else static auto table = globalATenDispatch().getOpTable("aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), num_samples, replacement, generator); + return table->getOp(type_set())(const_cast(*this), num_samples, replacement, generator); #endif } inline Tensor Tensor::lgamma() const { @@ -4608,7 +4608,7 @@ inline Tensor Tensor::lgamma() const { } #else static auto table = globalATenDispatch().getOpTable("aten::lgamma(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::digamma() const { @@ -4616,7 +4616,7 @@ inline Tensor Tensor::digamma() const { return TypeDefault::digamma(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::digamma(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::polygamma(int64_t n) const { @@ -4624,7 +4624,7 @@ inline Tensor Tensor::polygamma(int64_t n) const { return TypeDefault::polygamma(n, const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::polygamma(int n, Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(n, const_cast(*this)); + return table->getOp(type_set())(n, const_cast(*this)); #endif } inline Tensor Tensor::erfinv() const { @@ -4632,7 +4632,7 @@ inline Tensor Tensor::erfinv() const { return TypeDefault::erfinv(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::erfinv(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::erfinv_() const { @@ -4640,7 +4640,7 @@ inline Tensor & Tensor::erfinv_() const { return TypeDefault::erfinv_(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::erfinv_(Tensor(a!) 
self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::sign() const { @@ -4648,7 +4648,7 @@ inline Tensor Tensor::sign() const { return TypeDefault::sign(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::sign(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor & Tensor::sign_() const { @@ -4656,7 +4656,7 @@ inline Tensor & Tensor::sign_() const { return TypeDefault::sign_(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::sign_(Tensor(a!) self) -> Tensor(a!)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::dist(const Tensor & other, Scalar p) const { @@ -4670,7 +4670,7 @@ inline Tensor Tensor::dist(const Tensor & other, Scalar p) const { } #else static auto table = globalATenDispatch().getOpTable("aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other, p); + return table->getOp(type_set())(const_cast(*this), other, p); #endif } inline Tensor Tensor::atan2(const Tensor & other) const { @@ -4678,7 +4678,7 @@ inline Tensor Tensor::atan2(const Tensor & other) const { return TypeDefault::atan2(const_cast(*this), other); #else static auto table = globalATenDispatch().getOpTable("aten::atan2(Tensor self, Tensor other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::lerp(const Tensor & end, Scalar weight) const { @@ -4692,7 +4692,7 @@ inline Tensor Tensor::lerp(const Tensor & end, Scalar weight) const { } #else static auto table = globalATenDispatch().getOpTable("aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), end, weight); + return table->getOp(type_set())(const_cast(*this), end, weight); #endif } inline Tensor Tensor::lerp(const Tensor & end, const Tensor & weight) const { @@ -4706,7 +4706,7 @@ inline Tensor Tensor::lerp(const Tensor & end, const Tensor & weight) const { } #else static auto table = globalATenDispatch().getOpTable("aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), end, weight); + return table->getOp(type_set())(const_cast(*this), end, weight); #endif } inline Tensor Tensor::histc(int64_t bins, Scalar min, Scalar max) const { @@ -4720,7 +4720,7 @@ inline Tensor Tensor::histc(int64_t bins, Scalar min, Scalar max) const { } #else static auto table = globalATenDispatch().getOpTable("aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), bins, min, max); + return table->getOp(type_set())(const_cast(*this), bins, min, max); #endif } inline Tensor Tensor::fmod(Scalar other) const { @@ -4734,7 +4734,7 @@ inline Tensor Tensor::fmod(Scalar other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), 
other); #endif } inline Tensor Tensor::fmod(const Tensor & other) const { @@ -4748,7 +4748,7 @@ inline Tensor Tensor::fmod(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::remainder(Scalar other) const { @@ -4762,7 +4762,7 @@ inline Tensor Tensor::remainder(Scalar other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::remainder(const Tensor & other) const { @@ -4776,7 +4776,7 @@ inline Tensor Tensor::remainder(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::min(const Tensor & other) const { @@ -4790,7 +4790,7 @@ inline Tensor Tensor::min(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::min.other(Tensor self, Tensor other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::min() const { @@ -4807,7 +4807,7 @@ inline Tensor Tensor::min() const { } #else static auto table = globalATenDispatch().getOpTable("aten::min(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::max(const Tensor & other) const { @@ -4821,7 +4821,7 @@ inline Tensor Tensor::max(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::max.other(Tensor self, Tensor other) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::max() const { @@ -4838,7 +4838,7 @@ inline Tensor Tensor::max() const { } #else static auto table = globalATenDispatch().getOpTable("aten::max(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::median() const { @@ -4852,7 +4852,7 @@ inline Tensor Tensor::median() const { } #else static auto table = globalATenDispatch().getOpTable("aten::median(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline std::tuple Tensor::sort(int64_t dim, bool descending) const { @@ -4869,7 +4869,7 @@ inline std::tuple Tensor::sort(int64_t dim, bool descending) cons } #else static auto table = globalATenDispatch().getOpTable("aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)"); - return table->getOp (const Tensor &, int64_t, bool)>(type_set(), is_variable())(const_cast(*this), dim, descending); + return table->getOp (const Tensor &, int64_t, bool)>(type_set())(const_cast(*this), dim, descending); #endif } 
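For orientation, one generated method body after this change reads roughly as in the sketch below, using aten::triu from the hunks above. The getOp template argument is just the C++ signature implied by the schema string and the const_cast is to Tensor&; the name of the static-dispatch preprocessor guard is an assumption here.

    // Sketch of a single generated Tensor method after this change; the real
    // code is emitted from the CodeTemplate in function_wrapper.py further down.
    inline Tensor Tensor::triu(int64_t diagonal) const {
    #ifdef USE_STATIC_DISPATCH  // guard name assumed
      return TypeDefault::triu(const_cast<Tensor&>(*this), diagonal);
    #else
      static auto table = globalATenDispatch().getOpTable(
          "aten::triu(Tensor self, int diagonal=0) -> Tensor");
      // is_variable() is no longer passed: the VariableTensorId bit carried in
      // type_set(), minus the thread-local excluded set, now selects the kernel.
      return table->getOp<Tensor (const Tensor &, int64_t)>(type_set())(
          const_cast<Tensor&>(*this), diagonal);
    #endif
    }
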
inline Tensor Tensor::argsort(int64_t dim, bool descending) const { @@ -4877,7 +4877,7 @@ inline Tensor Tensor::argsort(int64_t dim, bool descending) const { return TypeDefault::argsort(const_cast(*this), dim, descending); #else static auto table = globalATenDispatch().getOpTable("aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dim, descending); + return table->getOp(type_set())(const_cast(*this), dim, descending); #endif } inline std::tuple Tensor::topk(int64_t k, int64_t dim, bool largest, bool sorted) const { @@ -4885,7 +4885,7 @@ inline std::tuple Tensor::topk(int64_t k, int64_t dim, bool large return TypeDefault::topk(const_cast(*this), k, dim, largest, sorted); #else static auto table = globalATenDispatch().getOpTable("aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)"); - return table->getOp (const Tensor &, int64_t, int64_t, bool, bool)>(type_set(), is_variable())(const_cast(*this), k, dim, largest, sorted); + return table->getOp (const Tensor &, int64_t, int64_t, bool, bool)>(type_set())(const_cast(*this), k, dim, largest, sorted); #endif } inline Tensor Tensor::all() const { @@ -4893,7 +4893,7 @@ inline Tensor Tensor::all() const { return TypeDefault::all(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::all(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::any() const { @@ -4901,7 +4901,7 @@ inline Tensor Tensor::any() const { return TypeDefault::any(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::any(Tensor self) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } inline Tensor Tensor::renorm(Scalar p, int64_t dim, Scalar maxnorm) const { @@ -4915,7 +4915,7 @@ inline Tensor Tensor::renorm(Scalar p, int64_t dim, Scalar maxnorm) const { } #else static auto table = globalATenDispatch().getOpTable("aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), p, dim, maxnorm); + return table->getOp(type_set())(const_cast(*this), p, dim, maxnorm); #endif } inline Tensor Tensor::unfold(int64_t dimension, int64_t size, int64_t step) const { @@ -4929,7 +4929,7 @@ inline Tensor Tensor::unfold(int64_t dimension, int64_t size, int64_t step) cons } #else static auto table = globalATenDispatch().getOpTable("aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)"); - return table->getOp(type_set(), is_variable())(const_cast(*this), dimension, size, step); + return table->getOp(type_set())(const_cast(*this), dimension, size, step); #endif } inline bool Tensor::equal(const Tensor & other) const { @@ -4946,7 +4946,7 @@ inline bool Tensor::equal(const Tensor & other) const { } #else static auto table = globalATenDispatch().getOpTable("aten::equal(Tensor self, Tensor other) -> bool"); - return table->getOp(type_set(), is_variable())(const_cast(*this), other); + return table->getOp(type_set())(const_cast(*this), other); #endif } inline Tensor Tensor::pow(const Tensor & exponent) const { @@ -4960,7 +4960,7 @@ inline Tensor Tensor::pow(const Tensor & exponent) const { } #else static auto table = 
globalATenDispatch().getOpTable("aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor"); - return table->getOp(type_set(), is_variable())(const_cast(*this), exponent); + return table->getOp(type_set())(const_cast(*this), exponent); #endif } inline Tensor Tensor::alias() const { @@ -4968,7 +4968,7 @@ inline Tensor Tensor::alias() const { return TypeDefault::alias(const_cast(*this)); #else static auto table = globalATenDispatch().getOpTable("aten::alias(Tensor(a) self) -> Tensor(a)"); - return table->getOp(type_set(), is_variable())(const_cast(*this)); + return table->getOp(type_set())(const_cast(*this)); #endif } diff --git a/aten/src/ATen/core/dispatch/DispatchTable.h b/aten/src/ATen/core/dispatch/DispatchTable.h index d643ba0a9d7..e3ab0a5ef32 100644 --- a/aten/src/ATen/core/dispatch/DispatchTable.h +++ b/aten/src/ATen/core/dispatch/DispatchTable.h @@ -216,9 +216,11 @@ private: if (tensor_list.size() == 0) { throw std::runtime_error("Tried to dispatch operator " + operator_name + " based on an empty tensor list. When the first tensor argument of an operator is a tensor list, then it must not be empty."); } - return at::impl::dispatchTypeId(tensor_list[0].type_set()); + // TODO: Don't use legacy extractor; blocked on c10 understanding + // variable + return c10::legacyExtractTypeId(tensor_list[0].type_set()); } else { - return at::impl::dispatchTypeId(first_tensor_arg.unsafeToTensorImpl()->type_set()); + return c10::legacyExtractTypeId(first_tensor_arg.unsafeToTensorImpl()->type_set()); } } }; diff --git a/aten/src/ATen/function_wrapper.py b/aten/src/ATen/function_wrapper.py index 0da4e2b0c4c..b183e0f5757 100644 --- a/aten/src/ATen/function_wrapper.py +++ b/aten/src/ATen/function_wrapper.py @@ -113,10 +113,10 @@ ${return_type} ${Type}::${api_name}(${type_method_formals}) { """) DEFAULT_FUNCTION_REGISTRATION = CodeTemplate("""\ -.registerOp<${return_type} (${formals_types})>(Backend::Undefined, "${schema_string}", &TypeDefault::${api_name}) +.registerOp<${return_type} (${formals_types})>(TensorTypeId::UndefinedTensorId, "${schema_string}", &TypeDefault::${api_name}) """) BACKEND_FUNCTION_REGISTRATION = CodeTemplate("""\ -.registerOp<${return_type} (${formals_types})>(Backend::${Backend}, "${schema_string}", &${Type}::${api_name}) +.registerOp<${return_type} (${formals_types})>(TensorTypeId::${Backend}TensorId, "${schema_string}", &${Type}::${api_name}) """) # Generate a file that lists all functions and their schema string. 
Used for XLA @@ -136,7 +136,7 @@ inline ${return_type} Tensor::${api_name}(${method_formals}) const { ${static_dispatch_method_body} #else static auto table = globalATenDispatch().getOpTable("${schema_string}"); - return table->getOp<${return_type} (${formals_types})>(type_set(), is_variable())(${method_actuals}); + return table->getOp<${return_type} (${formals_types})>(type_set())(${method_actuals}); #endif } """) @@ -155,7 +155,7 @@ static inline ${return_type} ${api_name}(${formals}) { ${static_dispatch_function_body} #else static auto table = globalATenDispatch().getOpTable("${schema_string}"); - return table->getOp<${return_type} (${formals_types})>(${inferred_type_set}, ${inferred_is_variable})(${native_actuals}); + return table->getOp<${return_type} (${formals_types})>(${inferred_type_set})(${native_actuals}); #endif } """) @@ -191,7 +191,7 @@ static inline ${return_type} ${api_name}(${formals}) { #else globalLegacyTypeDispatch().initForTensorTypeSet(${inferred_type_set}); static auto table = globalATenDispatch().getOpTable("${schema_string}"); - return table->getOp<${return_type} (${formals_types})>(${inferred_type_set}, ${inferred_is_variable})(${native_actuals}); + return table->getOp<${return_type} (${formals_types})>(${inferred_type_set})(${native_actuals}); #endif } """) @@ -552,7 +552,6 @@ FunctionOption = TypedDict('FunctionOption', { 'formals': List[str], 'formals_types': List[str], 'inferred_type_set': str, - 'inferred_is_variable': str, 'inplace': bool, 'matches_jit_signature': bool, # This controls whether or not we generate the interface in Type or @@ -1095,6 +1094,18 @@ def create_generic(top_env, declarations): # type: (Any) -> FunctionCode if isinstance(type_method_dispatch, dict): static_dispatch_function_switches = [] + # NB: As this code is currently written, there will NEVER be + # a backend generated for variable dispatch. There is nothing + # stopping us from actually implementing this, however, if you + # really wanted variable on mobile, there's nothing stopping + # you from implementing this (however, you would have an + # annoying phase problem, since code generation for variable + # happens in tools/ which happens later than here.) + # + # If you pass in a variable to the dispatch, and variable is + # enabled, this switch will fail. This is intentional: you + # probably need to disable variable globally in the mobile + # calling code. for backend in static_dispatch_backends: if backend in type_method_dispatch: static_dispatch_function_switches.append(STATIC_DISPATCH_FUNCTION_SWITCH_STATEMENT.substitute( @@ -1104,10 +1115,6 @@ def create_generic(top_env, declarations): native_arguments=option['method_actuals'])) static_dispatch_method_body = STATIC_DISPATCH_FUNCTION_SWITCH_BODY.substitute( option, - # TODO: When Variable gets added, this needs to get adjusted - # to avoid picking up the Variable bit. The correct way - # to encode this is probably to just have Variable in the - # disabled set. 
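On the registration side, the net effect of the template changes above is that every kernel, including the TypeDefault catch-all, is keyed by a TensorTypeId instead of a Backend. A minimal sketch of what the emitted registrations for one schema could expand to; the first entry mirrors DEFAULT_FUNCTION_REGISTRATION, while the CPU entry is hypothetical and only illustrates the backend form:

    // Illustrative expansion of the registration CodeTemplates for one schema.
    // The static-dispatch path bypasses the table entirely at the call site,
    // and (per the NB above) no Variable kernel is ever generated for it.
    static auto& registerer = globalATenDispatch()
        .registerOp<Tensor (const Tensor &, int64_t)>(
            TensorTypeId::UndefinedTensorId,   // catch-all slot, used when no
                                               // backend-specific kernel exists
            "aten::triu(Tensor self, int diagonal=0) -> Tensor",
            &TypeDefault::triu)
        .registerOp<Tensor (const Tensor &, int64_t)>(
            TensorTypeId::CPUTensorId,         // was Backend::CPU before this change
            "aten::triu(Tensor self, int diagonal=0) -> Tensor",
            &CPUType::triu);                   // hypothetical backend kernel
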
type_set='type_set()', static_dispatch_function_switches=static_dispatch_function_switches) else: @@ -1122,15 +1129,12 @@ def create_generic(top_env, declarations): # type: (Any, Optional[str], Any) -> FunctionCode if dispatch_tensor: option['inferred_type_set'] = 'at::detail::infer_tensor_type_set({})'.format(dispatch_tensor) - option['inferred_is_variable'] = 'at::detail::infer_is_variable({})'.format(dispatch_tensor) elif dispatch_options: option['inferred_type_set'] = '{}.type_set()'.format(dispatch_options['name']) - option['inferred_is_variable'] = '{}.is_variable()'.format(dispatch_options['name']) else: # doesn't depend on a specific backend, use the empty set # TODO: Does this actually work? option['inferred_type_set'] = 'TensorTypeSet()' - option['inferred_is_variable'] = 'false' declaration = DEPRECATED_FUNCTION_DECLARATION if option['deprecated'] else FUNCTION_DECLARATION fn_declaration = declaration.substitute(option) diff --git a/aten/src/ATen/templates/TensorBody.h b/aten/src/ATen/templates/TensorBody.h index 51277be6dfb..03a272f12c8 100644 --- a/aten/src/ATen/templates/TensorBody.h +++ b/aten/src/ATen/templates/TensorBody.h @@ -218,10 +218,7 @@ class CAFFE2_API Tensor { DeprecatedTypeProperties & type() const { return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties( - // TODO: When we build in Variable here, we need to change the - // signature of getDeprecatedTypeProperties to collapse backend - // and is_variable into TensorTypeSet - tensorTypeIdToBackend(type_set().highestPriorityTypeId()), + tensorTypeIdToBackend(legacyExtractTypeId(type_set())), scalar_type(), is_variable()); } @@ -439,14 +436,6 @@ inline TensorTypeSet infer_tensor_type_set(TensorList tl) { return tl[0].type_set(); } -inline bool infer_is_variable(const Tensor & t) { - TORCH_CHECK(t.defined(), "undefined Tensor"); - return t.is_variable(); -} -inline bool infer_is_variable(const TensorList & tl) { - TORCH_CHECK(tl.size() > 0, "expected a non-empty list of Tensors"); - return tl[0].is_variable(); -} } // namespace detail static inline TensorTypeId legacyExtractTypeId(const Tensor& t) { diff --git a/c10/core/TensorImpl.cpp b/c10/core/TensorImpl.cpp index a7360c7bc3a..7f33d6a1fc8 100644 --- a/c10/core/TensorImpl.cpp +++ b/c10/core/TensorImpl.cpp @@ -2,6 +2,7 @@ #include #include +#include #include C10_DEFINE_bool( @@ -57,7 +58,7 @@ TensorImpl::TensorImpl(Storage&& storage, TensorTypeSet type_set, const caffe2:: numel_(0), data_type_(data_type), device_opt_(device_opt), - type_set_(type_set) { + type_set_(type_set.remove(TensorTypeId::VariableTensorId)) { if (!type_set.empty()) { AT_ASSERT(data_type.id() == caffe2::TypeIdentifier::uninitialized() || device_opt_.has_value()); @@ -210,52 +211,12 @@ int64_t NamedTensorMetaInterface::slow_dim() const { } #endif -/// NOTE [ Treating Variables as non-Variables in type dispatch ] -/// -/// Previously, in VariableType_*.cpp (generated by gen_variable_type.py), when -/// a function is using the 'use_derived' strategy, we call its implementation -/// on the base non-Variable type (`baseType`), passing unwrapped tensors to the -/// call so that any `.dispatch_type()` calls in the implementation can treat the passed -/// tensors as non-Variables and won't dispatch back to functions in VariableType. 
-/// -/// However, after the Variable/Tensor merge, there is no concept of unwrapping -/// a tensor anymore, and directly passing variables to the base type calls will -/// cause the `.dispatch_type()` dispatch in the implementation to treat the tensor as a -/// variable, and any function dispatch based on `.dispatch_type()` will dispatch back to -/// VariableType, which is not what we want. -/// -/// The solution to the above problem is to add `at::NonVariableTypeMode`, which -/// when enabled will cause `legacyTensorType()` and `getType()` to always return -/// non-Variable type, even if the tensor being called on is a variable. -/// -/// TODO: Since `torch::NoGradGuard` serves the same purpose in libtorch, we should -/// merge these two thread-local guards. - -/// In the CAFFE2_FB_LIMITED_MOBILE_CAPABILITY build setting, -/// thread_local is not supported. In that case, we don't provide -/// `at::NonVariableTypeMode`. -#ifndef CAFFE2_FB_LIMITED_MOBILE_CAPABILITY - -thread_local bool NonVariableTypeMode_enabled = false; - bool NonVariableTypeMode::is_enabled() { - return NonVariableTypeMode_enabled; + return !impl::tls_variable_is_enabled(); } void NonVariableTypeMode::set_enabled(bool enabled) { - NonVariableTypeMode_enabled = enabled; + impl::tls_variable_set_enabled(!enabled); } -#else // defined(CAFFE2_FB_LIMITED_MOBILE_CAPABILITY) - -bool NonVariableTypeMode::is_enabled() { - throw std::runtime_error("NonVariableTypeMode is not supported on mobile"); -} - -void NonVariableTypeMode::set_enabled(bool enabled) { - throw std::runtime_error("NonVariableTypeMode is not supported on mobile"); -} - -#endif - } // namespace c10 diff --git a/c10/core/TensorImpl.h b/c10/core/TensorImpl.h index 6d80b0a5164..40be8867d01 100644 --- a/c10/core/TensorImpl.h +++ b/c10/core/TensorImpl.h @@ -826,6 +826,11 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target { */ void set_autograd_meta(std::unique_ptr autograd_meta) { autograd_meta_ = std::move(autograd_meta); + if (autograd_meta_) { + type_set_ = type_set_.add(TensorTypeId::VariableTensorId); + } else { + type_set_ = type_set_.remove(TensorTypeId::VariableTensorId); + } } /** @@ -839,6 +844,7 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target { * Detach the autograd metadata unique_ptr from this tensor, and return it. */ std::unique_ptr detach_autograd_meta() { + type_set_ = type_set_.remove(TensorTypeId::VariableTensorId); return std::move(autograd_meta_); } @@ -1522,7 +1528,15 @@ protected: dest_impl->storage_offset_ = src_impl->storage_offset_; dest_impl->data_type_ = src_impl->data_type_; dest_impl->device_opt_ = src_impl->device_opt_; + // This may temporarily violate invariant that + // type_set_.has(VariableTensorId) iff autograd_meta_ != nullptr... dest_impl->type_set_ = src_impl->type_set_; + // ...so refresh Variable in autograd_meta_ + if (dest_impl->autograd_meta_) { + dest_impl->type_set_ = dest_impl->type_set_.add(TensorTypeId::VariableTensorId); + } else { + dest_impl->type_set_ = dest_impl->type_set_.remove(TensorTypeId::VariableTensorId); + } dest_impl->is_contiguous_ = src_impl->is_contiguous_; dest_impl->is_wrapped_number_ = src_impl->is_wrapped_number_; dest_impl->reserved_ = src_impl->reserved_; @@ -1543,12 +1557,17 @@ protected: static const char * const err_msg_tensor_metadata_change_not_allowed; Storage storage_; + +private: // This pointer points to an AutogradMeta struct that stores autograd-specific fields // (such as grad_ / grad_fn_ / grad_accumulator_). 
// This pointer always has unique ownership (meaning only one TensorImpl can own it // at a time). + // This is private because we must maintain dispatcher invariants on it + // in type_set_. std::unique_ptr autograd_meta_ = nullptr; +protected: #ifdef BUILD_NAMEDTENSOR std::unique_ptr named_tensor_meta_ = nullptr; #endif diff --git a/c10/core/TensorOptions.h b/c10/core/TensorOptions.h index 30e271579de..830260888a1 100644 --- a/c10/core/TensorOptions.h +++ b/c10/core/TensorOptions.h @@ -378,8 +378,9 @@ struct C10_API TensorOptions { // Resolves the tensor type set specified by the current construction axes. TensorTypeSet type_set() const noexcept { - // TODO: This should also contain variable eventually - return TensorTypeSet(computeTensorTypeId()); + auto r = TensorTypeSet(computeTensorTypeId()); + if (is_variable()) r = r.add(TensorTypeId::VariableTensorId); + return r; } inline TensorTypeId computeTensorTypeId() const { diff --git a/c10/core/TensorTypeId.cpp b/c10/core/TensorTypeId.cpp index 85628001880..1dd4476ae6c 100644 --- a/c10/core/TensorTypeId.cpp +++ b/c10/core/TensorTypeId.cpp @@ -38,6 +38,8 @@ const char* toString(TensorTypeId t) { return "ComplexCPUTensorId"; case TensorTypeId::ComplexCUDATensorId: return "ComplexCUDATensorId"; + case TensorTypeId::VariableTensorId: + return "VariableTensorId"; default: return "UNKNOWN_TENSOR_TYPE_ID"; } diff --git a/c10/core/TensorTypeId.h b/c10/core/TensorTypeId.h index ecff7e841bb..eb998812963 100644 --- a/c10/core/TensorTypeId.h +++ b/c10/core/TensorTypeId.h @@ -40,7 +40,12 @@ enum class TensorTypeId : uint8_t { ComplexCPUTensorId, // PyTorch only ComplexCUDATensorId, // PyTorch only - // VariableTensorId, // upcoming! + // WARNING! If you add more "wrapper" style tensor ids (tensor + // ids which don't get kernels directly defined in native_functions.yaml; + // examples are tracing or profiling) here, you need to also adjust + // legacyExtractTypeId in c10/core/TensorTypeId.h to mask them out. + + VariableTensorId, NumTensorIds, // Sentinel }; diff --git a/c10/core/TensorTypeSet.h b/c10/core/TensorTypeSet.h index 37b189954b2..f5ce1af70b5 100644 --- a/c10/core/TensorTypeSet.h +++ b/c10/core/TensorTypeSet.h @@ -33,7 +33,19 @@ namespace c10 { // An undefined tensor is one with an empty tensor type set. class TensorTypeSet final { public: - TensorTypeSet() {} + enum Full { FULL }; + enum Raw { RAW }; + + // NB: default constructor representation as zero is MANDATORY as + // use of TensorTypeSet in TLS requires this. + TensorTypeSet() + : repr_(0) {} + TensorTypeSet(Full) + : repr_(-1) {} + // Public version of TensorTypeSet(uint64_t) API; external users + // must be explicit when they do this! + TensorTypeSet(Raw, uint64_t x) + : repr_(x) {} explicit TensorTypeSet(TensorTypeId t) : repr_(t == TensorTypeId::UndefinedTensorId ? 0 @@ -47,6 +59,14 @@ public: TensorTypeSet operator|(TensorTypeSet other) const { return TensorTypeSet(repr_ | other.repr_); } + // Perform set intersection + TensorTypeSet operator&(TensorTypeSet other) const { + return TensorTypeSet(repr_ & other.repr_); + } + // Compute the set difference self - other + TensorTypeSet operator-(TensorTypeSet other) const { + return TensorTypeSet(repr_ & ~other.repr_); + } // Perform set equality bool operator==(TensorTypeSet other) const { return repr_ == other.repr_; @@ -66,6 +86,7 @@ public: bool empty() const { return repr_ == 0; } + uint64_t raw_repr() { return repr_; } // Return the type id in this set with the highest priority (i.e., // is the largest in the TensorTypeId enum). 
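The new FULL and RAW constructors together with operator& and operator- give TensorTypeSet just enough set algebra for the thread-local masking that follows. A small usage sketch, with ids chosen only for illustration:

    // TensorTypeSet is a bitset of TensorTypeIds.
    TensorTypeSet ts(TensorTypeId::CPUTensorId);
    ts = ts.add(TensorTypeId::VariableTensorId);      // a CPU tensor that is a Variable

    TensorTypeSet excluded(TensorTypeId::VariableTensorId);
    TensorTypeSet effective = ts - excluded;          // set difference drops the Variable bit
    // effective.highestPriorityTypeId() == TensorTypeId::CPUTensorId,
    // which is also what the reworked legacyExtractTypeId below returns for ts.

    TensorTypeSet everything(TensorTypeSet::FULL);    // all bits set
    // everything.has(TensorTypeId::VariableTensorId) == true
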
Intuitively, this // type id is the one that should handle dispatch (assuming there @@ -98,10 +119,10 @@ C10_API std::ostream& operator<<(std::ostream&, TensorTypeSet); // but s.has(VariableTensorId) will evaluate to true if s has VariableTensorId. // For non-VariableTensorId equality tests, they are indistinguishable. // -// TODO: this will need to change when we add VariableTensorId to the -// set of IDs put in TensorTypeSet. +// NB: If you add other non-VariableTensorId other keys to this set, you'll +// have to adjust this some more (sorry.) static inline TensorTypeId legacyExtractTypeId(TensorTypeSet s) { - return s.highestPriorityTypeId(); + return s.remove(TensorTypeId::VariableTensorId).highestPriorityTypeId(); } } diff --git a/c10/core/impl/LocalTensorTypeSet.cpp b/c10/core/impl/LocalTensorTypeSet.cpp new file mode 100644 index 00000000000..9f4de688009 --- /dev/null +++ b/c10/core/impl/LocalTensorTypeSet.cpp @@ -0,0 +1,42 @@ +#include + +#include + +namespace c10 { +namespace impl { + +namespace { + +/// In the CAFFE2_FB_LIMITED_MOBILE_CAPABILITY build setting, +/// thread_local is not supported. In that case, we don't provide +/// `at::NonVariableTypeMode`. +#ifndef CAFFE2_FB_LIMITED_MOBILE_CAPABILITY + +// NB: Zero initialized! +thread_local uint64_t raw_excluded; + +#else // defined(CAFFE2_FB_LIMITED_MOBILE_CAPABILITY) + +uint64_t raw_excluded = 0; + +#endif + +} + +TensorTypeSet tls_excluded_tensor_type_set() { + return TensorTypeSet(TensorTypeSet::RAW, raw_excluded); +} + +bool tls_variable_is_enabled() { + return !tls_excluded_tensor_type_set().has(TensorTypeId::VariableTensorId); +} + +void tls_variable_set_enabled(bool enabled) { + if (enabled) { + raw_excluded = tls_excluded_tensor_type_set().remove(TensorTypeId::VariableTensorId).raw_repr(); + } else { + raw_excluded = tls_excluded_tensor_type_set().add(TensorTypeId::VariableTensorId).raw_repr(); + } +} + +}} // namespace c10::impl diff --git a/c10/core/impl/LocalTensorTypeSet.h b/c10/core/impl/LocalTensorTypeSet.h new file mode 100644 index 00000000000..b049dbaa868 --- /dev/null +++ b/c10/core/impl/LocalTensorTypeSet.h @@ -0,0 +1,22 @@ +#include + +// TLS management for TensorTypeSet +// +// This manages thread-local TensorTypeSet of excluded keys which disqualify +// tensor types from dispatch. Keys which are in this set, even if they appear +// in a list of potential valid keys on a tensor, are not considered for +// dispatch. This is used to, for example, turn off autograd after we have +// handled autograd for a top-level element. +// +// Originally, I implemented this as storing the inverted set, but +// TLS is defined to be zero-initialized, so this doesn't actually work +// (you want the set to be -1 initialized). 
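Putting the pieces together, the intended dispatch flow for a Variable CPU tensor is sketched below; the set-up is contrived, but the calls are the ones added or reworked in this patch (NonVariableTypeMode is the guard whose new implementation appears in TensorImpl.cpp above):

    // A Variable CPU tensor carries both ids in its type_set().
    TensorTypeSet ts = TensorTypeSet(TensorTypeId::CPUTensorId)
                           .add(TensorTypeId::VariableTensorId);

    // Keys in the TLS excluded set are never considered for dispatch, so with an
    // empty excluded set the highest-priority id, VariableTensorId, wins and the
    // kernel registered by gen_variable_type.py handles the call.
    auto k1 = at::impl::dispatchTypeId(ts);    // == TensorTypeId::VariableTensorId

    // NonVariableTypeMode now just toggles VariableTensorId in the excluded set
    // via tls_variable_set_enabled(), so inside the guard the same tensor
    // dispatches straight to the backend kernel.
    NonVariableTypeMode::set_enabled(true);
    auto k2 = at::impl::dispatchTypeId(ts);    // == TensorTypeId::CPUTensorId
    NonVariableTypeMode::set_enabled(false);
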
+
+namespace c10 {
+namespace impl {
+
+C10_API bool tls_variable_is_enabled();
+C10_API void tls_variable_set_enabled(bool enabled);
+C10_API TensorTypeSet tls_excluded_tensor_type_set();
+
+}} // namespace c10::impl
diff --git a/c10/test/core/TensorTypeSet_test.cpp b/c10/test/core/TensorTypeSet_test.cpp
index 86e0b7043c9..507707aa15a 100644
--- a/c10/test/core/TensorTypeSet_test.cpp
+++ b/c10/test/core/TensorTypeSet_test.cpp
@@ -45,3 +45,11 @@ TEST(TensorTypeSet, Doubleton) {
     }
   }
 }
+
+TEST(TensorTypeSet, Full) {
+  TensorTypeSet full(TensorTypeSet::FULL);
+  for (uint8_t i = 1; i < static_cast<uint8_t>(TensorTypeId::NumTensorIds); i++) {
+    auto tid = static_cast<TensorTypeId>(i);
+    ASSERT_TRUE(full.has(tid));
+  }
+}
diff --git a/tools/autograd/gen_variable_type.py b/tools/autograd/gen_variable_type.py
index 0081457599e..5c0fa49d1df 100644
--- a/tools/autograd/gen_variable_type.py
+++ b/tools/autograd/gen_variable_type.py
@@ -153,7 +153,7 @@ ${return_type} VariableType::${api_name}(${type_method_formals}) {
 """)
 
 WRAPPER_REGISTRATION = CodeTemplate("""\
-.registerVariableOp<${return_type} (${formal_types})>("${schema_string}", &VariableType::${api_name})
+.registerOp<${return_type} (${formal_types})>(TensorTypeId::VariableTensorId, "${schema_string}", &VariableType::${api_name})
 """)
 
 UNPACK_TENSOR = CodeTemplate("""\