Add a lot of dimname overloads (#26636)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/26636

This PR defines a lot of dimname overloads so that when named tensor
support is added for those operators, we will not have to modify the
autogenerated TensorMethods.h, thereby avoiding potential merge
conflicts in the future.

Overloads were added for the following:
- all
- any
- argmax
- argmin
- cumsum
- cumprod
- index_copy
- kthvalue
- mode
- permute
- squeeze
- index_add
- index_fill
- scatter
- scatter_add
- index_select
- gather
- sort
- argsort

Test Plan: - [namedtensor ci]

Differential Revision: D17522984

Pulled By: zou3519

fbshipit-source-id: eca6dea819ba4e4e43b71b700d5cf09176f00061
This commit is contained in:
Richard Zou 2019-09-24 16:59:56 -07:00 committed by Facebook Github Bot
parent 67bde6b724
commit 925e51ea7f
9 changed files with 571 additions and 1 deletions

View File

@ -32,6 +32,14 @@ CAFFE2_API std::vector<int64_t> dimnames_to_positions(const Tensor& tensor, Dimn
CAFFE2_API std::vector<Dimname>
unify_from_right(DimnameList names, DimnameList other, const char* action = "broadcast");
// Error helper for ops whose Dimname (named-dimension) overload is declared but
// not yet implemented. Always throws via TORCH_CHECK(false, ...) — hence
// [[noreturn]] — asking the caller to pass an integer dim index instead.
[[noreturn]] inline void reportNYIDimnameOverload(const char* op_name) {
TORCH_CHECK(
false,
op_name, ": You passed a dimname (string) to this op in place of a dimension "
"index but it does not yet support this behavior. Please pass a dimension "
"index to work around this.");
}
namespace namedinference {
// Names get propagated via the following rules:

View File

@ -784,7 +784,19 @@ bool aten_op_is_not_moved_to_c10_yet(const c10::OperatorName& opName) {
{"aten::addmv", "out"},
{"aten::addr", "out"},
{"aten::all", "out"},
#ifdef BUILD_NAMEDTENSOR
{"aten::all", "dimname"},
#endif
#ifdef BUILD_NAMEDTENSOR
{"aten::all", "dimname_out"},
#endif
{"aten::any", "out"},
#ifdef BUILD_NAMEDTENSOR
{"aten::any", "dimname"},
#endif
#ifdef BUILD_NAMEDTENSOR
{"aten::any", "dimname_out"},
#endif
{"aten::arange", ""},
{"aten::arange", "start"},
{"aten::arange", "start_step"},
@ -845,8 +857,20 @@ bool aten_op_is_not_moved_to_c10_yet(const c10::OperatorName& opName) {
{"aten::cudnn_convolution_transpose", ""},
{"aten::cumsum", ""},
{"aten::cumsum", "out"},
#ifdef BUILD_NAMEDTENSOR
{"aten::cumsum", "dimname"},
#endif
#ifdef BUILD_NAMEDTENSOR
{"aten::cumsum", "dimname_out"},
#endif
{"aten::cumprod", ""},
{"aten::cumprod", "out"},
#ifdef BUILD_NAMEDTENSOR
{"aten::cumprod", "dimname"},
#endif
#ifdef BUILD_NAMEDTENSOR
{"aten::cumprod", "dimname_out"},
#endif
{"aten::div", "out"},
{"aten::dot", "out"},
{"aten::embedding_bag", ""},
@ -902,12 +926,24 @@ bool aten_op_is_not_moved_to_c10_yet(const c10::OperatorName& opName) {
{"aten::_cufft_set_plan_cache_max_size", ""},
{"aten::_cufft_clear_plan_cache", ""},
{"aten::index", "Tensor"},
#ifdef BUILD_NAMEDTENSOR
{"aten::index_copy_", "dimname"},
#endif
#ifdef BUILD_NAMEDTENSOR
{"aten::index_copy", "dimname"},
#endif
{"aten::index_put_", ""},
{"aten::index_put", ""},
{"aten::_index_put_impl_", ""},
{"aten::instance_norm", ""},
{"aten::inverse", "out"},
{"aten::kthvalue", "values"},
#ifdef BUILD_NAMEDTENSOR
{"aten::kthvalue", "dimname"},
#endif
#ifdef BUILD_NAMEDTENSOR
{"aten::kthvalue", "dimname_out"},
#endif
{"aten::layer_norm", ""},
{"aten::native_layer_norm", ""},
{"aten::native_layer_norm_backward", ""},
@ -980,6 +1016,12 @@ bool aten_op_is_not_moved_to_c10_yet(const c10::OperatorName& opName) {
{"aten::miopen_rnn_backward", ""},
{"aten::mm", "out"},
{"aten::mode", "values"},
#ifdef BUILD_NAMEDTENSOR
{"aten::mode", "dimname"},
#endif
#ifdef BUILD_NAMEDTENSOR
{"aten::mode", "dimname_out"},
#endif
{"aten::mul", "out"},
{"aten::mv", "out"},
{"aten::native_batch_norm", ""},
@ -1055,6 +1097,12 @@ bool aten_op_is_not_moved_to_c10_yet(const c10::OperatorName& opName) {
{"aten::softmax", ""},
#ifdef BUILD_NAMEDTENSOR
{"aten::softmax", ""},
#endif
#ifdef BUILD_NAMEDTENSOR
{"aten::squeeze", "dimname"},
#endif
#ifdef BUILD_NAMEDTENSOR
{"aten::squeeze_", "dimname"},
#endif
{"aten::sspaddmm", "out"},
{"aten::stack", "out"},
@ -1169,6 +1217,24 @@ bool aten_op_is_not_moved_to_c10_yet(const c10::OperatorName& opName) {
{"aten::set_", "source_Storage"},
{"aten::set_", "source_Storage_storage_offset"},
{"aten::set_quantizer_", ""},
#ifdef BUILD_NAMEDTENSOR
{"aten::index_add", "dimname"},
#endif
#ifdef BUILD_NAMEDTENSOR
{"aten::index_fill", "dimname_Scalar"},
#endif
#ifdef BUILD_NAMEDTENSOR
{"aten::index_fill", "dimname_Tensor"},
#endif
#ifdef BUILD_NAMEDTENSOR
{"aten::scatter", "dimname_src"},
#endif
#ifdef BUILD_NAMEDTENSOR
{"aten::scatter", "dimname_value"},
#endif
#ifdef BUILD_NAMEDTENSOR
{"aten::scatter_add", "dimname"},
#endif
{"aten::addbmm", "out"},
{"aten::random_", "from"},
{"aten::random_", "to"},
@ -1199,9 +1265,21 @@ bool aten_op_is_not_moved_to_c10_yet(const c10::OperatorName& opName) {
{"aten::lt", "Tensor_out"},
{"aten::take", "out"},
{"aten::index_select", "out"},
#ifdef BUILD_NAMEDTENSOR
{"aten::index_select", "dimname_out"},
#endif
#ifdef BUILD_NAMEDTENSOR
{"aten::index_select", "dimname"},
#endif
{"aten::masked_select", "out"},
{"aten::nonzero", "out"},
{"aten::gather", "out"},
#ifdef BUILD_NAMEDTENSOR
{"aten::gather", "dimname_out"},
#endif
#ifdef BUILD_NAMEDTENSOR
{"aten::gather", "dimname"},
#endif
{"aten::addcmul", "out"},
{"aten::addcdiv", "out"},
{"aten::lstsq", "X"},
@ -1237,6 +1315,15 @@ bool aten_op_is_not_moved_to_c10_yet(const c10::OperatorName& opName) {
{"aten::min", "out"},
{"aten::max", "out"},
{"aten::sort", "values"},
#ifdef BUILD_NAMEDTENSOR
{"aten::sort", "dimname_values"},
#endif
#ifdef BUILD_NAMEDTENSOR
{"aten::sort", "dimname"},
#endif
#ifdef BUILD_NAMEDTENSOR
{"aten::argsort", "dimname"},
#endif
{"aten::topk", "values"},
{"aten::renorm", "out"},
{"aten::pow", "Tensor_Tensor_out"},

View File

@ -438,8 +438,14 @@ class CAFFE2_API Tensor {
Tensor addr(const Tensor & vec1, const Tensor & vec2, Scalar beta=1, Scalar alpha=1) const;
Tensor & addr_(const Tensor & vec1, const Tensor & vec2, Scalar beta=1, Scalar alpha=1) const;
Tensor all(int64_t dim, bool keepdim=false) const;
#ifdef BUILD_NAMEDTENSOR
Tensor all(Dimname dim, bool keepdim=false) const;
#endif
bool allclose(const Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false) const;
Tensor any(int64_t dim, bool keepdim=false) const;
#ifdef BUILD_NAMEDTENSOR
Tensor any(Dimname dim, bool keepdim=false) const;
#endif
Tensor argmax(c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false) const;
Tensor argmin(c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false) const;
Tensor as_strided(IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) const;
@ -478,7 +484,13 @@ class CAFFE2_API Tensor {
Tensor cosh() const;
Tensor & cosh_() const;
Tensor cumsum(int64_t dim, c10::optional<ScalarType> dtype=c10::nullopt) const;
#ifdef BUILD_NAMEDTENSOR
Tensor cumsum(Dimname dim, c10::optional<ScalarType> dtype=c10::nullopt) const;
#endif
Tensor cumprod(int64_t dim, c10::optional<ScalarType> dtype=c10::nullopt) const;
#ifdef BUILD_NAMEDTENSOR
Tensor cumprod(Dimname dim, c10::optional<ScalarType> dtype=c10::nullopt) const;
#endif
Tensor det() const;
Tensor diag_embed(int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1) const;
Tensor diagflat(int64_t offset=0) const;
@ -526,6 +538,12 @@ class CAFFE2_API Tensor {
Tensor index(TensorList indices) const;
Tensor & index_copy_(int64_t dim, const Tensor & index, const Tensor & source) const;
Tensor index_copy(int64_t dim, const Tensor & index, const Tensor & source) const;
#ifdef BUILD_NAMEDTENSOR
Tensor & index_copy_(Dimname dim, const Tensor & index, const Tensor & source) const;
#endif
#ifdef BUILD_NAMEDTENSOR
Tensor index_copy(Dimname dim, const Tensor & index, const Tensor & source) const;
#endif
Tensor & index_put_(TensorList indices, const Tensor & values, bool accumulate=false) const;
Tensor index_put(TensorList indices, const Tensor & values, bool accumulate=false) const;
Tensor inverse() const;
@ -537,6 +555,9 @@ class CAFFE2_API Tensor {
bool is_same_size(const Tensor & other) const;
bool is_signed() const;
std::tuple<Tensor,Tensor> kthvalue(int64_t k, int64_t dim=-1, bool keepdim=false) const;
#ifdef BUILD_NAMEDTENSOR
std::tuple<Tensor,Tensor> kthvalue(int64_t k, Dimname dim, bool keepdim=false) const;
#endif
Tensor log() const;
Tensor & log_() const;
Tensor log10() const;
@ -583,6 +604,9 @@ class CAFFE2_API Tensor {
#endif
Tensor mm(const Tensor & mat2) const;
std::tuple<Tensor,Tensor> mode(int64_t dim=-1, bool keepdim=false) const;
#ifdef BUILD_NAMEDTENSOR
std::tuple<Tensor,Tensor> mode(Dimname dim, bool keepdim=false) const;
#endif
Tensor mul(const Tensor & other) const;
Tensor & mul_(const Tensor & other) const;
Tensor mul(Scalar other) const;
@ -643,8 +667,14 @@ class CAFFE2_API Tensor {
std::vector<Tensor> split_with_sizes(IntArrayRef split_sizes, int64_t dim=0) const;
Tensor squeeze() const;
Tensor squeeze(int64_t dim) const;
#ifdef BUILD_NAMEDTENSOR
Tensor squeeze(Dimname dim) const;
#endif
Tensor & squeeze_() const;
Tensor & squeeze_(int64_t dim) const;
#ifdef BUILD_NAMEDTENSOR
Tensor & squeeze_(Dimname dim) const;
#endif
Tensor sspaddmm(const Tensor & mat1, const Tensor & mat2, Scalar beta=1, Scalar alpha=1) const;
Tensor stft(int64_t n_fft, c10::optional<int64_t> hop_length=c10::nullopt, c10::optional<int64_t> win_length=c10::nullopt, const Tensor & window={}, bool normalized=false, bool onesided=true) const;
int64_t stride(int64_t dim) const;
@ -768,16 +798,34 @@ class CAFFE2_API Tensor {
Tensor & put_(const Tensor & index, const Tensor & source, bool accumulate=false) const;
Tensor & index_add_(int64_t dim, const Tensor & index, const Tensor & source) const;
Tensor index_add(int64_t dim, const Tensor & index, const Tensor & source) const;
#ifdef BUILD_NAMEDTENSOR
Tensor index_add(Dimname dim, const Tensor & index, const Tensor & source) const;
#endif
Tensor & index_fill_(int64_t dim, const Tensor & index, Scalar value) const;
Tensor index_fill(int64_t dim, const Tensor & index, Scalar value) const;
Tensor & index_fill_(int64_t dim, const Tensor & index, const Tensor & value) const;
Tensor index_fill(int64_t dim, const Tensor & index, const Tensor & value) const;
#ifdef BUILD_NAMEDTENSOR
Tensor index_fill(Dimname dim, const Tensor & index, Scalar value) const;
#endif
#ifdef BUILD_NAMEDTENSOR
Tensor index_fill(Dimname dim, const Tensor & index, const Tensor & value) const;
#endif
Tensor & scatter_(int64_t dim, const Tensor & index, const Tensor & src) const;
Tensor scatter(int64_t dim, const Tensor & index, const Tensor & src) const;
Tensor & scatter_(int64_t dim, const Tensor & index, Scalar value) const;
Tensor scatter(int64_t dim, const Tensor & index, Scalar value) const;
#ifdef BUILD_NAMEDTENSOR
Tensor scatter(Dimname dim, const Tensor & index, const Tensor & src) const;
#endif
#ifdef BUILD_NAMEDTENSOR
Tensor scatter(Dimname dim, const Tensor & index, Scalar value) const;
#endif
Tensor & scatter_add_(int64_t dim, const Tensor & index, const Tensor & src) const;
Tensor scatter_add(int64_t dim, const Tensor & index, const Tensor & src) const;
#ifdef BUILD_NAMEDTENSOR
Tensor scatter_add(Dimname dim, const Tensor & index, const Tensor & src) const;
#endif
Tensor & lt_(Scalar other) const;
Tensor & lt_(const Tensor & other) const;
Tensor & gt_(Scalar other) const;
@ -856,10 +904,16 @@ class CAFFE2_API Tensor {
Tensor lt(const Tensor & other) const;
Tensor take(const Tensor & index) const;
Tensor index_select(int64_t dim, const Tensor & index) const;
#ifdef BUILD_NAMEDTENSOR
Tensor index_select(Dimname dim, const Tensor & index) const;
#endif
Tensor masked_select(const Tensor & mask) const;
Tensor nonzero() const;
std::vector<Tensor> nonzero_numpy() const;
Tensor gather(int64_t dim, const Tensor & index, bool sparse_grad=false) const;
#ifdef BUILD_NAMEDTENSOR
Tensor gather(Dimname dim, const Tensor & index, bool sparse_grad=false) const;
#endif
Tensor addcmul(const Tensor & tensor1, const Tensor & tensor2, Scalar value=1) const;
Tensor & addcmul_(const Tensor & tensor1, const Tensor & tensor2, Scalar value=1) const;
Tensor addcdiv(const Tensor & tensor1, const Tensor & tensor2, Scalar value=1) const;
@ -900,7 +954,13 @@ class CAFFE2_API Tensor {
Tensor max() const;
Tensor median() const;
std::tuple<Tensor,Tensor> sort(int64_t dim=-1, bool descending=false) const;
#ifdef BUILD_NAMEDTENSOR
std::tuple<Tensor,Tensor> sort(Dimname dim, bool descending=false) const;
#endif
Tensor argsort(int64_t dim=-1, bool descending=false) const;
#ifdef BUILD_NAMEDTENSOR
Tensor argsort(Dimname dim, bool descending=false) const;
#endif
std::tuple<Tensor,Tensor> topk(int64_t k, int64_t dim=-1, bool largest=true, bool sorted=true) const;
Tensor all() const;
Tensor any() const;

View File

@ -361,6 +361,16 @@ inline Tensor Tensor::all(int64_t dim, bool keepdim) const {
op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(*this)), const_cast<Tensor&>(*this), dim, keepdim);
#endif
}
#ifdef BUILD_NAMEDTENSOR
// Dimname overload: static dispatch calls TypeDefault directly; otherwise the
// "aten::all.dimname" schema is resolved through the global dispatch table.
inline Tensor Tensor::all(Dimname dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::all(const_cast<Tensor&>(*this), dim, keepdim);
#else
static auto table = globalATenDispatch().getOpTable("aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor");
return table->getOp<Tensor (const Tensor &, Dimname, bool)>(at::detail::multi_dispatch_tensor_type_set(*this))(const_cast<Tensor&>(*this), dim, keepdim);
#endif
}
#endif
inline bool Tensor::allclose(const Tensor & other, double rtol, double atol, bool equal_nan) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::allclose(const_cast<Tensor&>(*this), other, rtol, atol, equal_nan);
@ -379,6 +389,16 @@ inline Tensor Tensor::any(int64_t dim, bool keepdim) const {
op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(*this)), const_cast<Tensor&>(*this), dim, keepdim);
#endif
}
#ifdef BUILD_NAMEDTENSOR
// Dimname overload: dispatches the "aten::any.dimname" schema (TypeDefault
// under static dispatch).
inline Tensor Tensor::any(Dimname dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::any(const_cast<Tensor&>(*this), dim, keepdim);
#else
static auto table = globalATenDispatch().getOpTable("aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor");
return table->getOp<Tensor (const Tensor &, Dimname, bool)>(at::detail::multi_dispatch_tensor_type_set(*this))(const_cast<Tensor&>(*this), dim, keepdim);
#endif
}
#endif
inline Tensor Tensor::argmax(c10::optional<int64_t> dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::argmax(const_cast<Tensor&>(*this), dim, keepdim);
@ -801,6 +821,16 @@ inline Tensor Tensor::cumsum(int64_t dim, c10::optional<ScalarType> dtype) const
return table->getOp<Tensor (const Tensor &, int64_t, c10::optional<ScalarType>)>(at::detail::multi_dispatch_tensor_type_set(*this))(const_cast<Tensor&>(*this), dim, dtype);
#endif
}
#ifdef BUILD_NAMEDTENSOR
// Dimname overload: dispatches the "aten::cumsum.dimname" schema (TypeDefault
// under static dispatch).
inline Tensor Tensor::cumsum(Dimname dim, c10::optional<ScalarType> dtype) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::cumsum(const_cast<Tensor&>(*this), dim, dtype);
#else
static auto table = globalATenDispatch().getOpTable("aten::cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor");
return table->getOp<Tensor (const Tensor &, Dimname, c10::optional<ScalarType>)>(at::detail::multi_dispatch_tensor_type_set(*this))(const_cast<Tensor&>(*this), dim, dtype);
#endif
}
#endif
inline Tensor Tensor::cumprod(int64_t dim, c10::optional<ScalarType> dtype) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::cumprod(const_cast<Tensor&>(*this), dim, dtype);
@ -809,6 +839,16 @@ inline Tensor Tensor::cumprod(int64_t dim, c10::optional<ScalarType> dtype) cons
return table->getOp<Tensor (const Tensor &, int64_t, c10::optional<ScalarType>)>(at::detail::multi_dispatch_tensor_type_set(*this))(const_cast<Tensor&>(*this), dim, dtype);
#endif
}
#ifdef BUILD_NAMEDTENSOR
// Dimname overload: dispatches the "aten::cumprod.dimname" schema (TypeDefault
// under static dispatch).
inline Tensor Tensor::cumprod(Dimname dim, c10::optional<ScalarType> dtype) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::cumprod(const_cast<Tensor&>(*this), dim, dtype);
#else
static auto table = globalATenDispatch().getOpTable("aten::cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor");
return table->getOp<Tensor (const Tensor &, Dimname, c10::optional<ScalarType>)>(at::detail::multi_dispatch_tensor_type_set(*this))(const_cast<Tensor&>(*this), dim, dtype);
#endif
}
#endif
inline Tensor Tensor::det() const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::det(const_cast<Tensor&>(*this));
@ -1247,6 +1287,26 @@ inline Tensor Tensor::index_copy(int64_t dim, const Tensor & index, const Tensor
op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(*this, index, source)), const_cast<Tensor&>(*this), dim, index, source);
#endif
}
#ifdef BUILD_NAMEDTENSOR
// In-place Dimname overload (note Tensor& return): dispatches the
// "aten::index_copy_.dimname" schema (TypeDefault under static dispatch).
inline Tensor & Tensor::index_copy_(Dimname dim, const Tensor & index, const Tensor & source) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::index_copy_(const_cast<Tensor&>(*this), dim, index, source);
#else
static auto table = globalATenDispatch().getOpTable("aten::index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)");
return table->getOp<Tensor & (Tensor &, Dimname, const Tensor &, const Tensor &)>(at::detail::multi_dispatch_tensor_type_set(*this, index, source))(const_cast<Tensor&>(*this), dim, index, source);
#endif
}
#endif
#ifdef BUILD_NAMEDTENSOR
// Out-of-place Dimname overload: dispatches the "aten::index_copy.dimname"
// schema (TypeDefault under static dispatch).
inline Tensor Tensor::index_copy(Dimname dim, const Tensor & index, const Tensor & source) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::index_copy(const_cast<Tensor&>(*this), dim, index, source);
#else
static auto table = globalATenDispatch().getOpTable("aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor");
return table->getOp<Tensor (const Tensor &, Dimname, const Tensor &, const Tensor &)>(at::detail::multi_dispatch_tensor_type_set(*this, index, source))(const_cast<Tensor&>(*this), dim, index, source);
#endif
}
#endif
inline Tensor & Tensor::index_put_(TensorList indices, const Tensor & values, bool accumulate) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::index_put_(const_cast<Tensor&>(*this), indices, values, accumulate);
@ -1344,6 +1404,16 @@ inline std::tuple<Tensor,Tensor> Tensor::kthvalue(int64_t k, int64_t dim, bool k
op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(*this)), const_cast<Tensor&>(*this), k, dim, keepdim);
#endif
}
#ifdef BUILD_NAMEDTENSOR
// Dimname overload returning (values, indices): dispatches the
// "aten::kthvalue.dimname" schema (TypeDefault under static dispatch).
inline std::tuple<Tensor,Tensor> Tensor::kthvalue(int64_t k, Dimname dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::kthvalue(const_cast<Tensor&>(*this), k, dim, keepdim);
#else
static auto table = globalATenDispatch().getOpTable("aten::kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)");
return table->getOp<std::tuple<Tensor,Tensor> (const Tensor &, int64_t, Dimname, bool)>(at::detail::multi_dispatch_tensor_type_set(*this))(const_cast<Tensor&>(*this), k, dim, keepdim);
#endif
}
#endif
inline Tensor Tensor::log() const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::log(const_cast<Tensor&>(*this));
@ -1679,6 +1749,16 @@ inline std::tuple<Tensor,Tensor> Tensor::mode(int64_t dim, bool keepdim) const {
op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(*this)), const_cast<Tensor&>(*this), dim, keepdim);
#endif
}
#ifdef BUILD_NAMEDTENSOR
// Dimname overload returning (values, indices): dispatches the
// "aten::mode.dimname" schema (TypeDefault under static dispatch).
inline std::tuple<Tensor,Tensor> Tensor::mode(Dimname dim, bool keepdim) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::mode(const_cast<Tensor&>(*this), dim, keepdim);
#else
static auto table = globalATenDispatch().getOpTable("aten::mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)");
return table->getOp<std::tuple<Tensor,Tensor> (const Tensor &, Dimname, bool)>(at::detail::multi_dispatch_tensor_type_set(*this))(const_cast<Tensor&>(*this), dim, keepdim);
#endif
}
#endif
inline Tensor Tensor::mul(const Tensor & other) const {
#ifdef USE_STATIC_DISPATCH
switch(tensorTypeIdToBackend(impl::dispatchTypeId(type_set()))) {
@ -2272,6 +2352,16 @@ inline Tensor Tensor::squeeze(int64_t dim) const {
op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(*this)), const_cast<Tensor&>(*this), dim);
#endif
}
#ifdef BUILD_NAMEDTENSOR
// Dimname overload: dispatches the "aten::squeeze.dimname" schema (TypeDefault
// under static dispatch).
inline Tensor Tensor::squeeze(Dimname dim) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::squeeze(const_cast<Tensor&>(*this), dim);
#else
static auto table = globalATenDispatch().getOpTable("aten::squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)");
return table->getOp<Tensor (const Tensor &, Dimname)>(at::detail::multi_dispatch_tensor_type_set(*this))(const_cast<Tensor&>(*this), dim);
#endif
}
#endif
inline Tensor & Tensor::squeeze_() const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::squeeze_(const_cast<Tensor&>(*this));
@ -2290,6 +2380,16 @@ inline Tensor & Tensor::squeeze_(int64_t dim) const {
op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(*this)), const_cast<Tensor&>(*this), dim);
#endif
}
#ifdef BUILD_NAMEDTENSOR
// In-place Dimname overload (note Tensor& return): dispatches the
// "aten::squeeze_.dimname" schema (TypeDefault under static dispatch).
inline Tensor & Tensor::squeeze_(Dimname dim) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::squeeze_(const_cast<Tensor&>(*this), dim);
#else
static auto table = globalATenDispatch().getOpTable("aten::squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!)");
return table->getOp<Tensor & (Tensor &, Dimname)>(at::detail::multi_dispatch_tensor_type_set(*this))(const_cast<Tensor&>(*this), dim);
#endif
}
#endif
inline Tensor Tensor::sspaddmm(const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::sspaddmm(const_cast<Tensor&>(*this), mat1, mat2, beta, alpha);
@ -3574,6 +3674,16 @@ inline Tensor Tensor::index_add(int64_t dim, const Tensor & index, const Tensor
op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(*this, index, source)), const_cast<Tensor&>(*this), dim, index, source);
#endif
}
#ifdef BUILD_NAMEDTENSOR
// Dimname overload: dispatches the "aten::index_add.dimname" schema
// (TypeDefault under static dispatch).
inline Tensor Tensor::index_add(Dimname dim, const Tensor & index, const Tensor & source) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::index_add(const_cast<Tensor&>(*this), dim, index, source);
#else
static auto table = globalATenDispatch().getOpTable("aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor");
return table->getOp<Tensor (const Tensor &, Dimname, const Tensor &, const Tensor &)>(at::detail::multi_dispatch_tensor_type_set(*this, index, source))(const_cast<Tensor&>(*this), dim, index, source);
#endif
}
#endif
inline Tensor & Tensor::index_fill_(int64_t dim, const Tensor & index, Scalar value) const {
#ifdef USE_STATIC_DISPATCH
switch(tensorTypeIdToBackend(impl::dispatchTypeId(type_set()))) {
@ -3622,6 +3732,26 @@ inline Tensor Tensor::index_fill(int64_t dim, const Tensor & index, const Tensor
op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(*this, index, value)), const_cast<Tensor&>(*this), dim, index, value);
#endif
}
#ifdef BUILD_NAMEDTENSOR
// Dimname overload, Scalar fill value: dispatches the
// "aten::index_fill.dimname_Scalar" schema (TypeDefault under static dispatch).
inline Tensor Tensor::index_fill(Dimname dim, const Tensor & index, Scalar value) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::index_fill(const_cast<Tensor&>(*this), dim, index, value);
#else
static auto table = globalATenDispatch().getOpTable("aten::index_fill.dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor");
return table->getOp<Tensor (const Tensor &, Dimname, const Tensor &, Scalar)>(at::detail::multi_dispatch_tensor_type_set(*this, index))(const_cast<Tensor&>(*this), dim, index, value);
#endif
}
#endif
#ifdef BUILD_NAMEDTENSOR
// Dimname overload, Tensor fill value: dispatches the
// "aten::index_fill.dimname_Tensor" schema (TypeDefault under static dispatch).
inline Tensor Tensor::index_fill(Dimname dim, const Tensor & index, const Tensor & value) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::index_fill(const_cast<Tensor&>(*this), dim, index, value);
#else
static auto table = globalATenDispatch().getOpTable("aten::index_fill.dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor");
return table->getOp<Tensor (const Tensor &, Dimname, const Tensor &, const Tensor &)>(at::detail::multi_dispatch_tensor_type_set(*this, index, value))(const_cast<Tensor&>(*this), dim, index, value);
#endif
}
#endif
inline Tensor & Tensor::scatter_(int64_t dim, const Tensor & index, const Tensor & src) const {
#ifdef USE_STATIC_DISPATCH
switch(tensorTypeIdToBackend(impl::dispatchTypeId(type_set()))) {
@ -3670,6 +3800,26 @@ inline Tensor Tensor::scatter(int64_t dim, const Tensor & index, Scalar value) c
op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(*this, index)), const_cast<Tensor&>(*this), dim, index, value);
#endif
}
#ifdef BUILD_NAMEDTENSOR
// Dimname overload, Tensor src: dispatches the "aten::scatter.dimname_src"
// schema (TypeDefault under static dispatch).
inline Tensor Tensor::scatter(Dimname dim, const Tensor & index, const Tensor & src) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::scatter(const_cast<Tensor&>(*this), dim, index, src);
#else
static auto table = globalATenDispatch().getOpTable("aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor");
return table->getOp<Tensor (const Tensor &, Dimname, const Tensor &, const Tensor &)>(at::detail::multi_dispatch_tensor_type_set(*this, index, src))(const_cast<Tensor&>(*this), dim, index, src);
#endif
}
#endif
#ifdef BUILD_NAMEDTENSOR
// Dimname overload, Scalar value: dispatches the "aten::scatter.dimname_value"
// schema (TypeDefault under static dispatch).
inline Tensor Tensor::scatter(Dimname dim, const Tensor & index, Scalar value) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::scatter(const_cast<Tensor&>(*this), dim, index, value);
#else
static auto table = globalATenDispatch().getOpTable("aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor");
return table->getOp<Tensor (const Tensor &, Dimname, const Tensor &, Scalar)>(at::detail::multi_dispatch_tensor_type_set(*this, index))(const_cast<Tensor&>(*this), dim, index, value);
#endif
}
#endif
inline Tensor & Tensor::scatter_add_(int64_t dim, const Tensor & index, const Tensor & src) const {
#ifdef USE_STATIC_DISPATCH
switch(tensorTypeIdToBackend(impl::dispatchTypeId(type_set()))) {
@ -3694,6 +3844,16 @@ inline Tensor Tensor::scatter_add(int64_t dim, const Tensor & index, const Tenso
op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(*this, index, src)), const_cast<Tensor&>(*this), dim, index, src);
#endif
}
#ifdef BUILD_NAMEDTENSOR
// Dimname overload: dispatches the "aten::scatter_add.dimname" schema
// (TypeDefault under static dispatch).
inline Tensor Tensor::scatter_add(Dimname dim, const Tensor & index, const Tensor & src) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::scatter_add(const_cast<Tensor&>(*this), dim, index, src);
#else
static auto table = globalATenDispatch().getOpTable("aten::scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor");
return table->getOp<Tensor (const Tensor &, Dimname, const Tensor &, const Tensor &)>(at::detail::multi_dispatch_tensor_type_set(*this, index, src))(const_cast<Tensor&>(*this), dim, index, src);
#endif
}
#endif
inline Tensor & Tensor::lt_(Scalar other) const {
#ifdef USE_STATIC_DISPATCH
switch(tensorTypeIdToBackend(impl::dispatchTypeId(type_set()))) {
@ -4852,6 +5012,16 @@ inline Tensor Tensor::index_select(int64_t dim, const Tensor & index) const {
op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(*this, index)), const_cast<Tensor&>(*this), dim, index);
#endif
}
#ifdef BUILD_NAMEDTENSOR
// Dimname overload: dispatches the "aten::index_select.dimname" schema
// (TypeDefault under static dispatch).
inline Tensor Tensor::index_select(Dimname dim, const Tensor & index) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::index_select(const_cast<Tensor&>(*this), dim, index);
#else
static auto table = globalATenDispatch().getOpTable("aten::index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor");
return table->getOp<Tensor (const Tensor &, Dimname, const Tensor &)>(at::detail::multi_dispatch_tensor_type_set(*this, index))(const_cast<Tensor&>(*this), dim, index);
#endif
}
#endif
inline Tensor Tensor::masked_select(const Tensor & mask) const {
#ifdef USE_STATIC_DISPATCH
switch(tensorTypeIdToBackend(impl::dispatchTypeId(type_set()))) {
@ -4906,6 +5076,16 @@ inline Tensor Tensor::gather(int64_t dim, const Tensor & index, bool sparse_grad
op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(*this, index)), const_cast<Tensor&>(*this), dim, index, sparse_grad);
#endif
}
#ifdef BUILD_NAMEDTENSOR
// Dimname overload: dispatches the "aten::gather.dimname" schema (TypeDefault
// under static dispatch).
inline Tensor Tensor::gather(Dimname dim, const Tensor & index, bool sparse_grad) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::gather(const_cast<Tensor&>(*this), dim, index, sparse_grad);
#else
static auto table = globalATenDispatch().getOpTable("aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor");
return table->getOp<Tensor (const Tensor &, Dimname, const Tensor &, bool)>(at::detail::multi_dispatch_tensor_type_set(*this, index))(const_cast<Tensor&>(*this), dim, index, sparse_grad);
#endif
}
#endif
inline Tensor Tensor::addcmul(const Tensor & tensor1, const Tensor & tensor2, Scalar value) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::addcmul(const_cast<Tensor&>(*this), tensor1, tensor2, value);
@ -5418,6 +5598,16 @@ inline std::tuple<Tensor,Tensor> Tensor::sort(int64_t dim, bool descending) cons
op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(*this)), const_cast<Tensor&>(*this), dim, descending);
#endif
}
#ifdef BUILD_NAMEDTENSOR
// Dimname overload returning (values, indices): dispatches the
// "aten::sort.dimname" schema (TypeDefault under static dispatch).
inline std::tuple<Tensor,Tensor> Tensor::sort(Dimname dim, bool descending) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::sort(const_cast<Tensor&>(*this), dim, descending);
#else
static auto table = globalATenDispatch().getOpTable("aten::sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)");
return table->getOp<std::tuple<Tensor,Tensor> (const Tensor &, Dimname, bool)>(at::detail::multi_dispatch_tensor_type_set(*this))(const_cast<Tensor&>(*this), dim, descending);
#endif
}
#endif
inline Tensor Tensor::argsort(int64_t dim, bool descending) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::argsort(const_cast<Tensor&>(*this), dim, descending);
@ -5427,6 +5617,16 @@ inline Tensor Tensor::argsort(int64_t dim, bool descending) const {
op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(*this)), const_cast<Tensor&>(*this), dim, descending);
#endif
}
#ifdef BUILD_NAMEDTENSOR
// Dimname overload: dispatches the "aten::argsort.dimname" schema (TypeDefault
// under static dispatch).
inline Tensor Tensor::argsort(Dimname dim, bool descending) const {
#ifdef USE_STATIC_DISPATCH
return TypeDefault::argsort(const_cast<Tensor&>(*this), dim, descending);
#else
static auto table = globalATenDispatch().getOpTable("aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor");
return table->getOp<Tensor (const Tensor &, Dimname, bool)>(at::detail::multi_dispatch_tensor_type_set(*this))(const_cast<Tensor&>(*this), dim, descending);
#endif
}
#endif
inline std::tuple<Tensor,Tensor> Tensor::topk(int64_t k, int64_t dim, bool largest, bool sorted) const {
#ifdef USE_STATIC_DISPATCH
switch(tensorTypeIdToBackend(impl::dispatchTypeId(type_set()))) {

View File

@ -232,5 +232,100 @@ Tensor unflatten(const Tensor& self, Dimname dim, IntArrayRef sizes, DimnameList
return native::unflatten(self, dimname_to_position(self, dim), sizes, names);
}
#ifdef BUILD_NAMEDTENSOR
// Misc. Dimname overloads that don't have homes. Maybe we should move
// all of them here or autogenerate them because they look so similar.
Tensor gather(const Tensor& self, Dimname dim, const Tensor& index, bool sparse_grad) {
reportNYIDimnameOverload("gather");
}
Tensor& gather_out(Tensor& result, const Tensor& self, Dimname dim, const Tensor& index, bool sparse_grad) {
reportNYIDimnameOverload("gather");
}
std::tuple<Tensor, Tensor> mode(const Tensor& self, Dimname dim, bool keepdim) {
reportNYIDimnameOverload("mode");
}
std::tuple<Tensor &,Tensor &> mode_out(Tensor& values, Tensor& indices,
const Tensor& self, Dimname dim, bool keepdim) {
reportNYIDimnameOverload("mode");
}
std::tuple<Tensor&, Tensor&> kthvalue_out(
Tensor& values,
Tensor& indices,
const Tensor& self,
int64_t k,
Dimname dim,
bool keepdim) {
reportNYIDimnameOverload("kthvalue");
}
// NYI: dimname overload of kthvalue — throws; use an integer dim instead.
std::tuple<Tensor, Tensor> kthvalue(
    const Tensor& self,
    int64_t k,
    Dimname dim,
    bool keepdim) {
  reportNYIDimnameOverload("kthvalue");
}
// NYI: dimname overload of index_add — throws; use an integer dim instead.
Tensor index_add(const Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) {
  reportNYIDimnameOverload("index_add");
}
// NYI: dimname overload of in-place index_add_ — throws.
Tensor& index_add_(Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) {
  reportNYIDimnameOverload("index_add");
}
// NYI: dimname overload of index_fill (Scalar fill value) — throws.
Tensor index_fill(const Tensor& self, Dimname dim, const Tensor& index, Scalar source) {
  reportNYIDimnameOverload("index_fill");
}
// NYI: dimname overload of in-place index_fill_ (Scalar fill value) — throws.
Tensor& index_fill_(Tensor& self, Dimname dim, const Tensor& index, Scalar source) {
  reportNYIDimnameOverload("index_fill");
}
// NYI: dimname overload of index_fill (Tensor fill value) — throws.
Tensor index_fill(const Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) {
  reportNYIDimnameOverload("index_fill");
}
// NYI: dimname overload of in-place index_fill_ (Tensor fill value) — throws.
Tensor& index_fill_(Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) {
  reportNYIDimnameOverload("index_fill");
}
// NYI: dimname overload of index_copy — throws; use an integer dim instead.
Tensor index_copy(const Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) {
  reportNYIDimnameOverload("index_copy");
}
// NYI: dimname overload of in-place index_copy_ — throws.
Tensor& index_copy_(Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) {
  reportNYIDimnameOverload("index_copy");
}
// NYI: dimname overload of index_select.dimname_out — throws.
Tensor& index_select_out(Tensor& out, const Tensor& self, Dimname dim, const Tensor& index) {
  reportNYIDimnameOverload("index_select");
}
// NYI: dimname overload of index_select — throws; use an integer dim instead.
Tensor index_select(const Tensor& self, Dimname dim, const Tensor& index) {
  reportNYIDimnameOverload("index_select");
}
// NYI: dimname overload of scatter (Tensor src) — throws.
Tensor scatter(const Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) {
  reportNYIDimnameOverload("scatter");
}
// NYI: dimname overload of in-place scatter_ (Tensor src) — throws.
Tensor& scatter_(Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) {
  reportNYIDimnameOverload("scatter");
}
// NYI: dimname overload of scatter (Scalar value) — throws.
Tensor scatter(const Tensor& self, Dimname dim, const Tensor& index, Scalar source) {
  reportNYIDimnameOverload("scatter");
}
// NYI: dimname overload of in-place scatter_ (Scalar value) — throws.
Tensor& scatter_(Tensor& self, Dimname dim, const Tensor& index, Scalar source) {
  reportNYIDimnameOverload("scatter");
}
// NYI: dimname overload of scatter_add — throws; use an integer dim instead.
Tensor scatter_add(const Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) {
  reportNYIDimnameOverload("scatter_add");
}
// NYI: dimname overload of in-place scatter_add_ — throws.
Tensor& scatter_add_(Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) {
  reportNYIDimnameOverload("scatter_add");
}
// NYI: dimname overload of sort.dimname_values — throws; use an integer dim.
// NOTE: the boolean flag is renamed `keepdim` -> `descending` to match the
// declared schema, sort.dimname_values(Tensor self, Dimname dim,
// bool descending=False, ...). The rename is definition-local and does not
// affect callers.
std::tuple<Tensor&, Tensor&> sort_out(Tensor& values, Tensor& indices, const Tensor& self, Dimname dim, bool descending) {
  reportNYIDimnameOverload("sort");
}
// NYI: dimname overload of sort — throws; use an integer dim instead.
// NOTE: flag renamed `keepdim` -> `descending` to match the declared schema,
// sort.dimname(Tensor self, Dimname dim, bool descending=False). The rename
// is definition-local and does not affect callers.
std::tuple<Tensor, Tensor> sort(const Tensor& self, Dimname dim, bool descending) {
  reportNYIDimnameOverload("sort");
}
// NYI: dimname overload of in-place squeeze_ — throws.
Tensor& squeeze_(Tensor& self, Dimname dim) {
  reportNYIDimnameOverload("squeeze");
}
// NYI: dimname overload of squeeze — throws; use an integer dim instead.
Tensor squeeze(const Tensor& self, Dimname dim) {
  reportNYIDimnameOverload("squeeze");
}
#endif
}} // namespace at::native
#endif

View File

@ -716,6 +716,32 @@ Tensor norm(const Tensor& self, optional<Scalar> p, DimnameList dim, bool keepdi
// Named-dim overload of norm: translate the dimnames into positional
// indices for `self`, then defer to the positional implementation.
Tensor norm(const Tensor& self, optional<Scalar> p, DimnameList dim, bool keepdim) {
  const auto positions = dimnames_to_positions(self, dim);
  return at::norm(self, p, positions, keepdim);
}
// NYI: dimname overload of any — throws; use an integer dim instead.
Tensor any(const Tensor& self, Dimname dim, bool keepdim) {
  reportNYIDimnameOverload("any");
}
// NYI: dimname overload of any.dimname_out — throws.
Tensor& any_out(Tensor& result, const Tensor &self, Dimname dim, bool keepdim) {
  reportNYIDimnameOverload("any");
}
// NYI: dimname overload of all — throws; use an integer dim instead.
Tensor all(const Tensor& self, Dimname dim, bool keepdim) {
  reportNYIDimnameOverload("all");
}
// NYI: dimname overload of all.dimname_out — throws.
Tensor& all_out(Tensor& result, const Tensor &self, Dimname dim, bool keepdim) {
  reportNYIDimnameOverload("all");
}
// NYI: dimname overload of cumsum — throws; use an integer dim instead.
Tensor cumsum(const Tensor& self, Dimname dim, c10::optional<ScalarType> dtype) {
  reportNYIDimnameOverload("cumsum");
}
// NYI: dimname overload of cumsum.dimname_out — throws.
Tensor& cumsum_out(Tensor& result, const Tensor& self, Dimname dim, c10::optional<ScalarType> dtype) {
  reportNYIDimnameOverload("cumsum");
}
// NYI: dimname overload of cumprod — throws; use an integer dim instead.
Tensor cumprod(const Tensor& self, Dimname dim, c10::optional<ScalarType> dtype) {
  reportNYIDimnameOverload("cumprod");
}
// NYI: dimname overload of cumprod.dimname_out — throws.
Tensor& cumprod_out(Tensor& result, const Tensor& self, Dimname dim, c10::optional<ScalarType> dtype) {
  reportNYIDimnameOverload("cumprod");
}
#endif
}} // namespace at::native

View File

@ -275,6 +275,15 @@ std::tuple<Tensor &,Tensor &> max_out(Tensor& max, Tensor& max_indices,
TORCH_CHECK(false, "NYI: max with names");
return at::max_out(max, max_indices, self, dimname_to_position(self, dim), keepdim);
}
// NYI: dimname overload of argmax — throws; use an integer dim instead.
// (keepdim mirrors the positional argmax(dim, keepdim) signature —
// schema not visible here; confirm against native_functions.yaml.)
Tensor argmax(const Tensor& self, Dimname dim, bool keepdim) {
  reportNYIDimnameOverload("argmax");
}
// NYI: dimname overload of argmin — throws; use an integer dim instead.
Tensor argmin(const Tensor& self, Dimname dim, bool keepdim) {
  reportNYIDimnameOverload("argmin");
}
// NYI: dimname overload of argsort — throws; use an integer dim instead.
// NOTE: flag renamed `keepdim` -> `descending` to match the declared schema,
// argsort.dimname(Tensor self, Dimname dim, bool descending=False), and the
// corresponding Tensor::argsort(Dimname, bool descending) method. The old
// name looked like a copy-paste slip from argmax/argmin above; the rename is
// definition-local and does not affect callers.
Tensor argsort(const Tensor& self, Dimname dim, bool descending) {
  reportNYIDimnameOverload("argsort");
}
#endif
}} // namespace at::native

View File

@ -311,6 +311,11 @@
- func: all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- func: all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
variants: function, method
- func: all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- func: allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool
use_c10_dispatcher: full
variants: function, method
@ -321,6 +326,11 @@
- func: any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- func: any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
variants: function, method
- func: any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- func: arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- func: arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
@ -821,11 +831,22 @@
- func: cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
- func: cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
variants: function, method
- func: cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
- func: cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
variants: function, method
- func: cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
- func: cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
variants: function, method
- func: cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
- func: ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor
use_c10_dispatcher: unboxed_only
@ -1321,6 +1342,12 @@
use_c10_dispatcher: full
variants: function, method
- func: index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)
variants: method
- func: index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor
variants: function, method
- func: index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)
variants: function, method
@ -1408,6 +1435,11 @@
CPU: kthvalue_out_cpu
CUDA: kthvalue_out_cuda
- func: kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
variants: function, method
- func: kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- func: layer_norm(Tensor input, int[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor
- func: native_layer_norm(Tensor input, Tensor? weight, Tensor? bias, int M, int N, float eps) -> (Tensor, Tensor, Tensor)
@ -1837,6 +1869,11 @@
- func: mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- func: mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
variants: function, method
- func: mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- func: mul.Tensor(Tensor self, Tensor other) -> Tensor
use_c10_dispatcher: full
variants: function, method
@ -2463,6 +2500,10 @@
variants: function, method
device_guard: False
- func: squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)
variants: function, method
device_guard: False
- func: squeeze_(Tensor(a!) self) -> Tensor(a!)
use_c10_dispatcher: unboxed_only
variants: method
@ -2473,6 +2514,10 @@
variants: method
device_guard: False
- func: squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!)
variants: method
device_guard: False
- func: sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
use_c10_dispatcher: full
variants: function, method
@ -2995,7 +3040,6 @@
- func: norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- func: frobenius_norm(Tensor self) -> Tensor
use_c10_dispatcher: full
variants: function
@ -3843,6 +3887,9 @@
use_c10_dispatcher: full
variants: function, method
- func: index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor
variants: function, method
- func: index_fill_.Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
use_c10_dispatcher: unboxed_only
variants: method
@ -3865,6 +3912,12 @@
use_c10_dispatcher: full
variants: function, method
- func: index_fill.dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
variants: function, method
- func: index_fill.dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor
variants: function, method
- func: scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
use_c10_dispatcher: unboxed_only
variants: method
@ -3887,6 +3940,12 @@
use_c10_dispatcher: full
variants: function, method
- func: scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
variants: function, method
- func: scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
variants: function, method
- func: scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
use_c10_dispatcher: unboxed_only
variants: method
@ -3898,6 +3957,9 @@
use_c10_dispatcher: full
variants: function, method
- func: scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
variants: function, method
- func: lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
use_c10_dispatcher: unboxed_only
variants: method
@ -4558,6 +4620,11 @@
SparseCPU: index_select_sparse
SparseCUDA: index_select_sparse
- func: index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
- func: index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor
variants: method, function
- func: masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU: masked_select_out_cpu
@ -4600,6 +4667,11 @@
CPU: gather_cpu
CUDA: gather_cuda
- func: gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
- func: gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor
variants: method, function
- func: _gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor
use_c10_dispatcher: full
@ -5048,10 +5120,18 @@
CUDA: legacy::cuda::_th_sort
QuantizedCPU: sort_quant
- func: sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- func: sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
variants: method, function
- func: argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor
use_c10_dispatcher: full
variants: method, function
- func: argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor
variants: method, function
- func: topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) ->(Tensor(a!) values, Tensor(b!) indices)
dispatch:
CPU: topk_out_cpu

View File

@ -1682,6 +1682,11 @@ class TestNamedTensor(TestCase):
self.assertTrue(
str(warns[0].message).startswith('Autograd was passed a named grad tensor'))
def test_nyi_dimname_overload_msg(self):
    # Ops whose dimname overload is declared but not yet implemented must
    # raise a RuntimeError that points the user at integer dims instead.
    tensor = torch.randn(3, 3)
    expected_msg = "squeeze: You passed a dimname"
    with self.assertRaisesRegex(RuntimeError, expected_msg):
        tensor.squeeze("N")
def test_dot(self):
for device in torch.testing.get_all_device_types():
# torch.dot ignores the names of both tensors