Mirror of https://github.com/zebrajr/pytorch.git (synced 2025-12-06 12:20:52 +01:00)
Rename IntList to IntArrayRef. (#16751)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/16751

This was made more complicated by the fact that ivalue::IntList is a thing, so I had to fix up, after the fact, all of the sites that refer to the IValue type. The following codemods were run, in this order:

```
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in IntList IntArrayRef
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in IntArrayRef::create IntList::create
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in ivalue::IntArrayRef ivalue::IntList
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in Tag::IntArrayRef Tag::IntList
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in isIntArrayRef isIntList
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in toIntArrayRef toIntList
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in 'Shared<IntArrayRef>' 'Shared<IntList>'
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in 'intrusive_ptr<IntArrayRef>' 'intrusive_ptr<IntList>'
```

The first codemod renames every IntList to IntArrayRef; the remaining ones then revert the IValue-related names (ivalue::IntList, Tag::IntList, isIntList, toIntList, and friends), which keep their old spelling. Some manual fixups were done afterwards; they can be reviewed separately at https://github.com/pytorch/pytorch/pull/16752

Reviewed By: dzhulgakov

Differential Revision: D13954363

fbshipit-source-id: b5c40aacba042402155a2f5a229fa6db7992ac64
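What the rename means in practice: IntArrayRef, like the IntList it replaces, is an alias for c10::ArrayRef<int64_t>, a non-owning (pointer, length) view over caller-owned integers. The sketch below is illustrative only and is not part of this commit; it assumes an ordinary ATen build where at::zeros accepts an IntArrayRef of sizes.

```cpp
// Illustrative sketch, not from this commit. Assumes at::IntArrayRef is the
// usual alias for c10::ArrayRef<int64_t> exported by ATen.
#include <ATen/ATen.h>

#include <cstdint>
#include <vector>

// IntArrayRef is a cheap, non-owning view, so it is passed by value,
// exactly as IntList was before the rename.
at::Tensor zeros_of_shape(at::IntArrayRef shape) {
  return at::zeros(shape);
}

int main() {
  // Implicit conversion from std::vector<int64_t>; the view borrows the
  // vector's storage, which must stay alive for the duration of the call.
  std::vector<int64_t> dims = {2, 3, 4};
  at::Tensor a = zeros_of_shape(dims);

  // Construction from a raw pointer plus a length, mirroring call sites in
  // the diff such as IntArrayRef(src->dl_tensor.shape, src->dl_tensor.ndim).
  int64_t raw[2] = {5, 6};
  at::Tensor b = zeros_of_shape(at::IntArrayRef(raw, 2));

  // Braced initializer lists convert as well.
  at::Tensor c = zeros_of_shape({7, 8});
  return 0;
}
```

Because the type is only a view, anything that converted to IntList (a std::vector<int64_t>, a pointer plus a length, a braced list) converts to IntArrayRef in exactly the same way, which is why the diff below is almost entirely mechanical.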
parent e2d3a3fd6a
commit 4404762d7d
@@ -127,7 +127,7 @@ inline std::pair<int64_t, int64_t> collapse_dims(
  */

  inline Tensor sort_strides(Tensor& tensor_) {
- IntList strides = tensor_.strides();
+ IntArrayRef strides = tensor_.strides();
  std::vector<int64_t> indices;
  indices.reserve(tensor_.ndimension());
  for (int64_t i = 0; i < tensor_.ndimension(); i++) {

@@ -172,8 +172,8 @@ Tensor fromDLPack(const DLManagedTensor* src) {
  src->deleter(const_cast<DLManagedTensor*>(src));
  };
  return at::from_blob(src->dl_tensor.data,
- IntList(src->dl_tensor.shape, src->dl_tensor.ndim),
- IntList(src->dl_tensor.strides, src->dl_tensor.ndim),
+ IntArrayRef(src->dl_tensor.shape, src->dl_tensor.ndim),
+ IntArrayRef(src->dl_tensor.strides, src->dl_tensor.ndim),
  deleter,
  at::device(device_type).dtype(stype));
  }

@@ -29,8 +29,8 @@
  - THTensor* self
  - THStorage* source
  - long storage_offset
- - IntListSize size
- - arg: IntList stride
+ - IntArrayRefSize size
+ - arg: IntArrayRef stride
  default: {}
  ]]
  [[

@@ -131,7 +131,7 @@
  return: THTensor*
  arguments:
  - THTensor* self
- - arg: IntListSize size
+ - arg: IntArrayRefSize size
  long_args: True
  ]]
  [[
@@ -2,7 +2,7 @@

  namespace at {

- std::vector<int64_t> infer_size(IntList a, IntList b) {
+ std::vector<int64_t> infer_size(IntArrayRef a, IntArrayRef b) {
  auto dimsA = a.size();
  auto dimsB = b.size();
  ptrdiff_t ndim = dimsA > dimsB ? dimsA : dimsB;

@@ -29,9 +29,9 @@ std::vector<int64_t> infer_size(IntList a, IntList b) {
  }

  std::tuple<std::vector<int64_t>, std::vector<int64_t>> inferExpandGeometry(
- IntList tensor_sizes,
- IntList tensor_strides,
- IntList sizes) {
+ IntArrayRef tensor_sizes,
+ IntArrayRef tensor_strides,
+ IntArrayRef sizes) {
  int64_t ndim = sizes.size();
  int64_t tensor_dim = tensor_sizes.size();

@@ -9,12 +9,12 @@

  namespace at {

- CAFFE2_API std::vector<int64_t> infer_size(IntList a, IntList b);
+ CAFFE2_API std::vector<int64_t> infer_size(IntArrayRef a, IntArrayRef b);
  CAFFE2_API std::tuple<std::vector<int64_t>, std::vector<int64_t>>
  inferExpandGeometry(
- IntList tensor_sizes,
- IntList tensor_strides,
- IntList sizes);
+ IntArrayRef tensor_sizes,
+ IntArrayRef tensor_strides,
+ IntArrayRef sizes);

  // avoid copy-construction of Tensor by using a reference_wrapper.
  inline void check_defined(std::initializer_list<std::reference_wrapper<const Tensor>> tensors, const char *api_name) {

@@ -93,7 +93,7 @@ inline std::tuple<Tensor, Tensor, Tensor> expand_outplace(const Tensor &to_expan
  return expand_outplace(to_expand1, to_expand2, to_expand3);
  }

- inline std::tuple<Tensor> expand_size(const Tensor &to_expand, IntList sizes) {
+ inline std::tuple<Tensor> expand_size(const Tensor &to_expand, IntArrayRef sizes) {
  if(to_expand.sizes().equals(sizes)) {
  return std::make_tuple(to_expand);
  }

@@ -101,7 +101,7 @@ inline std::tuple<Tensor> expand_size(const Tensor &to_expand, IntList sizes) {
  return std::make_tuple(to_expand.expand(sizes, /*implicit=*/true)); // see [expand implicit]
  }

- inline std::tuple<Tensor> expand_size(const Tensor &to_expand, IntList sizes, const char *api_name) {
+ inline std::tuple<Tensor> expand_size(const Tensor &to_expand, IntArrayRef sizes, const char *api_name) {
  check_defined({to_expand}, api_name);
  return expand_size(to_expand, sizes);
  }

@@ -136,12 +136,12 @@ inline std::vector<Tensor> expand_outplace(TensorList to_expand) {

  // Sums `tensor` repeatedly to produce a tensor of shape `shape`.
  // Precondition: is_expandable_to(shape, tensor.sizes()) must be true
- static inline Tensor sum_to(Tensor tensor, const IntList shape) {
+ static inline Tensor sum_to(Tensor tensor, const IntArrayRef shape) {
  if (shape.size() == 0) {
  return tensor.sum();
  }
  c10::SmallVector<int64_t, 8> reduce_dims;
- const at::IntList sizes = tensor.sizes();
+ const at::IntArrayRef sizes = tensor.sizes();
  const int64_t leading_dims = sizes.size() - shape.size();
  for (int64_t i = 0; i < leading_dims; ++i) {
  reduce_dims.push_back(i);

@@ -158,7 +158,7 @@ static inline Tensor sum_to(Tensor tensor, const IntList shape) {
  }

  // True if `shape` can be broadcasted to `desired`
- static inline bool is_expandable_to(IntList shape, IntList desired) {
+ static inline bool is_expandable_to(IntArrayRef shape, IntArrayRef desired) {
  int ndim = shape.size();
  int target_dim = desired.size();
  if (ndim > target_dim) {
@@ -9,7 +9,7 @@ namespace at {

  // Infers the size of a dim with size -1, if it exists. Also checks that new
  // shape is compatible with the number of elements.
- static std::vector<int64_t> infer_size(IntList shape, int64_t numel) {
+ static std::vector<int64_t> infer_size(IntArrayRef shape, int64_t numel) {
  auto res = shape.vec();
  int64_t newsize = 1;
  auto infer_dim = c10::optional<int64_t>();

@@ -37,10 +37,10 @@ SparseTensorImpl::SparseTensorImpl(at::TensorTypeId type_id, const caffe2::TypeM
  , indices_(at::empty({1, 0}, at::initialTensorOptions().device(sparseTensorIdToDeviceType(type_id)).dtype(ScalarType::Long)))
  , values_(at::empty({0}, at::initialTensorOptions().device(sparseTensorIdToDeviceType(type_id)).dtype(data_type))) {}

- IntList SparseTensorImpl::sizes() const {
+ IntArrayRef SparseTensorImpl::sizes() const {
  return sizes_;
  }
- IntList SparseTensorImpl::strides() const {
+ IntArrayRef SparseTensorImpl::strides() const {
  AT_ERROR("sparse tensors do not have strides");
  }
  bool SparseTensorImpl::is_contiguous() const {

@@ -98,7 +98,7 @@ void SparseTensorImpl::set_indices_and_values_unsafe(const Tensor& indices, cons
  auto dense_size_original = sizes().slice(sparse_dim_);
  std::vector<int64_t> expected_values_size_vec = {values.size(0)};
  expected_values_size_vec.insert(expected_values_size_vec.end(), dense_size_original.begin(), dense_size_original.end());
- IntList expected_values_size(expected_values_size_vec);
+ IntArrayRef expected_values_size(expected_values_size_vec);
  auto new_values_size = values.sizes();
  AT_CHECK(
  std::equal(expected_values_size.begin(), expected_values_size.end(), new_values_size.begin()),

@@ -40,8 +40,8 @@ public:
  Tensor indices() const { return indices_; }
  Tensor values() const { return values_; }

- IntList sizes() const override;
- IntList strides() const override;
+ IntArrayRef sizes() const override;
+ IntArrayRef strides() const override;
  bool is_contiguous() const override;
  int64_t stride(int64_t d) const override;
  void resize_dim(int64_t ndim) override;

@@ -56,7 +56,7 @@ public:

  // WARNING: This function does NOT preserve invariants of sparse_dim/dense_dim with
  // respect to indices and values
- void raw_resize_(int64_t sparse_dim, int64_t dense_dim, IntList size) {
+ void raw_resize_(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size) {
  AT_CHECK(allow_tensor_metadata_change(), "raw_resize_ is not allowed on Tensor created from .data or .detach()");
  sizes_ = size.vec();
  sparse_dim_ = sparse_dim;

@@ -86,7 +86,7 @@ public:
  // and for API consistency we don't support it).
  // 4. When we attempt to shrink the size of any of the sparse dimensions on a non-empty sparse tensor
  // (this could make some of the stored indices out-of-bound and thus unsafe).
- void resize_(int64_t sparse_dim, int64_t dense_dim, IntList size) {
+ void resize_(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size) {
  AT_CHECK(allow_tensor_metadata_change(), "resize_ is not allowed on Tensor created from .data or .detach()");
  AT_CHECK(sparse_dim + dense_dim == static_cast<int64_t>(size.size()), "number of dimensions must be sparse_dim (", sparse_dim, ") + dense_dim (", dense_dim, "), but got ", size.size());
  if (nnz() > 0) {

@@ -144,7 +144,7 @@ public:
  }

  // NOTE: this function will resize the sparse tensor and also set `indices` and `values` to empty.
- void resize_and_clear_(int64_t sparse_dim, int64_t dense_dim, IntList size) {
+ void resize_and_clear_(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size) {
  AT_CHECK(allow_tensor_metadata_change(), "resize_and_clear_ is not allowed on Tensor created from .data or .detach()");
  AT_CHECK(sparse_dim + dense_dim == static_cast<int64_t>(size.size()), "number of dimensions must be sparse_dim (", sparse_dim, ") + dense_dim (", dense_dim, "), but got ", size.size());
@@ -66,7 +66,7 @@ inline Tensor new_values_with_size_of(const Tensor& values, int64_t nnz) {
  // the flattened tensor `t.reshape( prod(full_size[:indices.size(0)]), -1 )`.
- // if forceClone is true, the result will forced to be a clone of self.
- inline LongTensor flatten_indices(const Tensor& indices, IntList full_size, bool force_clone = false) {
+ // if force_clone is true, the result will forced to be a clone of self.
+ inline LongTensor flatten_indices(const Tensor& indices, IntArrayRef full_size, bool force_clone = false) {
  int64_t sparse_dim = indices.size(0);
  if (sparse_dim == 1) {
  if (force_clone) {

@@ -113,7 +113,7 @@ inline LongTensor flatten_indices(const Tensor& indices, IntList full_size, bool
  // Ex2:
  // dims_to_flatten = [1]
  // new_indices = [ 3, 1, 3 ] # uncoalesced
- inline LongTensor flatten_indices_by_dims(const LongTensor& indices, const IntList& sizes, const IntList& dims_to_flatten){
+ inline LongTensor flatten_indices_by_dims(const LongTensor& indices, const IntArrayRef& sizes, const IntArrayRef& dims_to_flatten){
  LongTensor new_indices = at::zeros({indices.size(1)}, indices.options());
  for (auto d : dims_to_flatten) {
  new_indices.mul_(sizes[d]);

@@ -8,7 +8,7 @@ namespace at {
  struct CAFFE2_API TensorGeometry {
  TensorGeometry() : storage_offset_(0) {}

- explicit TensorGeometry(IntList sizes)
+ explicit TensorGeometry(IntArrayRef sizes)
  : sizes_(sizes.vec())
  , strides_(sizes.size())
  , storage_offset_(0) {

@@ -35,12 +35,12 @@ struct CAFFE2_API TensorGeometry {
  dim = maybe_wrap_dim(dim, this->dim());
  return sizes_.at(static_cast<size_t>(dim));
  }
- IntList sizes() const { return IntList{ sizes_ }; }
+ IntArrayRef sizes() const { return IntArrayRef{ sizes_ }; }
  int64_t stride(int64_t dim) const {
  dim = maybe_wrap_dim(dim, this->dim());
  return strides_.at(static_cast<size_t>(dim));
  }
- IntList strides() const { return IntList{ strides_ }; }
+ IntArrayRef strides() const { return IntArrayRef{ strides_ }; }
  int64_t storage_offset() const { return storage_offset_; }
  int64_t numel() const { return numel_; }

@@ -47,7 +47,7 @@ void checkAllContiguous(CheckedFrom c, at::ArrayRef<TensorArg> ts) {
  }
  }

- void checkSize(CheckedFrom c, const TensorGeometryArg& t, IntList sizes) {
+ void checkSize(CheckedFrom c, const TensorGeometryArg& t, IntArrayRef sizes) {
  checkDim(c, t, sizes.size());
  AT_CHECK(
  t->sizes().equals(sizes),

@@ -217,7 +217,7 @@ void * maybe_data_ptr(const TensorArg& tensor) {
  }

  // See TensorUtils.h on why this is useful now that we cache is_contiguous.
- bool geometry_is_contiguous(IntList sizes, IntList strides) {
+ bool geometry_is_contiguous(IntArrayRef sizes, IntArrayRef strides) {
  int64_t dim = sizes.size();
  int64_t expected_stride = 1;
  bool contig_if_nonempty = true;

@@ -69,7 +69,7 @@ CAFFE2_API void checkAllContiguous(CheckedFrom c, at::ArrayRef<TensorArg> ts);
  CAFFE2_API void checkSize(
  CheckedFrom c,
  const TensorGeometryArg& t,
- IntList sizes);
+ IntArrayRef sizes);
  CAFFE2_API void checkSize(
  CheckedFrom c,
  const TensorGeometryArg& t,

@@ -124,5 +124,5 @@ CAFFE2_API void* maybe_data_ptr(const TensorArg& tensor);
  // allows checking if a particular geometry is contiguous without explicitly
  // constructing a tensor, e.g., when you want to choose a kernel strategy based
  // on whether a subgeometry is contiguous.
- CAFFE2_API bool geometry_is_contiguous(IntList sizes, IntList strides);
+ CAFFE2_API bool geometry_is_contiguous(IntArrayRef sizes, IntArrayRef strides);
  }

@@ -12,7 +12,7 @@ namespace at {

  constexpr size_t dim_bitset_size = 64;

- static inline std::bitset<dim_bitset_size> dim_list_to_bitset(IntList dims, int64_t ndims) {
+ static inline std::bitset<dim_bitset_size> dim_list_to_bitset(IntArrayRef dims, int64_t ndims) {
  AT_CHECK(ndims <= (int64_t) dim_bitset_size, "only tensors with up to ", dim_bitset_size, " dims are supported");
  std::bitset<dim_bitset_size> seen;
  for (size_t i = 0; i < dims.size(); i++) {
@@ -163,10 +163,10 @@ class CAFFE2_API Tensor {

  const char * toString() const;

- IntList sizes() const {
+ IntArrayRef sizes() const {
  return impl_->sizes();
  }
- IntList strides() const {
+ IntArrayRef strides() const {
  return impl_->strides();
  }
  int64_t ndimension() const {

@@ -324,8 +324,8 @@ class CAFFE2_API Tensor {
  Tensor argmax() const;
  Tensor argmin(int64_t dim, bool keepdim=false) const;
  Tensor argmin() const;
- Tensor as_strided(IntList size, IntList stride, c10::optional<int64_t> storage_offset=c10::nullopt) const;
- Tensor & as_strided_(IntList size, IntList stride, c10::optional<int64_t> storage_offset=c10::nullopt);
+ Tensor as_strided(IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) const;
+ Tensor & as_strided_(IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt);
  Tensor asin() const;
  Tensor & asin_();
  Tensor atan() const;

@@ -365,7 +365,7 @@ class CAFFE2_API Tensor {
  Tensor div(Scalar other) const;
  Tensor & div_(Scalar other);
  Tensor dot(const Tensor & tensor) const;
- Tensor & resize_(IntList size);
+ Tensor & resize_(IntArrayRef size);
  Tensor erf() const;
  Tensor & erf_();
  Tensor erfc() const;

@@ -374,7 +374,7 @@ class CAFFE2_API Tensor {
  Tensor & exp_();
  Tensor expm1() const;
  Tensor & expm1_();
- Tensor expand(IntList size, bool implicit=false) const;
+ Tensor expand(IntArrayRef size, bool implicit=false) const;
  Tensor expand_as(const Tensor & other) const;
  Tensor flatten(int64_t start_dim=0, int64_t end_dim=-1) const;
  Tensor & fill_(Scalar value);

@@ -386,7 +386,7 @@ class CAFFE2_API Tensor {
  Tensor fft(int64_t signal_ndim, bool normalized=false) const;
  Tensor ifft(int64_t signal_ndim, bool normalized=false) const;
  Tensor rfft(int64_t signal_ndim, bool normalized=false, bool onesided=true) const;
- Tensor irfft(int64_t signal_ndim, bool normalized=false, bool onesided=true, IntList signal_sizes={}) const;
+ Tensor irfft(int64_t signal_ndim, bool normalized=false, bool onesided=true, IntArrayRef signal_sizes={}) const;
  Tensor index(TensorList indices) const;
  Tensor & index_copy_(int64_t dim, const Tensor & index, const Tensor & source);
  Tensor index_put(TensorList indices, const Tensor & values, bool accumulate=false) const;

@@ -411,19 +411,19 @@ class CAFFE2_API Tensor {
  Tensor logdet() const;
  Tensor log_softmax(int64_t dim, ScalarType dtype) const;
  Tensor log_softmax(int64_t dim) const;
- Tensor logsumexp(IntList dim, bool keepdim=false) const;
+ Tensor logsumexp(IntArrayRef dim, bool keepdim=false) const;
  Tensor matmul(const Tensor & other) const;
  Tensor matrix_power(int64_t n) const;
  std::tuple<Tensor,Tensor> max(int64_t dim, bool keepdim=false) const;
- Tensor max_values(IntList dim, bool keepdim=false) const;
+ Tensor max_values(IntArrayRef dim, bool keepdim=false) const;
  Tensor mean(ScalarType dtype) const;
  Tensor mean() const;
- Tensor mean(IntList dim, bool keepdim, ScalarType dtype) const;
- Tensor mean(IntList dim, bool keepdim=false) const;
- Tensor mean(IntList dim, ScalarType dtype) const;
+ Tensor mean(IntArrayRef dim, bool keepdim, ScalarType dtype) const;
+ Tensor mean(IntArrayRef dim, bool keepdim=false) const;
+ Tensor mean(IntArrayRef dim, ScalarType dtype) const;
  std::tuple<Tensor,Tensor> median(int64_t dim, bool keepdim=false) const;
  std::tuple<Tensor,Tensor> min(int64_t dim, bool keepdim=false) const;
- Tensor min_values(IntList dim, bool keepdim=false) const;
+ Tensor min_values(IntArrayRef dim, bool keepdim=false) const;
  Tensor mm(const Tensor & mat2) const;
  std::tuple<Tensor,Tensor> mode(int64_t dim=-1, bool keepdim=false) const;
  Tensor mul(const Tensor & other) const;

@@ -435,11 +435,11 @@ class CAFFE2_API Tensor {
  Tensor & mvlgamma_(int64_t p);
  Tensor narrow_copy(int64_t dim, int64_t start, int64_t length) const;
  Tensor narrow(int64_t dim, int64_t start, int64_t length) const;
- Tensor permute(IntList dims) const;
+ Tensor permute(IntArrayRef dims) const;
  Tensor pin_memory() const;
  Tensor pinverse(double rcond=1e-15) const;
- Tensor repeat(IntList repeats) const;
- Tensor reshape(IntList shape) const;
+ Tensor repeat(IntArrayRef repeats) const;
+ Tensor reshape(IntArrayRef shape) const;
  Tensor reshape_as(const Tensor & other) const;
  Tensor round() const;
  Tensor & round_();

@@ -467,7 +467,7 @@ class CAFFE2_API Tensor {
  Tensor softmax(int64_t dim, ScalarType dtype) const;
  Tensor softmax(int64_t dim) const;
  std::vector<Tensor> split(int64_t split_size, int64_t dim=0) const;
- std::vector<Tensor> split_with_sizes(IntList split_sizes, int64_t dim=0) const;
+ std::vector<Tensor> split_with_sizes(IntArrayRef split_sizes, int64_t dim=0) const;
  Tensor squeeze() const;
  Tensor squeeze(int64_t dim) const;
  Tensor & squeeze_();
@@ -477,14 +477,14 @@ class CAFFE2_API Tensor {
  int64_t stride(int64_t dim) const;
  Tensor sum(ScalarType dtype) const;
  Tensor sum() const;
- Tensor sum(IntList dim, bool keepdim, ScalarType dtype) const;
- Tensor sum(IntList dim, bool keepdim=false) const;
- Tensor sum(IntList dim, ScalarType dtype) const;
- Tensor sum_to_size(IntList size) const;
+ Tensor sum(IntArrayRef dim, bool keepdim, ScalarType dtype) const;
+ Tensor sum(IntArrayRef dim, bool keepdim=false) const;
+ Tensor sum(IntArrayRef dim, ScalarType dtype) const;
+ Tensor sum_to_size(IntArrayRef size) const;
  Tensor sqrt() const;
  Tensor & sqrt_();
  Tensor std(bool unbiased=true) const;
- Tensor std(IntList dim, bool unbiased=true, bool keepdim=false) const;
+ Tensor std(IntArrayRef dim, bool unbiased=true, bool keepdim=false) const;
  Tensor prod(ScalarType dtype) const;
  Tensor prod() const;
  Tensor prod(int64_t dim, bool keepdim, ScalarType dtype) const;

@@ -498,22 +498,22 @@ class CAFFE2_API Tensor {
  Tensor & tanh_();
  Tensor transpose(int64_t dim0, int64_t dim1) const;
  Tensor & transpose_(int64_t dim0, int64_t dim1);
- Tensor flip(IntList dims) const;
- Tensor roll(IntList shifts, IntList dims={}) const;
- Tensor rot90(int64_t k=1, IntList dims={0,1}) const;
+ Tensor flip(IntArrayRef dims) const;
+ Tensor roll(IntArrayRef shifts, IntArrayRef dims={}) const;
+ Tensor rot90(int64_t k=1, IntArrayRef dims={0,1}) const;
  Tensor trunc() const;
  Tensor & trunc_();
  Tensor type_as(const Tensor & other) const;
  Tensor unsqueeze(int64_t dim) const;
  Tensor & unsqueeze_(int64_t dim);
  Tensor var(bool unbiased=true) const;
- Tensor var(IntList dim, bool unbiased=true, bool keepdim=false) const;
+ Tensor var(IntArrayRef dim, bool unbiased=true, bool keepdim=false) const;
  Tensor view_as(const Tensor & other) const;
  Tensor where(const Tensor & condition, const Tensor & other) const;
  Tensor norm(c10::optional<Scalar> p, ScalarType dtype) const;
  Tensor norm(Scalar p=2) const;
- Tensor norm(c10::optional<Scalar> p, IntList dim, bool keepdim, ScalarType dtype) const;
- Tensor norm(c10::optional<Scalar> p, IntList dim, bool keepdim=false) const;
+ Tensor norm(c10::optional<Scalar> p, IntArrayRef dim, bool keepdim, ScalarType dtype) const;
+ Tensor norm(c10::optional<Scalar> p, IntArrayRef dim, bool keepdim=false) const;
  Tensor clone() const;
  Tensor & resize_as_(const Tensor & the_template);
  Tensor pow(Scalar exponent) const;

@@ -524,8 +524,8 @@ class CAFFE2_API Tensor {
  Tensor & sub_(Scalar other, Scalar alpha=1);
  Tensor addmm(const Tensor & mat1, const Tensor & mat2, Scalar beta=1, Scalar alpha=1) const;
  Tensor & addmm_(const Tensor & mat1, const Tensor & mat2, Scalar beta=1, Scalar alpha=1);
- Tensor & sparse_resize_(IntList size, int64_t sparse_dim, int64_t dense_dim);
- Tensor & sparse_resize_and_clear_(IntList size, int64_t sparse_dim, int64_t dense_dim);
+ Tensor & sparse_resize_(IntArrayRef size, int64_t sparse_dim, int64_t dense_dim);
+ Tensor & sparse_resize_and_clear_(IntArrayRef size, int64_t sparse_dim, int64_t dense_dim);
  Tensor sparse_mask(SparseTensorRef mask) const;
  Tensor to_dense() const;
  int64_t sparse_dim() const;

@@ -551,14 +551,14 @@ class CAFFE2_API Tensor {
  Scalar item() const;
  void* data_ptr() const;
  Tensor & set_(Storage source);
- Tensor & set_(Storage source, int64_t storage_offset, IntList size, IntList stride={});
+ Tensor & set_(Storage source, int64_t storage_offset, IntArrayRef size, IntArrayRef stride={});
  Tensor & set_(const Tensor & source);
  Tensor & set_();
  bool is_set_to(const Tensor & tensor) const;
  Tensor & masked_fill_(const Tensor & mask, Scalar value);
  Tensor & masked_fill_(const Tensor & mask, const Tensor & value);
  Tensor & masked_scatter_(const Tensor & mask, const Tensor & source);
- Tensor view(IntList size) const;
+ Tensor view(IntArrayRef size) const;
  Tensor & put_(const Tensor & index, const Tensor & source, bool accumulate=false);
  Tensor & index_add_(int64_t dim, const Tensor & index, const Tensor & source);
  Tensor & index_fill_(int64_t dim, const Tensor & index, Scalar value);

@@ -23,7 +23,7 @@ struct RestrictPtrTraits {

  // TensorAccessorBase and TensorAccessor are used for both CPU and CUDA tensors.
  // For CUDA tensors it is used in device code (only). This means that we restrict ourselves
- // to functions and types available there (e.g. IntList isn't).
+ // to functions and types available there (e.g. IntArrayRef isn't).

  // The PtrTraits argument is only relevant to cuda to support `__restrict__` pointers.
  template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
@@ -36,11 +36,11 @@ public:
  const index_t* sizes_,
  const index_t* strides_)
  : data_(data_), sizes_(sizes_), strides_(strides_) {}
- C10_HOST IntList sizes() const {
- return IntList(sizes_,N);
+ C10_HOST IntArrayRef sizes() const {
+ return IntArrayRef(sizes_,N);
  }
- C10_HOST IntList strides() const {
- return IntList(strides_,N);
+ C10_HOST IntArrayRef strides() const {
+ return IntArrayRef(strides_,N);
  }
  C10_HOST_DEVICE index_t stride(index_t i) const {
  return strides_[i];

@@ -115,10 +115,10 @@ inline Tensor Tensor::argmin(int64_t dim, bool keepdim) const {
  inline Tensor Tensor::argmin() const {
  return type().argmin(*this);
  }
- inline Tensor Tensor::as_strided(IntList size, IntList stride, c10::optional<int64_t> storage_offset) const {
+ inline Tensor Tensor::as_strided(IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset) const {
  return type().as_strided(*this, size, stride, storage_offset);
  }
- inline Tensor & Tensor::as_strided_(IntList size, IntList stride, c10::optional<int64_t> storage_offset) {
+ inline Tensor & Tensor::as_strided_(IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset) {
  return type().as_strided_(*this, size, stride, storage_offset);
  }
  inline Tensor Tensor::asin() const {

@@ -238,7 +238,7 @@ inline Tensor & Tensor::div_(Scalar other) {
  inline Tensor Tensor::dot(const Tensor & tensor) const {
  return type().dot(*this, tensor);
  }
- inline Tensor & Tensor::resize_(IntList size) {
+ inline Tensor & Tensor::resize_(IntArrayRef size) {
  return type().resize_(*this, size);
  }
  inline Tensor Tensor::erf() const {

@@ -265,7 +265,7 @@ inline Tensor Tensor::expm1() const {
  inline Tensor & Tensor::expm1_() {
  return type().expm1_(*this);
  }
- inline Tensor Tensor::expand(IntList size, bool implicit) const {
+ inline Tensor Tensor::expand(IntArrayRef size, bool implicit) const {
  return type().expand(*this, size, implicit);
  }
  inline Tensor Tensor::expand_as(const Tensor & other) const {

@@ -301,7 +301,7 @@ inline Tensor Tensor::ifft(int64_t signal_ndim, bool normalized) const {
  inline Tensor Tensor::rfft(int64_t signal_ndim, bool normalized, bool onesided) const {
  return type().rfft(*this, signal_ndim, normalized, onesided);
  }
- inline Tensor Tensor::irfft(int64_t signal_ndim, bool normalized, bool onesided, IntList signal_sizes) const {
+ inline Tensor Tensor::irfft(int64_t signal_ndim, bool normalized, bool onesided, IntArrayRef signal_sizes) const {
  return type().irfft(*this, signal_ndim, normalized, onesided, signal_sizes);
  }
  inline Tensor Tensor::index(TensorList indices) const {

@@ -376,7 +376,7 @@ inline Tensor Tensor::log_softmax(int64_t dim, ScalarType dtype) const {
  inline Tensor Tensor::log_softmax(int64_t dim) const {
  return type().log_softmax(*this, dim);
  }
- inline Tensor Tensor::logsumexp(IntList dim, bool keepdim) const {
+ inline Tensor Tensor::logsumexp(IntArrayRef dim, bool keepdim) const {
  return type().logsumexp(*this, dim, keepdim);
  }
  inline Tensor Tensor::matmul(const Tensor & other) const {

@@ -388,7 +388,7 @@ inline Tensor Tensor::matrix_power(int64_t n) const {
  inline std::tuple<Tensor,Tensor> Tensor::max(int64_t dim, bool keepdim) const {
  return type().max(*this, dim, keepdim);
  }
- inline Tensor Tensor::max_values(IntList dim, bool keepdim) const {
+ inline Tensor Tensor::max_values(IntArrayRef dim, bool keepdim) const {
  return type().max_values(*this, dim, keepdim);
  }
  inline Tensor Tensor::mean(ScalarType dtype) const {

@@ -397,13 +397,13 @@ inline Tensor Tensor::mean(ScalarType dtype) const {
  inline Tensor Tensor::mean() const {
  return type().mean(*this);
  }
- inline Tensor Tensor::mean(IntList dim, bool keepdim, ScalarType dtype) const {
+ inline Tensor Tensor::mean(IntArrayRef dim, bool keepdim, ScalarType dtype) const {
  return type().mean(*this, dim, keepdim, dtype);
  }
- inline Tensor Tensor::mean(IntList dim, bool keepdim) const {
+ inline Tensor Tensor::mean(IntArrayRef dim, bool keepdim) const {
  return type().mean(*this, dim, keepdim);
  }
- inline Tensor Tensor::mean(IntList dim, ScalarType dtype) const {
+ inline Tensor Tensor::mean(IntArrayRef dim, ScalarType dtype) const {
  return type().mean(*this, dim, dtype);
  }
  inline std::tuple<Tensor,Tensor> Tensor::median(int64_t dim, bool keepdim) const {

@@ -412,7 +412,7 @@ inline std::tuple<Tensor,Tensor> Tensor::median(int64_t dim, bool keepdim) const
  inline std::tuple<Tensor,Tensor> Tensor::min(int64_t dim, bool keepdim) const {
  return type().min(*this, dim, keepdim);
  }
- inline Tensor Tensor::min_values(IntList dim, bool keepdim) const {
+ inline Tensor Tensor::min_values(IntArrayRef dim, bool keepdim) const {
  return type().min_values(*this, dim, keepdim);
  }
  inline Tensor Tensor::mm(const Tensor & mat2) const {

@@ -448,7 +448,7 @@ inline Tensor Tensor::narrow_copy(int64_t dim, int64_t start, int64_t length) co
  inline Tensor Tensor::narrow(int64_t dim, int64_t start, int64_t length) const {
  return type().narrow(*this, dim, start, length);
  }
- inline Tensor Tensor::permute(IntList dims) const {
+ inline Tensor Tensor::permute(IntArrayRef dims) const {
  return type().permute(*this, dims);
  }
  inline Tensor Tensor::pin_memory() const {

@@ -457,10 +457,10 @@ inline Tensor Tensor::pin_memory() const {
  inline Tensor Tensor::pinverse(double rcond) const {
  return type().pinverse(*this, rcond);
  }
- inline Tensor Tensor::repeat(IntList repeats) const {
+ inline Tensor Tensor::repeat(IntArrayRef repeats) const {
  return type().repeat(*this, repeats);
  }
- inline Tensor Tensor::reshape(IntList shape) const {
+ inline Tensor Tensor::reshape(IntArrayRef shape) const {
  return type().reshape(*this, shape);
  }
  inline Tensor Tensor::reshape_as(const Tensor & other) const {

@@ -544,7 +544,7 @@ inline Tensor Tensor::softmax(int64_t dim) const {
  inline std::vector<Tensor> Tensor::split(int64_t split_size, int64_t dim) const {
  return type().split(*this, split_size, dim);
  }
- inline std::vector<Tensor> Tensor::split_with_sizes(IntList split_sizes, int64_t dim) const {
+ inline std::vector<Tensor> Tensor::split_with_sizes(IntArrayRef split_sizes, int64_t dim) const {
  return type().split_with_sizes(*this, split_sizes, dim);
  }
  inline Tensor Tensor::squeeze() const {
|
@ -574,16 +574,16 @@ inline Tensor Tensor::sum(ScalarType dtype) const {
|
|||
inline Tensor Tensor::sum() const {
|
||||
return type().sum(*this);
|
||||
}
|
||||
inline Tensor Tensor::sum(IntList dim, bool keepdim, ScalarType dtype) const {
|
||||
inline Tensor Tensor::sum(IntArrayRef dim, bool keepdim, ScalarType dtype) const {
|
||||
return type().sum(*this, dim, keepdim, dtype);
|
||||
}
|
||||
inline Tensor Tensor::sum(IntList dim, bool keepdim) const {
|
||||
inline Tensor Tensor::sum(IntArrayRef dim, bool keepdim) const {
|
||||
return type().sum(*this, dim, keepdim);
|
||||
}
|
||||
inline Tensor Tensor::sum(IntList dim, ScalarType dtype) const {
|
||||
inline Tensor Tensor::sum(IntArrayRef dim, ScalarType dtype) const {
|
||||
return type().sum(*this, dim, dtype);
|
||||
}
|
||||
inline Tensor Tensor::sum_to_size(IntList size) const {
|
||||
inline Tensor Tensor::sum_to_size(IntArrayRef size) const {
|
||||
return type().sum_to_size(*this, size);
|
||||
}
|
||||
inline Tensor Tensor::sqrt() const {
|
||||
|
|
@ -595,7 +595,7 @@ inline Tensor & Tensor::sqrt_() {
|
|||
inline Tensor Tensor::std(bool unbiased) const {
|
||||
return type().std(*this, unbiased);
|
||||
}
|
||||
inline Tensor Tensor::std(IntList dim, bool unbiased, bool keepdim) const {
|
||||
inline Tensor Tensor::std(IntArrayRef dim, bool unbiased, bool keepdim) const {
|
||||
return type().std(*this, dim, unbiased, keepdim);
|
||||
}
|
||||
inline Tensor Tensor::prod(ScalarType dtype) const {
|
||||
|
|
@ -637,13 +637,13 @@ inline Tensor Tensor::transpose(int64_t dim0, int64_t dim1) const {
|
|||
inline Tensor & Tensor::transpose_(int64_t dim0, int64_t dim1) {
|
||||
return type().transpose_(*this, dim0, dim1);
|
||||
}
|
||||
inline Tensor Tensor::flip(IntList dims) const {
|
||||
inline Tensor Tensor::flip(IntArrayRef dims) const {
|
||||
return type().flip(*this, dims);
|
||||
}
|
||||
inline Tensor Tensor::roll(IntList shifts, IntList dims) const {
|
||||
inline Tensor Tensor::roll(IntArrayRef shifts, IntArrayRef dims) const {
|
||||
return type().roll(*this, shifts, dims);
|
||||
}
|
||||
inline Tensor Tensor::rot90(int64_t k, IntList dims) const {
|
||||
inline Tensor Tensor::rot90(int64_t k, IntArrayRef dims) const {
|
||||
return type().rot90(*this, k, dims);
|
||||
}
|
||||
inline Tensor Tensor::trunc() const {
|
||||
|
|
@ -664,7 +664,7 @@ inline Tensor & Tensor::unsqueeze_(int64_t dim) {
|
|||
inline Tensor Tensor::var(bool unbiased) const {
|
||||
return type().var(*this, unbiased);
|
||||
}
|
||||
inline Tensor Tensor::var(IntList dim, bool unbiased, bool keepdim) const {
|
||||
inline Tensor Tensor::var(IntArrayRef dim, bool unbiased, bool keepdim) const {
|
||||
return type().var(*this, dim, unbiased, keepdim);
|
||||
}
|
||||
inline Tensor Tensor::view_as(const Tensor & other) const {
|
||||
|
|
@ -679,10 +679,10 @@ inline Tensor Tensor::norm(c10::optional<Scalar> p, ScalarType dtype) const {
|
|||
inline Tensor Tensor::norm(Scalar p) const {
|
||||
return type().norm(*this, p);
|
||||
}
|
||||
inline Tensor Tensor::norm(c10::optional<Scalar> p, IntList dim, bool keepdim, ScalarType dtype) const {
|
||||
inline Tensor Tensor::norm(c10::optional<Scalar> p, IntArrayRef dim, bool keepdim, ScalarType dtype) const {
|
||||
return type().norm(*this, p, dim, keepdim, dtype);
|
||||
}
|
||||
inline Tensor Tensor::norm(c10::optional<Scalar> p, IntList dim, bool keepdim) const {
|
||||
inline Tensor Tensor::norm(c10::optional<Scalar> p, IntArrayRef dim, bool keepdim) const {
|
||||
return type().norm(*this, p, dim, keepdim);
|
||||
}
|
||||
inline Tensor Tensor::clone() const {
|
||||
|
|
@ -715,10 +715,10 @@ inline Tensor Tensor::addmm(const Tensor & mat1, const Tensor & mat2, Scalar bet
|
|||
inline Tensor & Tensor::addmm_(const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) {
|
||||
return type().addmm_(*this, mat1, mat2, beta, alpha);
|
||||
}
|
||||
inline Tensor & Tensor::sparse_resize_(IntList size, int64_t sparse_dim, int64_t dense_dim) {
|
||||
inline Tensor & Tensor::sparse_resize_(IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
|
||||
return type().sparse_resize_(*this, size, sparse_dim, dense_dim);
|
||||
}
|
||||
inline Tensor & Tensor::sparse_resize_and_clear_(IntList size, int64_t sparse_dim, int64_t dense_dim) {
|
||||
inline Tensor & Tensor::sparse_resize_and_clear_(IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
|
||||
return type().sparse_resize_and_clear_(*this, size, sparse_dim, dense_dim);
|
||||
}
|
||||
inline Tensor Tensor::sparse_mask(SparseTensorRef mask) const {
|
||||
|
|
@ -796,7 +796,7 @@ inline void* Tensor::data_ptr() const {
|
|||
inline Tensor & Tensor::set_(Storage source) {
|
||||
return type().set_(*this, source);
|
||||
}
|
||||
inline Tensor & Tensor::set_(Storage source, int64_t storage_offset, IntList size, IntList stride) {
|
||||
inline Tensor & Tensor::set_(Storage source, int64_t storage_offset, IntArrayRef size, IntArrayRef stride) {
|
||||
return type().set_(*this, source, storage_offset, size, stride);
|
||||
}
|
||||
inline Tensor & Tensor::set_(const Tensor & source) {
|
||||
|
|
@ -817,7 +817,7 @@ inline Tensor & Tensor::masked_fill_(const Tensor & mask, const Tensor & value)
|
|||
inline Tensor & Tensor::masked_scatter_(const Tensor & mask, const Tensor & source) {
|
||||
return type().masked_scatter_(*this, mask, source);
|
||||
}
|
||||
inline Tensor Tensor::view(IntList size) const {
|
||||
inline Tensor Tensor::view(IntArrayRef size) const {
|
||||
return type().view(*this, size);
|
||||
}
|
||||
inline Tensor & Tensor::put_(const Tensor & index, const Tensor & source, bool accumulate) {
|
||||
|
|
|
|||
|
|
@ -150,10 +150,10 @@ struct CAFFE2_API Type {
|
|||
bool create_graph) const = 0;
|
||||
virtual void set_data(Tensor & self, Tensor new_data) const = 0;
|
||||
|
||||
virtual Tensor tensorFromBlob(void * data, IntList sizes, const std::function<void(void*)> & deleter=noop_deleter) const = 0;
|
||||
virtual Tensor tensorFromBlob(void * data, IntList sizes, IntList strides, const std::function<void(void*)> & deleter=noop_deleter) const = 0;
|
||||
virtual Tensor tensorWithAllocator(IntList sizes, Allocator* allocator) const = 0;
|
||||
virtual Tensor tensorWithAllocator(IntList sizes, IntList strides, Allocator* allocator) const = 0;
|
||||
virtual Tensor tensorFromBlob(void * data, IntArrayRef sizes, const std::function<void(void*)> & deleter=noop_deleter) const = 0;
|
||||
virtual Tensor tensorFromBlob(void * data, IntArrayRef sizes, IntArrayRef strides, const std::function<void(void*)> & deleter=noop_deleter) const = 0;
|
||||
virtual Tensor tensorWithAllocator(IntArrayRef sizes, Allocator* allocator) const = 0;
|
||||
virtual Tensor tensorWithAllocator(IntArrayRef sizes, IntArrayRef strides, Allocator* allocator) const = 0;
|
||||
|
||||
bool operator==(const Type& other) const {
|
||||
return this == &other;
|
||||
|
|
@ -207,8 +207,8 @@ struct CAFFE2_API Type {
|
|||
virtual Tensor argmax(const Tensor & self) const = 0;
|
||||
virtual Tensor argmin(const Tensor & self, int64_t dim, bool keepdim) const = 0;
|
||||
virtual Tensor argmin(const Tensor & self) const = 0;
|
||||
virtual Tensor as_strided(const Tensor & self, IntList size, IntList stride, c10::optional<int64_t> storage_offset) const = 0;
|
||||
virtual Tensor & as_strided_(Tensor & self, IntList size, IntList stride, c10::optional<int64_t> storage_offset) const = 0;
|
||||
virtual Tensor as_strided(const Tensor & self, IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset) const = 0;
|
||||
virtual Tensor & as_strided_(Tensor & self, IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset) const = 0;
|
||||
virtual Tensor asin(const Tensor & self) const = 0;
|
||||
virtual Tensor & asin_(Tensor & self) const = 0;
|
||||
virtual Tensor atan(const Tensor & self) const = 0;
|
||||
|
|
@ -248,7 +248,7 @@ struct CAFFE2_API Type {
|
|||
virtual Tensor div(const Tensor & self, Scalar other) const = 0;
|
||||
virtual Tensor & div_(Tensor & self, Scalar other) const = 0;
|
||||
virtual Tensor dot(const Tensor & self, const Tensor & tensor) const = 0;
|
||||
virtual Tensor & resize_(Tensor & self, IntList size) const = 0;
|
||||
virtual Tensor & resize_(Tensor & self, IntArrayRef size) const = 0;
|
||||
virtual Tensor erf(const Tensor & self) const = 0;
|
||||
virtual Tensor & erf_(Tensor & self) const = 0;
|
||||
virtual Tensor erfc(const Tensor & self) const = 0;
|
||||
|
|
@ -257,7 +257,7 @@ struct CAFFE2_API Type {
|
|||
virtual Tensor & exp_(Tensor & self) const = 0;
|
||||
virtual Tensor expm1(const Tensor & self) const = 0;
|
||||
virtual Tensor & expm1_(Tensor & self) const = 0;
|
||||
virtual Tensor expand(const Tensor & self, IntList size, bool implicit) const = 0;
|
||||
virtual Tensor expand(const Tensor & self, IntArrayRef size, bool implicit) const = 0;
|
||||
virtual Tensor expand_as(const Tensor & self, const Tensor & other) const = 0;
|
||||
virtual Tensor flatten(const Tensor & self, int64_t start_dim, int64_t end_dim) const = 0;
|
||||
virtual Tensor & fill_(Tensor & self, Scalar value) const = 0;
|
||||
|
|
@ -269,7 +269,7 @@ struct CAFFE2_API Type {
|
|||
virtual Tensor fft(const Tensor & self, int64_t signal_ndim, bool normalized) const = 0;
|
||||
virtual Tensor ifft(const Tensor & self, int64_t signal_ndim, bool normalized) const = 0;
|
||||
virtual Tensor rfft(const Tensor & self, int64_t signal_ndim, bool normalized, bool onesided) const = 0;
|
||||
virtual Tensor irfft(const Tensor & self, int64_t signal_ndim, bool normalized, bool onesided, IntList signal_sizes) const = 0;
|
||||
virtual Tensor irfft(const Tensor & self, int64_t signal_ndim, bool normalized, bool onesided, IntArrayRef signal_sizes) const = 0;
|
||||
virtual Tensor index(const Tensor & self, TensorList indices) const = 0;
|
||||
virtual Tensor & index_copy_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) const = 0;
|
||||
virtual Tensor index_put(const Tensor & self, TensorList indices, const Tensor & values, bool accumulate) const = 0;
|
||||
|
|
@ -294,19 +294,19 @@ struct CAFFE2_API Type {
|
|||
virtual Tensor logdet(const Tensor & self) const = 0;
|
||||
virtual Tensor log_softmax(const Tensor & self, int64_t dim, ScalarType dtype) const = 0;
|
||||
virtual Tensor log_softmax(const Tensor & self, int64_t dim) const = 0;
|
||||
virtual Tensor logsumexp(const Tensor & self, IntList dim, bool keepdim) const = 0;
|
||||
virtual Tensor logsumexp(const Tensor & self, IntArrayRef dim, bool keepdim) const = 0;
|
||||
virtual Tensor matmul(const Tensor & self, const Tensor & other) const = 0;
|
||||
virtual Tensor matrix_power(const Tensor & self, int64_t n) const = 0;
|
||||
virtual std::tuple<Tensor,Tensor> max(const Tensor & self, int64_t dim, bool keepdim) const = 0;
|
||||
virtual Tensor max_values(const Tensor & self, IntList dim, bool keepdim) const = 0;
|
||||
virtual Tensor max_values(const Tensor & self, IntArrayRef dim, bool keepdim) const = 0;
|
||||
virtual Tensor mean(const Tensor & self, ScalarType dtype) const = 0;
|
||||
virtual Tensor mean(const Tensor & self) const = 0;
|
||||
virtual Tensor mean(const Tensor & self, IntList dim, bool keepdim, ScalarType dtype) const = 0;
|
||||
virtual Tensor mean(const Tensor & self, IntList dim, bool keepdim) const = 0;
|
||||
virtual Tensor mean(const Tensor & self, IntList dim, ScalarType dtype) const = 0;
|
||||
virtual Tensor mean(const Tensor & self, IntArrayRef dim, bool keepdim, ScalarType dtype) const = 0;
|
||||
virtual Tensor mean(const Tensor & self, IntArrayRef dim, bool keepdim) const = 0;
|
||||
virtual Tensor mean(const Tensor & self, IntArrayRef dim, ScalarType dtype) const = 0;
|
||||
virtual std::tuple<Tensor,Tensor> median(const Tensor & self, int64_t dim, bool keepdim) const = 0;
|
||||
virtual std::tuple<Tensor,Tensor> min(const Tensor & self, int64_t dim, bool keepdim) const = 0;
|
||||
virtual Tensor min_values(const Tensor & self, IntList dim, bool keepdim) const = 0;
|
||||
virtual Tensor min_values(const Tensor & self, IntArrayRef dim, bool keepdim) const = 0;
|
||||
virtual Tensor mm(const Tensor & self, const Tensor & mat2) const = 0;
|
||||
virtual std::tuple<Tensor,Tensor> mode(const Tensor & self, int64_t dim, bool keepdim) const = 0;
|
||||
virtual Tensor mul(const Tensor & self, const Tensor & other) const = 0;
|
||||
|
|
@ -318,11 +318,11 @@ struct CAFFE2_API Type {
|
|||
virtual Tensor & mvlgamma_(Tensor & self, int64_t p) const = 0;
|
||||
virtual Tensor narrow_copy(const Tensor & self, int64_t dim, int64_t start, int64_t length) const = 0;
|
||||
virtual Tensor narrow(const Tensor & self, int64_t dim, int64_t start, int64_t length) const = 0;
|
||||
virtual Tensor permute(const Tensor & self, IntList dims) const = 0;
|
||||
virtual Tensor permute(const Tensor & self, IntArrayRef dims) const = 0;
|
||||
virtual Tensor pin_memory(const Tensor & self) const = 0;
|
||||
virtual Tensor pinverse(const Tensor & self, double rcond) const = 0;
|
||||
virtual Tensor repeat(const Tensor & self, IntList repeats) const = 0;
|
||||
virtual Tensor reshape(const Tensor & self, IntList shape) const = 0;
|
||||
virtual Tensor repeat(const Tensor & self, IntArrayRef repeats) const = 0;
|
||||
virtual Tensor reshape(const Tensor & self, IntArrayRef shape) const = 0;
|
||||
virtual Tensor reshape_as(const Tensor & self, const Tensor & other) const = 0;
|
||||
virtual Tensor round(const Tensor & self) const = 0;
|
||||
virtual Tensor & round_(Tensor & self) const = 0;
|
||||
|
|
@ -350,7 +350,7 @@ struct CAFFE2_API Type {
|
|||
virtual Tensor softmax(const Tensor & self, int64_t dim, ScalarType dtype) const = 0;
|
||||
virtual Tensor softmax(const Tensor & self, int64_t dim) const = 0;
|
||||
virtual std::vector<Tensor> split(const Tensor & self, int64_t split_size, int64_t dim) const = 0;
|
||||
virtual std::vector<Tensor> split_with_sizes(const Tensor & self, IntList split_sizes, int64_t dim) const = 0;
|
||||
virtual std::vector<Tensor> split_with_sizes(const Tensor & self, IntArrayRef split_sizes, int64_t dim) const = 0;
|
||||
virtual Tensor squeeze(const Tensor & self) const = 0;
|
||||
virtual Tensor squeeze(const Tensor & self, int64_t dim) const = 0;
|
||||
virtual Tensor & squeeze_(Tensor & self) const = 0;
|
||||
|
|
@ -360,14 +360,14 @@ struct CAFFE2_API Type {
|
|||
virtual int64_t stride(const Tensor & self, int64_t dim) const = 0;
|
||||
virtual Tensor sum(const Tensor & self, ScalarType dtype) const = 0;
|
||||
virtual Tensor sum(const Tensor & self) const = 0;
|
||||
virtual Tensor sum(const Tensor & self, IntList dim, bool keepdim, ScalarType dtype) const = 0;
|
||||
virtual Tensor sum(const Tensor & self, IntList dim, bool keepdim) const = 0;
|
||||
virtual Tensor sum(const Tensor & self, IntList dim, ScalarType dtype) const = 0;
|
||||
virtual Tensor sum_to_size(const Tensor & self, IntList size) const = 0;
|
||||
virtual Tensor sum(const Tensor & self, IntArrayRef dim, bool keepdim, ScalarType dtype) const = 0;
|
||||
virtual Tensor sum(const Tensor & self, IntArrayRef dim, bool keepdim) const = 0;
|
||||
virtual Tensor sum(const Tensor & self, IntArrayRef dim, ScalarType dtype) const = 0;
|
||||
virtual Tensor sum_to_size(const Tensor & self, IntArrayRef size) const = 0;
|
||||
virtual Tensor sqrt(const Tensor & self) const = 0;
|
||||
virtual Tensor & sqrt_(Tensor & self) const = 0;
|
||||
virtual Tensor std(const Tensor & self, bool unbiased) const = 0;
|
||||
virtual Tensor std(const Tensor & self, IntList dim, bool unbiased, bool keepdim) const = 0;
|
||||
virtual Tensor std(const Tensor & self, IntArrayRef dim, bool unbiased, bool keepdim) const = 0;
|
||||
virtual Tensor prod(const Tensor & self, ScalarType dtype) const = 0;
|
||||
virtual Tensor prod(const Tensor & self) const = 0;
|
||||
virtual Tensor prod(const Tensor & self, int64_t dim, bool keepdim, ScalarType dtype) const = 0;
|
||||
|
|
@ -381,22 +381,22 @@ struct CAFFE2_API Type {
|
|||
virtual Tensor & tanh_(Tensor & self) const = 0;
|
||||
virtual Tensor transpose(const Tensor & self, int64_t dim0, int64_t dim1) const = 0;
|
||||
virtual Tensor & transpose_(Tensor & self, int64_t dim0, int64_t dim1) const = 0;
|
||||
virtual Tensor flip(const Tensor & self, IntList dims) const = 0;
|
||||
virtual Tensor roll(const Tensor & self, IntList shifts, IntList dims) const = 0;
|
||||
virtual Tensor rot90(const Tensor & self, int64_t k, IntList dims) const = 0;
|
||||
virtual Tensor flip(const Tensor & self, IntArrayRef dims) const = 0;
|
||||
virtual Tensor roll(const Tensor & self, IntArrayRef shifts, IntArrayRef dims) const = 0;
|
||||
virtual Tensor rot90(const Tensor & self, int64_t k, IntArrayRef dims) const = 0;
|
||||
virtual Tensor trunc(const Tensor & self) const = 0;
|
||||
virtual Tensor & trunc_(Tensor & self) const = 0;
|
||||
virtual Tensor type_as(const Tensor & self, const Tensor & other) const = 0;
|
||||
virtual Tensor unsqueeze(const Tensor & self, int64_t dim) const = 0;
|
||||
virtual Tensor & unsqueeze_(Tensor & self, int64_t dim) const = 0;
|
||||
virtual Tensor var(const Tensor & self, bool unbiased) const = 0;
|
||||
virtual Tensor var(const Tensor & self, IntList dim, bool unbiased, bool keepdim) const = 0;
|
||||
virtual Tensor var(const Tensor & self, IntArrayRef dim, bool unbiased, bool keepdim) const = 0;
|
||||
virtual Tensor view_as(const Tensor & self, const Tensor & other) const = 0;
|
||||
virtual Tensor where(const Tensor & condition, const Tensor & self, const Tensor & other) const = 0;
|
||||
virtual Tensor norm(const Tensor & self, c10::optional<Scalar> p, ScalarType dtype) const = 0;
|
||||
virtual Tensor norm(const Tensor & self, Scalar p) const = 0;
|
||||
virtual Tensor norm(const Tensor & self, c10::optional<Scalar> p, IntList dim, bool keepdim, ScalarType dtype) const = 0;
|
||||
virtual Tensor norm(const Tensor & self, c10::optional<Scalar> p, IntList dim, bool keepdim) const = 0;
|
||||
virtual Tensor norm(const Tensor & self, c10::optional<Scalar> p, IntArrayRef dim, bool keepdim, ScalarType dtype) const = 0;
|
||||
virtual Tensor norm(const Tensor & self, c10::optional<Scalar> p, IntArrayRef dim, bool keepdim) const = 0;
|
||||
virtual Tensor clone(const Tensor & self) const = 0;
|
||||
virtual Tensor & resize_as_(Tensor & self, const Tensor & the_template) const = 0;
|
||||
virtual Tensor pow(const Tensor & self, Scalar exponent) const = 0;
|
||||
|
|
@ -407,8 +407,8 @@ struct CAFFE2_API Type {
|
|||
virtual Tensor & sub_(Tensor & self, Scalar other, Scalar alpha) const = 0;
|
||||
virtual Tensor addmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) const = 0;
|
||||
virtual Tensor & addmm_(Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) const = 0;
|
||||
virtual Tensor & sparse_resize_(Tensor & self, IntList size, int64_t sparse_dim, int64_t dense_dim) const = 0;
|
||||
virtual Tensor & sparse_resize_and_clear_(Tensor & self, IntList size, int64_t sparse_dim, int64_t dense_dim) const = 0;
|
||||
virtual Tensor & sparse_resize_(Tensor & self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) const = 0;
|
||||
virtual Tensor & sparse_resize_and_clear_(Tensor & self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) const = 0;
|
||||
virtual Tensor sparse_mask(const Tensor & self, SparseTensorRef mask) const = 0;
|
||||
virtual Tensor to_dense(const Tensor & self) const = 0;
|
||||
virtual int64_t sparse_dim(const Tensor & self) const = 0;
|
||||
|
|
@ -434,14 +434,14 @@ struct CAFFE2_API Type {
|
|||
virtual Scalar item(const Tensor & self) const = 0;
|
||||
virtual void* data_ptr(const Tensor & self) const = 0;
|
||||
virtual Tensor & set_(Tensor & self, Storage source) const = 0;
|
||||
virtual Tensor & set_(Tensor & self, Storage source, int64_t storage_offset, IntList size, IntList stride) const = 0;
|
||||
virtual Tensor & set_(Tensor & self, Storage source, int64_t storage_offset, IntArrayRef size, IntArrayRef stride) const = 0;
|
||||
virtual Tensor & set_(Tensor & self, const Tensor & source) const = 0;
|
||||
virtual Tensor & set_(Tensor & self) const = 0;
|
||||
virtual bool is_set_to(const Tensor & self, const Tensor & tensor) const = 0;
|
||||
virtual Tensor & masked_fill_(Tensor & self, const Tensor & mask, Scalar value) const = 0;
|
||||
virtual Tensor & masked_fill_(Tensor & self, const Tensor & mask, const Tensor & value) const = 0;
|
||||
virtual Tensor & masked_scatter_(Tensor & self, const Tensor & mask, const Tensor & source) const = 0;
|
||||
virtual Tensor view(const Tensor & self, IntList size) const = 0;
|
||||
virtual Tensor view(const Tensor & self, IntArrayRef size) const = 0;
|
||||
virtual Tensor & put_(Tensor & self, const Tensor & index, const Tensor & source, bool accumulate) const = 0;
|
||||
virtual Tensor & index_add_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) const = 0;
|
||||
virtual Tensor & index_fill_(Tensor & self, int64_t dim, const Tensor & index, Scalar value) const = 0;
|
||||
|
|
|
|||
|
|
@ -384,21 +384,21 @@ struct CAFFE2_API CompleteTensorType : public TensorType {
|
|||
}
|
||||
|
||||
// overloaded create variadic template argument as it could not distinguish initializer list
|
||||
static CompleteTensorTypePtr create(at::ScalarType scalar_type, at::Device device, at::IntList sizes) {
|
||||
static CompleteTensorTypePtr create(at::ScalarType scalar_type, at::Device device, at::IntArrayRef sizes) {
|
||||
return CompleteTensorTypePtr(new CompleteTensorType(scalar_type, device, sizes)); // NOLINT(modernize-make-shared)
|
||||
}
|
||||
static CompleteTensorTypePtr create(at::ScalarType scalar_type, at::Device device, at::IntList sizes, at::IntList strides) {
|
||||
static CompleteTensorTypePtr create(at::ScalarType scalar_type, at::Device device, at::IntArrayRef sizes, at::IntArrayRef strides) {
|
||||
return CompleteTensorTypePtr(new CompleteTensorType(scalar_type, device, sizes, strides)); // NOLINT(modernize-make-shared)
|
||||
}
|
||||
|
||||
const std::vector<int64_t>& sizes() const { return sizes_; }
|
||||
const std::vector<int64_t>& strides() const { return strides_; }
|
||||
|
||||
TypePtr withSizesStrides(at::IntList sizes, at::IntList strides) const {
|
||||
TypePtr withSizesStrides(at::IntArrayRef sizes, at::IntArrayRef strides) const {
|
||||
return CompleteTensorType::create(scalar_type_, device_, sizes, strides);
|
||||
}
|
||||
|
||||
TypePtr withSizes(at::IntList sizes) const {
|
||||
TypePtr withSizes(at::IntArrayRef sizes) const {
|
||||
return withSizesStrides(sizes, CompleteTensorType::contiguousStridesOf(sizes));
|
||||
}
|
||||
|
||||
|
|
@ -457,14 +457,14 @@ private:
|
|||
: TensorType(tensor, TypeKind::CompleteTensorType)
|
||||
, sizes_(tensor.sizes().vec())
|
||||
, strides_(tensor.strides().vec()) {}
|
||||
CompleteTensorType(at::ScalarType scalar_type, at::Device device, at::IntList sizes, bool requires_grad=true)
|
||||
CompleteTensorType(at::ScalarType scalar_type, at::Device device, at::IntArrayRef sizes, bool requires_grad=true)
|
||||
: CompleteTensorType(scalar_type, device, sizes, CompleteTensorType::contiguousStridesOf(sizes), requires_grad) {}
|
||||
CompleteTensorType(at::ScalarType scalar_type, at::Device device, at::IntList sizes, at::IntList strides, bool requires_grad=true)
|
||||
CompleteTensorType(at::ScalarType scalar_type, at::Device device, at::IntArrayRef sizes, at::IntArrayRef strides, bool requires_grad=true)
|
||||
: TensorType(scalar_type, device, sizes.size(), requires_grad, TypeKind::CompleteTensorType)
|
||||
, sizes_(sizes.vec())
|
||||
, strides_(strides.vec()) {}
|
||||
|
||||
static std::vector<int64_t> contiguousStridesOf(at::IntList sizes) {
|
||||
static std::vector<int64_t> contiguousStridesOf(at::IntArrayRef sizes) {
|
||||
std::vector<int64_t> strides(sizes.size());
|
||||
if(sizes.empty()) // zero-dim case
|
||||
return strides;
|
||||
|
|
|
|||
|
|
@@ -33,7 +33,7 @@ void TensorDescriptor::set(const at::Tensor &t, size_t pad) {
set(getDataType(t), t.sizes(), t.strides(), pad);
}

-void TensorDescriptor::set(cudnnDataType_t datatype, IntList t_sizes, IntList t_strides, size_t pad) {
+void TensorDescriptor::set(cudnnDataType_t datatype, IntArrayRef t_sizes, IntArrayRef t_strides, size_t pad) {
size_t dim = t_sizes.size();
if (dim > CUDNN_DIM_MAX || pad > CUDNN_DIM_MAX)
#define _STR(X) #X
@@ -117,7 +117,7 @@ public:
// broadcasting size 1 dimensions.

void set(const at::Tensor &t, size_t pad = 0);
-void set(cudnnDataType_t dataType, IntList sizes, IntList strides, size_t pad = 0);
+void set(cudnnDataType_t dataType, IntArrayRef sizes, IntArrayRef strides, size_t pad = 0);

void print();
@@ -208,7 +208,7 @@ TYPE_FORMAL_GENERIC = {
'THDenseIndexTensor*': 'Tensor &',
'THStorage*': 'Storage',
'THGenerator*': 'Generator *',
-'IntListSize': 'IntList',
+'IntArrayRefSize': 'IntArrayRef',
'accreal': 'Scalar',
'real': 'Scalar',
'long': 'int64_t',
@@ -224,7 +224,7 @@ DYNAMIC_TYPE = {
'THDenseIndexTensor*': 'IndexTensor',
'THStorage*': 'Storage',
'THGenerator*': 'Generator*',
-'IntListSize': 'IntList',
+'IntArrayRefSize': 'IntArrayRef',
'accreal': 'accreal',
'real': 'real',
'long': 'int64_t',
@@ -295,13 +295,13 @@ CHECKED_CAST = {
CodeTemplate(
'check_generator<${Backend}Generator>(${arg_name}, &globalContext().defaultGenerator(device_type()))'),
# This is a cast done via direct-construction
-'IntListStride': CodeTemplate('at::IntList ${result_name} = get_intlist_stride_th(${arg_name});'),
+'IntArrayRefStride': CodeTemplate('at::IntArrayRef ${result_name} = get_intlist_stride_th(${arg_name});'),
'real': CodeTemplate('${arg_name}.to${ScalarName}()'),
'accreal': CodeTemplate('${arg_name}.to${AccScalarName}()'),
'TensorList': CodeTemplate(
'checked_tensor_list_unwrap(${arg_name},"${arg_name}",${arg_pos}, '
'Backend::${Backend}, ScalarType::${ScalarName})'),
-'IntList': CodeTemplate('check_intlist<${size}>(${arg_name}, "${arg_name}", ${arg_pos}${,default_init})')
+'IntArrayRef': CodeTemplate('check_intlist<${size}>(${arg_name}, "${arg_name}", ${arg_pos}${,default_init})')
}

CHECKED_USE = {
@@ -1211,7 +1211,7 @@ def create_derived(backend_type_env, declarations):

def requires_checked_cast(argument):
# type: (THFormal) -> bool
-if argument['type'] == 'IntList':
+if argument['type'] == 'IntArrayRef':
return 'size' in argument
return argument['type'] in CHECKED_CAST
@@ -1388,7 +1388,7 @@ def create_derived(backend_type_env, declarations):
output_count = 0

# scalar_check is the heuristic conditions when a result may be a scalar_check
-# if there is a IntListSize argument, then its dimensions are used to determine scalar.
+# if there is a IntArrayRefSize argument, then its dimensions are used to determine scalar.
# otherwise, it is true if all the input tensors are scalars,
scalar_check_is_from_size = False
scalar_check_is_from_option = False
@@ -1404,7 +1404,7 @@ def create_derived(backend_type_env, declarations):
for arg in option['arguments']:
if is_real_argument_to_wrapper(arg):
count += 1
-if arg['type'] == 'IntListSize' and not scalar_check_is_from_option:
+if arg['type'] == 'IntArrayRefSize' and not scalar_check_is_from_option:
scalar_check_is_from_size = True
scalar_check = '{}.size() == 0'.format(arg['name'])
if arg['type'] == 'TensorList':
@@ -28,7 +28,7 @@ void TensorDescriptor::set(const at::Tensor &t, size_t pad) {

static int MIOPEN_DIM_MAX = 4;

-void TensorDescriptor::set(miopenDataType_t datatype, IntList t_sizes, IntList t_strides, size_t pad) {
+void TensorDescriptor::set(miopenDataType_t datatype, IntArrayRef t_sizes, IntArrayRef t_strides, size_t pad) {
size_t dim = t_sizes.size();
if (dim > MIOPEN_DIM_MAX || pad > MIOPEN_DIM_MAX)
#define _STR(X) #X
@@ -89,7 +89,7 @@ public:
}

void set(const at::Tensor &t, size_t pad = 0);
-void set(miopenDataType_t dataType, IntList sizes, IntList strides, size_t pad = 0);
+void set(miopenDataType_t dataType, IntArrayRef sizes, IntArrayRef strides, size_t pad = 0);

void print();
@@ -73,7 +73,7 @@ namespace {
void adaptive_avg_pool2d_out_cpu_template(
at::Tensor& output,
at::Tensor const& input,
-IntList output_size)
+IntArrayRef output_size)
{
int dimD = 0;
int dimH = 1;
@@ -285,7 +285,7 @@ namespace {
Tensor& adaptive_avg_pool2d_out_cpu(
Tensor& output,
const Tensor& input,
-IntList output_size)
+IntArrayRef output_size)
{
adaptive_avg_pool2d_out_cpu_template(
output, input, output_size);
@@ -294,7 +294,7 @@ namespace {

Tensor adaptive_avg_pool2d_cpu(
at::Tensor const& input,
-IntList output_size)
+IntArrayRef output_size)
{
auto output = at::empty({0}, input.options());
adaptive_avg_pool2d_out_cpu_template(
@@ -66,7 +66,7 @@ Tensor affine_grid_generator_5D(
return grid.view({N, D, H, W, 3});
}

-Tensor affine_grid_generator(const Tensor& theta, IntList size) {
+Tensor affine_grid_generator(const Tensor& theta, IntArrayRef size) {
AT_CHECK(
size.size() == 4 || size.size() == 5,
"AffineGridGenerator needs 4d (spatial) or 5d (volumetric) inputs.");
@@ -85,7 +85,7 @@ Tensor affine_grid_generator_4D_backward(
int64_t H,
int64_t W) {
auto base_grid = make_base_grid_4D(grad_grid, N, C, H, W);
-AT_ASSERT(grad_grid.sizes() == IntList({N, H, W, 2}));
+AT_ASSERT(grad_grid.sizes() == IntArrayRef({N, H, W, 2}));
auto grad_theta = base_grid.view({N, H * W, 3})
.transpose(1, 2)
.bmm(grad_grid.view({N, H * W, 2}));
@@ -100,14 +100,14 @@ Tensor affine_grid_generator_5D_backward(
int64_t H,
int64_t W) {
auto base_grid = make_base_grid_5D(grad_grid, N, C, D, H, W);
-AT_ASSERT(grad_grid.sizes() == IntList({N, D, H, W, 3}));
+AT_ASSERT(grad_grid.sizes() == IntArrayRef({N, D, H, W, 3}));
auto grad_theta = base_grid.view({N, D * H * W, 4})
.transpose(1, 2)
.bmm(grad_grid.view({N, D * H * W, 3}));
return grad_theta.transpose(1, 2);
}

-Tensor affine_grid_generator_backward(const Tensor& grad, IntList size) {
+Tensor affine_grid_generator_backward(const Tensor& grad, IntArrayRef size) {
AT_CHECK(
size.size() == 4 || size.size() == 5,
"AffineGridGenerator needs 4d (spatial) or 5d (volumetric) inputs.");
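The AT_ASSERT lines above compare Tensor::sizes() against a braced IntArrayRef. As a hedged aside (a sketch for illustration, not taken from the commit), the pattern relies on ArrayRef providing element-wise equality, so a shape check needs no temporary std::vector:

```cpp
// Hedged illustration of the shape-check pattern used above (assumes an ATen/libtorch build).
#include <ATen/ATen.h>
#include <cassert>

int main() {
  const int64_t N = 4, H = 8, W = 8;
  at::Tensor grid = at::zeros({N, H, W, 2});
  // Element-wise comparison between two IntArrayRef views, no heap allocation needed.
  assert(grid.sizes() == at::IntArrayRef({N, H, W, 2}));
  return 0;
}
```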
@@ -2,7 +2,7 @@

namespace at { namespace native {

-Tensor constant_pad_nd(const Tensor& self, IntList pad, Scalar value) {
+Tensor constant_pad_nd(const Tensor& self, IntArrayRef pad, Scalar value) {
AT_CHECK(pad.size() % 2 == 0, "Length of pad must be even but instead it equals ",
pad.size());
@@ -38,11 +38,11 @@ struct ConvParams {

std::ostream& operator<<(std::ostream & out, const ConvParams& params) {
out << "ConvParams {"
-<< " stride = " << IntList{params.stride}
-<< " padding = " << IntList{params.padding}
-<< " dilation = " << IntList{params.dilation}
+<< " stride = " << IntArrayRef{params.stride}
+<< " padding = " << IntArrayRef{params.padding}
+<< " dilation = " << IntArrayRef{params.dilation}
<< " transposed = " << params.transposed
-<< " output_padding = " << IntList{params.output_padding}
+<< " output_padding = " << IntArrayRef{params.output_padding}
<< " groups = " << params.groups
<< " benchmark = " << params.benchmark
<< " deterministic = " << params.deterministic
@@ -245,50 +245,50 @@ static at::Tensor subtensor(at::Tensor& tensor, int dim, int groups, int g) {

at::Tensor conv1d(
const Tensor& input, const Tensor& weight, const Tensor& bias,
-IntList stride, IntList padding, IntList dilation, int64_t groups) {
+IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups) {
return at::convolution(input, weight, bias, stride, padding, dilation,
false, {0}, groups);
}

at::Tensor conv2d(
const Tensor& input, const Tensor& weight, const Tensor& bias,
-IntList stride, IntList padding, IntList dilation, int64_t groups) {
+IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups) {
return at::convolution(input, weight, bias, stride, padding, dilation,
false, {{0, 0}}, groups);
}

at::Tensor conv3d(
const Tensor& input, const Tensor& weight, const Tensor& bias,
-IntList stride, IntList padding, IntList dilation, int64_t groups) {
+IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups) {
return at::convolution(input, weight, bias, stride, padding, dilation,
false, {{0, 0, 0}}, groups);
}

at::Tensor conv_transpose1d(
const Tensor& input, const Tensor& weight, const Tensor& bias,
-IntList stride, IntList padding, IntList output_padding, int64_t groups, IntList dilation) {
+IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t groups, IntArrayRef dilation) {
return at::convolution(input, weight, bias, stride, padding, dilation,
true, output_padding, groups);
}

at::Tensor conv_transpose2d(
const Tensor& input, const Tensor& weight, const Tensor& bias,
-IntList stride, IntList padding, IntList output_padding, int64_t groups, IntList dilation) {
+IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t groups, IntArrayRef dilation) {
return at::convolution(input, weight, bias, stride, padding, dilation,
true, output_padding, groups);
}

at::Tensor conv_transpose3d(
const Tensor& input, const Tensor& weight, const Tensor& bias,
-IntList stride, IntList padding, IntList output_padding, int64_t groups, IntList dilation) {
+IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t groups, IntArrayRef dilation) {
return at::convolution(input, weight, bias, stride, padding, dilation,
true, output_padding, groups);
}

at::Tensor convolution(
const Tensor& input, const Tensor& weight, const Tensor& bias,
-IntList stride, IntList padding, IntList dilation,
-bool transposed, IntList output_padding, int64_t groups) {
+IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation,
+bool transposed, IntArrayRef output_padding, int64_t groups) {
auto& ctx = at::globalContext();
return at::_convolution(input, weight, bias, stride, padding, dilation,
transposed, output_padding, groups,
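The wrappers above only forward their IntArrayRef arguments to at::convolution, so the public call pattern is unchanged by the rename. A minimal usage sketch follows (an illustration assuming an ATen/libtorch build; the shapes are arbitrary):

```cpp
// Hedged usage sketch for the conv wrappers above (assumes an ATen/libtorch build).
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor input  = at::randn({1, 3, 32, 32});   // N, C, H, W
  at::Tensor weight = at::randn({8, 3, 3, 3});     // out_channels, in_channels, kH, kW
  at::Tensor bias   = at::zeros({8});
  // stride, padding and dilation are all IntArrayRef parameters; braced lists bind to them directly.
  at::Tensor out = at::conv2d(input, weight, bias, /*stride=*/{1, 1},
                              /*padding=*/{1, 1}, /*dilation=*/{1, 1}, /*groups=*/1);
  std::cout << out.sizes() << std::endl;           // expected: [1, 8, 32, 32]
  return 0;
}
```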
@@ -296,7 +296,7 @@ at::Tensor convolution(
}

static inline std::vector<int64_t> convolution_expand_param_if_needed(
-IntList list_param, const char *param_name, int64_t expected_dim) {
+IntArrayRef list_param, const char *param_name, int64_t expected_dim) {
if (list_param.size() == 1) {
return std::vector<int64_t>(expected_dim, list_param[0]);
} else if ((int64_t) list_param.size() != expected_dim) {
@@ -312,8 +312,8 @@ static inline std::vector<int64_t> convolution_expand_param_if_needed(

at::Tensor _convolution(
const Tensor& input_r, const Tensor& weight_r, const Tensor& bias_r,
-IntList stride_, IntList padding_, IntList dilation_,
-bool transposed_, IntList output_padding_, int64_t groups_,
+IntArrayRef stride_, IntArrayRef padding_, IntArrayRef dilation_,
+bool transposed_, IntArrayRef output_padding_, int64_t groups_,
bool benchmark, bool deterministic, bool cudnn_enabled) {

auto input = input_r.contiguous();
@@ -430,8 +430,8 @@ at::Tensor _convolution(
// natively implement groups (e.g., not CuDNN).
at::Tensor _convolution_nogroup(
const Tensor& input, const Tensor& weight, const Tensor& bias,
-IntList stride, IntList padding, IntList dilation,
-bool transposed, IntList output_padding) {
+IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation,
+bool transposed, IntArrayRef output_padding) {

ConvParams params;
params.stride = stride.vec();
@@ -503,8 +503,8 @@ static Tensor subvariable(const Tensor& var, int dim, int groups, int g) {
std::tuple<Tensor,Tensor,Tensor> _convolution_double_backward(
const Tensor& ggI, const Tensor& ggW_r, const Tensor& ggb,
const Tensor& gO_r, const Tensor& weight_r, const Tensor& input,
-IntList stride_, IntList padding_, IntList dilation_,
-bool transposed_, IntList output_padding_, int64_t groups_,
+IntArrayRef stride_, IntArrayRef padding_, IntArrayRef dilation_,
+bool transposed_, IntArrayRef output_padding_, int64_t groups_,
bool benchmark, bool deterministic, bool cudnn_enabled,
std::array<bool, 3> output_mask) {
@@ -126,8 +126,8 @@ static void fractional_max_pool2d_out_frame(
void fractional_max_pool2d_out_cpu_template(
const at::Tensor& input_,
at::Tensor& output,
-IntList output_size,
-IntList pool_size,
+IntArrayRef output_size,
+IntArrayRef pool_size,
at::Tensor& indices,
const at::Tensor& randomSamples) {
@@ -256,8 +256,8 @@ Tensor& fractional_max_pool2d_backward_out_cpu_template(
const at::Tensor& input,
const at::Tensor& gradOutput_,
at::Tensor& gradInput,
-IntList output_size,
-IntList pool_size /* unused */,
+IntArrayRef output_size,
+IntArrayRef pool_size /* unused */,
const at::Tensor& indices) {

int numBatch = 1;
@@ -318,8 +318,8 @@ std::tuple<Tensor&, Tensor&> fractional_max_pool2d_out_cpu(
at::Tensor& output,
at::Tensor& indices,
const at::Tensor& input,
-IntList pool_size,
-IntList output_size,
+IntArrayRef pool_size,
+IntArrayRef output_size,
const at::Tensor& randomSamples)
{
fractional_max_pool2d_out_cpu_template(
@@ -334,8 +334,8 @@ std::tuple<Tensor&, Tensor&> fractional_max_pool2d_out_cpu(

std::tuple<Tensor, Tensor> fractional_max_pool2d_cpu(
const at::Tensor& input,
-IntList pool_size,
-IntList output_size,
+IntArrayRef pool_size,
+IntArrayRef output_size,
const at::Tensor& randomSamples)
{
Tensor output = at::empty({0}, input.options());
@@ -354,8 +354,8 @@ Tensor& fractional_max_pool2d_backward_out_cpu(
at::Tensor& gradInput,
const at::Tensor& gradOutput_,
const at::Tensor& input,
-IntList pool_size,
-IntList output_size,
+IntArrayRef pool_size,
+IntArrayRef output_size,
const at::Tensor& indices)
{
gradInput.resize_as_(input);
@@ -372,8 +372,8 @@ Tensor& fractional_max_pool2d_backward_out_cpu(
Tensor fractional_max_pool2d_backward_cpu(
const at::Tensor& gradOutput_,
const at::Tensor& input,
-IntList pool_size,
-IntList output_size,
+IntArrayRef pool_size,
+IntArrayRef output_size,
const at::Tensor& indices)
{
Tensor gradInput = at::empty({0}, input.options());
@@ -141,8 +141,8 @@ void fractional_max_pool3d_out_cpu_template(
Tensor& output,
Tensor& indices,
const Tensor& input_,
-IntList pool_size,
-IntList output_size,
+IntArrayRef pool_size,
+IntArrayRef output_size,
const Tensor& randomSamples) {

int64_t outputT = output_size[0];
@@ -284,8 +284,8 @@ void fractional_max_pool3d_backward_out_cpu_template(
const Tensor& input,
const Tensor& gradOutput_,
Tensor& gradInput,
-IntList output_size,
-IntList pool_size /* unused */,
+IntArrayRef output_size,
+IntArrayRef pool_size /* unused */,
const Tensor& indices) {

int64_t outputT = output_size[0];
@@ -351,8 +351,8 @@ std::tuple<Tensor&, Tensor&> fractional_max_pool3d_out_cpu(
at::Tensor& output,
at::Tensor& indices,
const at::Tensor& input,
-IntList pool_size,
-IntList output_size,
+IntArrayRef pool_size,
+IntArrayRef output_size,
const at::Tensor& randomSamples) {
fractional_max_pool3d_out_cpu_template(
output,
@@ -366,8 +366,8 @@ std::tuple<Tensor&, Tensor&> fractional_max_pool3d_out_cpu(

std::tuple<Tensor, Tensor> fractional_max_pool3d_cpu(
const at::Tensor& input,
-IntList pool_size,
-IntList output_size,
+IntArrayRef pool_size,
+IntArrayRef output_size,
const at::Tensor& randomSamples) {
Tensor output = at::empty(output_size, input.options());
Tensor indices = at::empty(output_size, at::kLong);
@@ -385,8 +385,8 @@ Tensor& fractional_max_pool3d_backward_out_cpu(
at::Tensor& gradInput,
const at::Tensor& gradOutput_,
const at::Tensor& input,
-IntList pool_size,
-IntList output_size,
+IntArrayRef pool_size,
+IntArrayRef output_size,
const at::Tensor& indices) {
fractional_max_pool3d_backward_out_cpu_template(
input,
@@ -401,8 +401,8 @@ Tensor& fractional_max_pool3d_backward_out_cpu(
Tensor fractional_max_pool3d_backward_cpu(
const at::Tensor& gradOutput_,
const at::Tensor& input,
-IntList pool_size,
-IntList output_size,
+IntArrayRef pool_size,
+IntArrayRef output_size,
const at::Tensor& indices) {
Tensor gradInput = at::empty({0}, input.options());
fractional_max_pool3d_backward_out_cpu_template(
@@ -301,7 +301,7 @@ struct AdvancedIndex {
// values and the stride of src. The new shape is not meaningful. It's used to make
// the shape compatible with the result tensor.
static Tensor restride_src(const Tensor& src, int64_t dims_before, int64_t dims_indexed,
-IntList replacement_shape) {
+IntArrayRef replacement_shape) {
auto shape = DimVector(src.sizes());
auto strides = DimVector(src.strides());
int64_t end = dims_before + dims_indexed;
@@ -327,7 +327,7 @@ AdvancedIndex::AdvancedIndex(const Tensor& src, TensorList indices_list)
{
int64_t element_size_bytes = src.type().elementSizeInBytes();
int dims_before = 0, dims_after = 0, dims_indexed = 0;
-IntList replacement_shape;
+IntArrayRef replacement_shape;
for (size_t dim = 0; dim < indices_list.size(); dim++) {
if (!indices_list[dim].defined()) {
if (dims_indexed == 0) {
@@ -11,8 +11,8 @@ namespace at {

namespace at { namespace native {

-using index_fn = void(*)(TensorIterator &, IntList indexed_sizes, IntList indexed_strides);
-using index_put_fn = void(*)(TensorIterator &, IntList indexed_sizes, IntList indexed_strides, bool accumulate);
+using index_fn = void(*)(TensorIterator &, IntArrayRef indexed_sizes, IntArrayRef indexed_strides);
+using index_put_fn = void(*)(TensorIterator &, IntArrayRef indexed_sizes, IntArrayRef indexed_strides, bool accumulate);

DECLARE_DISPATCH(index_fn, index_stub);
DECLARE_DISPATCH(index_put_fn, index_put_stub);
@@ -14,7 +14,7 @@ Tensor & set_(Tensor& self, Storage source) {
return at::legacy::th::_th_set_(self, source);
}

-Tensor & set_(Tensor& self, Storage source, int64_t storage_offset, IntList size, IntList stride) {
+Tensor & set_(Tensor& self, Storage source, int64_t storage_offset, IntArrayRef size, IntArrayRef stride) {
return at::legacy::th::_th_set_(self, source, storage_offset, size, stride);
}
@@ -42,7 +42,7 @@ Tensor & masked_scatter_(Tensor& self, const Tensor & mask, const Tensor & sourc
return at::legacy::th::_th_masked_scatter_(self, mask, source);
}

-Tensor view(const Tensor& self, IntList size) {
+Tensor view(const Tensor& self, IntArrayRef size) {
return at::legacy::th::_th_view(self, size);
}
@@ -332,11 +332,11 @@ Tensor softshrink_backward(const Tensor & grad_output, const Tensor & self, Scal
return at::legacy::th::_thnn_softshrink_backward(grad_output, self, lambd);
}

-Tensor & adaptive_avg_pool3d_out(Tensor & output, const Tensor & self, IntList output_size) {
+Tensor & adaptive_avg_pool3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) {
return at::legacy::th::_thnn_adaptive_avg_pool3d_forward_out(output, self, output_size);
}

-Tensor adaptive_avg_pool3d(const Tensor & self, IntList output_size) {
+Tensor adaptive_avg_pool3d(const Tensor & self, IntArrayRef output_size) {
return at::legacy::th::_thnn_adaptive_avg_pool3d_forward(self, output_size);
}
@@ -348,11 +348,11 @@ Tensor adaptive_avg_pool3d_backward(const Tensor & grad_output, const Tensor & s
return at::legacy::th::_thnn_adaptive_avg_pool3d_backward(grad_output, self);
}

-std::tuple<Tensor &,Tensor &> adaptive_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntList output_size) {
+std::tuple<Tensor &,Tensor &> adaptive_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef output_size) {
return at::legacy::th::_thnn_adaptive_max_pool2d_forward_out(output, indices, self, output_size);
}

-std::tuple<Tensor,Tensor> adaptive_max_pool2d(const Tensor & self, IntList output_size) {
+std::tuple<Tensor,Tensor> adaptive_max_pool2d(const Tensor & self, IntArrayRef output_size) {
return at::legacy::th::_thnn_adaptive_max_pool2d_forward(self, output_size);
}
@@ -364,11 +364,11 @@ Tensor adaptive_max_pool2d_backward(const Tensor & grad_output, const Tensor & s
return at::legacy::th::_thnn_adaptive_max_pool2d_backward(grad_output, self, indices);
}

-std::tuple<Tensor &,Tensor &> adaptive_max_pool3d_out(Tensor & output, Tensor & indices, const Tensor & self, IntList output_size) {
+std::tuple<Tensor &,Tensor &> adaptive_max_pool3d_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef output_size) {
return at::legacy::th::_thnn_adaptive_max_pool3d_forward_out(output, indices, self, output_size);
}

-std::tuple<Tensor,Tensor> adaptive_max_pool3d(const Tensor & self, IntList output_size) {
+std::tuple<Tensor,Tensor> adaptive_max_pool3d(const Tensor & self, IntArrayRef output_size) {
return at::legacy::th::_thnn_adaptive_max_pool3d_forward(self, output_size);
}
@@ -380,211 +380,211 @@ Tensor adaptive_max_pool3d_backward(const Tensor & grad_output, const Tensor & s
return at::legacy::th::_thnn_adaptive_max_pool3d_backward(grad_output, self, indices);
}

-Tensor & avg_pool2d_out(Tensor & output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
+Tensor & avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
return at::legacy::th::_thnn_avg_pool2d_forward_out(output, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}

-Tensor avg_pool2d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
+Tensor avg_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
return at::legacy::th::_thnn_avg_pool2d_forward(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}

-Tensor & avg_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
+Tensor & avg_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
return at::legacy::th::_thnn_avg_pool2d_backward_out(grad_input, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}

-Tensor avg_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
+Tensor avg_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
return at::legacy::th::_thnn_avg_pool2d_backward(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}

-Tensor & avg_pool3d_out(Tensor & output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
+Tensor & avg_pool3d_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
return at::legacy::th::_thnn_avg_pool3d_forward_out(output, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}

-Tensor avg_pool3d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
+Tensor avg_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
return at::legacy::th::_thnn_avg_pool3d_forward(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}

-Tensor & avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
+Tensor & avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
return at::legacy::th::_thnn_avg_pool3d_backward_out(grad_input, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}

-Tensor avg_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
+Tensor avg_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
return at::legacy::th::_thnn_avg_pool3d_backward(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}

-std::tuple<Tensor &,Tensor &> max_pool2d_with_indices_out(Tensor & output, Tensor & indices, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) {
+std::tuple<Tensor &,Tensor &> max_pool2d_with_indices_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
return at::legacy::th::_thnn_max_pool2d_with_indices_forward_out(output, indices, self, kernel_size, stride, padding, dilation, ceil_mode);
}

-std::tuple<Tensor,Tensor> max_pool2d_with_indices(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) {
+std::tuple<Tensor,Tensor> max_pool2d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
return at::legacy::th::_thnn_max_pool2d_with_indices_forward(self, kernel_size, stride, padding, dilation, ceil_mode);
}

-Tensor & max_pool2d_with_indices_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode, const Tensor & indices) {
+Tensor & max_pool2d_with_indices_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices) {
return at::legacy::th::_thnn_max_pool2d_with_indices_backward_out(grad_input, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
}

-Tensor max_pool2d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode, const Tensor & indices) {
+Tensor max_pool2d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices) {
return at::legacy::th::_thnn_max_pool2d_with_indices_backward(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
}

-std::tuple<Tensor &,Tensor &> max_pool3d_with_indices_out(Tensor & output, Tensor & indices, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) {
+std::tuple<Tensor &,Tensor &> max_pool3d_with_indices_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
return at::legacy::th::_thnn_max_pool3d_with_indices_forward_out(output, indices, self, kernel_size, stride, padding, dilation, ceil_mode);
}

-std::tuple<Tensor,Tensor> max_pool3d_with_indices(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) {
+std::tuple<Tensor,Tensor> max_pool3d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
return at::legacy::th::_thnn_max_pool3d_with_indices_forward(self, kernel_size, stride, padding, dilation, ceil_mode);
}

-Tensor & max_pool3d_with_indices_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode, const Tensor & indices) {
+Tensor & max_pool3d_with_indices_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices) {
return at::legacy::th::_thnn_max_pool3d_with_indices_backward_out(grad_input, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
}

-Tensor max_pool3d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode, const Tensor & indices) {
+Tensor max_pool3d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices) {
return at::legacy::th::_thnn_max_pool3d_with_indices_backward(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
}

-Tensor & max_unpool2d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntList output_size) {
+Tensor & max_unpool2d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) {
return at::legacy::th::_thnn_max_unpool2d_forward_out(output, self, indices, output_size);
}

-Tensor max_unpool2d(const Tensor & self, const Tensor & indices, IntList output_size) {
+Tensor max_unpool2d(const Tensor & self, const Tensor & indices, IntArrayRef output_size) {
return at::legacy::th::_thnn_max_unpool2d_forward(self, indices, output_size);
}

-Tensor & max_unpool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntList output_size) {
+Tensor & max_unpool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) {
return at::legacy::th::_thnn_max_unpool2d_backward_out(grad_input, grad_output, self, indices, output_size);
}

-Tensor max_unpool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntList output_size) {
+Tensor max_unpool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) {
return at::legacy::th::_thnn_max_unpool2d_backward(grad_output, self, indices, output_size);
}

-Tensor & max_unpool3d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) {
+Tensor & max_unpool3d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) {
return at::legacy::th::_thnn_max_unpool3d_forward_out(output, self, indices, output_size, stride, padding);
}

-Tensor max_unpool3d(const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) {
+Tensor max_unpool3d(const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) {
return at::legacy::th::_thnn_max_unpool3d_forward(self, indices, output_size, stride, padding);
}

-Tensor & max_unpool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) {
+Tensor & max_unpool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) {
return at::legacy::th::_thnn_max_unpool3d_backward_out(grad_input, grad_output, self, indices, output_size, stride, padding);
}

-Tensor max_unpool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) {
+Tensor max_unpool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) {
return at::legacy::th::_thnn_max_unpool3d_backward(grad_output, self, indices, output_size, stride, padding);
}

-Tensor & upsample_linear1d_out(Tensor & output, const Tensor & self, IntList output_size, bool align_corners) {
+Tensor & upsample_linear1d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) {
return at::legacy::th::_thnn_upsample_linear1d_forward_out(output, self, output_size, align_corners);
}

-Tensor upsample_linear1d(const Tensor & self, IntList output_size, bool align_corners) {
+Tensor upsample_linear1d(const Tensor & self, IntArrayRef output_size, bool align_corners) {
return at::legacy::th::_thnn_upsample_linear1d_forward(self, output_size, align_corners);
}

-Tensor & upsample_linear1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
+Tensor & upsample_linear1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) {
return at::legacy::th::_thnn_upsample_linear1d_backward_out(grad_input, grad_output, output_size, input_size, align_corners);
}

-Tensor upsample_linear1d_backward(const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
+Tensor upsample_linear1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) {
return at::legacy::th::_thnn_upsample_linear1d_backward(grad_output, output_size, input_size, align_corners);
}

-Tensor & upsample_bilinear2d_out(Tensor & output, const Tensor & self, IntList output_size, bool align_corners) {
+Tensor & upsample_bilinear2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) {
return at::legacy::th::_thnn_upsample_bilinear2d_forward_out(output, self, output_size, align_corners);
}

-Tensor upsample_bilinear2d(const Tensor & self, IntList output_size, bool align_corners) {
+Tensor upsample_bilinear2d(const Tensor & self, IntArrayRef output_size, bool align_corners) {
return at::legacy::th::_thnn_upsample_bilinear2d_forward(self, output_size, align_corners);
}

-Tensor & upsample_bilinear2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
+Tensor & upsample_bilinear2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) {
return at::legacy::th::_thnn_upsample_bilinear2d_backward_out(grad_input, grad_output, output_size, input_size, align_corners);
}

-Tensor upsample_bilinear2d_backward(const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
+Tensor upsample_bilinear2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) {
return at::legacy::th::_thnn_upsample_bilinear2d_backward(grad_output, output_size, input_size, align_corners);
}

-Tensor & upsample_bicubic2d_out(Tensor & output, const Tensor & self, IntList output_size, bool align_corners) {
+Tensor & upsample_bicubic2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) {
return at::legacy::th::_thnn_upsample_bicubic2d_forward_out(output, self, output_size, align_corners);
}

-Tensor upsample_bicubic2d(const Tensor & self, IntList output_size, bool align_corners) {
+Tensor upsample_bicubic2d(const Tensor & self, IntArrayRef output_size, bool align_corners) {
return at::legacy::th::_thnn_upsample_bicubic2d_forward(self, output_size, align_corners);
}

-Tensor & upsample_bicubic2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
+Tensor & upsample_bicubic2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) {
return at::legacy::th::_thnn_upsample_bicubic2d_backward_out(grad_input, grad_output, output_size, input_size, align_corners);
}

-Tensor upsample_bicubic2d_backward(const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
+Tensor upsample_bicubic2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) {
return at::legacy::th::_thnn_upsample_bicubic2d_backward(grad_output, output_size, input_size, align_corners);
}

-Tensor & upsample_trilinear3d_out(Tensor & output, const Tensor & self, IntList output_size, bool align_corners) {
+Tensor & upsample_trilinear3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) {
return at::legacy::th::_thnn_upsample_trilinear3d_forward_out(output, self, output_size, align_corners);
}

-Tensor upsample_trilinear3d(const Tensor & self, IntList output_size, bool align_corners) {
+Tensor upsample_trilinear3d(const Tensor & self, IntArrayRef output_size, bool align_corners) {
return at::legacy::th::_thnn_upsample_trilinear3d_forward(self, output_size, align_corners);
}

-Tensor & upsample_trilinear3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
+Tensor & upsample_trilinear3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) {
return at::legacy::th::_thnn_upsample_trilinear3d_backward_out(grad_input, grad_output, output_size, input_size, align_corners);
}

-Tensor upsample_trilinear3d_backward(const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
+Tensor upsample_trilinear3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) {
return at::legacy::th::_thnn_upsample_trilinear3d_backward(grad_output, output_size, input_size, align_corners);
}

-Tensor & upsample_nearest1d_out(Tensor & output, const Tensor & self, IntList output_size) {
+Tensor & upsample_nearest1d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) {
return at::legacy::th::_thnn_upsample_nearest1d_forward_out(output, self, output_size);
}

-Tensor upsample_nearest1d(const Tensor & self, IntList output_size) {
+Tensor upsample_nearest1d(const Tensor & self, IntArrayRef output_size) {
return at::legacy::th::_thnn_upsample_nearest1d_forward(self, output_size);
}

-Tensor & upsample_nearest1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size) {
+Tensor & upsample_nearest1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) {
return at::legacy::th::_thnn_upsample_nearest1d_backward_out(grad_input, grad_output, output_size, input_size);
}

-Tensor upsample_nearest1d_backward(const Tensor & grad_output, IntList output_size, IntList input_size) {
+Tensor upsample_nearest1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) {
return at::legacy::th::_thnn_upsample_nearest1d_backward(grad_output, output_size, input_size);
}

-Tensor & upsample_nearest2d_out(Tensor & output, const Tensor & self, IntList output_size) {
+Tensor & upsample_nearest2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) {
return at::legacy::th::_thnn_upsample_nearest2d_forward_out(output, self, output_size);
}

-Tensor upsample_nearest2d(const Tensor & self, IntList output_size) {
+Tensor upsample_nearest2d(const Tensor & self, IntArrayRef output_size) {
return at::legacy::th::_thnn_upsample_nearest2d_forward(self, output_size);
}

-Tensor & upsample_nearest2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size) {
+Tensor & upsample_nearest2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) {
return at::legacy::th::_thnn_upsample_nearest2d_backward_out(grad_input, grad_output, output_size, input_size);
}

-Tensor upsample_nearest2d_backward(const Tensor & grad_output, IntList output_size, IntList input_size) {
+Tensor upsample_nearest2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) {
return at::legacy::th::_thnn_upsample_nearest2d_backward(grad_output, output_size, input_size);
}

-Tensor & upsample_nearest3d_out(Tensor & output, const Tensor & self, IntList output_size) {
+Tensor & upsample_nearest3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) {
return at::legacy::th::_thnn_upsample_nearest3d_forward_out(output, self, output_size);
}

-Tensor upsample_nearest3d(const Tensor & self, IntList output_size) {
+Tensor upsample_nearest3d(const Tensor & self, IntArrayRef output_size) {
return at::legacy::th::_thnn_upsample_nearest3d_forward(self, output_size);
}

-Tensor & upsample_nearest3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size) {
+Tensor & upsample_nearest3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) {
return at::legacy::th::_thnn_upsample_nearest3d_backward_out(grad_input, grad_output, output_size, input_size);
}

-Tensor upsample_nearest3d_backward(const Tensor & grad_output, IntList output_size, IntList input_size) {
+Tensor upsample_nearest3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) {
return at::legacy::th::_thnn_upsample_nearest3d_backward(grad_output, output_size, input_size);
}
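All of the legacy NN entry points above keep their geometry parameters as IntArrayRef, so calls with braced lists behave exactly as before the rename. A small sketch of two of them, for illustration only and assuming an ATen/libtorch build:

```cpp
// Hedged usage sketch for the pooling/upsampling wrappers above (assumes ATen).
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor x = at::randn({1, 3, 16, 16});
  // adaptive_avg_pool2d takes an IntArrayRef output_size.
  at::Tensor pooled = at::adaptive_avg_pool2d(x, {4, 4});
  // max_pool2d_with_indices takes IntArrayRef kernel_size/stride/padding/dilation.
  auto pooled_and_idx = at::max_pool2d_with_indices(x, /*kernel_size=*/{2, 2},
                                                    /*stride=*/{2, 2});
  std::cout << pooled.sizes() << " "
            << std::get<0>(pooled_and_idx).sizes() << std::endl;  // [1, 3, 4, 4] [1, 3, 8, 8]
  return 0;
}
```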
@@ -604,215 +604,215 @@ Tensor tanh_backward(const Tensor & grad_output, const Tensor & output) {
return at::legacy::th::_thnn_tanh_backward(grad_output, output);
}

-Tensor & thnn_conv_transpose2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) {
+Tensor & thnn_conv_transpose2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) {
Tensor columns = at::empty({0}, self.options());
Tensor ones = at::empty({0}, self.options());
return std::get<0>(at::thnn_conv_transpose2d_forward_out(output, columns, ones, self, weight, kernel_size, bias, stride, padding, output_padding, dilation));
}

-Tensor thnn_conv_transpose2d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) {
+Tensor thnn_conv_transpose2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) {
return std::get<0>(at::thnn_conv_transpose2d_forward(self, weight, kernel_size, bias, stride, padding, output_padding, dilation));
}

-std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) {
+std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) {
return at::legacy::th::_thnn_conv_transpose2d_forward_out(output, columns, ones, self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
}

-std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) {
+std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) {
return at::legacy::th::_thnn_conv_transpose2d_forward(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
}

-std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList output_padding, IntList dilation, const Tensor & columns, const Tensor & ones) {
+std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones) {
return at::legacy::th::_thnn_conv_transpose2d_backward_out(grad_input, grad_weight, grad_bias, grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, columns, ones);
}

-std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList output_padding, IntList dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) {
+std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) {
return at::legacy::th::_thnn_conv_transpose2d_backward(grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, columns, ones, output_mask);
}

-Tensor & thnn_conv_transpose3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) {
+Tensor & thnn_conv_transpose3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) {
Tensor finput = at::empty({0}, self.options());
Tensor fgrad_input = at::empty({0}, self.options());
return std::get<0>(at::thnn_conv_transpose3d_forward_out(output, finput, fgrad_input, self, weight, kernel_size, bias, stride, padding, output_padding, dilation));
}

-Tensor thnn_conv_transpose3d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) {
+Tensor thnn_conv_transpose3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) {
return std::get<0>(at::thnn_conv_transpose3d_forward(self, weight, kernel_size, bias, stride, padding, output_padding, dilation));
}

-std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) {
+std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) {
return at::legacy::th::_thnn_conv_transpose3d_forward_out(output, finput, fgrad_input, self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
}

-std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose3d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) {
+std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) {
return at::legacy::th::_thnn_conv_transpose3d_forward(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
}

-std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList output_padding, IntList dilation, const Tensor & finput, const Tensor & fgrad_input) {
+std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input) {
return at::legacy::th::_thnn_conv_transpose3d_backward_out(grad_input, grad_weight, grad_bias, grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, finput, fgrad_input);
}

-std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList output_padding, IntList dilation, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) {
+std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) {
return at::legacy::th::_thnn_conv_transpose3d_backward(grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, finput, fgrad_input, output_mask);
}

-Tensor & thnn_conv2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) {
+Tensor & thnn_conv2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) {
Tensor finput = at::empty({0}, self.options());
Tensor fgrad_input = at::empty({0}, self.options());
return std::get<0>(at::thnn_conv2d_forward_out(output, finput, fgrad_input, self, weight, kernel_size, bias, stride, padding));
}

-Tensor thnn_conv2d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) {
+Tensor thnn_conv2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) {
return std::get<0>(at::thnn_conv2d_forward(self, weight, kernel_size, bias, stride, padding));
}

-std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) {
+std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) {
return at::legacy::th::_thnn_conv2d_forward_out(output, finput, fgrad_input, self, weight, kernel_size, bias, stride, padding);
}

-std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) {
+std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) {
return at::legacy::th::_thnn_conv2d_forward(self, weight, kernel_size, bias, stride, padding);
}

-std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, const Tensor & finput, const Tensor & fgrad_input) {
+std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input) {
return at::legacy::th::_thnn_conv2d_backward_out(grad_input, grad_weight, grad_bias, grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input);
}

-std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) {
+std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) {
return at::legacy::th::_thnn_conv2d_backward(grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input, output_mask);
}

-Tensor & thnn_conv3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) {
+Tensor & thnn_conv3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) {
Tensor finput = at::empty({0}, self.options());
Tensor fgrad_input = at::empty({0}, self.options());
return std::get<0>(at::thnn_conv3d_forward_out(output, finput, fgrad_input, self, weight, kernel_size, bias, stride, padding));
}

-Tensor & thnn_conv_depthwise2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) {
|
||||
Tensor & thnn_conv_depthwise2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
|
||||
return at::thnn_conv_depthwise2d_forward_out(output, self, weight, kernel_size, bias, stride, padding, dilation);
|
||||
}
|
||||
|
||||
Tensor thnn_conv_depthwise2d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) {
|
||||
Tensor thnn_conv_depthwise2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
|
||||
return at::thnn_conv_depthwise2d_forward(self, weight, kernel_size, bias, stride, padding, dilation);
|
||||
}
|
||||
|
||||
Tensor & thnn_conv_depthwise2d_forward_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) {
|
||||
Tensor & thnn_conv_depthwise2d_forward_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
|
||||
return at::legacy::th::_thnn_conv_depthwise2d_forward_out(output, self, weight, kernel_size, bias, stride, padding, dilation);
|
||||
}
|
||||
|
||||
Tensor thnn_conv_depthwise2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) {
|
||||
Tensor thnn_conv_depthwise2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
|
||||
return at::legacy::th::_thnn_conv_depthwise2d_forward(self, weight, kernel_size, bias, stride, padding, dilation);
|
||||
}
|
||||
|
||||
std::tuple<Tensor &,Tensor &> thnn_conv_depthwise2d_backward_out(Tensor & grad_input, Tensor & grad_weight, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation) {
|
||||
std::tuple<Tensor &,Tensor &> thnn_conv_depthwise2d_backward_out(Tensor & grad_input, Tensor & grad_weight, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
|
||||
return at::legacy::th::_thnn_conv_depthwise2d_backward_out(grad_input, grad_weight, grad_output, self, weight, kernel_size, stride, padding, dilation);
|
||||
}
|
||||
|
||||
std::tuple<Tensor,Tensor> thnn_conv_depthwise2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, std::array<bool,2> output_mask) {
|
||||
std::tuple<Tensor,Tensor> thnn_conv_depthwise2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool,2> output_mask) {
|
||||
return at::legacy::th::_thnn_conv_depthwise2d_backward(grad_output, self, weight, kernel_size, stride, padding, dilation, output_mask);
|
||||
}
|
||||
|
||||
Tensor thnn_conv3d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) {
|
||||
Tensor thnn_conv3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) {
|
||||
return std::get<0>(at::thnn_conv3d_forward(self, weight, kernel_size, bias, stride, padding));
|
||||
}
|
||||
|
||||
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) {
|
||||
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) {
|
||||
return at::legacy::th::_thnn_conv3d_forward_out(output, finput, fgrad_input, self, weight, kernel_size, bias, stride, padding);
|
||||
}
|
||||
|
||||
std::tuple<Tensor,Tensor,Tensor> thnn_conv3d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) {
|
||||
std::tuple<Tensor,Tensor,Tensor> thnn_conv3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) {
|
||||
return at::legacy::th::_thnn_conv3d_forward(self, weight, kernel_size, bias, stride, padding);
|
||||
}
|
||||
|
||||
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, const Tensor & finput, const Tensor & fgrad_input) {
|
||||
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input) {
|
||||
return at::legacy::th::_thnn_conv3d_backward_out(grad_input, grad_weight, grad_bias, grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input);
|
||||
}
|
||||
|
||||
std::tuple<Tensor,Tensor,Tensor> thnn_conv3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) {
|
||||
std::tuple<Tensor,Tensor,Tensor> thnn_conv3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) {
|
||||
return at::legacy::th::_thnn_conv3d_backward(grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input, output_mask);
|
||||
}
|
||||
|
||||
Tensor & thnn_conv_dilated2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) {
|
||||
Tensor & thnn_conv_dilated2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
|
||||
Tensor columns = at::empty({0}, self.options());
|
||||
Tensor ones = at::empty({0}, self.options());
|
||||
return std::get<0>(at::thnn_conv_dilated2d_forward_out(output, columns, ones, self, weight, kernel_size, bias, stride, padding, dilation));
|
||||
}
|
||||
|
||||
Tensor thnn_conv_dilated2d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) {
|
||||
Tensor thnn_conv_dilated2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
|
||||
return std::get<0>(at::thnn_conv_dilated2d_forward(self, weight, kernel_size, bias, stride, padding, dilation));
|
||||
}
|
||||
|
||||
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) {
|
||||
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
|
||||
return at::legacy::th::_thnn_conv_dilated2d_forward_out(output, columns, ones, self, weight, kernel_size, bias, stride, padding, dilation);
|
||||
}
|
||||
|
||||
std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) {
|
||||
std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
|
||||
return at::legacy::th::_thnn_conv_dilated2d_forward(self, weight, kernel_size, bias, stride, padding, dilation);
|
||||
}
|
||||
|
||||
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, const Tensor & columns, const Tensor & ones) {
|
||||
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones) {
|
||||
return at::legacy::th::_thnn_conv_dilated2d_backward_out(grad_input, grad_weight, grad_bias, grad_output, self, weight, kernel_size, stride, padding, dilation, columns, ones);
|
||||
}
|
||||
|
||||
std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) {
|
||||
std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) {
|
||||
return at::legacy::th::_thnn_conv_dilated2d_backward(grad_output, self, weight, kernel_size, stride, padding, dilation, columns, ones, output_mask);
|
||||
}
|
||||
|
||||
Tensor & thnn_conv_dilated3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) {
|
||||
Tensor & thnn_conv_dilated3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
|
||||
Tensor columns = at::empty({0}, self.options());
|
||||
Tensor ones = at::empty({0}, self.options());
|
||||
return std::get<0>(at::thnn_conv_dilated3d_forward_out(output, columns, ones, self, weight, kernel_size, bias, stride, padding, dilation));
|
||||
}
|
||||
|
||||
Tensor thnn_conv_dilated3d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) {
|
||||
Tensor thnn_conv_dilated3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
|
||||
return std::get<0>(at::thnn_conv_dilated3d_forward(self, weight, kernel_size, bias, stride, padding, dilation));
|
||||
}
|
||||
|
||||
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated3d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) {
|
||||
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated3d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
|
||||
return at::legacy::th::_thnn_conv_dilated3d_forward_out(output, columns, ones, self, weight, kernel_size, bias, stride, padding, dilation);
|
||||
}
|
||||
|
||||
std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated3d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) {
|
||||
std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
|
||||
return at::legacy::th::_thnn_conv_dilated3d_forward(self, weight, kernel_size, bias, stride, padding, dilation);
|
||||
}
|
||||
|
||||
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, const Tensor & columns, const Tensor & ones) {
|
||||
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones) {
|
||||
return at::legacy::th::_thnn_conv_dilated3d_backward_out(grad_input, grad_weight, grad_bias, grad_output, self, weight, kernel_size, stride, padding, dilation, columns, ones);
|
||||
}
|
||||
|
||||
std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) {
|
||||
std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) {
|
||||
return at::legacy::th::_thnn_conv_dilated3d_backward(grad_output, self, weight, kernel_size, stride, padding, dilation, columns, ones, output_mask);
|
||||
}
|
||||
|
||||
Tensor thnn_col2im(const Tensor & self, IntList output_size, IntList kernel_size, IntList dilation, IntList padding, IntList stride) {
|
||||
Tensor thnn_col2im(const Tensor & self, IntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) {
|
||||
return at::legacy::th::_thnn_col2im_forward(self, output_size, kernel_size, dilation, padding, stride);
|
||||
}
|
||||
|
||||
Tensor & thnn_col2im_out(Tensor & output, const Tensor & self, IntList output_size, IntList kernel_size, IntList dilation, IntList padding, IntList stride) {
|
||||
Tensor & thnn_col2im_out(Tensor & output, const Tensor & self, IntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) {
|
||||
return at::legacy::th::_thnn_col2im_forward_out(output, self, output_size, kernel_size, dilation, padding, stride);
|
||||
}
|
||||
|
||||
Tensor thnn_col2im_backward(const Tensor & grad_output, IntList kernel_size, IntList dilation, IntList padding, IntList stride) {
|
||||
Tensor thnn_col2im_backward(const Tensor & grad_output, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) {
|
||||
return at::legacy::th::_thnn_col2im_backward(grad_output, kernel_size, dilation, padding, stride);
|
||||
}
|
||||
|
||||
Tensor & thnn_col2im_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList kernel_size, IntList dilation, IntList padding, IntList stride) {
|
||||
Tensor & thnn_col2im_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) {
|
||||
return at::legacy::th::_thnn_col2im_backward_out(grad_input, grad_output, kernel_size, dilation, padding, stride);
|
||||
}
|
||||
|
||||
Tensor thnn_im2col(const Tensor & self, IntList kernel_size, IntList dilation, IntList padding, IntList stride) {
|
||||
Tensor thnn_im2col(const Tensor & self, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) {
|
||||
return at::legacy::th::_thnn_im2col_forward(self, kernel_size, dilation, padding, stride);
|
||||
}
|
||||
|
||||
Tensor & thnn_im2col_out(Tensor & output, const Tensor & self, IntList kernel_size, IntList dilation, IntList padding, IntList stride) {
|
||||
Tensor & thnn_im2col_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) {
|
||||
return at::legacy::th::_thnn_im2col_forward_out(output, self, kernel_size, dilation, padding, stride);
|
||||
}
|
||||
|
||||
Tensor thnn_im2col_backward(const Tensor & grad_output, IntList input_size, IntList kernel_size, IntList dilation, IntList padding, IntList stride) {
|
||||
Tensor thnn_im2col_backward(const Tensor & grad_output, IntArrayRef input_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) {
|
||||
return at::legacy::th::_thnn_im2col_backward(grad_output, input_size, kernel_size, dilation, padding, stride);
|
||||
}
|
||||
|
||||
Tensor & thnn_im2col_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList input_size, IntList kernel_size, IntList dilation, IntList padding, IntList stride) {
|
||||
Tensor & thnn_im2col_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef input_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) {
|
||||
return at::legacy::th::_thnn_im2col_backward_out(grad_input, grad_output, input_size, kernel_size, dilation, padding, stride);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
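
All of the wrappers above change only the declared parameter type, so existing call sites keep working: braced initializer lists and std::vector<int64_t> convert to IntArrayRef exactly as they did to IntList. A minimal sketch of such a call site, assuming an ATen build of this vintage where at::thnn_conv2d is exposed with the signature shown above (shapes are made up for illustration):

```
#include <ATen/ATen.h>

int main() {
  at::Tensor input  = at::randn({1, 3, 8, 8});
  at::Tensor weight = at::randn({4, 3, 3, 3});
  at::Tensor bias   = at::randn({4});
  // kernel_size, stride and padding all bind to IntArrayRef; a braced list
  // or a std::vector<int64_t> converts implicitly, just as it did for IntList.
  at::Tensor out = at::thnn_conv2d(input, weight, /*kernel_size=*/{3, 3}, bias,
                                   /*stride=*/{1, 1}, /*padding=*/{0, 0});
  return 0;
}
```
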
@@ -26,7 +26,7 @@ Tensor linear(const Tensor& input, const Tensor& weight, const Tensor& bias) {
// sumproduct_pair computes `(left*right).sum(sumdims)` by means of permutation and
// batch matrix multiplication
// its main purpose is to provide a pairwise reduction for einsum
-static Tensor sumproduct_pair(const Tensor& left_, const Tensor& right_, IntList sum_dims_, bool keepdim) {
+static Tensor sumproduct_pair(const Tensor& left_, const Tensor& right_, IntArrayRef sum_dims_, bool keepdim) {
  // assumes that tensors have been pre-unsqueezed (so that all dimensions match - after broadcasting)
  // but makes no other assumptions on the order of dimensions
  AT_CHECK(left_.dim()==right_.dim(), "number of dimensions must match");
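
The comment above fixes the semantics of sumproduct_pair; under that reading, a naive reference version (ignoring the permute-and-bmm optimization the real code performs) is just:

```
#include <ATen/ATen.h>

// Reference semantics only: the actual implementation reorders dimensions and
// uses batch matrix multiplication instead of materializing left * right.
at::Tensor sumproduct_pair_reference(const at::Tensor& left, const at::Tensor& right,
                                     at::IntArrayRef sum_dims, bool keepdim) {
  return (left * right).sum(sum_dims, keepdim);
}
```
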
@@ -360,8 +360,8 @@ Tensor einsum(std::string eqn, TensorList tensors) {
// the computation is unrolled in the unroll_dim dimension
// its main purpose is to unify the computations in bilinear and bilinear_backward
Tensor _trilinear(const Tensor& i1_, const Tensor& i2_, const Tensor& i3_,
-                  IntList expand1_, IntList expand2_, IntList expand3_,
-                  IntList sumdim_, int64_t unroll_dim) {
+                  IntArrayRef expand1_, IntArrayRef expand2_, IntArrayRef expand3_,
+                  IntArrayRef sumdim_, int64_t unroll_dim) {
  int64_t total_dim = i1_.dim()+expand1_.size();
  AT_CHECK((unroll_dim >= 0) && (unroll_dim < total_dim), "unroll_dim must be in [0,", total_dim-1, "]");
  auto expand1 = at::dim_list_to_bitset(expand1_, total_dim);

@@ -459,7 +459,7 @@ Tensor bilinear(const Tensor& input1, const Tensor& input2, const Tensor& weight

// implements tensordot, a matrix-multiplication-like contraction, but the dimensions given
// in the two dimension lists
-Tensor tensordot(const Tensor& input1, const Tensor& input2, IntList dims1, IntList dims2) {
+Tensor tensordot(const Tensor& input1, const Tensor& input2, IntArrayRef dims1, IntArrayRef dims2) {
  AT_CHECK(dims1.size() == dims2.size(), "both dimension lists should have same length");
  int64_t csize = 1; // total size of the contracted dimensions
  Tensor t1 = input1;

@@ -428,10 +428,10 @@ Tensor matmul(
  // we track m1 vs m2 separately even though they must match for nicer error messages
  int64_t n = dim_tensor1 > 1 ? tensor1.size(-2) : 1;
  int64_t m1 = tensor1.size(-1);
-  IntList batch_tensor1(tensor1.sizes().data(), std::max<int64_t>(dim_tensor1 - 2, 0));
+  IntArrayRef batch_tensor1(tensor1.sizes().data(), std::max<int64_t>(dim_tensor1 - 2, 0));
  int64_t m2 = dim_tensor2 > 1 ? tensor2.size(-2) : 1;
  int64_t p = tensor2.size(-1);
-  IntList batch_tensor2(tensor2.sizes().data(), std::max<int64_t>(dim_tensor2 - 2, 0));
+  IntArrayRef batch_tensor2(tensor2.sizes().data(), std::max<int64_t>(dim_tensor2 - 2, 0));

  // expand the batch portion (i.e. cut off matrix dimensions and expand rest)
  std::vector<int64_t> expand_batch_portion = infer_size(batch_tensor1, batch_tensor2);
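
batch_tensor1 and batch_tensor2 are views over everything except the two trailing matrix dimensions, and infer_size broadcasts them; a small sketch of the visible effect, with made-up shapes:

```
#include <ATen/ATen.h>
#include <iostream>

int main() {
  // batch portions are {2, 1} and {5}; broadcasting gives {2, 5}.
  at::Tensor a = at::randn({2, 1, 3, 4});
  at::Tensor b = at::randn({5, 4, 6});
  at::Tensor c = at::matmul(a, b);
  std::cout << c.sizes() << std::endl;  // [2, 5, 3, 6]
  return 0;
}
```
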
@@ -525,7 +525,7 @@ Tensor frobenius_norm(const Tensor& self) {
  return at::norm(self);
}

-Tensor frobenius_norm(const Tensor& self, IntList dim, bool keepdim) {
+Tensor frobenius_norm(const Tensor& self, IntArrayRef dim, bool keepdim) {
  AT_CHECK(
      dim.size() <= 2,
      "Expected at most 2 dimensions, but got ",

@@ -540,7 +540,7 @@ Tensor frobenius_norm(const Tensor& self, IntList dim, bool keepdim) {
Tensor &frobenius_norm_out(
    Tensor& result,
    const Tensor& self,
-    IntList dim,
+    IntArrayRef dim,
    bool keepdim) {
  AT_CHECK(
      dim.size() <= 2,

@@ -133,8 +133,8 @@ static inline std::tuple<Tensor,Tensor> _linear_solve_broadcast_args(const Tenso
  linearSolveCheckInputs(arg1, arg2);

  // broadcast the batch dimensions of arg1 and arg2.
-  IntList arg1_batch_sizes(arg1.sizes().data(), arg1.ndimension() - 2);
-  IntList arg2_batch_sizes(arg2.sizes().data(), arg2.ndimension() - 2);
+  IntArrayRef arg1_batch_sizes(arg1.sizes().data(), arg1.ndimension() - 2);
+  IntArrayRef arg2_batch_sizes(arg2.sizes().data(), arg2.ndimension() - 2);
  std::vector<int64_t> expand_batch_portion = infer_size(arg1_batch_sizes, arg2_batch_sizes);

  std::vector<int64_t> arg1_expand_size({expand_batch_portion});

@@ -33,7 +33,7 @@ static inline int64_t get_target_prime(target_t* target, int64_t offset, int64_t
// The function returns the loss and the alphas, the alphas are kept for the backward step. The wrapper (ctc_loss below) hides
// the alphas from the user by only returning the loss.
template<typename scalar_t, ScalarType target_scalar_type>
-std::tuple<Tensor, Tensor> ctc_loss_cpu_template(const Tensor& log_probs, const Tensor& targets, IntList input_lengths, IntList target_lengths, int64_t BLANK) {
+std::tuple<Tensor, Tensor> ctc_loss_cpu_template(const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t BLANK) {
  // log_probs: input_len x batch_size x num_labels
  // targets [int64]: batch_size x target_length OR sum(target_lengths)
  constexpr scalar_t neginf = -std::numeric_limits<scalar_t>::infinity();

@@ -161,7 +161,7 @@ std::tuple<Tensor, Tensor> ctc_loss_cpu_template(const Tensor& log_probs, const
// a) computing the beta analogous to the alphas in the forward (backward half of the forward-backward algorithm) (eq (10) and (11))
// b) collecting the per-activation characters for all s and wrapping the gradient (eq (16), the collection is the sum)
template<typename scalar_t, ScalarType target_scalar_type>
-Tensor ctc_loss_backward_cpu_template(const Tensor& grad_out, const Tensor& log_probs, const Tensor& targets, IntList input_lengths, IntList target_lengths,
+Tensor ctc_loss_backward_cpu_template(const Tensor& grad_out, const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths,
                                      const Tensor& neg_log_likelihood, const Tensor& log_alpha, int64_t BLANK) {
  constexpr scalar_t neginf = -std::numeric_limits<scalar_t>::infinity();
  using target_t = typename std::conditional<target_scalar_type == kInt, int, int64_t>::type;

@@ -300,7 +300,7 @@ Tensor ctc_loss_backward_cpu_template(const Tensor& grad_out, const Tensor& log_

} // namespace

-std::tuple<Tensor, Tensor> ctc_loss_cpu(const Tensor& log_probs, const Tensor& targets, IntList input_lengths, IntList target_lengths, int64_t BLANK) {
+std::tuple<Tensor, Tensor> ctc_loss_cpu(const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t BLANK) {
  return AT_DISPATCH_FLOATING_TYPES(log_probs.type(), "ctc_loss", [&] {
    if (targets.type().scalarType() == kLong) {
      return ctc_loss_cpu_template<scalar_t, kLong>(log_probs, targets, input_lengths, target_lengths, BLANK);

@@ -310,7 +310,7 @@ std::tuple<Tensor, Tensor> ctc_loss_cpu(const Tensor& log_probs, const Tensor& t
  });
}

-Tensor ctc_loss_backward_cpu(const Tensor& grad, const Tensor& log_probs, const Tensor& targets, IntList input_lengths, IntList target_lengths,
+Tensor ctc_loss_backward_cpu(const Tensor& grad, const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths,
                             const Tensor& neg_log_likelihood, const Tensor& log_alpha, int64_t BLANK) {
  return AT_DISPATCH_FLOATING_TYPES(log_probs.type(), "ctc_loss_backward", [&] {
    if (targets.type().scalarType() == kLong) {

@@ -324,7 +324,7 @@ Tensor ctc_loss_backward_cpu(const Tensor& grad, const Tensor& log_probs, const
// this wrapper function dispatches to the native and cudnn implementations and hides the alpha/grad from the user (by just returning the loss)
// the gradient is implemented for _cudnn_ctc_loss (just in derivatives.yaml) and _ctc_loss and this function has automatic gradients
// it also handles the reduction if desired
-Tensor ctc_loss(const Tensor& log_probs, const Tensor& targets, IntList input_lengths, IntList target_lengths, int64_t BLANK, int64_t reduction) {
+Tensor ctc_loss(const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t BLANK, int64_t reduction) {
  auto& ctx = at::globalContext();

  bool use_cudnn =

@@ -369,8 +369,8 @@ Tensor ctc_loss(const Tensor& log_probs, const Tensor& targets, const Tensor& in

  Tensor ilc = input_lengths.toType(kLong).toBackend(Backend::CPU).contiguous();
  Tensor tlc = target_lengths.toType(kLong).toBackend(Backend::CPU).contiguous();
-  IntList il(ilc.data<int64_t>(), ilc.numel());
-  IntList tl(tlc.data<int64_t>(), tlc.numel());
+  IntArrayRef il(ilc.data<int64_t>(), ilc.numel());
+  IntArrayRef tl(tlc.data<int64_t>(), tlc.numel());
  return at::native::ctc_loss(log_probs, targets, il, tl, BLANK, reduction);
}
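
The tensor overload above only flattens its length tensors into IntArrayRef views before delegating to the list-based ctc_loss; calling the list-based overload directly looks roughly like this (shapes and length values are arbitrary, and at::Reduction::Mean is assumed to be the reduction enum of this era):

```
#include <ATen/ATen.h>
#include <vector>

int main() {
  // T=50 time steps, N=4 batch, C=20 classes (arbitrary example values).
  at::Tensor log_probs = at::randn({50, 4, 20}).log_softmax(2);
  at::Tensor targets   = at::randint(1, 20, {4, 10}, at::kLong);
  // Both length arguments bind to IntArrayRef, so vectors or braced lists work.
  std::vector<int64_t> input_lengths{50, 50, 50, 50};
  std::vector<int64_t> target_lengths{10, 8, 9, 10};
  at::Tensor loss = at::ctc_loss(log_probs, targets, input_lengths, target_lengths,
                                 /*BLANK=*/0, /*reduction=*/at::Reduction::Mean);
  return 0;
}
```
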
@@ -10,7 +10,7 @@ at::Tensor _nnpack_spatial_convolution(
    const at::Tensor& input,
    const at::Tensor& weight,
    const at::Tensor& bias,
-    IntList padding) {
+    IntArrayRef padding) {
  throw std::runtime_error(
      "nnpack_spatial_convolution: ATen not compiled with NNPACK support");
}

@@ -19,16 +19,16 @@ at::Tensor _nnpack_spatial_convolution_backward_input(
    const at::Tensor& input,
    const at::Tensor& gradOutput,
    const at::Tensor& weight,
-    IntList padding) {
+    IntArrayRef padding) {
  throw std::runtime_error(
      "nnpack_spatial_convolution_backward_input: ATen not compiled with NNPACK support");
}

at::Tensor _nnpack_spatial_convolution_backward_weight(
    const at::Tensor& input,
-    at::IntList weight_size,
+    at::IntArrayRef weight_size,
    const at::Tensor& gradOutput,
-    IntList padding) {
+    IntArrayRef padding) {
  throw std::runtime_error(
      "nnpack_spatial_convolution_backward_weight: ATen not compiled with NNPACK support");
}

@@ -38,7 +38,7 @@ _nnpack_spatial_convolution_backward(
    const at::Tensor& input,
    const at::Tensor& gradOutput,
    const at::Tensor& weight,
-    IntList padding,
+    IntArrayRef padding,
    std::array<bool, 3> output_mask) {
  throw std::runtime_error(
      "_nnpack_spatial_convolution_backward: ATen not compiled with NNPACK support");

@@ -145,9 +145,9 @@ constexpr int weight_width_dim = 3;
constexpr int max_dim = 3;

std::vector<int64_t> conv_output_size(
-    IntList input_size,
-    IntList weight_size,
-    IntList padding) {
+    IntArrayRef input_size,
+    IntArrayRef weight_size,
+    IntArrayRef padding) {
  auto dim = input_size.size();
  std::vector<int64_t> output_size(dim);
  output_size[output_batch_size_dim] = input_size[input_batch_size_dim];

@@ -163,7 +163,7 @@ Tensor _nnpack_spatial_convolution(
    const at::Tensor& input,
    const at::Tensor& weight,
    const at::Tensor& bias,
-    IntList padding) {
+    IntArrayRef padding) {
  at::Tensor output = at::empty(
      conv_output_size(input.sizes(), weight.sizes(), padding),
      input.options());

@@ -325,7 +325,7 @@ Tensor _nnpack_spatial_convolution_backward_input(
    const at::Tensor& input,
    const at::Tensor& gradOutput,
    const at::Tensor& weight,
-    IntList padding) {
+    IntArrayRef padding) {
  at::Tensor gradInput = at::empty(input.sizes(), input.options());

  // Our input and gradInput Tensors must be in the form N,C,H,W

@@ -453,9 +453,9 @@ Tensor _nnpack_spatial_convolution_backward_input(

Tensor _nnpack_spatial_convolution_backward_weight(
    const at::Tensor& input,
-    IntList weight_size,
+    IntArrayRef weight_size,
    const at::Tensor& gradOutput,
-    IntList padding) {
+    IntArrayRef padding) {
  at::Tensor gradWeight = at::empty(weight_size, input.options());

  // Our input and gradInput Tensors must be in the form N,C,H,W

@@ -574,7 +574,7 @@ std::tuple<Tensor, Tensor, Tensor> _nnpack_spatial_convolution_backward(
    const at::Tensor& input,
    const at::Tensor& grad_output,
    const at::Tensor& weight,
-    IntList padding,
+    IntArrayRef padding,
    std::array<bool, 3> output_mask) {
  Tensor grad_input, grad_weight, grad_bias;
  if (output_mask[0]) {
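
conv_output_size above takes only padding, with no stride or dilation arguments, which suggests this NNPACK path assumes stride = dilation = 1. A standalone sketch of the shape arithmetic under that assumption (layout and loop structure assumed for illustration, not copied from the implementation):

```
#include <cstdint>
#include <vector>

// Assumes NCHW input, OIHW weights, and stride = dilation = 1:
//   out = in + 2 * pad - (kernel - 1)
std::vector<int64_t> conv_output_size_sketch(const std::vector<int64_t>& input_size,
                                             const std::vector<int64_t>& weight_size,
                                             const std::vector<int64_t>& padding) {
  std::vector<int64_t> out(input_size.size());
  out[0] = input_size[0];   // batch size passes through
  out[1] = weight_size[0];  // output channels come from the weight
  for (size_t d = 2; d < input_size.size(); ++d) {
    out[d] = input_size[d] + 2 * padding[d - 2] - (weight_size[d] - 1);
  }
  return out;
}
```
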
@@ -348,7 +348,7 @@ Tensor instance_norm(
  return out.view(input.sizes());
}

-Tensor layer_norm(const Tensor& input, IntList normalized_shape,
+Tensor layer_norm(const Tensor& input, IntArrayRef normalized_shape,
    const Tensor& weight /* optional */, const Tensor& bias /* optional */,
    double eps, bool cudnn_enabled) {

@@ -85,7 +85,7 @@ std::tuple<Tensor, Tensor> _pack_padded_sequence(const Tensor& _input, const Ten
  return std::make_tuple(at::cat(steps), batch_sizes_t);
}

-Tensor _pack_padded_sequence_backward(const Tensor& grad, at::IntList input_size, const Tensor& _batch_sizes, bool batch_first) {
+Tensor _pack_padded_sequence_backward(const Tensor& grad, at::IntArrayRef input_size, const Tensor& _batch_sizes, bool batch_first) {
  std::vector<int64_t> input_size_after_t = input_size.vec();
  if (batch_first) {
    AT_CHECK(input_size.size() >= 2);

@@ -11,14 +11,14 @@ namespace at { namespace native {
static void check1d(
    const char* function_name,
    const char* argument_name,
-    IntList x) {
+    IntArrayRef x) {
  AT_CHECK(
      x.size() == 1,
      function_name, "() argument '", argument_name,
      "' should contain one int (got ", x.size(), ")");
}

-Tensor adaptive_avg_pool1d(const Tensor & self, IntList output_size) {
+Tensor adaptive_avg_pool1d(const Tensor & self, IntArrayRef output_size) {
  checkDim("adaptive_avg_pool1d", TensorArg(self, "self", 1), 3);
  check1d("adaptive_avg_pool1d", "output_size", output_size);

@@ -29,7 +29,7 @@ Tensor adaptive_avg_pool1d(const Tensor & self, IntList output_size) {
  return output.squeeze(2);
}

-std::tuple<Tensor,Tensor> adaptive_max_pool1d(const Tensor & self, IntList output_size) {
+std::tuple<Tensor,Tensor> adaptive_max_pool1d(const Tensor & self, IntArrayRef output_size) {
  checkDim("adaptive_max_pool1d", TensorArg(self, "self", 1), 3);
  check1d("adaptive_max_pool1d", "output_size", output_size);

@@ -43,10 +43,10 @@ std::tuple<Tensor,Tensor> adaptive_max_pool1d(const Tensor & self, IntList outpu

std::tuple<Tensor, Tensor> max_pool1d_with_indices(
    const Tensor& self,
-    IntList kernel_size,
-    IntList stride,
-    IntList padding,
-    IntList dilation,
+    IntArrayRef kernel_size,
+    IntArrayRef stride,
+    IntArrayRef padding,
+    IntArrayRef dilation,
    bool ceil_mode) {
  if (stride.empty()) {
    stride = kernel_size;

@@ -71,9 +71,9 @@ std::tuple<Tensor, Tensor> max_pool1d_with_indices(

Tensor avg_pool1d(
    const Tensor& self,
-    IntList kernel_size,
-    IntList stride,
-    IntList padding,
+    IntArrayRef kernel_size,
+    IntArrayRef stride,
+    IntArrayRef padding,
    bool ceil_mode,
    bool count_include_pad) {
  if (stride.empty()) {

@@ -97,10 +97,10 @@ Tensor avg_pool1d(

Tensor max_pool1d(
    const Tensor& self,
-    IntList kernel_size,
-    IntList stride,
-    IntList padding,
-    IntList dilation,
+    IntArrayRef kernel_size,
+    IntArrayRef stride,
+    IntArrayRef padding,
+    IntArrayRef dilation,
    bool ceil_mode) {
  auto output_and_indices = at::max_pool1d_with_indices(
      self, kernel_size, stride, padding, dilation, ceil_mode);

@@ -109,10 +109,10 @@ Tensor max_pool1d(

Tensor max_pool2d(
    const Tensor& self,
-    IntList kernel_size,
-    IntList stride,
-    IntList padding,
-    IntList dilation,
+    IntArrayRef kernel_size,
+    IntArrayRef stride,
+    IntArrayRef padding,
+    IntArrayRef dilation,
    bool ceil_mode) {
  auto output_and_indices = at::max_pool2d_with_indices(
      self, kernel_size, stride, padding, dilation, ceil_mode);

@@ -121,10 +121,10 @@ Tensor max_pool2d(

Tensor max_pool3d(
    const Tensor& self,
-    IntList kernel_size,
-    IntList stride,
-    IntList padding,
-    IntList dilation,
+    IntArrayRef kernel_size,
+    IntArrayRef stride,
+    IntArrayRef padding,
+    IntArrayRef dilation,
    bool ceil_mode) {
  auto output_and_indices = at::max_pool3d_with_indices(
      self, kernel_size, stride, padding, dilation, ceil_mode);
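
The pooling entry points above all take kernel_size, stride, padding and dilation as IntArrayRef, and an empty stride falls back to kernel_size (the stride.empty() branch). A small sketch of the consequence at the call site:

```
#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({1, 3, 8});
  // An empty stride defaults to kernel_size, so these two calls are equivalent.
  at::Tensor a = at::max_pool1d(x, /*kernel_size=*/{2});
  at::Tensor b = at::max_pool1d(x, /*kernel_size=*/{2}, /*stride=*/{2});
  return 0;  // both results have shape [1, 3, 4]
}
```
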
@@ -38,7 +38,7 @@ static inline Tensor integer_upcast(const Tensor& self, optional<ScalarType> dty

using DimMask = TensorIterator::DimMask;

-static DimMask make_dim_mask(IntList dims, int ndim) {
+static DimMask make_dim_mask(IntArrayRef dims, int ndim) {
  auto mask = DimMask();
  if (dims.empty()) {
    mask.flip();

@@ -87,7 +87,7 @@ static Tensor review_reduce_result(const Tensor& result, int ndim, DimMask mask,
}

static std::unique_ptr<TensorIterator> make_reduction(
-    const char* name, Tensor& result, const Tensor& self, IntList dim,
+    const char* name, Tensor& result, const Tensor& self, IntArrayRef dim,
    bool keepdim, ScalarType dtype)
{
  // check that result type and dtype match if provided

@@ -114,7 +114,7 @@ static std::unique_ptr<TensorIterator> make_reduction(
  return TensorIterator::reduce_op(viewed_result, self.to(dtype));
}

-static inline int64_t n_dim_size(const Tensor& self, IntList dim) {
+static inline int64_t n_dim_size(const Tensor& self, IntArrayRef dim) {
  int64_t numel = 1;
  for (auto d : dim) {
    numel *= self.size(d);

@@ -202,7 +202,7 @@ static ScalarType get_dtype(Tensor& result, const Tensor& self, optional<ScalarT
  return src_type;
}

-static Tensor& sum_out(Tensor& result, const Tensor& self, IntList dim,
+static Tensor& sum_out(Tensor& result, const Tensor& self, IntArrayRef dim,
                       bool keepdim, optional<ScalarType> opt_dtype) {
  ScalarType dtype = get_dtype(result, self, opt_dtype, true);
  auto iter = make_reduction("sum", result, self, dim, keepdim, dtype);

@@ -214,7 +214,7 @@ static Tensor& sum_out(Tensor& result, const Tensor& self, IntList dim,
  return result;
}

-static Tensor sum(const Tensor& self, IntList dim, bool keepdim, optional<ScalarType> dtype) {
+static Tensor sum(const Tensor& self, IntArrayRef dim, bool keepdim, optional<ScalarType> dtype) {
  Tensor result;
  native::sum_out(result, self, dim, keepdim, dtype);
  return result;

@@ -228,7 +228,7 @@ Tensor sum(const Tensor &self) {
  return at::native::sum(self, {}, false, c10::nullopt);
}

-static Tensor& prod_out(Tensor& result, const Tensor& self, IntList dim,
+static Tensor& prod_out(Tensor& result, const Tensor& self, IntArrayRef dim,
                        bool keepdim, optional<ScalarType> opt_dtype) {
  ScalarType dtype = get_dtype(result, self, opt_dtype, true);
  auto iter = make_reduction("prod", result, self, dim, keepdim, dtype);

@@ -240,7 +240,7 @@ static Tensor& prod_out(Tensor& result, const Tensor& self, IntList dim,
  return result;
}

-static Tensor prod(const Tensor& self, IntList dim, bool keepdim, optional<ScalarType> dtype) {
+static Tensor prod(const Tensor& self, IntArrayRef dim, bool keepdim, optional<ScalarType> dtype) {
  Tensor result;
  native::prod_out(result, self, dim, keepdim, dtype);
  return result;

@@ -254,7 +254,7 @@ Tensor prod(const Tensor &self) {
  return at::native::prod(self, {}, false, c10::nullopt);
}

-static inline Tensor &mean_out(Tensor &result, const Tensor &self, IntList dim,
+static inline Tensor &mean_out(Tensor &result, const Tensor &self, IntArrayRef dim,
                               bool keepdim, optional<ScalarType> opt_dtype) {
  ScalarType scalarType = opt_dtype.has_value() ? opt_dtype.value() : self.type().scalarType();
  AT_CHECK(

@@ -293,19 +293,19 @@ static inline Tensor &mean_out(Tensor &result, const Tensor &self, IntList dim,

// DIM REDUCE #################################################################

-Tensor& mean_out(Tensor& result, const Tensor& self, IntList dim, bool keepdim, ScalarType dtype) {
+Tensor& mean_out(Tensor& result, const Tensor& self, IntArrayRef dim, bool keepdim, ScalarType dtype) {
  return at::native::mean_out(
      result, self, dim, keepdim, c10::optional<ScalarType>(dtype));
}
-Tensor& mean_out(Tensor& result, const Tensor& self, IntList dim, bool keepdim) {
+Tensor& mean_out(Tensor& result, const Tensor& self, IntArrayRef dim, bool keepdim) {
  return at::native::mean_out(result, self, dim, keepdim, c10::nullopt);
}

-Tensor& mean_out(Tensor& result, const Tensor& self, IntList dim, ScalarType dtype) {
+Tensor& mean_out(Tensor& result, const Tensor& self, IntArrayRef dim, ScalarType dtype) {
  return at::native::mean_out(result, self, dim, false, dtype);
}

-static inline Tensor mean(const Tensor &self, IntList dim, bool keepdim, optional<ScalarType> dtype) {
+static inline Tensor mean(const Tensor &self, IntArrayRef dim, bool keepdim, optional<ScalarType> dtype) {
  Tensor result;
  return at::native::mean_out(result, self, dim, keepdim, dtype);
}

@@ -322,16 +322,16 @@ Tensor mean(const Tensor &self) {
  return at::native::mean(self, c10::nullopt);
}

-Tensor& sum_out(Tensor& result, const Tensor& self, IntList dim, bool keepdim, ScalarType dtype) {
+Tensor& sum_out(Tensor& result, const Tensor& self, IntArrayRef dim, bool keepdim, ScalarType dtype) {
  return at::native::sum_out(
      result, self, dim, keepdim, c10::optional<ScalarType>(dtype));
}

-Tensor& sum_out(Tensor& result, const Tensor& self, IntList dim, bool keepdim) {
+Tensor& sum_out(Tensor& result, const Tensor& self, IntArrayRef dim, bool keepdim) {
  return at::native::sum_out(result, self, dim, keepdim, c10::nullopt);
}

-Tensor& sum_out(Tensor& result, const Tensor& self, IntList dim, ScalarType dtype) {
+Tensor& sum_out(Tensor& result, const Tensor& self, IntArrayRef dim, ScalarType dtype) {
  return at::native::sum_out(result, self, dim, false, dtype);
}

@@ -348,27 +348,27 @@ Tensor& prod_out(Tensor& result, const Tensor& self, int64_t dim, ScalarType dty
  return at::native::prod_out(result, self, dim, false, dtype);
}

-Tensor mean(const Tensor& self, IntList dim, bool keepdim, ScalarType dtype) {
+Tensor mean(const Tensor& self, IntArrayRef dim, bool keepdim, ScalarType dtype) {
  return at::native::mean(self, dim, keepdim, c10::optional<ScalarType>(dtype));
}

-Tensor mean(const Tensor& self, IntList dim, bool keepdim) {
+Tensor mean(const Tensor& self, IntArrayRef dim, bool keepdim) {
  return at::native::mean(self, dim, keepdim, c10::nullopt);
}

-Tensor mean(const Tensor& self, IntList dim, ScalarType dtype) {
+Tensor mean(const Tensor& self, IntArrayRef dim, ScalarType dtype) {
  return at::native::mean(self, dim, false, dtype);
}

-Tensor sum(const Tensor& self, IntList dim, bool keepdim, ScalarType dtype) {
+Tensor sum(const Tensor& self, IntArrayRef dim, bool keepdim, ScalarType dtype) {
  return at::native::sum(self, dim, keepdim, c10::optional<ScalarType>(dtype));
}

-Tensor sum(const Tensor& self, IntList dim, bool keepdim) {
+Tensor sum(const Tensor& self, IntArrayRef dim, bool keepdim) {
  return at::native::sum(self, dim, keepdim, c10::nullopt);
}

-Tensor sum(const Tensor& self, IntList dim, ScalarType dtype) {
+Tensor sum(const Tensor& self, IntArrayRef dim, ScalarType dtype) {
  return at::native::sum(self, dim, false, dtype);
}

@@ -384,7 +384,7 @@ Tensor prod(const Tensor& self, int64_t dim, ScalarType dtype) {
  return at::native::prod(self, dim, false, dtype);
}

-static Tensor squeeze_multiple(const Tensor& self, IntList dims) {
+static Tensor squeeze_multiple(const Tensor& self, IntArrayRef dims) {
  int ndims = self.sizes().size();
  auto dims_to_squeeze = at::dim_list_to_bitset(dims, ndims);
  Tensor result = self;

@@ -396,7 +396,7 @@ static Tensor squeeze_multiple(const Tensor& self, IntList dims) {
  return result;
}

-Tensor& logsumexp_out(Tensor& result, const Tensor &self, IntList dims, bool keepdim) {
+Tensor& logsumexp_out(Tensor& result, const Tensor &self, IntArrayRef dims, bool keepdim) {
  // can't take max of empty tensor
  if (self.numel() != 0) {
    auto maxes = at::max_values(self, dims, true);

@@ -411,13 +411,13 @@ Tensor& logsumexp_out(Tensor& result, const Tensor &self, IntList dims, bool kee
  return result;
}

-Tensor logsumexp(const Tensor &self, IntList dims, bool keepdim) {
+Tensor logsumexp(const Tensor &self, IntArrayRef dims, bool keepdim) {
  Tensor result = at::empty({0}, self.options());
  return at::native::logsumexp_out(result, self, dims, keepdim);
}

static Tensor& norm_out(Tensor &result, const Tensor &self, optional<Scalar> opt_p,
-                        IntList dim, bool keepdim, optional<ScalarType> opt_dtype) {
+                        IntArrayRef dim, bool keepdim, optional<ScalarType> opt_dtype) {
  auto p = opt_p.value_or(2.0);
  AT_CHECK(self.type().backend() == Backend::CPU || self.type().backend() == Backend::CUDA,
           "norm only supports CPU AND CUDA backend, got: ", toString(self.type().backend()));

@@ -452,21 +452,21 @@ static inline Tensor _norm(const Tensor &self, Scalar p) {
  }
}

-Tensor &norm_out(Tensor& result, const Tensor& self, optional<Scalar> p, IntList dim, bool keepdim, ScalarType dtype) {
+Tensor &norm_out(Tensor& result, const Tensor& self, optional<Scalar> p, IntArrayRef dim, bool keepdim, ScalarType dtype) {
  return at::native::norm_out(result, self, p, dim, keepdim, optional<ScalarType>(dtype));
}

-Tensor &norm_out(Tensor& result, const Tensor& self, optional<Scalar> p, IntList dim, bool keepdim) {
+Tensor &norm_out(Tensor& result, const Tensor& self, optional<Scalar> p, IntArrayRef dim, bool keepdim) {
  return at::native::norm_out(result, self, p, dim, keepdim, c10::nullopt);
}

-static Tensor norm(const Tensor& self, optional<Scalar> p, IntList dim, bool keepdim,
+static Tensor norm(const Tensor& self, optional<Scalar> p, IntArrayRef dim, bool keepdim,
                   optional<ScalarType> opt_dtype) {
  Tensor result;
  return at::native::norm_out(result, self, p, dim, keepdim, opt_dtype);
}

-Tensor norm(const Tensor& self, optional<Scalar> p, IntList dim, bool keepdim, ScalarType dtype) {
+Tensor norm(const Tensor& self, optional<Scalar> p, IntArrayRef dim, bool keepdim, ScalarType dtype) {
  return at::native::norm(self, p, dim, keepdim, optional<ScalarType>(dtype));
}

@@ -474,7 +474,7 @@ Tensor norm(const Tensor& self, optional<Scalar> p, ScalarType dtype) {
  return at::native::norm(self, p, {}, false, optional<ScalarType>(dtype));
}

-Tensor norm(const Tensor& self, optional<Scalar> p, IntList dim, bool keepdim) {
+Tensor norm(const Tensor& self, optional<Scalar> p, IntArrayRef dim, bool keepdim) {
  return at::native::norm(self, p, dim, keepdim, c10::nullopt);
}

@@ -571,7 +571,7 @@ Tensor &any_out(Tensor &result, const Tensor &self, int64_t dim, bool keepdim) {
  }
}

-Tensor min_values(const Tensor& self, IntList dims, bool keepdim) {
+Tensor min_values(const Tensor& self, IntArrayRef dims, bool keepdim) {
  if (dims.size() == 1) {
    return std::get<0>(self.min(dims[0], keepdim));
  } else {

@@ -584,7 +584,7 @@ Tensor min_values(const Tensor& self, IntList dims, bool keepdim) {
  }
}

-Tensor max_values(const Tensor& self, IntList dims, bool keepdim) {
+Tensor max_values(const Tensor& self, IntArrayRef dims, bool keepdim) {
  if (dims.size() == 1) {
    return std::get<0>(self.max(dims[0], keepdim));
  } else {

@@ -597,7 +597,7 @@ Tensor max_values(const Tensor& self, IntList dims, bool keepdim) {
  }
}

-static Tensor &std_var_out(Tensor &result, const Tensor &self, IntList dim, bool unbiased, bool keepdim, bool take_sqrt) {
+static Tensor &std_var_out(Tensor &result, const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim, bool take_sqrt) {
  AT_CHECK(self.type().backend() == Backend::CPU || self.type().backend() == Backend::CUDA,
           "std and var only support CPU AND CUDA backend, got: ", toString(self.type().backend()));
  AT_CHECK(at::isFloatingType(self.type().scalarType()), "std and var only support floating-point dtypes");

@@ -619,12 +619,12 @@ Tensor var(const Tensor& self, bool unbiased) {
  return trivial_return.has_value() ? trivial_return.value() : at::legacy::th::_th_var(self, unbiased);
}

-Tensor var(const Tensor& self, IntList dim, bool unbiased, bool keepdim) {
+Tensor var(const Tensor& self, IntArrayRef dim, bool unbiased, bool keepdim) {
  Tensor result = at::empty({0}, self.options());
  return at::native::var_out(result, self, dim, unbiased, keepdim);
}

-Tensor &var_out(Tensor &result, const Tensor &self, IntList dim, bool unbiased, bool keepdim) {
+Tensor &var_out(Tensor &result, const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim) {
  return std_var_out(result, self, dim, unbiased, keepdim, false);
}

@@ -636,12 +636,12 @@ Tensor std(const Tensor& self, bool unbiased) {
  return trivial_return.has_value() ? trivial_return.value() : at::legacy::th::_th_std(self, unbiased);
}

-Tensor std(const Tensor& self, IntList dim, bool unbiased, bool keepdim) {
+Tensor std(const Tensor& self, IntArrayRef dim, bool unbiased, bool keepdim) {
  Tensor result = at::empty({0}, self.options());
  return at::native::std_out(result, self, dim, unbiased, keepdim);
}

-Tensor &std_out(Tensor &result, const Tensor &self, IntList dim, bool unbiased, bool keepdim) {
+Tensor &std_out(Tensor &result, const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim) {
  return std_var_out(result, self, dim, unbiased, keepdim, true);
}
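
With dim carried as an IntArrayRef throughout this file, the reductions accept a list of dimensions, optionally with keepdim and an accumulation dtype; a brief usage sketch:

```
#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::rand({4, 5, 6});
  at::Tensor s = at::sum(x, /*dim=*/{0, 2}, /*keepdim=*/true);          // shape [1, 5, 1]
  at::Tensor m = at::mean(x, /*dim=*/{1}, /*keepdim=*/false, at::kDouble);
  at::Tensor l = at::logsumexp(x, /*dim=*/{1}, /*keepdim=*/false);      // shape [4, 6]
  return 0;
}
```
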
@ -4,7 +4,7 @@ namespace at { namespace native {
|
|||
|
||||
static Tensor &_dimreduce_setup(Tensor &result, const Tensor &self,
|
||||
int64_t dim) {
|
||||
IntList self_sizes = self.sizes();
|
||||
IntArrayRef self_sizes = self.sizes();
|
||||
std::vector<int64_t> result_sizes;
|
||||
result_sizes.insert(result_sizes.end(), self_sizes.begin(), self_sizes.end());
|
||||
result_sizes[dim] = 1;
|
||||
|
|
|
|||
|
|
@ -55,7 +55,7 @@ inline void reflection_pad1d_out_loop(
|
|||
}
|
||||
|
||||
void reflection_pad1d_out_template(
|
||||
Tensor& output, const Tensor& input_, IntList padding) {
|
||||
Tensor& output, const Tensor& input_, IntArrayRef padding) {
|
||||
int64_t dim_plane = 0;
|
||||
int64_t dim_w = 1;
|
||||
int64_t nbatch = 1;
|
||||
|
|
@ -160,7 +160,7 @@ inline void reflection_pad1d_backward_out_loop(
|
|||
|
||||
void reflection_pad1d_backward_out_template(
|
||||
Tensor& grad_input, const Tensor& grad_output_, const Tensor& input,
|
||||
IntList padding) {
|
||||
IntArrayRef padding) {
|
||||
int64_t dim_plane = 0;
|
||||
int64_t dim_w = 1;
|
||||
int64_t nbatch = 1;
|
||||
|
|
@ -273,7 +273,7 @@ inline void reflection_pad2d_out_loop(
|
|||
}
|
||||
|
||||
void reflection_pad2d_out_template(
|
||||
Tensor &output, const Tensor &input_, IntList padding) {
|
||||
Tensor &output, const Tensor &input_, IntArrayRef padding) {
|
||||
int dim_w = 2;
|
||||
int dim_h = 1;
|
||||
int dim_slices = 0;
|
||||
|
|
@ -409,7 +409,7 @@ inline void reflection_pad2d_backward_out_loop(
|
|||
|
||||
void reflection_pad2d_backward_out_template(
|
||||
Tensor &grad_input, const Tensor &grad_output_,
|
||||
const Tensor &input, IntList padding) {
|
||||
const Tensor &input, IntArrayRef padding) {
|
||||
int dim_w = 2;
|
||||
int dim_h = 1;
|
||||
int dim_plane = 0;
|
||||
|
|
@ -472,12 +472,12 @@ void reflection_pad2d_backward_out_template(
|
|||
} // namespace
|
||||
|
||||
Tensor& reflection_pad1d_out_cpu(
|
||||
Tensor& output, const Tensor& input, IntList padding) {
|
||||
Tensor& output, const Tensor& input, IntArrayRef padding) {
|
||||
reflection_pad1d_out_template(output, input, padding);
|
||||
return output;
|
||||
}
|
||||
|
||||
Tensor reflection_pad1d_cpu(const Tensor& input, IntList padding) {
|
||||
Tensor reflection_pad1d_cpu(const Tensor& input, IntArrayRef padding) {
|
||||
auto output = at::empty({0}, input.options());
|
||||
reflection_pad1d_out_template(output, input, padding);
|
||||
return output;
|
||||
|
|
@@ -487,7 +487,7 @@ Tensor& reflection_pad1d_backward_out_cpu(
Tensor& grad_input,
const Tensor& grad_output,
const Tensor& input,
IntList padding) {
IntArrayRef padding) {
grad_input.resize_as_(input);
grad_input.zero_();
reflection_pad1d_backward_out_template(
@@ -498,7 +498,7 @@ Tensor& reflection_pad1d_backward_out_cpu(
Tensor reflection_pad1d_backward_cpu(
const Tensor& grad_output,
const Tensor& input,
IntList padding) {
IntArrayRef padding) {
auto grad_input = at::zeros_like(input);
reflection_pad1d_backward_out_template(
grad_input, grad_output, input, padding);
@@ -506,12 +506,12 @@ Tensor reflection_pad1d_backward_cpu(
}

Tensor& reflection_pad2d_out_cpu(
Tensor& output, const Tensor& input, IntList padding) {
Tensor& output, const Tensor& input, IntArrayRef padding) {
reflection_pad2d_out_template(output, input, padding);
return output;
}

Tensor reflection_pad2d_cpu(const Tensor& input, IntList padding) {
Tensor reflection_pad2d_cpu(const Tensor& input, IntArrayRef padding) {
auto output = at::empty({0}, input.options());
reflection_pad2d_out_template(output, input, padding);
return output;
@@ -521,7 +521,7 @@ Tensor& reflection_pad2d_backward_out_cpu(
Tensor& grad_input,
const Tensor& grad_output,
const Tensor& input,
IntList padding) {
IntArrayRef padding) {
grad_input.resize_as_(input);
grad_input.zero_();
reflection_pad2d_backward_out_template(
@@ -532,7 +532,7 @@ Tensor& reflection_pad2d_backward_out_cpu(
Tensor reflection_pad2d_backward_cpu(
const Tensor& grad_output,
const Tensor& input,
IntList padding) {
IntArrayRef padding) {
auto grad_input = at::zeros_like(input);
reflection_pad2d_backward_out_template(
grad_input, grad_output, input, padding);
@@ -60,7 +60,7 @@ static void replication_pad1d_out_batch(
void replication_pad1d_out_cpu_template(
Tensor& output,
const Tensor& input_,
IntList paddingSize)
IntArrayRef paddingSize)
{
int dimw = 1;
int dimslices = 0;
@@ -185,7 +185,7 @@ Tensor& replication_pad1d_backward_out_cpu_template(
Tensor& gradInput,
const Tensor& gradOutput_,
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
int dimw = 1;
int dimslices = 0;
@@ -323,7 +323,7 @@ static void replication_pad2d_out_batch(

void replication_pad2d_out_cpu_template(Tensor& output,
const Tensor& input_,
IntList paddingSize)
IntArrayRef paddingSize)
{
AT_CHECK(paddingSize.size() == 4, "padding size is expected to be 4");
int pad_l = paddingSize[0];
@@ -466,7 +466,7 @@ Tensor& replication_pad2d_backward_out_cpu_template(
Tensor& gradInput,
const Tensor& gradOutput_,
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
AT_CHECK(paddingSize.size() == 4, "padding size is expected to be 4");
int pad_l = paddingSize[0];
@@ -666,7 +666,7 @@ static void replication_pad3d_out_batch(
void replication_pad3d_out_cpu_template(
Tensor& output,
const Tensor& input_,
IntList paddingSize)
IntArrayRef paddingSize)
{
AT_CHECK(paddingSize.size() == 6, "padding size is expected to be 6");
int pleft = paddingSize[0];
@@ -823,7 +823,7 @@ Tensor& replication_pad3d_backward_out_cpu_template(
Tensor& gradInput,
const Tensor& gradOutput_,
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
AT_CHECK(paddingSize.size() == 6, "padding size is expected to be 6");
int pleft = paddingSize[0];
@@ -908,7 +908,7 @@ Tensor& replication_pad3d_backward_out_cpu_template(
Tensor& replication_pad1d_out_cpu(
Tensor& output,
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
replication_pad1d_out_cpu_template(
output, input, paddingSize);
@@ -917,7 +917,7 @@ Tensor& replication_pad1d_out_cpu(

Tensor replication_pad1d_cpu(
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
auto output = at::empty({0}, input.options());
replication_pad1d_out_cpu_template(
@@ -929,7 +929,7 @@ Tensor& replication_pad1d_backward_out_cpu(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
gradInput.resize_as_(input);
replication_pad1d_backward_out_cpu_template(
@@ -940,7 +940,7 @@ Tensor& replication_pad1d_backward_out_cpu(
Tensor replication_pad1d_backward_cpu(
const Tensor& gradOutput,
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
auto gradInput = at::zeros_like(input);
replication_pad1d_backward_out_cpu_template(
@@ -951,7 +951,7 @@ Tensor replication_pad1d_backward_cpu(
Tensor& replication_pad2d_out_cpu(
Tensor& output,
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
replication_pad2d_out_cpu_template(
output, input, paddingSize);
@@ -960,7 +960,7 @@ Tensor& replication_pad2d_out_cpu(

Tensor replication_pad2d_cpu(
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
auto output = at::empty({0}, input.options());
replication_pad2d_out_cpu_template(
@@ -972,7 +972,7 @@ Tensor& replication_pad2d_backward_out_cpu(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
replication_pad2d_backward_out_cpu_template(
gradInput, gradOutput, input, paddingSize);
@@ -982,7 +982,7 @@ Tensor& replication_pad2d_backward_out_cpu(
Tensor replication_pad2d_backward_cpu(
const Tensor& gradOutput,
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
auto gradInput = at::zeros_like(input);
replication_pad2d_backward_out_cpu_template(
@@ -993,7 +993,7 @@ Tensor replication_pad2d_backward_cpu(
Tensor& replication_pad3d_out_cpu(
Tensor& output,
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
replication_pad3d_out_cpu_template(
output, input, paddingSize);
@@ -1002,7 +1002,7 @@ Tensor& replication_pad3d_out_cpu(

Tensor replication_pad3d_cpu(
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
auto output = at::empty({0}, input.options());
replication_pad3d_out_cpu_template(
@@ -1014,7 +1014,7 @@ Tensor& replication_pad3d_backward_out_cpu(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
replication_pad3d_backward_out_cpu_template(
gradInput, gradOutput, input, paddingSize);
@@ -1024,7 +1024,7 @@ Tensor& replication_pad3d_backward_out_cpu(
Tensor replication_pad3d_backward_cpu(
const Tensor& gradOutput,
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
auto gradInput = at::zeros_like(input);
replication_pad3d_backward_out_cpu_template(
@@ -3,7 +3,7 @@

namespace at { namespace native {

Tensor& resize_cpu_(Tensor& self, IntList size) {
Tensor& resize_cpu_(Tensor& self, IntArrayRef size) {
auto* self_ = self.unsafeGetTensorImpl();
resize_impl_cpu_(self_, size, /*strides=*/c10::nullopt);
self_->maybe_zero_dim(size.size() == 0);
@@ -24,8 +24,8 @@ static inline void maybe_resize_storage_cpu(TensorImpl* self, int64_t new_size)

inline TensorImpl* resize_impl_cpu_(
TensorImpl* self,
IntList size,
c10::optional<IntList> stride) {
IntArrayRef size,
c10::optional<IntArrayRef> stride) {
if (self->sizes() == size && (!stride || self->strides() == stride)) {
return self;
}
@@ -52,7 +52,7 @@ inline TensorImpl* resize_impl_cpu_(
return self;
}

static inline int64_t computeStorageSize(IntList sizes, IntList strides) {
static inline int64_t computeStorageSize(IntArrayRef sizes, IntArrayRef strides) {
int64_t storage_size = 1;
for (size_t dim = 0; dim < sizes.size(); ++dim) {
if (sizes[dim] == 0) {
@@ -64,8 +64,8 @@ static inline int64_t computeStorageSize(IntList sizes, IntList strides) {
}

static inline void checkInBoundsForStorage(
IntList size,
IntList stride,
IntArrayRef size,
IntArrayRef stride,
int64_t storage_offset,
const Storage& new_storage) {
int64_t storage_size = computeStorageSize(size, stride);
@@ -88,8 +88,8 @@ static inline void checkInBoundsForStorage(
*/
inline void setStrided(
const Tensor& self,
IntList size,
IntList stride,
IntArrayRef size,
IntArrayRef stride,
int64_t storage_offset) {
auto* self_ = self.unsafeGetTensorImpl();
checkInBoundsForStorage(size, stride, storage_offset, self_->storage());
@@ -21,7 +21,7 @@ namespace at { namespace native {
// at::_fft_with_size which dispatches to _fft_cufft (CUDA) or _fft_mkl (CPU).
static inline Tensor _fft(const Tensor &self, const int64_t signal_ndim,
const bool complex_input, const bool complex_output,
const bool inverse, IntList signal_sizes, const bool normalized,
const bool inverse, IntArrayRef signal_sizes, const bool normalized,
const bool onesided) {

AT_CHECK(signal_ndim >= 1 && signal_ndim <= 3,
@@ -166,7 +166,7 @@ Tensor rfft(const Tensor& self, const int64_t signal_ndim, const bool normalized
}

Tensor irfft(const Tensor& self, const int64_t signal_ndim, const bool normalized,
const bool onesided, IntList signal_sizes) {
const bool onesided, IntArrayRef signal_sizes) {
return _fft(self, signal_ndim, /* complex_input */ true,
/* complex_output */ false, /* inverse */ true, signal_sizes,
normalized, onesided);
@@ -88,7 +88,7 @@ Tensor _dim_arange(const Tensor& like, int64_t dim) {

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ empty ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Tensor empty_cpu(IntList size, const TensorOptions& options) {
Tensor empty_cpu(IntArrayRef size, const TensorOptions& options) {
AT_ASSERT(options.backend() == Backend::CPU);
AT_ASSERT(!options.is_variable()); // is_variable should have been 'unpacked' // TODO: remove this when Variable and Tensor are merged

@@ -110,13 +110,13 @@ Tensor empty_cpu(IntList size, const TensorOptions& options) {
return tensor;
}

Tensor empty_strided_cpu(IntList size, IntList stride, const TensorOptions& options) {
Tensor empty_strided_cpu(IntArrayRef size, IntArrayRef stride, const TensorOptions& options) {
auto t = at::native::empty_cpu({0}, options);
at::native::resize_impl_cpu_(t.unsafeGetTensorImpl(), size, stride);
return t;
}

Tensor& empty_out(Tensor& result, IntList size) {
Tensor& empty_out(Tensor& result, IntArrayRef size) {
if (result.is_sparse()) {
result.sparse_resize_and_clear_(size, size.size(), 0);
} else {
@@ -193,7 +193,7 @@ Tensor& eye_out_cpu(Tensor& result, int64_t n, int64_t m) {

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ full ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Tensor full(IntList size, Scalar fill_value, const TensorOptions& options) {
Tensor full(IntArrayRef size, Scalar fill_value, const TensorOptions& options) {
if (options.layout() == kSparse) {
AT_ERROR("full(...) is not implemented for sparse layout");
}
@@ -201,7 +201,7 @@ Tensor full(IntList size, Scalar fill_value, const TensorOptions& options) {
return result.fill_(fill_value);
}

Tensor& full_out(Tensor& result, IntList size, Scalar fill_value) {
Tensor& full_out(Tensor& result, IntArrayRef size, Scalar fill_value) {
if (result.is_sparse()) {
AT_ERROR("full(...) is not implemented for sparse layout");
}
@@ -241,11 +241,11 @@ Tensor logspace(

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ones ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Tensor ones(IntList size, const TensorOptions& options) {
Tensor ones(IntArrayRef size, const TensorOptions& options) {
return native::full(size, /*fill_value=*/1, options);
}

Tensor& ones_out(Tensor& result, IntList size) {
Tensor& ones_out(Tensor& result, IntArrayRef size) {
return native::full_out(result, size, /*fill_value=*/1);
}

@@ -265,20 +265,20 @@ Tensor scalar_tensor(Scalar s, const TensorOptions& options) {

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ rand ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Tensor rand(IntList size, const TensorOptions& options) {
Tensor rand(IntArrayRef size, const TensorOptions& options) {
return native::rand(size, nullptr, options);
}

Tensor rand(IntList size, Generator* generator, const TensorOptions& options) {
Tensor rand(IntArrayRef size, Generator* generator, const TensorOptions& options) {
auto result = at::empty(size, options);
return result.uniform_(0, 1, generator);
}

Tensor& rand_out(Tensor& result, IntList size) {
Tensor& rand_out(Tensor& result, IntArrayRef size) {
return native::rand_out(result, size, nullptr);
}

Tensor& rand_out(Tensor& result, IntList size, Generator* generator) {
Tensor& rand_out(Tensor& result, IntArrayRef size, Generator* generator) {
result.resize_(size);
return result.uniform_(0, 1, generator);
}
@@ -293,13 +293,13 @@ Tensor rand_like(const Tensor& self, const TensorOptions& options) {

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ randint ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Tensor randint(int64_t high, IntList size, const TensorOptions& options) {
Tensor randint(int64_t high, IntArrayRef size, const TensorOptions& options) {
return native::randint(high, size, nullptr, options);
}

Tensor randint(
int64_t high,
IntList size,
IntArrayRef size,
Generator* generator,
const TensorOptions& options) {
return native::randint(0, high, size, generator, options);
@@ -308,7 +308,7 @@ Tensor randint(
Tensor randint(
int64_t low,
int64_t high,
IntList size,
IntArrayRef size,
const TensorOptions& options) {
return native::randint(low, high, size, nullptr, options);
}
@@ -316,27 +316,27 @@ Tensor randint(
Tensor randint(
int64_t low,
int64_t high,
IntList size,
IntArrayRef size,
Generator* generator,
const TensorOptions& options) {
auto result = at::empty(size, options);
return result.random_(low, high, generator);
}

Tensor& randint_out(Tensor& result, int64_t high, IntList size) {
Tensor& randint_out(Tensor& result, int64_t high, IntArrayRef size) {
return native::randint_out(result, high, size, nullptr);
}

Tensor& randint_out(
Tensor& result,
int64_t high,
IntList size,
IntArrayRef size,
Generator* generator) {
result.resize_(size);
return result.random_(0, high, generator);
}

Tensor& randint_out(Tensor& result, int64_t low, int64_t high, IntList size) {
Tensor& randint_out(Tensor& result, int64_t low, int64_t high, IntArrayRef size) {
return native::randint_out(result, low, high, size, nullptr);
}

@@ -344,7 +344,7 @@ Tensor& randint_out(
Tensor& result,
int64_t low,
int64_t high,
IntList size,
IntArrayRef size,
Generator* generator) {
result.resize_(size);
return result.random_(low, high, generator);
@@ -375,20 +375,20 @@ Tensor randint_like(

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ randn ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Tensor randn(IntList size, const TensorOptions& options) {
Tensor randn(IntArrayRef size, const TensorOptions& options) {
return native::randn(size, nullptr, options);
}

Tensor randn(IntList size, Generator* generator, const TensorOptions& options) {
Tensor randn(IntArrayRef size, Generator* generator, const TensorOptions& options) {
auto result = at::empty(size, options);
return result.normal_(0, 1, generator);
}

Tensor& randn_out(Tensor& result, IntList size) {
Tensor& randn_out(Tensor& result, IntArrayRef size) {
return native::randn_out(result, size, nullptr);
}

Tensor& randn_out(Tensor& result, IntList size, Generator* generator) {
Tensor& randn_out(Tensor& result, IntArrayRef size, Generator* generator) {
result.resize_(size);
return result.normal_(0, 1, generator);
}
@@ -560,12 +560,12 @@ Tensor triu_indices_cpu(

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ zeros ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Tensor zeros(IntList size, const TensorOptions& options) {
Tensor zeros(IntArrayRef size, const TensorOptions& options) {
auto result = at::empty(size, options);
return result.zero_();
}

Tensor& zeros_out(Tensor& result, IntList size) {
Tensor& zeros_out(Tensor& result, IntArrayRef size) {
if (result.is_sparse()) {
result.sparse_resize_and_clear_(size, size.size(), 0);
return result;
@ -165,7 +165,7 @@ DimVector TensorIterator::compatible_stride(int element_size) const {
|
|||
return stride;
|
||||
}
|
||||
|
||||
DimVector TensorIterator::invert_perm(IntList input) const {
|
||||
DimVector TensorIterator::invert_perm(IntArrayRef input) const {
|
||||
// Invert the permutation caused by reorder_dimensions. This is not valid
|
||||
// after coalesce_dimensions is called.
|
||||
AT_ASSERT(!has_coalesced_dimensions_);
|
||||
|
|
@ -264,7 +264,7 @@ DimVector TensorIterator::get_dim_strides(int dim) const {
|
|||
return inner_strides;
|
||||
}
|
||||
|
||||
SmallVector<char*, 4> TensorIterator::get_data_ptrs(ArrayRef<char*> base, IntList counter) const {
|
||||
SmallVector<char*, 4> TensorIterator::get_data_ptrs(ArrayRef<char*> base, IntArrayRef counter) const {
|
||||
auto ptrs = SmallVector<char*, 4>(base);
|
||||
for (int dim = 0; dim < ndim(); dim++) {
|
||||
int64_t value = counter[dim];
|
||||
|
|
@ -292,10 +292,10 @@ bool TensorIterator::is_dim_reduced(int dim) const {
|
|||
return false;
|
||||
}
|
||||
|
||||
void TensorIterator::permute_dimensions(IntList perm) {
|
||||
void TensorIterator::permute_dimensions(IntArrayRef perm) {
|
||||
AT_ASSERT(perm.size() == ndim());
|
||||
|
||||
auto reorder = [perm](IntList data) {
|
||||
auto reorder = [perm](IntArrayRef data) {
|
||||
auto res = DimVector(data.size(), 0);
|
||||
for (size_t i = 0; i < perm.size(); i++) {
|
||||
res[i] = data[perm[i]];
|
||||
|
|
@ -429,7 +429,7 @@ void TensorIterator::remove_operand(int arg) {
|
|||
operands_.erase(operands_.begin() + arg);
|
||||
}
|
||||
|
||||
void TensorIterator::replace_operand(int arg, void* data, IntList stride) {
|
||||
void TensorIterator::replace_operand(int arg, void* data, IntArrayRef stride) {
|
||||
operands_[arg].data = data;
|
||||
operands_[arg].stride_bytes = stride;
|
||||
}
|
||||
|
|
@ -453,7 +453,7 @@ void TensorIterator::narrow(int dim, int64_t start, int64_t size) {
|
|||
}
|
||||
}
|
||||
|
||||
void TensorIterator::select_all_keeping_dim(int start_dim, IntList indices) {
|
||||
void TensorIterator::select_all_keeping_dim(int start_dim, IntArrayRef indices) {
|
||||
AT_ASSERT(start_dim <= ndim());
|
||||
for (int i = start_dim; i < ndim(); ++i) {
|
||||
for (auto& op : operands_) {
|
||||
|
|
@ -542,7 +542,7 @@ void TensorIterator::compute_shape() {
|
|||
}
|
||||
}
|
||||
|
||||
static DimVector compute_stride(const Tensor& tensor, IntList shape) {
|
||||
static DimVector compute_stride(const Tensor& tensor, IntArrayRef shape) {
|
||||
int ndim = shape.size();
|
||||
auto original_shape = tensor.sizes();
|
||||
auto original_stride = tensor.strides();
|
||||
|
|
@ -677,7 +677,7 @@ SplitUntil32Bit::iterator SplitUntil32Bit::end() const {
|
|||
return SplitUntil32Bit::iterator();
|
||||
}
|
||||
|
||||
DimCounter::DimCounter(IntList shape, Range range)
|
||||
DimCounter::DimCounter(IntArrayRef shape, Range range)
|
||||
: shape(shape)
|
||||
, range(range)
|
||||
, values(shape.size(), 0)
|
||||
|
|
|
|||
|
|
@ -53,13 +53,13 @@
|
|||
namespace at {
|
||||
|
||||
struct DimCounter {
|
||||
DimCounter(IntList shape, Range range);
|
||||
DimCounter(IntArrayRef shape, Range range);
|
||||
|
||||
void increment(const std::array<int64_t, 2>& step);
|
||||
bool is_done() const;
|
||||
std::array<int64_t, 2> max_2d_step() const;
|
||||
|
||||
IntList shape;
|
||||
IntArrayRef shape;
|
||||
Range range;
|
||||
DimVector values;
|
||||
int64_t offset;
|
||||
|
|
@ -129,7 +129,7 @@ struct CAFFE2_API TensorIterator {
|
|||
static std::unique_ptr<TensorIterator> reduce_op(Tensor& out, const Tensor& a);
|
||||
|
||||
int ndim() const { return shape_.size(); }
|
||||
IntList shape() const { return shape_; }
|
||||
IntArrayRef shape() const { return shape_; }
|
||||
int64_t numel() const;
|
||||
int ntensors() const { return operands_.size(); }
|
||||
|
||||
|
|
@ -145,7 +145,7 @@ struct CAFFE2_API TensorIterator {
|
|||
bool is_dim_reduced(int dim) const;
|
||||
|
||||
/// Accessors for each operand
|
||||
IntList strides(int arg) const { return operands_[arg].stride_bytes; }
|
||||
IntArrayRef strides(int arg) const { return operands_[arg].stride_bytes; }
|
||||
void* data_ptr(int arg) const;
|
||||
const Type& type(int arg=0) const {
|
||||
AT_ASSERT(operands_[arg].type);
|
||||
|
|
@ -172,9 +172,9 @@ struct CAFFE2_API TensorIterator {
|
|||
/// Shrinks an iterated dimension
|
||||
void narrow(int dim, int64_t start, int64_t size);
|
||||
/// Narrows every dim after and including `start_dim` to size one.
|
||||
void select_all_keeping_dim(int start_dim, IntList starts);
|
||||
void select_all_keeping_dim(int start_dim, IntArrayRef starts);
|
||||
/// Replaces the data pointer and strides for the operand at index `arg`
|
||||
void replace_operand(int arg, void* data, IntList stride);
|
||||
void replace_operand(int arg, void* data, IntArrayRef stride);
|
||||
|
||||
/// Splits this TensorIterator into two iterators. Together they iterate over
|
||||
/// the entire operation. Used by `with_32bit_indexing()`.
|
||||
|
|
@ -204,13 +204,13 @@ struct CAFFE2_API TensorIterator {
|
|||
|
||||
/// Inverts the re-ordering done by reorder_dimensions. This can only be
|
||||
/// called *before* coalesce_dimensions() is called.
|
||||
DimVector invert_perm(IntList input) const;
|
||||
DimVector invert_perm(IntArrayRef input) const;
|
||||
|
||||
/// Helper functions for CPU iteration
|
||||
DimVector get_dim_strides(int dim) const;
|
||||
DimVector get_strides() const;
|
||||
DimVector get_inner_strides() const { return get_dim_strides(0); }
|
||||
PtrVector get_data_ptrs(ArrayRef<char*> base, IntList counter) const;
|
||||
PtrVector get_data_ptrs(ArrayRef<char*> base, IntArrayRef counter) const;
|
||||
PtrVector get_base_ptrs() const;
|
||||
|
||||
/// true if the stride computation can use 32-bit arithmetic. Used by GPU kernels
|
||||
|
|
@ -234,7 +234,7 @@ protected:
|
|||
void compute_shape();
|
||||
void compute_strides();
|
||||
void reorder_dimensions();
|
||||
void permute_dimensions(IntList perm);
|
||||
void permute_dimensions(IntArrayRef perm);
|
||||
void compute_types();
|
||||
Type& compute_common_type();
|
||||
void allocate_outputs();
|
||||
|
|
|
|||
|
|
@ -24,7 +24,7 @@ Tensor _reshape_from_tensor(const Tensor& self, const Tensor& shape_tensor) {
|
|||
for (size_t i = 0; i < shape_tensor.numel(); ++i) {
|
||||
shape.push_back(accessor[i]);
|
||||
}
|
||||
return self.reshape(IntList(shape));
|
||||
return self.reshape(IntArrayRef(shape));
|
||||
}
|
||||
|
||||
Tensor _shape_as_tensor(const Tensor& self) {
|
||||
|
|
@ -50,7 +50,7 @@ Tensor & cat_out(Tensor & result, TensorList tensors, int64_t dim) {
|
|||
return at::legacy::th::_th_cat_out(result, tensors, dim);
|
||||
}
|
||||
|
||||
static bool sizes_match_except(IntList s1, IntList s2, int64_t dim_except /* should already be wrapped */) {
|
||||
static bool sizes_match_except(IntArrayRef s1, IntArrayRef s2, int64_t dim_except /* should already be wrapped */) {
|
||||
if (s1.size() != s2.size()) {
|
||||
return false;
|
||||
}
|
||||
|
|
@ -66,7 +66,7 @@ static bool sizes_match_except(IntList s1, IntList s2, int64_t dim_except /* sho
|
|||
// for being concatenated along a given dimension.
|
||||
static void check_cat_sparse_dims(Tensor const &t,
|
||||
int64_t pos /* used only for debug messages */,
|
||||
IntList sizes,
|
||||
IntArrayRef sizes,
|
||||
int64_t wrapped,
|
||||
int64_t sparse_dim,
|
||||
int64_t dense_dim) {
|
||||
|
|
@ -86,7 +86,7 @@ static Tensor cat_sparse(TensorList tensors, int64_t dim) {
|
|||
int64_t wrapped = maybe_wrap_dim(dim, tensors[0].dim());
|
||||
int64_t sparse_dim = tensors[0].sparse_dim();
|
||||
int64_t dense_dim = tensors[0].dense_dim();
|
||||
IntList sizes = tensors[0].sizes();
|
||||
IntArrayRef sizes = tensors[0].sizes();
|
||||
if (wrapped < sparse_dim) {
|
||||
for (size_t i = 0; i < tensors.size(); ++i) {
|
||||
auto const &t = tensors[i];
|
||||
|
|
@ -267,7 +267,7 @@ Tensor diag_embed(const Tensor& self, int64_t offset, int64_t dim1_, int64_t dim
|
|||
return result;
|
||||
}
|
||||
|
||||
Tensor expand(const Tensor& self, IntList size, bool implicit) {
|
||||
Tensor expand(const Tensor& self, IntArrayRef size, bool implicit) {
|
||||
// [expand implicit]
|
||||
// The implicit flag is set to true for any expand calls inserted by broadcast
|
||||
// operators in ExpandUtils.h This flag is recorded by the tracer to
|
||||
|
|
@ -291,14 +291,14 @@ Tensor expand_as(const Tensor& self, const Tensor& other) {
|
|||
return self.expand(other.sizes());
|
||||
}
|
||||
|
||||
Tensor sum_to_size(const Tensor& self, IntList size) {
|
||||
Tensor sum_to_size(const Tensor& self, IntArrayRef size) {
|
||||
AT_CHECK(is_expandable_to(size, self.sizes()),
|
||||
"size {", size, "} is not expandable to size {", self.sizes(), "}.");
|
||||
|
||||
return sum_to(self, size);
|
||||
}
|
||||
|
||||
Tensor as_strided(const Tensor& self, IntList size, IntList stride, optional<int64_t> storage_offset_) {
|
||||
Tensor as_strided(const Tensor& self, IntArrayRef size, IntArrayRef stride, optional<int64_t> storage_offset_) {
|
||||
auto storage_offset = storage_offset_.value_or(self.storage_offset());
|
||||
auto tid = self.type_id();
|
||||
AT_CHECK(
|
||||
|
|
@ -309,7 +309,7 @@ Tensor as_strided(const Tensor& self, IntList size, IntList stride, optional<int
|
|||
return result;
|
||||
}
|
||||
|
||||
Tensor &as_strided_(Tensor& self, IntList size, IntList stride, optional<int64_t> storage_offset_) {
|
||||
Tensor &as_strided_(Tensor& self, IntArrayRef size, IntArrayRef stride, optional<int64_t> storage_offset_) {
|
||||
auto storage_offset = storage_offset_.value_or(self.storage_offset());
|
||||
setStrided(self, size, stride, storage_offset);
|
||||
return self;
|
||||
|
|
@ -364,7 +364,7 @@ Tensor narrow(const Tensor& self, int64_t dim, int64_t start, int64_t length) {
|
|||
return at::slice(self, dim, start, start + length, 1);
|
||||
}
|
||||
|
||||
Tensor permute(const Tensor& self, IntList dims) {
|
||||
Tensor permute(const Tensor& self, IntArrayRef dims) {
|
||||
auto nDims = self.dim();
|
||||
AT_CHECK(dims.size() == (size_t)nDims,
|
||||
"number of dims don't match in permute");
|
||||
|
|
@ -384,7 +384,7 @@ Tensor permute(const Tensor& self, IntList dims) {
|
|||
return self.as_strided(newSizes, newStrides);
|
||||
}
|
||||
|
||||
Tensor repeat(const Tensor& self, IntList repeats) {
|
||||
Tensor repeat(const Tensor& self, IntArrayRef repeats) {
|
||||
AT_CHECK(repeats.size() >= (size_t)self.dim(),
|
||||
"Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor");
|
||||
|
||||
|
|
@ -414,7 +414,7 @@ Tensor repeat(const Tensor& self, IntList repeats) {
|
|||
return result;
|
||||
}
|
||||
|
||||
Tensor reshape(const Tensor& self, IntList proposed_shape) {
|
||||
Tensor reshape(const Tensor& self, IntArrayRef proposed_shape) {
|
||||
if (self.is_sparse()) {
|
||||
AT_ERROR("reshape is not implemented for sparse tensors");
|
||||
}
|
||||
|
|
@ -503,7 +503,7 @@ std::vector<Tensor> split(const Tensor& self, int64_t split_size, int64_t dim) {
|
|||
return splits;
|
||||
}
|
||||
|
||||
std::vector<Tensor> split_with_sizes(const Tensor& self, IntList split_sizes, int64_t dim) {
|
||||
std::vector<Tensor> split_with_sizes(const Tensor& self, IntArrayRef split_sizes, int64_t dim) {
|
||||
AT_CHECK(self.dim() != 0, "split expects at least a 1-dimensional tensor");
|
||||
int64_t dim_size = self.size(dim);
|
||||
int64_t num_splits = split_sizes.size();
|
||||
|
|
@ -722,7 +722,7 @@ Tensor & squeeze_(Tensor& self, int64_t dim) {
|
|||
//
|
||||
// This is a hack because in-place operations on tensors treated like views
|
||||
// can be much more expensive than the same operations on non-view tensors.
|
||||
Tensor _unsafe_view(const Tensor& self, IntList size) {
|
||||
Tensor _unsafe_view(const Tensor& self, IntArrayRef size) {
|
||||
return self.view(size);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -44,7 +44,7 @@ void inline flip_cpu_kernel(
|
|||
}
|
||||
}
|
||||
|
||||
Tensor flip_cpu(const Tensor& self, IntList dims) {
|
||||
Tensor flip_cpu(const Tensor& self, IntArrayRef dims) {
|
||||
auto in_tensor = self;
|
||||
const int64_t total_dims = in_tensor.dim();
|
||||
auto flip_dims_b = at::dim_list_to_bitset(dims, total_dims);
|
||||
|
|
@ -73,7 +73,7 @@ Tensor flip_cpu(const Tensor& self, IntList dims) {
|
|||
return out_tensor;
|
||||
}
|
||||
|
||||
Tensor roll_cpu(const Tensor& self, IntList shifts, IntList dims) {
|
||||
Tensor roll_cpu(const Tensor& self, IntArrayRef shifts, IntArrayRef dims) {
|
||||
if (dims.size() != 1 || shifts.size() != 1) {
|
||||
return roll_common(self, shifts, dims);
|
||||
}
|
||||
|
|
@ -103,7 +103,7 @@ Tensor roll_cpu(const Tensor& self, IntList shifts, IntList dims) {
|
|||
return at::stack(vec, dim);
|
||||
}
|
||||
|
||||
Tensor rot90(const Tensor& self, int64_t k, IntList dims) {
|
||||
Tensor rot90(const Tensor& self, int64_t k, IntArrayRef dims) {
|
||||
const int64_t total_dims = self.dim(), total_rot_dims = dims.size();
|
||||
|
||||
AT_CHECK(total_rot_dims == 2,
|
||||
|
|
|
|||
|
|
@ -9,7 +9,7 @@
|
|||
namespace at {
|
||||
namespace native {
|
||||
|
||||
static inline void flip_check_errors(int64_t total_dims, int64_t flip_dims_size, IntList dims) {
|
||||
static inline void flip_check_errors(int64_t total_dims, int64_t flip_dims_size, IntArrayRef dims) {
|
||||
// check if number of axis in dim is valid
|
||||
AT_CHECK(flip_dims_size > 0 && flip_dims_size <= total_dims,
|
||||
"flip dims size out of range, got flip dims size=", flip_dims_size);
|
||||
|
|
@ -33,7 +33,7 @@ static inline void flip_check_errors(int64_t total_dims, int64_t flip_dims_size,
|
|||
", but unique flip dims size=", flip_dims_v.size());
|
||||
}
|
||||
|
||||
static inline Tensor roll_common(const Tensor& self, IntList shifts, IntList dims) {
|
||||
static inline Tensor roll_common(const Tensor& self, IntArrayRef shifts, IntArrayRef dims) {
|
||||
AT_CHECK(shifts.size() > 0, "`shifts` required");
|
||||
if (dims.size() == 0 && shifts.size() == 1) {
|
||||
auto flattened = self.contiguous().view(self.numel());
|
||||
|
|
|
|||
|
|
@ -14,7 +14,7 @@ using namespace vec256;
|
|||
|
||||
struct Indexer {
|
||||
Indexer(int64_t num_indexers, char** indexers, const int64_t* indexer_strides,
|
||||
IntList original_sizes, IntList original_strides)
|
||||
IntArrayRef original_sizes, IntArrayRef original_strides)
|
||||
: num_indexers(num_indexers)
|
||||
, indexers(indexers)
|
||||
, indexer_strides(indexer_strides)
|
||||
|
|
@ -58,7 +58,7 @@ static bool is_constant_index(int ntensor, const int64_t* strides) {
|
|||
}
|
||||
|
||||
template <typename scalar_t, typename func_t>
|
||||
void cpu_index_kernel(TensorIterator& iter, IntList index_size, IntList index_stride,
|
||||
void cpu_index_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride,
|
||||
const func_t& f, bool serial_execution=false)
|
||||
{
|
||||
auto loop = [&](int ntensor, char** data, const int64_t* strides, int64_t n) {
|
||||
|
|
@ -91,7 +91,7 @@ void cpu_index_kernel(TensorIterator& iter, IntList index_size, IntList index_st
|
|||
}
|
||||
}
|
||||
|
||||
void index_kernel(TensorIterator& iter, IntList index_size, IntList index_stride) {
|
||||
void index_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) {
|
||||
AT_DISPATCH_ALL_TYPES(iter.type(0), "index", [&] {
|
||||
cpu_index_kernel<scalar_t>(iter, index_size, index_stride, [](char* dst, char* src, int64_t offset) {
|
||||
*(scalar_t*)dst = *(scalar_t*)(src + offset);
|
||||
|
|
@ -99,7 +99,7 @@ void index_kernel(TensorIterator& iter, IntList index_size, IntList index_stride
|
|||
});
|
||||
}
|
||||
|
||||
void index_put_kernel(TensorIterator& iter, IntList index_size, IntList index_stride, bool accumulate) {
|
||||
void index_put_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, bool accumulate) {
|
||||
// NOTE: duplicate indices are only supported if accumulate is true.
|
||||
AT_DISPATCH_ALL_TYPES(iter.type(0), "index_put", [&] {
|
||||
if (accumulate) {
|
||||
|
|
|
|||
|
|
@ -207,7 +207,7 @@ namespace {
|
|||
void adaptive_avg_pool2d_out_cuda_template(
|
||||
Tensor& output,
|
||||
const Tensor& input,
|
||||
IntList output_size)
|
||||
IntArrayRef output_size)
|
||||
{
|
||||
TensorArg input_arg{ input, "input", 1 },
|
||||
output_arg{ output, "output", 2 };
|
||||
|
|
@ -386,7 +386,7 @@ namespace {
|
|||
Tensor& adaptive_avg_pool2d_out_cuda(
|
||||
Tensor& output,
|
||||
const Tensor& input,
|
||||
IntList output_size)
|
||||
IntArrayRef output_size)
|
||||
{
|
||||
adaptive_avg_pool2d_out_cuda_template(
|
||||
output, input, output_size);
|
||||
|
|
@ -395,7 +395,7 @@ namespace {
|
|||
|
||||
Tensor adaptive_avg_pool2d_cuda(
|
||||
at::Tensor const& input,
|
||||
IntList output_size)
|
||||
IntArrayRef output_size)
|
||||
{
|
||||
auto output = at::empty({0}, input.options());
|
||||
adaptive_avg_pool2d_out_cuda_template(
|
||||
|
|
|
|||
|
|
@ -34,7 +34,7 @@ struct CuFFTParams
|
|||
// would not be a POD anymore.
|
||||
static inline void setCuFFTParams(CuFFTParams* params,
|
||||
const Tensor& input, int64_t signal_ndim, bool complex_input,
|
||||
bool complex_output, IntList checked_signal_sizes, bool onesided) {
|
||||
bool complex_output, IntArrayRef checked_signal_sizes, bool onesided) {
|
||||
|
||||
memset(params, 0, sizeof(CuFFTParams));
|
||||
params->scalar_type_ = input.type().scalarType();
|
||||
|
|
@ -83,8 +83,8 @@ public:
|
|||
CuFFTConfig& operator=(CuFFTConfig const&) = delete;
|
||||
|
||||
explicit CuFFTConfig(Tensor& input, int64_t signal_ndim, bool complex_input,
|
||||
bool complex_output, IntList checked_signal_sizes, bool onesided,
|
||||
IntList output_sizes) {
|
||||
bool complex_output, IntArrayRef checked_signal_sizes, bool onesided,
|
||||
IntArrayRef output_sizes) {
|
||||
|
||||
// signal sizes
|
||||
#ifdef __HIP_PLATFORM_HCC__
|
||||
|
|
|
|||
|
|
@ -126,8 +126,8 @@ void fractional_max_pool2d_out_cuda_template(
|
|||
Tensor & output,
|
||||
Tensor& indices,
|
||||
const Tensor& input,
|
||||
IntList pool_size,
|
||||
IntList output_size,
|
||||
IntArrayRef pool_size,
|
||||
IntArrayRef output_size,
|
||||
const Tensor& randomSamples) {
|
||||
int planeDim = 0;
|
||||
int dimh = 1;
|
||||
|
|
@ -217,8 +217,8 @@ void fractional_max_pool2d_backward_out_cuda_template(
|
|||
Tensor& gradInput,
|
||||
const Tensor& gradOutput,
|
||||
const Tensor& input,
|
||||
IntList pool_size /* unused */,
|
||||
IntList output_size,
|
||||
IntArrayRef pool_size /* unused */,
|
||||
IntArrayRef output_size,
|
||||
const Tensor& indices)
|
||||
{
|
||||
int dimh = 1;
|
||||
|
|
@ -288,8 +288,8 @@ std::tuple<Tensor&, Tensor&> fractional_max_pool2d_out_cuda(
|
|||
at::Tensor& output,
|
||||
at::Tensor& indices,
|
||||
const at::Tensor& input,
|
||||
IntList pool_size,
|
||||
IntList output_size,
|
||||
IntArrayRef pool_size,
|
||||
IntArrayRef output_size,
|
||||
const at::Tensor& randomSamples)
|
||||
{
|
||||
fractional_max_pool2d_out_cuda_template(
|
||||
|
|
@ -304,8 +304,8 @@ std::tuple<Tensor&, Tensor&> fractional_max_pool2d_out_cuda(
|
|||
|
||||
std::tuple<Tensor, Tensor> fractional_max_pool2d_cuda(
|
||||
const at::Tensor& input,
|
||||
IntList pool_size,
|
||||
IntList output_size,
|
||||
IntArrayRef pool_size,
|
||||
IntArrayRef output_size,
|
||||
const at::Tensor& randomSamples)
|
||||
{
|
||||
Tensor output = at::empty({0}, input.options());
|
||||
|
|
@ -324,8 +324,8 @@ Tensor& fractional_max_pool2d_backward_out_cuda(
|
|||
at::Tensor& gradInput,
|
||||
const at::Tensor& gradOutput_,
|
||||
const at::Tensor& input,
|
||||
IntList pool_size,
|
||||
IntList output_size,
|
||||
IntArrayRef pool_size,
|
||||
IntArrayRef output_size,
|
||||
const at::Tensor& indices)
|
||||
{
|
||||
fractional_max_pool2d_backward_out_cuda_template(
|
||||
|
|
@ -341,8 +341,8 @@ Tensor& fractional_max_pool2d_backward_out_cuda(
|
|||
Tensor fractional_max_pool2d_backward_cuda(
|
||||
const at::Tensor& gradOutput_,
|
||||
const at::Tensor& input,
|
||||
IntList pool_size,
|
||||
IntList output_size,
|
||||
IntArrayRef pool_size,
|
||||
IntArrayRef output_size,
|
||||
const at::Tensor& indices)
|
||||
{
|
||||
Tensor gradInput = at::empty({0}, input.options());
|
||||
|
|
|
|||
|
|
@ -146,8 +146,8 @@ void fractional_max_pool3d_out_cuda_template(
|
|||
Tensor& output,
|
||||
Tensor& indices,
|
||||
const Tensor& input,
|
||||
IntList pool_size,
|
||||
IntList output_size,
|
||||
IntArrayRef pool_size,
|
||||
IntArrayRef output_size,
|
||||
const Tensor& randomSamples) {
|
||||
int64_t planeDim = 0;
|
||||
int64_t dimt = 1;
|
||||
|
|
@ -253,8 +253,8 @@ void fractional_max_pool3d_backward_out_cuda_template(
|
|||
Tensor& gradInput,
|
||||
const Tensor& gradOutput,
|
||||
const Tensor& input,
|
||||
IntList pool_size /* unused */,
|
||||
IntList output_size,
|
||||
IntArrayRef pool_size /* unused */,
|
||||
IntArrayRef output_size,
|
||||
const Tensor& indices) {
|
||||
int64_t dimt = 1;
|
||||
int64_t dimh = 2;
|
||||
|
|
@ -343,8 +343,8 @@ std::tuple<Tensor&, Tensor&> fractional_max_pool3d_out_cuda(
|
|||
at::Tensor& output,
|
||||
at::Tensor& indices,
|
||||
const at::Tensor& input,
|
||||
IntList pool_size,
|
||||
IntList output_size,
|
||||
IntArrayRef pool_size,
|
||||
IntArrayRef output_size,
|
||||
const at::Tensor& randomSamples) {
|
||||
fractional_max_pool3d_out_cuda_template(
|
||||
output,
|
||||
|
|
@ -359,8 +359,8 @@ std::tuple<Tensor&, Tensor&> fractional_max_pool3d_out_cuda(
|
|||
|
||||
std::tuple<Tensor, Tensor> fractional_max_pool3d_cuda(
|
||||
const at::Tensor& input,
|
||||
IntList pool_size,
|
||||
IntList output_size,
|
||||
IntArrayRef pool_size,
|
||||
IntArrayRef output_size,
|
||||
const at::Tensor& randomSamples) {
|
||||
Tensor output = at::empty({0}, input.options());
|
||||
Tensor indices = at::empty({0}, input.options().dtype(kLong));
|
||||
|
|
@ -379,8 +379,8 @@ Tensor& fractional_max_pool3d_backward_out_cuda(
|
|||
at::Tensor& gradInput,
|
||||
const at::Tensor& gradOutput_,
|
||||
const at::Tensor& input,
|
||||
IntList pool_size,
|
||||
IntList output_size,
|
||||
IntArrayRef pool_size,
|
||||
IntArrayRef output_size,
|
||||
const at::Tensor& indices) {
|
||||
fractional_max_pool3d_backward_out_cuda_template(
|
||||
gradInput,
|
||||
|
|
@ -396,8 +396,8 @@ Tensor& fractional_max_pool3d_backward_out_cuda(
|
|||
Tensor fractional_max_pool3d_backward_cuda(
|
||||
const at::Tensor& gradOutput,
|
||||
const at::Tensor& input,
|
||||
IntList pool_size,
|
||||
IntList output_size,
|
||||
IntArrayRef pool_size,
|
||||
IntArrayRef output_size,
|
||||
const at::Tensor& indices) {
|
||||
Tensor gradInput = at::empty({0}, input.options());
|
||||
fractional_max_pool3d_backward_out_cuda_template(
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ static OffsetCalculator<N> index_make_offset_calculator(const TensorIterator& it
|
|||
}
|
||||
|
||||
template <typename func_t>
|
||||
void gpu_index_kernel(TensorIterator& iter, IntList index_size, IntList index_stride, const func_t& f) {
|
||||
void gpu_index_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, const func_t& f) {
|
||||
int num_indices = index_size.size();
|
||||
AT_ASSERT(num_indices == index_stride.size());
|
||||
AT_ASSERT(num_indices == iter.ntensors() - 2);
|
||||
|
|
@ -67,20 +67,20 @@ template <int N> struct alignas(N) OpaqueType { char data[N]; };
|
|||
|
||||
|
||||
template <typename scalar_t>
|
||||
void index_kernel_impl(TensorIterator& iter, IntList index_size, IntList index_stride) {
|
||||
void index_kernel_impl(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) {
|
||||
gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) {
|
||||
*(scalar_t*)out_data = *(scalar_t*)(in_data + offset);
|
||||
});
|
||||
}
|
||||
|
||||
template <typename scalar_t>
|
||||
void index_put_kernel_impl(TensorIterator& iter, IntList index_size, IntList index_stride) {
|
||||
void index_put_kernel_impl(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) {
|
||||
gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) {
|
||||
*(scalar_t*)(out_data + offset) = *(scalar_t*)in_data;
|
||||
});
|
||||
}
|
||||
|
||||
static void index_kernel(TensorIterator& iter, IntList index_size, IntList index_stride) {
|
||||
static void index_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) {
|
||||
AT_DISPATCH_ALL_TYPES_AND_HALF(iter.type(), "index", [&] {
|
||||
using dtype = OpaqueType<sizeof(scalar_t)>;
|
||||
index_kernel_impl<dtype>(iter, index_size, index_stride);
|
||||
|
|
@ -88,7 +88,7 @@ static void index_kernel(TensorIterator& iter, IntList index_size, IntList index
|
|||
}
|
||||
|
||||
|
||||
static void index_put_kernel(TensorIterator& iter, IntList index_size, IntList index_stride, bool accumulate) {
|
||||
static void index_put_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, bool accumulate) {
|
||||
AT_ASSERTM(!accumulate, "index_put does not support accumulate=true");
|
||||
AT_DISPATCH_ALL_TYPES_AND_HALF(iter.type(), "index_put", [&] {
|
||||
using dtype = OpaqueType<sizeof(scalar_t)>;
|
||||
|
|
|
|||
|
|
@ -165,7 +165,7 @@ ctc_loss_log_alpha_gpu_kernel(scalar_t* __restrict__ log_alpha_data,
|
|||
// We return log_alpha (currently, might change to (log_alpha+log_beta) to be passed to the
|
||||
// backward. The dispatch function will only return the loss.
|
||||
template<typename scalar_t, ScalarType target_scalar_type>
|
||||
std::tuple<Tensor, Tensor> ctc_loss_gpu_template(const Tensor& log_probs, const Tensor& targets_, IntList input_lengths, IntList target_lengths, int64_t BLANK) {
|
||||
std::tuple<Tensor, Tensor> ctc_loss_gpu_template(const Tensor& log_probs, const Tensor& targets_, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t BLANK) {
|
||||
// log_probs: input_len x batch_size x num_labels
|
||||
// targets [int64]: batch_size x target_length OR sum(target_lengths)
|
||||
CheckedFrom c = "ctc_loss_gpu";
|
||||
|
|
@ -479,7 +479,7 @@ ctc_loss_backward_collect_gpu_kernel(scalar_t* __restrict__ gradient_data,
|
|||
// The backward. It essentially computes eq 16 by using the above kernels.
|
||||
// We don't do a lot of checking as we envision this to be called only when backpropagating through a (well-checked) forward.
|
||||
template<typename scalar_t, ScalarType target_scalar_type>
|
||||
Tensor ctc_loss_backward_gpu_template(const Tensor& grad_out, const Tensor& log_probs, const Tensor& targets_, IntList input_lengths, IntList target_lengths,
|
||||
Tensor ctc_loss_backward_gpu_template(const Tensor& grad_out, const Tensor& log_probs, const Tensor& targets_, IntArrayRef input_lengths, IntArrayRef target_lengths,
|
||||
const Tensor& neg_log_likelihood, const Tensor& log_alpha, int64_t BLANK) {
|
||||
constexpr scalar_t neginf = -INFINITY;
|
||||
using target_t = typename std::conditional<target_scalar_type == kInt, int, int64_t>::type;
|
||||
|
|
@ -623,7 +623,7 @@ Tensor ctc_loss_backward_gpu_template(const Tensor& grad_out, const Tensor& log_
|
|||
|
||||
} // namespace
|
||||
|
||||
std::tuple<Tensor, Tensor> ctc_loss_gpu(const Tensor& log_probs, const Tensor& targets, IntList input_lengths, IntList target_lengths, int64_t BLANK) {
|
||||
std::tuple<Tensor, Tensor> ctc_loss_gpu(const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t BLANK) {
|
||||
return AT_DISPATCH_FLOATING_TYPES(log_probs.type(), "ctc_loss", [&] {
|
||||
if (targets.type().scalarType() == kLong) {
|
||||
return ctc_loss_gpu_template<scalar_t, kLong>(log_probs, targets, input_lengths, target_lengths, BLANK);
|
||||
|
|
@ -633,7 +633,7 @@ std::tuple<Tensor, Tensor> ctc_loss_gpu(const Tensor& log_probs, const Tensor& t
|
|||
});
|
||||
}
|
||||
|
||||
Tensor ctc_loss_backward_gpu(const Tensor& grad, const Tensor& log_probs, const Tensor& targets, IntList input_lengths, IntList target_lengths,
|
||||
Tensor ctc_loss_backward_gpu(const Tensor& grad, const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths,
|
||||
const Tensor& neg_log_likelihood, const Tensor& log_alpha, int64_t BLANK) {
|
||||
return AT_DISPATCH_FLOATING_TYPES(log_probs.type(), "ctc_loss_backward", [&] {
|
||||
if (targets.type().scalarType() == kLong) {
|
||||
|
|
|
|||
|
|
@ -147,7 +147,7 @@ __global__ void reflection_pad2d_backward_out_kernel(
|
|||
}
|
||||
|
||||
void reflection_pad1d_out_template(
|
||||
Tensor &output, const Tensor &input_, IntList padding) {
|
||||
Tensor &output, const Tensor &input_, IntArrayRef padding) {
|
||||
AT_CHECK(canUse32BitIndexMath(input_),
|
||||
"input tensor must fit into 32-bit index math");
|
||||
|
||||
|
|
@ -204,7 +204,7 @@ void reflection_pad1d_out_template(
|
|||
|
||||
void reflection_pad1d_backward_out_template(
|
||||
Tensor & grad_input, const Tensor & grad_output_,
|
||||
const Tensor & input, IntList padding) {
|
||||
const Tensor & input, IntArrayRef padding) {
|
||||
|
||||
AT_CHECK(canUse32BitIndexMath(input),
|
||||
"input tensor must fit into 32-bit index math");
|
||||
|
|
@ -251,7 +251,7 @@ void reflection_pad1d_backward_out_template(
|
|||
}
|
||||
|
||||
void reflection_pad2d_out_template(
|
||||
Tensor &output, const Tensor &input_, IntList padding) {
|
||||
Tensor &output, const Tensor &input_, IntArrayRef padding) {
|
||||
AT_CHECK(canUse32BitIndexMath(input_),
|
||||
"input tensor must fit into 32-bit index math");
|
||||
|
||||
|
|
@ -325,7 +325,7 @@ void reflection_pad2d_out_template(
|
|||
|
||||
void reflection_pad2d_backward_out_template(
|
||||
Tensor &grad_input, const Tensor &grad_output_,
|
||||
const Tensor &input, IntList padding) {
|
||||
const Tensor &input, IntArrayRef padding) {
|
||||
AT_CHECK(canUse32BitIndexMath(input),
|
||||
"input tensor must fit into 32-bit index math");
|
||||
AT_CHECK(canUse32BitIndexMath(grad_output_),
|
||||
|
|
@ -384,12 +384,12 @@ void reflection_pad2d_backward_out_template(
|
|||
|
||||
|
||||
Tensor& reflection_pad1d_out_cuda(
|
||||
Tensor& output, const Tensor& input, IntList padding) {
|
||||
Tensor& output, const Tensor& input, IntArrayRef padding) {
|
||||
reflection_pad1d_out_template(output, input, padding);
|
||||
return output;
|
||||
}
|
||||
|
||||
Tensor reflection_pad1d_cuda(const Tensor& input, IntList padding) {
|
||||
Tensor reflection_pad1d_cuda(const Tensor& input, IntArrayRef padding) {
|
||||
auto output = at::empty({0}, input.options());
|
||||
reflection_pad1d_out_template(output, input, padding);
|
||||
return output;
|
||||
|
|
@ -398,7 +398,7 @@ Tensor reflection_pad1d_cuda(const Tensor& input, IntList padding) {
|
|||
Tensor& reflection_pad1d_backward_out_cuda(
|
||||
Tensor& grad_input, const Tensor& grad_output,
|
||||
const Tensor& input,
|
||||
IntList padding) {
|
||||
IntArrayRef padding) {
|
||||
grad_input.resize_as_(input);
|
||||
grad_input.zero_();
|
||||
reflection_pad1d_backward_out_template(
|
||||
|
|
@ -409,7 +409,7 @@ Tensor& reflection_pad1d_backward_out_cuda(
|
|||
Tensor reflection_pad1d_backward_cuda(
|
||||
const Tensor& grad_output,
|
||||
const Tensor& input,
|
||||
IntList padding) {
|
||||
IntArrayRef padding) {
|
||||
auto grad_input = at::zeros_like(input);
|
||||
reflection_pad1d_backward_out_template(
|
||||
grad_input, grad_output, input, padding);
|
||||
|
|
@ -417,12 +417,12 @@ Tensor reflection_pad1d_backward_cuda(
|
|||
}
|
||||
|
||||
Tensor& reflection_pad2d_out_cuda(
|
||||
Tensor& output, const Tensor& input, IntList padding) {
|
||||
Tensor& output, const Tensor& input, IntArrayRef padding) {
|
||||
reflection_pad2d_out_template(output, input, padding);
|
||||
return output;
|
||||
}
|
||||
|
||||
Tensor reflection_pad2d_cuda(const Tensor& input, IntList padding) {
|
||||
Tensor reflection_pad2d_cuda(const Tensor& input, IntArrayRef padding) {
|
||||
auto output = at::empty({0}, input.options());
|
||||
reflection_pad2d_out_template(output, input, padding);
|
||||
return output;
|
||||
|
|
@ -431,7 +431,7 @@ Tensor reflection_pad2d_cuda(const Tensor& input, IntList padding) {
|
|||
Tensor& reflection_pad2d_backward_out_cuda(
|
||||
Tensor& grad_input, const Tensor& grad_output,
|
||||
const Tensor& input,
|
||||
IntList padding) {
|
||||
IntArrayRef padding) {
|
||||
grad_input.resize_as_(input);
|
||||
grad_input.zero_();
|
||||
reflection_pad2d_backward_out_template(
|
||||
|
|
@ -442,7 +442,7 @@ Tensor& reflection_pad2d_backward_out_cuda(
|
|||
Tensor reflection_pad2d_backward_cuda(
|
||||
const Tensor& grad_output,
|
||||
const Tensor& input,
|
||||
IntList padding) {
|
||||
IntArrayRef padding) {
|
||||
auto grad_input = at::zeros_like(input);
|
||||
reflection_pad2d_backward_out_template(
|
||||
grad_input, grad_output, input, padding);
|
||||
|
|
|
|||
|
|
@ -203,7 +203,7 @@ __global__ void replication_pad_backward_kernel(
|
|||
void replication_pad1d_out_cuda_template(
|
||||
Tensor& output,
|
||||
const Tensor& input,
|
||||
IntList paddingSize)
|
||||
IntArrayRef paddingSize)
|
||||
{
|
||||
AT_CHECK(at::cuda::detail::canUse32BitIndexMath(input),
|
||||
"input tensor must fit into 32-bit index math");
|
||||
|
|
@ -276,7 +276,7 @@ void replication_pad1d_backward_out_cuda_template(
|
|||
Tensor& gradInput,
|
||||
const Tensor& gradOutput,
|
||||
const Tensor& input,
|
||||
IntList paddingSize)
|
||||
IntArrayRef paddingSize)
|
||||
{
|
||||
|
||||
AT_CHECK(at::cuda::detail::canUse32BitIndexMath(input),
|
||||
|
|
@ -334,7 +334,7 @@ void replication_pad1d_backward_out_cuda_template(
|
|||
void replication_pad2d_out_cuda_template(
|
||||
Tensor& output,
|
||||
const Tensor& input,
|
||||
IntList paddingSize)
|
||||
IntArrayRef paddingSize)
|
||||
{
|
||||
AT_CHECK(at::cuda::detail::canUse32BitIndexMath(input),
|
||||
"input tensor must fit into 32-bit index math");
|
||||
|
|
@ -415,7 +415,7 @@ void replication_pad2d_backward_out_cuda_template(
|
|||
Tensor& gradInput,
|
||||
const Tensor& gradOutput,
|
||||
const Tensor& input,
|
||||
IntList paddingSize)
|
||||
IntArrayRef paddingSize)
|
||||
{
|
||||
|
||||
AT_CHECK(at::cuda::detail::canUse32BitIndexMath(input),
|
||||
|
|
@ -571,7 +571,7 @@ static inline void shapeAndGradOutputCheck3d(
|
|||
void replication_pad3d_out_cuda_template(
|
||||
Tensor& output,
|
||||
const Tensor& input,
|
||||
IntList paddingSize)
|
||||
IntArrayRef paddingSize)
|
||||
{
|
||||
AT_CHECK(paddingSize.size() == 6, "padding Size is expected to be 6");
|
||||
int pleft = paddingSize[0];
|
||||
|
|
@ -652,7 +652,7 @@ void replication_pad3d_backward_out_cuda_template(
|
|||
Tensor& gradInput,
|
||||
const Tensor& gradOutput,
|
||||
const Tensor& input,
|
||||
IntList paddingSize)
|
||||
IntArrayRef paddingSize)
|
||||
{
|
||||
AT_CHECK(paddingSize.size() == 6, "padding Size is expected to be 6");
|
||||
int pleft = paddingSize[0];
|
||||
|
|
@ -711,7 +711,7 @@ void replication_pad3d_backward_out_cuda_template(
|
Tensor& replication_pad1d_out_cuda(
Tensor& output,
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
replication_pad1d_out_cuda_template(
output, input, paddingSize);

@@ -720,7 +720,7 @@ Tensor& replication_pad1d_out_cuda(
Tensor replication_pad1d_cuda(
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
auto output = at::empty({0}, input.options());
replication_pad1d_out_cuda_template(

@@ -732,7 +732,7 @@ Tensor& replication_pad1d_backward_out_cuda(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
replication_pad1d_backward_out_cuda_template(
gradInput, gradOutput, input, paddingSize);

@@ -742,7 +742,7 @@ Tensor& replication_pad1d_backward_out_cuda(
Tensor replication_pad1d_backward_cuda(
const Tensor& gradOutput,
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
auto gradInput = at::zeros_like(input);
replication_pad1d_backward_out_cuda_template(

@@ -753,7 +753,7 @@ Tensor replication_pad1d_backward_cuda(
Tensor& replication_pad2d_out_cuda(
Tensor& output,
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
replication_pad2d_out_cuda_template(
output, input, paddingSize);

@@ -762,7 +762,7 @@ Tensor& replication_pad2d_out_cuda(
Tensor replication_pad2d_cuda(
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
auto output = at::empty({0}, input.options());
replication_pad2d_out_cuda_template(

@@ -774,7 +774,7 @@ Tensor& replication_pad2d_backward_out_cuda(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
replication_pad2d_backward_out_cuda_template(
gradInput, gradOutput, input, paddingSize);

@@ -784,7 +784,7 @@ Tensor& replication_pad2d_backward_out_cuda(
Tensor replication_pad2d_backward_cuda(
const Tensor& gradOutput,
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
auto gradInput = at::zeros_like(input);
replication_pad2d_backward_out_cuda_template(

@@ -795,7 +795,7 @@ Tensor replication_pad2d_backward_cuda(
Tensor& replication_pad3d_out_cuda(
Tensor& output,
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
replication_pad3d_out_cuda_template(
output, input, paddingSize);

@@ -804,7 +804,7 @@ Tensor& replication_pad3d_out_cuda(
Tensor replication_pad3d_cuda(
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
auto output = at::empty({0}, input.options());
replication_pad3d_out_cuda_template(

@@ -816,7 +816,7 @@ Tensor& replication_pad3d_backward_out_cuda(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
replication_pad3d_backward_out_cuda_template(
gradInput, gradOutput, input, paddingSize);

@@ -826,7 +826,7 @@ Tensor& replication_pad3d_backward_out_cuda(
Tensor replication_pad3d_backward_cuda(
const Tensor& gradOutput,
const Tensor& input,
IntList paddingSize)
IntArrayRef paddingSize)
{
auto gradInput = at::zeros_like(input);
replication_pad3d_backward_out_cuda_template(
@@ -5,7 +5,7 @@
namespace at { namespace native {
Tensor& resize_cuda_(Tensor& self, IntList size) {
Tensor& resize_cuda_(Tensor& self, IntArrayRef size) {
auto* self_ = self.unsafeGetTensorImpl();
resize_impl_cuda_(self_, size, /*strides=*/c10::nullopt);
self_->maybe_zero_dim(size.size() == 0);
@@ -27,8 +27,8 @@ static inline void maybe_resize_storage_cuda(TensorImpl* self, int64_t new_size)
inline TensorImpl* resize_impl_cuda_(
TensorImpl* self,
IntList size,
c10::optional<IntList> stride,
IntArrayRef size,
c10::optional<IntArrayRef> stride,
bool device_guard = true) {
if (self->sizes() == size && (!stride || self->strides() == stride)) {
return self;
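The signatures above only change the declared type: IntArrayRef (an alias of c10::ArrayRef<int64_t>) is a non-owning pointer-plus-length view, so existing call sites that pass a std::vector<int64_t>, a brace list, or another tensor's sizes() keep compiling. A minimal sketch of that conversion behaviour; the helper name is illustrative and not part of the diff:

```
#include <ATen/ATen.h>
#include <cstdio>
#include <vector>

// Takes the same kind of parameter as resize_cuda_ above.
// IntArrayRef never owns the data it points to.
int64_t numel_of(at::IntArrayRef size) {
  int64_t n = 1;
  for (int64_t s : size) n *= s;
  return n;
}

int main() {
  std::vector<int64_t> v = {2, 3, 4};
  std::printf("%lld\n", (long long)numel_of(v));      // a std::vector converts implicitly
  std::printf("%lld\n", (long long)numel_of({5, 6})); // so does a brace list
  return 0;
}
```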
@@ -173,8 +173,8 @@ static void _fft_fill_with_conjugate_symmetry_(Tensor& input,
static inline Tensor _run_cufft(
const CuFFTConfig &config, Tensor& input, int64_t signal_ndim,
bool complex_input, bool complex_output, bool inverse,
IntList checked_signal_sizes, bool normalized, bool onesided,
IntList output_sizes, bool input_was_cloned
IntArrayRef checked_signal_sizes, bool normalized, bool onesided,
IntArrayRef output_sizes, bool input_was_cloned
) {
if (config.should_clone_input() && !input_was_cloned) {
input = input.clone();

@@ -291,8 +291,8 @@ void cufft_clear_plan_cache_impl() {
// Currently not utilizing multi GPUs so this can be potentially sped up.
Tensor _fft_cufft(const Tensor& self, int64_t signal_ndim,
bool complex_input, bool complex_output, bool inverse,
IntList checked_signal_sizes, bool normalized, bool onesided,
IntList output_sizes) {
IntArrayRef checked_signal_sizes, bool normalized, bool onesided,
IntArrayRef output_sizes) {
Tensor input = self;
bool input_was_cloned = false;
@@ -43,7 +43,7 @@ Tensor& eye_out_cuda(Tensor& result, int64_t n, int64_t m) {
return result;
}
Tensor empty_cuda(IntList size, const TensorOptions& options) {
Tensor empty_cuda(IntArrayRef size, const TensorOptions& options) {
AT_ASSERT(options.backend() == at::Backend::CUDA);
AT_ASSERT(!options.is_variable()); // is_variable should have been 'unpacked' // TODO: remove this when Variable and Tensor are merged

@@ -65,7 +65,7 @@ Tensor empty_cuda(IntList size, const TensorOptions& options) {
return tensor;
}
Tensor empty_strided_cuda(IntList size, IntList stride, const TensorOptions& options) {
Tensor empty_strided_cuda(IntArrayRef size, IntArrayRef stride, const TensorOptions& options) {
auto t = at::native::empty_cuda({0}, options);
at::native::resize_impl_cuda_(t.unsafeGetTensorImpl(), size, stride);
return t;
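Call sites of these factories are unaffected by the rename because brace lists convert implicitly to IntArrayRef, exactly like the at::empty({0}, options) call in the hunk above. A hedged usage sketch, assuming the public at::empty / at::empty_strided wrappers that dispatch to these kernels:

```
#include <ATen/ATen.h>

int main() {
  // Size and stride arguments are IntArrayRef parameters; brace lists bind to them.
  at::Tensor a = at::empty({2, 3}, at::kFloat);
  // {3, 1} is the contiguous stride for a 2x3 tensor.
  at::Tensor b = at::empty_strided({2, 3}, {3, 1}, at::kFloat);
  return (a.numel() == 6 && b.numel() == 6) ? 0 : 1;
}
```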
@@ -68,7 +68,7 @@ void flip_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N, int6
}
// Flip tensor given a list of dims
Tensor flip_cuda(const Tensor& self, IntList dims) {
Tensor flip_cuda(const Tensor& self, IntArrayRef dims) {
auto in_tensor = self;
const int64_t flip_dims_size = dims.size(), total_dims = in_tensor.dim(), N = in_tensor.numel();
flip_check_errors(total_dims, flip_dims_size, dims);

@@ -150,7 +150,7 @@ void roll_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N,
}
// Roll a tensor along a dimension
Tensor roll_cuda(const Tensor& self, IntList shifts, IntList dims) {
Tensor roll_cuda(const Tensor& self, IntArrayRef shifts, IntArrayRef dims) {
if (dims.size() != 1 || shifts.size() != 1) {
return roll_common(self, shifts, dims);
}
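The dims and shifts arguments of flip and roll are likewise plain IntArrayRef values, so from C++ they are usually spelled as brace lists. A small usage sketch, assuming the public at::flip / at::roll wrappers that route to the CUDA kernels above:

```
#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::arange(6, at::kFloat).reshape({2, 3});
  at::Tensor f = at::flip(t, {0, 1});    // reverse along both dimensions
  at::Tensor r = at::roll(t, {1}, {1});  // rotate columns by one position
  return (f.numel() == 6 && r.numel() == 6) ? 0 : 1;
}
```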
@@ -12,21 +12,21 @@ namespace at { namespace native {
at::Tensor cudnn_convolution(
const at::Tensor& input, const at::Tensor& weight, const at::Tensor& bias /* optional */,
IntList padding, IntList stride, IntList dilation,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation,
int64_t groups, bool benchmark, bool deterministic) {
AT_ERROR("cudnn_convolution: ATen not compiled with cuDNN support");
}
at::Tensor cudnn_convolution_backward_input(
IntList input_size, const at::Tensor& grad_output, const at::Tensor& weight,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef input_size, const at::Tensor& grad_output, const at::Tensor& weight,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic) {
AT_ERROR("cudnn_convolution_backward_input: ATen not compiled with cuDNN support");
}
at::Tensor cudnn_convolution_backward_weight(
IntList weight_size, const at::Tensor& grad_output, const at::Tensor& input,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef weight_size, const at::Tensor& grad_output, const at::Tensor& input,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic) {
AT_ERROR("cudnn_convolution_backward_weight: ATen not compiled with cuDNN support");
}

@@ -38,35 +38,35 @@ at::Tensor cudnn_convolution_backward_bias(
std::tuple<at::Tensor,at::Tensor,at::Tensor> cudnn_convolution_backward(
const at::Tensor& input, const at::Tensor& grad_output, const at::Tensor& weight,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic, std::array<bool,3> output_mask) {
AT_ERROR("cudnn_convolution_backward: ATen not compiled with cuDNN support");
}
at::Tensor cudnn_convolution_transpose(
const at::Tensor& input, const at::Tensor& weight, const at::Tensor& bias /* optional */,
IntList padding, IntList output_padding, IntList stride, IntList dilation,
IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation,
int64_t groups, bool benchmark, bool deterministic) {
AT_ERROR("cudnn_convolution_transpose: ATen not compiled with cuDNN support");
}
at::Tensor cudnn_convolution_transpose_backward_input(
const at::Tensor& grad_output, const at::Tensor& weight,
IntList padding, IntList stride, IntList dilation,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation,
int64_t groups, bool benchmark, bool deterministic) {
AT_ERROR("cudnn_convolution_transpose_backward: ATen not compiled with cuDNN support");
}
at::Tensor cudnn_convolution_transpose_backward_weight(
IntList weight_size, const at::Tensor& grad_output, const at::Tensor& input,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef weight_size, const at::Tensor& grad_output, const at::Tensor& input,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic) {
AT_ERROR("cudnn_convolution_transpose_backward_weight: ATen not compiled with cuDNN support");
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> cudnn_convolution_transpose_backward(
const at::Tensor& input, const at::Tensor& grad_output, const at::Tensor& weight,
IntList padding, IntList output_padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic, std::array<bool,3> output_mask) {
AT_ERROR("cudnn_convolution_transpose_backward: ATen not compiled with cuDNN support");
}

@@ -138,8 +138,8 @@ constexpr int max_dim = 3;
// takes an extra output_padding argument to resolve the ambiguity.
static std::vector<int64_t> conv_output_size(
IntList input_size, IntList weight_size,
IntList padding, IntList stride, IntList dilation, int64_t groups
IntArrayRef input_size, IntArrayRef weight_size,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups
) {
// ASSERT(input_size.size() > 2)
// ASSERT(input_size.size() == weight_size.size())

@@ -156,8 +156,8 @@ static std::vector<int64_t> conv_output_size(
}
std::vector<int64_t> conv_input_size(
IntList output_size, IntList weight_size,
IntList padding, IntList output_padding, IntList stride, IntList dilation, int64_t groups
IntArrayRef output_size, IntArrayRef weight_size,
IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups
) {
// ASSERT(output_size.size() > 2)
// ASSERT(output_size.size() == weight_size.size())

@@ -174,8 +174,8 @@ std::vector<int64_t> conv_input_size(
}
std::vector<int64_t> conv_weight_size(
IntList input_size, IntList output_size,
IntList padding, IntList output_padding, IntList stride, IntList dilation, int64_t groups
IntArrayRef input_size, IntArrayRef output_size,
IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups
) {
auto dim = input_size.size();
std::vector<int64_t> weight_size(dim);
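For reference, the per-dimension size that a helper like conv_output_size is expected to produce is the standard convolution output-size formula; the sketch below illustrates that formula and is not a copy of the implementation in this file.

```
#include <cassert>
#include <cstdint>

// Standard output-size formula for one spatial dimension of a convolution.
int64_t conv_out_dim(int64_t in, int64_t kernel, int64_t pad,
                     int64_t stride, int64_t dilation) {
  return (in + 2 * pad - dilation * (kernel - 1) - 1) / stride + 1;
}

int main() {
  // A 3x3 kernel with padding 1, stride 1, dilation 1 preserves the size.
  assert(conv_out_dim(32, 3, 1, 1, 1) == 32);
  return 0;
}
```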
@@ -210,7 +210,7 @@ Tensor narrowGroup(const Tensor& t, int dim, int group_idx, int64_t groups) {
// has a fairly good diagram explaining how it works.
// Used on pad, stride and dilation
static void check_args(CheckedFrom c, IntList args, size_t expected_size, const char* arg_name)
static void check_args(CheckedFrom c, IntArrayRef args, size_t expected_size, const char* arg_name)
{
AT_CHECK(args.size() <= expected_size,
"Too many ", arg_name, " values (", args.size(), ") supplied, expecting ",

@@ -248,7 +248,7 @@ static void check_args(CheckedFrom c, IntList args, size_t expected_size, const
static void convolution_shape_check(
CheckedFrom c,
const TensorGeometryArg& input, const TensorGeometryArg& weight, const TensorGeometryArg& output,
IntList padding, IntList stride, IntList dilation, int64_t groups)
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups)
{
check_args(c, padding, input->dim() - 2, "padding");
check_args(c, stride, padding.size(), "stride");

@@ -291,7 +291,7 @@ struct ConvolutionParams
void setConvolutionParams(
ConvolutionParams* params,
const at::Tensor& input, const at::Tensor& weight,
IntList padding, IntList stride, IntList dilation,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation,
int64_t groups, bool deterministic) {
cudnnDataType_t dataType = getCudnnDataType(input);

@@ -822,7 +822,7 @@ void cudnn_convolution_add_bias_(CheckedFrom c, const TensorArg& output, const T
//
void raw_cudnn_convolution_forward_out(
const Tensor& output, const Tensor& input, const Tensor& weight,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic) {
auto dataType = getCudnnDataType(input);

@@ -862,7 +862,7 @@ void raw_cudnn_convolution_forward_out(
Tensor cudnn_convolution_forward(
CheckedFrom c,
const TensorArg& input, const TensorArg& weight,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic)
{
checkAllSameType(c, {input, weight});

@@ -889,7 +889,7 @@ Tensor cudnn_convolution_forward(
Tensor cudnn_convolution(
const Tensor& input_t, const Tensor& weight_t, const Tensor& bias_t,
IntList padding, IntList stride, IntList dilation,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation,
int64_t groups, bool benchmark, bool deterministic)
{
TensorArg input { input_t, "input", 1 },

@@ -909,7 +909,7 @@ Tensor cudnn_convolution(
// resolve
Tensor cudnn_convolution_transpose_backward_input(
const Tensor& grad_output_t, const Tensor& weight_t,
IntList padding, IntList stride, IntList dilation,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation,
int64_t groups, bool benchmark, bool deterministic)
{
TensorArg grad_output { grad_output_t, "grad_output", 1 },

@@ -922,7 +922,7 @@ Tensor cudnn_convolution_transpose_backward_input(
std::tuple<at::Tensor,at::Tensor,at::Tensor> cudnn_convolution_transpose_backward(
const at::Tensor& input, const at::Tensor& grad_output_t, const at::Tensor& weight,
IntList padding, IntList output_padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic, std::array<bool,3> output_mask) {
Tensor grad_output = grad_output_t.contiguous();

@@ -951,7 +951,7 @@ void raw_cudnn_convolution_backward_input_out(
const at::Tensor& grad_input,
const at::Tensor& grad_output,
const at::Tensor& weight,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic) {
auto dataType = getCudnnDataType(grad_output);

@@ -997,8 +997,8 @@ void raw_cudnn_convolution_backward_input_out(
Tensor cudnn_convolution_backward_input(
CheckedFrom c,
IntList input_size, const TensorArg& grad_output, const TensorArg& weight,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef input_size, const TensorArg& grad_output, const TensorArg& weight,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic)
{
checkAllSameType(c, {grad_output, weight});

@@ -1023,7 +1023,7 @@ Tensor cudnn_convolution_backward_input(
Tensor cudnn_convolution_transpose_forward(
CheckedFrom c,
const TensorArg& grad_output, const TensorArg& weight,
IntList padding, IntList output_padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic)
{
auto input_size = conv_input_size(grad_output->sizes(), weight->sizes(),

@@ -1033,8 +1033,8 @@ Tensor cudnn_convolution_transpose_forward(
}
Tensor cudnn_convolution_backward_input(
IntList input_size, const Tensor& grad_output_t, const Tensor& weight_t,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef input_size, const Tensor& grad_output_t, const Tensor& weight_t,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic)
{
TensorArg grad_output{ grad_output_t, "grad_output", 1 },

@@ -1048,7 +1048,7 @@ Tensor cudnn_convolution_backward_input(
std::tuple<at::Tensor,at::Tensor,at::Tensor> cudnn_convolution_backward(
const at::Tensor& input, const at::Tensor& grad_output_t, const at::Tensor& weight,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic, std::array<bool,3> output_mask) {
Tensor grad_output = grad_output_t.contiguous();

@@ -1069,7 +1069,7 @@ std::tuple<at::Tensor,at::Tensor,at::Tensor> cudnn_convolution_backward(
Tensor cudnn_convolution_transpose(
const Tensor& input_t, const Tensor& weight_t, const Tensor& bias_t,
IntList padding, IntList output_padding, IntList stride, IntList dilation,
IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation,
int64_t groups, bool benchmark, bool deterministic)
{
TensorArg input { input_t, "input", 1 },

@@ -1092,7 +1092,7 @@ Tensor cudnn_convolution_transpose(
void raw_cudnn_convolution_backward_weight_out(
const Tensor& grad_weight, const Tensor& grad_output, const Tensor& input,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic) {
auto dataType = getCudnnDataType(input);

@@ -1126,8 +1126,8 @@ void raw_cudnn_convolution_backward_weight_out(
Tensor cudnn_convolution_backward_weight(
CheckedFrom c,
IntList weight_size, const TensorArg& grad_output, const TensorArg& input,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef weight_size, const TensorArg& grad_output, const TensorArg& input,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic)
{

@@ -1149,10 +1149,10 @@ Tensor cudnn_convolution_backward_weight(
}
Tensor cudnn_convolution_backward_weight(
IntList weight_size,
IntArrayRef weight_size,
const Tensor& grad_output_t,
const Tensor& input_t,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic)
{
TensorArg grad_output{ grad_output_t, "grad_output", 1 },

@@ -1165,10 +1165,10 @@ Tensor cudnn_convolution_backward_weight(
}
Tensor cudnn_convolution_transpose_backward_weight(
IntList weight_size,
IntArrayRef weight_size,
const Tensor& grad_output_t,
const Tensor& input_t,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic)
{
TensorArg grad_output{ grad_output_t, "grad_output", 1 },
@@ -13,7 +13,7 @@ namespace at { namespace native {
// See Note [ATen preprocessor philosophy]
std::tuple<Tensor, Tensor> _cudnn_ctc_loss(const Tensor& log_probs, const Tensor& targets, IntList input_lengths, IntList target_lengths, int64_t BLANK, bool deterministic) {
std::tuple<Tensor, Tensor> _cudnn_ctc_loss(const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t BLANK, bool deterministic) {
AT_ERROR("cudnn_ctc_loss: ATen not compiled with cuDNN >= 7 support");
}

@@ -33,7 +33,7 @@ namespace {
} // namespace
std::tuple<Tensor, Tensor> _cudnn_ctc_loss(const Tensor& log_probs_t, const Tensor& targets_t, IntList input_lengths_, IntList target_lengths_, int64_t BLANK, bool deterministic) {
std::tuple<Tensor, Tensor> _cudnn_ctc_loss(const Tensor& log_probs_t, const Tensor& targets_t, IntArrayRef input_lengths_, IntArrayRef target_lengths_, int64_t BLANK, bool deterministic) {
CheckedFrom c = "cudnn_ctc_loss";
TensorArg log_probs { log_probs_t, "log_probs", 1 };
TensorArg targets { targets_t, "targets", 2 };
@@ -32,7 +32,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> _cudnn_rnn(
const Tensor& weight_buf_r, const Tensor& hx, const Tensor& cx,
int64_t fn_mode, int64_t fn_hidden_size,
int64_t fn_num_layers, bool batch_first, double fn_dropout,
bool fn_train, bool fn_bidirectional, IntList fn_batch_sizes,
bool fn_train, bool fn_bidirectional, IntArrayRef fn_batch_sizes,
const Tensor& fn_dropout_state
) {
AT_ERROR("_cudnn_rnn: ATen not compiled with cuDNN support");

@@ -44,7 +44,7 @@ std::tuple<Tensor, Tensor, Tensor, std::vector<Tensor>> _cudnn_rnn_backward(
const Tensor& grad_cy_r,
int64_t mode, int64_t hidden_size,
int64_t num_layers, bool batch_first, double dropout,
bool train, bool bidirectional, IntList batch_sizes,
bool train, bool bidirectional, IntArrayRef batch_sizes,
const Tensor& dropout_state, const Tensor& reserve,
std::array<bool, 4> output_mask
) {

@@ -169,7 +169,7 @@ namespace {
// TensorDescriptor list
std::vector<TensorDescriptor> rnn_descriptor_sequence(const Tensor& tensor, IntList batch_sizes) {
std::vector<TensorDescriptor> rnn_descriptor_sequence(const Tensor& tensor, IntArrayRef batch_sizes) {
std::vector<TensorDescriptor> descriptors(batch_sizes.size());
size_t i = 0;
// To be mutated in the loop

@@ -253,10 +253,10 @@ namespace {
// input.size() = mini_batch x seq_length x input_size
//
struct TensorDescriptorListParams {
IntList batch_sizes;
IntArrayRef batch_sizes;
int64_t seq_length;
int64_t mini_batch;
// NB: this is not input.size(), which is an IntList; instead, this
// NB: this is not input.size(), which is an IntArrayRef; instead, this
// size of the inner-most dimension. In NL applications, this is usually
// the size of the embedding. You can also think of this as the size
// of the "channel" dimension (at risk of confusing vision researchers :)

@@ -268,7 +268,7 @@ namespace {
return batch_sizes.size() != 0;
}
void set(IntList input_sizes, IntList batch_sizes_, bool batch_first) {
void set(IntArrayRef input_sizes, IntArrayRef batch_sizes_, bool batch_first) {
batch_sizes = batch_sizes_;
if (is_input_packed()) {
seq_length = batch_sizes.size();

@@ -666,7 +666,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> _cudnn_rnn(
const Tensor& weight_buf_r, const Tensor& hx, const Tensor& cx,
int64_t fn_mode, int64_t fn_hidden_size,
int64_t fn_num_layers, bool batch_first, double fn_dropout,
bool fn_train, bool fn_bidirectional, IntList fn_batch_sizes,
bool fn_train, bool fn_bidirectional, IntArrayRef fn_batch_sizes,
const Tensor& fn_dropout_state
) {

@@ -736,7 +736,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> _cudnn_rnn(
}
AT_CHECK(!cx.defined() || cx.sizes().equals(hidden_size),
"Expected cell size ", IntList{hidden_size}, ", got ", cx.sizes());
"Expected cell size ", IntArrayRef{hidden_size}, ", got ", cx.sizes());
size_t workspace_size;
auto x_descs_arr = descs.get_x_descs();

@@ -808,7 +808,7 @@ std::tuple<Tensor, Tensor, Tensor> _cudnn_rnn_backward_input(
const Tensor& grad_cy,
int64_t fn_mode, int64_t fn_hidden_size,
int64_t fn_num_layers, bool batch_first, double fn_dropout,
bool fn_train, bool fn_bidirectional, IntList fn_batch_sizes,
bool fn_train, bool fn_bidirectional, IntArrayRef fn_batch_sizes,
const Tensor& fn_dropout_state, const Tensor& fn_reserve,
std::array<bool, 3> output_mask
) {

@@ -861,18 +861,18 @@ std::tuple<Tensor, Tensor, Tensor> _cudnn_rnn_backward_input(
"cudnn RNN backward can only be called in training mode");
AT_CHECK(input.sizes().equals(input_size),
"Expected input size ", IntList{input_size}, ", got ", input.sizes());
"Expected input size ", IntArrayRef{input_size}, ", got ", input.sizes());
AT_CHECK(output.sizes().equals(output_size),
"Expected output size ", IntList{output_size}, ", got ", output.sizes());
"Expected output size ", IntArrayRef{output_size}, ", got ", output.sizes());
AT_CHECK(!hx.defined() || hx.sizes().equals(hidden_size),
"Expected hidden size ", IntList{hidden_size}, ", got ", hx.sizes());
"Expected hidden size ", IntArrayRef{hidden_size}, ", got ", hx.sizes());
AT_CHECK(!cx.defined() || cx.sizes().equals(hidden_size),
"Expected cell size ", IntList{hidden_size}, ", got ", cx.sizes());
"Expected cell size ", IntArrayRef{hidden_size}, ", got ", cx.sizes());
AT_CHECK(!dhy.defined() || dhy.sizes().equals(hidden_size),
"Expected d_hidden size ", IntList{hidden_size}, ", got ", dhy.sizes());
"Expected d_hidden size ", IntArrayRef{hidden_size}, ", got ", dhy.sizes());
AT_CHECK(!dcy.defined() || dcy.sizes().equals(hidden_size),
"Expected d_cell size ", IntList{hidden_size}, ", got ", dcy.sizes());
"Expected d_cell size ", IntArrayRef{hidden_size}, ", got ", dcy.sizes());
AT_CHECK(dhy.is_cuda() && dy.is_cuda() && (!dcy.defined() || dcy.is_cuda()),
"Gradients aren't CUDA tensors");

@@ -931,7 +931,7 @@ std::vector<Tensor> _cudnn_rnn_backward_weight(
const Tensor& output_r,
int64_t fn_mode, int64_t fn_hidden_size,
int64_t fn_num_layers, bool batch_first, double fn_dropout,
bool fn_train, bool fn_bidirectional, IntList fn_batch_sizes,
bool fn_train, bool fn_bidirectional, IntArrayRef fn_batch_sizes,
const Tensor& fn_dropout_state, const Tensor& fn_reserve
) {

@@ -965,9 +965,9 @@ std::vector<Tensor> _cudnn_rnn_backward_weight(
"cudnn RNN backward can only be called in training mode");
AT_CHECK(input.sizes().equals(input_size),
"Expected input size ", IntList{input_size}, ", got ", input.sizes());
"Expected input size ", IntArrayRef{input_size}, ", got ", input.sizes());
AT_CHECK(!hx.defined() || hx.sizes().equals(hidden_size),
"Expected hidden size ", IntList{hidden_size}, ", got ", hx.sizes());
"Expected hidden size ", IntArrayRef{hidden_size}, ", got ", hx.sizes());
// TODO: the above were the only checks in rnn.py, but it doesn't seem
// like these checks are enough

@@ -1040,7 +1040,7 @@ std::tuple<Tensor, Tensor, Tensor, std::vector<Tensor>> _cudnn_rnn_backward(
const Tensor& grad_cy_r,
int64_t mode, int64_t hidden_size,
int64_t num_layers, bool batch_first, double dropout,
bool train, bool bidirectional, IntList batch_sizes,
bool train, bool bidirectional, IntArrayRef batch_sizes,
const Tensor& dropout_state, const Tensor& reserve,
std::array<bool, 4> output_mask
) {

@@ -1220,7 +1220,7 @@ std::pair<Tensor, hidden_type> _cudnn_impl(
}
AT_CHECK(_batch_sizes.dim() == 1, "batch_sizes tensor should be 1D");
IntList batch_sizes { _batch_sizes.data<int64_t>(), static_cast<size_t>(_batch_sizes.size(0)) };
IntArrayRef batch_sizes { _batch_sizes.data<int64_t>(), static_cast<size_t>(_batch_sizes.size(0)) };
auto & dropout_state = get_dropout_state(dropout_p, train, input.options());
std::unique_lock<DropoutState> lock { dropout_state };
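The batch_sizes line above is a reminder that IntArrayRef never owns its storage: it is built directly from _batch_sizes.data<int64_t>(), so the backing tensor has to outlive the view. A small sketch of the same pattern; the helper name is made up for illustration:

```
#include <ATen/ATen.h>

// Wrap a 1-D int64 CPU tensor as an IntArrayRef, as the RNN code does for
// batch_sizes. The returned view is only valid while `lengths` stays alive.
at::IntArrayRef as_int_array_ref(const at::Tensor& lengths) {
  AT_ASSERT(lengths.dim() == 1);
  return at::IntArrayRef(lengths.data<int64_t>(),
                         static_cast<size_t>(lengths.size(0)));
}

int main() {
  at::Tensor lengths = at::ones({4}, at::kLong);
  return as_int_array_ref(lengths).size() == 4 ? 0 : 1;
}
```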
@@ -14,21 +14,21 @@ namespace at { namespace native {
at::Tensor miopen_convolution(
const at::Tensor& input, const at::Tensor& weight, const at::Tensor& bias /* optional */,
IntList padding, IntList stride, IntList dilation,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation,
int64_t groups, bool benchmark, bool deterministic) {
AT_ERROR("miopen_convolution: ATen not compiled with MIOpen support");
}
at::Tensor miopen_convolution_backward_input(
IntList input_size, const at::Tensor& grad_output, const at::Tensor& weight,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef input_size, const at::Tensor& grad_output, const at::Tensor& weight,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic) {
AT_ERROR("miopen_convolution_backward_input: ATen not compiled with MIOpen support");
}
at::Tensor miopen_convolution_backward_weight(
IntList weight_size, const at::Tensor& grad_output, const at::Tensor& input,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef weight_size, const at::Tensor& grad_output, const at::Tensor& input,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic) {
AT_ERROR("miopen_convolution_backward_weight: ATen not compiled with MIOpen support");
}

@@ -40,35 +40,35 @@ at::Tensor miopen_convolution_backward_bias(
std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_convolution_backward(
const at::Tensor& input, const at::Tensor& grad_output, const at::Tensor& weight,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic, std::array<bool,3> output_mask) {
AT_ERROR("miopen_convolution_backward: ATen not compiled with MIOpen support");
}
at::Tensor miopen_convolution_transpose(
const at::Tensor& input, const at::Tensor& weight, const at::Tensor& bias /* optional */,
IntList padding, IntList output_padding, IntList stride, IntList dilation,
IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation,
int64_t groups, bool benchmark, bool deterministic) {
AT_ERROR("miopen_convolution_transpose: ATen not compiled with MIOpen support");
}
at::Tensor miopen_convolution_transpose_backward_input(
const at::Tensor& grad_output, const at::Tensor& weight,
IntList padding, IntList stride, IntList dilation,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation,
int64_t groups, bool benchmark, bool deterministic) {
AT_ERROR("miopen_convolution_transpose_backward: ATen not compiled with MIOpen support");
}
at::Tensor miopen_convolution_transpose_backward_weight(
IntList weight_size, const at::Tensor& grad_output, const at::Tensor& input,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef weight_size, const at::Tensor& grad_output, const at::Tensor& input,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic) {
AT_ERROR("miopen_convolution_transpose_backward_weight: ATen not compiled with MIOpen support");
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_convolution_transpose_backward(
const at::Tensor& input, const at::Tensor& grad_output, const at::Tensor& weight,
IntList padding, IntList output_padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic, std::array<bool,3> output_mask) {
AT_ERROR("miopen_convolution_transpose_backward: ATen not compiled with MIOpen support");
}

@@ -118,8 +118,8 @@ constexpr int max_dim = 3;
// takes an extra output_padding argument to resolve the ambiguity.
static std::vector<int64_t> conv_output_size(
IntList input_size, IntList weight_size,
IntList padding, IntList stride, IntList dilation, int64_t groups
IntArrayRef input_size, IntArrayRef weight_size,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups
) {
// ASSERT(input_size.size() > 2)
// ASSERT(input_size.size() == weight_size.size())

@@ -136,8 +136,8 @@ static std::vector<int64_t> conv_output_size(
}
std::vector<int64_t> conv_input_size(
IntList output_size, IntList weight_size,
IntList padding, IntList output_padding, IntList stride, IntList dilation, int64_t groups
IntArrayRef output_size, IntArrayRef weight_size,
IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups
) {
// ASSERT(output_size.size() > 2)
// ASSERT(output_size.size() == weight_size.size())

@@ -154,8 +154,8 @@ std::vector<int64_t> conv_input_size(
}
std::vector<int64_t> conv_weight_size(
IntList input_size, IntList output_size,
IntList padding, IntList output_padding, IntList stride, IntList dilation, int64_t groups
IntArrayRef input_size, IntArrayRef output_size,
IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups
) {
auto dim = input_size.size();
std::vector<int64_t> weight_size(dim);

@@ -181,7 +181,7 @@ Tensor narrowGroup(const Tensor& t, int dim, int group_idx, int64_t groups) {
// ---------------------------------------------------------------------
// Used on pad, stride and dilation
static void check_args(CheckedFrom c, IntList args, size_t expected_size, const char* arg_name)
static void check_args(CheckedFrom c, IntArrayRef args, size_t expected_size, const char* arg_name)
{
AT_CHECK(args.size() <= expected_size,
"Too many ", arg_name, " values (", args.size(), ") supplied, expecting ",

@@ -204,7 +204,7 @@ static void check_args(CheckedFrom c, IntList args, size_t expected_size, const
static void convolution_shape_check(
CheckedFrom c,
const TensorGeometryArg& input, const TensorGeometryArg& weight, const TensorGeometryArg& output,
IntList padding, IntList stride, IntList dilation, int64_t groups)
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups)
{
check_args(c, padding, input->dim() - 2, "padding");
check_args(c, stride, padding.size(), "stride");

@@ -245,7 +245,7 @@ static_assert(std::is_pod<ConvolutionParams>::value, "ConvolutionParams not POD"
void setConvolutionParams(
ConvolutionParams* params, miopenHandle_t handle,
const at::Tensor& input, const at::Tensor& weight,
IntList padding, IntList stride, IntList dilation,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation,
int64_t groups, bool deterministic) {
miopenDataType_t dataType = getMiopenDataType(input);

@@ -599,7 +599,7 @@ void miopen_convolution_add_bias_(CheckedFrom c, const TensorArg& output, const
//
void raw_miopen_convolution_forward_out(
const Tensor& output, const Tensor& input, const Tensor& weight,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic) {
auto dataType = getMiopenDataType(input);

@@ -629,7 +629,7 @@ void raw_miopen_convolution_forward_out(
Tensor miopen_convolution_forward(
CheckedFrom c,
const TensorArg& input, const TensorArg& weight,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic)
{
checkAllSameType(c, {input, weight});

@@ -656,7 +656,7 @@ Tensor miopen_convolution_forward(
Tensor miopen_convolution(
const Tensor& input_t, const Tensor& weight_t, const Tensor& bias_t,
IntList padding, IntList stride, IntList dilation,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation,
int64_t groups, bool benchmark, bool deterministic)
{
TensorArg input { input_t, "input", 1 },

@@ -674,7 +674,7 @@ Tensor miopen_convolution(
Tensor miopen_convolution_transpose_backward_input(
const Tensor& grad_output_t, const Tensor& weight_t,
IntList padding, IntList stride, IntList dilation,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation,
int64_t groups, bool benchmark, bool deterministic)
{
TensorArg grad_output { grad_output_t, "grad_output", 1 },

@@ -687,7 +687,7 @@ Tensor miopen_convolution_transpose_backward_input(
std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_convolution_transpose_backward(
const at::Tensor& input, const at::Tensor& grad_output_t, const at::Tensor& weight,
IntList padding, IntList output_padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic, std::array<bool,3> output_mask) {
Tensor grad_output = grad_output_t.contiguous();

@@ -716,7 +716,7 @@ void raw_miopen_convolution_backward_input_out(
const at::Tensor& grad_input,
const at::Tensor& grad_output,
const at::Tensor& weight,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic) {
auto dataType = getMiopenDataType(grad_output);

@@ -747,8 +747,8 @@ void raw_miopen_convolution_backward_input_out(
Tensor miopen_convolution_backward_input(
CheckedFrom c,
IntList input_size, const TensorArg& grad_output, const TensorArg& weight,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef input_size, const TensorArg& grad_output, const TensorArg& weight,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic)
{
checkAllSameType(c, {grad_output, weight});

@@ -773,7 +773,7 @@ Tensor miopen_convolution_backward_input(
Tensor miopen_convolution_transpose_forward(
CheckedFrom c,
const TensorArg& grad_output, const TensorArg& weight,
IntList padding, IntList output_padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic)
{
auto input_size = conv_input_size(grad_output->sizes(), weight->sizes(),

@@ -783,8 +783,8 @@ Tensor miopen_convolution_transpose_forward(
}
Tensor miopen_convolution_backward_input(
IntList input_size, const Tensor& grad_output_t, const Tensor& weight_t,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef input_size, const Tensor& grad_output_t, const Tensor& weight_t,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic)
{
TensorArg grad_output{ grad_output_t, "grad_output", 1 },

@@ -798,7 +798,7 @@ Tensor miopen_convolution_backward_input(
std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_convolution_backward(
const at::Tensor& input, const at::Tensor& grad_output_t, const at::Tensor& weight,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic, std::array<bool,3> output_mask) {
Tensor grad_output = grad_output_t.contiguous();

@@ -819,7 +819,7 @@ std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_convolution_backward(
Tensor miopen_convolution_transpose(
const Tensor& input_t, const Tensor& weight_t, const Tensor& bias_t,
IntList padding, IntList output_padding, IntList stride, IntList dilation,
IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation,
int64_t groups, bool benchmark, bool deterministic)
{
TensorArg input { input_t, "input", 1 },

@@ -842,7 +842,7 @@ Tensor miopen_convolution_transpose(
void raw_miopen_convolution_backward_weight_out(
const Tensor& grad_weight, const Tensor& grad_output, const Tensor& input,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic) {
auto dataType = getMiopenDataType(input);

@@ -871,8 +871,8 @@ void raw_miopen_convolution_backward_weight_out(
Tensor miopen_convolution_backward_weight(
CheckedFrom c,
IntList weight_size, const TensorArg& grad_output, const TensorArg& input,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef weight_size, const TensorArg& grad_output, const TensorArg& input,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic)
{

@@ -894,10 +894,10 @@ Tensor miopen_convolution_backward_weight(
}
Tensor miopen_convolution_backward_weight(
IntList weight_size,
IntArrayRef weight_size,
const Tensor& grad_output_t,
const Tensor& input_t,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic)
{
TensorArg grad_output{ grad_output_t, "grad_output", 1 },

@@ -910,10 +910,10 @@ Tensor miopen_convolution_backward_weight(
}
Tensor miopen_convolution_transpose_backward_weight(
IntList weight_size,
IntArrayRef weight_size,
const Tensor& grad_output_t,
const Tensor& input_t,
IntList padding, IntList stride, IntList dilation, int64_t groups,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic)
{
TensorArg grad_output{ grad_output_t, "grad_output", 1 },
@@ -9,9 +9,9 @@ namespace at { namespace native {
Tensor _fft_mkl(const Tensor& input, int64_t signal_ndim,
bool complex_input, bool complex_output,
bool inverse, IntList checked_signal_sizes,
bool inverse, IntArrayRef checked_signal_sizes,
bool normalized, bool onesided,
IntList output_sizes) {
IntArrayRef output_sizes) {
AT_ERROR("fft: ATen not compiled with MKL support");
}

@@ -162,9 +162,9 @@ static inline void _fft_fill_with_conjugate_symmetry_(Tensor& input,
// MKL DFTI
Tensor _fft_mkl(const Tensor& self, int64_t signal_ndim,
bool complex_input, bool complex_output,
bool inverse, IntList checked_signal_sizes,
bool inverse, IntArrayRef checked_signal_sizes,
bool normalized, bool onesided,
IntList output_sizes) {
IntArrayRef output_sizes) {
int64_t batch = self.size(0);
Tensor input = self;
// real/imag dimension must aligned when viewed as of complex type
@@ -8,25 +8,25 @@ namespace at { namespace native {
at::Tensor mkldnn_convolution(
const at::Tensor& input, const at::Tensor& weight, const at::Tensor& bias,
IntList padding, IntList stride, IntList dilation, int64_t groups) {
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups) {
AT_ERROR("mkldnn_convolution_forward: ATen not compiled with MKLDNN support");
}
at::Tensor mkldnn_convolution_backward_input(
IntList input_size, const at::Tensor& grad_output, const at::Tensor& weight,
IntList padding, IntList stride, IntList dilation, int64_t groups, bool bias_defined) {
IntArrayRef input_size, const at::Tensor& grad_output, const at::Tensor& weight,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool bias_defined) {
AT_ERROR("mkldnn_convolution_backward_input: ATen not compiled with MKLDNN support");
}
std::tuple<at::Tensor,at::Tensor> mkldnn_convolution_backward_weights(
IntList weight_size, const at::Tensor& grad_output, const at::Tensor& input,
IntList padding, IntList stride, IntList dilation, int64_t groups, bool bias_defined) {
IntArrayRef weight_size, const at::Tensor& grad_output, const at::Tensor& input,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool bias_defined) {
AT_ERROR("mkldnn_convolution_backward_weights: ATen not compiled with MKLDNN support");
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> mkldnn_convolution_backward(
const at::Tensor& input, const at::Tensor& grad_output_t, const at::Tensor& weight,
IntList padding, IntList stride, IntList dilation, int64_t groups, std::array<bool,3> output_mask) {
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, std::array<bool,3> output_mask) {
AT_ERROR("mkldnn_convolution_backward: ATen not compiled with MKLDNN support");
}

@@ -51,8 +51,8 @@ constexpr int weight_input_channels_dim = 1;
constexpr int max_dim = 3;
static std::vector<int64_t> conv_output_size(
IntList input_size, IntList weight_size,
IntList padding, IntList stride, IntList dilation, int64_t groups)
IntArrayRef input_size, IntArrayRef weight_size,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups)
{
auto dim = input_size.size();
std::vector<int64_t> output_size(dim);

@@ -68,7 +68,7 @@ static std::vector<int64_t> conv_output_size(
at::Tensor mkldnn_convolution(
const at::Tensor& input, const at::Tensor& weight, const at::Tensor& bias,
IntList padding, IntList stride, IntList dilation, int64_t groups)
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups)
{
auto output = at::empty(conv_output_size(
input.sizes(), weight.sizes(), padding, stride, dilation, groups), input.options());

@@ -179,8 +179,8 @@ at::Tensor mkldnn_convolution(
}
Tensor mkldnn_convolution_backward_input(
IntList input_size, const at::Tensor& grad_output, const at::Tensor& weight,
IntList padding, IntList stride, IntList dilation, int64_t groups, bool bias_defined)
IntArrayRef input_size, const at::Tensor& grad_output, const at::Tensor& weight,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool bias_defined)
{
auto grad_input = at::empty(input_size, grad_output.options());

@@ -291,8 +291,8 @@ Tensor mkldnn_convolution_backward_input(
}
std::tuple<at::Tensor, at::Tensor> mkldnn_convolution_backward_weights(
IntList weight_size, const at::Tensor& grad_output, const at::Tensor& input,
IntList padding, IntList stride, IntList dilation, int64_t groups, bool bias_defined)
IntArrayRef weight_size, const at::Tensor& grad_output, const at::Tensor& input,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool bias_defined)
{
auto grad_weight = at::empty(weight_size, grad_output.options());

@@ -425,7 +425,7 @@ std::tuple<at::Tensor, at::Tensor> mkldnn_convolution_backward_weights(
std::tuple<at::Tensor,at::Tensor,at::Tensor> mkldnn_convolution_backward(
const at::Tensor& input, const at::Tensor& grad_output_t, const at::Tensor& weight,
IntList padding, IntList stride, IntList dilation, int64_t groups, std::array<bool,3> output_mask)
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, std::array<bool,3> output_mask)
{
Tensor grad_output = grad_output_t.contiguous();
@@ -110,7 +110,7 @@ SparseTensor new_with_dims_and_tensor_sparse(
/** Public creation API that dispatch to methods above **/
/** Empty init **/
Tensor empty_sparse(IntList size, const TensorOptions& options) {
Tensor empty_sparse(IntArrayRef size, const TensorOptions& options) {
return new_with_dims_sparse(size.size(), 0, size, options);
}
@@ -839,11 +839,11 @@ Tensor _sparse_sum(const SparseTensor& input, ScalarType dtype) {
return input.coalesce().values().sum(dtype);
}
Tensor _sparse_sum(const SparseTensor& input, IntList dims_to_sum, ScalarType dtype) {
Tensor _sparse_sum(const SparseTensor& input, IntArrayRef dims_to_sum, ScalarType dtype) {
return at::_sparse_sum(input.to(dtype), dims_to_sum);
}
Tensor _sparse_sum(const SparseTensor& input, IntList dims_to_sum) {
Tensor _sparse_sum(const SparseTensor& input, IntArrayRef dims_to_sum) {
AT_CHECK(input._nnz() > 0, "_sparse_sum: sparse tensor input._nnz() == 0, please call torch.sparse.sum(input) instead.")
const int64_t input_dim = input.dim();

@@ -853,7 +853,7 @@ Tensor _sparse_sum(const SparseTensor& input, IntList dims_to_sum) {
LongTensor indices = input._indices();
Tensor values = input._values();
IntList sizes = input.sizes();
IntArrayRef sizes = input.sizes();
const int64_t sparse_dim = input.sparse_dim();
const int64_t dense_dim = input.dense_dim();

@@ -960,7 +960,7 @@ Tensor _sparse_sum(const SparseTensor& input, IntList dims_to_sum) {
// - assign zero values to input gradients if cannot find matched indices at grad
// - grad.values might have zeros
// --------------------------------------------------------------------
Tensor _sparse_sum_backward_cpu(const Tensor& grad_, const SparseTensor& input_, IntList dims_to_sum) {
Tensor _sparse_sum_backward_cpu(const Tensor& grad_, const SparseTensor& input_, IntArrayRef dims_to_sum) {
AT_CHECK(!grad_.is_cuda(), "_sparse_sum_backward_cpu: expected 'grad_' to be CPU tensor, but got CUDA tensor");
AT_CHECK(!input_.is_cuda(), "_sparse_sum_backward_cpu: expected 'input_' to be CPU tensor, but got CUDA tensor");

@@ -972,7 +972,7 @@ Tensor _sparse_sum_backward_cpu(const Tensor& grad_, const SparseTensor& input_,
LongTensor input_indices = input._indices();
Tensor input_values = input._values();
IntList input_sizes = input.sizes();
IntArrayRef input_sizes = input.sizes();
const int64_t input_sparse_dim = input.sparse_dim();
const int64_t input_dense_dim = input.dense_dim();
const int64_t input_nnz = input._nnz();
@@ -515,7 +515,7 @@ __global__ void _sparse_sum_backward_cuda_kernel(
}
}
Tensor _sparse_sum_backward_cuda(const Tensor& grad_, const SparseTensor& input_, IntList dims_to_sum) {
Tensor _sparse_sum_backward_cuda(const Tensor& grad_, const SparseTensor& input_, IntArrayRef dims_to_sum) {
AT_CHECK(grad_.is_cuda(), "_sparse_sum_backward_cuda: expected 'grad_' to be CUDA tensor, but got CPU tensor");
AT_CHECK(input_.is_cuda(), "_sparse_sum_backward_cuda: expected 'input_' to be CUDA tensor, but got CPU tensor");

@@ -527,7 +527,7 @@ Tensor _sparse_sum_backward_cuda(const Tensor& grad_, const SparseTensor& input_
LongTensor input_indices = input._indices();
Tensor input_values = input._values();
IntList input_sizes = input.sizes();
IntArrayRef input_sizes = input.sizes();
const int64_t input_sparse_dim = input.sparse_dim();
const int64_t input_dense_dim = input.dense_dim();
const int64_t input_nnz = input._nnz();
@@ -48,9 +48,9 @@ def type_argument_translations(arg):
    # Enables Tensor[] by translating to legacy TensorList.
    elif t == 'Tensor[]':
        t = 'TensorList'
    # Enables int[] by translating to legacy IntList.
    # Enables int[] by translating to legacy IntArrayRef.
    elif t == 'int[]':
        t = 'IntList'
        t = 'IntArrayRef'
    # Enables int by translating to legacy int64_t.
    elif t == 'int':
        t = 'int64_t'

@@ -59,10 +59,10 @@ def type_argument_translations(arg):
    # Enables float by translating to legacy double.
    elif t == 'float':
        t = 'double'
    # Enables int[x] by translating to legacy IntList[x]. See [temp translations]
    # Enables int[x] by translating to legacy IntArrayRef[x]. See [temp translations]
    elif re.match(r'int\[(\d+)\]', t):
        match = re.match(r'int\[(\d+)\]', t)
        t = 'IntList'
        t = 'IntArrayRef'
        size = int(match.group(1))
    # Enables bool[x] by translating to legacy std::array<bool,x>. See [temp translations]
    elif re.match(r'bool\[(\d+)\]', t):
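Concretely, the translation above means that a schema argument spelled int[] (or int[2]) in native_functions.yaml surfaces in the generated C++ API as an IntArrayRef parameter. A hypothetical wrapper with that shape of signature, forwarding to a real operator:

```
#include <ATen/ATen.h>

// "int[2] output_size" in the schema becomes an IntArrayRef parameter here.
at::Tensor pool_to(const at::Tensor& self, at::IntArrayRef output_size) {
  return at::adaptive_avg_pool2d(self, output_size);
}

int main() {
  at::Tensor t = at::ones({1, 3, 8, 8}, at::kFloat);
  return pool_to(t, {4, 4}).size(2) == 4 ? 0 : 1;
}
```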
@ -118,25 +118,25 @@
|
|||
|
||||
# Pooling
|
||||
|
||||
- name: _thnn_adaptive_avg_pool3d(Tensor self, IntList[3] output_size)
|
||||
- name: _thnn_adaptive_avg_pool3d(Tensor self, IntArrayRef[3] output_size)
|
||||
cname: VolumetricAdaptiveAveragePooling
|
||||
scalar_check:
|
||||
output: 'false'
|
||||
grad_input: 'false'
|
||||
|
||||
- name: _thnn_adaptive_max_pool2d(Tensor self, IntList[2] output_size)
|
||||
- name: _thnn_adaptive_max_pool2d(Tensor self, IntArrayRef[2] output_size)
|
||||
cname: SpatialAdaptiveMaxPooling
|
||||
scalar_check:
|
||||
output: 'false'
|
||||
grad_input: 'false'
|
||||
|
||||
- name: _thnn_adaptive_max_pool3d(Tensor self, IntList[3] output_size)
|
||||
- name: _thnn_adaptive_max_pool3d(Tensor self, IntArrayRef[3] output_size)
|
||||
cname: VolumetricAdaptiveMaxPooling
|
||||
scalar_check:
|
||||
output: 'false'
|
||||
grad_input: 'false'
|
||||
|
||||
- name: _thnn_avg_pool2d(Tensor self, IntList[2] kernel_size, IntList[2] stride={}, IntList[2] padding=0, bool ceil_mode=false, bool count_include_pad=true)
|
||||
- name: _thnn_avg_pool2d(Tensor self, IntArrayRef[2] kernel_size, IntArrayRef[2] stride={}, IntArrayRef[2] padding=0, bool ceil_mode=false, bool count_include_pad=true)
|
||||
cname: SpatialAveragePooling
|
||||
default_init:
|
||||
stride: kernel_size
|
||||
|
|
@ -144,7 +144,7 @@
|
|||
output: 'false'
|
||||
grad_input: 'false'
|
||||
|
||||
- name: _thnn_avg_pool3d(Tensor self, IntList[3] kernel_size, IntList[3] stride={}, IntList[3] padding=0, bool ceil_mode=false, bool count_include_pad=true)
|
||||
- name: _thnn_avg_pool3d(Tensor self, IntArrayRef[3] kernel_size, IntArrayRef[3] stride={}, IntArrayRef[3] padding=0, bool ceil_mode=false, bool count_include_pad=true)
|
||||
cname: VolumetricAveragePooling
|
||||
default_init:
|
||||
stride: kernel_size
|
||||
|
|
@@ -152,7 +152,7 @@
     output: 'false'
     grad_input: 'false'

-- name: _thnn_max_pool2d_with_indices(Tensor self, IntList[2] kernel_size, IntList[2] stride={}, IntList[2] padding=0, IntList[2] dilation=1, bool ceil_mode=false)
+- name: _thnn_max_pool2d_with_indices(Tensor self, IntArrayRef[2] kernel_size, IntArrayRef[2] stride={}, IntArrayRef[2] padding=0, IntArrayRef[2] dilation=1, bool ceil_mode=false)
   cname: SpatialDilatedMaxPooling
   default_init:
     stride: kernel_size
@@ -160,7 +160,7 @@
     output: 'false'
     grad_input: 'false'

-- name: _thnn_max_pool3d_with_indices(Tensor self, IntList[3] kernel_size, IntList[3] stride={}, IntList[3] padding=0, IntList[3] dilation=1, bool ceil_mode=false)
+- name: _thnn_max_pool3d_with_indices(Tensor self, IntArrayRef[3] kernel_size, IntArrayRef[3] stride={}, IntArrayRef[3] padding=0, IntArrayRef[3] dilation=1, bool ceil_mode=false)
   cname: VolumetricDilatedMaxPooling
   default_init:
     stride: kernel_size
@@ -168,13 +168,13 @@
     output: 'false'
     grad_input: 'false'

-- name: _thnn_max_unpool2d(Tensor self, LongTensor indices, IntList[2] output_size)
+- name: _thnn_max_unpool2d(Tensor self, LongTensor indices, IntArrayRef[2] output_size)
   cname: SpatialMaxUnpooling
   scalar_check:
     output: 'false'
     grad_input: 'false'

-- name: _thnn_max_unpool3d(Tensor self, LongTensor indices, IntList[3] output_size, IntList[3] stride, IntList[3] padding)
+- name: _thnn_max_unpool3d(Tensor self, LongTensor indices, IntArrayRef[3] output_size, IntArrayRef[3] stride, IntArrayRef[3] padding)
   cname: VolumetricMaxUnpooling
   scalar_check:
     output: 'false'
@@ -182,45 +182,45 @@

 # Upsampling

-# Note: The upsampling backwards functions also include an IntList input_size
+# Note: The upsampling backwards functions also include an IntArrayRef input_size
 # parameter, which is added by nn_parse.py

-- name: _thnn_upsample_linear1d(Tensor self, IntList[1] output_size, bool align_corners)
+- name: _thnn_upsample_linear1d(Tensor self, IntArrayRef[1] output_size, bool align_corners)
   cname: TemporalUpSamplingLinear
   scalar_check:
     self: 'false'
     grad_input: 'false'

-- name: _thnn_upsample_bilinear2d(Tensor self, IntList[2] output_size, bool align_corners)
+- name: _thnn_upsample_bilinear2d(Tensor self, IntArrayRef[2] output_size, bool align_corners)
   cname: SpatialUpSamplingBilinear
   scalar_check:
     self: 'false'
     grad_input: 'false'

-- name: _thnn_upsample_bicubic2d(Tensor self, IntList[2] output_size, bool align_corners)
+- name: _thnn_upsample_bicubic2d(Tensor self, IntArrayRef[2] output_size, bool align_corners)
   cname: SpatialUpSamplingBicubic
   scalar_check:
     grad_input: 'false'

-- name: _thnn_upsample_trilinear3d(Tensor self, IntList[3] output_size, bool align_corners)
+- name: _thnn_upsample_trilinear3d(Tensor self, IntArrayRef[3] output_size, bool align_corners)
   cname: VolumetricUpSamplingTrilinear
   scalar_check:
     self: 'false'
     grad_input: 'false'

-- name: _thnn_upsample_nearest1d(Tensor self, IntList[1] output_size)
+- name: _thnn_upsample_nearest1d(Tensor self, IntArrayRef[1] output_size)
   cname: TemporalUpSamplingNearest
   scalar_check:
     self: 'false'
     grad_input: 'false'

-- name: _thnn_upsample_nearest2d(Tensor self, IntList[2] output_size)
+- name: _thnn_upsample_nearest2d(Tensor self, IntArrayRef[2] output_size)
   cname: SpatialUpSamplingNearest
   scalar_check:
     self: 'false'
     grad_input: 'false'

-- name: _thnn_upsample_nearest3d(Tensor self, IntList[3] output_size)
+- name: _thnn_upsample_nearest3d(Tensor self, IntArrayRef[3] output_size)
   cname: VolumetricUpSamplingNearest
   scalar_check:
     self: 'false'
@@ -244,43 +244,43 @@

 # Convolutions

-- name: _thnn_conv_transpose2d(Tensor self, Tensor weight, IntList[2] kernel_size, Tensor? bias={}, IntList[2] stride=1, IntList[2] padding=0, IntList[2] output_padding=0, IntList[2] dilation=1)
+- name: _thnn_conv_transpose2d(Tensor self, Tensor weight, IntArrayRef[2] kernel_size, Tensor? bias={}, IntArrayRef[2] stride=1, IntArrayRef[2] padding=0, IntArrayRef[2] output_padding=0, IntArrayRef[2] dilation=1)
   cname: SpatialFullDilatedConvolution
   buffers: [columns, ones]

-- name: _thnn_conv_transpose3d(Tensor self, Tensor weight, IntList[3] kernel_size, Tensor? bias={}, IntList[3] stride=1, IntList[3] padding=0, IntList[3] output_padding=0, IntList[3] dilation=1)
+- name: _thnn_conv_transpose3d(Tensor self, Tensor weight, IntArrayRef[3] kernel_size, Tensor? bias={}, IntArrayRef[3] stride=1, IntArrayRef[3] padding=0, IntArrayRef[3] output_padding=0, IntArrayRef[3] dilation=1)
   cname: VolumetricFullDilatedConvolution
   buffers: [finput, fgrad_input]

-- name: _thnn_conv2d(Tensor self, Tensor weight, IntList[2] kernel_size, Tensor? bias={}, IntList[2] stride=1, IntList[2] padding=0)
+- name: _thnn_conv2d(Tensor self, Tensor weight, IntArrayRef[2] kernel_size, Tensor? bias={}, IntArrayRef[2] stride=1, IntArrayRef[2] padding=0)
   cname: SpatialConvolutionMM
   buffers: [finput, fgrad_input]

-- name: _thnn_conv_depthwise2d(Tensor self, Tensor weight, IntList[2] kernel_size, Tensor? bias={}, IntList[2] stride=1, IntList[2] padding=0, IntList[2] dilation=1)
+- name: _thnn_conv_depthwise2d(Tensor self, Tensor weight, IntArrayRef[2] kernel_size, Tensor? bias={}, IntArrayRef[2] stride=1, IntArrayRef[2] padding=0, IntArrayRef[2] dilation=1)
   cname: SpatialDepthwiseConvolution
   buffers: []

-- name: _thnn_conv3d(Tensor self, Tensor weight, IntList[3] kernel_size, Tensor? bias={}, IntList[3] stride=1, IntList[3] padding=0)
+- name: _thnn_conv3d(Tensor self, Tensor weight, IntArrayRef[3] kernel_size, Tensor? bias={}, IntArrayRef[3] stride=1, IntArrayRef[3] padding=0)
   cname: VolumetricConvolutionMM
   buffers: [finput, fgrad_input]

-- name: _thnn_conv_dilated2d(Tensor self, Tensor weight, IntList[2] kernel_size, Tensor? bias={}, IntList[2] stride=1, IntList[2] padding=0, IntList[2] dilation=1)
+- name: _thnn_conv_dilated2d(Tensor self, Tensor weight, IntArrayRef[2] kernel_size, Tensor? bias={}, IntArrayRef[2] stride=1, IntArrayRef[2] padding=0, IntArrayRef[2] dilation=1)
   cname: SpatialDilatedConvolution
   buffers: [columns, ones]

-- name: _thnn_conv_dilated3d(Tensor self, Tensor weight, IntList[3] kernel_size, Tensor? bias={}, IntList[3] stride=1, IntList[3] padding=0, IntList[3] dilation=1)
+- name: _thnn_conv_dilated3d(Tensor self, Tensor weight, IntArrayRef[3] kernel_size, Tensor? bias={}, IntArrayRef[3] stride=1, IntArrayRef[3] padding=0, IntArrayRef[3] dilation=1)
   cname: VolumetricDilatedConvolution
   buffers: [columns, ones]

 # Fold and Unfold

-- name: _thnn_col2im(Tensor self, IntList[2] output_size, IntList[2] kernel_size, IntList[2] dilation, IntList[2] padding, IntList[2] stride)
+- name: _thnn_col2im(Tensor self, IntArrayRef[2] output_size, IntArrayRef[2] kernel_size, IntArrayRef[2] dilation, IntArrayRef[2] padding, IntArrayRef[2] stride)
   cname: Col2Im
   scalar_check:
     output: 'false'
     grad_input: 'false'

-- name: _thnn_im2col(Tensor self, IntList[2] kernel_size, IntList[2] dilation, IntList[2] padding, IntList[2] stride)
+- name: _thnn_im2col(Tensor self, IntArrayRef[2] kernel_size, IntArrayRef[2] dilation, IntArrayRef[2] padding, IntArrayRef[2] stride)
   cname: Im2Col
   scalar_check:
     output: 'false'
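
The nn.yaml entries above change only the spelling of the declared argument type; at the C++ surface the fixed-size `IntArrayRef[k]` arguments still arrive as ordinary `IntArrayRef` parameters, usually passed as braced lists. A rough caller-side sketch, not part of this diff, assuming a libtorch build and the public wrappers around these THNN kernels:

```
#include <ATen/ATen.h>

int main() {
  at::Tensor input = at::randn({1, 3, 8, 8});

  // kernel_size / output_size are IntArrayRef parameters.
  at::Tensor pooled  = at::avg_pool2d(input, /*kernel_size=*/{2, 2});
  at::Tensor adapted = at::adaptive_avg_pool2d(input, /*output_size=*/{1, 1});

  return (pooled.size(2) == 4 && adapted.size(3) == 1) ? 0 : 1;
}
```
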
@@ -30,9 +30,9 @@ def argument_to_declaration(param, func=None):
     elif arg['type'] == 'Generator*':
         arg['type'] = 'THGenerator*'

-    match = re.match(r'IntList\[(\d+)\]', arg['type'])
+    match = re.match(r'IntArrayRef\[(\d+)\]', arg['type'])
     if match:
-        arg['type'] = 'IntList'
+        arg['type'] = 'IntArrayRef'
         arg['size'] = int(match.group(1))

     if '=' in name:
@@ -150,7 +150,7 @@ def get_thnn_args(thnn_function, params, inplace):
         if name not in params_by_name:
             raise RuntimeError('missing arg "{}" in {}'.format(name, thnn_function.name))
         param = params_by_name[name]
-        if param['type'] == 'IntList' and 'size' in param:
+        if param['type'] == 'IntArrayRef' and 'size' in param:
             name = name + '_'
             # NB: We calculate the dimension based on the name of
             # the argument, not its positional order. This means
@@ -300,7 +300,7 @@ def backward_declaration(base, thnn_functions):
         # Add input_size as parameter to upsample backwards functions
         # Note that input_size is 4-dim for upsample_xxx2d
         size = 2 + int(re.search(r'(\d+)d', base['name']).group(1))
-        input_size_arg = {'type': 'IntList', 'name': 'input_size', 'size': size}
+        input_size_arg = {'type': 'IntArrayRef', 'name': 'input_size', 'size': size}
         for output_size_idx, arg in enumerate(arguments):
             if arg['name'] == 'output_size':
                 break
@@ -308,7 +308,7 @@ def backward_declaration(base, thnn_functions):

     if 'im2col' in base['name']:
         # Add input_size as parameter to im2col backwards function
-        input_size_arg = {'type': 'IntList', 'name': 'input_size', 'size': 2}
+        input_size_arg = {'type': 'IntArrayRef', 'name': 'input_size', 'size': 2}
         arguments.insert(2, input_size_arg)

     # outputs from the forward may be inputs to the backwards
@@ -27,7 +27,7 @@ namespace native {

 inline Tensor from_blob(
     void* data,
-    IntList sizes,
+    IntArrayRef sizes,
     const std::function<void(void*)>& deleter,
     const TensorOptions& options = {}) {
   return at::getType(options).tensorFromBlob(data, sizes, deleter);
@@ -35,8 +35,8 @@ inline Tensor from_blob(

 inline Tensor from_blob(
     void* data,
-    IntList sizes,
-    IntList strides,
+    IntArrayRef sizes,
+    IntArrayRef strides,
     const std::function<void(void*)>& deleter,
     const TensorOptions& options = {}) {
   return at::getType(options).tensorFromBlob(data, sizes, strides, deleter);
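
The `from_blob` overloads above keep their shape and only swap the type of the sizes/strides parameters to `IntArrayRef`. A small usage sketch (assuming a libtorch build; the deleter here is a no-op because the caller keeps ownership of the buffer):

```
#include <ATen/ATen.h>
#include <vector>

int main() {
  // Buffer owned by the caller; from_blob only borrows it.
  std::vector<float> data(6, 1.0f);

  at::Tensor t = at::from_blob(
      data.data(),
      /*sizes=*/{2, 3},
      /*strides=*/{3, 1},
      /*deleter=*/[](void*) {},
      at::TensorOptions().dtype(at::kFloat));

  return t.sizes().equals({2, 3}) ? 0 : 1;
}
```
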
@@ -163,10 +163,10 @@ class CAFFE2_API Tensor {

   const char * toString() const;

-  IntList sizes() const {
+  IntArrayRef sizes() const {
     return impl_->sizes();
   }
-  IntList strides() const {
+  IntArrayRef strides() const {
     return impl_->strides();
   }
   int64_t ndimension() const {
@@ -119,10 +119,10 @@ struct CAFFE2_API Type {
       bool create_graph) const = 0;
   virtual void set_data(Tensor & self, Tensor new_data) const = 0;

-  virtual Tensor tensorFromBlob(void * data, IntList sizes, const std::function<void(void*)> & deleter=noop_deleter) const = 0;
-  virtual Tensor tensorFromBlob(void * data, IntList sizes, IntList strides, const std::function<void(void*)> & deleter=noop_deleter) const = 0;
-  virtual Tensor tensorWithAllocator(IntList sizes, Allocator* allocator) const = 0;
-  virtual Tensor tensorWithAllocator(IntList sizes, IntList strides, Allocator* allocator) const = 0;
+  virtual Tensor tensorFromBlob(void * data, IntArrayRef sizes, const std::function<void(void*)> & deleter=noop_deleter) const = 0;
+  virtual Tensor tensorFromBlob(void * data, IntArrayRef sizes, IntArrayRef strides, const std::function<void(void*)> & deleter=noop_deleter) const = 0;
+  virtual Tensor tensorWithAllocator(IntArrayRef sizes, Allocator* allocator) const = 0;
+  virtual Tensor tensorWithAllocator(IntArrayRef sizes, IntArrayRef strides, Allocator* allocator) const = 0;

   bool operator==(const Type& other) const {
     return this == &other;
@@ -58,7 +58,7 @@ Type & TypeDefault::toBackend(Backend b) const {
 Type & TypeDefault::toScalarType(ScalarType s) const {
   return at::globalContext().getNonVariableType(backend(),s);
 }
-static std::vector<int64_t> defaultStrides(IntList sizes) {
+static std::vector<int64_t> defaultStrides(IntArrayRef sizes) {
   std::vector<int64_t> strides(sizes.size());
   int64_t stride = 1;
   for(size_t i = sizes.size(); i > 0; --i) {
@@ -67,7 +67,7 @@ static std::vector<int64_t> defaultStrides(IntList sizes) {
   }
   return strides;
 }
-static int64_t computeStorageSize(IntList sizes, IntList strides) {
+static int64_t computeStorageSize(IntArrayRef sizes, IntArrayRef strides) {
   // size of the underlying storage is 1 bigger than the offset
   // of the last element according to stride
   int64_t size = 1;
@@ -79,17 +79,17 @@ static int64_t computeStorageSize(IntList sizes, IntList strides) {
   }
   return size;
 }
-Tensor TypeDefault::tensorFromBlob(void * data, IntList sizes, const std::function<void(void*)> & deleter) const {
+Tensor TypeDefault::tensorFromBlob(void * data, IntArrayRef sizes, const std::function<void(void*)> & deleter) const {
   return tensorFromBlob(data, sizes, defaultStrides(sizes), deleter);
 }
-Tensor TypeDefault::tensorFromBlob(void * data, IntList sizes, IntList strides, const std::function<void(void*)> & deleter) const {
+Tensor TypeDefault::tensorFromBlob(void * data, IntArrayRef sizes, IntArrayRef strides, const std::function<void(void*)> & deleter) const {
   auto storage = storageFromBlob(data, computeStorageSize(sizes, strides), deleter);
   return at::empty({0}, options()).set_(storage, 0, sizes, strides);
 }
-Tensor TypeDefault::tensorWithAllocator(IntList sizes, Allocator* allocator) const {
+Tensor TypeDefault::tensorWithAllocator(IntArrayRef sizes, Allocator* allocator) const {
   return tensorWithAllocator(sizes, defaultStrides(sizes), std::move(allocator));
 }
-Tensor TypeDefault::tensorWithAllocator(IntList sizes, IntList strides, Allocator* allocator) const {
+Tensor TypeDefault::tensorWithAllocator(IntArrayRef sizes, IntArrayRef strides, Allocator* allocator) const {
   auto storage = storageWithAllocator(computeStorageSize(sizes, strides), std::move(allocator));
   return at::empty({0}, options()).set_(storage, 0, sizes, strides);
 }
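
`defaultStrides()` and `computeStorageSize()` above are small helpers: the first derives contiguous (row-major) strides from sizes, the second computes how many elements the storage must hold for a given size/stride pair. A rough standalone sketch of that logic in plain C++ (not the actual ATen code):

```
#include <cstdint>
#include <vector>

// Contiguous strides: innermost dimension has stride 1, each outer stride is
// the product of the sizes to its right.
std::vector<int64_t> default_strides(const std::vector<int64_t>& sizes) {
  std::vector<int64_t> strides(sizes.size());
  int64_t stride = 1;
  for (size_t i = sizes.size(); i > 0; --i) {
    strides[i - 1] = stride;
    stride *= sizes[i - 1];
  }
  return strides;
}

// Storage length: one past the offset of the last reachable element, or 0 if
// any dimension is empty.
int64_t storage_size(const std::vector<int64_t>& sizes,
                     const std::vector<int64_t>& strides) {
  int64_t size = 1;
  for (size_t i = 0; i < sizes.size(); ++i) {
    if (sizes[i] == 0) {
      return 0;
    }
    size += strides[i] * (sizes[i] - 1);
  }
  return size;
}

int main() {
  auto s = default_strides({2, 3, 4});        // {12, 4, 1}
  return storage_size({2, 3, 4}, s) == 24 ? 0 : 1;
}
```
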
@@ -38,10 +38,10 @@ struct CAFFE2_API TypeDefault : public TypeExtendedInterface {
       bool create_graph) const override;
   void set_data(Tensor & self, Tensor new_data) const override;

-  Tensor tensorFromBlob(void * data, IntList sizes, const std::function<void(void*)> & deleter=noop_deleter) const override;
-  Tensor tensorFromBlob(void * data, IntList sizes, IntList strides, const std::function<void(void*)> & deleter=noop_deleter) const override;
-  Tensor tensorWithAllocator(IntList sizes, Allocator* allocator) const override;
-  Tensor tensorWithAllocator(IntList sizes, IntList strides, Allocator* allocator) const override;
+  Tensor tensorFromBlob(void * data, IntArrayRef sizes, const std::function<void(void*)> & deleter=noop_deleter) const override;
+  Tensor tensorFromBlob(void * data, IntArrayRef sizes, IntArrayRef strides, const std::function<void(void*)> & deleter=noop_deleter) const override;
+  Tensor tensorWithAllocator(IntArrayRef sizes, Allocator* allocator) const override;
+  Tensor tensorWithAllocator(IntArrayRef sizes, IntArrayRef strides, Allocator* allocator) const override;

   Storage storageFromBlob(void * data, int64_t size, const std::function<void(void*)> & deleter) const override;
   Storage storageWithAllocator(int64_t size, Allocator* allocator) const override;
@@ -23,7 +23,7 @@ void fill_tensor(int64_t scalar, Tensor& t_) {
 // write the same type as we read (using a0, ..., aX-1) and we once write to
 // double (using a4 as a target). We also exercise on a zero_dim and empty
 // tensor.
-void test(Type& type, IntList shape, int64_t a = 0, int64_t b = 1) {
+void test(Type& type, IntArrayRef shape, int64_t a = 0, int64_t b = 1) {
   auto zero_dim = at::empty({}, type);
   zero_dim.fill_(2);
   zero_dim.exp_();
@@ -8,7 +8,7 @@ using namespace at;

 static int test_int;

-Tensor empty_override(IntList size, const TensorOptions & options) {
+Tensor empty_override(IntArrayRef size, const TensorOptions & options) {
   test_int = 1;
   auto tensor_impl = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(
       Storage(
@@ -32,7 +32,7 @@ TEST(BackendExtensionTest, TestRegisterOp) {
   EXPECT_ANY_THROW(empty({5, 5}, at::kMSNPU));
   register_extension_backend_op(
     Backend::MSNPU,
-    "empty(IntList size, TensorOptions options) -> Tensor", &empty_override);
+    "empty(IntArrayRef size, TensorOptions options) -> Tensor", &empty_override);
   Tensor a = empty({5, 5}, at::kMSNPU);
   ASSERT_EQ(a.device().type(), at::kMSNPU);
   ASSERT_EQ(a.device().index(), 1);
@@ -61,6 +61,6 @@ TEST(BackendExtensionTest, TestRegisterOp) {
   EXPECT_ANY_THROW(
     register_extension_backend_op(
       Backend::MSNPU,
-      "empty(IntList size, TensorOptions options) -> Tensor", &empty_override)
+      "empty(IntArrayRef size, TensorOptions options) -> Tensor", &empty_override)
   );
 }
@@ -26,7 +26,7 @@ void require_equal_size_dim(const Tensor &lhs, const Tensor &rhs) {
   ASSERT_TRUE(lhs.sizes().equals(rhs.sizes()));
 }

-bool should_expand(const IntList &from_size, const IntList &to_size) {
+bool should_expand(const IntArrayRef &from_size, const IntArrayRef &to_size) {
   if (from_size.size() > to_size.size()) {
     return false;
   }
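
`should_expand` above now takes the two shapes as `IntArrayRef`; the test helper appears to check the usual broadcast rule (align shapes from the right; every dimension must match or be 1 in the source). A standalone sketch of that rule under that assumption, in plain C++ rather than the test code:

```
#include <cstdint>
#include <vector>

bool can_expand(const std::vector<int64_t>& from, const std::vector<int64_t>& to) {
  if (from.size() > to.size()) {
    return false;
  }
  for (size_t i = 0; i < from.size(); ++i) {
    int64_t f = from[from.size() - 1 - i];  // align from the rightmost dim
    int64_t t = to[to.size() - 1 - i];
    if (f != t && f != 1) {
      return false;
    }
  }
  return true;
}

int main() {
  return (can_expand({3, 1}, {2, 3, 4}) && !can_expand({2}, {3})) ? 0 : 1;
}
```
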
@@ -17,7 +17,7 @@ void THTensor_free(THTensor *self)
   c10::raw::intrusive_ptr::decref(self);
 }

-void THTensor_setStorage(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_, at::IntList size_, at::IntList stride_) {
+void THTensor_setStorage(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_, at::IntArrayRef size_, at::IntArrayRef stride_) {
   if (stride_.data()) {
     THArgCheck(size_.size() == stride_.size(), 5, "inconsistent size/stride sizes");
   }
@@ -61,7 +61,7 @@ void THTensor_setStorageNd(THTensor *self, THStorage *storage, ptrdiff_t storage
   THTensor_resizeNd(self, nDimension, size, stride);
 }

-void THTensor_resize(THTensor *self, at::IntList size, at::IntList stride)
+void THTensor_resize(THTensor *self, at::IntArrayRef size, at::IntArrayRef stride)
 {
   if (stride.data()) {
     THArgCheck(stride.size() == size.size(), 3, "invalid stride");
@@ -76,10 +76,10 @@ void THTensor_resize(THTensor *self, at::IntList size, at::IntList stride)
 void THTensor_resizeNd(THTensor *self, int nDimension, const int64_t *size, const int64_t *stride)
 {
   AT_CHECK(nDimension >= 0, "resizeNd nDimension must be non-negative");
-  at::IntList sizes(size, nDimension);
-  at::optional<at::IntList> strides;
+  at::IntArrayRef sizes(size, nDimension);
+  at::optional<at::IntArrayRef> strides;
   if (stride) {
-    strides = at::IntList(stride, nDimension);
+    strides = at::IntArrayRef(stride, nDimension);
   }
   at::native::resize_impl_cpu_(self, sizes, strides);
 }
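
`THTensor_resizeNd` above now wraps its raw `(pointer, length)` arguments in `at::IntArrayRef` directly. A short sketch of that constructor (assuming a libtorch build); the view copies nothing, so the backing array must stay alive for as long as the ref is used:

```
#include <ATen/ATen.h>

int main() {
  const int64_t dims[3] = {4, 5, 6};

  // Wrap a raw pointer + length without copying, as resizeNd does above.
  at::IntArrayRef sizes(dims, 3);

  at::Tensor t = at::empty(sizes);  // factory functions accept IntArrayRef
  return t.dim() == 3 ? 0 : 1;
}
```
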
@@ -91,9 +91,9 @@ void THTensor_resizeNd(THTensor *self, int nDimension, const int64_t *size, cons
 // where each chunk of newshape has matching ``numel'', i.e., number of subspaces,
 // as the corresponding chunk of oldshape.
 c10::optional<std::vector<int64_t>> THTensor_compute_stride(
-    at::IntList oldshape,
-    at::IntList oldstride,
-    at::IntList newshape) {
+    at::IntArrayRef oldshape,
+    at::IntArrayRef oldstride,
+    at::IntArrayRef newshape) {
   if (oldshape.empty()) {
     return std::vector<int64_t>(newshape.size(), 1);
   }
@@ -115,12 +115,12 @@ TH_API void THTensor_free(THTensor *self);
 TH_API void THTensor_setStorageNd(THTensor *self, THStorage *storage, ptrdiff_t storageOffset, int nDimension, const int64_t *size, const int64_t *stride);
 TH_API void THTensor_resizeNd(THTensor *self, int nDimension, const int64_t *size, const int64_t *stride);

-TH_CPP_API void THTensor_resize(THTensor *self, at::IntList size, at::IntList stride);
-TH_CPP_API void THTensor_setStorage(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_, at::IntList size_, at::IntList stride_);
+TH_CPP_API void THTensor_resize(THTensor *self, at::IntArrayRef size, at::IntArrayRef stride);
+TH_CPP_API void THTensor_setStorage(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_, at::IntArrayRef size_, at::IntArrayRef stride_);
 TH_CPP_API c10::optional<std::vector<int64_t>> THTensor_compute_stride(
-    at::IntList oldshape,
-    at::IntList oldstride,
-    at::IntList newshape);
+    at::IntArrayRef oldshape,
+    at::IntArrayRef oldstride,
+    at::IntArrayRef newshape);

 #include <TH/generic/THTensor.hpp>
 #include <TH/THGenerateAllTypes.h>
@@ -79,7 +79,7 @@ THTensor *THTensor_(newWithTensor)(THTensor *tensor)
 }

 /* Storage init */
-THTensor *THTensor_(newWithStorage)(THStorage *storage, ptrdiff_t storageOffset, at::IntList sizes, at::IntList strides) {
+THTensor *THTensor_(newWithStorage)(THStorage *storage, ptrdiff_t storageOffset, at::IntArrayRef sizes, at::IntArrayRef strides) {
   if (strides.data()) {
     AT_CHECK(sizes.size() == strides.size(), "number of sizes and strides must match");
   }
@@ -126,7 +126,7 @@ THTensor *THTensor_(newWithStorage4d)(THStorage *storage, ptrdiff_t storageOffse
                                {stride0, stride1, stride2, stride3});
 }

-THTensor *THTensor_(newWithSize)(at::IntList size, at::IntList stride)
+THTensor *THTensor_(newWithSize)(at::IntArrayRef size, at::IntArrayRef stride)
 {
   return THTensor_(newWithStorage)(NULL, 0, size, stride);
 }
@@ -200,7 +200,7 @@ THTensor *THTensor_(newUnfold)(THTensor *tensor, int dimension_, int64_t size_,
   return self;
 }

-THTensor *THTensor_(newView)(THTensor *tensor, at::IntList size)
+THTensor *THTensor_(newView)(THTensor *tensor, at::IntArrayRef size)
 {
   ptrdiff_t numel = THTensor_(nElement)(tensor);
   THTensor *self = THTensor_(new)();
@@ -217,7 +217,7 @@ THTensor *THTensor_(newView)(THTensor *tensor, at::IntList size)
 }

 /* Resize */
-void THTensor_(resize)(THTensor *self, at::IntList size, at::IntList stride)
+void THTensor_(resize)(THTensor *self, at::IntArrayRef size, at::IntArrayRef stride)
 {
   return THTensor_resize(self, size, stride);
 }
@@ -274,7 +274,7 @@ void THTensor_(set)(THTensor *self, THTensor *src)
                       THTensor_getStridePtr(src));
 }

-void THTensor_(setStorage)(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_, at::IntList size_, at::IntList stride_)
+void THTensor_(setStorage)(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_, at::IntArrayRef size_, at::IntArrayRef stride_)
 {
   THTensor_setStorage(self, storage_, storageOffset_, size_, stride_);
 }
@@ -9,13 +9,13 @@
 // new functions in here, they should probably be un-genericized.

 TH_CPP_API void THTensor_(setStorage)(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_,
-                                      at::IntList size_, at::IntList stride_);
-TH_CPP_API THTensor *THTensor_(newView)(THTensor *tensor, at::IntList size);
+                                      at::IntArrayRef size_, at::IntArrayRef stride_);
+TH_CPP_API THTensor *THTensor_(newView)(THTensor *tensor, at::IntArrayRef size);
 /* strides.data() might be NULL */
 TH_CPP_API THTensor *THTensor_(newWithStorage)(THStorage *storage, ptrdiff_t storageOffset,
-                                               at::IntList sizes, at::IntList strides);
+                                               at::IntArrayRef sizes, at::IntArrayRef strides);

-TH_CPP_API void THTensor_(resize)(THTensor *self, at::IntList size, at::IntList stride);
-TH_CPP_API THTensor *THTensor_(newWithSize)(at::IntList size, at::IntList stride);
+TH_CPP_API void THTensor_(resize)(THTensor *self, at::IntArrayRef size, at::IntArrayRef stride);
+TH_CPP_API THTensor *THTensor_(newWithSize)(at::IntArrayRef size, at::IntArrayRef stride);

 #endif
@@ -66,7 +66,7 @@ THCTensor *THCTensor_new(THCState *state, caffe2::TypeMeta type_meta) {
   }
 }

-void THCTensor_resize(THCState *state, THCTensor *self, at::IntList size, at::IntList stride) {
+void THCTensor_resize(THCState *state, THCTensor *self, at::IntArrayRef size, at::IntArrayRef stride) {
   if(stride.data()) {
     THArgCheck(stride.size() == size.size(), 3, "invalid stride");
   }
@@ -100,10 +100,10 @@ void THCTensor_resizeAs(THCState *state, THCTensor *self, THCTensor *src) {
 void THCTensor_resizeNd(THCState *state, THCTensor *self, int nDimension, const int64_t *size, const int64_t *stride)
 {
   AT_CHECK(nDimension >= 0, "resizeNd nDimension must be non-negative");
-  at::IntList sizes(size, nDimension);
-  at::optional<at::IntList> strides;
+  at::IntArrayRef sizes(size, nDimension);
+  at::optional<at::IntArrayRef> strides;
   if (stride) {
-    strides = at::IntList(stride, nDimension);
+    strides = at::IntArrayRef(stride, nDimension);
   }
   at::native::resize_impl_cuda_(self, sizes, strides, /*device_guard=*/false);
 }
@@ -120,7 +120,7 @@ void THCTensor_set(THCState *state, THCTensor *self, THCTensor *src)
                       THTensor_getStridePtr(src));
 }

-void THCTensor_setStorage(THCState *state, THCTensor *self, THCStorage *storage_, ptrdiff_t storageOffset_, at::IntList size_, at::IntList stride_)
+void THCTensor_setStorage(THCState *state, THCTensor *self, THCStorage *storage_, ptrdiff_t storageOffset_, at::IntArrayRef size_, at::IntArrayRef stride_)
 {
   if (stride_.data()) {
     THArgCheck(size_.size() == stride_.size(), 5, "inconsistent size/stride sizes");
@@ -23,12 +23,12 @@ THC_API int64_t THCTensor_strideLegacyNoScalars(THCState *state, const THCTensor

 THC_API THCTensor *THCTensor_new(THCState *state, caffe2::TypeMeta type_meta);

-THC_API void THCTensor_resize(THCState *state, THCTensor *tensor, at::IntList size, at::IntList stride);
+THC_API void THCTensor_resize(THCState *state, THCTensor *tensor, at::IntArrayRef size, at::IntArrayRef stride);
 THC_API void THCTensor_resizeNd(THCState *state, THCTensor *tensor, int nDimension, const int64_t *size, const int64_t *stride);
 THC_API void THCTensor_resizeAs(THCState *state, THCTensor *tensor, THCTensor *src);

 THC_API void THCTensor_set(THCState *state, THCTensor *self, THCTensor *src);
-THC_API void THCTensor_setStorage(THCState *state, THCTensor *self, THCStorage *storage_, ptrdiff_t storageOffset_, at::IntList size_, at::IntList stride_);
+THC_API void THCTensor_setStorage(THCState *state, THCTensor *self, THCStorage *storage_, ptrdiff_t storageOffset_, at::IntArrayRef size_, at::IntArrayRef stride_);
 THC_API void THCTensor_setStorageNd(THCState *state, THCTensor *self, THCStorage *storage, ptrdiff_t storageOffset, int nDimension, const int64_t *size, const int64_t *stride);

 THC_API void THCTensor_squeeze1d(THCState *state, THCTensor *self, THCTensor *src, int dimension_);
@@ -89,7 +89,7 @@ THCTensor *THCTensor_(newWithTensor)(THCState *state, THCTensor *tensor)
 }

 /* Storage init */
-THCTensor *THCTensor_(newWithStorage)(THCState *state, THCStorage *storage, ptrdiff_t storageOffset, at::IntList sizes, at::IntList strides) {
+THCTensor *THCTensor_(newWithStorage)(THCState *state, THCStorage *storage, ptrdiff_t storageOffset, at::IntArrayRef sizes, at::IntArrayRef strides) {
   if (strides.data()) {
     AT_CHECK(sizes.size() == strides.size(), "number of sizes and strides must match");
   }
@@ -136,7 +136,7 @@ THCTensor *THCTensor_(newWithStorage4d)(THCState *state, THCStorage *storage, pt
                                 {stride0, stride1, stride2, stride3});
 }

-THCTensor *THCTensor_(newWithSize)(THCState *state, at::IntList size, at::IntList stride)
+THCTensor *THCTensor_(newWithSize)(THCState *state, at::IntArrayRef size, at::IntArrayRef stride)
 {
   return THCTensor_(newWithStorage)(state, NULL, 0, size, stride);
 }
@@ -207,7 +207,7 @@ THCTensor *THCTensor_(newUnfold)(THCState *state, THCTensor *tensor, int dimensi
   return self;
 }

-THCTensor *THCTensor_(newView)(THCState *state, THCTensor *tensor, at::IntList size)
+THCTensor *THCTensor_(newView)(THCState *state, THCTensor *tensor, at::IntArrayRef size)
 {
   ptrdiff_t numel = THCTensor_(nElement)(state, tensor);
   THCTensor *self = THCTensor_(new)(state);
@@ -240,7 +240,7 @@ THCTensor *THCTensor_(newFoldBatchDim)(THCState *state, THCTensor *input) {
 }

 /* Resize */
-void THCTensor_(resize)(THCState *state, THCTensor *self, at::IntList size, at::IntList stride)
+void THCTensor_(resize)(THCState *state, THCTensor *self, at::IntArrayRef size, at::IntArrayRef stride)
 {
   THCTensor_resize(state, self, size, stride);
 }
@@ -290,7 +290,7 @@ void THCTensor_(set)(THCState *state, THCTensor *self, THCTensor *src)
   THCTensor_set(state, self, src);
 }

-void THCTensor_(setStorage)(THCState *state, THCTensor *self, THCStorage *storage_, ptrdiff_t storageOffset_, at::IntList size_, at::IntList stride_) {
+void THCTensor_(setStorage)(THCState *state, THCTensor *self, THCStorage *storage_, ptrdiff_t storageOffset_, at::IntArrayRef size_, at::IntArrayRef stride_) {
   THCTensor_setStorage(state, self, storage_, storageOffset_, size_, stride_);
 }

@@ -9,13 +9,13 @@
 // new functions in here, they should probably be un-genericized.

 THC_API void THCTensor_(setStorage)(THCState *state, THCTensor *self, THCStorage *storage_, ptrdiff_t storageOffset_,
-                                    at::IntList size_, at::IntList stride_);
-THC_API THCTensor *THCTensor_(newView)(THCState *state, THCTensor *tensor, at::IntList size);
+                                    at::IntArrayRef size_, at::IntArrayRef stride_);
+THC_API THCTensor *THCTensor_(newView)(THCState *state, THCTensor *tensor, at::IntArrayRef size);
 /* strides.data() might be nullptr */
 THC_API THCTensor *THCTensor_(newWithStorage)(THCState *state, THCStorage *storage, ptrdiff_t storageOffset,
-                                              at::IntList sizes, at::IntList strides);
+                                              at::IntArrayRef sizes, at::IntArrayRef strides);

-THC_API void THCTensor_(resize)(THCState *state, THCTensor *self, at::IntList size, at::IntList stride);
-THC_API THCTensor *THCTensor_(newWithSize)(THCState *state, at::IntList size, at::IntList stride);
+THC_API void THCTensor_(resize)(THCState *state, THCTensor *self, at::IntArrayRef size, at::IntArrayRef stride);
+THC_API THCTensor *THCTensor_(newWithSize)(THCState *state, at::IntArrayRef size, at::IntArrayRef stride);

 #endif
@@ -54,7 +54,7 @@ void THCTensor_(maskedCopy)(THCState* state,
   // iterator prefix sums? Convert `mask` to the same datatype as what
   // we're accumulating the prefix sum in (int64_t) to get around it
   THCudaLongTensor* maskLong = THCudaLongTensor_new(state);
-  at::IntList maskSizes = mask->sizes();
+  at::IntArrayRef maskSizes = mask->sizes();
   THCudaLongTensor_resize(state, maskLong, maskSizes, {});
   THCTensor_(copy)(state, maskLong, mask);

@@ -124,7 +124,7 @@ void THCTensor_(maskedSelect)(THCState* state,
   // iterator prefix sums? Convert `mask` to the same datatype as what
   // we're accumulating the prefix sum in (int64_t) to get around it
   THCudaLongTensor* maskLong = THCudaLongTensor_new(state);
-  at::IntList maskSizes = mask->sizes();
+  at::IntArrayRef maskSizes = mask->sizes();
   THCudaLongTensor_resize(state, maskLong, maskSizes, {});
   THCTensor_(copy)(state, maskLong, mask);

@@ -56,11 +56,11 @@ TensorImpl::TensorImpl(Storage&& storage, TensorTypeId type_id, const caffe2::Ty
   strides_.push_back(1);
 }

-IntList TensorImpl::sizes() const {
+IntArrayRef TensorImpl::sizes() const {
   return sizes_;
 }

-IntList TensorImpl::strides() const {
+IntArrayRef TensorImpl::strides() const {
   return strides_;
 }

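
`TensorImpl::sizes()` and `strides()` now hand back `IntArrayRef` views into the impl's own metadata, so the result is only valid while the tensor is alive and its shape untouched. A small sketch (assuming a libtorch build) of reading them and copying when the values need to outlive the tensor:

```
#include <ATen/ATen.h>
#include <vector>

int main() {
  at::Tensor t = at::zeros({2, 3});

  // Non-owning views into t's metadata.
  at::IntArrayRef sizes = t.sizes();
  at::IntArrayRef strides = t.strides();

  // Copy into an owning container if the values must outlive `t`.
  std::vector<int64_t> saved(sizes.begin(), sizes.end());

  return (strides[0] == 3 && strides[1] == 1 && saved.size() == 2) ? 0 : 1;
}
```
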
@@ -50,7 +50,7 @@ inline std::vector<int64_t> ToVectorint64_t(ArrayRef<int> src) {
 /**
  * Return product of all dimensions starting from k
  */
-inline int64_t size_from_dim_(int k, IntList dims) {
+inline int64_t size_from_dim_(int k, IntArrayRef dims) {
   int64_t r = 1;
   for (size_t i = k; i < dims.size(); ++i) {
     r *= dims[i];
@@ -59,7 +59,7 @@ inline int64_t size_from_dim_(int k, IntList dims) {
 }

 // Product of all dims up to k (not including dims[k])
-inline int64_t size_to_dim_(int k, IntList dims) {
+inline int64_t size_to_dim_(int k, IntArrayRef dims) {
   AT_ASSERT((unsigned)k <= dims.size());
   int64_t r = 1;
   for (int i = 0; i < k; ++i) {
@@ -69,7 +69,7 @@ inline int64_t size_to_dim_(int k, IntList dims) {
 }

 // Product of all dims between k and l (not including dims[k] and dims[l])
-inline int64_t size_between_dim_(int k, int l, IntList dims) {
+inline int64_t size_between_dim_(int k, int l, IntArrayRef dims) {
   AT_ASSERT((unsigned)l < dims.size());
   int64_t r = 1;
   if (k < l) {
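
The three helpers above just fold a product over a dimension range of an `IntArrayRef`. A standalone worked example of the same arithmetic (plain C++, not the Caffe2 code):

```
#include <cstdint>
#include <vector>

// Product of dims[i] for i in [begin, end), clamped to the vector length.
int64_t dim_product(const std::vector<int64_t>& dims, size_t begin, size_t end) {
  int64_t r = 1;
  for (size_t i = begin; i < end && i < dims.size(); ++i) {
    r *= dims[i];
  }
  return r;
}

int main() {
  std::vector<int64_t> dims = {8, 3, 32, 32};          // e.g. an NCHW shape
  int64_t from_1 = dim_product(dims, 1, dims.size());  // 3 * 32 * 32 = 3072
  int64_t to_1   = dim_product(dims, 0, 1);            // 8
  return (from_1 == 3072 && to_1 == 8) ? 0 : 1;
}
```
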
@@ -277,13 +277,13 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target {
    * Return a reference to the sizes of this tensor. This reference remains
    * valid as long as the tensor is live and not resized.
    */
-  virtual IntList sizes() const;
+  virtual IntArrayRef sizes() const;

   /**
    * Return a reference to the strides of this tensor. This reference remains
    * valid as long as the tensor is live and not restrided.
    */
-  virtual IntList strides() const;
+  virtual IntArrayRef strides() const;

   /**
    * Return the number of dimensions of this tensor. Note that 0-dimension
@@ -722,7 +722,7 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target {
    * WARNING: It is NOT valid to call this method on a Variable.
    * See Note [We regret making Variable hold a Tensor]
    */
-  void set_sizes_contiguous(IntList new_size) {
+  void set_sizes_contiguous(IntArrayRef new_size) {
     AT_CHECK(allow_tensor_metadata_change(), "set_sizes_contiguous is not allowed on Tensor created from .data or .detach()");
     AT_ASSERT(!is_variable()); // TODO: remove this when Variable and Tensor are merged
     auto old_dim = sizes_.size();
@@ -747,7 +747,7 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target {
    * WARNING: It is NOT valid to call this method on a Variable.
    * See Note [We regret making Variable hold a Tensor]
    */
-  void set_sizes_and_strides(IntList new_size, IntList new_stride) {
+  void set_sizes_and_strides(IntArrayRef new_size, IntArrayRef new_stride) {
     AT_CHECK(allow_tensor_metadata_change(), "set_sizes_and_strides is not allowed on Tensor created from .data or .detach()");
     AT_ASSERT(!is_variable()); // TODO: remove this when Variable and Tensor are merged
     AT_CHECK(
@@ -1281,23 +1281,23 @@ private:
   }

   bool SetDims() {
-    return SetDims(IntList{});
+    return SetDims(IntArrayRef{});
   }

   bool SetDims(const int64_t d0) {
-    return SetDims(IntList{d0});
+    return SetDims(IntArrayRef{d0});
   }

   bool SetDims(const int64_t d0, const int64_t d1) {
-    return SetDims(IntList{d0, d1});
+    return SetDims(IntArrayRef{d0, d1});
   }

   bool SetDims(const int64_t d0, const int64_t d1, const int64_t d2) {
-    return SetDims(IntList{d0, d1, d2});
+    return SetDims(IntArrayRef{d0, d1, d2});
   }

   bool SetDims(const int64_t d0, const int64_t d1, const int64_t d2, const int64_t d3) {
-    return SetDims(IntList{d0, d1, d2, d3});
+    return SetDims(IntArrayRef{d0, d1, d2, d3});
   }

   inline void update_to_contiguous_strides(size_t old_dim) {
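
`SetDims(IntArrayRef{d0, d1})` above is safe only because the temporary braced list outlives the call. Since `IntArrayRef` never owns its elements, storing such a view past the expression would dangle. A short sketch of the do/don't (assuming a libtorch build):

```
#include <ATen/ATen.h>
#include <vector>

int64_t sum(at::IntArrayRef dims) {
  int64_t s = 0;
  for (int64_t d : dims) {
    s += d;
  }
  return s;
}

int main() {
  int64_t ok = sum({2, 3, 4});  // fine: the temporary lives for the whole call

  // at::IntArrayRef dangling = {2, 3, 4};  // NOT fine: the initializer list
  //                                        // dies at the semicolon

  std::vector<int64_t> backing = {2, 3, 4};
  at::IntArrayRef safe = backing;           // fine: `backing` owns the data
  return (ok == 9 && sum(safe) == 9) ? 0 : 1;
}
```
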
@@ -8,7 +8,7 @@ UndefinedTensorImpl::UndefinedTensorImpl()
 : TensorImpl(UndefinedTensorId(), caffe2::TypeMeta(), nullptr, /* is variable */ false) {
 }

-IntList UndefinedTensorImpl::sizes() const {
+IntArrayRef UndefinedTensorImpl::sizes() const {
   AT_ERROR("sizes() called on undefined Tensor");
 }

@@ -32,7 +32,7 @@ int64_t UndefinedTensorImpl::storage_offset() const {
   AT_ERROR("storage_offset() called on an undefined Tensor");
 }

-IntList UndefinedTensorImpl::strides() const {
+IntArrayRef UndefinedTensorImpl::strides() const {
   AT_ERROR("strides() called on undefined Tensor");
 }
 UndefinedTensorImpl UndefinedTensorImpl::_singleton;
Some files were not shown because too many files have changed in this diff.