Mirror of https://github.com/zebrajr/pytorch.git, synced 2025-12-07 12:21:27 +01:00
[3/N] Remove unused functions (#128179)
Following https://github.com/pytorch/pytorch/pull/128005, this PR continues to remove unused functions.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/128179
Approved by: https://github.com/ezyang
This commit is contained in:
parent 8d16a73f0f
commit c219fa5eb9
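Background note (not part of the commit): most of the functions removed below are file-local (`static`), so once their last caller disappears the compiler itself can flag them. A minimal, hypothetical example of how such dead helpers show up; the file name and function are made up:

// unused_demo.cpp -- illustrative only, not a file in this PR
static int helper_nobody_calls(int x) {
  return x * 2;
}

int main() {
  return 0;  // helper_nobody_calls is never referenced
}

Compiling with `g++ -Wunused-function -c unused_demo.cpp` (or clang++; the warning is enabled by -Wall in both) reports roughly: warning: 'int helper_nobody_calls(int)' defined but not used. That is one common way dead helpers are spotted before a cleanup pass like this series.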
@@ -31,46 +31,6 @@ Tensor index_select_backward_hack(const Tensor& grad, IntArrayRef self_sizes, in
   return at::zeros(self_sizes, grad.options()).index_add(dim, index, grad);
 }
 
-static optional<std::tuple<Tensor,int64_t>> unwrap(const Tensor& tensor) {
-  auto* wrapped = maybeGetTensorWrapper(tensor);
-  if (wrapped) {
-    if (wrapped->level().has_value()) {
-      return std::make_tuple(wrapped->value(), *wrapped->level());
-    }
-    return unwrap(wrapped->value());
-  }
-  auto* batched = maybeGetBatchedImpl(tensor);
-  if (batched) {
-    return std::make_tuple(batched->value(), batched->level());
-  }
-  return nullopt;
-}
-
-static bool can_perform_inplace(const Tensor& a, const Tensor& b) {
-  // TODO: generalize this to more transforms
-  auto a_ = unwrap(a);
-  auto b_ = unwrap(b);
-  if (!a_.has_value() && b_.has_value()) {
-    return false;
-  }
-  if (!a_.has_value() && !b_.has_value()) {
-    return true;
-  }
-  if (a_.has_value() && !b_.has_value()) {
-    return true;
-  }
-  TORCH_INTERNAL_ASSERT(a_.has_value() && b_.has_value());
-
-  // If b has any wrapper that a does not, then we cannot do a.inplace_(b)
-  if (std::get<1>(*a_) < std::get<1>(*b_)) {
-    return false;
-  }
-  if (std::get<1>(*a_) > std::get<1>(*b_)) {
-    return can_perform_inplace(std::get<0>(*a_), b);
-  }
-  return can_perform_inplace(std::get<0>(*a_), std::get<0>(*b_));
-}
-
 // TODO: linear is pretty important for performance, but I'm not sure how to work
 // around the in-place.
 Tensor linear_hack(const Tensor& input, const Tensor& weight, const std::optional<Tensor>& bias_opt) {
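For context only (my reading, not code from the PR): the deleted can_perform_inplace encoded the rule that a.inplace_(b) is safe only if every transform level wrapping b also wraps a. A standalone toy sketch of that rule, using plain level stacks instead of the real wrapper types, and assuming levels are listed outermost first:

#include <cassert>
#include <cstdint>
#include <vector>

// Toy restatement of the deleted decision rule: writing b into a in place is
// fine only if b carries no wrapper level that a lacks.
static bool can_write_into(std::vector<int64_t> a, std::vector<int64_t> b) {
  size_t ai = 0, bi = 0;
  while (bi < b.size()) {
    if (ai == a.size() || a[ai] < b[bi]) {
      return false;  // b is wrapped at a level a does not have
    }
    if (a[ai] > b[bi]) {
      ++ai;           // a has an extra outer wrapper; peel it and keep checking
    } else {
      ++ai; ++bi;     // matching level on both sides
    }
  }
  return true;
}

int main() {
  assert(can_write_into({3, 1}, {1}));   // a carries every wrapper of b
  assert(!can_write_into({1}, {3, 1}));  // b has level 3, a does not
  assert(can_write_into({}, {}));        // two plain tensors
  return 0;
}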
@@ -28,18 +28,6 @@ Tensor empty_meta_symint(
       size, dtype_opt, layout_opt, device_opt, pin_memory_opt, memory_format_opt);
 }
 
-// Kept only for BC with XLA
-static Tensor empty_strided_meta(
-    IntArrayRef size,
-    IntArrayRef stride,
-    std::optional<ScalarType> dtype_opt,
-    std::optional<Layout> layout_opt,
-    std::optional<Device> device_opt,
-    std::optional<bool> pin_memory_opt
-    ) {
-  return empty_strided_meta_symint(c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), dtype_opt, layout_opt, device_opt, pin_memory_opt);
-}
-
 Tensor empty_strided_meta_symint(
     SymIntArrayRef size,
     SymIntArrayRef stride,
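Note on this hunk (my reading, not text from the PR): the deleted empty_strided_meta was a thin IntArrayRef shim over the SymInt overload, kept only for XLA backward compatibility. A hedged sketch of how an IntArrayRef call site could reach the surviving overload directly, reusing the same conversion the shim used; the wrapper name make_strided_meta is made up and the include is assumed:

#include <ATen/ATen.h>  // assumed to pull in the needed c10/at::native declarations
#include <optional>

// Hypothetical caller, illustrative only.
at::Tensor make_strided_meta(at::IntArrayRef size, at::IntArrayRef stride) {
  return at::native::empty_strided_meta_symint(
      c10::fromIntArrayRefSlow(size),
      c10::fromIntArrayRefSlow(stride),
      /*dtype_opt=*/std::nullopt,
      /*layout_opt=*/std::nullopt,
      /*device_opt=*/std::nullopt,
      /*pin_memory_opt=*/std::nullopt);
}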
@@ -24,10 +24,6 @@
 
 namespace at::native {
 
-static bool is_cuda(const Tensor& self) {
-  return self.is_cuda();
-}
-
 bool is_distributed(const Tensor& self) {
   return false;
 }
@@ -60,18 +56,6 @@ bool is_neg(const Tensor& self) {
   return self.is_neg();
 }
 
-static bool is_sparse(const Tensor& self) {
-  return self.is_sparse();
-}
-
-static bool is_sparse_csr(const Tensor& self) {
-  return self.is_sparse_csr();
-}
-
-static bool is_quantized(const Tensor& self) {
-  return self.is_quantized();
-}
-
 // True if `self` and `from` have compatible tensor type so that `from`'s
 // TensorImpl can be copied to `self`.
 bool _has_compatible_shallow_copy_type(const Tensor& self, const Tensor& from) {
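These two hunks delete one-line forwarders (is_cuda, is_sparse, is_sparse_csr, is_quantized) whose bodies only called the corresponding public at::Tensor methods, so callers lose nothing. A small sketch of querying the same properties through the Tensor API directly, assuming a standard ATen build:

#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor t = at::zeros({2, 3});
  // The same predicates the deleted free functions forwarded to:
  std::cout << t.is_cuda() << ' '
            << t.is_sparse() << ' '
            << t.is_sparse_csr() << ' '
            << t.is_quantized() << '\n';  // all 0 for a dense CPU tensor
  return 0;
}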
@@ -1640,16 +1640,6 @@ static PyObject* _dims(PyObject *self,
     PY_END(nullptr)
 }
 
-static int64_t dim_index(const std::vector<mpy::obj<Dim>>& dims, mpy::hdl<Dim> dim) {
-    for (int64_t i = 0, N = dims.size(); i < N; ++i) {
-        if (dims[i].ptr() == dim.ptr()) {
-            return i;
-        }
-    }
-    return -1;
-}
-
-
 struct DotPart {
     Slice<DimEntry> dims;
     size_t total_size = 1;
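Aside (not from the PR): dim_index was a plain linear scan returning the position of a dim handle or -1. If such a lookup is ever needed again, <algorithm> expresses it directly; a generic toy version over an arbitrary vector, without reproducing the real mpy::obj/mpy::hdl handle types:

#include <algorithm>
#include <cstdint>
#include <vector>

// Toy equivalent of the deleted linear search: index of `needle`, or -1.
template <typename T>
int64_t index_of(const std::vector<T>& items, const T& needle) {
  auto it = std::find(items.begin(), items.end(), needle);
  return it == items.end() ? int64_t{-1}
                           : static_cast<int64_t>(it - items.begin());
}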
@@ -385,10 +385,6 @@ bool is_int(handle h) {
     return PyLong_Check(h.ptr());
 }
 
-bool is_float(handle h) {
-    return PyFloat_Check(h.ptr());
-}
-
 bool is_none(handle h) {
     return h.ptr() == Py_None;
 }
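For reference (my note, not the PR's): these helpers were thin wrappers over CPython's type-check macros, which call sites can apply to a raw PyObject* directly. A minimal sketch using only the plain Python C API; the function names here are made up:

#include <Python.h>

// Equivalent checks without the wrapper helpers.
static bool is_python_number(PyObject* obj) {
  return PyLong_Check(obj) || PyFloat_Check(obj);
}

static bool is_python_none(PyObject* obj) {
  return obj == Py_None;
}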
@@ -84,12 +84,6 @@ static void THCPStream_dealloc(THCPStream* self) {
   Py_TYPE(self)->tp_free((PyObject*)self);
 }
 
-static PyObject* THCPStream_get_device(THCPStream* self, void* unused) {
-  HANDLE_TH_ERRORS
-  return THPDevice_New(self->cuda_stream.device());
-  END_HANDLE_TH_ERRORS
-}
-
 static PyObject* THCPStream_get_cuda_stream(THCPStream* self, void* unused) {
   HANDLE_TH_ERRORS
   return PyLong_FromVoidPtr(self->cuda_stream.stream());
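Background sketch (hypothetical, not PyTorch code): getters like the deleted THCPStream_get_device follow the standard CPython getset pattern. Stripped of the PyTorch-specific error macros and types, the shape of that pattern looks like this; ToyObject and its attribute are invented for illustration:

#include <Python.h>

// A toy object exposing one attribute through a getter.
typedef struct {
  PyObject_HEAD
  long value;
} ToyObject;

static PyObject* Toy_get_value(ToyObject* self, void* /*closure*/) {
  return PyLong_FromLong(self->value);
}

// A getter only takes effect once it is registered in the type's tp_getset table.
static PyGetSetDef Toy_getset[] = {
    {"value", (getter)Toy_get_value, nullptr, "the wrapped value", nullptr},
    {nullptr, nullptr, nullptr, nullptr, nullptr}  // sentinel
};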
@@ -128,12 +128,6 @@ static std::ostream& operator<<(
   return printValueRefs(out, nodes);
 }
 
-static std::ostream& operator<<(
-    std::ostream& out,
-    const at::ArrayRef<Value*> nodes) {
-  return printValueRefs(out, nodes);
-}
-
 struct const_value_list_with_types {
   const ArrayRef<const Value*> values;
   std::string delim;