Remove copy_imag and copy_real methods
ghstack-source-id: 066fc33916
Pull Request resolved: https://github.com/pytorch/pytorch/pull/39065
commit 520e3b80b7
parent 46447045ea
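In short, every call site moves off the temporary copy_real/copy_imag ops onto at::real/at::imag (torch.real/torch.imag from Python). A minimal before/after sketch, assuming a post-commit build:

import torch

z = torch.tensor([1 + 2j, 3 - 4j])
# before this commit: z.copy_real(), z.copy_imag() (both removed here)
print(torch.real(z))  # tensor([1., 3.])
print(torch.imag(z))  # tensor([ 2., -4.])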
@@ -723,8 +723,6 @@ _(aten, where) \
 _(aten, zero) \
 _(aten, zeros) \
 _(aten, zeros_like) \
-_(aten, copy_real) \
-_(aten, copy_imag) \
 /* nothing */

 #define FORALL_ATTR_BASE_SYMBOLS(_) \
@@ -557,7 +557,7 @@ Tensor frobenius_norm(const Tensor& self, IntArrayRef dim, bool keepdim) {
     return at::norm(self, 2, dim, keepdim, self.scalar_type());
   }
   if (self.is_complex()){
-    return at::sqrt(at::sum((self.conj() * self).copy_real(), dim, keepdim));
+    return at::sqrt(at::sum(at::real(self.conj() * self), dim, keepdim));
   } else {
     return at::sqrt(at::sum((self * self), dim, keepdim));
   }
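For a complex tensor, conj(z) * z has an exactly cancelling imaginary part, so taking the real part of the product reproduces |z|^2 without the extra copy that copy_real() performed. A quick numerical check (illustrative, not part of the diff):

import torch

z = torch.randn(3, 4, dtype=torch.cfloat)
fro = torch.sqrt(torch.sum(torch.real(z.conj() * z), dim=1))
ref = torch.sqrt((z.abs() ** 2).sum(dim=1))
print(torch.allclose(fro, ref))  # True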
@@ -577,7 +577,7 @@ Tensor &frobenius_norm_out(
     return at::norm_out(result, self, 2, dim, keepdim, self.scalar_type());
   }
   if (self.is_complex()){
-    return at::sqrt_out(result, at::sum((self.conj() * self).copy_real(), dim, keepdim));
+    return at::sqrt_out(result, at::sum(at::real(self.conj() * self), dim, keepdim));
   } else {
     return at::sqrt_out(result, at::sum((self * self), dim, keepdim));
   }
@@ -681,7 +681,7 @@ static Tensor &std_var_out(Tensor &result, const Tensor &self, IntArrayRef dim,

   if (at::isComplexType(self.scalar_type())){
     ScalarType dtype = c10::toValueType(get_dtype(result, self, {}, true));
-    Tensor real_in = self.copy_real().to(dtype);
+    Tensor real_in = at::real(self);
     Tensor real_out = at::empty({0}, self.options().dtype(dtype));
     auto iter = make_reduction("std or var", real_out, real_in, dim, keepdim, dtype);
     if (iter.numel() == 0) {
@@ -689,7 +689,7 @@ static Tensor &std_var_out(Tensor &result, const Tensor &self, IntArrayRef dim,
     } else {
       std_var_stub(iter.device_type(), iter, unbiased, false);
     }
-    Tensor imag_in = self.copy_imag().to(dtype);
+    Tensor imag_in = at::imag(self);
     Tensor imag_out = at::empty({0}, self.options().dtype(dtype));
     iter = make_reduction("std or var", imag_out, imag_in, dim, keepdim, dtype);
     if (iter.numel() == 0) {
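The kernel reduces the real and imaginary parts separately and combines them afterwards, which matches the E[|z - mean|^2] notion of variance: var(z) = var(Re z) + var(Im z). A hedged check against a recent build:

import torch

z = torch.randn(1000, dtype=torch.cdouble)
v = torch.var(z)
ref = torch.var(z.real) + torch.var(z.imag)
print(torch.allclose(v, ref))  # True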
@@ -727,7 +727,7 @@ static std::tuple<Tensor&,Tensor&> std_var_mean_out(const char* fname, Tensor &r
               ".");
   if (at::isComplexType(self.scalar_type())){
     ScalarType dtype = c10::toValueType(get_dtype(result1, self, {}, true));
-    Tensor real_in = self.copy_real().to(dtype);
+    Tensor real_in = at::real(self);
     Tensor real_out_var = at::empty({0}, self.options().dtype(dtype));
     Tensor real_out_mean = at::empty({0}, self.options().dtype(dtype));
     auto iter = make_reduction(fname, real_out_var, real_out_mean, real_in, dim, keepdim, dtype);
@@ -737,7 +737,7 @@ static std::tuple<Tensor&,Tensor&> std_var_mean_out(const char* fname, Tensor &r
     } else {
       std_var_stub(iter.device_type(), iter, unbiased, false);
     }
-    Tensor imag_in = self.copy_imag().to(dtype);
+    Tensor imag_in = at::imag(self);
     Tensor imag_out_var = at::empty({0}, self.options().dtype(dtype));
     Tensor imag_out_mean = at::empty({0}, self.options().dtype(dtype));
     iter = make_reduction(fname, imag_out_var, imag_out_mean, imag_in, dim, keepdim, dtype);
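std_var_mean_out gets the same substitution. On the Python side the paired reduction surfaces as torch.var_mean, where the mean stays complex and the variance is real-valued. A small sketch (assuming complex support in the installed build):

import torch

z = torch.randn(256, dtype=torch.cfloat)
var, mean = torch.var_mean(z)
print(var.dtype, mean.dtype)  # torch.float32 torch.complex64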
@@ -87,9 +87,8 @@ Tensor isinf(const Tensor &self) {

   // Note: a complex value is infinite when either part is infinite
   if (self.is_complex()) {
-    const auto float_type = c10::toValueType(self.scalar_type());
-    return at::isinf(self.copy_real().to(float_type)).__ior__
-        (at::isinf(self.copy_imag().to(float_type)));
+    return at::isinf(at::real(self)).__ior__
+        (at::isinf(at::imag(self)));
   }

   return AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "isinf", [&]() {
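As the note in the hunk says, a complex value counts as infinite when either part is infinite; at::real/at::imag let isinf test the parts without the intermediate float copy. For example:

import torch

z = torch.tensor([complex(float('inf'), 0.0),
                  complex(0.0, float('-inf')),
                  1 + 1j])
print(torch.isinf(z))  # tensor([ True,  True, False])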
@@ -153,14 +153,6 @@ Tensor imag(const Tensor& self) {
   }
 }

-Tensor& copy_real_out(Tensor& result, const Tensor& self) { return unary_op_impl_out(result, self, real_stub); }
-
-Tensor copy_real(const Tensor& self) { return unary_op_impl(self, at::copy_real_out); }
-
-Tensor& copy_imag_out(Tensor& result, const Tensor& self) { return unary_op_impl_out(result, self, imag_stub); }
-
-Tensor copy_imag(const Tensor& self) { return unary_op_impl(self, at::copy_imag_out); }
-
 Tensor& conj_out(Tensor& result, const Tensor& self) { return unary_op_impl_out(result, self, conj_stub); }
 Tensor conj(const Tensor& self) { return unary_op_impl(self, at::conj_out); }

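Only the copy_* wrappers go away here; the underlying real/imag/conj unary ops stay registered. After the commit, a sketch of what user code sees:

import torch

z = torch.tensor([1 + 2j])
print(hasattr(torch, 'copy_real'), hasattr(torch.Tensor, 'copy_real'))  # False False
print(torch.real(z), torch.imag(z), torch.conj(z))  # the surviving ops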
@@ -251,24 +251,6 @@
   use_c10_dispatcher: full
   variants: function

-# Temporary methods for getting the real and imaginary values in complex tensors. These are needed
-# until we support real and imag as tensor attributes which is blocked by the untyped storage task
-# TODO: remove these methods when we add real and imag as tensor attributes
-
-- func: copy_real(Tensor self) -> Tensor
-  use_c10_dispatcher: full
-  variants: function, method
-
-- func: copy_real.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  variants: function
-
-- func: copy_imag(Tensor self) -> Tensor
-  use_c10_dispatcher: full
-  variants: function, method
-
-- func: copy_imag.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  variants: function
-
 - func: conj(Tensor self) -> Tensor
   use_c10_dispatcher: full
   variants: function, method
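Per the schema kept above, conj retains `variants: function, method`, so both spellings remain callable; the removed copy_* entries had the same shape. Illustration:

import torch

z = torch.tensor([2 - 3j])
print(torch.conj(z))  # function variant
print(z.conj())       # method variant, same result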
@@ -26,15 +26,6 @@ class TestComplexTensor(TestCase):
         exp_fn(torch.complex64)
         exp_fn(torch.complex128)

-    def test_copy_real_imag_methods(self):
-        real = torch.randn(4)
-        imag = torch.randn(4)
-        complex_tensor = real + 1j * imag
-        # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
-        self.assertEqualIgnoreType(complex_tensor.copy_real(), real)
-        # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
-        self.assertEqualIgnoreType(complex_tensor.copy_imag(), imag)
-
     def test_dtype_inference(self):
         # issue: https://github.com/pytorch/pytorch/issues/36834
         torch.set_default_dtype(torch.double)
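An equivalent of the deleted test would go through the real/imag accessors instead; a rough sketch with plain asserts in place of the assertEqualIgnoreType helper:

import torch

real = torch.randn(4)
imag = torch.randn(4)
complex_tensor = real + 1j * imag
assert torch.equal(complex_tensor.real, real)
assert torch.equal(complex_tensor.imag, imag)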
@@ -152,9 +152,9 @@ class AbstractTestCases:
                    skip_regexes.append(re.compile('^{}$'.format(re.escape(r))))
                else:
                    skip_regexes.append(r)
-            skipnames = ['copy_real', 'copy_imag']
+
            for name in dir(ns):
-                if name.startswith('_') or name in skipnames:
+                if name.startswith('_'):
                    continue
                if name in ['real', 'imag']:
                    y = torch.randn(1, dtype=torch.cfloat)
@@ -433,9 +433,6 @@
 - name: imag(Tensor self) -> Tensor
   self: Scalar(std::complex<double>{0.0, 1.0})*grad.to(self.scalar_type())

-- name: copy_imag(Tensor self) -> Tensor
-  self: Scalar(std::complex<double>{0.0, 1.0})*grad.to(self.scalar_type())
-
 - name: index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
   self: index_backward(zeros_like(self), indices, grad)
   indices: TensorList()
@@ -757,9 +754,6 @@
 - name: real(Tensor self) -> Tensor
   self: at::real(grad)

-- name: copy_real(Tensor self) -> Tensor
-  self: grad.copy_real()
-
 - name: reciprocal(Tensor self) -> Tensor
   self: -grad * result * result

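With the copy_* formulas dropped, real and imag keep their own derivative entries. A minimal autograd check, assuming a build where complex autograd is enabled (the printed grad follows PyTorch's convention for real-valued losses):

import torch

z = torch.tensor([1.0 + 2.0j], requires_grad=True)
z.real.sum().backward()
print(z.grad)  # tensor([1.+0.j]): the gradient lands in the real component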
@@ -313,7 +313,6 @@ def get_testing_overrides():
        torch.histc: lambda input, bins=100, min=0, max=0, out=None: -1,
        torch.hspmm: lambda mat1, mat2, out=None: -1,
        torch.ifft: lambda input, signal_ndim, normalized=False: -1,
-        torch.copy_imag: lambda input, out=None: -1,
        torch.imag: lambda input, out=None: -1,
        torch.index_add: lambda input, dim, index, source: -1,
        torch.index_copy: lambda input, dim, index, source: -1,
@@ -575,7 +574,6 @@ def get_testing_overrides():
        torch.randint_like: lambda input, low, high, dtype=None, layout=torch.strided, device=None, requires_grad=False: -1,
        torch.randn_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
        torch.real: lambda input, out=None: -1,
-        torch.copy_real: lambda input, out=None: -1,
        torch.reciprocal: lambda input, out=None: -1,
        torch.relu: lambda input, inplace=False: -1,
        torch.remainder: lambda input, other, out=None: -1,
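The testing-overrides table simply drops the two lambdas; the surviving entries can be inspected directly. A sketch (the module is torch.overrides on current builds; around the time of this commit it lived in torch._overrides):

import torch
from torch.overrides import get_testing_overrides

overrides = get_testing_overrides()
print(torch.real in overrides)      # True: torch.real keeps its entry
print(hasattr(torch, 'copy_imag'))  # False once this commit lands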
@@ -92,9 +92,8 @@ def _compare_tensors_internal(a, b, *, rtol, atol, equal_nan):
    # Compares complex tensors' real and imaginary parts separately.
    # (see NOTE Test Framework Tensor "Equality")
    if a.is_complex():
-        float_dtype = torch.float32 if a.dtype == torch.complex64 else torch.float64
-        a_real = a.copy_real().to(float_dtype)
-        b_real = b.copy_real().to(float_dtype)
+        a_real = a.real
+        b_real = b.real
        real_result, debug_msg = _compare_tensors_internal(a_real, b_real,
                                                           rtol=rtol, atol=atol,
                                                           equal_nan=equal_nan)
|
|||
debug_msg = "Real parts failed to compare as equal! " + debug_msg
|
||||
return (real_result, debug_msg)
|
||||
|
||||
a_imag = a.copy_imag().to(float_dtype)
|
||||
b_imag = b.copy_imag().to(float_dtype)
|
||||
a_imag = a.imag
|
||||
b_imag = b.imag
|
||||
imag_result, debug_msg = _compare_tensors_internal(a_imag, b_imag,
|
||||
rtol=rtol, atol=atol,
|
||||
equal_nan=equal_nan)
|
||||
|
|
|
|||
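The comparison helper now reads the parts through the .real/.imag attribute views, with no explicit cast to a float dtype. The same part-wise scheme in user code might look like:

import torch

a = torch.tensor([1.0 + 2.0j, 3.0 - 4.0j])
b = a + (1e-9 + 1e-9j)
real_ok = torch.allclose(a.real, b.real, rtol=1.3e-6, atol=1e-5)
imag_ok = torch.allclose(a.imag, b.imag, rtol=1.3e-6, atol=1e-5)
print(real_ok and imag_ok)  # True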