Revert "Unify torch.tensor and torch.ops.aten.scalar_tensor behavior (#158537)"

This reverts commit 58c7cf9ede.

Reverted https://github.com/pytorch/pytorch/pull/158537 on behalf of https://github.com/albanD due to This broke C++ tests ([comment](https://github.com/pytorch/pytorch/pull/158537#issuecomment-3084425920))
This commit is contained in:
PyTorch MergeBot 2025-07-17 15:06:43 +00:00
parent 288bf54a23
commit 813c76b98d
2 changed files with 1 additions and 55 deletions

View File

@@ -8,29 +8,7 @@ namespace at {
namespace {
template <typename scalar_t>
inline void fill_inplace(Tensor& self, const Scalar& value_scalar) {
scalar_t value{};
if constexpr (std::is_floating_point_v<scalar_t> ||
std::is_same_v<scalar_t, at::Half> ||
std::is_same_v<scalar_t, at::BFloat16> ||
std::is_same_v<scalar_t, at::Float8_e5m2> ||
std::is_same_v<scalar_t, at::Float8_e5m2fnuz> ||
std::is_same_v<scalar_t, at::Float8_e4m3fn> ||
std::is_same_v<scalar_t, at::Float8_e4m3fnuz> ||
std::is_same_v<scalar_t, at::Float8_e8m0fnu>) {
// relaxed float cast: allow inf similar to the torch.tensor constructor
//
// without this, we had the following divergence:
// torch.tensor(1123581321.0, dtype=torch.float16)
// => tensor(inf, dtype=torch.float16)
// torch.ops.aten.scalar_tensor.default(1123581321, dtype=torch.float16)
// => RuntimeError: value cannot be converted to type at::Half without overflow
value = static_cast<scalar_t>(value_scalar.to<double>());
} else {
value = value_scalar.to<scalar_t>();
}
auto value = value_scalar.to<scalar_t>();
scalar_t* dptr = static_cast<scalar_t*>(self.data_ptr());
*dptr = value;
}

View File

@@ -12962,38 +12962,6 @@ class MiscTestsDevice(torch._inductor.test_case.TestCase):
y = torch.tensor(5)
f(x, y)
def test_dynamic_float_scalar_tensor_coersion(self):
# Minified version of https://github.com/pytorch/pytorch/issues/158376#issuecomment-3079591367
class Foo:
def __init__(self):
self.config = type(
"Config", (), {"pad_val": 1123581321.0, "tolerance": 1e-6}
)
@torch.compile(fullgraph=True)
def forward(self, input):
outputs = torch.where(
torch.abs(input - self.config.pad_val) < self.config.tolerance,
torch.tensor(
self.config.pad_val, dtype=input.dtype, device=input.device
),
torch.tensor(
self.config.pad_val + 1, dtype=input.dtype, device=input.device
),
)
return outputs
foo = Foo()
inputs = torch.randn(3, 4)
result = foo.forward(inputs)
original_pad_val = foo.config.pad_val
foo.config.pad_val += 1.0
result2 = foo.forward(inputs)
# Previously would crash with:
# RuntimeError: value cannot be converted to type at::Half without overflow
devices = ("cuda", "hpu", "xpu")
instantiate_device_type_tests(