Revert "[quant] Enable backward for choose_qparams_per_token_asymmetric (#123452)"

This reverts commit c83900887f.

Reverted https://github.com/pytorch/pytorch/pull/123452 on behalf of https://github.com/clee2000 due to broke test_quantization.py::TestQuantizedTensor::test_decomposed_choose_qparams_per_token_asymmetric_backward on multiple jobs c83900887f https://github.com/pytorch/pytorch/actions/runs/8648781225/job/23714753103, probably a landrace ([comment](https://github.com/pytorch/pytorch/pull/123452#issuecomment-2050056601))
This commit is contained in:
PyTorch MergeBot 2024-04-11 16:19:28 +00:00
parent efa36ef092
commit fe092da874
2 changed files with 1 additions and 10 deletions

View File

@@ -1602,14 +1602,6 @@ class TestQuantizedTensor(TestCase):
self.assertEqual(quantized_X.int_repr(), quantized_decomposed_X)
self.assertEqual(dequantized_X, dequantized_decomposed_X)
def test_decomposed_choose_qparams_per_token_asymmetric_backward(self):
# register the ops
import torch.ao.quantization.fx._decomposed
x = torch.randn(2, 3).requires_grad_()
(s, zp) = torch.ops.quantized_decomposed.choose_qparams_per_token_asymmetric(x, torch.int8)
out = x.div(s).add(zp).round()
out.sum().backward()
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_quantization.py TESTNAME\n\n"

View File

@@ -606,8 +606,7 @@ def choose_qparams_per_token_asymmetric(
"""
# Based on https://github.com/google/XNNPACK/blob/df156f0cf3db5a4576cc711123eeb54915f82ffc/src/xnnpack/quantization.h#L18
qmin, qmax = -128, 127
min_val = torch.amin(input, dim=-1, keepdim=True)
max_val = torch.amax(input, dim=-1, keepdim=True)
min_val, max_val = torch.aminmax(input, dim=-1, keepdim=True)
min_val_neg = torch.min(min_val, torch.zeros_like(min_val))
max_val_pos = torch.max(max_val, torch.zeros_like(max_val))
eps = torch.finfo(torch.float32).eps # use xnnpack eps?