Remove _dequantize_per_tensor (#26681)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/26681

att

Test Plan:
ci

Imported from OSS

Differential Revision: D17542833

fbshipit-source-id: 653e906b0e146763609c69ef0de7f9cf38621586
This commit is contained in:
Jerry Zhang 2019-09-24 10:53:02 -07:00 committed by Facebook Github Bot
parent d0fff0ebc8
commit 3f72bcfcaa
4 changed files with 1 addition and 39 deletions

View File

@@ -1153,7 +1153,6 @@ bool aten_op_is_not_moved_to_c10_yet(const c10::OperatorName& opName) {
 #endif
       {"aten::quantize_per_tensor", ""},
       {"aten::quantize_per_channel", ""},
-      {"aten::_dequantize_per_tensor", ""},
       {"aten::q_per_channel_axis", ""},
       {"aten::qscheme", ""},
       {"aten::to", "dtype_layout"},

View File

@@ -3536,11 +3536,6 @@
   dispatch:
     QuantizedCPU: dequantize_quant
 
-- func: _dequantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor
-  variants: function
-  dispatch:
-    CPU: dequantize_per_tensor_cpu
-
 - func: q_scale(Tensor self) -> float
   use_c10_dispatcher: full
   variants: function, method
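
Editor's note: the surviving `dequantize` entry above (dispatching to `dequantize_quant` for `QuantizedCPU`) is the path that remains for turning a quantized tensor back into floats. A minimal runnable sketch of that surviving path (the scale/zero_point values here are arbitrary illustrations, not from the diff):

    import torch

    r = torch.rand(3, 2) * 4 - 2
    # quantize_per_tensor produces a QuantizedCPU tensor; dequantize() then
    # dispatches to dequantize_quant per the yaml entry kept above.
    qr = torch.quantize_per_tensor(r, scale=0.1, zero_point=10, dtype=torch.quint8)
    print(qr.dequantize())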

View File

@@ -30,30 +30,6 @@ Tensor dequantize_quant(const Tensor& self) {
   return get_qtensorimpl(self)->quantizer()->dequantize(self);
 }
 
-Tensor dequantize_per_tensor_cpu(
-    const Tensor& self,
-    double scale,
-    int64_t zero_point,
-    ScalarType dtype) {
-  TORCH_CHECK(
-      isQIntType(toQIntType(self.scalar_type())),
-      "Scalar type for quantized Tensor must have same underlying type as input.");
-  TORCH_CHECK(
-      dtype == toQIntType(self.scalar_type()),
-      "ScalarType argument must match the corresponding quantized scalar type of input integer Tensor");
-  // scalar type of output Tensor is hard-coded as float
-  Tensor f = at::empty(self.sizes(), self.options().dtype(at::kFloat));
-  AT_DISPATCH_QINT_TYPES(
-      toQIntType(self.scalar_type()), "dequantize_linear_cpu", [&]() {
-        underlying_t* qdata = self.data_ptr<underlying_t>();
-        auto* fdata = f.data_ptr<float>();
-        for (int i = 0; i < self.numel(); ++i) {
-          fdata[i] = (static_cast<float>(qdata[i]) - zero_point) * scale;
-        }
-      });
-  return f;
-}
-
 double q_scale_quant(const Tensor& self) {
   auto quantizer = get_qtensorimpl(self)->quantizer();
   TORCH_CHECK(quantizer->qscheme() == kPerTensorAffine);
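
Editor's note: the deleted kernel is plain per-tensor affine dequantization — shift each integer value by the zero point, then multiply by the scale. A minimal sketch of the same arithmetic, paraphrased in NumPy (not PyTorch code):

    import numpy as np

    def dequantize_per_tensor(qdata, scale, zero_point):
        # Mirrors the removed C++ loop: f[i] = (float(q[i]) - zero_point) * scale
        return (qdata.astype(np.float32) - zero_point) * scale

    q = np.arange(-10, 10, dtype=np.int8)
    print(dequantize_per_tensor(q, 3.0, 2))  # e.g. -10 -> (-10 - 2) * 3.0 = -36.0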

View File

@@ -129,15 +129,7 @@ class TestQuantizedTensor(TestCase):
         rqr = qr.dequantize()
         self.assertTrue(np.allclose(r.numpy(), rqr.numpy(), atol=2 / scale))
 
-    def test_qtensor_dequantize_per_tensor(self):
-        t = torch.arange(-10, 10, dtype=torch.int8)
-        scale = 3
-        zero_point = 2
-        qt = torch._dequantize_per_tensor(t, scale, zero_point, torch.qint8)
-        qt2 = torch._make_per_tensor_quantized_tensor(t, scale, zero_point)
-        self.assertEqual(qt, qt2.dequantize())
-
-    def test_qtensor_per_channel_affine(self):
+    def test_qtensor_quantize_per_channel(self):
         r = torch.rand(3, 2, dtype=torch.float) * 4 - 2
         scales = torch.tensor([0.2, 0.03], dtype=torch.double)
         zero_points = torch.tensor([5, 10], dtype=torch.long)
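
Editor's note: the deleted test itself documents the replacement path — wrap the raw integer tensor with the (private) torch._make_per_tensor_quantized_tensor helper and call dequantize() on the result, which is exactly what the removed assertEqual compared against. A minimal sketch using the same values as the deleted test:

    import torch

    t = torch.arange(-10, 10, dtype=torch.int8)
    scale, zero_point = 3, 2

    # Build a quantized view over the raw int8 data, then dequantize it.
    qt = torch._make_per_tensor_quantized_tensor(t, scale, zero_point)
    f = qt.dequantize()  # float tensor: (int_repr - zero_point) * scale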