Using _floats_wrapper in per_channel_tensor generation (#31780)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/31780

We need to specify the width to ensure the generated floats are representable by `float32`.

Fixes: https://github.com/pytorch/pytorch/issues/31774

Test Plan: ci

Imported from OSS

Differential Revision: D19275165

fbshipit-source-id: 50560b4208c562b6bcd2abccadd234f29fbb4b0a
parent 86a4e2135d
commit 40e720282c
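The crux of the change is the `width` argument to Hypothesis's `st.floats`: without it, Hypothesis draws 64-bit floats, which need not survive narrowing to `float32`. The snippet below is a minimal illustration of that failure mode, not code from this commit; it only assumes hypothesis >= 3.67.0 (the release that introduced `width`) and NumPy.

import numpy as np
from hypothesis import given, strategies as st

# Without `width`, draws are 64-bit: a perfectly finite float64 such as 1e308
# overflows to inf as soon as it is narrowed to float32.
assert np.isinf(np.float32(1e308))

@given(st.floats(allow_nan=False, allow_infinity=False, width=32))
def check_representable(x):
    # With width=32, every drawn value already fits in a 32-bit float,
    # so narrowing to float32 can neither overflow nor round it away.
    assert np.isfinite(np.float32(x))

check_representable()  # runs the property over many generated examples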
@@ -4,7 +4,7 @@
 source "$(dirname "${BASH_SOURCE[0]}")/macos-common.sh"
 
 conda install -y six
-pip install -q hypothesis==4.57.1 "librosa>=0.6.2" psutil
+pip install -q hypothesis "librosa>=0.6.2" psutil
 
 # TODO move this to docker
 pip install unittest-xml-reporting
@@ -40,7 +40,7 @@ def _get_valid_min_max(qparams):
     max_value = min((long_max - zero_point) * scale, (long_max / scale + zero_point))
     return np.float32(min_value), np.float32(max_value)
 
-# This wrapper around `st.floats` checks the version of `hypothesis`, and if
+# This wrapper wraps around `st.floats` and checks the version of `hypothesis`, if
 # it is too old, removes the `width` parameter (which was introduced)
 # in 3.67.0
 def _floats_wrapper(*args, **kwargs):
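For reference, the comment rewritten above describes a guard that strips `width` on Hypothesis installs older than 3.67.0. The body of `_floats_wrapper` is only partially visible in this diff, so the following is a hedged sketch of that pattern rather than the exact code; the version-parsing line in particular is an assumption.

import hypothesis
import hypothesis.strategies as st

def _floats_wrapper(*args, **kwargs):
    # `width` only exists in hypothesis >= 3.67.0; drop it on older installs
    # so the strategy still works, just without the 32-bit guarantee.
    version = tuple(int(p) for p in hypothesis.__version__.split('.')[:3])
    if 'width' in kwargs and version < (3, 67, 0):
        kwargs.pop('width')
    return st.floats(*args, **kwargs)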
@@ -48,6 +48,11 @@ def _floats_wrapper(*args, **kwargs):
         kwargs.pop('width')
     return st.floats(*args, **kwargs)
 
+def floats(*args, **kwargs):
+    if 'width' not in kwargs:
+        kwargs['width'] = 32
+    return st.floats(*args, **kwargs)
+
 """Hypothesis filter to avoid overflows with quantized tensors.
 
 Args:
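The new `floats` helper added above simply defaults `width=32`, so call sites stay short while every draw is guaranteed to be exactly representable in a `float32` array. A small usage sketch follows (not part of the diff; the helper is re-declared here only to keep the snippet self-contained):

import numpy as np
import hypothesis.strategies as st
import hypothesis.extra.numpy as stnp
from hypothesis import given

def floats(*args, **kwargs):
    if 'width' not in kwargs:
        kwargs['width'] = 32  # the default this PR introduces
    return st.floats(*args, **kwargs)

@given(stnp.arrays(dtype=np.float32,
                   elements=floats(-1e6, 1e6, allow_nan=False),
                   shape=(3, 4)))
def check_within_bounds(X):
    # Each element was drawn as a 32-bit float, so storing it in a float32
    # array cannot round it past the requested bounds.
    assert np.all(np.abs(X) <= 1e6)

check_within_bounds()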
@@ -109,7 +114,7 @@ def qparams(draw, dtypes=None, scale_min=None, scale_max=None,
         scale_min = torch.finfo(torch.float).eps
     if scale_max is None:
         scale_max = torch.finfo(torch.float).max
-    scale = draw(_floats_wrapper(min_value=scale_min, max_value=scale_max, width=32))
+    scale = draw(floats(min_value=scale_min, max_value=scale_max, width=32))
 
     return scale, zero_point, quantized_type
@@ -165,15 +170,15 @@ def tensor(draw, shapes=None, elements=None, qparams=None):
         _shape = draw(st.sampled_from(shapes))
     if qparams is None:
         if elements is None:
-            elements = _floats_wrapper(-1e6, 1e6, allow_nan=False, width=32)
+            elements = floats(-1e6, 1e6, allow_nan=False, width=32)
         X = draw(stnp.arrays(dtype=np.float32, elements=elements, shape=_shape))
         assume(not (np.isnan(X).any() or np.isinf(X).any()))
         return X, None
     qparams = draw(qparams)
     if elements is None:
         min_value, max_value = _get_valid_min_max(qparams)
-        elements = _floats_wrapper(min_value, max_value, allow_infinity=False,
-                                   allow_nan=False, width=32)
+        elements = floats(min_value, max_value, allow_infinity=False,
+                          allow_nan=False, width=32)
     X = draw(stnp.arrays(dtype=np.float32, elements=elements, shape=_shape))
     # Recompute the scale and zero_points according to the X statistics.
     scale, zp = _calculate_dynamic_qparams(X, qparams[2])
@@ -190,15 +195,15 @@ def per_channel_tensor(draw, shapes=None, elements=None, qparams=None):
         _shape = draw(st.sampled_from(shapes))
     if qparams is None:
         if elements is None:
-            elements = st.floats(-1e6, 1e6, allow_nan=False)
+            elements = floats(-1e6, 1e6, allow_nan=False, width=32)
         X = draw(stnp.arrays(dtype=np.float32, elements=elements, shape=_shape))
         assume(not (np.isnan(X).any() or np.isinf(X).any()))
         return X, None
     qparams = draw(qparams)
     if elements is None:
         min_value, max_value = _get_valid_min_max(qparams)
-        elements = st.floats(min_value, max_value, allow_infinity=False,
-                             allow_nan=False)
+        elements = floats(min_value, max_value, allow_infinity=False,
+                          allow_nan=False, width=32)
     X = draw(stnp.arrays(dtype=np.float32, elements=elements, shape=_shape))
     # Recompute the scale and zero_points according to the X statistics.
     scale, zp = _calculate_dynamic_per_channel_qparams(X, qparams[2])
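per_channel_tensor was the one generator still calling `st.floats` directly, which is what the linked issue hit: a 64-bit draw need not survive the trip into the `np.float32` array that `stnp.arrays` builds. The demonstration below is illustrative only; the chosen value and the framing of the failure mode are the editor's assumptions, not text from the PR.

import numpy as np

# A legal draw from st.floats(-1e6, 1e6) at the default 64-bit width...
x64 = 1e6 - 1e-10
# ...is silently rounded once it lands in a float32 array,
x32 = np.float32(x64)
print(x64, float(x32), x64 == float(x32))  # 999999.9999999999 1000000.0 False
# so the stored element no longer equals the value Hypothesis drew;
# drawing with width=32 (as `floats` now guarantees) avoids the mismatch.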
@@ -146,10 +146,10 @@ class TestQuantizedOps(TestCase):
 
     """Tests the correctness of the quantized::clamp op."""
     @given(X=hu.tensor(shapes=hu.array_shapes(1, 8, 1, 8),
-                       elements=st.floats(-1e6, 1e6, allow_nan=False),
+                       elements=hu.floats(-1e6, 1e6, allow_nan=False),
                        qparams=hu.qparams()),
-           min_val=st.floats(-1e6, 1e6, allow_nan=False),
-           max_val=st.floats(-1e6, 1e6, allow_nan=False))
+           min_val=hu.floats(-1e6, 1e6, allow_nan=False),
+           max_val=hu.floats(-1e6, 1e6, allow_nan=False))
     def test_qclamp(self, X, min_val, max_val):
         X, (scale, zero_point, torch_type) = X
@@ -173,9 +173,9 @@ class TestQuantizedOps(TestCase):
 
     """Tests the correctness of the scalar addition."""
     @given(A=hu.tensor(shapes=hu.array_shapes(1, 4, 1, 5),
-                       elements=st.floats(-1e6, 1e6, allow_nan=False),
+                       elements=hu.floats(-1e6, 1e6, allow_nan=False),
                        qparams=hu.qparams()),
-           b=st.floats(-1e6, 1e6, allow_nan=False, allow_infinity=False))
+           b=hu.floats(-1e6, 1e6, allow_nan=False, allow_infinity=False))
     def test_qadd_scalar_relu(self, A, b):
         import copy
         add_scalar = torch.ops.quantized.add_scalar
@@ -2031,7 +2031,7 @@ class TestComparatorOps(TestCase):
     @unittest.skip("FIXME: Failing due to overflow error without width option")
     @given(A=hu.tensor(shapes=((3, 4, 5),),
                        qparams=hu.qparams()),
-           b=st.floats(allow_infinity=False, allow_nan=False))
+           b=hu.floats(allow_infinity=False, allow_nan=False))
     def test_compare_tensor_scalar(self, A, b):
         A, (scale_a, zero_point_a, dtype_a) = A
         tA = torch.from_numpy(A)