Currently we don't have a dtype check when verifying the consistency between PyTorch and ONNX outputs. As a result, some dtype inconsistencies were found and reported: #77842 #77845

This is a POC.

Failed workflows:
- [linux-xenial-py3.7-clang7-onnx / test (default, 2, 2, linux.2xlarge)]
  - inconsistent shape
    - TestONNXRuntime_opset10.test_all (#79371)
    - TestONNXRuntime_opset10.test_any (#79371)
    - TestONNXRuntime_opset10.test_argmin_argmax (#79503)
    - TestONNXRuntime_opset10.test_hardshrink (#79695)
    - TestONNXRuntime_opset10.test_linalg_norm (#79506)
    - TestONNXRuntime_opset10.test_linalg_vector_norm (#79506)
    - TestONNXRuntime_opset10.test_prelu_scalar (#79846)
    - TestONNXRuntime_opset10.test_softshrink (#79695)
    - TestONNXRuntime_opset10.test_sum_empty_tensor (skipped)
    - TestONNXRuntime_opset10.test_tolist (skipped)
  - inconsistent dtype
    - test_arithmetic_prim_bool (skipped)
    - test_arithmeticOps_with_low_precision (skipped)
    - test_arithmetic_prim_float (skipped)
    - test_logical_and (#79339)
    - test_logical_or (#79339)
    - test_logical_xor (#79339)
    - test_pow (skipped)
    - test_primitive_input_floating (skipped)
    - test_quantize_per_tensor (#79690)
    - test_quantized_adaptive_avg_pool2d (#79690)
    - test_quantized_arithmetic (#79690)
    - test_quantized_arithmetic_qfunctional (#79690)
    - test_quantized_conv2d (#79690)
    - test_quantized_conv2d_relu (#79690)
    - test_quantized_flatten (#79690)
    - test_quantized_hardsigmoid (#79690)
    - test_quantized_hardswish (#79690)
    - test_quantized_linear (#79690)
    - test_quantized_sigmoid (#79690)
    - test_item (skipped)
    - test_full_like_value (skipped)
    - TestONNXRuntime_opset7.test_div_rounding_mode (skipped)
    - TestONNXRuntime_opset8.test_div_rounding_mode (skipped)
    - TestONNXRuntime_opset9.test_div_rounding_mode (skipped)
    - TestONNXRuntime_opset9_IRv4.test_div_rounding_mode (skipped)
    - test_outer (skipped)
    - test_symbolic_shape_inference_arange_2 (skipped)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/79263
Approved by: https://github.com/justinchuby, https://github.com/BowenBao
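To make the intended workflow concrete, here is a minimal sketch of how a test can opt out of the new dtype comparison while keeping the rest of the verification, using the `skipDtypeChecking` and `skipIfUnsupportedMinOpsetVersion` helpers defined in the file below. The test class, its attributes, and the module name `pytorch_test_common` are illustrative assumptions, not something mandated by this change.

```python
import unittest

# Assumption: the helpers below are importable as pytorch_test_common; adjust
# the module name to wherever this file lives in the test tree.
from pytorch_test_common import skipDtypeChecking, skipIfUnsupportedMinOpsetVersion


class TestExample(unittest.TestCase):
    # Placeholder attributes; the real ONNX runtime test classes set these per run.
    opset_version = 10
    check_dtype = True
    check_shape = True

    @skipIfUnsupportedMinOpsetVersion(9)  # skipped entirely when opset_version < 9
    @skipDtypeChecking  # values and shapes are still compared, dtypes are not
    def test_op_with_known_dtype_mismatch(self):
        # A real test would export a module and compare PyTorch outputs against
        # ONNX Runtime outputs; with check_dtype set to False by the decorator,
        # only the dtype comparison is skipped, and only for this test.
        self.assertFalse(self.check_dtype)


if __name__ == "__main__":
    unittest.main()
```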
# Owner(s): ["module: onnx"]

import functools
import os
import sys
import unittest

import torch
from torch.autograd import function

pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(-1, pytorch_test_dir)

torch.set_default_tensor_type("torch.FloatTensor")

BATCH_SIZE = 2

RNN_BATCH_SIZE = 7
RNN_SEQUENCE_LENGTH = 11
RNN_INPUT_SIZE = 5
RNN_HIDDEN_SIZE = 3


def _skipper(condition, reason):
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            if condition():
                raise unittest.SkipTest(reason)
            return f(*args, **kwargs)

        return wrapper

    return decorator


skipIfNoCuda = _skipper(lambda: not torch.cuda.is_available(), "CUDA is not available")

skipIfTravis = _skipper(lambda: os.getenv("TRAVIS"), "Skip In Travis")

skipIfNoBFloat16Cuda = _skipper(
    lambda: not torch.cuda.is_bf16_supported(), "BFloat16 CUDA is not available"
)
# skips tests for all versions below min_opset_version.
# if exporting the op is only supported after a specific version,
# add this wrapper to prevent running the test for opset_versions
# smaller than the currently tested opset_version
def skipIfUnsupportedMinOpsetVersion(min_opset_version):
    def skip_dec(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            if self.opset_version < min_opset_version:
                raise unittest.SkipTest(
                    f"Unsupported opset_version: {self.opset_version} < {min_opset_version}"
                )
            return func(self, *args, **kwargs)

        return wrapper

    return skip_dec
# skips tests for all versions above max_opset_version.
def skipIfUnsupportedMaxOpsetVersion(max_opset_version):
    def skip_dec(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            if self.opset_version > max_opset_version:
                raise unittest.SkipTest(
                    f"Unsupported opset_version: {self.opset_version} > {max_opset_version}"
                )
            return func(self, *args, **kwargs)

        return wrapper

    return skip_dec


# skips tests for all opset versions.
def skipForAllOpsetVersions():
    def skip_dec(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            if self.opset_version:
                raise unittest.SkipTest(
                    "Skip verify test for unsupported opset_version"
                )
            return func(self, *args, **kwargs)

        return wrapper

    return skip_dec
def skipTraceTest(min_opset_version=float("inf")):
    def skip_dec(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            self.is_trace_test_enabled = self.opset_version >= min_opset_version
            if not self.is_trace_test_enabled and not self.is_script:
                raise unittest.SkipTest("Skip verify test for torch trace")
            return func(self, *args, **kwargs)

        return wrapper

    return skip_dec


def skipScriptTest(min_opset_version=float("inf")):
    def skip_dec(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            self.is_script_test_enabled = self.opset_version >= min_opset_version
            if not self.is_script_test_enabled and self.is_script:
                raise unittest.SkipTest("Skip verify test for TorchScript")
            return func(self, *args, **kwargs)

        return wrapper

    return skip_dec
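# Example usage (illustrative): skip only the TorchScript variant of a hypothetical
# test until opset 11, while still running the traced variant (assumes the test
# class sets self.is_script for scripted runs):
#
#     @skipScriptTest(min_opset_version=11)
#     def test_loop_with_dynamic_exit(self):
#         ...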
# skips tests for opset_versions listed in unsupported_opset_versions.
# if the caffe2 test cannot be run for a specific version, add this wrapper
# (for example, an op was modified but the change is not supported in caffe2)
def skipIfUnsupportedOpsetVersion(unsupported_opset_versions):
    def skip_dec(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            if self.opset_version in unsupported_opset_versions:
                raise unittest.SkipTest(
                    "Skip verify test for unsupported opset_version"
                )
            return func(self, *args, **kwargs)

        return wrapper

    return skip_dec
# sets self.check_shape = False so the test runner skips the output-shape comparison.
def skipShapeChecking(func):
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        self.check_shape = False
        return func(self, *args, **kwargs)

    return wrapper


# sets self.check_dtype = False so the test runner skips the output-dtype comparison.
def skipDtypeChecking(func):
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        self.check_dtype = False
        return func(self, *args, **kwargs)

    return wrapper


# returns a flat tuple of the torch.Tensor elements found in a nested structure x.
def flatten(x):
    return tuple(function._iter_filter(lambda o: isinstance(o, torch.Tensor))(x))