diff --git a/test/onnx/debug_embed_params.py b/test/onnx/debug_embed_params.py
index 7fe40a5906d..8f32a838a99 100644
--- a/test/onnx/debug_embed_params.py
+++ b/test/onnx/debug_embed_params.py
@@ -1,7 +1,7 @@
 import sys
 
 import onnx
-from test_pytorch_common import flatten
+import pytorch_test_common
 
 import caffe2.python.onnx.backend as c2
 import torch
@@ -41,7 +41,9 @@ def run_embed_params(proto, model, input, state_dict=None, use_gpu=True):
     parameters = list(model.state_dict().values())
 
     W = {}
-    for k, v in zip(model_def.graph.input, flatten((input, parameters))):
+    for k, v in zip(
+        model_def.graph.input, pytorch_test_common.flatten((input, parameters))
+    ):
         if isinstance(v, Variable):
             W[k.name] = v.data.cpu().numpy()
         else:
diff --git a/test/onnx/export_onnx_tests_filter.py b/test/onnx/export_onnx_tests_filter.py
index cf8afafd9b7..868f72fddc3 100644
--- a/test/onnx/export_onnx_tests_filter.py
+++ b/test/onnx/export_onnx_tests_filter.py
@@ -6,7 +6,7 @@ import traceback
 
 import google.protobuf.text_format
 import onnx.backend.test
-import test_onnx_common
+import onnx_test_common
 from test_caffe2_common import run_generated_test
 
 from torch.testing._internal.common_device_type import get_all_device_types
@@ -20,7 +20,7 @@ _expect_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "expect")
 
 
 def collect_generated_testcases(
-    root_dir=test_onnx_common.pytorch_converted_dir,
+    root_dir=onnx_test_common.pytorch_converted_dir,
     verbose=False,
     fail_dir=None,
     expect=True,
@@ -95,7 +95,7 @@ if __name__ == "__main__":
     collect_generated_testcases(verbose=verbose, fail_dir=fail_dir, expect=expect)
     # We already generate the expect files for test_operators.py.
     collect_generated_testcases(
-        root_dir=test_onnx_common.pytorch_operator_dir,
+        root_dir=onnx_test_common.pytorch_operator_dir,
        verbose=verbose,
        fail_dir=fail_dir,
        expect=False,
diff --git a/test/onnx/export_onnx_tests_generator.py b/test/onnx/export_onnx_tests_generator.py
index ef728cead0d..a6d3c6bee53 100644
--- a/test/onnx/export_onnx_tests_generator.py
+++ b/test/onnx/export_onnx_tests_generator.py
@@ -4,7 +4,7 @@ import shutil
 import traceback
 
 import onnx
-import test_onnx_common
+import onnx_test_common
 from onnx import numpy_helper
 from test_nn import new_module_tests
 
@@ -110,7 +110,7 @@ def convert_tests(testcases, sets=1):
         onnx_model = onnx.load_from_string(f.getvalue())
         onnx.checker.check_model(onnx_model)
         onnx.helper.strip_doc_string(onnx_model)
-        output_dir = os.path.join(test_onnx_common.pytorch_converted_dir, test_name)
+        output_dir = os.path.join(onnx_test_common.pytorch_converted_dir, test_name)
 
         if os.path.exists(output_dir):
             shutil.rmtree(output_dir)
@@ -151,7 +151,7 @@ def convert_tests(testcases, sets=1):
     )
     print(
         "PyTorch converted cases are stored in {}.".format(
-            test_onnx_common.pytorch_converted_dir
+            onnx_test_common.pytorch_converted_dir
         )
     )
     print_stats(FunctionalModule_nums, nn_module)
diff --git a/test/onnx/test_onnx_common.py b/test/onnx/onnx_test_common.py
similarity index 100%
rename from test/onnx/test_onnx_common.py
rename to test/onnx/onnx_test_common.py
diff --git a/test/onnx/test_pytorch_common.py b/test/onnx/pytorch_test_common.py
similarity index 97%
rename from test/onnx/test_pytorch_common.py
rename to test/onnx/pytorch_test_common.py
index 346ff03ebef..77bdd28ad4e 100644
--- a/test/onnx/test_pytorch_common.py
+++ b/test/onnx/pytorch_test_common.py
@@ -6,13 +6,11 @@ import sys
 import unittest
 
 import torch
-import torch.autograd.function as function
+from torch.autograd import function
 
 pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
 sys.path.insert(-1, pytorch_test_dir)
 
-from torch.testing._internal.common_utils import *  # noqa: F401,F403
-
 torch.set_default_tensor_type("torch.FloatTensor")
 
 BATCH_SIZE = 2
diff --git a/test/onnx/test_custom_ops.py b/test/onnx/test_custom_ops.py
index 1eed757077f..d4e1d7bd345 100644
--- a/test/onnx/test_custom_ops.py
+++ b/test/onnx/test_custom_ops.py
@@ -2,17 +2,17 @@
 
 import numpy as np
 import onnx
-from test_onnx_common import run_model_test
-from test_pytorch_common import TestCase, run_tests
+import onnx_test_common
 from test_pytorch_onnx_caffe2 import do_export
 
 import caffe2.python.onnx.backend as c2
 import torch
 import torch.utils.cpp_extension
-from torch.onnx.symbolic_helper import _unimplemented
+from torch.onnx import symbolic_helper
+from torch.testing._internal import common_utils
 
 
-class TestCustomOps(TestCase):
+class TestCustomOps(common_utils.TestCase):
     def test_custom_add(self):
         op_source = """
         #include
@@ -56,7 +56,7 @@ class TestCustomOps(TestCase):
         np.testing.assert_array_equal(caffe2_out[0], model(x, y).cpu().numpy())
 
 
-class TestCustomAutogradFunction(TestCase):
+class TestCustomAutogradFunction(common_utils.TestCase):
     opset_version = 9
     keep_initializers_as_inputs = False
     onnx_shape_inference = True
@@ -83,7 +83,7 @@ class TestCustomOps(TestCase):
 
         x = torch.randn(2, 3, 4, requires_grad=True)
         model = MyModule()
-        run_model_test(self, model, input_args=(x,))
+        onnx_test_common.run_model_test(self, model, input_args=(x,))
 
     def test_register_custom_op(self):
         class MyClip(torch.autograd.Function):
@@ -117,7 +117,9 @@ class TestCustomAutogradFunction(TestCase):
             elif name == "MyRelu":
                 return g.op("Relu", args[0], outputs=n.outputsSize())
             else:
-                return _unimplemented("prim::PythonOp", "unknown node kind: " + name)
+                return symbolic_helper._unimplemented(
+                    "prim::PythonOp", "unknown node kind: " + name
+                )
 
         from torch.onnx import register_custom_op_symbolic
 
@@ -125,10 +127,10 @@ class TestCustomAutogradFunction(TestCase):
 
         x = torch.randn(2, 3, 4, requires_grad=True)
         model = MyModule()
-        run_model_test(self, model, input_args=(x,))
+        onnx_test_common.run_model_test(self, model, input_args=(x,))
 
 
-class TestExportAsContribOps(TestCase):
+class TestExportAsContribOps(common_utils.TestCase):
     opset_version = 14
     keep_initializers_as_inputs = False
     onnx_shape_inference = True
@@ -159,8 +161,8 @@ class TestExportAsContribOps(TestCase):
 
         x = torch.randn(3, 3, 4, requires_grad=True)
         model = torch.jit.script(M())
-        run_model_test(self, model, input_args=(x,))
+        onnx_test_common.run_model_test(self, model, input_args=(x,))
 
 
 if __name__ == "__main__":
-    run_tests()
+    common_utils.run_tests()
diff --git a/test/onnx/test_models.py b/test/onnx/test_models.py
index 609cf9c8f55..5997212fddd 100644
--- a/test/onnx/test_models.py
+++ b/test/onnx/test_models.py
@@ -5,23 +5,11 @@ import unittest
 from model_defs.dcgan import _netD, _netG, bsz, imgsz, nz, weights_init
 from model_defs.emb_seq import EmbeddingNetwork1, EmbeddingNetwork2
 from model_defs.mnist import MNIST
-from model_defs.op_test import (
-    ConcatNet,
-    DummyNet,
-    FakeQuantNet,
-    PermuteNet,
-    PReluNet,
-)
+from model_defs.op_test import ConcatNet, DummyNet, FakeQuantNet, PermuteNet, PReluNet
 from model_defs.squeezenet import SqueezeNet
 from model_defs.srresnet import SRResNet
 from model_defs.super_resolution import SuperResolutionNet
-from test_pytorch_common import (
-    TestCase,
-    run_tests,
-    skipIfNoLapack,
-    skipIfUnsupportedMinOpsetVersion,
- skipScriptTest, -) +from pytorch_test_common import skipIfUnsupportedMinOpsetVersion, skipScriptTest from torchvision.models import shufflenet_v2_x1_0 from torchvision.models.alexnet import alexnet from torchvision.models.densenet import densenet121 @@ -37,11 +25,11 @@ from verify import verify import caffe2.python.onnx.backend as backend import torch -import torch.onnx -import torch.onnx.utils from torch import quantization from torch.autograd import Variable from torch.onnx import OperatorExportTypes +from torch.testing._internal import common_utils +from torch.testing._internal.common_utils import skipIfNoLapack if torch.cuda.is_available(): @@ -57,7 +45,7 @@ else: BATCH_SIZE = 2 -class TestModels(TestCase): +class TestModels(common_utils.TestCase): opset_version = 9 # Caffe2 doesn't support the default. keep_initializers_as_inputs = False @@ -296,4 +284,4 @@ class TestModels(TestCase): if __name__ == "__main__": - run_tests() + common_utils.run_tests() diff --git a/test/onnx/test_models_onnxruntime.py b/test/onnx/test_models_onnxruntime.py index 761f9cfd87b..344bf97ca4c 100644 --- a/test/onnx/test_models_onnxruntime.py +++ b/test/onnx/test_models_onnxruntime.py @@ -5,17 +5,12 @@ import unittest from collections import OrderedDict from typing import List, Mapping, Tuple +import onnx_test_common import parameterized import PIL -import test_onnx_common import torchvision +from pytorch_test_common import skipIfUnsupportedMinOpsetVersion, skipScriptTest from test_models import TestModels -from test_pytorch_common import ( - TestCase, - run_tests, - skipIfUnsupportedMinOpsetVersion, - skipScriptTest, -) from torchvision import ops from torchvision.models.detection import ( faster_rcnn, @@ -29,6 +24,7 @@ from torchvision.models.detection import ( import torch from torch import nn +from torch.testing._internal import common_utils def exportTest(self, model, inputs, rtol=1e-2, atol=1e-7, opset_versions=None): @@ -37,20 +33,20 @@ def exportTest(self, model, inputs, rtol=1e-2, atol=1e-7, opset_versions=None): for opset_version in opset_versions: self.opset_version = opset_version self.onnx_shape_inference = True - test_onnx_common.run_model_test( + onnx_test_common.run_model_test( self, model, input_args=inputs, rtol=rtol, atol=atol ) if self.is_script_test_enabled and opset_version > 11: script_model = torch.jit.script(model) - test_onnx_common.run_model_test( + onnx_test_common.run_model_test( self, script_model, input_args=inputs, rtol=rtol, atol=atol ) TestModels = type( "TestModels", - (TestCase,), + (common_utils.TestCase,), dict( TestModels.__dict__, is_script_test_enabled=False, @@ -63,7 +59,7 @@ TestModels = type( # model tests for scripting with new JIT APIs and shape inference TestModels_new_jit_API = type( "TestModels_new_jit_API", - (TestCase,), + (common_utils.TestCase,), dict( TestModels.__dict__, exportTest=exportTest, @@ -185,9 +181,9 @@ def _init_test_roi_heads_faster_rcnn(): @parameterized.parameterized_class( ("is_script",), ([True, False],), - class_name_func=test_onnx_common.parameterize_class_name, + class_name_func=onnx_test_common.parameterize_class_name, ) -class TestModelsONNXRuntime(test_onnx_common._TestONNXRuntime): +class TestModelsONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(11) @skipScriptTest() # Faster RCNN model is not scriptable def test_faster_rcnn(self): @@ -422,4 +418,4 @@ class TestModelsONNXRuntime(test_onnx_common._TestONNXRuntime): if __name__ == "__main__": - run_tests() + common_utils.run_tests() diff --git 
a/test/onnx/test_onnx_opset.py b/test/onnx/test_onnx_opset.py index 7adb21b8d40..6bce330e235 100644 --- a/test/onnx/test_onnx_opset.py +++ b/test/onnx/test_onnx_opset.py @@ -4,13 +4,13 @@ import io import itertools import onnx -from test_pytorch_common import TestCase, run_tests import torch import torch.onnx from torch.nn import Module from torch.onnx import producer_name, producer_version from torch.onnx._globals import GLOBALS +from torch.testing._internal import common_utils def check_onnx_opset_operator( @@ -70,7 +70,7 @@ def check_onnx_opsets_operator( check_onnx_opset_operator(model, ops[opset_version], opset_version) -class TestONNXOpset(TestCase): +class TestONNXOpset(common_utils.TestCase): def test_opset_fallback(self): class MyModule(Module): def forward(self, x): @@ -524,4 +524,4 @@ class TestONNXOpset(TestCase): if __name__ == "__main__": - run_tests() + common_utils.run_tests() diff --git a/test/onnx/test_operators.py b/test/onnx/test_operators.py index ff890b3f926..05a2db582a4 100644 --- a/test/onnx/test_operators.py +++ b/test/onnx/test_operators.py @@ -8,34 +8,27 @@ import os import shutil import tempfile -from test_pytorch_common import ( +from pytorch_test_common import ( BATCH_SIZE, RNN_HIDDEN_SIZE, RNN_INPUT_SIZE, RNN_SEQUENCE_LENGTH, - TestCase, flatten, - run_tests, - skipIfCaffe2, - skipIfNoLapack, ) import torch import torch.nn as nn import torch.nn.functional as F import torch.onnx -import torch.testing._internal.common_utils as common from torch.autograd import Function, Variable from torch.nn import Module, functional -from torch.onnx import ( - register_custom_op_symbolic, - unregister_custom_op_symbolic, -) from torch.onnx.symbolic_helper import ( _get_tensor_dim_size, _get_tensor_sizes, parse_args, ) +from torch.testing._internal import common_utils +from torch.testing._internal.common_utils import skipIfCaffe2, skipIfNoLapack """Usage: python test/onnx/test_operators.py [--no-onnx] [--produce-onnx-test-data] --no-onnx: no onnx python dependence @@ -77,7 +70,7 @@ class FuncModule(Module): return self.f(*itertools.chain(args, self.params)) -class TestOperators(TestCase): +class TestOperators(common_utils.TestCase): def assertONNX(self, f, args, params=None, **kwargs): if params is None: params = () @@ -94,7 +87,7 @@ class TestOperators(TestCase): import onnx import onnx.checker import onnx.numpy_helper - import test_onnx_common + import onnx_test_common model_def = onnx.ModelProto.FromString(onnx_model_pb) onnx.checker.check_model(model_def) @@ -102,7 +95,7 @@ class TestOperators(TestCase): test_function = inspect.stack()[1][0].f_code.co_name test_name = test_function[0:4] + "_operator" + test_function[4:] output_dir = os.path.join( - test_onnx_common.pytorch_operator_dir, test_name + onnx_test_common.pytorch_operator_dir, test_name ) # Assume: # 1) the old test should be delete before the test. @@ -1159,7 +1152,9 @@ class TestOperators(TestCase): ) return output - register_custom_op_symbolic("::embedding", embedding, _onnx_opset_version) + torch.onnx.register_custom_op_symbolic( + "::embedding", embedding, _onnx_opset_version + ) class Model(torch.nn.Module): def __init__(self): @@ -1176,7 +1171,7 @@ class TestOperators(TestCase): y = torch.randn(1, 8) self.assertONNX(model, (x, y), opset_version=_onnx_opset_version) - unregister_custom_op_symbolic("::embedding", _onnx_opset_version) + torch.onnx.unregister_custom_op_symbolic("::embedding", _onnx_opset_version) # This is test_aten_embedding_1 with shape inference on custom symbolic aten::embedding. 
@skipIfCaffe2 @@ -1208,7 +1203,9 @@ class TestOperators(TestCase): output.setType(output_type) return output - register_custom_op_symbolic("::embedding", embedding, _onnx_opset_version) + torch.onnx.register_custom_op_symbolic( + "::embedding", embedding, _onnx_opset_version + ) class Model(torch.nn.Module): def __init__(self): @@ -1233,7 +1230,7 @@ class TestOperators(TestCase): operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK, ) - unregister_custom_op_symbolic("::embedding", _onnx_opset_version) + torch.onnx.unregister_custom_op_symbolic("::embedding", _onnx_opset_version) # Without shapeValueMap, the onnx graph looks like: # graph(%0 : Float(*, 1, 128, 1, strides=[128, 128, 1, 1], requires_grad=0, device=cpu)): @@ -1277,19 +1274,19 @@ class TestOperators(TestCase): if __name__ == "__main__": no_onnx_dep_flag = "--no-onnx" - _onnx_dep = no_onnx_dep_flag not in common.UNITTEST_ARGS - if no_onnx_dep_flag in common.UNITTEST_ARGS: - common.UNITTEST_ARGS.remove(no_onnx_dep_flag) + _onnx_dep = no_onnx_dep_flag not in common_utils.UNITTEST_ARGS + if no_onnx_dep_flag in common_utils.UNITTEST_ARGS: + common_utils.UNITTEST_ARGS.remove(no_onnx_dep_flag) onnx_test_flag = "--produce-onnx-test-data" - _onnx_test = onnx_test_flag in common.UNITTEST_ARGS - if onnx_test_flag in common.UNITTEST_ARGS: - common.UNITTEST_ARGS.remove(onnx_test_flag) + _onnx_test = onnx_test_flag in common_utils.UNITTEST_ARGS + if onnx_test_flag in common_utils.UNITTEST_ARGS: + common_utils.UNITTEST_ARGS.remove(onnx_test_flag) if _onnx_test: _onnx_dep = True - import test_onnx_common + import onnx_test_common for d in glob.glob( - os.path.join(test_onnx_common.pytorch_operator_dir, "test_operator_*") + os.path.join(onnx_test_common.pytorch_operator_dir, "test_operator_*") ): shutil.rmtree(d) - run_tests() + common_utils.run_tests() diff --git a/test/onnx/test_pytorch_helper.py b/test/onnx/test_pytorch_helper.py index eeb5f88f17f..4594bc2e94f 100644 --- a/test/onnx/test_pytorch_helper.py +++ b/test/onnx/test_pytorch_helper.py @@ -5,16 +5,17 @@ import unittest import numpy as np from pytorch_helper import PyTorchModule -from test_pytorch_common import skipIfNoLapack, run_tests, TestCase import torch.nn.init as init import torch.onnx from caffe2.python.core import workspace from caffe2.python.model_helper import ModelHelper from torch import nn +from torch.testing._internal import common_utils +from torch.testing._internal.common_utils import skipIfNoLapack -class TestCaffe2Backend(TestCase): +class TestCaffe2Backend(common_utils.TestCase): @skipIfNoLapack @unittest.skip("test broken because Lapack was always missing.") def test_helper(self): @@ -67,4 +68,4 @@ class TestCaffe2Backend(TestCase): if __name__ == "__main__": - run_tests() + common_utils.run_tests() diff --git a/test/onnx/test_pytorch_jit_onnx.py b/test/onnx/test_pytorch_jit_onnx.py index f0cabae76b5..a2a38466356 100644 --- a/test/onnx/test_pytorch_jit_onnx.py +++ b/test/onnx/test_pytorch_jit_onnx.py @@ -2,9 +2,8 @@ import onnxruntime import torch -from torch._C import parse_ir from torch.onnx import verification -from test_pytorch_common import TestCase, run_tests +from torch.testing._internal import common_utils def _jit_graph_to_onnx_model(graph, operator_export_type, opset_version): @@ -16,14 +15,14 @@ def _jit_graph_to_onnx_model(graph, operator_export_type, opset_version): It also does not interact with actual PyTorch modules nor PyTorch tensor inputs. 
""" - from torch.onnx.symbolic_helper import _set_onnx_shape_inference, _set_opset_version - from torch.onnx.utils import _optimize_graph # Shape inference is required because some ops' symbolic functions # generate sub-graphs based on inputs' types. - _set_onnx_shape_inference(True) - _set_opset_version(opset_version) - graph = _optimize_graph(graph, operator_export_type, params_dict={}) + torch.onnx.symbolic_helper._set_onnx_shape_inference(True) + torch.onnx.symbolic_helper._set_opset_version(opset_version) + graph = torch.onnx.utils._optimize_graph( + graph, operator_export_type, params_dict={} + ) proto, _, _, _ = graph._export_onnx( {}, opset_version, @@ -52,7 +51,7 @@ class _TestJITIRToONNX: ort_providers = ["CPUExecutionProvider"] def run_test(self, graph_ir, example_inputs): - graph = parse_ir(graph_ir) + graph = torch._C.parse_ir(graph_ir) jit_outs = torch._C._jit_interpret_graph(graph, example_inputs) onnx_proto = _jit_graph_to_onnx_model( @@ -84,7 +83,7 @@ def MakeTestCase(opset_version: int) -> type: name = f"TestJITIRToONNX_opset{opset_version}" return type( str(name), - (TestCase,), + (common_utils.TestCase,), dict(_TestJITIRToONNX.__dict__, opset_version=opset_version), ) @@ -92,4 +91,4 @@ def MakeTestCase(opset_version: int) -> type: TestJITIRToONNX_opset14 = MakeTestCase(14) if __name__ == "__main__": - run_tests() + common_utils.run_tests() diff --git a/test/onnx/test_pytorch_onnx_caffe2.py b/test/onnx/test_pytorch_onnx_caffe2.py index 10d419fd445..a2c489ff11e 100644 --- a/test/onnx/test_pytorch_onnx_caffe2.py +++ b/test/onnx/test_pytorch_onnx_caffe2.py @@ -18,19 +18,16 @@ from model_defs.rnn_model_with_packed_sequence import RnnModelWithPackedSequence from model_defs.squeezenet import SqueezeNet from model_defs.srresnet import SRResNet from model_defs.super_resolution import SuperResolutionNet -from test_pytorch_common import ( +from pytorch_test_common import ( BATCH_SIZE, RNN_BATCH_SIZE, RNN_HIDDEN_SIZE, RNN_INPUT_SIZE, RNN_SEQUENCE_LENGTH, skipIfNoCuda, - skipIfNoLapack, skipIfTravis, skipIfUnsupportedMinOpsetVersion, skipIfUnsupportedOpsetVersion, - TestCase, - run_tests, ) # Import various models for testing @@ -52,6 +49,8 @@ from torch import nn from torch.autograd import Variable, function from torch.nn.utils import rnn as rnn_utils from torch.onnx import ExportTypes +from torch.testing._internal import common_utils +from torch.testing._internal.common_utils import skipIfNoLapack skip = unittest.skip @@ -130,13 +129,13 @@ model_urls = { } -class TestCaffe2Backend_opset9(TestCase): +class TestCaffe2Backend_opset9(common_utils.TestCase): opset_version = 9 embed_params = False def setUp(self): # the following should ideally be super().setUp(), https://github.com/pytorch/pytorch/issues/79630 - TestCase.setUp(self) + common_utils.TestCase.setUp(self) torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) @@ -3199,44 +3198,44 @@ setup_rnn_tests() # to embed_params=True TestCaffe2BackendEmbed_opset9 = type( "TestCaffe2BackendEmbed_opset9", - (TestCase,), + (common_utils.TestCase,), dict(TestCaffe2Backend_opset9.__dict__, embed_params=True), ) # opset 7 tests TestCaffe2Backend_opset7 = type( "TestCaffe2Backend_opset7", - (TestCase,), + (common_utils.TestCase,), dict(TestCaffe2Backend_opset9.__dict__, opset_version=7), ) TestCaffe2BackendEmbed_opset7 = type( "TestCaffe2BackendEmbed_opset7", - (TestCase,), + (common_utils.TestCase,), dict(TestCaffe2Backend_opset9.__dict__, embed_params=True, opset_version=7), ) # opset 8 tests 
TestCaffe2Backend_opset8 = type( "TestCaffe2Backend_opset8", - (TestCase,), + (common_utils.TestCase,), dict(TestCaffe2Backend_opset9.__dict__, opset_version=8), ) TestCaffe2BackendEmbed_opset8 = type( "TestCaffe2BackendEmbed_opset8", - (TestCase,), + (common_utils.TestCase,), dict(TestCaffe2Backend_opset9.__dict__, embed_params=True, opset_version=8), ) # opset 10 tests TestCaffe2Backend_opset10 = type( "TestCaffe2Backend_opset10", - (TestCase,), + (common_utils.TestCase,), dict(TestCaffe2Backend_opset9.__dict__, opset_version=10), ) TestCaffe2BackendEmbed_opset10 = type( "TestCaffe2BackendEmbed_opset10", - (TestCase,), + (common_utils.TestCase,), dict(TestCaffe2Backend_opset9.__dict__, embed_params=True, opset_version=10), ) @@ -3244,9 +3243,9 @@ TestCaffe2BackendEmbed_opset10 = type( # to embed_params=True TestCaffe2BackendEmbed_opset9_new_jit_API = type( "TestCaffe2BackendEmbed_opset9_new_jit_API", - (TestCase,), + (common_utils.TestCase,), dict(TestCaffe2Backend_opset9.__dict__, embed_params=True), ) if __name__ == "__main__": - run_tests() + common_utils.run_tests() diff --git a/test/onnx/test_pytorch_onnx_caffe2_quantized.py b/test/onnx/test_pytorch_onnx_caffe2_quantized.py index 5aacba78df4..a7c4751a67b 100644 --- a/test/onnx/test_pytorch_onnx_caffe2_quantized.py +++ b/test/onnx/test_pytorch_onnx_caffe2_quantized.py @@ -1,6 +1,7 @@ # Owner(s): ["module: unknown"] import io + import numpy as np import onnx @@ -8,10 +9,10 @@ import caffe2.python.onnx.backend as c2 import torch.nn as nn import torch.nn.quantized as nnq import torch.onnx -from test_pytorch_common import TestCase, run_tests +from torch.testing._internal import common_utils -class TestQuantizedOps(TestCase): +class TestQuantizedOps(common_utils.TestCase): def generic_test( self, model, sample_inputs, input_names=None, decimal=3, relaxed_check=False ): @@ -377,4 +378,4 @@ class TestQuantizedOps(TestCase): if __name__ == "__main__": - run_tests() + common_utils.run_tests() diff --git a/test/onnx/test_pytorch_onnx_onnxruntime.py b/test/onnx/test_pytorch_onnx_onnxruntime.py index 199bf0bd40f..acd91369f59 100644 --- a/test/onnx/test_pytorch_onnx_onnxruntime.py +++ b/test/onnx/test_pytorch_onnx_onnxruntime.py @@ -9,52 +9,35 @@ import unittest from collections import OrderedDict from typing import Dict, List, Optional, Tuple, Union -import model_defs.word_language_model as word_language_model import numpy as np +import onnx_test_common import parameterized -import test_onnx_common import torchvision -from model_defs.lstm_flattening_result import ( - LstmFlatteningResultWithoutSeqLength, - LstmFlatteningResultWithSeqLength, +from model_defs import ( + lstm_flattening_result, + rnn_model_with_packed_sequence, + word_language_model, ) -from model_defs.rnn_model_with_packed_sequence import ( - RnnModelWithPackedSequence, - RnnModelWithPackedSequenceWithoutState, - RnnModelWithPackedSequenceWithState, -) -from test_pytorch_common import ( +from pytorch_test_common import ( BATCH_SIZE, RNN_BATCH_SIZE, RNN_HIDDEN_SIZE, RNN_INPUT_SIZE, RNN_SEQUENCE_LENGTH, - run_tests, - skipIfNoLapack, + skipForAllOpsetVersions, skipIfUnsupportedMaxOpsetVersion, skipIfUnsupportedMinOpsetVersion, skipIfUnsupportedOpsetVersion, skipScriptTest, skipTraceTest, - skipForAllOpsetVersions, ) -from torchvision import ops -from torchvision.models.detection.image_list import ImageList -from torchvision.models.detection.rpn import ( - AnchorGenerator, - RegionProposalNetwork, - RPNHead, -) -from torchvision.models.detection.transform import 
GeneralizedRCNNTransform import torch -import torch.onnx.verification as verification from torch import Tensor from torch.nn.utils import rnn as rnn_utils -from torch.nn.utils.rnn import PackedSequence -from torch.onnx import register_custom_op_symbolic, unregister_custom_op_symbolic -from torch.onnx.symbolic_helper import _unimplemented +from torch.onnx import verification from torch.testing._internal import common_utils +from torch.testing._internal.common_utils import skipIfNoLapack def _init_test_generalized_rcnn_transform(): @@ -62,16 +45,22 @@ def _init_test_generalized_rcnn_transform(): max_size = 200 image_mean = [0.485, 0.456, 0.406] image_std = [0.229, 0.224, 0.225] - transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std) + transform = torchvision.models.detection.transform.GeneralizedRCNNTransform( + min_size, max_size, image_mean, image_std + ) return transform def _init_test_rpn(): anchor_sizes = ((32,), (64,), (128,), (256,), (512,)) aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes) - rpn_anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios) + rpn_anchor_generator = torchvision.models.detection.rpn.AnchorGenerator( + anchor_sizes, aspect_ratios + ) out_channels = 256 - rpn_head = RPNHead(out_channels, rpn_anchor_generator.num_anchors_per_location()[0]) + rpn_head = torchvision.models.detection.rpn.RPNHead( + out_channels, rpn_anchor_generator.num_anchors_per_location()[0] + ) rpn_fg_iou_thresh = 0.7 rpn_bg_iou_thresh = 0.3 rpn_batch_size_per_image = 256 @@ -81,7 +70,7 @@ def _init_test_rpn(): rpn_nms_thresh = 0.7 rpn_score_thresh = 0.0 - rpn = RegionProposalNetwork( + rpn = torchvision.models.detection.rpn.RegionProposalNetwork( rpn_anchor_generator, rpn_head, rpn_fg_iou_thresh, @@ -100,7 +89,7 @@ def _construct_tensor_for_quantization_test( shape: Tuple[int, ...], offset: Optional[Union[int, float]] = None, max_val: Optional[Union[int, float]] = None, -) -> torch.Tensor: +) -> Tensor: """Helper function to generate weights and test inputs in a deterministic way. 
Due to difference in implementation details between PyTorch and ONNXRuntime, randomly generated @@ -154,10 +143,10 @@ def _parametrize_rnn_args(arg_name): @parameterized.parameterized_class( **_parameterized_class_attrs_and_values(), - class_name_func=test_onnx_common.parameterize_class_name, + class_name_func=onnx_test_common.parameterize_class_name, ) @common_utils.instantiate_parametrized_tests -class TestONNXRuntime(test_onnx_common._TestONNXRuntime): +class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_fuse_conv_bn1d(self): class Fuse(torch.nn.Module): def __init__(self): @@ -617,7 +606,10 @@ class TestONNXRuntime(test_onnx_common._TestONNXRuntime): def test_mixed_optional_default_none(self): class Model(torch.nn.Module): def forward( - self, x, y: Optional[Tensor] = None, z: Optional[Tensor] = None + self, + x, + y: Optional[Tensor] = None, + z: Optional[Tensor] = None, ): if y is not None: return x + y @@ -3630,10 +3622,13 @@ class TestONNXRuntime(test_onnx_common._TestONNXRuntime): elif name == "MyRelu": return g.op("Relu", args[0], outputs=n.outputsSize()) else: - return _unimplemented("prim::PythonOp", "unknown node kind: " + name) + # TODO(justinchuby): Remove reference to internal names in symbolic_helper + return torch.onnx.symbolic_helper._unimplemented( + "prim::PythonOp", "unknown node kind: " + name + ) - register_custom_op_symbolic("prim::PythonOp", symbolic_python_op, 1) - self.addCleanup(unregister_custom_op_symbolic, "prim::PythonOp", 1) + torch.onnx.register_custom_op_symbolic("prim::PythonOp", symbolic_python_op, 1) + self.addCleanup(torch.onnx.unregister_custom_op_symbolic, "prim::PythonOp", 1) class MyClipModule(torch.nn.Module): def forward(self, x, min): @@ -4472,9 +4467,13 @@ class TestONNXRuntime(test_onnx_common._TestONNXRuntime): ) if packed_sequence == 1: - model = RnnModelWithPackedSequence(model, False) + model = rnn_model_with_packed_sequence.RnnModelWithPackedSequence( + model, False + ) if packed_sequence == 2: - model = RnnModelWithPackedSequence(model, True) + model = rnn_model_with_packed_sequence.RnnModelWithPackedSequence( + model, True + ) return model def make_input(batch_size, layers, packed_sequence): @@ -9197,7 +9196,7 @@ class TestONNXRuntime(test_onnx_common._TestONNXRuntime): batch_first=batch_first, ) - def forward(self, input: PackedSequence, hx=None): + def forward(self, input: rnn_utils.PackedSequence, hx=None): return self.inner_model(input, hx) class ElmanWithoutStateModel(torch.nn.Module): @@ -9214,7 +9213,7 @@ class TestONNXRuntime(test_onnx_common._TestONNXRuntime): batch_first=batch_first, ) - def forward(self, input: PackedSequence): + def forward(self, input: rnn_utils.PackedSequence): return self.inner_model(input) batch_first = packed_sequence == 2 @@ -9228,7 +9227,11 @@ class TestONNXRuntime(test_onnx_common._TestONNXRuntime): batch_first=batch_first, ) if packed_sequence: - model = RnnModelWithPackedSequenceWithState(model, batch_first) + model = ( + rnn_model_with_packed_sequence.RnnModelWithPackedSequenceWithState( + model, batch_first + ) + ) else: model = ElmanWithStateModel( layers=layers, @@ -9238,7 +9241,9 @@ class TestONNXRuntime(test_onnx_common._TestONNXRuntime): batch_first=batch_first, ) if packed_sequence: - model = RnnModelWithPackedSequenceWithoutState(model, batch_first) + model = rnn_model_with_packed_sequence.RnnModelWithPackedSequenceWithoutState( + model, batch_first + ) def make_input(batch_size): seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size) @@ -9279,7 
+9284,7 @@ class TestONNXRuntime(test_onnx_common._TestONNXRuntime): batch_first = packed_sequence == 2 if packed_sequence: - model = LstmFlatteningResultWithSeqLength( + model = lstm_flattening_result.LstmFlatteningResultWithSeqLength( RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, layers, @@ -9288,11 +9293,17 @@ class TestONNXRuntime(test_onnx_common._TestONNXRuntime): batch_first, ) if initial_state: - model = RnnModelWithPackedSequenceWithState(model, batch_first) + model = ( + rnn_model_with_packed_sequence.RnnModelWithPackedSequenceWithState( + model, batch_first + ) + ) else: - model = RnnModelWithPackedSequenceWithoutState(model, batch_first) + model = rnn_model_with_packed_sequence.RnnModelWithPackedSequenceWithoutState( + model, batch_first + ) else: - model = LstmFlatteningResultWithoutSeqLength( + model = lstm_flattening_result.LstmFlatteningResultWithoutSeqLength( RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, layers, @@ -9352,7 +9363,7 @@ class TestONNXRuntime(test_onnx_common._TestONNXRuntime): batch_first=batch_first, ) - def forward(self, input: PackedSequence, hx): + def forward(self, input: rnn_utils.PackedSequence, hx): return self.inner_model(input, hx) class GRUWithoutStateModel(torch.nn.Module): @@ -9368,7 +9379,7 @@ class TestONNXRuntime(test_onnx_common._TestONNXRuntime): batch_first=batch_first, ) - def forward(self, input: PackedSequence): + def forward(self, input: rnn_utils.PackedSequence): return self.inner_model(input) class GRUNoSeqLengthWithoutStateModel(torch.nn.Module): @@ -9413,7 +9424,11 @@ class TestONNXRuntime(test_onnx_common._TestONNXRuntime): dropout=dropout, batch_first=batch_first, ) - model = RnnModelWithPackedSequenceWithState(model, batch_first) + model = ( + rnn_model_with_packed_sequence.RnnModelWithPackedSequenceWithState( + model, batch_first + ) + ) else: model = GRUWithoutStateModel( layers=layers, @@ -9421,7 +9436,9 @@ class TestONNXRuntime(test_onnx_common._TestONNXRuntime): dropout=dropout, batch_first=batch_first, ) - model = RnnModelWithPackedSequenceWithoutState(model, batch_first) + model = rnn_model_with_packed_sequence.RnnModelWithPackedSequenceWithoutState( + model, batch_first + ) else: if initial_state: model = GRUNoSeqLengthWithStateModel( @@ -9514,7 +9531,9 @@ class TestONNXRuntime(test_onnx_common._TestONNXRuntime): self.run_test(FakeQuantizePerChannelModel(), (x)) @skipIfUnsupportedMinOpsetVersion(13) - @skipScriptTest() # RuntimeError: Can't redefine method: forward on class: __torch__.torch.nn.modules.linear.Linear + # RuntimeError: Can't redefine method: + # forward on class: __torch__.torch.nn.modules.linear.Linear + @skipScriptTest() def test_fake_quantize_activation(self): from torch import quantization @@ -9944,7 +9963,7 @@ class TestONNXRuntime(test_onnx_common._TestONNXRuntime): class Module(torch.nn.Module): def forward(self, boxes, scores): - return ops.nms(boxes, scores, 0.5) + return torchvision.ops.nms(boxes, scores, 0.5) self.run_test(Module(), (boxes, scores)) @@ -9962,7 +9981,7 @@ class TestONNXRuntime(test_onnx_common._TestONNXRuntime): class Module(torch.nn.Module): def forward(self, boxes, scores, idxs): - return ops.batched_nms(boxes, scores, idxs, 0.5) + return torchvision.ops.batched_nms(boxes, scores, idxs, 0.5) self.run_test(Module(), (boxes, scores, idxs)) @@ -9978,7 +9997,7 @@ class TestONNXRuntime(test_onnx_common._TestONNXRuntime): class Module(torch.nn.Module): def forward(self, boxes, size): shape = (size.shape[0], size.shape[1]) - return ops.boxes.clip_boxes_to_image(boxes, shape) + return 
torchvision.ops.boxes.clip_boxes_to_image(boxes, shape) self.run_test( Module(), @@ -9996,7 +10015,7 @@ class TestONNXRuntime(test_onnx_common._TestONNXRuntime): def test_roi_align(self): x = torch.rand(1, 1, 10, 10, dtype=torch.float32) single_roi = torch.tensor([[0, 0, 0, 4, 4]], dtype=torch.float32) - model = ops.RoIAlign((5, 5), 1.0, 2) + model = torchvision.ops.RoIAlign((5, 5), 1.0, 2) self.run_test(model, (x, single_roi)) @unittest.skip( @@ -10007,22 +10026,22 @@ class TestONNXRuntime(test_onnx_common._TestONNXRuntime): def test_roi_align_aligned(self): x = torch.rand(1, 1, 10, 10, dtype=torch.float32) single_roi = torch.tensor([[0, 1.5, 1.5, 3, 3]], dtype=torch.float32) - model1 = ops.RoIAlign((5, 5), 1.0, 2, aligned=True) + model1 = torchvision.ops.RoIAlign((5, 5), 1.0, 2, aligned=True) self.run_test(model1, (x, single_roi)) x = torch.rand(1, 1, 10, 10, dtype=torch.float32) single_roi = torch.tensor([[0, 0.2, 0.3, 4.5, 3.5]], dtype=torch.float32) - model2 = ops.RoIAlign((5, 5), 0.5, 3, aligned=True) + model2 = torchvision.ops.RoIAlign((5, 5), 0.5, 3, aligned=True) self.run_test(model2, (x, single_roi)) x = torch.rand(1, 1, 10, 10, dtype=torch.float32) single_roi = torch.tensor([[0, 0.2, 0.3, 4.5, 3.5]], dtype=torch.float32) - model3 = ops.RoIAlign((5, 5), 1.8, 2, aligned=True) + model3 = torchvision.ops.RoIAlign((5, 5), 1.8, 2, aligned=True) self.run_test(model3, (x, single_roi)) x = torch.rand(1, 1, 10, 10, dtype=torch.float32) single_roi = torch.tensor([[0, 0.2, 0.3, 4.5, 3.5]], dtype=torch.float32) - model4 = ops.RoIAlign((2, 2), 2.5, 0, aligned=True) + model4 = torchvision.ops.RoIAlign((2, 2), 2.5, 0, aligned=True) self.run_test(model4, (x, single_roi)) @unittest.skip( @@ -10034,7 +10053,7 @@ class TestONNXRuntime(test_onnx_common._TestONNXRuntime): rois = torch.tensor([[0, 0, 0, 4, 4]], dtype=torch.float32) pool_h = 5 pool_w = 5 - model = ops.RoIPool((pool_h, pool_w), 2.0) + model = torchvision.ops.RoIPool((pool_h, pool_w), 2.0) self.run_test(model, (x, rois)) @skipIfUnsupportedMinOpsetVersion(11) @@ -10097,7 +10116,7 @@ class TestONNXRuntime(test_onnx_common._TestONNXRuntime): self.rpn = _init_test_rpn() def forward(self, images, features: Dict[str, Tensor]): - images_m = ImageList( + images_m = torchvision.models.detection.image_list.ImageList( images, [(i.shape[-1], i.shape[-2]) for i in images] ) return self.rpn(images_m, features) @@ -10133,7 +10152,9 @@ class TestONNXRuntime(test_onnx_common._TestONNXRuntime): class TransformModule(torch.nn.Module): def __init__(self): super().__init__() - self.model = ops.MultiScaleRoIAlign(["feat1", "feat2"], 3, 2) + self.model = torchvision.ops.MultiScaleRoIAlign( + ["feat1", "feat2"], 3, 2 + ) self.image_sizes = [(512, 512)] def forward(self, input: Dict[str, Tensor], boxes: List[Tensor]) -> Tensor: @@ -10703,7 +10724,7 @@ class TestONNXRuntime(test_onnx_common._TestONNXRuntime): @torch.jit.script def check_init( input_data: Tensor, hidden_size: int, prev_state: Tensor - ) -> Tuple[torch.Tensor, torch.Tensor]: + ) -> Tuple[Tensor, Tensor]: batch_size = input_data.size(0) spatial_size_0 = input_data.size(2) spatial_size_1 = input_data.size(3) @@ -11787,7 +11808,9 @@ class TestONNXRuntime(test_onnx_common._TestONNXRuntime): self.run_test(FlattenModel(), x) @skipIfUnsupportedMinOpsetVersion(10) - @skipScriptTest() # torch.jit.frontend.FrontendError: Cannot instantiate class 'QFunctional' in a script function: + # torch.jit.frontend.FrontendError: + # Cannot instantiate class 'QFunctional' in a script function + @skipScriptTest() def 
test_quantized_arithmetic_qfunctional(self): x = torch.quantize_per_tensor(torch.randn(3, 4), 0.2, 128, torch.quint8) y = torch.quantize_per_tensor(torch.randn(3, 4), 0.2, 128, torch.quint8) @@ -12228,4 +12251,4 @@ class TestONNXRuntime(test_onnx_common._TestONNXRuntime): if __name__ == "__main__": - run_tests() + common_utils.run_tests() diff --git a/test/onnx/test_pytorch_onnx_onnxruntime_cuda.py b/test/onnx/test_pytorch_onnx_onnxruntime_cuda.py index b5d41bbf71f..f747ab3449a 100644 --- a/test/onnx/test_pytorch_onnx_onnxruntime_cuda.py +++ b/test/onnx/test_pytorch_onnx_onnxruntime_cuda.py @@ -3,23 +3,21 @@ import unittest import onnxruntime # noqa: F401 -from test_pytorch_common import ( +from pytorch_test_common import ( skipIfNoBFloat16Cuda, skipIfNoCuda, skipIfUnsupportedMinOpsetVersion, skipScriptTest, - TestCase, ) - -# TODO(justinchuby): Remove reference to other unit tests. from test_pytorch_onnx_onnxruntime import TestONNXRuntime import torch from torch.cuda.amp import autocast from torch.onnx._globals import GLOBALS +from torch.testing._internal import common_utils -class TestONNXRuntime_cuda(TestCase): +class TestONNXRuntime_cuda(common_utils.TestCase): opset_version = GLOBALS.export_onnx_opset_version keep_initializers_as_inputs = True @@ -151,5 +149,4 @@ TestONNXRuntime_cuda.setUp = TestONNXRuntime.setUp TestONNXRuntime_cuda.run_test = TestONNXRuntime.run_test if __name__ == "__main__": - # TODO: convert this to use common_utils.run_tests() - unittest.main(TestONNXRuntime_cuda()) + common_utils.run_tests() diff --git a/test/onnx/test_pytorch_onnx_shape_inference.py b/test/onnx/test_pytorch_onnx_shape_inference.py index 6b293d65697..a7fd58d381a 100644 --- a/test/onnx/test_pytorch_onnx_shape_inference.py +++ b/test/onnx/test_pytorch_onnx_shape_inference.py @@ -1,11 +1,11 @@ # Owner(s): ["module: onnx"] import numpy as np -from test_pytorch_common import run_tests, skipIfUnsupportedMinOpsetVersion, TestCase +from pytorch_test_common import skipIfUnsupportedMinOpsetVersion import torch -from torch.onnx import _constants -from torch.onnx.symbolic_helper import _set_onnx_shape_inference, _set_opset_version +from torch.onnx import _constants, symbolic_helper +from torch.testing._internal import common_utils def expect_tensor(scalar_type, shape=None): @@ -19,12 +19,11 @@ def expect_tensor(scalar_type, shape=None): return verify -class TestONNXShapeInference(TestCase): - def __init__(self, *args, **kwargs): - TestCase.__init__(self, *args, **kwargs) +class TestONNXShapeInference(common_utils.TestCase): + def setUp(self): self.opset_version = _constants.onnx_main_opset - _set_onnx_shape_inference(True) - _set_opset_version(self.opset_version) + symbolic_helper._set_onnx_shape_inference(True) + symbolic_helper._set_opset_version(self.opset_version) def run_test(self, g, n, type_assertion_funcs): if not isinstance(type_assertion_funcs, list): @@ -271,4 +270,4 @@ class TestONNXShapeInference(TestCase): if __name__ == "__main__": - run_tests() + common_utils.run_tests() diff --git a/test/onnx/test_utility_funs.py b/test/onnx/test_utility_funs.py index 6fd15522106..186a679ae18 100644 --- a/test/onnx/test_utility_funs.py +++ b/test/onnx/test_utility_funs.py @@ -6,9 +6,7 @@ import io import onnx import torchvision from autograd_helper import CustomFunction as CustomFunction2 -from test_pytorch_common import ( - TestCase, - run_tests, +from pytorch_test_common import ( skipIfNoCuda, skipIfUnsupportedMaxOpsetVersion, skipIfUnsupportedMinOpsetVersion, @@ -32,9 +30,10 @@ from 
torch.onnx.symbolic_helper import ( _unpack_list, parse_args, ) +from torch.testing._internal import common_utils -class _BaseTestCase(TestCase): +class _BaseTestCase(common_utils.TestCase): def setUp(self): super().setUp() torch.manual_seed(0) @@ -1676,4 +1675,4 @@ class TestUtilityFuns_opset15(TestUtilityFuns_opset9): if __name__ == "__main__": - run_tests() + common_utils.run_tests() diff --git a/test/onnx/test_verify.py b/test/onnx/test_verify.py index a9a88ac6143..ada6fb7b255 100644 --- a/test/onnx/test_verify.py +++ b/test/onnx/test_verify.py @@ -1,15 +1,15 @@ # Owner(s): ["module: onnx"] -from test_pytorch_common import TestCase, run_tests from verify import verify import caffe2.python.onnx.backend as backend import torch from torch.autograd import Function from torch.nn import Module, Parameter +from torch.testing._internal import common_utils -class TestVerify(TestCase): +class TestVerify(common_utils.TestCase): maxDiff = None def assertVerifyExpectFail(self, *args, **kwargs): @@ -106,4 +106,4 @@ class TestVerify(TestCase): if __name__ == "__main__": - run_tests() + common_utils.run_tests()
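
The pattern the test files converge on after these renames is, roughly, the sketch below: ONNX-specific helpers come from the renamed pytorch_test_common / onnx_test_common modules, while TestCase and run_tests are taken from torch.testing._internal.common_utils. This sketch is not part of the patch; the class name and test body are hypothetical, and it assumes the file lives under test/onnx so the renamed helper modules are importable.

import io

from pytorch_test_common import skipIfUnsupportedMinOpsetVersion

import torch
from torch.testing._internal import common_utils


class TestExportSketch(common_utils.TestCase):
    # Hypothetical example: subclass common_utils.TestCase instead of the old
    # test_pytorch_common.TestCase, and keep ONNX-specific skip decorators from
    # pytorch_test_common.
    opset_version = 9

    @skipIfUnsupportedMinOpsetVersion(9)
    def test_identity_export(self):
        model = torch.nn.Identity()
        x = torch.randn(2, 3)
        f = io.BytesIO()
        # Export to an in-memory buffer so the test stays hermetic.
        torch.onnx.export(model, (x,), f, opset_version=self.opset_version)
        self.assertGreater(len(f.getvalue()), 0)


if __name__ == "__main__":
    common_utils.run_tests()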