pytorch/test/onnx/test_pytorch_common.py
BowenBao 08126c9153 [ONNX] Utilize ONNX shape inference for ONNX exporter (#40628)
Summary:
It is often the case that converting a torch operator to an onnx operator requires the input rank/dtype/shape to be known. Previously, the conversion depended on the tracer to provide this info, leaving a gap in the conversion of scripted modules.

We are extending the exporter with support for onnx shape inference. If enabled, onnx shape inference is called whenever an onnx node is created. This PR introduces the initial version of the feature; more and more cases will be supported in follow-up PRs.

* Added a pass to run onnx shape inference on a given node. The node must be in the `onnx` namespace.
* Moved helper functions from `export.cpp` to a common place for re-use.
* This feature is currently experimental, and can be turned on through the `onnx_shape_inference` flag in the internal API `torch.onnx._export` (see the sketch after this list).
* ONNX Sequence ops, If/Loop, and ConstantOfShape are currently skipped due to limitations; support will be added in the future.
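
A minimal sketch of how the flag might be passed, assuming `torch.onnx._export` forwards it as a keyword argument (the API is internal and experimental, so the exact signature may differ; `AddModule` is a hypothetical example model):

```python
import io

import torch


class AddModule(torch.nn.Module):
    def forward(self, x, y):
        return x + y


f = io.BytesIO()
# onnx_shape_inference=True turns on the experimental ONNX shape inference
# pass during export (assumed keyword; internal API, subject to change).
torch.onnx._export(AddModule(), (torch.randn(2, 3), torch.randn(2, 3)), f,
                   onnx_shape_inference=True)
```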

Pull Request resolved: https://github.com/pytorch/pytorch/pull/40628

Reviewed By: mrshenli

Differential Revision: D22709746

Pulled By: bzinodev

fbshipit-source-id: b52aeeae00667e66e0b0c1144022f7af9a8b2948
2020-08-30 18:35:46 -07:00


from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
import os
import unittest
import sys
import torch
import torch.autograd.function as function
# Make helpers from the top-level pytorch test directory importable.
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(-1, pytorch_test_dir)
from torch.testing._internal.common_utils import * # noqa: F401
torch.set_default_tensor_type('torch.FloatTensor')
BATCH_SIZE = 2
RNN_BATCH_SIZE = 7
RNN_SEQUENCE_LENGTH = 11
RNN_INPUT_SIZE = 5
RNN_HIDDEN_SIZE = 3

def _skipper(condition, reason):
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            if condition():
                raise unittest.SkipTest(reason)
            return f(*args, **kwargs)
        return wrapper
    return decorator

skipIfNoCuda = _skipper(lambda: not torch.cuda.is_available(),
                        'CUDA is not available')

skipIfTravis = _skipper(lambda: os.getenv('TRAVIS'),
                        'Skip In Travis')
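
# Usage sketch (hypothetical test case; any unittest.TestCase method works):
#
#   class TestThing(unittest.TestCase):
#       @skipIfNoCuda
#       def test_on_gpu(self):
#           ...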

# skips tests for all versions below min_opset_version.
# if exporting the op is only supported after a specific version,
# add this wrapper to skip the test when the currently tested
# opset_version is smaller than min_opset_version.
def skipIfUnsupportedMinOpsetVersion(min_opset_version):
    def skip_dec(func):
        def wrapper(self):
            if self.opset_version < min_opset_version:
                raise unittest.SkipTest("Skip verify test for unsupported opset_version")
            return func(self)
        return wrapper
    return skip_dec

# skips tests for all versions above max_opset_version.
def skipIfUnsupportedMaxOpsetVersion(max_opset_version):
    def skip_dec(func):
        def wrapper(self):
            if self.opset_version > max_opset_version:
                raise unittest.SkipTest("Skip verify test for unsupported opset_version")
            return func(self)
        return wrapper
    return skip_dec
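
# Usage sketch (hypothetical test methods; the exporter test classes in this
# directory set `self.opset_version` before tests run):
#
#   @skipIfUnsupportedMinOpsetVersion(11)
#   def test_new_op(self):           # runs only for opset >= 11
#       ...
#
#   @skipIfUnsupportedMaxOpsetVersion(8)
#   def test_legacy_behavior(self):  # runs only for opset <= 8
#       ...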

# Enables tests for scripting, instead of only tracing the model.
def enableScriptTest():
    def script_dec(func):
        def wrapper(self):
            self.is_script_test_enabled = True
            return func(self)
        return wrapper
    return script_dec
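
# Usage sketch (hypothetical test method): tests decorated this way set
# `self.is_script_test_enabled`, which the test harness can check to also
# run the scripted (rather than only traced) model:
#
#   @enableScriptTest()
#   def test_conv(self):
#       ...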

# skips tests for opset_versions listed in unsupported_opset_versions.
# if the caffe2 test cannot be run for a specific version, add this wrapper
# (for example, an op was modified but the change is not supported in caffe2)
def skipIfUnsupportedOpsetVersion(unsupported_opset_versions):
    def skip_dec(func):
        def wrapper(self):
            if self.opset_version in unsupported_opset_versions:
                raise unittest.SkipTest("Skip verify test for unsupported opset_version")
            return func(self)
        return wrapper
    return skip_dec

# skips tests when the current ONNX shape inference setting matches
# onnx_shape_inference (e.g. pass True to skip a test whenever shape
# inference is enabled).
def skipIfONNXShapeInference(onnx_shape_inference):
    def skip_dec(func):
        def wrapper(self):
            if self.onnx_shape_inference is onnx_shape_inference:
                raise unittest.SkipTest("Skip verify test for unsupported ONNX shape inference setting")
            return func(self)
        return wrapper
    return skip_dec
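
# Usage sketch (hypothetical test method): skip a test whenever ONNX shape
# inference is enabled on the test class:
#
#   @skipIfONNXShapeInference(True)
#   def test_not_yet_supported(self):
#       ...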

# Flattens arbitrarily nested tuples/lists, keeping only torch.Tensor leaves.
def flatten(x):
    return tuple(function._iter_filter(lambda o: isinstance(o, torch.Tensor))(x))
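
# Usage sketch: for tensors a, b, c, flatten((a, [b, (c,)])) returns the
# flat tuple (a, b, c); inputs are expected to be (possibly nested)
# sequences of tensors.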