Fixes https://github.com/microsoft/onnx-converters-private/issues/132. @kit1980 and @malfet agreed to disable ONNX tests for Caffe2 builds. With this change, exporting models with `operator_export_type=ONNX_ATEN_FALLBACK` is properly tested on non-Caffe2 builds, which is the only supported scenario for ATen fallback after the Caffe2 deprecation.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/90475
Approved by: https://github.com/kit1980, https://github.com/BowenBao
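For context, a minimal sketch (not taken from this PR) of an export that exercises the ATen fallback path; the toy module and output filename are placeholders:

import torch

class _Toy(torch.nn.Module):
    def forward(self, x):
        return torch.nn.functional.relu(x)

# With ONNX_ATEN_FALLBACK, operators the exporter cannot convert are emitted
# as ATen ops in the graph instead of failing the export.
torch.onnx.export(
    _Toy(),
    (torch.randn(2, 3),),
    "toy.onnx",
    operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,
)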
# Owner(s): ["module: onnx"]

import glob
import os

import caffe2.python.onnx.backend as c2
import numpy as np
import onnx
import onnx.backend.test
from onnx import numpy_helper


def load_tensor_as_numpy_array(f):
    # Deserialize an onnx.TensorProto from the protobuf file at path f.
    tensor = onnx.TensorProto()
    with open(f, "rb") as file:
        tensor.ParseFromString(file.read())
    return tensor


def assert_similar(ref, real):
    np.testing.assert_equal(len(ref), len(real))
    for i in range(len(ref)):
        np.testing.assert_allclose(ref[i], real[i], rtol=1e-3)


def run_generated_test(model_file, data_dir, device="CPU"):
    # Load the ONNX model plus the serialized input/output tensors stored
    # alongside it (input_0.pb, input_1.pb, ..., output_0.pb, ...).
    model = onnx.load(model_file)
    input_num = len(glob.glob(os.path.join(data_dir, "input_*.pb")))
    inputs = []
    for i in range(input_num):
        inputs.append(
            numpy_helper.to_array(
                load_tensor_as_numpy_array(os.path.join(data_dir, f"input_{i}.pb"))
            )
        )
    output_num = len(glob.glob(os.path.join(data_dir, "output_*.pb")))
    outputs = []
    for i in range(output_num):
        outputs.append(
            numpy_helper.to_array(
                load_tensor_as_numpy_array(os.path.join(data_dir, f"output_{i}.pb"))
            )
        )
    # Run the model through the Caffe2 ONNX backend and check that its outputs
    # match the stored reference outputs.
    prepared = c2.prepare(model, device=device)
    c2_outputs = prepared.run(inputs)
    assert_similar(outputs, c2_outputs)
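

# Illustrative usage only (not part of the original helper); the paths below
# are hypothetical and assume the exporter dumped a model together with
# serialized onnx.TensorProto inputs/outputs into a test data directory:
#
#     run_generated_test(
#         "generated/test_relu/model.onnx",
#         "generated/test_relu/test_data_set_0",
#     )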