mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-07 00:21:07 +01:00
* Move ONNX integration tests from onnx-fb-universe to PyTorch repo
* Switch to use torchvision
* Delete single rnn operator tests, they have been covered in e2e tests in test_caffe2.py
* Mirror the fix in onnx-fb-universe to bypass cuda check
667326d84b
42 lines
1.3 KiB
Python
42 lines
1.3 KiB
Python
from __future__ import absolute_import
|
|
from __future__ import division
|
|
from __future__ import print_function
|
|
from __future__ import unicode_literals
|
|
|
|
import glob
|
|
import numpy as np
|
|
import onnx.backend.test
|
|
import caffe2.python.onnx.backend as c2
|
|
import os
|
|
from onnx import numpy_helper
|
|
|
|
|
|
def load_tensor_as_numpy_array(f):
    """Read the file at path *f* and parse it as a serialized ONNX TensorProto.

    Despite the name, this returns the parsed ``onnx.TensorProto`` message
    itself; callers convert it with ``numpy_helper.to_array``.
    """
    # Read the raw bytes first, then deserialize into a fresh proto message.
    with open(f, 'rb') as proto_file:
        serialized = proto_file.read()
    tensor = onnx.TensorProto()
    tensor.ParseFromString(serialized)
    return tensor
|
|
|
|
|
|
def assert_similar(ref, real):
    """Assert two sequences of arrays are element-wise close.

    First checks that *ref* and *real* contain the same number of arrays,
    then compares each corresponding pair with a relative tolerance of 1e-3.

    Raises:
        AssertionError: if the lengths differ or any pair is not close.
    """
    np.testing.assert_equal(len(ref), len(real))
    # Pair corresponding outputs directly instead of indexing by position.
    for expected, actual in zip(ref, real):
        np.testing.assert_allclose(expected, actual, rtol=1e-3)
|
|
|
|
|
|
def run_generated_test(model_file, data_dir, device='CPU'):
    """Run one generated ONNX test case through the Caffe2 backend.

    Loads the ONNX model at *model_file*, reads the serialized input and
    reference-output tensors stored as ``input_<i>.pb`` / ``output_<i>.pb``
    files inside *data_dir*, executes the model with the Caffe2 backend on
    *device*, and asserts the backend outputs match the references.
    """
    model = onnx.load(model_file)

    # Count the serialized tensor files, then load them in index order.
    input_num = len(glob.glob(os.path.join(data_dir, "input_*.pb")))
    inputs = [
        numpy_helper.to_array(load_tensor_as_numpy_array(
            os.path.join(data_dir, "input_{}.pb".format(i))))
        for i in range(input_num)
    ]

    output_num = len(glob.glob(os.path.join(data_dir, "output_*.pb")))
    outputs = [
        numpy_helper.to_array(load_tensor_as_numpy_array(
            os.path.join(data_dir, "output_{}.pb".format(i))))
        for i in range(output_num)
    ]

    # Execute through Caffe2 and compare against the reference outputs.
    prepared = c2.prepare(model, device=device)
    c2_outputs = prepared.run(inputs)
    assert_similar(outputs, c2_outputs)
|