USE_TENSORRT support and TensorRT 5 compatibility

Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/13945

Differential Revision: D13317525

Pulled By: yinghai

fbshipit-source-id: 8630dfec1bbc5aac19539e344e7c38a7fd8b051d
Sergei Nikolaev, 2018-12-07 13:52:56 -08:00 (committed by Facebook Github Bot)
parent febc7ff99f
commit a0ee3a279c
10 changed files with 86 additions and 38 deletions


@@ -133,6 +133,22 @@ CMAKE_ARGS+=("-DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX}")
if [[ $BUILD_ENVIRONMENT == *mkl* ]]; then
CMAKE_ARGS+=("-DBLAS=MKL")
fi
if [[ $BUILD_ENVIRONMENT == py2-cuda9.0-cudnn7-ubuntu16.04 ]]; then
# Remove the http:// duplicate in favor of nvidia-ml.list,
# which is the https:// version of the same repo
sudo rm -f /etc/apt/sources.list.d/nvidia-machine-learning.list
curl -o ./nvinfer-runtime-trt-repo-ubuntu1604-5.0.2-ga-cuda9.0_1-1_amd64.deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/nvinfer-runtime-trt-repo-ubuntu1604-5.0.2-ga-cuda9.0_1-1_amd64.deb
sudo dpkg -i ./nvinfer-runtime-trt-repo-ubuntu1604-5.0.2-ga-cuda9.0_1-1_amd64.deb
sudo apt-key add /var/nvinfer-runtime-trt-repo-5.0.2-ga-cuda9.0/7fa2af80.pub
sudo apt-get -qq update
sudo apt-get install libnvinfer5 libnvinfer-dev
rm ./nvinfer-runtime-trt-repo-ubuntu1604-5.0.2-ga-cuda9.0_1-1_amd64.deb
CMAKE_ARGS+=("-DUSE_TENSORRT=ON")
fi
if [[ $BUILD_ENVIRONMENT == *cuda* ]]; then
CMAKE_ARGS+=("-DUSE_CUDA=ON")
CMAKE_ARGS+=("-DCUDA_ARCH_NAME=Maxwell")


@@ -0,0 +1,18 @@
# Caffe2 & TensorRT integration
[![Jenkins Build Status](https://ci.pytorch.org/jenkins/job/caffe2-master/lastCompletedBuild/badge/icon)](https://ci.pytorch.org/jenkins/job/caffe2-master)
This directory contains the code implementing the `TensorRTOp` Caffe2 operator as well as the Caffe2 model converter (which uses `ONNX` as an intermediate format).
To enable this functionality in your PyTorch build, set
`USE_TENSORRT=1 ... python setup.py ...`
or, if you use CMake directly, pass
`-DUSE_TENSORRT=ON`
For further information, see the `caffe2/python/trt/test_trt.py` test, which demonstrates the supported use cases; a rough sketch of the basic conversion flow is shown below.
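This sketch is illustrative only: the `convert_onnx_model_to_trt_op` helper and its arguments are assumed from `test_trt.py` and may not match the actual API exactly. It collapses a whole ONNX model into a single `TensorRTOp` and runs it on GPU 0.

```python
# Illustrative sketch only; convert_onnx_model_to_trt_op and its arguments
# are assumed from caffe2/python/trt/test_trt.py and may not match exactly.
import numpy as np
import onnx
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.trt.transform import convert_onnx_model_to_trt_op

model_def = onnx.load('model.onnx')  # any ONNX model that onnx-tensorrt can parse

# Collapse the whole graph into one Caffe2 TensorRTOp.
trt_op = convert_onnx_model_to_trt_op(model_def, verbosity=3)
device_option = core.DeviceOption(caffe2_pb2.CUDA, 0)
trt_op.device_option.CopyFrom(device_option)

# Feed the first graph input with random data of its declared shape, then run.
inp = model_def.graph.input[0]
dims = [d.dim_value for d in inp.type.tensor_type.shape.dim]
with core.DeviceScope(device_option):
    workspace.FeedBlob(inp.name, np.random.randn(*dims).astype(np.float32))
    workspace.RunOperatorOnce(trt_op)
    output = workspace.FetchBlob(trt_op.output[0])
```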
## Questions and Feedback
Please use GitHub issues (https://github.com/pytorch/pytorch/issues) to ask questions, report bugs, and request new features.


@@ -15,7 +15,7 @@ namespace {
// Otherwise, return the product of CHW dimensions
int64_t CheckDims(
const nvinfer1::Dims& nv_dims,
const std::vector<int64_t>& c2_dims) {
at::ArrayRef<int64_t> c2_dims) {
if (nv_dims.nbDims + 1 != c2_dims.size()) {
CAFFE_THROW(
"Mismatched dimensions between TRT input (",
@@ -51,7 +51,7 @@ TensorRTOp::TensorRTOp(const OperatorDef& operator_def, Workspace* ws)
logger_(
(nvinfer1::ILogger::Severity)(OperatorBase::GetSingleArgument<int>(
"log_verbosity",
FLAGS_minloglevel))),
FLAGS_caffe2_log_level))),
max_batch_size_(
OperatorBase::GetSingleArgument<int>("max_batch_size", 1)) {
{
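For illustration only, here is a Python rendering of the dimension check above: TensorRT binding dims describe a single sample (e.g. CHW), so they must match the Caffe2 dims with the leading batch dimension stripped, and the product of the per-sample dims is returned. The shapes below are made up for the example.

```python
# Illustrative Python rendering of the CheckDims logic above (not part of the
# operator): TRT dims omit the batch dimension that Caffe2 dims carry.
from functools import reduce
from operator import mul

def check_dims(trt_dims, c2_dims):
    if len(trt_dims) + 1 != len(c2_dims):
        raise ValueError(
            "Mismatched dimensions between TRT input ({}) and C2 input ({})".format(
                trt_dims, c2_dims))
    for trt_d, c2_d in zip(trt_dims, c2_dims[1:]):
        if trt_d != c2_d:
            raise ValueError("Mismatched dimensions")
    return reduce(mul, trt_dims, 1)  # product of the per-sample (CHW) dims

print(check_dims((3, 224, 224), (16, 3, 224, 224)))  # -> 150528
```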


@@ -99,7 +99,7 @@ void BlobToTensorProto(
CPUTensorToTensorProto(cpu_tensor, t);
} else if (BlobIsTensorType(*blob, CUDA)) {
const auto& cuda_tensor = blob->template Get<TensorCUDA>();
const auto cpu_tensor = TensorCPU(cuda_tensor, context);
const auto cpu_tensor = TensorCPU(cuda_tensor, CPU);
context->FinishDeviceComputation();
CPUTensorToTensorProto(cpu_tensor, t);
} else {
@@ -140,7 +140,7 @@ void FillModelInfo(::ONNX_NAMESPACE::ModelProto* model) {
model->set_producer_name("caffe2");
auto* opset_id = model->add_opset_import();
opset_id->set_domain("");
opset_id->set_version(3);
opset_id->set_version(7);
}
} // namespace
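The opset bump in the hunk above means the exported intermediate model now declares ONNX opset 7. A hypothetical Python-protobuf equivalent of that model stamp, for illustration only (not the C++ implementation), looks like this:

```python
# Illustration of the model metadata written by FillModelInfo above, using the
# ONNX Python protobufs; not the actual C++ implementation.
import onnx

model = onnx.ModelProto()
model.producer_name = "caffe2"
opset = model.opset_import.add()
opset.domain = ""   # default ONNX operator domain
opset.version = 7   # bumped from 3 in this change
print(model)
```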
@@ -298,17 +298,25 @@ NetDef TensorRTTransformer::SubnetToTrtOp(
::ONNX_NAMESPACE::TensorProto tf;
tf.set_name(t.name());
tf.mutable_dims()->CopyFrom(t.dims());
tf.set_data_type(::ONNX_NAMESPACE::TensorProto::FLOAT);
std::vector<int64_t> v;
v.resize(t.raw_data().size() / sizeof(int64_t));
memcpy(v.data(), t.raw_data().data(), t.raw_data().size());
std::vector<float> vf;
for (auto i : v) {
vf.push_back(static_cast<float>(i));
}
tf.mutable_raw_data()->assign(
reinterpret_cast<const char*>(vf.data()), sizeof(float) * vf.size());
if (t.data_type() == ::ONNX_NAMESPACE::TensorProto::FLOAT) {
tf.set_data_type(::ONNX_NAMESPACE::TensorProto::FLOAT);
std::vector<int64_t> v;
v.resize(t.raw_data().size() / sizeof(int64_t));
memcpy(v.data(), t.raw_data().data(), t.raw_data().size());
std::vector<float> vf;
for (auto i : v) {
vf.push_back(static_cast<float>(i));
}
tf.mutable_raw_data()->assign(
reinterpret_cast<const char *>(vf.data()), sizeof(float) * vf.size());
} else if (t.data_type() == ::ONNX_NAMESPACE::TensorProto::INT64) {
tf.set_data_type(::ONNX_NAMESPACE::TensorProto::INT64);
tf.mutable_raw_data()->assign(t.raw_data().data(), t.raw_data().size());
} else {
CAFFE_THROW("Unsupported tensor data type for conversion: ",
t.data_type());
}
onnx_model.mutable_graph()->add_initializer()->CopyFrom(tf);
}
}
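At the byte level, the two raw_data branches above amount to the following illustrative numpy sketch (not the converter code itself): FLOAT initializers have their bytes reinterpreted as int64 values and cast element-wise to float, while INT64 initializers are copied through unchanged.

```python
# Byte-level illustration of the two raw_data branches above; not the converter.
import numpy as np

raw = np.array([1, 3, 224, 224], dtype=np.int64).tobytes()  # example raw_data

# FLOAT branch: reinterpret the bytes as int64 values, cast each to float32.
as_int64 = np.frombuffer(raw, dtype=np.int64)
float_raw_data = as_int64.astype(np.float32).tobytes()

# INT64 branch: bytes are passed through untouched.
int64_raw_data = raw

print(as_int64, np.frombuffer(float_raw_data, dtype=np.float32))
# [  1   3 224 224] [  1.   3. 224. 224.]
```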
@@ -473,7 +481,7 @@ void TensorRTTransformer::Transform(
auto shape_hints = InferShapes(&mapped_ws, pred_net, &shape_hints_ordered);
CAFFE_ENFORCE(pred_net, "Predict net cannot be nullptr");
onnx::OnnxExporter exporter(nullptr, true);
onnx::OnnxExporter exporter(nullptr);
tensorrt::TrtLogger logger;
auto trt_builder = tensorrt::TrtObject(nvinfer1::createInferBuilder(logger));
auto trt_network = tensorrt::TrtObject(trt_builder->createNetwork());
@@ -504,7 +512,7 @@ void TensorRTTransformer::Transform(
// but it should be OK as the cost is really small. We also need to keep the
// same exporter throughout the process to avoid duplicated dummy name
// generation
onnx::OnnxExporter exporter2(nullptr, true);
onnx::OnnxExporter exporter2(nullptr);
auto trt_converter = [this, &mapped_ws, &shape_hints, &exporter2](
const caffe2::NetDef& net) mutable {
return SubnetToTrtOp(net, &mapped_ws, &exporter2, &shape_hints);


@@ -37,11 +37,12 @@ def _print_net(net):
print(" output: {}".format(y))
_BASE_URL = 'https://s3.amazonaws.com/download.onnx/models/opset_{}'.format(onnx.defs.onnx_opset_version())
def _base_url(opset_version):
return 'https://s3.amazonaws.com/download.onnx/models/opset_{}'.format(opset_version)
# TODO: This is copied from https://github.com/onnx/onnx/blob/master/onnx/backend/test/runner/__init__.py. Maybe we should
# expose a model retrieval API from ONNX
def _download_onnx_model(model_name):
def _download_onnx_model(model_name, opset_version):
onnx_home = os.path.expanduser(os.getenv('ONNX_HOME', os.path.join('~', '.onnx')))
models_dir = os.getenv('ONNX_MODELS',
os.path.join(onnx_home, 'models'))
@@ -60,7 +61,7 @@ def _download_onnx_model(model_name):
# On Windows, NamedTemporaryFile can not be opened for a
# second time
url = '{}/{}.tar.gz'.format(_BASE_URL, model_name)
url = '{}/{}.tar.gz'.format(_base_url(opset_version), model_name)
download_file = tempfile.NamedTemporaryFile(delete=False)
try:
download_file.close()
@@ -113,8 +114,9 @@ class TensorRTOpTest(TestCase):
X = np.random.randn(52, 1, 3, 2).astype(np.float32)
self._test_relu_graph(X, 52, 50)
def _test_onnx_importer(self, model_name, data_input_index = 0):
model_dir = _download_onnx_model(model_name)
def _test_onnx_importer(self, model_name, data_input_index,
opset_version = onnx.defs.onnx_opset_version()):
model_dir = _download_onnx_model(model_name, opset_version)
model_def = onnx.load(os.path.join(model_dir, 'model.onnx'))
input_blob_dims = [int(x.dim_value) for x in model_def.graph.input[data_input_index].type.tensor_type.shape.dim]
op_inputs = [x.name for x in model_def.graph.input]
@@ -134,29 +136,29 @@ class TensorRTOpTest(TestCase):
Y_trt = namedtupledict('Outputs', op_outputs)(*output_values)
np.testing.assert_allclose(Y_c2, Y_trt, rtol=1e-3)
@unittest.skipIf(not workspace.C.use_trt, "No TensorRT support")
@unittest.skip("Until fixing Reshape op")
def test_resnet50(self):
self._test_onnx_importer('resnet50')
self._test_onnx_importer('resnet50', 0)
@unittest.skipIf(not workspace.C.use_trt, "No TensorRT support")
def test_bvlc_alexnet(self):
self._test_onnx_importer('bvlc_alexnet')
self._test_onnx_importer('bvlc_alexnet', 0)
@unittest.skipIf(not workspace.C.use_trt, "No TensorRT support")
@unittest.skip("Until fixing Unsqueeze op")
def test_densenet121(self):
self._test_onnx_importer('densenet121', -1)
self._test_onnx_importer('densenet121', -1, 3)
@unittest.skipIf(not workspace.C.use_trt, "No TensorRT support")
@unittest.skip("Until fixing Reshape op")
def test_inception_v1(self):
self._test_onnx_importer('inception_v1', -1)
self._test_onnx_importer('inception_v1', -1, 3)
@unittest.skipIf(not workspace.C.use_trt, "No TensorRT support")
@unittest.skip("Until fixing Reshape op")
def test_inception_v2(self):
self._test_onnx_importer('inception_v2')
self._test_onnx_importer('inception_v2', 0, 3)
@unittest.skip('Need to revisit our ChannelShuffle exporter to avoid generating 5D tensor')
def test_shufflenet(self):
self._test_onnx_importer('shufflenet')
self._test_onnx_importer('shufflenet', 0)
@unittest.skipIf(not workspace.C.use_trt, "No TensorRT support")
def test_squeezenet(self):
@@ -164,11 +166,11 @@ class TensorRTOpTest(TestCase):
@unittest.skipIf(not workspace.C.use_trt, "No TensorRT support")
def test_vgg16(self):
self._test_onnx_importer('vgg16')
self._test_onnx_importer('vgg16', 0)
@unittest.skipIf(not workspace.C.use_trt, "No TensorRT support")
@unittest.skip("Until fixing Reshape op")
def test_vgg19(self):
self._test_onnx_importer('vgg19', -1)
self._test_onnx_importer('vgg19', -1, 3)
class TensorRTTransformTest(DownloadingTestCase):


@@ -1013,7 +1013,7 @@ endif()
if (CAFFE2_CMAKE_BUILDING_WITH_MAIN_REPO)
if (USE_TENSORRT)
set(CMAKE_CUDA_COMPILER ${CUDA_NVCC_EXECUTABLE})
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../third_party/onnx-tensorrt)
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../third_party/onnx-tensorrt EXCLUDE_FROM_ALL)
include_directories("${CMAKE_CURRENT_LIST_DIR}/../third_party/onnx-tensorrt")
caffe2_interface_library(nvonnxparser_static onnx_trt_library)
list(APPEND Caffe2_DEPENDENCY_WHOLE_LINK_LIBS onnx_trt_library)


@@ -160,7 +160,8 @@ def hotpatch_var(var, prefix='USE_'):
# Before we run the setup_helpers, let's look for NO_* and WITH_*
# variables and hotpatch environment with the USE_* equivalent
use_env_vars = ['CUDA', 'CUDNN', 'FBGEMM', 'MIOPEN', 'MKLDNN', 'NNPACK', 'DISTRIBUTED',
'OPENCV', 'QNNPACK', 'FFMPEG', 'SYSTEM_NCCL', 'GLOO_IBVERBS']
'OPENCV', 'TENSORRT', 'QNNPACK', 'FFMPEG', 'SYSTEM_NCCL',
'GLOO_IBVERBS']
list(map(hotpatch_var, use_env_vars))
# Also hotpatch a few with BUILD_* equivalent
@@ -170,7 +171,7 @@ build_env_vars = ['BINARY', 'TEST', 'CAFFE2_OPS']
from tools.setup_helpers.cuda import USE_CUDA, CUDA_HOME, CUDA_VERSION
from tools.setup_helpers.build import (BUILD_BINARY, BUILD_TEST,
BUILD_CAFFE2_OPS, USE_LEVELDB,
USE_LMDB, USE_OPENCV, USE_FFMPEG)
USE_LMDB, USE_OPENCV, USE_TENSORRT, USE_FFMPEG)
from tools.setup_helpers.rocm import USE_ROCM, ROCM_HOME, ROCM_VERSION
from tools.setup_helpers.cudnn import (USE_CUDNN, CUDNN_LIBRARY,
CUDNN_LIB_DIR, CUDNN_INCLUDE_DIR)
@@ -388,6 +389,7 @@ def build_libs(libs):
my_env["USE_LEVELDB"] = "ON" if USE_LEVELDB else "OFF"
my_env["USE_LMDB"] = "ON" if USE_LMDB else "OFF"
my_env["USE_OPENCV"] = "ON" if USE_OPENCV else "OFF"
my_env["USE_TENSORRT"] = "ON" if USE_TENSORRT else "OFF"
my_env["USE_FFMPEG"] = "ON" if USE_FFMPEG else "OFF"
my_env["USE_DISTRIBUTED"] = "ON" if USE_DISTRIBUTED else "OFF"
my_env["USE_SYSTEM_NCCL"] = "ON" if USE_SYSTEM_NCCL else "OFF"

@@ -1 +1 @@
Subproject commit fa0964e8477fc004ee2f49ee77ffce0bf7f711a9
Subproject commit f1c7aa63d88d8d8ef70490f2ebb6b33f7450218b


@@ -228,6 +228,7 @@ function build_caffe2() {
-DUSE_LMDB=$USE_LMDB \
-DUSE_OPENCV=$USE_OPENCV \
-DUSE_QNNPACK=$USE_QNNPACK \
-DUSE_TENSORRT=$USE_TENSORRT \
-DUSE_FFMPEG=$USE_FFMPEG \
-DUSE_GLOG=OFF \
-DUSE_GFLAGS=OFF \


@@ -6,4 +6,5 @@ BUILD_CAFFE2_OPS = not check_negative_env_flag('BUILD_CAFFE2_OPS')
USE_LEVELDB = check_env_flag('USE_LEVELDB')
USE_LMDB = check_env_flag('USE_LMDB')
USE_OPENCV = check_env_flag('USE_OPENCV')
USE_TENSORRT = check_env_flag('USE_TENSORRT')
USE_FFMPEG = check_env_flag('USE_FFMPEG')