Mirror of https://github.com/zebrajr/tensorflow.git
Disable TensorRT in TF, XLA and JAX.
This is needed for hermetic CUDA integration in Google ML projects, since TensorRT is not distributed as freely as the other CUDA/cuDNN packages.

PiperOrigin-RevId: 662601190
Parent: 3bf565ae80
Commit: caa2b33e73
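
For context on the build-flow effect: after this change the interactive configure flow probes only for CUDA, cuDNN and, on Linux, NCCL; TensorRT is no longer considered on any platform. A minimal sketch of the resulting selection logic, mirroring the validate_cuda_config hunk in configure.py below (the helper name and call shape here are illustrative, not part of the script):

def remaining_cuda_libraries(environ, on_linux=True):
    # TensorRT is never appended any more, regardless of TF_NEED_TENSORRT.
    cuda_libraries = ['cuda', 'cudnn']
    if on_linux and environ.get('TF_NCCL_VERSION'):
        cuda_libraries.append('nccl')
    return cuda_libraries

print(remaining_cuda_libraries({'TF_NCCL_VERSION': '2'}))  # ['cuda', 'cudnn', 'nccl']
print(remaining_cuda_libraries({}))                        # ['cuda', 'cudnn']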
.bazelrc (6 changes)

@@ -222,8 +222,6 @@ build:cuda --@local_config_cuda//:enable_cuda
 
 # CUDA: This config refers to building CUDA op kernels with clang.
 build:cuda_clang --config=cuda
-# Enable TensorRT optimizations https://developer.nvidia.com/tensorrt
-build:cuda_clang --config=tensorrt
 build:cuda_clang --action_env=TF_CUDA_CLANG="1"
 build:cuda_clang --@local_config_cuda//:cuda_compiler=clang
 # Select supported compute capabilities (supported graphics cards).
@@ -546,7 +544,6 @@ build:rbe_linux_cuda --config=rbe_linux_cpu
 # For Remote build execution -- GPU configuration
 build:rbe_linux_cuda --repo_env=REMOTE_GPU_TESTING=1
 build:rbe_linux_cuda --repo_env=TF_CUDA_CONFIG_REPO="@sigbuild-r2.17-clang_config_cuda"
-build:rbe_linux_cuda --repo_env=TF_TENSORRT_CONFIG_REPO="@sigbuild-r2.17-clang_config_tensorrt"
 build:rbe_linux_cuda --repo_env=TF_NCCL_CONFIG_REPO="@sigbuild-r2.17-clang_config_nccl"
 test:rbe_linux_cuda --test_env=LD_LIBRARY_PATH="/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64"
 
@@ -678,9 +675,8 @@ build:unsupported_gpu_linux --config=unsupported_cpu_linux
 build:unsupported_gpu_linux --action_env=TF_CUDA_VERSION="11"
 build:unsupported_gpu_linux --action_env=TF_CUDNN_VERSION="8"
 build:unsupported_gpu_linux --repo_env=TF_CUDA_COMPUTE_CAPABILITIES="sm_35,sm_50,sm_60,sm_70,sm_75,compute_80"
-build:unsupported_gpu_linux --config=tensorrt
 build:unsupported_gpu_linux --action_env=CUDA_TOOLKIT_PATH="/usr/local/cuda-11.2"
-build:unsupported_gpu_linux --action_env=LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda-11.1/lib64:/usr/local/tensorrt/lib"
+build:unsupported_gpu_linux --action_env=LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda-11.1/lib64"
 build:unsupported_gpu_linux --action_env=GCC_HOST_COMPILER_PATH="/dt9/usr/bin/gcc"
 build:unsupported_gpu_linux --crosstool_top=@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain
 
@@ -13,6 +13,8 @@
 * C API:
   * An optional, fourth parameter was added `TfLiteOperatorCreate` as a step forward towards a cleaner API for `TfLiteOperator`. Function `TfLiteOperatorCreate` was added recently, in TensorFlow Lite version 2.17.0, released on 7/11/2024, and we do not expect there will be much code using this function yet. Any code breakages can be easily resolved by passing nullptr as the new, 4th parameter.
 
+* TensorRT support is disabled in CUDA builds for code health improvement.
+
 ### Known Caveats
 
 * <CAVEATS REGARDING THE RELEASE (BUT NOT BREAKING CHANGES).>
configure.py (44 changes)

@@ -33,7 +33,6 @@ except ImportError:
 
 _DEFAULT_CUDA_VERSION = '11'
 _DEFAULT_CUDNN_VERSION = '2'
-_DEFAULT_TENSORRT_VERSION = '6'
 _DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,7.0'
 
 _SUPPORTED_ANDROID_NDK_VERSIONS = [19, 20, 21, 25]
@@ -982,23 +981,6 @@ def set_tf_cudnn_version(environ_cp):
   environ_cp['TF_CUDNN_VERSION'] = tf_cudnn_version
 
 
-def set_tf_tensorrt_version(environ_cp):
-  """Set TF_TENSORRT_VERSION."""
-  if not (is_linux() or is_windows()):
-    raise ValueError('Currently TensorRT is only supported on Linux platform.')
-
-  if not int(environ_cp.get('TF_NEED_TENSORRT', False)):
-    return
-
-  ask_tensorrt_version = (
-      'Please specify the TensorRT version you want to use. '
-      '[Leave empty to default to TensorRT %s]: ') % _DEFAULT_TENSORRT_VERSION
-  tf_tensorrt_version = get_from_env_or_user_or_default(
-      environ_cp, 'TF_TENSORRT_VERSION', ask_tensorrt_version,
-      _DEFAULT_TENSORRT_VERSION)
-  environ_cp['TF_TENSORRT_VERSION'] = tf_tensorrt_version
-
-
 def set_tf_nccl_version(environ_cp):
   """Set TF_NCCL_VERSION."""
   if not is_linux():
@@ -1244,14 +1226,8 @@ def validate_cuda_config(environ_cp):
 
   cuda_libraries = ['cuda', 'cudnn']
   if is_linux():
-    if int(environ_cp.get('TF_NEED_TENSORRT', False)):
-      cuda_libraries.append('tensorrt')
     if environ_cp.get('TF_NCCL_VERSION', None):
       cuda_libraries.append('nccl')
-  if is_windows():
-    if int(environ_cp.get('TF_NEED_TENSORRT', False)):
-      cuda_libraries.append('tensorrt')
-      print('WARNING: TensorRT support on Windows is experimental\n')
 
   paths = glob.glob('**/third_party/gpus/find_cuda_config.py', recursive=True)
   if not paths:
@@ -1278,11 +1254,6 @@ def validate_cuda_config(environ_cp):
     print(' %s' % config['cudnn_library_dir'])
     print(' %s' % config['cudnn_include_dir'])
 
-  if 'tensorrt_version' in config:
-    print('Found TensorRT %s in:' % config['tensorrt_version'])
-    print(' %s' % config['tensorrt_library_dir'])
-    print(' %s' % config['tensorrt_include_dir'])
-
   if config.get('nccl_version', None):
     print('Found NCCL %s in:' % config['nccl_version'])
     print(' %s' % config['nccl_library_dir'])
@@ -1344,9 +1315,6 @@ def main():
   environ_cp['TF_DOWNLOAD_CLANG'] = '0'
   environ_cp['TF_NEED_MPI'] = '0'
 
-  if is_macos():
-    environ_cp['TF_NEED_TENSORRT'] = '0'
-
   if is_ppc64le():
     # Enable MMA Dynamic Dispatch support if 'gcc' and if linker >= 2.35
     gcc_env = get_gcc_compiler(environ_cp)
@@ -1398,13 +1366,6 @@ def main():
   if (environ_cp.get('TF_NEED_CUDA') == '1' and
       'TF_CUDA_CONFIG_REPO' not in environ_cp):
 
-    set_action_env_var(
-        environ_cp,
-        'TF_NEED_TENSORRT',
-        'TensorRT',
-        False,
-        bazel_config_name='tensorrt')
-
     environ_save = dict(environ_cp)
     for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
 
@@ -1413,7 +1374,6 @@ def main():
             'TF_CUDA_VERSION',
             'TF_CUBLAS_VERSION',
             'TF_CUDNN_VERSION',
-            'TF_TENSORRT_VERSION',
             'TF_NCCL_VERSION',
             'TF_CUDA_PATHS',
             # Items below are for backwards compatibility when not using
@@ -1422,7 +1382,6 @@ def main():
            'CUDNN_INSTALL_PATH',
            'NCCL_INSTALL_PATH',
            'NCCL_HDR_PATH',
-            'TENSORRT_INSTALL_PATH'
        ]
        # Note: set_action_env_var above already writes to bazelrc.
        for name in cuda_env_names:
@@ -1435,10 +1394,7 @@ def main():
 
       set_tf_cuda_version(environ_cp)
       set_tf_cudnn_version(environ_cp)
-      if is_windows():
-        set_tf_tensorrt_version(environ_cp)
       if is_linux():
-        set_tf_tensorrt_version(environ_cp)
         set_tf_nccl_version(environ_cp)
 
       set_tf_cuda_paths(environ_cp)
@@ -714,7 +714,7 @@ def initialize_rbe_configs():
             "TF_CUDNN_VERSION": "8.9",
             "TF_ENABLE_XLA": "1",
             "TF_NEED_CUDA": "1",
-            "TF_NEED_TENSORRT": "1",
+            "TF_NEED_TENSORRT": "0",
             "TF_SYSROOT": "/dt9",
             "TF_TENSORRT_VERSION": "8.6",
         },
@@ -753,7 +753,7 @@ def initialize_rbe_configs():
             "TF_CUDNN_VERSION": "8.9",
             "TF_ENABLE_XLA": "1",
             "TF_NEED_CUDA": "1",
-            "TF_NEED_TENSORRT": "1",
+            "TF_NEED_TENSORRT": "0",
             "TF_SYSROOT": "/dt9",
             "TF_TENSORRT_VERSION": "8.6",
         },
@@ -793,7 +793,7 @@ def initialize_rbe_configs():
             "TF_ENABLE_XLA": "1",
             "TF_NEED_CUDA": "1",
             "TF_SYSROOT": "/dt9",
-            "TF_NEED_TENSORRT": "1",
+            "TF_NEED_TENSORRT": "0",
             "TF_TENSORRT_VERSION": "8.6",
         },
     )
@@ -831,7 +831,7 @@ def initialize_rbe_configs():
             "TF_ENABLE_XLA": "1",
             "TF_NEED_CUDA": "1",
             "TF_SYSROOT": "/dt9",
-            "TF_NEED_TENSORRT": "1",
+            "TF_NEED_TENSORRT": "0",
             "TF_TENSORRT_VERSION": "8.6",
         },
     )
@@ -869,7 +869,7 @@ def initialize_rbe_configs():
             "TF_ENABLE_XLA": "1",
             "TF_NEED_CUDA": "1",
             "TF_SYSROOT": "/dt9",
-            "TF_NEED_TENSORRT": "1",
+            "TF_NEED_TENSORRT": "0",
             "TF_TENSORRT_VERSION": "10.0",
         },
     )
@@ -42,7 +42,7 @@ def _tensorflow_rbe_config(name, compiler, python_versions, os, rocm_version = N
         "TF_CUDNN_VERSION": cudnn_version,
         "TF_CUDA_VERSION": cuda_version,
         "CUDNN_INSTALL_PATH": cudnn_install_path if cudnn_install_path != None else "/usr/lib/x86_64-linux-gnu",
-        "TF_NEED_TENSORRT": "1",
+        "TF_NEED_TENSORRT": "0",
         "TF_TENSORRT_VERSION": tensorrt_version if tensorrt_version != None else "",
         "TENSORRT_INSTALL_PATH": tensorrt_install_path if tensorrt_install_path != None else "/usr/lib/x86_64-linux-gnu",
         "GCC_HOST_COMPILER_PATH": compiler if not compiler.endswith("clang") else "",
third_party/xla/.bazelrc (vendored, 6 changes)

@@ -222,8 +222,6 @@ build:cuda --@local_config_cuda//:enable_cuda
 
 # CUDA: This config refers to building CUDA op kernels with clang.
 build:cuda_clang --config=cuda
-# Enable TensorRT optimizations https://developer.nvidia.com/tensorrt
-build:cuda_clang --config=tensorrt
 build:cuda_clang --action_env=TF_CUDA_CLANG="1"
 build:cuda_clang --@local_config_cuda//:cuda_compiler=clang
 # Select supported compute capabilities (supported graphics cards).
@@ -546,7 +544,6 @@ build:rbe_linux_cuda --config=rbe_linux_cpu
 # For Remote build execution -- GPU configuration
 build:rbe_linux_cuda --repo_env=REMOTE_GPU_TESTING=1
 build:rbe_linux_cuda --repo_env=TF_CUDA_CONFIG_REPO="@sigbuild-r2.17-clang_config_cuda"
-build:rbe_linux_cuda --repo_env=TF_TENSORRT_CONFIG_REPO="@sigbuild-r2.17-clang_config_tensorrt"
 build:rbe_linux_cuda --repo_env=TF_NCCL_CONFIG_REPO="@sigbuild-r2.17-clang_config_nccl"
 test:rbe_linux_cuda --test_env=LD_LIBRARY_PATH="/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64"
 
@@ -678,9 +675,8 @@ build:unsupported_gpu_linux --config=unsupported_cpu_linux
 build:unsupported_gpu_linux --action_env=TF_CUDA_VERSION="11"
 build:unsupported_gpu_linux --action_env=TF_CUDNN_VERSION="8"
 build:unsupported_gpu_linux --repo_env=TF_CUDA_COMPUTE_CAPABILITIES="sm_35,sm_50,sm_60,sm_70,sm_75,compute_80"
-build:unsupported_gpu_linux --config=tensorrt
 build:unsupported_gpu_linux --action_env=CUDA_TOOLKIT_PATH="/usr/local/cuda-11.2"
-build:unsupported_gpu_linux --action_env=LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda-11.1/lib64:/usr/local/tensorrt/lib"
+build:unsupported_gpu_linux --action_env=LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda-11.1/lib64"
 build:unsupported_gpu_linux --action_env=GCC_HOST_COMPILER_PATH="/dt9/usr/bin/gcc"
 build:unsupported_gpu_linux --crosstool_top=@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain
 
@@ -333,7 +333,6 @@ class XLAConfigOptions:
   # CUDA specific
   cuda_compiler: CudaCompiler
   using_nccl: bool
-  using_tensorrt: bool
 
   def to_bazelrc_lines(
       self,
@@ -401,7 +400,6 @@ class XLAConfigOptions:
           f" TF_CUDA_COMPUTE_CAPABILITIES={','.join(dpav.cuda_compute_capabilities)}"
       )
       rc.append(f"build --action_env TF_CUDNN_VERSION={dpav.cudnn_version}")
-      rc.append(f"build --repo_env TF_NEED_TENSORRT={int(self.using_tensorrt)}")
       if self.using_nccl:
         rc.append(f"build --action_env TF_NCCL_VERSION={dpav.nccl_version}")
       else:
@@ -476,7 +474,6 @@ def _parse_args():
       default="-Wno-sign-compare",
   )
   parser.add_argument("--nccl", action="store_true")
-  parser.add_argument("--tensorrt", action="store_true")
 
   # Path and version overrides
   path_help = "Optional: will be found on PATH if possible."
@@ -518,7 +515,6 @@ def main():
       python_bin_path=args.python_bin_path,
       compiler_options=args.compiler_options,
       using_nccl=args.nccl,
-      using_tensorrt=args.tensorrt,
   )
 
   bazelrc_lines = config.to_bazelrc_lines(
@@ -85,7 +85,6 @@ class ConfigureTest(absltest.TestCase):
         compiler_options=list(_COMPILER_OPTIONS),
         cuda_compiler=CudaCompiler.NVCC,
         using_nccl=False,
-        using_tensorrt=False,
     )
 
     bazelrc_lines = config.to_bazelrc_lines(
@@ -107,7 +106,6 @@ class ConfigureTest(absltest.TestCase):
         compiler_options=list(_COMPILER_OPTIONS),
         cuda_compiler=CudaCompiler.NVCC,
         using_nccl=False,
-        using_tensorrt=False,
     )
 
     bazelrc_lines = config.to_bazelrc_lines(
@@ -128,7 +126,6 @@ class ConfigureTest(absltest.TestCase):
         compiler_options=list(_COMPILER_OPTIONS),
         cuda_compiler=CudaCompiler.CLANG,
         using_nccl=False,
-        using_tensorrt=False,
     )
 
     bazelrc_lines = config.to_bazelrc_lines(
@@ -150,7 +147,6 @@ class ConfigureTest(absltest.TestCase):
         compiler_options=list(_COMPILER_OPTIONS),
         cuda_compiler=CudaCompiler.NVCC,
         using_nccl=False,
-        using_tensorrt=False,
     )
 
     bazelrc_lines = config.to_bazelrc_lines(
@@ -172,7 +168,6 @@ class ConfigureTest(absltest.TestCase):
         compiler_options=list(_COMPILER_OPTIONS),
         cuda_compiler=CudaCompiler.NVCC,
         using_nccl=False,
-        using_tensorrt=False,
     )
 
     bazelrc_lines = config.to_bazelrc_lines(
@@ -7,7 +7,6 @@ build --action_env CUDA_TOOLKIT_PATH=/usr/local/cuda-12.2
 build --action_env TF_CUBLAS_VERSION=12.3
 build --action_env TF_CUDA_COMPUTE_CAPABILITIES=7.5
 build --action_env TF_CUDNN_VERSION=8
-build --repo_env TF_NEED_TENSORRT=0
 build --config nonccl
 build --action_env LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64
 build --action_env PYTHON_BIN_PATH=/usr/bin/python3
@@ -7,7 +7,6 @@ build --action_env CUDA_TOOLKIT_PATH=/usr/local/cuda-12.2
 build --action_env TF_CUBLAS_VERSION=12.3
 build --action_env TF_CUDA_COMPUTE_CAPABILITIES=7.5
 build --action_env TF_CUDNN_VERSION=8
-build --repo_env TF_NEED_TENSORRT=0
 build --config nonccl
 build --action_env LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64
 build --action_env PYTHON_BIN_PATH=/usr/bin/python3
@@ -4,7 +4,6 @@ build --action_env CUDA_TOOLKIT_PATH=/usr/local/cuda-12.2
 build --action_env TF_CUBLAS_VERSION=12.3
 build --action_env TF_CUDA_COMPUTE_CAPABILITIES=7.5
 build --action_env TF_CUDNN_VERSION=8
-build --repo_env TF_NEED_TENSORRT=0
 build --config nonccl
 build --action_env LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64
 build --action_env PYTHON_BIN_PATH=/usr/bin/python3
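
To make the effect on generated configs concrete: the golden test data in the hunks above no longer contains a TF_NEED_TENSORRT entry at all. A small sketch that reproduces the expected CUDA lines from that test data (values are taken verbatim from the hunks; this is an illustration, not the configure script itself):

# CUDA lines from the golden test data after this change; the former
# "build --repo_env TF_NEED_TENSORRT=0" entry is gone.
expected_cuda_bazelrc = [
    "build --action_env CUDA_TOOLKIT_PATH=/usr/local/cuda-12.2",
    "build --action_env TF_CUBLAS_VERSION=12.3",
    "build --action_env TF_CUDA_COMPUTE_CAPABILITIES=7.5",
    "build --action_env TF_CUDNN_VERSION=8",
    "build --config nonccl",
    "build --action_env LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64",
    "build --action_env PYTHON_BIN_PATH=/usr/bin/python3",
]
assert not any("TENSORRT" in line for line in expected_cuda_bazelrc)
print("\n".join(expected_cuda_bazelrc))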
third_party/xla/third_party/tsl/.bazelrc (vendored, 6 changes)

@@ -222,8 +222,6 @@ build:cuda --@local_config_cuda//:enable_cuda
 
 # CUDA: This config refers to building CUDA op kernels with clang.
 build:cuda_clang --config=cuda
-# Enable TensorRT optimizations https://developer.nvidia.com/tensorrt
-build:cuda_clang --config=tensorrt
 build:cuda_clang --action_env=TF_CUDA_CLANG="1"
 build:cuda_clang --@local_config_cuda//:cuda_compiler=clang
 # Select supported compute capabilities (supported graphics cards).
@@ -546,7 +544,6 @@ build:rbe_linux_cuda --config=rbe_linux_cpu
 # For Remote build execution -- GPU configuration
 build:rbe_linux_cuda --repo_env=REMOTE_GPU_TESTING=1
 build:rbe_linux_cuda --repo_env=TF_CUDA_CONFIG_REPO="@sigbuild-r2.17-clang_config_cuda"
-build:rbe_linux_cuda --repo_env=TF_TENSORRT_CONFIG_REPO="@sigbuild-r2.17-clang_config_tensorrt"
 build:rbe_linux_cuda --repo_env=TF_NCCL_CONFIG_REPO="@sigbuild-r2.17-clang_config_nccl"
 test:rbe_linux_cuda --test_env=LD_LIBRARY_PATH="/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64"
 
@@ -678,9 +675,8 @@ build:unsupported_gpu_linux --config=unsupported_cpu_linux
 build:unsupported_gpu_linux --action_env=TF_CUDA_VERSION="11"
 build:unsupported_gpu_linux --action_env=TF_CUDNN_VERSION="8"
 build:unsupported_gpu_linux --repo_env=TF_CUDA_COMPUTE_CAPABILITIES="sm_35,sm_50,sm_60,sm_70,sm_75,compute_80"
-build:unsupported_gpu_linux --config=tensorrt
 build:unsupported_gpu_linux --action_env=CUDA_TOOLKIT_PATH="/usr/local/cuda-11.2"
-build:unsupported_gpu_linux --action_env=LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda-11.1/lib64:/usr/local/tensorrt/lib"
+build:unsupported_gpu_linux --action_env=LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda-11.1/lib64"
 build:unsupported_gpu_linux --action_env=GCC_HOST_COMPILER_PATH="/dt9/usr/bin/gcc"
 build:unsupported_gpu_linux --crosstool_top=@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain
 
@@ -714,7 +714,7 @@ def initialize_rbe_configs():
             "TF_CUDNN_VERSION": "8.9",
             "TF_ENABLE_XLA": "1",
             "TF_NEED_CUDA": "1",
-            "TF_NEED_TENSORRT": "1",
+            "TF_NEED_TENSORRT": "0",
             "TF_SYSROOT": "/dt9",
             "TF_TENSORRT_VERSION": "8.6",
         },
@@ -753,7 +753,7 @@ def initialize_rbe_configs():
             "TF_CUDNN_VERSION": "8.9",
             "TF_ENABLE_XLA": "1",
             "TF_NEED_CUDA": "1",
-            "TF_NEED_TENSORRT": "1",
+            "TF_NEED_TENSORRT": "0",
             "TF_SYSROOT": "/dt9",
             "TF_TENSORRT_VERSION": "8.6",
         },
@@ -793,7 +793,7 @@ def initialize_rbe_configs():
             "TF_ENABLE_XLA": "1",
             "TF_NEED_CUDA": "1",
             "TF_SYSROOT": "/dt9",
-            "TF_NEED_TENSORRT": "1",
+            "TF_NEED_TENSORRT": "0",
             "TF_TENSORRT_VERSION": "8.6",
         },
     )
@@ -831,7 +831,7 @@ def initialize_rbe_configs():
             "TF_ENABLE_XLA": "1",
             "TF_NEED_CUDA": "1",
             "TF_SYSROOT": "/dt9",
-            "TF_NEED_TENSORRT": "1",
+            "TF_NEED_TENSORRT": "0",
             "TF_TENSORRT_VERSION": "8.6",
         },
     )
@@ -869,7 +869,7 @@ def initialize_rbe_configs():
             "TF_ENABLE_XLA": "1",
             "TF_NEED_CUDA": "1",
             "TF_SYSROOT": "/dt9",
-            "TF_NEED_TENSORRT": "1",
+            "TF_NEED_TENSORRT": "0",
             "TF_TENSORRT_VERSION": "10.0",
         },
     )
@@ -42,7 +42,7 @@ def _tensorflow_rbe_config(name, compiler, python_versions, os, rocm_version = N
         "TF_CUDNN_VERSION": cudnn_version,
         "TF_CUDA_VERSION": cuda_version,
         "CUDNN_INSTALL_PATH": cudnn_install_path if cudnn_install_path != None else "/usr/lib/x86_64-linux-gnu",
-        "TF_NEED_TENSORRT": "1",
+        "TF_NEED_TENSORRT": "0",
         "TF_TENSORRT_VERSION": tensorrt_version if tensorrt_version != None else "",
         "TENSORRT_INSTALL_PATH": tensorrt_install_path if tensorrt_install_path != None else "/usr/lib/x86_64-linux-gnu",
         "GCC_HOST_COMPILER_PATH": compiler if not compiler.endswith("clang") else "",
@@ -714,7 +714,7 @@ def initialize_rbe_configs():
             "TF_CUDNN_VERSION": "8.9",
             "TF_ENABLE_XLA": "1",
             "TF_NEED_CUDA": "1",
-            "TF_NEED_TENSORRT": "1",
+            "TF_NEED_TENSORRT": "0",
             "TF_SYSROOT": "/dt9",
             "TF_TENSORRT_VERSION": "8.6",
         },
@@ -753,7 +753,7 @@ def initialize_rbe_configs():
             "TF_CUDNN_VERSION": "8.9",
             "TF_ENABLE_XLA": "1",
             "TF_NEED_CUDA": "1",
-            "TF_NEED_TENSORRT": "1",
+            "TF_NEED_TENSORRT": "0",
             "TF_SYSROOT": "/dt9",
             "TF_TENSORRT_VERSION": "8.6",
         },
@@ -793,7 +793,7 @@ def initialize_rbe_configs():
             "TF_ENABLE_XLA": "1",
             "TF_NEED_CUDA": "1",
             "TF_SYSROOT": "/dt9",
-            "TF_NEED_TENSORRT": "1",
+            "TF_NEED_TENSORRT": "0",
             "TF_TENSORRT_VERSION": "8.6",
         },
     )
@@ -831,7 +831,7 @@ def initialize_rbe_configs():
             "TF_ENABLE_XLA": "1",
             "TF_NEED_CUDA": "1",
             "TF_SYSROOT": "/dt9",
-            "TF_NEED_TENSORRT": "1",
+            "TF_NEED_TENSORRT": "0",
             "TF_TENSORRT_VERSION": "8.6",
         },
     )
@@ -869,7 +869,7 @@ def initialize_rbe_configs():
             "TF_ENABLE_XLA": "1",
             "TF_NEED_CUDA": "1",
             "TF_SYSROOT": "/dt9",
-            "TF_NEED_TENSORRT": "1",
+            "TF_NEED_TENSORRT": "0",
             "TF_TENSORRT_VERSION": "10.0",
         },
     )
@@ -42,7 +42,7 @@ def _tensorflow_rbe_config(name, compiler, python_versions, os, rocm_version = N
         "TF_CUDNN_VERSION": cudnn_version,
         "TF_CUDA_VERSION": cuda_version,
         "CUDNN_INSTALL_PATH": cudnn_install_path if cudnn_install_path != None else "/usr/lib/x86_64-linux-gnu",
-        "TF_NEED_TENSORRT": "1",
+        "TF_NEED_TENSORRT": "0",
         "TF_TENSORRT_VERSION": tensorrt_version if tensorrt_version != None else "",
         "TENSORRT_INSTALL_PATH": tensorrt_install_path if tensorrt_install_path != None else "/usr/lib/x86_64-linux-gnu",
         "GCC_HOST_COMPILER_PATH": compiler if not compiler.endswith("clang") else "",