Mirror of https://github.com/zebrajr/tensorflow.git, synced 2025-12-06 00:19:58 +01:00.
Enable CUDA forward-compatibility mode in all RBE jobs by default.

Forward-compatibility mode in hermetic CUDA allows the linker to use the user-mode driver (UMD) from the Bazel cache, so there is no need to install a UMD in the RBE Docker image. The UMD on RBE machines is rarely updated, so RBE jobs need forward-compatibility mode to exercise the most recent CUDA features in the tests. The non-RBE job runners are updated more often, so we can keep the drivers on those machines current and not rely on forward-compatibility mode.

PiperOrigin-RevId: 810595379
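For reference, a minimal sketch of how an RBE job could opt in, assuming the forward-compatibility switch is the @cuda_driver flag exposed by the hermetic CUDA rules (the exact flag name and the rbe_cuda config name are assumptions of this note, not taken from this change):

    # Illustrative .bazelrc fragment for an RBE CUDA job
    build:rbe_cuda --@cuda_driver//:enable_forward_compatibility=true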
# buildifier: disable=load-on-top
workspace(name = "xla")

load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

# Initialize toolchains for ML projects.
#
# A hermetic build system is designed to produce completely reproducible builds for C++.
# Details: https://github.com/google-ml-infra/rules_ml_toolchain
http_archive(
    name = "rules_ml_toolchain",
    sha256 = "77ad040f826af31ce3142e3b8bcf6c61972b4f95c84185676fa1af325fbf52c6",
    strip_prefix = "rules_ml_toolchain-a912c87727405e2145b168e5b62a5d5ae7232cb2",
    urls = [
        "https://github.com/google-ml-infra/rules_ml_toolchain/archive/a912c87727405e2145b168e5b62a5d5ae7232cb2.tar.gz",
    ],
)
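
# Note on updating the pin above: when bumping the rules_ml_toolchain commit,
# the sha256 must be recomputed for the new archive, e.g. (illustrative shell
# one-liner, run locally):
#   curl -L https://github.com/google-ml-infra/rules_ml_toolchain/archive/<new_commit>.tar.gz | sha256sum
# and the same <new_commit> hash goes into strip_prefix and urls.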

load(
    "@rules_ml_toolchain//cc/deps:cc_toolchain_deps.bzl",
    "cc_toolchain_deps",
)

cc_toolchain_deps()

register_toolchains("@rules_ml_toolchain//cc:linux_x86_64_linux_x86_64")

register_toolchains("@rules_ml_toolchain//cc:linux_x86_64_linux_x86_64_cuda")

register_toolchains("@rules_ml_toolchain//cc:linux_aarch64_linux_aarch64")

register_toolchains("@rules_ml_toolchain//cc:linux_aarch64_linux_aarch64_cuda")
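
# Both the plain and the *_cuda toolchains registered above take part in
# toolchain resolution; which one is selected depends on the build
# configuration (for XLA builds this is typically driven by something like
# --config=cuda in .bazelrc). The exact selection logic lives in
# rules_ml_toolchain and is not spelled out here.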

# Initialize the XLA repository and all dependencies.
#
# The cascade of load() statements and xla_workspace?() calls works around the
# restriction that load() statements need to be at the top of .bzl files.
# E.g. we can not retrieve a new repository with http_archive and then load()
# a macro from that repository in the same file.
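#
# For illustration, with hypothetical names: inside a .bzl file, load() must
# come before any other statement, so a macro cannot both create a repository
# and load() from it in the same file:
#
#   # some_workspace.bzl -- would not work
#   load("@some_repo//:defs.bzl", "some_macro")  # @some_repo does not exist yet
#   def setup():
#       http_archive(name = "some_repo", ...)
#
# Splitting the setup into workspace4.bzl ... workspace0.bzl lets each stage
# load() only from repositories created by the stage invoked before it below.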

load(":workspace4.bzl", "xla_workspace4")

xla_workspace4()

load(":workspace3.bzl", "xla_workspace3")

xla_workspace3()

# Initialize hermetic Python
load("//third_party/py:python_init_rules.bzl", "python_init_rules")

python_init_rules()

load("//third_party/py:python_init_repositories.bzl", "python_init_repositories")

python_init_repositories(
    requirements = {
        "3.11": "//:requirements_lock_3_11.txt",
        "3.12": "//:requirements_lock_3_12.txt",
    },
)
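
# Hedged usage note: the hermetic Python rules select the interpreter version
# via a repository environment variable, commonly set as e.g.
#   build --repo_env=HERMETIC_PYTHON_VERSION=3.12
# The keys of the requirements dict above are the versions such a setting can
# choose between. Treat the exact variable name as an assumption of this
# comment rather than something stated in this file.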

load("//third_party/py:python_init_toolchains.bzl", "python_init_toolchains")

python_init_toolchains()

load("//third_party/py:python_init_pip.bzl", "python_init_pip")

python_init_pip()

load("@pypi//:requirements.bzl", "install_deps")

install_deps()
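
# Hedged usage note: install_deps() materializes the wheels pinned in the lock
# files as external repositories under @pypi. BUILD files then usually depend
# on them via the requirement() helper from the same @pypi//:requirements.bzl,
# e.g. requirement("numpy"); that helper name follows the common rules_python
# convention and is an assumption of this comment.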

load(":workspace2.bzl", "xla_workspace2")

xla_workspace2()

load(":workspace1.bzl", "xla_workspace1")

xla_workspace1()

load(":workspace0.bzl", "xla_workspace0")

xla_workspace0()

load(
    "@rules_ml_toolchain//gpu/cuda:cuda_json_init_repository.bzl",
    "cuda_json_init_repository",
)

cuda_json_init_repository()
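
# Hedged note on version selection: cuda_json_init_repository() fetches the
# NVIDIA redistribution JSON manifests; which CUDA/cuDNN versions are actually
# downloaded is typically pinned via repository environment variables such as
#   build --repo_env=HERMETIC_CUDA_VERSION=<version>
#   build --repo_env=HERMETIC_CUDNN_VERSION=<version>
# The exact variable names are an assumption of this comment, not defined here.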

load(
    "@cuda_redist_json//:distributions.bzl",
    "CUDA_REDISTRIBUTIONS",
    "CUDNN_REDISTRIBUTIONS",
)
load(
    "@rules_ml_toolchain//gpu/cuda:cuda_redist_init_repositories.bzl",
    "cuda_redist_init_repositories",
    "cudnn_redist_init_repository",
)

cuda_redist_init_repositories(
    cuda_redistributions = CUDA_REDISTRIBUTIONS,
)

cudnn_redist_init_repository(
    cudnn_redistributions = CUDNN_REDISTRIBUTIONS,
)

load(
    "@rules_ml_toolchain//gpu/cuda:cuda_configure.bzl",
    "cuda_configure",
)

cuda_configure(name = "local_config_cuda")
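
# The repository name "local_config_cuda" matches what XLA/TensorFlow BUILD
# files have historically used for CUDA configuration, which is presumably why
# the hermetic cuda_configure() keeps publishing under the same name.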

load(
    "@rules_ml_toolchain//gpu/nccl:nccl_redist_init_repository.bzl",
    "nccl_redist_init_repository",
)

nccl_redist_init_repository()

load(
    "@rules_ml_toolchain//gpu/nccl:nccl_configure.bzl",
    "nccl_configure",
)

nccl_configure(name = "local_config_nccl")

load(
    "@rules_ml_toolchain//gpu/nvshmem:nvshmem_json_init_repository.bzl",
    "nvshmem_json_init_repository",
)

nvshmem_json_init_repository()

load(
    "@nvshmem_redist_json//:distributions.bzl",
    "NVSHMEM_REDISTRIBUTIONS",
)
load(
    "@rules_ml_toolchain//gpu/nvshmem:nvshmem_redist_init_repository.bzl",
    "nvshmem_redist_init_repository",
)

nvshmem_redist_init_repository(
    nvshmem_redistributions = NVSHMEM_REDISTRIBUTIONS,
)