Remove local_config_nvshmem repository from XLA and Tensorflow WORKSPACE files.

Upgrading the manylinux compliance tag in a [JAX PR](https://github.com/jax-ml/jax/pull/29672) enabled building targets with linked `nvshmem` libraries.

PiperOrigin-RevId: 786533277
This commit is contained in:
A. Unique TensorFlower 2025-07-23 20:26:10 -07:00 committed by TensorFlower Gardener
parent 3c1a8779ff
commit f4ee7a188e
21 changed files with 49 additions and 44 deletions

View File

@@ -303,7 +303,6 @@ build:cuda --@local_config_cuda//cuda:include_cuda_libs=true
# This configuration is used for building the wheels.
build:cuda_wheel --@local_config_cuda//cuda:include_cuda_libs=false
build:cuda_wheel --@local_config_nvshmem//:include_nvshmem_libs=false
# CUDA: This config refers to building CUDA op kernels with clang.
build:cuda_clang --config=cuda

View File

@@ -162,10 +162,3 @@ load(
nvshmem_redist_init_repository(
nvshmem_redistributions = NVSHMEM_REDISTRIBUTIONS,
)
load(
"@rules_ml_toolchain//third_party/nvshmem/hermetic:nvshmem_configure.bzl",
"nvshmem_configure",
)
nvshmem_configure(name = "local_config_nvshmem")

View File

@@ -48,6 +48,7 @@ nvidia-cusolver-cu12 == 11.6.3.83
nvidia-cusparse-cu12 == 12.5.1.3
nvidia-nccl-cu12 == 2.26.5
nvidia-nvjitlink-cu12 == 12.5.82
nvidia-nvshmem-cu12>=3.2.5
# The dependencies below are needed for TF wheel testing.
tensorflow-io-gcs-filesystem==0.37.1 ; python_version <= "3.12"
libclang >= 13.0.0

View File

@@ -465,6 +465,10 @@ nvidia-nvjitlink-cu12==12.5.82 \
# nvidia-cufft-cu12
# nvidia-cusolver-cu12
# nvidia-cusparse-cu12
nvidia-nvshmem-cu12==3.3.9 \
--hash=sha256:2de43cbfe559e16b8e3cb777b95f1fe2ddd5c2cfd79414b09cf9cf099feba2ba \
--hash=sha256:95ba1e98189c056eb5372bd355ab714b3741a03e6de1e32f167f5240fd967c5f
# via -r ci/official/requirements_updater/requirements.in
opt-einsum==3.3.0 \
--hash=sha256:2455e59e3947d3c275477df7f5205b30635e266fe6dc300e3d9f9646bfcea147 \
--hash=sha256:59f6475f77bbc37dcf7cd748519c0ec60722e91e63ca114e68821c0c54a46549

View File

@@ -465,6 +465,11 @@ nvidia-nvjitlink-cu12==12.5.82 \
# nvidia-cufft-cu12
# nvidia-cusolver-cu12
# nvidia-cusparse-cu12
nvidia-nvshmem-cu12==3.2.5 \
--hash=sha256:2f5798d65f1a08f9878aae17cf4d3dcbfe884d1f12cf170556cd40f2be90ca96 \
--hash=sha256:e076957d5cc72e51061a04f2d46f55df477be53e8a55d0d621be08f7aefe1d00
# via
# -r ci/official/requirements_updater/requirements.in
opt-einsum==3.3.0 \
--hash=sha256:2455e59e3947d3c275477df7f5205b30635e266fe6dc300e3d9f9646bfcea147 \
--hash=sha256:59f6475f77bbc37dcf7cd748519c0ec60722e91e63ca114e68821c0c54a46549

View File

@@ -465,6 +465,11 @@ nvidia-nvjitlink-cu12==12.5.82 \
# nvidia-cufft-cu12
# nvidia-cusolver-cu12
# nvidia-cusparse-cu12
nvidia-nvshmem-cu12==3.2.5 \
--hash=sha256:2f5798d65f1a08f9878aae17cf4d3dcbfe884d1f12cf170556cd40f2be90ca96 \
--hash=sha256:e076957d5cc72e51061a04f2d46f55df477be53e8a55d0d621be08f7aefe1d00
# via
# -r ci/official/requirements_updater/requirements.in
opt-einsum==3.3.0 \
--hash=sha256:2455e59e3947d3c275477df7f5205b30635e266fe6dc300e3d9f9646bfcea147 \
--hash=sha256:59f6475f77bbc37dcf7cd748519c0ec60722e91e63ca114e68821c0c54a46549

View File

@@ -469,6 +469,10 @@ nvidia-nvjitlink-cu12==12.5.82 \
# nvidia-cufft-cu12
# nvidia-cusolver-cu12
# nvidia-cusparse-cu12
nvidia-nvshmem-cu12==3.3.9 \
--hash=sha256:2de43cbfe559e16b8e3cb777b95f1fe2ddd5c2cfd79414b09cf9cf099feba2ba \
--hash=sha256:95ba1e98189c056eb5372bd355ab714b3741a03e6de1e32f167f5240fd967c5f
# via -r ci/official/requirements_updater/requirements.in
opt-einsum==3.3.0 \
--hash=sha256:2455e59e3947d3c275477df7f5205b30635e266fe6dc300e3d9f9646bfcea147 \
--hash=sha256:59f6475f77bbc37dcf7cd748519c0ec60722e91e63ca114e68821c0c54a46549

View File

@@ -48,6 +48,7 @@ nvidia-cusolver-cu12 == 11.6.3.83
nvidia-cusparse-cu12 == 12.5.1.3
nvidia-nccl-cu12 == 2.26.5
nvidia-nvjitlink-cu12 == 12.5.82
nvidia-nvshmem-cu12>=3.2.5
# The dependencies below are needed for TF wheel testing.
tensorflow-io-gcs-filesystem==0.37.1 ; python_version <= "3.12"
libclang >= 13.0.0

View File

@@ -484,6 +484,10 @@ nvidia-nvjitlink-cu12==12.5.82 \
# nvidia-cufft-cu12
# nvidia-cusolver-cu12
# nvidia-cusparse-cu12
nvidia-nvshmem-cu12==3.3.9 \
--hash=sha256:2de43cbfe559e16b8e3cb777b95f1fe2ddd5c2cfd79414b09cf9cf099feba2ba \
--hash=sha256:95ba1e98189c056eb5372bd355ab714b3741a03e6de1e32f167f5240fd967c5f
# via -r ci/official/requirements_updater/requirements.in
opt-einsum==3.3.0 \
--hash=sha256:2455e59e3947d3c275477df7f5205b30635e266fe6dc300e3d9f9646bfcea147 \
--hash=sha256:59f6475f77bbc37dcf7cd748519c0ec60722e91e63ca114e68821c0c54a46549

View File

@@ -484,6 +484,11 @@ nvidia-nvjitlink-cu12==12.5.82 \
# nvidia-cufft-cu12
# nvidia-cusolver-cu12
# nvidia-cusparse-cu12
nvidia-nvshmem-cu12==3.2.5 \
--hash=sha256:2f5798d65f1a08f9878aae17cf4d3dcbfe884d1f12cf170556cd40f2be90ca96 \
--hash=sha256:e076957d5cc72e51061a04f2d46f55df477be53e8a55d0d621be08f7aefe1d00
# via
# -r ci/official/requirements_updater/requirements.in
opt-einsum==3.3.0 \
--hash=sha256:2455e59e3947d3c275477df7f5205b30635e266fe6dc300e3d9f9646bfcea147 \
--hash=sha256:59f6475f77bbc37dcf7cd748519c0ec60722e91e63ca114e68821c0c54a46549

View File

@@ -484,6 +484,11 @@ nvidia-nvjitlink-cu12==12.5.82 \
# nvidia-cufft-cu12
# nvidia-cusolver-cu12
# nvidia-cusparse-cu12
nvidia-nvshmem-cu12==3.2.5 \
--hash=sha256:2f5798d65f1a08f9878aae17cf4d3dcbfe884d1f12cf170556cd40f2be90ca96 \
--hash=sha256:e076957d5cc72e51061a04f2d46f55df477be53e8a55d0d621be08f7aefe1d00
# via
# -r ci/official/requirements_updater/requirements.in
opt-einsum==3.3.0 \
--hash=sha256:2455e59e3947d3c275477df7f5205b30635e266fe6dc300e3d9f9646bfcea147 \
--hash=sha256:59f6475f77bbc37dcf7cd748519c0ec60722e91e63ca114e68821c0c54a46549

View File

@@ -484,6 +484,11 @@ nvidia-nvjitlink-cu12==12.5.82 \
# nvidia-cufft-cu12
# nvidia-cusolver-cu12
# nvidia-cusparse-cu12
nvidia-nvshmem-cu12==3.2.5 \
--hash=sha256:2f5798d65f1a08f9878aae17cf4d3dcbfe884d1f12cf170556cd40f2be90ca96 \
--hash=sha256:e076957d5cc72e51061a04f2d46f55df477be53e8a55d0d621be08f7aefe1d00
# via
# -r ci/official/requirements_updater/requirements.in
opt-einsum==3.3.0 \
--hash=sha256:2455e59e3947d3c275477df7f5205b30635e266fe6dc300e3d9f9646bfcea147 \
--hash=sha256:59f6475f77bbc37dcf7cd748519c0ec60722e91e63ca114e68821c0c54a46549

View File

@@ -478,6 +478,10 @@ nvidia-nvjitlink-cu12==12.5.82 \
# nvidia-cufft-cu12
# nvidia-cusolver-cu12
# nvidia-cusparse-cu12
nvidia-nvshmem-cu12==3.3.9 \
--hash=sha256:2de43cbfe559e16b8e3cb777b95f1fe2ddd5c2cfd79414b09cf9cf099feba2ba \
--hash=sha256:95ba1e98189c056eb5372bd355ab714b3741a03e6de1e32f167f5240fd967c5f
# via -r ci/official/requirements_updater/requirements.in
opt-einsum==3.3.0 \
--hash=sha256:2455e59e3947d3c275477df7f5205b30635e266fe6dc300e3d9f9646bfcea147 \
--hash=sha256:59f6475f77bbc37dcf7cd748519c0ec60722e91e63ca114e68821c0c54a46549

View File

@@ -1517,6 +1517,7 @@ cc_library(
if_true = (
if_cuda_is_configured([
"@local_xla//xla/tsl/cuda:cupti",
"@local_xla//xla/tsl/cuda:nvshmem_stub",
]) + if_nccl([
"@local_config_nccl//:nccl",
])

View File

@@ -444,6 +444,7 @@ py_import(
"@pypi_nvidia_cusparse_cu12//:pkg",
"@pypi_nvidia_nccl_cu12//:pkg",
"@pypi_nvidia_nvjitlink_cu12//:pkg",
"@pypi_nvidia_nvshmem_cu12//:pkg",
]),
deps = [
"@pypi_absl_py//:pkg",

View File

@@ -77,18 +77,11 @@ def _is_dest_file(basename, dest_files_suffixes):
def _tf_wheel_impl(ctx):
include_cuda_libs = ctx.attr.include_cuda_libs[BuildSettingInfo].value
override_include_cuda_libs = ctx.attr.override_include_cuda_libs[BuildSettingInfo].value
include_nvshmem_libs = ctx.attr.include_nvshmem_libs[BuildSettingInfo].value
override_include_nvshmem_libs = ctx.attr.override_include_nvshmem_libs[BuildSettingInfo].value
if include_cuda_libs and not override_include_cuda_libs:
fail("TF wheel shouldn't be built with CUDA dependencies." +
" Please provide `--config=cuda_wheel` for bazel build command." +
" If you absolutely need to add CUDA dependencies, provide" +
" `--@local_config_cuda//cuda:override_include_cuda_libs=true`.")
if include_nvshmem_libs and not override_include_nvshmem_libs:
fail("TF wheel shouldn't be built directly against the NVSHMEM libraries." +
" Please provide `--config=cuda_wheel` for bazel build command." +
" If you absolutely need to build links directly against the NVSHMEM libraries," +
" `provide --@local_config_nvshmem//:override_include_nvshmem_libs=true`.")
executable = ctx.executable.wheel_binary
full_wheel_version = (TF_VERSION + TF_WHEEL_VERSION_SUFFIX)
@@ -154,8 +147,6 @@ tf_wheel = rule(
),
"include_cuda_libs": attr.label(default = Label("@local_config_cuda//cuda:include_cuda_libs")),
"override_include_cuda_libs": attr.label(default = Label("@local_config_cuda//cuda:override_include_cuda_libs")),
"include_nvshmem_libs": attr.label(default = Label("@local_config_nvshmem//:include_nvshmem_libs")),
"override_include_nvshmem_libs": attr.label(default = Label("@local_config_nvshmem//:override_include_nvshmem_libs")),
"platform_tag": attr.string(mandatory = True),
"platform_name": attr.string(mandatory = True),
},

View File

@@ -140,10 +140,10 @@ def workspace():
# Details: https://github.com/google-ml-infra/rules_ml_toolchain
http_archive(
name = "rules_ml_toolchain",
sha256 = "562e0517f4e833afe0de7bb8da49f9adafcbca30a8259f118a65b4adf533b51f",
strip_prefix = "rules_ml_toolchain-4995c0be587c6e173fe8cf8dc614f92011f7913d",
sha256 = "350dd1948be2f2084814599faff76df590f83c3095f9a96fc7fa7396b190a1d3",
strip_prefix = "rules_ml_toolchain-626d94d3817af117d39b95fe57fa59115f80c268",
urls = [
"https://github.com/google-ml-infra/rules_ml_toolchain/archive/4995c0be587c6e173fe8cf8dc614f92011f7913d.tar.gz",
"https://github.com/google-ml-infra/rules_ml_toolchain/archive/626d94d3817af117d39b95fe57fa59115f80c268.tar.gz",
],
)

View File

@@ -130,10 +130,3 @@ load(
nvshmem_redist_init_repository(
nvshmem_redistributions = NVSHMEM_REDISTRIBUTIONS,
)
load(
"@rules_ml_toolchain//third_party/nvshmem/hermetic:nvshmem_configure.bzl",
"nvshmem_configure",
)
nvshmem_configure(name = "local_config_nvshmem")

View File

@@ -200,7 +200,6 @@ build:cuda --config=clang_local
# This configuration is used for building the wheels.
build:cuda_wheel --@local_config_cuda//cuda:include_cuda_libs=false
build:cuda_wheel --@local_config_nvshmem//:include_nvshmem_libs=false
# CUDA: This config refers to building CUDA op kernels with clang.
build:cuda_clang --config=cuda

View File

@@ -13,7 +13,6 @@ load(
load(
"//xla/tsl:tsl.default.bzl",
"if_cuda_libs",
"if_nvshmem_libs",
)
load("//xla/tsl/cuda:stub.bzl", "cuda_stub")
@@ -386,6 +385,6 @@ cc_library(
alias(
name = "nvshmem_stub",
actual = if_nvshmem_libs("@nvidia_nvshmem//:nvshmem", ":nvshmem"),
actual = if_cuda_libs("@nvidia_nvshmem//:nvshmem", ":nvshmem"),
visibility = ["//visibility:public"],
)

View File

@@ -44,17 +44,3 @@ def if_cuda_libs(if_true, if_false = []): # buildifier: disable=unused-variable
"@local_config_cuda//cuda:cuda_tools_and_libs": if_true,
"//conditions:default": if_false,
})
def if_nvshmem_tools(if_true, if_false = []): # buildifier: disable=unused-variable
"""Shorthand for select()'ing on whether we're building with hermetic NVSHMEM tools."""
return select({
"@local_config_nvshmem//:nvshmem_tools": if_true,
"//conditions:default": if_false,
})
def if_nvshmem_libs(if_true, if_false = []): # buildifier: disable=unused-variable
"""Shorthand for select()'ing on whether we need to include hermetic NVSHMEM libraries."""
return select({
"@local_config_nvshmem//:nvshmem_tools_and_libs": if_true,
"//conditions:default": if_false,
})