Remove linux_arm64 container as this has been moved to ml_build_arm64

PiperOrigin-RevId: 766897560
This commit is contained in:
Quoc Truong 2025-06-03 18:01:01 -07:00 committed by TensorFlower Gardener
parent d30b737e9c
commit 9fc62761fd
33 changed files with 1 addition and 3139 deletions


@ -31,14 +31,6 @@ updates:
- dependency-name: "*"
update-types: ["version-update:semver-major", "version-update:semver-minor"]
- package-ecosystem: docker
directory: /ci/official/containers/linux_arm64
schedule:
interval: monthly
ignore:
- dependency-name: "*"
update-types: ["version-update:semver-major", "version-update:semver-minor"]
- package-ecosystem: docker
directory: /tensorflow/tools/gcs_test
schedule:


@ -1 +0,0 @@
.*.swp


@ -1,91 +0,0 @@
################################################################################
FROM ubuntu:20.04@sha256:8feb4d8ca5354def3d8fce243717141ce31e2c428701f6682bd2fafe15388214 as builder
################################################################################
# Install devtoolset build dependencies
COPY setup.packages.sh setup.packages.sh
COPY builder.packages.txt builder.packages.txt
RUN /setup.packages.sh /builder.packages.txt
RUN update-ca-certificates
# Install devtoolset-10 in /dt10 with glibc 2.17 and libstdc++ 4.8, for building
# manylinux2014-compatible packages.
COPY builder.devtoolset/fixlinks_aarch64.sh /fixlinks.sh
COPY builder.devtoolset/rpm-patch.sh /rpm-patch.sh
COPY builder.devtoolset/build_devtoolset.sh /build_devtoolset.sh
COPY builder.devtoolset/gcc9-fixups.patch /gcc9-fixups.patch
COPY builder.devtoolset/stringop_trunc.patch /stringop_trunc.patch
RUN /build_devtoolset.sh devtoolset-10 /dt10
# Build a later version of patchelf, which is less buggy than the one packaged in 20.04
COPY builder.patchelf/build_patchelf.sh /build_patchelf.sh
COPY apt.conf /etc/apt/
RUN /build_patchelf.sh
################################################################################
FROM nvidia/cuda:12.3.2-devel-ubuntu20.04@sha256:cf1404fc25ae571d26e2185d37bfa3258124ab24eb96b6ac930ac71908970d0b as devel
################################################################################
COPY --from=builder /dt10 /dt10
COPY --from=builder /patchelf/patchelf_0.14.3-1_arm64.deb /patchelf/patchelf_0.14.3-1_arm64.deb
# Install devtoolset devel dependencies
COPY setup.sources.sh /setup.sources.sh
COPY setup.packages.sh /setup.packages.sh
COPY devel.packages.txt /devel.packages.txt
COPY cuda.packages.txt /cuda.packages.txt
RUN /setup.sources.sh && /setup.packages.sh /devel.packages.txt
# Install various tools.
# - bats: bash unit testing framework
# NOTE: v1.6.0 seems to have a bug that made "git" in setup_file break
# - bazelisk: always use the correct bazel version
# - buildifier: clean bazel build deps
# - buildozer: clean bazel build deps
RUN git clone --branch v1.7.0 https://github.com/bats-core/bats-core.git && bats-core/install.sh /usr/local && rm -rf bats-core
RUN wget --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 --tries=5 https://github.com/bazelbuild/bazelisk/releases/download/v1.12.0/bazelisk-linux-arm64 -O /usr/local/bin/bazel && chmod +x /usr/local/bin/bazel
RUN wget --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 --tries=5 https://github.com/bazelbuild/buildtools/releases/download/4.2.5/buildifier-linux-arm64 -O /usr/local/bin/buildifier && chmod +x /usr/local/bin/buildifier
RUN wget --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 --tries=5 https://github.com/bazelbuild/buildtools/releases/download/4.2.5/buildozer-linux-arm64 -O /usr/local/bin/buildozer && chmod +x /usr/local/bin/buildozer
RUN groupadd -g 1001 buildslave && useradd -m -u 1001 -g buildslave buildslave
RUN mkdir -p /tf/venv
RUN chown -R buildslave:buildslave /tf
RUN dpkg -i /patchelf/patchelf_0.14.3-1_arm64.deb
# All lines past this point are reset when $CACHEBUSTER is set. We need this
# for Python specifically because we install some nightly packages which are
# likely to change daily.
ARG CACHEBUSTER=0
RUN echo $CACHEBUSTER
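# For illustration only (hypothetical invocation, not part of this file):
# passing a fresh value at build time invalidates this layer and everything
# after it, forcing the nightly-package installs below to rerun:
#   docker build --build-arg CACHEBUSTER=$(date +%s) --target=tf -t build-arm64:tf-dev .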
# Setup build and environment
COPY devel.usertools /usertools
COPY devel.bashrc /root/.bashrc
COPY ld.so.conf /dt10/etc/
# Make sure clang is on the path
RUN ln -s /usr/lib/llvm-18/bin/clang /usr/bin/clang
# Setup JAX Python environment.
FROM devel as jax
RUN /setup.packages.sh /cuda.packages.txt
COPY jax.requirements.txt /devel.requirements.txt
COPY setup.python.sh /setup.python.sh
RUN /setup.python.sh python3.9 devel.requirements.txt
RUN /setup.python.sh python3.10 devel.requirements.txt
RUN /setup.python.sh python3.11 devel.requirements.txt
RUN /setup.python.sh python3.12 devel.requirements.txt
RUN /setup.python.sh python3.13 devel.requirements.txt
# python3.13-nogil is a free-threaded build of python3.13
RUN /setup.python.sh python3.13-nogil devel.requirements.txt
FROM devel as tf
# Setup TF Python environment.
COPY devel.requirements.txt /devel.requirements.txt
COPY setup.python.sh /setup.python.sh
RUN /setup.python.sh python3.9 devel.requirements.txt
RUN /setup.python.sh python3.10 devel.requirements.txt
RUN /setup.python.sh python3.11 devel.requirements.txt
RUN /setup.python.sh python3.12 devel.requirements.txt
RUN /setup.python.sh python3.13 devel.requirements.txt
# "python3" commands by default run under 3.10
RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.10 1
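# Sanity check for the alternatives setup above (illustrative): "python3 --version"
# now reports Python 3.10.x, while python3.9 through python3.13 remain invocable
# under their versioned names.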


@ -1,15 +0,0 @@
/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
APT::Default-Release "focal";


@ -1,74 +0,0 @@
#!/bin/bash
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Builds the following Docker images for Linux ARM64. See the accompanying
# Dockerfile for more details:
# - us-central1-docker.pkg.dev/tensorflow-sigs/build-arm64:jax-latest-multi-python
# - us-central1-docker.pkg.dev/tensorflow-sigs/build-arm64:tf-latest-multi-python
set -exo pipefail
function is_continuous_or_release() {
[[ "$KOKORO_JOB_TYPE" == "CONTINUOUS_INTEGRATION" ]] || [[ "$KOKORO_JOB_TYPE" == "RELEASE" ]]
}
# Move into the directory of the script
cd "$(dirname "$0")"
if is_continuous_or_release || [[ -z "$KOKORO_BUILD_ID" ]]; then
# Continuous/release jobs (and local runs without a Kokoro build ID) publish to latest
TAG="latest-multi-python"
else
# Otherwise this is a change under test; pick a per-change tag so iterative builds can reuse layers
if [[ -z "${KOKORO_GITHUB_PULL_REQUEST_NUMBER}" ]]; then
TAG=$(head -n 1 "$KOKORO_PIPER_DIR/presubmit_request.txt" | cut -d" " -f2)
else
TAG="pr-${KOKORO_GITHUB_PULL_REQUEST_NUMBER}"
fi
fi
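# Illustrative outcomes of the branches above (hypothetical values):
#   continuous/release or local run -> TAG=latest-multi-python
#   presubmit change via Piper      -> TAG=<change number from presubmit_request.txt>
#   GitHub pull request #12345      -> TAG=pr-12345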
AR_IMAGE_PATH="us-central1-docker.pkg.dev/tensorflow-sigs/tensorflow/build-arm64"
# Build for both JAX and TF usage. We do these in one place because they share
# almost all of the same cache layers
export DOCKER_BUILDKIT=1
for target in jax tf; do
AR_IMAGE="$AR_IMAGE_PATH:$target-$TAG"
docker pull "$AR_IMAGE" || true
# Resources pulled during the build are sometimes flaky, so allow the docker
# build command to be retried a few times on failure (b/302558736)
set +e
for i in $(seq 1 5)
do
docker build \
--build-arg REQUIREMENTS_FILE=jax.requirements.txt \
--target=$target \
--cache-from "$AR_IMAGE" \
-t "$AR_IMAGE" . && break
done
final=$?
if [ $final -ne 0 ]; then
exit $final
fi
set -e
INFRA_PUBLIC_TAG=infrastructure-public-image-$(docker images "$AR_IMAGE" --quiet)
AR_IMAGE_INFRA_PUBLIC="$AR_IMAGE_PATH:$INFRA_PUBLIC_TAG"
docker image tag "$AR_IMAGE" "$AR_IMAGE_INFRA_PUBLIC"
gcloud auth configure-docker us-central1-docker.pkg.dev
docker push "$AR_IMAGE_PATH" --all-tags
done
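# Illustrative result for target=tf (hypothetical image id): --all-tags pushes
# both "$AR_IMAGE_PATH:tf-$TAG" and
# "$AR_IMAGE_PATH:infrastructure-public-image-<id>" in one command.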


@ -1,157 +0,0 @@
#!/bin/bash -eu
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Builds a devtoolset cross-compiler targeting manylinux2014 (glibc 2.17 / libstdc++ 4.8).
VERSION="$1"
TARGET="$2"
case "${VERSION}" in
devtoolset-9)
LIBSTDCXX_VERSION="6.0.28"
LIBSTDCXX_ABI="new"
;;
devtoolset-10)
LIBSTDCXX_VERSION="6.0.28"
LIBSTDCXX_ABI="new"
;;
*)
echo "Usage: $0 {devtoolset-9|devtoolset-10} <target-directory> <arch>"
echo "Use 'devtoolset-9' to build a manylinux2014 compatible toolchain"
exit 1
;;
esac
mkdir -p "${TARGET}"
mkdir -p ${TARGET}/usr/include
# Put the current kernel headers from ubuntu in place.
ln -s "/usr/include/linux" "${TARGET}/usr/include/linux"
ln -s "/usr/include/asm-generic" "${TARGET}/usr/include/asm-generic"
ln -s "/usr/include/aarch64-linux-gnu/asm" "${TARGET}/usr/include/asm"
# Download glibc's shared and development libraries based on the value of the
# `VERSION` parameter.
# Note: 'templatizing' this and the other conditional branches would require
# defining several variables (version, os, path), which would make the script
# harder to maintain and extend.
mkdir -p glibc-src
mkdir -p glibc-build
cd glibc-src
wget --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 --tries=5 "https://vault.centos.org/centos/7/os/Source/SPackages/glibc-2.17-317.el7.src.rpm"
rpm2cpio "glibc-2.17-317.el7.src.rpm" |cpio -idmv
tar -xvzf "glibc-2.17-c758a686.tar.gz" --strip 1
tar -xvzf "glibc-2.17-c758a686-releng.tar.gz" --strip 1
sed -i '/patch0060/d' glibc.spec
/rpm-patch.sh "glibc.spec"
rm -f "glibc-2.17-317.el7.src.rpm" "glibc-2.17-c758a686.tar.gz" "glibc-2.17-c758a686-releng.tar.gz"
patch -p1 < /gcc9-fixups.patch
patch -p1 < /stringop_trunc.patch
cd ../glibc-build
../glibc-src/configure --prefix=/usr --disable-werror --enable-obsolete-rpc --disable-profile
make -j$(nproc)
make install DESTDIR=${TARGET}
cd ..
# Symlinks in the binary distribution are set up for installation in /usr, so we
# need to fix up all the links to stay within /${TARGET}.
/fixlinks.sh "/${TARGET}"
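# Illustration (hypothetical link): a symlink like ${TARGET}/usr/lib/libfoo.so
# pointing at ../../lib/libfoo.so.1 is re-created pointing at its resolved
# absolute path under /${TARGET}, so the sysroot stays self-contained.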
# Patch to allow non-glibc 2.12 compatible builds to work.
sed -i '54i#define TCP_USER_TIMEOUT 18' "/${TARGET}/usr/include/netinet/tcp.h"
# Download specific version of libstdc++ shared library based on the value of
# the `VERSION` parameter
# Download binary libstdc++ 4.8 shared library release
wget --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 --tries=5 "http://old-releases.ubuntu.com/ubuntu/pool/main/g/gcc-4.8/libstdc++6_4.8.1-10ubuntu8_arm64.deb" && \
unar "libstdc++6_4.8.1-10ubuntu8_arm64.deb" && \
tar -C "${TARGET}" -xvzf "libstdc++6_4.8.1-10ubuntu8_arm64/data.tar.gz" "./usr/lib/aarch64-linux-gnu/libstdc++.so.6.0.18" && \
rm -rf "libstdc++6_4.8.1-10ubuntu8_arm64.deb" "libstdc++6_4.8.1-10ubuntu8_arm64"
mkdir -p "${TARGET}-src"
cd "${TARGET}-src"
# Build a devtoolset cross-compiler based on our glibc 2.12/glibc 2.17 sysroot setup.
case "${VERSION}" in
devtoolset-9)
wget --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 --tries=5 "https://vault.centos.org/centos/7/sclo/Source/rh/devtoolset-9-gcc-9.3.1-2.2.el7.src.rpm"
rpm2cpio "devtoolset-9-gcc-9.3.1-2.2.el7.src.rpm" |cpio -idmv
tar -xvf "gcc-9.3.1-20200408.tar.xz" --strip 1
;;
devtoolset-10)
wget --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 --tries=5 "https://vault.centos.org/centos/7/sclo/Source/rh/devtoolset-10-gcc-10.2.1-11.2.el7.src.rpm"
rpm2cpio "devtoolset-10-gcc-10.2.1-11.2.el7.src.rpm" |cpio -idmv
tar -xvf "gcc-10.2.1-20210130.tar.xz" --strip 1
;;
esac
# Apply the devtoolset patches to gcc.
/rpm-patch.sh "gcc.spec"
./contrib/download_prerequisites
mkdir -p "${TARGET}-build"
cd "${TARGET}-build"
"${TARGET}-src/configure" \
--prefix="${TARGET}/usr" \
--with-sysroot="/${TARGET}" \
--disable-bootstrap \
--disable-libmpx \
--enable-libsanitizer \
--disable-libunwind-exceptions \
--disable-lto \
--disable-multilib \
--enable-__cxa_atexit \
--enable-gnu-indirect-function \
--enable-gnu-unique-object \
--enable-initfini-array \
--enable-languages="c,c++" \
--enable-linker-build-id \
--enable-plugin \
--enable-shared \
--enable-threads=posix \
--with-default-libstdcxx-abi=${LIBSTDCXX_ABI} \
--with-gcc-major-version-only \
--with-linker-hash-style="gnu" \
&& \
make -j$(nproc) && \
make install
# Create the devtoolset libstdc++ linkerscript that links dynamically against
# the system libstdc++ 4.8 and provides all other symbols statically.
# Note that the installation path for libstdc++ here is ${TARGET}/usr/lib64/
mv "${TARGET}/usr/lib64/libstdc++.so.${LIBSTDCXX_VERSION}" \
"${TARGET}/usr/lib64/libstdc++.so.${LIBSTDCXX_VERSION}.backup"
echo -e "OUTPUT_FORMAT(elf64-littleaarch64)\nINPUT ( libstdc++.so.6.0.18 -lstdc++_nonshared44 )" \
> "${TARGET}/usr/lib64/libstdc++.so.${LIBSTDCXX_VERSION}"
cp "./aarch64-unknown-linux-gnu/libstdc++-v3/src/.libs/libstdc++_nonshared44.a" \
"${TARGET}/usr/lib64"
# Link in architecture specific includes from the system; note that we cannot
# link in the whole aarch64-linux-gnu folder, as otherwise we're overlaying
# system gcc paths that we do not want to find.
# TODO(klimek): Automate linking in all non-gcc / non-kernel include
# directories.
mkdir -p "${TARGET}/usr/include/aarch64-linux-gnu"
PYTHON_VERSIONS=("python3.8" "python3.9" "python3.10" "python3.11")
for v in "${PYTHON_VERSIONS[@]}"; do
ln -s "/usr/local/include/${v}" "${TARGET}/usr/include/aarch64-linux-gnu/${v}"
done


@ -1,28 +0,0 @@
#!/bin/bash
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Re-direct all links in $1 that are relative to be canonical
BASE="$1"
find "${BASE}" -type l | \
while read l ; do
if [[ "$(readlink "$l")" == \.\./* ]]; then
CANONICAL="$(readlink "$l")";
rm "$l";
ln -s "${CANONICAL}" "$l"
fi
done


@ -1,270 +0,0 @@
diff --git a/iconv/gconv.h b/iconv/gconv.h
index 3f9112e..8e60197 100644
--- a/iconv/gconv.h
+++ b/iconv/gconv.h
@@ -174,7 +174,7 @@ typedef struct __gconv_info
{
size_t __nsteps;
struct __gconv_step *__steps;
- __extension__ struct __gconv_step_data __data __flexarr;
+ __extension__ struct __gconv_step_data __data[0];
} *__gconv_t;
#endif /* gconv.h */
diff --git a/include/libc-symbols.h b/include/libc-symbols.h
index c555bf2..143b26d 100644
--- a/include/libc-symbols.h
+++ b/include/libc-symbols.h
@@ -107,6 +107,11 @@
# endif
#endif
+#ifndef __attribute_copy__
+/* Provide an empty definition when cdefs.h is not included. */
+# define __attribute_copy__(arg)
+#endif
+
#ifndef __ASSEMBLER__
/* GCC understands weak symbols and aliases; use its interface where
possible, instead of embedded assembly language. */
@@ -114,7 +119,8 @@
/* Define ALIASNAME as a strong alias for NAME. */
# define strong_alias(name, aliasname) _strong_alias(name, aliasname)
# define _strong_alias(name, aliasname) \
- extern __typeof (name) aliasname __attribute__ ((alias (#name)));
+ extern __typeof (name) aliasname __attribute__ ((alias (#name))) \
+ __attribute_copy__ (name);
/* This comes between the return type and function name in
a function definition to make that definition weak. */
@@ -125,14 +131,16 @@
If weak aliases are not available, this defines a strong alias. */
# define weak_alias(name, aliasname) _weak_alias (name, aliasname)
# define _weak_alias(name, aliasname) \
- extern __typeof (name) aliasname __attribute__ ((weak, alias (#name)));
+ extern __typeof (name) aliasname __attribute__ ((weak, alias (#name))) \
+ __attribute_copy__ (name);
/* Same as WEAK_ALIAS, but mark symbol as hidden. */
# define weak_hidden_alias(name, aliasname) \
_weak_hidden_alias (name, aliasname)
# define _weak_hidden_alias(name, aliasname) \
extern __typeof (name) aliasname \
- __attribute__ ((weak, alias (#name), __visibility__ ("hidden")));
+ __attribute__ ((weak, alias (#name), __visibility__ ("hidden"))) \
+ __attribute_copy__ (name);
/* Declare SYMBOL as weak undefined symbol (resolved to 0 if not defined). */
# define weak_extern(symbol) _weak_extern (weak symbol)
@@ -528,7 +536,8 @@ for linking")
# define __hidden_ver1(local, internal, name) \
extern __typeof (name) __EI_##name __asm__(__hidden_asmname (#internal)); \
extern __typeof (name) __EI_##name \
- __attribute__((alias (__hidden_asmname (#local))))
+ __attribute__((alias (__hidden_asmname (#local)))) \
+ __attribute_copy__ (name)
# define hidden_ver(local, name) __hidden_ver1(local, __GI_##name, name);
# define hidden_data_ver(local, name) hidden_ver(local, name)
# define hidden_def(name) __hidden_ver1(__GI_##name, name, name);
@@ -541,7 +550,8 @@ for linking")
# define __hidden_nolink1(local, internal, name, version) \
__hidden_nolink2 (local, internal, name, version)
# define __hidden_nolink2(local, internal, name, version) \
- extern __typeof (name) internal __attribute__ ((alias (#local))); \
+ extern __typeof (name) internal __attribute__ ((alias (#local))) \
+ __attribute_copy__ (name); \
__hidden_nolink3 (local, internal, #name "@" #version)
# define __hidden_nolink3(local, internal, vername) \
__asm__ (".symver " #internal ", " vername);
diff --git a/locale/weightwc.h b/locale/weightwc.h
index e966c03..22ab790 100644
--- a/locale/weightwc.h
+++ b/locale/weightwc.h
@@ -79,19 +79,19 @@ findidx (const wint_t **cpp, size_t len)
if (cp[cnt] != usrc[cnt])
break;
- if (cnt < nhere - 1)
+ if (cnt < nhere - 1 || cnt == len)
{
cp += 2 * nhere;
continue;
}
- if (cp[nhere - 1] > usrc[nhere -1])
+ if (cp[nhere - 1] > usrc[nhere - 1])
{
cp += 2 * nhere;
continue;
}
- if (cp[2 * nhere - 1] < usrc[nhere -1])
+ if (cp[2 * nhere - 1] < usrc[nhere - 1])
{
cp += 2 * nhere;
continue;
diff --git a/locale/xlocale.h b/locale/xlocale.h
index 98c080b..843bd45 100644
--- a/locale/xlocale.h
+++ b/locale/xlocale.h
@@ -20,6 +20,9 @@
#ifndef _XLOCALE_H
#define _XLOCALE_H 1
+#ifndef _BITS_TYPES___LOCALE_T_H
+#define _BITS_TYPES___LOCALE_T_H 1
+
/* Structure for reentrant locale using functions. This is an
(almost) opaque type for the user level programs. The file and
this data structure is not standardized. Don't rely on it. It can
@@ -41,4 +44,6 @@ typedef struct __locale_struct
/* POSIX 2008 makes locale_t official. */
typedef __locale_t locale_t;
+#endif /* bits/types/__locale_t.h */
+
#endif /* xlocale.h */
diff --git a/misc/sys/cdefs.h b/misc/sys/cdefs.h
index d1cb3dd..30482a1 100644
--- a/misc/sys/cdefs.h
+++ b/misc/sys/cdefs.h
@@ -423,4 +423,14 @@
# endif
#endif
+/* Undefine (also defined in libc-symbols.h). */
+#undef __attribute_copy__
+#if __GNUC_PREREQ (9, 0)
+/* Copies attributes from the declaration or type referenced by
+ the argument. */
+# define __attribute_copy__(arg) __attribute__ ((__copy__ (arg)))
+#else
+# define __attribute_copy__(arg)
+#endif
+
#endif /* sys/cdefs.h */
diff --git a/stdlib/setenv.c b/stdlib/setenv.c
index 45efe2e..06bfab0 100644
--- a/stdlib/setenv.c
+++ b/stdlib/setenv.c
@@ -319,6 +319,7 @@ unsetenv (const char *name)
ep = __environ;
if (ep != NULL)
+ {
while (*ep != NULL)
if (!strncmp (*ep, name, len) && (*ep)[len] == '=')
{
@@ -332,6 +333,7 @@ unsetenv (const char *name)
}
else
++ep;
+ }
UNLOCK;
diff --git a/support/Makefile b/support/Makefile
index a253698..2f4e2a9 100644
--- a/support/Makefile
+++ b/support/Makefile
@@ -167,13 +167,6 @@ CFLAGS-support_paths.c = \
-DINSTDIR_PATH=\"$(prefix)\" \
-DLIBDIR_PATH=\"$(libdir)\"
-ifeq (,$(CXX))
-LINKS_DSO_PROGRAM = links-dso-program-c
-else
-LINKS_DSO_PROGRAM = links-dso-program
-LDLIBS-links-dso-program = -lstdc++ -lgcc -lgcc_s $(libunwind)
-endif
-
LDLIBS-test-container = $(libsupport)
others += test-container
@@ -182,9 +175,6 @@ others-noinstall += test-container
others += shell-container echo-container true-container
others-noinstall += shell-container echo-container true-container
-others += $(LINKS_DSO_PROGRAM)
-others-noinstall += $(LINKS_DSO_PROGRAM)
-
$(objpfx)test-container : $(libsupport)
$(objpfx)shell-container : $(libsupport)
$(objpfx)echo-container : $(libsupport)
diff --git a/support/links-dso-program.cc b/support/links-dso-program.cc
index 8ff3155..f9d2b77 100644
--- a/support/links-dso-program.cc
+++ b/support/links-dso-program.cc
@@ -3,6 +3,11 @@
backported. */
#define _ISOMAC 1
+#define __GLIBC_USE(F) __GLIBC_USE_ ## F
+
+# define __attribute_alloc_size__(params) \
+ __attribute__ ((__alloc_size__ params))
+
#include <iostream>
using namespace std;
diff --git a/sysdeps/aarch64/dl-machine.h b/sysdeps/aarch64/dl-machine.h
index 185402f..bbdeae0 100644
--- a/sysdeps/aarch64/dl-machine.h
+++ b/sysdeps/aarch64/dl-machine.h
@@ -49,23 +49,11 @@ elf_machine_load_address (void)
/* To figure out the load address we use the definition that for any symbol:
dynamic_addr(symbol) = static_addr(symbol) + load_addr
- The choice of symbol is arbitrary. The static address we obtain
- by constructing a non GOT reference to the symbol, the dynamic
- address of the symbol we compute using adrp/add to compute the
- symbol's address relative to the PC. */
-
- ElfW(Addr) static_addr;
- ElfW(Addr) dynamic_addr;
-
- asm (" \n\
- adrp %1, _dl_start; \n\
- add %1, %1, #:lo12:_dl_start \n\
- ldr %w0, 1f \n\
- b 2f \n\
-1: .word _dl_start \n\
-2: \n\
- " : "=r" (static_addr), "=r" (dynamic_addr));
- return dynamic_addr - static_addr;
+ The _DYNAMIC symbol is used here, as its link-time address is stored in
+ the special unrelocated first GOT entry. */
+
+ extern ElfW(Dyn) _DYNAMIC[] attribute_hidden;
+ return (ElfW(Addr)) &_DYNAMIC - elf_machine_dynamic ();
}
/* Set up the loaded object described by L so its unrelocated PLT
diff --git a/sysdeps/ieee754/dbl-64/k_rem_pio2.c b/sysdeps/ieee754/dbl-64/k_rem_pio2.c
index fcf956a..e2c5d29 100644
--- a/sysdeps/ieee754/dbl-64/k_rem_pio2.c
+++ b/sysdeps/ieee754/dbl-64/k_rem_pio2.c
@@ -172,7 +172,8 @@ int __kernel_rem_pio2(double *x, double *y, int e0, int nx, int prec, const int3
/* compute q[0],q[1],...q[jk] */
for (i=0;i<=jk;i++) {
- for(j=0,fw=0.0;j<=jx;j++) fw += x[j]*f[jx+i-j]; q[i] = fw;
+ for(j=0,fw=0.0;j<=jx;j++) fw += x[j]*f[jx+i-j];
+ q[i] = fw;
}
jz = jk;
diff --git a/sysdeps/ieee754/flt-32/k_rem_pio2f.c b/sysdeps/ieee754/flt-32/k_rem_pio2f.c
index e54a067..215b0e0 100644
--- a/sysdeps/ieee754/flt-32/k_rem_pio2f.c
+++ b/sysdeps/ieee754/flt-32/k_rem_pio2f.c
@@ -65,7 +65,8 @@ int __kernel_rem_pio2f(float *x, float *y, int e0, int nx, int prec, const int32
/* compute q[0],q[1],...q[jk] */
for (i=0;i<=jk;i++) {
- for(j=0,fw=0.0;j<=jx;j++) fw += x[j]*f[jx+i-j]; q[i] = fw;
+ for(j=0,fw=0.0;j<=jx;j++) fw += x[j]*f[jx+i-j];
+ q[i] = fw;
}
jz = jk;


@ -1,28 +0,0 @@
#!/bin/bash -eu
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Given an RPM spec file $1, apply its patches.
SPEC="$1"
grep '%patch' "${SPEC}" |while read cmd ; do
N=$(echo "${cmd}" |sed 's,%patch\([0-9]\+\).*,\1,')
file=$(grep "Patch$N:" "${SPEC}" |sed 's,.*: ,,')
parg=$(echo "${cmd}" |sed 's,.*\(-p[0-9]\).*,\1,')
if [[ ! "${file}" =~ doxygen && "${cmd}" != \#* ]]; then
echo "patch ${parg} -s < ${file}"
patch ${parg} -s < "${file}"
fi
done
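# Worked example (hypothetical spec contents): given
#   Patch0012: glibc-rh123456.patch
#   %patch0012 -p1
# the loop extracts N=0012, file=glibc-rh123456.patch, parg=-p1 and runs:
#   patch -p1 -s < glibc-rh123456.patch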


@ -1,36 +0,0 @@
# Packages needed to install Python from source
# See https://github.com/pyenv/pyenv/wiki#suggested-build-environment
build-essential
curl
libbz2-dev
libffi-dev
liblzma-dev
libncurses5-dev
libreadline-dev
libsqlite3-dev
libssl-dev
libxml2-dev
libxmlsec1-dev
llvm
make
openssl
tk-dev
wget
xz-utils
zlib1g-dev
git
# Packages needed to build devtoolset
file
flex
g++
make
patch
rpm2cpio
unar
wget
xz-utils
cpio
gawk
texinfo
gettext


@ -1,28 +0,0 @@
#!/bin/bash -eu
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# The patchelf shipped in 20.04 is buggy, so fetch the patchelf source from
# 22.04 (i.e. 'jammy') and build it here; this avoids the dependency problems
# a jammy binary package would cause on focal
mkdir -p /patchelf
cd /patchelf
echo "deb-src http://ports.ubuntu.com/ubuntu-ports/ jammy universe" >> /etc/apt/sources.list
apt-get update
apt-get -y build-dep patchelf/jammy
apt-get -b source patchelf/jammy
# This will leave a .deb file for installation in a later stage
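# Illustrative artifact (version observed in the devel stage; may differ):
#   /patchelf/patchelf_0.14.3-1_arm64.deb
# which the Dockerfile later installs with "dpkg -i".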


@ -1,8 +0,0 @@
# CuDNN: https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html#ubuntu-network-installation
libcudnn9-dev-cuda-12=9.1.1.17-1
libcudnn9-cuda-12=9.1.1.17-1
# This can be removed once NVIDIA publishes a cuda-12.3.2 Docker image.
# For now it ensures that we install at least version 12.3.107 of PTXAS,
# since 12.3.103 has a bug.
cuda-compiler-12-3=12.3.2-1


@ -1,26 +0,0 @@
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
# Do not print anything if this is not being used interactively
[ -z "$PS1" ] && return
# Set up attractive prompt
export PS1="\[\e[31m\]tf-docker\[\e[m\] \[\e[33m\]\w\[\e[m\] > "
export TERM=xterm-256color
alias grep="grep --color=auto"
alias ls="ls --color=auto"
# Fix nvidia-docker
ldconfig


@ -1,45 +0,0 @@
# Other build-related tools
autoconf
automake
build-essential
ca-certificates
llvm-18
clang-18
clang-format-12
lld-18
colordiff
curl
ffmpeg
gdb
git
jq
less
libcurl3-dev
libcurl4-openssl-dev
libfreetype6-dev
libhdf5-serial-dev
libomp-18-dev
libssl-dev
libtool
libxml2-dev
libxslt1-dev
libzmq3-dev
mlocate
moreutils
openjdk-21-jdk
openjdk-21-jre-headless
openssl
parallel
patchelf
pkg-config
python3-dev
python3-setuptools
rsync
software-properties-common
sudo
swig
unzip
vim
wget
zip
zlib1g-dev


@ -1,4 +0,0 @@
portpicker==1.6.0
# For wheel verification, and uploading
auditwheel ~= 5.4.0
twine ~= 4.0.2


@ -1,86 +0,0 @@
# This bazelrc can build a CPU-supporting TF package.
# Convenient cache configurations
# Use a cache directory mounted to /tf/cache. Very useful!
build:sigbuild_local_cache --disk_cache=/tf/cache
# Use the public-access TF DevInfra cache (read only)
build:sigbuild_remote_cache --remote_cache="https://storage.googleapis.com/tensorflow-devinfra-bazel-cache/manylinux2014" --remote_upload_local_results=false
# Change the value of CACHEBUSTER when upgrading the toolchain, or when testing
# different compilation methods. E.g. for a PR to test a new CUDA version, set
# the CACHEBUSTER to the PR number.
build --action_env=CACHEBUSTER=20220325
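# For example (hypothetical PR number), a one-off override while testing a
# new toolchain:
#   build --action_env=CACHEBUSTER=12345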
# Use Python 3.X as installed in container image
build --action_env PYTHON_BIN_PATH="/usr/local/bin/python3"
build --python_path="/usr/local/bin/python3"
# Build TensorFlow v2
build --define=tf_api_version=2 --action_env=TF2_BEHAVIOR=1
# Prevent double-compilation of some TF code, ref. b/183279666 (internal)
# > TF's gen_api_init_files has a genrule to run the core TensorFlow code
# > on the host machine. If we don't have --distinct_host_configuration=false,
# > the core TensorFlow code will be built once for the host and once for the
# > target platform.
# See also https://docs.bazel.build/versions/master/guide.html#build-configurations-and-cross-compilation
build --distinct_host_configuration=false
# Store performance profiling log in the mounted artifact directory.
# The profile can be viewed by visiting chrome://tracing in a Chrome browser.
# See https://docs.bazel.build/versions/main/skylark/performance.html#performance-profiling
build --profile=/tf/pkg/profile.json.gz
# Use the rebuilt gcc toolchain to compile for manylinux2014
build --crosstool_top="@ml2014_aarch64_config_aarch64//crosstool:toolchain"
test --crosstool_top="@ml2014_aarch64_config_aarch64//crosstool:toolchain"
build --copt="-mtune=generic" --copt="-march=armv8-a" --copt="-O3"
# Test-related settings below this point.
test --build_tests_only --keep_going --test_output=errors --verbose_failures=true
test --test_timeout=500,900,-1,-1
# Give only the list of failed tests at the end of the log
test --test_summary=short
# "nonpip" tests are regular py_test tests.
# Pass --config=nonpip to run the same suite of tests. If you want to run just
# one test for investigation, you don't need --config=nonpip; just run the
# bazel test invocation as normal.
test:nonpip_filters --test_tag_filters=-no_oss,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only,-no_aarch64,-no_oss_py38,-no_oss_py39,-no_oss_py310
test:nonpip_filters --build_tag_filters=-no_oss,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only,-no_aarch64,-no_oss_py38,-no_oss_py39,-no_oss_py310
test:nonpip_filters --test_lang_filters=py --flaky_test_attempts=3 --test_size_filters=small,medium
test:nonpip --config=nonpip_filters -- //tensorflow/... -//tensorflow/compiler/tf2tensorrt/... -//tensorflow/core/tpu/... -//tensorflow/lite/... -//tensorflow/tools/toolchains/...
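# Example invocation (illustrative; pair with a cache config as needed):
#   bazel test --config=sigbuild_local_cache --config=nonpip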
# "pip tests" run a similar suite of tests the "nonpip" tests, but do something
# odd to attempt to validate the quality of the pip package. The wheel is
# installed into a virtual environment, and then that venv is used to run all
# bazel tests with a special flag "--define=no_tensorflow_py_deps=true", which
# drops all the bazel dependencies for each py_test; this makes all the tests
# use the wheel's TensorFlow installation instead of the one made available
# through bazel. This must be done in a different root directory, //bazel_pip/...,
# because "import tensorflow" run from the root directory would instead import
# the folder instead of the venv package.
#
# Pass --config=pip to run the same suite of tests. If you want to run just one
# test for investigation, you'll need --config=pip_venv instead, and then you
# can specify whichever target you want.
test:pip_venv --action_env PYTHON_BIN_PATH="/bazel_pip/bin/python3"
test:pip_venv --action_env PYTHON_LIB_PATH="/bazel_pip/lib/python3/site-packages"
test:pip_venv --python_path="/bazel_pip/bin/python3"
test:pip_venv --define=no_tensorflow_py_deps=true
test:pip --config=pip_venv
# Yes, we don't exclude the gpu tests on pip for some reason.
test:pip_filters --test_tag_filters=-nopip,-no_pip,-no_oss,-oss_serial,-benchmark-test,-v1only,-no_aarch64,-no_oss_py38,-no_oss_py39,-no_oss_py310
test:pip_filters --build_tag_filters=-nopip,-no_pip,-no_oss,-oss_serial,-benchmark-test,-v1only,-no_aarch64,-no_oss_py38,-no_oss_py39,-no_oss_py310
test:pip_filters --test_lang_filters=py --flaky_test_attempts=3 --test_size_filters=small,medium
test:pip --config=pip_filters -- //bazel_pip/tensorflow/... -//bazel_pip/tensorflow/python/integration_testing/... -//bazel_pip/tensorflow/compiler/tf2tensorrt/... -//bazel_pip/tensorflow/core/tpu/... -//bazel_pip/tensorflow/lite/... -//tensorflow/tools/toolchains/...
# For building libtensorflow archives
test:libtensorflow_test -- //tensorflow/tools/lib_package:libtensorflow_test //tensorflow/tools/lib_package:libtensorflow_java_test
build:libtensorflow_build -- //tensorflow/tools/lib_package:libtensorflow.tar.gz //tensorflow/tools/lib_package:libtensorflow_jni.tar.gz //tensorflow/java:libtensorflow.jar //tensorflow/java:libtensorflow-src.jar //tensorflow/tools/lib_package:libtensorflow_proto.zip
# For continuous builds
test:pycpp_filters --test_tag_filters=-no_oss,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only,-no_aarch64,-no_oss_py38,-no_oss_py39,-no_oss_py310
test:pycpp_filters --build_tag_filters=-no_oss,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only,-no_aarch64,-no_oss_py38,-no_oss_py39,-no_oss_py310
test:pycpp_filters --test_lang_filters=cc,py --flaky_test_attempts=3 --test_size_filters=small,medium
test:pycpp --config=pycpp_filters -- //tensorflow/... -//tensorflow/compiler/tf2tensorrt/... -//tensorflow/core/tpu/... -//tensorflow/lite/... -//tensorflow/tools/toolchains/...


@ -1,97 +0,0 @@
# This bazelrc can build a CPU-supporting TF package.
# Convenient cache configurations
# Use a cache directory mounted to /tf/cache. Very useful!
build:sigbuild_local_cache --disk_cache=/tf/cache
# Use the public-access TF DevInfra cache (read only)
build:sigbuild_remote_cache --remote_cache="https://storage.googleapis.com/tensorflow-devinfra-bazel-cache/manylinux2014" --remote_upload_local_results=false
# Change the value of CACHEBUSTER when upgrading the toolchain, or when testing
# different compilation methods. E.g. for a PR to test a new CUDA version, set
# the CACHEBUSTER to the PR number.
build --action_env=CACHEBUSTER=20220325
# Use Python 3.X as installed in container image
build --action_env PYTHON_BIN_PATH="/usr/local/bin/python3"
build --python_path="/usr/local/bin/python3"
# Build TensorFlow v2
build --define=tf_api_version=2 --action_env=TF2_BEHAVIOR=1
# Use lld as the linker
build --linkopt="-fuse-ld=lld"
build --linkopt="-lm"
build --linkopt="-Wl,--undefined-version"
# Prevent double-compilation of some TF code, ref. b/183279666 (internal)
# > TF's gen_api_init_files has a genrule to run the core TensorFlow code
# > on the host machine. If we don't have --distinct_host_configuration=false,
# > the core TensorFlow code will be built once for the host and once for the
# > target platform.
# See also https://docs.bazel.build/versions/master/guide.html#build-configurations-and-cross-compilation
build --distinct_host_configuration=false
# Disable clang extension that rejects type definitions within offsetof.
# This was added in clang-16 by https://reviews.llvm.org/D133574.
# Can be removed once upb is updated, since a type definition is used within
# offsetof in the current version of upb.
# See https://github.com/protocolbuffers/upb/blob/9effcbcb27f0a665f9f345030188c0b291e32482/upb/upb.c#L183.
build --copt=-Wno-gnu-offsetof-extensions
# Store performance profiling log in the mounted artifact directory.
# The profile can be viewed by visiting chrome://tracing in a Chrome browser.
# See https://docs.bazel.build/versions/main/skylark/performance.html#performance-profiling
build --profile=/tf/pkg/profile.json.gz
# Use the rebuilt gcc toolchain to compile for manylinux2014
build --crosstool_top="@ml2014_clang_aarch64_config_aarch64//crosstool:toolchain"
build --copt="-mtune=generic" --copt="-march=armv8-a" --copt="-O3"
# Test-related settings below this point.
test --build_tests_only --keep_going --test_output=errors --verbose_failures=true
test --test_timeout=500,900,-1,-1
# Give only the list of failed tests at the end of the log
test --test_summary=short
# "nonpip" tests are regular py_test tests.
# Pass --config=nonpip to run the same suite of tests. If you want to run just
# one test for investigation, you don't need --config=nonpip; just run the
# bazel test invocation as normal.
test:nonpip_filters --test_tag_filters=-no_oss,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only,-no_aarch64,-no_oss_py38,-no_oss_py39,-no_oss_py310
test:nonpip_filters --build_tag_filters=-no_oss,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only,-no_aarch64,-no_oss_py38,-no_oss_py39,-no_oss_py310
test:nonpip_filters --test_lang_filters=py --flaky_test_attempts=3 --test_size_filters=small,medium
test:nonpip --config=nonpip_filters -- //tensorflow/... -//tensorflow/compiler/tf2tensorrt/... -//tensorflow/core/tpu/... -//tensorflow/lite/... -//tensorflow/tools/toolchains/...
# "pip tests" run a similar suite of tests the "nonpip" tests, but do something
# odd to attempt to validate the quality of the pip package. The wheel is
# installed into a virtual environment, and then that venv is used to run all
# bazel tests with a special flag "--define=no_tensorflow_py_deps=true", which
# drops all the bazel dependencies for each py_test; this makes all the tests
# use the wheel's TensorFlow installation instead of the one made available
# through bazel. This must be done in a different root directory, //bazel_pip/...,
# because "import tensorflow" run from the root directory would instead import
# the folder instead of the venv package.
#
# Pass --config=pip to run the same suite of tests. If you want to run just one
# test for investigation, you'll need --config=pip_venv instead, and then you
# can specify whichever target you want.
test:pip_venv --action_env PYTHON_BIN_PATH="/bazel_pip/bin/python3"
test:pip_venv --action_env PYTHON_LIB_PATH="/bazel_pip/lib/python3/site-packages"
test:pip_venv --python_path="/bazel_pip/bin/python3"
test:pip_venv --define=no_tensorflow_py_deps=true
test:pip --config=pip_venv
# Yes, we don't exclude the gpu tests on pip for some reason.
test:pip_filters --test_tag_filters=-nopip,-no_pip,-no_oss,-oss_serial,-benchmark-test,-v1only,-no_aarch64,-no_oss_py38,-no_oss_py39,-no_oss_py310
test:pip_filters --build_tag_filters=-nopip,-no_pip,-no_oss,-oss_serial,-benchmark-test,-v1only,-no_aarch64,-no_oss_py38,-no_oss_py39,-no_oss_py310
test:pip_filters --test_lang_filters=py --flaky_test_attempts=3 --test_size_filters=small,medium
test:pip --config=pip_filters -- //bazel_pip/tensorflow/... -//bazel_pip/tensorflow/python/integration_testing/... -//bazel_pip/tensorflow/compiler/tf2tensorrt/... -//bazel_pip/tensorflow/core/tpu/... -//bazel_pip/tensorflow/lite/... -//tensorflow/tools/toolchains/...
# For building libtensorflow archives
test:libtensorflow_test -- //tensorflow/tools/lib_package:libtensorflow_test //tensorflow/tools/lib_package:libtensorflow_java_test
build:libtensorflow_build -- //tensorflow/tools/lib_package:libtensorflow.tar.gz //tensorflow/tools/lib_package:libtensorflow_jni.tar.gz //tensorflow/java:libtensorflow.jar //tensorflow/java:libtensorflow-src.jar //tensorflow/tools/lib_package:libtensorflow_proto.zip
# For continuous builds
test:pycpp_filters --test_tag_filters=-no_oss,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only,-no_aarch64,-no_oss_py38,-no_oss_py39,-no_oss_py310
test:pycpp_filters --build_tag_filters=-no_oss,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only,-no_aarch64,-no_oss_py38,-no_oss_py39,-no_oss_py310
test:pycpp_filters --test_lang_filters=cc,py --flaky_test_attempts=3 --test_size_filters=small,medium
test:pycpp --config=pycpp_filters -- //tensorflow/... -//tensorflow/compiler/tf2tensorrt/... -//tensorflow/core/tpu/... -//tensorflow/lite/... -//tensorflow/tools/toolchains/...


@ -1,76 +0,0 @@
# vim: filetype=bash
#
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
setup_file() {
cd /tf/tensorflow
bazel version # Start the bazel server
# Without this, git errors out if the /tf/tensorflow directory owner differs from the current user
git config --global --add safe.directory /tf/tensorflow
# Note that you could generate a list of all the affected targets with e.g.:
# bazel query $(paste -sd "+" $BATS_FILE_TMPDIR/changed_files) --keep_going
# Only shows Added, Changed, Modified, Renamed, and Type-changed files
if [[ "$(git rev-parse --abbrev-ref HEAD)" = "pull_branch" ]]; then
# TF's CI runs 'git fetch origin "pull/PR#/merge:pull_branch"'
# To get the as-merged branch during the CI tests
git diff --diff-filter ACMRT --name-only pull_branch^ pull_branch > $BATS_FILE_TMPDIR/changed_files
else
# If the branch is not present, then diff against origin/master
git diff --diff-filter ACMRT --name-only origin/master > $BATS_FILE_TMPDIR/changed_files
fi
}
# Note: this is excluded on the full code base, since any submitted code must
# have passed Google's internal style guidelines.
@test "Check buildifier formatting on BUILD files" {
echo "buildifier formatting is recommended. Here are the suggested fixes:"
echo "============================="
grep -e 'BUILD' $BATS_FILE_TMPDIR/changed_files \
| xargs buildifier -v -mode=diff -diff_command="git diff --no-index"
}
# Note: this is excluded on the full code base, since any submitted code must
# have passed Google's internal style guidelines.
@test "Check formatting for C++ files" {
skip "clang-format doesn't match internal clang-format checker"
echo "clang-format is recommended. Here are the suggested changes:"
echo "============================="
grep -e '\.h$' -e '\.cc$' $BATS_FILE_TMPDIR/changed_files > $BATS_TEST_TMPDIR/files || true
if [[ ! -s $BATS_TEST_TMPDIR/files ]]; then return 0; fi
xargs -a $BATS_TEST_TMPDIR/files -i -n1 -P $(nproc --all) \
bash -c 'clang-format-12 --style=Google {} | git diff --no-index {} -' \
| tee $BATS_TEST_TMPDIR/needs_help.txt
echo "You can use clang-format --style=Google -i <file> to apply changes to a file."
[[ ! -s $BATS_TEST_TMPDIR/needs_help.txt ]]
}
# Note: this is excluded on the full code base, since any submitted code must
# have passed Google's internal style guidelines.
@test "Check pylint for Python files" {
echo "Python formatting is recommended. Here are the pylint errors:"
echo "============================="
grep -e "\.py$" $BATS_FILE_TMPDIR/changed_files > $BATS_TEST_TMPDIR/files || true
if [[ ! -s $BATS_TEST_TMPDIR/files ]]; then return 0; fi
xargs -a $BATS_TEST_TMPDIR/files -n1 -P $(nproc --all) \
python -m pylint --rcfile=tensorflow/tools/ci_build/pylintrc --score false \
| grep -v "**** Module" \
| tee $BATS_TEST_TMPDIR/needs_help.txt
[[ ! -s $BATS_TEST_TMPDIR/needs_help.txt ]]
}
teardown_file() {
bazel shutdown
}


@ -1,311 +0,0 @@
# vim: filetype=bash
#
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
setup_file() {
cd /tf/tensorflow
bazel version # Start the bazel server
}
# Do a bazel query specifically for the licenses checker. It searches for
# targets matching the provided query, which start with // or @ but not
# //tensorflow (so it looks for //third_party, //external, etc.), and then
# gathers the list of all packages (i.e. directories) which contain those
# targets.
license_query() {
bazel cquery --experimental_cc_shared_library "$1" --keep_going \
| grep -e "^//" -e "^@" \
| grep -E -v "^//tensorflow" \
| sed -e 's|:.*||' \
| sort -u
}
# Verify that, given a build target and a license-list generator target, all of
# the dependencies of that target which include a license notice file are then
# included when generating that license. Necessary because the license targets
# in TensorFlow are manually enumerated rather than generated automatically.
do_external_licenses_check(){
BUILD_TARGET="$1"
LICENSES_TARGET="$2"
# grep patterns for targets which are allowed to be missing from the licenses
cat > $BATS_TEST_TMPDIR/allowed_to_be_missing <<EOF
@absl_py//absl
@bazel_tools//platforms
@bazel_tools//third_party/
@bazel_tools//tools
@local
@com_google_absl//absl
@org_tensorflow//
@com_github_googlecloudplatform_google_cloud_cpp//google
@com_github_grpc_grpc//src/compiler
@platforms//os
@ruy//
EOF
# grep patterns for targets which are allowed to be extra licenses
cat > $BATS_TEST_TMPDIR/allowed_to_be_extra <<EOF
@local_xla//third_party/mkl
@local_xla//third_party/mkl_dnn
@absl_py//
@bazel_tools//src
@bazel_tools//platforms
@bazel_tools//tools/
@org_tensorflow//tensorflow
@com_google_absl//
//external
@local
@com_github_googlecloudplatform_google_cloud_cpp//
@embedded_jdk//
^//$
@ruy//
EOF
license_query "attr('licenses', 'notice', deps($BUILD_TARGET))" > $BATS_TEST_TMPDIR/expected_licenses
license_query "deps($LICENSES_TARGET)" > $BATS_TEST_TMPDIR/actual_licenses
# Column 1 is left only, Column 2 is right only, Column 3 is shared lines
# Select lines unique to actual_licenses, i.e. extra licenses.
comm -1 -3 $BATS_TEST_TMPDIR/expected_licenses $BATS_TEST_TMPDIR/actual_licenses | grep -v -f $BATS_TEST_TMPDIR/allowed_to_be_extra > $BATS_TEST_TMPDIR/actual_extra_licenses || true
# Select lines unique to expected_licenses, i.e. missing licenses
comm -2 -3 $BATS_TEST_TMPDIR/expected_licenses $BATS_TEST_TMPDIR/actual_licenses | grep -v -f $BATS_TEST_TMPDIR/allowed_to_be_missing > $BATS_TEST_TMPDIR/actual_missing_licenses || true
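# Tiny illustration of the two comm calls above (hypothetical sorted lists):
#   comm -1 -3 <(printf 'a\nb\n') <(printf 'b\nc\n')   # prints "c" (extra)
#   comm -2 -3 <(printf 'a\nb\n') <(printf 'b\nc\n')   # prints "a" (missing)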
if [[ -s $BATS_TEST_TMPDIR/actual_extra_licenses ]]; then
echo "Please remove the following extra licenses from $LICENSES_TARGET:"
cat $BATS_TEST_TMPDIR/actual_extra_licenses
fi
if [[ -s $BATS_TEST_TMPDIR/actual_missing_licenses ]]; then
echo "Please include the missing licenses for the following packages in $LICENSES_TARGET:"
cat $BATS_TEST_TMPDIR/actual_missing_licenses
fi
# Fail if either of the two "extras" or "missing" lists are present. If so,
# then the user will see the above error messages.
[[ ! -s $BATS_TEST_TMPDIR/actual_extra_licenses ]] && [[ ! -s $BATS_TEST_TMPDIR/actual_missing_licenses ]]
}
@test "Pip package generated license includes all dependencies' licenses" {
do_external_licenses_check \
"//tensorflow/tools/pip_package:build_pip_package" \
"//tensorflow/tools/pip_package:licenses"
}
@test "Libtensorflow generated license includes all dependencies' licenses" {
do_external_licenses_check \
"//tensorflow:libtensorflow.so" \
"//tensorflow/tools/lib_package:clicenses_generate"
}
@test "Java library generated license includes all dependencies' licenses" {
do_external_licenses_check \
"//tensorflow/java:libtensorflow_jni.so" \
"//tensorflow/tools/lib_package:jnilicenses_generate"
}
# This test ensures that all the targets built into the Python package include
# their dependencies. It's a rewritten version of the "smoke test", an older
# Python script that was very difficult to understand. See
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/pip_package/pip_smoke_test.py
@test "Pip package includes all required //tensorflow dependencies" {
# grep patterns for packages whose dependencies can be ignored
cat > $BATS_TEST_TMPDIR/ignore_deps_for_these_packages <<EOF
//tensorflow/lite
//tensorflow/compiler/mlir/lite
//tensorflow/compiler/mlir/tfrt
//tensorflow/core/runtime_fallback
//tensorflow/core/tfrt
//tensorflow/python/kernel_tests/signal
//tensorflow/examples
//tensorflow/tools/android
//tensorflow/python/eager/benchmarks
EOF
# grep patterns for files and targets which don't need to be in the pip
# package, ever.
cat > $BATS_TEST_TMPDIR/ignore_these_deps <<EOF
benchmark
_test$
_test.py$
_test_cpu$
_test_cpu.py$
_test_gpu$
_test_gpu.py$
_test_lib$
//tensorflow/cc/saved_model:saved_model_test_files
//tensorflow/cc/saved_model:saved_model_half_plus_two
//tensorflow:no_tensorflow_py_deps
//tensorflow/tools/pip_package:win_pip_package_marker
//tensorflow/core:image_testdata
//tensorflow/core/lib/lmdb:lmdb_testdata
//tensorflow/core/lib/lmdb/testdata:lmdb_testdata
//tensorflow/core/kernels/cloud:bigquery_reader_ops
//tensorflow/python:extra_py_tests_deps
//tensorflow/python:mixed_precision
//tensorflow/python:tf_optimizer
//tensorflow/python:compare_test_proto_py
//tensorflow/python/framework:test_ops_2
//tensorflow/python/framework:test_file_system.so
//tensorflow/python/debug:grpc_tensorflow_server.par
//tensorflow/python/feature_column:vocabulary_testdata
//tensorflow/python/util:nest_test_main_lib
//tensorflow/lite/experimental/examples/lstm:rnn_cell
//tensorflow/lite/experimental/examples/lstm:rnn_cell.py
//tensorflow/lite/experimental/examples/lstm:unidirectional_sequence_lstm_test
//tensorflow/lite/experimental/examples/lstm:unidirectional_sequence_lstm_test.py
//tensorflow/lite/python:interpreter
//tensorflow/lite/python:interpreter_test
//tensorflow/lite/python:interpreter.py
//tensorflow/lite/python:interpreter_test.py
EOF
# Get the full list of files and targets which get included into the pip
# package
bazel query --keep_going 'deps(//tensorflow/tools/pip_package:build_pip_package)' | sort -u > $BATS_TEST_TMPDIR/pip_deps
# Find all Python py_test targets not tagged "no_pip" or "manual", excluding
# any targets in ignored packages. Combine this list of targets into a bazel
# query list (e.g. the list becomes "target+target2+target3")
bazel query --keep_going 'kind(py_test, //tensorflow/python/...) - attr("tags", "no_pip|manual", //tensorflow/python/...)' | grep -v -f $BATS_TEST_TMPDIR/ignore_deps_for_these_packages | paste -sd "+" - > $BATS_TEST_TMPDIR/deps
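# Illustration (hypothetical targets): if the file holds
# "//tensorflow/python:a_test+//tensorflow/python:b_test", the query below
# becomes deps(//tensorflow/python:a_test+//tensorflow/python:b_test, 1).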
# Find all one-step dependencies of those tests which are from //tensorflow
# (since external deps will come from Python-level pip dependencies),
# excluding dependencies and files that are known to be unnecessary.
# This creates a list of targets under //tensorflow that are required for
# TensorFlow python tests.
bazel query --keep_going "deps($(cat $BATS_TEST_TMPDIR/deps), 1)" | grep "^//tensorflow" | grep -v -f $BATS_TEST_TMPDIR/ignore_these_deps | sort -u > $BATS_TEST_TMPDIR/required_deps
# Find if any required dependencies are missing from the list of dependencies
# included in the pip package.
# (comm: Column 1 is left, Column 2 is right, Column 3 is shared lines)
comm -2 -3 $BATS_TEST_TMPDIR/required_deps $BATS_TEST_TMPDIR/pip_deps > $BATS_TEST_TMPDIR/missing_deps || true
if [[ -s $BATS_TEST_TMPDIR/missing_deps ]]; then
cat <<EOF
One or more test dependencies are not in the pip package.
If these test dependencies need to be in the TensorFlow pip package, please
add them to //tensorflow/tools/pip_package/BUILD. Otherwise, add the no_pip tag
to the test, or change code_check_full.bats in the SIG Build repository. That's
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/tf_sig_build_dockerfiles/devel.usertools/code_check_full.bats
Here are the affected tests:
EOF
while read dep; do
echo "For dependency $dep:"
# For every missing dependency, find the tests which directly depend on
# it, and print that list for debugging. Not really clear if this is
# helpful since the only examples I've seen are enormous.
bazel query "rdeps(kind(py_test, $(cat $BATS_TEST_TMPDIR/deps)), $dep, 1)"
done < $BATS_TEST_TMPDIR/missing_deps
exit 1
fi
}
# The Python package is not allowed to depend on any CUDA packages.
@test "Pip package doesn't depend on CUDA" {
bazel cquery \
--experimental_cc_shared_library \
--@local_config_cuda//:enable_cuda \
--repo_env=HERMETIC_CUDA_VERSION="12.3.2" \
--repo_env=HERMETIC_CUDNN_VERSION="8.9.7.29" \
"somepath(//tensorflow/tools/pip_package:build_pip_package, " \
"@local_config_cuda//cuda:cudart + "\
"@local_config_cuda//cuda:cudart + "\
"@local_config_cuda//cuda:cuda_driver + "\
"@local_config_cuda//cuda:cudnn + "\
"@local_config_cuda//cuda:curand + "\
"@local_config_cuda//cuda:cusolver + "\
"@local_config_tensorrt//:tensorrt)" --keep_going > $BATS_TEST_TMPDIR/out
cat <<EOF
There was a path found connecting //tensorflow/tools/pip_package:build_pip_package
to a banned CUDA dependency. Here's the output from bazel query:
EOF
cat $BATS_TEST_TMPDIR/out
[[ ! -s $BATS_TEST_TMPDIR/out ]]
}
@test "Pip package doesn't depend on CUDA for static builds (i.e. Windows)" {
bazel cquery \
--experimental_cc_shared_library \
--@local_config_cuda//:enable_cuda \
--repo_env=HERMETIC_CUDA_VERSION="12.3.2" \
--repo_env=HERMETIC_CUDNN_VERSION="8.9.7.29" \
--define framework_shared_object=false \
"somepath(//tensorflow/tools/pip_package:build_pip_package, " \
"@local_config_cuda//cuda:cudart + "\
"@local_config_cuda//cuda:cudart + "\
"@local_config_cuda//cuda:cuda_driver + "\
"@local_config_cuda//cuda:cudnn + "\
"@local_config_cuda//cuda:curand + "\
"@local_config_cuda//cuda:cusolver + "\
"@local_config_tensorrt//:tensorrt)" --keep_going > $BATS_TEST_TMPDIR/out
cat <<EOF
There was a path found connecting //tensorflow/tools/pip_package:build_pip_package
to a banned CUDA dependency when '--define framework_shared_object=false' is set.
This means that a CUDA target was probably included via an is_static condition,
used when targeting platforms like Windows where we build statically instead
of dynamically. Here's the output from bazel query:
EOF
cat $BATS_TEST_TMPDIR/out
[[ ! -s $BATS_TEST_TMPDIR/out ]]
}
@test "All tensorflow.org/code links point to real files" {
for i in $(grep -onI 'https://www.tensorflow.org/code/[a-zA-Z0-9/._-]\+' -r tensorflow); do
target=$(echo $i | sed 's!.*https://www.tensorflow.org/code/!!g')
if [[ ! -f $target ]] && [[ ! -d $target ]]; then
echo "$i" >> errors.txt
fi
done
# Report all broken links at once, after the whole tree has been scanned.
if [[ -e errors.txt ]]; then
echo "Broken links found:"
cat errors.txt
rm errors.txt
false
fi
}
@test "No duplicate files on Windows" {
cat <<EOF
Please rename files so there are no repeats. For example, README.md and
Readme.md would be the same file on Windows. In this test, you would get a
warning for "readme.md" because it makes everything lowercase. There are
repeats of these filename(s) with different casing:
EOF
find . | tr '[A-Z]' '[a-z]' | sort | uniq -d | tee $BATS_FILE_TMPDIR/repeats
[[ ! -s $BATS_FILE_TMPDIR/repeats ]]
}
# It's unclear why, but running this on //tensorflow/... is usually faster than
# running only on affected targets. There are targets in //tensorflow/lite that
# fail even under --nobuild, so they are excluded and handled on their own.
#
# Although buildifier checks for formatting as well, "bazel build --nobuild"
# checks for cross-file issues like bad includes or missing BUILD definitions.
#
# We can't test the Windows toolchains because they use a legacy toolchain
# format that specifies the toolchain directly instead of as a "repository".
# They can't be valid on Linux, because Linux can't do anything with a
# Windows-only toolchain, and bazel errors out if asked to build that
# directory.
@test "bazel nobuild passes on all of TF except TF Lite and win toolchains" {
bazel build --experimental_cc_shared_library --nobuild --keep_going -- //tensorflow/... -//tensorflow/lite/... -//tensorflow/tools/toolchains/win/... -//tensorflow/tools/toolchains/win_1803/...
}
teardown_file() {
bazel shutdown
}

View File

@ -1,24 +0,0 @@
#!/bin/bash
#
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Usage: get_test_list.sh OUTPUT BAZEL_TEST_COMMAND...
# Writes the list of tests that would be run from BAZEL_TEST_COMMAND to OUTPUT.
# Hides all extra output and always exits with success for now.
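# Example (hypothetical paths): get_test_list.sh /tf/all_tests.txt bazel test //tensorflow/python/...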
OUTPUT=$1
shift
"$@" --test_summary=short --check_tests_up_to_date 2>/dev/null | sort -u | awk '{print $1}' | grep "^//" | tee $OUTPUT

View File

@ -1,34 +0,0 @@
#!/usr/bin/env bash
#
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Check and rename wheels with auditwheel. Inserts the platform tags like
# "manylinux_xyz" into the wheel filename.
set -exo pipefail
for wheel in /tf/pkg/*.whl; do
echo "Checking and renaming $wheel..."
time python3 -m auditwheel repair --plat manylinux2014_aarch64 "$wheel" --wheel-dir /tf/pkg 2>&1 | tee check.txt
# We don't need the original wheel if it was renamed
new_wheel=$(grep --extended-regexp --only-matching '/tf/pkg/\S+\.whl' check.txt)
if [[ "$new_wheel" != "$wheel" ]]; then
rm "$wheel"
wheel="$new_wheel"
fi
TF_WHEEL="$wheel" bats /usertools/wheel_verification.bats --timing
done

View File

@ -1,64 +0,0 @@
#!/bin/bash
#
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
#
# Repacks libtensorflow tarballs into $DIR with provided $TARBALL_SUFFIX,
# and also repacks libtensorflow-src.jar into a standardized format.
# Helper function to copy a srcjar after moving any source files
# directly under the root to the "maven-style" src/main/java layout
#
# Source files generated by annotation processors appear directly
# under the root of srcjar files created by bazel, rather than under
# the maven-style src/main/java subdirectory.
#
# Bazel manages annotation-generated source as follows: first, it
# calls javac with options that create generated files under a
# bazel-out directory. Next, it archives the generated source files
# into a srcjar directly under the root. There doesn't appear to be a
# simple way to parameterize this from bazel, hence this helper to
# "normalize" the srcjar layout.
#
# Arguments:
# src_jar - path to the original srcjar
# dest_jar - path to the destination
# Returns:
# None
function cp_normalized_srcjar() {
src_jar="$1"
dest_jar="$2"
tmp_dir=$(mktemp -d)
cp "${src_jar}" "${tmp_dir}/orig.jar"
pushd "${tmp_dir}"
# Extract any src/ files
jar -xf "${tmp_dir}/orig.jar" src/
# Extract any org/ files under src/main/java
(mkdir -p src/main/java && cd src/main/java && jar -xf "${tmp_dir}/orig.jar" org/)
# Repackage src/
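# (-c create, -M omit the manifest so only sources are archived, -f output file)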
jar -cMf "${tmp_dir}/new.jar" src
popd
cp "${tmp_dir}/new.jar" "${dest_jar}"
rm -rf "${tmp_dir}"
}
DIR=$1
TARBALL_SUFFIX=$2
mkdir -p "$DIR"
cp bazel-bin/tensorflow/tools/lib_package/libtensorflow.tar.gz "${DIR}/libtensorflow${TARBALL_SUFFIX}.tar.gz"
cp bazel-bin/tensorflow/tools/lib_package/libtensorflow_jni.tar.gz "${DIR}/libtensorflow_jni${TARBALL_SUFFIX}.tar.gz"
cp bazel-bin/tensorflow/java/libtensorflow.jar "${DIR}"
cp_normalized_srcjar bazel-bin/tensorflow/java/libtensorflow-src.jar "${DIR}/libtensorflow-src.jar"
cp bazel-bin/tensorflow/tools/lib_package/libtensorflow_proto.zip "${DIR}"

View File

@ -1,35 +0,0 @@
#!/usr/bin/env bash
#
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
set -exo pipefail
# Run this from inside the tensorflow github directory.
# Usage: setup_venv_test.sh venv_and_symlink_name "glob pattern for one wheel file"
# Example: setup_venv_test.sh bazel_pip "/tf/pkg/*.whl"
#
# This will create a venv with that wheel installed in it, plus a symlink at
# ./venv_and_symlink_name/tensorflow pointing to ./tensorflow. We use this for
# the "pip" tests.
python -m venv /$1
mkdir -p $1
rm -f ./$1/tensorflow
ln -s $(ls /$1/lib) /$1/lib/python3
ln -s ../tensorflow $1/tensorflow
# extglob is necessary for @(a|b) pattern matching
# see "extglob" in the bash manual page ($ man bash)
bash -O extglob -c "/$1/bin/pip install $2"
/$1/bin/pip install -r /usertools/test.requirements.txt

View File

@ -1,124 +0,0 @@
#!/usr/bin/env python3
# pylint:disable=protected-access
#
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Merge all JUnit test.xml files in one directory into one.
Usage: squash_testlogs.py START_DIRECTORY OUTPUT_FILE
Example: squash_testlogs.py /tf/pkg/testlogs /tf/pkg/merged.xml
Recursively find all the JUnit test.xml files in one directory, and merge any
of them that contain failures into one file. The TensorFlow DevInfra team
uses this to generate a simple overview of an entire pip and nonpip test
invocation, since the normal logs that Bazel creates are too large for the
internal invocation viewer.
"""
import collections
import os
import re
import subprocess
import sys
from junitparser import JUnitXml
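# Assumption for illustration: JUnit XML suite headers look like
# <testsuite name="..." tests="3" failures="1" errors="0">, so the grep below
# lists only files that recorded at least one failure or error.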
result = JUnitXml()
try:
  files = subprocess.check_output(
      ["grep", "-rlE", '(failures|errors)="[1-9]', sys.argv[1]]
  )
except subprocess.CalledProcessError:
  print("No failures found to log!")
  exit(0)
# For test cases, only show the ones that failed and have text (a log)
seen = collections.Counter()
runfiles_matcher = re.compile(r"(/.*\.runfiles/)")
for f in files.strip().splitlines():
  # Skip any file that fails to parse, instead of aborting the whole merge.
  try:
    r = JUnitXml.fromfile(f)
  except Exception as e:  # pylint: disable=broad-except
    print("Ignoring this XML parse failure in {}: ".format(f), str(e))
    continue
  source_file = re.search(
      r"/(bazel_pip|tensorflow)/.*", f.decode("utf-8")
  ).group(0)
  for testsuite in r:
    testsuite._elem.set("source_file", source_file)
    # Remove empty testcases
    for p in testsuite._elem.xpath(".//testcase"):
      if not len(p):  # pylint: disable=g-explicit-length-test
        testsuite._elem.remove(p)
    # Change "testsuite > testcase,system-out" to "testsuite > testcase > error"
    for p in testsuite._elem.xpath(".//system-out"):
      for c in p.getparent().xpath(".//error | .//failure"):
        c.text = p.text
      p.getparent().remove(p)
    # Remove duplicate results of the same exact test (e.g. due to retry
    # attempts)
    for p in testsuite._elem.xpath(".//error | .//failure"):
      # Sharded tests have target names like this:
      #   WindowOpsTest.test_tflite_convert0 (<function hann_window at
      #   0x7fc61728dd40>, 10, False, tf.float32)
      # The 0x... is a memory address that is not important for debugging, but
      # it differs between repetitions of the same test and would break this
      # "number of failures" counter, so we strip it with re.sub(r"0x\w+", ...).
      key = re.sub(r"0x\w+", "", p.getparent().get("name", "")) + p.text
      if key in seen:
        testsuite._elem.remove(p.getparent())
      seen[key] += 1
    # Remove this testsuite if it doesn't have anything in it any more
    if len(testsuite) == 0:  # pylint: disable=g-explicit-length-test
      r._elem.remove(testsuite._elem)
  if len(r) > 0:  # pylint: disable=g-explicit-length-test
    result += r
# Annotate each failure with the number of times it occurred, to help identify
# flakes (retries of the same sharded test are counted together).
for p in result._elem.xpath(".//error | .//failure"):
  key = re.sub(r"0x\w+", "", p.getparent().get("name", "")) + p.text
  p.text = runfiles_matcher.sub("[testroot]/", p.text)
  source_file = p.getparent().getparent().get("source_file", "")
  p.text += f"\nNOTE: From {source_file}"
  if "bazel_pip" in source_file:
    p.text += (
        "\nNOTE: This is a --config=pip test. Remove 'bazel_pip' to find"
        " the file."
    )
  p.text += f"\nNOTE: Number of failures for this test: {seen[key]}."
  p.text += "\n      Most TF jobs run tests three times to root out flakes."
  if seen[key] == 3:
    p.text += (
        "\n      Since there were three failures, this is not flaky, and it"
    )
    p.text += "\n      probably caused the Kokoro invocation to fail."
  else:
    p.text += (
        "\n      Since there were not three failures, this is probably a flake."
    )
    p.text += (
        "\n      Flakes make this pkg/pip_and_nonpip_tests target show "
        "as failing,"
    )
    p.text += "\n      but do not make the Kokoro invocation fail."
os.makedirs(os.path.dirname(sys.argv[2]), exist_ok=True)
result.update_statistics()
result.write(sys.argv[2])

View File

@ -1,6 +0,0 @@
# Test dependencies for pip tests
grpcio ~= 1.42.0
portpicker ~= 1.5.2
scipy ~= 1.7.3
jax ~= 0.2.26
jaxlib ~= 0.1.75

View File

@ -1,73 +0,0 @@
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Suite of verification tests for the SINGLE TensorFlow wheel in /tf/pkg
# or whatever path is set as $TF_WHEEL.
setup_file() {
cd /tf/pkg
if [[ -z "$TF_WHEEL" ]]; then
export TF_WHEEL=$(find /tf/pkg -iname "*.whl")
fi
}
teardown_file() {
rm -rf /tf/venv
}
@test "Wheel is manylinux2014 (manylinux_2_17) compliant" {
python3 -m auditwheel show "$TF_WHEEL" > audit.txt
grep --quiet -zoP 'is consistent with the following platform tag:\n"manylinux_2_17_(aarch|x86_)64"\.' audit.txt
}
@test "Wheel conforms to upstream size limitations" {
WHEEL_MEGABYTES=$(stat --format %s "$TF_WHEEL" | awk '{print int($1/(1024*1024))}')
# Googlers: search for "test_tf_whl_size"
case "$TF_WHEEL" in
# CPU:
*cpu*manylinux*) LARGEST_OK_SIZE=220 ;;
# GPU:
*manylinux*) LARGEST_OK_SIZE=580 ;;
# Unknown:
*)
echo "The wheel's name is in an unknown format."
exit 1
;;
esac
# >&3 forces output in bats even if the test passes. See
# https://bats-core.readthedocs.io/en/stable/writing-tests.html#printing-to-the-terminal
echo "# Size of $TF_WHEEL is $WHEEL_MEGABYTES / $LARGEST_OK_SIZE megabytes." >&3
test "$WHEEL_MEGABYTES" -le "$LARGEST_OK_SIZE"
}
# Note: this runs before the tests further down the file, so TF is installed in
# the venv and the venv is active when those tests run. The venv gets cleaned
# up in teardown_file() above.
@test "Wheel is installable" {
python3 -m venv /tf/venv
source /tf/venv/bin/activate
python3 -m pip install --upgrade setuptools wheel
python3 -m pip install "$TF_WHEEL"
}
@test "TensorFlow is importable" {
source /tf/venv/bin/activate
python3 -c 'import tensorflow as tf; t1=tf.constant([1,2,3,4]); t2=tf.constant([5,6,7,8]); print(tf.add(t1,t2).shape)'
}
# Is this still useful?
@test "TensorFlow has Keras" {
source /tf/venv/bin/activate
python3 -c 'import sys; import tensorflow as tf; sys.exit(0 if "keras" in tf.keras.__name__ else 1)'
}

View File

@ -1,29 +0,0 @@
# JAX requirements, passed into container by defining the ARG
# REQUIREMENTS_FILE=jax.requirements.txt
setuptools
wheel
cloudpickle
colorama>=0.4.4
matplotlib
pillow>=9.1.0
rich
absl-py
portpicker
six
opt-einsum
auditwheel
typing_extensions
importlib_metadata>=4.6
numpy==1.26.0;python_version=="3.12"
numpy==1.23.4;python_version=="3.11"
numpy==1.22.4;python_version<"3.11"
scipy==1.11.2;python_version=="3.12"
scipy==1.9.2;python_version=="3.11"
scipy==1.7.3;python_version<"3.11"
ml_dtypes>=0.5.1
# For using Python 3.11 with Bazel 6 (b/286090018)
lit ~= 17.0.2

View File

@ -1,18 +0,0 @@
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Builds a devtoolset cross-compiler targeting manylinux2014 (glibc 2.17 / libstdc++ 4.8).
/lib64

View File

@ -1,28 +0,0 @@
#!/usr/bin/env bash
#
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# setup.packages.sh: Given a list of Ubuntu packages, install them and clean up.
# Usage: setup.packages.sh <package_list.txt>
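# Example input (hypothetical packages.txt):
#   # comments and blank lines are ignored
#   curl
#   wget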
set -e
# Prevent the tzdata package install from prompting for our location (assumes UTC)
export DEBIAN_FRONTEND=noninteractive
apt-get update
# Remove commented lines and blank lines
apt-get install -y --no-install-recommends $(sed -e '/^\s*#.*$/d' -e '/^\s*$/d' "$1" | sort -u)
rm -rf /var/lib/apt/lists/*

View File

@ -1,65 +0,0 @@
#!/usr/bin/env bash
#
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# setup.python.sh: Install a specific Python version and packages for it.
# Usage: setup.python.sh <pyversion> <requirements.txt>
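# Example (hypothetical): setup.python.sh python3.11 devel.requirements.txt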
set -xe
source ~/.bashrc
VERSION=$1
REQUIREMENTS=$2
add-apt-repository ppa:deadsnakes/ppa
# Install Python packages for this container's version
if [[ ${VERSION} == "python3.13-nogil" ]]; then
cat >pythons.txt <<EOF
$VERSION
EOF
else
cat >pythons.txt <<EOF
$VERSION
$VERSION-dev
$VERSION-venv
EOF
fi
/setup.packages.sh pythons.txt
# Re-link pyconfig.h from aarch64-linux-gnu into the devtoolset directory
# for any Python version present
pushd /usr/include/aarch64-linux-gnu
for f in $(ls | grep python); do
# set up symlink for devtoolset-10
rm -f /dt10/usr/include/aarch64-linux-gnu/$f
ln -s /usr/include/aarch64-linux-gnu/$f /dt10/usr/include/aarch64-linux-gnu/$f
done
popd
# Python 3.10 include headers fix:
# sysconfig.get_path('include') incorrectly points to /usr/local/include/python
# map /usr/include/python3.10 to /usr/local/include/python3.10
if [[ ! -f "/usr/local/include/$VERSION" ]]; then
ln -sf /usr/include/$VERSION /usr/local/include/$VERSION
fi
# Install pip
wget --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 --tries=5 https://bootstrap.pypa.io/get-pip.py
/usr/bin/$VERSION get-pip.py
/usr/bin/$VERSION -m pip install --no-cache-dir --upgrade pip
# Disable the cache dir to save image space, and install packages
/usr/bin/$VERSION -m pip install --no-cache-dir -r $REQUIREMENTS -U

View File

@ -1,45 +0,0 @@
#!/usr/bin/env bash
#
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# setup.sources.sh: Set up custom apt sources for our TF images.
# Prevent the tzdata package install from prompting for our location (assumes UTC)
export DEBIAN_FRONTEND=noninteractive
# Set up shared custom sources
apt-get update
apt-get install -y gnupg ca-certificates
# Deadsnakes: https://launchpad.net/~deadsnakes/+archive/ubuntu/ppa
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys F23C5A6CF475977595C89F51BA6932366A755776
# LLVM/Clang: https://apt.llvm.org/
apt-key adv --fetch-keys https://apt.llvm.org/llvm-snapshot.gpg.key
# Set up custom sources
cat >/etc/apt/sources.list.d/custom.list <<SOURCES
# More Python versions: Deadsnakes
deb http://ppa.launchpad.net/deadsnakes/ppa/ubuntu focal main
deb-src http://ppa.launchpad.net/deadsnakes/ppa/ubuntu focal main
# LLVM/Clang 18 repository
deb http://apt.llvm.org/focal/ llvm-toolchain-focal-18 main
deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-18 main
SOURCES

View File

@ -21,7 +21,7 @@ TFCI_BUILD_PIP_PACKAGE_WHEEL_NAME_ARG="--repo_env=WHEEL_NAME=tensorflow"
TFCI_DOCKER_ENABLE=1
TFCI_DOCKER_IMAGE=us-docker.pkg.dev/ml-oss-artifacts-published/ml-public-container/ml-build-arm64:latest
TFCI_DOCKER_PULL_ENABLE=1
TFCI_DOCKER_REBUILD_ARGS="--target=tf ci/official/containers/linux_arm64"
TFCI_DOCKER_REBUILD_ARGS="--target=tf ci/official/containers/ml_build_arm64"
TFCI_INDEX_HTML_ENABLE=1
TFCI_LIB_SUFFIX="-cpu-linux-arm64"
TFCI_OUTPUT_DIR=build_output