Merge branch 'tensorflow:master' into ica574-doc-contrib

This commit is contained in:
Isaac Cilia Attard 2023-05-07 20:04:27 +02:00 committed by GitHub
commit bb3d28fa4c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4150 changed files with 178971 additions and 75644 deletions

View File

@@ -184,6 +184,10 @@ build:android_x86_64 --config=android
build:android_x86_64 --cpu=x86_64
build:android_x86_64 --fat_apk_cpu=x86_64
# Build everything statically for Android since all static libs are later
# bundled together into a single .so for deployment.
build:android --dynamic_mode=off
# Sets the default Apple platform to macOS.
build:macos --apple_platform_type=macos
@@ -202,6 +206,8 @@ build:ios_armv7 --config=ios
build:ios_armv7 --cpu=ios_armv7
build:ios_arm64 --config=ios
build:ios_arm64 --cpu=ios_arm64
build:ios_arm64e --config=ios
build:ios_arm64e --cpu=ios_arm64e
build:ios_sim_arm64 --config=ios
build:ios_sim_arm64 --cpu=ios_sim_arm64
build:ios_i386 --config=ios
@@ -219,7 +225,9 @@ build:monolithic --define framework_shared_object=false
build:monolithic --define tsl_protobuf_header_only=false
build:monolithic --experimental_link_static_libraries_once=false # b/229868128
# Please note that MKL on MacOS or windows is still not supported.
build:linux --define=build_with_onednn_v2=true
# Please note that MKL on MacOS is still not supported.
# If you would like to use a local MKL instead of downloading, please set the
# environment variable "TF_MKL_ROOT" every time before build.
build:mkl --define=build_with_mkl=true --define=enable_mkl=true
@@ -551,8 +559,8 @@ build:rbe_linux_py3_base --python_path="/usr/local/bin/python3.9"
build:rbe_linux_py3_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.9"
build:rbe_win --config=rbe
build:rbe_win --crosstool_top="//tensorflow/tools/toolchains/win/tf_win_02232023:toolchain"
build:rbe_win --extra_toolchains="//tensorflow/tools/toolchains/win/tf_win_02232023:cc-toolchain-x64_windows"
build:rbe_win --crosstool_top="//tensorflow/tools/toolchains/win/tf_win_05022023:toolchain"
build:rbe_win --extra_toolchains="//tensorflow/tools/toolchains/win/tf_win_05022023:cc-toolchain-x64_windows"
build:rbe_win --extra_execution_platforms="//tensorflow/tools/toolchains/win:rbe_windows_ltsc2019"
build:rbe_win --host_platform="//tensorflow/tools/toolchains/win:rbe_windows_ltsc2019"
build:rbe_win --platforms="//tensorflow/tools/toolchains/win:rbe_windows_ltsc2019"
@@ -683,10 +691,10 @@ build:ubsan --linkopt -fsanitize=undefined
build:ubsan --linkopt -lubsan
# Disable TFRT integration for now unless --config=tfrt is specified.
build --deleted_packages=tensorflow/compiler/mlir/tfrt,tensorflow/compiler/mlir/tfrt/benchmarks,tensorflow/compiler/mlir/tfrt/jit/python_binding,tensorflow/compiler/mlir/tfrt/jit/transforms,tensorflow/compiler/mlir/tfrt/python_tests,tensorflow/compiler/mlir/tfrt/tests,tensorflow/compiler/mlir/tfrt/tests/ir,tensorflow/compiler/mlir/tfrt/tests/analysis,tensorflow/compiler/mlir/tfrt/tests/jit,tensorflow/compiler/mlir/tfrt/tests/lhlo_to_tfrt,tensorflow/compiler/mlir/tfrt/tests/lhlo_to_jitrt,tensorflow/compiler/mlir/tfrt/tests/tf_to_corert,tensorflow/compiler/mlir/tfrt/tests/tf_to_tfrt_data,tensorflow/compiler/mlir/tfrt/tests/saved_model,tensorflow/compiler/mlir/tfrt/transforms/lhlo_gpu_to_tfrt_gpu,tensorflow/core/runtime_fallback,tensorflow/core/runtime_fallback/conversion,tensorflow/core/runtime_fallback/kernel,tensorflow/core/runtime_fallback/opdefs,tensorflow/core/runtime_fallback/runtime,tensorflow/core/runtime_fallback/util,tensorflow/core/tfrt/eager,tensorflow/core/tfrt/eager/backends/cpu,tensorflow/core/tfrt/eager/backends/gpu,tensorflow/core/tfrt/eager/core_runtime,tensorflow/core/tfrt/eager/cpp_tests/core_runtime,tensorflow/core/tfrt/gpu,tensorflow/core/tfrt/run_handler_thread_pool,tensorflow/core/tfrt/runtime,tensorflow/core/tfrt/saved_model,tensorflow/core/tfrt/graph_executor,tensorflow/core/tfrt/saved_model/tests,tensorflow/core/tfrt/tpu,tensorflow/core/tfrt/utils
build --deleted_packages=tensorflow/compiler/mlir/tfrt,tensorflow/compiler/mlir/tfrt/benchmarks,tensorflow/compiler/mlir/tfrt/jit/python_binding,tensorflow/compiler/mlir/tfrt/jit/transforms,tensorflow/compiler/mlir/tfrt/python_tests,tensorflow/compiler/mlir/tfrt/tests,tensorflow/compiler/mlir/tfrt/tests/ir,tensorflow/compiler/mlir/tfrt/tests/analysis,tensorflow/compiler/mlir/tfrt/tests/jit,tensorflow/compiler/mlir/tfrt/tests/lhlo_to_tfrt,tensorflow/compiler/mlir/tfrt/tests/lhlo_to_jitrt,tensorflow/compiler/mlir/tfrt/tests/tf_to_corert,tensorflow/compiler/mlir/tfrt/tests/tf_to_tfrt_data,tensorflow/compiler/mlir/tfrt/tests/saved_model,tensorflow/compiler/mlir/tfrt/transforms/lhlo_gpu_to_tfrt_gpu,tensorflow/core/runtime_fallback,tensorflow/core/runtime_fallback/conversion,tensorflow/core/runtime_fallback/kernel,tensorflow/core/runtime_fallback/opdefs,tensorflow/core/runtime_fallback/runtime,tensorflow/core/runtime_fallback/util,tensorflow/core/tfrt/eager,tensorflow/core/tfrt/eager/backends/cpu,tensorflow/core/tfrt/eager/backends/gpu,tensorflow/core/tfrt/eager/core_runtime,tensorflow/core/tfrt/eager/cpp_tests/core_runtime,tensorflow/core/tfrt/gpu,tensorflow/core/tfrt/run_handler_thread_pool,tensorflow/core/tfrt/runtime,tensorflow/core/tfrt/saved_model,tensorflow/core/tfrt/graph_executor,tensorflow/core/tfrt/saved_model/tests,tensorflow/core/tfrt/tpu,tensorflow/core/tfrt/utils,tensorflow/core/tfrt/utils/debug
# TODO(b/240450920): We are in the process of migrating JitRt backend to XLA
# and while we are doing this we can't keep it buildable/testable in OSS.
build:tfrt --deleted_packages=tensorflow/compiler/mlir/tfrt,tensorflow/compiler/mlir/tfrt/benchmarks,tensorflow/compiler/mlir/tfrt/jit/python_binding,tensorflow/compiler/mlir/tfrt/jit/transforms,tensorflow/compiler/mlir/tfrt/python_tests,tensorflow/compiler/mlir/tfrt/tests,tensorflow/compiler/mlir/tfrt/tests/ir,tensorflow/compiler/mlir/tfrt/tests/analysis,tensorflow/compiler/mlir/tfrt/tests/jit,tensorflow/compiler/mlir/tfrt/tests/lhlo_to_tfrt,tensorflow/compiler/mlir/tfrt/tests/lhlo_to_jitrt,tensorflow/compiler/mlir/tfrt/tests/tf_to_corert,tensorflow/compiler/mlir/tfrt/tests/tf_to_tfrt_data,tensorflow/compiler/mlir/tfrt/tests/saved_model,tensorflow/compiler/mlir/tfrt/transforms/lhlo_gpu_to_tfrt_gpu,tensorflow/core/runtime_fallback,tensorflow/core/runtime_fallback/conversion,tensorflow/core/runtime_fallback/kernel,tensorflow/core/runtime_fallback/opdefs,tensorflow/core/runtime_fallback/runtime,tensorflow/core/runtime_fallback/util,tensorflow/core/tfrt/eager,tensorflow/core/tfrt/eager/backends/cpu,tensorflow/core/tfrt/eager/backends/gpu,tensorflow/core/tfrt/eager/core_runtime,tensorflow/core/tfrt/eager/cpp_tests/core_runtime,tensorflow/core/tfrt/gpu,tensorflow/core/tfrt/run_handler_thread_pool,tensorflow/core/tfrt/runtime,tensorflow/core/tfrt/saved_model,tensorflow/core/tfrt/graph_executor,tensorflow/core/tfrt/saved_model/tests,tensorflow/core/tfrt/tpu,tensorflow/core/tfrt/utils
build:tfrt --deleted_packages=tensorflow/compiler/mlir/tfrt,tensorflow/compiler/mlir/tfrt/benchmarks,tensorflow/compiler/mlir/tfrt/jit/python_binding,tensorflow/compiler/mlir/tfrt/jit/transforms,tensorflow/compiler/mlir/tfrt/python_tests,tensorflow/compiler/mlir/tfrt/tests,tensorflow/compiler/mlir/tfrt/tests/ir,tensorflow/compiler/mlir/tfrt/tests/analysis,tensorflow/compiler/mlir/tfrt/tests/jit,tensorflow/compiler/mlir/tfrt/tests/lhlo_to_tfrt,tensorflow/compiler/mlir/tfrt/tests/lhlo_to_jitrt,tensorflow/compiler/mlir/tfrt/tests/tf_to_corert,tensorflow/compiler/mlir/tfrt/tests/tf_to_tfrt_data,tensorflow/compiler/mlir/tfrt/tests/saved_model,tensorflow/compiler/mlir/tfrt/transforms/lhlo_gpu_to_tfrt_gpu,tensorflow/core/runtime_fallback,tensorflow/core/runtime_fallback/conversion,tensorflow/core/runtime_fallback/kernel,tensorflow/core/runtime_fallback/opdefs,tensorflow/core/runtime_fallback/runtime,tensorflow/core/runtime_fallback/util,tensorflow/core/tfrt/eager,tensorflow/core/tfrt/eager/backends/cpu,tensorflow/core/tfrt/eager/backends/gpu,tensorflow/core/tfrt/eager/core_runtime,tensorflow/core/tfrt/eager/cpp_tests/core_runtime,tensorflow/core/tfrt/gpu,tensorflow/core/tfrt/run_handler_thread_pool,tensorflow/core/tfrt/runtime,tensorflow/core/tfrt/saved_model,tensorflow/core/tfrt/graph_executor,tensorflow/core/tfrt/saved_model/tests,tensorflow/core/tfrt/tpu,tensorflow/core/tfrt/utils,tensorflow/core/tfrt/utils/debug
# TF Fuzztest config
try-import fuzztest.bazelrc

View File

@@ -131,7 +131,6 @@ body:
description: Also tell us, what did you expect to happen?
placeholder: Tell us what you see!
value: "A bug happened!"
render: shell
validations:
required: true
- type: textarea

View File

@@ -16,9 +16,8 @@
# A list of assignees
assignees:
- synandi
- tiruk007
- SuryanarayanaY
- tilakrayal
- pjpratik
# A list of assignees for compiler folder
compiler_assignees:
- joker-eph

View File

@@ -0,0 +1,61 @@
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
name: ARM CI Extended C++
on:
push:
tags:
- v2.**
schedule:
- cron: '0 2 * * *'
jobs:
build:
if: github.repository == 'tensorflow/tensorflow' # Don't do this in forks
runs-on: [self-hosted, linux, ARM64]
strategy:
matrix:
pyver: ['3.10']
steps:
- name: Stop old running containers (if any)
shell: bash
run: |
running_containers=$(docker ps -q) && \
if [[ $running_containers == "" ]]; then
echo "No running containers";
else
echo "Running container(s) found" && \
docker stop $running_containers;
fi
docker container prune -f
docker image prune -af
- name: Clean repository
shell: bash
run: find /home/ubuntu/actions-runner/_work/tensorflow/tensorflow/. -name . -o -prune -exec sudo rm -rf -- {} + || true
- name: Checkout repository for nightly (skipped for releases)
if: ${{ github.event_name == 'schedule' }}
uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0
with:
ref: 'nightly'
- name: Checkout repository
if: ${{ github.event_name == 'push' }}
uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0
- name: Build binary and run C++ tests
shell: bash
run: |
is_nightly=0 && tf_project_name='tf_ci_ext_c' && ${{ github.event_name == 'schedule' }} && is_nightly=1 && tf_project_name='tf_nightly_ci_ext_c'
CI_DOCKER_BUILD_EXTRA_PARAMS="--build-arg py_major_minor_version=${{ matrix.pyver }} --build-arg is_nightly=${is_nightly} --build-arg tf_project_name=${tf_project_name}" \
./tensorflow/tools/ci_build/ci_build.sh cpu.arm64 bash tensorflow/tools/ci_build/rel/ubuntu/cpu_arm64_cpp.sh

View File

@@ -17,14 +17,10 @@ name: ARM CI Extended
on:
push:
branches:
- master
- r2.**
pull_request:
types: [opened, synchronize, reopened]
branches:
- master
- r2.**
tags:
- v2.**
schedule:
- cron: '0 4 * * *'
jobs:
build:
@@ -49,10 +45,17 @@ jobs:
- name: Clean repository
shell: bash
run: find /home/ubuntu/actions-runner/_work/tensorflow/tensorflow/. -name . -o -prune -exec sudo rm -rf -- {} + || true
- name: Checkout repository for nightly (skipped for releases)
if: ${{ github.event_name == 'schedule' }}
uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0
with:
ref: 'nightly'
- name: Checkout repository
if: ${{ github.event_name == 'push' }}
uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0
- name: Build binary and run non-pip tests
shell: bash
run: |
CI_DOCKER_BUILD_EXTRA_PARAMS='--build-arg py_major_minor_version=${{ matrix.pyver }}' \
is_nightly=0 && tf_project_name='tf_ci_ext' && ${{ github.event_name == 'schedule' }} && is_nightly=1 && tf_project_name='tf_nightly_ci_ext'
CI_DOCKER_BUILD_EXTRA_PARAMS="--build-arg py_major_minor_version=${{ matrix.pyver }} --build-arg is_nightly=${is_nightly} --build-arg tf_project_name=${tf_project_name}" \
./tensorflow/tools/ci_build/ci_build.sh cpu.arm64 bash tensorflow/tools/ci_build/rel/ubuntu/cpu_arm64_nonpip.sh

View File

@@ -54,7 +54,7 @@ jobs:
- name: Build and test pip wheel
shell: bash
run: |
CI_DOCKER_BUILD_EXTRA_PARAMS='--build-arg py_major_minor_version=${{ matrix.pyver }}' \
CI_DOCKER_BUILD_EXTRA_PARAMS="--build-arg py_major_minor_version=${{ matrix.pyver }} --build-arg is_nightly=1 --build-arg tf_project_name=tf_nightly_ci" \
./tensorflow/tools/ci_build/ci_build.sh cpu.arm64 bash tensorflow/tools/ci_build/rel/ubuntu/cpu_arm64_pip.sh
- name: Upload pip wheel to GitHub
uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # v3.1.1

View File

@@ -28,8 +28,16 @@ jobs:
pull-requests: write
steps:
- name: Awaiting response issues
uses: actions/stale@v5
uses: actions/stale@v7
with:
# Comma-separated list of labels that can be assigned to issues to exclude them from being marked as stale
exempt-issue-labels: 'override-stale'
# Comma-separated list of labels that can be assigned to PRs to exclude them from being marked as stale
exempt-pr-labels: "override-stale"
# Limit the number of API calls in one run; the default value is 30.
operations-per-run: 1000
# Prevent the stale label from being removed when PRs or issues are updated.
remove-stale-when-updated: false
days-before-issue-stale: 7
days-before-issue-close: 7
stale-issue-label: "stale"
@@ -48,8 +56,16 @@ jobs:
close-pr-message: "This PR was closed because it has been inactive for 14 days since being marked as stale. Please reopen if you'd like to work on this further."
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Contribution issues
uses: actions/stale@v5
uses: actions/stale@v7
with:
# Comma-separated list of labels that can be assigned to issues to exclude them from being marked as stale
exempt-issue-labels: 'override-stale'
# Comma-separated list of labels that can be assigned to PRs to exclude them from being marked as stale
exempt-pr-labels: "override-stale"
# Limit the number of API calls in one run; the default value is 30.
operations-per-run: 1000
# Prevent the stale label from being removed when PRs or issues are updated.
remove-stale-when-updated: false
days-before-issue-stale: 180
days-before-issue-close: 365
stale-issue-label: "stale"

View File

@@ -80,6 +80,18 @@ jobs:
map sigbuild-r2.12-clang-python3.9 2.12-python3.9
map sigbuild-r2.12-clang-python3.10 2.12-python3.10
map sigbuild-r2.12-clang-python3.11 2.12-python3.11
# TF 2.13
map sigbuild-r2.13 2.13-python3.9
map sigbuild-r2.13-python3.8 2.13-python3.8
map sigbuild-r2.13-python3.9 2.13-python3.9
map sigbuild-r2.13-python3.10 2.13-python3.10
map sigbuild-r2.13-python3.11 2.13-python3.11
# TF 2.13 + Clang (containers are the same, but env vars in configs.bzl are different)
map sigbuild-r2.13-clang 2.13-python3.9
map sigbuild-r2.13-clang-python3.8 2.13-python3.8
map sigbuild-r2.13-clang-python3.9 2.13-python3.9
map sigbuild-r2.13-clang-python3.10 2.13-python3.10
map sigbuild-r2.13-clang-python3.11 2.13-python3.11
- name: Create Pull Request with changes
uses: peter-evans/create-pull-request@2b011faafdcbc9ceb11414d64d0573f37c774b04 # v4.2.3
with:

View File

@@ -19,39 +19,58 @@ Before sending your pull requests, make sure you do the following:
### Typical Pull Request Workflow -
**1. New PR** - As a contributor, you submit a New PR on GitHub. - We inspect
every incoming PR and add certain labels to the PR such as `size:`, `comp:` etc.
At this stage we check if the PR is valid and meets certain quality
requirements. - For example - We check if the CLA is signed, PR has sufficient
description, if applicable unit tests are added, if it is a reasonable
contribution meaning it is not a single liner cosmetic PR.
**1. New PR**
**2. Valid?** - If the PR passes all the quality checks then we go ahead and
assign a reviewer. - If the PR didn't meet the validation criteria, we request
for additional changes to be made to PR to pass quality checks and send it back
or on a rare occasion we may reject it.
- As a contributor, you submit a New PR on GitHub.
- We inspect every incoming PR and add certain labels to the PR such as `size:`,
`comp:` etc. At this stage we check if the PR is valid and meets certain
quality requirements. For example, we check that the CLA is signed, the PR has a
sufficient description, unit tests are added where applicable, and it is a
reasonable contribution (meaning it is not a single-line cosmetic PR).
**3. Review** - For Valid PR, reviewer (person familiar with the
code/functionality) checks if the PR looks good or needs additional changes. -
If all looks good, reviewer would approve the PR. - If a change is needed, the
contributor is requested to make suggested change. - You make the change and
submit for the review again. - This cycle repeats itself till the PR gets
approved. - Note: As a friendly reminder we may reach out to you if the PR is
awaiting your response for more than 2 weeks.
**2. Valid?**
**4. Approved** - Once the PR is approved, it gets `kokoro:force-run` label
applied and it initiates CI/CD tests. - We can't move forward if these tests
fail. - In such situations, we may request you to make further changes to your
PR for the tests to pass. - Once the tests pass, we now bring all the code in
the internal code base, using a job called "copybara".
- If the PR passes all the quality checks then we go ahead and assign a
reviewer.
- If the PR didn't meet the validation criteria, we request additional
changes to be made to the PR to pass quality checks and send it back, or on a
rare occasion we may reject it.
**5. Copy to G3** - Once the PR is in Google codebase, we make sure it
integrates well with its dependencies and the rest of the system. - Rarely, but
If the tests fail at this stage, we cannot merge the code. - If needed, we may
come to you to make some changes. - At times, it may not be you, it may be us
who may have hit a snag. - Please be patient while we work to fix this. - Once
the internal tests pass, we go ahead and merge the code internally as well as
externally on GitHub.
**3. Review**
- For a valid PR, a reviewer (a person familiar with the code/functionality)
checks if the PR looks good or needs additional changes.
- If all looks good, the reviewer will approve the PR.
- If a change is needed, the contributor is requested to make the suggested change.
- You make the change and submit it for review again.
- This cycle repeats itself until the PR gets approved.
- Note: As a friendly reminder, we may reach out to you if the PR is awaiting
your response for more than 2 weeks.
**4. Approved**
- Once the PR is approved, it gets `kokoro:force-run` label applied and it
initiates CI/CD tests.
- We can't move forward if these tests fail.
- In such situations, we may request you to make further changes to your PR for
the tests to pass.
- Once the tests pass, we bring all the code into the internal code base,
using a job called "copybara".
**5. Copy to Google Internal codebase and run internal CI**
- Once the PR is in Google codebase, we make sure it integrates well with its
dependencies and the rest of the system.
- Rarely, but if the tests fail at this stage, we cannot merge the code.
- If needed, we may ask you to make some changes. At times, it may not be
you; it may be us who have hit a snag. Please be patient while we work to
fix this.
- Once the internal tests pass, we go ahead and merge the code internally as
well as externally on GitHub.
In a graphical form, the entire lifetime of a PR looks like
![image](https://user-images.githubusercontent.com/323199/229561784-0a2f5509-b731-493f-ad88-bad487688c8d.png)
### Contributor License Agreements

View File

@@ -92,8 +92,8 @@ uphold this code.**
**We use [GitHub issues](https://github.com/tensorflow/tensorflow/issues) for
tracking requests and bugs, please see
[TensorFlow Discuss](https://groups.google.com/a/tensorflow.org/forum/#!forum/discuss)
for general questions and discussion, and please direct specific questions to
[TensorFlow Forum](https://discuss.tensorflow.org/) for general questions and
discussion, and please direct specific questions to
[Stack Overflow](https://stackoverflow.com/questions/tagged/tensorflow).**
The TensorFlow project strives to abide by generally accepted best practices in

View File

@@ -1,3 +1,45 @@
# Release 2.14.0
<INSERT SMALL BLURB ABOUT RELEASE FOCUS AREA AND POTENTIAL TOOLCHAIN CHANGES>
# Breaking Changes
* <DOCUMENT BREAKING CHANGES HERE>
* <THIS SECTION SHOULD CONTAIN API, ABI AND BEHAVIORAL BREAKING CHANGES>
* `tf.Tensor`
* The class hierarchy for `tf.Tensor` has changed, and there are now
explicit `EagerTensor` and `SymbolicTensor` classes for eager and
tf.function respectively. Users who relied on the exact type of Tensor
(e.g. `type(t) == tf.Tensor`) will need to update their code to use
`isinstance(t, tf.Tensor)`. The `tf.is_symbolic_tensor` helper added in
2.13 may be used when it is necessary to determine if a value is
specifically a symbolic tensor.
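For example, assuming an eager tensor under the new class hierarchy:
```python
import tensorflow as tf

t = tf.constant([1.0, 2.0])

# Exact-type checks can break, since eager values are now EagerTensor instances.
type(t) == tf.Tensor        # may now be False for eager tensors

# Recommended check: isinstance covers both eager and symbolic tensors.
isinstance(t, tf.Tensor)    # True

# Added in 2.13: detect specifically symbolic (tf.function/graph) tensors.
tf.is_symbolic_tensor(t)    # False for an eager value
```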
# Known Caveats
* <CAVEATS REGARDING THE RELEASE (BUT NOT BREAKING CHANGES).>
* <ADDING/BUMPING DEPENDENCIES SHOULD GO HERE>
* <KNOWN LACK OF SUPPORT ON SOME PLATFORM, SHOULD GO HERE>
# Major Features and Improvements
* <INSERT MAJOR FEATURE HERE, USING MARKDOWN SYNTAX>
* <IF RELEASE CONTAINS MULTIPLE FEATURES FROM SAME AREA, GROUP THEM TOGETHER>
# Bug Fixes and Other Changes
* `tf.lite`
* Strided_Slice now supports `UINT32`.
* <SIMILAR TO ABOVE SECTION, BUT FOR OTHER IMPORTANT CHANGES / BUG FIXES>
* <IF A CHANGE CLOSES A GITHUB ISSUE, IT SHOULD BE DOCUMENTED HERE>
* <NOTES SHOULD BE GROUPED PER AREA>
# Thanks to our Contributors
This release contains contributions from many people at Google, as well as:
<INSERT>, <NAME>, <HERE>, <USING>, <GITHUB>, <HANDLE>
# Release 2.13.0
## Breaking Changes
@@ -18,6 +60,26 @@
modifying H5 files saved by Keras under a `.keras` extension.
If this breaks you, simply add `save_format="h5"` to your `.save()` call
to revert back to the prior behavior.
* Added `keras.utils.TimedThread` utility to run a timed thread every x
seconds. It can be used to run a threaded function alongside model
training or any other snippet of code.
* In the `keras` PyPI package, accessible symbols are now restricted to
symbols that are intended to be public.
This may affect your code if you were using `import keras` and you used
`keras` functions that were not public APIs, but were accessible in
earlier versions with direct imports. In those cases, please use the
following guideline:
- The API may be available in the public Keras API under a different
name, so make sure to look for it on keras.io or TensorFlow docs
and switch to the public version.
- It could also be a simple Python or TF utility that you could easily
copy over to your own codebase. In those cases, just make it your own!
- If you believe it should definitely be a public Keras API,
please open a feature request in the Keras GitHub repo.
- As a workaround, you could import the same private symbol from
`keras.src`, but keep in mind the `src` namespace is not stable and
those APIs may change or be removed in the future.
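A hypothetical illustration of the `keras.src` workaround above; the exact module path is an assumption, and the private `src` namespace can change without notice:
```python
# Assumed location of a previously import-accessible private symbol; the
# `keras.src` namespace is not stable and may change or disappear.
from keras.src.engine import keras_tensor
```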
* The LMDB kernels have been changed to return an error. This is in preparation
for completely removing them from TensorFlow. The LMDB dependency that these
@@ -40,11 +102,19 @@
clustering.
* Add int16x8 support for the built-in op `exp`
* Add int16x8 support for the built-in op `mirror_pad`
* Add int16x8 support for the built-in ops `space_to_batch_nd` and
`batch_to_space_nd`
* Add 16-bit int type support for the built-in ops `less`, `greater_than`, and
`equal`
* Add 8-bit and 16-bit support for `floor_div` and `floor_mod`.
* Add 16-bit and 32-bit int support for the built-in op `bitcast`.
* Add 8-bit/16-bit/32-bit int/uint support for the built-in op `bitwise_xor`
* Add int16 indices support for the built-in ops `gather` and `gather_nd`.
* Add 8-bit/16-bit/32-bit int/uint support for the built-in op `right_shift`
* Add reference implementation for 16-bit int unquantized `add`.
* Add reference implementation for 16-bit int and 32-bit unsigned int unquantized `mul`.
* `add_op` supports broadcasting up to 6 dimensions.
* Add 16-bit support for `top_k`.
* `tf.keras`
@@ -57,6 +127,8 @@
libraries (like sklearn or pycocotools) into Keras as first-class Keras
metrics.
* Added `tf.keras.optimizers.Lion` optimizer.
* Added `tf.keras.layers.SpectralNormalization` layer wrapper to perform
spectral normalization on the weights of a target layer.
* The `SidecarEvaluatorModelExport` callback has been added to Keras as
`keras.callbacks.SidecarEvaluatorModelExport`. This callback allows for
exporting the model the best-scoring model as evaluated by a
@@ -76,6 +148,16 @@
`tf.keras.__internal__.RaggedKerasTensor` classes. You can use these
classes to do instance type checking and type annotations for
layer/model inputs and outputs.
* All the `tf.keras.dtensor.experimental.optimizers` classes have been
merged with `tf.keras.optimizers`. You can migrate your code to use
`tf.keras.optimizers` directly. The API namespace for
`tf.keras.dtensor.experimental.optimizers` will be removed in future
releases.
* Added support for `class_weight` for 3+ dimensional targets (e.g.
image segmentation masks) in `Model.fit`.
* Added a new loss, `keras.losses.CategoricalFocalCrossentropy`.
* Removed `tf.keras.dtensor.experimental.layout_map_scope()`. You can
use `tf.keras.dtensor.experimental.LayoutMap.scope()` instead.
* `tf.function`:
@@ -94,6 +176,22 @@
`tf.nn.safe_embedding_lookup_sparse`, which enables a simplified and
typically faster lookup procedure.
* `tf.data`
* `tf.data.Dataset.zip` now supports Python-style zipping, i.e.
`Dataset.zip(a, b, c)`.
* `tf.data.Dataset.shuffle` now supports full shuffling. To specify that
data should be fully shuffled, use
`dataset = dataset.shuffle(dataset.cardinality())`. This will load the
full dataset into memory so that it can be shuffled, so make sure to
only use this with datasets of filenames or other small datasets.
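A short sketch of the two `tf.data` additions above:
```python
import tensorflow as tf

a = tf.data.Dataset.range(3)     # 0, 1, 2
b = tf.data.Dataset.range(3, 6)  # 3, 4, 5

# Python-style zipping: datasets no longer need to be wrapped in a tuple.
zipped = tf.data.Dataset.zip(a, b)

# Full shuffle: the whole dataset is buffered in memory, so reserve this for
# datasets of filenames or other small datasets.
shuffled = a.shuffle(a.cardinality())
```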
* `tf.math`
* `tf.nn.top_k` now supports specifying the output index type via parameter
`index_type`. Supported types are `tf.int16`, `tf.int32`
(default), and `tf.int64`.
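For example, assuming the `index_type` parameter behaves as described:
```python
import tensorflow as tf

values, indices = tf.nn.top_k(
    tf.constant([1.0, 3.0, 2.0]), k=2, index_type=tf.int16)
print(indices.dtype)  # tf.int16 instead of the default tf.int32
```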
* `tf.SavedModel`
* Introduce class method
@@ -109,6 +207,13 @@
* <IF A CHANGE CLOSES A GITHUB ISSUE, IT SHOULD BE DOCUMENTED HERE>
* <NOTES SHOULD BE GROUPED PER AREA>
* `tf.Variable`
* Changed resource variables to inherit from `tf.compat.v2.Variable`
instead of `tf.compat.v1.Variable`. Some checks for
`isinstance(v, tf.compat.v1.Variable)` that previously returned True
may now return False.
* `tf.distribute`
* Opened an experimental API,
@@ -124,6 +229,20 @@
* The lists of members of dtensor.Layout and dtensor.Mesh have slightly changed
as part of efforts to consolidate the C++ and Python source
code with pybind11. Most notably, Layout.serialized_string is removed.
* Minor API changes to represent Single Device Layout for non-distributed
Tensors inside DTensor functions. Runtime support will be added soon.
* `tf.experimental.ExtensionType`:
* `tf.experimental.ExtensionType` now supports Python `tuple` as
the type annotation of its fields.
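A minimal sketch of a `tuple`-annotated field, per the note above (the type and field names are illustrative):
```python
import tensorflow as tf

class LabeledBoxes(tf.experimental.ExtensionType):
    boxes: tf.Tensor
    labels: tuple  # a plain Python `tuple` is now accepted as an annotation

lb = LabeledBoxes(boxes=tf.zeros([2, 4]), labels=(3, 7))
```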
* `tf.nest`:
* Deprecated API `tf.nest.is_sequence` has now been deleted.
Please use `tf.nest.is_nested` instead.
* `tf.lite`:
* Add UINT32 support to tfl.pack
## Thanks to our Contributors
@@ -134,217 +253,166 @@ This release contains contributions from many people at Google, as well as:
# Release 2.12.0
# Breaking Changes
* <DOCUMENT BREAKING CHANGES HERE>
* <THIS SECTION SHOULD CONTAIN API, ABI AND BEHAVIORAL BREAKING CHANGES>
### Breaking Changes
* Build, Compilation and Packaging
* Removal of redundant packages: the `tensorflow-gpu` and `tf-nightly-gpu`
packages have been effectively removed and replaced with packages that
direct users to switch to `tensorflow` or `tf-nightly` respectively.
The naming difference was the only difference between the two sets of
packages ever since TensorFlow 2.1, so there is no loss of functionality
or GPU support. See
https://pypi.org/project/tensorflow-gpu for more details.
* Removed redundant packages `tensorflow-gpu` and `tf-nightly-gpu`. These packages were removed and replaced with packages that direct users to switch to `tensorflow` or `tf-nightly` respectively. Since TensorFlow 2.1, the only difference between these two sets of packages was their names, so there is no loss of functionality or GPU support. See https://pypi.org/project/tensorflow-gpu for more details.
* `tf.function`:
* tf.function now uses the Python inspect library directly for parsing
the signature of the Python function it is decorated on.
* This can break certain cases that were previously ignored where the
signature is malformed, e.g.
* Using functools.wraps on a function with different signature
* Using functools.partial with an invalid tf.function input
* tf.function now enforces input parameter names to be valid Python
identifiers. Incompatible names are automatically sanitized similarly to
existing SavedModel signature behavior.
* Parameterless tf.functions are assumed to have an empty input_signature
instead of an undefined one even if the input_signature is unspecified.
* tf.types.experimental.TraceType now requires an additional
`placeholder_value` method to be defined.
* tf.function now traces with placeholder values generated by TraceType
instead of the value itself.
* `tf.function` now uses the Python inspect library directly for parsing the signature of the Python function it is decorated on. This change may break code where the function signature is malformed, but was ignored previously, such as:
* Using `functools.wraps` on a function with different signature
* Using `functools.partial` with an invalid `tf.function` input
* `tf.function` now enforces input parameter names to be valid Python identifiers. Incompatible names are automatically sanitized similarly to existing SavedModel signature behavior.
* Parameterless `tf.function`s are assumed to have an empty `input_signature` instead of an undefined one even if the `input_signature` is unspecified.
* `tf.types.experimental.TraceType` now requires an additional `placeholder_value` method to be defined.
* `tf.function` now traces with placeholder values generated by TraceType instead of the value itself.
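A sketch of the `functools.wraps` case called out above; whether this raises depends on the exact signatures involved:
```python
import functools

import tensorflow as tf

def g(x, y):
    return x + y

@functools.wraps(g)  # copies g's metadata, so the advertised signature
def h(x):            # no longer matches h's actual parameters
    return g(x, x)

# Previously such a malformed signature could be silently ignored; with
# inspect-based parsing, wrapping or tracing it may now fail.
f = tf.function(h)
```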
* `tf.config.experimental.enable_mlir_graph_optimization`:
* Experimental APIs `tf.config.experimental.enable_mlir_graph_optimization` and `tf.config.experimental.disable_mlir_graph_optimization` were removed.
* Experimental API removed.
### Major Features and Improvements
* `tf.config.experimental.disable_mlir_graph_optimization`:
* Experimental API removed.
* `tf.keras`
* Moved all saving-related utilities to a new namespace, `keras.saving`,
i.e. `keras.saving.load_model`, `keras.saving.save_model`,
`keras.saving.custom_object_scope`, `keras.saving.get_custom_objects`,
`keras.saving.register_keras_serializable`,
`keras.saving.get_registered_name` and
`keras.saving.get_registered_object`.
The previous API locations (in `keras.utils` and `keras.models`) will
stay available indefinitely, but we recommend that you update your code
to point to the new API locations.
* Improvements and fixes in Keras loss masking:
* Whether you represent a ragged tensor as a `tf.RaggedTensor` or using
[keras masking](https://www.tensorflow.org/guide/keras/masking_and_padding),
the returned loss values should be identical to each other.
In previous versions Keras may have silently ignored the mask.
* If you use masked losses with Keras the loss values may be different
in TensorFlow `2.12` compared to previous versions.
* In cases where the mask was previously ignored, you will now get
an error if you pass a mask with an incompatible shape.
* `tf.SavedModel`
* Introduce new class `tf.saved_model.experimental.Fingerprint` that
contains the fingerprint of the SavedModel. See the
[SavedModel Fingerprinting RFC](https://github.com/tensorflow/community/pull/415)
for details.
* Introduce API `tf.saved_model.experimental.read_fingerprint(export_dir)`
for reading the fingerprint of a SavedModel.
# Known Caveats
* <CAVEATS REGARDING THE RELEASE (BUT NOT BREAKING CHANGES).>
* <ADDING/BUMPING DEPENDENCIES SHOULD GO HERE>
* <KNOWN LACK OF SUPPORT ON SOME PLATFORM, SHOULD GO HERE>
# Major Features and Improvements
* Support for Python 3.11 has been added.
* Support for Python 3.7 has been removed. We are not releasing any more patches for Python 3.7.
* `tf.lite`:
* Add 16-bit float type support for built-in op `fill`.
* Transpose now supports 6D tensors.
* Float LSTM now supports diagonal recurrent tensors:
https://arxiv.org/abs/1903.08023
* `tf.keras`:
* The new Keras model saving format (`.keras`) is available. You can start
using it via `model.save(f"{fname}.keras", save_format="keras_v3")`. In
the future it will become the default for all files with the `.keras`
extension. This file format targets the Python runtime only and makes
it possible to reload Python objects identical to the saved originals.
The format supports non-numerical state such as vocabulary files and
lookup tables, and it is easy to customize in the case of custom layers
with exotic elements of state (e.g. a FIFOQueue). The format
does not rely on bytecode or pickling, and is safe by default. Note
that as a result, Python `lambdas` are disallowed at loading time. If
you want to use `lambdas`, you can pass `safe_mode=False` to the loading
method (only do this if you trust the source of the model).
* Added a `model.export(filepath)` API to create a lightweight SavedModel
artifact that can be used for inference (e.g. with TF-Serving).
* Added `keras.export.ExportArchive` class for low-level customization of
the process of exporting SavedModel artifacts for inference.
Both ways of exporting models are based on `tf.function` tracing
and produce a TF program composed of TF ops. They are meant primarily
for environments where the TF runtime is available,
but not the Python interpreter, as is typical
for production with TF Serving.
* Added utility `tf.keras.utils.FeatureSpace`, a one-stop shop for
structured data preprocessing and encoding.
* Added `tf.SparseTensor` input support to `tf.keras.layers.Embedding`
layer. The layer now accepts a new boolean argument `sparse`. If
`sparse` is set to True, the layer returns a SparseTensor instead of a
dense Tensor. Defaults to False.
* Added `jit_compile` as a settable property to `tf.keras.Model`.
* Added `synchronized` optional parameter to `layers.BatchNormalization`.
* Added deprecation warning to
`layers.experimental.SyncBatchNormalization` and suggested to use
`layers.BatchNormalization` with `synchronized=True` instead.
* Updated `tf.keras.layers.BatchNormalization` to support masking of the
inputs (`mask` argument) when computing the mean and variance.
* Add `tf.keras.layers.Identity`, a placeholder pass-through layer.
* Add `show_trainable` option to `tf.keras.utils.model_to_dot` to display
layer trainable status in model plots.
* Add ability to save a `tf.keras.utils.FeatureSpace` object, via
`feature_space.save("myfeaturespace.keras")`, and reload it via
`feature_space = tf.keras.models.load_model("myfeaturespace.keras")`.
* Added utility `tf.keras.utils.to_ordinal` to convert class vector to
ordinal regression / classification matrix.
* Float LSTM now supports diagonal recurrent tensors: https://arxiv.org/abs/1903.08023
* `tf.experimental.dtensor`:
* Coordination service now works with
`dtensor.initialize_accelerator_system`, and is enabled by default.
* Add `tf.experimental.dtensor.is_dtensor` to check if a tensor is a
DTensor instance.
* Coordination service now works with `dtensor.initialize_accelerator_system`, and is enabled by default.
* Add `tf.experimental.dtensor.is_dtensor` to check if a tensor is a DTensor instance.
* `tf.data`:
* Added support for alternative checkpointing protocol which makes it
possible to checkpoint the state of the input pipeline without having to
store the contents of internal buffers. The new functionality can be
enabled through the `experimental_symbolic_checkpoint` option of
`tf.data.Options()`.
* Added a new `rerandomize_each_iteration` argument for the
`tf.data.Dataset.random()` operation, which controls whether the
sequence of generated random numbers should be re-randomized every epoch
or not (the default behavior). If `seed` is set and
`rerandomize_each_iteration=True`, the `random()` operation will produce
a different (deterministic) sequence of numbers every epoch.
* Added a new `rerandomize_each_iteration` argument for the
`tf.data.Dataset.sample_from_datasets()` operation, which controls
whether the sequence of generated random numbers used for sampling
should be re-randomized every epoch or not. If `seed` is set and
`rerandomize_each_iteration=True`, the `sample_from_datasets()`
operation will use a different (deterministic) sequence of numbers every
epoch.
* Added a new field, `warm_start`, to
`tf.data.experimental.OptimizationOptions`. If it is set to `True`,
tf.data will start background threads of asynchronous
transformations upon iterator creation (as opposed to upon first call
to `GetNext`). To enable this behavior, set `warm_start=True` in
`tf.data.experimental.OptimizationOptions`. It should be noted that this
possibly improves the latency of the initial 'GetNext' call at the
expense of requiring more memory to hold prefetched elements between
the time of iterator construction and usage.
* Added support for alternative checkpointing protocol which makes it possible to checkpoint the state of the input pipeline without having to store the contents of internal buffers. The new functionality can be enabled through the `experimental_symbolic_checkpoint` option of `tf.data.Options()`.
* Added a new `rerandomize_each_iteration` argument for the `tf.data.Dataset.random()` operation, which controls whether the sequence of generated random numbers should be re-randomized every epoch or not (the default behavior). If `seed` is set and `rerandomize_each_iteration=True`, the `random()` operation will produce a different (deterministic) sequence of numbers every epoch.
* Added a new `rerandomize_each_iteration` argument for the `tf.data.Dataset.sample_from_datasets()` operation, which controls whether the sequence of generated random numbers used for sampling should be re-randomized every epoch or not. If `seed` is set and `rerandomize_each_iteration=True`, the `sample_from_datasets()` operation will use a different (deterministic) sequence of numbers every epoch.
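A brief sketch of symbolic checkpointing and `rerandomize_each_iteration` as described above:
```python
import tensorflow as tf

# Opt in to symbolic checkpointing so internal buffer contents need not be
# stored when checkpointing the input pipeline state.
options = tf.data.Options()
options.experimental_symbolic_checkpoint = True
ds = tf.data.Dataset.range(10).with_options(options)

# With a fixed seed, each epoch yields a different deterministic sequence.
rng = tf.data.Dataset.random(seed=42, rerandomize_each_iteration=True)
```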
* `tf.test`:
* Added `tf.test.experimental.sync_devices`, which is useful for
accurately measuring performance in benchmarks.
* Added `tf.test.experimental.sync_devices`, which is useful for accurately measuring performance in benchmarks.
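For instance, a micro-benchmark might synchronize before stopping the timer, since device ops can be queued asynchronously (a sketch):
```python
import time

import tensorflow as tf

x = tf.random.normal([1024, 1024])
start = time.perf_counter()
y = tf.matmul(x, x)                  # may return before the device finishes
tf.test.experimental.sync_devices()  # block until all pending work completes
elapsed = time.perf_counter() - start
```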
* `tf.experimental.dtensor`:
* Added experimental support to ReduceScatter fuse on GPU (NCCL).
# Bug Fixes and Other Changes
* <SIMILAR TO ABOVE SECTION, BUT FOR OTHER IMPORTANT CHANGES / BUG FIXES>
* <IF A CHANGE CLOSES A GITHUB ISSUE, IT SHOULD BE DOCUMENTED HERE>
* <NOTES SHOULD BE GROUPED PER AREA>
### Bug Fixes and Other Changes
* `tf.SavedModel`:
* Introduced new class `tf.saved_model.experimental.Fingerprint` that contains the fingerprint of the SavedModel. See the [SavedModel Fingerprinting RFC](https://github.com/tensorflow/community/pull/415) for details.
* Introduced API `tf.saved_model.experimental.read_fingerprint(export_dir)` for reading the fingerprint of a SavedModel.
* `tf.random`
* Added non-experimental aliases for `tf.random.split` and
`tf.random.fold_in`; the experimental endpoints are still available,
so no code changes are necessary.
* Added non-experimental aliases for `tf.random.split` and `tf.random.fold_in`; the experimental endpoints are still available, so no code changes are necessary.
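For example, the now non-experimental endpoints for stateless RNG seeds:
```python
import tensorflow as tf

seed = tf.constant([1, 2], dtype=tf.int32)
child_seeds = tf.random.split(seed, num=3)  # shape [3, 2]: derived seeds
folded = tf.random.fold_in(seed, 7)         # deterministically mixes 7 into the seed
```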
* `tf.experimental.ExtensionType`
* Added function `experimental.extension_type.as_dict()`, which converts an
instance of `tf.experimental.ExtensionType` to a `dict` representation.
* Added function `experimental.extension_type.as_dict()`, which converts an instance of `tf.experimental.ExtensionType` to a `dict` representation.
* `stream_executor`
* The top-level `stream_executor` directory has been deleted; users should use
equivalent headers and targets under `compiler/xla/stream_executor`.
* The top-level `stream_executor` directory has been deleted; users should use equivalent headers and targets under `compiler/xla/stream_executor`.
* `tf.nn`
* Added `tf.nn.experimental.general_dropout`, which is similar to
`tf.random.experimental.stateless_dropout` but accepts a custom sampler
function.
* Added `tf.nn.experimental.general_dropout`, which is similar to `tf.random.experimental.stateless_dropout` but accepts a custom sampler function.
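A sketch under the assumption that the sampler receives a shape and dtype and returns uniform values in [0, 1); check the API docs for the exact contract:
```python
import tensorflow as tf

def sampler(shape, dtype):
    # Assumed contract: uniform [0, 1) noise deciding which units to drop.
    return tf.random.stateless_uniform(shape, seed=[1, 2], dtype=dtype)

y = tf.nn.experimental.general_dropout(
    tf.ones([4, 4]), rate=0.5, uniform_sampler=sampler)
```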
* `tf.types.experimental.GenericFunction`
* The `experimental_get_compiler_ir` method supports tf.TensorSpec
compilation arguments.
* The `experimental_get_compiler_ir` method supports tf.TensorSpec compilation arguments.
* `tf.config.experimental.mlir_bridge_rollout`
* Removed enums `MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED` and
`MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED` which are no longer used by
the tf2xla bridge
* Removed enums `MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED` and `MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED`, which are no longer used by the tf2xla bridge.
## Keras
Keras is a framework built on top of TensorFlow. See more details on the Keras [website](https://keras.io/).
### Breaking Changes
# Thanks to our Contributors
`tf.keras`:
* Moved all saving-related utilities to a new namespace, `keras.saving`, for example: `keras.saving.load_model`, `keras.saving.save_model`, `keras.saving.custom_object_scope`, `keras.saving.get_custom_objects`, `keras.saving.register_keras_serializable`, `keras.saving.get_registered_name` and `keras.saving.get_registered_object`. The previous API locations (in `keras.utils` and `keras.models`) will be available indefinitely, but we recommend you update your code to point to the new API locations.
* Improvements and fixes in Keras loss masking:
* Whether you represent a ragged tensor as a `tf.RaggedTensor` or using [keras masking](https://www.tensorflow.org/guide/keras/masking_and_padding), the returned loss values should be identical to each other. In previous versions Keras may have silently ignored the mask.
* If you use masked losses with Keras the loss values may be different in TensorFlow `2.12` compared to previous versions.
* In cases where the mask was previously ignored, you will now get an error if you pass a mask with an incompatible shape.
### Major Features and Improvements
`tf.keras`:
* The new Keras model saving format (`.keras`) is available. You can start using it via `model.save(f"{fname}.keras", save_format="keras_v3")`. In the future it will become the default for all files with the `.keras` extension. This file format targets the Python runtime only and makes it possible to reload Python objects identical to the saved originals. The format supports non-numerical state such as vocabulary files and lookup tables, and it is easy to customize in the case of custom layers with exotic elements of state (e.g. a FIFOQueue). The format does not rely on bytecode or pickling, and is safe by default. Note that as a result, Python `lambdas` are disallowed at loading time. If you want to use `lambdas`, you can pass `safe_mode=False` to the loading method (only do this if you trust the source of the model).
* Added a `model.export(filepath)` API to create a lightweight SavedModel artifact that can be used for inference (e.g. with TF-Serving).
* Added `keras.export.ExportArchive` class for low-level customization of the process of exporting SavedModel artifacts for inference. Both ways of exporting models are based on `tf.function` tracing and produce a TF program composed of TF ops. They are meant primarily for environments where the TF runtime is available, but not the Python interpreter, as is typical for production with TF Serving.
* Added utility `tf.keras.utils.FeatureSpace`, a one-stop shop for structured data preprocessing and encoding.
* Added `tf.SparseTensor` input support to `tf.keras.layers.Embedding` layer. The layer now accepts a new boolean argument `sparse`. If `sparse` is set to True, the layer returns a SparseTensor instead of a dense Tensor. Defaults to False.
* Added `jit_compile` as a settable property to `tf.keras.Model`.
* Added `synchronized` optional parameter to `layers.BatchNormalization`.
* Added a deprecation warning to `layers.experimental.SyncBatchNormalization` and suggested using `layers.BatchNormalization` with `synchronized=True` instead.
* Updated `tf.keras.layers.BatchNormalization` to support masking of the inputs (`mask` argument) when computing the mean and variance.
* Add `tf.keras.layers.Identity`, a placeholder pass-through layer.
* Add `show_trainable` option to `tf.keras.utils.model_to_dot` to display layer trainable status in model plots.
* Add ability to save a `tf.keras.utils.FeatureSpace` object, via `feature_space.save("myfeaturespace.keras")`, and reload it via `feature_space = tf.keras.models.load_model("myfeaturespace.keras")`.
* Added utility `tf.keras.utils.to_ordinal` to convert class vector to ordinal regression / classification matrix.
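A compact sketch of the new saving format and the export APIs listed above:
```python
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])

# New `.keras` (Keras v3) format: safe by default, no bytecode or pickling.
model.save("model.keras", save_format="keras_v3")
restored = tf.keras.models.load_model("model.keras")

# Lightweight SavedModel artifact for inference (e.g. with TF Serving).
model.export("exported_model/")
```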
### Bug Fixes and Other Changes
* N/A
## Security
* Moving forward, TensorFlow will no longer update [TFSAs](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/security). Please refer instead to our [GitHub security advisories](https://github.com/tensorflow/tensorflow/security/advisories), which are attached to [CVEs](https://cve.mitre.org/cve/).
* Fixes an FPE in TFLite in conv kernel [CVE-2023-27579](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-27579)
* Fixes a double free in Fractional(Max/Avg)Pool [CVE-2023-25801](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25801)
* Fixes a null dereference on ParallelConcat with XLA [CVE-2023-25676](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25676)
* Fixes a segfault in Bincount with XLA [CVE-2023-25675](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25675)
* Fixes an NPE in RandomShuffle with XLA enabled [CVE-2023-25674](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25674)
* Fixes an FPE in TensorListSplit with XLA [CVE-2023-25673](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25673)
* Fixes segmentation fault in tfg-translate [CVE-2023-25671](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25671)
* Fixes an NPE in QuantizedMatMulWithBiasAndDequantize [CVE-2023-25670](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25670)
* Fixes an FPE in AvgPoolGrad with XLA [CVE-2023-25669](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25669)
* Fixes a heap out-of-buffer read vulnerability in the QuantizeAndDequantize operation [CVE-2023-25668](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25668)
* Fixes a segfault when opening multiframe gif [CVE-2023-25667](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25667)
* Fixes an NPE in SparseSparseMaximum [CVE-2023-25665](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25665)
* Fixes an FPE in AudioSpectrogram [CVE-2023-25666](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25666)
* Fixes a heap-buffer-overflow in AvgPoolGrad [CVE-2023-25664](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25664)
* Fixes an NPE in TensorArrayConcatV2 [CVE-2023-25663](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25663)
* Fixes an integer overflow in EditDistance [CVE-2023-25662](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25662)
* Fixes a segfault in `tf.raw_ops.Print` [CVE-2023-25660](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25660)
* Fixes an OOB read in DynamicStitch [CVE-2023-25659](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25659)
* Fixes an OOB read in GRUBlockCellGrad [CVE-2023-25658](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25658)
## Thanks to our Contributors
This release contains contributions from many people at Google, as well as:
<INSERT>, <NAME>, <HERE>, <USING>, <GITHUB>, <HANDLE>
103yiran, 8bitmp3, Aakar, Aakar Dwivedi, Abinash Satapathy, Aditya Kane, ag.ramesh, Alexander Grund, Andrei Pikas, andreii, Andrew Goodbody, angerson, Anthony_256, Ashay Rane, Ashiq Imran, Awsaf, Balint Cristian, Banikumar Maiti (Intel Aipg), Ben Barsdell, bhack, cfRod, Chao Chen, chenchongsong, Chris Mc, Daniil Kutz, David Rubinstein, dianjiaogit, dixr, Dongfeng Yu, dongfengy, drah, Eric Kunze, Feiyue Chen, Frederic Bastien, Gauri1 Deshpande, guozhong.zhuang, hDn248, HYChou, ingkarat, James Hilliard, Jason Furmanek, Jaya, Jens Glaser, Jerry Ge, Jiao Dian'S Power Plant, Jie Fu, Jinzhe Zeng, Jukyy, Kaixi Hou, Kanvi Khanna, Karel Ha, karllessard, Koan-Sin Tan, Konstantin Beluchenko, Kulin Seth, Kun Lu, Kyle Gerard Felker, Leopold Cambier, Lianmin Zheng, linlifan, liuyuanqiang, Lukas Geiger, Luke Hutton, Mahmoud Abuzaina, Manas Mohanty, Mateo Fidabel, Maxiwell S. Garcia, Mayank Raunak, mdfaijul, meatybobby, Meenakshi Venkataraman, Michael Holman, Nathan John Sircombe, Nathan Luehr, nitins17, Om Thakkar, Patrice Vignola, Pavani Majety, per1234, Philipp Hack, pollfly, Prianka Liz Kariat, Rahul Batra, rahulbatra85, ratnam.parikh, Rickard Hallerbäck, Roger Iyengar, Rohit Santhanam, Roman Baranchuk, Sachin Muradi, sanadani, Saoirse Stewart, seanshpark, Shawn Wang, shuw, Srinivasan Narayanamoorthy, Stewart Miles, Sunita Nadampalli, SuryanarayanaY, Takahashi Shuuji, Tatwai Chong, Thibaut Goetghebuer-Planchon, tilakrayal, Tirumalesh, TJ, Tony Sung, Trevor Morris, unda, Vertexwahn, Vinila S, William Muir, Xavier Bonaventura, xiang.zhang, Xiao-Yong Jin, yleeeee, Yong Tang, Yuriy Chernyshov, Zhang, Xiangze, zhaozheng09
# Release 2.11.1
**Note**: TensorFlow 2.10 was the last TensorFlow release that supported GPU on native-Windows. Starting with TensorFlow 2.11, you will need to install TensorFlow in WSL2, or install tensorflow-cpu and, optionally, try the TensorFlow-DirectML-Plugin.
* Security vulnerability fixes will no longer be patched to this TensorFlow version. The latest TensorFlow version includes the security vulnerability fixes. You can update to the latest version (recommended) or patch security vulnerabilities yourself by following these [steps](https://github.com/tensorflow/tensorflow#patching-guidelines). You can refer to the [release notes](https://github.com/tensorflow/tensorflow/releases) of the latest TensorFlow version for a list of newly fixed vulnerabilities. If you have any questions, please create a GitHub issue to let us know.
This release also introduces several vulnerability fixes:
* Fixes an FPE in TFLite in conv kernel [CVE-2023-27579](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-27579)
* Fixes a double free in Fractional(Max/Avg)Pool [CVE-2023-25801](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25801)
* Fixes a null dereference on ParallelConcat with XLA [CVE-2023-25676](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25676)
* Fixes a segfault in Bincount with XLA [CVE-2023-25675](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25675)
* Fixes an NPE in RandomShuffle with XLA enabled [CVE-2023-25674](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25674)
* Fixes an FPE in TensorListSplit with XLA [CVE-2023-25673](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25673)
* Fixes segmentation fault in tfg-translate [CVE-2023-25671](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25671)
* Fixes an NPE in QuantizedMatMulWithBiasAndDequantize [CVE-2023-25670](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25670)
* Fixes an FPE in AvgPoolGrad with XLA [CVE-2023-25669](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25669)
* Fixes a heap out-of-buffer read vulnerability in the QuantizeAndDequantize operation [CVE-2023-25668](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25668)
* Fixes a segfault when opening multiframe gif [CVE-2023-25667](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25667)
* Fixes an NPE in SparseSparseMaximum [CVE-2023-25665](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25665)
* Fixes an FPE in AudioSpectrogram [CVE-2023-25666](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25666)
* Fixes a heap-buffer-overflow in AvgPoolGrad [CVE-2023-25664](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25664)
* Fixes an NPE in TensorArrayConcatV2 [CVE-2023-25663](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25663)
* Fixes an integer overflow in EditDistance [CVE-2023-25662](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25662)
* Fixes a segfault in `tf.raw_ops.Print` [CVE-2023-25660](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25660)
* Fixes an OOB read in DynamicStitch [CVE-2023-25659](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25659)
* Fixes an OOB read in GRUBlockCellGrad [CVE-2023-25658](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-25658)
# Release 2.11.0

View File

@@ -279,9 +279,9 @@ For each vulnerability, we try to ingest it as soon as possible, given the size
of the team and the number of reports. Vulnerabilities will, in general, be
batched to be fixed at the same time as a quarterly release.
Past security advisories are listed
Security advisories from 2018 to March 2023 are listed
[here](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/security/README.md).
In the future, we might sunset this list and only use GitHub's Security Advisory
format, to simplify the post-vulnerability-fix process. We credit reporters for
identifying security issues, although we keep your name confidential if you
request it.
From TF 2.13 onwards, we have sunset this list and only use GitHub's Security
Advisory format, to simplify the post-vulnerability-fix process. In both
locations, we credit reporters for identifying security issues, although we keep
your name confidential if you request it.

View File

@@ -32,6 +32,10 @@ load(
"//third_party/mkl:build_defs.bzl",
"if_mkl_ml",
)
load(
"//third_party/mkl_dnn:build_defs.bzl",
"if_onednn_v3",
)
load("@bazel_skylib//:bzl_library.bzl", "bzl_library")
load(
"//tensorflow:tensorflow.default.bzl",
@@ -124,7 +128,7 @@ PACKAGE_STATIC_DEPS = [
"@flatbuffers//:__subpackages__",
"@nccl_archive//:__subpackages__",
"@triton//:__subpackages__",
] + tsl_async_value_deps()
] + tsl_async_value_deps() + if_onednn_v3(["@onednn_v3//:__subpackages__"])
package(
# copybara:uncomment default_applicable_licenses = [":license"],
@@ -1025,8 +1029,10 @@ package_group(
"//third_party/cloud_tpu/inference_converter/...",
"//third_party/py/cloud_ml_autoflow/...",
"//third_party/py/envlogger/...",
"//third_party/py/gldm/...",
"//third_party/py/keras/...",
"//third_party/yggdrasil_decision_forests/...",
"//waymo/ml/cn/...",
],
)
@@ -1144,6 +1150,9 @@ tf_cc_shared_library(
],
"//conditions:default": [
"-Wl,--version-script,$(location //tensorflow:tf_framework_version_script.lds)",
# copybara:uncomment_begin(google-only)
# "-Wl,--undefined-version",
# copybara:uncomment_end(google-only)
],
}),
linkstatic = 1,
@@ -1350,6 +1359,7 @@ tf_cc_shared_library(
"//tensorflow/core/data/service:server_lib",
"//tensorflow/core/debug",
"//tensorflow/core/distributed_runtime:server_lib",
"//tensorflow/core/framework:full_type_util",
"//tensorflow/core/function/runtime_client:runtime_client_cc",
"//tensorflow/core/grappler/clusters:cluster",
"//tensorflow/core/grappler/clusters:single_machine",

View File

@@ -22,6 +22,29 @@ package(
licenses = ["notice"],
)
filegroup(
name = "safe_ptr_hdr",
srcs = ["safe_ptr.h"],
visibility = [
"//tensorflow:internal",
],
)
cc_library(
name = "safe_ptr",
srcs = [
"safe_ptr.cc",
"//tensorflow/c/eager:headers",
],
hdrs = ["safe_ptr.h"],
visibility = [
"//tensorflow:internal",
],
deps = [
":c_api_internal",
],
)
# -----------------------------------------------------------------------------
# Public targets
@@ -62,10 +85,10 @@ filegroup(
"*test*",
],
) + [
"//tensorflow/tsl/c:srcs",
"//tensorflow/tsl/platform:ctstring",
"//tensorflow/cc:srcs_no_runtime",
"//tensorflow/core/distributed_runtime:server_lib.h",
"//tensorflow/tsl/c:srcs",
"//tensorflow/tsl/platform:ctstring",
],
visibility = ["//visibility:public"],
)
@@ -94,14 +117,17 @@ cc_library(
name = "c_api_headers",
hdrs = [
"c_api.h",
"c_api_macros.h",
],
visibility = ["//visibility:public"],
deps = [
":c_api_macros_hdrs",
":tf_attrtype",
":tf_buffer",
":tf_datatype",
":tf_buffer_hdrs",
":tf_datatype_hdrs",
":tf_status_headers",
":tf_tensor_hdrs",
# TODO: Only include tf_tstring_hdrs. Don't expose the implementation of TF_TString to API
# users.
":tf_tstring",
],
)
@ -165,6 +191,14 @@ cc_library(
visibility = ["//visibility:public"],
)
cc_library(
name = "c_api_macros_hdrs",
hdrs = [
"c_api_macros.h",
],
visibility = ["//visibility:public"],
)
cc_library(
name = "c_api_macros",
hdrs = [
@ -195,8 +229,9 @@ tf_cuda_library(
copts = tf_copts(),
visibility = ["//visibility:public"],
deps = [
":c_api_no_xla",
":c_api_internal",
":c_api_macros_hdrs",
":c_api_no_xla",
":tf_attrtype",
":tf_buffer",
":tf_file_statistics",
@ -207,8 +242,8 @@ tf_cuda_library(
"//tensorflow/tsl/c:tsl_status",
] + select({
"//tensorflow:with_xla_support": [
"//tensorflow/compiler/tf2xla:xla_compiler",
"//tensorflow/compiler/jit",
"//tensorflow/compiler/tf2xla:xla_compiler",
],
"//conditions:default": [],
}) + if_tensorrt([
@ -240,9 +275,9 @@ tf_cuda_library(
deps = [
":c_api_internal",
":tf_attrtype",
":tf_datatype",
":tf_buffer",
":tf_buffer_internal",
":tf_datatype",
":tf_status_internal",
] + select({
"//tensorflow:android": [
@ -253,25 +288,25 @@ tf_cuda_library(
":logging",
":tf_status",
":tf_tensor",
"@com_google_absl//absl/strings",
"//tensorflow/c/experimental/filesystem:modular_filesystem",
"//tensorflow/cc/saved_model:loader_lite",
"//tensorflow/cc:grad_ops",
"//tensorflow/cc:gradients",
"//tensorflow/cc:ops",
"//tensorflow/cc:grad_ops",
"//tensorflow/cc:scope_internal",
"//tensorflow/cc:while_loop",
"//tensorflow/cc/saved_model:loader_lite",
"//tensorflow/compiler/mlir/tfr:graph_decompose_pass",
"//tensorflow/compiler/mlir/tfr:node_expansion_pass",
"//tensorflow/core:core_cpu",
"//tensorflow/core:core_cpu_internal",
"//tensorflow/core:framework",
"//tensorflow/core:op_gen_lib",
"//tensorflow/core:protos_all_cc",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
"//tensorflow/core:op_gen_lib",
"//tensorflow/core:protos_all_cc",
"//tensorflow/core/distributed_runtime:server_lib",
"//tensorflow/core/kernels:logging_ops",
"//tensorflow/compiler/mlir/tfr:node_expansion_pass",
"//tensorflow/compiler/mlir/tfr:graph_decompose_pass",
"@com_google_absl//absl/strings",
],
}),
alwayslink = 1,
@ -308,9 +343,10 @@ tf_cuda_library(
"//tensorflow/core/transforms:__subpackages__",
],
deps = [
"//tensorflow/tsl/platform:status",
":c_api_macros_hdrs",
"//tensorflow/tsl/c:tsl_status",
"//tensorflow/tsl/c:tsl_status_internal",
"//tensorflow/tsl/platform:status",
] + select({
"//tensorflow:android": [
"//tensorflow/core:portable_tensorflow_lib_lite", # TODO(annarev): exclude runtime srcs
@ -363,6 +399,7 @@ cc_library(
copts = tf_copts(),
visibility = ["//visibility:public"],
deps = [
":c_api_macros_hdrs",
":tf_status_internal",
"//tensorflow/tsl/c:tsl_status",
] + select({
@ -380,7 +417,8 @@ cc_library(
hdrs = ["tf_status.h"],
visibility = ["//visibility:public"],
deps = [
"//tensorflow/tsl/c:tsl_status",
":c_api_macros_hdrs",
"//tensorflow/tsl/c:tsl_status_headers",
],
)
@ -390,15 +428,15 @@ cc_library(
"tf_tstring.cc",
],
hdrs = [
"c_api_macros.h",
"tf_datatype.h",
"tf_status.h",
"tf_tensor.h",
"tf_tstring.h",
],
copts = tf_copts(),
visibility = ["//visibility:public"],
deps = [
":c_api_macros_hdrs",
":tf_datatype_hdrs",
":tf_status_headers",
":tf_tensor_hdrs",
"//tensorflow/core/platform:status",
"//tensorflow/core/platform:tstring",
"//tensorflow/tsl/c:tsl_status",
@ -426,13 +464,23 @@ cc_library(
}),
)
cc_library(
name = "tf_datatype_hdrs",
hdrs = ["tf_datatype.h"],
deps = [
":c_api_macros_hdrs",
],
)
cc_library(
name = "tf_datatype",
srcs = ["tf_datatype.cc"],
hdrs = ["tf_datatype.h"],
copts = tf_copts(),
visibility = ["//visibility:public"],
deps = select({
deps = [
":c_api_macros_hdrs",
] + select({
"//tensorflow:android": [
"//tensorflow/core:portable_tensorflow_lib_lite", # TODO(annarev): exclude runtime srcs
],
@ -443,6 +491,17 @@ cc_library(
alwayslink = 1,
)
cc_library(
name = "tf_tensor_hdrs",
hdrs = ["tf_tensor.h"],
visibility = ["//visibility:public"],
deps = [
":c_api_macros_hdrs",
":tf_datatype_hdrs",
":tf_status_headers",
],
)
cc_library(
name = "tf_tensor",
srcs = ["tf_tensor.cc"],
@ -493,6 +552,16 @@ tf_cuda_library(
}),
)
cc_library(
name = "tf_buffer_hdrs",
hdrs = [
"tf_buffer.h",
],
deps = [
":c_api_macros_hdrs",
],
)
cc_library(
name = "tf_buffer",
srcs = [
@ -504,6 +573,7 @@ cc_library(
copts = tf_copts(),
visibility = ["//visibility:public"],
deps = [
":c_api_macros_hdrs",
":tf_buffer_internal",
":tf_status",
":tf_tensor_internal",
@ -525,6 +595,7 @@ tf_cuda_library(
"//tensorflow/c:__subpackages__",
],
deps = [
":c_api_macros_hdrs",
":tf_status",
":tf_tensor_internal",
"//tensorflow/core/platform:protobuf",
@ -545,6 +616,7 @@ tf_cuda_library(
deps = [
":c_api",
":c_api_internal",
":c_api_macros_hdrs",
":checkpoint_reader",
":tf_buffer",
":tf_buffer_internal",
@ -635,9 +707,9 @@ tf_cuda_library(
],
}) + [
":c_api_macros",
":tf_file_statistics",
":tf_status",
":tf_status_helper",
":tf_file_statistics",
"//tensorflow/core/platform:env",
"//tensorflow/core/platform:path",
"//tensorflow/core/platform:types",
@ -652,10 +724,11 @@ cc_library(
],
visibility = ["//tensorflow:internal"],
deps = [
":c_api_internal",
":tf_datatype",
":tf_status",
":tf_tensor",
":c_api_headers",
":c_api_macros_hdrs",
":tf_datatype_hdrs",
":tf_status_headers",
":tf_tensor_hdrs",
"//tensorflow/c/experimental/stream_executor:stream_executor_hdrs",
],
)
@ -671,6 +744,7 @@ tf_cuda_library(
copts = tf_copts(),
visibility = ["//visibility:public"],
deps = [
":c_api_macros_hdrs",
":tf_buffer",
":tf_buffer_internal",
":tf_status",
@ -685,12 +759,14 @@ tf_cuda_library(
"//conditions:default": [
":c_api_internal",
":tf_tensor",
"//tensorflow/compiler/xla/stream_executor:stream_executor",
"//tensorflow/c/experimental/stream_executor",
"//tensorflow/c/experimental/stream_executor:stream_executor_internal",
"//tensorflow/compiler/xla/stream_executor",
"//tensorflow/core:framework",
"//tensorflow/core:framework_lite",
"//tensorflow/core:protos_all_cc",
"//tensorflow/c/experimental/stream_executor:stream_executor",
"//tensorflow/c/experimental/stream_executor:stream_executor_internal",
"//tensorflow/tsl/framework:device_id_utils",
"//tensorflow/tsl/platform:statusor",
],
}),
)
@ -699,7 +775,10 @@ cc_library(
name = "kernels_experimental_hdrs",
hdrs = ["kernels_experimental.h"],
visibility = ["//tensorflow:internal"],
deps = [":kernels_hdrs"],
deps = [
":c_api_macros_hdrs",
":kernels_hdrs",
],
)
tf_cuda_library(
@ -709,6 +788,7 @@ tf_cuda_library(
copts = tf_copts(),
visibility = ["//visibility:public"],
deps = [
":c_api_macros_hdrs",
":kernels",
":tf_status_helper",
":tf_status_internal",
@ -739,6 +819,7 @@ tf_cuda_library(
copts = tf_copts(),
visibility = ["//visibility:public"],
deps = [
":c_api_macros_hdrs",
":tf_datatype",
":tf_status",
":tf_status_helper",
@ -758,6 +839,7 @@ cc_library(
hdrs = ["ops.h"],
visibility = ["//tensorflow:internal"],
deps = [
":c_api_macros_hdrs",
":tf_datatype",
":tf_status",
],

View File

@ -19,6 +19,7 @@ limitations under the License.
#include <stddef.h>
#include <stdint.h>
#include "tensorflow/c/c_api_macros.h"
#include "tensorflow/c/tf_attrtype.h"
#include "tensorflow/c/tf_buffer.h"
#include "tensorflow/c/tf_datatype.h"
@ -72,25 +73,6 @@ limitations under the License.
// and the API just provides high-level controls over the number of
// devices of each type.
// Macro to control visibility of exported symbols in the shared library (.so,
// .dylib, .dll).
// This duplicates the TF_EXPORT macro definition in
// tensorflow/core/platform/macros.h in order to keep this .h file independent
// of any other includes.
#ifdef SWIG
#define TF_CAPI_EXPORT
#else
#if defined(_WIN32)
#ifdef TF_COMPILE_LIBRARY
#define TF_CAPI_EXPORT __declspec(dllexport)
#else
#define TF_CAPI_EXPORT __declspec(dllimport)
#endif // TF_COMPILE_LIBRARY
#else
#define TF_CAPI_EXPORT __attribute__((visibility("default")))
#endif // _WIN32
#endif // SWIG
#ifdef __cplusplus
extern "C" {
#endif

View File

@ -190,7 +190,7 @@ const char* TF_GraphDebugString(TF_Graph* graph, size_t* len) {
}
char* TF_FunctionDebugString(TF_Function* func, size_t* len) {
const auto& debug_str = DebugString(func->fdef);
const auto& debug_str = DebugString(func->record->fdef());
*len = debug_str.size();
char* ret = static_cast<char*>(malloc(*len + 1));
memcpy(ret, debug_str.c_str(), *len + 1);

View File

@ -20,6 +20,7 @@ limitations under the License.
#include <stdint.h>
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/c_api_macros.h"
#include "tensorflow/c/eager/c_api.h"
// --------------------------------------------------------------------------
@ -28,25 +29,6 @@ limitations under the License.
// The API here is subject to changes in the future.
// --------------------------------------------------------------------------
// Macro to control visibility of exported symbols in the shared library (.so,
// .dylib, .dll).
// This duplicates the TF_EXPORT macro definition in
// tensorflow/core/platform/macros.h in order to keep this .h file independent
// of any other includes.
#ifdef SWIG
#define TF_CAPI_EXPORT
#else
#if defined(_WIN32)
#ifdef TF_COMPILE_LIBRARY
#define TF_CAPI_EXPORT __declspec(dllexport)
#else
#define TF_CAPI_EXPORT __declspec(dllimport)
#endif // TF_COMPILE_LIBRARY
#else
#define TF_CAPI_EXPORT __attribute__((visibility("default")))
#endif // _WIN32
#endif // SWIG
#ifdef __cplusplus
extern "C" {
#endif

View File

@ -16,11 +16,13 @@ limitations under the License.
#include <algorithm>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include "absl/strings/match.h"
#include "tensorflow/c/c_api_internal.h"
#include "tensorflow/c/tf_buffer_internal.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def.pb.h"
@ -30,6 +32,7 @@ limitations under the License.
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/base64.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/util/debug_data_dumper.h"
using tensorflow::errors::InvalidArgument;
@ -203,23 +206,31 @@ TF_Function* TF_GraphToFunctionWithControlOutputs(
}
// Do the actual function creation.
TF_Function* tf_function = new TF_Function();
DCHECK(append_hash_to_fn_name <= 1);
tensorflow::FunctionDef fdef;
status->status = tensorflow::GraphToFunctionDef(
fn_body->graph, fn_name, append_hash_to_fn_name != 0,
/*set_stateful_from_nodes=*/true,
/*copy_placeholder_attrs_from_nodes=*/true, body_nodes, input_tensors,
output_tensors, output_names_vec, control_output_nodes,
control_output_names_vec, description, &tf_function->fdef);
control_output_names_vec, description, &fdef);
if (TF_GetCode(status) != TF_OK) {
TF_DeleteFunction(tf_function);
return nullptr;
}
// Dump the op creation stacktraces for debugging purposes.
DEBUG_DATA_DUMPER()->DumpOpCreationStackTraces(
fn_name, kDebugGroupOpStacktrace, "initial", &fn_body->graph);
tensorflow::StackTracesMap stack_traces;
for (const Node* n : fn_body->graph.nodes()) {
tf_function->stack_traces[n->name()] = n->GetStackTrace();
stack_traces[n->name()] = n->GetStackTrace();
}
TF_Function* tf_function = new TF_Function();
tf_function->record = new tensorflow::FunctionRecord(
std::move(fdef), std::move(stack_traces), false);
return tf_function;
}
@ -238,7 +249,7 @@ TF_Function* TF_GraphToFunction(const TF_Graph* fn_body, const char* fn_name,
}
const char* TF_FunctionName(TF_Function* func) {
return func->fdef.signature().name().c_str();
return func->record->fdef().signature().name().c_str();
}
void TF_GraphCopyFunction(TF_Graph* g, const TF_Function* func,
@ -249,19 +260,20 @@ void TF_GraphCopyFunction(TF_Graph* g, const TF_Function* func,
return;
}
// TODO(iga): Add AddFunctionDef() and AddGradientDef() methods to graph
// to avoid the extra copy here.
tensorflow::FunctionDefLibrary fdef_lib;
*fdef_lib.add_function() = func->fdef;
if (grad) {
*fdef_lib.add_function() = grad->fdef;
tensorflow::GradientDef* gdef = fdef_lib.add_gradient();
gdef->set_function_name(func->fdef.signature().name());
gdef->set_gradient_func(grad->fdef.signature().name());
}
tensorflow::mutex_lock l(g->mu);
status->status = g->graph.AddFunctionLibrary(fdef_lib);
status->status = g->graph.AddFunctionDef(func->record->fdef(),
func->record->stack_traces());
if (TF_GetCode(status) != TF_OK) return;
if (!grad) return;
status->status = g->graph.AddFunctionDef(grad->record->fdef(),
grad->record->stack_traces());
if (TF_GetCode(status) != TF_OK) return;
tensorflow::GradientDef gdef;
gdef.set_function_name(func->record->fdef().signature().name());
gdef.set_gradient_func(grad->record->fdef().signature().name());
status->status = g->graph.AddGradientDef(std::move(gdef));
}
int TF_GraphNumFunctions(TF_Graph* g) {
@ -279,7 +291,7 @@ int TF_GraphGetFunctions(TF_Graph* g, TF_Function** funcs, int max_func,
const auto len = std::min(max_func, static_cast<int>(lib.function_size()));
for (int i = 0; i < len; ++i) {
TF_Function* func = new TF_Function();
func->fdef = lib.function(i);
func->record = new tensorflow::FunctionRecord(lib.function(i), {}, false);
funcs[i] = func;
}
status->status = ::tensorflow::OkStatus();
@ -288,18 +300,21 @@ int TF_GraphGetFunctions(TF_Graph* g, TF_Function** funcs, int max_func,
void TF_FunctionToFunctionDef(TF_Function* func, TF_Buffer* output_func_def,
TF_Status* status) {
status->status = MessageToBuffer(func->fdef, output_func_def);
status->status = MessageToBuffer(func->record->fdef(), output_func_def);
}
TF_Function* TF_FunctionImportFunctionDef(const void* proto, size_t proto_len,
TF_Status* status) {
TF_Function* func = new TF_Function();
if (!func->fdef.ParseFromArray(proto, proto_len)) {
tensorflow::FunctionDef fdef;
bool success = fdef.ParseFromArray(proto, proto_len);
if (!success) {
status->status = InvalidArgument(
"Invalid FunctionDef given to TF_FunctionImportFunctionDef");
TF_DeleteFunction(func);
return nullptr;
}
TF_Function* func = new TF_Function();
func->record = new tensorflow::FunctionRecord(std::move(fdef), {}, false);
status->status = ::tensorflow::OkStatus();
return func;
}
@ -314,21 +329,37 @@ void TF_FunctionSetAttrValueProto(TF_Function* func, const char* attr_name,
"TF_FunctionSetAttrValueProto");
return;
}
(*func->fdef.mutable_attr())[string(attr_name)] = attr_value;
auto fdef_or = func->record->mutable_fdef();
if (!fdef_or.ok()) {
status->status = fdef_or.status();
return;
}
(*(fdef_or.value()->mutable_attr()))[string(attr_name)] = attr_value;
status->status = ::tensorflow::OkStatus();
}
void TF_FunctionGetAttrValueProto(TF_Function* func, const char* attr_name,
TF_Buffer* output_attr_value,
TF_Status* status) {
const auto& it = func->fdef.attr().find(attr_name);
if (it == func->fdef.attr().end()) {
const auto& it = func->record->fdef().attr().find(attr_name);
if (it == func->record->fdef().attr().end()) {
status->status =
InvalidArgument("Function '", func->fdef.signature().name(),
InvalidArgument("Function '", func->record->fdef().signature().name(),
"' has no attr named '", attr_name, "'.");
return;
}
status->status = MessageToBuffer(it->second, output_attr_value);
}
void TF_DeleteFunction(TF_Function* func) { delete func; }
void TF_DeleteFunction(TF_Function* func) {
if (func == nullptr) {
return;
}
func->record->Unref();
func->record = nullptr;
delete func;
}
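With this change a TF_Function no longer embeds a FunctionDef; it holds a pointer to a ref-counted tensorflow::FunctionRecord, so deleting a handle drops a reference rather than freeing the definition. A minimal, self-contained sketch of that ownership pattern follows; Record and Handle are illustrative stand-ins, not the real FunctionRecord/TF_Function types.

#include <atomic>
#include <cassert>
#include <string>
#include <utility>

class Record {
 public:
  explicit Record(std::string def) : def_(std::move(def)), refs_(1) {}
  void Ref() { refs_.fetch_add(1, std::memory_order_relaxed); }
  void Unref() {
    // The last owner to Unref deletes the record.
    if (refs_.fetch_sub(1, std::memory_order_acq_rel) == 1) delete this;
  }
  const std::string& def() const { return def_; }

 private:
  ~Record() = default;  // Only deletable through Unref().
  std::string def_;
  std::atomic<int> refs_;
};

struct Handle {  // Plays the role of TF_Function.
  Record* record = nullptr;
};

void DeleteHandle(Handle* h) {  // Mirrors the new TF_DeleteFunction logic.
  if (h == nullptr) return;
  h->record->Unref();
  h->record = nullptr;
  delete h;
}

int main() {
  Handle* a = new Handle{new Record("fdef")};
  Handle* b = new Handle{a->record};
  b->record->Ref();  // A second handle shares the same record.
  DeleteHandle(a);   // Record survives: b still holds a reference.
  assert(b->record->def() == "fdef");
  DeleteHandle(b);   // Last Unref frees the record.
  return 0;
}

Sharing a record this way is what lets TFE_ContextGetFunction (below) hand out additional handles to a registered function without copying the FunctionDef.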

View File

@ -17,6 +17,7 @@ limitations under the License.
#include "tensorflow/c/c_api_internal.h"
#include "tensorflow/c/c_test_util.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/lib/hash/hash.h"
@ -1210,6 +1211,25 @@ TEST_F(CApiFunctionTest, OutputOpNotInBody) {
string(TF_Message(s_)));
}
class TestStackTrace : public AbstractStackTrace {
absl::Span<StackFrame const> ToFrames() const override { return frames_; }
StackFrame LastUserFrame() const override { return frames_.back(); }
std::vector<StackFrame> GetUserFrames(int limit) const override {
return frames_;
}
string ToString(const TracePrintingOptions& opts) const override {
auto frame = LastUserFrame();
return absl::StrCat(frame.file_name, ":", frame.line_number, ":",
frame.function_name);
}
std::vector<StackFrame> frames_{
StackFrame({"dummy_file_name", 10, "dummy_function_name"})};
};
void DefineFunction(const char* name, TF_Function** func,
const char* description = nullptr,
bool append_hash = false) {
@ -1221,6 +1241,9 @@ void DefineFunction(const char* name, TF_Function** func,
TF_Operation* feed = Placeholder(func_graph.get(), s.get());
TF_Operation* neg = Neg(feed, func_graph.get(), s.get());
feed->node.SetStackTrace(std::make_shared<TestStackTrace>());
neg->node.SetStackTrace(std::make_shared<TestStackTrace>());
TF_Output inputs[] = {{feed, 0}};
TF_Output outputs[] = {{neg, 0}};
*func = TF_GraphToFunction(func_graph.get(), name, append_hash, -1,
@ -1270,11 +1293,11 @@ TEST_F(CApiFunctionTest, GraphToFunctionDefWithPlaceholderAttr) {
ASSERT_NE(func_, nullptr);
// Verify that FunctionDef has 2 attributes, "v1" and "v2".
ASSERT_EQ(func_->fdef.signature().attr().size(), 2);
EXPECT_EQ(func_->fdef.signature().attr(0).name(), "v1");
EXPECT_EQ(func_->fdef.signature().attr(0).type(), "int");
EXPECT_EQ(func_->fdef.signature().attr(1).name(), "v2");
EXPECT_EQ(func_->fdef.signature().attr(1).type(), "int");
ASSERT_EQ(func_->record->fdef().signature().attr().size(), 2);
EXPECT_EQ(func_->record->fdef().signature().attr(0).name(), "v1");
EXPECT_EQ(func_->record->fdef().signature().attr(0).type(), "int");
EXPECT_EQ(func_->record->fdef().signature().attr(1).name(), "v2");
EXPECT_EQ(func_->record->fdef().signature().attr(1).type(), "int");
}
void NodeWithAttrHelper(TF_Graph* graph, TF_Status* s, const char* name,
@ -1308,14 +1331,65 @@ TEST_F(CApiFunctionTest, GraphToFunctionDefWithArgAttr) {
ASSERT_NE(func_, nullptr);
// Verify that FunctionDef ArgDef has attributes.
ASSERT_EQ(func_->fdef.arg_attr_size(), 1);
auto arg_attrs = func_->fdef.arg_attr().find(0);
ASSERT_NE(arg_attrs, func_->fdef.arg_attr().end());
ASSERT_EQ(func_->record->fdef().arg_attr_size(), 1);
auto arg_attrs = func_->record->fdef().arg_attr().find(0);
ASSERT_NE(arg_attrs, func_->record->fdef().arg_attr().end());
auto iter = arg_attrs->second.attr().find("_test_attr");
ASSERT_NE(iter, arg_attrs->second.attr().end());
EXPECT_EQ(iter->second.s(), "value");
}
TEST_F(CApiFunctionTest, TFGraphToFunctionWithStackTraces) {
DefineFunction(func_name_, &func_);
auto stack_traces = func_->record->stack_traces();
EXPECT_EQ(stack_traces.size(), 4);
EXPECT_EQ(stack_traces["neg"]->ToString({}),
"dummy_file_name:10:dummy_function_name");
EXPECT_EQ(stack_traces["feed"]->ToString({}),
"dummy_file_name:10:dummy_function_name");
}
TEST_F(CApiFunctionTest, TFGraphCopyFunctionWithStackTraces) {
// Define the function and its grad
DefineFunction(func_name_, &func_);
TF_Function* grad_func;
DefineFunction("MyGrad", &grad_func);
// Add func and its gradient to host graph
TF_GraphCopyFunction(host_graph_, func_, grad_func, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
TF_DeleteFunction(grad_func);
const StackTracesMap* func_stack_traces;
const StackTracesMap* grad_stack_traces;
{
mutex_lock l(host_graph_->mu);
auto flib_def = host_graph_->graph.flib_def();
func_stack_traces = flib_def.GetStackTraces(func_name_);
grad_stack_traces = flib_def.GetStackTraces("MyGrad");
}
// Verify that stack traces of func are copied to the graph function library.
ASSERT_NE(func_stack_traces, nullptr);
EXPECT_EQ(func_stack_traces->size(), 4);
EXPECT_EQ(func_stack_traces->at("neg")->ToString({}),
"dummy_file_name:10:dummy_function_name");
EXPECT_EQ(func_stack_traces->at("feed")->ToString({}),
"dummy_file_name:10:dummy_function_name");
// Verify that stack traces of grad_func are copied to the graph function library.
ASSERT_NE(grad_stack_traces, nullptr);
EXPECT_EQ(grad_stack_traces->size(), 4);
EXPECT_EQ(grad_stack_traces->at("neg")->ToString({}),
"dummy_file_name:10:dummy_function_name");
EXPECT_EQ(grad_stack_traces->at("feed")->ToString({}),
"dummy_file_name:10:dummy_function_name");
}
TEST_F(CApiFunctionTest, SetGradientAndRun) {
// Define the function and its grad
DefineFunction(func_name_, &func_);

View File

@ -16,14 +16,14 @@ limitations under the License.
#ifndef TENSORFLOW_C_C_API_INTERNAL_H_
#define TENSORFLOW_C_C_API_INTERNAL_H_
#include "tensorflow/c/c_api.h"
#include <list>
#include <set>
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/c/c_api.h"
// clang-format off
// Required for IS_MOBILE_PLATFORM
#include "tensorflow/core/platform/platform.h"
@ -34,11 +34,12 @@ limitations under the License.
#if !defined(IS_MOBILE_PLATFORM) && !defined(IS_SLIM_BUILD)
#include "tensorflow/core/framework/op_gen_lib.h"
#endif // !defined(IS_MOBILE_PLATFORM) && !defined(IS_SLIM_BUILD)
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
@ -159,8 +160,7 @@ struct TF_DeviceList {
};
struct TF_Function {
tensorflow::FunctionDef fdef;
tensorflow::StackTracesMap stack_traces;
tensorflow::FunctionRecord* record;
};
struct TF_ApiDefMap {

View File

@ -243,7 +243,7 @@ void TestEncodeDecode(int line, const std::vector<string>& data) {
src.flat<tstring>()(i) = data[i];
}
TF_Tensor* dst = TF_TensorFromTensor(src, &status);
ASSERT_TRUE(status.ok()) << status.error_message();
ASSERT_TRUE(status.ok()) << status.message();
// Convert back to a C++ Tensor and ensure we get expected output.
Tensor output;
@ -1435,7 +1435,7 @@ TEST(CAPI, SavedModel) {
ASSERT_TRUE(input_op != nullptr);
Status status;
csession.SetInputs({{input_op, TF_TensorFromTensor(input, &status)}});
ASSERT_TRUE(status.ok()) << status.error_message();
ASSERT_TRUE(status.ok()) << status.message();
const tensorflow::string output_op_name(
tensorflow::ParseTensorName(output_name).first);

View File

@ -42,7 +42,7 @@ CheckpointReader::CheckpointReader(const string& filename, TF_Status* status)
v2_reader_.reset(
new BundleReader(Env::Default(), filename /* prefix to a V2 ckpt */));
if (!v2_reader_->status().ok()) {
Set_TF_Status_from_Status(status, v2_reader_->status());
tsl::Set_TF_Status_from_Status(status, v2_reader_->status());
return;
}
auto result = BuildV2VarMaps();
@ -51,7 +51,7 @@ CheckpointReader::CheckpointReader(const string& filename, TF_Status* status)
} else {
reader_.reset(new TensorSliceReader(filename));
if (!reader_->status().ok()) {
Set_TF_Status_from_Status(status, reader_->status());
tsl::Set_TF_Status_from_Status(status, reader_->status());
return;
}
var_to_shape_map_.reset(
@ -102,7 +102,7 @@ void CheckpointReader::GetTensor(
}
}
if (!status.ok()) {
Set_TF_Status_from_Status(out_status, status);
tsl::Set_TF_Status_from_Status(out_status, status);
}
}

View File

@ -37,20 +37,17 @@ tf_cuda_library(
],
"//conditions:default": [
":immediate_execution_context",
":immediate_execution_distributed_manager",
":immediate_execution_operation",
":immediate_execution_tensor_handle",
":immediate_execution_distributed_manager",
":tfe_context_internal",
":tfe_cancellation_manager_internal",
":tfe_context_internal",
":tfe_executor_internal",
":tfe_monitoring_internal",
":tfe_op_attrs_internal",
":tfe_op_internal",
":tfe_tensor_debug_info_internal",
":tfe_tensorhandle_internal",
"@com_google_absl//absl/algorithm:container",
"@com_google_absl//absl/types:span",
"@com_google_absl//absl/types:variant",
"//tensorflow/c:c_api",
"//tensorflow/c:c_api_internal",
"//tensorflow/c:tf_buffer",
@ -58,6 +55,12 @@ tf_cuda_library(
"//tensorflow/c:tf_status_internal",
"//tensorflow/c:tf_tensor_internal",
"//tensorflow/core:core_cpu",
"//tensorflow/core:core_cpu_internal",
"//tensorflow/core:framework",
"//tensorflow/core:framework_internal",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
"//tensorflow/core:protos_all_cc",
"//tensorflow/core/common_runtime/eager:attr_builder",
"//tensorflow/core/common_runtime/eager:context",
"//tensorflow/core/common_runtime/eager:context_distributed_manager",
@ -65,34 +68,32 @@ tf_cuda_library(
"//tensorflow/core/common_runtime/eager:custom_device",
"//tensorflow/core/common_runtime/eager:eager_executor",
"//tensorflow/core/common_runtime/eager:execute",
"//tensorflow/core/common_runtime/eager:tensor_handle",
"//tensorflow/core/common_runtime/eager:placement_utils",
"//tensorflow/core:core_cpu_internal",
"//tensorflow/core:framework",
"//tensorflow/core:framework_internal",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
"//tensorflow/core:protos_all_cc",
"//tensorflow/core/common_runtime/eager:tensor_handle",
"//tensorflow/core/profiler/lib:traceme",
"@com_google_absl//absl/algorithm:container",
"@com_google_absl//absl/types:span",
"@com_google_absl//absl/types:variant",
],
}) + [
"@com_google_absl//absl/memory",
":abstract_tensor_handle",
"//tensorflow/c:c_api_macros_hdrs",
"//tensorflow/core:gpu_runtime",
"//tensorflow/core/common_runtime/eager:eager_operation",
"//tensorflow/core/distributed_runtime/eager:remote_mgr",
"//tensorflow/core/distributed_runtime:remote_device",
"//tensorflow/core/distributed_runtime:server_lib",
"//tensorflow/core/distributed_runtime:worker_env",
"//tensorflow/core/distributed_runtime:worker_interface",
"//tensorflow/core/distributed_runtime/eager:cluster_function_library_runtime",
"//tensorflow/core/distributed_runtime/eager:eager_client",
"//tensorflow/core/distributed_runtime/rpc/eager:grpc_eager_client",
"//tensorflow/core/distributed_runtime/eager:remote_mgr",
"//tensorflow/core/distributed_runtime/rpc:grpc_channel",
"//tensorflow/core/distributed_runtime/rpc:grpc_server_lib",
"//tensorflow/core/distributed_runtime/rpc:grpc_worker_cache",
"//tensorflow/core/distributed_runtime/rpc:grpc_worker_service",
"//tensorflow/core/distributed_runtime/rpc:rpc_rendezvous_mgr",
"//tensorflow/core/distributed_runtime:remote_device",
"//tensorflow/core/distributed_runtime:server_lib",
"//tensorflow/core/distributed_runtime:worker_env",
"//tensorflow/core/distributed_runtime:worker_interface",
"//tensorflow/core:gpu_runtime",
"//tensorflow/core/distributed_runtime/rpc/eager:grpc_eager_client",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings:str_format",
] + internal_tfrt_deps(),
alwayslink = 1,
@ -541,7 +542,9 @@ cc_library(
cc_library(
name = "tfe_op_attrs_internal",
hdrs = ["tfe_op_attrs_internal.h"],
visibility = ["//visibility:private"],
visibility = [
"//tensorflow:internal",
],
deps = [
":abstract_op_attrs",
"//tensorflow/c:conversion_macros",
@ -836,64 +839,84 @@ tf_cuda_library(
"//tensorflow/core:portable_tensorflow_lib_lite",
],
"//conditions:default": [
":abstract_context",
":abstract_operation",
":abstract_tensor_handle",
":c_api",
":c_api_internal",
":graph_function",
":immediate_execution_context",
":immediate_execution_tensor_handle",
":tfe_context_internal",
":tfe_op_internal",
":tfe_tensorhandle_internal",
":abstract_operation",
":abstract_context",
":abstract_tensor_handle",
":immediate_execution_tensor_handle",
":immediate_execution_context",
"//tensorflow/core/lib/llvm_rtti",
"//tensorflow/c:c_api",
"//tensorflow/c:c_api_internal",
"//tensorflow/c:conversion_macros",
"//tensorflow/core:core_cpu",
"//tensorflow/core/common_runtime/eager:attr_builder",
"//tensorflow/core/common_runtime/eager:context",
"//tensorflow/core/common_runtime/eager:eager_executor",
"//tensorflow/core/common_runtime/eager:eager_operation",
"//tensorflow/core/common_runtime/eager:execute",
"//tensorflow/core/common_runtime/eager:kernel_and_device",
"//tensorflow/core/common_runtime/eager:tensor_handle",
"//tensorflow/core/common_runtime/eager:copy_to_device_node",
"//tensorflow/core:core_cpu_internal",
"//tensorflow/core:framework",
"//tensorflow/core:framework_internal",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
"//tensorflow/core:protos_all_cc",
"//tensorflow/core/common_runtime/eager:attr_builder",
"//tensorflow/core/common_runtime/eager:context",
"//tensorflow/core/common_runtime/eager:copy_to_device_node",
"//tensorflow/core/common_runtime/eager:eager_executor",
"//tensorflow/core/common_runtime/eager:eager_operation",
"//tensorflow/core/common_runtime/eager:execute",
"//tensorflow/core/common_runtime/eager:kernel_and_device",
"//tensorflow/core/common_runtime/eager:tensor_handle",
"//tensorflow/core/lib/llvm_rtti",
"@com_google_absl//absl/types:variant",
"//tensorflow/c:conversion_macros",
],
}) + select({
"//tensorflow:with_xla_support": [
"//tensorflow/compiler/tf2xla:xla_compiler",
"//tensorflow/compiler/jit",
"//tensorflow/compiler/jit:xla_device",
"//tensorflow/compiler/tf2xla:xla_compiler",
],
"//conditions:default": [],
}) + [
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings",
"@com_google_absl//absl/time",
"@com_google_absl//absl/container:flat_hash_map",
"//tensorflow/c:tf_status_helper",
"//tensorflow/core:gpu_runtime",
"//tensorflow/core/distributed_runtime:remote_device",
"//tensorflow/core/distributed_runtime:server_lib",
"//tensorflow/core/distributed_runtime:worker_env",
"//tensorflow/core/distributed_runtime/coordination:coordination_service_error_util",
"//tensorflow/core/distributed_runtime/eager:eager_client",
"//tensorflow/core/distributed_runtime/rpc/eager:grpc_eager_client",
"//tensorflow/core/distributed_runtime/rpc:grpc_channel",
"//tensorflow/core/distributed_runtime/rpc:grpc_server_lib",
"//tensorflow/core/distributed_runtime/rpc:grpc_worker_cache",
"//tensorflow/core/distributed_runtime/rpc:grpc_worker_service",
"//tensorflow/core/distributed_runtime/rpc:rpc_rendezvous_mgr",
"//tensorflow/core/distributed_runtime:remote_device",
"//tensorflow/core/distributed_runtime:server_lib",
"//tensorflow/core/distributed_runtime:worker_env",
"//tensorflow/core:gpu_runtime",
"//tensorflow/core/distributed_runtime/rpc/eager:grpc_eager_client",
"//tensorflow/tsl/distributed_runtime/coordination:coordination_service_agent",
"@com_google_absl//absl/container:flat_hash_map",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings",
"@com_google_absl//absl/time",
],
alwayslink = 1,
)
cc_library(
name = "c_api_experimental_reader",
testonly = True,
srcs = [
"c_api_experimental_reader.cc",
],
hdrs = [
"c_api_experimental_reader.h",
"tfe_monitoring_reader_internal.h",
],
visibility = ["//tensorflow:internal"],
deps = [
":c_api",
"//tensorflow/c:c_api",
"//tensorflow/core/lib/monitoring:cell_reader",
"@com_google_absl//absl/memory",
],
alwayslink = 1,
)
@ -920,6 +943,29 @@ tf_cuda_cc_test(
],
)
tf_cuda_cc_test(
name = "c_api_experimental_reader_test",
size = "small",
srcs = [
"c_api_experimental_reader_test.cc",
],
args = ["--heap_check="],
tags = tf_cuda_tests_tags() + ["nomac"],
deps = [
":c_api",
":c_api_experimental",
":c_api_experimental_reader",
":c_api_test_util",
"//tensorflow/c:c_test_util",
"//tensorflow/core:lib",
"//tensorflow/core:protos_all_cc",
"//tensorflow/core:test",
"//tensorflow/core:test_main",
"//tensorflow/core/platform:status",
"@com_google_absl//absl/strings",
],
)
tf_cuda_cc_test(
name = "c_api_unified_experimental_test",
size = "small",
@ -1009,6 +1055,23 @@ filegroup(
visibility = ["//tensorflow:__subpackages__"],
)
filegroup(
name = "pywrap_headers_monitoring_reader",
srcs = [
"c_api_experimental_reader.h",
"tfe_monitoring_reader_internal.h",
],
visibility = ["//tensorflow:__subpackages__"],
)
filegroup(
name = "headers_monitoring_reader",
srcs = [
"c_api_experimental_reader.h",
],
visibility = ["//tensorflow:__subpackages__"],
)
cc_library(
name = "dlpack",
srcs = ["dlpack.cc"],
@ -1046,6 +1109,9 @@ filegroup(
],
exclude = [
"c_api_experimental.cc",
"c_api_experimental_reader.cc",
"c_api_experimental_reader.h",
"tfe_monitoring_reader_internal.h",
"c_api_unified_experimental.cc",
"c_api_unified_experimental_eager.cc",
"c_api_unified_experimental_graph.cc",

View File

@ -19,6 +19,7 @@ limitations under the License.
#include <cstddef>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
@ -137,14 +138,14 @@ TFE_Context* TFE_NewContext(const TFE_ContextOptions* opts, TF_Status* status) {
std::unique_ptr<tensorflow::DeviceMgr> device_mgr(
new tensorflow::DynamicDeviceMgr(std::move(devices)));
tensorflow::Rendezvous* r =
new tensorflow::IntraProcessRendezvous(device_mgr.get());
auto r = tsl::core::RefCountPtr<tensorflow::IntraProcessRendezvous>(
new tensorflow::IntraProcessRendezvous(device_mgr.get()));
tensorflow::EagerContext* eager_context = new tensorflow::EagerContext(
opts->session_options.options,
static_cast<tensorflow::ContextDevicePlacementPolicy>(
opts->device_placement_policy),
opts->async, device_mgr.release(),
/*device_mgr_owned*/ true, r,
/*device_mgr_owned*/ true, std::move(r),
/*cluster_flr=*/nullptr,
/*collective_executor_mgr=*/nullptr,
/*run_eager_op_as_function=*/opts->run_eager_op_as_function,
@ -931,9 +932,32 @@ void TFE_ContextAddFunctionDef(TFE_Context* ctx,
void TFE_ContextAddFunction(TFE_Context* ctx, TF_Function* function,
TF_Status* status) {
AnnotateEagerRuntimeConstructionContext(function->fdef);
auto fdef_or = function->record->mutable_fdef();
if (!fdef_or.ok()) {
status->status = fdef_or.status();
return;
}
AnnotateEagerRuntimeConstructionContext(*fdef_or.value());
status->status = tensorflow::unwrap(ctx)->AddFunctionDefWithStackTraces(
function->fdef, function->stack_traces);
*fdef_or.value(), function->record->stack_traces());
}
TF_Function* TFE_ContextGetFunction(TFE_Context* ctx, const char* name,
TF_Status* status) {
tensorflow::core::RefCountPtr<tensorflow::FunctionRecord> record =
tensorflow::unwrap(ctx)->FindRecord(name);
if (record == nullptr) {
status->status = tensorflow::errors::NotFound(
"Unable to find Function with name: ", name);
return nullptr;
}
TF_Function* result = new TF_Function();
record->Ref();
result->record = record.get();
return result;
}
void TFE_ContextRemoveFunction(TFE_Context* ctx, const char* name,

View File

@ -21,25 +21,7 @@ limitations under the License.
// stable and can change without notice.
#include "tensorflow/c/c_api.h"
// Macro to control visibility of exported symbols in the shared library (.so,
// .dylib, .dll).
// This duplicates the TF_EXPORT macro definition in
// tensorflow/core/platform/macros.h in order to keep this .h file independent
// of any other includes.
#ifdef SWIG
#define TF_CAPI_EXPORT
#else
#if defined(_WIN32)
#ifdef TF_COMPILE_LIBRARY
#define TF_CAPI_EXPORT __declspec(dllexport)
#else
#define TF_CAPI_EXPORT __declspec(dllimport)
#endif // TF_COMPILE_LIBRARY
#else
#define TF_CAPI_EXPORT __attribute__((visibility("default")))
#endif // _WIN32
#endif // SWIG
#include "tensorflow/c/c_api_macros.h"
#ifdef __cplusplus
extern "C" {

View File

@ -150,7 +150,7 @@ void TestRemoteExecuteChangeServerDef(bool async) {
updated_server_def.set_task_index(1);
tensorflow::Status s = tensorflow::GrpcServer::Create(
updated_server_def, tensorflow::Env::Default(), &worker_server);
ASSERT_TRUE(s.ok()) << s.error_message();
ASSERT_TRUE(s.ok()) << s.message();
ASSERT_TRUE(worker_server->Start().ok());
TFE_ContextSetServerDef(ctx, 0, serialized.data(), serialized.size(), status);

View File

@ -434,6 +434,7 @@ class FunctionErrorInjectionPass : public tensorflow::FunctionOptimizationPass {
tensorflow::Status Run(const std::string& function_name,
const tensorflow::DeviceSet& device_set,
const tensorflow::ConfigProto& config_proto,
absl::string_view xla_compile_device_type,
std::unique_ptr<tensorflow::Graph>* graph,
tensorflow::FunctionLibraryDefinition* flib_def,
std::vector<std::string>* control_ret_node_names,

View File

@ -15,8 +15,10 @@ limitations under the License.
#include "tensorflow/c/eager/c_api_experimental.h"
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/match.h"
#include "absl/time/time.h"
#include "tensorflow/c/c_api.h"
@ -29,6 +31,8 @@ limitations under the License.
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/eager/eager_operation.h"
#include "tensorflow/core/distributed_runtime/coordination/coordination_service_error_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tensorflow/core/lib/monitoring/gauge.h"
#include "tensorflow/core/lib/monitoring/sampler.h"
@ -80,7 +84,7 @@ TFE_MonitoringCounter0* TFE_MonitoringNewCounter0(const char* name,
TF_Status* status,
const char* description) {
auto* result = new TFE_MonitoringCounter0({name, description});
Set_TF_Status_from_Status(status, result->counter->GetStatus());
tsl::Set_TF_Status_from_Status(status, result->counter->GetStatus());
if (!result->counter->GetStatus().ok()) {
delete result;
return nullptr;
@ -103,7 +107,7 @@ TFE_MonitoringCounter1* TFE_MonitoringNewCounter1(const char* name,
const char* description,
const char* label1) {
auto* result = new TFE_MonitoringCounter1({name, description, label1});
Set_TF_Status_from_Status(status, result->counter->GetStatus());
tsl::Set_TF_Status_from_Status(status, result->counter->GetStatus());
if (!result->counter->GetStatus().ok()) {
delete result;
return nullptr;
@ -128,7 +132,7 @@ TFE_MonitoringCounter2* TFE_MonitoringNewCounter2(const char* name,
const char* label2) {
auto* result =
new TFE_MonitoringCounter2({name, description, label1, label2});
Set_TF_Status_from_Status(status, result->counter->GetStatus());
tsl::Set_TF_Status_from_Status(status, result->counter->GetStatus());
if (!result->counter->GetStatus().ok()) {
delete result;
return nullptr;
@ -159,7 +163,7 @@ TFE_MonitoringIntGauge0* TFE_MonitoringNewIntGauge0(const char* name,
TF_Status* status,
const char* description) {
auto* result = new TFE_MonitoringIntGauge0({name, description});
Set_TF_Status_from_Status(status, result->gauge->GetStatus());
tsl::Set_TF_Status_from_Status(status, result->gauge->GetStatus());
if (!result->gauge->GetStatus().ok()) {
delete result;
return nullptr;
@ -182,7 +186,7 @@ TFE_MonitoringIntGauge1* TFE_MonitoringNewIntGauge1(const char* name,
const char* description,
const char* label1) {
auto* result = new TFE_MonitoringIntGauge1({name, description, label1});
Set_TF_Status_from_Status(status, result->gauge->GetStatus());
tsl::Set_TF_Status_from_Status(status, result->gauge->GetStatus());
if (!result->gauge->GetStatus().ok()) {
delete result;
return nullptr;
@ -207,7 +211,7 @@ TFE_MonitoringIntGauge2* TFE_MonitoringNewIntGauge2(const char* name,
const char* label2) {
auto* result =
new TFE_MonitoringIntGauge2({name, description, label1, label2});
Set_TF_Status_from_Status(status, result->gauge->GetStatus());
tsl::Set_TF_Status_from_Status(status, result->gauge->GetStatus());
if (!result->gauge->GetStatus().ok()) {
delete result;
return nullptr;
@ -245,7 +249,7 @@ const void TFE_MonitoringStringGaugeCellValue(
TFE_MonitoringStringGauge0* TFE_MonitoringNewStringGauge0(
const char* name, TF_Status* status, const char* description) {
auto* result = new TFE_MonitoringStringGauge0({name, description});
Set_TF_Status_from_Status(status, result->gauge->GetStatus());
tsl::Set_TF_Status_from_Status(status, result->gauge->GetStatus());
if (!result->gauge->GetStatus().ok()) {
delete result;
return nullptr;
@ -267,7 +271,7 @@ TFE_MonitoringStringGauge1* TFE_MonitoringNewStringGauge1(
const char* name, TF_Status* status, const char* description,
const char* label1) {
auto* result = new TFE_MonitoringStringGauge1({name, description, label1});
Set_TF_Status_from_Status(status, result->gauge->GetStatus());
tsl::Set_TF_Status_from_Status(status, result->gauge->GetStatus());
if (!result->gauge->GetStatus().ok()) {
delete result;
return nullptr;
@ -290,7 +294,7 @@ TFE_MonitoringStringGauge2* TFE_MonitoringNewStringGauge2(
const char* label1, const char* label2) {
auto* result =
new TFE_MonitoringStringGauge2({name, description, label1, label2});
Set_TF_Status_from_Status(status, result->gauge->GetStatus());
tsl::Set_TF_Status_from_Status(status, result->gauge->GetStatus());
if (!result->gauge->GetStatus().ok()) {
delete result;
return nullptr;
@ -313,7 +317,7 @@ TFE_MonitoringStringGauge3* TFE_MonitoringNewStringGauge3(
const char* label1, const char* label2, const char* label3) {
auto* result = new TFE_MonitoringStringGauge3(
{name, description, label1, label2, label3});
Set_TF_Status_from_Status(status, result->gauge->GetStatus());
tsl::Set_TF_Status_from_Status(status, result->gauge->GetStatus());
if (!result->gauge->GetStatus().ok()) {
delete result;
return nullptr;
@ -338,7 +342,7 @@ TFE_MonitoringStringGauge4* TFE_MonitoringNewStringGauge4(
const char* label4) {
auto* result = new TFE_MonitoringStringGauge4(
{name, description, label1, label2, label3, label4});
Set_TF_Status_from_Status(status, result->gauge->GetStatus());
tsl::Set_TF_Status_from_Status(status, result->gauge->GetStatus());
if (!result->gauge->GetStatus().ok()) {
delete result;
return nullptr;
@ -370,7 +374,7 @@ TFE_MonitoringBoolGauge0* TFE_MonitoringNewBoolGauge0(const char* name,
TF_Status* status,
const char* description) {
auto* result = new TFE_MonitoringBoolGauge0({name, description});
Set_TF_Status_from_Status(status, result->gauge->GetStatus());
tsl::Set_TF_Status_from_Status(status, result->gauge->GetStatus());
if (!result->gauge->GetStatus().ok()) {
delete result;
return nullptr;
@ -393,7 +397,7 @@ TFE_MonitoringBoolGauge1* TFE_MonitoringNewBoolGauge1(const char* name,
const char* description,
const char* label1) {
auto* result = new TFE_MonitoringBoolGauge1({name, description, label1});
Set_TF_Status_from_Status(status, result->gauge->GetStatus());
tsl::Set_TF_Status_from_Status(status, result->gauge->GetStatus());
if (!result->gauge->GetStatus().ok()) {
delete result;
return nullptr;
@ -418,7 +422,7 @@ TFE_MonitoringBoolGauge2* TFE_MonitoringNewBoolGauge2(const char* name,
const char* label2) {
auto* result =
new TFE_MonitoringBoolGauge2({name, description, label1, label2});
Set_TF_Status_from_Status(status, result->gauge->GetStatus());
tsl::Set_TF_Status_from_Status(status, result->gauge->GetStatus());
if (!result->gauge->GetStatus().ok()) {
delete result;
return nullptr;
@ -472,7 +476,7 @@ TFE_MonitoringSampler0* TFE_MonitoringNewSampler0(
const char* description) {
auto* result = new TFE_MonitoringSampler0(
{name, buckets->create_buckets(), description});
Set_TF_Status_from_Status(status, result->sampler->GetStatus());
tsl::Set_TF_Status_from_Status(status, result->sampler->GetStatus());
if (!result->sampler->GetStatus().ok()) {
delete result;
return nullptr;
@ -495,7 +499,7 @@ TFE_MonitoringSampler1* TFE_MonitoringNewSampler1(
const char* description, const char* label1) {
auto* result = new TFE_MonitoringSampler1(
{name, buckets->create_buckets(), description, label1});
Set_TF_Status_from_Status(status, result->sampler->GetStatus());
tsl::Set_TF_Status_from_Status(status, result->sampler->GetStatus());
if (!result->sampler->GetStatus().ok()) {
delete result;
return nullptr;
@ -518,7 +522,7 @@ TFE_MonitoringSampler2* TFE_MonitoringNewSampler2(
const char* description, const char* label1, const char* label2) {
auto* result = new TFE_MonitoringSampler2(
{name, buckets->create_buckets(), description, label1, label2});
Set_TF_Status_from_Status(status, result->sampler->GetStatus());
tsl::Set_TF_Status_from_Status(status, result->sampler->GetStatus());
if (!result->sampler->GetStatus().ok()) {
delete result;
return nullptr;
@ -628,6 +632,30 @@ void TFE_ContextGetFunctionDef(TFE_Context* ctx, const char* function_name,
status->status = ::tensorflow::OkStatus();
}
void TFE_ContextGetGraphDebugInfo(TFE_Context* ctx, const char* function_name,
TF_Buffer* buf, TF_Status* status) {
auto function_record = tensorflow::unwrap(ctx)->FindRecord(function_name);
if (function_record == nullptr) {
status->status = tensorflow::errors::NotFound(
"Unable to find function with name: ", function_name);
return;
}
tensorflow::GraphDebugInfo debug_info =
tensorflow::StackTracesMapToGraphDebugInfo(
function_record->stack_traces());
string str = debug_info.SerializeAsString();
void* data = tensorflow::port::Malloc(str.length());
str.copy(static_cast<char*>(data), str.length(), 0);
buf->data = data;
buf->length = str.length();
buf->data_deallocator = [](void* data, size_t length) {
tensorflow::port::Free(data);
};
status->status = ::tensorflow::OkStatus();
}
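A caller-side sketch of decoding the buffer this function fills in, assuming the public TF_Buffer/TF_Status C API and the generated GraphDebugInfo proto; the function name "my_fn" and the helper name are placeholders, and error handling is abbreviated.

#include "tensorflow/c/c_api.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/platform/logging.h"

// Fetches and parses the debug info for a function registered on `ctx`.
tensorflow::GraphDebugInfo GetDebugInfo(TFE_Context* ctx) {
  TF_Status* status = TF_NewStatus();
  TF_Buffer* buf = TF_NewBuffer();
  TFE_ContextGetGraphDebugInfo(ctx, "my_fn", buf, status);
  CHECK_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);

  tensorflow::GraphDebugInfo info;
  CHECK(info.ParseFromArray(buf->data, static_cast<int>(buf->length)));

  TF_DeleteBuffer(buf);  // Runs the data_deallocator installed above.
  TF_DeleteStatus(status);
  return info;
}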
TF_Tensor* TFE_AllocateHostTensor(TFE_Context* ctx, TF_DataType dtype,
const int64_t* dims, int num_dims,
TF_Status* status) {
@ -884,7 +912,7 @@ void TFE_GetTaskStates(TFE_Context* ctx, const TF_Buffer& tasks, void* states,
const auto& result = (*results)[i];
TF_Status s;
TF_SetStatus(&s, static_cast<TF_Code>(result.error_code()),
result.error_message().data());
std::string(result.error_message()).data());
if (TF_GetCode(&s) != TF_Code::TF_OK) {
tensorflow::CoordinationServiceError error;
*error.mutable_source_task() = result.error_payload().source_task();

View File

@ -612,6 +612,17 @@ TF_CAPI_EXPORT extern void TFE_ContextGetFunctionDef(TFE_Context* ctx,
TF_Buffer* buf,
TF_Status* status);
// Get GraphDebugInfo containing stack traces mapping to node names.
TF_CAPI_EXPORT extern void TFE_ContextGetGraphDebugInfo(
TFE_Context* ctx, const char* function_name, TF_Buffer* buf,
TF_Status* status);
// Extracts a TF_Function from the context.
// Must call TF_DeleteFunction on the returned value.
TF_CAPI_EXPORT extern TF_Function* TFE_ContextGetFunction(TFE_Context* ctx,
const char* name,
TF_Status* status);
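A round-trip sketch of the contract described above; it assumes a function named "my_fn" was previously registered on `ctx` (for example via TFE_ContextAddFunction), and the helper name is illustrative.

#include <string>

#include "tensorflow/c/c_api.h"
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/c_api_experimental.h"

// Fetches a registered function, reads its name, and releases the handle.
std::string LookUpFunctionName(TFE_Context* ctx) {
  TF_Status* status = TF_NewStatus();
  TF_Function* fn = TFE_ContextGetFunction(ctx, "my_fn", status);
  std::string name;
  if (TF_GetCode(status) == TF_OK) {
    name = TF_FunctionName(fn);  // Copy before the handle is dropped.
    TF_DeleteFunction(fn);       // Required: releases this handle's ref.
  }
  TF_DeleteStatus(status);
  return name;
}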
// Allocate and return a new Tensor on the host.
//
// The caller must set the Tensor values by writing them to the pointer returned

View File

@ -0,0 +1,42 @@
/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/c/eager/c_api_experimental_reader.h"
#include "tensorflow/c/eager/tfe_monitoring_reader_internal.h"
template <typename... LabelType>
int64_t TFE_MonitoringCounterReader::Read(const LabelType&... labels) {
return counter->Read(labels...);
}
TFE_MonitoringCounterReader* TFE_MonitoringNewCounterReader(const char* name) {
auto* result = new TFE_MonitoringCounterReader(name);
return result;
}
int64_t TFE_MonitoringReadCounter0(TFE_MonitoringCounterReader* cell_reader) {
int64_t result = cell_reader->Read();
return result;
}
int64_t TFE_MonitoringReadCounter1(TFE_MonitoringCounterReader* cell_reader,
const char* label) {
int64_t result = cell_reader->Read(label);
return result;
}

View File

@ -0,0 +1,60 @@
/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_C_EAGER_C_API_EXPERIMENTAL_READER_H_
#define TENSORFLOW_C_EAGER_C_API_EXPERIMENTAL_READER_H_
#include "tensorflow/c/eager/c_api.h"
#ifdef __cplusplus
extern "C" {
#endif
// Test-only exports of the monitoring Cell Reader API, which allows tests to
// read current values from streamz counters defined in other modules.
//
// The code under test will have created streamz counters like this:
// auto* streamz = tensorflow::monitoring::Counter<1>::New("name",
// "description", "label");
// and then incremented that counter for various values of label:
// streamz->GetCell("label-value")->IncrementBy(1);
//
// The test code can then read and test the value of that counter:
//
// auto* reader = TFE_MonitoringNewCounterReader("name");
// test();
// int64_t value = TFE_MonitoringReadCounter1(reader, "label-value");
// Opaque handle to a reader.
typedef struct TFE_MonitoringCounterReader TFE_MonitoringCounterReader;
// Returns a handle to be used for reading values from a streamz counter. The
// counter can have been created with any number of labels.
TF_CAPI_EXPORT extern TFE_MonitoringCounterReader*
TFE_MonitoringNewCounterReader(const char* name);
// Reads the value of a counter that was created with 0 labels.
TF_CAPI_EXPORT extern int64_t TFE_MonitoringReadCounter0(
TFE_MonitoringCounterReader*);
// Reads the value of a specific cell of a counter that was created with 1 label.
TF_CAPI_EXPORT extern int64_t TFE_MonitoringReadCounter1(
TFE_MonitoringCounterReader*, const char* label_value);
#ifdef __cplusplus
} /* end extern "C" */
#endif
#endif // TENSORFLOW_C_EAGER_C_API_EXPERIMENTAL_READER_H_

View File

@ -0,0 +1,86 @@
/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/c/eager/c_api_experimental_reader.h"
#include <cstdint>
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TFE_MonitoringCounter0* CreateCounter0(const char* counter_name);
TFE_MonitoringCounter1* CreateCounter1(const char* counter_name,
const char* label);
void IncrementCounter0(TFE_MonitoringCounter0* counter, int64_t delta = 1);
void IncrementCounter1(TFE_MonitoringCounter1* counter, const char* label,
int64_t delta = 1);
TEST(CAPI, MonitoringCellReader0) {
auto counter_name = "test/counter0";
auto* counter = CreateCounter0(counter_name);
auto* reader = TFE_MonitoringNewCounterReader(counter_name);
IncrementCounter0(counter);
int64_t actual = TFE_MonitoringReadCounter0(reader);
CHECK_EQ(actual, 1);
}
TEST(CAPI, MonitoringCellReader1) {
auto counter_name = "test/counter1";
auto label_name = "test/label";
auto* counter = CreateCounter1(counter_name, label_name);
auto* reader = TFE_MonitoringNewCounterReader(counter_name);
IncrementCounter1(counter, label_name);
int64_t actual = TFE_MonitoringReadCounter1(reader, label_name);
CHECK_EQ(actual, 1);
}
TFE_MonitoringCounter0* CreateCounter0(const char* counter_name) {
TF_Status* status = TF_NewStatus();
auto* counter =
TFE_MonitoringNewCounter0(counter_name, status, "description");
TF_DeleteStatus(status);
return counter;
}
void IncrementCounter0(TFE_MonitoringCounter0* counter, int64_t delta) {
auto* cell = TFE_MonitoringGetCellCounter0(counter);
TFE_MonitoringCounterCellIncrementBy(cell, delta);
}
TFE_MonitoringCounter1* CreateCounter1(const char* counter_name,
const char* label) {
TF_Status* status = TF_NewStatus();
auto* counter =
TFE_MonitoringNewCounter1(counter_name, status, "description", label);
TF_DeleteStatus(status);
return counter;
}
void IncrementCounter1(TFE_MonitoringCounter1* counter, const char* label,
int64_t delta) {
auto* cell = TFE_MonitoringGetCellCounter1(counter, label);
TFE_MonitoringCounterCellIncrementBy(cell, delta);
}
} // namespace
} // namespace tensorflow

View File

@ -15,6 +15,10 @@ limitations under the License.
#include "tensorflow/c/eager/c_api_test_util.h"
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/tf_datatype.h"
@ -434,6 +438,8 @@ tensorflow::ServerDef GetServerDef(const string& job_name, int num_tasks) {
int port = tensorflow::testing::PickUnusedPortOrDie();
job_def->mutable_tasks()->insert(
{i, tensorflow::strings::StrCat("localhost:", port)});
LOG(INFO) << "Picked test port: " << port << " for job: " << job_name
<< ", task: " << i;
}
return server_def;
}

View File

@ -76,7 +76,7 @@ static TracingContext* CreateTracingExecutionContext(const char* fn_name,
if (default_factory) {
return default_factory(fn_name, s);
}
Set_TF_Status_from_Status(
tsl::Set_TF_Status_from_Status(
s, errors::FailedPrecondition("default_factory is nullptr"));
return nullptr;
}
@ -109,7 +109,7 @@ using tensorflow::tracing::TracingOperation;
using tensorflow::tracing::TracingTensorHandle;
void TF_SetTracingImplementation(const char* name, TF_Status* s) {
Set_TF_Status_from_Status(s, SetDefaultTracingEngine(name));
tsl::Set_TF_Status_from_Status(s, SetDefaultTracingEngine(name));
}
// Creates a new TensorFlow function; it is an execution context attached to a
@ -123,12 +123,13 @@ TF_AbstractFunction* TF_FinalizeFunction(TF_ExecutionContext* ctx,
AbstractFunction* func;
TracingContext* tracing_ctx = dyn_cast<TracingContext>(unwrap(ctx));
if (!tracing_ctx) {
Set_TF_Status_from_Status(
tsl::Set_TF_Status_from_Status(
s, tensorflow::errors::InvalidArgument(
"Only TracingContext can be converted into a function."));
return nullptr;
}
Set_TF_Status_from_Status(s, tracing_ctx->Finalize(unwrap(outputs), &func));
tsl::Set_TF_Status_from_Status(s,
tracing_ctx->Finalize(unwrap(outputs), &func));
TF_DeleteExecutionContext(ctx);
return wrap(func);
}
@ -140,7 +141,7 @@ TF_AbstractTensor* TF_AddFunctionParameter(TF_ExecutionContext* func,
TracingTensorHandle* t;
TracingContext* tracing_ctx = dyn_cast<TracingContext>(unwrap(func));
if (!tracing_ctx) {
Set_TF_Status_from_Status(
tsl::Set_TF_Status_from_Status(
s, tensorflow::errors::InvalidArgument(
"TF_AddFunctionParameter must be called on a TracingContext."));
return nullptr;
@ -152,11 +153,11 @@ TF_AbstractTensor* TF_AddFunctionParameter(TF_ExecutionContext* func,
reinterpret_cast<int64_t*>(shape.dim_sizes), shape.num_dims,
&partial_shape);
if (!status.ok()) {
Set_TF_Status_from_Status(s, status);
tsl::Set_TF_Status_from_Status(s, status);
return nullptr;
}
}
Set_TF_Status_from_Status(
tsl::Set_TF_Status_from_Status(
s, tracing_ctx->AddParameter(static_cast<DataType>(dtype), partial_shape,
&t));
return wrap(t);
@ -193,20 +194,21 @@ void TF_OutputListPushBack(TF_OutputList* o, TF_AbstractTensor* tensor,
void TF_AbstractOpSetOpType(TF_AbstractOp* op, const char* const op_type,
TF_Status* s) {
Set_TF_Status_from_Status(s, unwrap(op)->Reset(op_type,
/*raw_device_name=*/nullptr));
tsl::Set_TF_Status_from_Status(
s, unwrap(op)->Reset(op_type,
/*raw_device_name=*/nullptr));
}
void TF_AbstractOpSetOpName(TF_AbstractOp* op, const char* const op_name,
TF_Status* s) {
TracingOperation* tracing_op = dyn_cast<TracingOperation>(unwrap(op));
if (!tracing_op) {
Set_TF_Status_from_Status(
tsl::Set_TF_Status_from_Status(
s, tensorflow::errors::InvalidArgument(
"TF_AbstractOpSetOpName must be called on a TracingOperation."));
return;
}
Set_TF_Status_from_Status(s, tracing_op->SetOpName(op_name));
tsl::Set_TF_Status_from_Status(s, tracing_op->SetOpName(op_name));
}
void TF_AbstractOpSetAttrType(TF_AbstractOp* op, const char* const attr_name,
@ -214,20 +216,20 @@ void TF_AbstractOpSetAttrType(TF_AbstractOp* op, const char* const attr_name,
Status status =
unwrap(op)->SetAttrType(attr_name, static_cast<DataType>(value));
TF_SetStatus(s, static_cast<TF_Code>(status.code()),
status.error_message().c_str());
tsl::NullTerminatedMessage(status));
}
void TF_ExecuteOperation(TF_AbstractOp* op, int num_inputs,
TF_AbstractTensor* const* inputs, TF_OutputList* o,
TF_Status* s) {
for (int i = 0; i < num_inputs; i++) {
Set_TF_Status_from_Status(s, unwrap(op)->AddInput(unwrap(inputs[i])));
tsl::Set_TF_Status_from_Status(s, unwrap(op)->AddInput(unwrap(inputs[i])));
if (TF_GetCode(s) != TF_OK) {
return;
}
}
int num_outputs = unwrap(o)->expected_num_outputs;
Set_TF_Status_from_Status(
tsl::Set_TF_Status_from_Status(
s, unwrap(op)->Execute(
absl::MakeSpan(reinterpret_cast<AbstractTensorHandle**>(
unwrap(o)->outputs.data()),
@ -242,5 +244,6 @@ void TF_DeleteAbstractFunction(TF_AbstractFunction* func) {
void TF_ExecutionContextRegisterFunction(TF_ExecutionContext* ctx,
TF_AbstractFunction* func,
TF_Status* s) {
Set_TF_Status_from_Status(s, unwrap(ctx)->RegisterFunction(unwrap(func)));
tsl::Set_TF_Status_from_Status(s,
unwrap(ctx)->RegisterFunction(unwrap(func)));
}

View File

@ -14,6 +14,7 @@ limitations under the License.
==============================================================================*/
#include <memory>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
@ -204,7 +205,7 @@ class GraphOperation : public TracingOperation {
Status SetAttrType(const char* const attr_name, DataType value) override {
if (!op_) {
return Status(
error::Code::FAILED_PRECONDITION,
absl::StatusCode::kFailedPrecondition,
"op_type and op_name must be specified before specifying attrs.");
}
op_->node_builder.Attr(attr_name, value);
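
This is part of the parallel migration from the proto-generated tensorflow::error::Code enum to absl::StatusCode; the two-argument Status constructor accepts either spelling, so only the enum changes. A before/after sketch:

// Before: proto-generated enum.
//   Status s(error::Code::FAILED_PRECONDITION, "op_type must be set first");
// After: the canonical Abseil status code, which Status is converging on.
Status s(absl::StatusCode::kFailedPrecondition, "op_type must be set first");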
@ -387,7 +388,7 @@ class GraphContext : public TracingContext {
inputs_.size(), inputs_.data(),
graph_outputs.size(), graph_outputs.data(),
nullptr, nullptr, name_.data(), s);
*f = new GraphFunction(std::move(func->fdef));
*f = new GraphFunction(std::move(func->record->fdef()));
TF_DeleteFunction(func);
TF_RETURN_IF_ERROR(StatusFromTF_Status(s));
TF_DeleteStatus(s);

View File

@ -47,7 +47,7 @@ class UnifiedCAPI
TF_StatusPtr status(TF_NewStatus());
TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
Status s = StatusFromTF_Status(status.get());
CHECK_EQ(errors::OK, s.code()) << s.error_message();
CHECK_EQ(errors::OK, s.code()) << s.message();
}
};
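
The test files below apply the third flavor of the Status cleanup: error_message() becomes message(). Since absl::string_view streams directly into LOG/CHECK diagnostics, these call sites only swap the accessor name. Sketch:

Status s = StatusFromTF_Status(status.get());
CHECK_EQ(errors::OK, s.code()) << s.message();  // string_view streams fine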

View File

@ -41,13 +41,13 @@ void CompareNumericalAndManualGradients(
AbstractTensorHandle* numerical_grad_raw;
s = CalcNumericalGrad(ctx, model, inputs, input_index, use_function,
&numerical_grad_raw);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
numerical_grad.reset(numerical_grad_raw);
}
TF_Tensor* numerical_tensor;
s = GetValue(numerical_grad.get(), &numerical_tensor);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
auto num_elem_numerical = TF_TensorElementCount(numerical_tensor);
ASSERT_EQ(num_elem_numerical, num_grad);
@ -90,14 +90,14 @@ class GradientCheckerTest
{
Status s = StatusFromTF_Status(status.get());
CHECK_EQ(errors::OK, s.code()) << s.error_message();
CHECK_EQ(errors::OK, s.code()) << s.message();
}
{
AbstractContext* ctx_raw = nullptr;
Status s =
BuildImmediateExecutionContext(std::get<1>(GetParam()), &ctx_raw);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
ctx_.reset(ctx_raw);
}
@ -122,7 +122,7 @@ TEST_P(GradientCheckerTest, TestMatMul) {
AbstractTensorHandle* A_raw;
Status s = TestTensorHandleWithDims<float, TF_FLOAT>(ctx_.get(), A_vals,
A_dims, 2, &A_raw);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
A.reset(A_raw);
}
float B_vals[] = {.5f, -1.0f, 1.0f, 1.0f};
@ -132,7 +132,7 @@ TEST_P(GradientCheckerTest, TestMatMul) {
AbstractTensorHandle* B_raw;
Status s = TestTensorHandleWithDims<float, TF_FLOAT>(ctx_.get(), B_vals,
B_dims, 2, &B_raw);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
B.reset(B_raw);
}
@ -148,7 +148,7 @@ TEST_P(GradientCheckerTest, TestMul) {
AbstractTensorHandle* x_raw = nullptr;
Status s =
TestScalarTensorHandle<float, TF_FLOAT>(ctx_.get(), 2.0f, &x_raw);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
x.reset(x_raw);
}
@ -157,7 +157,7 @@ TEST_P(GradientCheckerTest, TestMul) {
AbstractTensorHandle* y_raw = nullptr;
Status s =
TestScalarTensorHandle<float, TF_FLOAT>(ctx_.get(), 7.0f, &y_raw);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
y.reset(y_raw);
}

View File

@ -53,7 +53,7 @@ class CppGradients
TF_StatusPtr status(TF_NewStatus());
TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
Status s = StatusFromTF_Status(status.get());
CHECK_EQ(errors::OK, s.code()) << s.error_message();
CHECK_EQ(errors::OK, s.code()) << s.message();
}
};
@ -70,7 +70,7 @@ TEST_P(CppGradients, TestSetAttrString) {
AbstractContext* ctx_raw = nullptr;
Status s =
BuildImmediateExecutionContext(std::get<1>(GetParam()), &ctx_raw);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
ctx.reset(ctx_raw);
}
@ -78,7 +78,7 @@ TEST_P(CppGradients, TestSetAttrString) {
{
AbstractTensorHandle* x_raw = nullptr;
Status s = TestScalarTensorHandle<float, TF_FLOAT>(ctx.get(), 1.0f, &x_raw);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
t.reset(x_raw);
}
@ -86,31 +86,31 @@ TEST_P(CppGradients, TestSetAttrString) {
ForwardOperation forward_op;
Status s = Reset(check_numerics_op.get(), "CheckNumerics",
/*raw_device_name=*/nullptr, &forward_op);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
if (isa<TracingOperation>(check_numerics_op.get())) {
s = dyn_cast<TracingOperation>(check_numerics_op.get())
->SetOpName("check_numerics");
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
}
s = AddInput(check_numerics_op.get(), t.get(), &forward_op);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
string message = "This is the way!";
s = SetAttrString(check_numerics_op.get(), "message", message.data(),
message.length(), &forward_op);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
int num_retvals = 1;
std::vector<AbstractTensorHandle*> outputs(1);
GradientRegistry registry;
s = RegisterGradients(&registry);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
auto tape = std::make_unique<Tape>(/*persistent=*/false);
s = Execute(check_numerics_op.get(), ctx.get(), absl::MakeSpan(outputs),
&num_retvals, &forward_op, tape.get(), registry);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
string read_message;
s = forward_op.attrs.Get("message", &read_message);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
ASSERT_EQ(read_message, message);
}
@ -136,7 +136,7 @@ TEST_P(CppGradients, TestRecordOperationWithNullGradientFunctionRaises) {
AbstractContext* ctx_raw = nullptr;
Status s =
BuildImmediateExecutionContext(std::get<1>(GetParam()), &ctx_raw);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
ctx.reset(ctx_raw);
}
@ -144,7 +144,7 @@ TEST_P(CppGradients, TestRecordOperationWithNullGradientFunctionRaises) {
{
AbstractTensorHandle* x_raw = nullptr;
Status s = TestScalarTensorHandle<float, TF_FLOAT>(ctx.get(), 2.0f, &x_raw);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
x.reset(x_raw);
}
@ -157,7 +157,7 @@ TEST_P(CppGradients, TestRecordOperationWithNullGradientFunctionRaises) {
"Provided null gradient_function for 'Neg'.\nIf the intent is to treat "
"this op as non-differentiable consider using RegisterNotDifferentiable "
"or NotDifferentiableGradientFunction.",
s.error_message());
s.message());
ASSERT_EQ(nullptr, outputs[0]);
}

View File

@ -134,6 +134,10 @@ class ImmediateExecutionContext : public AbstractContext {
// Find and return an added function by its name.
virtual const FunctionDef* FindFunctionDef(const string& name) const = 0;
// Find and return a previously added function record by its name.
virtual core::RefCountPtr<FunctionRecord> FindRecord(
const string& name) const = 0;
// Return the ParsedName of the Host CPU device.
virtual const DeviceNameUtils::ParsedName& HostCPUParsedName() const = 0;
virtual const string& HostCPUName() const = 0;
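
FindRecord complements FindFunctionDef by returning shared ownership of the whole registration record rather than a borrowed FunctionDef pointer; the GraphFunction change above (func->record->fdef()) consumes the same record type. A hedged caller sketch (the function name is illustrative):

core::RefCountPtr<FunctionRecord> record = ctx->FindRecord("my_fn");
if (record != nullptr) {
  const FunctionDef& fdef = record->fdef();  // record keeps the def alive
}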
@ -249,6 +253,7 @@ class ImmediateExecutionContext : public AbstractContext {
int64_t kernel_cache_size;
int64_t device_cache_size;
std::map<std::string, int64_t> func_kernel_cache_entries;
int64_t local_rendezvous_cache_active_size;
};
virtual CacheStats GetCacheStats() = 0;

View File

@ -77,6 +77,7 @@ cc_library(
visibility = ["//tensorflow:internal"],
deps = [
"//tensorflow/c:c_api",
"//tensorflow/c:safe_ptr",
"//tensorflow/c:tf_status_internal",
"//tensorflow/c/eager:c_api",
"//tensorflow/c/eager:c_api_experimental",

View File

@ -211,7 +211,7 @@ int ParallelTensorNumDims(void* data, TF_Status* status) {
const std::vector<int64_t>* shape;
Status s = reinterpret_cast<ParallelTensor*>(data)->Shape(&shape);
if (!s.ok()) {
Set_TF_Status_from_Status(status, s);
tsl::Set_TF_Status_from_Status(status, s);
return -1;
}
return shape->size();
@ -223,7 +223,7 @@ int64_t ParallelTensorDim(void* data, int dim_index, TF_Status* status) {
const std::vector<int64_t>* shape;
Status s = reinterpret_cast<ParallelTensor*>(data)->Shape(&shape);
if (!s.ok()) {
Set_TF_Status_from_Status(status, s);
tsl::Set_TF_Status_from_Status(status, s);
return -1;
}
return (*shape)[dim_index];
@ -234,7 +234,7 @@ TF_Buffer* ParallelTensorSummarize(void* data, TF_Status* status) {
std::string summary;
Status cpp_status = parallel_tensor->SummarizeValue(summary);
if (!cpp_status.ok()) {
Set_TF_Status_from_Status(status, cpp_status);
tsl::Set_TF_Status_from_Status(status, cpp_status);
return nullptr;
}
return TF_NewBufferFromString(summary.data(), summary.size());

View File

@ -368,6 +368,27 @@ void ParallelDevice::StartExecute(TFE_Context* context,
}
}
void ParallelDevice::StartExecute(
TFE_Context* context,
const std::vector<std::vector<TFE_TensorHandle*>>& inputs,
const char* operation_name, const TFE_OpAttrs* attributes,
int expected_max_outputs, CancellationManager& cancellation_manager,
absl::optional<int64_t> step_id) const {
for (int device_index = 0; device_index < underlying_devices_.size();
++device_index) {
DeviceThread* device_thread = device_threads_[device_index].get();
std::vector<TFE_TensorHandle*> device_inputs;
device_inputs.reserve(inputs.size());
for (int input_index = 0; input_index < inputs.size(); ++input_index) {
// Parallel tensors are divided between operations by device.
device_inputs.push_back(inputs[input_index][device_index]);
}
device_thread->StartExecute(
context, operation_name, std::move(device_inputs), attributes,
expected_max_outputs, cancellation_manager, step_id);
}
}
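
The new StartExecute overload takes raw per-device handles laid out as inputs[input_index][device_index]; the loop transposes that layout so each DeviceThread receives the device_index-th component of every input. A hedged usage sketch (tensor names are illustrative):

// Two logical inputs, each with one component per underlying device.
std::vector<std::vector<TFE_TensorHandle*>> inputs = {
    {a_on_dev0, a_on_dev1},   // input 0, components by device
    {b_on_dev0, b_on_dev1}};  // input 1, components by device
parallel_device.StartExecute(context, inputs, "AddV2", attributes,
                             /*expected_max_outputs=*/1, cancellation_manager);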
void ParallelDevice::AsyncWait(TFE_Context* context, TF_Status* status) const {
StatusPtr first_bad_status(nullptr);
@ -486,6 +507,11 @@ std::unique_ptr<ParallelTensor> ParallelTensor::FromTensorHandles(
const ParallelDevice& parallel_device,
std::vector<TensorHandlePtr> components, absl::Span<const int64_t> shape,
TF_Status* status) {
if (components.empty()) {
TF_SetStatus(status, TF_INTERNAL,
"No components are provide for creating a ParallelTensor");
return nullptr;
}
TFE_TensorHandleGetStatus(components[0].get(), status);
if (!status->status.ok()) {
return nullptr;
@ -513,6 +539,11 @@ std::unique_ptr<ParallelTensor> ParallelTensor::FromTensorHandles(
std::unique_ptr<ParallelTensor> ParallelTensor::FromTensorHandles(
const ParallelDevice& parallel_device,
std::vector<TensorHandlePtr> components, TF_Status* status) {
if (components.empty()) {
TF_SetStatus(status, TF_INTERNAL,
"No components are provided for creating a ParallelTensor");
return nullptr;
}
TFE_TensorHandleGetStatus(components[0].get(), status);
if (!status->status.ok()) {
return nullptr;

View File

@ -19,6 +19,7 @@ limitations under the License.
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/types/optional.h"
@ -28,6 +29,7 @@ limitations under the License.
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/eager/tfe_op_internal.h"
#include "tensorflow/c/safe_ptr.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
@ -35,19 +37,7 @@ limitations under the License.
namespace tensorflow {
namespace parallel_device {
// Functor for making unique_ptrs slightly more ergonomic. Using
// decltype(delete_fn) in the unique_ptr's second template argument requires
// passing a function pointer to delete_fn when constructing the unique_ptr.
class TensorHandleDeleter {
public:
void operator()(TFE_TensorHandle* to_delete) const {
TFE_DeleteTensorHandle(to_delete);
}
};
// TODO(b/256016071): Replace this with `Safe_TFE_TensorHandlePtr` when
// `Safe_TFE_TensorHandlePtr` is marked to be compatible on non-prod env.
using TensorHandlePtr = std::unique_ptr<TFE_TensorHandle, TensorHandleDeleter>;
using TensorHandlePtr = tensorflow::Safe_TFE_TensorHandlePtr;
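
The hand-rolled deleter is replaced by the shared Safe_TFE_TensorHandlePtr alias now that it is usable here, resolving the TODO above. For reference, the pattern the removed comment described, as a standalone sketch:

// A stateless functor deleter keeps the unique_ptr pointer-sized and needs
// no constructor argument...
struct HandleDeleter {
  void operator()(TFE_TensorHandle* h) const { TFE_DeleteTensorHandle(h); }
};
using HandlePtr = std::unique_ptr<TFE_TensorHandle, HandleDeleter>;
// ...whereas the decltype form stores a function pointer per object and
// requires one at construction:
using HandlePtrFn =
    std::unique_ptr<TFE_TensorHandle, decltype(&TFE_DeleteTensorHandle)>;
// HandlePtrFn p(raw_handle, &TFE_DeleteTensorHandle);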
class ParallelTensor;
class DeviceThread;
@ -128,6 +118,13 @@ class ParallelDevice {
CancellationManager& cancellation_manager,
std::optional<int64_t> step_id = std::nullopt) const;
void StartExecute(TFE_Context* context,
const std::vector<std::vector<TFE_TensorHandle*>>& inputs,
const char* operation_name, const TFE_OpAttrs* attributes,
int expected_max_outputs,
CancellationManager& cancellation_manager,
std::optional<int64_t> step_id = std::nullopt) const;
// Blocks until the previous `StartExecute` has run `TFE_Execute` on each
// device. If is_async=false (constructor argument) this means the ops have
// run and have results. If is_async=true it means that all of the
@ -206,6 +203,17 @@ class ParallelTensor {
// component device.
Status SummarizeValue(std::string& summary);
std::vector<TensorHandlePtr> release_tensors() { return std::move(tensors_); }
std::vector<TFE_TensorHandle*> tensors() const {
std::vector<TFE_TensorHandle*> result;
result.reserve(tensors_.size());
for (const TensorHandlePtr& tensor : tensors_) {
result.emplace_back(tensor.get());
}
return result;
}
private:
ParallelTensor(const ParallelDevice& device,
std::vector<TensorHandlePtr> tensors,
@ -222,7 +230,7 @@ class ParallelTensor {
dtype_(dtype) {}
const ParallelDevice& device_;
const std::vector<TensorHandlePtr> tensors_;
std::vector<TensorHandlePtr> tensors_;
// Parallel tensors are immutable but compute their shape lazily unless it is
// provided on construction. The optional has a value if the lazy computation
// has been completed or the shape was provided on construction.
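
With tensors_ no longer const, the class now offers two access paths: tensors() hands out non-owning raw handles (feeding the raw-handle StartExecute overload above), while release_tensors() moves ownership out, after which the ParallelTensor should not be used for its components. Sketch:

std::vector<TFE_TensorHandle*> views = parallel_tensor->tensors();  // borrowed
std::vector<TensorHandlePtr> owned = parallel_tensor->release_tensors();  // moved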

View File

@ -14,6 +14,7 @@ limitations under the License.
==============================================================================*/
#include <array>
#include <memory>
#include <string>
#include "tensorflow/c/c_api.h"
@ -37,6 +38,8 @@ tensorflow::ServerDef GetServerDef(const std::string& job_name, int num_tasks) {
int port = tensorflow::testing::PickUnusedPortOrDie();
job_def->mutable_tasks()->insert(
{i, tensorflow::strings::StrCat("localhost", ":", port)});
LOG(INFO) << "Picked test port: " << port << " for job: " << job_name
<< ", task: " << i;
}
return server_def;
}

View File

@ -0,0 +1,34 @@
/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_C_EAGER_TFE_MONITORING_READER_INTERNAL_H_
#define TENSORFLOW_C_EAGER_TFE_MONITORING_READER_INTERNAL_H_
#include <memory>
#include "tensorflow/core/lib/monitoring/cell_reader.h"
struct TFE_MonitoringCounterReader {
explicit TFE_MonitoringCounterReader(const char* name) {
counter = std::make_unique<
::tensorflow::monitoring::testing::CellReader<int64_t>>(name);
}
template <typename... LabelType>
int64_t Read(const LabelType&... labels);
std::unique_ptr<::tensorflow::monitoring::testing::CellReader<int64_t>>
counter;
};
#endif // TENSORFLOW_C_EAGER_TFE_MONITORING_READER_INTERNAL_H_
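
The new internal struct wraps monitoring::testing::CellReader so the C API can observe counter cells in tests; the templated Read is presumably defined out of line to forward its labels to CellReader::Read. A hedged usage sketch (counter name and label are illustrative):

// Assumes a counter "/tensorflow/example/counter" with one string label was
// registered elsewhere.
TFE_MonitoringCounterReader reader("/tensorflow/example/counter");
int64_t value = reader.Read("label_value");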

View File

@ -30,7 +30,7 @@ class UnifiedAPI
TF_StatusPtr status(TF_NewStatus());
TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
Status s = StatusFromTF_Status(status.get());
CHECK_EQ(errors::OK, s.code()) << s.error_message();
CHECK_EQ(errors::OK, s.code()) << s.message();
}
public:
@ -61,7 +61,7 @@ TEST_P(UnifiedAPI, TestTensorShapeScalar) {
AbstractContext* ctx_raw = nullptr;
Status s =
BuildImmediateExecutionContext(std::get<1>(GetParam()), &ctx_raw);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
ctx.reset(ctx_raw);
}
@ -69,7 +69,7 @@ TEST_P(UnifiedAPI, TestTensorShapeScalar) {
{
AbstractTensorHandle* x_raw = nullptr;
Status s = TestScalarTensorHandle<float, TF_FLOAT>(ctx.get(), 2.0f, &x_raw);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
x.reset(x_raw);
}
@ -77,7 +77,7 @@ TEST_P(UnifiedAPI, TestTensorShapeScalar) {
/*inputs=*/{x.get()},
/*outputs=*/{},
/*use_function=*/UseFunction());
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
}
// Checks that inputs[0] is a matrix with shape 2x4.
@ -111,7 +111,7 @@ TEST_P(UnifiedAPI, TestTensorShape2x4) {
AbstractContext* ctx_raw = nullptr;
Status s =
BuildImmediateExecutionContext(std::get<1>(GetParam()), &ctx_raw);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
ctx.reset(ctx_raw);
}
@ -122,7 +122,7 @@ TEST_P(UnifiedAPI, TestTensorShape2x4) {
int64_t dim_sizes[] = {2, 4};
Status s = TestTensorHandleWithDims<float, TF_FLOAT>(ctx.get(), data,
dim_sizes, 2, &x_raw);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
x.reset(x_raw);
}
@ -130,7 +130,7 @@ TEST_P(UnifiedAPI, TestTensorShape2x4) {
/*inputs=*/{x.get()},
/*outputs=*/{},
/*use_function=*/UseFunction());
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
}
TEST_P(UnifiedAPI, TestUnknownShapeTracing) {
@ -148,13 +148,13 @@ TEST_P(UnifiedAPI, TestUnknownShapeTracing) {
PartialTensorShape shape;
Status s = dyn_cast<tracing::TracingContext>(ctx.get())->AddParameter(
DT_FLOAT, shape, &x_raw);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
x.reset(x_raw);
}
PartialTensorShape shape;
Status s = x->Shape(&shape);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
ASSERT_TRUE(shape.unknown_rank());
}
@ -172,16 +172,16 @@ TEST_P(UnifiedAPI, TestPartialShapeTracing) {
PartialTensorShape shape;
int64_t dim_sizes[] = {2, -1};
Status s = PartialTensorShape::MakePartialShape(dim_sizes, 2, &shape);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
s = dyn_cast<tracing::TracingContext>(ctx.get())->AddParameter(
DT_FLOAT, shape, &x_raw);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
x.reset(x_raw);
}
PartialTensorShape shape;
Status s = x->Shape(&shape);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
ASSERT_FALSE(shape.unknown_rank());
ASSERT_EQ(2, shape.dim_size(0));

View File

@ -178,9 +178,9 @@ tf_cuda_cc_test(
"//tensorflow/c/eager:unified_api_testutil",
"//tensorflow/c/experimental/gradients/tape:tape_context",
"//tensorflow/c/experimental/ops:nn_ops",
"//tensorflow/core/platform:tensor_float_32_utils",
"//tensorflow/core:test",
"//tensorflow/core:test_main",
"//tensorflow/core/platform:tensor_float_32_utils",
] + if_libtpu(
if_false = ["//tensorflow/compiler/mlir/tensorflow/c:mlir_c_api_registration"],
if_true = [],
@ -204,9 +204,9 @@ tf_cuda_cc_test(
"//tensorflow/c/eager:unified_api_testutil",
"//tensorflow/c/experimental/gradients/tape:tape_context",
"//tensorflow/c/experimental/ops:math_ops",
"//tensorflow/core/platform:tensor_float_32_utils",
"//tensorflow/core:test",
"//tensorflow/core:test_main",
"//tensorflow/core/platform:tensor_float_32_utils",
] + if_libtpu(
if_false = ["//tensorflow/compiler/mlir/tensorflow/c:mlir_c_api_registration"],
if_true = [],
@ -222,17 +222,17 @@ tf_cuda_cc_test(
args = ["--heap_check="], # TODO(b/174752220): Remove
tags = tf_cuda_tests_tags() + ["no_cuda_asan"], # b/173654156,
deps = [
":grad_test_helper",
":array_grad",
":grad_test_helper",
"//tensorflow/c:tf_status_helper",
"//tensorflow/c/eager:c_api_test_util",
"//tensorflow/c/experimental/gradients/tape:tape_context",
"//tensorflow/c/experimental/ops:array_ops",
"//tensorflow/core/platform:tensor_float_32_utils",
"//tensorflow/core:test",
"//tensorflow/core:test_main",
"//tensorflow/c/eager:c_api_unified_internal",
"//tensorflow/c/eager:unified_api_testutil",
"//tensorflow/c/experimental/gradients/tape:tape_context",
"//tensorflow/c/experimental/ops:array_ops",
"//tensorflow/core:test",
"//tensorflow/core:test_main",
"//tensorflow/core/platform:tensor_float_32_utils",
] + if_libtpu(
if_false = ["//tensorflow/compiler/mlir/tensorflow/c:mlir_c_api_registration"],
if_true = [],

View File

@ -51,13 +51,13 @@ class CppGradients
TF_StatusPtr status(TF_NewStatus());
TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
status_ = StatusFromTF_Status(status.get());
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
{
AbstractContext* ctx_raw = nullptr;
status_ =
BuildImmediateExecutionContext(std::get<1>(GetParam()), &ctx_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
immediate_execution_ctx_.reset(ctx_raw);
}
@ -86,7 +86,7 @@ TEST_P(CppGradients, TestIdentityNGrad) {
AbstractTensorHandle* x1_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 1.0f, &x1_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
x1.reset(x1_raw);
}
@ -95,19 +95,19 @@ TEST_P(CppGradients, TestIdentityNGrad) {
AbstractTensorHandle* x2_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 1.0f, &x2_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
x2.reset(x2_raw);
}
status_ = registry_.Register("IdentityN", IdentityNRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
auto IdentityNGradModel = BuildGradModel(IdentityNModel, registry_);
std::vector<AbstractTensorHandle*> outputs(2);
status_ =
RunModel(IdentityNGradModel, immediate_execution_ctx_.get(),
{x1.get(), x2.get()}, absl::MakeSpan(outputs), UseFunction());
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
EXPECT_EQ(outputs[0], nullptr);
ASSERT_NO_FATAL_FAILURE(CheckTensorValue(outputs[1], {1.0f}, /*dims*/ {},
/*abs_error*/ 0));

View File

@ -38,7 +38,7 @@ class CustomGradientTest
TF_StatusPtr status(TF_NewStatus());
TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
Status s = StatusFromTF_Status(status.get());
CHECK_EQ(errors::OK, s.code()) << s.error_message();
CHECK_EQ(errors::OK, s.code()) << s.message();
}
};
@ -92,7 +92,7 @@ TEST_P(CustomGradientTest, ExpWithPassThroughGrad) {
AbstractContext* ctx_raw = nullptr;
Status s =
BuildImmediateExecutionContext(std::get<1>(GetParam()), &ctx_raw);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
ctx.reset(ctx_raw);
}
@ -100,7 +100,7 @@ TEST_P(CustomGradientTest, ExpWithPassThroughGrad) {
{
AbstractTensorHandle* x_raw = nullptr;
Status s = TestScalarTensorHandle<float, TF_FLOAT>(ctx.get(), 1.0f, &x_raw);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
x.reset(x_raw);
}
@ -113,11 +113,11 @@ TEST_P(CustomGradientTest, ExpWithPassThroughGrad) {
Status s = RunModel(ExpWithPassThroughGrad, ctx.get(), {x.get()},
absl::MakeSpan(outputs),
/*use_function=*/!std::get<2>(GetParam()));
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
TF_Tensor* result_tensor;
s = GetValue(outputs[0], &result_tensor);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
auto result_value = static_cast<float*>(TF_TensorData(result_tensor));
EXPECT_EQ(*result_value, 1.0);
outputs[0]->Unref();

View File

@ -30,7 +30,7 @@ void CompareNumericalAndAutodiffGradients(
std::vector<AbstractTensorHandle*> outputs(num_inputs);
auto s = RunModel(grad_model, ctx, inputs, absl::MakeSpan(outputs),
/*use_function=*/use_function);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
for (int i = 0; i < num_inputs; ++i) {
if (!outputs[i]) continue;
@ -41,18 +41,18 @@ void CompareNumericalAndAutodiffGradients(
s = CalcNumericalGrad(ctx, model, inputs,
/*input_index=*/i, use_function,
&numerical_grad_raw);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
numerical_grad.reset(numerical_grad_raw);
}
TF_Tensor* numerical_tensor;
s = GetValue(numerical_grad.get(), &numerical_tensor);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
auto num_elem_numerical = TF_TensorElementCount(numerical_tensor);
TF_Tensor* analytical_tensor;
s = GetValue(outputs[i], &analytical_tensor);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
auto num_elem_analytical = TF_TensorElementCount(analytical_tensor);
ASSERT_EQ(num_elem_numerical, num_elem_analytical);
@ -79,7 +79,7 @@ void CheckTensorValue(AbstractTensorHandle* t, absl::Span<const float> manuals,
absl::Span<const int64_t> dims, double abs_error) {
TF_Tensor* analytical_tensor;
auto s = GetValue(t, &analytical_tensor);
ASSERT_EQ(errors::OK, s.code()) << s.error_message();
ASSERT_EQ(errors::OK, s.code()) << s.message();
int64_t num_elem_analytical = 1;
auto num_dims_analytical = TF_NumDims(analytical_tensor);

View File

@ -86,13 +86,13 @@ class CppGradients
TF_StatusPtr status(TF_NewStatus());
TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
status_ = StatusFromTF_Status(status.get());
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
{
AbstractContext* ctx_raw = nullptr;
status_ =
BuildImmediateExecutionContext(std::get<1>(GetParam()), &ctx_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
immediate_execution_ctx_.reset(ctx_raw);
}
@ -117,7 +117,7 @@ TEST_P(CppGradients, TestAddGrad) {
AbstractTensorHandle* x_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 2.0f, &x_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
x.reset(x_raw);
}
@ -126,14 +126,14 @@ TEST_P(CppGradients, TestAddGrad) {
AbstractTensorHandle* y_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 2.0f, &y_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
y.reset(y_raw);
}
// TODO(srbs): Rename ops::Add to ops::AddV2 and AddRegister to
// AddV2Registerer.
status_ = registry_.Register("AddV2", AddRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
ASSERT_NO_FATAL_FAILURE(CompareNumericalAndAutodiffGradients(
AddModel, BuildGradModel(AddModel, registry_),
@ -146,12 +146,12 @@ TEST_P(CppGradients, TestExpGrad) {
AbstractTensorHandle* x_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 2.0f, &x_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
x.reset(x_raw);
}
status_ = registry_.Register("Exp", ExpRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
ASSERT_NO_FATAL_FAILURE(CompareNumericalAndAutodiffGradients(
ExpModel, BuildGradModel(ExpModel, registry_),
@ -171,7 +171,7 @@ TEST_P(CppGradients, TestMatMulGrad) {
AbstractTensorHandle* A_raw;
status_ = TestTensorHandleWithDims<float, TF_FLOAT>(
immediate_execution_ctx_.get(), A_vals, A_dims, 2, &A_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
A.reset(A_raw);
}
@ -182,12 +182,12 @@ TEST_P(CppGradients, TestMatMulGrad) {
AbstractTensorHandle* B_raw;
status_ = TestTensorHandleWithDims<float, TF_FLOAT>(
immediate_execution_ctx_.get(), B_vals, B_dims, 2, &B_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
B.reset(B_raw);
}
status_ = registry_.Register("MatMul", MatMulRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
for (bool transpose_a : {false, true}) {
for (bool transpose_b : {false, true}) {
@ -214,7 +214,7 @@ TEST_P(CppGradients, TestMatMulGradManual) {
AbstractTensorHandle* A_raw;
status_ = TestTensorHandleWithDims<float, TF_FLOAT>(
immediate_execution_ctx_.get(), A_vals, A_dims, 2, &A_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
A.reset(A_raw);
}
@ -225,12 +225,12 @@ TEST_P(CppGradients, TestMatMulGradManual) {
AbstractTensorHandle* B_raw;
status_ = TestTensorHandleWithDims<float, TF_FLOAT>(
immediate_execution_ctx_.get(), B_vals, B_dims, 2, &B_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
B.reset(B_raw);
}
status_ = registry_.Register("MatMul", MatMulRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
bool transpose_a_vals[] = {false, false, true, true};
bool transpose_b_vals[] = {false, true, false, true};
@ -259,7 +259,7 @@ TEST_P(CppGradients, TestMatMulGradManual) {
status_ =
RunModel(MatMulGradModel, immediate_execution_ctx_.get(),
{A.get(), B.get()}, absl::MakeSpan(outputs), UseFunction());
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
ASSERT_NO_FATAL_FAILURE(CheckTensorValue(outputs[0], dA_vals[i],
/*dims*/ {3, 3},
/*abs_error*/ 0));
@ -277,12 +277,12 @@ TEST_P(CppGradients, TestSqrtGrad) {
AbstractTensorHandle* x_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 2.0f, &x_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
x.reset(x_raw);
}
status_ = registry_.Register("Sqrt", SqrtRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
ASSERT_NO_FATAL_FAILURE(CompareNumericalAndAutodiffGradients(
SqrtModel, BuildGradModel(SqrtModel, registry_),
@ -295,12 +295,12 @@ TEST_P(CppGradients, TestNegGrad) {
AbstractTensorHandle* x_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 2.0f, &x_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
x.reset(x_raw);
}
status_ = registry_.Register("Neg", NegRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
ASSERT_NO_FATAL_FAILURE(CompareNumericalAndAutodiffGradients(
NegModel, BuildGradModel(NegModel, registry_),
@ -313,7 +313,7 @@ TEST_P(CppGradients, TestSubGrad) {
AbstractTensorHandle* x_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 2.0f, &x_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
x.reset(x_raw);
}
@ -322,12 +322,12 @@ TEST_P(CppGradients, TestSubGrad) {
AbstractTensorHandle* y_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 2.0f, &y_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
y.reset(y_raw);
}
status_ = registry_.Register("Sub", SubRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
ASSERT_NO_FATAL_FAILURE(CompareNumericalAndAutodiffGradients(
SubModel, BuildGradModel(SubModel, registry_),
@ -340,7 +340,7 @@ TEST_P(CppGradients, TestMulGrad) {
AbstractTensorHandle* x_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 2.0f, &x_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
x.reset(x_raw);
}
@ -349,12 +349,12 @@ TEST_P(CppGradients, TestMulGrad) {
AbstractTensorHandle* y_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 2.0f, &y_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
y.reset(y_raw);
}
status_ = registry_.Register("Mul", MulRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
ASSERT_NO_FATAL_FAILURE(CompareNumericalAndAutodiffGradients(
MulModel, BuildGradModel(MulModel, registry_),
@ -367,12 +367,12 @@ TEST_P(CppGradients, TestLog1pGrad) {
AbstractTensorHandle* x_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 2.0f, &x_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
x.reset(x_raw);
}
status_ = registry_.Register("Log1p", Log1pRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
ASSERT_NO_FATAL_FAILURE(CompareNumericalAndAutodiffGradients(
Log1pModel, BuildGradModel(Log1pModel, registry_),
@ -381,7 +381,7 @@ TEST_P(CppGradients, TestLog1pGrad) {
TEST_P(CppGradients, TestDivNoNanGrad) {
status_ = registry_.Register("DivNoNan", DivNoNanRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
auto DivNoNanGradModel = BuildGradModel(DivNoNanModel, registry_);
@ -390,7 +390,7 @@ TEST_P(CppGradients, TestDivNoNanGrad) {
AbstractTensorHandle* x_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 2.0f, &x_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
x.reset(x_raw);
}
@ -399,7 +399,7 @@ TEST_P(CppGradients, TestDivNoNanGrad) {
AbstractTensorHandle* y_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 2.0f, &y_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
y.reset(y_raw);
}
@ -413,14 +413,14 @@ TEST_P(CppGradients, TestDivNoNanGrad) {
AbstractTensorHandle* z_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 0.0f, &z_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
z.reset(z_raw);
}
std::vector<AbstractTensorHandle*> outputs(2);
status_ =
RunModel(DivNoNanGradModel, immediate_execution_ctx_.get(),
{x.get(), z.get()}, absl::MakeSpan(outputs), UseFunction());
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
ASSERT_NO_FATAL_FAILURE(CheckTensorValue(outputs[0], {0.0f}, /*dims*/ {},
/*abs_error*/ 0));
ASSERT_NO_FATAL_FAILURE(CheckTensorValue(outputs[1], {0.0f}, /*dims*/ {},

View File

@ -67,13 +67,13 @@ class CppGradients
TF_StatusPtr status(TF_NewStatus());
TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
status_ = StatusFromTF_Status(status.get());
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
{
AbstractContext* ctx_raw = nullptr;
status_ =
BuildImmediateExecutionContext(std::get<1>(GetParam()), &ctx_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
immediate_execution_ctx_.reset(ctx_raw);
}
@ -94,7 +94,7 @@ class CppGradients
TEST_P(CppGradients, TestReluGrad) {
status_ = registry_.Register("Relu", ReluRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
auto ReluGradModel = BuildGradModel(ReluModel, registry_);
@ -105,7 +105,7 @@ TEST_P(CppGradients, TestReluGrad) {
AbstractTensorHandle* X_raw;
status_ = TestTensorHandleWithDims<float, TF_FLOAT>(
immediate_execution_ctx_.get(), X_vals, X_dims, 2, &X_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
X.reset(X_raw);
}
@ -120,14 +120,14 @@ TEST_P(CppGradients, TestReluGrad) {
AbstractTensorHandle* Y_raw;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 0.0f, &Y_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
Y.reset(Y_raw);
}
std::vector<AbstractTensorHandle*> outputs(1);
status_ = RunModel(ReluGradModel, immediate_execution_ctx_.get(), {Y.get()},
absl::MakeSpan(outputs), UseFunction());
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
ASSERT_NO_FATAL_FAILURE(CheckTensorValue(outputs[0], {0.0f}, /*dims*/ {},
/*abs_error*/ 0));
outputs[0]->Unref();
@ -148,7 +148,7 @@ TEST_P(CppGradients, TestSparseSoftmaxCrossEntropyWithLogitsGrad) {
AbstractTensorHandle* X_raw;
status_ = TestTensorHandleWithDims<float, TF_FLOAT>(
immediate_execution_ctx_.get(), X_vals, X_dims, 2, &X_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
X.reset(X_raw);
}
// Label
@ -159,13 +159,13 @@ TEST_P(CppGradients, TestSparseSoftmaxCrossEntropyWithLogitsGrad) {
AbstractTensorHandle* Y_raw;
status_ = TestTensorHandleWithDims<int32_t, TF_INT32>(
immediate_execution_ctx_.get(), Y_vals, Y_dims, 1, &Y_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
Y.reset(Y_raw);
}
status_ = registry_.Register("SparseSoftmaxCrossEntropyWithLogits",
SparseSoftmaxCrossEntropyWithLogitsRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
ASSERT_NO_FATAL_FAILURE(CompareNumericalAndAutodiffGradients(
SparseSoftmaxCrossEntropyWithLogitsModel,
@ -186,7 +186,7 @@ TEST_P(CppGradients, TestBiasAddGrad) {
AbstractTensorHandle* A_raw;
status_ = TestTensorHandleWithDims<float, TF_FLOAT>(
immediate_execution_ctx_.get(), A_vals, A_dims, 2, &A_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
A.reset(A_raw);
}
// Bias
@ -197,12 +197,12 @@ TEST_P(CppGradients, TestBiasAddGrad) {
AbstractTensorHandle* Bias_raw;
status_ = TestTensorHandleWithDims<float, TF_FLOAT>(
immediate_execution_ctx_.get(), Bias_vals, Bias_dims, 1, &Bias_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
Bias.reset(Bias_raw);
}
status_ = registry_.Register("BiasAdd", BiasAddRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.error_message();
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
ASSERT_NO_FATAL_FAILURE(CompareNumericalAndAutodiffGradients(
BiasAddModel, BuildGradModel(BiasAddModel, registry_),

View File

@ -94,7 +94,7 @@ TEST(Grappler, DeviceTypeNotSet) {
tensorflow::Status status = InitGraphPlugin(plugin_init);
ASSERT_EQ(status.code(), tensorflow::error::FAILED_PRECONDITION);
ASSERT_EQ(
status.error_message(),
status.message(),
"'device_type' field in TP_OptimizerRegistrationParams must be set.");
}
@ -109,7 +109,7 @@ TEST(Grappler, OptimizeFuncNotSet) {
tensorflow::Status status = InitGraphPlugin(plugin_init);
ASSERT_EQ(status.code(), tensorflow::error::FAILED_PRECONDITION);
ASSERT_EQ(status.error_message(),
ASSERT_EQ(status.message(),
"'optimize_func' field in TP_Optimizer must be set.");
}

View File

@ -12,6 +12,7 @@ cc_library(
visibility = ["//visibility:public"],
deps = [
"//tensorflow/c:c_api_headers",
"//tensorflow/c:c_api_macros_hdrs",
"//tensorflow/c:kernels_experimental_hdrs",
"//tensorflow/c:kernels_hdrs",
"//tensorflow/c:tf_buffer_internal",
@ -41,6 +42,7 @@ cc_library(
visibility = ["//visibility:public"],
deps = [
"//tensorflow/c:c_api_headers",
"//tensorflow/c:c_api_macros_hdrs",
"//tensorflow/c:kernels_hdrs",
"//tensorflow/c:tf_buffer_internal",
"//tensorflow/compiler/xla/pjrt/c:pjrt_c_api_hdrs",

View File

@ -56,7 +56,7 @@ void TF_CreatePluginResource(TF_OpKernelContext* ctx,
auto cc_status =
cc_ctx->resource_manager()->Create<tensorflow::PluginResource>(
container_name, plugin_resource_name, cc_resource_ptr);
Set_TF_Status_from_Status(status, cc_status);
tsl::Set_TF_Status_from_Status(status, cc_status);
}
void TF_LookupOrCreatePluginResource(
@ -86,7 +86,7 @@ void TF_LookupOrCreatePluginResource(
} else {
*result_plugin_resource = nullptr;
}
Set_TF_Status_from_Status(status, cc_status);
tsl::Set_TF_Status_from_Status(status, cc_status);
}
// ------------------------- VariableInfo ------------------------------------
@ -113,7 +113,7 @@ TF_VariableInfo* TF_CreateVariableInfoFromContext(TF_OpKernelContext* ctx,
cc_status = tsl::errors::InvalidArgument(
"Trying to obtain resource handle from Input[", index,
"], which is not type DT_RESOURCE.");
Set_TF_Status_from_Status(status, cc_status);
tsl::Set_TF_Status_from_Status(status, cc_status);
return nullptr;
}
const tensorflow::ResourceHandle& handle =
@ -141,20 +141,20 @@ void TF_AllocateTempForVariableInfo(TF_OpKernelContext* ctx,
tsl::Status cc_status;
if (var_info == nullptr) {
cc_status = tsl::errors::InvalidArgument("TF_VariableInfo is NULL.");
Set_TF_Status_from_Status(status, cc_status);
tsl::Set_TF_Status_from_Status(status, cc_status);
return;
}
if (var_info->var_info.var() == nullptr) {
cc_status = tsl::errors::InvalidArgument(
"VariableInfo does not track a resource variable.");
Set_TF_Status_from_Status(status, cc_status);
tsl::Set_TF_Status_from_Status(status, cc_status);
return;
}
cc_status = cc_ctx->allocate_temp(var_info->var_info.var()->tensor()->dtype(),
var_info->var_info.var()->tensor()->shape(),
var_info->var_info.var()->tensor());
Set_TF_Status_from_Status(status, cc_status);
tsl::Set_TF_Status_from_Status(status, cc_status);
}
TF_Tensor* TF_GetTensorFromVariableInfo(TF_VariableInfo* var_info,
@ -162,20 +162,20 @@ TF_Tensor* TF_GetTensorFromVariableInfo(TF_VariableInfo* var_info,
tsl::Status cc_status;
if (var_info == nullptr) {
cc_status = tsl::errors::InvalidArgument("TF_VariableInfo is NULL.");
Set_TF_Status_from_Status(status, cc_status);
tsl::Set_TF_Status_from_Status(status, cc_status);
return nullptr;
}
if (var_info->var_info.var() == nullptr) {
cc_status = tsl::errors::InvalidArgument(
"VariableInfo does not track a resource variable.");
Set_TF_Status_from_Status(status, cc_status);
tsl::Set_TF_Status_from_Status(status, cc_status);
return nullptr;
}
tensorflow::Tensor* tensor = var_info->var_info.var()->tensor();
TF_Tensor* result_tensor =
tensorflow::TF_TensorFromTensor(*tensor, &cc_status);
Set_TF_Status_from_Status(status, cc_status);
tsl::Set_TF_Status_from_Status(status, cc_status);
return result_tensor;
}
@ -323,6 +323,13 @@ void TF_CreatePjRtBuffer(TF_Tensor* c_tensor, PJRT_Buffer* c_buffer,
}
tensorflow::AsyncValueTensor* av_tensor =
tensorflow::AsyncValueTensor::FromTensor(&tensor);
if (av_tensor == nullptr) {
tensorflow::Set_TF_Status_from_Status(
status,
tsl::errors::Internal(
"The tensor to set PjRtBuffer is not an AsyncValueTensor."));
return;
}
av_tensor->SetBuffer(
std::make_unique<xla::PjRtCApiBuffer>(pjrt_c_api_client, c_buffer));
TF_SetStatus(status, TF_OK, "");

View File

@ -17,6 +17,7 @@ limitations under the License.
#define TENSORFLOW_C_EXPERIMENTAL_NEXT_PLUGGABLE_DEVICE_C_API_H_
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/c_api_macros.h"
#include "tensorflow/c/kernels.h"
#include "tensorflow/c/kernels_experimental.h"
#include "tensorflow/c/tf_buffer.h"
@ -26,25 +27,6 @@ limitations under the License.
// C API for device. The API is under active development and eventually
// should allow registering a plugin device with TensorFlow.
// Macro to control visibility of exported symbols in the shared library (.so,
// .dylib, .dll).
// This duplicates the TF_EXPORT macro definition in
// tensorflow/core/platform/macros.h in order to keep this .h file independent
// of any other includes.
#ifdef SWIG
#define TF_CAPI_EXPORT
#else
#if defined(_WIN32)
#ifdef TF_COMPILE_LIBRARY
#define TF_CAPI_EXPORT __declspec(dllexport)
#else
#define TF_CAPI_EXPORT __declspec(dllimport)
#endif // TF_COMPILE_LIBRARY
#else
#define TF_CAPI_EXPORT __attribute__((visibility("default")))
#endif // _WIN32
#endif // SWIG
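
With tensorflow/c/c_api_macros.h now included above, the duplicated TF_CAPI_EXPORT definition can be deleted; exported declarations keep using the macro unchanged. Illustrative usage (the function name is hypothetical):

// TF_CAPI_EXPORT now comes from tensorflow/c/c_api_macros.h.
TF_CAPI_EXPORT extern void TF_ExampleExportedFn(TF_Status* status);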
#ifdef __cplusplus
extern "C" {
#endif

View File

@ -16,7 +16,6 @@ package(
# copybara:uncomment(<g3 only>) "//learning/brain/tfrt/aot:__pkg__",
"//tensorflow/c:__subpackages__",
"//tensorflow/c/experimental/saved_model/internal:__pkg__",
"//tensorflow/cc/experimental/libtf:__pkg__",
"//tensorflow/core:__subpackages__",
],
licenses = ["notice"],

View File

@ -96,7 +96,7 @@ TEST_F(RestoreOpsTest, BadCheckpointPrefixShouldFail) {
Status status = internal::SingleRestore(
context(), CheckpointPrefix("unknown_bad_checkpoint_prefix"),
"x/.ATTRIBUTES/VARIABLE_VALUE", DT_FLOAT, &x_handle);
EXPECT_FALSE(status.ok()) << status.error_message();
EXPECT_FALSE(status.ok()) << status.message();
}
TEST_F(RestoreOpsTest, BadCheckpointKeyShouldFail) {
@ -104,7 +104,7 @@ TEST_F(RestoreOpsTest, BadCheckpointKeyShouldFail) {
Status status = internal::SingleRestore(
context(), CheckpointPrefix("VarsAndArithmeticObjectGraph"),
"bad_checkpoint_key", DT_FLOAT, &x_handle);
EXPECT_FALSE(status.ok()) << status.error_message();
EXPECT_FALSE(status.ok()) << status.message();
}
} // namespace

View File

@ -41,7 +41,7 @@ FlatTensorFunction::~FlatTensorFunction() {
Status status = ctx_->RemoveFunction(name_);
if (!status.ok()) {
LOG(ERROR) << "Failed to remove functiondef " << name_ << ". "
<< status.error_message();
<< status.message();
}
}

View File

@ -71,7 +71,7 @@ RestoredResource::~RestoredResource() {
if (!status.ok()) {
LOG(WARNING)
<< "Failed executing destroy_resource function for RestoredResource: "
<< status.error_message();
<< status.message();
}
}
}

View File

@ -126,7 +126,7 @@ TEST_P(SavedVariableLoadingTest, AssignAndReadVariableSuccesful) {
ImmediateTensorHandlePtr expected_handle =
testing::CreateTensorHandle(context(), dtype, shape_vector, 42);
AbstractTensorPtr expected_tensor(expected_handle->Resolve(&status));
TF_EXPECT_OK(status) << status.error_message();
TF_EXPECT_OK(status) << status.message();
// Assign the tensorhandle to the variable.
TF_EXPECT_OK(var->Assign(expected_handle.get()));
@ -135,7 +135,7 @@ TEST_P(SavedVariableLoadingTest, AssignAndReadVariableSuccesful) {
ImmediateTensorHandlePtr output_handle;
TF_EXPECT_OK(var->ReadValue(&output_handle));
AbstractTensorPtr output_tensor(output_handle->Resolve(&status));
TF_EXPECT_OK(status) << status.error_message();
TF_EXPECT_OK(status) << status.message();
// Check that output_tensor == expected_tensor
EXPECT_EQ(output_tensor->Type(), expected_tensor->Type());

View File

@ -139,7 +139,7 @@ void CheckBufferDataIsEqual(DataType dtype, int64_t num_elements, void* a,
AbstractTensorPtr TensorHandleToTensor(ImmediateExecutionTensorHandle* handle) {
Status status;
AbstractTensorPtr tensor(handle->Resolve(&status));
CHECK(status.ok()) << status.error_message();
CHECK(status.ok()) << status.message();
CHECK_NE(tensor.get(), nullptr);
return tensor;
}

View File

@ -81,8 +81,7 @@ TEST_F(SavedConcreteFunctionLoadingTest, TooFewInputsInSavedConcreteFunction) {
std::unique_ptr<TFConcreteFunction> result;
Status status =
internal::LoadTFConcreteFunction(saved, &func, {}, context(), &result);
EXPECT_EQ(status.code(), error::FAILED_PRECONDITION)
<< status.error_message();
EXPECT_EQ(status.code(), error::FAILED_PRECONDITION) << status.message();
}
// A SavedConcreteFunction whose canonicalized input signature length +
@ -105,8 +104,7 @@ TEST_F(SavedConcreteFunctionLoadingTest,
std::unique_ptr<TFConcreteFunction> result;
Status status = internal::LoadTFConcreteFunction(saved, &func, captures,
context(), &result);
EXPECT_EQ(status.code(), error::FAILED_PRECONDITION)
<< status.error_message();
EXPECT_EQ(status.code(), error::FAILED_PRECONDITION) << status.message();
}
// A SavedConcreteFunction whose canonicalized input signature
@ -124,8 +122,7 @@ TEST_F(SavedConcreteFunctionLoadingTest, TooManyInputsInSavedConcreteFunction) {
std::unique_ptr<TFConcreteFunction> result;
Status status =
internal::LoadTFConcreteFunction(saved, &func, {}, context(), &result);
EXPECT_EQ(status.code(), error::FAILED_PRECONDITION)
<< status.error_message();
EXPECT_EQ(status.code(), error::FAILED_PRECONDITION) << status.message();
}
// A SavedConcreteFunction whose canonicalized input signature
@ -149,8 +146,7 @@ TEST_F(SavedConcreteFunctionLoadingTest,
std::unique_ptr<TFConcreteFunction> result;
Status status = internal::LoadTFConcreteFunction(saved, &func, captures,
context(), &result);
EXPECT_EQ(status.code(), error::FAILED_PRECONDITION)
<< status.error_message();
EXPECT_EQ(status.code(), error::FAILED_PRECONDITION) << status.message();
}
// A SavedConcreteFunction whose capture refers to an index not in the capture
@ -174,8 +170,7 @@ TEST_F(SavedConcreteFunctionLoadingTest, ImproperCaptureIndex) {
std::unique_ptr<TFConcreteFunction> result;
Status status = internal::LoadTFConcreteFunction(saved, &func, captures,
context(), &result);
EXPECT_EQ(status.code(), error::FAILED_PRECONDITION)
<< status.error_message();
EXPECT_EQ(status.code(), error::FAILED_PRECONDITION) << status.message();
}
// A SavedConcreteFunction whose outputs are fewer than its corresponding
@ -193,8 +188,7 @@ TEST_F(SavedConcreteFunctionLoadingTest, TooFewOutputsInSavedConcreteFunction) {
std::unique_ptr<TFConcreteFunction> result;
Status status =
internal::LoadTFConcreteFunction(saved, &func, {}, context(), &result);
EXPECT_EQ(status.code(), error::FAILED_PRECONDITION)
<< status.error_message();
EXPECT_EQ(status.code(), error::FAILED_PRECONDITION) << status.message();
}
// A SavedConcreteFunction whose outputs exceed its corresponding functiondef
@ -213,8 +207,7 @@ TEST_F(SavedConcreteFunctionLoadingTest,
std::unique_ptr<TFConcreteFunction> result;
Status status =
internal::LoadTFConcreteFunction(saved, &func, {}, context(), &result);
EXPECT_EQ(status.code(), error::FAILED_PRECONDITION)
<< status.error_message();
EXPECT_EQ(status.code(), error::FAILED_PRECONDITION) << status.message();
}
// A SavedConcreteFunction whose (inputs + captures) = functiondef inputs,
@ -238,7 +231,7 @@ TEST_F(SavedConcreteFunctionLoadingTest, SuccessfulLoad) {
std::unique_ptr<TFConcreteFunction> result;
Status status = internal::LoadTFConcreteFunction(saved, &func, captures,
context(), &result);
TF_EXPECT_OK(status) << status.error_message();
TF_EXPECT_OK(status) << status.message();
}
// A TFConcreteFunction should register functiondefs on creation, and
@ -257,7 +250,7 @@ TEST_F(SavedConcreteFunctionLoadingTest, RegistersAndRemovesFunctionDefs) {
std::unique_ptr<TFConcreteFunction> result;
Status status =
internal::LoadTFConcreteFunction(saved, &func, {}, context(), &result);
TF_EXPECT_OK(status) << status.error_message();
TF_EXPECT_OK(status) << status.message();
// The function should be registered with context.
EXPECT_TRUE(context()->FindFunctionByName(func_name));
}

View File

@ -26,7 +26,7 @@ cc_library(
hdrs = ["stream_executor.h"],
visibility = ["//tensorflow:internal"],
deps = [
"//tensorflow/c:c_api_macros",
"//tensorflow/c:c_api_macros_hdrs",
"//tensorflow/c:tf_status_headers",
],
)

View File

@ -204,7 +204,7 @@ struct HostCallbackContext {
void HostCallbackTrampoline(void* ctx, TF_Status* status) {
HostCallbackContext* host_ctx = static_cast<HostCallbackContext*>(ctx);
tsl::Status s = std::move(host_ctx->callback)();
Set_TF_Status_from_Status(status, s);
tsl::Set_TF_Status_from_Status(status, s);
delete host_ctx;
}
@ -237,7 +237,7 @@ class CStreamExecutor : public internal::StreamExecutorInterface {
stream_executor_->allocate(&device_, size, memory_space, &mem);
tsl::Status status = ValidateSPDeviceMemoryBase(mem);
if (!status.ok()) {
LOG(ERROR) << status.error_message();
LOG(ERROR) << status.message();
}
return DeviceMemoryBaseFromC(mem);
}
@ -284,7 +284,7 @@ class CStreamExecutor : public internal::StreamExecutorInterface {
}
tsl::Status status = ValidateSPAllocatorStats(c_stats);
if (!status.ok()) {
LOG(ERROR) << status.error_message();
LOG(ERROR) << status.message();
return absl::nullopt;
}
::stream_executor::AllocatorStats stats;

View File

@ -65,7 +65,7 @@ TEST(StreamExecutor, NameNotSet) {
tsl::Status status =
InitStreamExecutorPlugin(plugin_init, &device_type, &platform_name);
ASSERT_EQ(status.code(), tensorflow::error::FAILED_PRECONDITION);
ASSERT_EQ(status.error_message(), "'name' field in SP_Platform must be set.");
ASSERT_EQ(status.message(), "'name' field in SP_Platform must be set.");
}
TEST(StreamExecutor, InvalidNameWithSemicolon) {
@ -81,7 +81,7 @@ TEST(StreamExecutor, InvalidNameWithSemicolon) {
InitStreamExecutorPlugin(plugin_init, &device_type, &platform_name);
ASSERT_EQ(status.code(), tensorflow::error::FAILED_PRECONDITION);
EXPECT_THAT(
status.error_message(),
status.message(),
testing::ContainsRegex("Device name/type 'INVALID:NAME' must match"));
}
@ -97,7 +97,7 @@ TEST(StreamExecutor, InvalidNameWithSlash) {
tsl::Status status =
InitStreamExecutorPlugin(plugin_init, &device_type, &platform_name);
ASSERT_EQ(status.code(), tensorflow::error::FAILED_PRECONDITION);
EXPECT_THAT(status.error_message(),
EXPECT_THAT(status.message(),
testing::ContainsRegex("Device name/type 'INVALID/' must match"));
}
@ -113,7 +113,7 @@ TEST(StreamExecutor, CreateDeviceNotSet) {
tsl::Status status =
InitStreamExecutorPlugin(plugin_init, &device_type, &platform_name);
ASSERT_EQ(status.code(), tensorflow::error::FAILED_PRECONDITION);
ASSERT_EQ(status.error_message(),
ASSERT_EQ(status.message(),
"'create_device' field in SP_PlatformFns must be set.");
}
@ -130,7 +130,7 @@ TEST(StreamExecutor, UnifiedMemoryAllocateNotSet) {
InitStreamExecutorPlugin(plugin_init, &device_type, &platform_name);
ASSERT_EQ(status.code(), tensorflow::error::FAILED_PRECONDITION);
ASSERT_EQ(
status.error_message(),
status.message(),
"'unified_memory_allocate' field in SP_StreamExecutor must be set.");
}
@ -327,7 +327,7 @@ TEST_F(StreamExecutorTest, StreamStatus) {
status_ok = false;
auto updated_status = stream.RefreshStatus();
ASSERT_FALSE(stream.ok());
ASSERT_EQ(updated_status.error_message(), "Test error");
ASSERT_EQ(updated_status.message(), "Test error");
}
TEST_F(StreamExecutorTest, CreateEvent) {


@ -83,9 +83,6 @@ void SynchronizeAllActivity(const SP_Device* const device,
TF_Bool HostCallback(const SP_Device* const device, SP_Stream stream,
SE_StatusCallbackFn const callback_fn,
void* const callback_arg) {
TSL_Status* status_ignored = TSL_NewStatus();
callback_fn(callback_arg, status_ignored);
TSL_DeleteStatus(status_ignored);
return true;
}


@ -36,6 +36,9 @@ limitations under the License.
#if !defined(IS_MOBILE_PLATFORM) && !defined(IS_SLIM_BUILD)
#include "tensorflow/c/experimental/stream_executor/stream_executor_internal.h"
#include "tensorflow/compiler/xla/stream_executor/stream.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/tsl/framework/device_id_utils.h"
#include "tensorflow/tsl/platform/statusor.h"
#endif // !defined(IS_MOBILE_PLATFORM) && !defined(IS_SLIM_BUILD)
using tensorflow::errors::InvalidArgument;
@ -660,12 +663,12 @@ TF_Buffer* TF_OpKernelConstruction_GetAttrFunction(TF_OpKernelConstruction* ctx,
tensorflow::NameAttrList function;
auto cc_status = cc_ctx->GetAttr(attr_name, &function);
if (!cc_status.ok()) {
Set_TF_Status_from_Status(status, cc_status);
tsl::Set_TF_Status_from_Status(status, cc_status);
return nullptr;
}
TF_Buffer* buffer = TF_NewBuffer();
cc_status = tensorflow::MessageToBuffer(function, buffer);
Set_TF_Status_from_Status(status, cc_status);
tsl::Set_TF_Status_from_Status(status, cc_status);
if (!cc_status.ok())
return nullptr;
else
@ -753,10 +756,19 @@ int64_t TF_GetStepId(TF_OpKernelContext* ctx) {
int TF_GetDeviceId(TF_OpKernelContext* ctx) {
// TensorFlow always sets device in OpKernelContext.
auto* device =
reinterpret_cast<::tensorflow::OpKernelContext*>(ctx)->device();
if (!device->parsed_name().has_id) return -1;
return device->parsed_name().id;
const tensorflow::DeviceBase* device_base =
reinterpret_cast<tensorflow::OpKernelContext*>(ctx)->device();
#if defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD)
if (!device_base->parsed_name().has_id) return -1;
return device_base->parsed_name().id;
#else
const auto* device = reinterpret_cast<const tensorflow::Device*>(
device_base->UnderlyingDevice());
const tsl::StatusOr<int> id = tsl::GetDeviceIdFromDeviceParsedName(
device->parsed_name(), tensorflow::DeviceType(device->device_type()));
if (!id.ok()) return -1;
return *id;
#endif // defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD)
}
TF_StringView TF_GetOpKernelName(TF_OpKernelContext* ctx) {
@ -791,8 +803,6 @@ TF_Tensor* TF_AllocateOutput(TF_OpKernelContext* context, int index,
int num_dims, size_t len, TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(context);
static_assert(sizeof(int64_t) == sizeof(int64_t),
"64-bit int types should match in size");
tensorflow::gtl::ArraySlice<const int64_t> dimarray(
reinterpret_cast<const int64_t*>(dims), num_dims);
tensorflow::Tensor* tensor;
@ -818,8 +828,6 @@ TF_Tensor* TF_ForwardInputOrAllocateOutput(
TF_SetStatus(status, TF_OK, "");
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(context);
static_assert(sizeof(int64_t) == sizeof(int64_t),
"64-bit int types should match in size");
tensorflow::gtl::ArraySlice<int> input_indices_array(
candidate_input_indices, num_candidate_input_indices);
tensorflow::gtl::ArraySlice<const int64_t> output_dimarray(
@ -847,8 +855,6 @@ TF_Tensor* TF_AllocateTemp(TF_OpKernelContext* context, TF_DataType dtype,
TF_Status* status) {
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(context);
TF_SetStatus(status, TF_OK, "");
static_assert(sizeof(int64_t) == sizeof(int64_t),
"64-bit int types should match in size");
tensorflow::gtl::ArraySlice<const int64_t> dimarray(
reinterpret_cast<const int64_t*>(dims), num_dims);
if (attributes && !attributes->struct_size) {


@ -19,30 +19,12 @@ limitations under the License.
#include <stdint.h>
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/c_api_macros.h"
#include "tensorflow/c/experimental/stream_executor/stream_executor.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_tensor.h"
// Macro to control visibility of exported symbols in the shared library (.so,
// .dylib, .dll).
// This duplicates the TF_EXPORT macro definition in
// tensorflow/core/platform/macros.h in order to keep this .h file independent
// of any other includes.
#ifdef SWIG
#define TF_CAPI_EXPORT
#else
#if defined(_WIN32)
#ifdef TF_COMPILE_LIBRARY
#define TF_CAPI_EXPORT __declspec(dllexport)
#else
#define TF_CAPI_EXPORT __declspec(dllimport)
#endif // TF_COMPILE_LIBRARY
#else
#define TF_CAPI_EXPORT __attribute__((visibility("default")))
#endif // _WIN32
#endif // SWIG
#ifdef __cplusplus
extern "C" {
#endif
@ -283,7 +265,11 @@ TF_CAPI_EXPORT extern int64_t TF_GetIterId(TF_OpKernelContext* ctx);
// Returns the Step ID of the given context.
TF_CAPI_EXPORT extern int64_t TF_GetStepId(TF_OpKernelContext* ctx);
// Returns the Device ID of the device that the context possesses.
// Returns the Device ID of the device that the context possesses. Returns the
// PlatformDeviceId if a mapping between TfDeviceId and PlatformDeviceId
// is set; otherwise returns the id in the device name. Please refer to
// tensorflow/tsl/framework/device_id.h for more details.
// For mobile or slim build, returns the id in the device name.
TF_CAPI_EXPORT extern int TF_GetDeviceId(TF_OpKernelContext* ctx);
// Returns the graph def version of the given context.
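A minimal usage sketch for TF_GetDeviceId; the kernel callback and the fallback value are illustrative assumptions, only TF_GetDeviceId and TF_OpKernelContext come from this header:

// Hypothetical compute callback for a kernel built on this C API.
void MyKernel_Compute(void* kernel, TF_OpKernelContext* ctx) {
  int device_id = TF_GetDeviceId(ctx);
  if (device_id < 0) {
    // No id could be determined (e.g. the device name carries none);
    // fall back to a default for illustration.
    device_id = 0;
  }
  // ... index per-device state with device_id ...
}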


@ -262,7 +262,7 @@ void TF_AssignUpdateVariable(TF_OpKernelContext* ctx, int input_index,
Status status =
LookupResource(context, HandleFromInput(context, input_index), &variable);
if (!status.ok()) {
printf("Failed with error: %s\n", status.error_message().c_str());
printf("Failed with error: %s\n", tsl::NullTerminatedMessage(status));
abort();
}
const Tensor& value = context->input(value_index);
@ -475,6 +475,118 @@ static Status ValidateVariantType(const Variant& variant) {
return ::tensorflow::OkStatus();
}
static Status VariantBinaryAddFunc(
::tensorflow::OpKernelContext* cc_ctx, const Variant& a, const Variant& b,
Variant* out,
void (*binary_add_func)(TF_OpKernelContext* ctx, TF_Tensor* a, TF_Tensor* b,
TF_Tensor* out));
static Status CCBinaryAddFunc(
::tensorflow::OpKernelContext* cc_ctx, const Tensor& cc_a,
const Tensor& cc_b, Tensor* cc_out,
void (*binary_add_func)(TF_OpKernelContext* ctx, TF_Tensor* a, TF_Tensor* b,
TF_Tensor* out)) {
if (cc_a.dtype() == ::tensorflow::DT_INVALID) {
*cc_out = cc_b;
return ::tensorflow::OkStatus();
}
if (cc_b.dtype() == ::tensorflow::DT_INVALID) {
*cc_out = cc_a;
return ::tensorflow::OkStatus();
}
Status status;
TF_Tensor* a = TF_TensorFromTensor(cc_a, &status);
TF_RETURN_IF_ERROR(status);
TF_Tensor* b = TF_TensorFromTensor(cc_b, &status);
if (!status.ok()) {
TF_DeleteTensor(a);
return status;
}
::tensorflow::AllocatorAttributes attr;
if (cc_a.dtype() == ::tensorflow::DT_VARIANT) {
attr.set_on_host(true);
}
status = cc_ctx->allocate_temp(cc_a.dtype(), cc_a.shape(), cc_out, attr);
if (!status.ok()) {
TF_DeleteTensor(a);
TF_DeleteTensor(b);
return status;
}
TF_Tensor* out = TF_TensorFromTensor(*cc_out, &status);
if (!status.ok()) {
TF_DeleteTensor(a);
TF_DeleteTensor(b);
return status;
}
auto* ctx = reinterpret_cast<TF_OpKernelContext*>(cc_ctx);
if (cc_a.dtype() == ::tensorflow::DT_VARIANT) {
return VariantBinaryAddFunc(
cc_ctx, cc_a.scalar<Variant>()(), cc_b.scalar<Variant>()(),
cc_out->scalar<Variant>().data(), binary_add_func);
} else {
binary_add_func(ctx, a, b, out);
return cc_ctx->status();
}
};
static Status VariantBinaryAddFunc(
::tensorflow::OpKernelContext* cc_ctx, const Variant& a, const Variant& b,
Variant* out,
void (*binary_add_func)(TF_OpKernelContext* ctx, TF_Tensor* a, TF_Tensor* b,
TF_Tensor* out)) {
auto cc_binary_add = [binary_add_func](::tensorflow::OpKernelContext* cc_ctx,
const Tensor& cc_a, const Tensor& cc_b,
Tensor* cc_out) {
return CCBinaryAddFunc(cc_ctx, cc_a, cc_b, cc_out, binary_add_func);
};
if (out == nullptr) {
return ::tensorflow::errors::Internal(
"The output variant hasn't been initialized");
}
if (a.TypeId() != b.TypeId()) {
return ::tensorflow::errors::Internal(
"BinaryOpVariants: Variants a and b have different "
"type ids. Type names: '",
a.TypeName(), "' vs. '", b.TypeName(), "'");
}
if (a.TypeId() == tensorflow::TypeIndex::Make<::tensorflow::TensorList>()) {
TF_RETURN_IF_ERROR(ValidateVariantType<::tensorflow::TensorList>(a));
*out = ::tensorflow::TensorList();
return ::tensorflow::TensorListBinaryAdd(
cc_ctx, *a.get<::tensorflow::TensorList>(),
*b.get<::tensorflow::TensorList>(),
out->get<::tensorflow::TensorList>(), cc_binary_add);
} else if (a.TypeId() == tensorflow::TypeIndex::Make<
::tensorflow::data::OptionalVariant>()) {
TF_RETURN_IF_ERROR(
ValidateVariantType<::tensorflow::data::OptionalVariant>(a));
*out = ::tensorflow::data::OptionalVariant();
return ::tensorflow::data::OptionalBinaryAdd(
cc_ctx, *a.get<::tensorflow::data::OptionalVariant>(),
*b.get<::tensorflow::data::OptionalVariant>(),
out->get<::tensorflow::data::OptionalVariant>(), cc_binary_add);
}
const std::string type_index_name =
::tensorflow::port::MaybeAbiDemangle(a.TypeId().name());
return ::tensorflow::errors::Internal(
"No unary variant binary_op function found for op ADD Variant "
"type_name: ",
type_index_name, " for device type: ", cc_ctx->device()->name());
}
void TF_AddNVariant(TF_OpKernelContext* ctx,
void (*binary_add_func)(TF_OpKernelContext* ctx,
TF_Tensor* a, TF_Tensor* b,
@ -482,97 +594,11 @@ void TF_AddNVariant(TF_OpKernelContext* ctx,
TF_Status* status) {
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);
auto cc_binary_add_func = [binary_add_func](
::tensorflow::OpKernelContext* cc_ctx,
const Tensor& cc_a, const Tensor& cc_b,
Tensor* cc_out) {
if (cc_a.dtype() == ::tensorflow::DT_INVALID) {
*cc_out = cc_b;
return ::tensorflow::OkStatus();
}
if (cc_b.dtype() == ::tensorflow::DT_INVALID) {
*cc_out = cc_a;
return ::tensorflow::OkStatus();
}
Status status;
TF_Tensor* a = TF_TensorFromTensor(cc_a, &status);
TF_RETURN_IF_ERROR(status);
TF_Tensor* b = TF_TensorFromTensor(cc_b, &status);
if (!status.ok()) {
TF_DeleteTensor(a);
return status;
}
::tensorflow::AllocatorAttributes attr;
if (cc_a.dtype() == ::tensorflow::DT_VARIANT) {
attr.set_on_host(true);
}
status = cc_ctx->allocate_temp(cc_a.dtype(), cc_a.shape(), cc_out, attr);
if (!status.ok()) {
TF_DeleteTensor(a);
TF_DeleteTensor(b);
return status;
}
TF_Tensor* out = TF_TensorFromTensor(*cc_out, &status);
if (!status.ok()) {
TF_DeleteTensor(a);
TF_DeleteTensor(b);
return status;
}
auto* ctx = reinterpret_cast<TF_OpKernelContext*>(cc_ctx);
binary_add_func(ctx, a, b, out);
return cc_ctx->status();
};
auto binary_add_variant = [cc_binary_add_func](
::tensorflow::OpKernelContext* cc_ctx,
const Variant& a, const Variant& b,
Variant* out) {
if (out == nullptr) {
return ::tensorflow::errors::Internal(
"The output variant hasn't been initialized");
}
if (a.TypeId() != b.TypeId()) {
return ::tensorflow::errors::Internal(
"BinaryOpVariants: Variants a and b have different "
"type ids. Type names: '",
a.TypeName(), "' vs. '", b.TypeName(), "'");
}
if (a.TypeId() == tensorflow::TypeIndex::Make<::tensorflow::TensorList>()) {
TF_RETURN_IF_ERROR(ValidateVariantType<::tensorflow::TensorList>(a));
*out = ::tensorflow::TensorList();
return ::tensorflow::TensorListBinaryAdd(
cc_ctx, *a.get<::tensorflow::TensorList>(),
*b.get<::tensorflow::TensorList>(),
out->get<::tensorflow::TensorList>(), cc_binary_add_func);
} else if (a.TypeId() == tensorflow::TypeIndex::Make<
::tensorflow::data::OptionalVariant>()) {
TF_RETURN_IF_ERROR(
ValidateVariantType<::tensorflow::data::OptionalVariant>(a));
*out = ::tensorflow::data::OptionalVariant();
return ::tensorflow::data::OptionalBinaryAdd(
cc_ctx, *a.get<::tensorflow::data::OptionalVariant>(),
*b.get<::tensorflow::data::OptionalVariant>(),
out->get<::tensorflow::data::OptionalVariant>(), cc_binary_add_func);
}
const std::string type_index_name =
::tensorflow::port::MaybeAbiDemangle(a.TypeId().name());
return ::tensorflow::errors::Internal(
"No unary variant binary_op function found for op ADD Variant "
"type_name: ",
type_index_name, " for device type: ", cc_ctx->device()->name());
};
auto binary_add_variant =
[binary_add_func](::tensorflow::OpKernelContext* cc_ctx, const Variant& a,
const Variant& b, Variant* out) {
return VariantBinaryAddFunc(cc_ctx, a, b, out, binary_add_func);
};
::tensorflow::AddNVariant(cc_ctx, binary_add_variant);
::tensorflow::Set_TF_Status_from_Status(status, cc_ctx->status());
}
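The refactor above hoists the two lambdas into the free functions CCBinaryAddFunc and VariantBinaryAddFunc so the element-wise add can recurse back into the variant add when an operand is itself a DT_VARIANT scalar, i.e. nested variants. A toy sketch of the forward-declaration pattern this mutual recursion requires (names illustrative only):

#include <iostream>

int AddVariant(int depth);  // forward declaration, as for VariantBinaryAddFunc

int AddTensor(int depth) {
  // Mirrors CCBinaryAddFunc: recurse into the variant path for a nested
  // payload, otherwise perform the plain element-wise add.
  if (depth > 0) return AddVariant(depth - 1);
  return 1;
}

int AddVariant(int depth) { return AddTensor(depth); }

int main() { std::cout << AddTensor(3) << "\n"; }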


@ -16,6 +16,7 @@ limitations under the License.
#ifndef TENSORFLOW_C_KERNELS_EXPERIMENTAL_H_
#define TENSORFLOW_C_KERNELS_EXPERIMENTAL_H_
#include "tensorflow/c/c_api_macros.h"
#include "tensorflow/c/kernels.h"
// --------------------------------------------------------------------------
@ -24,25 +25,6 @@ limitations under the License.
// The API here is subject to changes in the future.
// --------------------------------------------------------------------------
// Macro to control visibility of exported symbols in the shared library (.so,
// .dylib, .dll).
// This duplicates the TF_EXPORT macro definition in
// tensorflow/core/platform/macros.h in order to keep this .h file independent
// of any other includes.
#ifdef SWIG
#define TF_CAPI_EXPORT
#else
#if defined(_WIN32)
#ifdef TF_COMPILE_LIBRARY
#define TF_CAPI_EXPORT __declspec(dllexport)
#else
#define TF_CAPI_EXPORT __declspec(dllimport)
#endif // TF_COMPILE_LIBRARY
#else
#define TF_CAPI_EXPORT __attribute__((visibility("default")))
#endif // _WIN32
#endif // SWIG
#ifdef __cplusplus
extern "C" {
#endif


@ -73,23 +73,10 @@ limitations under the License.
#include <stdint.h>
#include <stdlib.h>
#include "tensorflow/c/c_api_macros.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/c/tf_status.h"
#ifdef SWIG
#define TF_CAPI_EXPORT
#else
#if defined(_WIN32)
#ifdef TF_COMPILE_LIBRARY
#define TF_CAPI_EXPORT __declspec(dllexport)
#else
#define TF_CAPI_EXPORT __declspec(dllimport)
#endif // TF_COMPILE_LIBRARY
#else
#define TF_CAPI_EXPORT __attribute__((visibility("default")))
#endif // _WIN32
#endif // SWIG
#ifdef __cplusplus
extern "C" {
#endif


@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/python/lib/core/safe_ptr.h"
#include "tensorflow/c/safe_ptr.h"
namespace tensorflow {


@ -13,16 +13,13 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_PYTHON_LIB_CORE_SAFE_PTR_H_
#define TENSORFLOW_PYTHON_LIB_CORE_SAFE_PTR_H_
#include <Python.h>
#ifndef TENSORFLOW_C_SAFE_PTR_H_
#define TENSORFLOW_C_SAFE_PTR_H_
#include <memory>
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/python/lib/core/safe_pyobject_ptr.h"
namespace tensorflow {
namespace detail {
@ -68,4 +65,4 @@ Safe_TF_BufferPtr make_safe(TF_Buffer* buffer);
} // namespace tensorflow
#endif // TENSORFLOW_PYTHON_LIB_CORE_SAFE_PTR_H_
#endif // TENSORFLOW_C_SAFE_PTR_H_


@ -18,24 +18,7 @@ limitations under the License.
#include <stddef.h>
// Macro to control visibility of exported symbols in the shared library (.so,
// .dylib, .dll).
// This duplicates the TF_EXPORT macro definition in
// tensorflow/core/platform/macros.h in order to keep this .h file independent
// of any other includes.
#ifdef SWIG
#define TF_CAPI_EXPORT
#else
#if defined(_WIN32)
#ifdef TF_COMPILE_LIBRARY
#define TF_CAPI_EXPORT __declspec(dllexport)
#else
#define TF_CAPI_EXPORT __declspec(dllimport)
#endif // TF_COMPILE_LIBRARY
#else
#define TF_CAPI_EXPORT __attribute__((visibility("default")))
#endif // _WIN32
#endif // SWIG
#include "tensorflow/c/c_api_macros.h"
#ifdef __cplusplus
extern "C" {


@ -22,11 +22,7 @@ limitations under the License.
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/status.h"
namespace tsl {
class Status;
}
namespace tensorflow {
using tsl::Status;
Status MessageToBuffer(const tensorflow::protobuf::MessageLite& in,
TF_Buffer* out);


@ -18,24 +18,7 @@ limitations under the License.
#include <stddef.h>
// Macro to control visibility of exported symbols in the shared library (.so,
// .dylib, .dll).
// This duplicates the TF_EXPORT macro definition in
// tensorflow/core/platform/macros.h in order to keep this .h file independent
// of any other includes.
#ifdef SWIG
#define TF_CAPI_EXPORT
#else
#if defined(_WIN32)
#ifdef TF_COMPILE_LIBRARY
#define TF_CAPI_EXPORT __declspec(dllexport)
#else
#define TF_CAPI_EXPORT __declspec(dllimport)
#endif // TF_COMPILE_LIBRARY
#else
#define TF_CAPI_EXPORT __attribute__((visibility("default")))
#endif // _WIN32
#endif // SWIG
#include "tensorflow/c/c_api_macros.h"
#ifdef __cplusplus
extern "C" {


@ -16,22 +16,9 @@ limitations under the License.
#ifndef TENSORFLOW_C_TF_STATUS_H_
#define TENSORFLOW_C_TF_STATUS_H_
#include "tensorflow/c/c_api_macros.h"
#include "tensorflow/tsl/c/tsl_status.h"
#ifdef SWIG
#define TF_CAPI_EXPORT
#else
#if defined(_WIN32)
#ifdef TF_COMPILE_LIBRARY
#define TF_CAPI_EXPORT __declspec(dllexport)
#else
#define TF_CAPI_EXPORT __declspec(dllimport)
#endif // TF_COMPILE_LIBRARY
#else
#define TF_CAPI_EXPORT __attribute__((visibility("default")))
#endif // _WIN32
#endif // SWIG
#ifdef __cplusplus
extern "C" {
#endif


@ -183,7 +183,7 @@ void TF_TensorBitcastFrom(const TF_Tensor* from, TF_DataType type,
*tensorflow::down_cast<const tensorflow::TensorInterface*>(
from->tensor),
static_cast<tensorflow::DataType>(type), new_dims, num_new_dims));
Set_TF_Status_from_Status(status, cc_status);
tsl::Set_TF_Status_from_Status(status, cc_status);
}
namespace tensorflow {


@ -23,25 +23,6 @@ limitations under the License.
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/c/tf_status.h"
// Macro to control visibility of exported symbols in the shared library (.so,
// .dylib, .dll).
// This duplicates the TF_EXPORT macro definition in
// tensorflow/core/platform/macros.h in order to keep this .h file independent
// of any other includes.
#ifdef SWIG
#define TF_CAPI_EXPORT
#else
#if defined(_WIN32)
#ifdef TF_COMPILE_LIBRARY
#define TF_CAPI_EXPORT __declspec(dllexport)
#else
#define TF_CAPI_EXPORT __declspec(dllimport)
#endif // TF_COMPILE_LIBRARY
#else
#define TF_CAPI_EXPORT __attribute__((visibility("default")))
#endif // _WIN32
#endif // SWIG
#ifdef __cplusplus
extern "C" {
#endif


@ -15,23 +15,10 @@ limitations under the License.
#ifndef TENSORFLOW_C_TF_TSTRING_H_
#define TENSORFLOW_C_TF_TSTRING_H_
#include "tensorflow/c/c_api_macros.h"
#include "tensorflow/c/tf_tensor.h"
#include "tensorflow/core/platform/ctstring.h"
#ifdef SWIG
#define TF_CAPI_EXPORT
#else
#if defined(_WIN32)
#ifdef TF_COMPILE_LIBRARY
#define TF_CAPI_EXPORT __declspec(dllexport)
#else
#define TF_CAPI_EXPORT __declspec(dllimport)
#endif // TF_COMPILE_LIBRARY
#else
#define TF_CAPI_EXPORT __attribute__((visibility("default")))
#endif // _WIN32
#endif // SWIG
#ifdef __cplusplus
extern "C" {
#endif


@ -108,10 +108,14 @@ Status ClientSession::Run(const RunOptions& run_options, const FeedType& inputs,
std::vector<Tensor>* outputs,
RunMetadata* run_metadata) const {
std::vector<std::pair<string, Tensor>> feeds;
feeds.reserve(inputs.size());
for (auto const& feed : inputs) {
TF_RETURN_IF_ERROR(feed.second.status);
feeds.emplace_back(feed.first.name(), feed.second.tensor);
feeds.emplace_back(std::piecewise_construct,
std::forward_as_tuple(feed.first.name()),
std::forward_as_tuple(feed.second.tensor));
}
std::vector<string> output_tensor_names;
output_tensor_names.reserve(fetch_outputs.size());
for (auto const& output : fetch_outputs) {
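The feeds loop above now reserves capacity and constructs each pair member in place. A self-contained sketch of the std::piecewise_construct idiom it uses:

#include <string>
#include <tuple>
#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<std::string, std::string>> feeds;
  feeds.reserve(1);
  // Each forwarded tuple initializes the corresponding pair member directly,
  // avoiding a temporary std::pair and an extra move of the payload.
  feeds.emplace_back(std::piecewise_construct,
                     std::forward_as_tuple("x:0"),
                     std::forward_as_tuple("feed-bytes"));
}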


@ -50,7 +50,7 @@ class FunctionTest
impl::TaggedValueTensor CreateScalarTensor(T val) {
AbstractTensorHandle* raw = nullptr;
Status s = TestScalarTensorHandle<T, datatype>(ctx_.get(), val, &raw);
CHECK_EQ(tensorflow::errors::OK, s.code()) << s.error_message();
CHECK_EQ(tensorflow::errors::OK, s.code()) << s.message();
return impl::TaggedValueTensor(raw, /*add_ref=*/false);
}
@ -64,12 +64,12 @@ class FunctionTest
TF_StatusPtr status(TF_NewStatus());
TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
Status s = tensorflow::StatusFromTF_Status(status.get());
CHECK_EQ(tensorflow::errors::OK, s.code()) << s.error_message();
CHECK_EQ(tensorflow::errors::OK, s.code()) << s.message();
// Set the runtime impl, Core RT vs TFRT.
AbstractContext* ctx_raw = nullptr;
s = BuildImmediateExecutionContext(UseTfrt(), &ctx_raw);
CHECK_EQ(tensorflow::errors::OK, s.code()) << s.error_message();
CHECK_EQ(tensorflow::errors::OK, s.code()) << s.message();
ctx_.reset(ctx_raw);
}
};
@ -139,7 +139,7 @@ template <typename T>
void ExpectEquals(AbstractTensorHandle* t, T expected) {
TF_Tensor* result_t;
Status s = tensorflow::GetValue(t, &result_t);
ASSERT_TRUE(s.ok()) << s.error_message();
ASSERT_TRUE(s.ok()) << s.message();
auto value = static_cast<T*>(TF_TensorData(result_t));
EXPECT_EQ(*value, expected);
TF_DeleteTensor(result_t);
@ -156,10 +156,10 @@ TEST_P(FunctionTest, Square) {
PartialTensorShape unknown_shape;
TaggedValue signature(unknown_shape, DT_FLOAT);
Status s = tf_function.RegisterTrace(std::move(trace), signature, signature);
ASSERT_TRUE(s.ok()) << s.error_message();
ASSERT_TRUE(s.ok()) << s.message();
TaggedValue args(std::move(x));
StatusOr<TaggedValue> v = tf_function.Execute(ctx_.get(), args);
ASSERT_TRUE(v.ok()) << v.status().error_message();
ASSERT_TRUE(v.ok()) << v.status().message();
const TaggedValue& result = v.value();
AbstractTensorHandle* t = result.tensor().get();
ExpectEquals(t, 4.0f);
@ -178,12 +178,12 @@ TEST_P(FunctionTest, Add) {
input_signature.tuple().emplace_back(tensor_spec);
Status s =
tf_function.RegisterTrace(std::move(trace), input_signature, tensor_spec);
ASSERT_TRUE(s.ok()) << s.error_message();
ASSERT_TRUE(s.ok()) << s.message();
TaggedValue args = TaggedValue::Tuple();
args.tuple().emplace_back(TaggedValue(x));
args.tuple().emplace_back(TaggedValue(x));
StatusOr<TaggedValue> v = tf_function.Execute(ctx_.get(), args);
ASSERT_TRUE(v.ok()) << v.status().error_message();
ASSERT_TRUE(v.ok()) << v.status().message();
const TaggedValue& result = v.value();
ExpectEquals(result.tensor().get(), 4.0f);
}
@ -200,12 +200,12 @@ TEST_P(FunctionTest, IdentityN) {
signature.tuple().emplace_back(tensor_spec);
signature.tuple().emplace_back(tensor_spec);
Status s = tf_function.RegisterTrace(std::move(trace), signature, signature);
ASSERT_TRUE(s.ok()) << s.error_message();
ASSERT_TRUE(s.ok()) << s.message();
TaggedValue args = TaggedValue::Tuple();
args.tuple().emplace_back(TaggedValue(x));
args.tuple().emplace_back(TaggedValue(y));
StatusOr<TaggedValue> v = tf_function.Execute(ctx_.get(), args);
ASSERT_TRUE(v.ok()) << v.status().error_message();
ASSERT_TRUE(v.ok()) << v.status().message();
const TaggedValue& result = v.value();
ExpectEquals(result.tuple()[0].tensor().get(), 2.0f);
ExpectEquals(result.tuple()[1].tensor().get(), 4.0f);
@ -220,13 +220,13 @@ TEST_P(FunctionTest, UnaryFuncCalledWithMultipleArgsFails) {
PartialTensorShape unknown_shape;
TaggedValue signature(unknown_shape, DT_FLOAT);
Status s = tf_function.RegisterTrace(std::move(trace), signature, signature);
ASSERT_TRUE(s.ok()) << s.error_message();
ASSERT_TRUE(s.ok()) << s.message();
TaggedValue args = TaggedValue::Tuple();
args.tuple().emplace_back(TaggedValue(x));
args.tuple().emplace_back(TaggedValue(x));
StatusOr<TaggedValue> v = tf_function.Execute(ctx_.get(), args);
ASSERT_TRUE(tensorflow::errors::IsInvalidArgument(v.status()));
ASSERT_TRUE(absl::StrContains(v.status().error_message(), "No match"));
ASSERT_TRUE(absl::StrContains(v.status().message(), "No match"));
}
TEST_P(FunctionTest, IncorrectArityOfOutputSignatureFails) {
@ -248,13 +248,13 @@ TEST_P(FunctionTest, IncorrectArityOfOutputSignatureFails) {
TaggedValue output_signature(unknown_shape, DT_FLOAT);
Status s = tf_function.RegisterTrace(std::move(trace), input_signature,
output_signature);
ASSERT_TRUE(s.ok()) << s.error_message();
ASSERT_TRUE(s.ok()) << s.message();
TaggedValue args = TaggedValue::Tuple();
args.tuple().emplace_back(TaggedValue(x));
args.tuple().emplace_back(TaggedValue(y));
StatusOr<TaggedValue> v = tf_function.Execute(ctx_.get(), args);
ASSERT_TRUE(tensorflow::errors::IsInvalidArgument(v.status())) << v.status();
ASSERT_TRUE(absl::StrContains(v.status().error_message(),
ASSERT_TRUE(absl::StrContains(v.status().message(),
"Expecting 2 outputs, but *num_retvals is 1"));
}
@ -273,15 +273,15 @@ TEST_P(FunctionTest, IncorrectDtypeInOutputSignatureFails) {
TaggedValue output_tensor_spec(unknown_shape, tensorflow::DT_INT64);
Status s = tf_function.RegisterTrace(std::move(trace), input_signature,
output_tensor_spec);
ASSERT_TRUE(s.ok()) << s.error_message();
ASSERT_TRUE(s.ok()) << s.message();
TaggedValue args = TaggedValue::Tuple();
args.tuple().emplace_back(TaggedValue(x));
args.tuple().emplace_back(TaggedValue(x));
StatusOr<TaggedValue> v = tf_function.Execute(ctx_.get(), args);
ASSERT_TRUE(tensorflow::errors::IsInternal(v.status())) << v.status();
ASSERT_TRUE(absl::StrContains(v.status().error_message(),
"Shape and dtype of tensor"));
ASSERT_TRUE(absl::StrContains(v.status().error_message(),
ASSERT_TRUE(
absl::StrContains(v.status().message(), "Shape and dtype of tensor"));
ASSERT_TRUE(absl::StrContains(v.status().message(),
"does not match that in signature"));
}


@ -43,7 +43,7 @@ class UnifiedCAPI
TF_StatusPtr status(TF_NewStatus());
TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
Status s = tensorflow::StatusFromTF_Status(status.get());
CHECK_EQ(tensorflow::errors::OK, s.code()) << s.error_message();
CHECK_EQ(tensorflow::errors::OK, s.code()) << s.message();
}
};
@ -52,7 +52,7 @@ template <class T>
TaggedValue MakeContext(T runtime) {
AbstractContext* ctx_raw = nullptr;
Status s = BuildImmediateExecutionContext(runtime, &ctx_raw);
// ASSERT_EQ(tensorflow::errors::OK, s.code()) << s.error_message();
// ASSERT_EQ(tensorflow::errors::OK, s.code()) << s.message();
return TaggedValue::Capsule(static_cast<void*>(ctx_raw), [](void* p) {
tensorflow::internal::AbstractContextDeleter()(
static_cast<AbstractContext*>(p));
@ -67,7 +67,7 @@ TEST_P(UnifiedCAPI, HoldTensors) {
AbstractContext* ctx_raw = nullptr;
Status s =
BuildImmediateExecutionContext(std::get<1>(GetParam()), &ctx_raw);
ASSERT_EQ(tensorflow::errors::OK, s.code()) << s.error_message();
ASSERT_EQ(tensorflow::errors::OK, s.code()) << s.message();
ctx.reset(ctx_raw);
}
@ -76,7 +76,7 @@ TEST_P(UnifiedCAPI, HoldTensors) {
{
AbstractTensorHandle* x_raw = nullptr;
Status s = TestScalarTensorHandle<float, TF_FLOAT>(ctx.get(), 2.0f, &x_raw);
ASSERT_EQ(tensorflow::errors::OK, s.code()) << s.error_message();
ASSERT_EQ(tensorflow::errors::OK, s.code()) << s.message();
x.reset(x_raw, false);
}
// Manually copy pointer so we can later compare the reference count.


@ -48,7 +48,7 @@ class VariableTest
impl::TaggedValueTensor CreateScalarTensor(T val) {
AbstractTensorHandle* raw = nullptr;
Status s = TestScalarTensorHandle<T, datatype>(ctx_.get(), val, &raw);
CHECK_EQ(tensorflow::errors::OK, s.code()) << s.error_message();
CHECK_EQ(tensorflow::errors::OK, s.code()) << s.message();
return impl::TaggedValueTensor(raw, /*add_ref=*/false);
}
@ -62,12 +62,12 @@ class VariableTest
TF_StatusPtr status(TF_NewStatus());
TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
Status s = tensorflow::StatusFromTF_Status(status.get());
CHECK_EQ(tensorflow::errors::OK, s.code()) << s.error_message();
CHECK_EQ(tensorflow::errors::OK, s.code()) << s.message();
// Set the runtime impl, Core RT vs TFRT.
AbstractContext* ctx_raw = nullptr;
s = BuildImmediateExecutionContext(UseTfrt(), &ctx_raw);
CHECK_EQ(tensorflow::errors::OK, s.code()) << s.error_message();
CHECK_EQ(tensorflow::errors::OK, s.code()) << s.message();
ctx_.reset(ctx_raw);
}
};
@ -76,7 +76,7 @@ template <typename T>
void ExpectEquals(AbstractTensorHandle* t, T expected) {
TF_Tensor* result_t;
Status s = tensorflow::GetValue(t, &result_t);
ASSERT_TRUE(s.ok()) << s.error_message();
ASSERT_TRUE(s.ok()) << s.message();
auto value = static_cast<T*>(TF_TensorData(result_t));
EXPECT_EQ(*value, expected);
TF_DeleteTensor(result_t);
@ -89,7 +89,7 @@ TEST_P(VariableTest, CreateAssignReadDestroy) {
AbstractTensorHandle* var_ptr = nullptr;
PartialTensorShape scalar_shape;
TF_EXPECT_OK(
PartialTensorShape::MakePartialShape<int32>({}, 0, &scalar_shape));
PartialTensorShape::MakePartialShape<int32_t>({}, 0, &scalar_shape));
TF_EXPECT_OK(tensorflow::ops::VarHandleOp(ctx_.get(), &var_ptr, DT_FLOAT,
scalar_shape));
var.reset(var_ptr);


@ -13,6 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <string>
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/testutil.h"
#include "tensorflow/cc/ops/standard_ops.h"
@ -241,7 +243,7 @@ TEST(CCOpTest, InvalidFinalize) {
ops::ReaderReadUpTo(root, Variable(root, {}, DT_STRING),
Variable(root, {}, DT_STRING), static_cast<int32>(2));
EXPECT_FALSE(root.status().ok());
auto err_msg = root.status().error_message();
auto err_msg = std::string(root.status().message());
EXPECT_NE(err_msg.find("'num_records' passed int32 expected int64"),
string::npos);
}


@ -459,7 +459,7 @@ TEST_F(GradientsTest, UnreachableInput) {
Status status =
AddSymbolicGradients(scope_test_, {m1}, {z}, {dm1}, &grad_outputs);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
EXPECT_EQ(status.error_message(),
EXPECT_EQ(status.message(),
"Cannot compute the partial derivative"
" for node 'z' as it's unreachable from the output node(s).");
}


@ -42,7 +42,7 @@ class WhileLoopTest : public ::testing::Test {
Status s =
ops::BuildWhileLoop(scope_, inputs_, cond, body, kFrameName, &outputs_);
EXPECT_EQ(s.code(), error_code);
EXPECT_EQ(s.error_message(), error_msg);
EXPECT_EQ(s.message(), error_msg);
}
template <typename T>


@ -26,7 +26,9 @@ package(
licenses = ["notice"],
)
exports_files(["loader.h"])
exports_files([
"loader.h",
])
cc_library(
name = "constants",
@ -58,9 +60,9 @@ cc_library(
hdrs = ["reader.h"],
deps = [
":constants",
"//tensorflow/core:protos_all_cc",
":metrics",
":util",
"//tensorflow/core:protos_all_cc",
] + if_not_mobile([
# TODO(b/111634734): :lib and :protos_all contain dependencies that
# cannot be built on mobile platforms. Instead, include the appropriate
@ -158,6 +160,7 @@ cc_library(
"//tensorflow/core/util/tensor_bundle",
"//tensorflow/core/util/tensor_bundle:byteswaptensor",
"@com_google_absl//absl/container:flat_hash_set",
"@jsoncpp_git//:jsoncpp",
],
)
@ -186,6 +189,11 @@ tf_cc_test(
"//tensorflow/core:test",
"//tensorflow/core:test_main",
"//tensorflow/core/platform:test",
"//tensorflow/core/protobuf:for_core_protos_cc",
"@com_google_absl//absl/status",
"@com_google_absl//absl/status:statusor",
"@com_google_absl//absl/strings",
"@jsoncpp_git//:jsoncpp",
],
)
@ -331,7 +339,12 @@ cc_library(
"//tensorflow/python:__pkg__",
"//tensorflow/security/fuzzing/cc/ops:__pkg__", # TODO(b/261455394): Remove.
],
deps = if_not_mobile(["//tensorflow/core:lib"]) + if_android(["//tensorflow/core:portable_tensorflow_lib_lite"]),
deps = [
"//tensorflow/core:protos_all_cc",
"@com_google_absl//absl/status",
"@com_google_absl//absl/status:statusor",
"@jsoncpp_git//:jsoncpp",
] + if_not_mobile(["//tensorflow/core:lib"]) + if_android(["//tensorflow/core:portable_tensorflow_lib_lite"]),
alwayslink = True,
)
@ -341,7 +354,11 @@ cc_library(
visibility = ["//tensorflow/python/saved_model:__subpackages__"],
deps = if_static([
":metrics_impl",
]) + if_not_mobile(["//tensorflow/core:lib"]) + if_android(["//tensorflow/core:portable_tensorflow_lib_lite"]),
]) + if_not_mobile(["//tensorflow/core:lib"]) + if_android(["//tensorflow/core:portable_tensorflow_lib_lite"]) + [
"//tensorflow/core/protobuf:for_core_protos_cc",
"@com_google_absl//absl/status",
"@com_google_absl//absl/status:statusor",
],
)
tf_cc_test(
@ -350,9 +367,10 @@ tf_cc_test(
srcs = ["metrics_test.cc"],
deps = [
":metrics",
"//tensorflow/core:lib",
"//tensorflow/core:test",
"//tensorflow/core:test_main",
"@com_google_googletest//:gtest_main",
"@jsoncpp_git//:jsoncpp",
],
)
@ -392,14 +410,14 @@ cc_library(
],
deps = [
":constants",
"//tensorflow/core:protos_all_cc",
"//tensorflow/core/graph/regularization:simple_delete",
"//tensorflow/core/graph/regularization:util",
"//tensorflow/core:protos_all_cc",
"//tensorflow/core/util/tensor_bundle:naming",
"//tensorflow/tsl/platform:types",
"@com_google_protobuf//:protobuf_headers",
"@com_google_absl//absl/container:btree",
"@com_google_absl//absl/strings",
"@com_google_protobuf//:protobuf_headers",
] + if_not_mobile(["//tensorflow/core:lib"]) + if_android(["//tensorflow/core:portable_tensorflow_lib_lite"]),
alwayslink = True,
)
@ -407,7 +425,12 @@ cc_library(
cc_library(
name = "fingerprinting",
hdrs = ["fingerprinting.h"],
visibility = ["//tensorflow/python/saved_model:__subpackages__"],
visibility = [
"//learning/brain/contrib/hub/server/distro:__subpackages__",
"//learning/brain/contrib/tpu_modeling:__subpackages__",
"//learning/tfx/pipeline/util:__subpackages__",
"//tensorflow/python/saved_model:__subpackages__",
],
deps = if_static([
":fingerprinting_impl",
"@com_google_absl//absl/strings",


@ -73,8 +73,8 @@ Status ReadSavedModelProto(const string& export_dir,
Status err;
if (found_pb.code() == found_pbtxt.code()) {
err = Status(found_pb.code(), StrCat(found_pb.error_message(), "\n",
found_pbtxt.error_message()));
err = Status(found_pb.code(),
StrCat(found_pb.message(), "\n", found_pbtxt.message()));
} else if (found_pb.code() == NOT_FOUND) {
err = found_pbtxt;
} else if (found_pbtxt.code() == NOT_FOUND) {
@ -171,11 +171,17 @@ Status SavedModelV2Bundle::Load(const std::string& export_dir,
// Read the fingerprint.
auto fingerprint_proto =
saved_model::fingerprinting::ReadSavedModelFingerprint(export_dir);
std::string singleprint = "";
if (fingerprint_proto.ok()) {
// Set gauge cell with saved_model_checksum.
metrics::SavedModelReadFingerprint().Set(
std::to_string(fingerprint_proto->saved_model_checksum()));
metrics::MakeFingerprintJson(fingerprint_proto.value()));
singleprint =
saved_model::fingerprinting::Singleprint(fingerprint_proto.value());
}
metrics::SavedModelReadPathAndSingleprint().Set(
metrics::MakeSavedModelPathAndSingleprint(export_dir, singleprint));
return OkStatus();
}


@ -25,8 +25,8 @@ limitations under the License.
#include <string>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/protobuf/graph_debug_info.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saved_object_graph.pb.h"
#include "tensorflow/core/protobuf/trackable_object_graph.pb.h"


@ -17,19 +17,25 @@ limitations under the License.
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "json/json.h"
#include "json/reader.h"
#include "json/value.h"
#include "tensorflow/cc/saved_model/metrics.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/fingerprint.pb.h"
#include "tensorflow/tsl/lib/core/status_test_util.h"
namespace tensorflow {
namespace {
constexpr char kTestData[] = "cc/saved_model/testdata";
// This is the value in testdata/VarsAndArithmeticObjectGraph/fingerprint.pb
constexpr char kV2ModuleSavedModelChecksum[] = "15788619162413586750";
class BundleV2Test : public ::testing::Test {
protected:
@ -116,10 +122,33 @@ TEST_F(BundleV2Test, UpdatesMetrics) {
EXPECT_EQ(metrics::SavedModelReadCount("2").value(), read_count + 1);
EXPECT_EQ(metrics::SavedModelReadApi(kCCLoadBundleV2Label).value(),
api_count + 1);
// Check that the gauge contains the fingerprint.
EXPECT_EQ(metrics::SavedModelReadFingerprint().value(),
kV2ModuleSavedModelChecksum);
// Check that the gauge contains the path and fingerprint.
EXPECT_EQ(metrics::SavedModelReadPath().value(), export_dir);
Json::Value fingerprint = Json::objectValue;
Json::Reader reader = Json::Reader();
reader.parse(metrics::SavedModelReadFingerprint().value(), fingerprint);
EXPECT_EQ(fingerprint["saved_model_checksum"].asUInt64(),
15788619162413586750ULL);
EXPECT_EQ(fingerprint["graph_def_program_hash"].asUInt64(),
706963557435316516ULL);
EXPECT_EQ(fingerprint["signature_def_hash"].asUInt64(),
5693392539583495303ULL);
EXPECT_EQ(fingerprint["saved_object_graph_hash"].asUInt64(),
12074714563970609759ULL);
EXPECT_EQ(fingerprint["checkpoint_hash"].asUInt64(), 10788359570789890102ULL);
// TODO(adamcogdell): add ASSERT_OK_AND_ASSIGN here after migrating
// cc/saved_model code from the tsl version of StatusOr to absl::StatusOr
auto [path, singleprint] = metrics::ParseSavedModelPathAndSingleprint(
metrics::SavedModelReadPathAndSingleprint().value());
EXPECT_TRUE(absl::StrContains(
path, absl::StrCat(kTestData, "/VarsAndArithmeticObjectGraph")));
EXPECT_EQ(singleprint,
"706963557435316516/" // graph_def_program_hash
"5693392539583495303/" // signature_def_hash
"12074714563970609759/" // saved_object_graph_hash
"10788359570789890102"); // checkpoint_hash
}
} // namespace


@ -152,16 +152,14 @@ StatusOr<FingerprintDef> ReadSavedModelFingerprint(
const string fingerprint_pb_path =
io::JoinPath(export_dir, kFingerprintFilenamePb);
Status found_pb = Env::Default()->FileExists(fingerprint_pb_path);
if (found_pb.ok()) {
FingerprintDef fingerprint_proto;
Status result = ReadBinaryProto(Env::Default(), fingerprint_pb_path,
&fingerprint_proto);
if (result.ok()) {
return fingerprint_proto;
}
return result;
}
return found_pb;
if (!found_pb.ok()) return found_pb;
FingerprintDef fingerprint_proto;
Status result =
ReadBinaryProto(Env::Default(), fingerprint_pb_path, &fingerprint_proto);
if (!result.ok()) return result;
return fingerprint_proto;
}
std::string Singleprint(uint64 graph_def_program_hash,
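The rewrite above flattens the nested success path into guard clauses; returning a plain Status from a function that yields StatusOr<FingerprintDef> relies on the implicit error conversion. A toy sketch of the same shape, assuming absl::StatusOr:

#include "absl/status/status.h"
#include "absl/status/statusor.h"

absl::StatusOr<int> ParsePositive(int raw) {
  // A Status converts implicitly into an errored StatusOr, so guard
  // clauses can return the error directly.
  if (raw < 0) return absl::InvalidArgumentError("negative value");
  return raw;  // a value converts into an ok StatusOr
}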


@ -136,7 +136,8 @@ TEST(FingerprintingTest, TestReadValidFingerprint) {
TEST(FingerprintingTest, TestReadNonexistentFingerprint) {
const std::string export_dir = io::JoinPath(
testing::TensorFlowSrcRoot(), "cc/saved_model/testdata", "AssetModule");
EXPECT_FALSE(ReadSavedModelFingerprint(export_dir).ok());
EXPECT_EQ(ReadSavedModelFingerprint(export_dir).status().code(),
absl::StatusCode::kNotFound);
}
TEST(FingerprintingTest, TestSingleprint) {


@ -26,6 +26,7 @@ limitations under the License.
#include "tensorflow/cc/saved_model/util.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
@ -38,7 +39,6 @@ limitations under the License.
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/file_system_helper.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/graph_debug_info.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saver.pb.h"
#include "tensorflow/core/public/session.h"


@ -21,8 +21,8 @@ limitations under the License.
#include <string>
#include <unordered_set>
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/protobuf/graph_debug_info.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/public/session.h"


@ -16,10 +16,15 @@ limitations under the License.
#include "tensorflow/cc/saved_model/metrics.h"
#include <string>
#include <utility>
#include "json/config.h"
#include "json/json.h"
#include "json/writer.h"
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tensorflow/core/lib/monitoring/gauge.h"
#include "tensorflow/core/lib/monitoring/sampler.h"
#include "tensorflow/core/protobuf/fingerprint.pb.h"
namespace tensorflow {
namespace metrics {
@ -64,6 +69,17 @@ auto* saved_model_write_path = monitoring::Gauge<string, 0>::New(
"/tensorflow/core/saved_model/write/path",
"The path (saved_model_path) of the exported SavedModel.");
// Gauge that contains the path (saved_model_path) and the singleprint
// (concatenation of graph_def_program_hash, signature_def_hash,
// saved_object_graph_hash, and checkpoint_hash) of the newly written
// SavedModel.
auto* saved_model_write_path_and_singleprint =
monitoring::Gauge<string, 0>::New(
"/tensorflow/core/saved_model/write/path_and_singleprint",
"The path (saved_model_path) and singleprint (concatenation of "
"graph_def_program_hash, signature_def_hash, saved_object_graph_hash, "
"and checkpoint_hash) of the newly written SavedModel.");
// Gauge that contains the fingerprint (saved_model_checksum) of the loaded
// SavedModel.
auto* saved_model_read_fingerprint = monitoring::Gauge<string, 0>::New(
@ -75,6 +91,15 @@ auto* saved_model_read_path = monitoring::Gauge<string, 0>::New(
"/tensorflow/core/saved_model/read/path",
"The path (saved_model_path) of the loaded SavedModel.");
// Gauge that contains the path (saved_model_path) and the singleprint
// (concatenation of graph_def_program_hash, signature_def_hash,
// saved_object_graph_hash, and checkpoint_hash) of the loaded SavedModel.
auto* saved_model_read_path_and_singleprint = monitoring::Gauge<string, 0>::New(
"/tensorflow/core/saved_model/read/path_and_singleprint",
"The path (saved_model_path) and singleprint (concatenation of "
"graph_def_program_hash, signature_def_hash, saved_object_graph_hash, "
"and checkpoint_hash) of the loaded SavedModel.");
// Distribution of checkpoint write durations.
auto* checkpoint_write_durations = monitoring::Sampler<1>::New(
{
@ -153,6 +178,10 @@ monitoring::GaugeCell<string>& SavedModelReadPath() {
return *saved_model_read_path->GetCell();
}
monitoring::GaugeCell<string>& SavedModelReadPathAndSingleprint() {
return *saved_model_read_path_and_singleprint->GetCell();
}
monitoring::GaugeCell<string>& SavedModelWriteFingerprint() {
return *saved_model_write_fingerprint->GetCell();
}
@ -161,6 +190,41 @@ monitoring::GaugeCell<string>& SavedModelWritePath() {
return *saved_model_write_path->GetCell();
}
monitoring::GaugeCell<string>& SavedModelWritePathAndSingleprint() {
return *saved_model_write_path_and_singleprint->GetCell();
}
string MakeFingerprintJson(FingerprintDef fingerprint_serialized) {
Json::Value fingerprint = Json::objectValue;
fingerprint["saved_model_checksum"] =
Json::UInt64(fingerprint_serialized.saved_model_checksum());
fingerprint["graph_def_program_hash"] =
Json::UInt64(fingerprint_serialized.graph_def_program_hash());
fingerprint["signature_def_hash"] =
Json::UInt64(fingerprint_serialized.signature_def_hash());
fingerprint["saved_object_graph_hash"] =
Json::UInt64(fingerprint_serialized.saved_object_graph_hash());
fingerprint["checkpoint_hash"] =
Json::UInt64(fingerprint_serialized.checkpoint_hash());
Json::StreamWriterBuilder json_factory;
return Json::writeString(json_factory, fingerprint);
}
string MakeSavedModelPathAndSingleprint(string path, string singleprint) {
return absl::StrCat(path, ":", singleprint);
}
std::pair<string, string> ParseSavedModelPathAndSingleprint(
string path_and_singleprint) {
size_t delimiter = path_and_singleprint.rfind(':');
if (delimiter == std::string::npos) {
return std::pair<string, string>("", "");
}
return std::pair<string, string>(path_and_singleprint.substr(0, delimiter),
path_and_singleprint.substr(delimiter + 1));
}
monitoring::SamplerCell& CheckpointReadDuration(absl::string_view api_label) {
return *checkpoint_read_durations->GetCell(std::string(api_label));
}
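A round-trip sketch for the two helpers above; because parsing uses rfind(':'), a colon inside the path itself still splits correctly (the metrics test below exercises exactly this case):

#include <iostream>
#include <string>

#include "tensorflow/cc/saved_model/metrics.h"  // declares the helpers above

int main() {
  std::string joined = tensorflow::metrics::MakeSavedModelPathAndSingleprint(
      "path/model:name", "singleprint");
  auto [path, singleprint] =
      tensorflow::metrics::ParseSavedModelPathAndSingleprint(joined);
  std::cout << path << "\n" << singleprint << "\n";  // path keeps its ':'
}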


@ -20,11 +20,13 @@ limitations under the License.
#ifndef TENSORFLOW_CC_SAVED_MODEL_METRICS_H_
#define TENSORFLOW_CC_SAVED_MODEL_METRICS_H_
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tensorflow/core/lib/monitoring/gauge.h"
#include "tensorflow/core/lib/monitoring/sampler.h"
#include "tensorflow/core/protobuf/fingerprint.pb.h"
namespace tensorflow {
namespace metrics {
@ -49,6 +51,12 @@ monitoring::GaugeCell<string>& SavedModelWriteFingerprint();
// the saved_model_path of the SM when it is exported.
monitoring::GaugeCell<string>& SavedModelWritePath();
// Returns "/tensorflow/core/saved_model/write/path_and_singleprint" cell, which
// contains the path (saved_model_path) and singleprint (concatenation of
// graph_def_program_hash, signature_def_hash, saved_object_graph_hash,
// and checkpoint_hash) of the SavedModel when it is exported.
monitoring::GaugeCell<string>& SavedModelWritePathAndSingleprint();
// Returns "/tensorflow/core/saved_model/read/fingerprint" cell, which contains
// the saved_model_checksum of the SM's fingerprint when it is imported.
monitoring::GaugeCell<string>& SavedModelReadFingerprint();
@ -57,6 +65,24 @@ monitoring::GaugeCell<string>& SavedModelReadFingerprint();
// the saved_model_path of the SM when it is imported.
monitoring::GaugeCell<string>& SavedModelReadPath();
// Returns "/tensorflow/core/saved_model/read/path_and_singleprint" cell, which
// contains the path (saved_model_path) and singleprint (concatenation of
// graph_def_program_hash, signature_def_hash, saved_object_graph_hash,
// and checkpoint_hash) of the SavedModel when it is imported.
monitoring::GaugeCell<string>& SavedModelReadPathAndSingleprint();
// Returns the fingerprint as a Json string.
string MakeFingerprintJson(FingerprintDef fingerprint_serialized);
// Returns canonical string concatenation of path and singleprint.
string MakeSavedModelPathAndSingleprint(string path, string singleprint);
// TODO(adamcogdell): change to StatusOr<> to account for missing delimiter
// Returns path and singleprint as a pair, parsed canonically from the string
// metric.
std::pair<string, string> ParseSavedModelPathAndSingleprint(
string path_and_singleprint);
// Returns "/tensorflow/core/saved_model/write/api" cell. This metric has 1
// field "api_label" which corresponds to a SavedModel write API. The cell for
// `foo` should be incremented when the write API `foo` is called.


@ -15,6 +15,10 @@ limitations under the License.
#include "tensorflow/cc/saved_model/metrics.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "json/json.h"
#include "json/reader.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
@ -89,6 +93,17 @@ TEST(MetricsTest, TestWritePath) {
EXPECT_EQ(SavedModelWritePath().value(), "bar");
}
TEST(MetricsTest, TestWritePathAndSingleprint) {
EXPECT_EQ(SavedModelWritePathAndSingleprint().value(), "");
SavedModelWritePathAndSingleprint().Set("foo");
EXPECT_EQ(SavedModelWritePathAndSingleprint().value(), "foo");
SavedModelWritePathAndSingleprint().Set("bar");
EXPECT_EQ(SavedModelWritePathAndSingleprint().value(), "bar");
EXPECT_EQ(MakeSavedModelPathAndSingleprint("path", "singleprint"),
"path:singleprint");
}
TEST(MetricsTest, TestReadFingerprint) {
EXPECT_EQ(SavedModelReadFingerprint().value(), "");
SavedModelReadFingerprint().Set("foo");
@ -105,5 +120,44 @@ TEST(MetricsTest, TestReadPath) {
EXPECT_EQ(SavedModelReadPath().value(), "bar");
}
TEST(MetricsTest, TestReadPathAndSingleprint) {
EXPECT_EQ(SavedModelReadPathAndSingleprint().value(), "");
SavedModelReadPathAndSingleprint().Set("foo");
EXPECT_EQ(SavedModelReadPathAndSingleprint().value(), "foo");
SavedModelReadPathAndSingleprint().Set("bar");
EXPECT_EQ(SavedModelReadPathAndSingleprint().value(), "bar");
auto [path, singleprint] =
ParseSavedModelPathAndSingleprint("path/model:name:singleprint");
EXPECT_EQ(path, "path/model:name");
EXPECT_EQ(singleprint, "singleprint");
}
TEST(MetricsTest, TestMakeFingerprintJson) {
FingerprintDef fingerprint;
fingerprint.set_saved_model_checksum(1);
fingerprint.set_graph_def_program_hash(2);
fingerprint.set_signature_def_hash(3);
fingerprint.set_saved_object_graph_hash(4);
fingerprint.set_checkpoint_hash(5);
string serialized_fingerprint_json = MakeFingerprintJson(fingerprint);
EXPECT_EQ(
serialized_fingerprint_json,
"{\n\t\"checkpoint_hash\" : 5,\n\t\"graph_def_program_hash\" : "
"2,\n\t\"saved_model_checksum\" : 1,\n\t\"saved_object_graph_hash\" : "
"4,\n\t\"signature_def_hash\" : 3\n}");
Json::Value fingerprint_json = Json::objectValue;
Json::Reader reader = Json::Reader();
reader.parse(serialized_fingerprint_json, fingerprint_json);
EXPECT_EQ(fingerprint_json["saved_model_checksum"].asUInt64(), 1);
EXPECT_EQ(fingerprint_json["graph_def_program_hash"].asUInt64(), 2);
EXPECT_EQ(fingerprint_json["signature_def_hash"].asUInt64(), 3);
EXPECT_EQ(fingerprint_json["saved_object_graph_hash"].asUInt64(), 4);
EXPECT_EQ(fingerprint_json["checkpoint_hash"].asUInt64(), 5);
}
} // namespace metrics
} // namespace tensorflow


@ -21,8 +21,8 @@ limitations under the License.
#include <string>
#include <unordered_set>
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/protobuf/graph_debug_info.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
namespace tensorflow {
