commit 7a144ad7fa
@@ -126,7 +126,7 @@ class TensorSlice {
   // Interaction with other TensorSlices.
 
   // Compute the intersection with another slice and if "result" is not
-  // nullptr, store the results in *result; returns true is there is any real
+  // nullptr, store the results in *result; returns true if there is any real
   // intersection.
   bool Intersect(const TensorSlice& other, TensorSlice* result) const;
   // A short hand.

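For context on the API this comment documents: as I read TensorSlice, each dimension is a (start, length) extent, with a sentinel meaning "the full dimension", and Intersect succeeds only if every per-dimension overlap is non-empty. A minimal Python sketch of that logic (illustrative only, not the C++ implementation; the -1 "full dimension" sentinel here is an assumption):

    # Illustrative sketch (not the TensorFlow C++ code): intersect two tensor
    # slices, where each slice is a list of (start, length) pairs per dimension
    # and length == -1 means "take the full dimension".
    def intersect_slices(a, b):
        """Returns the per-dimension intersection of two slices, or None if empty."""
        if len(a) != len(b):
            return None  # Slices must have the same rank to intersect.
        result = []
        for (a_start, a_len), (b_start, b_len) in zip(a, b):
            if a_len == -1:          # 'a' covers the full dimension.
                result.append((b_start, b_len))
                continue
            if b_len == -1:          # 'b' covers the full dimension.
                result.append((a_start, a_len))
                continue
            start = max(a_start, b_start)
            end = min(a_start + a_len, b_start + b_len)
            if end <= start:         # No real overlap in this dimension.
                return None
            result.append((start, end - start))
        return result

    # Example: [0, 10) x full  intersected with  [5, 20) x [0, 3)  ->  [5, 10) x [0, 3)
    print(intersect_slices([(0, 10), (0, -1)], [(5, 15), (0, 3)]))  # [(5, 5), (0, 3)]
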
@@ -112,10 +112,10 @@ Status SingleMachine::Shutdown() {
   TF_RETURN_IF_ERROR(CloseSession(true /*use_timeout*/));
 
   // Delete the threadpool: this ensures that all the pending closures complete
-  // before we return. Note that if that if TF deadlocked on us, the closures
-  // will never complete, and the call to thread_pool_.reset() will never
-  // return: therefore we need to delete the threadpool with the background
-  // thread. That thread itself will also never complete, so the user should
+  // before we return. Note that if TF deadlocked on us, the closures will
+  // never complete, and the call to thread_pool_.reset() will never return:
+  // therefore we need to delete the threadpool with the background thread.
+  // That thread itself will also never complete, so the user should
   // abort the process to avoid leaking too many resources.
   auto n = std::make_shared<Notification>();
   Env::Default()->SchedClosure([this, n]() {

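The reworded comment describes a shutdown pattern: destroy the thread pool from a background thread and wait with a timeout, so a deadlocked pool cannot hang Shutdown() forever. A rough analogue of that pattern using Python's standard library (names and timeout are illustrative, not TensorFlow's Env/Notification API):

    # Rough analogue of the pattern in the comment: tear down a worker pool from
    # a helper thread and wait with a timeout, so a deadlocked pool cannot block
    # shutdown forever.  Names here are illustrative, not TensorFlow's.
    import threading
    from concurrent.futures import ThreadPoolExecutor

    def shutdown_pool(pool, timeout_sec=30.0):
        done = threading.Event()          # plays the role of the Notification

        def _destroy():
            pool.shutdown(wait=True)      # may never return if the pool is deadlocked
            done.set()

        threading.Thread(target=_destroy, daemon=True).start()
        if not done.wait(timeout_sec):
            # The pool (and the helper thread) are stuck; resources leak and the
            # caller should abort the process rather than wait indefinitely.
            return False
        return True

    pool = ThreadPoolExecutor(max_workers=4)
    print(shutdown_pool(pool))  # True: an idle pool shuts down promptly
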
@@ -14,7 +14,7 @@ limitations under the License.
 ==============================================================================*/
 
 // Build a tree structure based on the TensorFlow model's python code stacks.
-// Stats are aggregated from descendants from ancestors.
+// Stats are aggregated from descendants to ancestors.
 
 #ifndef THIRD_PARTY_TENSORFLOW_CORE_PROFILER_INTERNAL_TFPROF_CODE_H_
 #define THIRD_PARTY_TENSORFLOW_CORE_PROFILER_INTERNAL_TFPROF_CODE_H_

@@ -15,7 +15,7 @@ limitations under the License.
 
 // Build a tree structure based on the TensorFlow op names.
 // For example, 'name1/name2' is a child of 'name1'.
-// Stats are aggregated from descendants from ancestors.
+// Stats are aggregated from descendants to ancestors.
 
 #ifndef THIRD_PARTY_TENSORFLOW_CORE_PROFILER_INTERNAL_TFPROF_SCOPE_H_
 #define THIRD_PARTY_TENSORFLOW_CORE_PROFILER_INTERNAL_TFPROF_SCOPE_H_

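Both tfprof headers touched here state the same aggregation rule: build a tree (for the scope view, from '/'-separated op names) and roll statistics up from descendants to ancestors. A small sketch with made-up stats, just to pin down the direction of aggregation:

    # Sketch of the aggregation rule described in the tfprof headers: op names
    # such as 'name1/name2' form a tree ('name1/name2' is a child of 'name1'),
    # and each node's total is its own stat plus the totals of its descendants.
    from collections import defaultdict

    def aggregate(op_stats):
        totals = defaultdict(float)
        for name, value in op_stats.items():
            parts = name.split('/')
            # Credit the value to the op itself and to every ancestor prefix.
            for i in range(1, len(parts) + 1):
                totals['/'.join(parts[:i])] += value
        return dict(totals)

    stats = {'conv1/weights': 3.0, 'conv1/bias': 1.0, 'fc/weights': 2.0}
    print(aggregate(stats))
    # conv1 totals 4.0 (3.0 + 1.0 rolled up from its children); fc totals 2.0.
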
@@ -198,7 +198,7 @@ def smart_cond(pred, fn1, fn2, name=None):
     Tensors returned by the call to either `fn1` or `fn2`.
 
   Raises:
-    TypeError is fn1 or fn2 is not callable.
+    TypeError: If `fn1` or `fn2` is not callable.
   """
   if not callable(fn1):
     raise TypeError('`fn1` must be callable.')

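The docstring change only tightens the Raises: section. The contract it documents can be summarised with a small stand-in like the one below (a sketch, not TensorFlow's smart_cond, which also falls back to tf.cond when the predicate is not statically known):

    # Minimal stand-in mirroring the documented contract of smart_cond: both
    # branches must be callables, otherwise a TypeError is raised.
    def smart_cond_sketch(pred, fn1, fn2):
        if not callable(fn1):
            raise TypeError('`fn1` must be callable.')
        if not callable(fn2):
            raise TypeError('`fn2` must be callable.')
        return fn1() if pred else fn2()

    print(smart_cond_sketch(True, lambda: 'branch 1', lambda: 'branch 2'))  # branch 1
    try:
        smart_cond_sketch(True, 'not callable', lambda: 'branch 2')
    except TypeError as e:
        print(e)                                                            # `fn1` must be callable.
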
@@ -226,7 +226,7 @@ def constant_value(pred):
     True or False if `pred` has a constant boolean value, None otherwise.
 
   Raises:
-    TypeError is pred is not a Variable, Tensor or bool.
+    TypeError: If `pred` is not a Variable, Tensor or bool.
   """
   if isinstance(pred, bool):
     pred_value = pred

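The same kind of fix for constant_value: it returns True or False when the predicate's boolean value is known statically, None otherwise, and raises TypeError for unsupported types. A stand-in sketch of that contract (the real function also inspects Tensor and Variable predicates):

    # Stand-in for the documented behaviour of constant_value: a Python bool is
    # its own constant value; unsupported types raise TypeError.  Sketch only;
    # the real function also accepts tf.Tensor / tf.Variable predicates and
    # returns None when their boolean value is not known until the graph runs.
    def constant_value_sketch(pred):
        if isinstance(pred, bool):
            return pred
        raise TypeError('`pred` must be a Variable, Tensor or bool.')

    print(constant_value_sketch(True))   # True
    try:
        constant_value_sketch(0.5)
    except TypeError as e:
        print(e)                          # `pred` must be a Variable, Tensor or bool.
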
@@ -750,7 +750,7 @@ def fill_lower_triangular(x, validate_args=False, name="fill_lower_triangular"):
     tril: `Tensor` with lower triangular elements filled from `x`.
 
   Raises:
-    ValueError: if shape if `x` has static shape which cannot be mapped to a
+    ValueError: if shape of `x` has static shape which cannot be mapped to a
       lower triangular matrix.
   """
   # TODO(jvdillon): Replace this code with dedicated op when it exists.

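The ValueError being reworded fires when the last dimension's length is not a triangular number n*(n+1)/2 and so cannot be laid out as an n x n lower-triangular matrix. A NumPy sketch of that mapping (illustrative only; the TensorFlow function operates on Tensors, supports batches, and may differ in detail):

    # NumPy sketch of the mapping fill_lower_triangular documents: a vector of
    # length n*(n+1)/2 fills the lower triangle of an n x n matrix, row by row.
    import numpy as np

    def fill_lower_triangular_sketch(x):
        m = len(x)
        n = int((np.sqrt(8 * m + 1) - 1) / 2)      # solve n*(n+1)/2 == m
        if n * (n + 1) // 2 != m:
            raise ValueError('shape of `x` cannot be mapped to a lower triangular matrix.')
        tril = np.zeros((n, n), dtype=np.asarray(x).dtype)
        tril[np.tril_indices(n)] = x               # fill the lower triangle row by row
        return tril

    print(fill_lower_triangular_sketch([1, 2, 3, 4, 5, 6]))
    # [[1 0 0]
    #  [2 3 0]
    #  [4 5 6]]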