Automated Code Change

PiperOrigin-RevId: 826363842
Authored by A. Unique TensorFlower on 2025-10-31 00:40:43 -07:00; committed by TensorFlower Gardener.
Parent: ebacf2a211
Commit: 5133f83425
14 changed files with 129 additions and 123 deletions
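
The diff below is a mechanical sweep replacing TensorFlow's legacy type aliases (string, uint64, uint32, int32) with their standard C++ spellings (std::string, uint64_t, uint32_t, int32_t). As a reading aid, a minimal before/after sketch (the function FormatNodeId is hypothetical, not part of the change):

#include <cstdint>
#include <string>

// Before: signatures spelled with TensorFlow's platform aliases, e.g.
//   string FormatNodeId(uint64 node_id);
// After: the standard types written out explicitly.
std::string FormatNodeId(uint64_t node_id) {
  return "node/" + std::to_string(node_id);
}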

View File

@ -134,9 +134,9 @@ void SetMemory(NodeExecStatsInterface* stats, OpKernelContext* ctx) {
// Time the execution of kernels (in CPU cycles). Used to dynamically identify
// inexpensive kernels which can be dispatched inline.
struct KernelTimer {
uint64 start_cycles = profile_utils::CpuUtils::GetCurrentClockCycle();
uint64_t start_cycles = profile_utils::CpuUtils::GetCurrentClockCycle();
uint64 ElapsedCycles() {
uint64_t ElapsedCycles() {
return profile_utils::CpuUtils::GetCurrentClockCycle() - start_cycles;
}
};
@ -197,14 +197,14 @@ class ExecutorImpl : public Executor {
// given node is expensive. The new cost estimate is a weighted average of
// the old cost estimate and the latest cost. We only update cost estimates
// for kernels for which IsExpensive() return true.
void UpdateCostEstimate(const NodeItem& node, uint64 elapsed_cycles) {
void UpdateCostEstimate(const NodeItem& node, uint64_t elapsed_cycles) {
// N.B. Updates to `cost_estimate` are atomic but unlocked. Simultaneous
// updates may result in one or more updates being ignored. This does not
// affect correctness but may slow down the update frequency.
std::atomic_uint_fast64_t& cost_estimate = cost_estimates_[node.node_id];
auto prev_estimate = cost_estimate.load(std::memory_order_relaxed);
uint64 new_estimate =
uint64_t new_estimate =
((kCostDecay - 1) * prev_estimate + elapsed_cycles) / kCostDecay;
cost_estimate.store(new_estimate, std::memory_order_relaxed);
@ -214,9 +214,9 @@ class ExecutorImpl : public Executor {
// Initial time (in CPU cycles) we expect an operation to take. Used to
// determine whether an operation should be place in a threadpool.
// Operations start out "expensive".
static constexpr uint64 kInitialCostEstimateCycles = 100 * 1000 * 1000;
static constexpr uint64 kOpIsExpensiveThresholdCycles = 8000;
static constexpr uint64 kCostDecay = 10;
static constexpr uint64_t kInitialCostEstimateCycles = 100 * 1000 * 1000;
static constexpr uint64_t kOpIsExpensiveThresholdCycles = 8000;
static constexpr uint64_t kCostDecay = 10;
std::vector<bool> is_expensive_;
// std::unique_ptr<std::atomic<bool>[]> is_expensive_;
@ -369,14 +369,14 @@ class ExecutorState {
// Maximum number of kernels that can be scheduled inline. If lots of kernels
// are ready at the same time, scheduling them in one thread can be very slow.
// TODO(fishx): Make it configurable if necessary.
static constexpr uint64 kInlineScheduleReadyThreshold = 500;
static constexpr uint64_t kInlineScheduleReadyThreshold = 500;
// Not owned.
RendezvousInterface* rendezvous_;
CollectiveExecutor* collective_executor_ = nullptr;
const ConfigProto* const session_config_;
SessionState* session_state_;
string session_handle_;
std::string session_handle_;
const SessionMetadata* session_metadata_ = nullptr;
TensorStore* tensor_store_;
// Step-local container.
@ -1099,7 +1099,7 @@ absl::Status ExecutorState<PropagatorStateType>::ProcessOutputs(
}
if (s.code() == error::RESOURCE_EXHAUSTED) {
if (stats_collector_) {
string err =
std::string err =
stats_collector_->ReportAllocsOnResourceExhausted(s.message());
s = errors::CreateWithUpdatedMessage(s, absl::StrCat(s.message(), err));
} else {
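
The executor hunks above also pass through the dynamic cost model that decides whether a kernel is cheap enough to dispatch inline. As a reading aid, here is a self-contained sketch of that scheme using the constants shown above; the surrounding class is a simplified stand-in for ExecutorImpl, not the real implementation. Each node's estimate starts out expensive, is decayed toward every new cycle measurement, and is read and written with relaxed atomics, so simultaneous updates may drop a sample but never corrupt the value.

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <vector>

class NodeCostModel {
 public:
  // Constants mirror the values in the hunks above.
  static constexpr uint64_t kInitialCostEstimateCycles = 100 * 1000 * 1000;
  static constexpr uint64_t kOpIsExpensiveThresholdCycles = 8000;
  static constexpr uint64_t kCostDecay = 10;

  explicit NodeCostModel(std::size_t num_nodes) : estimates_(num_nodes) {
    for (auto& estimate : estimates_) {
      estimate.store(kInitialCostEstimateCycles, std::memory_order_relaxed);
    }
  }

  // Weighted average of the previous estimate and the latest measurement:
  // new = ((kCostDecay - 1) * old + elapsed) / kCostDecay. Updates are atomic
  // but unlocked, so simultaneous updates may drop a sample.
  void UpdateCostEstimate(int node_id, uint64_t elapsed_cycles) {
    std::atomic<uint64_t>& estimate = estimates_[node_id];
    const uint64_t prev = estimate.load(std::memory_order_relaxed);
    const uint64_t next =
        ((kCostDecay - 1) * prev + elapsed_cycles) / kCostDecay;
    estimate.store(next, std::memory_order_relaxed);
  }

  // Kernels whose estimate stays above the threshold are dispatched to a
  // threadpool; cheaper ones can be run inline.
  bool IsExpensive(int node_id) const {
    return estimates_[node_id].load(std::memory_order_relaxed) >
           kOpIsExpensiveThresholdCycles;
  }

 private:
  std::vector<std::atomic<uint64_t>> estimates_;
};

In the real executor, elapsed_cycles comes from the KernelTimer in the first hunk, which differences profile_utils::CpuUtils::GetCurrentClockCycle() before and after the kernel runs.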

View File

@ -105,7 +105,7 @@ class Executor {
const ConfigProto* session_config = nullptr;
SessionState* session_state = nullptr;
// Unique session identifier. Can be empty.
string session_handle;
std::string session_handle;
TensorStore* tensor_store = nullptr;
ScopedStepContainer* step_container = nullptr;
CollectiveExecutor* collective_executor = nullptr;

View File

@ -29,7 +29,7 @@ namespace {
static mutex executor_factory_lock(LINKER_INITIALIZED);
typedef std::unordered_map<string, ExecutorFactory*> ExecutorFactories;
typedef std::unordered_map<std::string, ExecutorFactory*> ExecutorFactories;
ExecutorFactories* executor_factories() {
static ExecutorFactories* factories = new ExecutorFactories;
return factories;
@ -37,7 +37,7 @@ ExecutorFactories* executor_factories() {
} // namespace
void ExecutorFactory::Register(const string& executor_type,
void ExecutorFactory::Register(const std::string& executor_type,
ExecutorFactory* factory) {
mutex_lock l(executor_factory_lock);
if (!executor_factories()->insert({executor_type, factory}).second) {
@ -47,9 +47,9 @@ void ExecutorFactory::Register(const string& executor_type,
}
namespace {
const string RegisteredFactoriesErrorMessageLocked()
const std::string RegisteredFactoriesErrorMessageLocked()
TF_SHARED_LOCKS_REQUIRED(executor_factory_lock) {
std::vector<string> factory_types;
std::vector<std::string> factory_types;
for (const auto& executor_factory : *executor_factories()) {
factory_types.push_back(executor_factory.first);
}
@ -58,7 +58,7 @@ const string RegisteredFactoriesErrorMessageLocked()
}
} // namespace
absl::Status ExecutorFactory::GetFactory(const string& executor_type,
absl::Status ExecutorFactory::GetFactory(const std::string& executor_type,
ExecutorFactory** out_factory) {
tf_shared_lock l(executor_factory_lock);
@ -73,7 +73,7 @@ absl::Status ExecutorFactory::GetFactory(const string& executor_type,
return absl::OkStatus();
}
absl::Status NewExecutor(const string& executor_type,
absl::Status NewExecutor(const std::string& executor_type,
const LocalExecutorParams& params, const Graph& graph,
std::unique_ptr<Executor>* out_executor) {
ExecutorFactory* factory = nullptr;

View File

@ -36,12 +36,13 @@ class ExecutorFactory {
std::unique_ptr<Executor>* out_executor) = 0;
virtual ~ExecutorFactory() {}
static void Register(const string& executor_type, ExecutorFactory* factory);
static absl::Status GetFactory(const string& executor_type,
static void Register(const std::string& executor_type,
ExecutorFactory* factory);
static absl::Status GetFactory(const std::string& executor_type,
ExecutorFactory** out_factory);
};
absl::Status NewExecutor(const string& executor_type,
absl::Status NewExecutor(const std::string& executor_type,
const LocalExecutorParams& params, const Graph& graph,
std::unique_ptr<Executor>* out_executor);
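
The two executor-factory files above maintain a process-wide map from executor-type name to factory, guarded by a mutex. A stripped-down sketch of the same registry pattern follows; FactoryRegistry, Register, and Get are illustrative names, not the TensorFlow API, and the leaked statics mirror the never-destroyed globals in the diff.

#include <mutex>
#include <string>
#include <unordered_map>

class ExecutorFactoryBase {
 public:
  virtual ~ExecutorFactoryBase() = default;
};

class FactoryRegistry {
 public:
  // Returns false if a factory was already registered under `type`.
  static bool Register(const std::string& type, ExecutorFactoryBase* factory) {
    std::lock_guard<std::mutex> lock(Mutex());
    return Factories().emplace(type, factory).second;
  }

  // Returns nullptr if no factory is registered under `type`.
  static ExecutorFactoryBase* Get(const std::string& type) {
    std::lock_guard<std::mutex> lock(Mutex());
    auto it = Factories().find(type);
    return it == Factories().end() ? nullptr : it->second;
  }

 private:
  // Leaked on purpose so they outlive every static destructor.
  static std::mutex& Mutex() {
    static std::mutex* mu = new std::mutex;
    return *mu;
  }
  static std::unordered_map<std::string, ExecutorFactoryBase*>& Factories() {
    static auto* factories =
        new std::unordered_map<std::string, ExecutorFactoryBase*>;
    return *factories;
  }
};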

View File

@ -128,7 +128,7 @@ Tensor V(const float val) {
// A int32 val -> Tensor<int32>
Tensor VI(const int32_t val) {
Tensor tensor(DT_INT32, TensorShape({}));
tensor.scalar<int32>()() = val;
tensor.scalar<int32_t>()() = val;
return tensor;
}
@ -153,10 +153,11 @@ float V(const Tensor& tensor) {
return tensor.scalar<float>()();
}
static uint64 kIncarnation = 1; // Uses in following tests.
static uint64_t kIncarnation = 1; // Uses in following tests.
Rendezvous::ParsedKey Key(const string& sender, const uint64 incarnation,
const string& receiver, const string& name) {
Rendezvous::ParsedKey Key(const std::string& sender, const uint64_t incarnation,
const std::string& receiver,
const std::string& name) {
Rendezvous::ParsedKey result;
CHECK(
Rendezvous::ParseKey(Rendezvous::CreateKey(sender, incarnation, receiver,
@ -508,8 +509,8 @@ static void BM_executor(::testing::benchmark::State& state) {
Graph* g = new Graph(OpRegistry::Global());
random::PhiloxRandom philox(1729, 17);
random::SimplePhilox rand(&philox);
uint64 cur = 0;
uint32 r = 1 + rand.Rand32() % width;
uint64_t cur = 0;
uint32_t r = 1 + rand.Rand32() % width;
std::vector<Node*> ready_nodes;
for (int i = 0; i < r; ++i) {
ready_nodes.push_back(test::graph::NoOp(g, {}));
@ -589,9 +590,9 @@ static void BM_FeedInputFetchOutput(::testing::benchmark::State& state) {
Node* sum = test::graph::Add(g, x, y);
Node* z = test::graph::Send(g, sum, "z", BOB, 1, ALICE);
string x_key = test::GetRendezvousKey(x);
string y_key = test::GetRendezvousKey(y);
string z_key = test::GetRendezvousKey(z);
std::string x_key = test::GetRendezvousKey(x);
std::string y_key = test::GetRendezvousKey(y);
std::string z_key = test::GetRendezvousKey(z);
Tensor val(DT_FLOAT, TensorShape({}));
val.scalar<float>()() = 3.14;
@ -603,9 +604,10 @@ static void BM_FeedInputFetchOutput(::testing::benchmark::State& state) {
BENCHMARK(BM_FeedInputFetchOutput);
absl::Status ReplaceEdgeWithSendRecv(Graph* g, const Edge* edge,
const string& tensor, const string& sender,
const uint64 sender_incarnation,
const string& receiver) {
const std::string& tensor,
const std::string& sender,
const uint64_t sender_incarnation,
const std::string& receiver) {
Node* send;
NodeDef send_def;
TF_CHECK_OK(NodeDefBuilder(g->NewName("n"), "_Send")
@ -662,16 +664,16 @@ static void BM_WhileLoopHelper(::testing::benchmark::State& state,
FunctionDefLibrary f_lib_proto;
// Define the loop body as a function: `x = x + 1`.
const Tensor one_t = test::AsScalar<int32>(1);
const Tensor one_t = test::AsScalar<int32_t>(1);
std::vector<string> args;
std::vector<std::string> args;
args.reserve(loop_vars);
args.push_back("x: int32");
for (int i = 1; i < loop_vars; ++i) {
args.push_back(absl::StrCat("x", i, ": int32"));
}
std::vector<string> body_rets;
std::vector<std::string> body_rets;
body_rets.reserve(loop_vars);
body_rets.push_back("y: int32");
for (int i = 1; i < loop_vars; ++i) {
@ -703,7 +705,7 @@ static void BM_WhileLoopHelper(::testing::benchmark::State& state,
body_nodes);
// Define the loop condition as a function: `x < loop_iters`.
const Tensor loop_iters_t = test::AsScalar<int32>(loop_iters);
const Tensor loop_iters_t = test::AsScalar<int32_t>(loop_iters);
*f_lib_proto.add_function() = FunctionDefHelper::Define(
// Name
"LessThanOrEqualToN",
@ -775,7 +777,7 @@ static void BM_WhileLoopHelper(::testing::benchmark::State& state,
if (edge->dst()->type_string() != "Switch") {
continue;
}
string tensor_name = absl::StrCat("c", edge->id());
std::string tensor_name = absl::StrCat("c", edge->id());
TF_ASSERT_OK(ReplaceEdgeWithSendRecv(graph.get(), edge, tensor_name,
BOB, 1, ALICE));
}

View File

@ -88,7 +88,7 @@ struct Endpoint {
int index;
// Returns the string name represents this endpoint.
string name() const {
std::string name() const {
if (index == 0) {
return node->name();
} else {
@ -100,7 +100,7 @@ struct Endpoint {
};
struct EndpointHash {
uint64 operator()(const Endpoint& x) const {
uint64_t operator()(const Endpoint& x) const {
return Hash64(reinterpret_cast<const char*>(&x.node), sizeof(Node*),
x.index);
}
@ -166,7 +166,7 @@ class FunctionLibraryRuntimeOverlay : public FunctionLibraryRuntime {
: base_flr_(base_flr), lib_def_(std::move(lib_def)) {}
~FunctionLibraryRuntimeOverlay() override;
absl::Status Instantiate(const string& function_name, AttrSlice attrs,
absl::Status Instantiate(const std::string& function_name, AttrSlice attrs,
const InstantiateOptions& options,
Handle* handle) override;
@ -192,7 +192,7 @@ class FunctionLibraryRuntimeOverlay : public FunctionLibraryRuntime {
absl::Status CreateKernel(const std::shared_ptr<const NodeProperties>& props,
OpKernel** kernel) override;
bool IsStateful(const string& function_name) const override;
bool IsStateful(const std::string& function_name) const override;
const FunctionLibraryDefinition* GetFunctionLibraryDefinition()
const override;
@ -204,7 +204,7 @@ class FunctionLibraryRuntimeOverlay : public FunctionLibraryRuntime {
std::function<void(std::function<void()>)>* runner() override;
const DeviceMgr* device_mgr() const override;
string DebugString(Handle handle) override;
std::string DebugString(Handle handle) override;
int graph_def_version() const override;
absl::Status Clone(std::unique_ptr<FunctionLibraryDefinition>* out_lib_def,
@ -220,7 +220,7 @@ class FunctionLibraryRuntimeOverlay : public FunctionLibraryRuntime {
FunctionLibraryRuntimeOverlay::~FunctionLibraryRuntimeOverlay() = default;
absl::Status FunctionLibraryRuntimeOverlay::Instantiate(
const string& function_name, AttrSlice attrs,
const std::string& function_name, AttrSlice attrs,
const InstantiateOptions& options, Handle* handle) {
// We automatically set the `lib_def` option for all instantiations, if the
// caller doesn't set this option explicitly.
@ -284,7 +284,7 @@ absl::Status FunctionLibraryRuntimeOverlay::CreateKernel(
}
bool FunctionLibraryRuntimeOverlay::IsStateful(
const string& function_name) const {
const std::string& function_name) const {
// Important: we do not forward lookup to the base FLR.
const OpDef* op_def;
const absl::Status s = lib_def_.LookUpOpDef(function_name, &op_def);
@ -317,7 +317,7 @@ FunctionLibraryRuntimeOverlay::GetFunctionLibraryDefinition() const {
return &lib_def_;
}
string FunctionLibraryRuntimeOverlay::DebugString(Handle handle) {
std::string FunctionLibraryRuntimeOverlay::DebugString(Handle handle) {
return base_flr_->DebugString(handle);
}
@ -348,7 +348,7 @@ class FunctionLibraryRuntimeImpl : public FunctionLibraryRuntime {
~FunctionLibraryRuntimeImpl() override;
absl::Status Instantiate(const string& function_name, AttrSlice attrs,
absl::Status Instantiate(const std::string& function_name, AttrSlice attrs,
const InstantiateOptions& options,
Handle* handle) override;
@ -375,7 +375,7 @@ class FunctionLibraryRuntimeImpl : public FunctionLibraryRuntime {
absl::Status RunSync(Options opts, Handle handle,
CallFrameInterface* call_frame) override;
bool IsStateful(const string& function) const override;
bool IsStateful(const std::string& function) const override;
// TODO: b/396484774 - Consider handling the case where the FLR is already
// finalized instead of always returning the pointer to the unowned library
@ -397,7 +397,7 @@ class FunctionLibraryRuntimeImpl : public FunctionLibraryRuntime {
const ConfigProto* const config_proto() override { return config_; }
int graph_def_version() const override { return graph_def_version_; }
string DebugString(Handle h) override;
std::string DebugString(Handle h) override;
absl::Status Clone(std::unique_ptr<FunctionLibraryDefinition>* out_lib_def,
std::unique_ptr<ProcessFunctionLibraryRuntime>* out_pflr,
@ -416,9 +416,9 @@ class FunctionLibraryRuntimeImpl : public FunctionLibraryRuntime {
GraphOptimizer optimizer_;
const SessionMetadata* const session_metadata_;
Executor::Args::Runner default_runner_;
const string device_name_;
const std::string device_name_;
std::function<absl::Status(const string&, const OpDef**)> get_func_sig_;
std::function<absl::Status(const std::string&, const OpDef**)> get_func_sig_;
std::function<absl::Status(const std::shared_ptr<const NodeProperties>&,
OpKernel**)>
create_kernel_;
@ -432,13 +432,13 @@ class FunctionLibraryRuntimeImpl : public FunctionLibraryRuntime {
// The instantiated and transformed function is encoded as a Graph
// object, and an executor is created for the graph.
struct Item {
uint64 instantiation_counter = 0;
uint64_t instantiation_counter = 0;
std::unique_ptr<const Graph> graph = nullptr;
const FunctionLibraryDefinition* lib_def = nullptr; // Not owned.
FunctionBody* func_graph = nullptr;
Executor* exec = nullptr;
core::RefCountPtr<FunctionLibraryRuntimeOverlay> overlay_flr = nullptr;
string executor_type;
std::string executor_type;
bool allow_small_function_optimizations = false;
bool allow_control_flow_sync_execution = false;
bool function_runs_at_most_once = false;
@ -517,7 +517,7 @@ FunctionLibraryRuntimeImpl::FunctionLibraryRuntimeImpl(
absl::flat_hash_map<Handle, std::unique_ptr<Item>>>()),
function_handle_cache_(std::make_unique<FunctionHandleCache>(this)),
parent_(parent) {
get_func_sig_ = [this](const string& op, const OpDef** sig) {
get_func_sig_ = [this](const std::string& op, const OpDef** sig) {
return base_lib_def_->LookUpOpDef(op, sig);
};
create_kernel_ = [this](const std::shared_ptr<const NodeProperties>& props,
@ -714,7 +714,7 @@ absl::Status FunctionLibraryRuntimeImpl::FunctionDefToBody(
return FunctionDefToBodyHelper(std::move(record), attrs, lib_def,
get_func_sig_, fbody);
} else {
auto get_func_sig = [lib_def](const string& op, const OpDef** sig) {
auto get_func_sig = [lib_def](const std::string& op, const OpDef** sig) {
return lib_def->LookUpOpDef(op, sig);
};
return FunctionDefToBodyHelper(std::move(record), attrs, lib_def,
@ -779,7 +779,7 @@ bool FunctionLibraryRuntimeImpl::IsLocalTarget(
}
absl::Status FunctionLibraryRuntimeImpl::Instantiate(
const string& function_name, AttrSlice attrs,
const std::string& function_name, AttrSlice attrs,
const InstantiateOptions& options, Handle* handle) {
if (!IsLocalTarget(options)) {
return parent_->Instantiate(function_name, attrs, options, handle);
@ -796,7 +796,7 @@ absl::Status FunctionLibraryRuntimeImpl::Instantiate(
// in the canonical key.
InstantiateOptions options_copy(options);
options_copy.target = device_name_;
const string key = Canonicalize(function_name, attrs, options_copy);
const std::string key = Canonicalize(function_name, attrs, options_copy);
{
mutex_lock l(mu_);
@ -837,7 +837,7 @@ absl::Status FunctionLibraryRuntimeImpl::Instantiate(
if (func.name() == kGradientOp) {
return errors::InvalidArgument("Can't take gradient of SymbolicGradient");
}
const string grad = lib_def->FindGradient(func.name());
const std::string grad = lib_def->FindGradient(func.name());
if (!grad.empty()) {
return Instantiate(grad, AttrSlice(&func.attr()), options, handle);
}
@ -941,7 +941,7 @@ absl::Status FunctionLibraryRuntimeImpl::ReleaseHandle(Handle handle) {
absl::Status FunctionLibraryRuntimeImpl::CreateItem(Item** item) {
const FunctionBody* fbody;
FunctionLibraryRuntime* flr;
string executor_type;
std::string executor_type;
{
tf_shared_lock l(mu_);
fbody = (*item)->func_graph;
@ -1120,8 +1120,8 @@ void FunctionLibraryRuntimeImpl::RunRemote(const Options& opts, Handle handle,
absl::Span<const Tensor> args,
std::vector<Tensor>* rets,
Item* item, DoneCallback done) {
string target_device = parent_->GetDeviceName(handle);
string source_device = opts.source_device;
std::string target_device = parent_->GetDeviceName(handle);
std::string source_device = opts.source_device;
RendezvousInterface* rendezvous = opts.rendezvous;
DeviceContext* device_context;
absl::Status s = parent_->GetDeviceContext(target_device, &device_context);
@ -1436,13 +1436,13 @@ absl::Status FunctionLibraryRuntimeImpl::RunSync(
return absl::OkStatus();
}
bool FunctionLibraryRuntimeImpl::IsStateful(const string& func) const {
bool FunctionLibraryRuntimeImpl::IsStateful(const std::string& func) const {
const OpDef* op_def;
const absl::Status s = base_lib_def_->LookUpOpDef(func, &op_def);
return s.ok() && op_def->is_stateful();
}
string FunctionLibraryRuntimeImpl::DebugString(Handle handle) {
std::string FunctionLibraryRuntimeImpl::DebugString(Handle handle) {
Item* item = nullptr;
LocalHandle local_handle = parent_->GetHandleOnDevice(device_name_, handle);
absl::Status s = GetOrCreateItem(local_handle, &item);
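
FunctionLibraryRuntimeOverlay above illustrates an overlay pattern: name-dependent queries such as IsStateful resolve against the overlay's own function library and are deliberately not forwarded, while handle-based calls such as DebugString delegate to the base runtime. A hypothetical, heavily simplified sketch of that shape (the Library, Runtime, and RuntimeOverlay types are illustrative, not the TensorFlow classes):

#include <string>

struct OpInfo {
  bool is_stateful = false;
};

// Minimal stand-ins for the function library and the base runtime.
class Library {
 public:
  virtual ~Library() = default;
  // Returns true and fills `info` if `name` is defined in this library.
  virtual bool LookUp(const std::string& name, OpInfo* info) const = 0;
};

class Runtime {
 public:
  virtual ~Runtime() = default;
  virtual bool IsStateful(const std::string& name) const = 0;
  virtual std::string DebugString(int handle) = 0;
};

class RuntimeOverlay : public Runtime {
 public:
  RuntimeOverlay(Runtime* base, const Library* local_lib)
      : base_(base), local_lib_(local_lib) {}

  // Resolve against the overlay's own library only; lookups are not forwarded
  // to the base runtime, as the comment in the diff notes.
  bool IsStateful(const std::string& name) const override {
    OpInfo info;
    return local_lib_->LookUp(name, &info) && info.is_stateful;
  }

  // Calls that do not depend on the local library are forwarded unchanged.
  std::string DebugString(int handle) override {
    return base_->DebugString(handle);
  }

 private:
  Runtime* base_;             // Not owned.
  const Library* local_lib_;  // Not owned.
};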

View File

@ -41,7 +41,7 @@ namespace tensorflow {
absl::Status FunctionDefToBodyHelper(
core::RefCountPtr<FunctionRecord>&& record, const AttrSlice& attrs,
const FunctionLibraryDefinition* const lib_def,
const std::function<absl::Status(const string&, const OpDef**)>&
const std::function<absl::Status(const std::string&, const OpDef**)>&
get_func_sig,
std::unique_ptr<FunctionBody>* fbody) {
// Instantiates the function template into a graph def.
@ -96,7 +96,8 @@ absl::Status FunctionDefToBodyHelper(core::RefCountPtr<FunctionRecord>&& record,
const AttrSlice& attrs,
const FunctionLibraryDefinition* lib_def,
std::unique_ptr<FunctionBody>* fbody) {
const auto get_func_sig = [&lib_def](const string& op, const OpDef** sig) {
const auto get_func_sig = [&lib_def](const std::string& op,
const OpDef** sig) {
return lib_def->LookUpOpDef(op, sig);
};
return FunctionDefToBodyHelper(std::move(record), attrs, lib_def,
@ -109,7 +110,8 @@ absl::Status FunctionDefToBodyHelper(const FunctionDef& fdef,
std::unique_ptr<FunctionBody>* fbody) {
core::RefCountPtr<FunctionRecord> record(
new FunctionRecord(FunctionDef(fdef), {}, true));
const auto get_func_sig = [&lib_def](const string& op, const OpDef** sig) {
const auto get_func_sig = [&lib_def](const std::string& op,
const OpDef** sig) {
return lib_def->LookUpOpDef(op, sig);
};
return FunctionDefToBodyHelper(std::move(record), attrs, lib_def,
@ -125,8 +127,8 @@ bool PrunableStatefulNode(const Node* n) {
// and can produce different results on each invocation (due to variable
// updates) but it does not itself modify the variable.
// TODO(b/341721055): Consolidate this set with other side effect modeling.
static const absl::flat_hash_set<string>* prunable_stateful_ops =
new absl::flat_hash_set<string>{
static const absl::flat_hash_set<std::string>* prunable_stateful_ops =
new absl::flat_hash_set<std::string>{
FunctionLibraryDefinition::kArgOp,
"ResourceGather",
"ResourceGatherNd",

View File

@ -55,7 +55,7 @@ absl::Status FunctionDefToBodyHelper(const FunctionDef& fdef,
absl::Status FunctionDefToBodyHelper(
core::RefCountPtr<FunctionRecord>&& record, const AttrSlice& attrs,
const FunctionLibraryDefinition* lib_def,
const std::function<absl::Status(const string&, const OpDef**)>&
const std::function<absl::Status(const std::string&, const OpDef**)>&
get_func_sig,
std::unique_ptr<FunctionBody>* fbody);

View File

@ -74,7 +74,7 @@ using ::tsl::testing::StatusIs;
using FDH = ::tensorflow::FunctionDefHelper;
using OutputControlSrc = InlineFunctionBodyOptions::OutputControlSource;
absl::Status GetOpSig(const string& op, const OpDef** sig) {
absl::Status GetOpSig(const std::string& op, const OpDef** sig) {
return OpRegistry::Global()->LookUpOpDef(op, sig);
}
@ -220,14 +220,14 @@ class FunctionLibraryRuntimeTest : public ::testing::Test {
return absl::OkStatus();
}
absl::Status Instantiate(FunctionLibraryRuntime* flr, const string& name,
absl::Status Instantiate(FunctionLibraryRuntime* flr, const std::string& name,
test::function::Attrs attrs,
FunctionLibraryRuntime::Handle* handle) {
return flr->Instantiate(name, attrs, handle);
}
absl::Status Instantiate(
FunctionLibraryRuntime* flr, const string& name,
FunctionLibraryRuntime* flr, const std::string& name,
test::function::Attrs attrs,
const FunctionLibraryRuntime::InstantiateOptions& options,
FunctionLibraryRuntime::Handle* handle) {
@ -235,7 +235,7 @@ class FunctionLibraryRuntimeTest : public ::testing::Test {
}
absl::Status InstantiateAndRun(FunctionLibraryRuntime* flr,
const string& name,
const std::string& name,
test::function::Attrs attrs,
const std::vector<Tensor>& args,
std::vector<Tensor*> rets) {
@ -245,7 +245,7 @@ class FunctionLibraryRuntimeTest : public ::testing::Test {
}
absl::Status InstantiateAndRun(
FunctionLibraryRuntime* flr, const string& name,
FunctionLibraryRuntime* flr, const std::string& name,
test::function::Attrs attrs,
const FunctionLibraryRuntime::InstantiateOptions& options,
const std::vector<Tensor>& args, std::vector<Tensor*> rets) {
@ -295,7 +295,7 @@ class FunctionLibraryRuntimeTest : public ::testing::Test {
}
absl::Status InstantiateAndRunViaCallFrameInterface(
FunctionLibraryRuntime* flr, const string& name,
FunctionLibraryRuntime* flr, const std::string& name,
test::function::Attrs attrs, const std::vector<Tensor>& args,
std::vector<Tensor*> rets) {
FunctionLibraryRuntime::Handle handle;
@ -331,7 +331,7 @@ class FunctionLibraryRuntimeTest : public ::testing::Test {
}
std::unique_ptr<Graph> GetFuncBody(FunctionLibraryRuntime* flr,
const string& name,
const std::string& name,
test::function::Attrs attrs) {
FunctionLibraryRuntime::Handle handle;
absl::Status status = flr->Instantiate(name, attrs, &handle);
@ -347,7 +347,7 @@ class FunctionLibraryRuntimeTest : public ::testing::Test {
}
std::unique_ptr<Graph> GetGradBody(FunctionLibraryRuntime* flr,
const string& func,
const std::string& func,
test::function::Attrs attrs) {
FunctionLibraryRuntime::Handle handle;
absl::Status status = flr->Instantiate(func, attrs, &handle);
@ -646,9 +646,9 @@ TEST_F(FunctionLibraryRuntimeTest, StateHandle) {
// Attrs
{},
// Nodes
{FDH::Const<int32>("shape", absl::Span<const int32>({1})),
FDH::Const<int32>("minval", 0),
FDH::Const<int32>("maxval", 10),
{FDH::Const<int32_t>("shape", absl::Span<const int32_t>({1})),
FDH::Const<int32_t>("minval", 0),
FDH::Const<int32_t>("maxval", 10),
// A stateful node.
{{"y"},
"RandomUniformInt",
@ -665,7 +665,7 @@ TEST_F(FunctionLibraryRuntimeTest, StateHandle) {
// Simple case: instantiating with no state_handle.
for (int32_t expected : {6, 4}) {
TF_CHECK_OK(Run(flr0_, handle, opts, {}, {&y}));
test::ExpectTensorEqual<int>(y, test::AsTensor<int32>({expected}));
test::ExpectTensorEqual<int>(y, test::AsTensor<int32_t>({expected}));
}
}
@ -678,7 +678,7 @@ TEST_F(FunctionLibraryRuntimeTest, StateHandle) {
EXPECT_EQ(handle, handle_non_isolated);
for (int32_t expected : {0, 1}) {
TF_CHECK_OK(Run(flr0_, handle_non_isolated, opts, {}, {&y}));
test::ExpectTensorEqual<int>(y, test::AsTensor<int32>({expected}));
test::ExpectTensorEqual<int>(y, test::AsTensor<int32_t>({expected}));
}
}
@ -693,7 +693,7 @@ TEST_F(FunctionLibraryRuntimeTest, StateHandle) {
EXPECT_NE(handle, handle_isolated);
for (int32_t expected : {6, 4, 0, 1}) {
TF_CHECK_OK(Run(flr0_, handle_isolated, opts, {}, {&y}));
test::ExpectTensorEqual<int>(y, test::AsTensor<int32>({expected}));
test::ExpectTensorEqual<int>(y, test::AsTensor<int32_t>({expected}));
}
}
@ -708,7 +708,7 @@ TEST_F(FunctionLibraryRuntimeTest, StateHandle) {
EXPECT_NE(handle, handle_isolated);
for (int32_t expected : {6, 4, 0, 1}) {
TF_CHECK_OK(Run(flr0_, handle_isolated, opts, {}, {&y}));
test::ExpectTensorEqual<int>(y, test::AsTensor<int32>({expected}));
test::ExpectTensorEqual<int>(y, test::AsTensor<int32_t>({expected}));
}
}
@ -725,7 +725,7 @@ TEST_F(FunctionLibraryRuntimeTest, StateHandle) {
EXPECT_NE(handle, handle_isolated);
for (int32_t expected : {6, 4, 0, 1}) {
TF_CHECK_OK(Run(flr0_, handle_isolated, opts, {}, {&y}));
test::ExpectTensorEqual<int>(y, test::AsTensor<int32>({expected}));
test::ExpectTensorEqual<int>(y, test::AsTensor<int32_t>({expected}));
}
TF_CHECK_OK(flr0_->ReleaseHandle(handle_isolated));
}
@ -1128,9 +1128,9 @@ TEST_F(FunctionLibraryRuntimeTest,
std::unique_ptr<Graph> g;
ExpandInlineFunctionsOptions opts;
const string input_node = "Func/b/input/_0";
const string output_node = "Func/b/output/_1";
const string output_control_node = "Func/b/output_control_node/_2";
const std::string input_node = "Func/b/input/_0";
const std::string output_node = "Func/b/output/_1";
const std::string output_control_node = "Func/b/output_control_node/_2";
// Use data outputs as output control source.
opts.native_options.output_control_src = OutputControlSrc::kDataOutputs;
@ -1203,9 +1203,9 @@ TEST_F(FunctionLibraryRuntimeTest, ExpandInlineFunctionsAndKeepCallerNode) {
return absl::OkStatus();
};
const string input_node = "Func/b/input/_0";
const string output_node = "Func/b/output/_1";
const string output_control_node = "Func/b/output_control_node/_2";
const std::string input_node = "Func/b/input/_0";
const std::string output_node = "Func/b/output/_1";
const std::string output_control_node = "Func/b/output_control_node/_2";
// Construct expected graph after function inlining.
auto expected_graph = [&](const NodeDef& caller) -> GraphDef {
@ -1266,9 +1266,9 @@ TEST_F(FunctionLibraryRuntimeTest, ExpandInlineFunctionsAndPlaceInlinedNodes) {
using test::function::NDef;
using KeepCallerNode = InlineFunctionBodyOptions::KeepCallerNode;
const string arg_device = "/job:arg/replica:0/task:0/device:GPU";
const string call_device = "/job:call/replica:0/task:1/device:GPU";
const string body_device = "/job:body/replica:0/task:1/device:CPU";
const std::string arg_device = "/job:arg/replica:0/task:0/device:GPU";
const std::string call_device = "/job:call/replica:0/task:1/device:GPU";
const std::string body_device = "/job:body/replica:0/task:1/device:CPU";
const FunctionDef func = FDH::Create(
"AddFunc", {"i: float"}, {"o: float"}, {},
@ -1291,12 +1291,13 @@ TEST_F(FunctionLibraryRuntimeTest, ExpandInlineFunctionsAndPlaceInlinedNodes) {
return absl::OkStatus();
};
const string input_node = "Func/b/input/_0";
const string output_node = "Func/b/output/_1";
const string output_control_node = "Func/b/output_control_node/_2";
const std::string input_node = "Func/b/input/_0";
const std::string output_node = "Func/b/output/_1";
const std::string output_control_node = "Func/b/output_control_node/_2";
// Construct expected graph after function inlining.
auto expected_graph = [&](const std::vector<string>& placed) -> GraphDef {
auto expected_graph =
[&](const std::vector<std::string>& placed) -> GraphDef {
return test::function::GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_FLOAT}, {"index", 0}}, placed[0]),
@ -1364,7 +1365,7 @@ TEST_F(FunctionLibraryRuntimeTest, ExpandInlineFunctionsAndPlaceInlinedNodes) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
TF_ASSERT_OK(construct_graph(&g));
const string merged_device = "/job:body/replica:0/task:1/device:CPU:*";
const std::string merged_device = "/job:body/replica:0/task:1/device:CPU:*";
ExpandInlineFunctions(flr0_, g.get(), opts);
GraphDef expected = expected_graph({/*a*/ arg_device, //
@ -1400,7 +1401,7 @@ TEST_F(FunctionLibraryRuntimeTest, PruneBody) {
{{"x1"}, "Add", {"o", "o"}, {{"T", T}}},
{{"x2"}, "Mul", {"a", "x1"}, {{"T", T}}},
{{"x3"}, "Mul", {"x1", "x2"}, {{"T", T}}},
FDH::Const<int32>("shape", {1, 2}),
FDH::Const<int32_t>("shape", {1, 2}),
// A stateful node.
{{"keep_me"},
"RandomUniform",
@ -1410,7 +1411,7 @@ TEST_F(FunctionLibraryRuntimeTest, PruneBody) {
{{"z"}, "Add", {"a", "o"}, {{"T", T}}}});
Init({stateful_func});
auto x = test::AsTensor<int32>({1, 2, 3, 4});
auto x = test::AsTensor<int32_t>({1, 2, 3, 4});
auto y = test::AsTensor<float>({1.0, 2.0, 3.0, 4.0});
Tensor z;
@ -1427,15 +1428,15 @@ TEST_F(FunctionLibraryRuntimeTest, PruneBody) {
TF_CHECK_OK(InstantiateAndRun(flr0_, "SquareAndAddOneWithStatefulNodes", {},
{x, y}, {&z}));
test::ExpectTensorEqual<int>(z, test::AsTensor<int32>({2, 5, 10, 17}));
test::ExpectTensorEqual<int>(z, test::AsTensor<int32_t>({2, 5, 10, 17}));
stats_collector.FinalizeAndSwap(&stats);
// Note that we do not expect the nodes named "y", "x1", "x2", or "x3" to
// execute.
std::set<string> expected_node_names(
std::set<std::string> expected_node_names(
{"_SOURCE", "shape", "x", "o", "a", "keep_me", "z", "z_RetVal"});
std::set<string> executed_node_names;
std::set<std::string> executed_node_names;
for (const auto& node_stats : stats.dev_stats()[0].node_stats()) {
executed_node_names.insert(node_stats.node_name());
}
@ -1475,9 +1476,9 @@ TEST_F(FunctionLibraryRuntimeTest, DoNotPruneControlOutputsFromBody) {
stats_collector.FinalizeAndSwap(&stats);
std::set<string> expected_node_names(
std::set<std::string> expected_node_names(
{"_SOURCE", "i", "add", "ret", "o_RetVal"});
std::set<string> executed_node_names;
std::set<std::string> executed_node_names;
for (const auto& node_stats : stats.dev_stats()[0].node_stats()) {
executed_node_names.insert(node_stats.node_name());
}
@ -1645,7 +1646,7 @@ TEST_F(FunctionLibraryRuntimeTest, Error_InstantiationError) {
TEST_F(FunctionLibraryRuntimeTest, Error_BadControlFlow) {
Init({test::function::InvalidControlFlow()});
auto x = test::AsTensor<int32>({0});
auto x = test::AsTensor<int32_t>({0});
DCHECK_EQ(x.dtype(), DT_INT32);
Tensor y;
HasError(InstantiateAndRun(flr0_, "InvalidControlFlow", {}, {x}, {&y}),
@ -2117,7 +2118,7 @@ TEST_F(FunctionLibraryRuntimeTest, FullTypeForInt32) {
{{"z"}, "Add", {"x", "x"}, {{"T", T}}}});
Init({int32_func});
auto x = test::AsTensor<int32>({1, 2, 3, 4});
auto x = test::AsTensor<int32_t>({1, 2, 3, 4});
auto y = test::AsTensor<float>({1.0, 2.0, 3.0, 4.0});
Tensor z;

View File

@ -126,8 +126,8 @@ FunctionDef BlockingOpFn() {
}
// TODO(phawkins): replace with C++ API for calling functions, when that exists.
Output Call(Scope* scope, const string& op_name, const string& fn_name,
absl::Span<const Input> inputs) {
Output Call(Scope* scope, const std::string& op_name,
const std::string& fn_name, absl::Span<const Input> inputs) {
NodeDef def;
NodeDefBuilder builder(op_name, fn_name, scope->graph()->op_registry());
for (const Input& input : inputs) {

View File

@ -44,8 +44,8 @@ FunctionDef BlockingOpFn();
// Adds a function call to the given scope and returns the output for the node.
// TODO(phawkins): replace with C++ API for calling functions, when that exists.
Output Call(Scope* scope, const string& op_name, const string& fn_name,
absl::Span<const Input> inputs);
Output Call(Scope* scope, const std::string& op_name,
const std::string& fn_name, absl::Span<const Input> inputs);
} // namespace function
} // namespace test

View File

@ -81,7 +81,7 @@ class FunctionLibraryRuntimeTest : public ::testing::Test {
FunctionLibraryRuntime::Options opts,
const std::vector<Tensor>& args, std::vector<Tensor*> rets,
bool add_runner = true) {
std::atomic<int32> call_count(0);
std::atomic<int32_t> call_count(0);
std::function<void(std::function<void()>)> runner =
[&call_count](std::function<void()> fn) {
++call_count;
@ -115,14 +115,14 @@ class FunctionLibraryRuntimeTest : public ::testing::Test {
return absl::OkStatus();
}
absl::Status Instantiate(FunctionLibraryRuntime* flr, const string& name,
absl::Status Instantiate(FunctionLibraryRuntime* flr, const std::string& name,
test::function::Attrs attrs,
FunctionLibraryRuntime::Handle* handle) {
return flr->Instantiate(name, attrs, handle);
}
absl::Status Instantiate(
FunctionLibraryRuntime* flr, const string& name,
FunctionLibraryRuntime* flr, const std::string& name,
test::function::Attrs attrs,
const FunctionLibraryRuntime::InstantiateOptions& options,
FunctionLibraryRuntime::Handle* handle) {
@ -130,7 +130,7 @@ class FunctionLibraryRuntimeTest : public ::testing::Test {
}
absl::Status InstantiateAndRun(FunctionLibraryRuntime* flr,
const string& name,
const std::string& name,
test::function::Attrs attrs,
const std::vector<Tensor>& args,
std::vector<Tensor*> rets,
@ -141,7 +141,7 @@ class FunctionLibraryRuntimeTest : public ::testing::Test {
}
absl::Status InstantiateAndRun(
FunctionLibraryRuntime* flr, const string& name,
FunctionLibraryRuntime* flr, const std::string& name,
test::function::Attrs attrs,
const FunctionLibraryRuntime::InstantiateOptions& options,
const std::vector<Tensor>& args, std::vector<Tensor*> rets,
@ -171,7 +171,7 @@ class FunctionLibraryRuntimeTest : public ::testing::Test {
FunctionLibraryRuntime::Handle handle,
FunctionLibraryRuntime::Options opts,
CallFrameInterface* frame, bool add_runner = true) {
std::atomic<int32> call_count(0);
std::atomic<int32_t> call_count(0);
std::function<void(std::function<void()>)> runner =
[&call_count](std::function<void()> fn) {
++call_count;
@ -232,7 +232,7 @@ TEST_F(FunctionLibraryRuntimeTest, DefaultThreadpool) {
TF_CHECK_OK(Instantiate(flr0_, "XTimesTwo", {{"T", DT_FLOAT}}, &h));
auto x1 = test::AsTensor<float>({1, 2, 3, 4});
std::atomic<int32> num_done(0);
std::atomic<int32_t> num_done(0);
FunctionLibraryRuntime::Options opts;
for (int i = 0; i < 4; ++i) {
tp1->Schedule([&h, &x1, &opts, &num_done, this]() {

View File

@ -36,7 +36,7 @@ struct Endpoint {
int index;
// Returns the string name represents this endpoint.
string name() const {
std::string name() const {
if (index == 0) {
return node->name();
} else {
@ -285,7 +285,7 @@ bool IsFunctionCall(const FunctionLibraryDefinition& lib_def,
return node.IsFunctionCall();
}
string NewName(const Node* n, bool pretty) {
std::string NewName(const Node* n, bool pretty) {
if (pretty) {
return absl::StrCat(n->type_string(), n->id());
} else {
@ -347,7 +347,7 @@ void ToGraphDef(const Graph* g, GraphDef* gdef, bool pretty) {
ndef->add_input("unknown");
continue;
}
const string srcname = NewName(e->src(), pretty);
const std::string srcname = NewName(e->src(), pretty);
if (!e->src()->IsOp()) {
} else if (e->IsControlEdge()) {
ndef->add_input(absl::StrCat("^", srcname));
@ -360,7 +360,7 @@ void ToGraphDef(const Graph* g, GraphDef* gdef, bool pretty) {
});
}
string DebugString(const Graph* g) {
std::string DebugString(const Graph* g) {
GraphDef gdef;
ToGraphDef(g, &gdef);
return DebugString(gdef);
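
The ToGraphDef and NewName hunks above rely on the textual input encoding visible in Endpoint::name(): a data input is the producer's name, with ":index" appended for outputs other than the first, and a control dependency gets a "^" prefix. A tiny illustrative helper (EncodeInput is hypothetical, not part of the change):

#include <string>

// Builds the string form of one input edge, matching the convention in the
// hunks above.
std::string EncodeInput(const std::string& src_name, int src_output,
                        bool is_control_edge) {
  if (is_control_edge) return "^" + src_name;          // control dependency
  if (src_output == 0) return src_name;                // first output: bare name
  return src_name + ":" + std::to_string(src_output);  // otherwise: name:index
}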

View File

@ -34,7 +34,7 @@ class OpDef;
// Debugging facility. Returns a debug string for a graph
// representing an instantiated function.
string DebugString(const Graph* g);
std::string DebugString(const Graph* g);
// Dump the contents of the "graph" to log files if the logging level is
// sufficiently high.