Performance-related tweaks: Don't copy loop variables; remove ineffective std::move casts.
PiperOrigin-RevId: 158017670
This commit is contained in:
parent 009789f742
commit a51a9846cf
@@ -127,7 +127,7 @@ Allocator* ProcessState::GetGPUAllocator(const GPUOptions& options, int gpu_id,
         gpu_platform->ExecutorForDevice(gpu_id).ValueOrDie();
     int bus_id = se->GetDeviceDescription().numa_node();
     if (bus_id >= 0 && bus_id < static_cast<int64>(gpu_visitors_.size())) {
-      for (auto v : gpu_visitors_[bus_id]) {
+      for (const auto& v : gpu_visitors_[bus_id]) {
        gpu_allocators_[gpu_id]->AddAllocVisitor(v);
      }
    }
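This first hunk shows one of the two patterns named in the commit message: a range-based for loop written as for (auto v : c) copy-constructs every element, while for (const auto& v : c) binds each element by reference. The same one-line fix recurs in the ShapeRefinerTest, CudaSolver, and Diagnostician hunks below. A minimal standalone sketch of the difference, using a hypothetical Visitor type with a copy counter (nothing in this sketch is from the TensorFlow sources):

#include <iostream>
#include <string>
#include <vector>

// Hypothetical element type that counts how many times it is copied.
struct Visitor {
  std::string name;
  static int copies;
  explicit Visitor(std::string n) : name(std::move(n)) {}
  Visitor(const Visitor& other) : name(other.name) { ++copies; }
};
int Visitor::copies = 0;

int main() {
  std::vector<Visitor> visitors;
  visitors.emplace_back("a");
  visitors.emplace_back("b");
  visitors.emplace_back("c");
  Visitor::copies = 0;  // ignore copies made while filling the vector

  for (auto v : visitors) {  // copy-constructs each element
    (void)v;                 // silence unused-variable warnings
  }
  std::cout << "by value:     " << Visitor::copies << " copies\n";  // prints 3

  Visitor::copies = 0;
  for (const auto& v : visitors) {  // binds by reference; no copies
    (void)v;
  }
  std::cout << "by reference: " << Visitor::copies << " copies\n";  // prints 0
}

Binding by const reference is safe in these hunks because the loop bodies only read the element; a loop that needed its own mutable copy would keep the by-value form.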
@@ -557,7 +557,7 @@ TEST(ShapeRefinerTest, ConstantValueAsShape_PackInt32) {
           .Finalize(root.graph(), &result));

   ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
-  for (auto input : inputs) {
+  for (const auto& input : inputs) {
     TF_ASSERT_OK(m.AddNode(input.node()));
   }
   TF_ASSERT_OK(m.AddNode(pack.node()));
@@ -173,7 +173,7 @@ class GrpcRemoteWorker : public WorkerInterface {
     }

     IssueRequest(req_copy ? req_copy : request, response, recvtensor_,
-                 std::move(*cb_to_use), call_opts);
+                 *cb_to_use, call_opts);
   }

   void LoggingAsync(const LoggingRequest* request, LoggingResponse* response,
@@ -258,7 +258,7 @@ void Worker::DoPartialRunGraph(CallOptions* opts,
     }
   }
   if (request->is_last_partial_run()) {
-    partial_run_mgr_.PartialRunDone(step_id, std::move(finish), s);
+    partial_run_mgr_.PartialRunDone(step_id, finish, s);
   } else {
     finish(s);
   }
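The std::move removals above (and the CudaSolver and OneShotIteratorOp ones below) illustrate the second pattern from the commit message: casting an argument with std::move is a no-op when the callee cannot actually move from it, most commonly because the parameter is taken by const reference. A minimal sketch of that situation; the Callback alias and the const-reference signature are assumptions for illustration, since the actual callee signatures are not shown in this diff:

#include <functional>
#include <iostream>
#include <utility>

using Callback = std::function<void(int)>;  // hypothetical stand-in for StatusCallback

// Takes the callback by const reference, so the argument can never actually
// be moved from: a std::move() at the call site is an ineffective cast.
void PartialRunDone(const Callback& cb, int status) { cb(status); }

int main() {
  Callback finish = [](int s) { std::cout << "finished with status " << s << "\n"; };

  PartialRunDone(std::move(finish), 0);  // the rvalue simply binds to const Callback&
  // 'finish' was not moved from and is still perfectly usable, which is what
  // makes the cast above a no-op.
  PartialRunDone(finish, 1);
}

If the parameter were taken by value instead, the std::move would genuinely save a copy; the commit removes the casts only where they are described as ineffective.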
@@ -153,7 +153,7 @@ Status CudaSolver::CopyLapackInfoToHostAsync(
         info_checker_callback) const {
   std::vector<HostLapackInfo> host_lapack_infos;
   if (dev_lapack_infos.empty()) {
-    info_checker_callback(Status::OK(), std::move(host_lapack_infos));
+    info_checker_callback(Status::OK(), host_lapack_infos);
     return Status::OK();
   }

@@ -174,7 +174,7 @@ Status CudaSolver::CopyLapackInfoToHostAsync(
   auto wrapped_info_checker_callback =
       [info_checker_callback](std::vector<HostLapackInfo> host_lapack_infos) {
         Status status;
-        for (auto host_lapack_info : host_lapack_infos) {
+        for (const auto& host_lapack_info : host_lapack_infos) {
           for (int i = 0; i < host_lapack_info.size() && status.ok(); ++i) {
             const int info_value = (host_lapack_info.data())[i];
             if (info_value != 0) {
@@ -246,7 +246,7 @@ class OneShotIteratorOp : public OpKernel {
           n.Notify();
         });
     n.WaitForNotification();
-    OP_REQUIRES_OK(ctx, std::move(factory_status));
+    OP_REQUIRES_OK(ctx, factory_status);
     OP_REQUIRES(
         ctx,
         return_values.size() == 1 &&
@@ -170,7 +170,7 @@ void Diagnostician::LogDiagnosticInformation() {
   VLOG(1) << "LD_LIBRARY_PATH is: \"" << library_path << "\"";

   std::vector<string> pieces = port::Split(library_path, ':');
-  for (auto piece : pieces) {
+  for (const auto &piece : pieces) {
     if (piece.empty()) {
       continue;
     }
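Both patterns are mechanical enough to be caught by static analysis. Assuming a clang-tidy build that includes the performance module, the performance-for-range-copy and performance-move-const-arg checks flag exactly the loop-copy and no-op std::move idioms fixed in this commit.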