Change function parameters to references to avoid copying, or otherwise move from function parameters when moving reduces the amount of copying.

PiperOrigin-RevId: 157867333
This commit is contained in:
A. Unique TensorFlower 2017-06-02 13:22:15 -07:00 committed by TensorFlower Gardener
parent 4905c0eae4
commit f4b8d21b8e
14 changed files with 34 additions and 19 deletions

View File

@@ -15,6 +15,8 @@ limitations under the License.
#include "tensorflow/cc/framework/testutil.h"
#include <utility>
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/default_device.h"
@@ -30,7 +32,7 @@ void GetTensors(const Scope& scope, OutputList tensors,
void GetTensor(const Scope& scope, Output tensor, Tensor* out) {
std::vector<Tensor> outputs;
GetTensors(scope, {tensor}, &outputs);
GetTensors(scope, {std::move(tensor)}, &outputs);
*out = outputs[0];
}

View File

@@ -13,6 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <utility>
#include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h"
#include "tensorflow/cc/framework/ops.h"
@@ -101,12 +103,12 @@ Node* Input(const GraphDefBuilder::Options& opts) {
}
Node* Unary(ops::NodeOut a, const GraphDefBuilder::Options& opts) {
return ops::UnaryOp("UnaryTest", a, opts);
return ops::UnaryOp("UnaryTest", std::move(a), opts);
}
Node* Binary(ops::NodeOut a, ops::NodeOut b,
const GraphDefBuilder::Options& opts) {
return ops::BinaryOp("BinaryTest", a, b, opts);
return ops::BinaryOp("BinaryTest", std::move(a), std::move(b), opts);
}
Node* AddNLike(const std::vector<ops::NodeOut>& inputs,
@@ -127,7 +129,7 @@ Node* RetOp(int index, ops::NodeOut a, const GraphDefBuilder::Options& opts) {
if (opts.HaveError()) return nullptr;
NodeBuilder node_builder(opts.GetNameForOp("Retval"), "_Retval",
opts.op_registry());
node_builder.Input(a).Attr("index", index);
node_builder.Input(std::move(a)).Attr("index", index);
return opts.FinalizeBuilder(&node_builder);
}

View File

@@ -16,6 +16,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/global_data.h"
#include <string>
#include <utility>
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/platform/logging.h"
@@ -23,7 +24,7 @@ limitations under the License.
namespace xla {
GlobalData::GlobalData(ServiceInterface* parent, GlobalDataHandle handle)
: handle_(handle), parent_(parent) {}
: handle_(std::move(handle)), parent_(parent) {}
GlobalData::~GlobalData() {
UnregisterRequest request;

View File

@@ -16,6 +16,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/reference_util.h"
#include <array>
#include <utility>
#include "tensorflow/compiler/xla/client/computation_builder.h"
#include "tensorflow/compiler/xla/service/cpu/runtime_single_threaded_matmul.h"
@@ -331,7 +332,8 @@ ReferenceUtil::ConvArray4DGeneralDimensions(
std::pair<int64, int64> kernel_stride, Padding padding,
ConvolutionDimensionNumbers dimension_numbers) {
return ConvArray4DGeneralDimensionsDilated(lhs, rhs, kernel_stride, padding,
{1, 1}, {1, 1}, dimension_numbers);
{1, 1}, {1, 1},
std::move(dimension_numbers));
}
/* static */ std::unique_ptr<Array4D<float>>

View File

@@ -31,7 +31,7 @@ AsyncExecution::AsyncExecution(Backend* backend,
: backend_(CHECK_NOTNULL(backend)),
streams_(std::move(streams)),
profile_(profile),
result_(result) {
result_(std::move(result)) {
for (const auto& stream : streams_) {
CHECK(stream != nullptr);
}

View File

@@ -122,7 +122,7 @@ bool CompareShapes(const Shape& lhs, const Shape& rhs, bool compare_layouts) {
for (const auto& shape : parameters) {
*program_shape.add_parameters() = shape;
}
*program_shape.mutable_result() = result;
*program_shape.mutable_result() = std::move(result);
return program_shape;
}

View File

@@ -375,8 +375,8 @@ class NearestNeighborsOp : public OpKernel {
const Eigen::Ref<const Eigen::VectorXf>& points_half_squared_norm,
const Eigen::Ref<const MatrixXfRowMajor>& centers,
const Eigen::Ref<const Eigen::VectorXf>& centers_half_squared_norm,
Eigen::Ref<MatrixXi64RowMajor> nearest_center_indices,
Eigen::Ref<MatrixXfRowMajor> nearest_center_distances) {
const Eigen::Ref<MatrixXi64RowMajor>& nearest_center_indices,
const Eigen::Ref<MatrixXfRowMajor>& nearest_center_distances) {
CHECK_LE(k, centers.rows());
if (centers.rows() <= kNearestNeighborsCentersMaxBlockSize) {
FindKNearestCentersOneBlock(k, points, points_half_squared_norm, centers,

View File

@@ -270,7 +270,7 @@ class SessionBundleTest : public ::testing::Test {
// MetaGraphDef.
// Returns the path of the export.
// ** Should only be called once per test **
string SetupExport(MetaGraphDefTwiddler twiddler) {
string SetupExport(const MetaGraphDefTwiddler& twiddler) {
return SetupExport(twiddler, kVariablesFilename, kMetaGraphDefFilename);
}
// SetupExport that allows for the variables and meta_graph_def filenames

View File

@@ -456,7 +456,8 @@ class OpKernelBuilderTest : public ::testing::Test {
}
}
string GetKernelClassName(const string& op_type, DeviceType device_type,
string GetKernelClassName(const string& op_type,
const DeviceType& device_type,
const std::vector<string>& attrs,
DataTypeSlice input_types = {}) {
NodeDef def = CreateNodeDef(op_type, attrs);

View File

@@ -15,6 +15,8 @@ limitations under the License.
#include "tensorflow/core/graph/graph_def_builder.h"
#include <utility>
#include "tensorflow/core/graph/graph_constructor.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/core/errors.h"
@@ -119,7 +121,7 @@ Node* UnaryOp(const string& op_name, NodeOut input,
if (opts.HaveError()) return nullptr;
NodeBuilder node_builder(opts.GetNameForOp(op_name), op_name,
opts.op_registry());
node_builder.Input(input);
node_builder.Input(std::move(input));
return opts.FinalizeBuilder(&node_builder);
}
@@ -128,7 +130,7 @@ Node* BinaryOp(const string& op_name, NodeOut a, NodeOut b,
if (opts.HaveError()) return nullptr;
NodeBuilder node_builder(opts.GetNameForOp(op_name), op_name,
opts.op_registry());
node_builder.Input(a).Input(b);
node_builder.Input(std::move(a)).Input(std::move(b));
return opts.FinalizeBuilder(&node_builder);
}

View File

@@ -17,6 +17,7 @@ limitations under the License.
#include <deque>
#include <unordered_map>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/memory_types.h"
@@ -392,7 +393,8 @@ Node* AddControlMerge(const string& in_name1, const string& in_name2, Graph* g,
Node* AddControlSwitch(NodeBuilder::NodeOut input1, NodeBuilder::NodeOut input2,
const string& device_name,
const GraphDefBuilder::Options& bopts) {
Node* res_node = ops::BinaryOp("Switch", input1, input2, bopts);
Node* res_node =
ops::BinaryOp("Switch", std::move(input1), std::move(input2), bopts);
if (bopts.HaveError()) return nullptr;
res_node->set_assigned_device_name(device_name);
return res_node;
@@ -401,7 +403,7 @@ Node* AddControlSwitch(NodeBuilder::NodeOut input1, NodeBuilder::NodeOut input2,
// A next_iteration node for control flow.
Node* AddControlNext(NodeBuilder::NodeOut input, const string& device_name,
const GraphDefBuilder::Options& bopts) {
Node* res_node = ops::UnaryOp("NextIteration", input, bopts);
Node* res_node = ops::UnaryOp("NextIteration", std::move(input), bopts);
if (bopts.HaveError()) return nullptr;
res_node->set_assigned_device_name(device_name);
return res_node;

View File

@@ -16,6 +16,7 @@ limitations under the License.
#include "tensorflow/core/graph/graph_partition.h"
#include <unordered_map>
#include <utility>
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
@@ -159,7 +160,7 @@ Output BoolInput(const Scope& scope) {
}
Output Combine(const Scope& scope, Input a, Input b) {
return ConstructOp(scope, "Combine", {a, b});
return ConstructOp(scope, "Combine", {std::move(a), std::move(b)});
}
class GraphPartitionTest : public ::testing::Test {

View File

@@ -86,7 +86,7 @@ class OptimizerCSETest : public ::testing::Test {
str_util::Join(edges, ";"));
}
string DoCSE(std::function<bool(const Node*)> consider_fn = nullptr) {
string DoCSE(const std::function<bool(const Node*)>& consider_fn = nullptr) {
string before = CanonicalGraphString(&graph_);
LOG(ERROR) << "Before rewrites: " << before;

View File

@@ -13,6 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <utility>
#include "tensorflow/core/util/equal_graph_def.h"
#include "tensorflow/core/framework/node_def_util.h"
@@ -40,7 +42,7 @@ Node* Alternate(const GraphDefBuilder::Options& opts) {
Node* Combine(ops::NodeOut a, ops::NodeOut b,
const GraphDefBuilder::Options& opts) {
return ops::BinaryOp("Combine", a, b, opts);
return ops::BinaryOp("Combine", std::move(a), std::move(b), opts);
}
class EqualGraphDefTest : public ::testing::Test {