Make single-parameter constructors explicit

PiperOrigin-RevId: 157628970
Authored by A. Unique TensorFlower on 2017-05-31 13:45:15 -07:00; committed by TensorFlower Gardener
parent 0b8070253d
commit 6882effb86
24 changed files with 47 additions and 38 deletions
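Background for the change, with a minimal sketch (the Buffer/Consume names below are hypothetical, not code from this commit): a single-parameter constructor that is not marked explicit also acts as an implicit conversion, so a convertible value can silently become an object of the class at a call site. Marking the constructor explicit forces that conversion to be spelled out.

#include <cstddef>
#include <iostream>

// Hypothetical illustration, not code from this commit: without `explicit`,
// the single-argument constructor doubles as an implicit conversion from
// size_t to Buffer.
class Buffer {
 public:
  explicit Buffer(size_t bytes) : bytes_(bytes) {}
  size_t size() const { return bytes_; }

 private:
  size_t bytes_;
};

void Consume(const Buffer& b) { std::cout << b.size() << " bytes\n"; }

int main() {
  Consume(Buffer(64));  // OK: the conversion is written out.
  // Consume(64);       // With `explicit` this is a compile error; without it,
                        // a temporary Buffer would be constructed silently.
  return 0;
}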

View File

@@ -887,7 +887,7 @@ class CSession {
     TF_DeleteSessionOptions(opts);
   }
-  CSession(TF_Session* session) { session_ = session; }
+  explicit CSession(TF_Session* session) : session_(session) {}
   ~CSession() {
     TF_Status* s = TF_NewStatus();
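The hunk above also replaces the body assignment `session_ = session;` with a member initializer list. A minimal sketch of the two forms, using a hypothetical Widget/Holder pair rather than the real CSession:

// Hypothetical sketch, not the real CSession. For a raw pointer the two forms
// behave the same; for class-type members the initializer-list form constructs
// the member directly instead of default-constructing it and then assigning.
struct Widget {};

class Holder {
 public:
  // Preferred: initialize the member in the initializer list.
  explicit Holder(Widget* widget) : widget_(widget) {}

  // Same effect here, but done by assignment inside the body:
  //   explicit Holder(Widget* widget) { widget_ = widget; }

 private:
  Widget* widget_;
};

int main() {
  Widget w;
  Holder h(&w);
  (void)h;
  return 0;
}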

View File

@@ -55,7 +55,7 @@ TEST(CoordinatorTest, TestStopAndWaitOnStop) {
 class MockQueueRunner : public RunnerInterface {
  public:
-  MockQueueRunner(Coordinator* coord) {
+  explicit MockQueueRunner(Coordinator* coord) {
     coord_ = coord;
     join_counter_ = nullptr;
     thread_pool_.reset(new thread::ThreadPool(Env::Default(), "test-pool", 10));

View File

@@ -240,7 +240,7 @@ class MatcherBase {
 //
 class WhileConditionComputationMatcher : public MatcherBase {
  public:
-  WhileConditionComputationMatcher(const HloComputation* computation)
+  explicit WhileConditionComputationMatcher(const HloComputation* computation)
       : computation_(computation) {
     expr_trees_.emplace_back(BuildCondExprTree());
   }

View File

@@ -230,7 +230,8 @@ inline perftools::gputools::port::Status ToExecutorStatus(const Status& s) {
 class CudnnRNNWorkspaceAllocator : public ScratchAllocator {
  public:
   ~CudnnRNNWorkspaceAllocator() override {}
-  CudnnRNNWorkspaceAllocator(OpKernelContext* context) : context_(context) {}
+  explicit CudnnRNNWorkspaceAllocator(OpKernelContext* context)
+      : context_(context) {}
   int64 GetMemoryLimitInBytes(perftools::gputools::Stream* stream) override {
     return std::numeric_limits<int64>::max();
   }
@@ -302,7 +303,7 @@ class CudnnRNNReserveSpaceAllocator : public ScratchAllocator {
 // This class is not thread-safe.
 class CudnnRNNPersistentSpaceAllocator : public ScratchAllocator {
  public:
-  CudnnRNNPersistentSpaceAllocator(OpKernelContext* context)
+  explicit CudnnRNNPersistentSpaceAllocator(OpKernelContext* context)
       : context_(context) {}
   ~CudnnRNNPersistentSpaceAllocator() override {}
@@ -460,7 +461,8 @@ void RestoreParams(const OpInputList params_input,
 // shape validations.
 class CudnnRNNKernelCommon : public OpKernel {
  protected:
-  CudnnRNNKernelCommon(OpKernelConstruction* context) : OpKernel(context) {
+  explicit CudnnRNNKernelCommon(OpKernelConstruction* context)
+      : OpKernel(context) {
     OP_REQUIRES_OK(context, context->GetAttr("dropout", &dropout_));
     OP_REQUIRES_OK(context, context->GetAttr("seed", &seed_));
     OP_REQUIRES_OK(context, context->GetAttr("seed2", &seed2_));

View File

@@ -66,7 +66,7 @@ struct NcclManager::CommunicatorMember {
 struct NcclManager::Communicator {
  public:
-  Communicator(std::vector<CommunicatorMember> members)
+  explicit Communicator(std::vector<CommunicatorMember> members)
       : num_devices(members.size()), members(std::move(members)) {}
   const int num_devices;
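The Communicator constructor above also uses the sink-argument idiom: the vector is taken by value and moved into the member. A minimal sketch under that assumption (the Group type below is hypothetical, not the NcclManager code):

#include <cstddef>
#include <string>
#include <utility>
#include <vector>

// Hypothetical sketch, not the NcclManager code. Taking the vector by value
// lets callers either copy or move into the parameter; std::move then
// transfers it into the member without another copy. num_members_ is declared
// (and thus initialized) before members_, so members.size() is read before
// the move.
class Group {
 public:
  explicit Group(std::vector<std::string> members)
      : num_members_(members.size()), members_(std::move(members)) {}

  std::size_t num_members() const { return num_members_; }

 private:
  const std::size_t num_members_;
  std::vector<std::string> members_;
};

int main() {
  std::vector<std::string> names = {"gpu:0", "gpu:1"};
  Group g(std::move(names));  // moved in: no per-element copies
  return g.num_members() == 2 ? 0 : 1;
}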

View File

@@ -38,7 +38,7 @@ namespace tensorflow {
 // when the async op kernel's done callback is called.
 class NcclAsyncOpBase : public AsyncOpKernel {
  public:
-  NcclAsyncOpBase(OpKernelConstruction* c) : AsyncOpKernel(c) {
+  explicit NcclAsyncOpBase(OpKernelConstruction* c) : AsyncOpKernel(c) {
     OP_REQUIRES_OK(c, c->GetAttr("num_devices", &num_devices_));
     OP_REQUIRES_OK(c, c->GetAttr("shared_name", &collective_prefix_));
   }
@@ -62,7 +62,7 @@ class NcclAsyncOpBase : public AsyncOpKernel {
 // <k> devices in the communicator.
 class NcclAllReduceOpKernel : public NcclAsyncOpBase {
  public:
-  NcclAllReduceOpKernel(OpKernelConstruction* c) : NcclAsyncOpBase(c) {
+  explicit NcclAllReduceOpKernel(OpKernelConstruction* c) : NcclAsyncOpBase(c) {
     string reduction;
     OP_REQUIRES_OK(c, c->GetAttr("reduction", &reduction));
     if (reduction == "min") {
@@ -106,7 +106,8 @@ REGISTER_KERNEL_BUILDER(Name("NcclAllReduce").Device(DEVICE_GPU),
 class NcclBroadcastSendKernel : public NcclAsyncOpBase {
  public:
-  NcclBroadcastSendKernel(OpKernelConstruction* c) : NcclAsyncOpBase(c) {}
+  explicit NcclBroadcastSendKernel(OpKernelConstruction* c)
+      : NcclAsyncOpBase(c) {}
   void ComputeAsync(OpKernelContext* c, DoneCallback done) override {
     auto actual_done = [c, done](Status s) {
@@ -127,7 +128,8 @@ REGISTER_KERNEL_BUILDER(Name("NcclBroadcastSend").Device(DEVICE_GPU),
 class NcclBroadcastRecvKernel : public NcclAsyncOpBase {
  public:
-  NcclBroadcastRecvKernel(OpKernelConstruction* c) : NcclAsyncOpBase(c) {}
+  explicit NcclBroadcastRecvKernel(OpKernelConstruction* c)
+      : NcclAsyncOpBase(c) {}
   void ComputeAsync(OpKernelContext* c, DoneCallback done) override {
     const Tensor& shape_t = c->input(0);

View File

@@ -524,7 +524,7 @@ namespace {
 template <typename Device, typename T>
 class SliceHelper {
  public:
-  SliceHelper(OpKernelContext* ctx)
+  explicit SliceHelper(OpKernelContext* ctx)
       : ctx_(ctx), device_(ctx_->eigen_device<Device>()) {}
   ~SliceHelper() {

View File

@@ -80,7 +80,7 @@ static std::atomic_int_fast64_t live_tensor_bytes(0);
 // A TensorBuffer that counts live memory usage for testing
 class TestTensorBuffer : public TensorBuffer {
  public:
-  TestTensorBuffer(size_t bytes) : bytes_(bytes) {
+  explicit TestTensorBuffer(size_t bytes) : bytes_(bytes) {
     live_tensor_bytes += bytes_;
   }
   ~TestTensorBuffer() override { live_tensor_bytes -= bytes_; }

View File

@@ -31,7 +31,7 @@ namespace tensorflow {
 bool LocalDevice::use_global_threadpool_ = true;
 struct LocalDevice::EigenThreadPoolInfo {
-  EigenThreadPoolInfo(const SessionOptions& options) {
+  explicit EigenThreadPoolInfo(const SessionOptions& options) {
     int32 intra_op_parallelism_threads =
         options.config.intra_op_parallelism_threads();
     if (intra_op_parallelism_threads == 0) {

View File

@@ -200,7 +200,9 @@ TEST(ContainerInfo, Error) {
 // handles.
 class StubDevice : public DeviceBase {
  public:
-  StubDevice(const string& name) : DeviceBase(nullptr) { attr_.set_name(name); }
+  explicit StubDevice(const string& name) : DeviceBase(nullptr) {
+    attr_.set_name(name);
+  }
   Allocator* GetAllocator(AllocatorAttributes) override {
     return cpu_allocator();

View File

@@ -51,7 +51,7 @@ namespace {
 // An un-templated base class for Buffer.
 class BufferBase : public TensorBuffer {
  public:
-  BufferBase(Allocator* alloc) : alloc_(alloc) {}
+  explicit BufferBase(Allocator* alloc) : alloc_(alloc) {}
   TensorBuffer* root_buffer() override { return this; }
   void FillAllocationDescription(AllocationDescription* proto) const override {

View File

@@ -39,7 +39,7 @@ typedef Eigen::GpuDevice GPUDevice;
 class AdjustHueOpBase : public OpKernel {
  protected:
-  AdjustHueOpBase(OpKernelConstruction* context) : OpKernel(context) {}
+  explicit AdjustHueOpBase(OpKernelConstruction* context) : OpKernel(context) {}
   struct ComputeOptions {
     const Tensor* input;

View File

@@ -30,7 +30,8 @@ typedef Eigen::GpuDevice GPUDevice;
 class AdjustSaturationOpBase : public OpKernel {
  protected:
-  AdjustSaturationOpBase(OpKernelConstruction* context) : OpKernel(context) {}
+  explicit AdjustSaturationOpBase(OpKernelConstruction* context)
+      : OpKernel(context) {}
   struct ComputeOptions {
     const Tensor* input;

View File

@@ -95,7 +95,8 @@ class FactOpKernel : public OpKernel {
 class FactOpKernel1 : public FactOpKernel {
  public:
-  FactOpKernel1(OpKernelConstruction* context) : FactOpKernel(context) {}
+  explicit FactOpKernel1(OpKernelConstruction* context)
+      : FactOpKernel(context) {}
   void Compute(OpKernelContext* context) override {
     FactOpKernel::Compute(context, kFacts1, kNum1);
@@ -104,7 +105,8 @@ class FactOpKernel1 : public FactOpKernel {
 class FactOpKernel2 : public FactOpKernel {
  public:
-  FactOpKernel2(OpKernelConstruction* context) : FactOpKernel(context) {}
+  explicit FactOpKernel2(OpKernelConstruction* context)
+      : FactOpKernel(context) {}
   void Compute(OpKernelContext* context) override {
     FactOpKernel::Compute(context, kFacts2, kNum2);

View File

@@ -148,7 +148,7 @@ REGISTER_KERNEL_BUILDER(Name("DestroyResourceOp").Device(DEVICE_CPU),
 template <typename Device, typename T>
 class AssignVariableOp : public OpKernel {
  public:
-  AssignVariableOp(OpKernelConstruction* c) : OpKernel(c) {
+  explicit AssignVariableOp(OpKernelConstruction* c) : OpKernel(c) {
     OP_REQUIRES_OK(c, c->GetAttr("dtype", &dtype_));
   }

View File

@@ -64,7 +64,7 @@ using sdca::ExampleStatistics;
 using sdca::ModelWeights;
 struct ComputeOptions {
-  ComputeOptions(OpKernelConstruction* const context) {
+  explicit ComputeOptions(OpKernelConstruction* const context) {
     string loss_type;
     OP_REQUIRES_OK(context, context->GetAttr("loss_type", &loss_type));
     if (loss_type == "logistic_loss") {

View File

@@ -41,7 +41,7 @@ using sparse::SparseTensor;
 class SparseTensorsMap : public ResourceBase {
  public:
-  SparseTensorsMap(const string& name) : name_(name), counter_(0) {}
+  explicit SparseTensorsMap(const string& name) : name_(name), counter_(0) {}
   string DebugString() override { return "A SparseTensorsMap"; }
@@ -116,7 +116,7 @@ class SparseTensorAccessingOp : public OpKernel {
  public:
   typedef std::function<Status(SparseTensorsMap**)> CreatorCallback;
-  SparseTensorAccessingOp(OpKernelConstruction* context)
+  explicit SparseTensorAccessingOp(OpKernelConstruction* context)
       : OpKernel(context), sparse_tensors_map_(nullptr) {}
  protected:

View File

@@ -25,7 +25,7 @@ namespace {
 class TestStringStream : public InputStreamInterface {
  public:
-  TestStringStream(const string& content) : content_(content) {}
+  explicit TestStringStream(const string& content) : content_(content) {}
   Status ReadNBytes(int64 bytes_to_read, string* result) override {
     result->clear();

View File

@@ -53,7 +53,7 @@ void Iterator::RegisterCleanup(CleanupFunction func, void* arg1, void* arg2) {
 namespace {
 class EmptyIterator : public Iterator {
  public:
-  EmptyIterator(const Status& s) : status_(s) {}
+  explicit EmptyIterator(const Status& s) : status_(s) {}
   bool Valid() const override { return false; }
   void Seek(const StringPiece& target) override {}
   void SeekToFirst() override {}

View File

@@ -109,7 +109,7 @@ class StringSink : public WritableFile {
 class StringSource : public RandomAccessFile {
  public:
-  StringSource(const StringPiece& contents)
+  explicit StringSource(const StringPiece& contents)
       : contents_(contents.data(), contents.size()), bytes_read_(0) {}
   ~StringSource() override {}

View File

@@ -35,7 +35,7 @@ ExpectedCalls CreateRetriableErrors(const string& method, int n) {
 // A class to manage call expectations on mock implementations.
 class MockCallSequence {
  public:
-  MockCallSequence(const ExpectedCalls& calls) : calls_(calls) {}
+  explicit MockCallSequence(const ExpectedCalls& calls) : calls_(calls) {}
   ~MockCallSequence() {
     EXPECT_TRUE(calls_.empty())
@@ -57,7 +57,7 @@ class MockCallSequence {
 class MockRandomAccessFile : public RandomAccessFile {
  public:
-  MockRandomAccessFile(const ExpectedCalls& calls) : calls_(calls) {}
+  explicit MockRandomAccessFile(const ExpectedCalls& calls) : calls_(calls) {}
   Status Read(uint64 offset, size_t n, StringPiece* result,
               char* scratch) const override {
     return calls_.ConsumeNextCall("Read");
@@ -69,7 +69,7 @@ class MockRandomAccessFile : public RandomAccessFile {
 class MockWritableFile : public WritableFile {
  public:
-  MockWritableFile(const ExpectedCalls& calls) : calls_(calls) {}
+  explicit MockWritableFile(const ExpectedCalls& calls) : calls_(calls) {}
   Status Append(const StringPiece& data) override {
     return calls_.ConsumeNextCall("Append");
   }
@@ -83,7 +83,7 @@ class MockWritableFile : public WritableFile {
 class MockFileSystem : public FileSystem {
  public:
-  MockFileSystem(const ExpectedCalls& calls) : calls_(calls) {}
+  explicit MockFileSystem(const ExpectedCalls& calls) : calls_(calls) {}
   Status NewRandomAccessFile(
       const string& fname, std::unique_ptr<RandomAccessFile>* result) override {

View File

@@ -66,7 +66,7 @@ namespace parsed {
 class Feature {
  public:
   Feature() {}
-  Feature(StringPiece serialized) : serialized_(serialized) {}
+  explicit Feature(StringPiece serialized) : serialized_(serialized) {}
   Status ParseDataType(DataType* dtype) {
     DCHECK(dtype != nullptr);

View File

@@ -107,7 +107,7 @@ REGISTER_KERNEL_BUILDER(Name("KernelLabel")
 class GraphDefVersionOp : public OpKernel {
  public:
-  GraphDefVersionOp(OpKernelConstruction* ctx)
+  explicit GraphDefVersionOp(OpKernelConstruction* ctx)
       : OpKernel(ctx), graph_def_version_(ctx->graph_def_version()) {}
   void Compute(OpKernelContext* ctx) override {
@@ -125,7 +125,7 @@ REGISTER_KERNEL_BUILDER(Name("GraphDefVersion").Device(DEVICE_CPU),
 class OldOp : public OpKernel {
  public:
-  OldOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
+  explicit OldOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
   void Compute(OpKernelContext* ctx) override {}
 };
@@ -145,7 +145,7 @@ REGISTER_KERNEL_BUILDER(Name("ResourceInitializedOp").Device(DEVICE_CPU),
 class ResourceCreateOp : public OpKernel {
  public:
-  ResourceCreateOp(OpKernelConstruction* c) : OpKernel(c) {}
+  explicit ResourceCreateOp(OpKernelConstruction* c) : OpKernel(c) {}
   void Compute(OpKernelContext* c) override {
     OP_REQUIRES_OK(c,

View File

@@ -58,7 +58,7 @@ string StrAppend(string* to_append, const Args&... args) {
 // the field names (it's a loop over all names), and tracking of has_seen.
 class Generator {
  public:
-  Generator(const string& tf_header_prefix)
+  explicit Generator(const string& tf_header_prefix)
       : tf_header_prefix_(tf_header_prefix),
         header_(&code_.header),
         header_impl_(&code_.header_impl),
@@ -71,7 +71,7 @@ class Generator {
  private:
   struct Section {
-    Section(string* str) : str(str) {}
+    explicit Section(string* str) : str(str) {}
     string* str;
     string indent;
   };