Make single-parameter constructors explicit
PiperOrigin-RevId: 157628970
This commit is contained in:
parent 0b8070253d
commit 6882effb86
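Background on the change (not part of the original commit message): a single-parameter constructor that is not marked `explicit` also acts as an implicit conversion, so unrelated values can silently become objects of that type. The snippet below is an illustrative sketch only; the `Widget` class and `Consume` function are hypothetical, not taken from the commit, and just show the kind of accidental conversion that `explicit` rules out.

```cpp
#include <cstddef>

// Hypothetical example: without `explicit`, this constructor would also
// act as an implicit conversion from size_t to Widget.
class Widget {
 public:
  explicit Widget(size_t capacity) : capacity_(capacity) {}

 private:
  size_t capacity_;
};

void Consume(const Widget& w) { /* ... */ }

int main() {
  Consume(Widget(42));  // Fine: the conversion is spelled out.
  // Consume(42);       // Error with `explicit`; compiles silently without it.
  return 0;
}
```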
@@ -887,7 +887,7 @@ class CSession {
     TF_DeleteSessionOptions(opts);
   }
 
-  CSession(TF_Session* session) { session_ = session; }
+  explicit CSession(TF_Session* session) : session_(session) {}
 
   ~CSession() {
     TF_Status* s = TF_NewStatus();
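A side note on the CSession hunk above: besides adding `explicit`, it also moves the assignment from the constructor body into a member-initializer list. The sketch below uses a hypothetical `Holder` class (not from the commit) to contrast the two forms.

```cpp
// Hypothetical illustration of the same refactoring pattern.
class Holder {
 public:
  // Initializer-list form, as used after this commit: ptr_ is initialized
  // directly with the argument.
  explicit Holder(int* ptr) : ptr_(ptr) {}

  // Body-assignment form, as used before: ptr_ is first default-initialized
  // (indeterminate for a raw pointer) and then assigned.
  // Holder(int* ptr) { ptr_ = ptr; }

 private:
  int* ptr_;
};
```

For a raw pointer the end result is the same; for members with non-trivial constructors, the initializer-list form avoids a default construction followed by an assignment.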
@@ -55,7 +55,7 @@ TEST(CoordinatorTest, TestStopAndWaitOnStop) {
 
 class MockQueueRunner : public RunnerInterface {
  public:
-  MockQueueRunner(Coordinator* coord) {
+  explicit MockQueueRunner(Coordinator* coord) {
     coord_ = coord;
     join_counter_ = nullptr;
     thread_pool_.reset(new thread::ThreadPool(Env::Default(), "test-pool", 10));
@@ -240,7 +240,7 @@ class MatcherBase {
 //
 class WhileConditionComputationMatcher : public MatcherBase {
  public:
-  WhileConditionComputationMatcher(const HloComputation* computation)
+  explicit WhileConditionComputationMatcher(const HloComputation* computation)
       : computation_(computation) {
     expr_trees_.emplace_back(BuildCondExprTree());
   }
@@ -230,7 +230,8 @@ inline perftools::gputools::port::Status ToExecutorStatus(const Status& s) {
 class CudnnRNNWorkspaceAllocator : public ScratchAllocator {
  public:
   ~CudnnRNNWorkspaceAllocator() override {}
-  CudnnRNNWorkspaceAllocator(OpKernelContext* context) : context_(context) {}
+  explicit CudnnRNNWorkspaceAllocator(OpKernelContext* context)
+      : context_(context) {}
   int64 GetMemoryLimitInBytes(perftools::gputools::Stream* stream) override {
     return std::numeric_limits<int64>::max();
   }
@@ -302,7 +303,7 @@ class CudnnRNNReserveSpaceAllocator : public ScratchAllocator {
 // This class is not thread-safe.
 class CudnnRNNPersistentSpaceAllocator : public ScratchAllocator {
  public:
-  CudnnRNNPersistentSpaceAllocator(OpKernelContext* context)
+  explicit CudnnRNNPersistentSpaceAllocator(OpKernelContext* context)
       : context_(context) {}
 
   ~CudnnRNNPersistentSpaceAllocator() override {}
@@ -460,7 +461,8 @@ void RestoreParams(const OpInputList params_input,
 // shape validations.
 class CudnnRNNKernelCommon : public OpKernel {
  protected:
-  CudnnRNNKernelCommon(OpKernelConstruction* context) : OpKernel(context) {
+  explicit CudnnRNNKernelCommon(OpKernelConstruction* context)
+      : OpKernel(context) {
     OP_REQUIRES_OK(context, context->GetAttr("dropout", &dropout_));
     OP_REQUIRES_OK(context, context->GetAttr("seed", &seed_));
     OP_REQUIRES_OK(context, context->GetAttr("seed2", &seed2_));
@@ -66,7 +66,7 @@ struct NcclManager::CommunicatorMember {
 
 struct NcclManager::Communicator {
  public:
-  Communicator(std::vector<CommunicatorMember> members)
+  explicit Communicator(std::vector<CommunicatorMember> members)
       : num_devices(members.size()), members(std::move(members)) {}
 
   const int num_devices;
@@ -38,7 +38,7 @@ namespace tensorflow {
 // when the async op kernel's done callback is called.
 class NcclAsyncOpBase : public AsyncOpKernel {
  public:
-  NcclAsyncOpBase(OpKernelConstruction* c) : AsyncOpKernel(c) {
+  explicit NcclAsyncOpBase(OpKernelConstruction* c) : AsyncOpKernel(c) {
     OP_REQUIRES_OK(c, c->GetAttr("num_devices", &num_devices_));
     OP_REQUIRES_OK(c, c->GetAttr("shared_name", &collective_prefix_));
   }
@@ -62,7 +62,7 @@ class NcclAsyncOpBase : public AsyncOpKernel {
 // <k> devices in the communicator.
 class NcclAllReduceOpKernel : public NcclAsyncOpBase {
  public:
-  NcclAllReduceOpKernel(OpKernelConstruction* c) : NcclAsyncOpBase(c) {
+  explicit NcclAllReduceOpKernel(OpKernelConstruction* c) : NcclAsyncOpBase(c) {
     string reduction;
     OP_REQUIRES_OK(c, c->GetAttr("reduction", &reduction));
     if (reduction == "min") {
@ -106,7 +106,8 @@ REGISTER_KERNEL_BUILDER(Name("NcclAllReduce").Device(DEVICE_GPU),
|
||||||
|
|
||||||
class NcclBroadcastSendKernel : public NcclAsyncOpBase {
|
class NcclBroadcastSendKernel : public NcclAsyncOpBase {
|
||||||
public:
|
public:
|
||||||
NcclBroadcastSendKernel(OpKernelConstruction* c) : NcclAsyncOpBase(c) {}
|
explicit NcclBroadcastSendKernel(OpKernelConstruction* c)
|
||||||
|
: NcclAsyncOpBase(c) {}
|
||||||
|
|
||||||
void ComputeAsync(OpKernelContext* c, DoneCallback done) override {
|
void ComputeAsync(OpKernelContext* c, DoneCallback done) override {
|
||||||
auto actual_done = [c, done](Status s) {
|
auto actual_done = [c, done](Status s) {
|
||||||
|
|
@ -127,7 +128,8 @@ REGISTER_KERNEL_BUILDER(Name("NcclBroadcastSend").Device(DEVICE_GPU),
|
||||||
|
|
||||||
class NcclBroadcastRecvKernel : public NcclAsyncOpBase {
|
class NcclBroadcastRecvKernel : public NcclAsyncOpBase {
|
||||||
public:
|
public:
|
||||||
NcclBroadcastRecvKernel(OpKernelConstruction* c) : NcclAsyncOpBase(c) {}
|
explicit NcclBroadcastRecvKernel(OpKernelConstruction* c)
|
||||||
|
: NcclAsyncOpBase(c) {}
|
||||||
|
|
||||||
void ComputeAsync(OpKernelContext* c, DoneCallback done) override {
|
void ComputeAsync(OpKernelContext* c, DoneCallback done) override {
|
||||||
const Tensor& shape_t = c->input(0);
|
const Tensor& shape_t = c->input(0);
|
||||||
|
|
|
||||||
|
|
@@ -524,7 +524,7 @@ namespace {
 template <typename Device, typename T>
 class SliceHelper {
  public:
-  SliceHelper(OpKernelContext* ctx)
+  explicit SliceHelper(OpKernelContext* ctx)
       : ctx_(ctx), device_(ctx_->eigen_device<Device>()) {}
 
   ~SliceHelper() {
@@ -80,7 +80,7 @@ static std::atomic_int_fast64_t live_tensor_bytes(0);
 // A TensorBuffer that counts live memory usage for testing
 class TestTensorBuffer : public TensorBuffer {
  public:
-  TestTensorBuffer(size_t bytes) : bytes_(bytes) {
+  explicit TestTensorBuffer(size_t bytes) : bytes_(bytes) {
     live_tensor_bytes += bytes_;
   }
   ~TestTensorBuffer() override { live_tensor_bytes -= bytes_; }
@@ -31,7 +31,7 @@ namespace tensorflow {
 bool LocalDevice::use_global_threadpool_ = true;
 
 struct LocalDevice::EigenThreadPoolInfo {
-  EigenThreadPoolInfo(const SessionOptions& options) {
+  explicit EigenThreadPoolInfo(const SessionOptions& options) {
     int32 intra_op_parallelism_threads =
         options.config.intra_op_parallelism_threads();
     if (intra_op_parallelism_threads == 0) {
@@ -200,7 +200,9 @@ TEST(ContainerInfo, Error) {
 // handles.
 class StubDevice : public DeviceBase {
  public:
-  StubDevice(const string& name) : DeviceBase(nullptr) { attr_.set_name(name); }
+  explicit StubDevice(const string& name) : DeviceBase(nullptr) {
+    attr_.set_name(name);
+  }
 
   Allocator* GetAllocator(AllocatorAttributes) override {
     return cpu_allocator();
@@ -51,7 +51,7 @@ namespace {
 // An un-templated base class for Buffer.
 class BufferBase : public TensorBuffer {
  public:
-  BufferBase(Allocator* alloc) : alloc_(alloc) {}
+  explicit BufferBase(Allocator* alloc) : alloc_(alloc) {}
 
   TensorBuffer* root_buffer() override { return this; }
   void FillAllocationDescription(AllocationDescription* proto) const override {
@@ -39,7 +39,7 @@ typedef Eigen::GpuDevice GPUDevice;
 
 class AdjustHueOpBase : public OpKernel {
  protected:
-  AdjustHueOpBase(OpKernelConstruction* context) : OpKernel(context) {}
+  explicit AdjustHueOpBase(OpKernelConstruction* context) : OpKernel(context) {}
 
   struct ComputeOptions {
     const Tensor* input;
@@ -30,7 +30,8 @@ typedef Eigen::GpuDevice GPUDevice;
 
 class AdjustSaturationOpBase : public OpKernel {
  protected:
-  AdjustSaturationOpBase(OpKernelConstruction* context) : OpKernel(context) {}
+  explicit AdjustSaturationOpBase(OpKernelConstruction* context)
+      : OpKernel(context) {}
 
   struct ComputeOptions {
     const Tensor* input;
@@ -95,7 +95,8 @@ class FactOpKernel : public OpKernel {
 
 class FactOpKernel1 : public FactOpKernel {
  public:
-  FactOpKernel1(OpKernelConstruction* context) : FactOpKernel(context) {}
+  explicit FactOpKernel1(OpKernelConstruction* context)
+      : FactOpKernel(context) {}
 
   void Compute(OpKernelContext* context) override {
     FactOpKernel::Compute(context, kFacts1, kNum1);
@@ -104,7 +105,8 @@ class FactOpKernel1 : public FactOpKernel {
 
 class FactOpKernel2 : public FactOpKernel {
  public:
-  FactOpKernel2(OpKernelConstruction* context) : FactOpKernel(context) {}
+  explicit FactOpKernel2(OpKernelConstruction* context)
+      : FactOpKernel(context) {}
 
   void Compute(OpKernelContext* context) override {
     FactOpKernel::Compute(context, kFacts2, kNum2);
@ -148,7 +148,7 @@ REGISTER_KERNEL_BUILDER(Name("DestroyResourceOp").Device(DEVICE_CPU),
|
||||||
template <typename Device, typename T>
|
template <typename Device, typename T>
|
||||||
class AssignVariableOp : public OpKernel {
|
class AssignVariableOp : public OpKernel {
|
||||||
public:
|
public:
|
||||||
AssignVariableOp(OpKernelConstruction* c) : OpKernel(c) {
|
explicit AssignVariableOp(OpKernelConstruction* c) : OpKernel(c) {
|
||||||
OP_REQUIRES_OK(c, c->GetAttr("dtype", &dtype_));
|
OP_REQUIRES_OK(c, c->GetAttr("dtype", &dtype_));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@@ -64,7 +64,7 @@ using sdca::ExampleStatistics;
 using sdca::ModelWeights;
 
 struct ComputeOptions {
-  ComputeOptions(OpKernelConstruction* const context) {
+  explicit ComputeOptions(OpKernelConstruction* const context) {
     string loss_type;
     OP_REQUIRES_OK(context, context->GetAttr("loss_type", &loss_type));
     if (loss_type == "logistic_loss") {
@@ -41,7 +41,7 @@ using sparse::SparseTensor;
 
 class SparseTensorsMap : public ResourceBase {
  public:
-  SparseTensorsMap(const string& name) : name_(name), counter_(0) {}
+  explicit SparseTensorsMap(const string& name) : name_(name), counter_(0) {}
 
   string DebugString() override { return "A SparseTensorsMap"; }
 
@@ -116,7 +116,7 @@ class SparseTensorAccessingOp : public OpKernel {
  public:
   typedef std::function<Status(SparseTensorsMap**)> CreatorCallback;
 
-  SparseTensorAccessingOp(OpKernelConstruction* context)
+  explicit SparseTensorAccessingOp(OpKernelConstruction* context)
       : OpKernel(context), sparse_tensors_map_(nullptr) {}
 
  protected:
@@ -25,7 +25,7 @@ namespace {
 
 class TestStringStream : public InputStreamInterface {
  public:
-  TestStringStream(const string& content) : content_(content) {}
+  explicit TestStringStream(const string& content) : content_(content) {}
 
   Status ReadNBytes(int64 bytes_to_read, string* result) override {
     result->clear();
@@ -53,7 +53,7 @@ void Iterator::RegisterCleanup(CleanupFunction func, void* arg1, void* arg2) {
 namespace {
 class EmptyIterator : public Iterator {
  public:
-  EmptyIterator(const Status& s) : status_(s) {}
+  explicit EmptyIterator(const Status& s) : status_(s) {}
   bool Valid() const override { return false; }
   void Seek(const StringPiece& target) override {}
   void SeekToFirst() override {}
@@ -109,7 +109,7 @@ class StringSink : public WritableFile {
 
 class StringSource : public RandomAccessFile {
  public:
-  StringSource(const StringPiece& contents)
+  explicit StringSource(const StringPiece& contents)
       : contents_(contents.data(), contents.size()), bytes_read_(0) {}
 
   ~StringSource() override {}
@@ -35,7 +35,7 @@ ExpectedCalls CreateRetriableErrors(const string& method, int n) {
 // A class to manage call expectations on mock implementations.
 class MockCallSequence {
  public:
-  MockCallSequence(const ExpectedCalls& calls) : calls_(calls) {}
+  explicit MockCallSequence(const ExpectedCalls& calls) : calls_(calls) {}
 
   ~MockCallSequence() {
     EXPECT_TRUE(calls_.empty())
@@ -57,7 +57,7 @@ class MockCallSequence {
 
 class MockRandomAccessFile : public RandomAccessFile {
  public:
-  MockRandomAccessFile(const ExpectedCalls& calls) : calls_(calls) {}
+  explicit MockRandomAccessFile(const ExpectedCalls& calls) : calls_(calls) {}
   Status Read(uint64 offset, size_t n, StringPiece* result,
               char* scratch) const override {
     return calls_.ConsumeNextCall("Read");
@@ -69,7 +69,7 @@ class MockRandomAccessFile : public RandomAccessFile {
 
 class MockWritableFile : public WritableFile {
  public:
-  MockWritableFile(const ExpectedCalls& calls) : calls_(calls) {}
+  explicit MockWritableFile(const ExpectedCalls& calls) : calls_(calls) {}
   Status Append(const StringPiece& data) override {
     return calls_.ConsumeNextCall("Append");
   }
@@ -83,7 +83,7 @@ class MockWritableFile : public WritableFile {
 
 class MockFileSystem : public FileSystem {
  public:
-  MockFileSystem(const ExpectedCalls& calls) : calls_(calls) {}
+  explicit MockFileSystem(const ExpectedCalls& calls) : calls_(calls) {}
 
   Status NewRandomAccessFile(
       const string& fname, std::unique_ptr<RandomAccessFile>* result) override {
@@ -66,7 +66,7 @@ namespace parsed {
 class Feature {
  public:
   Feature() {}
-  Feature(StringPiece serialized) : serialized_(serialized) {}
+  explicit Feature(StringPiece serialized) : serialized_(serialized) {}
 
   Status ParseDataType(DataType* dtype) {
     DCHECK(dtype != nullptr);
@ -107,8 +107,8 @@ REGISTER_KERNEL_BUILDER(Name("KernelLabel")
|
||||||
|
|
||||||
class GraphDefVersionOp : public OpKernel {
|
class GraphDefVersionOp : public OpKernel {
|
||||||
public:
|
public:
|
||||||
GraphDefVersionOp(OpKernelConstruction* ctx)
|
explicit GraphDefVersionOp(OpKernelConstruction* ctx)
|
||||||
: OpKernel(ctx), graph_def_version_(ctx->graph_def_version()) {}
|
: OpKernel(ctx), graph_def_version_(ctx->graph_def_version()) {}
|
||||||
|
|
||||||
void Compute(OpKernelContext* ctx) override {
|
void Compute(OpKernelContext* ctx) override {
|
||||||
Tensor* output = nullptr;
|
Tensor* output = nullptr;
|
||||||
|
|
@ -125,7 +125,7 @@ REGISTER_KERNEL_BUILDER(Name("GraphDefVersion").Device(DEVICE_CPU),
|
||||||
|
|
||||||
class OldOp : public OpKernel {
|
class OldOp : public OpKernel {
|
||||||
public:
|
public:
|
||||||
OldOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
|
explicit OldOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
|
||||||
|
|
||||||
void Compute(OpKernelContext* ctx) override {}
|
void Compute(OpKernelContext* ctx) override {}
|
||||||
};
|
};
|
||||||
|
|
@ -145,7 +145,7 @@ REGISTER_KERNEL_BUILDER(Name("ResourceInitializedOp").Device(DEVICE_CPU),
|
||||||
|
|
||||||
class ResourceCreateOp : public OpKernel {
|
class ResourceCreateOp : public OpKernel {
|
||||||
public:
|
public:
|
||||||
ResourceCreateOp(OpKernelConstruction* c) : OpKernel(c) {}
|
explicit ResourceCreateOp(OpKernelConstruction* c) : OpKernel(c) {}
|
||||||
|
|
||||||
void Compute(OpKernelContext* c) override {
|
void Compute(OpKernelContext* c) override {
|
||||||
OP_REQUIRES_OK(c,
|
OP_REQUIRES_OK(c,
|
||||||
|
|
|
||||||
|
|
@@ -58,7 +58,7 @@ string StrAppend(string* to_append, const Args&... args) {
 // the field names (it's a loop over all names), and tracking of has_seen.
 class Generator {
  public:
-  Generator(const string& tf_header_prefix)
+  explicit Generator(const string& tf_header_prefix)
       : tf_header_prefix_(tf_header_prefix),
         header_(&code_.header),
         header_impl_(&code_.header_impl),
@@ -71,7 +71,7 @@ class Generator {
 
  private:
   struct Section {
-    Section(string* str) : str(str) {}
+    explicit Section(string* str) : str(str) {}
     string* str;
     string indent;
   };