Apply modernize-use-override (2nd iteration)

Summary:
Use C++11’s override and remove virtual where applicable.
Changes are automatically generated.
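
For illustration, a minimal sketch of the rewrite this check performs
(hypothetical class names, not taken from this diff):

    struct Base {
      virtual ~Base() = default;
      virtual bool Run(int /*stream_id*/ = 0) { return false; }
    };

    // Before: the derived class repeats 'virtual' and omits 'override'.
    struct DerivedBefore : Base {
      virtual ~DerivedBefore() {}
      virtual bool Run(int /*stream_id*/ = 0) { return true; }
    };

    // After: 'override' states the intent explicitly (including on the
    // destructor, which overrides the virtual base destructor), drops the
    // redundant 'virtual', and lets the compiler reject any signature
    // mismatch with Base.
    struct DerivedAfter : Base {
      ~DerivedAfter() override {}
      bool Run(int /*stream_id*/ = 0) override { return true; }
    };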

Reviewed By: Orvid

Differential Revision: D14086124

fbshipit-source-id: 2005227d095d776ca3b4309a57f54e25782b9b58
Author: Michael Liu
Date: 2019-02-14 16:21:50 -08:00
Committed by: Facebook Github Bot
parent f1da9892e9
commit 5f866d0ea2
28 changed files with 66 additions and 56 deletions

View File

@@ -45,7 +45,7 @@ class DestructableMock : public intrusive_ptr_target {
   DestructableMock(bool* resourcesReleased, bool* wasDestructed)
       : resourcesReleased_(resourcesReleased), wasDestructed_(wasDestructed) {}
-  ~DestructableMock() {
+  ~DestructableMock() override {
     *wasDestructed_ = true;
   }

View File

@@ -51,11 +51,11 @@ class CudaProfileInitializeOp : public OperatorBase {
     }
   }
-  ~CudaProfileInitializeOp() {
+  ~CudaProfileInitializeOp() override {
     unlink(config_.c_str());
   }
-  virtual bool Run(int /* unused */ /*stream_id*/ = 0) {
+  bool Run(int /* unused */ /*stream_id*/ = 0) override {
     // If this fails, check the contents of "output" for hints.
     CUDA_CHECK(
         cudaProfilerInitialize(config_.c_str(), output_.c_str(), cudaCSV));
@@ -72,7 +72,7 @@ class CudaProfileStartOp : public OperatorBase {
   CudaProfileStartOp(const OperatorDef& operator_def, Workspace* ws)
       : OperatorBase(operator_def, ws) {}
-  virtual bool Run(int /* unused */ /*stream_id*/ = 0) {
+  bool Run(int /* unused */ /*stream_id*/ = 0) override {
     CUDA_ENFORCE(cudaProfilerStart());
     return true;
   }
@@ -83,7 +83,7 @@ class CudaProfileStopOp : public OperatorBase {
   CudaProfileStopOp(const OperatorDef& operator_def, Workspace* ws)
       : OperatorBase(operator_def, ws) {}
-  virtual bool Run(int /* unused */ /*stream_id*/ = 0) {
+  bool Run(int /* unused */ /*stream_id*/ = 0) override {
     CUDA_ENFORCE(cudaProfilerStop());
     return true;
   }

View File

@@ -31,7 +31,7 @@ namespace caffe2 {
 class StringSerializer : public BlobSerializerBase {
  public:
   StringSerializer() {}
-  ~StringSerializer() {}
+  ~StringSerializer() override {}
   /**
    * Serializes a Blob. Note that this blob has to contain Tensor,
    * otherwise this function produces a fatal error.

View File

@@ -45,7 +45,7 @@ CAFFE_KNOWN_TYPE(BlobTestNonDefaultConstructible);
 class BlobTestFooSerializer : public BlobSerializerBase {
  public:
   BlobTestFooSerializer() {}
-  ~BlobTestFooSerializer() {}
+  ~BlobTestFooSerializer() override {}
   /**
    * Serializes a Blob. Note that this blob has to contain Tensor,
    * otherwise this function produces a fatal error.
@@ -792,7 +792,7 @@ class VectorCursor : public db::Cursor {
   explicit VectorCursor(StringMap* data) : data_(data) {
     pos_ = 0;
   }
-  ~VectorCursor() {}
+  ~VectorCursor() override {}
   void Seek(const string& /* unused */) override {}
   void SeekToFirst() override {}
   void Next() override {
@@ -817,7 +817,7 @@ class VectorDB : public db::DB {
  public:
   VectorDB(const string& source, db::Mode mode)
       : DB(source, mode), name_(source) {}
-  ~VectorDB() {
+  ~VectorDB() override {
     data_.erase(name_);
   }
   void Close() override {}
@@ -949,7 +949,7 @@ struct DummyType {
 class DummyTypeSerializer : public BlobSerializerBase {
  public:
   DummyTypeSerializer() {}
-  ~DummyTypeSerializer() {}
+  ~DummyTypeSerializer() override {}
   void Serialize(
       const void* pointer,
       TypeMeta typeMeta,

View File

@@ -26,7 +26,7 @@ class MiniDBCursor : public Cursor {
     // We call Next() to read in the first entry.
     Next();
   }
-  ~MiniDBCursor() {}
+  ~MiniDBCursor() override {}
   void Seek(const string& /*key*/) override {
     LOG(FATAL) << "MiniDB does not support seeking to a specific key.";
@@ -93,7 +93,7 @@ class MiniDBTransaction : public Transaction {
  public:
   explicit MiniDBTransaction(FILE* f, std::mutex* mutex)
       : file_(f), lock_(*mutex) {}
-  ~MiniDBTransaction() {
+  ~MiniDBTransaction() override {
     Commit();
   }
@@ -140,7 +140,9 @@ class MiniDB : public DB {
     CAFFE_ENFORCE(file_, "Cannot open file: " + source);
     VLOG(1) << "Opened MiniDB " << source;
   }
-  ~MiniDB() { Close(); }
+  ~MiniDB() override {
+    Close();
+  }
   void Close() override {
     if (file_) {

View File

@@ -844,7 +844,7 @@ class AsyncErrorOp final : public Operator<CPUContext> {
     return true;
   }
-  ~AsyncErrorOp() {
+  ~AsyncErrorOp() override {
     if (thread_) {
       thread_->join();
     }
@@ -989,7 +989,7 @@ class SyncErrorOp final : public Operator<CPUContext> {
     }
   }
-  ~SyncErrorOp() {}
+  ~SyncErrorOp() override {}

  private:
   bool fail_;

View File

@@ -94,7 +94,7 @@ SchedulerInput loadSchedulerInputFromString(const std::string& fileInput) {
 // constraints.
 class SimpleScheduler : Scheduler {
  public:
-  virtual SchedulerOutput schedule(const SchedulerInput& input) override {
+  SchedulerOutput schedule(const SchedulerInput& input) override {
     int numTasks = input.getNumberOfTasks();
     SchedulerOutput result(numTasks);

View File

@@ -20,7 +20,7 @@ class DummyObserver final : public ObserverBase<T> {
   void Start() override;
   void Stop() override;
-  ~DummyObserver() {}
+  ~DummyObserver() override {}
 };

 template <>

View File

@@ -80,7 +80,7 @@ class ElementwiseRTCOp final : public Operator<CUDAContext> {
     CAFFE_ENFORCE(src.size(), "Op should have a non-zero source code size.");
     func_.Compile(InputSize(), OutputSize(), src);
   }
-  ~ElementwiseRTCOp() {}
+  ~ElementwiseRTCOp() override {}
   bool RunOnDevice() override {
     static_assert(sizeof(void*) == sizeof(size_t),

View File

@@ -192,7 +192,7 @@ class MaxPoolRTCOp final : public ConvPoolOpBase<CUDAContext> {
     CAFFE_ENFORCE_EQ(
         order_, StorageOrder::NCHW, "Currently only NCHW is supported.");
   }
-  ~MaxPoolRTCOp() {}
+  ~MaxPoolRTCOp() override {}
   bool RunOnDeviceWithOrderNCHW() override {
     auto& X = Input(0);
@@ -250,7 +250,7 @@ class MaxPoolGradientRTCOp final : public ConvPoolOpBase<CUDAContext> {
     CAFFE_ENFORCE_EQ(
         order_, StorageOrder::NCHW, "Currently only NCHW is supported.");
   }
-  ~MaxPoolGradientRTCOp() {}
+  ~MaxPoolGradientRTCOp() override {}
   bool RunOnDeviceWithOrderNCHW() override {
     auto& X = Input(0);

View File

@@ -18,7 +18,7 @@ class LevelDBCursor : public Cursor {
       : iter_(db->NewIterator(leveldb::ReadOptions())) {
     SeekToFirst();
   }
-  ~LevelDBCursor() {}
+  ~LevelDBCursor() override {}
   void Seek(const string& key) override { iter_->Seek(key); }
   bool SupportsSeek() override { return true; }
   void SeekToFirst() override { iter_->SeekToFirst(); }
@@ -37,7 +37,9 @@ class LevelDBTransaction : public Transaction {
     CAFFE_ENFORCE(db_);
     batch_.reset(new leveldb::WriteBatch());
   }
-  ~LevelDBTransaction() { Commit(); }
+  ~LevelDBTransaction() override {
+    Commit();
+  }
   void Put(const string& key, const string& value) override {
     batch_->Put(key, value);
   }

View File

@@ -29,7 +29,7 @@ class LMDBCursor : public Cursor {
     MDB_CHECK(mdb_cursor_open(mdb_txn_, mdb_dbi_, &mdb_cursor_));
     SeekToFirst();
   }
-  virtual ~LMDBCursor() {
+  ~LMDBCursor() override {
     mdb_cursor_close(mdb_cursor_);
     mdb_dbi_close(mdb_env_, mdb_dbi_);
     mdb_txn_abort(mdb_txn_);
@@ -96,7 +96,7 @@ class LMDBTransaction final : public Transaction {
     MDB_CHECK(mdb_txn_begin(mdb_env_, NULL, 0, &mdb_txn_));
     MDB_CHECK(mdb_dbi_open(mdb_txn_, NULL, 0, &mdb_dbi_));
   }
-  ~LMDBTransaction() {
+  ~LMDBTransaction() override {
     MDB_CHECK(mdb_txn_commit(mdb_txn_));
     mdb_dbi_close(mdb_env_, mdb_dbi_);
   }
@@ -120,7 +120,9 @@ class LMDBTransaction final : public Transaction {
 class LMDB : public DB {
  public:
   LMDB(const string& source, Mode mode);
-  virtual ~LMDB() { Close(); }
+  ~LMDB() override {
+    Close();
+  }
   void Close() override {
     if (mdb_env_ != NULL) {
       mdb_env_close(mdb_env_);

View File

@@ -11,7 +11,7 @@ class ProtoDBCursor : public Cursor {
  public:
   explicit ProtoDBCursor(const TensorProtos* proto)
       : proto_(proto), iter_(0) {}
-  ~ProtoDBCursor() {}
+  ~ProtoDBCursor() override {}
   void Seek(const string& /*str*/) override {
     CAFFE_THROW("ProtoDB is not designed to support seeking.");
@@ -39,7 +39,9 @@ class ProtoDBTransaction : public Transaction {
       existing_names_.insert(tensor.name());
     }
   }
-  ~ProtoDBTransaction() { Commit(); }
+  ~ProtoDBTransaction() override {
+    Commit();
+  }
   void Put(const string& key, const string& value) override {
     if (existing_names_.count(key)) {
       CAFFE_THROW("An item with key ", key, " already exists.");
@@ -77,7 +79,9 @@ class ProtoDB : public DB {
     }
     LOG(INFO) << "Opened protodb " << source;
   }
-  ~ProtoDB() { Close(); }
+  ~ProtoDB() override {
+    Close();
+  }
   void Close() override {
     if (mode_ == NEW || mode_ == WRITE) {

View File

@@ -23,7 +23,7 @@ class ZmqDBCursor : public Cursor {
     Next();
   }
-  ~ZmqDBCursor() {
+  ~ZmqDBCursor() override {
     finalize_ = true;
     prefetched_ = false;
     producer_.notify_one();
@@ -91,7 +91,7 @@ class ZmqDB : public DB {
     CAFFE_ENFORCE(mode == READ, "ZeroMQ DB only supports read mode.");
   }
-  ~ZmqDB() {}
+  ~ZmqDB() override {}
   void Close() override {}

View File

@@ -26,7 +26,7 @@ class IDEEPConcatOp final : public IDEEPOperator {
     }
     CAFFE_ENFORCE_GE(axis_, 0);
   }
-  virtual ~IDEEPConcatOp() {}
+  ~IDEEPConcatOp() override {}
   bool RunOnDevice() override {
     bool fallback_to_cpu = false;
@@ -97,7 +97,7 @@ class IDEEPSplitOp final : public IDEEPOperator {
     }
     CAFFE_ENFORCE_GE(axis_, 0);
   }
-  virtual ~IDEEPSplitOp() {}
+  ~IDEEPSplitOp() override {}
   bool RunOnDevice() override {
     const auto& input = Input(INPUT);

View File

@@ -44,7 +44,7 @@ class IDEEPConvFusionOp final : public IDEEPConvPoolOpBase {
           "explicitly the kernel size.");
     }
   }
-  virtual ~IDEEPConvFusionOp() {}
+  ~IDEEPConvFusionOp() override {}
   bool RunOnDeviceWithOrderNCHW() override {
     const auto& X = Input(INPUT_X);

View File

@@ -17,7 +17,7 @@ class IDEEPConvOp final : public IDEEPConvPoolOpBase {
         pad_l() == pad_r() && pad_t() == pad_b(),
         "Uneven padding not supported.");
   }
-  virtual ~IDEEPConvOp() {}
+  ~IDEEPConvOp() override {}
   bool RunOnDeviceWithOrderNCHW() override {
     const auto& X = Input(INPUT);
@@ -131,7 +131,7 @@ class IDEEPConvGradientOp final : public IDEEPConvPoolOpBase {
         "In order to backward propagate weights correctly, "
         "please set training_mode=1");
   }
-  virtual ~IDEEPConvGradientOp() {}
+  ~IDEEPConvGradientOp() override {}
   bool RunOnDeviceWithOrderNCHW() override {
     const auto& X = Input(INPUT);

View File

@@ -15,7 +15,7 @@ class IDEEPDropoutOp final : public IDEEPOperator {
     CAFFE_ENFORCE_GE(ratio_, 0);
     CAFFE_ENFORCE_LT(ratio_, 1);
   }
-  virtual ~IDEEPDropoutOp() {}
+  ~IDEEPDropoutOp() override {}
   bool RunOnDevice() override {
     const auto& X = Input(INPUT);
@@ -55,7 +55,7 @@ class IDEEPDropoutGradientOp final : public IDEEPOperator {
     CAFFE_ENFORCE_GE(ratio_, 0);
     CAFFE_ENFORCE_LT(ratio_, 1);
   }
-  virtual ~IDEEPDropoutGradientOp() {}
+  ~IDEEPDropoutGradientOp() override {}
   bool RunOnDevice() override {
     const auto& dY = Input(OUTPUT_GRAD);

View File

@@ -17,7 +17,7 @@ class IDEEPSumOp final : public IDEEPOperator {
       : IDEEPOperator(operator_def, ws),
         fallback_sum_(operator_def, ws),
         fallback_add_(operator_def, ws) {}
-  virtual ~IDEEPSumOp() {}
+  ~IDEEPSumOp() override {}
   bool RunOnDevice() override {
     itensor::dims input_dims;

View File

@@ -23,7 +23,7 @@ class IDEEPExpandDimsOp final : public IDEEPOperator {
     }
     CAFFE_ENFORCE_GE(dims_.front(), 0, "Dimension ids must be non-negative.");
   }
-  ~IDEEPExpandDimsOp() {}
+  ~IDEEPExpandDimsOp() override {}
   bool RunOnDevice() override {
     if (!OperatorBase::InputBlob(INPUT).template IsType<itensor>()) {
@@ -85,7 +85,7 @@ class IDEEPSqueezeOp final : public IDEEPOperator {
     }
     CAFFE_ENFORCE_GE(dims_.front(), 0, "Dimension ids must be non-negative.");
   }
-  ~IDEEPSqueezeOp() {}
+  ~IDEEPSqueezeOp() override {}
   bool RunOnDevice() override {
     if (!OperatorBase::InputBlob(INPUT).template IsType<itensor>()) {

View File

@@ -33,7 +33,7 @@ class IDEEPFullyConnectedOp final : public IDEEPOperator {
       : IDEEPOperator(operator_def, ws),
         axis_(OperatorBase::GetSingleArgument<int32_t>("axis", 1)),
         axis_w_(OperatorBase::GetSingleArgument<int32_t>("axis_w", 1)) {}
-  virtual ~IDEEPFullyConnectedOp() {}
+  ~IDEEPFullyConnectedOp() override {}
   bool RunOnDevice() override {
     const auto& X = Input(INPUT);
@@ -78,7 +78,7 @@ class IDEEPFullyConnectedGradientOp final : public IDEEPOperator {
       : IDEEPOperator(operator_def, ws),
         axis_(OperatorBase::GetSingleArgument<int32_t>("axis", 1)),
         axis_w_(OperatorBase::GetSingleArgument<int32_t>("axis_w", 1)) {}
-  virtual ~IDEEPFullyConnectedGradientOp() {}
+  ~IDEEPFullyConnectedGradientOp() override {}
   bool RunOnDevice() override {
     const auto& X = Input(INPUT);

View File

@@ -19,7 +19,7 @@ class IDEEPLRNOp final : public IDEEPOperator {
     DCHECK_GT(alpha_, 0);
     DCHECK_GT(beta_, 0);
   }
-  virtual ~IDEEPLRNOp() {}
+  ~IDEEPLRNOp() override {}
   bool RunOnDevice() override {
     auto& X = Input(INPUT);
@@ -58,7 +58,7 @@ class IDEEPLRNGradientOp final : public IDEEPOperator {
     DCHECK_GT(alpha_, 0);
     DCHECK_GT(beta_, 0);
   }
-  virtual ~IDEEPLRNGradientOp() {}
+  ~IDEEPLRNGradientOp() override {}
   bool RunOnDevice() override {
     const auto& X = Input(INPUT);

View File

@@ -29,7 +29,7 @@ class IDEEPPoolOp final : public IDEEPConvPoolOpBase {
       LOG(FATAL) << "Unsupported pooling method: " << operator_def.type();
     }
   }
-  virtual ~IDEEPPoolOp() {}
+  ~IDEEPPoolOp() override {}
   bool RunOnDeviceWithOrderNCHW() override {
     auto& X = Input(INPUT);
@@ -77,7 +77,7 @@ class IDEEPPoolGradientOp final : public IDEEPConvPoolOpBase {
      LOG(FATAL) << "Unsupported pooling method: " << operator_def.type();
     }
   }
-  virtual ~IDEEPPoolGradientOp() {}
+  ~IDEEPPoolGradientOp() override {}
   bool RunOnDeviceWithOrderNCHW() override {
     const auto& X = Input(INPUT);

View File

@@ -21,7 +21,7 @@ class IDEEPReluOp final : public IDEEPOperator {
      LOG(FATAL) << "Unsupported Relu method: " << operator_def.type();
     }
   }
-  virtual ~IDEEPReluOp() {}
+  ~IDEEPReluOp() override {}
   bool RunOnDevice() override {
     const auto& X = Input(INPUT);
@@ -59,7 +59,7 @@ class IDEEPReluGradientOp final : public IDEEPOperator {
      LOG(FATAL) << "Unsupported Relu method: " << operator_def.type();
     }
   }
-  virtual ~IDEEPReluGradientOp() {}
+  ~IDEEPReluGradientOp() override {}
   bool RunOnDevice() override {
     const auto& Y = Input(OUTPUT);

View File

@@ -10,7 +10,7 @@ class IDEEPSigmoidOp final : public IDEEPOperator {
   IDEEPSigmoidOp(const OperatorDef& operator_def, Workspace* ws)
       : IDEEPOperator(operator_def, ws) {
   }
-  virtual ~IDEEPSigmoidOp() {}
+  ~IDEEPSigmoidOp() override {}
   bool RunOnDevice() override {
     const auto& X = Input(INPUT);
@@ -36,7 +36,7 @@ class IDEEPSigmoidGradientOp final : public IDEEPOperator {
   IDEEPSigmoidGradientOp(const OperatorDef& operator_def, Workspace* ws)
       : IDEEPOperator(operator_def, ws) {
   }
-  virtual ~IDEEPSigmoidGradientOp() {}
+  ~IDEEPSigmoidGradientOp() override {}
   bool RunOnDevice() override {
     const auto& Y = Input(OUTPUT);

View File

@@ -19,7 +19,7 @@ class IDEEPSpatialBNOp final : public IDEEPOperator {
     CAFFE_ENFORCE_GE(momentum_, 0);
     CAFFE_ENFORCE_LE(momentum_, 1);
   }
-  virtual ~IDEEPSpatialBNOp() {}
+  ~IDEEPSpatialBNOp() override {}
   bool RunOnDevice() override {
     const auto& X = Input(INPUT);
@@ -70,7 +70,7 @@ class IDEEPSpatialBNGradientOp final : public IDEEPOperator {
     CAFFE_ENFORCE(InputSize() > SAVED_VAR);
     CAFFE_ENFORCE(OutputSize() > BIAS_GRAD);
   }
-  virtual ~IDEEPSpatialBNGradientOp() {}
+  ~IDEEPSpatialBNGradientOp() override {}
   bool RunOnDevice() override {
     const auto& X = Input(INPUT);

View File

@@ -87,7 +87,7 @@ class CudnnConvOpBase : public ConvPoolOpBase<CUDAContext> {
     CUDNN_ENFORCE(cudnnCreateConvolutionDescriptor(&conv_desc_));
   }
-  ~CudnnConvOpBase() {
+  ~CudnnConvOpBase() override {
     CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
     CUDNN_ENFORCE(cudnnDestroyFilterDescriptor(filter_desc_));
     CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bias_desc_));
@@ -435,7 +435,7 @@ class CudnnConvOp final : public CudnnConvOpBase {
   CudnnConvOp(const OperatorDef& operator_def, Workspace* ws)
       : CudnnConvOpBase(operator_def, ws) {}
-  ~CudnnConvOp() {}
+  ~CudnnConvOp() override {}
   template <typename T_X, typename T_W, typename T_B, typename T_Y>
   bool DoRunWithType();
@@ -464,7 +464,7 @@ class CudnnConvGradientOp final : public CudnnConvOpBase {
     CUDNN_ENFORCE(cudnnCreateConvolutionDescriptor(&bwd_filter_conv_desc_));
   }
-  ~CudnnConvGradientOp() {
+  ~CudnnConvGradientOp() override {
     CUDNN_ENFORCE(cudnnDestroyConvolutionDescriptor(bwd_data_conv_desc_));
     CUDNN_ENFORCE(cudnnDestroyConvolutionDescriptor(bwd_filter_conv_desc_));
   }

View File

@@ -19,7 +19,7 @@ class EigenConvOp final : public ConvPoolOpBase<CPUContext> {
       : ConvPoolOpBase<CPUContext>(operator_def, ws) {
     OPERATOR_NEEDS_FEATURE(group_ == 1, "Group convolution not supported yet.");
   }
-  ~EigenConvOp() {}
+  ~EigenConvOp() override {}
   bool RunOnDeviceWithOrderNCHW() override;
   bool RunOnDeviceWithOrderNHWC() override;