Clean up more clang-tidy suppressions (#92203)

1. Remove unused NOLINTNEXTLINE(performance-move-const-arg) suppressions.
2. Add more std::move calls.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/92203
Approved by: https://github.com/Skylion007
cyy 2023-01-17 05:43:05 +00:00 committed by PyTorch MergeBot
parent bbce4184be
commit 9b716a0682
22 changed files with 29 additions and 70 deletions
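As background for item 1, a minimal sketch of what clang-tidy's performance-move-const-arg actually flags (Options and Consumer are hypothetical types, not from this diff): moving a movable, non-const by-value parameter into a member is fine and draws no warning, whereas std::move applied to a const (or trivially copyable) object silently degrades to a copy and is flagged.

#include <string>
#include <utility>

// Hypothetical types used only to illustrate the check.
struct Options {
  std::string tag;
};

struct Consumer {
  // Not flagged: the parameter is a movable, non-const value, so the move
  // is a real move and needs no NOLINT suppression.
  explicit Consumer(Options options) : options_(std::move(options)) {}

  Options options_;
};

void misuse(const Options& options) {
  // performance-move-const-arg fires here: std::move of a const lvalue
  // falls back to a copy, so the cast is misleading.
  Consumer consumer(std::move(options));
  (void)consumer;
}

int main() {
  Options opts{"checkpoint"};
  misuse(opts);
  return 0;
}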

View File

@@ -35,10 +35,8 @@ static void stateful_conv1d(benchmark::State& state) {
   std::vector<std::vector<torch::jit::IValue>> inputs;
   for (const auto i : c10::irange(10)) {
-    std::vector<torch::jit::IValue> input;
-    // NOLINTNEXTLINE(modernize-use-emplace)
-    input.push_back(torch::rand({batch_size, input_channels, width}));
-    inputs.push_back(input);
+    inputs.emplace_back(
+        {torch::jit::IValue(torch::rand({batch_size, input_channels, width}))});
   }
   auto m_cloned = m.clone();
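An aside on the pattern above (an illustrative sketch with standard-library types only, not code from this repository): modernize-use-emplace prefers constructing the element in place over building a temporary and pushing it.

#include <string>
#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<int, std::string>> pairs;

  // Flagged by modernize-use-emplace: a temporary pair is constructed and
  // then copied/moved into the vector.
  pairs.push_back(std::pair<int, std::string>(1, "one"));

  // Preferred: forward the constructor arguments so the pair is built
  // directly inside the vector's storage.
  pairs.emplace_back(2, "two");
  return 0;
}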

View File

@@ -18,16 +18,12 @@ const HIPHooksInterface& getHIPHooks() {
   c10::call_once(once, [] {
     hip_hooks = HIPHooksRegistry()->Create("HIPHooks", HIPHooksArgs{});
     if (!hip_hooks) {
-      hip_hooks =
-          // NOLINTNEXTLINE(modernize-make-unique)
-          std::unique_ptr<HIPHooksInterface>(new HIPHooksInterface());
+      hip_hooks = std::make_unique<HIPHooksInterface>();
     }
   });
 #else
   if (hip_hooks == nullptr) {
-    hip_hooks =
-        // NOLINTNEXTLINE(modernize-make-unique)
-        std::unique_ptr<HIPHooksInterface>(new HIPHooksInterface());
+    hip_hooks = std::make_unique<HIPHooksInterface>();
   }
 #endif
   return *hip_hooks;
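For reference, a minimal sketch of the modernize-make-unique rewrite applied above, using a hypothetical HooksInterface stand-in rather than the real ATen types: std::make_unique avoids spelling the type twice and is exception-safe when used inside larger expressions.

#include <memory>

// Hypothetical stand-in for a *HooksInterface default implementation.
struct HooksInterface {
  virtual ~HooksInterface() = default;
};

std::unique_ptr<HooksInterface> makeDefaultHooks() {
  // Old spelling, flagged by modernize-make-unique:
  //   std::unique_ptr<HooksInterface>(new HooksInterface());
  // Preferred:
  return std::make_unique<HooksInterface>();
}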

View File

@@ -17,9 +17,7 @@ const ORTHooksInterface& getORTHooks() {
   c10::call_once(once, [] {
     ort_hooks = ORTHooksRegistry()->Create("ORTHooks", {});
     if (!ort_hooks) {
-      ort_hooks =
-          // NOLINTNEXTLINE(modernize-make-unique)
-          std::unique_ptr<ORTHooksInterface>(new ORTHooksInterface());
+      ort_hooks = std::make_unique<ORTHooksInterface>();
     }
   });
   return *ort_hooks;

View File

@@ -101,8 +101,7 @@ int64_t sample_poisson(double lambda, at::CPUGeneratorImpl* generator) {
     invalpha = 1.1239 + 1.1328 / (b - 3.4);
     vr = 0.9277 - 3.6224 / (b - 2);
-    // NOLINTNEXTLINE(modernize-use-bool-literals)
-    while (1) {
+    while (true) {
       U = standard_uniform(generator) - 0.5;
       V = standard_uniform(generator);
       us = 0.5 - std::fabs(U);
@@ -129,8 +128,7 @@ int64_t sample_poisson(double lambda, at::CPUGeneratorImpl* generator) {
     enlam = std::exp(-lambda);
     X = 0;
     prod = 1.0;
-    // NOLINTNEXTLINE(modernize-use-bool-literals)
-    while (1) {
+    while (true) {
       U = standard_uniform(generator);
       prod *= U;
       if (prod > enlam) {

View File

@@ -162,8 +162,7 @@ int load_nnapi_model(
     operand.scale = operands[i].scale;
     operand.zeroPoint = operands[i].zero_point;
     operand.dimensionCount = operands[i].dimension_count;
-    // NOLINTNEXTLINE(modernize-use-nullptr)
-    operand.dimensions = operands[i].dimension_count ? (const uint32_t*)next_pointer : NULL;
+    operand.dimensions = operands[i].dimension_count ? (const uint32_t*)next_pointer : nullptr;
     next_pointer += 4 * operands[i].dimension_count;
     CAFFE_ENFORCE(next_pointer <= end_of_buf);
@@ -175,8 +174,7 @@ int load_nnapi_model(
   for (const auto i : c10::irange(ser_model->value_count)) {
     uint32_t len = values[i].source_length;
     const uint8_t* stored_pointer = next_pointer;
-    // NOLINTNEXTLINE(modernize-use-nullptr)
-    const void* value_pointer = NULL;
+    const void* value_pointer = nullptr;
     // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
     size_t value_length;
@@ -207,8 +205,7 @@ int load_nnapi_model(
         CAFFE_ENFORCE(false, "Unknown source type: ", values[i].source_type);
     }
-    // NOLINTNEXTLINE(modernize-use-nullptr)
-    CAFFE_ENFORCE(value_pointer != NULL);
+    CAFFE_ENFORCE(value_pointer != nullptr);
     next_pointer += value_physical_size(len);
     CAFFE_ENFORCE(next_pointer <= end_of_buf);
@@ -260,8 +257,7 @@ int load_nnapi_model(
   // TODO: Maybe eliminate required_size and just rely on next_pointer for bounds checking.
   CAFFE_ENFORCE(next_pointer <= end_of_buf);
   CAFFE_ENFORCE(next_pointer == (const uint8_t*)serialized_model + required_size);
-  // NOLINTNEXTLINE(modernize-use-nullptr)
-  if (out_bytes_consumed != NULL) {
+  if (out_bytes_consumed != nullptr) {
     *out_bytes_consumed = next_pointer - (const uint8_t*)serialized_model;
   }

View File

@@ -271,8 +271,8 @@ TORCH_PYTHON_API std::string processErrorMsg(std::string str);
 // Abstract base class for exceptions which translate to specific Python types
 struct PyTorchError : public std::exception {
-  // NOLINTNEXTLINE(modernize-pass-by-value)
-  PyTorchError(const std::string& msg_ = std::string()) : msg(msg_) {}
+  PyTorchError() = default;
+  PyTorchError(std::string msg_) : msg(std::move(msg_)) {}
   virtual PyObject* python_type() = 0;
   const char* what() const noexcept override {
     return msg.c_str();
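On the pattern itself (a sketch with a plain std::string member; SimpleError is hypothetical, not the real PyTorchError): modernize-pass-by-value takes sink arguments by value and moves them into the member, so rvalue callers pay only moves, while lvalue callers pay one copy plus a cheap move.

#include <string>
#include <utility>

// Hypothetical error type used only to illustrate the sink-by-value idiom.
struct SimpleError {
  SimpleError() = default;
  explicit SimpleError(std::string msg) : msg_(std::move(msg)) {}

  const char* what() const noexcept { return msg_.c_str(); }

 private:
  std::string msg_;
};

int main() {
  std::string text = "index out of range";
  SimpleError from_lvalue(text);             // copy into the parameter, move into msg_
  SimpleError from_rvalue(std::move(text));  // move into the parameter, move into msg_
  (void)from_lvalue;
  (void)from_rvalue;
  return 0;
}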

View File

@@ -36,7 +36,6 @@ class DataLoaderBase {
   DataLoaderBase(
       DataLoaderOptions options,
       std::unique_ptr<Dataset> main_thread_dataset = nullptr)
-      // NOLINTNEXTLINE(performance-move-const-arg)
       : options_(std::move(options)),
         main_thread_dataset_(std::move(main_thread_dataset)),
         sequencer_(new_sequencer()) {}
@@ -127,7 +126,6 @@ class DataLoaderBase {
     Result(optional<Batch>&& b, size_t sqn)
         : Sequenced(sqn), batch(std::move(b)) {}
     Result(std::exception_ptr exception, size_t sqn)
-        // NOLINTNEXTLINE(performance-move-const-arg)
         : Sequenced(sqn), exception(std::move(exception)) {}
     optional<Batch> batch;
     std::exception_ptr exception;

View File

@@ -37,7 +37,6 @@ class StatefulDataLoader : public DataLoaderBase<
   /// Constructs the `StatefulDataLoader` from a `dataset` and some `options`.
   StatefulDataLoader(Dataset dataset, DataLoaderOptions options)
       : super(
-            // NOLINTNEXTLINE(performance-move-const-arg)
             std::move(options),
             torch::make_unique<Dataset>(std::move(dataset))) {
     for (const auto w : c10::irange(this->options_.workers)) {

View File

@@ -40,7 +40,6 @@ class StatelessDataLoader : public DataLoaderBase<
       Dataset dataset,
       Sampler sampler,
       DataLoaderOptions options)
-      // NOLINTNEXTLINE(performance-move-const-arg)
       : super(std::move(options)), sampler_(std::move(sampler)) {
     for (const auto w : c10::irange(this->options_.workers)) {
       // Here we copy the dataset into the worker thread closure. Each worker

View File

@@ -13,7 +13,6 @@ namespace data {
 struct WorkerException : public std::exception {
   /// Constructs a `WorkerException` from an `exception_ptr`.
   explicit WorkerException(std::exception_ptr original)
-      // NOLINTNEXTLINE(performance-move-const-arg)
       : original_exception(std::move(original)),
         message("Caught exception in DataLoader worker thread.") {
     try {

View File

@@ -119,7 +119,6 @@ class SequentialImpl : public Cloneable<SequentialImpl> {
   explicit SequentialImpl(std::initializer_list<NamedAnyModule> named_modules) {
     modules_.reserve(named_modules.size());
     for (const auto& named_module : named_modules) {
-      // NOLINTNEXTLINE(performance-move-const-arg)
       push_back(
           std::move(named_module.name()), std::move(named_module.module()));
     }

View File

@@ -64,7 +64,6 @@ using FoldFuncOptions = FoldOptions;
 /// ```
 struct TORCH_API UnfoldOptions {
   UnfoldOptions(ExpandingArray<2> kernel_size)
-      // NOLINTNEXTLINE(performance-move-const-arg)
       : kernel_size_(std::move(kernel_size)) {}
   /// the size of the sliding blocks

View File

@@ -63,7 +63,6 @@ const Tensor& resize_(
   {
     at::tracer::impl::NoTracerDispatchMode tracer_guard;
-    // NOLINTNEXTLINE(performance-move-const-arg)
     self.resize_(size, std::move(optional_memory_format));
   }
   return self;
@@ -80,7 +79,6 @@ const Tensor& resize_as_(
   {
     at::tracer::impl::NoTracerDispatchMode tracer_guard;
-    // NOLINTNEXTLINE(performance-move-const-arg)
     self.resize_as_(the_template, std::move(optional_memory_format));
   }
   return self;

View File

@@ -649,7 +649,6 @@ void GraphTask::mark_as_completed_and_run_post_processing() {
     // Need to unlock before we call markCompleted to avoid holding locks
     // when the callbacks are called.
     lock.unlock();
-    // NOLINTNEXTLINE(performance-move-const-arg)
     future_result_->markCompleted(std::move(vars));
   } catch (std::exception& e) {
     future_result_->setErrorIfNeeded(std::current_exception());
@@ -743,7 +742,6 @@ void GraphTask::set_exception(
     const std::shared_ptr<Node>& fn) {
   set_exception_without_signal(fn);
   if (!future_completed_.exchange(true)) {
-    // NOLINTNEXTLINE(performance-move-const-arg)
     future_result_->setError(std::move(eptr));
   }
 }

View File

@@ -64,12 +64,11 @@ struct NodeTask {
   int getReentrantDepth() const;
   NodeTask(
-      // NOLINTNEXTLINE(modernize-pass-by-value)
       std::weak_ptr<GraphTask> base,
       std::shared_ptr<Node> fn,
       InputBuffer inputs,
       bool isShutdownTask = false)
-      : base_(base),
+      : base_(std::move(base)),
         fn_(std::move(fn)),
         inputs_(std::move(inputs)),
         isShutdownTask_(isShutdownTask) {}

View File

@@ -19,17 +19,14 @@ namespace autograd {
 // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 Scatter::Scatter(
     std::vector<at::Device> devices,
-    // NOLINTNEXTLINE(modernize-pass-by-value)
-    const c10::optional<std::vector<int64_t>>& chunk_sizes,
+    c10::optional<std::vector<int64_t>> chunk_sizes,
     int64_t dim,
-    // NOLINTNEXTLINE(modernize-pass-by-value)
-    const c10::optional<std::vector<c10::optional<at::cuda::CUDAStream>>>&
-        streams,
+    c10::optional<std::vector<c10::optional<at::cuda::CUDAStream>>> streams,
     bool unsqueeze_scalars)
     : devices_(std::move(devices)),
-      chunk_sizes_(chunk_sizes),
+      chunk_sizes_(std::move(chunk_sizes)),
       dim_(dim),
-      streams_(streams),
+      streams_(std::move(streams)),
       unsqueeze_scalars_(unsqueeze_scalars) {}
 Scatter::~Scatter() = default;
@@ -49,12 +46,7 @@ variable_list Scatter::apply(variable_list&& inputs) {
         return device.index();
       });
   auto tensors = torch::cuda::scatter(
-      // NOLINTNEXTLINE(performance-move-const-arg)
-      std::move(input),
-      device_indices,
-      chunk_sizes_,
-      dim_,
-      streams_);
+      std::move(input), device_indices, chunk_sizes_, dim_, streams_);
   std::vector<Variable> variables;
   variables.reserve(tensors.size());
@@ -105,8 +97,10 @@ variable_list Gather::apply(variable_list&& inputs) {
   if (compute_requires_grad(inputs)) {
     // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
     std::vector<at::Device> source_devices;
+    source_devices.reserve(inputs.size());
     // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
     std::vector<int64_t> input_sizes;
+    input_sizes.reserve(inputs.size());
     for (auto& input : inputs) {
       source_devices.push_back(input.device());
       input_sizes.push_back(input.size(dim_));
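A note on the two reserve calls added above (a sketch with standard types only, not the autograd code): reserving the known final size up front gives a single allocation instead of repeated reallocations while push_back fills the vector.

#include <cstddef>
#include <vector>

std::vector<int> squares(std::size_t n) {
  std::vector<int> out;
  out.reserve(n);  // one allocation; the push_back calls below never reallocate
  for (std::size_t i = 0; i < n; ++i) {
    out.push_back(static_cast<int>(i * i));
  }
  return out;
}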

View File

@@ -17,10 +17,10 @@ namespace autograd {
 struct TORCH_CUDA_CU_API Scatter : public Node {
   explicit Scatter(
       std::vector<at::Device> devices,
-      const c10::optional<std::vector<int64_t>>& chunk_sizes = c10::nullopt,
+      c10::optional<std::vector<int64_t>> chunk_sizes = c10::nullopt,
       int64_t dim = 0,
-      const c10::optional<std::vector<c10::optional<at::cuda::CUDAStream>>>&
-          streams = c10::nullopt,
+      c10::optional<std::vector<c10::optional<at::cuda::CUDAStream>>> streams =
+          c10::nullopt,
       bool unsqueeze_scalars = false);
   ~Scatter() override;

View File

@@ -2180,7 +2180,6 @@ void initTensorImplConversion(PyObject* module) {
         unsafe_reclaim_from_nonowning(static_cast<c10::TensorImpl*>(ptr));
     TORCH_CHECK(p.defined(), "Can't wrap undefined tensor");
     auto tensor = at::Tensor::wrap_tensor_impl(std::move(p));
-    // NOLINTNEXTLINE(performance-move-const-arg)
     return py::cast(std::move(tensor));
   });
   // set on the module level to avoid mixing pybind and plain CPython extensions

View File

@@ -17,7 +17,6 @@ RecvRpcBackward::RecvRpcBackward(
     rpc::worker_id_t fromWorkerId,
     rpc::DeviceMap deviceMap)
     : autogradMetadata_(autogradMetadata),
-      // NOLINTNEXTLINE(performance-move-const-arg)
       autogradContext_(std::move(autogradContext)),
       fromWorkerId_(fromWorkerId),
       deviceMap_(std::move(deviceMap)) {}

View File

@@ -141,7 +141,6 @@ std::unique_ptr<RpcWithProfilingReq> RpcWithProfilingReq::fromMessage(
       std::move(wrappedRpc),
       wrappedMsgType,
       std::move(wrappedMessage->tensors()),
-      // NOLINTNEXTLINE(performance-move-const-arg)
       std::move(cfg),
       profilerId);
 }

View File

@@ -93,7 +93,6 @@ c10::intrusive_ptr<Message> getMessageWithProfiling(
   auto wrappedProfilingMsg = RpcWithProfilingReq(
       msgType,
       std::move(wrappedRpcMessage),
-      // NOLINTNEXTLINE(performance-move-const-arg)
       std::move(profilerConfig),
       globallyUniqueProfilingId);
@@ -165,7 +164,6 @@ c10::intrusive_ptr<JitFuture> sendMessageWithAutograd(
     auto msgWithProfiling = getMessageWithProfiling(
         std::move(msg),
         rpc::MessageType::RUN_WITH_PROFILING_REQ,
-        // NOLINTNEXTLINE(performance-move-const-arg)
         std::move(profilerConfig));
     return agent.send(dst, std::move(msgWithProfiling), rpcTimeoutSeconds);
   }

View File

@@ -40,20 +40,16 @@ class ThreadPool {
     // it on a separate thread. This prevents tricky thread-pool-size-deadlocks
     // caused by an undersized thread pool and closures that end up doing sync
    // waits on the pool threads.
-    bool scheduled = false;
     {
-      std::lock_guard<std::mutex> lock(mutex_);
+      std::unique_lock<std::mutex> lock(mutex_);
       if (work_.size() < waiting_) {
         work_.emplace_back(std::move(closure));
-        scheduled = true;
+        lock.unlock();
+        cv_.notify_one();
+        return;
       }
     }
-    if (scheduled) {
-      cv_.notify_one();
-    } else {
-      // NOLINTNEXTLINE(bugprone-use-after-move)
-      ScheduleOnThread(std::move(closure));
-    }
+    ScheduleOnThread(std::move(closure));
   }

 private:
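To illustrate the locking change in this last hunk (a self-contained sketch, not the actual PyTorch ThreadPool; worker threads are omitted and MiniPool and waiting_ are assumptions): std::unique_lock lets the mutex be released before notify_one, so a woken worker does not immediately block on a still-held lock, and the early return means the closure is only moved from on the path that actually enqueued it, which is why the bugprone-use-after-move suppression could be dropped.

#include <condition_variable>
#include <cstddef>
#include <deque>
#include <functional>
#include <mutex>
#include <utility>

class MiniPool {
 public:
  void Schedule(std::function<void()> closure) {
    {
      std::unique_lock<std::mutex> lock(mutex_);
      if (work_.size() < waiting_) {
        work_.emplace_back(std::move(closure));
        // Release the mutex first so a notified worker can acquire it
        // right away instead of waking up and blocking on it.
        lock.unlock();
        cv_.notify_one();
        return;
      }
    }
    // Only reached when the closure was not enqueued, so it has not been
    // moved from; run it inline here as a stand-in for ScheduleOnThread.
    closure();
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  std::deque<std::function<void()>> work_;
  std::size_t waiting_ = 1;  // pretend one worker is parked on cv_
};

int main() {
  MiniPool pool;
  pool.Schedule([] { /* work */ });
  return 0;
}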