Mirror of https://github.com/zebrajr/pytorch.git (synced 2025-12-06 12:20:52 +01:00)
Clean up more clang-tidy suppressions (#92203)
1. Remove unused NOLINTNEXTLINE(performance-move-const-arg) suppressions.
2. Add more std::move.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/92203
Approved by: https://github.com/Skylion007
This commit is contained in:
parent bbce4184be
commit 9b716a0682
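For readers unfamiliar with the check: clang-tidy's `performance-move-const-arg` flags a `std::move` applied to something that cannot actually be moved from (a const object or a trivially copyable type), where the "move" silently degrades to a copy. The suppressions deleted in the hunks below sat on lines whose `std::move` is genuinely meaningful, so the comments were dead weight. A minimal sketch of the distinction, using a hypothetical `Widget` type that is not from this PR:

```cpp
#include <string>
#include <utility>

// Hypothetical type, for illustration only.
struct Widget {
  std::string name;
  // By-value parameter + std::move: callers passing an rvalue pay one move,
  // callers passing an lvalue pay one copy. std::move here is meaningful,
  // so no NOLINT(performance-move-const-arg) suppression is needed.
  explicit Widget(std::string n) : name(std::move(n)) {}
};

int main() {
  const std::string s = "stateful_conv1d";
  Widget copied(s);            // lvalue: copied into the parameter, then moved
  Widget moved("temporary");   // rvalue: moved all the way through
  return copied.name == "stateful_conv1d" && moved.name == "temporary" ? 0 : 1;
}
```

Had `s` itself been wrapped in `std::move`, the const would force a copy anyway, and that is exactly the case the check exists to catch.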
```diff
@@ -35,10 +35,8 @@ static void stateful_conv1d(benchmark::State& state) {
   std::vector<std::vector<torch::jit::IValue>> inputs;
   for (const auto i : c10::irange(10)) {
-    std::vector<torch::jit::IValue> input;
-    // NOLINTNEXTLINE(modernize-use-emplace)
-    input.push_back(torch::rand({batch_size, input_channels, width}));
-    inputs.push_back(input);
+    inputs.emplace_back(
+        {torch::jit::IValue(torch::rand({batch_size, input_channels, width}))});
   }

   auto m_cloned = m.clone();
```
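This hunk also drops a `modernize-use-emplace` suppression by constructing the element in place instead of pushing a named temporary. A standalone sketch of the same pattern on plain standard-library types (nothing PyTorch-specific):

```cpp
#include <string>
#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<std::string, int>> options;
  // push_back needs a fully constructed element, so a temporary pair is
  // built first and then moved into the vector:
  options.push_back(std::pair<std::string, int>{"stride", 1});
  // emplace_back forwards its arguments and constructs the pair in place,
  // which is what removing the modernize-use-emplace suppression enables:
  options.emplace_back("kernel_size", 3);
  return options.size() == 2 ? 0 : 1;
}
```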
```diff
@@ -18,16 +18,12 @@ const HIPHooksInterface& getHIPHooks() {
   c10::call_once(once, [] {
     hip_hooks = HIPHooksRegistry()->Create("HIPHooks", HIPHooksArgs{});
     if (!hip_hooks) {
-      hip_hooks =
-          // NOLINTNEXTLINE(modernize-make-unique)
-          std::unique_ptr<HIPHooksInterface>(new HIPHooksInterface());
+      hip_hooks = std::make_unique<HIPHooksInterface>();
     }
   });
 #else
   if (hip_hooks == nullptr) {
-    hip_hooks =
-        // NOLINTNEXTLINE(modernize-make-unique)
-        std::unique_ptr<HIPHooksInterface>(new HIPHooksInterface());
+    hip_hooks = std::make_unique<HIPHooksInterface>();
   }
 #endif
   return *hip_hooks;
```
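`modernize-make-unique` targets exactly this two-step `std::unique_ptr<T>(new T())` spelling. A minimal sketch with a stand-in type (not the real ATen interface):

```cpp
#include <memory>

// Stand-in for the hooks interface; not the real ATen type.
struct HooksInterface {
  virtual ~HooksInterface() = default;
};

int main() {
  // The spelling the old code suppressed the warning for:
  std::unique_ptr<HooksInterface> before(new HooksInterface());
  // The replacement: no raw new, states the type once, and is
  // exception-safe when used inside larger expressions.
  auto after = std::make_unique<HooksInterface>();
  return (before && after) ? 0 : 1;
}
```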
```diff
@@ -17,9 +17,7 @@ const ORTHooksInterface& getORTHooks() {
   c10::call_once(once, [] {
     ort_hooks = ORTHooksRegistry()->Create("ORTHooks", {});
     if (!ort_hooks) {
-      ort_hooks =
-          // NOLINTNEXTLINE(modernize-make-unique)
-          std::unique_ptr<ORTHooksInterface>(new ORTHooksInterface());
+      ort_hooks = std::make_unique<ORTHooksInterface>();
     }
   });
   return *ort_hooks;
```
```diff
@@ -101,8 +101,7 @@ int64_t sample_poisson(double lambda, at::CPUGeneratorImpl* generator) {
     invalpha = 1.1239 + 1.1328 / (b - 3.4);
     vr = 0.9277 - 3.6224 / (b - 2);

-    // NOLINTNEXTLINE(modernize-use-bool-literals)
-    while (1) {
+    while (true) {
       U = standard_uniform(generator) - 0.5;
       V = standard_uniform(generator);
       us = 0.5 - std::fabs(U);
@@ -129,8 +128,7 @@ int64_t sample_poisson(double lambda, at::CPUGeneratorImpl* generator) {
     enlam = std::exp(-lambda);
     X = 0;
     prod = 1.0;
-    // NOLINTNEXTLINE(modernize-use-bool-literals)
-    while (1) {
+    while (true) {
       U = standard_uniform(generator);
       prod *= U;
       if (prod > enlam) {
```
```diff
@@ -162,8 +162,7 @@ int load_nnapi_model(
     operand.scale = operands[i].scale;
     operand.zeroPoint = operands[i].zero_point;
     operand.dimensionCount = operands[i].dimension_count;
-    // NOLINTNEXTLINE(modernize-use-nullptr)
-    operand.dimensions = operands[i].dimension_count ? (const uint32_t*)next_pointer : NULL;
+    operand.dimensions = operands[i].dimension_count ? (const uint32_t*)next_pointer : nullptr;

     next_pointer += 4 * operands[i].dimension_count;
     CAFFE_ENFORCE(next_pointer <= end_of_buf);
@@ -175,8 +174,7 @@ int load_nnapi_model(
   for (const auto i : c10::irange(ser_model->value_count)) {
     uint32_t len = values[i].source_length;
     const uint8_t* stored_pointer = next_pointer;
-    // NOLINTNEXTLINE(modernize-use-nullptr)
-    const void* value_pointer = NULL;
+    const void* value_pointer = nullptr;
     // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
     size_t value_length;

@@ -207,8 +205,7 @@ int load_nnapi_model(
       CAFFE_ENFORCE(false, "Unknown source type: ", values[i].source_type);
     }

-    // NOLINTNEXTLINE(modernize-use-nullptr)
-    CAFFE_ENFORCE(value_pointer != NULL);
+    CAFFE_ENFORCE(value_pointer != nullptr);

     next_pointer += value_physical_size(len);
     CAFFE_ENFORCE(next_pointer <= end_of_buf);
@@ -260,8 +257,7 @@ int load_nnapi_model(
   // TODO: Maybe eliminate required_size and just rely on next_pointer for bounds checking.
   CAFFE_ENFORCE(next_pointer <= end_of_buf);
   CAFFE_ENFORCE(next_pointer == (const uint8_t*)serialized_model + required_size);
-  // NOLINTNEXTLINE(modernize-use-nullptr)
-  if (out_bytes_consumed != NULL) {
+  if (out_bytes_consumed != nullptr) {
     *out_bytes_consumed = next_pointer - (const uint8_t*)serialized_model;
   }

```
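The four NNAPI hunks above swap `NULL` for `nullptr`, which is about type safety rather than performance: `NULL` is an integer constant, while `nullptr` has its own type that converts only to pointers. A small illustration with a hypothetical overload set (not from the PR):

```cpp
// Hypothetical overloads, just to show why nullptr is safer than NULL.
static int overloaded(int) { return 1; }
static int overloaded(const char*) { return 2; }

int main() {
  // nullptr has type std::nullptr_t and can only become a pointer:
  int picked = overloaded(nullptr);  // unambiguously the const char* overload
  // overloaded(NULL) would prefer the int overload or be rejected as
  // ambiguous, depending on how the implementation defines NULL.
  return picked == 2 ? 0 : 1;
}
```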
```diff
@@ -271,8 +271,8 @@ TORCH_PYTHON_API std::string processErrorMsg(std::string str);

 // Abstract base class for exceptions which translate to specific Python types
 struct PyTorchError : public std::exception {
-  // NOLINTNEXTLINE(modernize-pass-by-value)
-  PyTorchError(const std::string& msg_ = std::string()) : msg(msg_) {}
+  PyTorchError() = default;
+  PyTorchError(std::string msg_) : msg(std::move(msg_)) {}
   virtual PyObject* python_type() = 0;
   const char* what() const noexcept override {
     return msg.c_str();
```
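Note the nuance here: the old single constructor with a default argument becomes an explicit `= default` constructor plus a pass-by-value constructor that moves, satisfying `modernize-pass-by-value` without changing callers. A self-contained sketch of the new shape (`python_type()` omitted so the stand-in type is instantiable):

```cpp
#include <exception>
#include <string>
#include <utility>

// Minimal stand-in mirroring the new PyTorchError shape.
struct DemoError : public std::exception {
  DemoError() = default;
  DemoError(std::string msg_) : msg(std::move(msg_)) {}
  const char* what() const noexcept override { return msg.c_str(); }
  std::string msg;
};

int main() {
  std::string text = "tensor shape mismatch";
  DemoError from_lvalue(text);             // one copy into msg_, one move
  DemoError from_rvalue(std::move(text));  // moves end to end, no copy
  return from_rvalue.msg == "tensor shape mismatch" ? 0 : 1;
}
```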
```diff
@@ -36,7 +36,6 @@ class DataLoaderBase {
   DataLoaderBase(
       DataLoaderOptions options,
       std::unique_ptr<Dataset> main_thread_dataset = nullptr)
-      // NOLINTNEXTLINE(performance-move-const-arg)
       : options_(std::move(options)),
         main_thread_dataset_(std::move(main_thread_dataset)),
         sequencer_(new_sequencer()) {}
@@ -127,7 +126,6 @@ class DataLoaderBase {
     Result(optional<Batch>&& b, size_t sqn)
         : Sequenced(sqn), batch(std::move(b)) {}
     Result(std::exception_ptr exception, size_t sqn)
-        // NOLINTNEXTLINE(performance-move-const-arg)
         : Sequenced(sqn), exception(std::move(exception)) {}
     optional<Batch> batch;
     std::exception_ptr exception;
```
```diff
@@ -37,7 +37,6 @@ class StatefulDataLoader : public DataLoaderBase<
   /// Constructs the `StatefulDataLoader` from a `dataset` and some `options`.
   StatefulDataLoader(Dataset dataset, DataLoaderOptions options)
       : super(
-            // NOLINTNEXTLINE(performance-move-const-arg)
             std::move(options),
             torch::make_unique<Dataset>(std::move(dataset))) {
     for (const auto w : c10::irange(this->options_.workers)) {
```
```diff
@@ -40,7 +40,6 @@ class StatelessDataLoader : public DataLoaderBase<
       Dataset dataset,
       Sampler sampler,
       DataLoaderOptions options)
-      // NOLINTNEXTLINE(performance-move-const-arg)
       : super(std::move(options)), sampler_(std::move(sampler)) {
     for (const auto w : c10::irange(this->options_.workers)) {
       // Here we copy the dataset into the worker thread closure. Each worker
```
```diff
@@ -13,7 +13,6 @@ namespace data {
 struct WorkerException : public std::exception {
   /// Constructs a `WorkerException` from an `exception_ptr`.
   explicit WorkerException(std::exception_ptr original)
-      // NOLINTNEXTLINE(performance-move-const-arg)
       : original_exception(std::move(original)),
         message("Caught exception in DataLoader worker thread.") {
     try {
```
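The suppression above was removable because `std::exception_ptr` is a shared-ownership handle: moving it transfers the reference instead of bumping a count, so the `std::move` is a real move. A minimal sketch with a hypothetical `CapturedError` type:

```cpp
#include <exception>
#include <stdexcept>
#include <utility>

// Moving an exception_ptr steals the reference; copying would increment
// the shared count. That makes std::move on it genuinely useful.
struct CapturedError {
  std::exception_ptr original;
  explicit CapturedError(std::exception_ptr e) : original(std::move(e)) {}
};

int main() {
  std::exception_ptr eptr;
  try {
    throw std::runtime_error("worker failed");
  } catch (...) {
    eptr = std::current_exception();
  }
  CapturedError err(std::move(eptr));  // eptr is now empty
  return err.original ? 0 : 1;
}
```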
```diff
@@ -119,7 +119,6 @@ class SequentialImpl : public Cloneable<SequentialImpl> {
   explicit SequentialImpl(std::initializer_list<NamedAnyModule> named_modules) {
     modules_.reserve(named_modules.size());
     for (const auto& named_module : named_modules) {
-      // NOLINTNEXTLINE(performance-move-const-arg)
       push_back(
           std::move(named_module.name()), std::move(named_module.module()));
     }
```
```diff
@@ -64,7 +64,6 @@ using FoldFuncOptions = FoldOptions;
 /// ```
 struct TORCH_API UnfoldOptions {
   UnfoldOptions(ExpandingArray<2> kernel_size)
-      // NOLINTNEXTLINE(performance-move-const-arg)
       : kernel_size_(std::move(kernel_size)) {}

   /// the size of the sliding blocks
```
```diff
@@ -63,7 +63,6 @@ const Tensor& resize_(

   {
     at::tracer::impl::NoTracerDispatchMode tracer_guard;
-    // NOLINTNEXTLINE(performance-move-const-arg)
     self.resize_(size, std::move(optional_memory_format));
   }
   return self;
@@ -80,7 +79,6 @@ const Tensor& resize_as_(

   {
     at::tracer::impl::NoTracerDispatchMode tracer_guard;
-    // NOLINTNEXTLINE(performance-move-const-arg)
     self.resize_as_(the_template, std::move(optional_memory_format));
   }
   return self;
```
```diff
@@ -649,7 +649,6 @@ void GraphTask::mark_as_completed_and_run_post_processing() {
     // Need to unlock before we call markCompleted to avoid holding locks
     // when the callbacks are called.
     lock.unlock();
-    // NOLINTNEXTLINE(performance-move-const-arg)
     future_result_->markCompleted(std::move(vars));
   } catch (std::exception& e) {
     future_result_->setErrorIfNeeded(std::current_exception());
@@ -743,7 +742,6 @@ void GraphTask::set_exception(
     const std::shared_ptr<Node>& fn) {
   set_exception_without_signal(fn);
   if (!future_completed_.exchange(true)) {
-    // NOLINTNEXTLINE(performance-move-const-arg)
     future_result_->setError(std::move(eptr));
   }
 }
```
```diff
@@ -64,12 +64,11 @@ struct NodeTask {
   int getReentrantDepth() const;

   NodeTask(
-      // NOLINTNEXTLINE(modernize-pass-by-value)
       std::weak_ptr<GraphTask> base,
       std::shared_ptr<Node> fn,
       InputBuffer inputs,
       bool isShutdownTask = false)
-      : base_(base),
+      : base_(std::move(base)),
         fn_(std::move(fn)),
         inputs_(std::move(inputs)),
         isShutdownTask_(isShutdownTask) {}
```
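Here the suppression disappears because the by-value `std::weak_ptr` parameter is now actually moved: a weak_ptr move steals the control-block reference instead of taking another weak count. A minimal sketch with a hypothetical `Task` type:

```cpp
#include <memory>
#include <utility>

// Mirrors the base_(std::move(base)) change on a stand-in type.
struct Task {
  std::weak_ptr<int> graph;
  explicit Task(std::weak_ptr<int> g) : graph(std::move(g)) {}
};

int main() {
  auto owner = std::make_shared<int>(42);
  std::weak_ptr<int> w = owner;
  Task t{std::move(w)};  // w is now empty; t.graph still observes owner
  return t.graph.expired() ? 1 : 0;
}
```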
```diff
@@ -19,17 +19,14 @@ namespace autograd {
 // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 Scatter::Scatter(
     std::vector<at::Device> devices,
-    // NOLINTNEXTLINE(modernize-pass-by-value)
-    const c10::optional<std::vector<int64_t>>& chunk_sizes,
+    c10::optional<std::vector<int64_t>> chunk_sizes,
     int64_t dim,
-    // NOLINTNEXTLINE(modernize-pass-by-value)
-    const c10::optional<std::vector<c10::optional<at::cuda::CUDAStream>>>&
-        streams,
+    c10::optional<std::vector<c10::optional<at::cuda::CUDAStream>>> streams,
     bool unsqueeze_scalars)
     : devices_(std::move(devices)),
-      chunk_sizes_(chunk_sizes),
+      chunk_sizes_(std::move(chunk_sizes)),
       dim_(dim),
-      streams_(streams),
+      streams_(std::move(streams)),
       unsqueeze_scalars_(unsqueeze_scalars) {}

 Scatter::~Scatter() = default;
```
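This is the pass-by-value idiom applied to optional vectors: the old const-reference signature forced `chunk_sizes_(chunk_sizes)` to deep-copy the contained vector, while the by-value parameter plus `std::move` costs rvalue callers a single move. A sketch of the same shape, using `std::optional` as a stand-in for `c10::optional`:

```cpp
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>

// Stand-in for the Scatter constructor change; not the real autograd node.
struct ScatterLike {
  std::optional<std::vector<int64_t>> chunk_sizes_;
  explicit ScatterLike(std::optional<std::vector<int64_t>> chunk_sizes)
      : chunk_sizes_(std::move(chunk_sizes)) {}
};

int main() {
  // The temporary vector is moved into the parameter and then into the
  // member: no element-wise copy anywhere along the way.
  ScatterLike s(std::vector<int64_t>{2, 2, 4});
  return s.chunk_sizes_ ? 0 : 1;
}
```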
```diff
@@ -49,12 +46,7 @@ variable_list Scatter::apply(variable_list&& inputs) {
     return device.index();
   });
   auto tensors = torch::cuda::scatter(
-      // NOLINTNEXTLINE(performance-move-const-arg)
-      std::move(input),
-      device_indices,
-      chunk_sizes_,
-      dim_,
-      streams_);
+      std::move(input), device_indices, chunk_sizes_, dim_, streams_);

   std::vector<Variable> variables;
   variables.reserve(tensors.size());
```
```diff
@@ -105,8 +97,10 @@ variable_list Gather::apply(variable_list&& inputs) {
   if (compute_requires_grad(inputs)) {
     // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
     std::vector<at::Device> source_devices;
+    source_devices.reserve(inputs.size());
     // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
     std::vector<int64_t> input_sizes;
+    input_sizes.reserve(inputs.size());
     for (auto& input : inputs) {
       source_devices.push_back(input.device());
       input_sizes.push_back(input.size(dim_));
```
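The two added `reserve` calls avoid repeated reallocation, since the final element count is known before the loop. A trivial standalone demonstration of the pattern:

```cpp
#include <cstddef>
#include <vector>

int main() {
  const std::size_t n = 1000;
  std::vector<int> input_sizes;
  // Without reserve, the vector reallocates several times as it grows;
  // reserving once up front makes each push_back a cheap append. That is
  // all the two added lines in the hunk above do.
  input_sizes.reserve(n);
  for (std::size_t i = 0; i < n; ++i) {
    input_sizes.push_back(static_cast<int>(i));
  }
  return input_sizes.size() == n ? 0 : 1;
}
```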
```diff
@@ -17,10 +17,10 @@ namespace autograd {
 struct TORCH_CUDA_CU_API Scatter : public Node {
   explicit Scatter(
       std::vector<at::Device> devices,
-      const c10::optional<std::vector<int64_t>>& chunk_sizes = c10::nullopt,
+      c10::optional<std::vector<int64_t>> chunk_sizes = c10::nullopt,
       int64_t dim = 0,
-      const c10::optional<std::vector<c10::optional<at::cuda::CUDAStream>>>&
-          streams = c10::nullopt,
+      c10::optional<std::vector<c10::optional<at::cuda::CUDAStream>>> streams =
+          c10::nullopt,
       bool unsqueeze_scalars = false);
   ~Scatter() override;

```
```diff
@@ -2180,7 +2180,6 @@ void initTensorImplConversion(PyObject* module) {
         unsafe_reclaim_from_nonowning(static_cast<c10::TensorImpl*>(ptr));
     TORCH_CHECK(p.defined(), "Can't wrap undefined tensor");
     auto tensor = at::Tensor::wrap_tensor_impl(std::move(p));
-    // NOLINTNEXTLINE(performance-move-const-arg)
     return py::cast(std::move(tensor));
   });
   // set on the module level to avoid mixing pybind and plain CPython extensions
```
```diff
@@ -17,7 +17,6 @@ RecvRpcBackward::RecvRpcBackward(
     rpc::worker_id_t fromWorkerId,
     rpc::DeviceMap deviceMap)
     : autogradMetadata_(autogradMetadata),
-      // NOLINTNEXTLINE(performance-move-const-arg)
       autogradContext_(std::move(autogradContext)),
       fromWorkerId_(fromWorkerId),
       deviceMap_(std::move(deviceMap)) {}
```
```diff
@@ -141,7 +141,6 @@ std::unique_ptr<RpcWithProfilingReq> RpcWithProfilingReq::fromMessage(
       std::move(wrappedRpc),
       wrappedMsgType,
       std::move(wrappedMessage->tensors()),
-      // NOLINTNEXTLINE(performance-move-const-arg)
       std::move(cfg),
       profilerId);
 }
```
```diff
@@ -93,7 +93,6 @@ c10::intrusive_ptr<Message> getMessageWithProfiling(
   auto wrappedProfilingMsg = RpcWithProfilingReq(
       msgType,
       std::move(wrappedRpcMessage),
-      // NOLINTNEXTLINE(performance-move-const-arg)
       std::move(profilerConfig),
       globallyUniqueProfilingId);

```
```diff
@@ -165,7 +164,6 @@ c10::intrusive_ptr<JitFuture> sendMessageWithAutograd(
     auto msgWithProfiling = getMessageWithProfiling(
         std::move(msg),
         rpc::MessageType::RUN_WITH_PROFILING_REQ,
-        // NOLINTNEXTLINE(performance-move-const-arg)
         std::move(profilerConfig));
     return agent.send(dst, std::move(msgWithProfiling), rpcTimeoutSeconds);
   }
```
```diff
@@ -40,20 +40,16 @@ class ThreadPool {
   // it on a separate thread. This prevents tricky thread-pool-size-deadlocks
   // caused by an undersized thread pool and closures that end up doing sync
   // waits on the pool threads.
-  bool scheduled = false;
   {
-    std::lock_guard<std::mutex> lock(mutex_);
+    std::unique_lock<std::mutex> lock(mutex_);
     if (work_.size() < waiting_) {
       work_.emplace_back(std::move(closure));
-      scheduled = true;
+      lock.unlock();
+      cv_.notify_one();
+      return;
     }
   }
-  if (scheduled) {
-    cv_.notify_one();
-  } else {
-    // NOLINTNEXTLINE(bugprone-use-after-move)
-    ScheduleOnThread(std::move(closure));
-  }
+  ScheduleOnThread(std::move(closure));
 }

  private:
```
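This last hunk is the most substantial restructuring in the commit: switching to `std::unique_lock` lets the lock be dropped before `notify_one`, so the woken worker does not immediately block on the mutex, and the early `return` removes both the `scheduled` flag and the `bugprone-use-after-move` suppression, since `closure` can now only be moved on exactly one path. A simplified, self-contained sketch of the new control flow (names are stand-ins, and `schedule_on_thread` is a stub rather than the real fallback path):

```cpp
#include <condition_variable>
#include <cstddef>
#include <deque>
#include <functional>
#include <mutex>
#include <utility>

class MiniPool {
 public:
  void run(std::function<void()> closure) {
    {
      std::unique_lock<std::mutex> lock(mutex_);
      if (work_.size() < waiting_) {
        work_.emplace_back(std::move(closure));
        // Unlock before notifying so the woken worker does not immediately
        // contend for the mutex; the early return also guarantees the
        // std::move below can never see a moved-from closure.
        lock.unlock();
        cv_.notify_one();
        return;
      }
    }
    schedule_on_thread(std::move(closure));  // stub fallback path
  }

 private:
  void schedule_on_thread(std::function<void()>) {}
  std::mutex mutex_;
  std::condition_variable cv_;
  std::deque<std::function<void()>> work_;
  std::size_t waiting_ = 1;
};

int main() {
  MiniPool pool;
  pool.run([] {});
  return 0;
}
```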