pytorch/caffe2/queue/rebatching_queue_ops.h
Richard Barnes 1622546050 use irange for loops (#70248)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/70248

Modified loops in files under fbsource/fbcode/caffe2/ from the format
```
for(TYPE var=x0;var<x_max;var++)
```
to the format
```
for(const auto var: irange(x_max))
```

This was achieved by running r-barnes's loop upgrader script (D28874212) with some modification to exclude all files under /torch/jit and a number of reversions or unused variable suppression warnings added by hand.

Test Plan: Sandcastle

Reviewed By: malfet

Differential Revision: D32813863

fbshipit-source-id: 527244b4a2b220fdfe7f17dee3599603f492a2ca
2022-01-06 23:14:29 -08:00

86 lines
2.5 KiB
C++

#pragma once
#include "rebatching_queue.h"

#include <memory>

#include "c10/util/irange.h"
namespace caffe2 {
using RebatchingQueuePtr = std::unique_ptr<RebatchingQueue>;
// Creates a RebatchingQueue and stores it in the op's single output blob.
//
// Arguments:
//   capacity  - maximum number of elements the queue holds (default: 1).
//   num_blobs - number of tensors making up one queue element (default: 1).
class CreateRebatchingQueueOp : public Operator<CPUContext> {
 public:
  CreateRebatchingQueueOp(const OperatorDef& operator_def, Workspace* ws)
      : Operator(operator_def, ws) {}

  bool RunOnDevice() override {
    // std::make_unique instead of a raw `new` wrapped in the pointer type:
    // same behavior, but the allocation is expressed through the owner.
    *OperatorBase::Output<RebatchingQueuePtr>(0) =
        std::make_unique<RebatchingQueue>(
            OperatorBase::GetSingleArgument<int>("capacity", 1),
            OperatorBase::GetSingleArgument<int>("num_blobs", 1));
    return true;
  }
};
// Pushes tensors into the RebatchingQueue held in input blob 0.
// Inputs 1..numBlobs() are the tensors to enqueue; their count is enforced
// against the queue's configured blob count. The `enqueue_batch` argument
// (default false) selects enqueueMany over enqueueOne.
class EnqueueRebatchingQueueOp : public Operator<CPUContext> {
 public:
  EnqueueRebatchingQueueOp(const OperatorDef& operator_def, Workspace* ws)
      : Operator(operator_def, ws),
        enqueueBatch_(
            OperatorBase::GetSingleArgument<bool>("enqueue_batch", false)) {}

  bool RunOnDevice() override {
    auto& queue = Inputs()[0]->template Get<RebatchingQueuePtr>();
    CHECK(queue);
    CAFFE_ENFORCE_EQ(InputSize(), queue->numBlobs() + 1);
    // Gather pointers to every data input (everything after the queue blob).
    std::vector<const Tensor*> tensors;
    tensors.reserve(InputSize() - 1);
    for (const auto idx : c10::irange(1, InputSize())) {
      tensors.push_back(&Input(idx));
    }
    if (enqueueBatch_) {
      return queue->enqueueMany(context_, tensors);
    }
    return queue->enqueueOne(context_, tensors);
  }

 private:
  const bool enqueueBatch_;
};
// Pops elements from the RebatchingQueue held in input blob 0 and writes
// them into the op outputs, one tensor per output.
//
// Arguments:
//   num_elements - number of elements to dequeue per run (default: 1).
class DequeueRebatchingQueueOp : public Operator<CPUContext> {
 public:
  DequeueRebatchingQueueOp(const OperatorDef& operator_def, Workspace* ws)
      : Operator(operator_def, ws),
        numElements_(OperatorBase::GetSingleArgument<int>("num_elements", 1)) {}

  bool RunOnDevice() override {
    auto& queue = Inputs()[0]->template Get<RebatchingQueuePtr>();
    CHECK(queue);
    // Collect the destination tensors, one per op output.
    std::vector<Tensor*> outputTensors;
    outputTensors.reserve(OutputSize());
    for (const auto i : c10::irange(OutputSize())) {
      outputTensors.push_back(Output(i));
    }
    return queue->dequeue(context_, numElements_, outputTensors);
  }

 private:
  // Set once in the constructor; const for consistency with
  // EnqueueRebatchingQueueOp::enqueueBatch_.
  const int numElements_;
};
// Closes the RebatchingQueue held in the op's single input blob.
// See RebatchingQueue::close for the exact close semantics.
class CloseRebatchingQueueOp : public Operator<CPUContext> {
 public:
  CloseRebatchingQueueOp(const OperatorDef& operator_def, Workspace* ws)
      : Operator(operator_def, ws) {}

  bool RunOnDevice() override {
    // Exactly one input: the queue blob itself.
    CAFFE_ENFORCE_EQ(InputSize(), 1);
    const auto& queuePtr = Inputs()[0]->template Get<RebatchingQueuePtr>();
    CAFFE_ENFORCE(queuePtr);
    queuePtr->close();
    return true;
  }
};
} // namespace caffe2