irange-ify 7 (#62117)

Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/62117

Test Plan: Sandcastle

Reviewed By: ngimel

Differential Revision: D29879640

fbshipit-source-id: 189578a57301747a3421742e145bbcdf2ad75c49
This commit is contained in:
Richard Barnes 2021-07-28 13:27:37 -07:00 committed by Facebook GitHub Bot
parent 59bb4f2dab
commit b5867a1b34
6 changed files with 17 additions and 12 deletions

View File

@@ -2,6 +2,7 @@
#include <ATen/core/ivalue.h>
#include <c10/util/ScopeExit.h>
#include <c10/util/irange.h>
#include <caffe2/serialize/inline_container.h>
#include <torch/csrc/jit/api/compilation_unit.h>
#include <torch/csrc/jit/mobile/interpreter.h>
@@ -169,7 +170,7 @@ c10::intrusive_ptr<c10::ivalue::Object> objLoaderMobile(
size_t ndict = dict.size();
auto obj = c10::ivalue::Object::create(type, ndict);
auto it = dict.begin();
for (size_t i = 0; i < ndict; ++i) {
for (const auto i : c10::irange(ndict)) {
std::stringstream name;
name << it->key();
cls->addOrCheckAttribute(name.str(), it->key().type());
@@ -323,7 +324,7 @@ void BytecodeDeserializer::parseMethods(
}
// Process all methods in this mobile module.
for (size_t i = method_i_start; i < vals.size(); ++i) {
for (const auto i : c10::irange(method_i_start, vals.size())) {
const auto& element = vals[i];
const auto& m_tuple = element.toTuple()->elements();
const std::string& function_name = m_tuple[0].toStringRef();
@@ -380,8 +381,8 @@ void BytecodeDeserializer::parseMethods(
"The numbers of instructions and debug handles strings do not match.");
}
for (size_t i = 0; i < ins_list.size(); ++i) {
auto ins_item = ins_list[i].toTuple()->elements();
for (const auto j : c10::irange(ins_list.size())) {
auto ins_item = ins_list[j].toTuple()->elements();
TORCH_CHECK(
ins_item.size() == 3,
"There should be three parts in an instruction. The function name is ",
@@ -390,7 +391,7 @@ void BytecodeDeserializer::parseMethods(
int X = ins_item[1].toInt();
int N = ins_item[2].toInt();
if (has_debug_handles) {
int64_t debug_handle = debug_handles_list[i].toInt();
int64_t debug_handle = debug_handles_list[j].toInt();
function->append_instruction(op_code, X, N, debug_handle);
} else {
function->append_instruction(op_code, X, N);

View File

@@ -1,6 +1,7 @@
#include <torch/csrc/jit/mobile/import_data.h>
#include <ATen/core/ivalue.h>
#include <c10/util/irange.h>
#include <caffe2/serialize/inline_container.h>
#include <torch/csrc/jit/api/compilation_unit.h>
#include <torch/csrc/jit/mobile/observer.h>
@@ -127,7 +128,7 @@ c10::IValue BytecodeDeserializer::readArchive(
size_t ndict = dict.size();
auto obj = c10::ivalue::Object::create(type, ndict);
auto it = dict.begin();
for (size_t i = 0; i < ndict; ++i) {
for (const auto i : c10::irange(ndict)) {
std::stringstream name;
name << it->key();
cls->addOrCheckAttribute(name.str(), it->key().type());

View File

@@ -8,6 +8,7 @@
#include <torch/csrc/jit/runtime/vararg_functions.h>
#include <ATen/record_function.h>
#include <c10/util/irange.h>
#include <torch/csrc/jit/mobile/observer.h>
#include <torch/csrc/jit/backends/backend_exception.h>
@@ -169,7 +170,7 @@ bool InterpreterState::run(Stack& stack) {
++pc;
} else {
size_t n_loop_carried = inst.N - 2;
for (size_t i = 0; i < n_loop_carried; ++i) {
for (const auto i : c10::irange(n_loop_carried)) {
frame[i] = std::move(frame[i + 3]);
}
drop(stack, 3); // iteration_count, max_iter, cond

View File

@@ -1,4 +1,5 @@
#include <ATen/core/ivalue.h>
#include <c10/util/irange.h>
#include <caffe2/serialize/file_adapter.h>
#include <caffe2/serialize/inline_container.h>
#include <torch/csrc/jit/api/compilation_unit.h> // removed after using simple type_resolver/obj_loader
@@ -148,8 +149,7 @@ std::unordered_map<std::string, OperatorInfo> _get_model_ops_and_info(
return result;
}
// loop over all the functions in the bytecode
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (int i = 1; i < bytecode_ivalues.size(); i++) {
for (const auto i : c10::irange(1, bytecode_ivalues.size())) {
// descend to the operators list
auto method_tuple = bytecode_ivalues.at(i).toTuple()->elements();
auto operators_tuple = method_tuple.at(1).toTuple()->elements()[1];

View File

@@ -7,6 +7,7 @@
#include <exception>
#include <ATen/record_function.h>
#include <c10/util/irange.h>
namespace torch {
namespace jit {
@@ -76,7 +77,7 @@ void slot_named_params_recurse(
const std::string& parent_name) {
auto slots = obj->slots();
size_t nslots = slots.size();
for (size_t i = 0; i < nslots; ++i) {
for (const auto i : c10::irange(nslots)) {
auto slot = slots[i];
std::string name =
parent_name.size() == 0 ? parent_name : parent_name + ".";

View File

@@ -3,6 +3,7 @@
#include <ATen/Functions.h>
#include <ATen/core/functional.h>
#include <c10/core/CPUAllocator.h>
#include <c10/util/irange.h>
#include <torch/csrc/jit/mobile/nnc/registry.h>
@@ -197,7 +198,7 @@ c10::impl::GenericList Function::run(
input_specs_.size(),
" actual: ",
inputs.size());
for (size_t i = 0; i < inputs.size(); ++i) {
for (const auto i : c10::irange(inputs.size())) {
const c10::IValue& input = inputs[i];
const auto& input_tensor = input.toTensor();
TORCH_CHECK(
@@ -208,7 +209,7 @@ c10::impl::GenericList Function::run(
// Preallocate and fill in output tensors.
c10::List<at::Tensor> outputs;
outputs.reserve(output_specs_.size());
for (size_t i = 0; i < output_specs_.size(); ++i) {
for (const auto i : c10::irange(output_specs_.size())) {
at::Tensor output = output_specs_[i].allocate();
outputs.emplace_back(output);
args[inputs.size() + i] = output.data_ptr();