[3/N] Fix unused loop variables (#166509)
This PR removes unused loop variables in tests.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/166509
Approved by: https://github.com/Lucaskabela, https://github.com/Skylion007
This commit is contained in:
parent 99b05d1b78
commit 0d50e5d8d4
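The change applied throughout the diff below is mechanical: when a loop's induction variable (or an enumerate index) is never read inside the loop body, it is renamed to _ (or given a leading underscore), and a now-redundant enumerate() or named_*() wrapper is dropped. The sketch that follows is a minimal illustration of the before/after shapes; the function and variable names are hypothetical and not taken from the diff, and lint rules such as flake8-bugbear's B007 (loop control variable not used within the loop body) typically flag the "before" form.

# Before: i and idx are bound but never read in the loop bodies.
def build_before(items):
    batches = []
    for i in range(3):                  # unused index
        batches.append(list(items))
    for idx, item in enumerate(items):  # unused enumerate index
        batches.append([item])
    return batches

# After: rename the unused index to _, or drop the redundant enumerate().
def build_after(items):
    batches = []
    for _ in range(3):
        batches.append(list(items))
    for item in items:
        batches.append([item])
    return batches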
@@ -827,7 +827,7 @@ class TestFullyShardShardPlacementFnMultiProcess(FSDPTest):
         torch.manual_seed(42 + self.rank)
         inp = torch.randint(0, model_args.vocab_size, (2, 16), device=device_type.type)
-        for iter_idx in range(5):
+        for _ in range(5):
             ref_loss = ref_model(inp).sum()
             loss = model(inp).sum()
             self.assertEqual(ref_loss, loss)
@@ -262,7 +262,7 @@ class ConstLoop(torch.nn.Module):
         self.count = 3

     def forward(self, x):
-        for i in range(self.count):
+        for _ in range(self.count):
             x = torch.sigmoid(self.linear1(x))
         return x
@@ -509,7 +509,7 @@ class CfgModule(torch.nn.Module):
         self.layer = torch.nn.Linear(10, 10)

     def forward(self, x):
-        for i in range(self.cfg.count):
+        for _ in range(self.cfg.count):
             x = self.layer(x + self.cfg.val)
         return x
@@ -781,7 +781,7 @@ class ParametersModule5(torch.nn.Module):

     def forward(self, x):
         counter = 0
-        for param in self.parameters():
+        for _param in self.parameters():
             counter += 1

         return x * self.scale * counter
@@ -841,7 +841,7 @@ class EnumValues(torch.nn.ModuleDict):

     def forward(self, init_features):
         features = [init_features]
-        for idx, layer in enumerate(self.values()):
+        for layer in self.values():
             new_features = layer(features)
             features.append(new_features)
         return torch.cat(features, 1)
@@ -2161,7 +2161,7 @@ class OptimizedModuleTest(torch._dynamo.test_case.TestCase):

         cnts = torch._dynamo.testing.CompileCounterWithBackend("eager")
         opt_mod = torch.compile(fn, backend=cnts)
-        for i in range(8):
+        for _ in range(8):
             mod = Mod()
             opt_mod(torch.randn(5, 5), mod)
@@ -2516,7 +2516,7 @@ class OptimizedModuleTest(torch._dynamo.test_case.TestCase):
         compiled_model = torch.compile(model, backend="aot_eager")

         activations = compiled_activations
-        for i in range(2):
+        for _ in range(2):
             # second iteration is key, hooks would have fired during aot trace
             # on first iter
             compiled_activations.clear()
@@ -2526,7 +2526,7 @@ class OptimizedModuleTest(torch._dynamo.test_case.TestCase):
             loss.backward()

         activations = eager_activations
-        for i in range(2):
+        for _ in range(2):
             # second iteration is key, hooks would have fired during aot trace
             # on first iter
             eager_activations.clear()
@@ -2575,12 +2575,12 @@ class OptimizedModuleTest(torch._dynamo.test_case.TestCase):
         def save_activations(mod, inp, out):
             activations.append(inp)

-        for name, module in model.named_modules():
+        for module in model.modules():
             module.register_forward_hook(save_activations)

         cnt = torch._dynamo.testing.CompileCounter()
         model = torch.compile(model, backend=cnt, fullgraph=True)
-        for i in range(2):
+        for _ in range(2):
             # second iteration is key, hooks would have fired during aot trace
             # on first iter
             activations.clear()
@@ -2703,7 +2703,7 @@ class OptimizedModuleTest(torch._dynamo.test_case.TestCase):

         model = torch.compile(model, backend="aot_eager")

-        for i in range(2):
+        for _ in range(2):
             # second iteration is key, hooks would have fired during aot trace
             # on first iter
             x = torch.randn((20, 10))
@@ -7555,7 +7555,7 @@ metadata incorrectly.
             (_inp, _tg3),
         ]

-        for i, (inp_fn, tg_fn) in enumerate(TEST_CASES):
+        for inp_fn, tg_fn in TEST_CASES:
             ref_x = inp_fn()
             x = ref_x.detach().clone().requires_grad_()
@@ -491,9 +491,7 @@ def forward(self, arg0_1, arg1_1, arg2_1):
         def ins_dense():
             return torch.tensor([1.0, 2.0, 3.0]), torch.tensor([4.0, 5.0, 6.0])

-        for i, (ins_fn, expected_fw_count) in enumerate(
-            zip([ins_sc, ins_dense], [2, 1])
-        ):
+        for ins_fn, expected_fw_count in zip([ins_sc, ins_dense], [2, 1]):
             reset_counter()
             ref_out = fn(*ins_fn())
             assert_counter(expected_fw_count, 0)
@@ -524,16 +522,14 @@ def forward(self, arg0_1, arg1_1, arg2_1):
             ),
         )

-        for i, (
+        for (
             ins_fn_req_grad,
             (
                 expected_fw_count,
                 expected_fw_count_after_bw,
                 expected_bw_count_after_bw,
             ),
-        ) in enumerate(
-            zip([ins_dense_req_grad, ins_sc_req_grad], [(1, 1, 1), (2, 2, 2)])
-        ):
+        ) in zip([ins_dense_req_grad, ins_sc_req_grad], [(1, 1, 1), (2, 2, 2)]):
             ref_ins = ins_fn_req_grad()
             reset_counter()
             ref_out = fn(*ref_ins)
@@ -35,7 +35,7 @@ class TestAsync(JitTestCase):
     def test_async_future_type_python(self):
         def foo(inp):
             futures = torch.jit.annotate(List[torch.jit.Future[torch.Tensor]], [])
-            for i in range(5):
+            for _ in range(5):
                 futures.append(torch.jit.fork(lambda x: x, inp))
             all_outputs = []
             for future in futures:
@@ -458,7 +458,7 @@ class TestAsync(JitTestCase):
         class TestListFutureModule(nn.Module):
             def forward(self, input):
                 input_list = []
-                for i in range(3):
+                for _ in range(3):
                     input_list.append(input)

                 fut_list: List[Future[torch.Tensor]] = []
@@ -68,7 +68,7 @@ class TestAutodiffJit(JitTestCase):

         fn_s = torch.jit.script(fn)

-        for i in range(4):
+        for _ in range(4):
             x, y = fn_s(a, b, c)
             self.assertFalse(x.requires_grad)
             self.assertTrue(y.requires_grad)
@@ -90,7 +90,7 @@ class TestAutodiffJit(JitTestCase):
         b = torch.rand((10, 10), requires_grad=False)
         c = torch.rand((10, 10), requires_grad=True)

-        for i in range(4):
+        for _ in range(4):
             x_s, y_s, z_s = fn_s(a, b, c)
             x, y, z = fn(a, b, c)
@@ -115,7 +115,7 @@ class TestAutodiffJit(JitTestCase):
         b = torch.rand((10, 10), requires_grad=False)
         c = torch.rand((10, 10), requires_grad=True)

-        for i in range(4):
+        for _ in range(4):
             x_s, y_s, z_s = fn_s(a, b, c)
             x, y, z = fn(a, b, c)
@@ -141,7 +141,7 @@ class TestAutodiffJit(JitTestCase):
         b = torch.rand((10, 10), requires_grad=True)
         c = torch.rand((10, 10), requires_grad=True)

-        for i in range(4):
+        for _ in range(4):
             x_s, y_s, z_s = fn_s(a, b, c)
             x, y, z = fn(a, b, c)
@@ -2989,7 +2989,7 @@ class TestScriptList(JitTestCase):
         test_script.segments_groupby_col

         # Smoketest for flakiness. Takes around 2s.
-        for i in range(300):
+        for _ in range(300):
             test = Test()
             test_script = torch.jit.script(test)
@@ -19,7 +19,7 @@ class TestLogging(JitTestCase):
         class ModuleThatLogs(torch.jit.ScriptModule):
             @torch.jit.script_method
             def forward(self, x):
-                for i in range(x.size(0)):
+                for _ in range(x.size(0)):
                     x += 1.0
                     torch.jit._logging.add_stat_value("foo", 1)
@@ -33,7 +33,7 @@ class TestLogging(JitTestCase):
         old_logger = torch.jit._logging.set_logger(logger)
         try:
             mtl = ModuleThatLogs()
-            for i in range(5):
+            for _ in range(5):
                 mtl(torch.rand(3, 4, 5))

             self.assertEqual(logger.get_counter_val("foo"), 15)
@@ -60,7 +60,7 @@ class TestLogging(JitTestCase):
         class ModuleThatTimes(torch.jit.ScriptModule):
             def forward(self, x):
                 tp_start = torch.jit._logging.time_point()
-                for i in range(30):
+                for _ in range(30):
                     x += 1.0
                 tp_end = torch.jit._logging.time_point()
                 torch.jit._logging.add_stat_value("mytimer", tp_end - tp_start)
@@ -80,7 +80,7 @@ class TestLogging(JitTestCase):
             @torch.jit.script_method
             def forward(self, x):
                 tp_start = torch.jit._logging.time_point()
-                for i in range(30):
+                for _ in range(30):
                     x += 1.0
                 tp_end = torch.jit._logging.time_point()
                 torch.jit._logging.add_stat_value("mytimer", tp_end - tp_start)
@@ -97,7 +97,7 @@ class TestLogging(JitTestCase):

     def test_counter_aggregation(self):
         def foo(x):
-            for i in range(3):
+            for _ in range(3):
                 torch.jit._logging.add_stat_value("foo", 1)
             return x + 1.0
@@ -518,7 +518,7 @@ class TestMisc(JitTestCase):
         ref = fn(x)

         script_fn = torch.jit.script(fn)
-        for i in range(4):
+        for _ in range(4):
             res = script_fn(x)

         self.assertEqual(ref, res)
@@ -2025,7 +2025,7 @@ class TestTracer(JitTestCase):
         module = torch.jit.trace_module(n, inputs)

         check_inputs = []
-        for i in range(2):
+        for _ in range(2):
             check_weight = torch.rand(1, 1, 3, 3)
             check_forward_input = torch.rand(1, 1, 3, 3)
             check_inputs.append(
@@ -341,7 +341,7 @@ class TestTypesAndAnnotation(JitTestCase):
                 self.x = x

             def set(self, val: int):
-                for i in range(3):
+                for _ in range(3):
                     self.x: int = val

         # Type annotation in __init__, should not fail
@@ -582,14 +582,14 @@ class TestAutograd(TestCase):
         ctx_1 = torch.autograd.graph.saved_tensors_hooks(lambda x: x, unpack)
         ctx_2 = torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x)

-        for i in range(10):
+        for _ in range(10):
             with ctx_2:
                 ctx_1.__enter__()
                 x = torch.randn(3, 3, requires_grad=True)
                 x.sin().sum().backward()

         # Clean up
-        for i in range(10):
+        for _ in range(10):
             ctx_1.__exit__()

         # Validate there are no more hooks on the stack
@@ -2989,7 +2989,7 @@ class TestAutograd(TestCase):
         state = set()
         with torch.enable_grad():
             coro = coro_no_grad(state)
-            for i in range(5):
+            for _ in range(5):
                 next(coro)

             coro.close()
@@ -2998,7 +2998,7 @@ class TestAutograd(TestCase):
         state = set()
         with torch.no_grad():
             coro = coro_enable_grad(state)
-            for i in range(5):
+            for _ in range(5):
                 next(coro)

             coro.close()
@@ -5293,7 +5293,7 @@ Done""",
         rnn = torch.nn.LSTM(10, 20, 2)
         total_time_s = 0
         with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
-            for i in range(20):
+            for _ in range(20):
                 input = torch.randn(5, 3, 10)
                 h = torch.randn(2, 3, 20)
                 c = torch.randn(2, 3, 20)
@@ -5925,7 +5925,7 @@ Done""",
         self.assertTrue(p_a == p_g or p_b == p_g)

         # Run backwards multiple times to ensure accumulation works.
-        for i in range(10):
+        for _ in range(10):
             loss.backward(retain_graph=True)

         # non-contiguous indices and value, we should trigger a copy.
@@ -5943,7 +5943,7 @@ Done""",
         self.assertFalse(p_b == p_g)

         # Run backwards multiple times to ensure accumulation works.
-        for i in range(10):
+        for _ in range(10):
             loss.backward(retain_graph=True)

     def test_gradcheck_single_input(self):
@@ -7132,7 +7132,7 @@ for shape in [(1,), ()]:
         )

         feat_combined = []
-        for r in range(num_inp):
+        for _ in range(num_inp):
             data_r = torch.empty(1, nz_inp)
             data_r.uniform_()
             data_r.requires_grad = True
@@ -7202,7 +7202,7 @@ for shape in [(1,), ()]:
                 self.use_checkpoint = use_checkpoint
                 self.use_reentrant = use_reentrant
                 self.layers = nn.ModuleList()
-                for i in range(self.n):
+                for _ in range(self.n):
                     layer = nn.Sequential(
                         nn.Linear(256, 256), nn.Linear(256, 256), nn.Linear(256, 256)
                     )
@@ -7513,7 +7513,7 @@ for shape in [(1,), ()]:

         feat_combined = []
         feat_combined_no_checkpoint = []
-        for r in range(num_inp):
+        for _ in range(num_inp):
             data_r = torch.empty(1, nz_inp)
             data_r.uniform_()
             data_r.requires_grad = input_requires_grad
@@ -11714,7 +11714,7 @@ class TestAutogradDeviceType(TestCase):
     def test_parameter_resize(self, device):
         asd = torch.nn.Parameter(torch.ones(16, dtype=torch.double, device=device))

-        for i in range(2):
+        for _ in range(2):
             with torch.no_grad():
                 asd.set_(asd[1:])
                 asd.grad = None
@@ -11942,7 +11942,7 @@ class TestAutogradDeviceType(TestCase):

                 # Child gpu graph (much longer than parent graph).
                 prev = t2 * t2
-                for i in range(10):
+                for _ in range(10):
                     prev = prev * t2
                 reentrant_root = prev
@@ -4784,7 +4784,7 @@ print(torch.cuda.get_allocator_backend())
             total -= x.numel()

         choices = [alloc, free, torch.cuda.memory.empty_cache]
-        for i in range(N):
+        for _ in range(N):
             while total >= 1024 * 1024 * 1024 / (4 * 10):
                 free()
             (action,) = random.choices(choices, weights=[1, 1 if mem else 0, 0.1])
@@ -512,7 +512,7 @@ class FakeTensorTest(TestCase):
     def test_upsample_bilinear_small_channels(self):
         out = []
         mode = FakeTensorMode()
-        for i, context in enumerate([contextlib.nullcontext, lambda: mode]):
+        for context in [contextlib.nullcontext, lambda: mode]:
             with context():
                 arg0_1 = torch.empty_strided(
                     (3, 427, 640), (1, 1920, 3), dtype=torch.float32, device="cuda"
@@ -318,13 +318,11 @@ class TestForeach(TestCase):
             return arg

         scalar_self_arg_test_complete = False
-        for i, sample in enumerate(
-            op.sample_inputs(
-                device,
-                dtype,
-                noncontiguous=not is_fastpath,
-                allow_higher_dtype_scalars=True,
-            )
+        for sample in op.sample_inputs(
+            device,
+            dtype,
+            noncontiguous=not is_fastpath,
+            allow_higher_dtype_scalars=True,
         ):
             (rhs_arg,) = sample.args
             kwargs = {} or sample.kwargs
@@ -156,7 +156,7 @@ class TestIndexing(TestCase):
             torch.DoubleTensor if not device.startswith("mps") else torch.FloatTensor
         )
         tensor = _make_tensor(lst).to(device)
-        for _i in range(100):
+        for _ in range(100):
             idx1_start = random.randrange(10)
             idx1_end = idx1_start + random.randrange(1, 10 - idx1_start + 1)
             idx1_step = random.randrange(1, 8)
@@ -2337,9 +2337,9 @@ graph(%Ra, %Rb):
             print("stays")
             while False:
                 print("removed")
-            for _i in range(0):
+            for _ in range(0):
                 print("removed")
-            for _i in range(-4):
+            for _ in range(-4):
                 print("removed")
             return b
@@ -3138,7 +3138,7 @@ class TestScript(JitTestCase):
         with enable_profiling_mode_for_profiling_tests():

             def fct_loop(x):
-                for i in range(3):
+                for _ in range(3):
                     x = torch.cat((x, x), 0)
                 return x
@@ -3245,7 +3245,7 @@ class TestScript(JitTestCase):
     def test_nested_bailouts(self):
         @torch.jit.script
         def fct_loop(x):
-            for i in range(3):
+            for _ in range(3):
                 x = torch.cat((x, x), 0)
             return x
@@ -3907,7 +3907,7 @@ def foo(x):
             else:
                 return f'v{idx - len(exprs)}'

-        for i in range(50):
+        for _ in range(50):
             n = None
             while n is None or n > len(exprs) + n_variables:
                 template = random.choice(templates)
@@ -3922,7 +3922,7 @@ def foo(x):
             src_lines.append(' return ({})\n'.format(''.join(f'v{i},' for i in range(n_variables))))
             return '\n'.join(src_lines)

-        for i in range(100):
+        for _ in range(100):
             g = {'torch': torch}
             code = gen_code()
             builtins.exec(code, g, None)
@@ -4602,7 +4602,7 @@ def foo(xyz):
         y = torch.randn(3, 3, requires_grad=True)

         def grad_in_loop(x, y):
-            for i in range(100):
+            for _ in range(100):
                 x = y @ x
             return x
@@ -5559,7 +5559,7 @@ a")
         @torch.jit.script
         def test(x):
             after_resize_alias = torch.zeros([2])
-            for _i in range(5):
+            for _ in range(5):
                 b = x + 1
                 f = [1]
                 before_resize_alias = b.sub_(1)
@@ -5950,7 +5950,7 @@ a")
             # type: (int) -> int
             prev = 1
             v = 1
-            for i in range(x):
+            for _ in range(x):
                 save = v
                 v = v + prev
                 prev = save
@@ -7785,7 +7785,7 @@ dedent """
             while int(tensor.add_(1)) < 4:
                 if y == 1:
                     continue
-                for i in range(y):
+                for _ in range(y):
                     continue
                 ret += 1
             ret += 1
@@ -7896,7 +7896,7 @@ dedent """
         def assign_after_break_nested(y):
             # type: (int)
             x = 0
-            for i in range(y):
+            for _ in range(y):
                 if y == 1:
                     x = 5
                     break
@@ -7916,7 +7916,7 @@ dedent """
         def may_break(y):
             # type: (int)
             x = 0
-            for i in range(y):
+            for _ in range(y):
                 if y == 1:
                     x = 5
                 else:
@@ -7988,7 +7988,7 @@ dedent """
         def test_varexit(cond):
             # type: (int)
             m = 0
-            for i in range(3):
+            for _ in range(3):
                 if cond == 2:
                     if cond == 2:
                         m = 2
@@ -8376,7 +8376,7 @@ dedent """
             # find the last output, then all subsequent uses
             fc.check(out_name[-1] + " : ")
             # skip past node body
-            for i in range(contained_blocks(node)):
+            for _ in range(contained_blocks(node)):
                 fc.check("->")
             if (node.kind() == "prim::If"):
                 fc.check("->").check("->").check("\n")
@@ -8429,7 +8429,7 @@ dedent """
             a = 1
             b = 2
             c = 3
-            for i in range(iter):
+            for _ in range(iter):
                 a = 4
                 b = 5
                 c = 6
@@ -8445,7 +8445,7 @@ dedent """
             a = 1
             b = 2
             c = 3
-            for i in range(iter):
+            for _ in range(iter):
                 c = c + 1
                 b = b + 1
                 a = a + 1
@@ -10938,7 +10938,7 @@ dedent """

         # Test symbolic differentiation
         # Run Forward and Backward thrice to trigger autodiff graph
-        for i in range(3):
+        for _ in range(3):
             y = jit_module(x)
             y.backward(grad)
             x.grad.zero_()
@@ -11030,7 +11030,7 @@ dedent """
         W.data /= 4

         with enable_profiling_mode_for_profiling_tests():
-            for i in range(4):
+            for _ in range(4):
                 self.assertTrue((foo(x, y, W).grad_fn is None) == (jitted_foo(x, y, W).grad_fn is None))
@@ -11822,7 +11822,7 @@ dedent """
     def test_for_in_tensors(self):
         def test_sizes(x):
             sumz = 0
-            for s in x:
+            for _ in x:
                 sumz += 1
             return sumz
         self.checkScript(test_sizes, (torch.rand(5, 4, 3, 2, 1),))
@@ -11834,7 +11834,7 @@ dedent """
         @torch.jit.script
         def test_sizes(x):
             sumz = 0
-            for s in x:
+            for _ in x:
                 sumz += 1
             return sumz
@@ -11846,7 +11846,7 @@ dedent """
         def test_sizes(x):
             # type: (float) -> int
             sumz = 0
-            for s in x:
+            for _ in x:
                 sumz += 1
             return sumz
@@ -11856,7 +11856,7 @@ dedent """
         def test_sizes(x):
             sumz = 0
             for n in x:
-                for t in n:
+                for _ in n:
                     sumz += 1
             return sumz
@@ -12316,7 +12316,7 @@ dedent """

             @torch.jit.script_method
             def forward(self, x):
-                for _i in range(4):
+                for _ in range(4):
                     x += self.param
                 return x
@@ -12840,7 +12840,7 @@ dedent """

         # Load from filename
         tracemalloc.start()
-        for i in range(num_iters):
+        for _ in range(num_iters):
             torch._C.PyTorchFileReader(filename)
         _, peak_from_string = tracemalloc.get_traced_memory()
         tracemalloc.stop()
@@ -12848,7 +12848,7 @@ dedent """
         # Load from stream
         tracemalloc.start()
         with open(filename, 'rb') as f:
-            for i in range(num_iters):
+            for _ in range(num_iters):
                 f.seek(0)
                 torch._C.PyTorchFileReader(f)
         _, peak_from_file = tracemalloc.get_traced_memory()
@@ -13287,7 +13287,7 @@ dedent """
     def test_pass(self):
         def foo(x):
             # type: (bool) -> int
-            for _i in range(3):
+            for _ in range(3):
                 pass
             if x:
                 pass
@@ -13903,7 +13903,7 @@ dedent """
         def test_loop_no_escape(x):
             # type: (int)
             if x >= 0:
-                for i in range(x):
+                for _ in range(x):
                     raise RuntimeError("hi")
             else:
                 return 5
@@ -14116,7 +14116,7 @@ dedent """

         def test_will_ret(y):
             # type: (int) -> int
-            for i in range(y):
+            for _ in range(y):
                 return 2
             return 1
@@ -14125,8 +14125,8 @@ dedent """

         def test_loop_nest_ret(y):
             # type: (int) -> int
-            for i in range(y):
-                for i in range(y - 2):
+            for _ in range(y):
+                for _ in range(y - 2):
                     return 10
                 return 5
             return 0
@@ -15387,7 +15387,7 @@ dedent """
             if isinstance(item, list):
                 return is_tensor_value(item[0])
             return False
-        for name, value, the_type in self.get_pickle_values():
+        for name, value, _the_type in self.get_pickle_values():
             if is_tensor_value(value):
                 continue
             self.assertEqual(value, getattr(loaded, "_" + name))
@@ -15768,7 +15768,7 @@ dedent """
     def test_for_else(self):
         def fn():
             c = 0
-            for i in range(4):
+            for _ in range(4):
                 c += 10
             else:
                 print("In else block of for...else")
@@ -115,7 +115,7 @@ class TestPythonJiterator(TestCase):
     @parametrize("num_inputs", [1, 5, 8])
     def test_various_num_inputs(self, num_inputs):
         inputs = []
-        for i in range(num_inputs):
+        for _ in range(num_inputs):
             inputs.append(torch.rand(3, device='cuda').mul(10))

         input_string = ",".join([f"T i{i}" for i in range(num_inputs)])
@@ -58,7 +58,7 @@ class SubProcess(mp.Process):


 def _test_cuda_ipc_deadlock_actor(queue, iterations):
-    for i in range(iterations):
+    for _ in range(iterations):
         if not queue.empty():
             queue.get()
         time.sleep(0.01)
@@ -66,7 +66,7 @@ def _test_cuda_ipc_deadlock_actor(queue, iterations):

 def _test_cuda_ipc_deadlock_learner(queue, iterations):
     net = torch.nn.LSTM(1, 1).cuda()
-    for i in range(iterations):
+    for _ in range(iterations):
         if not queue.full():
             queue.put(copy.deepcopy(net.state_dict()))
         time.sleep(0.01)
@@ -138,7 +138,7 @@ def send_tensor_with_untyped_storage(queue, event):

 def receive_and_send_sum(queue, out_queue, event, device, dtype, count, size=5):
     s = torch.full([size], 0, device=device, dtype=dtype)
-    for i in range(count):
+    for _ in range(count):
         t = queue.get()
         s += t
     out_queue.put(s)
@@ -146,7 +146,7 @@ def receive_and_send_sum(queue, out_queue, event, device, dtype, count, size=5):


 def receive_and_send(queue, out_queue, event, count):
-    for i in range(count):
+    for _ in range(count):
         t = queue.get()
         out_queue.put(t.clone())
     event.wait()
@@ -1238,7 +1238,7 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""")

         def check():
             self.assertEqual(len(parameter_dict), len(parameters))
-            for i, (k1, (k2, m2)) in enumerate(zip(parameters, parameter_dict.named_parameters())):
+            for (k1, (k2, m2)) in zip(parameters, parameter_dict.named_parameters()):
                 self.assertEqual(k1, k2)
                 self.assertIs(parameters[k1], m2)
             for k1, k2 in zip(parameters, parameter_dict):
@@ -2958,7 +2958,7 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""")
                 batch_first=batch_first)

         # set constant weights of the model
-        for idx, p in enumerate(model.parameters()):
+        for p in model.parameters():
             x = p.data
             sz = x.view(-1).size(0)
             shape = x.shape
@@ -3108,7 +3108,7 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""")
                 activation, batch_first=batch_first)

         # set constant weights of the model
-        for idx, p in enumerate(model.parameters()):
+        for p in model.parameters():
             x = p.data
             sz = x.view(-1).size(0)
             shape = x.shape
@@ -3185,7 +3185,7 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""")

         with torch.no_grad():
             # set constant weights of the model
-            for idx, p in enumerate(layer.parameters()):
+            for p in layer.parameters():
                 x = p.data
                 sz = x.view(-1).size(0)
                 shape = x.shape
@@ -13129,7 +13129,7 @@ if __name__ == '__main__':
         model = model.eval()

         # set constant weights of the model
-        for idx, p in enumerate(model.parameters()):
+        for p in model.parameters():
             x = p.data
             sz = x.view(-1).size(0)
             shape = x.shape
@@ -13349,7 +13349,7 @@ if __name__ == '__main__':
         model = model.eval()

         # set constant weights of the model
-        for idx, p in enumerate(model.parameters()):
+        for p in model.parameters():
             x = p.data
             sz = x.view(-1).size(0)
             shape = x.shape
@@ -1483,7 +1483,7 @@ class TestSparse(TestSparseBase):
         def test_shape(num_mats, dim_i, dim_j, dim_k, nnz):
             a_list = []
             b_list = []
-            for mat_idx in range(num_mats):
+            for _ in range(num_mats):
                 a_list.append(self._gen_sparse(2, nnz, [dim_i, dim_j], dtype, device, coalesced)[0])
                 b_list.append(torch.randn([dim_j, dim_k], dtype=dtype, device=device))
@@ -4251,9 +4251,9 @@ class TestSparseCompressedTritonKernels(TestCase):
         # Test warn_once when requesting non-existing tuned parameters multiple times
         f = io.StringIO()
         with redirect_stderr(f):
-            for i in range(5):
+            for _ in range(5):
                 get_meta(16, 16, 16)
-            for i in range(5):
+            for _ in range(5):
                 get_meta(16, 16, 32)

         msg = f.getvalue()
@@ -139,7 +139,7 @@ def fork_wait_graph_exception(input1, input2):

 def loop_graph(a, b, iters: int):
     c = a + b * 2
-    for i in range(iters):
+    for _ in range(iters):
         c = c + b
         c *= 2
         c -= a
@@ -81,7 +81,7 @@ def _generate_input(shape, dtype, device, with_extremal):
 # TODO: replace with make_tensor
 def _rand_shape(dim, min_size, max_size):
     shape = []
-    for i in range(dim):
+    for _ in range(dim):
         shape.append(random.randint(min_size, max_size))
     return tuple(shape)
@@ -942,7 +942,7 @@ class TestTensorCreation(TestCase):
             num_tensors = random.randint(1, 5)
             torch_input = []
             # Create tensors with shape being different along one axis only
-            for param in range(num_tensors):
+            for _ in range(num_tensors):
                 shape[i] = random.randint(1, 5)
                 torch_input.append(_generate_input(tuple(shape), dtype, device, with_extremal=False))
@@ -997,7 +997,7 @@ class TestTensorCreation(TestCase):
         ops = ((torch.vstack, np.vstack), (torch.row_stack, np.vstack))
         for torch_op, np_op in ops:
             self._test_special_stacks(0, 2, torch_op, np_op, device, dtype)
-            for i in range(5):
+            for _ in range(5):
                 # Test dimension change for 1D tensor of size (N) and 2D tensor of size (1, N)
                 n = random.randint(1, 10)
                 input_a = _generate_input((n,), dtype, device, with_extremal=False)
@@ -1012,7 +1012,7 @@ class TestTensorCreation(TestCase):
     @dtypes(*all_types_and_complex_and(torch.half))
     def test_dstack(self, device, dtype):
         self._test_special_stacks(2, 3, torch.dstack, np.dstack, device, dtype)
-        for i in range(5):
+        for _ in range(5):
             # Test dimension change for 1D tensor of size (N), 2D tensor of size (1, N), and 3D tensor of size (1, N, 1)
             n = random.randint(1, 10)
             input_a = _generate_input((n,), dtype, device, with_extremal=False)
@@ -2885,7 +2885,7 @@ class TestTensorCreation(TestCase):
     @dtypesIfCUDA(torch.float, torch.double, torch.bfloat16, torch.half, torch.long)
     @dtypes(torch.float, torch.double, torch.long, torch.bfloat16, torch.float16)
     def test_kaiser_window(self, device, dtype):
-        for num_test in range(50):
+        for _ in range(50):
             self._test_signal_window_functions('kaiser', dtype, device, beta=random.random() * 30)

     def _test_signal_windows_functions(self, name, dtype, device, **kwargs):
@@ -2918,7 +2918,7 @@ class TestTensorCreation(TestCase):
     @unittest.skipIf(not TEST_SCIPY, "Scipy not found")
     @dtypes(torch.float, torch.double)
     def test_kaiser(self, device, dtype):
-        for num_test in range(50):
+        for _ in range(50):
             self._test_signal_windows_functions('kaiser', dtype, device, beta=random.random() * 30)

     def test_tensor_factories_empty(self, device):
@@ -1216,7 +1216,7 @@ class TestTensorExprFuser(BaseTestClass):
         @torch.jit.script
         def test(x: torch.Tensor, y: torch.Tensor, z: int) -> torch.Tensor:
             b = y
-            for i in range(z):
+            for _ in range(z):
                 a = x + y
                 b = b + y
             return b
@@ -303,7 +303,7 @@ class TestTransformers(NNTestCase):
         encoder = nn.TransformerEncoder(layer, 2).to(device)
         optimizer = optim.SGD(encoder.parameters(), lr=0.1, momentum=0.9)
         encoder.train()
-        for i in range(iters):
+        for _ in range(iters):
             encoder.train()
             optimizer.zero_grad()
             inputs = torch.cat([torch.randn(1, 2, 2), torch.zeros(1, 2, 2)], dim=1).to(device)
@@ -537,7 +537,7 @@ class TestTransformers(NNTestCase):

         with torch.no_grad():
             # set constant weights of the model
-            for idx, p in enumerate(model.parameters()):
+            for p in model.parameters():
                 x = p.data
                 sz = x.view(-1).size(0)
                 shape = x.shape
@@ -587,7 +587,7 @@ class TestTransformers(NNTestCase):

         with torch.no_grad():
             # set constant weights of the model
-            for idx, p in enumerate(layer.parameters()):
+            for p in layer.parameters():
                 x = p.data
                 sz = x.view(-1).size(0)
                 shape = x.shape