[4/N] Remove unused loop variables in tests (#166690)

This PR removes unused loop variables in tests: loop indices that are never read become `_`, unused tuple elements get a leading-underscore name, and loops that no longer need an index drop `enumerate()` (similarly, `dict.items()` becomes `dict.values()` where the key is unused). A sketch of the patterns follows.
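
For illustration, the typical rewrites look like this (a hypothetical before/after sketch of the patterns applied; `do_work`, `use`, `d`, and `total` are placeholder names, not code from this commit):

-    for i in range(10):          # index `i` never read
+    for _ in range(10):
         do_work()

-    for k, v in d.items():       # key `k` never read
+    for v in d.values():
         total += v

-    for idx, (key, value) in enumerate(d.items()):  # `key` never read
+    for idx, value in enumerate(d.values()):
         use(idx, value)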

Pull Request resolved: https://github.com/pytorch/pytorch/pull/166690
Approved by: https://github.com/justinchuby, https://github.com/mlazos
Author: Yuanyuan Chen
Date: 2025-10-31 10:20:48 +00:00
Committed by: PyTorch MergeBot
parent 030de07aff
commit fc8ac1216c
31 changed files with 78 additions and 82 deletions

@@ -341,7 +341,7 @@ class DictTests(torch._dynamo.test_case.TestCase):
         def fn(x, d):
             y = 0
-            for idx, (key, value) in enumerate(d.items()):
+            for idx, value in enumerate(d.values()):
                 if idx == 0:
                     y += torch.sin(x * value)
                 else:
@@ -366,7 +366,7 @@ class DictTests(torch._dynamo.test_case.TestCase):
         def fn(x, d):
             y = 0
-            for idx, (key, value) in enumerate(d.items()):
+            for idx, value in enumerate(d.values()):
                 if idx == 0:
                     y += torch.sin(x * value)
                 else:
@@ -847,7 +847,7 @@ class DictTests(torch._dynamo.test_case.TestCase):
             d = {"a": 2, "b": 3, "c": 5 * x}
             mp = types.MappingProxyType(d)
             y = torch.sin(x * mp["a"])
-            for k, v in mp.items():  # noqa: PERF102
+            for v in mp.values():
                 y += torch.cos(x * v)
             return mp
@@ -864,7 +864,7 @@ class DictTests(torch._dynamo.test_case.TestCase):
         def fn(x):
             mp = types.MappingProxyType(d)
             y = torch.sin(x * mp["a"])
-            for k, v in mp.items():  # noqa: PERF102
+            for v in mp.values():
                 y += torch.cos(x * v)
             d["d"] = 4
             return mp
@@ -885,7 +885,7 @@ class DictTests(torch._dynamo.test_case.TestCase):
         def fn(x, mp):
             y = torch.sin(x * mp["a"])
-            for k, v in mp.items():  # noqa: PERF102
+            for v in mp.values():
                 y += torch.cos(x * v)
             if isinstance(mp, types.MappingProxyType):
                 y *= 2

@@ -2858,7 +2858,7 @@ class GraphModule(torch.nn.Module):
         def fn(x):
             return wrap(lambda x: model(x), x)

-        for i in range(2):
+        for _ in range(2):
             # second iteration is key, hooks would have fired during aot trace
             # on first iter
             activations.clear()

@@ -807,7 +807,7 @@ class HooksTests(torch._dynamo.test_case.TestCase):
             def __init__(self) -> None:
                 super().__init__()
                 self.layers = torch.nn.ModuleList()
-                for i in range(10):
+                for _ in range(10):
                     layer = torch.nn.Linear(16, 16)
                     layer.register_forward_pre_hook(lambda _, inp: fw_hook(inp))
                     layer = torch.compile(layer, backend=cnts)

@@ -697,7 +697,7 @@ class UnspecTests(torch._dynamo.test_case.TestCase):
     @torch._dynamo.config.patch(specialize_float=False, capture_scalar_outputs=True)
     def test_unspecialized_float_multiply_precision(self):
         dtypes = [torch.bfloat16, torch.float16, torch.float32, torch.float64]
-        for i, dtype in enumerate(dtypes):
+        for dtype in dtypes:

             def fn(x, y):
                 return x * y
@@ -722,7 +722,7 @@ class UnspecTests(torch._dynamo.test_case.TestCase):
             return x + y.item()

         dtypes = [torch.bfloat16, torch.float16, torch.float32, torch.float64]
-        for i, dtype in enumerate(dtypes):
+        for dtype in dtypes:
             x = torch.ones(3, 3, dtype=dtype)
             self.assertEqual(f(x), x + x.sum().item())

@@ -675,7 +675,7 @@ class inner_f(torch.nn.Module):
         # Verify buffer handling
         buffer_count = 0
-        for desc, (node, grad_node) in input_grad_nodes.items():
+        for desc, (node, _grad_node) in input_grad_nodes.items():
             if isinstance(desc, BufferAOTInput):
                 buffer_count += 1
                 self.assertIsNotNone(node)
@@ -764,13 +764,13 @@ class inner_f(torch.nn.Module):
             self.assertIn(node, named_params.values())

         # Check that param_grads contains the same parameter nodes
-        for desc, (param_node, grad_node) in param_grads.items():
+        for desc, (param_node, _grad_node) in param_grads.items():
             self.assertIn(param_node, param_nodes)
             self.assertEqual(param_node, named_params[desc.target])

         # Check that all_input_grads contains the parameter nodes
         param_count = 0
-        for desc, (input_node, grad_node) in all_input_grads.items():
+        for desc, (input_node, _grad_node) in all_input_grads.items():
             if isinstance(desc, ParamAOTInput):
                 param_count += 1
                 self.assertIn(input_node, param_nodes)

@@ -3088,9 +3088,7 @@ class GraphModule(torch.nn.Module):
         )

         # Compare gradients for each layer
-        for i, (uncompiled_grad, compiled_grad) in enumerate(
-            zip(uncompiled_grads, compiled_grads)
-        ):
+        for uncompiled_grad, compiled_grad in zip(uncompiled_grads, compiled_grads):
             self.assertEqual(
                 uncompiled_grad,
                 compiled_grad,

@@ -282,7 +282,7 @@ class TestMin(TestCase):
         # python 3.11 adapts bytecode after a number of iterations
         # check that we still match names correctly
-        for i in range(10):
+        for _ in range(10):
             f()

     @skipIf(not TEST_CUDA, "no CUDA")

@@ -4869,7 +4869,7 @@ class AOTInductorTestsTemplate:
             return result

         inputs = []
-        for i in range(1000):
+        for _ in range(1000):
             inputs.append(torch.ones(8, 8, 8, dtype=torch.float16, device=self.device))
         inputs = tuple(inputs)
         model = Model()

@@ -182,7 +182,7 @@ class TestSubprocess(TestCase):
         @torch.compile(fullgraph=True, backend="inductor")
         def model_add(x, y):
             out = x
-            for i in range(500):
+            for _ in range(500):
                 out = torch.add(out, y)
             return out

@@ -405,7 +405,7 @@ main()
                 self.grad_acc_hooks = []
                 self.grad_acc = []
                 self.params = [self.fc1.weight, self.fc2.weight]
-                for i, param in enumerate(self.params):
+                for param in self.params:

                     def wrapper(param):
                         param_tmp = param.expand_as(param)
@@ -1558,7 +1558,7 @@ main()
                 dtype=input_tensor.dtype, device=DEVICE
             )

-            for iteration in range(10):
+            for _ in range(10):
                 for param in model_parameters:
                     param.grad = None
                 output_tensor = model(
@@ -1599,7 +1599,7 @@ main()
         eager_check()

-        for i in range(5):
+        for _ in range(5):
             with compiled_autograd._enable(compiler_fn):
                 eager_check()

@@ -544,7 +544,7 @@ class CudaReproTests(TestCase):
         input = torch.randn(10, 10, device="cuda", requires_grad=True)

-        for i in range(2):
+        for _ in range(2):
             output_ref = model_ref(input)
             output_res = model_opt(input)
             output_ref.sum().backward()

@@ -383,7 +383,7 @@ if HAS_CUDA_AND_TRITON:
         foo = get_compile_fn(backend)(foo)
         with capture_stderr() as captured_output:
-            for i in range(3):
+            for _ in range(3):
                 torch.compiler.cudagraph_mark_step_begin()
                 inp = torch.rand([4], device="cuda")
@@ -415,7 +415,7 @@ if HAS_CUDA_AND_TRITON:
         foo = get_compile_fn(backend)(foo)
         with capture_stderr() as captured_output:
-            for i in range(3):
+            for _ in range(3):
                 torch.compiler.cudagraph_mark_step_begin()
                 inp = torch.rand([4], device="cuda")
@@ -493,7 +493,7 @@ if HAS_CUDA_AND_TRITON:
             # Should warn for current_node=None
             mut(inp())

-            for i in range(3):
+            for _ in range(3):
                 torch.compiler.cudagraph_mark_step_begin()
                 tmp = foo(inp())
                 mut(tmp)  # should not warn
@@ -2180,7 +2180,7 @@ if HAS_CUDA_AND_TRITON:
             model = torch.nn.Linear(10, 10, bias=False, device="cuda")
             x = torch.randn(10, 10, device="cuda")
-            for i in range(5):
+            for _ in range(5):
                 out = model(x)
                 bwd(out.sum())
                 model.weight.grad = None
@@ -4505,7 +4505,7 @@ if HAS_CUDA_AND_TRITON:
             ]

             for i, compile_fn in enumerate(compile_fns):
                 torch.manual_seed(0)
-                for index in range(3):
+                for _ in range(3):
                     x = torch.randn(4, 4, device=device, requires_grad=True)
                     y = torch.randn(4, 4, device=device, requires_grad=True)

@@ -485,7 +485,7 @@ class TestCustomOpAutoTune(TestCase):
             (3, 16),  # einsum, chunk_size ignored
         ]

-        for i, (scale_mode, chunk_size) in enumerate(configs):
+        for scale_mode, chunk_size in configs:
             result = multi_param_scaling(
                 test_x, test_factor, scale_mode=scale_mode, chunk_size=chunk_size
             )

@@ -970,9 +970,7 @@ class OptimizeForInferenceTemplate(TestCase):
         self.assertEqual(len(actual_outputs), len(expected_outputs))
         self.assertEqual(2, len(actual_outputs))

-        for i, actual, expected in zip(
-            itertools.count(), actual_outputs, expected_outputs
-        ):
+        for actual, expected in zip(actual_outputs, expected_outputs):
             self.assertEqual(expected, actual)

         if self.device == "cpu":

@@ -2095,7 +2095,7 @@ class TestMaxAutotune(TestCase):
         # Test loop.
         def test_func2(x):
-            for i in range(10):
+            for _ in range(10):
                 x = torch.matmul(x, x)
             return x

@@ -343,7 +343,7 @@ class TestOperatorReorderForPeakMemory(TestCase):
     def test_fusion_acc_large_reads(self):
         def f(x, y, z):
             res = torch.zeros_like(x[0])
-            for i in range(4):
+            for _ in range(4):
                 temp = torch.matmul(x, y) + z
                 res = res + temp
             return res

@@ -539,7 +539,7 @@ class TestSet(TestJointOps, TestCase):
         # s.discard(self.thetype(self.word))

     def test_pop(self):
-        for i in range(len(self.s)):
+        for _ in range(len(self.s)):
             elem = self.s.pop()
             self.assertNotIn(elem, self.s)
         self.assertRaises(KeyError, self.s.pop)
@@ -990,7 +990,7 @@ class TestExceptionPropagation(TestCase):
     def test_changingSizeWhileIterating(self):
         s = OrderedSet([1, 2, 3])
         try:
-            for i in s:
+            for _ in s:
                 s.update([4])  # noqa: B909
         except RuntimeError:
             pass

@@ -245,7 +245,7 @@ class DynamoProfilerTests(torch._inductor.test_case.TestCase):
                 skip_first=3, wait=1, warmup=1, active=2, repeat=1
             ),
         ) as prof:
-            for idx in range(10):
+            for _ in range(10):
                 fn(*inputs)
                 prof.step()

@@ -2090,7 +2090,7 @@ class CommonTemplate:
         from torch._inductor.runtime.triton_heuristics import triton_config_reduction

         size_hints = {"x": 67108864, "r0_": 8192}
-        for i in range(4):
+        for _ in range(4):
             size_hints["x"] = next_power_of_2(size_hints["x"])
             triton_config_reduction(size_hints, 1, 2048, 1, 8)
@@ -5033,13 +5033,13 @@ class CommonTemplate:
         def run_weights_sharing_model(m, inp):
             with torch.no_grad():
-                for i in range(num_run):
+                for _ in range(num_run):
                     y = m(inp)

         numb_instance = 2
         threads = []
         compiled_m = torch.compile(model)
-        for i in range(1, numb_instance + 1):
+        for _ in range(1, numb_instance + 1):
             thread = threading.Thread(
                 target=run_weights_sharing_model, args=(compiled_m, inp)
             )

@@ -497,7 +497,7 @@ def forward(self, x_1, output_1):
             x: torch.Tensor,
             y: torch.Tensor,
         ):
-            for i in range(4):
+            for _ in range(4):
                 x = add_in_loop(x, y)
             return x
@@ -2971,7 +2971,7 @@ class MutationTests(torch._inductor.test_case.TestCase):
            x = tl.load(in_ptr0 + offsets, mask=mask)
            y = tl.load(in_ptr1 + offsets, mask=mask)
            output = tl.zeros((n_elements,), dtype=tl.float32)
-           for i in range(4):
+           for _ in range(4):
               output += x + y
           tl.store(out_ptr + offsets, output, mask=mask)
@@ -3041,8 +3041,8 @@ class MutationTests(torch._inductor.test_case.TestCase):
            x = tl.load(in_ptr0 + offsets, mask=mask)
            y = tl.load(in_ptr1 + offsets, mask=mask)
            output = tl.zeros((n_elements,), dtype=tl.float32)
-           for i in range(2):
-               for j in range(2):
+           for _ in range(2):
+               for _ in range(2):
                   output += x + y
           tl.store(out_ptr + offsets, output, mask=mask)
@@ -3078,8 +3078,8 @@ class MutationTests(torch._inductor.test_case.TestCase):
            y = tl.load(in_ptr1 + offsets, mask=mask)
            output1 = tl.zeros((n_elements,), dtype=tl.float32)
            output2 = tl.zeros((n_elements,), dtype=tl.float32)
-           for i in range(2):
-               for j in range(2):
+           for _ in range(2):
+               for _ in range(2):
                   output1 += y
                   output2 += x
           output = output1 + output2

@@ -873,7 +873,7 @@ class TestStateDictHooks(TestCase):
         )

         def linear_state_dict_post_hook(module, state_dict, prefix, local_metadata):
-            for name, param in module.named_parameters(recurse=False):
+            for name, _param in module.named_parameters(recurse=False):
                 state_dict[prefix + name] = torch.nn.Parameter(
                     state_dict[prefix + name]
                 )

@@ -6106,7 +6106,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
         class NestedLoopsModel(torch.jit.ScriptModule):
             @torch.jit.script_method
             def forward(self, x):
-                for i in range(5):
+                for _ in range(5):
                     a = 0
                     while a < 4:
                         a += 1
@@ -6145,7 +6145,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
         class LoopModel(torch.nn.Module):
             def forward(self, x):
                 res = torch.zeros_like(x[0])
-                for i in range(x.size(0)):
+                for _ in range(x.size(0)):
                     res += x[0].transpose(0, 1)
                 return res
@@ -6780,7 +6780,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
                 a = torch.ones(
                     12,
                 )
-                for i in range(10):
+                for _ in range(10):
                     a.add_(
                         torch.ones(
                             12,
@@ -6809,7 +6809,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
                 b_ref = b  # not used in loop, should not be altered.
                 for i in range(10):
                     if i == 3:
-                        for j in range(5):
+                        for _ in range(5):
                             a += _bias
                             _bias.add_(
                                 torch.ones(
@@ -6854,7 +6854,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
                 )
                 for i in range(10):
                     if i == 3:
-                        for j in range(5):
+                        for _ in range(5):
                             self._bias += torch.arange(
                                 12,
                             )
@@ -6881,7 +6881,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
                 )
                 for i in range(10):
                     if i == 3:
-                        for j in range(5):
+                        for _ in range(5):
                             self._bias.copy_(
                                 torch.arange(
                                     12,
@@ -8567,7 +8567,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
         class SequanceLoopModel(torch.nn.Module):
             def forward(self, x):
                 outputs = []
-                for i in range(3):
+                for _ in range(3):
                     outputs += [x]
                 return torch.stack(outputs).transpose(0, 1)
@@ -9768,9 +9768,9 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
                 a = (input1, input2)
                 b = a
                 c = (input1, input2, input3)
-                for i in range(5):
+                for _ in range(5):
                     d = a[0]
-                    for j in range(2):
+                    for _ in range(2):
                         e, f = a
                         a = (d, f)
                     f = c[2]
@@ -9794,7 +9794,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
         class TupleModule(torch.nn.Module):
             def forward(self, input1: Tensor, input2: Tensor) -> tuple[Tensor, Tensor]:
                 a = (input1, input2)
-                for x in range(5):
+                for _ in range(5):
                     c, d = a
                     a = (c, d)
                 return a
@@ -9812,7 +9812,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
             ) -> tuple[tuple[Tensor, Tensor], tuple[Tensor, Tensor]]:
                 a = input1
                 b = input2
-                for x in range(5):
+                for _ in range(5):
                     c, d = a
                     e, f = b
                     if c.shape[0] == e.shape[0]:
@@ -11418,7 +11418,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
                 self.conv.weight = torch.arange(10)
                 for i in range(10):
                     if i == 3:
-                        for j in range(10):
+                        for _ in range(10):
                             w = self.conv.weight
                             self.conv.weight = torch.arange(10) + w
@@ -11480,7 +11480,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
             def set_cell_anchors(self, anchors):
                 self.conv.weight = torch.randn(3, 10)
                 for i in range(self.conv.weight.size(0)):
-                    for j in range(10):
+                    for _ in range(10):
                         self.conv.bias = torch.randn(3, 10, 3)
                         self.conv.weight = anchors * i
                         self.boxes.append(torch.ones(3, 3))
@@ -12452,7 +12452,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
                 self.loop_count = loop_count

             def forward(self, x):
-                for i in range(self.loop_count):
+                for _ in range(self.loop_count):
                     x.index_add_(self.dim, self.index, self.updates)
                 return x

@@ -192,7 +192,7 @@ class TestLRScheduler(TestCase):
     def test_old_pattern_warning_resuming(self):
         epochs = 35
-        for i, group in enumerate(self.opt.param_groups):
+        for group in self.opt.param_groups:
             group["initial_lr"] = 0.01

         with warnings.catch_warnings(record=True) as ws:
@@ -209,7 +209,7 @@ class TestLRScheduler(TestCase):
     def test_old_pattern_warning_resuming_with_arg(self):
         epochs = 35
-        for i, group in enumerate(self.opt.param_groups):
+        for group in self.opt.param_groups:
             group["initial_lr"] = 0.01

         with warnings.catch_warnings(record=True) as ws:
@@ -226,7 +226,7 @@ class TestLRScheduler(TestCase):
     def test_old_pattern_warning_with_overridden_optim_step(self):
         epochs = 35
-        for i, group in enumerate(self.opt.param_groups):
+        for group in self.opt.param_groups:
             group["initial_lr"] = 0.01

         with warnings.catch_warnings(record=True) as ws:
@@ -299,7 +299,7 @@ class TestLRScheduler(TestCase):
         self.opt.step = types.MethodType(new_step, self.opt)

         def new_pattern():
-            for e in range(epochs):
+            for _ in range(epochs):
                 self.opt.step()
                 scheduler.step()
@@ -2617,7 +2617,7 @@ class TestLRScheduler(TestCase):
         sch = SWALR(opt, swa_lr=swa_lr)

         ori_param_groups = copy.deepcopy(opt.param_groups)
-        for i in range(2):
+        for _ in range(2):
             lr.multiply_(0.5)
             swa_lr.multiply_(0.5)
             opt.step()

@@ -344,7 +344,7 @@ class TestFakeQuantizeOps(TestCase):
         maxi = 255
         mini = 0

-        for i in range(20):
+        for _ in range(20):
             X1 = torch.randn(5, 5).to(torch.float16)
             Y1 = torch.fake_quantize_per_tensor_affine(X1, scale, zero, mini, maxi)
             Y1r = _fake_quantize_per_tensor_affine_reference(X1, scale, zero, mini, maxi)
@@ -770,7 +770,7 @@ class TestFakeQuantizeOps(TestCase):
         mini = 0
         maxi = 255

-        for i in range(20):
+        for _ in range(20):
             X1 = torch.randn(4, 5).to(torch.float16)
             Y1 = torch.fake_quantize_per_channel_affine(X1, scale, zero, axis, mini, maxi)
             Y1r = _fake_quantize_per_channel_affine_reference(X1, scale, zero, axis, mini, maxi)
@@ -1028,7 +1028,7 @@ class TestFakeQuantizeOps(TestCase):
         zero_types = [torch.int]
         devices = [torch.device('cpu'), torch.device('cuda')] if torch.cuda.is_available() else [torch.device('cpu')]
         axis = 1
-        for i in range(20):
+        for _ in range(20):
             for torch_type, float_type, device, zero_type in itertools.product(torch_types, float_types, devices, zero_types):
                 X = torch.randn(3, 3, device=device).to(float_type)
                 scales = (10 * torch.randn(3, device=device)).abs()

@@ -4672,7 +4672,7 @@ class TestQuantizeFx(QuantizationTestCase):
         m = prepare(m, {"": qconfig}, example_inputs=example_inputs)
         # check that there is a duplicated observer instance
         actpp_module_count = 0
-        for name, module in m.named_modules(remove_duplicate=False):
+        for module in m.modules(remove_duplicate=False):
             if isinstance(module, actpp_module_class):
                 actpp_module_count += 1
         self.assertEqual(actpp_module_count, 2)

@@ -331,7 +331,7 @@ class TestQuantizeJitPasses(QuantizationTestCase):
             def __init__(self, dim, num_blocks, enable_bias, enable_affine):
                 super().__init__()
                 layers = []
-                for i in range(num_blocks):
+                for _ in range(num_blocks):
                     layers.append(conv_module[dim](20, 20, 5, 1, bias=enable_bias))
                     bn_obj = bn_module[dim](num_features=20, affine=enable_affine)
                     if enable_affine:

@@ -658,7 +658,7 @@ class TestDataFramesPipes(TestCase):
         ]
         actual_i = []
-        for i, j in df_numbers:
+        for i, _ in df_numbers:
             actual_i.append(i)

         self.assertEqual(expected_i, actual_i)
@@ -2632,7 +2632,7 @@ class TestTyping(TestCase):
                 self.dp = dp

             def __iter__(self) -> Iterator[int]:
-                for a, b in self.dp:
+                for a, _ in self.dp:
                     yield a

         # Non-DataPipe input with DataPipe hint

@@ -2365,7 +2365,7 @@ class TestFX(JitTestCase):
         g = torch.fx.Graph()
         x = g.placeholder("x")
-        for i in range(depth):
+        for _ in range(depth):
             x = g.call_function(torch.relu, (x,))
         g.output(x)

@@ -295,7 +295,7 @@ class SerializationMixin:
             5,
             6
         ]
-        for i in range(100):
+        for _ in range(100):
             data.append(0)
         t = torch.tensor(data, dtype=torch.uint8)

@@ -1315,7 +1315,7 @@ class TestFFT(TestCase):
         istft_kwargs = stft_kwargs.copy()
         del istft_kwargs['pad_mode']
         for sizes in data_sizes:
-            for i in range(num_trials):
+            for _ in range(num_trials):
                 original = torch.randn(*sizes, dtype=dtype, device=device)
                 stft = torch.stft(original, return_complex=True, **stft_kwargs)
                 inversed = torch.istft(stft, length=original.size(1), **istft_kwargs)
@@ -1386,7 +1386,7 @@ class TestFFT(TestCase):
         del stft_kwargs['size']
         istft_kwargs = stft_kwargs.copy()
         del istft_kwargs['pad_mode']
-        for i in range(num_trials):
+        for _ in range(num_trials):
             original = torch.randn(*sizes, dtype=dtype, device=device)
             stft = torch.stft(original, return_complex=True, **stft_kwargs)
             with self.assertWarnsOnceRegex(UserWarning, "The length of signal is shorter than the length parameter."):
@@ -1501,7 +1501,7 @@ class TestFFT(TestCase):
         complex_dtype = corresponding_complex_dtype(dtype)

         def _test(data_size, kwargs):
-            for i in range(num_trials):
+            for _ in range(num_trials):
                 tensor1 = torch.randn(data_size, device=device, dtype=complex_dtype)
                 tensor2 = torch.randn(data_size, device=device, dtype=complex_dtype)
                 a, b = torch.rand(2, dtype=dtype, device=device)

@@ -138,7 +138,7 @@ class TestTorchDeviceType(TestCase):
     # TODO: move all tensor creation to common ops
     def _rand_shape(self, dim, min_size, max_size):
         shape = []
-        for i in range(dim):
+        for _ in range(dim):
             shape.append(random.randint(min_size, max_size))
         return tuple(shape)
@@ -172,7 +172,7 @@ class TestTorchDeviceType(TestCase):
             element_size = torch._utils._element_size(dtype)

-            for i in range(10):
+            for _ in range(10):
                 bytes_list = [rand_byte() for _ in range(element_size)]
                 scalar = bytes_to_scalar(bytes_list, dtype, device)
                 self.assertEqual(scalar.storage().untyped().tolist(), bytes_list)
@@ -2263,7 +2263,7 @@ class TestTorchDeviceType(TestCase):
             if num_observations > 0:
                 fweights = torch.randint(1, 10, (num_observations,), device=device)
                 aweights = make_tensor((num_observations,), dtype=torch.float, device=device, low=1)
-                for correction, fw, aw in product([0, 1, 2], [None, fweights], [None, aweights]):
+                for correction, _fw, _aw in product([0, 1, 2], [None, fweights], [None, aweights]):
                     check(x, correction, fweights, aweights)

     @skipIfNoSciPy
@@ -5151,7 +5151,7 @@ class TestTorchDeviceType(TestCase):
         prob_dist = torch.rand(10000, 1000, device=device, dtype=dtype)
         n_sample = 1

-        for i in range(trials):
+        for _ in range(trials):
            gen.manual_seed(seed)
            samples_1 = torch.multinomial(prob_dist, n_sample, True, generator=gen)
@@ -5229,7 +5229,7 @@ class TestTorchDeviceType(TestCase):
         # TODO copy _like constructors to stride permutation instead of just layout
         if not TEST_WITH_TORCHINDUCTOR:
             x = torch.randn((3, 4, 5, 6, 7, 8, 9), device=device)
-            for i in range(10):
+            for _ in range(10):
                 permutation = list(range(len(x.shape)))
                 random.shuffle(permutation)
                 x = x.permute(permutation)