[4/N] Remove unused loop variables in tests (#166690)

This PR cleans up unused loop variables in tests: loop variables that only drive the iteration count are renamed to _, enumerate() calls whose index is never read are dropped, loops over dict.items() that ignore the key now iterate over dict.values(), and the matching # noqa: PERF102 suppressions are removed.
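
The rewrites follow a few mechanical patterns, sketched below on a made-up example (illustrative only, not a hunk from this diff):

    d = {"a": 1, "b": 2}
    total = 0

    # Before: key is never read, and the enumerate() index is not needed either.
    for idx, (key, value) in enumerate(d.items()):
        total += value

    # After: iterate over exactly what the body uses.
    for value in d.values():
        total += value

    # Before: i exists only to repeat the body.
    for i in range(3):
        total += value

    # After: _ marks the loop variable as intentionally unused.
    for _ in range(3):
        total += value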

Pull Request resolved: https://github.com/pytorch/pytorch/pull/166690
Approved by: https://github.com/justinchuby, https://github.com/mlazos
Yuanyuan Chen 2025-10-31 10:20:48 +00:00 committed by PyTorch MergeBot
parent 030de07aff
commit fc8ac1216c
31 changed files with 78 additions and 82 deletions

View File

@@ -341,7 +341,7 @@ class DictTests(torch._dynamo.test_case.TestCase):
 def fn(x, d):
     y = 0
-    for idx, (key, value) in enumerate(d.items()):
+    for idx, value in enumerate(d.values()):
         if idx == 0:
             y += torch.sin(x * value)
         else:
@@ -366,7 +366,7 @@ class DictTests(torch._dynamo.test_case.TestCase):
 def fn(x, d):
     y = 0
-    for idx, (key, value) in enumerate(d.items()):
+    for idx, value in enumerate(d.values()):
         if idx == 0:
             y += torch.sin(x * value)
         else:
@@ -847,7 +847,7 @@ class DictTests(torch._dynamo.test_case.TestCase):
 d = {"a": 2, "b": 3, "c": 5 * x}
 mp = types.MappingProxyType(d)
 y = torch.sin(x * mp["a"])
-for k, v in mp.items(): # noqa: PERF102
+for v in mp.values():
     y += torch.cos(x * v)
 return mp
@@ -864,7 +864,7 @@ class DictTests(torch._dynamo.test_case.TestCase):
 def fn(x):
     mp = types.MappingProxyType(d)
     y = torch.sin(x * mp["a"])
-    for k, v in mp.items(): # noqa: PERF102
+    for v in mp.values():
         y += torch.cos(x * v)
     d["d"] = 4
     return mp
@@ -885,7 +885,7 @@ class DictTests(torch._dynamo.test_case.TestCase):
 def fn(x, mp):
     y = torch.sin(x * mp["a"])
-    for k, v in mp.items(): # noqa: PERF102
+    for v in mp.values():
         y += torch.cos(x * v)
     if isinstance(mp, types.MappingProxyType):
         y *= 2

View File

@@ -2858,7 +2858,7 @@ class GraphModule(torch.nn.Module):
 def fn(x):
     return wrap(lambda x: model(x), x)
-for i in range(2):
+for _ in range(2):
     # second iteration is key, hooks would have fired during aot trace
     # on first iter
     activations.clear()

View File

@@ -807,7 +807,7 @@ class HooksTests(torch._dynamo.test_case.TestCase):
 def __init__(self) -> None:
     super().__init__()
     self.layers = torch.nn.ModuleList()
-    for i in range(10):
+    for _ in range(10):
         layer = torch.nn.Linear(16, 16)
         layer.register_forward_pre_hook(lambda _, inp: fw_hook(inp))
         layer = torch.compile(layer, backend=cnts)

View File

@@ -697,7 +697,7 @@ class UnspecTests(torch._dynamo.test_case.TestCase):
 @torch._dynamo.config.patch(specialize_float=False, capture_scalar_outputs=True)
 def test_unspecialized_float_multiply_precision(self):
     dtypes = [torch.bfloat16, torch.float16, torch.float32, torch.float64]
-    for i, dtype in enumerate(dtypes):
+    for dtype in dtypes:
         def fn(x, y):
             return x * y
@@ -722,7 +722,7 @@ class UnspecTests(torch._dynamo.test_case.TestCase):
     return x + y.item()
 dtypes = [torch.bfloat16, torch.float16, torch.float32, torch.float64]
-for i, dtype in enumerate(dtypes):
+for dtype in dtypes:
     x = torch.ones(3, 3, dtype=dtype)
     self.assertEqual(f(x), x + x.sum().item())

View File

@@ -675,7 +675,7 @@ class inner_f(torch.nn.Module):
 # Verify buffer handling
 buffer_count = 0
-for desc, (node, grad_node) in input_grad_nodes.items():
+for desc, (node, _grad_node) in input_grad_nodes.items():
     if isinstance(desc, BufferAOTInput):
         buffer_count += 1
         self.assertIsNotNone(node)
@@ -764,13 +764,13 @@ class inner_f(torch.nn.Module):
     self.assertIn(node, named_params.values())
 # Check that param_grads contains the same parameter nodes
-for desc, (param_node, grad_node) in param_grads.items():
+for desc, (param_node, _grad_node) in param_grads.items():
     self.assertIn(param_node, param_nodes)
     self.assertEqual(param_node, named_params[desc.target])
 # Check that all_input_grads contains the parameter nodes
 param_count = 0
-for desc, (input_node, grad_node) in all_input_grads.items():
+for desc, (input_node, _grad_node) in all_input_grads.items():
     if isinstance(desc, ParamAOTInput):
         param_count += 1
         self.assertIn(input_node, param_nodes)

View File

@@ -3088,9 +3088,7 @@ class GraphModule(torch.nn.Module):
 )
 # Compare gradients for each layer
-for i, (uncompiled_grad, compiled_grad) in enumerate(
-    zip(uncompiled_grads, compiled_grads)
-):
+for uncompiled_grad, compiled_grad in zip(uncompiled_grads, compiled_grads):
     self.assertEqual(
         uncompiled_grad,
         compiled_grad,

View File

@@ -282,7 +282,7 @@ class TestMin(TestCase):
     # python 3.11 adapts bytecode after a number of iterations
     # check that we still match names correctly
-    for i in range(10):
+    for _ in range(10):
         f()
 @skipIf(not TEST_CUDA, "no CUDA")

View File

@@ -4869,7 +4869,7 @@ class AOTInductorTestsTemplate:
     return result
 inputs = []
-for i in range(1000):
+for _ in range(1000):
     inputs.append(torch.ones(8, 8, 8, dtype=torch.float16, device=self.device))
 inputs = tuple(inputs)
 model = Model()

View File

@@ -182,7 +182,7 @@ class TestSubprocess(TestCase):
 @torch.compile(fullgraph=True, backend="inductor")
 def model_add(x, y):
     out = x
-    for i in range(500):
+    for _ in range(500):
         out = torch.add(out, y)
     return out

View File

@@ -405,7 +405,7 @@ main()
 self.grad_acc_hooks = []
 self.grad_acc = []
 self.params = [self.fc1.weight, self.fc2.weight]
-for i, param in enumerate(self.params):
+for param in self.params:
     def wrapper(param):
         param_tmp = param.expand_as(param)
@@ -1558,7 +1558,7 @@ main()
     dtype=input_tensor.dtype, device=DEVICE
 )
-for iteration in range(10):
+for _ in range(10):
     for param in model_parameters:
         param.grad = None
     output_tensor = model(
@@ -1599,7 +1599,7 @@ main()
 eager_check()
-for i in range(5):
+for _ in range(5):
     with compiled_autograd._enable(compiler_fn):
         eager_check()

View File

@@ -544,7 +544,7 @@ class CudaReproTests(TestCase):
 input = torch.randn(10, 10, device="cuda", requires_grad=True)
-for i in range(2):
+for _ in range(2):
     output_ref = model_ref(input)
     output_res = model_opt(input)
     output_ref.sum().backward()

View File

@@ -383,7 +383,7 @@ if HAS_CUDA_AND_TRITON:
 foo = get_compile_fn(backend)(foo)
 with capture_stderr() as captured_output:
-    for i in range(3):
+    for _ in range(3):
         torch.compiler.cudagraph_mark_step_begin()
         inp = torch.rand([4], device="cuda")
@@ -415,7 +415,7 @@ if HAS_CUDA_AND_TRITON:
 foo = get_compile_fn(backend)(foo)
 with capture_stderr() as captured_output:
-    for i in range(3):
+    for _ in range(3):
         torch.compiler.cudagraph_mark_step_begin()
         inp = torch.rand([4], device="cuda")
@@ -493,7 +493,7 @@ if HAS_CUDA_AND_TRITON:
 # Should warn for current_node=None
 mut(inp())
-for i in range(3):
+for _ in range(3):
     torch.compiler.cudagraph_mark_step_begin()
     tmp = foo(inp())
     mut(tmp) # should not warn
@@ -2180,7 +2180,7 @@ if HAS_CUDA_AND_TRITON:
 model = torch.nn.Linear(10, 10, bias=False, device="cuda")
 x = torch.randn(10, 10, device="cuda")
-for i in range(5):
+for _ in range(5):
     out = model(x)
     bwd(out.sum())
     model.weight.grad = None
@@ -4505,7 +4505,7 @@ if HAS_CUDA_AND_TRITON:
 ]
 for i, compile_fn in enumerate(compile_fns):
     torch.manual_seed(0)
-    for index in range(3):
+    for _ in range(3):
         x = torch.randn(4, 4, device=device, requires_grad=True)
         y = torch.randn(4, 4, device=device, requires_grad=True)

View File

@@ -485,7 +485,7 @@ class TestCustomOpAutoTune(TestCase):
     (3, 16), # einsum, chunk_size ignored
 ]
-for i, (scale_mode, chunk_size) in enumerate(configs):
+for scale_mode, chunk_size in configs:
     result = multi_param_scaling(
         test_x, test_factor, scale_mode=scale_mode, chunk_size=chunk_size
     )

View File

@@ -970,9 +970,7 @@ class OptimizeForInferenceTemplate(TestCase):
 self.assertEqual(len(actual_outputs), len(expected_outputs))
 self.assertEqual(2, len(actual_outputs))
-for i, actual, expected in zip(
-    itertools.count(), actual_outputs, expected_outputs
-):
+for actual, expected in zip(actual_outputs, expected_outputs):
     self.assertEqual(expected, actual)
 if self.device == "cpu":

View File

@@ -2095,7 +2095,7 @@ class TestMaxAutotune(TestCase):
 # Test loop.
 def test_func2(x):
-    for i in range(10):
+    for _ in range(10):
         x = torch.matmul(x, x)
     return x

View File

@@ -343,7 +343,7 @@ class TestOperatorReorderForPeakMemory(TestCase):
 def test_fusion_acc_large_reads(self):
     def f(x, y, z):
         res = torch.zeros_like(x[0])
-        for i in range(4):
+        for _ in range(4):
            temp = torch.matmul(x, y) + z
            res = res + temp
        return res

View File

@@ -539,7 +539,7 @@ class TestSet(TestJointOps, TestCase):
     # s.discard(self.thetype(self.word))
 def test_pop(self):
-    for i in range(len(self.s)):
+    for _ in range(len(self.s)):
         elem = self.s.pop()
         self.assertNotIn(elem, self.s)
     self.assertRaises(KeyError, self.s.pop)
@@ -990,7 +990,7 @@ class TestExceptionPropagation(TestCase):
 def test_changingSizeWhileIterating(self):
     s = OrderedSet([1, 2, 3])
     try:
-        for i in s:
+        for _ in s:
             s.update([4]) # noqa: B909
     except RuntimeError:
         pass

View File

@@ -245,7 +245,7 @@ class DynamoProfilerTests(torch._inductor.test_case.TestCase):
         skip_first=3, wait=1, warmup=1, active=2, repeat=1
     ),
 ) as prof:
-    for idx in range(10):
+    for _ in range(10):
         fn(*inputs)
         prof.step()

View File

@@ -2090,7 +2090,7 @@ class CommonTemplate:
 from torch._inductor.runtime.triton_heuristics import triton_config_reduction
 size_hints = {"x": 67108864, "r0_": 8192}
-for i in range(4):
+for _ in range(4):
     size_hints["x"] = next_power_of_2(size_hints["x"])
     triton_config_reduction(size_hints, 1, 2048, 1, 8)
@@ -5033,13 +5033,13 @@ class CommonTemplate:
 def run_weights_sharing_model(m, inp):
     with torch.no_grad():
-        for i in range(num_run):
+        for _ in range(num_run):
             y = m(inp)
 numb_instance = 2
 threads = []
 compiled_m = torch.compile(model)
-for i in range(1, numb_instance + 1):
+for _ in range(1, numb_instance + 1):
     thread = threading.Thread(
         target=run_weights_sharing_model, args=(compiled_m, inp)
     )

View File

@@ -497,7 +497,7 @@ def forward(self, x_1, output_1):
     x: torch.Tensor,
     y: torch.Tensor,
 ):
-    for i in range(4):
+    for _ in range(4):
         x = add_in_loop(x, y)
     return x
@@ -2971,7 +2971,7 @@ class MutationTests(torch._inductor.test_case.TestCase):
 x = tl.load(in_ptr0 + offsets, mask=mask)
 y = tl.load(in_ptr1 + offsets, mask=mask)
 output = tl.zeros((n_elements,), dtype=tl.float32)
-for i in range(4):
+for _ in range(4):
     output += x + y
 tl.store(out_ptr + offsets, output, mask=mask)
@@ -3041,8 +3041,8 @@ class MutationTests(torch._inductor.test_case.TestCase):
 x = tl.load(in_ptr0 + offsets, mask=mask)
 y = tl.load(in_ptr1 + offsets, mask=mask)
 output = tl.zeros((n_elements,), dtype=tl.float32)
-for i in range(2):
-    for j in range(2):
+for _ in range(2):
+    for _ in range(2):
         output += x + y
 tl.store(out_ptr + offsets, output, mask=mask)
@@ -3078,8 +3078,8 @@ class MutationTests(torch._inductor.test_case.TestCase):
 y = tl.load(in_ptr1 + offsets, mask=mask)
 output1 = tl.zeros((n_elements,), dtype=tl.float32)
 output2 = tl.zeros((n_elements,), dtype=tl.float32)
-for i in range(2):
-    for j in range(2):
+for _ in range(2):
+    for _ in range(2):
         output1 += y
         output2 += x
 output = output1 + output2

View File

@@ -873,7 +873,7 @@ class TestStateDictHooks(TestCase):
 )
 def linear_state_dict_post_hook(module, state_dict, prefix, local_metadata):
-    for name, param in module.named_parameters(recurse=False):
+    for name, _param in module.named_parameters(recurse=False):
         state_dict[prefix + name] = torch.nn.Parameter(
             state_dict[prefix + name]
         )

View File

@@ -6106,7 +6106,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
 class NestedLoopsModel(torch.jit.ScriptModule):
     @torch.jit.script_method
     def forward(self, x):
-        for i in range(5):
+        for _ in range(5):
             a = 0
             while a < 4:
                 a += 1
@@ -6145,7 +6145,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
 class LoopModel(torch.nn.Module):
     def forward(self, x):
         res = torch.zeros_like(x[0])
-        for i in range(x.size(0)):
+        for _ in range(x.size(0)):
             res += x[0].transpose(0, 1)
         return res
@@ -6780,7 +6780,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
 a = torch.ones(
     12,
 )
-for i in range(10):
+for _ in range(10):
     a.add_(
         torch.ones(
             12,
@@ -6809,7 +6809,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
 b_ref = b # not used in loop, should not be altered.
 for i in range(10):
     if i == 3:
-        for j in range(5):
+        for _ in range(5):
             a += _bias
             _bias.add_(
                 torch.ones(
@@ -6854,7 +6854,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
 )
 for i in range(10):
     if i == 3:
-        for j in range(5):
+        for _ in range(5):
             self._bias += torch.arange(
                 12,
             )
@@ -6881,7 +6881,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
 )
 for i in range(10):
     if i == 3:
-        for j in range(5):
+        for _ in range(5):
             self._bias.copy_(
                 torch.arange(
                     12,
@@ -8567,7 +8567,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
 class SequanceLoopModel(torch.nn.Module):
     def forward(self, x):
         outputs = []
-        for i in range(3):
+        for _ in range(3):
            outputs += [x]
        return torch.stack(outputs).transpose(0, 1)
@@ -9768,9 +9768,9 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
 a = (input1, input2)
 b = a
 c = (input1, input2, input3)
-for i in range(5):
+for _ in range(5):
     d = a[0]
-    for j in range(2):
+    for _ in range(2):
         e, f = a
         a = (d, f)
     f = c[2]
@@ -9794,7 +9794,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
 class TupleModule(torch.nn.Module):
     def forward(self, input1: Tensor, input2: Tensor) -> tuple[Tensor, Tensor]:
         a = (input1, input2)
-        for x in range(5):
+        for _ in range(5):
             c, d = a
             a = (c, d)
         return a
@@ -9812,7 +9812,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
 ) -> tuple[tuple[Tensor, Tensor], tuple[Tensor, Tensor]]:
     a = input1
     b = input2
-    for x in range(5):
+    for _ in range(5):
         c, d = a
         e, f = b
         if c.shape[0] == e.shape[0]:
@@ -11418,7 +11418,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
 self.conv.weight = torch.arange(10)
 for i in range(10):
     if i == 3:
-        for j in range(10):
+        for _ in range(10):
             w = self.conv.weight
             self.conv.weight = torch.arange(10) + w
@@ -11480,7 +11480,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
 def set_cell_anchors(self, anchors):
     self.conv.weight = torch.randn(3, 10)
     for i in range(self.conv.weight.size(0)):
-        for j in range(10):
+        for _ in range(10):
             self.conv.bias = torch.randn(3, 10, 3)
         self.conv.weight = anchors * i
         self.boxes.append(torch.ones(3, 3))
@@ -12452,7 +12452,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
     self.loop_count = loop_count
 def forward(self, x):
-    for i in range(self.loop_count):
+    for _ in range(self.loop_count):
         x.index_add_(self.dim, self.index, self.updates)
     return x

View File

@@ -192,7 +192,7 @@ class TestLRScheduler(TestCase):
 def test_old_pattern_warning_resuming(self):
     epochs = 35
-    for i, group in enumerate(self.opt.param_groups):
+    for group in self.opt.param_groups:
         group["initial_lr"] = 0.01
     with warnings.catch_warnings(record=True) as ws:
@@ -209,7 +209,7 @@ class TestLRScheduler(TestCase):
 def test_old_pattern_warning_resuming_with_arg(self):
     epochs = 35
-    for i, group in enumerate(self.opt.param_groups):
+    for group in self.opt.param_groups:
         group["initial_lr"] = 0.01
     with warnings.catch_warnings(record=True) as ws:
@@ -226,7 +226,7 @@ class TestLRScheduler(TestCase):
 def test_old_pattern_warning_with_overridden_optim_step(self):
     epochs = 35
-    for i, group in enumerate(self.opt.param_groups):
+    for group in self.opt.param_groups:
         group["initial_lr"] = 0.01
     with warnings.catch_warnings(record=True) as ws:
@@ -299,7 +299,7 @@ class TestLRScheduler(TestCase):
 self.opt.step = types.MethodType(new_step, self.opt)
 def new_pattern():
-    for e in range(epochs):
+    for _ in range(epochs):
         self.opt.step()
         scheduler.step()
@@ -2617,7 +2617,7 @@ class TestLRScheduler(TestCase):
 sch = SWALR(opt, swa_lr=swa_lr)
 ori_param_groups = copy.deepcopy(opt.param_groups)
-for i in range(2):
+for _ in range(2):
     lr.multiply_(0.5)
     swa_lr.multiply_(0.5)
     opt.step()

View File

@@ -344,7 +344,7 @@ class TestFakeQuantizeOps(TestCase):
 maxi = 255
 mini = 0
-for i in range(20):
+for _ in range(20):
     X1 = torch.randn(5, 5).to(torch.float16)
     Y1 = torch.fake_quantize_per_tensor_affine(X1, scale, zero, mini, maxi)
     Y1r = _fake_quantize_per_tensor_affine_reference(X1, scale, zero, mini, maxi)
@@ -770,7 +770,7 @@ class TestFakeQuantizeOps(TestCase):
 mini = 0
 maxi = 255
-for i in range(20):
+for _ in range(20):
     X1 = torch.randn(4, 5).to(torch.float16)
     Y1 = torch.fake_quantize_per_channel_affine(X1, scale, zero, axis, mini, maxi)
     Y1r = _fake_quantize_per_channel_affine_reference(X1, scale, zero, axis, mini, maxi)
@@ -1028,7 +1028,7 @@ class TestFakeQuantizeOps(TestCase):
 zero_types = [torch.int]
 devices = [torch.device('cpu'), torch.device('cuda')] if torch.cuda.is_available() else [torch.device('cpu')]
 axis = 1
-for i in range(20):
+for _ in range(20):
     for torch_type, float_type, device, zero_type in itertools.product(torch_types, float_types, devices, zero_types):
         X = torch.randn(3, 3, device=device).to(float_type)
         scales = (10 * torch.randn(3, device=device)).abs()

View File

@@ -4672,7 +4672,7 @@ class TestQuantizeFx(QuantizationTestCase):
 m = prepare(m, {"": qconfig}, example_inputs=example_inputs)
 # check that there is a duplicated observer instance
 actpp_module_count = 0
-for name, module in m.named_modules(remove_duplicate=False):
+for module in m.modules(remove_duplicate=False):
     if isinstance(module, actpp_module_class):
         actpp_module_count += 1
 self.assertEqual(actpp_module_count, 2)

View File

@@ -331,7 +331,7 @@ class TestQuantizeJitPasses(QuantizationTestCase):
 def __init__(self, dim, num_blocks, enable_bias, enable_affine):
     super().__init__()
     layers = []
-    for i in range(num_blocks):
+    for _ in range(num_blocks):
         layers.append(conv_module[dim](20, 20, 5, 1, bias=enable_bias))
         bn_obj = bn_module[dim](num_features=20, affine=enable_affine)
         if enable_affine:

View File

@@ -658,7 +658,7 @@ class TestDataFramesPipes(TestCase):
 ]
 actual_i = []
-for i, j in df_numbers:
+for i, _ in df_numbers:
     actual_i.append(i)
 self.assertEqual(expected_i, actual_i)
@@ -2632,7 +2632,7 @@ class TestTyping(TestCase):
     self.dp = dp
 def __iter__(self) -> Iterator[int]:
-    for a, b in self.dp:
+    for a, _ in self.dp:
         yield a
 # Non-DataPipe input with DataPipe hint

View File

@@ -2365,7 +2365,7 @@ class TestFX(JitTestCase):
 g = torch.fx.Graph()
 x = g.placeholder("x")
-for i in range(depth):
+for _ in range(depth):
     x = g.call_function(torch.relu, (x,))
 g.output(x)

View File

@@ -295,7 +295,7 @@ class SerializationMixin:
     5,
     6
 ]
-for i in range(100):
+for _ in range(100):
     data.append(0)
 t = torch.tensor(data, dtype=torch.uint8)

View File

@@ -1315,7 +1315,7 @@ class TestFFT(TestCase):
 istft_kwargs = stft_kwargs.copy()
 del istft_kwargs['pad_mode']
 for sizes in data_sizes:
-    for i in range(num_trials):
+    for _ in range(num_trials):
         original = torch.randn(*sizes, dtype=dtype, device=device)
         stft = torch.stft(original, return_complex=True, **stft_kwargs)
         inversed = torch.istft(stft, length=original.size(1), **istft_kwargs)
@@ -1386,7 +1386,7 @@ class TestFFT(TestCase):
 del stft_kwargs['size']
 istft_kwargs = stft_kwargs.copy()
 del istft_kwargs['pad_mode']
-for i in range(num_trials):
+for _ in range(num_trials):
     original = torch.randn(*sizes, dtype=dtype, device=device)
     stft = torch.stft(original, return_complex=True, **stft_kwargs)
     with self.assertWarnsOnceRegex(UserWarning, "The length of signal is shorter than the length parameter."):
@@ -1501,7 +1501,7 @@ class TestFFT(TestCase):
 complex_dtype = corresponding_complex_dtype(dtype)
 def _test(data_size, kwargs):
-    for i in range(num_trials):
+    for _ in range(num_trials):
         tensor1 = torch.randn(data_size, device=device, dtype=complex_dtype)
         tensor2 = torch.randn(data_size, device=device, dtype=complex_dtype)
         a, b = torch.rand(2, dtype=dtype, device=device)

View File

@@ -138,7 +138,7 @@ class TestTorchDeviceType(TestCase):
 # TODO: move all tensor creation to common ops
 def _rand_shape(self, dim, min_size, max_size):
     shape = []
-    for i in range(dim):
+    for _ in range(dim):
         shape.append(random.randint(min_size, max_size))
     return tuple(shape)
@@ -172,7 +172,7 @@ class TestTorchDeviceType(TestCase):
 element_size = torch._utils._element_size(dtype)
-for i in range(10):
+for _ in range(10):
     bytes_list = [rand_byte() for _ in range(element_size)]
     scalar = bytes_to_scalar(bytes_list, dtype, device)
     self.assertEqual(scalar.storage().untyped().tolist(), bytes_list)
@@ -2263,7 +2263,7 @@ class TestTorchDeviceType(TestCase):
 if num_observations > 0:
     fweights = torch.randint(1, 10, (num_observations,), device=device)
     aweights = make_tensor((num_observations,), dtype=torch.float, device=device, low=1)
-for correction, fw, aw in product([0, 1, 2], [None, fweights], [None, aweights]):
+for correction, _fw, _aw in product([0, 1, 2], [None, fweights], [None, aweights]):
     check(x, correction, fweights, aweights)
 @skipIfNoSciPy
@@ -5151,7 +5151,7 @@ class TestTorchDeviceType(TestCase):
 prob_dist = torch.rand(10000, 1000, device=device, dtype=dtype)
 n_sample = 1
-for i in range(trials):
+for _ in range(trials):
     gen.manual_seed(seed)
     samples_1 = torch.multinomial(prob_dist, n_sample, True, generator=gen)
@@ -5229,7 +5229,7 @@ class TestTorchDeviceType(TestCase):
 # TODO copy _like constructors to stride permutation instead of just layout
 if not TEST_WITH_TORCHINDUCTOR:
     x = torch.randn((3, 4, 5, 6, 7, 8, 9), device=device)
-    for i in range(10):
+    for _ in range(10):
         permutation = list(range(len(x.shape)))
         random.shuffle(permutation)
         x = x.permute(permutation)