Mirror of https://github.com/zebrajr/pytorch.git — synced 2025-12-06 12:20:52 +01:00
[3/N] Fix unused loop variables (#166509)
This PR removes unused loop variables in tests.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/166509
Approved by: https://github.com/Lucaskabela, https://github.com/Skylion007
This commit is contained in: parent 99b05d1b78, commit 0d50e5d8d4
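The cleanup applied throughout the diff below follows the usual Python convention for loop variables that are never read: rename them to `_`, and drop `enumerate()` when the index is unused. A minimal, self-contained sketch of both patterns (the names here are illustrative only and do not come from the PR):

```python
import torch
import torch.nn as nn

layers = nn.ModuleList([nn.Linear(4, 4) for _ in range(3)])

# Before: "step" and "idx" are bound but never read, which linters
# (e.g. the flake8-bugbear B007 check) flag as unused loop variables.
outputs = []
for step in range(2):
    x = torch.randn(1, 4)
    for idx, layer in enumerate(layers):
        x = layer(x)
    outputs.append(x)

# After: "_" marks an intentionally unused counter, and iterating the
# container directly removes the unneeded enumerate() index.
outputs.clear()
for _ in range(2):
    x = torch.randn(1, 4)
    for layer in layers:
        x = layer(x)
    outputs.append(x)

print(len(outputs))  # 2
```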
@@ -827,7 +827,7 @@ class TestFullyShardShardPlacementFnMultiProcess(FSDPTest):
        torch.manual_seed(42 + self.rank)
        inp = torch.randint(0, model_args.vocab_size, (2, 16), device=device_type.type)
-        for iter_idx in range(5):
+        for _ in range(5):
            ref_loss = ref_model(inp).sum()
            loss = model(inp).sum()
            self.assertEqual(ref_loss, loss)

@@ -262,7 +262,7 @@ class ConstLoop(torch.nn.Module):
        self.count = 3

    def forward(self, x):
-        for i in range(self.count):
+        for _ in range(self.count):
            x = torch.sigmoid(self.linear1(x))
        return x

@@ -509,7 +509,7 @@ class CfgModule(torch.nn.Module):
        self.layer = torch.nn.Linear(10, 10)

    def forward(self, x):
-        for i in range(self.cfg.count):
+        for _ in range(self.cfg.count):
            x = self.layer(x + self.cfg.val)
        return x

@@ -781,7 +781,7 @@ class ParametersModule5(torch.nn.Module):

    def forward(self, x):
        counter = 0
-        for param in self.parameters():
+        for _param in self.parameters():
            counter += 1

        return x * self.scale * counter

@@ -841,7 +841,7 @@ class EnumValues(torch.nn.ModuleDict):

    def forward(self, init_features):
        features = [init_features]
-        for idx, layer in enumerate(self.values()):
+        for layer in self.values():
            new_features = layer(features)
            features.append(new_features)
        return torch.cat(features, 1)
@@ -2161,7 +2161,7 @@ class OptimizedModuleTest(torch._dynamo.test_case.TestCase):

        cnts = torch._dynamo.testing.CompileCounterWithBackend("eager")
        opt_mod = torch.compile(fn, backend=cnts)
-        for i in range(8):
+        for _ in range(8):
            mod = Mod()
            opt_mod(torch.randn(5, 5), mod)

@@ -2516,7 +2516,7 @@ class OptimizedModuleTest(torch._dynamo.test_case.TestCase):
        compiled_model = torch.compile(model, backend="aot_eager")

        activations = compiled_activations
-        for i in range(2):
+        for _ in range(2):
            # second iteration is key, hooks would have fired during aot trace
            # on first iter
            compiled_activations.clear()

@@ -2526,7 +2526,7 @@ class OptimizedModuleTest(torch._dynamo.test_case.TestCase):
            loss.backward()

        activations = eager_activations
-        for i in range(2):
+        for _ in range(2):
            # second iteration is key, hooks would have fired during aot trace
            # on first iter
            eager_activations.clear()

@@ -2575,12 +2575,12 @@ class OptimizedModuleTest(torch._dynamo.test_case.TestCase):
        def save_activations(mod, inp, out):
            activations.append(inp)

-        for name, module in model.named_modules():
+        for module in model.modules():
            module.register_forward_hook(save_activations)

        cnt = torch._dynamo.testing.CompileCounter()
        model = torch.compile(model, backend=cnt, fullgraph=True)
-        for i in range(2):
+        for _ in range(2):
            # second iteration is key, hooks would have fired during aot trace
            # on first iter
            activations.clear()

@@ -2703,7 +2703,7 @@ class OptimizedModuleTest(torch._dynamo.test_case.TestCase):

        model = torch.compile(model, backend="aot_eager")

-        for i in range(2):
+        for _ in range(2):
            # second iteration is key, hooks would have fired during aot trace
            # on first iter
            x = torch.randn((20, 10))

@@ -7555,7 +7555,7 @@ metadata incorrectly.
            (_inp, _tg3),
        ]

-        for i, (inp_fn, tg_fn) in enumerate(TEST_CASES):
+        for inp_fn, tg_fn in TEST_CASES:
            ref_x = inp_fn()
            x = ref_x.detach().clone().requires_grad_()
@@ -491,9 +491,7 @@ def forward(self, arg0_1, arg1_1, arg2_1):
        def ins_dense():
            return torch.tensor([1.0, 2.0, 3.0]), torch.tensor([4.0, 5.0, 6.0])

-        for i, (ins_fn, expected_fw_count) in enumerate(
-            zip([ins_sc, ins_dense], [2, 1])
-        ):
+        for ins_fn, expected_fw_count in zip([ins_sc, ins_dense], [2, 1]):
            reset_counter()
            ref_out = fn(*ins_fn())
            assert_counter(expected_fw_count, 0)

@@ -524,16 +522,14 @@ def forward(self, arg0_1, arg1_1, arg2_1):
                ),
            )

-        for i, (
+        for (
            ins_fn_req_grad,
            (
                expected_fw_count,
                expected_fw_count_after_bw,
                expected_bw_count_after_bw,
            ),
-        ) in enumerate(
-            zip([ins_dense_req_grad, ins_sc_req_grad], [(1, 1, 1), (2, 2, 2)])
-        ):
+        ) in zip([ins_dense_req_grad, ins_sc_req_grad], [(1, 1, 1), (2, 2, 2)]):
            ref_ins = ins_fn_req_grad()
            reset_counter()
            ref_out = fn(*ref_ins)
@@ -35,7 +35,7 @@ class TestAsync(JitTestCase):
    def test_async_future_type_python(self):
        def foo(inp):
            futures = torch.jit.annotate(List[torch.jit.Future[torch.Tensor]], [])
-            for i in range(5):
+            for _ in range(5):
                futures.append(torch.jit.fork(lambda x: x, inp))
            all_outputs = []
            for future in futures:

@@ -458,7 +458,7 @@ class TestAsync(JitTestCase):
        class TestListFutureModule(nn.Module):
            def forward(self, input):
                input_list = []
-                for i in range(3):
+                for _ in range(3):
                    input_list.append(input)

                fut_list: List[Future[torch.Tensor]] = []

@@ -68,7 +68,7 @@ class TestAutodiffJit(JitTestCase):

        fn_s = torch.jit.script(fn)

-        for i in range(4):
+        for _ in range(4):
            x, y = fn_s(a, b, c)
            self.assertFalse(x.requires_grad)
            self.assertTrue(y.requires_grad)

@@ -90,7 +90,7 @@ class TestAutodiffJit(JitTestCase):
        b = torch.rand((10, 10), requires_grad=False)
        c = torch.rand((10, 10), requires_grad=True)

-        for i in range(4):
+        for _ in range(4):
            x_s, y_s, z_s = fn_s(a, b, c)
            x, y, z = fn(a, b, c)

@@ -115,7 +115,7 @@ class TestAutodiffJit(JitTestCase):
        b = torch.rand((10, 10), requires_grad=False)
        c = torch.rand((10, 10), requires_grad=True)

-        for i in range(4):
+        for _ in range(4):
            x_s, y_s, z_s = fn_s(a, b, c)
            x, y, z = fn(a, b, c)

@@ -141,7 +141,7 @@ class TestAutodiffJit(JitTestCase):
        b = torch.rand((10, 10), requires_grad=True)
        c = torch.rand((10, 10), requires_grad=True)

-        for i in range(4):
+        for _ in range(4):
            x_s, y_s, z_s = fn_s(a, b, c)
            x, y, z = fn(a, b, c)
@@ -2989,7 +2989,7 @@ class TestScriptList(JitTestCase):
        test_script.segments_groupby_col

        # Smoketest for flakiness. Takes around 2s.
-        for i in range(300):
+        for _ in range(300):
            test = Test()
            test_script = torch.jit.script(test)

@@ -19,7 +19,7 @@ class TestLogging(JitTestCase):
        class ModuleThatLogs(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
-                for i in range(x.size(0)):
+                for _ in range(x.size(0)):
                    x += 1.0
                    torch.jit._logging.add_stat_value("foo", 1)

@@ -33,7 +33,7 @@ class TestLogging(JitTestCase):
        old_logger = torch.jit._logging.set_logger(logger)
        try:
            mtl = ModuleThatLogs()
-            for i in range(5):
+            for _ in range(5):
                mtl(torch.rand(3, 4, 5))

            self.assertEqual(logger.get_counter_val("foo"), 15)

@@ -60,7 +60,7 @@ class TestLogging(JitTestCase):
        class ModuleThatTimes(torch.jit.ScriptModule):
            def forward(self, x):
                tp_start = torch.jit._logging.time_point()
-                for i in range(30):
+                for _ in range(30):
                    x += 1.0
                tp_end = torch.jit._logging.time_point()
                torch.jit._logging.add_stat_value("mytimer", tp_end - tp_start)

@@ -80,7 +80,7 @@ class TestLogging(JitTestCase):
            @torch.jit.script_method
            def forward(self, x):
                tp_start = torch.jit._logging.time_point()
-                for i in range(30):
+                for _ in range(30):
                    x += 1.0
                tp_end = torch.jit._logging.time_point()
                torch.jit._logging.add_stat_value("mytimer", tp_end - tp_start)

@@ -97,7 +97,7 @@ class TestLogging(JitTestCase):

    def test_counter_aggregation(self):
        def foo(x):
-            for i in range(3):
+            for _ in range(3):
                torch.jit._logging.add_stat_value("foo", 1)
            return x + 1.0
@@ -518,7 +518,7 @@ class TestMisc(JitTestCase):
        ref = fn(x)

        script_fn = torch.jit.script(fn)
-        for i in range(4):
+        for _ in range(4):
            res = script_fn(x)

        self.assertEqual(ref, res)

@@ -2025,7 +2025,7 @@ class TestTracer(JitTestCase):
        module = torch.jit.trace_module(n, inputs)

        check_inputs = []
-        for i in range(2):
+        for _ in range(2):
            check_weight = torch.rand(1, 1, 3, 3)
            check_forward_input = torch.rand(1, 1, 3, 3)
            check_inputs.append(

@@ -341,7 +341,7 @@ class TestTypesAndAnnotation(JitTestCase):
                self.x = x

            def set(self, val: int):
-                for i in range(3):
+                for _ in range(3):
                    self.x: int = val

        # Type annotation in __init__, should not fail
@@ -582,14 +582,14 @@ class TestAutograd(TestCase):
        ctx_1 = torch.autograd.graph.saved_tensors_hooks(lambda x: x, unpack)
        ctx_2 = torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x)

-        for i in range(10):
+        for _ in range(10):
            with ctx_2:
                ctx_1.__enter__()
                x = torch.randn(3, 3, requires_grad=True)
                x.sin().sum().backward()

        # Clean up
-        for i in range(10):
+        for _ in range(10):
            ctx_1.__exit__()

        # Validate there are no more hooks on the stack

@@ -2989,7 +2989,7 @@ class TestAutograd(TestCase):
        state = set()
        with torch.enable_grad():
            coro = coro_no_grad(state)
-            for i in range(5):
+            for _ in range(5):
                next(coro)

            coro.close()

@@ -2998,7 +2998,7 @@ class TestAutograd(TestCase):
        state = set()
        with torch.no_grad():
            coro = coro_enable_grad(state)
-            for i in range(5):
+            for _ in range(5):
                next(coro)

            coro.close()

@@ -5293,7 +5293,7 @@ Done""",
        rnn = torch.nn.LSTM(10, 20, 2)
        total_time_s = 0
        with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
-            for i in range(20):
+            for _ in range(20):
                input = torch.randn(5, 3, 10)
                h = torch.randn(2, 3, 20)
                c = torch.randn(2, 3, 20)

@@ -5925,7 +5925,7 @@ Done""",
        self.assertTrue(p_a == p_g or p_b == p_g)

        # Run backwards multiple times to ensure accumulation works.
-        for i in range(10):
+        for _ in range(10):
            loss.backward(retain_graph=True)

        # non-contiguous indices and value, we should trigger a copy.

@@ -5943,7 +5943,7 @@ Done""",
        self.assertFalse(p_b == p_g)

        # Run backwards multiple times to ensure accumulation works.
-        for i in range(10):
+        for _ in range(10):
            loss.backward(retain_graph=True)

    def test_gradcheck_single_input(self):
@@ -7132,7 +7132,7 @@ for shape in [(1,), ()]:
        )

        feat_combined = []
-        for r in range(num_inp):
+        for _ in range(num_inp):
            data_r = torch.empty(1, nz_inp)
            data_r.uniform_()
            data_r.requires_grad = True

@@ -7202,7 +7202,7 @@ for shape in [(1,), ()]:
        self.use_checkpoint = use_checkpoint
        self.use_reentrant = use_reentrant
        self.layers = nn.ModuleList()
-        for i in range(self.n):
+        for _ in range(self.n):
            layer = nn.Sequential(
                nn.Linear(256, 256), nn.Linear(256, 256), nn.Linear(256, 256)
            )

@@ -7513,7 +7513,7 @@ for shape in [(1,), ()]:

        feat_combined = []
        feat_combined_no_checkpoint = []
-        for r in range(num_inp):
+        for _ in range(num_inp):
            data_r = torch.empty(1, nz_inp)
            data_r.uniform_()
            data_r.requires_grad = input_requires_grad

@@ -11714,7 +11714,7 @@ class TestAutogradDeviceType(TestCase):
    def test_parameter_resize(self, device):
        asd = torch.nn.Parameter(torch.ones(16, dtype=torch.double, device=device))

-        for i in range(2):
+        for _ in range(2):
            with torch.no_grad():
                asd.set_(asd[1:])
                asd.grad = None

@@ -11942,7 +11942,7 @@ class TestAutogradDeviceType(TestCase):

            # Child gpu graph (much longer than parent graph).
            prev = t2 * t2
-            for i in range(10):
+            for _ in range(10):
                prev = prev * t2
            reentrant_root = prev
@@ -4784,7 +4784,7 @@ print(torch.cuda.get_allocator_backend())
            total -= x.numel()

        choices = [alloc, free, torch.cuda.memory.empty_cache]
-        for i in range(N):
+        for _ in range(N):
            while total >= 1024 * 1024 * 1024 / (4 * 10):
                free()
            (action,) = random.choices(choices, weights=[1, 1 if mem else 0, 0.1])

@@ -512,7 +512,7 @@ class FakeTensorTest(TestCase):
    def test_upsample_bilinear_small_channels(self):
        out = []
        mode = FakeTensorMode()
-        for i, context in enumerate([contextlib.nullcontext, lambda: mode]):
+        for context in [contextlib.nullcontext, lambda: mode]:
            with context():
                arg0_1 = torch.empty_strided(
                    (3, 427, 640), (1, 1920, 3), dtype=torch.float32, device="cuda"
@@ -318,13 +318,11 @@ class TestForeach(TestCase):
            return arg

        scalar_self_arg_test_complete = False
-        for i, sample in enumerate(
-            op.sample_inputs(
-                device,
-                dtype,
-                noncontiguous=not is_fastpath,
-                allow_higher_dtype_scalars=True,
-            )
+        for sample in op.sample_inputs(
+            device,
+            dtype,
+            noncontiguous=not is_fastpath,
+            allow_higher_dtype_scalars=True,
        ):
            (rhs_arg,) = sample.args
            kwargs = {} or sample.kwargs
@@ -156,7 +156,7 @@ class TestIndexing(TestCase):
            torch.DoubleTensor if not device.startswith("mps") else torch.FloatTensor
        )
        tensor = _make_tensor(lst).to(device)
-        for _i in range(100):
+        for _ in range(100):
            idx1_start = random.randrange(10)
            idx1_end = idx1_start + random.randrange(1, 10 - idx1_start + 1)
            idx1_step = random.randrange(1, 8)
@@ -2337,9 +2337,9 @@ graph(%Ra, %Rb):
            print("stays")
            while False:
                print("removed")
-            for _i in range(0):
+            for _ in range(0):
                print("removed")
-            for _i in range(-4):
+            for _ in range(-4):
                print("removed")
            return b

@@ -3138,7 +3138,7 @@ class TestScript(JitTestCase):
        with enable_profiling_mode_for_profiling_tests():

            def fct_loop(x):
-                for i in range(3):
+                for _ in range(3):
                    x = torch.cat((x, x), 0)
                return x

@@ -3245,7 +3245,7 @@ class TestScript(JitTestCase):
    def test_nested_bailouts(self):
        @torch.jit.script
        def fct_loop(x):
-            for i in range(3):
+            for _ in range(3):
                x = torch.cat((x, x), 0)
            return x

@@ -3907,7 +3907,7 @@ def foo(x):
            else:
                return f'v{idx - len(exprs)}'

-        for i in range(50):
+        for _ in range(50):
            n = None
            while n is None or n > len(exprs) + n_variables:
                template = random.choice(templates)

@@ -3922,7 +3922,7 @@ def foo(x):
            src_lines.append(' return ({})\n'.format(''.join(f'v{i},' for i in range(n_variables))))
            return '\n'.join(src_lines)

-        for i in range(100):
+        for _ in range(100):
            g = {'torch': torch}
            code = gen_code()
            builtins.exec(code, g, None)

@@ -4602,7 +4602,7 @@ def foo(xyz):
        y = torch.randn(3, 3, requires_grad=True)

        def grad_in_loop(x, y):
-            for i in range(100):
+            for _ in range(100):
                x = y @ x
            return x
@ -5559,7 +5559,7 @@ a")
|
||||||
@torch.jit.script
|
@torch.jit.script
|
||||||
def test(x):
|
def test(x):
|
||||||
after_resize_alias = torch.zeros([2])
|
after_resize_alias = torch.zeros([2])
|
||||||
for _i in range(5):
|
for _ in range(5):
|
||||||
b = x + 1
|
b = x + 1
|
||||||
f = [1]
|
f = [1]
|
||||||
before_resize_alias = b.sub_(1)
|
before_resize_alias = b.sub_(1)
|
||||||
|
|
@ -5950,7 +5950,7 @@ a")
|
||||||
# type: (int) -> int
|
# type: (int) -> int
|
||||||
prev = 1
|
prev = 1
|
||||||
v = 1
|
v = 1
|
||||||
for i in range(x):
|
for _ in range(x):
|
||||||
save = v
|
save = v
|
||||||
v = v + prev
|
v = v + prev
|
||||||
prev = save
|
prev = save
|
||||||
|
|
@ -7785,7 +7785,7 @@ dedent """
|
||||||
while int(tensor.add_(1)) < 4:
|
while int(tensor.add_(1)) < 4:
|
||||||
if y == 1:
|
if y == 1:
|
||||||
continue
|
continue
|
||||||
for i in range(y):
|
for _ in range(y):
|
||||||
continue
|
continue
|
||||||
ret += 1
|
ret += 1
|
||||||
ret += 1
|
ret += 1
|
||||||
|
|
@ -7896,7 +7896,7 @@ dedent """
|
||||||
def assign_after_break_nested(y):
|
def assign_after_break_nested(y):
|
||||||
# type: (int)
|
# type: (int)
|
||||||
x = 0
|
x = 0
|
||||||
for i in range(y):
|
for _ in range(y):
|
||||||
if y == 1:
|
if y == 1:
|
||||||
x = 5
|
x = 5
|
||||||
break
|
break
|
||||||
|
|
@ -7916,7 +7916,7 @@ dedent """
|
||||||
def may_break(y):
|
def may_break(y):
|
||||||
# type: (int)
|
# type: (int)
|
||||||
x = 0
|
x = 0
|
||||||
for i in range(y):
|
for _ in range(y):
|
||||||
if y == 1:
|
if y == 1:
|
||||||
x = 5
|
x = 5
|
||||||
else:
|
else:
|
||||||
|
|
@ -7988,7 +7988,7 @@ dedent """
|
||||||
def test_varexit(cond):
|
def test_varexit(cond):
|
||||||
# type: (int)
|
# type: (int)
|
||||||
m = 0
|
m = 0
|
||||||
for i in range(3):
|
for _ in range(3):
|
||||||
if cond == 2:
|
if cond == 2:
|
||||||
if cond == 2:
|
if cond == 2:
|
||||||
m = 2
|
m = 2
|
||||||
|
|
@@ -8376,7 +8376,7 @@ dedent """
            # find the last output, then all subsequent uses
            fc.check(out_name[-1] + " : ")
            # skip past node body
-            for i in range(contained_blocks(node)):
+            for _ in range(contained_blocks(node)):
                fc.check("->")
            if (node.kind() == "prim::If"):
                fc.check("->").check("->").check("\n")

@@ -8429,7 +8429,7 @@ dedent """
            a = 1
            b = 2
            c = 3
-            for i in range(iter):
+            for _ in range(iter):
                a = 4
                b = 5
                c = 6

@@ -8445,7 +8445,7 @@ dedent """
            a = 1
            b = 2
            c = 3
-            for i in range(iter):
+            for _ in range(iter):
                c = c + 1
                b = b + 1
                a = a + 1

@@ -10938,7 +10938,7 @@ dedent """

        # Test symbolic differentiation
        # Run Forward and Backward thrice to trigger autodiff graph
-        for i in range(3):
+        for _ in range(3):
            y = jit_module(x)
            y.backward(grad)
            x.grad.zero_()

@@ -11030,7 +11030,7 @@ dedent """
        W.data /= 4

        with enable_profiling_mode_for_profiling_tests():
-            for i in range(4):
+            for _ in range(4):
                self.assertTrue((foo(x, y, W).grad_fn is None) == (jitted_foo(x, y, W).grad_fn is None))
@@ -11822,7 +11822,7 @@ dedent """
    def test_for_in_tensors(self):
        def test_sizes(x):
            sumz = 0
-            for s in x:
+            for _ in x:
                sumz += 1
            return sumz
        self.checkScript(test_sizes, (torch.rand(5, 4, 3, 2, 1),))

@@ -11834,7 +11834,7 @@ dedent """
        @torch.jit.script
        def test_sizes(x):
            sumz = 0
-            for s in x:
+            for _ in x:
                sumz += 1
            return sumz

@@ -11846,7 +11846,7 @@ dedent """
        def test_sizes(x):
            # type: (float) -> int
            sumz = 0
-            for s in x:
+            for _ in x:
                sumz += 1
            return sumz

@@ -11856,7 +11856,7 @@ dedent """
        def test_sizes(x):
            sumz = 0
            for n in x:
-                for t in n:
+                for _ in n:
                    sumz += 1
            return sumz
@@ -12316,7 +12316,7 @@ dedent """

            @torch.jit.script_method
            def forward(self, x):
-                for _i in range(4):
+                for _ in range(4):
                    x += self.param
                return x

@@ -12840,7 +12840,7 @@ dedent """

        # Load from filename
        tracemalloc.start()
-        for i in range(num_iters):
+        for _ in range(num_iters):
            torch._C.PyTorchFileReader(filename)
        _, peak_from_string = tracemalloc.get_traced_memory()
        tracemalloc.stop()

@@ -12848,7 +12848,7 @@ dedent """
        # Load from stream
        tracemalloc.start()
        with open(filename, 'rb') as f:
-            for i in range(num_iters):
+            for _ in range(num_iters):
                f.seek(0)
                torch._C.PyTorchFileReader(f)
        _, peak_from_file = tracemalloc.get_traced_memory()
@@ -13287,7 +13287,7 @@ dedent """
    def test_pass(self):
        def foo(x):
            # type: (bool) -> int
-            for _i in range(3):
+            for _ in range(3):
                pass
            if x:
                pass

@@ -13903,7 +13903,7 @@ dedent """
        def test_loop_no_escape(x):
            # type: (int)
            if x >= 0:
-                for i in range(x):
+                for _ in range(x):
                    raise RuntimeError("hi")
            else:
                return 5

@@ -14116,7 +14116,7 @@ dedent """

        def test_will_ret(y):
            # type: (int) -> int
-            for i in range(y):
+            for _ in range(y):
                return 2
            return 1

@@ -14125,8 +14125,8 @@ dedent """

        def test_loop_nest_ret(y):
            # type: (int) -> int
-            for i in range(y):
-                for i in range(y - 2):
+            for _ in range(y):
+                for _ in range(y - 2):
                    return 10
                return 5
            return 0
@@ -15387,7 +15387,7 @@ dedent """
            if isinstance(item, list):
                return is_tensor_value(item[0])
            return False
-        for name, value, the_type in self.get_pickle_values():
+        for name, value, _the_type in self.get_pickle_values():
            if is_tensor_value(value):
                continue
            self.assertEqual(value, getattr(loaded, "_" + name))

@@ -15768,7 +15768,7 @@ dedent """
    def test_for_else(self):
        def fn():
            c = 0
-            for i in range(4):
+            for _ in range(4):
                c += 10
            else:
                print("In else block of for...else")
@@ -115,7 +115,7 @@ class TestPythonJiterator(TestCase):
    @parametrize("num_inputs", [1, 5, 8])
    def test_various_num_inputs(self, num_inputs):
        inputs = []
-        for i in range(num_inputs):
+        for _ in range(num_inputs):
            inputs.append(torch.rand(3, device='cuda').mul(10))

        input_string = ",".join([f"T i{i}" for i in range(num_inputs)])

@@ -58,7 +58,7 @@ class SubProcess(mp.Process):


def _test_cuda_ipc_deadlock_actor(queue, iterations):
-    for i in range(iterations):
+    for _ in range(iterations):
        if not queue.empty():
            queue.get()
        time.sleep(0.01)

@@ -66,7 +66,7 @@ def _test_cuda_ipc_deadlock_actor(queue, iterations):

def _test_cuda_ipc_deadlock_learner(queue, iterations):
    net = torch.nn.LSTM(1, 1).cuda()
-    for i in range(iterations):
+    for _ in range(iterations):
        if not queue.full():
            queue.put(copy.deepcopy(net.state_dict()))
        time.sleep(0.01)

@@ -138,7 +138,7 @@ def send_tensor_with_untyped_storage(queue, event):

def receive_and_send_sum(queue, out_queue, event, device, dtype, count, size=5):
    s = torch.full([size], 0, device=device, dtype=dtype)
-    for i in range(count):
+    for _ in range(count):
        t = queue.get()
        s += t
    out_queue.put(s)

@@ -146,7 +146,7 @@ def receive_and_send_sum(queue, out_queue, event, device, dtype, count, size=5):


def receive_and_send(queue, out_queue, event, count):
-    for i in range(count):
+    for _ in range(count):
        t = queue.get()
        out_queue.put(t.clone())
    event.wait()
@@ -1238,7 +1238,7 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""")

        def check():
            self.assertEqual(len(parameter_dict), len(parameters))
-            for i, (k1, (k2, m2)) in enumerate(zip(parameters, parameter_dict.named_parameters())):
+            for (k1, (k2, m2)) in zip(parameters, parameter_dict.named_parameters()):
                self.assertEqual(k1, k2)
                self.assertIs(parameters[k1], m2)
            for k1, k2 in zip(parameters, parameter_dict):

@@ -2958,7 +2958,7 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""")
                                       batch_first=batch_first)

        # set constant weights of the model
-        for idx, p in enumerate(model.parameters()):
+        for p in model.parameters():
            x = p.data
            sz = x.view(-1).size(0)
            shape = x.shape

@@ -3108,7 +3108,7 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""")
                                       activation, batch_first=batch_first)

        # set constant weights of the model
-        for idx, p in enumerate(model.parameters()):
+        for p in model.parameters():
            x = p.data
            sz = x.view(-1).size(0)
            shape = x.shape

@@ -3185,7 +3185,7 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""")

        with torch.no_grad():
            # set constant weights of the model
-            for idx, p in enumerate(layer.parameters()):
+            for p in layer.parameters():
                x = p.data
                sz = x.view(-1).size(0)
                shape = x.shape

@@ -13129,7 +13129,7 @@ if __name__ == '__main__':
        model = model.eval()

        # set constant weights of the model
-        for idx, p in enumerate(model.parameters()):
+        for p in model.parameters():
            x = p.data
            sz = x.view(-1).size(0)
            shape = x.shape

@@ -13349,7 +13349,7 @@ if __name__ == '__main__':
        model = model.eval()

        # set constant weights of the model
-        for idx, p in enumerate(model.parameters()):
+        for p in model.parameters():
            x = p.data
            sz = x.view(-1).size(0)
            shape = x.shape
@@ -1483,7 +1483,7 @@ class TestSparse(TestSparseBase):
        def test_shape(num_mats, dim_i, dim_j, dim_k, nnz):
            a_list = []
            b_list = []
-            for mat_idx in range(num_mats):
+            for _ in range(num_mats):
                a_list.append(self._gen_sparse(2, nnz, [dim_i, dim_j], dtype, device, coalesced)[0])
                b_list.append(torch.randn([dim_j, dim_k], dtype=dtype, device=device))

@@ -4251,9 +4251,9 @@ class TestSparseCompressedTritonKernels(TestCase):
        # Test warn_once when requesting non-existing tuned parameters multiple times
        f = io.StringIO()
        with redirect_stderr(f):
-            for i in range(5):
+            for _ in range(5):
                get_meta(16, 16, 16)
-            for i in range(5):
+            for _ in range(5):
                get_meta(16, 16, 32)

            msg = f.getvalue()
@@ -139,7 +139,7 @@ def fork_wait_graph_exception(input1, input2):

def loop_graph(a, b, iters: int):
    c = a + b * 2
-    for i in range(iters):
+    for _ in range(iters):
        c = c + b
        c *= 2
        c -= a

@@ -81,7 +81,7 @@ def _generate_input(shape, dtype, device, with_extremal):
# TODO: replace with make_tensor
def _rand_shape(dim, min_size, max_size):
    shape = []
-    for i in range(dim):
+    for _ in range(dim):
        shape.append(random.randint(min_size, max_size))
    return tuple(shape)
@@ -942,7 +942,7 @@ class TestTensorCreation(TestCase):
            num_tensors = random.randint(1, 5)
            torch_input = []
            # Create tensors with shape being different along one axis only
-            for param in range(num_tensors):
+            for _ in range(num_tensors):
                shape[i] = random.randint(1, 5)
                torch_input.append(_generate_input(tuple(shape), dtype, device, with_extremal=False))

@@ -997,7 +997,7 @@ class TestTensorCreation(TestCase):
        ops = ((torch.vstack, np.vstack), (torch.row_stack, np.vstack))
        for torch_op, np_op in ops:
            self._test_special_stacks(0, 2, torch_op, np_op, device, dtype)
-            for i in range(5):
+            for _ in range(5):
                # Test dimension change for 1D tensor of size (N) and 2D tensor of size (1, N)
                n = random.randint(1, 10)
                input_a = _generate_input((n,), dtype, device, with_extremal=False)

@@ -1012,7 +1012,7 @@ class TestTensorCreation(TestCase):
    @dtypes(*all_types_and_complex_and(torch.half))
    def test_dstack(self, device, dtype):
        self._test_special_stacks(2, 3, torch.dstack, np.dstack, device, dtype)
-        for i in range(5):
+        for _ in range(5):
            # Test dimension change for 1D tensor of size (N), 2D tensor of size (1, N), and 3D tensor of size (1, N, 1)
            n = random.randint(1, 10)
            input_a = _generate_input((n,), dtype, device, with_extremal=False)
@@ -2885,7 +2885,7 @@ class TestTensorCreation(TestCase):
    @dtypesIfCUDA(torch.float, torch.double, torch.bfloat16, torch.half, torch.long)
    @dtypes(torch.float, torch.double, torch.long, torch.bfloat16, torch.float16)
    def test_kaiser_window(self, device, dtype):
-        for num_test in range(50):
+        for _ in range(50):
            self._test_signal_window_functions('kaiser', dtype, device, beta=random.random() * 30)

    def _test_signal_windows_functions(self, name, dtype, device, **kwargs):

@@ -2918,7 +2918,7 @@ class TestTensorCreation(TestCase):
    @unittest.skipIf(not TEST_SCIPY, "Scipy not found")
    @dtypes(torch.float, torch.double)
    def test_kaiser(self, device, dtype):
-        for num_test in range(50):
+        for _ in range(50):
            self._test_signal_windows_functions('kaiser', dtype, device, beta=random.random() * 30)

    def test_tensor_factories_empty(self, device):
@@ -1216,7 +1216,7 @@ class TestTensorExprFuser(BaseTestClass):
        @torch.jit.script
        def test(x: torch.Tensor, y: torch.Tensor, z: int) -> torch.Tensor:
            b = y
-            for i in range(z):
+            for _ in range(z):
                a = x + y
                b = b + y
            return b
@@ -303,7 +303,7 @@ class TestTransformers(NNTestCase):
        encoder = nn.TransformerEncoder(layer, 2).to(device)
        optimizer = optim.SGD(encoder.parameters(), lr=0.1, momentum=0.9)
        encoder.train()
-        for i in range(iters):
+        for _ in range(iters):
            encoder.train()
            optimizer.zero_grad()
            inputs = torch.cat([torch.randn(1, 2, 2), torch.zeros(1, 2, 2)], dim=1).to(device)

@@ -537,7 +537,7 @@ class TestTransformers(NNTestCase):

        with torch.no_grad():
            # set constant weights of the model
-            for idx, p in enumerate(model.parameters()):
+            for p in model.parameters():
                x = p.data
                sz = x.view(-1).size(0)
                shape = x.shape

@@ -587,7 +587,7 @@ class TestTransformers(NNTestCase):

        with torch.no_grad():
            # set constant weights of the model
-            for idx, p in enumerate(layer.parameters()):
+            for p in layer.parameters():
                x = p.data
                sz = x.view(-1).size(0)
                shape = x.shape