Enable flake8-bugbear B020 lint (#110823)
Fixes part of https://github.com/pytorch/pytorch/issues/106571
Pull Request resolved: https://github.com/pytorch/pytorch/pull/110823
Approved by: https://github.com/Skylion007
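For context, B020 is the flake8-bugbear check for a loop whose control variable reuses (and therefore rebinds) the name of the iterable it loops over, which is what most of the renames in this diff address. A minimal sketch of the pattern and the fix, with illustrative variable names that are not taken from this change:

    # B020: the loop variable rebinds the name of the list it iterates over,
    # so after the loop "expected" no longer refers to the original list.
    expected = [1, 2, 3]
    for expected in expected:  # flake8-bugbear reports B020 here
        print(expected)

    # Fix: give the container (or the loop variable) a distinct name.
    expected_values = [1, 2, 3]
    for expected in expected_values:  # no shadowing; intent stays clear
        print(expected)

Where the shadowing is deliberate, as in a couple of the test hunks below, the line is kept as-is and suppressed with a trailing # noqa: B020 comment instead of being renamed.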
Parent: b600aed237
Commit: 192477b5ba
Changed file shown in stats: .flake8 (2 lines)
@@ -14,7 +14,7 @@ ignore =
     # to line this up with executable bit
     EXE001,
     # these ignores are from flake8-bugbear; please fix!
-    B007,B008,B017,B019,B020,B023,B026,B028,B903,B904,B905,B906,B907
+    B007,B008,B017,B019,B023,B026,B028,B903,B904,B905,B906,B907
     # these ignores are from flake8-comprehensions; please fix!
     C407,
     # these ignores are from flake8-logging-format; please fix!

@@ -90,7 +90,7 @@ class TestNetBuilder(unittest.TestCase):
         plan.AddStep(to_execution_step(nb))
         ws = workspace.C.Workspace()
         ws.run(plan)
-        expected = [
+        expected_results = [
             (y, 5),
             (z, False),
             (w, True),
@@ -99,7 +99,7 @@ class TestNetBuilder(unittest.TestCase):
             (p, 2),
             (q, 3),
         ]
-        for b, expected in expected:
+        for b, expected in expected_results:
             actual = ws.blobs[str(b)].fetch()
             self.assertEqual(actual, expected)

@@ -1081,7 +1081,7 @@ def from_column_list(
         'col_names and col_blobs must have the same length.'
     )
     root = _SchemaNode('root', 'Struct')
-    for col_name, col_type, col_blob, col_metadata in zip(
+    for col_name, col_type, col_blob, col_md in zip(
         col_names, col_types, col_blobs, col_metadata
     ):
         columns = col_name.split(FIELD_SEPARATOR)
@@ -1095,7 +1095,7 @@ def from_column_list(
                 field = Scalar(
                     dtype=col_type,
                     blob=col_blob,
-                    metadata=col_metadata
+                    metadata=col_md
                 )
             next = current.add_child(name, type_str)
             if field is not None:

@@ -28,7 +28,7 @@ ignore = [
     # these ignores are from flake8-bugbear; please fix!
     "B007", "B008", "B017",
     "B018", # Useless expression
-    "B019", "B020",
+    "B019",
     "B023", "B026",
     "B028", # No explicit `stacklevel` keyword argument found
     "B904",

@@ -34,8 +34,8 @@ class TestCommitList(unittest.TestCase):
             expected.write_to_disk()

             commit_list = CommitList.from_existing(commit_list_path)
-            for commit, expected in zip(commit_list.commits, expected.commits):
-                self.assertEqual(commit, expected)
+            for commit, expected_commit in zip(commit_list.commits, expected.commits):
+                self.assertEqual(commit, expected_commit)

     def test_update_to(self):
         with tempfile.TemporaryDirectory() as tempdir:

@@ -880,8 +880,8 @@ class LocalElasticAgentTest(unittest.TestCase):

         # global world size == sum of all the role world sizes
         expected_world_size = sum(expected_role_world_sizes.values())
-        for role, run_results in run_results.items():
-            for result in run_results:
+        for role, results in run_results.items():
+            for result in results:
                 res = result.return_values
                 for role_info in res.values():
                     rank = role_info.rank

@@ -143,11 +143,11 @@ class TestFSDPWithDeviceMeshAndDTensor(DTensorTestBase):
         sharded_tensor_osd = FSDP.optim_state_dict(ref_model, ref_optim)

         # Check dtensor and sharded_tensor model state dict values are identical
-        for dtensor_sd, sharded_tensor_sd in zip(
+        for dtensor_sd_item, sharded_tensor_sd_item in zip(
             dtensor_sd.items(), sharded_tensor_sd.items()
         ):
-            k1, v1 = dtensor_sd
-            k2, v2 = sharded_tensor_sd
+            k1, v1 = dtensor_sd_item
+            k2, v2 = sharded_tensor_sd_item
             self.assertEqual(k1, k2)

             # if the ShardedTensor is an empty shard,
@@ -227,15 +227,15 @@ class TestFSDPWithDeviceMeshAndDTensor(DTensorTestBase):
         new_optim_state_dict = FSDP.optim_state_dict(model, optim)

         # Check whether new_optim_state_dict is the same as ref_optim_state_dict.
-        for new_optim_state_dict, ref_optim_state_dict in zip(
+        for new_optim_state_dict_item, ref_optim_state_dict_item in zip(
             new_optim_state_dict["state"].items(),
             ref_optim_state_dict["state"].items(),
         ):
             # check FQN are the same
-            self.assertEqual(new_optim_state_dict[0], ref_optim_state_dict[0])
+            self.assertEqual(new_optim_state_dict_item[0], ref_optim_state_dict_item[0])
             for new_optim_hyper_param, ref_optim_hyper_param in zip(
-                new_optim_state_dict[1].items(),
-                ref_optim_state_dict[1].items(),
+                new_optim_state_dict_item[1].items(),
+                ref_optim_state_dict_item[1].items(),
             ):
                 k1, v1 = new_optim_hyper_param
                 k2, v2 = ref_optim_hyper_param

@@ -154,11 +154,11 @@ class TestHSDPWithDeviceMeshAndDTensor(DTensorTestBase):
         sharded_tensor_osd = FSDP.optim_state_dict(ref_model, ref_optim)

         # Check dtensor and sharded_tensor model state dict values are identical
-        for dtensor_sd, sharded_tensor_sd in zip(
+        for dtensor_sd_item, sharded_tensor_sd_item in zip(
             dtensor_sd.items(), sharded_tensor_sd.items()
         ):
-            k1, v1 = dtensor_sd
-            k2, v2 = sharded_tensor_sd
+            k1, v1 = dtensor_sd_item
+            k2, v2 = sharded_tensor_sd_item
             self.assertEqual(k1, k2)

             self.assertEqual(type(v1), DTensor)
@@ -225,15 +225,15 @@ class TestHSDPWithDeviceMeshAndDTensor(DTensorTestBase):
         new_optim_state_dict = FSDP.optim_state_dict(model, optim)

         # Check whether new_optim_state_dict is the same as ref_optim_state_dict.
-        for new_optim_state_dict, ref_optim_state_dict in zip(
+        for new_optim_state_dict_item, ref_optim_state_dict_item in zip(
             new_optim_state_dict["state"].items(),
             ref_optim_state_dict["state"].items(),
         ):
             # check FQN are the same
-            self.assertEqual(new_optim_state_dict[0], ref_optim_state_dict[0])
+            self.assertEqual(new_optim_state_dict_item[0], ref_optim_state_dict_item[0])
             for new_optim_hyper_param, ref_optim_hyper_param in zip(
-                new_optim_state_dict[1].items(),
-                ref_optim_state_dict[1].items(),
+                new_optim_state_dict_item[1].items(),
+                ref_optim_state_dict_item[1].items(),
             ):
                 k1, v1 = new_optim_hyper_param
                 k2, v2 = ref_optim_hyper_param

@@ -3423,9 +3423,9 @@ class TestVmapOperatorsOpInfo(TestCase):
                 sample_input = error_input.sample_input
                 args = (sample_input.input,) + tuple(sample_input.args)
                 kwargs = sample_input.kwargs
-                for args, in_dims, _ in generate_vmap_inputs(args, {}):
+                for batched_args, in_dims, _ in generate_vmap_inputs(args, {}):
                     with self.assertRaises(Exception):
-                        vmap(op, in_dims)(*args, **kwargs)
+                        vmap(op, in_dims)(*batched_args, **kwargs)

         # Sample inputs check
         sample_inputs_op = {
@@ -3455,16 +3455,16 @@ class TestVmapOperatorsOpInfo(TestCase):
                 continue
             kwargs = sample_input.kwargs
             is_batch_norm_and_training = is_batch_norm_training(op.name, kwargs)
-            for args, in_dims, _ in generate_vmap_inputs(
+            for batched_args, in_dims, _ in generate_vmap_inputs(
                     args, {}, is_batch_norm_and_training=is_batch_norm_and_training):
                 for func in aliases:
-                    self.vmap_outplace_test(func, args, kwargs, in_dims, check_shape_only, postprocess_fn)
+                    self.vmap_outplace_test(func, batched_args, kwargs, in_dims, check_shape_only, postprocess_fn)
                 if op.name in skip_inplace:
                     continue
                 if not is_valid_inplace_sample_input(sample_input, op, op.inplace_variant):
                     continue
                 for func in inplace_aliases:
-                    self.vmap_inplace_test(func, args, kwargs, in_dims, postprocess_fn)
+                    self.vmap_inplace_test(func, batched_args, kwargs, in_dims, postprocess_fn)

         if check_has_batch_rule:
             check_vmap_fallback(self, test, op)
@@ -4195,11 +4195,11 @@ class TestVmapOperatorsOpInfo(TestCase):
             gout = torch.randn(2, 2, device=device)
             args = (leaf, gout)

-            for args, in_dims, _, in generate_vmap_inputs(args, {}):
+            for batched_args, in_dims, _, in generate_vmap_inputs(args, {}):
                 if in_dims[1] is None:
                     # triggers some composite compliance problem
                     continue
-                self.vmap_outplace_test(push_vjp, args, {}, in_dims)
+                self.vmap_outplace_test(push_vjp, batched_args, {}, in_dims)

     def test_advanced_indexing(self, device):
         def test(f, args):

@@ -2691,9 +2691,9 @@ class TestFrozenOptimizations(JitTestCase):
         with set_default_dtype(torch.float):
             conv_bias = [True, False]
             conv_ops = [nn.Conv2d, nn.Conv3d]
-            add_z = [True, False]
+            use_add_z = [True, False]
             use_tracing = [True, False]
-            for use_bias, conv, add_z, tracing in product(conv_bias, conv_ops, add_z, use_tracing):
+            for use_bias, conv, add_z, tracing in product(conv_bias, conv_ops, use_add_z, use_tracing):
                 class Net(nn.Module):
                     def __init__(self, in_channels, out_channels, **kwargs):
                         super().__init__()

@@ -944,10 +944,10 @@ class TestEmbeddingNNDeviceType(NNTestCase):
                              atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)

         trainable_scale = (True, False)
-        include_last_offset = (True, False)
+        include_last_offset_list = (True, False)
         modes = (('sum', False), ('sum', True), ('max', False), ('mean', False))
         for (mode, has_weight), trainable, include_last_offset in itertools.product(
-            modes, trainable_scale, include_last_offset
+            modes, trainable_scale, include_last_offset_list
         ):
             test_per_sample_weights_new_offsets(
                 mode, trainable, include_last_offset, has_weight

@@ -364,8 +364,8 @@ class TestFakeQuantizeOps(TestCase):
     def _test_backward_per_tensor_cachemask_impl(self, device):
         float_types = (torch.float32, torch.float16, torch.float64)
         torch_types = (torch.qint8, torch.quint8)
-        tensor_qparam = (True, False)
-        for float_type, torch_type, tensor_qparam in itertools.product(float_types, torch_types, tensor_qparam):
+        tensor_qparams = (True, False)
+        for float_type, torch_type, tensor_qparam in itertools.product(float_types, torch_types, tensor_qparams):
             X = torch.randn(4, 8).to(device).to(float_type)
             X.requires_grad_()
             # pick the scale + zp so that some values get clipped

@@ -2413,7 +2413,7 @@ class TestLinalg(TestCase):
             self.assertEqual(v.mT.matmul(V).det().abs(), torch.ones(batches, device=device, dtype=dtype))

         all_batches = [(), (1,), (3,), (2, 3)]
-        for actual_rank, size, all_batches in [
+        for actual_rank, size, all_batches in [  # noqa: B020
             (2, (17, 4), all_batches),
             (4, (17, 4), all_batches),
             (4, (17, 17), all_batches),
@@ -7415,7 +7415,7 @@ scipy_lobpcg | {eq_err_scipy:10.2e} | {eq_err_general_scipy:10.2e} | {iters2:
             self.assertEqual(s[..., :actual_rank], S[..., :actual_rank])

         all_batches = [(), (1,), (3,), (2, 3)]
-        for actual_rank, size, all_batches in [
+        for actual_rank, size, all_batches in [  # noqa: B020
             (2, (17, 4), all_batches),
             (2, (100, 4), all_batches),
             (6, (100, 40), all_batches),

@@ -11760,7 +11760,7 @@ class TestNNDeviceType(NNTestCase):
         reductions = ['none', 'sum', 'mean']
         label_smoothings = [0.05, 0.15]

-        weight = torch.tensor([0.3, 0.6], device=device)
+        wgt = torch.tensor([0.3, 0.6], device=device)
         inp1 = torch.tensor([[0.3, 0.4], [1, 2]], device=device)
         inp2 = torch.tensor([[0.3, 0.6], [1, 2]], device=device)

@@ -11768,7 +11768,7 @@ class TestNNDeviceType(NNTestCase):
         targ_negative_ignore_index = torch.tensor([-2, 1], device=device)
         targ_positive_ignore_index = torch.tensor([2, 1], device=device)

-        for reduction, label_smoothing, weight in product(reductions, label_smoothings, (None, weight)):
+        for reduction, label_smoothing, weight in product(reductions, label_smoothings, (None, wgt)):
             def check_equal(loss, inp_targ_1, inp_targ_2):
                 inp1, targ1 = inp_targ_1
                 inp2, targ2 = inp_targ_2

@@ -734,9 +734,9 @@ class TestDiff(TestCase):
         x = list(range(3))
         assert_raises(ValueError, diff, x, n=-1)
         output = [diff(x, n=n) for n in range(1, 5)]
-        expected = [[1, 1], [0], [], []]
+        expected_output = [[1, 1], [0], [], []]
         # assert_(diff(x, n=0) is x)
-        for n, (expected, out) in enumerate(zip(expected, output), start=1):
+        for n, (expected, out) in enumerate(zip(expected_output, output), start=1):
             assert_(type(out) is np.ndarray)
             assert_array_equal(out, expected)
             assert_equal(out.dtype, np.int_)

@@ -323,19 +323,19 @@ def broadcast_symbolic_shapes(a, b):
     are symbolic sympy formulas.
     """
     output = []
-    for a, b in itertools.zip_longest(
+    for x, y in itertools.zip_longest(
         reversed(a), reversed(b), fillvalue=sympy.Integer(1)
     ):
-        if b == 1:
-            output.append(a)
-        elif a == 1:
-            output.append(b)
+        if y == 1:
+            output.append(x)
+        elif x == 1:
+            output.append(y)
         else:
-            V.graph.sizevars.guard_equals(a, b)
-            if len(sympy.expand(b).free_symbols) < len(sympy.expand(a).free_symbols):
-                output.append(b)  # prefer shorter formula
+            V.graph.sizevars.guard_equals(x, y)
+            if len(sympy.expand(y).free_symbols) < len(sympy.expand(x).free_symbols):
+                output.append(y)  # prefer shorter formula
             else:
-                output.append(a)
+                output.append(x)
     return tuple(reversed(output))

@@ -540,8 +540,9 @@ def _find_names(obj):
     import inspect

     frame = inspect.currentframe()
-    for frame in iter(lambda: frame.f_back, None):  # type: ignore[union-attr]
+    while frame is not None:
         frame.f_locals
+        frame = frame.f_back
     obj_names = []
     for referrer in gc.get_referrers(obj):
         if isinstance(referrer, dict):

@@ -550,9 +550,9 @@ def _jacfwd(func, inputs, strict=False, vectorize=False):
     is_outputs_tuple, outputs = output_info
     # Step 3: for each of the output tangents, split along dim 0
     jacobian_input_output = []
-    for jac, output_i in zip(outputs_before_split, outputs):
+    for jac_output_i, output_i in zip(outputs_before_split, outputs):
         jacobian_output_i_output = []
-        for jac, input_j in zip(jac.split(input_numels, dim=0), inputs):
+        for jac, input_j in zip(jac_output_i.split(input_numels, dim=0), inputs):
             # We need to transpose the Jacobian because in forward AD, the
             # batch dimension represents that of the inputs
             jacobian_input_i_output_j = jac.permute(*range(1, jac.ndim), 0).reshape(
@@ -758,9 +758,11 @@ def jacobian(
         # Step 3: The returned jacobian is one big tensor per input. In this step,
         # we split each Tensor by output.
         jacobian_input_output = []
-        for jac, input_i in zip(jacobians_of_flat_output, inputs):
+        for jac_input_i, input_i in zip(jacobians_of_flat_output, inputs):
             jacobian_input_i_output = []
-            for jac, output_j in zip(jac.split(output_numels, dim=0), outputs):
+            for jac, output_j in zip(
+                jac_input_i.split(output_numels, dim=0), outputs
+            ):
                 jacobian_input_i_output_j = jac.view(output_j.shape + input_i.shape)
                 jacobian_input_i_output.append(jacobian_input_i_output_j)
             jacobian_input_output.append(jacobian_input_i_output)

@@ -1262,8 +1262,8 @@ class ConstraintGenerator:
         if isinstance(t, torch.Tensor):
             if len(t.shape) > 0:
                 res = []
-                for t in t.shape:
-                    res.append(t)
+                for d in t.shape:
+                    res.append(d)
                 attr_type = TensorType(res)
                 output, counter = gen_tvar(counter)
                 self.symbol_dict[n] = output

@@ -1286,8 +1286,8 @@ class DistributedDataParallel(Module, Joinable):
             )
             yield from ps

-        for m in m.modules() if recurse else [m]:
-            yield from model_parameters(m)
+        for mod in m.modules() if recurse else [m]:
+            yield from model_parameters(mod)

     def _check_default_group(self):
         pickle_not_supported = False

@@ -458,7 +458,7 @@ class DeviceTypeTestBase(TestCase):
                 parametrize_fn = compose_parametrize_fns(dtype_parametrize_fn, parametrize_fn)

             # Instantiate the parametrized tests.
-            for (test, test_suffix, param_kwargs, decorator_fn) in parametrize_fn(test, generic_cls, cls):
+            for (test, test_suffix, param_kwargs, decorator_fn) in parametrize_fn(test, generic_cls, cls):  # noqa: B020
                 test_suffix = '' if test_suffix == '' else '_' + test_suffix
                 device_suffix = '_' + cls.device_type

@@ -4926,7 +4926,7 @@ def sample_inputs_put(op_info, device, dtype, requires_grad, **kwargs):
     tgt_gen = (make_arg(size) for size in tgt_sizes)
     idx = make_idx((0,), high=1)
     src = make_arg((0,))
-    for tgt, acc in product(tgt, (True, False)):
+    for tgt, acc in product(tgt_gen, (True, False)):
         yield SampleInput(input=tgt.clone().requires_grad_(requires_grad),
                           args=(idx.clone(),
                                 src.clone().requires_grad_(requires_grad),
@@ -8190,9 +8190,9 @@ def sample_inputs_scaled_dot_product_attention(op_info, device, dtype, requires_

     qkv_shapes = [(dim_3_q_shape, dim_3_kv_shape), (dim_4_q_shape, dim_4_kv_shape), broadcast_tuple]
     samples = []
-    for qkv_shapes, is_causal, dropout_p in product(
+    for qkv_shape, is_causal, dropout_p in product(
             qkv_shapes, [True, False], [0.0, 0.5]):
-        shape_q, shape_kv = qkv_shapes
+        shape_q, shape_kv = qkv_shape
         samples.append(SampleInput(
             make(shape_q),
             make(shape_kv),

@@ -40,9 +40,9 @@ def _generate_input_args_string(obj):
     for param_name in signature.parameters.keys():
         input_param_names.add(param_name)
     result = []
-    for name, obj in inspect.getmembers(obj):
+    for name, value in inspect.getmembers(obj):
         if name in input_param_names:
-            result.append((name, _simplify_obj_name(obj)))
+            result.append((name, _simplify_obj_name(value)))
     return ', '.join([f'{name}={value}' for name, value in result])