From d8c8ba24404ef892d4d948eb095b69d90b9ba7e6 Mon Sep 17 00:00:00 2001
From: Tom Ritchford
Date: Wed, 18 Dec 2024 18:14:52 +0000
Subject: [PATCH] Fix unused Python variables in test/[e-z]* (#136964)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/136964
Approved by: https://github.com/justinchuby, https://github.com/albanD
---
 test/export/opinfo_schema.py | 2 +-
 test/export/test_converter.py | 10 ++--
 test/export/test_draft_export.py | 6 +--
 test/export/test_experimental.py | 2 +-
 test/export/test_export.py | 1 +
 test/export/test_passes.py | 7 ++-
 test/export/test_torchbind.py | 1 +
 test/export/test_unflatten.py | 7 +--
 test/functorch/discover_coverage.py | 8 ---
 test/functorch/test_ac.py | 16 ++----
 test/functorch/test_aotdispatch.py | 46 ++++++++---------
 test/functorch/test_control_flow.py | 49 ++++++++-----------
 test/functorch/test_dims.py | 18 ++-----
 test/functorch/test_eager_transforms.py | 23 ++++-----
 test/functorch/test_ops.py | 1 +
 test/functorch/test_vmap.py | 4 +-
 test/fx/quantization.py | 12 +----
 test/fx/test_cse_pass.py | 2 +-
 test/fx/test_dce_pass.py | 21 +++----
 test/fx/test_fx_split.py | 6 +--
 test/fx/test_gradual_type.py | 4 +-
 test/fx/test_matcher_utils.py | 2 +-
 test/fx/test_subgraph_rewriter.py | 4 +-
 test/fx/test_z3_gradual_types.py | 22 ++-------
 test/higher_order_ops/test_invoke_subgraph.py | 2 -
 test/higher_order_ops/test_with_effects.py | 1 +
 test/inductor/s429861_repro.py | 1 +
 test/inductor/test_aot_inductor.py | 9 ++--
 test/inductor/test_aot_inductor_arrayref.py | 2 +-
 test/inductor/test_aot_inductor_custom_ops.py | 2 +-
 test/inductor/test_aot_inductor_package.py | 1 -
 test/inductor/test_autoheuristic.py | 2 +-
 test/inductor/test_codecache.py | 13 +++--
 test/inductor/test_compiled_autograd.py | 4 +-
 test/inductor/test_compiled_optimizers.py | 2 +-
 test/inductor/test_cpu_repro.py | 9 ++--
 test/inductor/test_cuda_repro.py | 2 +
 test/inductor/test_cudacodecache.py | 4 +-
 test/inductor/test_cudagraph_trees.py | 1 +
 test/inductor/test_cutlass_backend.py | 4 +-
 test/inductor/test_decompose_mem_bound_mm.py | 2 +-
 test/inductor/test_dependencies.py | 8 ---
 test/inductor/test_flex_attention.py | 8 +--
 test/inductor/test_flex_decoding.py | 2 +-
 test/inductor/test_fp8.py | 12 ++---
 test/inductor/test_group_batch_fusion.py | 2 +-
 test/inductor/test_indexing.py | 1 -
 test/inductor/test_inplacing_pass.py | 23 +++++----
 test/inductor/test_kernel_benchmark.py | 1 +
 test/inductor/test_layout_optim.py | 4 +-
 test/inductor/test_loop_ordering.py | 3 +-
 test/inductor/test_max_autotune.py | 8 +--
 .../test_move_constructors_to_cuda.py | 4 +-
 test/inductor/test_multi_kernel.py | 4 +-
 test/inductor/test_ordered_set.py | 2 +-
 test/inductor/test_pad_mm.py | 2 +-
 test/inductor/test_pattern_matcher.py | 12 ++---
 test/inductor/test_perf.py | 2 +-
 test/inductor/test_profiler.py | 2 +-
 test/inductor/test_smoke.py | 2 +-
 test/inductor/test_split_cat_fx_passes.py | 2 +-
 test/inductor/test_standalone_compile.py | 4 +-
 test/inductor/test_torchinductor.py | 1 +
 test/inductor/test_torchinductor_opinfo.py | 10 ++--
 .../test_torchinductor_strided_blocks.py | 1 +
 test/inductor/test_triton_heuristics.py | 2 +-
 test/inductor/test_triton_kernels.py | 1 +
 test/inductor/test_triton_wrapper.py | 2 +-
 test/inductor/test_unbacked_symints.py | 2 +-
 test/jit/test_async.py | 1 +
 test/jit/test_autodiff.py | 1 +
 test/jit/test_autodiff_subgraph_slicing.py | 7 ++-
 test/jit/test_await.py | 8 +--
 test/jit/test_backends.py | 6 +--
 test/jit/test_builtins.py | 4 +-
 test/jit/test_class_type.py | 1 +
 test/jit/test_cuda.py | 1 +
 test/jit/test_dtype_analysis.py | 2 +-
 test/jit/test_freezing.py | 1 +
 test/jit/test_fuser_common.py | 2 +-
 test/jit/test_generator.py | 4 +-
 test/jit/test_hooks_modules.py | 4 +-
 test/jit/test_ignore_context_manager.py | 2 +-
 test/jit/test_isinstance.py | 2 +-
 test/jit/test_jit_utils.py | 2 +-
 test/jit/test_list_dict.py | 1 +
 test/jit/test_logging.py | 1 +
 test/jit/test_misc.py | 1 +
 test/jit/test_models.py | 3 +-
 test/jit/test_module_containers.py | 2 +-
 test/jit/test_module_interface.py | 1 +
 ...optimize_for_mobile_preserve_debug_info.py | 2 -
 test/jit/test_peephole.py | 6 +--
 test/jit/test_profiler.py | 2 +-
 test/jit/test_recursive_script.py | 1 +
 test/jit/test_remove_mutation.py | 2 +-
 test/jit/test_save_load_for_op_version.py | 1 +
 test/jit/test_symbolic_shape_analysis.py | 2 +-
 test/jit/test_torchbind.py | 1 +
 test/jit/test_tracer.py | 1 +
 test/jit/test_types.py | 1 +
 test/jit/test_typing.py | 1 +
 test/jit/test_union.py | 1 +
 test/jit/test_union_pep604.py | 1 +
 test/jit/test_with.py | 1 +
 test/jit/xnnpack/test_xnnpack_delegate.py | 2 +-
 test/lazy/test_debug_util.py | 2 +-
 test/lazy/test_extract_compiled_graph.py | 2 +-
 test/lazy/test_generator.py | 2 +-
 test/lazy/test_meta_kernel.py | 2 +-
 test/lazy/test_reuse_ir.py | 8 +--
 test/lazy/test_step_closures.py | 4 +-
 test/lazy/test_ts_opinfo.py | 6 +--
 test/mobile/model_test/builtin_ops.py | 1 -
 test/mobile/model_test/gen_test_model.py | 4 +-
 test/mobile/model_test/math_ops.py | 1 -
 test/mobile/model_test/nn_ops.py | 2 +-
 test/mobile/model_test/quantization_ops.py | 1 -
 test/mobile/model_test/tensor_ops.py | 1 -
 test/mobile/test_bytecode.py | 3 +-
 test/mobile/test_lite_script_module.py | 4 +-
 .../test_quantize_fx_lite_script_module.py | 4 +-
 test/mobile/test_upgraders.py | 7 +--
 test/nn/test_load_state_dict.py | 4 +-
 test/nn/test_packed_sequence.py | 2 +-
 .../test_dynamo_with_onnxruntime_backend.py | 8 +--
 test/onnx/test_autograd_funs.py | 2 +-
 test/onnx/test_custom_ops.py | 2 +-
 test/onnx/test_fx_passes.py | 2 +-
 test/onnx/test_pytorch_onnx_no_runtime.py | 4 +-
 test/onnx/test_pytorch_onnx_onnxruntime.py | 1 +
 .../onnx/test_pytorch_onnx_shape_inference.py | 2 +-
 test/onnx/test_utility_funs.py | 2 +-
 test/onnx/verify.py | 3 +-
 test/optim/test_lrscheduler.py | 1 +
 test/optim/test_swa_utils.py | 2 +-
 test/package/test_directory_reader.py | 2 +-
 test/package/test_load_bc_packages.py | 6 +--
 test/package/test_misc.py | 2 +-
 test/package/test_model.py | 2 +-
 test/package/test_package_script.py | 2 +-
 test/package/test_repackage.py | 2 +-
 test/package/test_save_load.py | 2 +-
 test/profiler/test_execution_trace.py | 2 +-
 test/profiler/test_memory_profiler.py | 6 +--
 test/profiler/test_profiler.py | 1 +
 test/profiler/test_record_function.py | 1 +
 test/profiler/test_torch_tidy.py | 4 +-
 .../bc/test_backward_compatibility.py | 8 ++-
 .../experimental/apot_fx_graph_mode_ptq.py | 2 +-
 .../core/experimental/quantization_util.py | 5 +-
 .../core/experimental/test_bits.py | 2 +-
 .../core/experimental/test_fake_quantize.py | 2 +-
 .../core/experimental/test_float8.py | 2 +-
 .../experimental/test_nonuniform_observer.py | 1 +
 test/quantization/core/test_backend_config.py | 2 -
 .../core/test_quantized_module.py | 3 +-
 test/quantization/core/test_quantized_op.py | 1 +
 .../core/test_quantized_tensor.py | 1 +
 .../quantization/core/test_workflow_module.py | 1 +
 test/quantization/core/test_workflow_ops.py | 1 +
 .../eager/test_numeric_suite_eager.py | 1 +
 .../eager/test_quantize_eager_ptq.py | 1 +
 .../eager/test_quantize_eager_qat.py | 2 +-
 test/quantization/fx/test_model_report_fx.py | 1 +
 test/quantization/fx/test_numeric_suite_fx.py | 1 +
 test/quantization/fx/test_quantize_fx.py | 1 +
 .../quantization/fx/test_subgraph_rewriter.py | 4 +-
 .../jit/test_deprecated_jit_quant.py | 1 +
 .../jit/test_ondevice_quantization.py | 5 +-
 test/quantization/jit/test_quantize_jit.py | 1 +
 test/quantization/pt2e/test_duplicate_dq.py | 1 +
 test/quantization/pt2e/test_graph_utils.py | 6 +--
 .../pt2e/test_metadata_porting.py | 6 +--
 test/quantization/pt2e/test_quantize_pt2e.py | 1 +
 .../pt2e/test_quantize_pt2e_qat.py | 12 ++--
 test/quantization/pt2e/test_representation.py | 6 +--
 .../pt2e/test_x86inductor_quantizer.py | 2 +-
 .../pt2e/test_xnnpack_quantizer.py | 3 --
 test/test_autograd.py | 1 +
 test/test_autograd_fallback.py | 6 +--
 test/test_binary_ufuncs.py | 1 +
 test/test_cpp_extensions_aot.py | 8 +--
 ...cpp_extensions_open_device_registration.py | 5 +-
 test/test_cuda.py | 47 +++++++++---------
 test/test_cuda_multigpu.py | 4 +-
 test/test_cuda_sanitizer.py | 5 +-
 test/test_cuda_trace.py | 2 +-
 test/test_custom_ops.py | 1 +
 test/test_dataloader.py | 1 +
 test/test_datapipe.py | 1 +
 test/test_dispatch.py | 2 +-
 test/test_dlpack.py | 2 +-
 test/test_dynamic_shapes.py | 2 +-
 test/test_fake_tensor.py | 1 +
 test/test_file_check.py | 2 +-
 test/test_flop_counter.py | 2 +-
 test/test_foreach.py | 2 +-
 test/test_function_schema.py | 2 +-
 test/test_functional_optim.py | 1 -
 test/test_functionalization.py | 1 +
 test/test_functionalization_of_rng_ops.py | 2 +-
 test/test_fx.py | 1 +
 test/test_fx_experimental.py | 1 +
 test/test_fx_passes.py | 1 +
 test/test_fx_reinplace_pass.py | 9 ++--
 test/test_indexing.py | 1 -
 test/test_jit.py | 1 +
 test/test_jit_autocast.py | 6 +--
 test/test_jit_fuser.py | 6 +--
 test/test_jit_fuser_te.py | 1 +
 test/test_jit_llga_fuser.py | 6 +--
 test/test_jiterator.py | 2 +-
 test/test_legacy_vmap.py | 1 +
 test/test_linalg.py | 1 +
 test/test_maskedtensor.py | 2 +-
 test/test_meta.py | 1 +
 test/test_metal.py | 3 +-
 test/test_mkldnn.py | 10 ++--
 test/test_mobile_optimizer.py | 3 +-
 test/test_module_tracker.py | 4 +-
 test/test_modules.py | 1 -
 test/test_monitor.py | 2 +-
 test/test_mps.py | 2 +-
 test/test_multiprocessing.py | 2 +-
 test/test_multiprocessing_spawn.py | 2 +-
 test/test_namedtensor.py | 2 +-
 test/test_nestedtensor.py | 2 +-
 test/test_nn.py | 1 +
 test/test_numba_integration.py | 2 +-
 test/test_numpy_interop.py | 2 +-
 test/test_openmp.py | 4 +-
 test/test_ops.py | 3 +-
 test/test_ops_jit.py | 2 +-
 test/test_optim.py | 2 -
 test/test_overrides.py | 4 --
 test/test_prims.py | 4 +-
 test/test_proxy_tensor.py | 1 +
 test/test_python_dispatch.py | 1 +
 test/test_pytree.py | 6 +--
 test/test_reductions.py | 4 +-
 test/test_schema_check.py | 1 +
 test/test_segment_reductions.py | 4 +-
 test/test_serialization.py | 1 +
 test/test_shape_ops.py | 2 +-
 test/test_sort_and_select.py | 5 +-
 test/test_sparse.py | 1 +
 test/test_sparse_csr.py | 1 +
 test/test_sparse_semi_structured.py | 1 +
 test/test_spectral_ops.py | 1 +
 test/test_stateless.py | 6 +--
 test/test_static_runtime.py | 1 +
 test/test_subclass.py | 5 +-
 test/test_tensor_creation_ops.py | 1 +
 test/test_tensorboard.py | 8 +--
 test/test_tensorexpr.py | 1 +
 test/test_testing.py | 2 +-
 test/test_type_hints.py | 1 -
 test/test_type_promotion.py | 1 -
 test/test_typing.py | 4 +-
 test/test_unary_ufuncs.py | 2 +-
 test/test_view_ops.py | 4 +-
 test/test_vulkan.py | 3 +-
 test/test_weak.py | 3 +-
 test/test_xnnpack_integration.py | 3 +-
 test/test_xpu.py | 6 +--
 test/torch_np/numpy_tests/core/test_dtype.py | 3 +-
test/torch_np/numpy_tests/core/test_einsum.py | 1 + .../numpy_tests/core/test_indexing.py | 3 +- .../numpy_tests/core/test_multiarray.py | 1 + .../torch_np/numpy_tests/core/test_numeric.py | 1 + .../numpy_tests/core/test_scalar_methods.py | 3 +- .../numpy_tests/core/test_scalarmath.py | 4 +- .../numpy_tests/core/test_shape_base.py | 3 +- .../numpy_tests/lib/test_function_base.py | 1 + .../numpy_tests/lib/test_histograms.py | 1 + .../numpy_tests/lib/test_twodim_base.py | 1 + .../numpy_tests/linalg/test_linalg.py | 2 +- test/torch_np/test_basic.py | 4 +- test/torch_np/test_reductions.py | 6 +-- test/xpu/test_gemm.py | 10 ++-- 281 files changed, 508 insertions(+), 565 deletions(-) diff --git a/test/export/opinfo_schema.py b/test/export/opinfo_schema.py index dba401e0e5c..83721365984 100644 --- a/test/export/opinfo_schema.py +++ b/test/export/opinfo_schema.py @@ -41,7 +41,7 @@ class PreDispatchSchemaCheckMode(SchemaCheckMode): if isinstance(e, torch.Tensor) and not type(e) == torch.Tensor: try: return e.elem - except AttributeError as t: + except AttributeError: return e return e diff --git a/test/export/test_converter.py b/test/export/test_converter.py index cc1adec9980..5f0f6d784d2 100644 --- a/test/export/test_converter.py +++ b/test/export/test_converter.py @@ -25,7 +25,7 @@ class TestConverter(TestCase): init_torchbind_implementations() @torch._library.register_fake_class("_TorchScriptTesting::_TensorQueue") - class FakeTensorQueue: + class _FakeTensorQueue: def __init__(self, queue): self.queue = queue @@ -1017,7 +1017,7 @@ class TestConverter(TestCase): torch.randn([2, 3, 4]).to(torch.float32), torch.randn([2, 3, 4]).to(torch.float64), ) - ep_list = self._check_equal_ts_ep_converter(func6, inp) + self._check_equal_ts_ep_converter(func6, inp) # TODO: Additional check once dynamic shape is supported. 
# for ep in ep_list: @@ -1353,7 +1353,7 @@ class TestConverter(TestCase): def test_ts2ep_with_loop(self): def func1(x, x_list: List[torch.Tensor]): a, b, c = x, x, x - for i in range(1, 5, 2): + for _ in range(1, 5, 2): for k in range(5): a = a + a + k b = b + b - k @@ -1364,12 +1364,12 @@ class TestConverter(TestCase): x_list.append(x_list[k] + x_list[k + 1] - x_list[k + 2]) return x, x_list - def func2(x): + def func2(x): # noqa: F841 for i in range(x.size(0)): x = x * x * i return x - def func3(x): + def func3(x): # noqa: F841 while x.sum() < 10: x += x.sin() return x diff --git a/test/export/test_draft_export.py b/test/export/test_draft_export.py index 67906894196..96060b99a14 100644 --- a/test/export/test_draft_export.py +++ b/test/export/test_draft_export.py @@ -56,7 +56,7 @@ class TestDraftExport(TestCase): ) def test_missing_meta_kernel_custom_op(self): - with torch.library._scoped_library("mylib", "FRAGMENT") as lib: + with torch.library._scoped_library("mylib", "FRAGMENT"): @torch.library.custom_op("mylib::foo2", mutates_args={}) def foo2_impl(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor: @@ -111,7 +111,7 @@ class TestDraftExport(TestCase): @unittest.skipIf(not torch.cuda.is_available(), "Requires cuda") def test_missing_meta_kernel_guard(self): - with torch.library._scoped_library("mylib", "FRAGMENT") as lib: + with torch.library._scoped_library("mylib", "FRAGMENT"): @torch.library.custom_op("mylib::foo4", mutates_args={}) def foo4_impl(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor: @@ -415,7 +415,7 @@ class TestDraftExport(TestCase): example_inputs = (torch.randn(3, 5), torch.randn(3)) draft_ep, _ = draft_export(mod, example_inputs) with tempfile.NamedTemporaryFile(suffix=".pt2") as f: - aoti_model_path = torch._inductor.aoti_compile_and_package( + torch._inductor.aoti_compile_and_package( draft_ep, example_inputs, package_path=f.name, diff --git a/test/export/test_experimental.py b/test/export/test_experimental.py index c78a5b215d8..307a8bcf01f 100644 --- a/test/export/test_experimental.py +++ b/test/export/test_experimental.py @@ -227,7 +227,7 @@ def forward(self, p_linear_weight, p_linear_bias, c_lifted_tensor_0, x): ep = torch.export.export_for_training( m, example_inputs, dynamic_shapes={"x": {0: Dim("x0")}} ) - joint_ep = _export_forward_backward(ep) + _export_forward_backward(ep) def test_joint_cifar10_backwards(self) -> None: import torch.nn as nn diff --git a/test/export/test_export.py b/test/export/test_export.py index 47d7bdd85ce..cbadc67e3a9 100755 --- a/test/export/test_export.py +++ b/test/export/test_export.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: export"] +# ruff: noqa: F841 # flake8: noqa import copy import dataclasses diff --git a/test/export/test_passes.py b/test/export/test_passes.py index ca4c3141ffe..130a6a64757 100644 --- a/test/export/test_passes.py +++ b/test/export/test_passes.py @@ -643,7 +643,7 @@ def forward(self, token, obj_attr, x): allow_non_fake_inputs=True, ) with _fakify_script_objects(m, (), {}, fake_mode) as ( - patched_mod, + _, _, _, fake_constant_attrs, @@ -657,17 +657,16 @@ def forward(self, token, obj_attr, x): @unittest.expectedFailure def test_fakify_script_objects_properly_handle_containers(self): m = ModelsWithScriptObjectAttr.SimpleWithAttrInContainer() - constant_attrs = _gather_constant_attrs(m) fake_mode = FakeTensorMode( shape_env=ShapeEnv(tracked_fakes=[]), allow_non_fake_inputs=True, ) with _fakify_script_objects(m, (), {}, fake_mode) as ( - patched_mod, + _, _, _, fake_constant_attrs, - fake_to_real, + _, ): 
self.assertTrue("attr" in fake_constant_attrs.values()) self.assertTrue("pytree_attr2" in fake_constant_attrs.values()) diff --git a/test/export/test_torchbind.py b/test/export/test_torchbind.py index fd9f98199d3..673c0f0286e 100644 --- a/test/export/test_torchbind.py +++ b/test/export/test_torchbind.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: export"] +# ruff: noqa: F841 import copy import unittest diff --git a/test/export/test_unflatten.py b/test/export/test_unflatten.py index e9bc6724d65..1784bec8a78 100644 --- a/test/export/test_unflatten.py +++ b/test/export/test_unflatten.py @@ -498,7 +498,8 @@ class TestUnflatten(TestCase): inp = (torch.randn(4, 4), [torch.randn(4, 4), torch.randn(4, 4)]) mod = Foo() - ep_strict = torch.export.export(mod, inp) + + ep_strict = torch.export.export(mod, inp) # noqa: F841 ep_non_strict = torch.export.export(mod, inp, strict=False) gm_unflat_non_strict = unflatten(ep_non_strict) @@ -610,7 +611,7 @@ class TestUnflatten(TestCase): init_torchbind_implementations() @torch._library.register_fake_class("_TorchScriptTesting::_Foo") - class FakeFoo: + class FakeFoo: # noqa: F841 def __init__(self, x: int, y: int): self.x = x self.y = y @@ -687,7 +688,7 @@ class TestUnflatten(TestCase): # The call chain looks like this: # A -> B -> C -> A.d ep = torch.export.export(a, (torch.randn(3),), strict=False) - unflattened = unflatten(ep) + unflatten(ep) def test_nested_leaf_non_strict(self): class Leaf(torch.nn.Module): diff --git a/test/functorch/discover_coverage.py b/test/functorch/discover_coverage.py index 972a77850c2..92ea79b8008 100644 --- a/test/functorch/discover_coverage.py +++ b/test/functorch/discover_coverage.py @@ -413,14 +413,6 @@ def get_statuses(for_subset=None, invert=False): result.remove(decorator.test_name) return result - def get_all_aliases(op): - opinfos = op_to_opinfo[op] - result = [] - for opinfo in opinfos: - result.append(opinfo.name) - result.extend(opinfo.aliases) - return set(result) - for name, op in get_covered_ops(overridable_outplace_we_care_about).items(): successful_tests = get_covered_tests(op) failed_tests = tests - successful_tests diff --git a/test/functorch/test_ac.py b/test/functorch/test_ac.py index 6e5ad377de8..c3bc905a937 100644 --- a/test/functorch/test_ac.py +++ b/test/functorch/test_ac.py @@ -104,7 +104,7 @@ class MemoryBudgetTest(TestCase): def call(): return f(x, ws) - eager_mem, eager_flops = get_mem_and_flops(call) + _, eager_flops = get_mem_and_flops(call) for budget in range(0, 11): mem, flops = get_mem_and_flops(call, memory_budget=budget / 10) if budget <= 5: @@ -133,18 +133,10 @@ class MemoryBudgetTest(TestCase): def make_weights(w_shapes): ws = [] - for idx, dim in enumerate(w_shapes): + for dim in w_shapes: ws.append(torch.randn(512, dim * 512, requires_grad=True)) return ws - def make_weights_chain(w_shapes): - ws = [] - for idx, _ in enumerate(w_shapes): - old_dim = 512 if idx == 0 else w_shapes[idx - 1] * 512 - new_dim = w_shapes[idx] * 512 - ws.append(torch.randn(old_dim, new_dim, requires_grad=True)) - return ws - weight_configs = [ ( [11, 3, 4, 2], @@ -186,7 +178,7 @@ class MemoryBudgetTest(TestCase): def call(): return f(x, ws) - eager_mem, eager_flops = get_mem_and_flops(call) + eager_mem, _ = get_mem_and_flops(call) total_mem = sum(weight_shapes) self.assertEqual(eager_mem, sum(weight_shapes)) for mem_achieved in exact_solves: @@ -302,7 +294,7 @@ class MemoryBudgetTest(TestCase): def call(): return f(x, ws) - eager_mem, eager_flops = get_mem_and_flops(call) + _, eager_flops = get_mem_and_flops(call) mem, 
flops = get_mem_and_flops(call, memory_budget=0.2) # We start saving the matmuls self.assertEqual(mem, 2) diff --git a/test/functorch/test_aotdispatch.py b/test/functorch/test_aotdispatch.py index 7ce69380272..15ccf41f292 100644 --- a/test/functorch/test_aotdispatch.py +++ b/test/functorch/test_aotdispatch.py @@ -697,7 +697,7 @@ def forward(self, primals_1, primals_2): with self.assertRaisesRegex( AssertionError, "but the input has other mutations that we cannot" ): - fw_graph = self.verify_aot_autograd( + self.verify_aot_autograd( f, inp, test_mutation=True, keep_inp_mutations=True ) @@ -756,8 +756,8 @@ def forward(self, primals_1): test = torch.ones(4, requires_grad=True) + 0 test_view = test[0::2] - out_ref = f(ref_view) - out_test = f_compiled(test_view) + out_ref = f(ref_view) # noqa: F841 + out_test = f_compiled(test_view) # noqa: F841 self.assertEqual(ref, test) def test_input_mutation_modifies_autograd_meta_of_aliases(self): @@ -1110,7 +1110,7 @@ def forward(self, arg0_1, arg1_1): keep_inference_input_mutations=True, dynamic=False, ) - out = compiled_f(inp) + compiled_f(inp) # Final functionalized graph has two mutation ops: # (1) a resize_() to resize input tensor up # (2) a copy_() to fill in the resized input with valid data @@ -1145,7 +1145,7 @@ def forward(self, primals_1): keep_inference_input_mutations=True, dynamic=False, ) - out = compiled_f(inp) + compiled_f(inp) # Final functionalized graph has one mutation ops: # (1) a resize_() to resize input tensor down # Even though there was technically a "data mutation" on the input (from a.copy_()), @@ -1217,7 +1217,7 @@ def forward(self, primals_1): with torch.no_grad(): allgather_param = torch.cat([param_shard, param_shard]) # simulate propagating grad state through dummy param, using data of allgather param - dummy_param_with_grad_state = TracableCreateParameter.apply( + dummy_param_with_grad_state = TracableCreateParameter.apply( # noqa: F841 allgather_param, dummy_param ) out = dummy_param.sin() @@ -1242,7 +1242,7 @@ def forward(self, primals_1): keep_inference_input_mutations=True, dynamic=False, ) - out = compiled_f(dummy_param, param_shard) + compiled_f(dummy_param, param_shard) # Important stuff to point out: # (1) We save cat for backward (input to the sin()). 
# While the original code was dummy_param.sin(), @@ -1276,7 +1276,7 @@ def forward(self, primals_1, primals_2): keep_inference_input_mutations=True, dynamic=False, ) - out = compiled_f(inp) + compiled_f(inp) # def test_input_mutation_storage_resize_not_supported(self): # def f(a): @@ -1412,7 +1412,7 @@ def forward(self, arg0_1): return a + 5 inp = [torch.ones(4, requires_grad=True)] - fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True) + self.verify_aot_autograd(f, inp, test_mutation=True) def test_input_mutation_metadata2(self): def f(a): @@ -1482,7 +1482,7 @@ def forward(self, arg0_1): ) inp = torch.ones(4, 4, 4, 4) with torch.no_grad(): - out = compiled_m(inp) + compiled_m(inp) # expectation: there are no copy_() calls in the decomposed batch norm when running under training=False (eval mode) code = fw_graph_cell[0].code.strip() self.assertTrue("copy_" not in str(code)) @@ -1670,16 +1670,16 @@ def forward(self, primals_1): inp2.mul_(2) # In eager mode, if we mutate a tensor, any multi-output-view aliases # get their grad_fn replaced with error nodes, so accessing grad_fn should error - grad_fn = out_test2[0].grad_fn + out_test2[0].grad_fn with self.assertRaisesRegex( RuntimeError, "Such functions do not allow the output views" ): - out_test3 = f1_compiled(inp3) + f1_compiled(inp3) out_test1[0].detach().mul_(2) # The above case also applies to detached aliases (they turn the multi-output-view # alias's grad_fns into error nodes) - grad_fn = out_test2[0].grad_fn + out_test2[0].grad_fn def test_output_aliases_input_multi_output_view(self): # All aliased outs are from multi-output views, so AOTAutograd will hide the aliasing from autograd. @@ -1890,7 +1890,7 @@ def forward(self, primals_1, primals_2): inp = [torch.ones(3, 3, requires_grad=False)] self.verify_aot_autograd(f, inp, test_mutation=True) inp = [torch.ones(3, 3, requires_grad=True)] - fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True) + self.verify_aot_autograd(f, inp, test_mutation=True) def test_output_aliases_intermediate_multiple(self): def f(a): @@ -1982,8 +1982,6 @@ def forward(self, primals_1): out.t_() return out - inp = [torch.ones(2, 4, requires_grad=True)] - # TODO: fix this test. # See https://github.com/pytorch/pytorch/issues/90507 # self.verify_aot_autograd(f, inp, test_mutation=True) @@ -2020,7 +2018,7 @@ def forward(self, primals_1): out_view2 = out.unsqueeze(0) return out_view, out, out_view2 - inp = [torch.ones(2, 4, requires_grad=True)] + inp = [torch.ones(2, 4, requires_grad=True)] # noqa: F841 # TODO: fix this test. # See @@ -2355,7 +2353,7 @@ def forward(self, primals_1, primals_2): with self.assertRaisesRegex( AssertionError, "input to the backward that was mutated during the backward" ): - out = f_compiled(*inp_grad) + f_compiled(*inp_grad) def test_backward_mutation_forward_inputs(self): @torch.library.custom_op("_test::_clone", mutates_args={}) @@ -2954,7 +2952,7 @@ def forward(self, primals_1, primals_2, primals_3): # detach() so that none of the inputs have a ._base attribute. a = base[0].detach() b = base[1].detach() - base2 = torch.ones(2, 2, requires_grad=True) + base2 = torch.ones(2, 2, requires_grad=True) # noqa: F841 return [base], [a, b] self.verify_aot_autograd(f, inp_callable, test_mutation=True) @@ -3499,7 +3497,6 @@ def forward(self, tangents_1): b.masked_fill_(c, 0) **also** mutates a (because b and a are aliased) The autograd engine yells at us if we save "a" for backward, and then try to mutate it. 
""" - inp = torch.randn(2, 2, requires_grad=True) def f(a): b = a[0] @@ -3990,7 +3987,7 @@ def forward(self, arg0_1, arg1_1): fw_graph_cell = [None] bw_graph_cell = [None] - compiled_outs = aot_function( + aot_function( fn, fw_compiler=partial(extract_graph, graph_cell=fw_graph_cell), bw_compiler=partial(extract_graph, graph_cell=bw_graph_cell), @@ -3999,7 +3996,6 @@ def forward(self, arg0_1, arg1_1): dynamic=True, )(*inp) fw_graph = fw_graph_cell[0] - bw_graph = bw_graph_cell[0] self.assertExpectedInline( str(fw_graph.code).strip(), @@ -4560,7 +4556,7 @@ def forward(self, arg0_1): mod = ConvBatchnormRelu() mod.train() inp = torch.randn(1, 1, 3, 3) - o_ref = mod(inp) + mod(inp) fx_g, signature = aot_export_module( mod, [inp], trace_joint=True, output_loss_index=0 ) @@ -5265,7 +5261,7 @@ class TestPartitioning(AOTTestCase): aot_fn = aot_function(generate, nop, inference_compiler=inference_compiler) # Even though x requires grad, we should still get an inference graph x = torch.randn(4, requires_grad=True) - res = aot_fn(x) + aot_fn(x) self.assertTrue(inference_graph_cell[0] is not None) @unittest.skipIf(not torch.cuda.is_available(), "CUDA is unavailable") @@ -5914,7 +5910,7 @@ class TestAOTModuleSimplified(AOTTestCase): x = torch.randn(2, 512, 40, 59) # NB: must not require grad inputs = [x] fake_inputs = [fake_mode.from_tensor(x) for x in inputs] - compiled_f = aot_module_simplified(mod, fake_inputs, nop) + aot_module_simplified(mod, fake_inputs, nop) def test_aot_module_simplified_preserves_stack_trace(self): class MockModule(torch.nn.Module): diff --git a/test/functorch/test_control_flow.py b/test/functorch/test_control_flow.py index f3252b0d478..f030f46d614 100644 --- a/test/functorch/test_control_flow.py +++ b/test/functorch/test_control_flow.py @@ -1202,15 +1202,15 @@ def forward(self, pred_1, x_1): with self.assertRaisesRegex( RuntimeError, r"Expect outputs of map only contains tensors or None\." ): - _ = control_flow.map(f, x, y) + control_flow.map(f, x, y) with self.assertRaisesRegex( RuntimeError, r"Expect outputs of map only contains tensors or None\." 
): - out = control_flow.map(f1, x, y) + control_flow.map(f1, x, y) # return None is OK - _ = control_flow.map(f2, x, y) + control_flow.map(f2, x, y) def test_map_list_in_out(self): def f(x, y): @@ -1644,7 +1644,7 @@ def forward(self, pred_1, x_1): RuntimeError, "The number of leaves of the pytree of the new carry", ): - result = scan(fct_wrong_pytree, init, inp, dim=0) + scan(fct_wrong_pytree, init, inp, dim=0) @requires_cuda @parametrize("reverse", [False, True]) @@ -1975,7 +1975,7 @@ def forward(self, pred_1, x_1): RuntimeError, "xs leaves must have a scan dimension > 0", ): - result_init = scan_fct( + scan_fct( get_scan_combine_fn("add", False), init, inp, @@ -1987,7 +1987,7 @@ def forward(self, pred_1, x_1): torch._dynamo.exc.Unsupported, "Observed exception.*", ): - result_init = scan_fct( + scan_fct( get_scan_combine_fn("add", False), init, inp, @@ -2009,18 +2009,14 @@ def forward(self, pred_1, x_1): RuntimeError, "All init leaves must be a Tensor", ): - result_init = scan_fct( - get_scan_combine_fn("add", False), init, x, dim=dim - ) + scan_fct(get_scan_combine_fn("add", False), init, x, dim=dim) else: with self.assertRaisesRegex( # Should be: RuntimeError, "Init leaves must be a Tensor" torch._dynamo.exc.Unsupported, "Observed exception.*", ): - result_init = scan_fct( - get_scan_combine_fn("add", False), init, x, dim=dim - ) + scan_fct(get_scan_combine_fn("add", False), init, x, dim=dim) @requires_cuda @parametrize("compile_mode", ["none", "eager"]) @@ -2035,7 +2031,7 @@ def forward(self, pred_1, x_1): init = torch.randn(1, 2) if compile_mode == "none": with self.assertRaisesRegex(RuntimeError, "The shape of the new_carry"): - result_init = scan_fct( + scan_fct( get_scan_combine_fn("add", False), init, x, @@ -2047,7 +2043,7 @@ def forward(self, pred_1, x_1): torch._dynamo.exc.Unsupported, "Observed exception.*", ): - result_init = scan_fct( + scan_fct( get_scan_combine_fn("add", False), init, x, @@ -2077,7 +2073,7 @@ def forward(self, pred_1, x_1): RuntimeError, "The number of leaves of the pytree of the new carry produced by the operator", ): - result_init = scan_fct(add_one_carry, init, x, dim=dim) + scan_fct(add_one_carry, init, x, dim=dim) else: with self.assertRaisesRegex( @@ -2086,7 +2082,7 @@ def forward(self, pred_1, x_1): torch._dynamo.exc.Unsupported, "Observed exception.*", ): - result_init = scan_fct(add_one_carry, init, x, dim=dim) + scan_fct(add_one_carry, init, x, dim=dim) @requires_cuda @parametrize("reverse", [False, True]) @@ -2218,7 +2214,7 @@ def forward(self, pred_1, x_1): Exception, ".*", ): - result = scan( + scan( get_scan_combine_fn("complex_pointwise", False), init, inp, @@ -3079,7 +3075,7 @@ class AssociativeScanTests(TestCase): device = torch.device("cuda") def combine_fn(x, y): - cnt = torch.zeros_like(y[0, :]) + _cnt = torch.zeros_like(y[0, :]) if loop_type == "while": def cond_fn(ind, loop_val): @@ -3334,7 +3330,7 @@ class AssociativeScanTests(TestCase): torch._dynamo.exc.Unsupported, "Observed exception.*", ): - out = associative_scan( + associative_scan( get_scan_combine_fn("different_input_size_operator", True), elements, 3, @@ -3352,7 +3348,7 @@ class AssociativeScanTests(TestCase): RuntimeError, "torch.compile does not support sparse Tensors", ): - result = associative_scan( + associative_scan( get_scan_combine_fn("add", True), x, 0, @@ -3383,7 +3379,7 @@ class AssociativeScanTests(TestCase): torch._dynamo.exc.Unsupported, "Observed exception.*", ): - result = associative_scan(fct, x, 0) + associative_scan(fct, x, 0) @unittest.skipIf(not 
SM70OrLater, "triton") @requires_cuda @@ -3407,7 +3403,7 @@ class AssociativeScanTests(TestCase): torch._dynamo.exc.Unsupported, "Observed exception.*", ): - result = associative_scan(fct_wrong_pytree, inp, 0, combine_mode="generic") + associative_scan(fct_wrong_pytree, inp, 0, combine_mode="generic") @unittest.skipIf(not SM70OrLater, "triton") @requires_cuda @@ -3418,7 +3414,7 @@ class AssociativeScanTests(TestCase): Exception, "For combine_mode='pointwise', the combine_fn needs to be pointwise", ): - out = associative_scan( + associative_scan( get_scan_combine_fn("non_pointwise", True), x, 0, @@ -3875,7 +3871,6 @@ def forward(self, l_iter_, l_x_, l__self___dec_cond_fn, l__self___linear_bias_bo graphs = self._check_tracing(fn, inp) gm = graphs["symbolic"] outer_body = gm.while_loop_body_graph_0 - outer_cond = gm.while_loop_cond_graph_0 inner_body = outer_body.while_loop_body_graph_0 inner_cond = outer_body.while_loop_cond_graph_0 self.assertExpectedInline( @@ -5945,7 +5940,7 @@ def forward(self, s0 : torch.SymInt, L_a_ : torch.Tensor, L_b_ : torch.Tensor, L pass with self.assertRaisesRegex(TypeError, "WrongHop"): - wrong_hop = WrongHop("wrong_hop") + WrongHop("wrong_hop") def test_scan_functionalized(self): def f(init, xs): @@ -6153,7 +6148,6 @@ class TestHopSchema(TestCase): example_val = self._get_example_val(schema_type) li1 = [example_val] - li2 = [example_val, example_val] ty1 = TypeGen.from_example(li1) ty2 = TypeGen.from_example(li1) self.assertEqual(ty1.parse(str(ty1)), ty1) @@ -6166,7 +6160,6 @@ class TestHopSchema(TestCase): (schema_type + "_v", self._get_example_val(schema_type)) for schema_type in _hop_schema_test_schema_types ] - op_name = "test_op" schema1 = FunctionSchemaGen.from_example("test_op1", inps, torch.ones(1)) schema2 = FunctionSchemaGen.from_example( "test_op2", @@ -6245,7 +6238,7 @@ class TestHopSchema(TestCase): x, ) model = M() - ep = torch.export.export(model, args) + torch.export.export(model, args) graph_str = self._check_export(model, args, None) self.assertExpectedInline( graph_str, diff --git a/test/functorch/test_dims.py b/test/functorch/test_dims.py index 1d309f44d29..86d213a9d77 100644 --- a/test/functorch/test_dims.py +++ b/test/functorch/test_dims.py @@ -1,5 +1,4 @@ # Owner(s): ["module: functorch"] - # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # @@ -310,7 +309,7 @@ class TestMin(TestCase): def test_stack(self): i, j, d = dims() A = torch.rand(4, 5) - r = stack([A[i, j]], d, j) + _r = stack([A[i, j]], d, j) # a, b = r.unbind(d) # self.assertTrue(torch.allclose(a.order(i, j), i.expand(j).order(i, j))) # self.assertTrue(torch.allclose(b.order(i, j), j.expand(i).order(i, j))) @@ -329,7 +328,7 @@ class TestMin(TestCase): a_ = a[i, k] b_ = b[k, j] q.size = 1 - r = (a_.expand(j, q) * b_.expand(i, q)).sum(k).order(q, i, j) + _r = (a_.expand(j, q) * b_.expand(i, q)).sum(k).order(q, i, j) # r = (a_*b_).sum(k).order(q, i, j) # print(r) # print(a @ b) @@ -362,15 +361,8 @@ class TestMin(TestCase): # XXX - chunk changes the size of a dimension, has to take a new dimension... 
# assert torch.allclose(A.chunk(2,1)[0], A[i, k].chunk(2, k)[0].order(i, k)) assert torch.allclose(A[i].renorm(1, i, 7).order(i), A.renorm(1, 0, 7)) - kk = dims() - # assert torch.allclose( torch.stack([A, A], 1), stack([A[i,k], A[i, k]], kk, k).order(i, kk, k)) - - k2 = dims() - # r = cat((A[i, k], A[i,k]), k, k2) - # assert torch.allclose(torch.cat([A, A], 1), r.order(i, k2)) - # assert k2.size == 2*k.size - assert torch.allclose(A.expand(5, -1, -1), A[i, k].expand(j).order(j, i, k)) + z = dims() C = torch.arange(2) assert torch.allclose(A[:, 0:2], A[i, k].index(k, C[z]).order(i, z)) @@ -497,11 +489,10 @@ class TestMin(TestCase): _test_c() def test_seg(self): - A = torch.rand(3, 4) i, k = dims() i.size = 4 k.size = 3 - r = i + k - 1 + i + k - 1 def test_expand(self): A = torch.rand(3, 4) @@ -582,7 +573,6 @@ class TestMin(TestCase): def test_index(self): A = torch.rand(3, 4) - B = torch.rand(4, 5) i, j, k = dims() o, l = dims() diff --git a/test/functorch/test_eager_transforms.py b/test/functorch/test_eager_transforms.py index a1bd52a2fbb..ce0c43cdbc6 100644 --- a/test/functorch/test_eager_transforms.py +++ b/test/functorch/test_eager_transforms.py @@ -1,5 +1,4 @@ # Owner(s): ["module: functorch"] - # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # @@ -1302,8 +1301,6 @@ class TestAutogradFunction(TestCase): # https://github.com/pytorch/pytorch/issues/90224 @unittest.expectedFailure def test_once_differentiable_grad_vjp(self, device): - NumpyCubeNotComposable = self._get_NumpyCubeNotComposable() - # grad x vjp x = torch.randn([], device=device) grad_y = torch.randn_like(x) @@ -1554,7 +1551,7 @@ class TestAutogradFunctionVmapAPI(TestCase): B = 2 x = torch.randn(B, 3) with self.assertRaisesRegex(RuntimeError, "to have two returns"): - result = vmap(Zeros.apply)(x) + vmap(Zeros.apply)(x) class TwoZeros(torch.autograd.Function): @staticmethod @@ -1574,7 +1571,7 @@ class TestAutogradFunctionVmapAPI(TestCase): B = 2 x = torch.randn(B, 3) with self.assertRaisesRegex(RuntimeError, "to have two returns"): - result = vmap(Zeros.apply)(x) + vmap(Zeros.apply)(x) def test_incompatible_out_dims_error_msg(self, device): class Zeros(torch.autograd.Function): @@ -1595,7 +1592,7 @@ class TestAutogradFunctionVmapAPI(TestCase): B = 2 x = torch.randn(B, 3) with self.assertRaisesRegex(RuntimeError, "returned an incompatible"): - result = vmap(Zeros.apply)(x) + vmap(Zeros.apply)(x) class Zeros(torch.autograd.Function): @staticmethod @@ -1615,7 +1612,7 @@ class TestAutogradFunctionVmapAPI(TestCase): B = 2 x = torch.randn(B, 3) with self.assertRaisesRegex(RuntimeError, "returned an incompatible"): - result = vmap(Zeros.apply)(x) + vmap(Zeros.apply)(x) def test_kwarg_only_tensors(self, device): with self.assertRaisesRegex(NotImplementedError, "kwarg-only Tensor args"): @@ -3154,7 +3151,7 @@ class TestHelpers(TestCase): @staticmethod def backward(ctx, gy): - wrapped = torch._functorch.autograd_function.CtxWithSavedTensors( + wrapped = torch._functorch.autograd_function.CtxWithSavedTensors( # noqa: F841 ctx, (y,) ) return gy @@ -3168,7 +3165,7 @@ class TestHelpers(TestCase): @staticmethod def backward(ctx, gy): - wrapped = torch._functorch.autograd_function.CtxWithSavedTensors( + wrapped = torch._functorch.autograd_function.CtxWithSavedTensors( # noqa: F841 ctx, (y,) ) return gy @@ -3307,8 +3304,6 @@ class TestHelpers(TestCase): @markDynamoStrictTest class TestComposability(TestCase): def test_deprecation_vmap(self, device): - x = torch.randn(3, device=device) - # functorch version of the 
API is deprecated with self.assertWarnsRegex(FutureWarning, "Please use `torch.vmap`"): vmap(torch.sin) @@ -4758,9 +4753,9 @@ def forward(self, x_1, indices_1) -> torch.Tensor: y = x.detach() return y + y - with FakeTensorMode() as mode: + with FakeTensorMode(): x = torch.ones(2, device=device, requires_grad=True) - out = functionalize(f)(x) + functionalize(f)(x) self.assertEqual(x.size(), (2,)) def test_functionalize_fx_simple(self, device): @@ -5186,7 +5181,7 @@ class TestCompileTransforms(TestCase): actual = wrapper_fn(x, y) expected = torch.compile(wrapper_fn, backend="eager", fullgraph=True)(x, y) - fn = torch.compile(wrapper_fn, backend="eager", fullgraph=True) + torch.compile(wrapper_fn, backend="eager", fullgraph=True) self.assertEqual(actual, expected) def wrapper_fn(x, y): diff --git a/test/functorch/test_ops.py b/test/functorch/test_ops.py index a4269ff84d5..a8d5dd94389 100644 --- a/test/functorch/test_ops.py +++ b/test/functorch/test_ops.py @@ -1,4 +1,5 @@ # Owner(s): ["module: functorch"] +# ruff: noqa: F841 # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. diff --git a/test/functorch/test_vmap.py b/test/functorch/test_vmap.py index 726eeeb90a3..1d9f25b6627 100644 --- a/test/functorch/test_vmap.py +++ b/test/functorch/test_vmap.py @@ -682,6 +682,8 @@ class TestVmapAPI(TestCase): vmap(torch.mul, (0, 0))(x, y) def test_integer_in_dim_but_not_tensor_input_err_msg(self): + # noqa: F841 + def foo(xy): return xy[0] * xy[1] @@ -1246,7 +1248,7 @@ class TestVmapAPI(TestCase): def test_data_attribute(self): def foo(x): - y = x.data + y = x.data # noqa: F841 return x with self.assertRaisesRegex( diff --git a/test/fx/quantization.py b/test/fx/quantization.py index d2869c2b27f..3daa4da479e 100644 --- a/test/fx/quantization.py +++ b/test/fx/quantization.py @@ -4,10 +4,9 @@ rely on it for anything!** """ import operator import sys -from typing import Optional import torch -from torch.fx import Graph, GraphModule, Node +from torch.fx import Graph, GraphModule from torch.fx.graph import map_arg from torch.fx.proxy import Proxy from torch.nn.utils import fuse_conv_bn_weights @@ -181,7 +180,7 @@ class ConvNormRelu(MinMaxObserver): parent_name, name = _parent_name(self.conv_node.target) setattr(quantizer.modules[parent_name], name, qconv) if self.bn_node is not None: - parent_bn, bn_name = _parent_name(self.bn_node.target) + _, bn_name = _parent_name(self.bn_node.target) # we can't just delete this because submodules's forwards (which are not longer use) # try to call it, so replace with something that does nothing. 
setattr(quantizer.modules[parent_name], bn_name, IdentityModule()) @@ -277,7 +276,6 @@ class Quantizer: def load_arg(a): return map_arg(a, lambda node: env[node.name]) - output_node: Optional[Node] = None for node in self.graph.nodes: if node.op == "placeholder": result = next(args_iter) @@ -322,12 +320,6 @@ class Quantizer: return quant_env[n.name] def copy_recursive(node): - def load_or_emit(n): - if n.name in env or e.name in quant_env: # noqa: F821 - return load_arg(n, quantized=False) - else: - return copy_recursive(n) - r = env[node.name] = self.quantized_graph.node_copy( node, lambda n: load_arg(n, quantized=False) ) diff --git a/test/fx/test_cse_pass.py b/test/fx/test_cse_pass.py index e0690ca56c0..16aa9e70a02 100644 --- a/test/fx/test_cse_pass.py +++ b/test/fx/test_cse_pass.py @@ -235,7 +235,7 @@ class TestCSEPass(TestCase): return a + b t = torch.randn(2, 2) - P_ban_add = P = CSEPass(banned_ops=[torch.ops.aten.add]) + P_ban_add = CSEPass(banned_ops=[torch.ops.aten.add]) check(self, f, t, 0, P=P_ban_add) # check that add is banned check(self, f, t, 1) # check that add is not banned by default diff --git a/test/fx/test_dce_pass.py b/test/fx/test_dce_pass.py index 2e6821f920b..a840c77419e 100644 --- a/test/fx/test_dce_pass.py +++ b/test/fx/test_dce_pass.py @@ -1,5 +1,4 @@ # Owner(s): ["module: fx"] - import copy import unittest from typing import Optional, Set, Type @@ -92,7 +91,7 @@ class TestDCE(TestCase): self.attr_1 = torch.nn.Parameter(torch.tensor([-0.9])) def forward(self, x): - a = x + 1 + a = x + 1 # noqa: F841 return x + self.attr_1 self._run_dce_and_test(TestModule(), expect_dce_changes=True) @@ -109,7 +108,7 @@ class TestDCE(TestCase): def forward(self, x): a = x + 1 - b = a * 7 + b = a * 7 # noqa: F841 return x + self.attr_1 self._run_dce_and_test(TestModule(), expect_dce_changes=True) @@ -126,7 +125,7 @@ class TestDCE(TestCase): def forward(self, x): a = x + 1 - b = a * self.attr_1 + b = a * self.attr_1 # noqa: F841 return x + 11 self._run_dce_and_test(TestModule(), expect_dce_changes=True) @@ -153,7 +152,7 @@ class TestDCE(TestCase): class TestModule(torch.nn.Module): def forward(self, x, y): - a = y + 2 + a = y + 2 # noqa: F841 return x + 7 self._run_dce_and_test(TestModule(), expect_dce_changes=True) @@ -172,7 +171,7 @@ class TestDCE(TestCase): self.relu = ReLUImpure() def forward(self, a: torch.Tensor) -> torch.Tensor: - r = self.relu(a) + r = self.relu(a) # noqa: F841 return a * 2 self._run_dce_and_test( @@ -228,7 +227,7 @@ class TestDCE(TestCase): class TestModule(torch.nn.Module): def forward(self, a: torch.Tensor) -> torch.Tensor: b = a + 1 - c = torch._ops.ops.aten.add(b, b) + c = torch._ops.ops.aten.add(b, b) # noqa: F841 return a # %add_out node should not be removed because it has side effects. 
@@ -249,9 +248,7 @@ class TestDCE(TestCase): d = torch.ops.aten.mul.Tensor(a, b) e = torch.ops.aten.mul.Tensor(a, c) future = torch.ops._c10d_functional.all_reduce.default(e, "sum", "0") - synced_e = torch.ops._c10d_functional.wait_tensor.default( - future - ) # synced_e is not used + torch.ops._c10d_functional.wait_tensor.default(future) return d torch.distributed.init_process_group( @@ -279,9 +276,7 @@ class TestDCE(TestCase): d = torch.ops.aten.mul(a, b) e = torch.ops.aten.mul(a, c) future = torch.ops._c10d_functional.all_reduce(e, "sum", "0") - synced_e = torch.ops._c10d_functional.wait_tensor( - future - ) # synced_e is not used + torch.ops._c10d_functional.wait_tensor(future) return d torch.distributed.init_process_group( diff --git a/test/fx/test_fx_split.py b/test/fx/test_fx_split.py index 12862cc1774..89574cdf4bc 100644 --- a/test/fx/test_fx_split.py +++ b/test/fx/test_fx_split.py @@ -214,10 +214,8 @@ class TestSplitOutputType(TestCase): inputs = torch.randn((1, 3, 224, 224)) - gm, tag_node = TestSplitOutputType.trace_and_tag(module, inputs, tags) - split_gm, orig_to_split_fqn_mapping = split_by_tags( - gm, tags, return_fqn_mapping=True - ) + gm, _ = TestSplitOutputType.trace_and_tag(module, inputs, tags) + split_gm, _ = split_by_tags(gm, tags, return_fqn_mapping=True) gm_output = module(inputs) split_gm_output = split_gm(inputs) diff --git a/test/fx/test_gradual_type.py b/test/fx/test_gradual_type.py index 01a76eaf98a..fcf50dad99e 100644 --- a/test/fx/test_gradual_type.py +++ b/test/fx/test_gradual_type.py @@ -913,7 +913,7 @@ class TypeCheckerTest(TestCase): (2, 2, 10, 10), ] - intermediate_list = [ + intermediate_list = [ # noqa: F841 Dyn, (2, 5, 6, 9), (10, 15, 13, 14), @@ -1139,7 +1139,7 @@ class TypeCheckerTest(TestCase): return out B = BasicBlock() - ast_rewriter = RewritingTracer() + ast_rewriter = RewritingTracer() # noqa: F841 traced = symbolic_trace(B) tc = GraphTypeChecker({}, traced) tc.type_check() diff --git a/test/fx/test_matcher_utils.py b/test/fx/test_matcher_utils.py index f1cb6105b94..26caf91485e 100644 --- a/test/fx/test_matcher_utils.py +++ b/test/fx/test_matcher_utils.py @@ -176,7 +176,7 @@ class TestMatcher(JitTestCase): WrapperModule(pattern), example_inputs ).module() before_split_res = pattern_gm(*example_inputs) - pattern_gm, name_node_map = _split_to_graph_and_name_node_map(pattern_gm) + pattern_gm, _ = _split_to_graph_and_name_node_map(pattern_gm) after_split_res = pattern_gm(*example_inputs) self.assertEqual(before_split_res[0], after_split_res[0]) self.assertEqual(before_split_res[1], after_split_res[1]) diff --git a/test/fx/test_subgraph_rewriter.py b/test/fx/test_subgraph_rewriter.py index 7f23e706216..5f0f91d0be4 100644 --- a/test/fx/test_subgraph_rewriter.py +++ b/test/fx/test_subgraph_rewriter.py @@ -342,10 +342,10 @@ class TestSubgraphRewriter(JitTestCase): ): class M(torch.nn.Module): def forward(self, x, w1, w2, b1, b2): - m0 = torch.cat([w1, w2]) + m0 = torch.cat([w1, w2]) # noqa: F841 m1 = torch.cat([w1, w2]) m2 = torch.cat([x, b2]) - t0 = torch.addmm(b1, m1, m2.t()) + t0 = torch.addmm(b1, m1, m2.t()) # noqa: F841 t1 = torch.sum(w1, 1) t2 = torch.addmm(b1, m1, m2.t()) return torch.sum(t1), torch.sum(t2) diff --git a/test/fx/test_z3_gradual_types.py b/test/fx/test_z3_gradual_types.py index be5fd3d73f6..70430e03c3a 100644 --- a/test/fx/test_z3_gradual_types.py +++ b/test/fx/test_z3_gradual_types.py @@ -1011,8 +1011,7 @@ class HFOperations(unittest.TestCase): size = x.size() getitem = size[-1] view = x.view(-1, getitem) - embed_tokens = 
self.embed_tokens(view) - mul = embed_tokens * 32.0 + _embed_tokens = self.embed_tokens(view) getitem_1 = size[-1] gt = getitem_1 > 1 return gt @@ -1076,8 +1075,7 @@ class HFOperations(unittest.TestCase): size = x.size() getitem = size[-1] view = x.view(-1, getitem) - embed_tokens = self.embed_tokens(view) - mul = embed_tokens * 32.0 + _embed_tokens = self.embed_tokens(view) getitem_1 = size[-1] lt = getitem_1 < 1 return lt @@ -1558,7 +1556,7 @@ class TestSingleOperation(unittest.TestCase): self.relu = torch.nn.ReLU(inplace=True) def forward(self, x: Dyn): - y = self.relu(self.conv1(x)) + y = self.relu(self.conv1(x)) # noqa: F841 z = self.relu(self.conv2(x)) return z @@ -1667,12 +1665,7 @@ class TestSingleOperation(unittest.TestCase): def test_add(self): s1, s2, s3, s4 = z3.Ints("s1 s2 s3 s4") s11, s22, s33, s44 = z3.Ints("s11 s22 s33 s44") - d1, d2, d3, d4 = ( - D(s11, s1), - D(s22, s2), - D(s33, s3), - D(s44, s4), - ) + d1, d2 = D(s11, s1), D(s22, s2) class BasicBlock(torch.nn.Module): def forward(self, x: Dyn, y: Dyn): @@ -2121,12 +2114,7 @@ class TestSingleOperation(unittest.TestCase): def test_reshape_annotated(self): s1, s2, s3, s4 = z3.Ints("s1 s2 s3 s4") s11, s22, s33, s44 = z3.Ints("s11 s22 s33 s44") - d1, d2, d3, d4 = ( - D(s11, s1), - D(s22, s2), - D(s33, s3), - D(s44, s4), - ) + d1, d2 = D(s11, s1), D(s22, s2) class BasicBlock(torch.nn.Module): def forward(self, x: TensorType([Dyn])): diff --git a/test/higher_order_ops/test_invoke_subgraph.py b/test/higher_order_ops/test_invoke_subgraph.py index f1ab93d34d9..e506d9a2a7d 100644 --- a/test/higher_order_ops/test_invoke_subgraph.py +++ b/test/higher_order_ops/test_invoke_subgraph.py @@ -73,8 +73,6 @@ class TestInvokeSubgraph(TestCase): self.assertEqual(y.grad, y_clone.grad) def test_multiple(self): - n_layers = 2 - @mark_compile_region def cos(x): return torch.cos(x) diff --git a/test/higher_order_ops/test_with_effects.py b/test/higher_order_ops/test_with_effects.py index 241a7e10319..c1b9fbda83e 100644 --- a/test/higher_order_ops/test_with_effects.py +++ b/test/higher_order_ops/test_with_effects.py @@ -1,4 +1,5 @@ # Owner(s): ["module: functorch"] +# ruff: noqa: F841 # flake8: noqa: B950 import unittest from collections import deque diff --git a/test/inductor/s429861_repro.py b/test/inductor/s429861_repro.py index 494bd3db817..239fe8241d4 100644 --- a/test/inductor/s429861_repro.py +++ b/test/inductor/s429861_repro.py @@ -1,4 +1,5 @@ # flake8: noqa +# ruff: noqa: F841 import torch diff --git a/test/inductor/test_aot_inductor.py b/test/inductor/test_aot_inductor.py index fad8805f85e..bb238768a08 100644 --- a/test/inductor/test_aot_inductor.py +++ b/test/inductor/test_aot_inductor.py @@ -111,7 +111,7 @@ try: requires_multigpu, TestFailure, ) -except (unittest.SkipTest, ImportError) as e: +except (unittest.SkipTest, ImportError): if __name__ == "__main__": sys.exit(0) raise @@ -2432,7 +2432,7 @@ class AOTInductorTestsTemplate: output_wo_y = torch.empty_like(x) output_with_y = torch.empty_like(x) - wo_kernel = add_kernel_with_optional_param[(1,)]( + add_kernel_with_optional_param[(1,)]( x, None, output_wo_y, @@ -2440,7 +2440,7 @@ class AOTInductorTestsTemplate: ARGS_PASSED="one", BLOCK_SIZE=BLOCK_SIZE, ) - with_kernel = add_kernel_with_optional_param[(1,)]( + add_kernel_with_optional_param[(1,)]( x, y, output_with_y, @@ -2870,8 +2870,6 @@ class AOTInductorTestsTemplate: x = self.bar(x) return x - orig_eager = MyModule() - self.check_model(MyModule(), (torch.randn(2, 3, device=self.device),)) def test_model_modified_weights(self): @@ 
-2887,7 +2885,6 @@ class AOTInductorTestsTemplate: M = 16 N = 10 K = 128 - batch = 8 example_inputs = (torch.randn(2, M, K, device=self.device),) model = Model(N, K, self.device) self.check_model(model, example_inputs) diff --git a/test/inductor/test_aot_inductor_arrayref.py b/test/inductor/test_aot_inductor_arrayref.py index 6521a24f6a0..97a11026062 100644 --- a/test/inductor/test_aot_inductor_arrayref.py +++ b/test/inductor/test_aot_inductor_arrayref.py @@ -34,7 +34,7 @@ try: copy_tests, TestFailure, ) -except (unittest.SkipTest, ImportError) as e: +except (unittest.SkipTest, ImportError): if __name__ == "__main__": sys.exit(0) raise diff --git a/test/inductor/test_aot_inductor_custom_ops.py b/test/inductor/test_aot_inductor_custom_ops.py index f3c5b41261f..9f35628f74b 100644 --- a/test/inductor/test_aot_inductor_custom_ops.py +++ b/test/inductor/test_aot_inductor_custom_ops.py @@ -50,7 +50,7 @@ try: copy_tests, TestFailure, ) -except (unittest.SkipTest, ImportError) as e: +except (unittest.SkipTest, ImportError): if __name__ == "__main__": sys.exit(0) raise diff --git a/test/inductor/test_aot_inductor_package.py b/test/inductor/test_aot_inductor_package.py index 456e4e7a759..a11e57f1ce9 100644 --- a/test/inductor/test_aot_inductor_package.py +++ b/test/inductor/test_aot_inductor_package.py @@ -225,7 +225,6 @@ class TestAOTInductorPackage(TestCase): def forward(self, a, b): return torch.cat([a, b], dim=0) - b = torch.randn(3, 4, device=self.device) dim0_a = Dim("dim0_a", min=1, max=10) dim0_b = Dim("dim0_b", min=1, max=20) dynamic_shapes = {"a": {0: dim0_a}, "b": {0: dim0_b}} diff --git a/test/inductor/test_autoheuristic.py b/test/inductor/test_autoheuristic.py index 196ccbfbde1..27060808679 100644 --- a/test/inductor/test_autoheuristic.py +++ b/test/inductor/test_autoheuristic.py @@ -48,7 +48,7 @@ class AutoHeuristicTest(TestCase): def assert_autoheuristic_collected_data(self): self.run_mm() - device_name = AutoHeuristic.get_device_identifier() + AutoHeuristic.get_device_identifier() path = self.get_path_to_autoheuristic_log("pad_mm") self.assertTrue(os.path.exists(path)) num_lines = self.count_lines_in_file(path) diff --git a/test/inductor/test_codecache.py b/test/inductor/test_codecache.py index 9eb68f90a64..e01d0f58357 100644 --- a/test/inductor/test_codecache.py +++ b/test/inductor/test_codecache.py @@ -512,7 +512,7 @@ class TestFxGraphCache(TestCase): compiled_fn = torch.compile(fn, dynamic=True, fullgraph=True) x = torch.randn(4, 4, device=GPU_TYPE) - result = compiled_fn(x) + compiled_fn(x) self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 0) self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0) @@ -551,7 +551,7 @@ class TestFxGraphCache(TestCase): x = torch.randn(4, device=GPU_TYPE) y = torch.randn(4, device=GPU_TYPE) - result = compiled_fn(x, y) + compiled_fn(x, y) self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1) self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0) @@ -565,7 +565,7 @@ class TestFxGraphCache(TestCase): PyCodeCache.cache_clear() shutil.rmtree(os.path.join(cache_dir(), "triton"), ignore_errors=True) - result = compiled_fn(x, y) + compiled_fn(x, y) self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1) self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1) @@ -579,7 +579,7 @@ class TestFxGraphCache(TestCase): PyCodeCache.cache_clear() shutil.rmtree(os.path.join(cache_dir(), "triton"), ignore_errors=True) - result = compiled_fn2(x, y) + compiled_fn2(x, y) 
         self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 2)
         self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)

@@ -698,7 +698,7 @@ class TestFxGraphCache(TestCase):
         x = torch.randn(4, device=GPU_TYPE)
         y = torch.randn(4, device=GPU_TYPE)

-        result = compiled_fn(x, y)
+        compiled_fn(x, y)

         self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
         self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
@@ -712,7 +712,7 @@ class TestFxGraphCache(TestCase):
         PyCodeCache.cache_clear()
         shutil.rmtree(os.path.join(cache_dir(), "triton"), ignore_errors=True)

-        result = compiled_fn(x, y)
+        compiled_fn(x, y)
         self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
         self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
@@ -768,7 +768,6 @@ class TestFxGraphCache(TestCase):
         # Verify the "hit" case.
         self.reset()
-        counter_val = 5
         self.assertEqual(fn(a, b), compiled_fn(a, b))
         self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)

diff --git a/test/inductor/test_compiled_autograd.py b/test/inductor/test_compiled_autograd.py
index 8d6ac8983af..c06be6b3c29 100644
--- a/test/inductor/test_compiled_autograd.py
+++ b/test/inductor/test_compiled_autograd.py
@@ -1,4 +1,5 @@
 # Owner(s): ["module: inductor"]
+# ruff: noqa: F841
 import contextlib
 import dataclasses
 import functools
@@ -800,7 +801,6 @@ main()
             return torch.compile(gm, backend=inner_compiler)

-        fwd_compiler_fn = functools.partial(eager_with_check, is_bwd=False)
         bwd_compiler_fn = functools.partial(eager_with_check, is_bwd=True)

         def fn(inputs):
@@ -941,7 +941,7 @@ main()
         torch._dynamo.reset()
         handle = torch._dynamo.convert_frame.register_bytecode_hook(bytecode_hook)
         try:
-            out = compiled_fn(inputs)
+            compiled_fn(inputs)
             self.assertTrue(len(inputs) == 0)
         finally:
             handle.remove()
diff --git a/test/inductor/test_compiled_optimizers.py b/test/inductor/test_compiled_optimizers.py
index d860afb4ae0..2a55d73cee3 100644
--- a/test/inductor/test_compiled_optimizers.py
+++ b/test/inductor/test_compiled_optimizers.py
@@ -407,7 +407,7 @@ def make_test(
         scheduler_eager.last_epoch = 1

         with torch.set_grad_enabled(False):
-            for i in range(2):
+            for _ in range(2):
                 compiled_step()
                 opt_eager.step()
                 if scheduler_cls:
diff --git a/test/inductor/test_cpu_repro.py b/test/inductor/test_cpu_repro.py
index 54189d97658..5f8e75bde48 100644
--- a/test/inductor/test_cpu_repro.py
+++ b/test/inductor/test_cpu_repro.py
@@ -152,7 +152,7 @@ class CPUReproTests(TestCase):
                 return func(*args, **kwargs)

         with RecordFunctions():
-            out = fn_compiled(inps)
+            fn_compiled(inps)

         self.assertTrue(conv_seen)

@@ -2385,8 +2385,6 @@ class CPUReproTests(TestCase):
             x[0, 0] = torch.nan
             x[1, -1] = torch.nan

-        tol = 1e-2 if dtype == torch.bfloat16 else 1e-4
-
         with config.patch({"cpp.simdlen": None}):
             for cpp_wrapper_flag in [True, False]:
                 with config.patch({"cpp_wrapper": cpp_wrapper_flag}):
@@ -3121,7 +3119,6 @@ class CPUReproTests(TestCase):
         x1 = torch.randn((5, 20), dtype=dtype)
         x2 = torch.randn((5, 20), dtype=dtype)

-        tol = 1e-2 if dtype == torch.bfloat16 else 1e-4
         with config.patch({"cpp.simdlen": 1}):
             torch._dynamo.reset()
             metrics.reset()
@@ -3365,7 +3362,7 @@ class CPUReproTests(TestCase):
                     permute_2, [16, 32], -1
                 )
                 getitem = split_with_sizes[0]
-                getitem_1 = split_with_sizes[1]
+                _getitem_1 = split_with_sizes[1]
                 permute_3 = torch.ops.aten.permute.default(getitem, [0, 1, 3, 2])
                 expand_1 = torch.ops.aten.expand.default(permute_3, [8, 4, 16, 144])
                 clone_3 = torch.ops.aten.clone.default(
@@ -4615,7 +4612,7 @@ class CPUReproTests(TestCase):
                     )
                 )
                 permute_default_8 = None
-                permute_default_10 = torch.ops.aten.permute.default(
+                _permute_default_10 = torch.ops.aten.permute.default(
                     convert_element_type_default_19, [0, 2, 1, 3]
                 )
                 convert_element_type_default_19 = None
diff --git a/test/inductor/test_cuda_repro.py b/test/inductor/test_cuda_repro.py
index f4b45b064a3..8049f7bd1cf 100644
--- a/test/inductor/test_cuda_repro.py
+++ b/test/inductor/test_cuda_repro.py
@@ -1,4 +1,6 @@
 # Owner(s): ["module: inductor"]
+# ruff: noqa: F841
+
 import functools
 import gc
 import math
diff --git a/test/inductor/test_cudacodecache.py b/test/inductor/test_cudacodecache.py
index 549bfd31f3d..2054c9abb50 100644
--- a/test/inductor/test_cudacodecache.py
+++ b/test/inductor/test_cudacodecache.py
@@ -58,7 +58,7 @@ class TestCUDACodeCache(InductorTestCase):
         y = torch.rand(10).float().cuda()
         a = 5.0
         expected_y = a * x + y
-        res = dll_wrapper.saxpy(
+        dll_wrapper.saxpy(
             ctypes.c_int(10),
             ctypes.c_float(a),
             ctypes.c_void_p(x.data_ptr()),
@@ -83,7 +83,7 @@ class TestCUDACodeCache(InductorTestCase):
         y = torch.rand(5).float().cuda()
         a = 2.0
         expected_y = a * x + y
-        res = compiled_res.result().saxpy(
+        compiled_res.result().saxpy(
             ctypes.c_int(5),
             ctypes.c_float(a),
             ctypes.c_void_p(x.data_ptr()),
diff --git a/test/inductor/test_cudagraph_trees.py b/test/inductor/test_cudagraph_trees.py
index 41a009a2483..5cf20869aa3 100644
--- a/test/inductor/test_cudagraph_trees.py
+++ b/test/inductor/test_cudagraph_trees.py
@@ -1,4 +1,5 @@
 # Owner(s): ["module: inductor"]
+# ruff: noqa: F841
 import contextlib
 import functools
 import gc
diff --git a/test/inductor/test_cutlass_backend.py b/test/inductor/test_cutlass_backend.py
index 07555491a70..f78caed2e11 100644
--- a/test/inductor/test_cutlass_backend.py
+++ b/test/inductor/test_cutlass_backend.py
@@ -825,7 +825,7 @@ class TestCutlassBackend(TestCase):
             wraps=select_no_algorithm,
         ) as sa:
             torch.compile(my_addmm, dynamic=False)(x, a, b, 1.0, 2.0)
-            args, kwargs = sa.call_args
+            args, _ = sa.call_args
             op_name, choices, _, __ = args
             assert op_name == "addmm"
             cuda_template_count = 0
@@ -873,7 +873,7 @@ class TestCutlassBackend(TestCase):
             wraps=select_no_algorithm,
         ) as sa:
             torch.compile(addmm, dynamic=False)(x, a, b, 1.0, 1.0)
-            args, kwargs = sa.call_args
+            args, _ = sa.call_args
             op_name, choices, _, __ = args
             assert op_name == "addmm"
             cuda_template_count = 0
diff --git a/test/inductor/test_decompose_mem_bound_mm.py b/test/inductor/test_decompose_mem_bound_mm.py
index e364f7c2a20..14775a7fd1a 100644
--- a/test/inductor/test_decompose_mem_bound_mm.py
+++ b/test/inductor/test_decompose_mem_bound_mm.py
@@ -389,7 +389,7 @@ class TestDecomposeMemMM(TestCase):
         def foo(x, y):
             return x.T.contiguous() @ y

-        out, code = run_and_get_code(foo, input1, input2)
+        _, code = run_and_get_code(foo, input1, input2)

         if GPU_TYPE == "xpu":
             # only 1 kernel generated on the XPU stack
diff --git a/test/inductor/test_dependencies.py b/test/inductor/test_dependencies.py
index d61317832ed..ea500c9727e 100644
--- a/test/inductor/test_dependencies.py
+++ b/test/inductor/test_dependencies.py
@@ -120,10 +120,6 @@ class TestDependencies(InductorTestCase):
     def test_normalize_with_stride_order_equal(self):
         x = sympy_index_symbol("x")
         y = sympy_index_symbol("y")
-        var_ranges = {
-            x: 1024,
-            y: 2048,
-        }

         loop_order1 = MemoryDep(
             "access_the_same_buffer",
@@ -145,10 +141,6 @@ class TestDependencies(InductorTestCase):
     def test_normalize_with_stride_order_unequal(self):
         x = sympy_index_symbol("x")
         y = sympy_index_symbol("y")
-        var_ranges = {
-            x: 1024,
-            y: 2048,
-        }

         loop_order1 = MemoryDep(
             "access_the_same_buffer",
diff --git a/test/inductor/test_flex_attention.py b/test/inductor/test_flex_attention.py
index 8da44d8cdc5..26d2588deb3 100644
--- a/test/inductor/test_flex_attention.py
+++ b/test/inductor/test_flex_attention.py
@@ -932,7 +932,6 @@ class TestFlexAttention(InductorTestCase):
             test_inference_only = True
         else:
             test_inference_only = False
-        MAX_S = S
         block_mask1 = create_block_mask(noop_mask, 1, 1, S, S, device=self.device)
         sdpa_partial1 = create_attention(score_mod, block_mask=block_mask1)
         # The first eager batch, shape (B, H, S, D)
@@ -3357,7 +3356,7 @@ def forward(self, arg0_1, arg1_1, arg2_1, arg3_1, arg4_1):
         # Run forward pass
         x = torch.randn(batch_shape, sequence_len, 512).cuda()
-        y = model(x, block_mask=block_mask)
+        model(x, block_mask=block_mask)

         self.assertEqual(torch._dynamo.utils.counters["aot_autograd"]["ok"], 2)
@@ -3925,8 +3924,6 @@ BlockMask(shape=(1, 1, 2048, 2048), sparsity=46.88%,
     @supported_platform
     @common_utils.parametrize("compile", [False, True])
     def test_no_q_info(self, compile: bool):
-        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
         def causal_mask(b, h, q_idx, kv_idx):
             return q_idx >= kv_idx
@@ -4001,7 +3998,7 @@ BlockMask(shape=(1, 1, 2048, 2048), sparsity=46.88%,
         device = "cuda"
         max_seq_len, doc_count = 128, 4
-        B, H, SEQ_LEN, HEAD_DIM = 1, 1, max_seq_len, 8
+        SEQ_LEN = max_seq_len

         lengths = generate_random_lengths(max_seq_len, doc_count)
         offsets = length_to_offsets(lengths, device)
@@ -4031,7 +4028,6 @@ BlockMask(shape=(1, 1, 2048, 2048), sparsity=46.88%,
             lengths = generate_random_lengths(1024 + i, 5)
             offsets = length_to_offsets(lengths, "cuda")
             doc_ids = _offsets_to_doc_ids_tensor(offsets)
-            total_seq_len = 1024 + i

             def doc_mask_mod(b, h, q_idx, kv_idx):
                 return (
diff --git a/test/inductor/test_flex_decoding.py b/test/inductor/test_flex_decoding.py
index 3f10eebad49..93ddc784157 100644
--- a/test/inductor/test_flex_decoding.py
+++ b/test/inductor/test_flex_decoding.py
@@ -1554,7 +1554,7 @@ def forward(self, arg0_1, arg1_1, arg2_1, arg3_1, arg4_1):
             return causal_offset_mask

-        def noop(score, b, h, q_idx, kv_idx):
+        def noop(score, b, h, q_idx, kv_idx):  # noqa: F841
             return score

         mod = generate_causal_offset(
diff --git a/test/inductor/test_fp8.py b/test/inductor/test_fp8.py
index 6241853a288..8d11ec24406 100644
--- a/test/inductor/test_fp8.py
+++ b/test/inductor/test_fp8.py
@@ -147,11 +147,11 @@ class TestFP8Types(TestCase):
         x_shape = (16, 16)
         x = torch.rand(*x_shape, device="cuda", dtype=dtype).to(e4m3_type)
-        y_fp8 = compiled_fp8_matmul(x)
+        y_fp8 = compiled_fp8_matmul(x)  # noqa: F841

         x_shape = (15, 16)
         x = torch.rand(*x_shape, device="cuda", dtype=dtype).to(e4m3_type)
-        y_fp8 = compiled_fp8_matmul(x)
+        y_fp8 = compiled_fp8_matmul(x)  # noqa: F841

     @unittest.skipIf(not PLATFORM_SUPPORTS_FP8, f8_msg)
     @parametrize("dtype", (torch.float16, torch.bfloat16, torch.float))
@@ -193,14 +193,14 @@ class TestFP8Types(TestCase):
             "Conversions between float8_e5m2 and float8_e4m3fn is not supported!",
         ):
             x = torch.rand(*x_shape, device="cuda").to(dtype=torch.float8_e4m3fn)
-            y = compiled_fp8_cast(x, torch.float8_e5m2)
+            compiled_fp8_cast(x, torch.float8_e5m2)

         with self.assertRaisesRegex(
             torch._dynamo.exc.BackendCompilerFailed,
             "Conversions between float8_e5m2 and float8_e4m3fn is not supported!",
         ):
             x = torch.rand(*x_shape, device="cuda").to(dtype=torch.float8_e5m2)
-            y = compiled_fp8_cast(x, torch.float8_e4m3fn)
+            compiled_fp8_cast(x, torch.float8_e4m3fn)

     @unittest.skipIf(not PLATFORM_SUPPORTS_FP8, f8_msg)
@parametrize("src_dtype", (torch.float16, torch.bfloat16, torch.float)) @@ -699,7 +699,7 @@ class TestFP8Lowering(TestCase): linear_compiled = torch.compile(linear, backend="inductor", mode="max-autotune") with self.assertRaises(torch._dynamo.exc.TorchRuntimeError) as cm: - y_compiled = linear_compiled( + linear_compiled( x, w_t_fp8, w_inverse_scale, @@ -738,7 +738,7 @@ class TestFP8Lowering(TestCase): linear_compiled = torch.compile(linear, backend="inductor", mode="max-autotune") with self.assertRaises(torch._dynamo.exc.TorchRuntimeError) as cm: - y_compiled = linear_compiled( + linear_compiled( x, w_t_fp8, w_inverse_scale, diff --git a/test/inductor/test_group_batch_fusion.py b/test/inductor/test_group_batch_fusion.py index 6bde0305137..ebcb3817324 100644 --- a/test/inductor/test_group_batch_fusion.py +++ b/test/inductor/test_group_batch_fusion.py @@ -633,7 +633,7 @@ class TestFindIndependentSubsetGreedy(TestCase): return g, lookup def verify(self, tree, subnodes, min_fuse, max_fuse, expected): - g, lookup = self.build_graph(tree) + _, lookup = self.build_graph(tree) subnodes = [lookup[n] for n in subnodes] expected = [[lookup[n] for n in sub] for sub in expected] opts = { diff --git a/test/inductor/test_indexing.py b/test/inductor/test_indexing.py index 9607daf7c2b..d2fbc6b836d 100644 --- a/test/inductor/test_indexing.py +++ b/test/inductor/test_indexing.py @@ -251,7 +251,6 @@ class ExprPrinterTests(InductorTestCase): def test_print_pow(self): s1 = sympy.Symbol("foo", integer=True) s2 = sympy.Symbol("bar", integer=True) - s3 = sympy.Symbol("baz", integer=True) common_cases = [ # expr, result diff --git a/test/inductor/test_inplacing_pass.py b/test/inductor/test_inplacing_pass.py index ed09e81af48..b76fefcc37f 100644 --- a/test/inductor/test_inplacing_pass.py +++ b/test/inductor/test_inplacing_pass.py @@ -1,5 +1,4 @@ # Owner(s): ["module: inductor"] - from typing import List import torch @@ -198,7 +197,7 @@ class TestReinplacingPassCorrectness(InductorTestCase): def test_view_inplaced_functionalize_v2(self): def f(arg0_1): - select = torch.ops.aten.select.int(arg0_1, 0, 0) + torch.ops.aten.select.int(arg0_1, 0, 0) auto_functionalized = auto_functionalized_v2( torch.ops.test_view.boo.default, _x_base_index=0, @@ -208,7 +207,7 @@ class TestReinplacingPassCorrectness(InductorTestCase): _all_bases=[arg0_1], ) getitem_1 = auto_functionalized[1] - copy_ = torch.ops.aten.copy_.default(arg0_1, getitem_1) + torch.ops.aten.copy_.default(arg0_1, getitem_1) return () x1 = torch.randn(3, device=device) @@ -220,7 +219,7 @@ class TestReinplacingPassCorrectness(InductorTestCase): # introduce a view another_view that is used `after` the copy def test_view_inplaced2_functionalize_v2(self): def f(arg0_1): - select = torch.ops.aten.select.int(arg0_1, 0, 0) + _select = torch.ops.aten.select.int(arg0_1, 0, 0) another_view = arg0_1[2] auto_functionalized = auto_functionalized_v2( torch.ops.test_view.boo.default, @@ -231,7 +230,7 @@ class TestReinplacingPassCorrectness(InductorTestCase): _all_bases=[arg0_1], ) getitem_1 = auto_functionalized[1] - copy_ = torch.ops.aten.copy_.default(arg0_1, getitem_1) + _copy = torch.ops.aten.copy_.default(arg0_1, getitem_1) return another_view x1 = torch.randn(3, device=device) @@ -243,7 +242,7 @@ class TestReinplacingPassCorrectness(InductorTestCase): # introduce a view another_view that is used `before` the copy def test_views_not_inplaced_functionalize_v2(self): def f(arg0_1): - select = torch.ops.aten.select.int(arg0_1, 0, 0) + _select = torch.ops.aten.select.int(arg0_1, 
0, 0) another_view = arg0_1[2] auto_functionalized = auto_functionalized_v2( torch.ops.test_view.boo.default, @@ -255,7 +254,7 @@ class TestReinplacingPassCorrectness(InductorTestCase): ) getitem_1 = auto_functionalized[1] use_another_view = another_view * 10 - copy_ = torch.ops.aten.copy_.default(arg0_1, getitem_1) + _copy = torch.ops.aten.copy_.default(arg0_1, getitem_1) return use_another_view x1 = torch.randn(3, device=device) @@ -267,8 +266,8 @@ class TestReinplacingPassCorrectness(InductorTestCase): # a view over input without copy node, inplace not allowed def test_views_not_inplaced2_functionalize_v2(self): def f(arg0_1): - select = torch.ops.aten.select.int(arg0_1, 0, 0) - another_view = arg0_1[2] + _select = torch.ops.aten.select.int(arg0_1, 0, 0) + _another_view = arg0_1[2] auto_functionalized = auto_functionalized_v2( torch.ops.test_view.boo.default, _x_base_index=0, @@ -277,7 +276,7 @@ class TestReinplacingPassCorrectness(InductorTestCase): _x_storage_offset=0, _all_bases=[arg0_1], ) - getitem_1 = auto_functionalized[1] + _getitem_1 = auto_functionalized[1] return x1 = torch.randn(3, device=device) @@ -299,7 +298,7 @@ class TestReinplacingPassCorrectness(InductorTestCase): _x_storage_offset=0, _all_bases=[a], ) - getitem_1 = auto_functionalized[1] + _getitem_1 = auto_functionalized[1] return another_view x1 = torch.randn(3, device=device) @@ -450,7 +449,7 @@ class TestReinplacingPassCorrectness(InductorTestCase): return MySin.apply(x) x = torch.randn(3, requires_grad=True, device=device) - y = f(x) + f(x) self.assertEqual(num_reinplacing_failures(), 0) diff --git a/test/inductor/test_kernel_benchmark.py b/test/inductor/test_kernel_benchmark.py index 065d247e13d..e401101df20 100644 --- a/test/inductor/test_kernel_benchmark.py +++ b/test/inductor/test_kernel_benchmark.py @@ -1,4 +1,5 @@ # Owner(s): ["module: inductor"] +# ruff: noqa: F841 import contextlib import os import subprocess diff --git a/test/inductor/test_layout_optim.py b/test/inductor/test_layout_optim.py index 946cd45413f..52203caddab 100644 --- a/test/inductor/test_layout_optim.py +++ b/test/inductor/test_layout_optim.py @@ -79,7 +79,7 @@ class TestLayoutOptim(TestCase): x.sum().backward() grads = [] - for name, param in m.named_parameters(): + for _, param in m.named_parameters(): grad = param.grad if param.grad is None: grad = torch.zeros_like(param) @@ -327,7 +327,7 @@ class TestLayoutOptim(TestCase): model = MyModel(input_dim, num_classes) model.to(device) - opt_model = torch.compile(model) + opt_model = torch.compile(model) # noqa: F841 x = torch.ones((batch_size, 1, seq_len, input_dim), device=device) targets = torch.randint( diff --git a/test/inductor/test_loop_ordering.py b/test/inductor/test_loop_ordering.py index 38446a006d3..50cc0ef0303 100644 --- a/test/inductor/test_loop_ordering.py +++ b/test/inductor/test_loop_ordering.py @@ -158,7 +158,7 @@ class ImplDetailTest(TestCase): def _create_computed_buffer(): def inner_fn(index): - i0, i1, i2, i3 = index + i0, _, i2, i3 = index return ops.load( "primal", i3 + 49 * i2 + 2401 * ModularIndexing(i0, 1, 64) ) @@ -435,7 +435,6 @@ class LoopOrderingTest(TestCase): scale = torch.Tensor([10.0]).to("cuda") E4M3_MAX_POS = torch.finfo(torch.float8_e4m3fn).max - E5M2_MAX_POS = torch.finfo(torch.float8_e5m2).max def test_pattern2(tensor_x_inp, scale_x): tensor_x = tensor_x_inp * scale_x diff --git a/test/inductor/test_max_autotune.py b/test/inductor/test_max_autotune.py index d52727884db..ab20f58f642 100644 --- a/test/inductor/test_max_autotune.py +++ 
b/test/inductor/test_max_autotune.py @@ -1192,7 +1192,7 @@ class TestPrologueFusion(TestCase): self.check_code(code[0], num_kernels=1, num_allocs=1, num_deallocs=3) # should be done in low precision - f = ( + ( FileCheck() .check("for k_idx") .check_not("to(tl.float32)") @@ -1216,7 +1216,7 @@ class TestPrologueFusion(TestCase): self.check_code(code[0], num_kernels=1, num_allocs=1, num_deallocs=2) # should be done in low precision, no arithmetic - f = ( + ( FileCheck() .check("for k_idx") .check_not("to(tl.float32)") @@ -1232,7 +1232,7 @@ class TestPrologueFusion(TestCase): self.check_code(code[0], num_kernels=1, num_allocs=1, num_deallocs=2) # should not be done in low precision - f = ( + ( FileCheck() .check("for k_idx") .check("to(tl.float32)") @@ -1369,7 +1369,7 @@ class TestPrologueFusion(TestCase): @config.patch(realize_reads_threshold=1, realize_opcount_threshold=1) @parametrize("benchmark_fusion", (True, False)) def test_prologue_read_into_both_inputs(self, benchmark_fusion): - M = K = N = 256 + M = K = 256 # not supported today. it could be, but typically the pointwise nodes would get # inlined into separate nodes. diff --git a/test/inductor/test_move_constructors_to_cuda.py b/test/inductor/test_move_constructors_to_cuda.py index 43d146770e3..44723c8e8df 100644 --- a/test/inductor/test_move_constructors_to_cuda.py +++ b/test/inductor/test_move_constructors_to_cuda.py @@ -77,7 +77,7 @@ class TestMoveConstructorsToCuda(TestCase): return x[c1 + c2], c2 - 4 * 2 inp = torch.rand([4]).cuda() - out, code = run_and_get_code(foo, inp) + _, code = run_and_get_code(foo, inp) FileCheck().check_not("triton.jit").run(code[0]) @torch.compile() @@ -86,7 +86,7 @@ class TestMoveConstructorsToCuda(TestCase): c1 = torch.ones([4], dtype=torch.long) return x[c1 + c2], c2 - 4 * 2 - out, code = run_and_get_code(foo, inp) + _, code = run_and_get_code(foo, inp) FileCheck().check_not("triton.jit").run(code[0]) @requires_multigpu() diff --git a/test/inductor/test_multi_kernel.py b/test/inductor/test_multi_kernel.py index b125a33c187..78c8f7b5ea0 100644 --- a/test/inductor/test_multi_kernel.py +++ b/test/inductor/test_multi_kernel.py @@ -225,8 +225,8 @@ class MultiKernelTest(TestCase): y = torch.randn(8, device=GPU_TYPE) y_ref = y.clone() - ref = f(x, y_ref) - act = torch.compile(f)(x, y) + ref = f(x, y_ref) # noqa: F841 + act = torch.compile(f)(x, y) # noqa: F841 self.assertEqual(y_ref, y) def test_reduction_scratch_buffer(self, force_multi_kernel=1): diff --git a/test/inductor/test_ordered_set.py b/test/inductor/test_ordered_set.py index 15dee491b57..b057fe393de 100644 --- a/test/inductor/test_ordered_set.py +++ b/test/inductor/test_ordered_set.py @@ -1,5 +1,5 @@ # Owner(s): ["module: inductor"] -# mypy: ignore-errors +# ruff: noqa: F841 # flake8: noqa import collections import collections.abc diff --git a/test/inductor/test_pad_mm.py b/test/inductor/test_pad_mm.py index 5d5a28bd69e..01a58be327e 100644 --- a/test/inductor/test_pad_mm.py +++ b/test/inductor/test_pad_mm.py @@ -172,7 +172,7 @@ class PadMMTest(TestCase): ): res1 = fn(a, b) compiled_fn = torch.compile(fn) - res2, (code,) = run_and_get_code(compiled_fn, a, b) + res2, (_,) = run_and_get_code(compiled_fn, a, b) self.assertEqual(res1, res2) @inductor_config.patch(force_shape_pad=True) diff --git a/test/inductor/test_pattern_matcher.py b/test/inductor/test_pattern_matcher.py index 89468fe3b74..fdf30fd4eb4 100644 --- a/test/inductor/test_pattern_matcher.py +++ b/test/inductor/test_pattern_matcher.py @@ -1268,7 +1268,7 @@ class 
TestPatternMatcher(TestCase): def fn(a, b): return torch.mm(a, b).clone() - result, (code) = run_and_get_code(fn, torch.randn(8, 8), torch.randn(8, 8)) + _, (code) = run_and_get_code(fn, torch.randn(8, 8), torch.randn(8, 8)) # clone would create a buf1 self.assertIn("return (buf0, )", code[0]) self.assertNotIn("async_compile.cpp", code[0]) @@ -1679,7 +1679,7 @@ class TestPatternMatcher(TestCase): ) -> None: print("vllm::fused_rms_norm_quant_static") result_rms = torch.mul(input, weight) + epsilon - result = torch.mul(result_rms, scale).to(torch.int8) + _result = torch.mul(result_rms, scale).to(torch.int8) scale.fill_(0.5) @torch.library.custom_op("vllm::rms_norm", mutates_args=["result"]) @@ -1690,7 +1690,7 @@ class TestPatternMatcher(TestCase): epsilon: float, ) -> None: # bogus implementation doesn't matter - result = torch.mul(input, weight) + epsilon + _result = torch.mul(input, weight) + epsilon @torch.library.custom_op( "vllm::static_scaled_int8_quant", mutates_args=["result", "scale"] @@ -1702,7 +1702,7 @@ class TestPatternMatcher(TestCase): azp: Optional[torch.Tensor] = None, ) -> None: # bogus implementation doesn't matter - result = torch.mul(input, scale).to(torch.int8) + _result = torch.mul(input, scale).to(torch.int8) scale.fill_(0.5) def rms_pattern_static( @@ -1766,8 +1766,8 @@ class TestPatternMatcher(TestCase): ) def custom_pass(graph: torch.fx.Graph) -> torch.fx.Graph: - count = my_patterns.apply(graph) - # print(f"Count: {count}") + _count = my_patterns.apply(graph) + # print(f"Count: {_count}") graph.eliminate_dead_code() # graph.print_tabular() return graph diff --git a/test/inductor/test_perf.py b/test/inductor/test_perf.py index 7d9ec01e7a3..1e611bad979 100644 --- a/test/inductor/test_perf.py +++ b/test/inductor/test_perf.py @@ -1148,7 +1148,7 @@ class InplacingTests(TestCase): x = x + torch.ops.mylib.foo(q, k_cache, v_cache) return x - compiled_out, (code,) = run_and_get_code( + _, (code,) = run_and_get_code( torch.compile(f, fullgraph=True), ) diff --git a/test/inductor/test_profiler.py b/test/inductor/test_profiler.py index 08f81761030..45ca60b5fe4 100644 --- a/test/inductor/test_profiler.py +++ b/test/inductor/test_profiler.py @@ -44,7 +44,7 @@ class DynamoProfilerTests(torch._inductor.test_case.TestCase): kernel_name = "hipModuleLaunchKernel" if torch.version.hip else "cuLaunchKernel" - def nameMatchesLaunchKernel(event_name): + def nameMatchesLaunchKernel(event_name): # noqa: F841 return kernel_name in event_name self.assertTrue( diff --git a/test/inductor/test_smoke.py b/test/inductor/test_smoke.py index e8e0d685ba1..895e8ba16ab 100644 --- a/test/inductor/test_smoke.py +++ b/test/inductor/test_smoke.py @@ -55,7 +55,7 @@ class SmokeTest(TestCase): def test_compile_invalid_options(self): with self.assertRaises(RuntimeError): - opt_f = torch.compile(_test_f, mode="ha") + torch.compile(_test_f, mode="ha") if __name__ == "__main__": diff --git a/test/inductor/test_split_cat_fx_passes.py b/test/inductor/test_split_cat_fx_passes.py index 1cc32fd3fe2..0de987d9933 100644 --- a/test/inductor/test_split_cat_fx_passes.py +++ b/test/inductor/test_split_cat_fx_passes.py @@ -781,7 +781,7 @@ class TestSplitCatFxPasses(TestCase): def unbind_stack(x): return torch.stack(torch.unbind(x, 1), 1) - def unbind_cat(x): + def unbind_cat(x): # noqa: F841 return torch.cat(torch.unbind(x, dim=-3), 1) def unbind_stack_argspec1(x): diff --git a/test/inductor/test_standalone_compile.py b/test/inductor/test_standalone_compile.py index 2c1423a7caf..e1f4f146636 100644 --- 
a/test/inductor/test_standalone_compile.py +++ b/test/inductor/test_standalone_compile.py @@ -83,7 +83,7 @@ class TestStandaloneInductor(TestCase): mod = MyModule3().eval() inp = torch.randn(10) correct = mod(inp) - gm, guards = dynamo.export(mod, inp, aten_graph=True) + gm, _ = dynamo.export(mod, inp, aten_graph=True) mod_opt = inductor.compile(gm, [inp]) actual = mod_opt(inp) self.assertEqual(actual, correct) @@ -92,7 +92,7 @@ class TestStandaloneInductor(TestCase): mod = MyModule2().eval() inp = {"key": [torch.randn(10), torch.randn(10)]} correct = mod(inp) - gm, guards = dynamo.export(mod, inp) + gm, _ = dynamo.export(mod, inp) mod_opt = inductor.compile(gm, [inp]) actual = mod_opt(inp) self.assertEqual(actual, correct) diff --git a/test/inductor/test_torchinductor.py b/test/inductor/test_torchinductor.py index de66b58caba..d8beaebabe0 100644 --- a/test/inductor/test_torchinductor.py +++ b/test/inductor/test_torchinductor.py @@ -1,4 +1,5 @@ # Owner(s): ["module: inductor"] +# ruff: noqa: F841 import contextlib import copy import dataclasses diff --git a/test/inductor/test_torchinductor_opinfo.py b/test/inductor/test_torchinductor_opinfo.py index 1c93ae3bc40..b51123e27b4 100644 --- a/test/inductor/test_torchinductor_opinfo.py +++ b/test/inductor/test_torchinductor_opinfo.py @@ -110,8 +110,8 @@ def print_seen(): return "{" + r + "}" def sort_key(kv): - k, v = kv - device_type, op = k + k, _ = kv + _, op = k if isinstance(op, tuple): return op else: @@ -1015,7 +1015,7 @@ class TestInductorOpInfo(TestCase): # print(f"CONSIDERING OP {op_name} on {device_type} with {dtype} | # {inductor_skips[device_type].get(op_name, set())}", flush=True) if dtype in inductor_skips[device_type].get(op_name, set()): - test_expect = ExpectedTestResult.SKIP + test_expect = ExpectedTestResult.SKIP # noqa: F841 # with open("test_output.txt", "a") as f: # print(f"SKIPPING OP {op_name} on {device_type}", flush=True, file=f) # print(f"SKIPPING OP {op_name} on {device_type}", flush=True) @@ -1026,9 +1026,9 @@ class TestInductorOpInfo(TestCase): ].get( op_name, set() ): - test_expect = ExpectedTestResult.XFAILURE + test_expect = ExpectedTestResult.XFAILURE # noqa: F841 else: - test_expect = ExpectedTestResult.SUCCESS + test_expect = ExpectedTestResult.SUCCESS # noqa: F841 overridden_kwargs = {} overridden_kwargs.update( diff --git a/test/inductor/test_torchinductor_strided_blocks.py b/test/inductor/test_torchinductor_strided_blocks.py index 1a07827ac1c..8d9d32e2aca 100644 --- a/test/inductor/test_torchinductor_strided_blocks.py +++ b/test/inductor/test_torchinductor_strided_blocks.py @@ -1,4 +1,5 @@ # Owner(s): ["module: inductor"] +# ruff: noqa: F841 import contextlib import importlib import unittest diff --git a/test/inductor/test_triton_heuristics.py b/test/inductor/test_triton_heuristics.py index 7953444c930..d1843bfc848 100644 --- a/test/inductor/test_triton_heuristics.py +++ b/test/inductor/test_triton_heuristics.py @@ -146,7 +146,7 @@ class TestTritonHeuristics(TestCase): cfg.pre_hook = pre_hook with self.assertRaisesRegex(AssertionError, "pre_hook"): - autotuner = CachingAutotuner(**args) + CachingAutotuner(**args) def test_autotune_hints_to_configs(self): device_props = DeviceProperties.create(torch.device(GPU_TYPE)) diff --git a/test/inductor/test_triton_kernels.py b/test/inductor/test_triton_kernels.py index 4805233d344..3c76f560ddb 100644 --- a/test/inductor/test_triton_kernels.py +++ b/test/inductor/test_triton_kernels.py @@ -1,4 +1,5 @@ # Owner(s): ["module: inductor"] +# ruff: noqa: F841 # flake8: 
noqa: E731 # Skip do not assign a lambda expression, use a def import functools diff --git a/test/inductor/test_triton_wrapper.py b/test/inductor/test_triton_wrapper.py index 8d0f8afdd76..100507161c7 100644 --- a/test/inductor/test_triton_wrapper.py +++ b/test/inductor/test_triton_wrapper.py @@ -38,7 +38,7 @@ class TestTritonWrapper(TestCase): N = 10 x = torch.rand(N).to(device=GPU_TYPE) y = torch.rand(N).to(device=GPU_TYPE) - out = f(x, y) + out = f(x, y) # noqa: F841 compiled_module = self.get_compiled_module() # to make sure the subprocess runs on the exact same path as the parent process # we augment the PYTHONPATH env var diff --git a/test/inductor/test_unbacked_symints.py b/test/inductor/test_unbacked_symints.py index 62bc058bd9c..6a4556302b9 100644 --- a/test/inductor/test_unbacked_symints.py +++ b/test/inductor/test_unbacked_symints.py @@ -53,7 +53,7 @@ class TestUnbackedSymints(InductorTestCase): return nz.expand([128, -1, 2]) x = make_tensor(32, 4, device=device, dtype=torch.float32, exclude_zero=True) - actual = torch.compile(fn, fullgraph=True)(x) + torch.compile(fn, fullgraph=True)(x) @skipGPUIf(not HAS_GPU, "requires gpu and triton") @dynamo_config.patch({"capture_dynamic_output_shape_ops": True}) diff --git a/test/jit/test_async.py b/test/jit/test_async.py index 38d147ff204..e5d5de52bc1 100644 --- a/test/jit/test_async.py +++ b/test/jit/test_async.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: jit"] +# ruff: noqa: F841 import os import sys diff --git a/test/jit/test_autodiff.py b/test/jit/test_autodiff.py index be4c697a01b..0594efd6ea5 100644 --- a/test/jit/test_autodiff.py +++ b/test/jit/test_autodiff.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: jit"] +# ruff: noqa: F841 from typing import List diff --git a/test/jit/test_autodiff_subgraph_slicing.py b/test/jit/test_autodiff_subgraph_slicing.py index 5f9b7a15ea8..ea367108788 100644 --- a/test/jit/test_autodiff_subgraph_slicing.py +++ b/test/jit/test_autodiff_subgraph_slicing.py @@ -71,7 +71,7 @@ class TestAutodiffSubgraphSlicing(JitTestCase): input = torch.rand(6, 10).requires_grad_() with disable_autodiff_subgraph_inlining(): with enable_profiling_mode_for_profiling_tests(): - output = func(input, profile_and_replay=True) + func(input, profile_and_replay=True) FileCheck().check_not("prim::DifferentiableGraph").run( func.graph_for(input) ) @@ -225,7 +225,7 @@ class TestAutodiffSubgraphSlicing(JitTestCase): input0 = torch.randn((2,), requires_grad=True) input1 = torch.randn((2,)) output_ref = func(input0, input1) - for i in range(2): + for _ in range(2): output = jit_f(input0, input1) assert output_ref[0].requires_grad == output[0].requires_grad assert output_ref[1][0].requires_grad == output[1][0].requires_grad @@ -294,7 +294,7 @@ class TestAutodiffSubgraphSlicing(JitTestCase): NUM_PROFILED_RUNS = 1 with num_profiled_runs(NUM_PROFILED_RUNS): WARMUP = 3 # 2 runs to reach backward + 1 to optimize it - for x in range(WARMUP): + for _ in range(WARMUP): o = t(input, bias) o.sum().backward() @@ -416,7 +416,6 @@ class TestAutodiffSubgraphSlicing(JitTestCase): graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1, 1) - num_nodes = 4 if GRAPH_EXECUTOR == ProfilingMode.PROFILING else 3 # add moved down g_str = str(graph) FileCheck().check_not("aten::add").run(g_str[0 : g_str.find("return")]) diff --git a/test/jit/test_await.py b/test/jit/test_await.py index 9d77a94698f..7a65beb9bdb 100644 --- a/test/jit/test_await.py +++ b/test/jit/test_await.py @@ -193,14 +193,14 @@ class TestAwait(JitTestCase): def C_wait_impl(self: C) -> C: return C(self._a 
* 2, self._b * 3) - def fn_arg_C(x: C) -> Tensor: + def fn_arg_C(x: C) -> Tensor: # noqa: F841 return x._a + x._b def fn(x: Tensor): aw: Await[C] = torch.jit._awaitable(C_wait_impl, C(x, x)) _a = torch.eye(2) ai = aw._a - awb = aw.b() + awb = aw.b() # noqa: F841 c = C(2 * x, 2 * x) return _a + ai + x + c._a + c.b() @@ -320,7 +320,7 @@ class TestAwait(JitTestCase): def main(x: Tensor, y: Tensor) -> Tensor: aw = torch.jit._awaitable(delayed, x) - z = gap(y) + z = gap(y) # noqa: F841 k = torch.jit._awaitable_wait(aw) return y + k @@ -371,7 +371,7 @@ class TestAwait(JitTestCase): def main(x: Tensor) -> Tensor: aw = torch.jit._awaitable(delayed, x) - z = gap(x) + z = gap(x) # noqa: F841 y = fn(aw) return y + x diff --git a/test/jit/test_backends.py b/test/jit/test_backends.py index 451d6cd98cb..8453f59cfdb 100644 --- a/test/jit/test_backends.py +++ b/test/jit/test_backends.py @@ -207,7 +207,7 @@ class BasicModuleUnavailableTest(JitBackendTestCase): 'raise Exception("Backend is not available."', ): backend_method = self.lowered_module.__getattr__("forward") - backend_output = backend_method(*(input, input)) + backend_method(*(input, input)) @skipIfRocm def test_save_load(self): @@ -220,7 +220,7 @@ class BasicModuleUnavailableTest(JitBackendTestCase): r"Backend is not available.", 'raise Exception("Backend is not available."', ): - imported = torch.jit.load(buffer) + torch.jit.load(buffer) class NestedModuleTest(JitBackendTestCase): @@ -624,7 +624,7 @@ class ErrorMessagesWithCompiler(JitBackendTestCase): """, "", ): - lowered_module_n = torch._C._jit_to_backend( + torch._C._jit_to_backend( "backend_with_compiler_demo", scripted_module_n, {"forward": {"": ""}} ) diff --git a/test/jit/test_builtins.py b/test/jit/test_builtins.py index 44524ac6b78..510b911e463 100644 --- a/test/jit/test_builtins.py +++ b/test/jit/test_builtins.py @@ -287,9 +287,9 @@ class TestTensorBuiltins(JitTestCase): def test_func(func, x, tensor): try: result = func(x, tensor) - except RuntimeError as e: + except RuntimeError: result = True - except TypeError as e: + except TypeError: result = True return result diff --git a/test/jit/test_class_type.py b/test/jit/test_class_type.py index 891abf598b5..02182b3b2fb 100644 --- a/test/jit/test_class_type.py +++ b/test/jit/test_class_type.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: jit"] +# ruff: noqa: F841 import io import os diff --git a/test/jit/test_cuda.py b/test/jit/test_cuda.py index 7b0b7248f34..fb7e5cd325d 100644 --- a/test/jit/test_cuda.py +++ b/test/jit/test_cuda.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: jit"] +# ruff: noqa: F841 import gc import os diff --git a/test/jit/test_dtype_analysis.py b/test/jit/test_dtype_analysis.py index 0b910b865a5..1a5fd2038bd 100644 --- a/test/jit/test_dtype_analysis.py +++ b/test/jit/test_dtype_analysis.py @@ -136,7 +136,7 @@ class TestDtypeBase(JitTestCase): try: # Eager execution expected_res = fn(*args) - except RuntimeError as e: + except RuntimeError: return expected_dtype = expected_res.dtype diff --git a/test/jit/test_freezing.py b/test/jit/test_freezing.py index 9cbbacb0871..7da41f0cc71 100644 --- a/test/jit/test_freezing.py +++ b/test/jit/test_freezing.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: jit"] +# ruff: noqa: F841 import io import unittest diff --git a/test/jit/test_fuser_common.py b/test/jit/test_fuser_common.py index 6a982051b15..9b0921d22b1 100644 --- a/test/jit/test_fuser_common.py +++ b/test/jit/test_fuser_common.py @@ -14,7 +14,7 @@ class TestFuserCommon(JitTestCase): x = torch.randn(5, requires_grad=not rq) # cause 
optimization to be created - for i in range(5): + for _ in range(5): fn(x) # test fallback when optimization is not applicable y = fn(torch.randn(5, requires_grad=rq)) diff --git a/test/jit/test_generator.py b/test/jit/test_generator.py index fc54e1b5abe..5f6e15cf8fe 100644 --- a/test/jit/test_generator.py +++ b/test/jit/test_generator.py @@ -37,7 +37,7 @@ class TestGenerator(JitTestCase): # Run this 3 times to ensure that the generator is being manually seeded # each time the traced function is run - for i in range(3): + for _ in range(3): torch.manual_seed(1) eager_tensor = f() @@ -64,7 +64,7 @@ class TestGenerator(JitTestCase): # Run this 3 times to ensure that the generator is being manually seeded # each time the traced function is run - for i in range(3): + for _ in range(3): torch.manual_seed(1) eager_tensor = f() diff --git a/test/jit/test_hooks_modules.py b/test/jit/test_hooks_modules.py index 2a5e68ab1cc..ffcd6fea37f 100644 --- a/test/jit/test_hooks_modules.py +++ b/test/jit/test_hooks_modules.py @@ -88,7 +88,7 @@ class SubmoduleForwardTupleInput(torch.nn.Module): self.name = name def forward(self, input: Tuple[int]): - input_access = input[0] + input_access = input[0] # noqa: F841 return (1,) @@ -99,7 +99,7 @@ class ModuleForwardTupleInput(torch.nn.Module): self.submodule = SubmoduleForwardTupleInput(submodule_name) def forward(self, input: Tuple[int]): - input_access = input[0] + input_access = input[0] # noqa: F841 return self.submodule((1,)) diff --git a/test/jit/test_ignore_context_manager.py b/test/jit/test_ignore_context_manager.py index e9b6dae60c9..b0d5bf45700 100644 --- a/test/jit/test_ignore_context_manager.py +++ b/test/jit/test_ignore_context_manager.py @@ -82,7 +82,7 @@ class TestIgnoreContextManager(JitTestCase): a: int = 4 b: int = 5 with torch.jit._IgnoreContextManager(a="inp:int", b="inp:int"): - l = [2 + b for i in range(a) if i > 2] + l = [2 + b for i in range(a) if i > 2] # noqa: F841 return a model = A() diff --git a/test/jit/test_isinstance.py b/test/jit/test_isinstance.py index ce73c9cdffa..53b701590f7 100644 --- a/test/jit/test_isinstance.py +++ b/test/jit/test_isinstance.py @@ -206,7 +206,7 @@ class TestIsinstance(JitTestCase): hit = not hit for el in obj: # perform some tensor operation - y = el.clamp(0, 0.5) + y = el.clamp(0, 0.5) # noqa: F841 if torch.jit.isinstance(obj, Dict[str, str]): hit = not hit str_cat = "" diff --git a/test/jit/test_jit_utils.py b/test/jit/test_jit_utils.py index 8e31a90e68c..4e2e2898f09 100644 --- a/test/jit/test_jit_utils.py +++ b/test/jit/test_jit_utils.py @@ -113,6 +113,6 @@ class TestJitUtils(JitTestCase): def test_no_tracer_warn_context_manager(self): torch._C._jit_set_tracer_state_warn(True) - with jit_utils.NoTracerWarnContextManager() as no_warn: + with jit_utils.NoTracerWarnContextManager(): self.assertEqual(False, torch._C._jit_get_tracer_state_warn()) self.assertEqual(True, torch._C._jit_get_tracer_state_warn()) diff --git a/test/jit/test_list_dict.py b/test/jit/test_list_dict.py index 8ed22b93013..53245e811ec 100644 --- a/test/jit/test_list_dict.py +++ b/test/jit/test_list_dict.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: jit"] +# ruff: noqa: F841 import inspect import os diff --git a/test/jit/test_logging.py b/test/jit/test_logging.py index 7a251d8afa3..366a6b93442 100644 --- a/test/jit/test_logging.py +++ b/test/jit/test_logging.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: jit"] +# ruff: noqa: F841 import os import sys diff --git a/test/jit/test_misc.py b/test/jit/test_misc.py index 7e6698f6edc..8c63e61a8da 100644 --- 
a/test/jit/test_misc.py +++ b/test/jit/test_misc.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: jit"] +# ruff: noqa: F841 import os import sys diff --git a/test/jit/test_models.py b/test/jit/test_models.py index ec4342a4ce6..7ee9ef365eb 100644 --- a/test/jit/test_models.py +++ b/test/jit/test_models.py @@ -376,7 +376,7 @@ class TestModels(JitTestCase): batch_size = inputs.size()[1] state_shape = self.config.n_cells, batch_size, self.config.d_hidden h0 = c0 = inputs.new_zeros(state_shape) - outputs, (ht, ct) = self.rnn(inputs, (h0, c0)) + _, (ht, _) = self.rnn(inputs, (h0, c0)) return ( ht[-1] if not self.config.birnn @@ -593,7 +593,6 @@ class TestModels(JitTestCase): @slowTest @skipIfNoTorchVision def test_script_module_trace_resnet18(self): - x = torch.ones(1, 3, 224, 224) m_orig = torch.jit.trace( torchvision.models.resnet18(), torch.ones(1, 3, 224, 224) ) diff --git a/test/jit/test_module_containers.py b/test/jit/test_module_containers.py index 6d85753f649..e8200eb2c09 100644 --- a/test/jit/test_module_containers.py +++ b/test/jit/test_module_containers.py @@ -355,7 +355,7 @@ class TestModuleContainers(JitTestCase): m = MyModule() self.checkModule(m, [torch.randn(2, 2)]) - mm = torch.jit.script(m) + torch.jit.script(m) def test_moduledict_getitem(self): class MyModule(torch.nn.Module): diff --git a/test/jit/test_module_interface.py b/test/jit/test_module_interface.py index 3b72f682f16..ad30ea3492d 100644 --- a/test/jit/test_module_interface.py +++ b/test/jit/test_module_interface.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: jit"] +# ruff: noqa: F841 import os import sys diff --git a/test/jit/test_optimize_for_mobile_preserve_debug_info.py b/test/jit/test_optimize_for_mobile_preserve_debug_info.py index 9ccc796c925..d405b2764e6 100644 --- a/test/jit/test_optimize_for_mobile_preserve_debug_info.py +++ b/test/jit/test_optimize_for_mobile_preserve_debug_info.py @@ -105,9 +105,7 @@ class TestOptimizeForMobilePreserveDebugInfo(JitTestCase): bias=self.conv_transpose2d_bias, ) - minibatch = 1 in_channels = 6 - iH = 4 iW = 5 out_channels = 6 kH = 2 diff --git a/test/jit/test_peephole.py b/test/jit/test_peephole.py index a7f8086ed5d..ac2f54bfe26 100644 --- a/test/jit/test_peephole.py +++ b/test/jit/test_peephole.py @@ -44,7 +44,7 @@ class TestPeephole(JitTestCase): return y + y a = torch.ones(4, 4) - j = self.checkScript(test_write, (a,)) + self.checkScript(test_write, (a,)) def test_peephole_no_output_aliasing(self): def test_peephole(x): @@ -93,7 +93,7 @@ class TestPeephole(JitTestCase): @torch.jit.script def foo(x, y, z): li = [x, y, z] - for i in range(len(x)): + for _ in range(len(x)): li.append(x) return len([x, y, z]) @@ -120,7 +120,7 @@ class TestPeephole(JitTestCase): @torch.jit.script def foo(x, y, z): li = [x, y, z] - for i in range(len(x)): + for _ in range(len(x)): li.append(x) return li[-2] diff --git a/test/jit/test_profiler.py b/test/jit/test_profiler.py index 2aa6ecc4e8e..29f3cc9be4c 100644 --- a/test/jit/test_profiler.py +++ b/test/jit/test_profiler.py @@ -151,7 +151,7 @@ class TestProfiler(JitTestCase): x = torch.ones(1) y = torch.ones(1) foo(x, y) - b = foo(x, y) + b = foo(x, y) # noqa: F841 g = torch.jit.last_executed_optimized_graph() self.assertEqual(len(list(g.findAllNodes("prim::TypeCheck"))), 2) FileCheck().check("TensorExpr").check("aten::add_").check("TensorExpr").run(g) diff --git a/test/jit/test_recursive_script.py b/test/jit/test_recursive_script.py index d6858dd1c20..33fd38c2b9c 100644 --- a/test/jit/test_recursive_script.py +++ b/test/jit/test_recursive_script.py @@ -1,4 
+1,5 @@ # Owner(s): ["oncall: jit"] +# ruff: noqa: F841 import os import re diff --git a/test/jit/test_remove_mutation.py b/test/jit/test_remove_mutation.py index 80ebb17cc50..8048d406ab3 100644 --- a/test/jit/test_remove_mutation.py +++ b/test/jit/test_remove_mutation.py @@ -196,7 +196,7 @@ class TestRemoveMutation(JitTestCase): def intermediary_use(): a = [1, 2] - b = len(a) + b = len(a) # noqa: F841 a.append(3) return a diff --git a/test/jit/test_save_load_for_op_version.py b/test/jit/test_save_load_for_op_version.py index 07efbdbedb8..1b62e4043eb 100644 --- a/test/jit/test_save_load_for_op_version.py +++ b/test/jit/test_save_load_for_op_version.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: jit"] +# ruff: noqa: F841 import io import os diff --git a/test/jit/test_symbolic_shape_analysis.py b/test/jit/test_symbolic_shape_analysis.py index 8a5cee1890b..f43105093d7 100644 --- a/test/jit/test_symbolic_shape_analysis.py +++ b/test/jit/test_symbolic_shape_analysis.py @@ -678,7 +678,7 @@ class TestSymbolicShapeAnalysis(JitTestCase): # to make into a jit function cant have multiple outputs g.makeMultiOutputIntoTuple() func = torch._C._create_function_from_graph("partial_eval_graph", g) - mapping = shape_compute_graph.graph_output_to_symbolic_shape_dim() + mapping = shape_compute_graph.graph_output_to_symbolic_shape_dim() # noqa: F841 output_shape = func(tensor.size()) # the first 4 dims are input sym dimensions, then the , self.assertEqual(list(output_shape[0:4]), list(tensor.size())) diff --git a/test/jit/test_torchbind.py b/test/jit/test_torchbind.py index 88bff3523b2..726d7ce189d 100644 --- a/test/jit/test_torchbind.py +++ b/test/jit/test_torchbind.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: jit"] +# ruff: noqa: F841 import copy import io diff --git a/test/jit/test_tracer.py b/test/jit/test_tracer.py index e9ef600c97b..78907399267 100644 --- a/test/jit/test_tracer.py +++ b/test/jit/test_tracer.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: jit"] +# ruff: noqa: F841 import copy import io diff --git a/test/jit/test_types.py b/test/jit/test_types.py index 9c8818911f9..c0e56bb47c8 100644 --- a/test/jit/test_types.py +++ b/test/jit/test_types.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: jit"] +# ruff: noqa: F841 import inspect import os diff --git a/test/jit/test_typing.py b/test/jit/test_typing.py index 45c1ccefd03..bf5e53b9e9f 100644 --- a/test/jit/test_typing.py +++ b/test/jit/test_typing.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: jit"] +# ruff: noqa: F841 import os import sys diff --git a/test/jit/test_union.py b/test/jit/test_union.py index ee83f62ae49..cd7a5baa2ba 100644 --- a/test/jit/test_union.py +++ b/test/jit/test_union.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: jit"] +# ruff: noqa: F841 import io import os diff --git a/test/jit/test_union_pep604.py b/test/jit/test_union_pep604.py index b6be1dd6e5c..4045d9368d1 100644 --- a/test/jit/test_union_pep604.py +++ b/test/jit/test_union_pep604.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: jit"] +# ruff: noqa: F841 import io import os diff --git a/test/jit/test_with.py b/test/jit/test_with.py index bdc045c2588..c03085efd32 100644 --- a/test/jit/test_with.py +++ b/test/jit/test_with.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: jit"] +# ruff: noqa: F841 import os import sys diff --git a/test/jit/xnnpack/test_xnnpack_delegate.py b/test/jit/xnnpack/test_xnnpack_delegate.py index e196119fe61..6996ee7e4d4 100644 --- a/test/jit/xnnpack/test_xnnpack_delegate.py +++ b/test/jit/xnnpack/test_xnnpack_delegate.py @@ -32,7 +32,7 @@ class TestXNNPackBackend(unittest.TestCase): }, ) - for i 
in range(0, 20): + for _ in range(0, 20): sample_input = torch.randn(4, 4, 4) actual_output = scripted_module(sample_input) expected_output = lowered_module(sample_input) diff --git a/test/lazy/test_debug_util.py b/test/lazy/test_debug_util.py index 9a3e784f2c5..e71f15e53cb 100644 --- a/test/lazy/test_debug_util.py +++ b/test/lazy/test_debug_util.py @@ -19,7 +19,7 @@ class DebugUtilTest(TestCase): def _run_linear(self): device = "lazy" model = nn.Linear(5, 5).to(device) - output = model(torch.randn(1, 5).to(device)) + output = model(torch.randn(1, 5).to(device)) # noqa: F841 torch._lazy.mark_step() def test_get_python_frames(self): diff --git a/test/lazy/test_extract_compiled_graph.py b/test/lazy/test_extract_compiled_graph.py index d62a99b7b6d..79359ddb769 100644 --- a/test/lazy/test_extract_compiled_graph.py +++ b/test/lazy/test_extract_compiled_graph.py @@ -131,7 +131,7 @@ def allclose(expected, actual): def verify_reusing_compiled_graph(mod, exception_msg_pattern, ncase=10): args = gen_rand_args(mod) - out = mod(*args) + mod(*args) dis.dis(mod.forward) diff --git a/test/lazy/test_generator.py b/test/lazy/test_generator.py index 7711eb19ada..e87043f8262 100644 --- a/test/lazy/test_generator.py +++ b/test/lazy/test_generator.py @@ -81,7 +81,7 @@ class LazyGeneratorTest(TestCase): uncached_compile == 2 ), f"Expected 2 uncached compiles, got {uncached_compile}" - t = generate_tensor(1) + t = generate_tensor(1) # noqa: F841 torch._lazy.mark_step() uncached_compile = metrics.counter_value("UncachedCompile") diff --git a/test/lazy/test_meta_kernel.py b/test/lazy/test_meta_kernel.py index 7382839fa8d..e212fca89ba 100644 --- a/test/lazy/test_meta_kernel.py +++ b/test/lazy/test_meta_kernel.py @@ -19,7 +19,7 @@ class TestMetaKernel(TestCase): fc_nobias = torch.nn.Linear(2, 2, bias=False, dtype=float32).to("lazy") with self.assertRaises(Exception): - out_nobias = fc_nobias(input) + fc_nobias(input) def test_addmm(self): """Tests that the addmm meta kernel returns the correct output type""" diff --git a/test/lazy/test_reuse_ir.py b/test/lazy/test_reuse_ir.py index 31a21b4ed91..ce06bddfd8b 100644 --- a/test/lazy/test_reuse_ir.py +++ b/test/lazy/test_reuse_ir.py @@ -33,10 +33,10 @@ class TestLazyReuseIr(TestCase): y_lazy = y.detach().clone().to(device=device) z_lazy = z.detach().clone().to(device=device) - for i in range(10): + for _ in range(10): z += x + y - for i in range(10): + for _ in range(10): z_lazy += x_lazy + y_lazy torch._lazy.mark_step() @@ -111,7 +111,7 @@ class TestLazyReuseIr(TestCase): weight = torch.randn(3, device=device) bias = torch.randn(3, device=device) - for i in range(10): + for _ in range(10): # BatchNorm2d does extra checks on dimensions which SymInts don't support yet # so we call `torch.ops.aten.native_batch_norm` to bypass the checks. 
z, _, _ = torch.ops.aten.native_batch_norm( @@ -125,7 +125,7 @@ class TestLazyReuseIr(TestCase): x_lazy = x.detach().clone().to(device=device) weight_lazy = weight.detach().clone().to(device=device) bias_lazy = bias.detach().clone().to(device=device) - for i in range(10): + for _ in range(10): z_lazy, _, _ = torch.ops.aten.native_batch_norm( x_lazy, weight_lazy, bias_lazy, None, None, True, 0.1, 1e-5 ) diff --git a/test/lazy/test_step_closures.py b/test/lazy/test_step_closures.py index 47230be783e..c982212fc4f 100644 --- a/test/lazy/test_step_closures.py +++ b/test/lazy/test_step_closures.py @@ -56,7 +56,7 @@ class ClosuresTest(TestCase): torch._lazy.mark_step() raise AssertionError # Should not reach here - except RuntimeError as e: + except RuntimeError: assert flag.is_set(), "Should have caught exception from closure" def test_asynchronous_exception(self): @@ -81,7 +81,7 @@ class ClosuresTest(TestCase): torch._lazy.mark_step() raise AssertionError # Should not reach here - except RuntimeError as e: + except RuntimeError: # Should have caught exception from closure1 pass diff --git a/test/lazy/test_ts_opinfo.py b/test/lazy/test_ts_opinfo.py index ddad7b931f3..2cc75f6e4ac 100644 --- a/test/lazy/test_ts_opinfo.py +++ b/test/lazy/test_ts_opinfo.py @@ -159,7 +159,7 @@ class TestLazyTensor(JitTestCase): def foo(x, *, mark_step): y = x.view(2, 2) y.add_(1) - z = x + x + z = x + x # noqa: F841 if mark_step: torch._lazy.mark_step() @@ -200,7 +200,7 @@ class TestLazyOpInfo(TestCase): allowed_dtypes=(torch.float,), ) def test_dispatched_to_lazy(self, device, dtype, op): - def get_name(op): + def get_name(op): # noqa: F841 l = [op.name] if op.variant_test_name != "": l.append(op.variant_test_name) @@ -215,7 +215,7 @@ class TestLazyOpInfo(TestCase): torch._lazy.wait_device_ops() torch._lazy.metrics.reset() - r = op(*args, **kwargs) + op(*args, **kwargs) torch._lazy.mark_step() torch._lazy.wait_device_ops() prefix = "aten" if op.name in FALLBACK_LIST else "lazy" diff --git a/test/mobile/model_test/builtin_ops.py b/test/mobile/model_test/builtin_ops.py index b315c4f3897..1f5d9d5313d 100644 --- a/test/mobile/model_test/builtin_ops.py +++ b/test/mobile/model_test/builtin_ops.py @@ -9,7 +9,6 @@ class TSBuiltinOpsModule(torch.nn.Module): x = torch.tensor(1) y = torch.tensor(0.5) b = float(1) - s = "abcde" l = ["1", "2", "test", "a{}b"] d = {"key": 1} d2 = {0: 100} diff --git a/test/mobile/model_test/gen_test_model.py b/test/mobile/model_test/gen_test_model.py index c6da5aa7081..f234210d289 100644 --- a/test/mobile/model_test/gen_test_model.py +++ b/test/mobile/model_test/gen_test_model.py @@ -164,8 +164,6 @@ def getModuleFromName(model_name): if not isinstance(module, torch.nn.Module): module = module.getModule() - has_bundled_inputs = False # module.find_method("get_all_bundled_inputs") - if model_name in models_need_trace: module = torch.jit.trace(module, []) else: @@ -208,7 +206,7 @@ def generateAllModels(folder, on_the_fly=False): # generate/update a given model for storage def generateModel(name): - module, ops = getModuleFromName(name) + module, _ = getModuleFromName(name) if module is None: return path_ios = test_path_ios + name + ".ptl" diff --git a/test/mobile/model_test/math_ops.py b/test/mobile/model_test/math_ops.py index 009ec2e0c0c..e1664658c2f 100644 --- a/test/mobile/model_test/math_ops.py +++ b/test/mobile/model_test/math_ops.py @@ -313,7 +313,6 @@ class OtherMathOpsModule(torch.nn.Module): c = torch.randint(0, 8, (5,), dtype=torch.int64) e = torch.randn(4, 3) f = torch.randn(4, 4, 4) - 
size = [0, 1] dims = [0, 1] return len( torch.atleast_1d(a), diff --git a/test/mobile/model_test/nn_ops.py b/test/mobile/model_test/nn_ops.py index fb6530daad8..da4fbe18736 100644 --- a/test/mobile/model_test/nn_ops.py +++ b/test/mobile/model_test/nn_ops.py @@ -387,7 +387,7 @@ class NNVisionModule(torch.nn.Module): def forward(self): input = torch.randn(1, 3, 16, 16) - for i, module in enumerate(self.vision_modules): + for module in self.vision_modules: r = module(self.input) return len( r, diff --git a/test/mobile/model_test/quantization_ops.py b/test/mobile/model_test/quantization_ops.py index 140894fddc4..eb96b3df71e 100644 --- a/test/mobile/model_test/quantization_ops.py +++ b/test/mobile/model_test/quantization_ops.py @@ -22,7 +22,6 @@ class GeneralQuantModule(torch.nn.Module): ) input1 = torch.randn(1, 16, 4) input2 = torch.randn(1, 16, 4, 4) - input3 = torch.randn(1, 16, 4, 4, 4) return len( self.func.add(a, b), self.func.cat((a, a), 0), diff --git a/test/mobile/model_test/tensor_ops.py b/test/mobile/model_test/tensor_ops.py index 089cf10c0f5..35ff0fcdf0a 100644 --- a/test/mobile/model_test/tensor_ops.py +++ b/test/mobile/model_test/tensor_ops.py @@ -104,7 +104,6 @@ class TensorCreationOpsModule(torch.nn.Module): def tensor_creation_ops(self): i = torch.tensor([[0, 1, 1], [2, 0, 2]]) - v = torch.tensor([3, 4, 5], dtype=torch.float32) real = torch.tensor([1, 2], dtype=torch.float32) imag = torch.tensor([3, 4], dtype=torch.float32) inp = torch.tensor([-1.5, 0.0, 2.0]) diff --git a/test/mobile/test_bytecode.py b/test/mobile/test_bytecode.py index 307921d7256..1e42493a72b 100644 --- a/test/mobile/test_bytecode.py +++ b/test/mobile/test_bytecode.py @@ -331,7 +331,6 @@ class testVariousModelVersions(TestCase): script_module_v4_buffer = _backport_for_mobile_to_buffer( script_module_v5_path, maximum_checked_in_model_version - 1 ) - buf = io.StringIO() # Check version of the model v4 from backport bytesio = io.BytesIO(script_module_v4_buffer) @@ -363,7 +362,7 @@ class testVariousModelVersions(TestCase): sample_input = torch.tensor([1]) script_module = torch.jit.script(MyTestModule()) - script_module_result = script_module(sample_input) + script_module(sample_input) buffer = io.BytesIO(script_module._save_to_buffer_for_lite_interpreter()) buffer.seek(0) diff --git a/test/mobile/test_lite_script_module.py b/test/mobile/test_lite_script_module.py index 05b9b30ea12..aae35533d40 100644 --- a/test/mobile/test_lite_script_module.py +++ b/test/mobile/test_lite_script_module.py @@ -349,7 +349,7 @@ class TestLiteScriptModule(TestCase): def forward(self): raise RuntimeError("foo") - _, lineno = inspect.getsourcelines(FooTest2) + _, _ = inspect.getsourcelines(FooTest2) # In C++ code, the type of exception thrown is torch::jit::JITException # which does not extend c10::Error, and hence it isn't possible to add @@ -426,7 +426,7 @@ class TestLiteScriptModule(TestCase): ft = FooTest5(42) loaded = self.getScriptExportImportCopy(ft) - _, lineno = inspect.getsourcelines(FooTest5) + _, _ = inspect.getsourcelines(FooTest5) try: loaded(42, torch.rand(3, 4), torch.rand(3, 4), torch.rand(30, 40)) diff --git a/test/mobile/test_quantize_fx_lite_script_module.py b/test/mobile/test_quantize_fx_lite_script_module.py index 2e5f5dd0046..30cd4647d17 100644 --- a/test/mobile/test_quantize_fx_lite_script_module.py +++ b/test/mobile/test_quantize_fx_lite_script_module.py @@ -31,14 +31,14 @@ class TestLiteFuseFx(QuantizationLiteTestCase): model = M().eval() indices = torch.randint(low=0, high=10, size=(20,)) - quantized_node = 
ns.call_module(nnq.Embedding) + ns.call_module(nnq.Embedding) configs = [ (float_qparams_weight_only_qconfig, ns.call_module(nnq.Embedding)), (None, ns.call_module(nn.Embedding)), (default_qconfig, ns.call_module(nn.Embedding)), ] - for qconfig, node in configs: + for qconfig, _ in configs: qconfig_dict = {"": qconfig} m = prepare_fx( model, diff --git a/test/mobile/test_upgraders.py b/test/mobile/test_upgraders.py index 5ebf9a27535..3567e0d030b 100644 --- a/test/mobile/test_upgraders.py +++ b/test/mobile/test_upgraders.py @@ -31,7 +31,8 @@ class TestLiteScriptModule(TestCase): return e def test_versioned_div_tensor(self): - def div_tensor_0_3(self, other): + # noqa: F841 + def div_tensor_0_3(self, other): # noqa: F841 if self.is_floating_point() or other.is_floating_point(): return self.true_divide(other) return self.divide(other, rounding_mode="trunc") @@ -43,9 +44,9 @@ class TestLiteScriptModule(TestCase): / "upgrader_models" / "test_versioned_div_tensor_v2.ptl" ) - mobile_module_v2 = _load_for_lite_interpreter(str(model_path)) + _load_for_lite_interpreter(str(model_path)) jit_module_v2 = torch.jit.load(str(model_path)) - current_mobile_module = self._save_load_mobile_module(jit_module_v2) + self._save_load_mobile_module(jit_module_v2) vals = (2.0, 3.0, 2, 3) for val_a, val_b in product(vals, vals): a = torch.tensor((val_a,)) diff --git a/test/nn/test_load_state_dict.py b/test/nn/test_load_state_dict.py index 8004252a37d..641017284c6 100644 --- a/test/nn/test_load_state_dict.py +++ b/test/nn/test_load_state_dict.py @@ -353,7 +353,7 @@ class TestLoadStateDict(NNTestCase): x = torch.randn(4, 3) num_iters = 3 - for i in range(num_iters): + for _ in range(num_iters): opt.zero_grad() out = net(x) out.sum().backward() @@ -371,7 +371,7 @@ class TestLoadStateDict(NNTestCase): opt2.load_state_dict(opt_state_dict) y = x.clone() - for i in range(num_iters): + for _ in range(num_iters): opt.zero_grad() out = net(x) out.sum().backward() diff --git a/test/nn/test_packed_sequence.py b/test/nn/test_packed_sequence.py index 2136482ab7a..1d8b8966af1 100644 --- a/test/nn/test_packed_sequence.py +++ b/test/nn/test_packed_sequence.py @@ -59,7 +59,7 @@ class PackedSequenceTest(TestCase): ) # Apply cast to `PackedSequence` instance and unpack masked = getattr(packed, cast_str)() - unpacked, lengths_out = rnn_utils.pad_packed_sequence(masked) + unpacked, _ = rnn_utils.pad_packed_sequence(masked) self.assertEqual(unpacked.type(), expected_type_str) def test_wrong_order(self): diff --git a/test/onnx/dynamo/test_dynamo_with_onnxruntime_backend.py b/test/onnx/dynamo/test_dynamo_with_onnxruntime_backend.py index 1d280dfa034..1d93fdf553d 100644 --- a/test/onnx/dynamo/test_dynamo_with_onnxruntime_backend.py +++ b/test/onnx/dynamo/test_dynamo_with_onnxruntime_backend.py @@ -696,9 +696,9 @@ class TestDynamoWithONNXRuntime(onnx_test_common._TestONNXRuntime): return tensor_x if test_local_backend: - local_aot_ort, local_ort = make_aot_ort(dynamic=True) + local_aot_ort, _ = make_aot_ort(dynamic=True) else: - local_aot_ort, local_ort = "onnxrt", None + local_aot_ort, _ = "onnxrt", None prefix = f"test_dump_model_{'local' if test_local_backend else 'onnxrt'}_" expected = f"{prefix}0.onnx" @@ -722,12 +722,12 @@ class TestDynamoWithONNXRuntime(onnx_test_common._TestONNXRuntime): with onnxrt_dump_path(prefix): example_args = example_args_collection[0] - result = compiled_model(*example_args) + compiled_model(*example_args) self.assertTrue(os.path.exists(expected)) self.assertTrue(os.path.exists(expected_graph)) 
         self.assertFalse(os.path.exists(not_expected))

-        result = compiled_model(*example_args)
+        compiled_model(*example_args)
         self.assertTrue(os.path.exists(expected))
         self.assertFalse(os.path.exists(not_expected))

diff --git a/test/onnx/test_autograd_funs.py b/test/onnx/test_autograd_funs.py
index 755f811f032..cfeec9553ab 100644
--- a/test/onnx/test_autograd_funs.py
+++ b/test/onnx/test_autograd_funs.py
@@ -65,7 +65,7 @@ class TestAutogradFuns(pytorch_test_common.ExportTestCase):
         @staticmethod
         def forward(ctx, input):
             ctx.save_for_backward(input)
-            values, indices = torch.topk(input, 3)
+            values, _ = torch.topk(input, 3)
             return values

     class Caller(torch.nn.Module):
diff --git a/test/onnx/test_custom_ops.py b/test/onnx/test_custom_ops.py
index d926375ee35..bf751822dea 100644
--- a/test/onnx/test_custom_ops.py
+++ b/test/onnx/test_custom_ops.py
@@ -96,7 +96,7 @@ class TestExportAsContribOps(pytorch_test_common.ExportTestCase):
             def forward(self, x):
                 res = []
                 res2 = []
-                for i in range(x.size(0)):
+                for _ in range(x.size(0)):
                     if len(res) > 0:
                         res2.append(res[0])
                     else:
diff --git a/test/onnx/test_fx_passes.py b/test/onnx/test_fx_passes.py
index e49b21dc708..51b791f38f7 100644
--- a/test/onnx/test_fx_passes.py
+++ b/test/onnx/test_fx_passes.py
@@ -133,7 +133,7 @@ class TestModularizePass(common_utils.TestCase):
             def forward(self, x, y):
                 result = self.used_gelu(x + y)
-                unused_relu_result = self.unused_relu(x)
+                unused_relu_result = self.unused_relu(x)  # noqa: F841
                 return result

         if is_exported_program:
diff --git a/test/onnx/test_pytorch_onnx_no_runtime.py b/test/onnx/test_pytorch_onnx_no_runtime.py
index bf5434b887f..41b2c78ca7c 100644
--- a/test/onnx/test_pytorch_onnx_no_runtime.py
+++ b/test/onnx/test_pytorch_onnx_no_runtime.py
@@ -122,7 +122,7 @@ class TestONNXExport(pytorch_test_common.ExportTestCase):
         class ModuleToExport(torch.jit.ScriptModule):
             @torch.jit.script_method
             def forward(self, x):
-                y = x - x
+                y = x - x  # noqa: F841
                 return x + x

         mte = ModuleToExport()
@@ -506,7 +506,7 @@ class TestONNXExport(pytorch_test_common.ExportTestCase):
         box_regression = torch.randn([4, 4])
         proposal = [torch.randn(2, 4), torch.randn(2, 4)]

-        with self.assertRaises(RuntimeError) as cm:
+        with self.assertRaises(RuntimeError):
             f = io.BytesIO()
             torch.onnx.export(
                 model,
diff --git a/test/onnx/test_pytorch_onnx_onnxruntime.py b/test/onnx/test_pytorch_onnx_onnxruntime.py
index c812b8e18b3..a6fd5780844 100644
--- a/test/onnx/test_pytorch_onnx_onnxruntime.py
+++ b/test/onnx/test_pytorch_onnx_onnxruntime.py
@@ -1,4 +1,5 @@
 # Owner(s): ["module: onnx"]
+# ruff: noqa: F841

 from __future__ import annotations
diff --git a/test/onnx/test_pytorch_onnx_shape_inference.py b/test/onnx/test_pytorch_onnx_shape_inference.py
index 0d2b1ed756c..801d8484493 100644
--- a/test/onnx/test_pytorch_onnx_shape_inference.py
+++ b/test/onnx/test_pytorch_onnx_shape_inference.py
@@ -350,7 +350,7 @@ class TestONNXShapeInference(pytorch_test_common.ExportTestCase):
         # the added "Cast" node doesn't stop shape inference.
cond = g.addInput() cond.setType(input.type().with_dtype(torch.int32).with_sizes([1])) - if_op, (if_context, else_context), new_node = jit_utils.add_op_with_blocks( + _, (if_context, else_context), new_node = jit_utils.add_op_with_blocks( as_graphcontext(g), "If", cond, n_blocks=2 ) block1_output = if_context.op("Add", input, input) diff --git a/test/onnx/test_utility_funs.py b/test/onnx/test_utility_funs.py index c6d382c1201..387a8985879 100644 --- a/test/onnx/test_utility_funs.py +++ b/test/onnx/test_utility_funs.py @@ -1873,7 +1873,7 @@ class TestUtilityFuns(_BaseTestCase): out2 = self.fc1(input1) return out1, out1, out2, out1, out2 - N, D_in, H, D_out = 64, 784, 500, 10 + N, D_in, D_out = 64, 784, 10 pt_model = DuplicatedOutputNet(D_in, D_out) f = io.BytesIO() diff --git a/test/onnx/verify.py b/test/onnx/verify.py index 95f8fe0e273..0dc2975df14 100644 --- a/test/onnx/verify.py +++ b/test/onnx/verify.py @@ -143,7 +143,6 @@ class Errors: """ # TODO: instead of immediately concatenating the context in the msg, # attach it as metadata and make a decision how to format it later. - msg_w_ctx = msg for c in reversed(self.context): msg += "\n\n * " + "\n ".join(c.splitlines()) self.errors.append(msg) @@ -523,7 +522,7 @@ def verify( run_helper(torch_out, args, remained_onnx_input_idx) if isinstance(test_args, int): - for i in range(test_args): + for _ in range(test_args): run(randomize_args(args), remained_onnx_input_idx) else: for test_arg in test_args: diff --git a/test/optim/test_lrscheduler.py b/test/optim/test_lrscheduler.py index 316d639a6b5..0dd69f9af95 100644 --- a/test/optim/test_lrscheduler.py +++ b/test/optim/test_lrscheduler.py @@ -1,4 +1,5 @@ # Owner(s): ["module: optimizer", "module: LrScheduler" ] +# ruff: noqa: F841 import copy import math import pickle diff --git a/test/optim/test_swa_utils.py b/test/optim/test_swa_utils.py index 560316fe3ae..ae9ff2cf01b 100644 --- a/test/optim/test_swa_utils.py +++ b/test/optim/test_swa_utils.py @@ -141,7 +141,7 @@ class TestSWAUtils(TestCase): averaged_dnn = AveragedModel(dnn) averaged_dnn2 = AveragedModel(dnn) n_updates = 10 - for i in range(n_updates): + for _ in range(n_updates): for p in dnn.parameters(): p.detach().add_(torch.randn_like(p)) averaged_dnn.update_parameters(dnn) diff --git a/test/package/test_directory_reader.py b/test/package/test_directory_reader.py index 512f80fe285..65cf538810a 100644 --- a/test/package/test_directory_reader.py +++ b/test/package/test_directory_reader.py @@ -284,7 +284,7 @@ class DirectoryReaderTest(PackageTestCase): with TemporaryDirectory() as temp_dir: zip_file.extractall(path=temp_dir) dir_importer = PackageImporter(Path(temp_dir) / Path(filename).name) - dir_mod = dir_importer.load_pickle("res", "mod.pkl") + dir_importer.load_pickle("res", "mod.pkl") if __name__ == "__main__": diff --git a/test/package/test_load_bc_packages.py b/test/package/test_load_bc_packages.py index e30df216f98..2536f81aaaa 100644 --- a/test/package/test_load_bc_packages.py +++ b/test/package/test_load_bc_packages.py @@ -26,7 +26,7 @@ class TestLoadBCPackages(PackageTestCase): def test_load_bc_packages_nn_module(self): """Tests for backwards compatible nn module""" importer1 = PackageImporter(f"{packaging_directory}/test_nn_module.pt") - loaded1 = importer1.load_pickle("nn_module", "nn_module.pkl") + importer1.load_pickle("nn_module", "nn_module.pkl") @skipIf( IS_FBCODE or IS_SANDCASTLE, @@ -35,7 +35,7 @@ class TestLoadBCPackages(PackageTestCase): def test_load_bc_packages_torchscript_module(self): """Tests for backwards 
compatible torchscript module""" importer2 = PackageImporter(f"{packaging_directory}/test_torchscript_module.pt") - loaded2 = importer2.load_pickle("torchscript_module", "torchscript_module.pkl") + importer2.load_pickle("torchscript_module", "torchscript_module.pkl") @skipIf( IS_FBCODE or IS_SANDCASTLE, @@ -44,7 +44,7 @@ class TestLoadBCPackages(PackageTestCase): def test_load_bc_packages_fx_module(self): """Tests for backwards compatible fx module""" importer3 = PackageImporter(f"{packaging_directory}/test_fx_module.pt") - loaded3 = importer3.load_pickle("fx_module", "fx_module.pkl") + importer3.load_pickle("fx_module", "fx_module.pkl") if __name__ == "__main__": diff --git a/test/package/test_misc.py b/test/package/test_misc.py index a024209ca91..850dec67681 100644 --- a/test/package/test_misc.py +++ b/test/package/test_misc.py @@ -241,7 +241,7 @@ class TestMisc(PackageTestCase): ) self.assertEqual(he.get_rdeps("package_b.subpackage_2"), ["package_b"]) - with self.assertRaises(PackagingError) as e: + with self.assertRaises(PackagingError): with PackageExporter(BytesIO()) as he: import package_b diff --git a/test/package/test_model.py b/test/package/test_model.py index 4e73a71e935..09b10a1ea2f 100644 --- a/test/package/test_model.py +++ b/test/package/test_model.py @@ -59,7 +59,7 @@ class ModelTest(PackageTestCase): self.assertEqual(r2(input), ref) # functions exist also to get at the private modules in each package - torchvision = i.import_module("torchvision") + torchvision = i.import_module("torchvision") # noqa: F841 f2 = BytesIO() # if we are doing transfer learning we might want to re-save diff --git a/test/package/test_package_script.py b/test/package/test_package_script.py index 19c09922464..13c2426f197 100644 --- a/test/package/test_package_script.py +++ b/test/package/test_package_script.py @@ -407,7 +407,7 @@ class TestPackageScript(PackageTestCase): e.save_pickle("res", "mod1.pkl", scripted_mod_0) buffer_0.seek(0) - importer_0 = importer = PackageImporter(buffer_0) + importer_0 = PackageImporter(buffer_0) buffer_1 = BytesIO() with PackageExporter(buffer_1) as e: diff --git a/test/package/test_repackage.py b/test/package/test_repackage.py index fb98675415a..0e21d7012f5 100644 --- a/test/package/test_repackage.py +++ b/test/package/test_repackage.py @@ -28,7 +28,7 @@ class TestRepackage(PackageTestCase): buffer.seek(0) pi = PackageImporter(buffer) - loaded_model = pi.load_pickle("default", "model.py") + pi.load_pickle("default", "model.py") model_b = ImportsIndirectlyFromSubPackage() buffer = BytesIO() diff --git a/test/package/test_save_load.py b/test/package/test_save_load.py index fdba77e9739..2f800c68792 100644 --- a/test/package/test_save_load.py +++ b/test/package/test_save_load.py @@ -82,7 +82,7 @@ class TestSaveLoad(PackageTestCase): buffer.seek(0) hi = PackageImporter(buffer) - loaded_obj = hi.load_pickle("res", "obj.pkl") + hi.load_pickle("res", "obj.pkl") package_b = hi.import_module("package_b") self.assertEqual(package_b.result, "package_b") diff --git a/test/profiler/test_execution_trace.py b/test/profiler/test_execution_trace.py index 2f8981742a7..fb69cd659cc 100644 --- a/test/profiler/test_execution_trace.py +++ b/test/profiler/test_execution_trace.py @@ -397,7 +397,7 @@ class TestExecutionTrace(TestCase): def fn(nt): return nt.sin().cos() - with torch.profiler.profile(execution_trace_observer=observer) as prof: + with torch.profiler.profile(execution_trace_observer=observer): for i in range(3): values = torch.rand((8 + i, 4 + i)) offsets = torch.tensor([0, 2, 4, 
6, 8 + i]) diff --git a/test/profiler/test_memory_profiler.py b/test/profiler/test_memory_profiler.py index 365c2cd4b84..09d2eb8641b 100644 --- a/test/profiler/test_memory_profiler.py +++ b/test/profiler/test_memory_profiler.py @@ -496,7 +496,7 @@ class TestDataFlow(TestCase): z = x.mul(y) return {"z": z.view_as(z)} - def f1(x, y): + def f1(x, y): # noqa: F841 with torch.no_grad(): return f0(x, y) @@ -1124,8 +1124,8 @@ class TestMemoryProfilerE2E(TestCase): w1 = torch.ones((1,), requires_grad=True) def step_fn(_): - x = torch.ones((2, 2)) - y = torch.cat([x * w0, x * w1], dim=1) + x = torch.ones((2, 2)) # noqa: F841 + y = torch.cat([x * w0, x * w1], dim=1) # noqa: F841 # NOTE: We expect that all unknown categories. This is simply a sanity # check to ensure that we do not over-label. diff --git a/test/profiler/test_profiler.py b/test/profiler/test_profiler.py index b9263fcd8c5..1c7afba65f8 100644 --- a/test/profiler/test_profiler.py +++ b/test/profiler/test_profiler.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: profiler"] +# ruff: noqa: F841 import collections import gc diff --git a/test/profiler/test_record_function.py b/test/profiler/test_record_function.py index e024c7d4885..1608699d1ae 100644 --- a/test/profiler/test_record_function.py +++ b/test/profiler/test_record_function.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: profiler"] +# ruff: noqa: F841 # if tqdm is not shutdown properly, it will leave the monitor thread alive. # This causes an issue in the multithreading test because we check all events diff --git a/test/profiler/test_torch_tidy.py b/test/profiler/test_torch_tidy.py index 2ded8e515f4..be5884e93c6 100644 --- a/test/profiler/test_torch_tidy.py +++ b/test/profiler/test_torch_tidy.py @@ -100,7 +100,7 @@ class TestTorchTidyProfiler(TestCase): return self._get_tensor_fields(find_node_with_name(nodes, op_name), index) a_impl, a_storage_data, a_id = get_fields("aten::add", 0) - b_impl, b_storage_data, b_id = get_fields("aten::mul", 0) + b_impl, b_storage_data, _ = get_fields("aten::mul", 0) # Profiler matches ground truth from Python API. 
self.assertEqual(a_storage_data, a_initial_storage_data) @@ -467,7 +467,7 @@ class TestTorchTidyProfiler(TestCase): -1 ].extra_fields _, uniform_node = find_chain(["aten::rand", "aten::uniform_"]) - x_impl, x_storage_data, x_id = self._get_tensor_fields(uniform_node, 0) + _, x_storage_data, x_id = self._get_tensor_fields(uniform_node, 0) # Make sure IDs are consistent between allocations and op inputs self.assertEqual(allocation.ptr, x_storage_data) diff --git a/test/quantization/bc/test_backward_compatibility.py b/test/quantization/bc/test_backward_compatibility.py index 6416db5dfb5..ca236e9a27b 100644 --- a/test/quantization/bc/test_backward_compatibility.py +++ b/test/quantization/bc/test_backward_compatibility.py @@ -42,10 +42,8 @@ def get_filenames(self, subname): test_file = os.path.realpath(sys.modules[module_id].__file__) base_name = os.path.join(os.path.dirname(test_file), "../serialized", munged_id) - subname_output = "" if subname: base_name += "_" + subname - subname_output = f" ({subname})" input_file = base_name + ".input.pt" state_dict_file = base_name + ".state_dict.pt" @@ -143,7 +141,7 @@ class TestSerialization(TestCase): """ ( input_file, - state_dict_file, + _, scripted_module_file, traced_module_file, expected_file, @@ -194,7 +192,7 @@ class TestSerialization(TestCase): input_file, state_dict_file, _, - traced_module_file, + _, expected_file, _package_file, _get_attr_targets_file, @@ -218,7 +216,7 @@ class TestSerialization(TestCase): """ ( input_file, - state_dict_file, + _, _scripted_module_file, _traced_module_file, expected_file, diff --git a/test/quantization/core/experimental/apot_fx_graph_mode_ptq.py b/test/quantization/core/experimental/apot_fx_graph_mode_ptq.py index 65cbae3a163..3fdf0700dd5 100644 --- a/test/quantization/core/experimental/apot_fx_graph_mode_ptq.py +++ b/test/quantization/core/experimental/apot_fx_graph_mode_ptq.py @@ -31,7 +31,7 @@ from torch.ao.quantization.quantize_fx import prepare_qat_fx def calibrate(model, data_loader): model.eval() with torch.no_grad(): - for image, target in data_loader: + for image, _ in data_loader: model(image) from torch.ao.quantization.experimental.qconfig import ( diff --git a/test/quantization/core/experimental/quantization_util.py b/test/quantization/core/experimental/quantization_util.py index 90c29d19579..e2622b467c8 100644 --- a/test/quantization/core/experimental/quantization_util.py +++ b/test/quantization/core/experimental/quantization_util.py @@ -76,7 +76,8 @@ def evaluate(model, criterion, data_loader): with torch.no_grad(): for image, target in data_loader: output = model(image) - loss = criterion(output, target) + + loss = criterion(output, target) # noqa: F841 acc1, acc5 = accuracy(output, target, topk=(1, 5)) top1.update(acc1[0], image.size(0)) top5.update(acc5[0], image.size(0)) @@ -133,7 +134,7 @@ def training_loop(model, criterion, data_loader): optimizer = torch.optim.Adam(model.parameters(), lr=0.001) train_loss, correct, total = 0, 0, 0 model.train() - for i in range(10): + for _ in range(10): for data, target in data_loader: optimizer.zero_grad() output = model(data) diff --git a/test/quantization/core/experimental/test_bits.py b/test/quantization/core/experimental/test_bits.py index dfba754590d..daa780adde4 100644 --- a/test/quantization/core/experimental/test_bits.py +++ b/test/quantization/core/experimental/test_bits.py @@ -46,7 +46,7 @@ class Int16Tensor(torch.Tensor): def __repr__(self) -> str: with no_dispatch(): - t16 = self.view(torch.int16) + self.view(torch.int16) return 
f"TensorSubclassDemo{self.view(torch.int16)}" diff --git a/test/quantization/core/experimental/test_fake_quantize.py b/test/quantization/core/experimental/test_fake_quantize.py index 4e9464aca80..33c550f942e 100644 --- a/test/quantization/core/experimental/test_fake_quantize.py +++ b/test/quantization/core/experimental/test_fake_quantize.py @@ -86,7 +86,7 @@ class TestFakeQuantize(unittest.TestCase): observer(input) alpha, gamma, quantization_levels, level_indices = observer.calculate_qparams(signed=False) - test = gradcheck(fake_quantize_function.apply, (input, alpha, gamma, quantization_levels, level_indices), atol=1e-4) + gradcheck(fake_quantize_function.apply, (input, alpha, gamma, quantization_levels, level_indices), atol=1e-4) if __name__ == '__main__': unittest.main() diff --git a/test/quantization/core/experimental/test_float8.py b/test/quantization/core/experimental/test_float8.py index c3cf40835ac..e6b40d3edc1 100644 --- a/test/quantization/core/experimental/test_float8.py +++ b/test/quantization/core/experimental/test_float8.py @@ -267,7 +267,7 @@ class TestFloat8Dtype(TestCase): with DeterministicGuard(torch.are_deterministic_algorithms_enabled()): for use_deterministic in (True, False): torch.use_deterministic_algorithms(use_deterministic) - x = torch.empty(4, 4, device=device, dtype=dtype) + torch.empty(4, 4, device=device, dtype=dtype) instantiate_device_type_tests(TestFloat8Dtype, globals()) diff --git a/test/quantization/core/experimental/test_nonuniform_observer.py b/test/quantization/core/experimental/test_nonuniform_observer.py index 5e6205f2958..01564556834 100644 --- a/test/quantization/core/experimental/test_nonuniform_observer.py +++ b/test/quantization/core/experimental/test_nonuniform_observer.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: quantization"] +# ruff: noqa: F841 from torch.ao.quantization.experimental.observer import APoTObserver import unittest diff --git a/test/quantization/core/test_backend_config.py b/test/quantization/core/test_backend_config.py index 3cb6dcc9c4a..cc1f1ef4f9a 100644 --- a/test/quantization/core/test_backend_config.py +++ b/test/quantization/core/test_backend_config.py @@ -294,8 +294,6 @@ class TestBackendConfig(QuantizationTestCase): }) def test_backend_config_from_dict(self): - op1 = self._get_backend_op_config1() - op2 = self._get_backend_op_config2() op_dict1 = self._get_backend_pattern_config_dict1() op_dict2 = self._get_backend_pattern_config_dict2() conf_dict = { diff --git a/test/quantization/core/test_quantized_module.py b/test/quantization/core/test_quantized_module.py index e1b81bc181c..c31fe44fa29 100644 --- a/test/quantization/core/test_quantized_module.py +++ b/test/quantization/core/test_quantized_module.py @@ -203,7 +203,7 @@ class TestStaticQuantizedModule(QuantizationTestCase): self.assertEqual(qlinear.scale, loaded_from_package.scale) self.assertEqual(qlinear.zero_point, loaded_from_package.zero_point) - for name, module in loaded_from_package.named_modules(): + for name, _ in loaded_from_package.named_modules(): # noop, just make sure attribute "_modules" is restored correctly during torch.package import assert(name is not None) # noqa: E275 @@ -1157,7 +1157,6 @@ class TestStaticQuantizedModule(QuantizationTestCase): x_zero_point = 0 y_scale = 5.0 / 256 y_zero_point = 127 - alpha = 1.5 dims = (1, 4, 8) diff --git a/test/quantization/core/test_quantized_op.py b/test/quantization/core/test_quantized_op.py index dd1df1e0cd9..ba82b5d8760 100644 --- a/test/quantization/core/test_quantized_op.py +++ 
b/test/quantization/core/test_quantized_op.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: quantization"] +# ruff: noqa: F841 import copy diff --git a/test/quantization/core/test_quantized_tensor.py b/test/quantization/core/test_quantized_tensor.py index 1e43981eb56..5517b9d8edd 100644 --- a/test/quantization/core/test_quantized_tensor.py +++ b/test/quantization/core/test_quantized_tensor.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: quantization"] +# ruff: noqa: F841 import numpy as np import math diff --git a/test/quantization/core/test_workflow_module.py b/test/quantization/core/test_workflow_module.py index a3a611d3932..95c08a35642 100644 --- a/test/quantization/core/test_workflow_module.py +++ b/test/quantization/core/test_workflow_module.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: quantization"] +# ruff: noqa: F841 # Torch # Standard library diff --git a/test/quantization/core/test_workflow_ops.py b/test/quantization/core/test_workflow_ops.py index 967469a21a0..bc3d763ed87 100644 --- a/test/quantization/core/test_workflow_ops.py +++ b/test/quantization/core/test_workflow_ops.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: quantization"] +# ruff: noqa: F841 import torch import math diff --git a/test/quantization/eager/test_numeric_suite_eager.py b/test/quantization/eager/test_numeric_suite_eager.py index 37e642fde75..6ce8fe1255f 100644 --- a/test/quantization/eager/test_numeric_suite_eager.py +++ b/test/quantization/eager/test_numeric_suite_eager.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: quantization"] +# ruff: noqa: F841 import unittest import torch diff --git a/test/quantization/eager/test_quantize_eager_ptq.py b/test/quantization/eager/test_quantize_eager_ptq.py index c50ece71a3a..bb7963a027d 100644 --- a/test/quantization/eager/test_quantize_eager_ptq.py +++ b/test/quantization/eager/test_quantize_eager_ptq.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: quantization"] +# ruff: noqa: F841 import torch import torch.nn as nn diff --git a/test/quantization/eager/test_quantize_eager_qat.py b/test/quantization/eager/test_quantize_eager_qat.py index be7890c97a6..851ab388e82 100644 --- a/test/quantization/eager/test_quantize_eager_qat.py +++ b/test/quantization/eager/test_quantize_eager_qat.py @@ -855,7 +855,7 @@ class TestQuantizeEagerQATNumerics(QuantizationTestCase): ref_op = compose([conv_op, bn_op, relu_op]) input_clone = input.detach().clone().requires_grad_() - for i in range(2): + for _ in range(2): result_ref = ref_op(input) result_actual = qat_op(input_clone) self.assertEqual(result_ref, result_actual) diff --git a/test/quantization/fx/test_model_report_fx.py b/test/quantization/fx/test_model_report_fx.py index 69fec404de6..ed17db2b652 100644 --- a/test/quantization/fx/test_model_report_fx.py +++ b/test/quantization/fx/test_model_report_fx.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: quantization"] +# ruff: noqa: F841 from typing import Set import torch diff --git a/test/quantization/fx/test_numeric_suite_fx.py b/test/quantization/fx/test_numeric_suite_fx.py index f88485b961b..84c4f84fa35 100644 --- a/test/quantization/fx/test_numeric_suite_fx.py +++ b/test/quantization/fx/test_numeric_suite_fx.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: quantization"] +# ruff: noqa: F841 import copy import math diff --git a/test/quantization/fx/test_quantize_fx.py b/test/quantization/fx/test_quantize_fx.py index 4f3b3d73d99..a5ed2fdb4c6 100644 --- a/test/quantization/fx/test_quantize_fx.py +++ b/test/quantization/fx/test_quantize_fx.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: quantization"] +# ruff: noqa: F841 from collections 
import OrderedDict import contextlib diff --git a/test/quantization/fx/test_subgraph_rewriter.py b/test/quantization/fx/test_subgraph_rewriter.py index dc07ed2676a..41c085b34a0 100644 --- a/test/quantization/fx/test_subgraph_rewriter.py +++ b/test/quantization/fx/test_subgraph_rewriter.py @@ -287,10 +287,10 @@ class TestSubgraphRewriter(JitTestCase): def test_subgraph_rewriter_internal_pattern_nodes_cannot_have_users_that_are_not_matched(self): class M(torch.nn.Module): def forward(self, x, w1, w2, b1, b2): - m0 = torch.cat([w1, w2]) + m0 = torch.cat([w1, w2]) # noqa: F841 m1 = torch.cat([w1, w2]) m2 = torch.cat([x, b2]) - t0 = torch.addmm(b1, m1, m2.t()) + t0 = torch.addmm(b1, m1, m2.t()) # noqa: F841 t1 = torch.sum(w1, 1) t2 = torch.addmm(b1, m1, m2.t()) return torch.sum(t1), torch.sum(t2) diff --git a/test/quantization/jit/test_deprecated_jit_quant.py b/test/quantization/jit/test_deprecated_jit_quant.py index 491f0e928cc..a6fd49588da 100644 --- a/test/quantization/jit/test_deprecated_jit_quant.py +++ b/test/quantization/jit/test_deprecated_jit_quant.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: quantization"] +# ruff: noqa: F841 import torch from torch.testing._internal.common_quantization import skipIfNoFBGEMM diff --git a/test/quantization/jit/test_ondevice_quantization.py b/test/quantization/jit/test_ondevice_quantization.py index 1d85c3f6e52..6d58e5c0fb0 100644 --- a/test/quantization/jit/test_ondevice_quantization.py +++ b/test/quantization/jit/test_ondevice_quantization.py @@ -178,7 +178,6 @@ class TestOnDeviceDynamicPTQInsertObservers(TestCase): def test_weight_only_observers(self): model = MyConvLinearModule() qconfig_dict = {"": default_dynamic_qconfig} - inputs = model.get_example_inputs() scripted_model = OnDevicePTQUtils.insert_observers(model, qconfig_dict) observe_forward_graph = scripted_model.observe_forward.graph num_weight_only_observers = 0 @@ -379,7 +378,7 @@ class TestOnDeviceDynamicPTQFinalize(TestCase): thrown = False try: m(*inputs) - except Exception as e: + except Exception: thrown = True self.assertTrue(thrown) @@ -399,7 +398,7 @@ class TestOnDeviceDynamicPTQFinalize(TestCase): thrown = False try: m(*inputs) - except Exception as e: + except Exception: thrown = True self.assertTrue(thrown) diff --git a/test/quantization/jit/test_quantize_jit.py b/test/quantization/jit/test_quantize_jit.py index 64d975cc93f..351876fcdab 100644 --- a/test/quantization/jit/test_quantize_jit.py +++ b/test/quantization/jit/test_quantize_jit.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: quantization"] +# ruff: noqa: F841 # torch import io diff --git a/test/quantization/pt2e/test_duplicate_dq.py b/test/quantization/pt2e/test_duplicate_dq.py index e2b7236d2ef..169f45eb9cc 100644 --- a/test/quantization/pt2e/test_duplicate_dq.py +++ b/test/quantization/pt2e/test_duplicate_dq.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: quantization"] +# ruff: noqa: F841 import copy import unittest from typing import Any, Dict diff --git a/test/quantization/pt2e/test_graph_utils.py b/test/quantization/pt2e/test_graph_utils.py index 09a39c5b0ce..ac020795f5f 100644 --- a/test/quantization/pt2e/test_graph_utils.py +++ b/test/quantization/pt2e/test_graph_utils.py @@ -32,7 +32,7 @@ class TestGraphUtils(TestCase): example_inputs = (torch.randn(1, 3, 5, 5),) # program capture - m, guards = torchdynamo.export( + m, guards = torchdynamo.export( # noqa: F841 m, *copy.deepcopy(example_inputs), aten_graph=True, @@ -76,7 +76,7 @@ class TestGraphUtils(TestCase): example_inputs = (torch.randn(1, 3, 5, 5),) # program capture - m,
guards = torchdynamo.export( + m, guards = torchdynamo.export( # noqa: F841 m, *copy.deepcopy(example_inputs), aten_graph=True, @@ -108,7 +108,7 @@ class TestGraphUtils(TestCase): example_inputs = (torch.randn(1, 3, 5, 5),) # program capture - m, guards = torchdynamo.export( + m, guards = torchdynamo.export( # noqa: F841 m, *copy.deepcopy(example_inputs), aten_graph=True, diff --git a/test/quantization/pt2e/test_metadata_porting.py b/test/quantization/pt2e/test_metadata_porting.py index c94f46d268a..251abeb9f64 100644 --- a/test/quantization/pt2e/test_metadata_porting.py +++ b/test/quantization/pt2e/test_metadata_porting.py @@ -109,7 +109,7 @@ class TestMetaDataPorting(QuantizationTestCase): m(*example_inputs) m = convert_pt2e(m) - pt2_quant_output = m(*example_inputs) + m(*example_inputs) recorded_node_tags = {} for n in m.graph.nodes: if "quantization_tag" not in n.meta: @@ -431,7 +431,6 @@ class TestMetaDataPorting(QuantizationTestCase): def test_no_metadata_porting(self): class BackendAQuantizer(Quantizer): def annotate(self, gm: torch.fx.GraphModule) -> torch.fx.GraphModule: - backend_string = "BackendA" quantization_config = get_symmetric_quantization_config( is_per_channel=True ) @@ -476,7 +475,6 @@ class TestMetaDataPorting(QuantizationTestCase): class BackendAQuantizer(Quantizer): def annotate(self, gm: torch.fx.GraphModule) -> torch.fx.GraphModule: - backend_string = "BackendA" qconfig = get_symmetric_quantization_config() for n in gm.graph.nodes: if n.op != "call_function": @@ -513,7 +511,7 @@ class TestMetaDataPorting(QuantizationTestCase): torch.ops.quantized_decomposed.quantize_per_tensor.default: quantize_per_tensor_tensor_tags, torch.ops.quantized_decomposed.dequantize_per_tensor.default: dequantize_per_tensor_tensor_tags, } - m = self._test_metadata_porting( + self._test_metadata_porting( MatmulWithConstInput(), example_inputs, BackendAQuantizer(), diff --git a/test/quantization/pt2e/test_quantize_pt2e.py b/test/quantization/pt2e/test_quantize_pt2e.py index 9978c2c1b94..a51614a32e1 100644 --- a/test/quantization/pt2e/test_quantize_pt2e.py +++ b/test/quantization/pt2e/test_quantize_pt2e.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: quantization"] +# ruff: noqa: F841 from typing import Dict, List, Tuple import torch diff --git a/test/quantization/pt2e/test_quantize_pt2e_qat.py b/test/quantization/pt2e/test_quantize_pt2e_qat.py index 3ecc1bef17b..46f1f349fef 100644 --- a/test/quantization/pt2e/test_quantize_pt2e_qat.py +++ b/test/quantization/pt2e/test_quantize_pt2e_qat.py @@ -648,8 +648,8 @@ class TestQuantizePT2EQAT_ConvBn_Base(PT2EQATTestCase): assert isinstance(bias_node, torch.fx.Node) return (qweight_node, bias_node) - first_conv_qweight, first_conv_bias = get_conv_weight_and_bias(first_conv) - second_conv_qweight, second_conv_bias = get_conv_weight_and_bias(second_conv) + _, first_conv_bias = get_conv_weight_and_bias(first_conv) + _, second_conv_bias = get_conv_weight_and_bias(second_conv) # Assert that each set of conv, conv weight, and conv bias are in the same partition def get_source_fn(node: torch.fx.Node): @@ -1111,10 +1111,10 @@ class TestQuantizeMixQATAndPTQ(QuantizationTestCase): self._prepare_qat_linears(model) - after_prepare_result_pt2e = model(*example_inputs) + model(*example_inputs) # must be fixed model.eval() self._convert_qat_linears(model) - quant_result_pt2e = model(*example_inputs) + model(*example_inputs) model_pt2e = export_for_training( model, @@ -1126,9 +1126,9 @@ class TestQuantizeMixQATAndPTQ(QuantizationTestCase): quantization_config = 
get_symmetric_quantization_config() quantizer.set_global(quantization_config) model_pt2e = prepare_pt2e(model_pt2e, quantizer) - after_prepare_result_pt2e = model_pt2e(*example_inputs) + after_prepare_result_pt2e = model_pt2e(*example_inputs) # noqa: F841 model_pt2e = convert_pt2e(model_pt2e) - quant_result_pt2e = model_pt2e(*example_inputs) + quant_result_pt2e = model_pt2e(*example_inputs) # noqa: F841 exported_model = torch.export.export(model_pt2e, example_inputs) diff --git a/test/quantization/pt2e/test_representation.py b/test/quantization/pt2e/test_representation.py index 07aedfdffb9..e2335dab3c9 100644 --- a/test/quantization/pt2e/test_representation.py +++ b/test/quantization/pt2e/test_representation.py @@ -159,7 +159,7 @@ class TestPT2ERepresentation(QuantizationTestCase): quantizer = XNNPACKQuantizer() quantization_config = get_symmetric_quantization_config(is_per_channel=True) quantizer.set_global(quantization_config) - m_eager = M().eval() + M().eval() example_inputs = ( torch.randn(1, 3, 3, 3), @@ -235,7 +235,7 @@ class TestPT2ERepresentation(QuantizationTestCase): # use per channel quantization for weight operator_config = get_symmetric_quantization_config(is_per_channel=True) quantizer.set_global(operator_config) - m_eager = M().eval() + M().eval() inputs = [ (torch.randn(1, 5),), @@ -284,7 +284,7 @@ class TestPT2ERepresentation(QuantizationTestCase): quantizer = XNNPACKQuantizer() quantization_config = get_symmetric_quantization_config(is_per_channel=True) quantizer.set_global(quantization_config) - m_eager = M().eval() + M().eval() example_inputs = ( torch.randn(1, 3, 3, 3), diff --git a/test/quantization/pt2e/test_x86inductor_quantizer.py b/test/quantization/pt2e/test_x86inductor_quantizer.py index 0c304d1e4c8..31cecf9aded 100644 --- a/test/quantization/pt2e/test_x86inductor_quantizer.py +++ b/test/quantization/pt2e/test_x86inductor_quantizer.py @@ -582,7 +582,7 @@ class X86InductorQuantTestCase(QuantizationTestCase): convert_model = copy.deepcopy(m) if debug: convert_model.print_readable(True) - pt2_quant_output = m(*example_inputs) + m(*example_inputs) node_occurrence = { ns.call_function(k): v for k, v in expected_node_occurrence.items() } diff --git a/test/quantization/pt2e/test_xnnpack_quantizer.py b/test/quantization/pt2e/test_xnnpack_quantizer.py index 730c5db5f66..36209e5aad1 100644 --- a/test/quantization/pt2e/test_xnnpack_quantizer.py +++ b/test/quantization/pt2e/test_xnnpack_quantizer.py @@ -504,7 +504,6 @@ class TestXNNPACKQuantizer(PT2EQuantizationTestCase): m = prepare_pt2e(m, quantizer) m(*example_inputs) - act_post_processes_pairs = [] for n in m.graph.nodes: if n.target in [ torch.ops.aten.view.default, @@ -741,7 +740,6 @@ class TestXNNPACKQuantizer(PT2EQuantizationTestCase): with override_quantized_engine("qnnpack"): model_fx = RNNDynamicModel("GRU") - module_types = [torch.nn.GRU] niter = 10 example_inputs = ( # input_tensor @@ -803,7 +801,6 @@ class TestXNNPACKQuantizer(PT2EQuantizationTestCase): with override_quantized_engine("qnnpack"): model_fx = RNNDynamicModel("GRU") - module_types = [torch.nn.GRU] niter = 10 example_inputs = ( # input_tensor diff --git a/test/test_autograd.py b/test/test_autograd.py index 215867b8a17..3fce999c0b1 100644 --- a/test/test_autograd.py +++ b/test/test_autograd.py @@ -1,4 +1,5 @@ # Owner(s): ["module: autograd"] +# ruff: noqa: F841 import collections import contextlib diff --git a/test/test_autograd_fallback.py b/test/test_autograd_fallback.py index 8c3b05992ed..d32bf870841 100644 --- a/test/test_autograd_fallback.py 
+++ b/test/test_autograd_fallback.py @@ -137,7 +137,7 @@ class TestAutogradFallback(TestCase): warnings.simplefilter("error") x = torch.randn([], requires_grad=True) y = x.clone() - z = op(y) + op(y) y.backward() self.assertEqual(x.grad, torch.ones_like(x)) @@ -320,7 +320,7 @@ class TestAutogradFallback(TestCase): "foo", lambda a: (a.clone(), a.detach().clone().requires_grad_()), "CPU" ) x = torch.randn(3, requires_grad=True) - y, z = op(x) + _, z = op(x) with self._check_ctx(mode): z.sum().backward() @@ -338,7 +338,7 @@ class TestAutogradFallback(TestCase): x = torch.randn(3, requires_grad=True) # NB: PyTorch dispatcher treats "None" as undefined Tensor. - y, z = op(None, x) + _, z = op(None, x) with self._check_ctx(mode): z.sum().backward() diff --git a/test/test_binary_ufuncs.py b/test/test_binary_ufuncs.py index ee9fb490356..0035ab30ad0 100644 --- a/test/test_binary_ufuncs.py +++ b/test/test_binary_ufuncs.py @@ -1,4 +1,5 @@ # Owner(s): ["module: tests"] +# ruff: noqa: F841 import itertools import math diff --git a/test/test_cpp_extensions_aot.py b/test/test_cpp_extensions_aot.py index 8659165fd66..985166c48c6 100644 --- a/test/test_cpp_extensions_aot.py +++ b/test/test_cpp_extensions_aot.py @@ -26,7 +26,7 @@ try: import pytest HAS_PYTEST = True -except ImportError as e: +except ImportError: HAS_PYTEST = False # TODO: Rewrite these tests so that they can be collected via pytest without @@ -311,9 +311,9 @@ class TestPybindTypeCasters(common.TestCase): @torch.testing._internal.common_utils.markDynamoStrictTest class TestMAIATensor(common.TestCase): def test_unregistered(self): - a = torch.arange(0, 10, device="cpu") + torch.arange(0, 10, device="cpu") with self.assertRaisesRegex(RuntimeError, "Could not run"): - b = torch.arange(0, 10, device="maia") + torch.arange(0, 10, device="maia") @skipIfTorchDynamo("dynamo cannot model maia device") def test_zeros(self): @@ -336,7 +336,7 @@ class TestMAIATensor(common.TestCase): b = torch.empty(5, 5, device="maia") self.assertEqual(maia_extension.get_test_int(), 0) - c = a + b + a + b self.assertEqual(maia_extension.get_test_int(), 1) def test_conv_backend_override(self): diff --git a/test/test_cpp_extensions_open_device_registration.py b/test/test_cpp_extensions_open_device_registration.py index d427bc2653b..3792d777924 100644 --- a/test/test_cpp_extensions_open_device_registration.py +++ b/test/test_cpp_extensions_open_device_registration.py @@ -269,7 +269,7 @@ class TestCppExtensionOpenRgistration(common.TestCase): self.assertTrue(z.is_foo) def test_open_device_packed_sequence(self): - device = self.module.custom_device() + device = self.module.custom_device() # noqa: F841 a = torch.rand(5, 3) b = torch.tensor([1, 1, 1, 1, 1]) input = torch.nn.utils.rnn.PackedSequence(a, b) @@ -445,7 +445,7 @@ class TestCppExtensionOpenRgistration(common.TestCase): with torch._subclasses.fake_tensor.FakeTensorMode.push(): a = torch.empty(1, device="foo") b = torch.empty(1, device="foo:0") - result = a + b + result = a + b # noqa: F841 def test_open_device_named_tensor(self): torch.empty([2, 3, 4, 5], device="foo", names=["N", "C", "H", "W"]) @@ -538,7 +538,6 @@ class TestCppExtensionOpenRgistration(common.TestCase): """ torch.utils.rename_privateuse1_backend("foo") device = self.module.custom_device() - default_protocol = torch.serialization.DEFAULT_PROTOCOL # Legacy data saved with _rebuild_device_tensor_from_numpy on f80ed0b8 via diff --git a/test/test_cuda.py b/test/test_cuda.py index c1408d47302..6aac24b08e5 100644 --- a/test/test_cuda.py +++ 
b/test/test_cuda.py @@ -1,4 +1,5 @@ # Owner(s): ["module: cuda"] +# ruff: noqa: F841 import contextlib import ctypes @@ -663,7 +664,6 @@ class TestCuda(TestCase): self.assertEqual(torch.cuda.current_stream(), user_stream) self.assertTrue(user_stream.query()) tensor1 = torch.ByteTensor(5).pin_memory() - tensor2 = tensor1.cuda(non_blocking=True) + 1 default_stream.synchronize() self.assertTrue(default_stream.query()) @@ -987,7 +987,7 @@ except RuntimeError as e: ) out, err = p.communicate(timeout=10) p.wait(timeout=10) - except subprocess.TimeoutExpired as e: + except subprocess.TimeoutExpired: p.kill() out, err = p.communicate() expected_messages = [ @@ -1175,7 +1175,7 @@ except RuntimeError as e: with self.assertLeaksNoCudaTensors(): x = torch.randn(3, 1, device="cuda") y = torch.randn(2, 1, device="cuda") - z = x + y + x + y @unittest.skipIf(not TEST_MEDIUM_TENSOR, "not enough memory") @serialTest() @@ -1303,7 +1303,7 @@ except RuntimeError as e: ) for p in model.parameters(): self.assertTrue(p.grad is None) - for i in range(iters): + for _ in range(iters): loss = model(x, x_first_use_on_ambient).sum() if out_of_place: x_grad = torch.autograd.grad((loss,), (x,))[0] @@ -1477,7 +1477,7 @@ torch.cuda.synchronize() # Line up threads to increase likelihood of race conditions. barrier.wait() with torch.cuda.stream(my_stream): - for i in range(test_iters): + for _ in range(test_iters): # If all threads are sharing the same cublas handle, # the following sequence may occur: # thread 0 calls cublasSetStream() @@ -1594,7 +1594,7 @@ torch.cuda.synchronize() # Line up threads to increase likelihood of race conditions. barrier.wait() with torch.cuda.stream(my_stream): - for i in range(test_iters): + for _ in range(test_iters): # If all threads are sharing the same cublas handle, # the following sequence may occur: # thread 0 calls cublasSetStream() @@ -1689,7 +1689,7 @@ torch.cuda.synchronize() return generator, old_state, new_state def register_states_to_graph(generator_state, graph): - generator, old_state, new_state = generator_state + _, old_state, new_state = generator_state graph.register_generator_state(old_state) graph.register_generator_state(new_state) @@ -1712,7 +1712,7 @@ torch.cuda.synchronize() # Define a function to retrieve the final offsets of the original and new generator states def get_final_offsets_of_states(generator_state): - generator, old_state, new_state = generator_state + _, old_state, new_state = generator_state old_state_offset = old_state.get_offset() new_state_offset = new_state.get_offset() return old_state_offset, new_state_offset @@ -1883,7 +1883,7 @@ torch.cuda.synchronize() z = x + y with torch.cuda.stream(s1): s1.wait_stream(s0) - w = z + y + z + y s0.wait_stream(s1) g.capture_end() s0.synchronize() @@ -1911,7 +1911,7 @@ except RuntimeError as e: exit(2) """ try: - a = subprocess.check_output( + subprocess.check_output( [sys.executable, "-c", script], stderr=subprocess.STDOUT, # On Windows, opening the subprocess with the default CWD makes `import torch` @@ -1976,7 +1976,7 @@ exit(2) free_bytes_before, total_bytes = torch.cuda.mem_get_info() used_gb_before = (total_bytes - free_bytes_before) / 1e9 - for i in range(100): + for _ in range(100): torch_graph = torch.cuda.CUDAGraph() with torch.cuda.graph(torch_graph): torch.mm(a, b) @@ -2648,7 +2648,7 @@ exit(2) torch.cuda.synchronize() # dummy allocation triggers process_events, Hopefully successfully processes b's end-of-life event. 
- c = torch.zeros((3,), device="cuda") + torch.zeros((3,), device="cuda") @skipIfRocm @unittest.skipIf( @@ -2668,20 +2668,20 @@ exit(2) model = torch.nn.LSTM(512, 512, 2, dropout=0.5).cuda() x = torch.ones(100, 192, 512, device="cuda") - y = model(x) + model(x) g = torch.cuda.CUDAGraph() s = torch.cuda.Stream() s.wait_stream(torch.cuda.current_stream()) with torch.cuda.stream(s): g.capture_begin() - y = model(x) + model(x) g.capture_end() torch.cuda.current_stream().wait_stream(s) g.replay() - y = model(x) + model(x) @unittest.skipIf( not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs" @@ -2858,7 +2858,7 @@ exit(2) torch.manual_seed(5) torch.cuda.manual_seed(5) - N, D_in, H, D_out = 640, 4096, 2048, 1024 + N, D_in, H, _ = 640, 4096, 2048, 1024 class ParameterlessModule(torch.nn.Module): def forward(self, input_dict: dict): @@ -2882,7 +2882,6 @@ exit(2) x = torch.randn(N, D_in, device="cuda", requires_grad=False) unused_input = torch.randn(N, H, device="cuda", requires_grad=False) - y_pred = torch.randn(N, D_in, device="cuda", requires_grad=False) y = torch.randn(N, D_in, device="cuda") # This is a good stress test. It graphs four callables: two Modules and two python functions. @@ -2907,7 +2906,7 @@ exit(2) with torch.amp.autocast( device_type="cuda", enabled=with_amp, cache_enabled=cache_enabled ): - out = m({"x": data, "unused_input": unused_input})["output"] + m({"x": data, "unused_input": unused_input})["output"] # We graphed the models in training mode. Eval should still run ungraphed. model_graphed.eval() @@ -3160,7 +3159,7 @@ exit(2) z = x + y with torch.cuda.stream(s1): s1.wait_stream(s0) - w = z + y + z + y s0.wait_stream(s1) with torch.cuda.stream(s0): g.capture_end() @@ -3362,7 +3361,7 @@ print(f"{{r1}}, {{r2}}") error_msg = "cuFileHandleRegister failed" with TemporaryFileName() as f: with self.assertRaisesRegex(RuntimeError, error_msg): - file = torch.cuda.gds._GdsFile(f, os.O_CREAT | os.O_RDWR) + torch.cuda.gds._GdsFile(f, os.O_CREAT | os.O_RDWR) @unittest.skipIf(not TEST_CUDA, "CUDA not available, skipping tests") @@ -3438,7 +3437,7 @@ class TestCudaMallocAsync(TestCase): try: torch.cuda.memory.empty_cache() torch.cuda.memory._record_memory_history("state", stacks="all") - x = torch.rand(311, 411, device="cuda") + x = torch.rand(311, 411, device="cuda") # noqa: F841 ss = torch.cuda.memory._snapshot()["segments"] found_it = False @@ -3532,8 +3531,8 @@ class TestCudaMallocAsync(TestCase): record_context = context is not None ss = torch.cuda.memory._snapshot() - tplot = trace_plot(ss) - splot = segment_plot(ss) + trace_plot(ss) + segment_plot(ss) text = json.dumps(ss) self.assertTrue(record_context == ("test_memory_plots" in text)) @@ -3638,7 +3637,7 @@ class TestCudaMallocAsync(TestCase): def foo(): return torch.rand(311, 411, device="cuda") - x = foo() + x = foo() # noqa: F841 ss = torch.cuda.memory._snapshot()["segments"] found_it = False diff --git a/test/test_cuda_multigpu.py b/test/test_cuda_multigpu.py index 3dfc82fbf31..ad43ac8b879 100644 --- a/test/test_cuda_multigpu.py +++ b/test/test_cuda_multigpu.py @@ -1011,7 +1011,8 @@ class TestCudaMultiGPU(TestCase): torch.cuda.synchronize() before_free_bytes, before_available_bytes = torch.cuda.mem_get_info(device) # increasing to 8MB to force acquiring a new block and overcome blocksize differences across platforms - t = torch.randn(1024 * 1024 * 8, device=device) + t = torch.randn(1024 * 1024 * 8, device=device) # noqa: F841 + if IS_JETSON: # w/o syncing, mem_get_info will run before memory allocated has 
actually increased. # This race condition causes consistent failure @@ -1302,7 +1303,6 @@ t2.start() device_count = torch.cuda.device_count() current_alloc = [memory_allocated(idx) for idx in range(device_count)] - x = torch.ones(10, device="cuda:0") self.assertGreater(memory_allocated(0), current_alloc[0]) self.assertTrue( all( diff --git a/test/test_cuda_sanitizer.py b/test/test_cuda_sanitizer.py index daf2cfda3dc..93a1f4e1150 100644 --- a/test/test_cuda_sanitizer.py +++ b/test/test_cuda_sanitizer.py @@ -58,7 +58,6 @@ class TestArgumentHandler(TestCase): out = torch.split(a, 2) argument_handler.parse_outputs(split_func._schema, out, is_factory=False) - outputs = {out[0].data_ptr(), out[1].data_ptr(), out[2].data_ptr()} # Split is a view op, no data is read or written! self.assertEqual(len(argument_handler.dataptrs_read), 0) self.assertEqual(len(argument_handler.dataptrs_written), 0) @@ -514,8 +513,8 @@ class TestMessages(TestCase): # These two tests ensure that subclass creation # happens smoothly under the mode used by csan - t = TwoTensor(torch.rand(2), torch.rand(2)) - t = MyT(torch.rand(2)) + TwoTensor(torch.rand(2), torch.rand(2)) + MyT(torch.rand(2)) finally: csan.cuda_sanitizer.disable() diff --git a/test/test_cuda_trace.py b/test/test_cuda_trace.py index f41f5120b29..124b0ac41b8 100644 --- a/test/test_cuda_trace.py +++ b/test/test_cuda_trace.py @@ -79,7 +79,7 @@ class TestCudaTrace(TestCase): if torch.version.hip: user_stream = torch.cuda.Stream() with torch.cuda.stream(user_stream): - tensor = torch.ones(5, device="cuda") + torch.ones(5, device="cuda") else: torch.cuda.Stream() diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py index f5fd1c03fac..8b95b24a370 100644 --- a/test/test_custom_ops.py +++ b/test/test_custom_ops.py @@ -1,4 +1,5 @@ # Owner(s): ["module: custom-operators"] +# ruff: noqa: F841 import collections import itertools diff --git a/test/test_dataloader.py b/test/test_dataloader.py index 4c8004a157d..46ee9ef6fa8 100644 --- a/test/test_dataloader.py +++ b/test/test_dataloader.py @@ -1,4 +1,5 @@ # Owner(s): ["module: dataloader"] +# ruff: noqa: F841 import ctypes import errno diff --git a/test/test_datapipe.py b/test/test_datapipe.py index 30ae3e62040..514c4bef30c 100644 --- a/test/test_datapipe.py +++ b/test/test_datapipe.py @@ -1,4 +1,5 @@ # mypy: ignore-errors +# ruff: noqa: F841 # Owner(s): ["module: dataloader"] diff --git a/test/test_dispatch.py b/test/test_dispatch.py index 9a02e6712fa..f6e686a21dd 100644 --- a/test/test_dispatch.py +++ b/test/test_dispatch.py @@ -1,5 +1,5 @@ # Owner(s): ["module: dispatch"] - +# ruff: noqa: F841 import itertools import os import re diff --git a/test/test_dlpack.py b/test/test_dlpack.py index fe1107ac850..2ee4e64b9f3 100644 --- a/test/test_dlpack.py +++ b/test/test_dlpack.py @@ -224,7 +224,7 @@ class TestTorchDlPack(TestCase): x = torch.zeros(1, device=device) torch.cuda._sleep(2**20) self.assertTrue(torch.cuda.default_stream().query()) - d = x.__dlpack__(1) + x.__dlpack__(1) # check that the default stream has work (a pending cudaStreamWaitEvent) self.assertFalse(torch.cuda.default_stream().query()) diff --git a/test/test_dynamic_shapes.py b/test/test_dynamic_shapes.py index e2309456496..09c705ffa37 100644 --- a/test/test_dynamic_shapes.py +++ b/test/test_dynamic_shapes.py @@ -1,5 +1,5 @@ # Owner(s): ["oncall: jit"] - +# ruff: noqa: F841 import contextlib import copy import itertools diff --git a/test/test_fake_tensor.py b/test/test_fake_tensor.py index a894f16b757..29a897d32b3 100644 --- 
a/test/test_fake_tensor.py +++ b/test/test_fake_tensor.py @@ -1,4 +1,5 @@ # Owner(s): ["module: meta tensors"] +# ruff: noqa: F841 import contextlib diff --git a/test/test_file_check.py b/test/test_file_check.py index 6aea0653678..5b2101b81ac 100644 --- a/test/test_file_check.py +++ b/test/test_file_check.py @@ -6,7 +6,7 @@ from torch.testing._internal.common_utils import run_tests, TestCase class TestFileCheck(TestCase): def test_not_run(self): - stdout, stderr = self.run_process_no_exception( + stdout, _ = self.run_process_no_exception( """\ from torch.testing import FileCheck file_check = FileCheck().check("not run") diff --git a/test/test_flop_counter.py b/test/test_flop_counter.py index 6b0441e7955..ed44a17e5a0 100644 --- a/test/test_flop_counter.py +++ b/test/test_flop_counter.py @@ -1,5 +1,5 @@ # Owner(s): ["module: unknown"] - +# ruff: noqa: F841 import functools import unittest diff --git a/test/test_foreach.py b/test/test_foreach.py index be0aadfb82d..c0c81e09e00 100644 --- a/test/test_foreach.py +++ b/test/test_foreach.py @@ -1,5 +1,5 @@ # Owner(s): ["module: mta"] - +# ruff: noqa: F841 import itertools import os import random diff --git a/test/test_function_schema.py b/test/test_function_schema.py index 439a3c66d3f..d98b7054a6e 100644 --- a/test/test_function_schema.py +++ b/test/test_function_schema.py @@ -303,7 +303,7 @@ class TestFunctionSchema(TestCase): with self.assertRaisesRegex( RuntimeError, r"schemas with vararg \(...\) can't have default value args" ): - schema = parse_schema("any.foo(int arg1, int arg2=0, ...)") + parse_schema("any.foo(int arg1, int arg2=0, ...)") def test_tensor_list_alias_annotation_properly_parsed(self): schema_str = "foo(Tensor self, *, Tensor(a!)[] out) -> ()" diff --git a/test/test_functional_optim.py b/test/test_functional_optim.py index 92ce0d52cc1..29b240801b9 100644 --- a/test/test_functional_optim.py +++ b/test/test_functional_optim.py @@ -92,7 +92,6 @@ class TestFunctionalOptimParity(TestCase): module_optim = MyModule() module_functional = MyModule() optim_params = module_optim.parameters() - functional_params = module_functional.parameters() optim = optim_cls(optim_params, *args, **kwargs) functional_optim_cls = functional_optim_map.get(optim_cls, None) if not functional_optim_cls: diff --git a/test/test_functionalization.py b/test/test_functionalization.py index 35b41d008b2..ed74465369e 100644 --- a/test/test_functionalization.py +++ b/test/test_functionalization.py @@ -1,4 +1,5 @@ # Owner(s): ["module: codegen"] +# ruff: noqa: F841 import unittest from contextlib import nullcontext diff --git a/test/test_functionalization_of_rng_ops.py b/test/test_functionalization_of_rng_ops.py index 64985952b70..3cc9f272202 100644 --- a/test/test_functionalization_of_rng_ops.py +++ b/test/test_functionalization_of_rng_ops.py @@ -296,7 +296,7 @@ class TestFunctionalizationRngOps(TestCase): x = torch.ones(2, 2, device="cuda", requires_grad=True) y = torch.rand(2, 2, device="cuda", requires_grad=True) torch.cuda.manual_seed(123) - ref = fn(x, y) + fn(x, y) # With checkpointing we should recompute dropout in bwd, and philox_rand is passed from fwd fwd_compiler = functools.partial(count_philox_rand, freq=1) diff --git a/test/test_fx.py b/test/test_fx.py index 11fdea4cf3b..ae19d60c3b3 100644 --- a/test/test_fx.py +++ b/test/test_fx.py @@ -1,4 +1,5 @@ # Owner(s): ["module: fx"] +# ruff: noqa: F841 import builtins import contextlib diff --git a/test/test_fx_experimental.py b/test/test_fx_experimental.py index fac9365e60a..57cbd8f8be0 100644 --- 
a/test/test_fx_experimental.py +++ b/test/test_fx_experimental.py @@ -1,4 +1,5 @@ # Owner(s): ["module: fx"] +# ruff: noqa: F841 import functools import math diff --git a/test/test_fx_passes.py b/test/test_fx_passes.py index ac78aef325b..6d85b5faa53 100644 --- a/test/test_fx_passes.py +++ b/test/test_fx_passes.py @@ -1,4 +1,5 @@ # Owner(s): ["module: fx.passes"] +# ruff: noqa: F841 from dataclasses import dataclass import operator diff --git a/test/test_fx_reinplace_pass.py b/test/test_fx_reinplace_pass.py index f1d3ee0d714..6d7258deb48 100644 --- a/test/test_fx_reinplace_pass.py +++ b/test/test_fx_reinplace_pass.py @@ -10,7 +10,7 @@ from torch.fx.experimental.sym_node import SymNode try: from functorch.experimental import functionalize HAS_FUNCTIONALIZATION = True -except Exception as e: +except Exception: HAS_FUNCTIONALIZATION = False class TestReinplacePass(TestCase): @@ -44,7 +44,8 @@ def forward(self, x_1): a = x.clone() a_view = a.view(-1) # We shouldn't re-inplace the first add(), because an alias of a is re-used later in the program - b = a.add(1) + b = a.add(1) # noqa: F841 + # Second add() is fine to re-inplace c = a_view.add(1) return c @@ -287,8 +288,8 @@ def forward(self, a__1): inpt = torch.ones(4, 4) f2 = reinplace(make_fx(f)(inpt), inpt) - expected_out = f(inpt) - actual_out = f2(inpt) + expected_out = f(inpt) # noqa: F841 + actual_out = f2(inpt) # noqa: F841 # self.assertEqual(actual_out, expected_out) self.assertExpectedInline(f2.code, """\ diff --git a/test/test_indexing.py b/test/test_indexing.py index 5b9bafd5b29..68cd5361582 100644 --- a/test/test_indexing.py +++ b/test/test_indexing.py @@ -991,7 +991,6 @@ class TestIndexing(TestCase): num_indices = 401988 max_index_range = 2000 - results = [] target_index_range = [16, 256, 2000] for generated_index_range in target_index_range: # create CPU tensors diff --git a/test/test_jit.py b/test/test_jit.py index fd76a922536..eff28e36bdf 100644 --- a/test/test_jit.py +++ b/test/test_jit.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: jit"] +# ruff: noqa: F841 import torch diff --git a/test/test_jit_autocast.py b/test/test_jit_autocast.py index 4f9116d07fe..bb03faadb62 100644 --- a/test/test_jit_autocast.py +++ b/test/test_jit_autocast.py @@ -576,7 +576,7 @@ class TestAutocast(JitTestCase): cuda_o = torch.mm(cuda0, cuda1) return cpu_o, cuda_o - jit_t = torch.jit.script(t) + torch.jit.script(t) cpu0 = torch.randn(5, 5, device="cpu", dtype=torch.float32) cpu1 = torch.randn(5, 5, device="cpu", dtype=torch.float32) cuda0 = torch.randn(5, 5, device="cuda", dtype=torch.float32) @@ -591,7 +591,7 @@ class TestAutocast(JitTestCase): cuda_o = torch.mm(cuda0, cuda1) return cpu_o, cuda_o - jit_t = torch.jit.script(t) + torch.jit.script(t) cpu0 = torch.randn(5, 5, device="cpu", dtype=torch.float32) cpu1 = torch.randn(5, 5, device="cpu", dtype=torch.float32) cuda0 = torch.randn(5, 5, device="cuda", dtype=torch.float32) @@ -621,7 +621,7 @@ class TestAutocast(JitTestCase): t1 = torch.randn(5, 5, device="cuda", dtype=torch.float32).requires_grad_() # run optimization - for i in range(5): + for _ in range(5): with torch.autocast("cuda", torch.float16): jit_o = jit_t(t0, t1) jit_o.sum().backward() diff --git a/test/test_jit_fuser.py b/test/test_jit_fuser.py index 98cef4031c5..1ac7803a9d4 100644 --- a/test/test_jit_fuser.py +++ b/test/test_jit_fuser.py @@ -32,7 +32,7 @@ def strip_profiling_nodes(nodes): def warmup_forward(f, *args): profiling_count = 2 - for i in range(profiling_count): + for _ in range(profiling_count): results = f(*args) return results 
@@ -94,7 +94,7 @@ class TestFuser(JitTestCase): sin = torch.zeros(0, device="cuda") cos = torch.zeros(0, device="cuda") inputs = [sin, cos] - ge = self.checkScript(decode, inputs) + self.checkScript(decode, inputs) @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA") def test_arg_configurations_smoke_cuda(self): @@ -587,7 +587,7 @@ class TestFuser(JitTestCase): return p * (x * x + x) scripted = torch.jit.script(fn_test_scalar_arg_requires_grad) - out = scripted(x, p) + scripted(x, p) self.assertAllFused(scripted.graph_for(x, p), except_for=("aten::size", "prim::BroadcastSizes", "aten::_size_if_not_equal")) diff --git a/test/test_jit_fuser_te.py b/test/test_jit_fuser_te.py index bcd6cca6d57..062d024528a 100644 --- a/test/test_jit_fuser_te.py +++ b/test/test_jit_fuser_te.py @@ -1,4 +1,5 @@ # Owner(s): ["NNC"] +# ruff: noqa: F841 import contextlib import math diff --git a/test/test_jit_llga_fuser.py b/test/test_jit_llga_fuser.py index 31de7062bed..40e658d4af4 100644 --- a/test/test_jit_llga_fuser.py +++ b/test/test_jit_llga_fuser.py @@ -44,7 +44,7 @@ LLGA_FUSION_GROUP = 'prim::oneDNNFusionGroup' LLGA_NOT_ENABLED = not torch.backends.mkldnn.is_available() or IS_WINDOWS or IS_MACOS def warmup_forward(f, *args, profiling_count=3): - for i in range(profiling_count): + for _ in range(profiling_count): results = f(*args) return results @@ -507,7 +507,7 @@ class TestFusionPattern(JitLlgaTestCase): x = torch.clamp(x, max=2) return x - for inplace in [False, True]: + for inplace in [False, True]: # noqa: F841 for memory_format in [torch.contiguous_format, torch.channels_last]: x = torch.rand(1, 32, 28, 28).to(memory_format=memory_format) m = M() @@ -722,7 +722,7 @@ class TestFusionPattern(JitLlgaTestCase): # The output of the second partition is input to adaptive_avg_pool2d, which is # unsupported by LLGA, so it must be handled by PyTorch, which should receive # correct strides info of the channels-last tensor. - graph, _ = self.checkTrace(m, [x, y], dtype) + self.checkTrace(m, [x, y], dtype) @unittest.skipIf(LLGA_NOT_ENABLED, "MKL-DNN build is disabled") class TestEnableDisableLlgaFuser(JitTestCase): diff --git a/test/test_jiterator.py b/test/test_jiterator.py index 9909f2bd7b5..813552f33a9 100644 --- a/test/test_jiterator.py +++ b/test/test_jiterator.py @@ -165,7 +165,7 @@ class TestPythonJiterator(TestCase): ]) def test_invalid_function_name(self, code_string): with self.assertRaises(Exception): - jitted_fn = create_jit_fn(code_string) + create_jit_fn(code_string) instantiate_device_type_tests(TestPythonJiterator, globals(), only_for="cuda") diff --git a/test/test_legacy_vmap.py b/test/test_legacy_vmap.py index 3165269105b..882838b6391 100644 --- a/test/test_legacy_vmap.py +++ b/test/test_legacy_vmap.py @@ -1,4 +1,5 @@ # Owner(s): ["module: vmap"] +# ruff: noqa: F841 import functools import itertools diff --git a/test/test_linalg.py b/test/test_linalg.py index c40ea267d2b..ce0c1972d5f 100644 --- a/test/test_linalg.py +++ b/test/test_linalg.py @@ -1,4 +1,5 @@ # Owner(s): ["module: linear algebra"] +# ruff: noqa: F841 import torch import numpy as np diff --git a/test/test_maskedtensor.py b/test/test_maskedtensor.py index bb23e623221..db1ffbc38c1 100644 --- a/test/test_maskedtensor.py +++ b/test/test_maskedtensor.py @@ -152,7 +152,7 @@ class TestBasics(TestCase): mask = _create_random_mask((3, 4), device=device) msg = "It is not recommended to create a MaskedTensor with a tensor that requires_grad." 
with self.assertWarnsRegex(UserWarning, msg): - mt = masked_tensor(data, mask) + masked_tensor(data, mask) def test_add(self, device): data = torch.arange(5.0, device=device) diff --git a/test/test_meta.py b/test/test_meta.py index 574a9b551db..61fabc513f5 100644 --- a/test/test_meta.py +++ b/test/test_meta.py @@ -1,4 +1,5 @@ # Owner(s): ["module: decompositions"] +# ruff: noqa: F841 import itertools import torch diff --git a/test/test_metal.py b/test/test_metal.py index 050816bff5d..21b55f3824f 100644 --- a/test/test_metal.py +++ b/test/test_metal.py @@ -20,7 +20,7 @@ class TestMetalRewritePass(TestCase): scripted_model = torch.jit.script(module_instance) scripted_model.eval() input_data = torch.normal(1, 20, size=data_shape) - ref_result = scripted_model(input_data) + scripted_model(input_data) torch._C._jit_pass_metal_insert_prepacked_ops(scripted_model._c) if fuse_clamping_ops or prepack_removal: scripted_model._c = torch._C._freeze_module(scripted_model._c) @@ -55,7 +55,6 @@ class TestMetalRewritePass(TestCase): dilation = 1 input_channels = input_channels_per_group * groups output_channels = output_channels_per_group * groups - kernels = (kernel_h, kernel_w) strides = (stride_h, stride_w) paddings = (pad_h, pad_w) dilations = (dilation, dilation) diff --git a/test/test_mkldnn.py b/test/test_mkldnn.py index 5f192d7c349..3b53e47ada1 100644 --- a/test/test_mkldnn.py +++ b/test/test_mkldnn.py @@ -153,18 +153,18 @@ class TestMkldnn(TestCase): # unsupported types and unsupported types with gpu for dtype in [torch.double, torch.uint8, torch.int8, torch.short, torch.int, torch.long]: - with self.assertRaises(RuntimeError) as context: + with self.assertRaises(RuntimeError): torch.randn(1, 2, 3, 4, dtype=dtype, device=torch.device('cpu')).to_mkldnn() if torch.cuda.is_available(): - with self.assertRaises(RuntimeError) as context: + with self.assertRaises(RuntimeError): torch.randn(1, 2, 3, 4, dtype=dtype, device=torch.device('cuda')).to_mkldnn() # supported type with gpu if torch.cuda.is_available(): - with self.assertRaises(RuntimeError) as context: + with self.assertRaises(RuntimeError): torch.randn(1, 2, 3, 4, dtype=torch.float, device=torch.device('cuda')).to_mkldnn() # some factory functions for creator in [torch.ones, torch.randn, torch.rand]: - with self.assertRaises(RuntimeError) as context: + with self.assertRaises(RuntimeError): creator(1, 2, 3, 4, dtype=torch.float, device=torch.device('cpu'), layout=torch._mkldnn) def test_mkldnn_conv_shapecheck(self): @@ -1016,7 +1016,7 @@ class TestMkldnn(TestCase): # TODO: support training for train in [False]: bn = bn_module[dim](channels).float().train(train) - mkldnn_bn = mkldnn_utils.to_mkldnn(copy.deepcopy(bn)) + mkldnn_bn = mkldnn_utils.to_mkldnn(copy.deepcopy(bn)) # noqa: F841 if torch.ops.mkldnn._is_mkldnn_bf16_supported(): y = bn(input.to_mkldnn().to_dense()) y_bf16 = bn(input.to_mkldnn().to_dense(torch.float)) diff --git a/test/test_mobile_optimizer.py b/test/test_mobile_optimizer.py index 2d97a41cb23..1f4a86eecd4 100644 --- a/test/test_mobile_optimizer.py +++ b/test/test_mobile_optimizer.py @@ -37,7 +37,6 @@ class TestOptimizer(TestCase): dilation = 1 input_channels = input_channels_per_group * groups output_channels = output_channels_per_group * groups - kernels = (kernel_h, kernel_w) strides = (stride_h, stride_w) paddings = (pad_h, pad_w) dilations = (dilation, dilation) @@ -303,7 +302,7 @@ class TestOptimizer(TestCase): torch.ao.quantization.convert(model, inplace=True) model = torch.jit.script(model) # this line should not have 
ASAN failures - model_optim = optimize_for_mobile(model) + optimize_for_mobile(model) def test_generate_mobile_module_lints(self): class MyTestModule(torch.nn.Module): diff --git a/test/test_module_tracker.py b/test/test_module_tracker.py index fc153b75aa5..50a5e3ff1a6 100644 --- a/test/test_module_tracker.py +++ b/test/test_module_tracker.py @@ -95,12 +95,12 @@ class TestModuleTracker(TestCase): inp = torch.rand(1, 2, requires_grad=True) # Should not fail - with ModuleTracker() as tracker: + with ModuleTracker(): res = mod(inp) res.sum().backward() # Should not fail - with ModuleTracker() as tracker: + with ModuleTracker(): res = checkpoint(lambda inp: mod(inp), inp) res.sum().backward() diff --git a/test/test_modules.py b/test/test_modules.py index 167a87325d0..b2655584a36 100644 --- a/test/test_modules.py +++ b/test/test_modules.py @@ -915,7 +915,6 @@ class TestModule(TestCase): # parameters will be wrapped in an nn.Parameter before swapping # which will cause the ._cdata to change g_no_swap = device_ == prev_device and dtype_ == prev_dtype - prev_prev_device, prev_prev_dtype = prev_device, prev_dtype prev_device, prev_dtype = device_, dtype_ p_ids_before = [id(p) for p in m.parameters()] diff --git a/test/test_monitor.py b/test/test_monitor.py index ff092478d6e..cf9cecc356f 100644 --- a/test/test_monitor.py +++ b/test/test_monitor.py @@ -104,7 +104,7 @@ class TestMonitor(TestCase): wait_counter = _WaitCounter( "test_wait_counter", ) - with wait_counter.guard() as wcg: + with wait_counter.guard(): pass diff --git a/test/test_mps.py b/test/test_mps.py index 6679344df0e..3a7ae3d1f28 100644 --- a/test/test_mps.py +++ b/test/test_mps.py @@ -1,5 +1,5 @@ # Owner(s): ["module: mps"] - +# ruff: noqa: F841 import io import sys import math diff --git a/test/test_multiprocessing.py b/test/test_multiprocessing.py index 3acccd5ae62..7fd650ddc9c 100644 --- a/test/test_multiprocessing.py +++ b/test/test_multiprocessing.py @@ -1,5 +1,5 @@ # Owner(s): ["module: multiprocessing"] - +# ruff: noqa: F841 import contextlib import copy import gc diff --git a/test/test_multiprocessing_spawn.py b/test/test_multiprocessing_spawn.py index a25f23012ab..dd391661091 100644 --- a/test/test_multiprocessing_spawn.py +++ b/test/test_multiprocessing_spawn.py @@ -185,7 +185,7 @@ class _TestMultiProcessing: context = mp.get_context(self.start_method) pids_queue = context.Queue() nested_child_sleep = 20.0 - mp_context = mp.start_processes( + mp_context = mp.start_processes( # noqa: F841 fn=_test_nested, args=(pids_queue, nested_child_sleep, self.start_method), nprocs=1, diff --git a/test/test_namedtensor.py b/test/test_namedtensor.py index 8387babc798..0076da2da48 100644 --- a/test/test_namedtensor.py +++ b/test/test_namedtensor.py @@ -1,5 +1,5 @@ # Owner(s): ["module: named tensor"] - +# ruff: noqa: F841 import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_NUMPY from torch.testing._internal.common_utils import skipIfTorchDynamo diff --git a/test/test_nestedtensor.py b/test/test_nestedtensor.py index 25e1bae1774..e85c087eff6 100644 --- a/test/test_nestedtensor.py +++ b/test/test_nestedtensor.py @@ -1,5 +1,5 @@ # Owner(s): ["module: nestedtensor"] - +# ruff: noqa: F841 import ast import io import itertools diff --git a/test/test_nn.py b/test/test_nn.py index 24b7969ef67..b97744ccb70 100644 --- a/test/test_nn.py +++ b/test/test_nn.py @@ -1,4 +1,5 @@ # Owner(s): ["module: nn"] +# ruff: noqa: F841 import contextlib import math diff --git a/test/test_numba_integration.py 
b/test/test_numba_integration.py index 8bed7220fe8..dc63d4910f5 100644 --- a/test/test_numba_integration.py +++ b/test/test_numba_integration.py @@ -186,7 +186,7 @@ class TestNumbaIntegration(common.TestCase): with self.assertRaises(TypeError): numba.cuda.as_cuda_array(sparset) - sparse_cuda_t = sparset.cuda() + sparset.cuda() self.assertFalse(numba.cuda.is_cuda_array(sparset)) with self.assertRaises(TypeError): diff --git a/test/test_numpy_interop.py b/test/test_numpy_interop.py index 4b96d2be1f4..bff7681bbc8 100644 --- a/test/test_numpy_interop.py +++ b/test/test_numpy_interop.py @@ -34,7 +34,7 @@ class TestNumPyInterop(TestCase): @onlyCPU def test_numpy_unresizable(self, device) -> None: x = np.zeros((2, 2)) - y = torch.from_numpy(x) + y = torch.from_numpy(x) # noqa: F841 with self.assertRaises(ValueError): x.resize((5, 5)) diff --git a/test/test_openmp.py b/test/test_openmp.py index 95a2bd0fdc5..f8ee9c1a2b2 100644 --- a/test/test_openmp.py +++ b/test/test_openmp.py @@ -38,8 +38,8 @@ class TestOpenMP_ParallelFor(TestCase): p = psutil.Process() # warm up for 5 runs, then things should be stable for the last 5 last_rss = collections.deque(maxlen=5) - for n in range(10): - for i in range(runs): + for _ in range(10): + for _ in range(runs): self.model(self.x) last_rss.append(p.memory_info().rss) return last_rss diff --git a/test/test_ops.py b/test/test_ops.py index 7b182ce47e3..4d59d9b18f5 100644 --- a/test/test_ops.py +++ b/test/test_ops.py @@ -1,5 +1,4 @@ # Owner(s): ["module: unknown"] - import contextlib import copy import inspect @@ -1944,7 +1943,7 @@ class TestCompositeCompliance(TestCase): output_grads_copy.append(output_grad.detach().clone()) output_grads.append(torch._lazy_clone(output_grad)) - input_grads = torch.autograd.grad( + torch.autograd.grad( results, leaf_tensors, output_grads, diff --git a/test/test_ops_jit.py b/test/test_ops_jit.py index ecc4518e0cd..a9a7ec5339b 100644 --- a/test/test_ops_jit.py +++ b/test/test_ops_jit.py @@ -335,7 +335,7 @@ class TestJit(JitCommonTestCase): try: inp = clone_input_helper(sample.input) scripted(inp) - except Exception as e: + except Exception: continue self.fail( "Inplace operation on integer tensor that should be promoted to float didn't fail!" 
diff --git a/test/test_optim.py b/test/test_optim.py index 8b6cc66681a..5936fc5f527 100644 --- a/test/test_optim.py +++ b/test/test_optim.py @@ -1275,7 +1275,6 @@ class TestOptimRenewed(TestCase): torch.randn(2, 3, requires_grad=False, device=device, dtype=dtype) for _ in range(2) ] - old_params = [p.detach().clone() for p in params] def closure(): return torch.tensor([1], device=device, dtype=dtype) @@ -1632,7 +1631,6 @@ class TestOptimRenewed(TestCase): return closure_loss if optim_info.step_requires_closure else None for optim_input in all_optim_inputs: - kwargs = optim_input.kwargs optimizer = optim_cls(params, **optim_input.kwargs) for _ in range(3): optimizer.step(closure) diff --git a/test/test_overrides.py b/test/test_overrides.py index 560b8033266..51ac94a068e 100644 --- a/test/test_overrides.py +++ b/test/test_overrides.py @@ -321,7 +321,6 @@ def implements_tensor_like(torch_function): return decorator def generate_tensor_like_torch_implementations(): - torch_vars = vars(torch) untested_funcs = [] testing_overrides = get_testing_overrides() # test/test_cpp_api_parity.py monkeypatches torch.nn to have a new @@ -1542,8 +1541,6 @@ class TestTorchFunctionMode(TestCase): self.assertFalse(called) def test_disable_enable_subclass(self): - called = False - class A(torch.Tensor): pass @@ -1645,7 +1642,6 @@ class TestTorchFunctionMode(TestCase): base_mode = BaseTorchFunctionMode() with base_mode: torch.set_default_device("cpu") - x = torch.ones(2, 2) stack = get_stack() self.assertIsInstance(stack[0], DeviceContext) self.assertEqual(stack[0].device, torch.device("cpu")) diff --git a/test/test_prims.py b/test/test_prims.py index c9041d54992..f0fb606d1c5 100644 --- a/test/test_prims.py +++ b/test/test_prims.py @@ -340,7 +340,7 @@ $1: f32[2] = torch._ops.prims.sin.default($0)""") def test_clone_complex(self): with torch._dispatch.python.enable_python_dispatcher(): x = torch.randn(4, dtype=torch.complex64, device='meta').conj() - out = x + 1 + x + 1 def test_check_deprecation_warning(self): with self.assertWarnsRegex(FutureWarning, 'will be removed in the future'): @@ -408,7 +408,7 @@ class TestRefs(TestCase): # enables prim decomps with torch._dispatch.python.enable_python_dispatcher(): x = torch.ones(4) - y = x.to(device="meta") + x.to(device="meta") def test_inferred_tags(self): self.assertEqual(torch.ops.prims.normal.default.tags, (torch.Tag.nondeterministic_seeded, torch.Tag.pt2_compliant_tag)) diff --git a/test/test_proxy_tensor.py b/test/test_proxy_tensor.py index 3053b49723f..74a13790a9a 100644 --- a/test/test_proxy_tensor.py +++ b/test/test_proxy_tensor.py @@ -1,4 +1,5 @@ # Owner(s): ["module: ProxyTensor"] +# ruff: noqa: F841 from torch.testing._internal.common_utils import TestCase, run_tests import torch diff --git a/test/test_python_dispatch.py b/test/test_python_dispatch.py index 1c932485fc1..d54d5dcf1cc 100644 --- a/test/test_python_dispatch.py +++ b/test/test_python_dispatch.py @@ -1,4 +1,5 @@ # Owner(s): ["module: __torch_dispatch__"] +# ruff: noqa: F841 import logging import sys diff --git a/test/test_pytree.py b/test/test_pytree.py index 7e2edac6e50..fb8739e4594 100644 --- a/test/test_pytree.py +++ b/test/test_pytree.py @@ -1024,7 +1024,7 @@ TreeSpec(tuple, None, [*, with self.assertRaisesRegex( NotImplementedError, "No registered serialization name" ): - roundtrip_spec = py_pytree.treespec_dumps(spec) + py_pytree.treespec_dumps(spec) def test_pytree_custom_type_serialize(self): class DummyType: @@ -1105,7 +1105,7 @@ TreeSpec(tuple, None, [*, py_pytree.treespec_dumps(spec, 
-1) serialized_spec = py_pytree.treespec_dumps(spec) - protocol, data = json.loads(serialized_spec) + _, data = json.loads(serialized_spec) bad_protocol_serialized_spec = json.dumps((-1, data)) with self.assertRaisesRegex(ValueError, "Unknown protocol"): @@ -1190,7 +1190,7 @@ TreeSpec(tuple, None, [*, def test_tree_flatten_with_path_is_leaf(self): leaf_dict = {"foo": [(3)]} pytree = (["hello", [1, 2], leaf_dict],) - key_leaves, spec = py_pytree.tree_flatten_with_path( + key_leaves, _ = py_pytree.tree_flatten_with_path( pytree, is_leaf=lambda x: isinstance(x, dict) ) self.assertTrue(key_leaves[-1][1] is leaf_dict) diff --git a/test/test_reductions.py b/test/test_reductions.py index 57486414010..8ce65c78985 100644 --- a/test/test_reductions.py +++ b/test/test_reductions.py @@ -61,7 +61,7 @@ def _generate_input(shape, dtype, device, with_extremal): # TODO: replace with make_tensor def _rand_shape(dim, min_size, max_size): shape = [] - for i in range(dim): + for _ in range(dim): shape.append(random.randint(min_size, max_size)) return tuple(shape) @@ -3643,7 +3643,7 @@ as the input tensor excluding its innermost dimension'): out_dtype = torch.bool # output of all/any is bool irrespective of input dtype xb = x.to(dtype) - yb = x.to(dtype) + # any self.assertEqual((2, 0), xb.any(2).shape) self.assertEqual((2, 0, 1), xb.any(2, keepdim=True).shape) diff --git a/test/test_schema_check.py b/test/test_schema_check.py index af36822e8e2..9e1d6a6f125 100644 --- a/test/test_schema_check.py +++ b/test/test_schema_check.py @@ -1,4 +1,5 @@ # Owner(s): ["oncall: jit"] +# ruff: noqa: F841 import os import sys diff --git a/test/test_segment_reductions.py b/test/test_segment_reductions.py index 8a1d09509de..9118674c763 100644 --- a/test/test_segment_reductions.py +++ b/test/test_segment_reductions.py @@ -239,7 +239,7 @@ class TestSegmentReductions(TestCase): ) ) def test_multi_d_simple(self, device, dtypes): - val_dtype, length_type = dtypes + val_dtype, _ = dtypes axis = 0 lengths = [1, 2, 3, 0] data = [[1, 1], [float("nan"), 1], [3, float("nan")], [4, 1], [3, 2], [2, 3]] @@ -489,7 +489,7 @@ class TestSegmentReductions(TestCase): ) ) def test_multi_d(self, device, dtypes): - val_dtype, length_type = dtypes + val_dtype, _ = dtypes axis = 0 lengths = [0, 2, 3, 0] data = np.arange(50).reshape(5, 2, 5).tolist() diff --git a/test/test_serialization.py b/test/test_serialization.py index d239dcdde7c..b90d8a89202 100644 --- a/test/test_serialization.py +++ b/test/test_serialization.py @@ -1,4 +1,5 @@ # Owner(s): ["module: serialization"] +# ruff: noqa: F841 import contextlib import copy diff --git a/test/test_shape_ops.py b/test/test_shape_ops.py index ddc5421dd53..f89bb81745f 100644 --- a/test/test_shape_ops.py +++ b/test/test_shape_ops.py @@ -751,7 +751,7 @@ class TestShapeOps(TestCase): return tuple_result, nontuple_result, out with self.assertRaises(RuntimeError): - scripted_foo = torch.jit.script(_foo) + torch.jit.script(_foo) # Verifies that JIT tracing works fine traced_foo = torch.jit.trace(_foo, t) diff --git a/test/test_sort_and_select.py b/test/test_sort_and_select.py index 6d37607ffbf..204cf02f11a 100644 --- a/test/test_sort_and_select.py +++ b/test/test_sort_and_select.py @@ -54,7 +54,6 @@ class TestSortAndSelect(TestCase): f'unknown order "{order}", must be "ascending" or "descending"' ) - are_ordered = True for k in range(1, SIZE): self.assertTrue( check_order(mxx[:, k - 1], mxx[:, k]), @@ -62,7 +61,6 @@ class TestSortAndSelect(TestCase): ) seen = set() - indicesCorrect = True size0 = x.size(0) size = 
x.size(x.dim() - 1) x = x.tolist() @@ -720,7 +718,8 @@ class TestSortAndSelect(TestCase): dtype=dtype, device=device, ) - expected_y_unique = torch.tensor( + + expected_y_unique = torch.tensor( # noqa: F841 [[0, 1], [1, 2], [3, 4], [0, 1], [3, 4], [1, 2]], dtype=dtype, device=device, diff --git a/test/test_sparse.py b/test/test_sparse.py index 7a80d578314..a1da3872fd8 100644 --- a/test/test_sparse.py +++ b/test/test_sparse.py @@ -1,4 +1,5 @@ # Owner(s): ["module: sparse"] +# ruff: noqa: F841 import torch import itertools diff --git a/test/test_sparse_csr.py b/test/test_sparse_csr.py index a63620dcdbe..d174587b887 100644 --- a/test/test_sparse_csr.py +++ b/test/test_sparse_csr.py @@ -1,4 +1,5 @@ # Owner(s): ["module: sparse"] +# ruff: noqa: F841 import torch import random diff --git a/test/test_sparse_semi_structured.py b/test/test_sparse_semi_structured.py index e871fc750d5..532a55f0bfd 100644 --- a/test/test_sparse_semi_structured.py +++ b/test/test_sparse_semi_structured.py @@ -1,4 +1,5 @@ # Owner(s): ["module: sparse"] +# ruff: noqa: F841 import itertools import random import unittest diff --git a/test/test_spectral_ops.py b/test/test_spectral_ops.py index e5b2c32e38a..580a7a3bfde 100644 --- a/test/test_spectral_ops.py +++ b/test/test_spectral_ops.py @@ -1,4 +1,5 @@ # Owner(s): ["module: fft"] +# ruff: noqa: F841 import torch import unittest diff --git a/test/test_stateless.py b/test/test_stateless.py index a62e88d2caf..983872992e4 100644 --- a/test/test_stateless.py +++ b/test/test_stateless.py @@ -182,13 +182,13 @@ class TestStatelessFunctionalAPI(TestCase): rm = torch.zeros(10) parameters = {'running_mean': rm} prev_rm = module.running_mean.clone() - res = functional_call(module, parameters, x) + functional_call(module, parameters, x) cur_rm = module.running_mean self.assertEqual(cur_rm, prev_rm) self.assertEqual(rm, torch.full((10,), 12.8)) # Now run functional without reparametrization and check that the module has # been updated - res = functional_call(module, {}, x) + functional_call(module, {}, x) self.assertEqual(module.running_mean, torch.full((10,), 12.8)) @parametrize("functional_call", [ @@ -272,8 +272,6 @@ class TestStatelessFunctionalAPI(TestCase): def test_reparametrize_some_weights(self, functional_call): module = MockModule() weight = torch.tensor([[2.0]]) - bias = torch.tensor([5.0]) - buffer = torch.tensor([3.0]) extra = torch.tensor([1.0]) parameters = {'l1.weight': weight} diff --git a/test/test_static_runtime.py b/test/test_static_runtime.py index 5665687446b..a5cf00e9522 100644 --- a/test/test_static_runtime.py +++ b/test/test_static_runtime.py @@ -1,4 +1,5 @@ # Owner(s): ["module: unknown"] +# ruff: noqa: F841 import unittest from typing import Dict, Optional diff --git a/test/test_subclass.py b/test/test_subclass.py index d3bb54ea288..36d870512cc 100644 --- a/test/test_subclass.py +++ b/test/test_subclass.py @@ -222,7 +222,7 @@ class TestSubclass(TestCase): m = MyLazyModule() self.assertTrue(m.has_uninitialized_params()) - output = m(self._create_tensor(tensor_cls)) + m(self._create_tensor(tensor_cls)) self.assertFalse(m.has_uninitialized_params()) self.assertIsInstance(m.param, tensor_cls) @@ -256,7 +256,7 @@ class TestSubclass(TestCase): return r with self.assertRaisesRegex(RuntimeError, r"requires that detach\(\) returns an instance of the same type"): - param = nn.Parameter(NonRewrappingTensor(torch.randn(3))) + nn.Parameter(NonRewrappingTensor(torch.randn(3))) def test_tensor_subclass_storage_data_accesses_throw(self): from 
torch.testing._internal.logging_tensor import LoggingTensor @@ -265,7 +265,6 @@ class TestSubclass(TestCase): # Accessing storage on a tensor subclass is valid storage = x_log.untyped_storage() # This includes accessing metadata on the storage - sz = storage.size() # But storage methods that access data will throw with self.assertRaisesRegex(RuntimeError, "on an invalid python storage"): storage.data_ptr() diff --git a/test/test_tensor_creation_ops.py b/test/test_tensor_creation_ops.py index 26d62d000ab..315089cf2cb 100644 --- a/test/test_tensor_creation_ops.py +++ b/test/test_tensor_creation_ops.py @@ -1,4 +1,5 @@ # Owner(s): ["module: tensor creation"] +# ruff: noqa: F841 import torch import numpy as np diff --git a/test/test_tensorboard.py b/test/test_tensorboard.py index 24f2687c7dc..c5a2e9702b2 100644 --- a/test/test_tensorboard.py +++ b/test/test_tensorboard.py @@ -409,11 +409,11 @@ class TestTensorBoardSummary(BaseTestCase): ) def test_list_input(self): - with self.assertRaises(Exception) as e_info: + with self.assertRaises(Exception): summary.histogram("dummy", [1, 3, 4, 5, 6], "tensorflow") def test_empty_input(self): - with self.assertRaises(Exception) as e_info: + with self.assertRaises(Exception): summary.histogram("dummy", np.ndarray(0), "tensorflow") def test_image_with_boxes(self): @@ -766,7 +766,7 @@ class TestTensorBoardPytorchGraph(BaseTestCase): w.add_graph(myMLP(), dummy_input) def test_wrong_input_size(self): - with self.assertRaises(RuntimeError) as e_info: + with self.assertRaises(RuntimeError): dummy_input = torch.rand(1, 9) model = torch.nn.Linear(3, 5) with self.createSummaryWriter() as w: @@ -867,7 +867,7 @@ class TestTensorBoardNumpy(BaseTestCase): def test_pytorch_np_expect_fail(self): with self.assertRaises(NotImplementedError): - res = make_np({"pytorch": 1.0}) + make_np({"pytorch": 1.0}) class TestTensorProtoSummary(BaseTestCase): diff --git a/test/test_tensorexpr.py b/test/test_tensorexpr.py index c6e3c66f8eb..3872fc1a321 100644 --- a/test/test_tensorexpr.py +++ b/test/test_tensorexpr.py @@ -1,4 +1,5 @@ # Owner(s): ["NNC"] +# ruff: noqa: F841 import numpy as np import torch diff --git a/test/test_testing.py b/test/test_testing.py index bdb045ca84a..2ff5cb00a02 100644 --- a/test/test_testing.py +++ b/test/test_testing.py @@ -2314,7 +2314,7 @@ class TestImports(TestCase): ignored_modules.append("torch.testing._internal.common_distributed") torch_dir = os.path.dirname(torch.__file__) - for base, folders, files in os.walk(torch_dir): + for base, _, files in os.walk(torch_dir): prefix = os.path.relpath(base, os.path.dirname(torch_dir)).replace(os.path.sep, ".") for f in files: if not f.endswith(".py"): diff --git a/test/test_type_hints.py b/test/test_type_hints.py index 696b4fbe9c2..0aae54be9b6 100644 --- a/test/test_type_hints.py +++ b/test/test_type_hints.py @@ -39,7 +39,6 @@ def get_all_examples(): "_np", "_InputT", } - allexamples = "" example_file_lines = [ "# mypy: allow-untyped-defs", diff --git a/test/test_type_promotion.py b/test/test_type_promotion.py index a4bbb8394da..1548b882fa0 100644 --- a/test/test_type_promotion.py +++ b/test/test_type_promotion.py @@ -922,7 +922,6 @@ class TestTypePromotion(TestCase): def test_sparse_div_promotion(self, device, dtype): for op in (torch.div, torch.true_divide): dividend = torch.randn(5, device=device).to(dtype) - divisor = 2 dividend_sparse = dividend.to_sparse() casting_result = dividend.to(torch.get_default_dtype()) / 2 self.assertEqual(casting_result, op(dividend_sparse, 2).to_dense()) diff --git 
a/test/test_typing.py b/test/test_typing.py index 703b5604357..7df3096fcc6 100644 --- a/test/test_typing.py +++ b/test/test_typing.py @@ -187,7 +187,7 @@ class TestTyping(TestCase): name_fn=lambda b: os.path.relpath(b, start=FAIL_DIR), ) def test_fail(self, path): - __tracebackhide__ = True + __tracebackhide__ = True # noqa: F841 with open(path) as fin: lines = fin.readlines() @@ -226,7 +226,7 @@ class TestTyping(TestCase): name_fn=lambda b: os.path.relpath(b, start=REVEAL_DIR), ) def test_reveal(self, path): - __tracebackhide__ = True + __tracebackhide__ = True # noqa: F841 with open(path) as fin: lines = _parse_reveals(fin) diff --git a/test/test_unary_ufuncs.py b/test/test_unary_ufuncs.py index 7ea1155165f..8a6d111364c 100644 --- a/test/test_unary_ufuncs.py +++ b/test/test_unary_ufuncs.py @@ -686,7 +686,7 @@ class TestUnaryUfuncs(TestCase): for dtype in (torch.half, torch.float, torch.double): a = torch.zeros(10, dtype=dtype) with self.assertRaises(TypeError): - b = ~a + ~a @dtypes(torch.complex64, torch.complex128) def test_abs_angle_complex_to_float(self, device, dtype): diff --git a/test/test_view_ops.py b/test/test_view_ops.py index 1d752dfe1e5..5ab6544a93c 100644 --- a/test/test_view_ops.py +++ b/test/test_view_ops.py @@ -74,7 +74,7 @@ def _generate_input(shape, dtype, device, with_extremal): # TODO: replace this with make_tensor() in common_utils.py def _rand_shape(dim, min_size, max_size): shape = [] - for i in range(dim): + for _ in range(dim): shape.append(random.randint(min_size, max_size)) return tuple(shape) @@ -1546,7 +1546,7 @@ class TestOldViewOps(TestCase): def _test_atleast_dim(self, torch_fn, np_fn, device, dtype): for ndims in range(0, 5): shape = _rand_shape(ndims, min_size=5, max_size=10) - for n in range(ndims + 1): + for _ in range(ndims + 1): for with_extremal in [False, True]: for contiguous in [False, True]: # Generate Input. 
diff --git a/test/test_vulkan.py b/test/test_vulkan.py index a93244bcc66..b8b7cb07f2f 100644 --- a/test/test_vulkan.py +++ b/test/test_vulkan.py @@ -23,7 +23,7 @@ class TestVulkanRewritePass(TestCase): scripted_model = torch.jit.script(module_instance) scripted_model.eval() input_data = torch.normal(1, 20, size=data_shape) - ref_result = scripted_model(input_data) + scripted_model(input_data) torch._C._jit_pass_vulkan_insert_prepacked_ops(scripted_model._c) if fuse_clamping_ops or prepack_removal: scripted_model._c = torch._C._freeze_module(scripted_model._c) @@ -58,7 +58,6 @@ class TestVulkanRewritePass(TestCase): dilation = 1 input_channels = input_channels_per_group * groups output_channels = output_channels_per_group * groups - kernels = (kernel_h, kernel_w) strides = (stride_h, stride_w) paddings = (pad_h, pad_w) dilations = (dilation, dilation) diff --git a/test/test_weak.py b/test/test_weak.py index e8b6ee6f556..30fef4d0e9d 100644 --- a/test/test_weak.py +++ b/test/test_weak.py @@ -36,8 +36,9 @@ class WeakTest(TestCase): def test_make_weak_keyed_dict_from_weak_keyed_dict(self): o = torch.randn(3) dict = WeakIdKeyDictionary({o: 364}) - dict2 = WeakIdKeyDictionary(dict) self.assertEqual(dict[o], 364) + dict2 = WeakIdKeyDictionary(dict) + self.assertEqual(dict2[o], 364) def check_popitem(self, klass, key1, value1, key2, value2): weakdict = klass() diff --git a/test/test_xnnpack_integration.py b/test/test_xnnpack_integration.py index d7ae1117757..b6018b99af4 100644 --- a/test/test_xnnpack_integration.py +++ b/test/test_xnnpack_integration.py @@ -860,7 +860,6 @@ class TestXNNPACKRewritePass(TestCase): dilation = 1 input_channels = input_channels_per_group * groups output_channels = output_channels_per_group * groups - kernels = (kernel_h, kernel_w) strides = (stride_h, stride_w) paddings = (pad_h, pad_w) output_paddings = (output_pad_h, output_pad_w) @@ -941,7 +940,7 @@ class TestXNNPACKRewritePass(TestCase): Conv2D(), pattern_count_map, data_shape ) - transpose_data_shape = (batch_size, input_channels, height, width) + transpose_data_shape = (batch_size, input_channels, height, width) # noqa: F841 transpose_pattern_count_map = { "Tensor = aten::conv_transpose2d": -1, "prepacked::conv2d_transpose_clamp_prepack": 1, diff --git a/test/test_xpu.py b/test/test_xpu.py index 741d99c3755..19cda5abae2 100644 --- a/test/test_xpu.py +++ b/test/test_xpu.py @@ -410,7 +410,7 @@ print(torch.xpu.device_count()) self.assertEqual(copy.get_device(), original.get_device()) def test_out_of_memory(self): - tensor = torch.zeros(1024, device="xpu") + tensor = torch.zeros(1024, device="xpu") # noqa: F841 with self.assertRaisesRegex(RuntimeError, "Tried to allocate 800000000.00 GiB"): torch.empty(1024 * 1024 * 1024 * 800000000, dtype=torch.int8, device="xpu") @@ -456,7 +456,7 @@ print(torch.xpu.device_count()) def test_device_memory_allocated(self): device_count = torch.xpu.device_count() current_alloc = [torch.xpu.memory_allocated(idx) for idx in range(device_count)] - x = torch.ones(10, device="xpu:0") + torch.ones(10, device="xpu:0") self.assertGreater(torch.xpu.memory_allocated(0), current_alloc[0]) self.assertTrue( all( @@ -474,7 +474,7 @@ print(torch.xpu.device_count()) torch.xpu.empty_cache() before_free_bytes, before_total_bytes = torch.xpu.mem_get_info() # increasing to 1MB to force acquiring a new block. 
- t = torch.randn(1024 * 256, device="xpu") + torch.randn(1024 * 256, device="xpu") torch.xpu.synchronize() after_free_bytes, after_total_bytes = torch.xpu.mem_get_info() diff --git a/test/torch_np/numpy_tests/core/test_dtype.py b/test/torch_np/numpy_tests/core/test_dtype.py index 4cbcef644ba..8fa68f4cdcd 100644 --- a/test/torch_np/numpy_tests/core/test_dtype.py +++ b/test/torch_np/numpy_tests/core/test_dtype.py @@ -327,8 +327,7 @@ class TestMisc(TestCase): @skipif(sys.version_info >= (3, 9), reason="Requires python 3.9") def test_class_getitem_38(self) -> None: - match = "Type subscription requires python >= 3.9" - with pytest.raises(TypeError): # , match=match): + with pytest.raises(TypeError): np.dtype[Any] diff --git a/test/torch_np/numpy_tests/core/test_einsum.py b/test/torch_np/numpy_tests/core/test_einsum.py index 5432fb63d18..96c85e307bd 100644 --- a/test/torch_np/numpy_tests/core/test_einsum.py +++ b/test/torch_np/numpy_tests/core/test_einsum.py @@ -1,4 +1,5 @@ # Owner(s): ["module: dynamo"] +# ruff: noqa: F841 import functools import itertools diff --git a/test/torch_np/numpy_tests/core/test_indexing.py b/test/torch_np/numpy_tests/core/test_indexing.py index 087875fa57e..55d7aa4675d 100644 --- a/test/torch_np/numpy_tests/core/test_indexing.py +++ b/test/torch_np/numpy_tests/core/test_indexing.py @@ -1020,11 +1020,12 @@ class TestMultiIndexingAutomated(TestCase): # np.VisibleDeprecationWarning moved to np.exceptions in numpy>=2.0.0 # np.exceptions only available in numpy>=1.25.0 has_exceptions_ns = hasattr(np, "exceptions") - VisibleDeprecationWarning = ( + VisibleDeprecationWarning = ( # noqa: F841 np.exceptions.VisibleDeprecationWarning if has_exceptions_ns else np.VisibleDeprecationWarning ) + # FIXME(rec): should this use VisibleDeprecationWarning instead? 
warnings.filterwarnings("error", "", np.VisibleDeprecationWarning) def isskip(idx): diff --git a/test/torch_np/numpy_tests/core/test_multiarray.py b/test/torch_np/numpy_tests/core/test_multiarray.py index 88986d2e215..fcafbcc2901 100644 --- a/test/torch_np/numpy_tests/core/test_multiarray.py +++ b/test/torch_np/numpy_tests/core/test_multiarray.py @@ -1,4 +1,5 @@ # Owner(s): ["module: dynamo"] +# ruff: noqa: F841 import builtins import collections.abc diff --git a/test/torch_np/numpy_tests/core/test_numeric.py b/test/torch_np/numpy_tests/core/test_numeric.py index c344bf6e8f7..12a3a6bc639 100644 --- a/test/torch_np/numpy_tests/core/test_numeric.py +++ b/test/torch_np/numpy_tests/core/test_numeric.py @@ -1,4 +1,5 @@ # Owner(s): ["module: dynamo"] +# ruff: noqa: F841 import functools import itertools diff --git a/test/torch_np/numpy_tests/core/test_scalar_methods.py b/test/torch_np/numpy_tests/core/test_scalar_methods.py index fd9596d7bff..629fbd7ac56 100644 --- a/test/torch_np/numpy_tests/core/test_scalar_methods.py +++ b/test/torch_np/numpy_tests/core/test_scalar_methods.py @@ -228,8 +228,7 @@ class TestClassGetitemMisc(TestCase): @skipif(sys.version_info >= (3, 9), reason="Requires python 3.8") @parametrize("cls", [np.number, np.complexfloating, np.int64]) def test_class_getitem_38(self, cls: Type[np.number]) -> None: - match = "Type subscription requires python >= 3.9" - with pytest.raises(TypeError): # , match=match): + with pytest.raises(TypeError): cls[Any] diff --git a/test/torch_np/numpy_tests/core/test_scalarmath.py b/test/torch_np/numpy_tests/core/test_scalarmath.py index 00d3627155c..05492a49d0e 100644 --- a/test/torch_np/numpy_tests/core/test_scalarmath.py +++ b/test/torch_np/numpy_tests/core/test_scalarmath.py @@ -116,7 +116,7 @@ class TestTypes(TestCase): ) def test_type_create(self): - for k, atype in enumerate(types): + for _, atype in enumerate(types): a = np.array([1, 2, 3], atype) b = atype([1, 2, 3]) assert_equal(a, b) @@ -125,7 +125,7 @@ class TestTypes(TestCase): def test_leak(self): # test leak of scalar objects # a leak would show up in valgrind as still-reachable of ~2.6MB - for i in range(200000): + for _ in range(200000): np.add(1, 1) diff --git a/test/torch_np/numpy_tests/core/test_shape_base.py b/test/torch_np/numpy_tests/core/test_shape_base.py index 74af59ce263..0b2024793e6 100644 --- a/test/torch_np/numpy_tests/core/test_shape_base.py +++ b/test/torch_np/numpy_tests/core/test_shape_base.py @@ -322,7 +322,7 @@ class TestConcatenate(TestCase): a = np.ones((1, 2, 3)) b = np.ones((2, 2, 3)) axis = list(range(3)) - for i in range(3): + for _ in range(3): np.concatenate((a, b), axis=axis[0]) # OK # assert_raises_regex( assert_raises( @@ -427,7 +427,6 @@ class TestConcatenate(TestCase): a = array([1, 2]) b = array([3, 4]) n = [1, 2] - res = array([1, 2, 3, 4]) assert_raises(TypeError, operator.concat, a, b) assert_raises(TypeError, operator.concat, a, n) assert_raises(TypeError, operator.concat, n, a) diff --git a/test/torch_np/numpy_tests/lib/test_function_base.py b/test/torch_np/numpy_tests/lib/test_function_base.py index 2d74321bee7..7eccf406c63 100644 --- a/test/torch_np/numpy_tests/lib/test_function_base.py +++ b/test/torch_np/numpy_tests/lib/test_function_base.py @@ -1,4 +1,5 @@ # Owner(s): ["module: dynamo"] +# ruff: noqa: F841 import functools import math diff --git a/test/torch_np/numpy_tests/lib/test_histograms.py b/test/torch_np/numpy_tests/lib/test_histograms.py index fadc8d96b99..f485a8c938f 100644 --- a/test/torch_np/numpy_tests/lib/test_histograms.py 
+++ b/test/torch_np/numpy_tests/lib/test_histograms.py @@ -1,4 +1,5 @@ # Owner(s): ["module: dynamo"] +# ruff: noqa: F841 import functools from unittest import expectedFailure as xfail, skipIf diff --git a/test/torch_np/numpy_tests/lib/test_twodim_base.py b/test/torch_np/numpy_tests/lib/test_twodim_base.py index 48bbb38390b..728d89ed305 100644 --- a/test/torch_np/numpy_tests/lib/test_twodim_base.py +++ b/test/torch_np/numpy_tests/lib/test_twodim_base.py @@ -1,4 +1,5 @@ # Owner(s): ["module: dynamo"] +# ruff: noqa: F841 """Test functions for matrix module diff --git a/test/torch_np/numpy_tests/linalg/test_linalg.py b/test/torch_np/numpy_tests/linalg/test_linalg.py index ae3a4bd55f5..f53cf346a61 100644 --- a/test/torch_np/numpy_tests/linalg/test_linalg.py +++ b/test/torch_np/numpy_tests/linalg/test_linalg.py @@ -1,5 +1,5 @@ # Owner(s): ["module: dynamo"] - +# ruff: noqa: F841 """ Test functions for linalg module """ diff --git a/test/torch_np/test_basic.py b/test/torch_np/test_basic.py index 4f7551bb471..772e9c244ad 100644 --- a/test/torch_np/test_basic.py +++ b/test/torch_np/test_basic.py @@ -482,8 +482,8 @@ class TestDivmod(TestCase): assert_equal(rem, x1 % x2) out1, out2 = out - assert quot is out[0] - assert rem is out[1] + assert quot is out1 + assert rem is out2 def test_divmod_out_list(self): x1 = [4, 5, 6] diff --git a/test/torch_np/test_reductions.py b/test/torch_np/test_reductions.py index 6963d2cb5d3..e2e5dc3fa54 100644 --- a/test/torch_np/test_reductions.py +++ b/test/torch_np/test_reductions.py @@ -155,13 +155,13 @@ class TestMean(TestCase): assert_allclose(a3d.mean(axis=2, where=_wh_partial), np.array(_res)) assert_allclose(np.mean(a3d, axis=2, where=_wh_partial), np.array(_res)) - with pytest.warns(RuntimeWarning) as w: + with pytest.warns(RuntimeWarning): assert_allclose( a.mean(axis=1, where=wh_partial), np.array([np.nan, 5.5, 9.5, np.nan]) ) - with pytest.warns(RuntimeWarning) as w: + with pytest.warns(RuntimeWarning): assert_equal(a.mean(where=False), np.nan) - with pytest.warns(RuntimeWarning) as w: + with pytest.warns(RuntimeWarning): assert_equal(np.mean(a, where=False), np.nan) diff --git a/test/xpu/test_gemm.py b/test/xpu/test_gemm.py index 2bc6d09eeea..0a7c338a467 100644 --- a/test/xpu/test_gemm.py +++ b/test/xpu/test_gemm.py @@ -753,11 +753,11 @@ class TestBasicGEMM(TestCase): input_tensor = torch.rand((1, 2, 2), device=device).to(dtype) if dtype != torch.float32: with self.assertRaisesRegex(RuntimeError, "Input dtypes must be the same"): - y = torch.baddbmm(input_tensor, batch1, batch2, beta=0.0) + torch.baddbmm(input_tensor, batch1, batch2, beta=0.0) else: out = torch.randn((1, 2, 2), dtype=dtype, device=device).fill_(torch.nan) y_ref = torch.bmm(batch1, batch2) - y = torch.baddbmm(input_tensor, batch1, batch2, beta=0.0, out=out) + torch.baddbmm(input_tensor, batch1, batch2, beta=0.0, out=out) self.assertEqual(out, y_ref) @dtypes(torch.float) @@ -838,9 +838,6 @@ class TestBasicGEMM(TestCase): a_data = torch.arange(1, o * s + 1, device=device, dtype=dtype).view(o, s) x_data = torch.arange(1, s + 1, 1, device=device, dtype=dtype) y_data = torch.ones(o, device=device, dtype=dtype) - control = torch.tensor( - [15.0, 33.0, 51.0, 69.0, 87.0], device=device, dtype=dtype - ) def _test(row_major, incx, incy, lda_tail): if row_major: @@ -917,7 +914,8 @@ class TestBasicGEMM(TestCase): return result else: out = torch.full_like(result, math.nan) - out1 = call_torch_fn(*args, **kwargs, out=out) + out1 = call_torch_fn(*args, **kwargs, out=out) # noqa: F841 + # FIXME(rec): should 
this return out1? return out # mm, addmm