From 9178deedff111bc404ae2ca3c376514efe37a68d Mon Sep 17 00:00:00 2001 From: Jirka Borovec <6035284+Borda@users.noreply.github.com> Date: Fri, 1 Sep 2023 00:22:55 +0000 Subject: [PATCH] removing some redundant str splits (#106089) drop some redundant string splits, no factual changes, just cleaning the codebase Pull Request resolved: https://github.com/pytorch/pytorch/pull/106089 Approved by: https://github.com/albanD, https://github.com/malfet --- test/distributed/launcher/bin/test_script_local_rank.py | 2 +- test/onnx/model_defs/squeezenet.py | 2 +- test/test_nestedtensor.py | 8 +++----- test/test_weak.py | 2 +- test/torch_np/numpy_tests/core/test_indexing.py | 2 +- test/torch_np/numpy_tests/lib/index_tricks.py | 2 +- test/torch_np/numpy_tests/lib/test_function_base.py | 2 +- test/torch_np/numpy_tests/lib/test_histograms.py | 2 +- test/torch_np/numpy_tests/linalg/test_linalg.py | 2 +- tools/lite_interpreter/gen_selected_mobile_ops_header.py | 2 +- tools/nightly.py | 2 +- torch/_higher_order_ops/cond.py | 8 ++++---- torch/_numpy/_funcs_impl.py | 2 +- torch/_numpy/_reductions_impl.py | 2 +- torch/autograd/function.py | 2 +- torch/distributed/fsdp/fully_sharded_data_parallel.py | 6 +++--- torch/jit/frontend.py | 2 +- torch/package/_importlib.py | 4 +--- 18 files changed, 25 insertions(+), 29 deletions(-) diff --git a/test/distributed/launcher/bin/test_script_local_rank.py b/test/distributed/launcher/bin/test_script_local_rank.py index e0468c96677..f6663db8c84 100755 --- a/test/distributed/launcher/bin/test_script_local_rank.py +++ b/test/distributed/launcher/bin/test_script_local_rank.py @@ -19,7 +19,7 @@ def parse_args(): "--local_rank", type=int, required=True, - help="The rank of the node for multi-node distributed " "training", + help="The rank of the node for multi-node distributed training", ) return parser.parse_args() diff --git a/test/onnx/model_defs/squeezenet.py b/test/onnx/model_defs/squeezenet.py index a97b399c7da..d0d9ccf1b1f 100644 --- a/test/onnx/model_defs/squeezenet.py +++ b/test/onnx/model_defs/squeezenet.py @@ -32,7 +32,7 @@ class SqueezeNet(nn.Module): super().__init__() if version not in [1.0, 1.1]: raise ValueError( - f"Unsupported SqueezeNet version {version}:" "1.0 or 1.1 expected" + f"Unsupported SqueezeNet version {version}:1.0 or 1.1 expected" ) self.num_classes = num_classes if version == 1.0: diff --git a/test/test_nestedtensor.py b/test/test_nestedtensor.py index 332741a83a5..d6f4274a5f6 100644 --- a/test/test_nestedtensor.py +++ b/test/test_nestedtensor.py @@ -360,19 +360,17 @@ class TestNestedTensor(TestCase): @torch.inference_mode() def test_repr_string(self): a = torch.nested.nested_tensor([]) - expected = "nested_tensor([" "\n\n])" + expected = "nested_tensor([\n\n])" self.assertEqual(str(a), expected) self.assertEqual(repr(a), expected) a = torch.nested.nested_tensor([torch.tensor(1.0)]) - expected = "nested_tensor([" "\n tensor(1.)" "\n])" + expected = "nested_tensor([\n tensor(1.)\n])" self.assertEqual(str(a), expected) self.assertEqual(repr(a), expected) a = torch.nested.nested_tensor([torch.tensor([[1, 2]]), torch.tensor([[4, 5]])]) - expected = ( - "nested_tensor([" "\n tensor([[1, 2]])" "," "\n tensor([[4, 5]])" "\n])" - ) + expected = "nested_tensor([\n tensor([[1, 2]]),\n tensor([[4, 5]])\n])" self.assertEqual(str(a), expected) self.assertEqual(repr(a), expected) diff --git a/test/test_weak.py b/test/test_weak.py index a59dc491c13..4bce413c397 100644 --- a/test/test_weak.py +++ b/test/test_weak.py @@ -57,7 +57,7 @@ class WeakTest(TestCase): 
self.assertIsNot( value1, value2, - "invalid test" " -- value parameters must be distinct objects", + "invalid test -- value parameters must be distinct objects", ) weakdict = klass() o = weakdict.setdefault(key, value1) diff --git a/test/torch_np/numpy_tests/core/test_indexing.py b/test/torch_np/numpy_tests/core/test_indexing.py index 41b931c77cc..581c8da9aba 100644 --- a/test/torch_np/numpy_tests/core/test_indexing.py +++ b/test/torch_np/numpy_tests/core/test_indexing.py @@ -700,7 +700,7 @@ class TestMultiIndexingAutomated: in_indices[i] = indx elif indx.dtype.kind != "b" and indx.dtype.kind != "i": raise IndexError( - "arrays used as indices must be of " "integer (or boolean) type" + "arrays used as indices must be of integer (or boolean) type" ) if indx.ndim != 0: no_copy = False diff --git a/test/torch_np/numpy_tests/lib/index_tricks.py b/test/torch_np/numpy_tests/lib/index_tricks.py index 9ad2981294a..77be1e4e70a 100644 --- a/test/torch_np/numpy_tests/lib/index_tricks.py +++ b/test/torch_np/numpy_tests/lib/index_tricks.py @@ -390,7 +390,7 @@ class AxisConcatenator: newobj = newobj.swapaxes(-1, trans1d) elif isinstance(item, str): if k != 0: - raise ValueError("special directives must be the " "first entry.") + raise ValueError("special directives must be the first entry.") if item in ("r", "c"): matrix = True col = item == "c" diff --git a/test/torch_np/numpy_tests/lib/test_function_base.py b/test/torch_np/numpy_tests/lib/test_function_base.py index 022072ad2a1..be8078b7cfc 100644 --- a/test/torch_np/numpy_tests/lib/test_function_base.py +++ b/test/torch_np/numpy_tests/lib/test_function_base.py @@ -2920,7 +2920,7 @@ class TestPercentile: assert_equal(c1.shape, r1.shape) @pytest.mark.xfail( - reason="numpy: x.dtype is int, out is int; " "torch: result is float" + reason="numpy: x.dtype is int, out is int; torch: result is float" ) def test_scalar_q_2(self): x = np.arange(12).reshape(3, 4) diff --git a/test/torch_np/numpy_tests/lib/test_histograms.py b/test/torch_np/numpy_tests/lib/test_histograms.py index 156a61b2d0a..f49f0b57dfb 100644 --- a/test/torch_np/numpy_tests/lib/test_histograms.py +++ b/test/torch_np/numpy_tests/lib/test_histograms.py @@ -566,7 +566,7 @@ class TestHistogramOptimBinNums: assert_equal( len(a), numbins, - err_msg=f"{estimator} estimator, " "No Variance test", + err_msg=f"{estimator} estimator, No Variance test", ) def test_limited_variance(self): diff --git a/test/torch_np/numpy_tests/linalg/test_linalg.py b/test/torch_np/numpy_tests/linalg/test_linalg.py index fdd458f5884..70a2caade1b 100644 --- a/test/torch_np/numpy_tests/linalg/test_linalg.py +++ b/test/torch_np/numpy_tests/linalg/test_linalg.py @@ -784,7 +784,7 @@ class TestCond(CondCases): linalg.cond(A, p) @pytest.mark.xfail( - True, run=False, reason="Platform/LAPACK-dependent failure, " "see gh-18914" + True, run=False, reason="Platform/LAPACK-dependent failure, see gh-18914" ) def test_nan(self): # nans should be passed through, not converted to infs diff --git a/tools/lite_interpreter/gen_selected_mobile_ops_header.py b/tools/lite_interpreter/gen_selected_mobile_ops_header.py index 18e09ddecd1..aa58bc5e90b 100644 --- a/tools/lite_interpreter/gen_selected_mobile_ops_header.py +++ b/tools/lite_interpreter/gen_selected_mobile_ops_header.py @@ -149,7 +149,7 @@ def main() -> None: "--yaml_file_path", type=str, required=True, - help="Path to the yaml" " file with a list of operators used by the model.", + help="Path to the yaml file with a list of operators used by the model.", ) parser.add_argument( "-o", 
diff --git a/tools/nightly.py b/tools/nightly.py index 28a8c6eb233..983e69150b5 100755 --- a/tools/nightly.py +++ b/tools/nightly.py @@ -343,7 +343,7 @@ def deps_install(deps: List[str], existing_env: bool, env_opts: List[str]) -> No @timed("Installing pytorch nightly binaries") def pytorch_install(url: str) -> "tempfile.TemporaryDirectory[str]": - """ "Install pytorch into a temporary directory""" + """Install pytorch into a temporary directory""" pytdir = tempfile.TemporaryDirectory() cmd = ["conda", "create", "--yes", "--no-deps", "--prefix", pytdir.name, url] p = subprocess.run(cmd, check=True) diff --git a/torch/_higher_order_ops/cond.py b/torch/_higher_order_ops/cond.py index 322669c67cd..85fecd66018 100644 --- a/torch/_higher_order_ops/cond.py +++ b/torch/_higher_order_ops/cond.py @@ -405,12 +405,12 @@ def cond_func(pred, true_fn, false_fn, inputs): for branch in [true_fn, false_fn]: if _has_potential_branch_input_mutation(branch, unwrapped_inputs): raise UnsupportedAliasMutationException( - "One of torch.cond branch " "might be modifying the input!" + "One of torch.cond branch might be modifying the input!" ) if _has_potential_branch_input_alias(branch, unwrapped_inputs): raise UnsupportedAliasMutationException( - "One of torch.cond branch " "might be aliasing the input!" + "One of torch.cond branch might be aliasing the input!" ) cond_return = cond_op( @@ -443,12 +443,12 @@ def cond_functionalize(interpreter, pred, true_fn, false_fn, inputs): for branch in [functional_true_fn, functional_false_fn]: if _has_potential_branch_input_mutation(branch, unwrapped_inputs): raise UnsupportedAliasMutationException( - "One of torch.cond branch " "might be modifying the input!" + "One of torch.cond branch might be modifying the input!" ) for branch in [true_fn, false_fn]: if _has_potential_branch_input_alias(branch, unwrapped_inputs): raise UnsupportedAliasMutationException( - "One of torch.cond branch " "might be aliasing the input!" + "One of torch.cond branch might be aliasing the input!" ) cond_return = cond_op( diff --git a/torch/_numpy/_funcs_impl.py b/torch/_numpy/_funcs_impl.py index 2ef29231b70..3261692dfb4 100644 --- a/torch/_numpy/_funcs_impl.py +++ b/torch/_numpy/_funcs_impl.py @@ -1226,7 +1226,7 @@ def cross(a: ArrayLike, b: ArrayLike, axisa=-1, axisb=-1, axisc=-1, axis=None): # Move working axis to the end of the shape a = torch.moveaxis(a, axisa, -1) b = torch.moveaxis(b, axisb, -1) - msg = "incompatible dimensions for cross product\n" "(dimension must be 2 or 3)" + msg = "incompatible dimensions for cross product\n(dimension must be 2 or 3)" if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3): raise ValueError(msg) diff --git a/torch/_numpy/_reductions_impl.py b/torch/_numpy/_reductions_impl.py index 2efe2b3cb59..7bc0099c8d8 100644 --- a/torch/_numpy/_reductions_impl.py +++ b/torch/_numpy/_reductions_impl.py @@ -321,7 +321,7 @@ def average( if a.shape != weights.shape: if axis is None: raise TypeError( - "Axis must be specified when shapes of a and weights " "differ." + "Axis must be specified when shapes of a and weights differ." ) if weights.ndim != 1: raise TypeError( diff --git a/torch/autograd/function.py b/torch/autograd/function.py index 77e8e56763b..080f1a4e944 100644 --- a/torch/autograd/function.py +++ b/torch/autograd/function.py @@ -358,7 +358,7 @@ class _SingleLevelFunction( if they are intended to be used for in ``jvp``. """ raise NotImplementedError( - "You must implement the forward function for custom" " autograd.Function." 
+ "You must implement the forward function for custom autograd.Function." ) @staticmethod diff --git a/torch/distributed/fsdp/fully_sharded_data_parallel.py b/torch/distributed/fsdp/fully_sharded_data_parallel.py index 6b772597b71..322a9633892 100644 --- a/torch/distributed/fsdp/fully_sharded_data_parallel.py +++ b/torch/distributed/fsdp/fully_sharded_data_parallel.py @@ -2043,9 +2043,9 @@ def _get_param_to_fqn( """ param_to_param_names = _get_param_to_fqns(model) for param_names in param_to_param_names.values(): - assert len(param_names) > 0, ( - "`_get_param_to_fqns()` " "should not construct empty lists" - ) + assert ( + len(param_names) > 0 + ), "`_get_param_to_fqns()` should not construct empty lists" if len(param_names) > 1: raise RuntimeError( "Each parameter should only map to one parameter name but got " diff --git a/torch/jit/frontend.py b/torch/jit/frontend.py index 45a2145e1f0..445156b18f8 100644 --- a/torch/jit/frontend.py +++ b/torch/jit/frontend.py @@ -1060,7 +1060,7 @@ class ExprBuilder(Builder): if isinstance(index_expr.value, ast.Tuple): raise NotSupportedError( base.range(), - "slicing multiple dimensions with " "tuples not supported yet", + "slicing multiple dimensions with tuples not supported yet", ) return build_expr(ctx, index_expr.value) diff --git a/torch/package/_importlib.py b/torch/package/_importlib.py index 011567b89f5..fd303b6141e 100644 --- a/torch/package/_importlib.py +++ b/torch/package/_importlib.py @@ -44,9 +44,7 @@ def _sanity_check(name, package, level): if not isinstance(package, str): raise TypeError("__package__ not set to a string") elif not package: - raise ImportError( - "attempted relative import with no known parent " "package" - ) + raise ImportError("attempted relative import with no known parent package") if not name and level == 0: raise ValueError("Empty module name")