diff --git a/.ci/aarch64_linux/aarch64_wheel_ci_build.py b/.ci/aarch64_linux/aarch64_wheel_ci_build.py
index 9ba51f48c26..d463f847486 100755
--- a/.ci/aarch64_linux/aarch64_wheel_ci_build.py
+++ b/.ci/aarch64_linux/aarch64_wheel_ci_build.py
@@ -204,7 +204,7 @@ if __name__ == "__main__":
else:
build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={version}.dev{build_date} PYTORCH_BUILD_NUMBER=1 "
elif branch.startswith(("v1.", "v2.")):
- build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={branch[1:branch.find('-')]} PYTORCH_BUILD_NUMBER=1 "
+ build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={branch[1 : branch.find('-')]} PYTORCH_BUILD_NUMBER=1 "
if enable_mkldnn:
build_ArmComputeLibrary()
diff --git a/.ci/aarch64_linux/build_aarch64_wheel.py b/.ci/aarch64_linux/build_aarch64_wheel.py
index fbf7492364c..c6593a179cf 100755
--- a/.ci/aarch64_linux/build_aarch64_wheel.py
+++ b/.ci/aarch64_linux/build_aarch64_wheel.py
@@ -761,7 +761,7 @@ def start_build(
version = host.check_output("cat pytorch/version.txt").strip()[:-2]
build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={version}.dev{build_date} PYTORCH_BUILD_NUMBER=1"
if branch.startswith(("v1.", "v2.")):
- build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={branch[1:branch.find('-')]} PYTORCH_BUILD_NUMBER=1"
+ build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={branch[1 : branch.find('-')]} PYTORCH_BUILD_NUMBER=1"
if host.using_docker():
build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"
if enable_mkldnn:
diff --git a/.ci/pytorch/smoke_test/max_autotune.py b/.ci/pytorch/smoke_test/max_autotune.py
index 254b4206ad0..327c11ed62c 100644
--- a/.ci/pytorch/smoke_test/max_autotune.py
+++ b/.ci/pytorch/smoke_test/max_autotune.py
@@ -46,7 +46,9 @@ def train(args, model, device, train_loader, optimizer, epoch):
optimizer.step()
if batch_idx % args.log_interval == 0:
print(
- f"Train Epoch: {epoch} [{batch_idx * len(data)}/{len(train_loader.dataset)} ({100. * batch_idx / len(train_loader):.0f}%)]\tLoss: {loss.item():.6f}" # noqa: B950
+ f"Train Epoch: {epoch} "
+ f"[{batch_idx * len(data)}/{len(train_loader.dataset)} "
+ f"({100.0 * batch_idx / len(train_loader):.0f}%)]\tLoss: {loss.item():.6f}"
)
if args.dry_run:
break
@@ -71,7 +73,9 @@ def test(model, device, test_loader):
test_loss /= len(test_loader.dataset)
print(
- f"\nTest set: Average loss: {test_loss:.4f}, Accuracy: {correct}/{len(test_loader.dataset)} ({100. * correct / len(test_loader.dataset):.0f}%)\n" # noqa: B950
+ f"\nTest set: Average loss: {test_loss:.4f}, "
+ f"Accuracy: {correct}/{len(test_loader.dataset)} "
+ f"({100.0 * correct / len(test_loader.dataset):.0f}%)\n"
)
diff --git a/.github/scripts/github_utils.py b/.github/scripts/github_utils.py
index cd19907189f..3a42298cdf3 100644
--- a/.github/scripts/github_utils.py
+++ b/.github/scripts/github_utils.py
@@ -57,10 +57,10 @@ def gh_fetch_url_and_headers(
print(
f"""{url}
Rate limit exceeded:
- Used: {err.headers['X-RateLimit-Used']}
- Limit: {err.headers['X-RateLimit-Limit']}
- Remaining: {err.headers['X-RateLimit-Remaining']}
- Resets at: {err.headers['x-RateLimit-Reset']}"""
+ Used: {err.headers["X-RateLimit-Used"]}
+ Limit: {err.headers["X-RateLimit-Limit"]}
+ Remaining: {err.headers["X-RateLimit-Remaining"]}
+ Resets at: {err.headers["x-RateLimit-Reset"]}"""
)
else:
print(f"Error fetching {url} {err}")
diff --git a/.github/scripts/trymerge.py b/.github/scripts/trymerge.py
index 702d4375896..8efe01fe336 100755
--- a/.github/scripts/trymerge.py
+++ b/.github/scripts/trymerge.py
@@ -485,7 +485,7 @@ def get_check_run_name_prefix(workflow_run: Any) -> str:
if workflow_run is None:
return ""
else:
- return f'{workflow_run["workflow"]["name"]} / '
+ return f"{workflow_run['workflow']['name']} / "
def is_passing_status(status: Optional[str]) -> bool:
@@ -545,7 +545,7 @@ def add_workflow_conclusions(
if not isinstance(checkrun_node, dict):
warn(f"Expected dictionary, but got {type(checkrun_node)}")
continue
- checkrun_name = f'{get_check_run_name_prefix(workflow_run)}{checkrun_node["name"]}'
+ checkrun_name = f"{get_check_run_name_prefix(workflow_run)}{checkrun_node['name']}"
existing_checkrun = workflow_obj.jobs.get(checkrun_name)
if existing_checkrun is None or not is_passing_status(
existing_checkrun.status
diff --git a/.github/scripts/trymerge_explainer.py b/.github/scripts/trymerge_explainer.py
index 0527701291f..bbc85f020a0 100644
--- a/.github/scripts/trymerge_explainer.py
+++ b/.github/scripts/trymerge_explainer.py
@@ -79,7 +79,7 @@ class TryMergeExplainer:
(
"Advanced Debugging
",
"Check the merge workflow status ",
- f"here",
+ f'here',
" ",
)
)
diff --git a/aten/src/ATen/native/transformers/cuda/flash_attn/kernels/generate_kernels.py b/aten/src/ATen/native/transformers/cuda/flash_attn/kernels/generate_kernels.py
index a9276c98e65..803c5390768 100644
--- a/aten/src/ATen/native/transformers/cuda/flash_attn/kernels/generate_kernels.py
+++ b/aten/src/ATen/native/transformers/cuda/flash_attn/kernels/generate_kernels.py
@@ -103,7 +103,7 @@ if __name__ == "__main__":
"-o",
"--output_dir",
required=False,
- help="Where to generate the kernels " " will default to the current directory ",
+    help="Where to generate the kernels; will default to the current directory",
)
args = parser.parse_args()
main(args.output_dir)
diff --git a/benchmarks/dynamo/check_accuracy.py b/benchmarks/dynamo/check_accuracy.py
index 92b7f552fc3..2de5f86aaa8 100644
--- a/benchmarks/dynamo/check_accuracy.py
+++ b/benchmarks/dynamo/check_accuracy.py
@@ -102,7 +102,7 @@ def check_accuracy(actual_csv, expected_csv, expected_filename):
msg += textwrap.dedent(
f"""
Error: {len(failed)} models have accuracy status regressed:
- {' '.join(failed)}
+ {" ".join(failed)}
"""
)
@@ -110,7 +110,7 @@ def check_accuracy(actual_csv, expected_csv, expected_filename):
msg += textwrap.dedent(
f"""
Improvement: {len(improved)} models have accuracy status improved:
- {' '.join(improved)}
+ {" ".join(improved)}
"""
)
diff --git a/benchmarks/dynamo/check_csv.py b/benchmarks/dynamo/check_csv.py
index d5598862730..95034488451 100644
--- a/benchmarks/dynamo/check_csv.py
+++ b/benchmarks/dynamo/check_csv.py
@@ -26,7 +26,7 @@ def check_csv(filename):
textwrap.dedent(
f"""
Error {len(failed)} models failed
- {' '.join(failed)}
+ {" ".join(failed)}
"""
)
)
diff --git a/benchmarks/dynamo/check_graph_breaks.py b/benchmarks/dynamo/check_graph_breaks.py
index 8a62538c96d..173f11acb13 100644
--- a/benchmarks/dynamo/check_graph_breaks.py
+++ b/benchmarks/dynamo/check_graph_breaks.py
@@ -91,7 +91,7 @@ def check_graph_breaks(actual_csv, expected_csv, expected_filename):
msg += textwrap.dedent(
f"""
Error: {len(failed)} models have new dynamo graph breaks:
- {' '.join(failed)}
+ {" ".join(failed)}
"""
)
@@ -99,7 +99,7 @@ def check_graph_breaks(actual_csv, expected_csv, expected_filename):
msg += textwrap.dedent(
f"""
Improvement: {len(improved)} models have fixed dynamo graph breaks:
- {' '.join(improved)}
+ {" ".join(improved)}
"""
)
diff --git a/benchmarks/dynamo/check_memory_compression_ratio.py b/benchmarks/dynamo/check_memory_compression_ratio.py
index 3308758943e..30d3b83a95f 100644
--- a/benchmarks/dynamo/check_memory_compression_ratio.py
+++ b/benchmarks/dynamo/check_memory_compression_ratio.py
@@ -40,7 +40,7 @@ def main(args):
textwrap.dedent(
f"""
Error: {len(failed)} models below expected memory compression ratio:
- {' '.join(failed)}
+ {" ".join(failed)}
If this drop is expected, you can update `{args.expected}`.
"""
)
diff --git a/benchmarks/dynamo/check_perf_csv.py b/benchmarks/dynamo/check_perf_csv.py
index f5911d6a8a5..320a4544f82 100644
--- a/benchmarks/dynamo/check_perf_csv.py
+++ b/benchmarks/dynamo/check_perf_csv.py
@@ -26,7 +26,7 @@ def check_perf_csv(filename, threshold, threshold_scale):
textwrap.dedent(
f"""
Error {len(failed)} models performance regressed
- {' '.join(failed)}
+ {" ".join(failed)}
"""
)
)
diff --git a/benchmarks/instruction_counts/core/api.py b/benchmarks/instruction_counts/core/api.py
index 97820f1d357..7d0b1a0f72e 100644
--- a/benchmarks/instruction_counts/core/api.py
+++ b/benchmarks/instruction_counts/core/api.py
@@ -368,7 +368,7 @@ class GroupedBenchmark:
return textwrap.dedent(
f"""\
- def model({', '.join(signature_args)}):
+ def model({", ".join(signature_args)}):
{{stmt_str}}
return {signature_output}
"""
@@ -397,7 +397,7 @@ class GroupedBenchmark:
cpp_invocation = textwrap.dedent(
f"""\
std::vector<torch::jit::IValue> ivalue_inputs({{
- {', '.join([f'torch::jit::IValue({a})' for a in signature_args])}
+ {", ".join([f"torch::jit::IValue({a})" for a in signature_args])}
}});
{cpp_prefix}{model_name}.forward(ivalue_inputs);
"""
diff --git a/docs/source/scripts/exportdb/generate_example_rst.py b/docs/source/scripts/exportdb/generate_example_rst.py
index 4b7803b494e..8fdacad1105 100644
--- a/docs/source/scripts/exportdb/generate_example_rst.py
+++ b/docs/source/scripts/exportdb/generate_example_rst.py
@@ -49,7 +49,7 @@ def generate_example_rst(example_case: ExportCase):
# Generate contents of the .rst file
title = f"{example_case.name}"
doc_contents = f"""{title}
-{'^' * (len(title))}
+{"^" * (len(title))}
.. note::
@@ -117,7 +117,7 @@ def generate_index_rst(example_cases, tag_to_modules, support_level_to_modules):
module_contents = "\n\n".join(v)
support_contents += f"""
{support_level}
-{'-' * (len(support_level))}
+{"-" * (len(support_level))}
{module_contents}
"""
diff --git a/functorch/examples/dp_cifar10/cifar10_opacus.py b/functorch/examples/dp_cifar10/cifar10_opacus.py
index 3cea3901743..fef7bb66b5c 100644
--- a/functorch/examples/dp_cifar10/cifar10_opacus.py
+++ b/functorch/examples/dp_cifar10/cifar10_opacus.py
@@ -119,7 +119,7 @@ def test(args, model, test_loader, device):
top1_avg = np.mean(top1_acc)
- print(f"\tTest set:Loss: {np.mean(losses):.6f} Acc@1: {top1_avg :.6f} ")
+ print(f"\tTest set:Loss: {np.mean(losses):.6f} Acc@1: {top1_avg:.6f}")
return np.mean(top1_acc)
diff --git a/functorch/examples/dp_cifar10/cifar10_transforms.py b/functorch/examples/dp_cifar10/cifar10_transforms.py
index 29aa10a07ea..8b4d42c9f74 100644
--- a/functorch/examples/dp_cifar10/cifar10_transforms.py
+++ b/functorch/examples/dp_cifar10/cifar10_transforms.py
@@ -185,7 +185,7 @@ def test(args, model, test_loader, device):
top1_avg = np.mean(top1_acc)
- print(f"\tTest set:Loss: {np.mean(losses):.6f} Acc@1: {top1_avg :.6f} ")
+ print(f"\tTest set:Loss: {np.mean(losses):.6f} Acc@1: {top1_avg:.6f}")
return np.mean(top1_acc)
diff --git a/scripts/compile_tests/failures_histogram.py b/scripts/compile_tests/failures_histogram.py
index 9991043c766..00d8f00ceb2 100644
--- a/scripts/compile_tests/failures_histogram.py
+++ b/scripts/compile_tests/failures_histogram.py
@@ -108,7 +108,7 @@ def failures_histogram(eager_dir, dynamo_dir, verbose=False, format_issues=False
def as_issue(count, msg, repro, tests):
tests = "\n".join(tests)
result = f"""
-{'-' * 50}
+{"-" * 50}
{count} Dynamo test are failing with \"{msg}\".
## Repro
diff --git a/scripts/release_notes/categorize.py b/scripts/release_notes/categorize.py
index 6ef0e0c199f..10ee551b74f 100644
--- a/scripts/release_notes/categorize.py
+++ b/scripts/release_notes/categorize.py
@@ -145,7 +145,7 @@ Labels: {features.labels}
Current category: {commit.category}
-Select from: {', '.join(common.categories)}
+Select from: {", ".join(common.categories)}
"""
)
@@ -165,7 +165,7 @@ Select from: {', '.join(common.categories)}
cat_choice = choices[0]
print(f"\nSelected: {cat_choice}")
print(f"\nCurrent topic: {commit.topic}")
- print(f"""Select from: {', '.join(topics)}""")
+ print(f"""Select from: {", ".join(topics)}""")
topic_choice = None
while topic_choice is None:
value = input("topic> ").strip()
diff --git a/setup.py b/setup.py
index ea8b8c6f8c2..a85363a87cf 100644
--- a/setup.py
+++ b/setup.py
@@ -1454,8 +1454,7 @@ def main():
name=package_name,
version=version,
description=(
- "Tensors and Dynamic neural networks in "
- "Python with strong GPU acceleration"
+ "Tensors and Dynamic neural networks in Python with strong GPU acceleration"
),
long_description=long_description,
long_description_content_type="text/markdown",
diff --git a/test/run_test.py b/test/run_test.py
index 010bb368afd..47cf39d1edf 100755
--- a/test/run_test.py
+++ b/test/run_test.py
@@ -759,7 +759,7 @@ def run_test(
stepcurrent_key = f"{test_file}_{test_module.shard}_{os.urandom(8).hex()}"
if options.verbose:
- unittest_args.append(f'-{"v" * options.verbose}') # in case of pytest
+ unittest_args.append(f"-{'v' * options.verbose}") # in case of pytest
if test_file in RUN_PARALLEL_BLOCKLIST:
unittest_args = [
@@ -1895,8 +1895,7 @@ def get_selected_tests(options) -> list[str]:
selected_tests = exclude_tests(
TESTS_NOT_USING_GRADCHECK,
selected_tests,
- "Running in slow gradcheck mode, skipping tests "
- "that don't use gradcheck.",
+ "Running in slow gradcheck mode, skipping tests that don't use gradcheck.",
exact_match=True,
)
diff --git a/test/test_dispatch.py b/test/test_dispatch.py
index f6e686a21dd..0e77c31915e 100644
--- a/test/test_dispatch.py
+++ b/test/test_dispatch.py
@@ -151,7 +151,7 @@ class TestDispatch(TestCase):
active_ops.add(op_ix)
try:
ops[op_ix](refs[op_ix])
- check_invariants(f"running ctors {ctor_order[:i + 1]}")
+ check_invariants(f"running ctors {ctor_order[: i + 1]}")
except RuntimeError as e:
if not expect_raises:
raise
@@ -160,7 +160,7 @@ class TestDispatch(TestCase):
expected, _, expected_provenance = results.setdefault(
frozenset(active_ops),
Result(
- actual, "", f"error after running ctors {ctor_order[:i + 1]}"
+ actual, "", f"error after running ctors {ctor_order[: i + 1]}"
),
)
self.assertMultiLineEqual(expected, actual, expected_provenance)
@@ -195,7 +195,7 @@ class TestDispatch(TestCase):
else:
active_ops.remove(op_ix)
check_invariants(
- f"running ctors {ctor_order[:last_ctor + 1]}, then running dtors {dtor_order[:i + 1]}"
+ f"running ctors {ctor_order[: last_ctor + 1]}, then running dtors {dtor_order[: i + 1]}"
)
return results[set_to_report][0]
diff --git a/test/test_jit_fuser_te.py b/test/test_jit_fuser_te.py
index 0f255a6d391..17c83cc7264 100644
--- a/test/test_jit_fuser_te.py
+++ b/test/test_jit_fuser_te.py
@@ -2878,8 +2878,8 @@ class TestNNCOpInfo(TestNNCOpInfoParent):
fx_args.append(f"{k} = {repr(v)}")
code = f"""
-def f({', '.join(param_names)}):
- return op.op({', '.join(fx_args)})"""
+def f({", ".join(param_names)}):
+ return op.op({", ".join(fx_args)})"""
g = {"torch": torch, "inf": math.inf, "op": op}
exec(code, g)
f = g["f"]
diff --git a/tools/autograd/gen_inplace_or_view_type.py b/tools/autograd/gen_inplace_or_view_type.py
index 0e2927d3072..0fd882d00cf 100644
--- a/tools/autograd/gen_inplace_or_view_type.py
+++ b/tools/autograd/gen_inplace_or_view_type.py
@@ -575,7 +575,7 @@ def gen_formals(f: NativeFunction) -> str:
# See Note [Plumbing Keys Through The Dispatcher] for details.
["c10::DispatchKeySet ks"]
+ [
- f'{cpp.argument_type(a, binds="__placeholder__", symint=True).cpp_type()} {a.name}'
+ f"{cpp.argument_type(a, binds='__placeholder__', symint=True).cpp_type()} {a.name}"
for a in f.func.schema_order_arguments()
]
)
diff --git a/tools/autograd/gen_python_functions.py b/tools/autograd/gen_python_functions.py
index 5f54c69a10b..178fc9b1111 100644
--- a/tools/autograd/gen_python_functions.py
+++ b/tools/autograd/gen_python_functions.py
@@ -723,7 +723,7 @@ def emit_structseq_call(
tn_key = gen_structseq_typename_key(overload.function)
typename = typenames.get(tn_key)
if typename is None:
- typename = f'NamedTuple{"" if not typedefs else len(typedefs)}'
+ typename = f"NamedTuple{'' if not typedefs else len(typedefs)}"
typenames[tn_key] = typename
typedefs.append(
f"""\
@@ -759,7 +759,7 @@ def generate_return_type_definition_and_registrations(
typename = typenames.get(tn_key)
if typename is None:
- typename = f'{name}NamedTuple{"" if not definitions else len(definitions)}'
+ typename = f"{name}NamedTuple{'' if not definitions else len(definitions)}"
typenames[tn_key] = typename
definitions.append(
f"""\
@@ -807,7 +807,7 @@ def generate_return_type_declarations(
if typename is None:
typename = (
- f'{name}NamedTuple{"" if not declarations else len(declarations)}'
+ f"{name}NamedTuple{'' if not declarations else len(declarations)}"
)
typenames[tn_key] = typename
declarations.append(f"PyTypeObject* get_{name}_structseq();")
@@ -1351,7 +1351,7 @@ def emit_single_dispatch(
or (ps.method and ("requires_grad" in parser_outputs))
)
set_requires_grad = (
- f'.set_requires_grad({parser_outputs["requires_grad"].expr})'
+ f".set_requires_grad({parser_outputs['requires_grad'].expr})"
if need_set_requires_grad
else ""
)
diff --git a/tools/autograd/gen_trace_type.py b/tools/autograd/gen_trace_type.py
index 97881cbd0b6..67f71d2df50 100644
--- a/tools/autograd/gen_trace_type.py
+++ b/tools/autograd/gen_trace_type.py
@@ -381,9 +381,9 @@ def format_postrecord_trace(f: NativeFunction) -> str:
def tie_return_values(f: NativeFunction) -> str:
if len(f.func.returns) == 1:
- return f'auto {f.func.returns[0].name or "result"}'
+ return f"auto {f.func.returns[0].name or 'result'}"
names = cpp.return_names(f)
- return f'auto [{", ".join(names)}]'
+ return f"auto [{', '.join(names)}]"
def get_return_value(f: NativeFunction) -> str:
@@ -391,7 +391,7 @@ def get_return_value(f: NativeFunction) -> str:
if len(f.func.returns) == 1:
return names[0]
if f.func.kind() == SchemaKind.out:
- return f'std::forward_as_tuple({", ".join(names)})'
+ return f"std::forward_as_tuple({', '.join(names)})"
else:
moved = ", ".join(f"std::move({name})" for name in names)
return f"std::make_tuple({moved})"
@@ -474,7 +474,7 @@ def method_definition(f: NativeFunction) -> str:
# See Note [Plumbing Keys Through The Dispatcher] for details.
["c10::DispatchKeySet ks"]
+ [
- f'{cpp.argument_type(a, binds="__placeholder__", symint=True).cpp_type()} {a.name}'
+ f"{cpp.argument_type(a, binds='__placeholder__', symint=True).cpp_type()} {a.name}"
for a in f.func.schema_order_arguments()
]
)
diff --git a/tools/autograd/gen_variable_factories.py b/tools/autograd/gen_variable_factories.py
index f206939bd53..9916a77385d 100644
--- a/tools/autograd/gen_variable_factories.py
+++ b/tools/autograd/gen_variable_factories.py
@@ -108,9 +108,9 @@ def process_function(f: NativeFunction) -> str | None:
exprs.append(arg.name)
r += f"""\
-inline at::Tensor {sig.name()}({', '.join(formals)}) {{
+inline at::Tensor {sig.name()}({", ".join(formals)}) {{
at::AutoDispatchBelowADInplaceOrView guard;
- return autograd::make_variable(at::{sig.name()}({', '.join(exprs)}), /*requires_grad=*/{requires_grad});
+ return autograd::make_variable(at::{sig.name()}({", ".join(exprs)}), /*requires_grad=*/{requires_grad});
}}
"""
return r
diff --git a/tools/autograd/gen_variable_type.py b/tools/autograd/gen_variable_type.py
index acba0484e06..35030359203 100644
--- a/tools/autograd/gen_variable_type.py
+++ b/tools/autograd/gen_variable_type.py
@@ -1410,7 +1410,7 @@ def emit_body(
if all_forward_grad_cond:
if not is_inplace_foreach:
- body.append(f'if ({" || ".join(all_forward_grad_cond)}) {{')
+ body.append(f"if ({' || '.join(all_forward_grad_cond)}) {{")
body.append(" original_self = self.clone();")
body.append("}")
else:
@@ -1801,7 +1801,7 @@ def emit_body(
if len(var_names) == 1:
return f"_any_has_forward_grad_{var_names[0]}"
else:
- return f'_any_has_forward_grad_{"_".join(var_names)}'
+ return f"_any_has_forward_grad_{'_'.join(var_names)}"
def emit_any_has_forward_grad() -> list[str]:
content: list[str] = []
@@ -2089,7 +2089,7 @@ def emit_body(
raise RuntimeError(
f'Unsupported input type for "{name}" when forbidding forward AD usage.'
)
- return f'({" || ".join(to_check)})'
+ return f"({' || '.join(to_check)})"
else:
# (2) If derivative is provided, use that information to determine which inputs
# to check fw_grad for
diff --git a/tools/github/github_utils.py b/tools/github/github_utils.py
index 42ced571203..6442a064428 100644
--- a/tools/github/github_utils.py
+++ b/tools/github/github_utils.py
@@ -33,10 +33,10 @@ def gh_fetch_url_and_headers(
):
print(
f"""Rate limit exceeded:
- Used: {err.headers['X-RateLimit-Used']}
- Limit: {err.headers['X-RateLimit-Limit']}
- Remaining: {err.headers['X-RateLimit-Remaining']}
- Resets at: {err.headers['x-RateLimit-Reset']}"""
+ Used: {err.headers["X-RateLimit-Used"]}
+ Limit: {err.headers["X-RateLimit-Limit"]}
+ Remaining: {err.headers["X-RateLimit-Remaining"]}
+ Resets at: {err.headers["x-RateLimit-Reset"]}"""
)
raise
diff --git a/tools/iwyu/fixup.py b/tools/iwyu/fixup.py
index 50d2cf1103c..708c437d6a6 100644
--- a/tools/iwyu/fixup.py
+++ b/tools/iwyu/fixup.py
@@ -41,7 +41,7 @@ def main() -> None:
# Convert all quoted includes to angle brackets
match = QUOTE_INCLUDE_RE.match(line)
if match is not None:
- print(f"#include <{match.group(1)}>{line[match.end(0):]}", end="")
+ print(f"#include <{match.group(1)}>{line[match.end(0) :]}", end="")
continue
match = ANGLE_INCLUDE_RE.match(line)
diff --git a/tools/lite_interpreter/gen_selected_mobile_ops_header.py b/tools/lite_interpreter/gen_selected_mobile_ops_header.py
index 972c7167c0f..d62d622326a 100644
--- a/tools/lite_interpreter/gen_selected_mobile_ops_header.py
+++ b/tools/lite_interpreter/gen_selected_mobile_ops_header.py
@@ -161,8 +161,7 @@ def main() -> None:
"--output_file_path",
type=str,
required=True,
- help="Path to destination"
- "folder where selected_mobile_ops.h will be written.",
+        help="Path to destination folder where selected_mobile_ops.h will be written.",
)
parsed_args = parser.parse_args()
model_file_name = parsed_args.yaml_file_path
diff --git a/tools/packaging/build_wheel.py b/tools/packaging/build_wheel.py
index b7d25a487f4..96e4978c7fc 100644
--- a/tools/packaging/build_wheel.py
+++ b/tools/packaging/build_wheel.py
@@ -105,7 +105,7 @@ def parse_args() -> argparse.Namespace:
"--destination",
default="dist/",
type=str,
- help=("Destination to put the compailed binaries"),
+ help="Destination to put the compiled binaries",
)
return parser.parse_args()
diff --git a/torch/__init__.py b/torch/__init__.py
index eea6e5c0891..dfd46539681 100644
--- a/torch/__init__.py
+++ b/torch/__init__.py
@@ -1482,7 +1482,7 @@ def set_deterministic_debug_mode(debug_mode: _Union[builtins.int, str]) -> None:
_C._set_deterministic_algorithms(True)
else:
raise RuntimeError(
- "invalid value of debug_mode, expected 0, 1, or 2, " f"but got {debug_mode}"
+ f"invalid value of debug_mode, expected 0, 1, or 2, but got {debug_mode}"
)
diff --git a/torch/_jit_internal.py b/torch/_jit_internal.py
index d97647afa87..b76e8fd61cf 100644
--- a/torch/_jit_internal.py
+++ b/torch/_jit_internal.py
@@ -852,8 +852,7 @@ def ignore(drop=False, **kwargs):
if not isinstance(drop, bool):
raise RuntimeError(
- "Argument to @torch.jit.ignore must be a bool or "
- f"a function but got {drop}"
+ f"Argument to @torch.jit.ignore must be a bool or a function but got {drop}"
)
# for backwards compat
@@ -1541,7 +1540,7 @@ def _get_model_id(obj) -> Optional[str]:
# In Python-3.11+ typed enums (i.e. IntEnum for example) retain number of base class methods in subclass
# that were previously dropped. To preserve the behavior, explicitly drop them there
-if sys.version_info > (3, 10):
+if sys.version_info >= (3, 11):
_drop(enum.Enum.__new__)
_drop(enum.Enum.__format__)
_drop(enum.Enum.__repr__)
diff --git a/torch/_tensor_str.py b/torch/_tensor_str.py
index 182236d62e7..b13daaeba23 100644
--- a/torch/_tensor_str.py
+++ b/torch/_tensor_str.py
@@ -694,9 +694,7 @@ def _functorch_wrapper_str_intern(tensor, *, tensor_contents=None):
bdim = torch._C._functorch.maybe_get_bdim(tensor)
assert bdim != -1
return (
- f"BatchedTensor(lvl={level}, bdim={bdim}, value=\n"
- f"{indented_value_repr}\n"
- f")"
+ f"BatchedTensor(lvl={level}, bdim={bdim}, value=\n{indented_value_repr}\n)"
)
if torch._C._functorch.is_gradtrackingtensor(tensor):
return f"GradTrackingTensor(lvl={level}, value=\n{indented_value_repr}\n)"
diff --git a/torch/onnx/_internal/diagnostics/infra/context.py b/torch/onnx/_internal/diagnostics/infra/context.py
index 6f06061b00e..c5701a64aa4 100644
--- a/torch/onnx/_internal/diagnostics/infra/context.py
+++ b/torch/onnx/_internal/diagnostics/infra/context.py
@@ -143,7 +143,7 @@ class Diagnostic:
"""
if self.logger.isEnabledFor(level):
indented_format_message = (
- f"##{'#' * self._current_log_section_depth } {message}"
+ f"##{'#' * self._current_log_section_depth} {message}"
)
self.log(
level,
diff --git a/torch/onnx/_internal/exporter/_onnx_program.py b/torch/onnx/_internal/exporter/_onnx_program.py
index 02439132e1b..407355df520 100644
--- a/torch/onnx/_internal/exporter/_onnx_program.py
+++ b/torch/onnx/_internal/exporter/_onnx_program.py
@@ -81,10 +81,10 @@ class ONNXProgram:
return f"""\
ONNXProgram(
model=
-{textwrap.indent(str(self.model), ' ' * 8)}
+{textwrap.indent(str(self.model), " " * 8)}
,
exported_program=
-{textwrap.indent(str(self.exported_program), ' ' * 8)}
+{textwrap.indent(str(self.exported_program), " " * 8)}
)
"""
diff --git a/torch/onnx/symbolic_helper.py b/torch/onnx/symbolic_helper.py
index f57f65ead1b..dbf6beb648d 100644
--- a/torch/onnx/symbolic_helper.py
+++ b/torch/onnx/symbolic_helper.py
@@ -160,8 +160,7 @@ def _unpack_list(list_value: _C.Value) -> list[_C.Value]:
list_node = list_value.node()
if list_node.kind() != "prim::ListConstruct":
raise errors.SymbolicValueError(
- f"ONNX symbolic expected node type prim::ListConstruct, "
- f"got '{list_node}'.",
+ f"ONNX symbolic expected node type prim::ListConstruct, got '{list_node}'.",
list_value,
)
return list(list_node.inputs())
diff --git a/torchgen/api/python.py b/torchgen/api/python.py
index 017cc1d107e..1d40d607f4b 100644
--- a/torchgen/api/python.py
+++ b/torchgen/api/python.py
@@ -405,7 +405,7 @@ class PythonSignature:
if len(schema_formals) > positional_argc:
schema_formals.insert(positional_argc, "*")
- return f'{self.name}({", ".join(schema_formals)})'
+ return f"{self.name}({', '.join(schema_formals)})"
def signature_str_pyi(self, *, skip_outputs: bool = False) -> str:
args = self.arguments(skip_outputs=skip_outputs)
@@ -421,7 +421,7 @@ class PythonSignature:
# pyi also includes self (with no typing/defaults) for methods
if self.method:
schema_formals.insert(0, "self")
- return f'def {self.name}({", ".join(schema_formals)}) -> {returns_str}: ...'
+ return f"def {self.name}({', '.join(schema_formals)}) -> {returns_str}: ..."
def signature_str_pyi_vararg(self, *, skip_outputs: bool = False) -> str | None:
# only pyi uses vararg signatures
@@ -457,7 +457,7 @@ class PythonSignature:
# pyi also includes self (with no typing/defaults) for methods
if self.method:
schema_formals.insert(0, "self")
- return f'def {self.name}({", ".join(schema_formals)}) -> {returns_str}: ...'
+ return f"def {self.name}({', '.join(schema_formals)}) -> {returns_str}: ..."
# The deprecated python signature involves some special logic, so create a
@@ -498,7 +498,7 @@ class PythonSignatureDeprecated(PythonSignature):
schema_formals.insert(positional_argc, "*")
returns_str = returns_str_pyi(self)
- return f'def {self.name}({", ".join(schema_formals)}) -> {returns_str}: ...'
+ return f"def {self.name}({', '.join(schema_formals)}) -> {returns_str}: ..."
def signature_str_pyi_vararg(self, *, skip_outputs: bool = False) -> str | None:
# the codegen doesn't include vararg variants for deprecated signatures
@@ -1474,11 +1474,11 @@ def dispatch_lambda_exprs(
inits.append(
f"""\
const auto options = TensorOptions()
- .dtype({arg_parser_outputs['dtype'].expr})
- .device({arg_parser_outputs['device'].expr})
- .layout({arg_parser_outputs['layout'].expr})
- .requires_grad({arg_parser_outputs['requires_grad'].expr})
- .pinned_memory({arg_parser_outputs['pin_memory'].expr});
+ .dtype({arg_parser_outputs["dtype"].expr})
+ .device({arg_parser_outputs["device"].expr})
+ .layout({arg_parser_outputs["layout"].expr})
+ .requires_grad({arg_parser_outputs["requires_grad"].expr})
+ .pinned_memory({arg_parser_outputs["pin_memory"].expr});
torch::utils::maybe_initialize_device(options);
"""
)
@@ -1500,9 +1500,9 @@ torch::utils::maybe_initialize_device(options);
inits.append(
f"""\
-check_out_type_matches({arg_parser_outputs['out'].expr}, {arg_parser_outputs['dtype'].expr},
- {arg_parser_outputs['dtype'].is_none_expr}, {arg_parser_outputs['layout'].expr},
- {arg_parser_outputs['device'].expr}, {arg_parser_outputs['device'].is_none_expr});
+check_out_type_matches({arg_parser_outputs["out"].expr}, {arg_parser_outputs["dtype"].expr},
+ {arg_parser_outputs["dtype"].is_none_expr}, {arg_parser_outputs["layout"].expr},
+ {arg_parser_outputs["device"].expr}, {arg_parser_outputs["device"].is_none_expr});
"""
)
# we'll set requires_grad on outgoing tensor
diff --git a/torchgen/api/types/signatures.py b/torchgen/api/types/signatures.py
index d7c60e52d93..2b9b9c27f69 100644
--- a/torchgen/api/types/signatures.py
+++ b/torchgen/api/types/signatures.py
@@ -366,9 +366,9 @@ class FunctionalizationLambda:
e.expr for e in translate.translate(full_ctx, call_bindings, method=False)
]
if not self.is_reverse and maybe_index is not None:
- return f'{inner_call_name}({", ".join(call_exprs)})[{maybe_index.name}];'
+ return f"{inner_call_name}({', '.join(call_exprs)})[{maybe_index.name}];"
else:
- return f'{inner_call_name}({", ".join(call_exprs)});'
+ return f"{inner_call_name}({', '.join(call_exprs)});"
@staticmethod
def from_func(
diff --git a/torchgen/api/types/types_base.py b/torchgen/api/types/types_base.py
index 269f9438c36..08085fa0fa2 100644
--- a/torchgen/api/types/types_base.py
+++ b/torchgen/api/types/types_base.py
@@ -131,7 +131,7 @@ class TupleCType(CType):
def cpp_type(self, *, strip_ref: bool = False) -> str:
# Do not pass `strip_ref` recursively.
- return f'::std::tuple<{",".join([e.cpp_type() for e in self.elems])}>'
+ return f"::std::tuple<{','.join([e.cpp_type() for e in self.elems])}>"
def remove_const_ref(self) -> CType:
return TupleCType([e.remove_const_ref() for e in self.elems])
diff --git a/torchgen/dest/lazy_ir.py b/torchgen/dest/lazy_ir.py
index 976c823a165..8f260cc923a 100644
--- a/torchgen/dest/lazy_ir.py
+++ b/torchgen/dest/lazy_ir.py
@@ -543,7 +543,7 @@ std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type()
aten_name += "_symint"
shape_str = f"""\
{meta_conversion_str}
- auto out_meta = at::{dispatch_ns}::{aten_name}({', '.join(meta_call_args)});
+ auto out_meta = at::{dispatch_ns}::{aten_name}({", ".join(meta_call_args)});
{meta_out}"""
else:
shape_sig = ComputeShapeSignature(
@@ -559,7 +559,7 @@ std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type()
func_schema_str = "aten::" + str(func.func)
shape_str += f"""
if(torch::lazy::symbolicShapeEnabled()){{
-          std::vector<torch::jit::IValue> inputs = {{ {', '.join(str(a.name) for a in all_args)} }};
+          std::vector<torch::jit::IValue> inputs = {{ {", ".join(str(a.name) for a in all_args)} }};
const char* schema_str = "{func_schema_str}";
applySymbolicShapesOnLT(schema_str, inputs, shapes);
}}
diff --git a/torchgen/dest/native_functions.py b/torchgen/dest/native_functions.py
index e9bf2dcb0d0..b1488b4f188 100644
--- a/torchgen/dest/native_functions.py
+++ b/torchgen/dest/native_functions.py
@@ -53,7 +53,7 @@ def gen_structured(g: NativeFunctionsGroup, backend_index: BackendIndex) -> list
return [
f"""\
struct {prefix}structured_{metadata.kernel} : public at::meta::structured_{meta_name} {{
-void impl({', '.join(a.decl() for a in out_args)});
+void impl({", ".join(a.decl() for a in out_args)});
}};
"""
]
diff --git a/torchgen/dest/register_dispatch_key.py b/torchgen/dest/register_dispatch_key.py
index 015537df12e..5b7feef8323 100644
--- a/torchgen/dest/register_dispatch_key.py
+++ b/torchgen/dest/register_dispatch_key.py
@@ -332,7 +332,7 @@ class RegisterDispatchKey:
f"{copy_op}(std::get<{i}>({func_res}), {ret_name});"
for i, ret_name in enumerate(return_names)
)
- returns = f'{sig.returns_type().cpp_type()}({", ".join(return_names)})'
+ returns = f"{sig.returns_type().cpp_type()}({', '.join(return_names)})"
elif len(return_names) == 1:
ret_name = return_names[0]
updates = f"{copy_op}({func_res}, {ret_name});"
@@ -448,7 +448,7 @@ class RegisterDispatchKey:
def generate_defn(cpp_sig: CppSignature) -> str:
return f"""
{cpp_sig.defn()} {{
-return {sig.name()}({', '.join(e.expr for e in translate(cpp_sig.arguments(), sig.arguments()))});
+return {sig.name()}({", ".join(e.expr for e in translate(cpp_sig.arguments(), sig.arguments()))});
}}
"""
@@ -802,7 +802,7 @@ resize_out(out, sizes, strides, options);
def generate_defn(cpp_sig: CppSignature) -> str:
return f"""
{cpp_sig.defn()} {{
-return {sig.name()}({', '.join(e.expr for e in translate(cpp_sig.arguments(), sig.arguments()))});
+return {sig.name()}({", ".join(e.expr for e in translate(cpp_sig.arguments(), sig.arguments()))});
}}
"""
@@ -986,12 +986,15 @@ return {sig.name()}({', '.join(e.expr for e in translate(cpp_sig.arguments(), si
# For an overview of what this template code looks like, see
# https://github.com/pytorch/rfcs/pull/9
return f"""\
-{self.gen_class(
-f, k,
-class_name=class_name,
-parent_class=parent_class,
-generate_super=self.g.out.structured_inherits is not None
-)}
+{
+ self.gen_class(
+ f,
+ k,
+ class_name=class_name,
+ parent_class=parent_class,
+ generate_super=self.g.out.structured_inherits is not None,
+ )
+ }
{sig.defn()} {{
{sig_body_str}
diff --git a/torchgen/dest/ufunc.py b/torchgen/dest/ufunc.py
index 8bb873d8f58..e66c9a4e526 100644
--- a/torchgen/dest/ufunc.py
+++ b/torchgen/dest/ufunc.py
@@ -477,15 +477,15 @@ def compute_ufunc_cpu_dtype_body(
return f"""
{body_str}
cpu_kernel_vec(iter,
- [=]({', '.join(b.decl() for b in scalar_bindings)}) {{ return {scalar_loop.call(with_ctx(scalar_bindings))}; }},
- [=]({', '.join(b.decl() for b in vec_bindings)}) {{ return {vec_loop.call(with_ctx(vec_bindings))}; }}
+ [=]({", ".join(b.decl() for b in scalar_bindings)}) {{ return {scalar_loop.call(with_ctx(scalar_bindings))}; }},
+ [=]({", ".join(b.decl() for b in vec_bindings)}) {{ return {vec_loop.call(with_ctx(vec_bindings))}; }}
);
"""
else:
return f"""
{body_str}
cpu_kernel(iter,
- [=]({', '.join(b.decl() for b in scalar_bindings)}) {{ return {scalar_loop.call(with_ctx(scalar_bindings))}; }}
+ [=]({", ".join(b.decl() for b in scalar_bindings)}) {{ return {scalar_loop.call(with_ctx(scalar_bindings))}; }}
);
"""
diff --git a/torchgen/gen.py b/torchgen/gen.py
index 7aa6cd4b212..63dd621cdd8 100644
--- a/torchgen/gen.py
+++ b/torchgen/gen.py
@@ -499,7 +499,7 @@ def generate_static_dispatch_fallback_call(
return f"return {ns}::{DispatchKey.CompositeImplicitAutogradNestedTensor.lower()}::{name}({exprs});"
else:
return f"""TORCH_CHECK(false, "Static dispatch does not support {name} for\
-{', '.join([str(index.dispatch_key)for index in backend_indices])} ");"""
+{", ".join([str(index.dispatch_key) for index in backend_indices])} ");"""
def static_dispatch(
@@ -552,7 +552,7 @@ def static_dispatch(
)
if tensor_args != "":
subexprs.append(f"c10::detail::multi_dispatch_key_set({tensor_args})")
- stmts.append(f"""DispatchKeySet _dk_set = {' | '.join(subexprs)};""")
+ stmts.append(f"""DispatchKeySet _dk_set = {" | ".join(subexprs)};""")
stmts.append("DispatchKey _dk = c10::highestPriorityBackendTypeId(_dk_set);")
dispatch_code = []
@@ -1016,7 +1016,7 @@ C10_ALWAYS_INLINE
{sig.defn(name)} {{
{compute_dk}
return at::_ops::{f.func.name.unambiguous_name()}::redispatch(
- _dk, {', '.join(a.expr for a in dispatcher_exprs)});
+ _dk, {", ".join(a.expr for a in dispatcher_exprs)});
}}
"""
elif self.target is Target.REGISTRATION:
diff --git a/torchgen/gen_aoti_c_shim.py b/torchgen/gen_aoti_c_shim.py
index 1fcae0d343d..c6b83b2b32e 100644
--- a/torchgen/gen_aoti_c_shim.py
+++ b/torchgen/gen_aoti_c_shim.py
@@ -299,7 +299,7 @@ def gen_declaration_and_definition(
{declaration} {{
AOTI_TORCH_CONVERT_EXCEPTION_TO_ERROR_CODE({{
{tmp_result}{backend_call}(
-{textwrap.indent(', '.join(callsite_exprs), " ")}
+{textwrap.indent(", ".join(callsite_exprs), " ")}
);{textwrap.indent(ret_assignments_str, " ")}
}});
}}
diff --git a/torchgen/gen_backend_stubs.py b/torchgen/gen_backend_stubs.py
index bc9e2959ab7..8c6d29258d4 100644
--- a/torchgen/gen_backend_stubs.py
+++ b/torchgen/gen_backend_stubs.py
@@ -119,10 +119,10 @@ def parse_backend_yaml(
# ir_gen is ignored by parse_backend_yaml, and re-parsed in gen_lazy_tensor.py
yaml_values.pop("ir_gen", {})
- assert (
- len(yaml_values.keys()) == 0
- ), f'{backend_yaml_path} contains unexpected keys: {", ".join(yaml_values.keys())}. \
-Only the following keys are supported: {", ".join(valid_keys)}'
+ assert len(yaml_values.keys()) == 0, (
+ f"{backend_yaml_path} contains unexpected keys: {', '.join(yaml_values.keys())}. "
+ f"Only the following keys are supported: {', '.join(valid_keys)}"
+ )
def create_backend_index(
backend_ops: list[str],
diff --git a/torchgen/gen_executorch.py b/torchgen/gen_executorch.py
index 4f222f7ee8a..a897bb5e1f9 100644
--- a/torchgen/gen_executorch.py
+++ b/torchgen/gen_executorch.py
@@ -280,7 +280,7 @@ class ComputeCodegenUnboxedKernels:
[
f"""
Kernel(
- "{f.namespace}::{f.func.name}",{newline + '"' + (k + '",') if k != 'default' else ''}
+ "{f.namespace}::{f.func.name}",{newline + '"' + (k + '",') if k != "default" else ""}
[]({contextArg.defn()}, EValue** stack) {{
{code_connector.join(code_list)}
diff --git a/torchgen/gen_functionalization_type.py b/torchgen/gen_functionalization_type.py
index 4f9865d6d3e..b855eb1032c 100644
--- a/torchgen/gen_functionalization_type.py
+++ b/torchgen/gen_functionalization_type.py
@@ -407,7 +407,7 @@ def emit_view_functionalization_body(
// functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
{unwrap_tensor_args_str}
at::AutoDispatchSkipFunctionalize guard;
- return at::_ops::{noop_api_name}::call({', '.join(view_redispatch_args)});
+ return at::_ops::{noop_api_name}::call({", ".join(view_redispatch_args)});
}}
auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
auto inverse_return_mode = (
@@ -436,7 +436,7 @@ def emit_view_functionalization_body(
{meta_conversion_str}
at::AutoDispatchSkipFunctionalize func_guard;
c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
- reference_tensor_output = at::_ops::{noop_api_name}::call({', '.join(meta_call_args)});
+ reference_tensor_output = at::_ops::{noop_api_name}::call({", ".join(meta_call_args)});
}}
// This function adds the above view meta to the current tensor and replays them off the base,
// mutating the size/stride info of the current FunctionalTensorWrapper.
@@ -462,7 +462,7 @@ def emit_view_functionalization_body(
if (!at::functionalization::impl::isFunctionalTensor({view_tensor_name})) {{
// functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
at::AutoDispatchSkipFunctionalize guard;
- return at::_ops::{noop_api_name}::call({', '.join(view_redispatch_args)});
+ return at::_ops::{noop_api_name}::call({", ".join(view_redispatch_args)});
}}
auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
auto inverse_return_mode = (
@@ -477,15 +477,15 @@ def emit_view_functionalization_body(
{meta_conversion_str}
at::AutoDispatchSkipFunctionalize func_guard;
c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
- reference_tensor_output = at::_ops::{noop_api_name}::call({', '.join(meta_call_args)});
+ reference_tensor_output = at::_ops::{noop_api_name}::call({", ".join(meta_call_args)});
}}
{return_type} tmp_output;
{{
at::AutoDispatchSkipFunctionalize guard;
if (reapply_views) {{
- tmp_output = at::_ops::{noop_api_name}::call({', '.join(view_redispatch_args)});
+ tmp_output = at::_ops::{noop_api_name}::call({", ".join(view_redispatch_args)});
}} else {{
- tmp_output = at::_ops::{api_name}::call({', '.join(view_redispatch_args)});
+ tmp_output = at::_ops::{api_name}::call({", ".join(view_redispatch_args)});
}}
}}
{symbolic_inputs_check}
@@ -502,7 +502,7 @@ def emit_view_functionalization_body(
}},
/*has_symbolic_inputs=*/{symbolic_inputs_varname},
/*is_multi_output=*/{str(is_multi_output_view).lower()},
- /*is_as_strided=*/{str(str(f.func.name) == 'as_strided').lower()}
+ /*is_as_strided=*/{str(str(f.func.name) == "as_strided").lower()}
);
auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, {view_tensor_name}, view_meta);
// See Note [Propagating strides in the functionalization pass]
@@ -686,7 +686,7 @@ def emit_inplace_functionalization_body(
[
f"""
at::functionalization::impl::replace_(
- {a.name}, {'std::get<' + str(i) + '>(tmp_output)' if len(f.func.returns) > 1 else 'tmp_output'});
+ {a.name}, {"std::get<" + str(i) + ">(tmp_output)" if len(f.func.returns) > 1 else "tmp_output"});
at::functionalization::impl::commit_update({a.name});"""
for (i, a) in enumerate(f.func.arguments.out)
if a.annotation and a.annotation.is_write and a.type.is_tensor_like()
@@ -722,7 +722,7 @@ def emit_inplace_functionalization_body(
{meta_conversion_str}
at::AutoDispatchSkipFunctionalize func_guard;
c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
- at::_ops::{f.func.name.unambiguous_name()}::call({', '.join(a.name for a in meta_call_ctx)});
+ at::_ops::{f.func.name.unambiguous_name()}::call({", ".join(a.name for a in meta_call_ctx)});
}}
{unwrap_tensor_args_str}
if (!({check_all_mutated_args_are_functional})) {{
@@ -736,16 +736,16 @@ def emit_inplace_functionalization_body(
}} else {{
// case 2: arguments are not functional tensors, so we no-op and redispatch.
at::AutoDispatchSkipFunctionalize guard;
- {maybe_create_output(f, 'tmp_output')}at::_ops::{f.func.name.unambiguous_name()}::call({', '.join(inplace_exprs)});
- {return_from_mutable_noop_redispatch(f, 'tmp_output')}
+ {maybe_create_output(f, "tmp_output")}at::_ops::{f.func.name.unambiguous_name()}::call({", ".join(inplace_exprs)});
+ {return_from_mutable_noop_redispatch(f, "tmp_output")}
}}
}} else {{
{return_type} tmp_output;
{{
at::AutoDispatchSkipFunctionalize guard;
- tmp_output = at::_ops::{g.functional.func.name.unambiguous_name()}::call({', '.join(functional_exprs)});
+ tmp_output = at::_ops::{g.functional.func.name.unambiguous_name()}::call({", ".join(functional_exprs)});
}}
- {wrap_propagate_mutations_and_return(f, g.functional, 'tmp_output')}
+ {wrap_propagate_mutations_and_return(f, g.functional, "tmp_output")}
}}
}}"""
diff --git a/torchgen/gen_vmap_plumbing.py b/torchgen/gen_vmap_plumbing.py
index 0f1f14d4574..0632e7c4b96 100644
--- a/torchgen/gen_vmap_plumbing.py
+++ b/torchgen/gen_vmap_plumbing.py
@@ -97,7 +97,7 @@ def gen_case_where_all_bdims_are_none(
e.expr for e in translate(outer_sig.arguments(), sig.arguments())
)
return f"""\
-if ({' && '.join(conditions)}) {{
+if ({" && ".join(conditions)}) {{
return at::_ops::{sig.func.name.unambiguous_name()}::call({translated_args});
}}"""
@@ -124,7 +124,7 @@ def gen_returns(
if len(wrapped_returns) == 1:
result = f"return {wrapped_returns[0]};"
else:
- result = f'return std::make_tuple({", ".join(wrapped_returns)});'
+ result = f"return std::make_tuple({', '.join(wrapped_returns)});"
return result
@@ -168,14 +168,14 @@ def gen_vmap_inplace_plumbing(native_function: NativeFunction) -> str | None:
return f"""\
template <typename batch_rule_t, batch_rule_t batch_rule>
-{sig.decl(name=schema.name.unambiguous_name() + '_generated_plumbing')} {{
+{sig.decl(name=schema.name.unambiguous_name() + "_generated_plumbing")} {{
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
int64_t {cur_level_var} = maybe_layer->layerId();
{textwrap.indent(bdims_all_none_case, " ")}
{textwrap.indent(unwraps, " ")}
- batch_rule({', '.join(unwrapped_arg_list)});
+ batch_rule({", ".join(unwrapped_arg_list)});
return {schema.arguments.flat_all[0].name};
}}"""
@@ -190,14 +190,14 @@ def gen_vmap_plumbing_no_returns(native_function: NativeFunction) -> str:
return f"""\
template <typename batch_rule_t, batch_rule_t batch_rule>
-{sig.decl(name=schema.name.unambiguous_name() + '_generated_plumbing')} {{
+{sig.decl(name=schema.name.unambiguous_name() + "_generated_plumbing")} {{
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
int64_t {cur_level_var} = maybe_layer->layerId();
{textwrap.indent(bdims_all_none_case, " ")}
{textwrap.indent(unwraps, " ")}
- batch_rule({', '.join(unwrapped_arg_list)});
+ batch_rule({", ".join(unwrapped_arg_list)});
}}"""
@@ -240,14 +240,14 @@ def gen_vmap_plumbing(native_function: NativeFunction) -> str | None:
wrapped_returns = gen_returns(returns, cur_level_var, results_var)
return f"""\
template <typename batch_rule_t, batch_rule_t batch_rule>
-{sig.decl(name=schema.name.unambiguous_name() + '_generated_plumbing')} {{
+{sig.decl(name=schema.name.unambiguous_name() + "_generated_plumbing")} {{
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t {cur_level_var} = maybe_layer->layerId();
{textwrap.indent(bdims_all_none_case, " ")}
{textwrap.indent(unwraps, " ")}
- auto {results_var} = batch_rule({', '.join(unwrapped_arg_list)});
+ auto {results_var} = batch_rule({", ".join(unwrapped_arg_list)});
{wrapped_returns}
}}"""
diff --git a/torchgen/model.py b/torchgen/model.py
index 0c35e3b98a6..7548a8853de 100644
--- a/torchgen/model.py
+++ b/torchgen/model.py
@@ -1822,7 +1822,7 @@ class Annotation:
alias_set = f"{alias_set}!"
alias_set_after = "|".join(self.alias_set_after)
if alias_set_after:
- alias_set = f'{alias_set}{" -> "}{alias_set_after}'
+ alias_set = f"{alias_set} -> {alias_set_after}"
return alias_set
diff --git a/torchgen/static_runtime/generator.py b/torchgen/static_runtime/generator.py
index 1ed70ec5200..bc1772422a8 100644
--- a/torchgen/static_runtime/generator.py
+++ b/torchgen/static_runtime/generator.py
@@ -534,7 +534,7 @@ def generate_non_out_variant_call(
kernel_name = get_kernel_name(g, backend_index)
arg_names = (arg.name for arg in schema.schema_order_arguments())
namespace_name = "cpu" if g.structured else "native"
- return f'at::{namespace_name}::{kernel_name}({",".join(arg_names)})'
+ return f"at::{namespace_name}::{kernel_name}({','.join(arg_names)})"
def generate_call_to_view_ops(
@@ -547,7 +547,7 @@ def generate_call_to_view_ops(
kernel_name = kernel.kernel
arg_names = (arg.name for arg in schema.schema_order_arguments())
namespace_name = "native"
- return f'at::{namespace_name}::{kernel_name}({",".join(arg_names)})'
+ return f"at::{namespace_name}::{kernel_name}({','.join(arg_names)})"
def generate_out_variant_call(