Revert "[BE]: Update ruff to 0.285 (#107519)"

This reverts commit 88ab3e4322.

Reverted https://github.com/pytorch/pytorch/pull/107519 on behalf of https://github.com/ZainRizvi due to Sorry, but this PR breaks internal tests. @ezyang, can you please help them get unblocked? It seems like one of the strings was probably accidentally modified ([comment](https://github.com/pytorch/pytorch/pull/107519#issuecomment-1688833480))
PyTorch MergeBot 2023-08-22 19:53:32 +00:00
parent 1e9b590df9
commit d59a6864fb
86 changed files with 403 additions and 319 deletions
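For context, the changes reverted below undo an automated rewrite of .format() calls into f-strings. A minimal, hypothetical sketch of how such a rewrite can silently alter a string when only part of an implicitly concatenated literal receives the f-prefix (the names and values below are illustrative, not taken from this PR):

# Hypothetical sketch of the failure mode the revert message alludes to: with
# .format(), brace escaping ("{{}}") is undone across the whole implicitly
# concatenated literal, but after an f-string rewrite only the pieces that
# received an f-prefix de-escape their braces.
name, next_id = "aten::add", 3

before = (
    '{{"name": "{}", '
    '"id": {}, '
    '"args": {{}}}}, '.format(name, next_id)
)

after = (
    f'{{"name": "{name}", '
    f'"id": {next_id}, '
    '"args": {{}}}}, '  # no f-prefix, so the escaped braces stay doubled in the output
)

print(before)  # {"name": "aten::add", "id": 3, "args": {}},
print(after)   # {"name": "aten::add", "id": 3, "args": {{}}}},
assert before != after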


@ -2666,6 +2666,6 @@ init_command = [
'python3',
'tools/linter/adapters/pip_init.py',
'--dry-run={{DRYRUN}}',
'ruff==0.0.285',
'ruff==0.0.280',
]
is_formatter = true


@ -168,7 +168,7 @@ class ResNet(nn.Module):
if len(replace_stride_with_dilation) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
f"or a 3-element tuple, got {replace_stride_with_dilation}"
"or a 3-element tuple, got {}".format(replace_stride_with_dilation)
)
self.groups = groups
self.base_width = width_per_group


@ -200,10 +200,10 @@ class BenchmarkRunner:
def _print_header(self):
DASH_LINE = "-" * 40
print(
f"# {DASH_LINE}\n"
"# {}\n"
"# PyTorch/Caffe2 Operator Micro-benchmarks\n"
f"# {DASH_LINE}\n"
f"# Tag : {self.args.tag_filter}\n"
"# {}\n"
"# Tag : {}\n".format(DASH_LINE, DASH_LINE, self.args.tag_filter)
)
if self.args.list_tests:
print("# List of tests:")


@ -57,8 +57,10 @@ def main():
bench_min, bench_std = bench(tensor_1, tensor_2)
print(
f"Type {t.__name__} had a minimum time of {10**6 * bench_min} us"
f" and a standard deviation of {(10**6) * bench_std} us."
"Type {} had a minimum time of {} us"
" and a standard deviation of {} us.".format(
t.__name__, (10**6 * bench_min), (10**6) * bench_std
)
)


@ -74,7 +74,6 @@ select = [
"PIE807",
"PIE810",
"PLE",
"RUF017",
"TRY302",
]


@ -173,7 +173,7 @@ def write_test_to_test_class(
assert not ('cpp_options_args' in test_params_dict and 'cpp_function_call' in test_params_dict), (
"Only one of `cpp_options_args` and `cpp_function_call` entries "
f"should be present in test params dict:\n{pprint.pformat(test_params_dict)}")
"should be present in test params dict:\n{}").format(pprint.pformat(test_params_dict))
functional_name = compute_functional_name(test_params_dict)


@ -209,11 +209,11 @@ def process_test_params_for_module(test_params_dict, device, test_instance_class
if 'constructor_args' in test_params_dict:
assert 'cpp_constructor_args' in test_params_dict, (
"If `constructor_args` is present in test params dict, to enable C++ API parity test, "
f"`cpp_constructor_args` must be present in:\n{pprint.pformat(test_params_dict)}"
"`cpp_constructor_args` must be present in:\n{}"
"If you are interested in adding the C++ API parity test, please see:\n"
"NOTE [How to check NN module / functional API parity between Python and C++ frontends]. \n"
"If not, please add `test_cpp_api_parity=False` to the test params dict and file an issue about this."
)
).format(pprint.pformat(test_params_dict))
return TorchNNModuleTestParams(
module_name=module_name,
@ -233,16 +233,16 @@ def write_test_to_test_class(
module_name = compute_module_name(test_params_dict)
assert hasattr(torch.nn, module_name), (
f"`torch.nn` doesn't have module `{module_name}`. "
"`torch.nn` doesn't have module `{}`. "
"If you are adding a new test, please set `fullname` using format `ModuleName_desc` "
f"or set `module_name` using format `ModuleName` in the module test dict:\n{pprint.pformat(test_params_dict)}"
)
"or set `module_name` using format `ModuleName` in the module test dict:\n{}"
).format(module_name, pprint.pformat(test_params_dict))
module_full_name = 'torch::nn::' + module_name
assert module_full_name in parity_table['torch::nn'], (
f"Please add `{module_full_name}` entry to `torch::nn` section of `test/cpp_api_parity/parity-tracker.md`. "
f"(Discovered while processing\n{pprint.pformat(test_params_dict)}.)")
"Please add `{}` entry to `torch::nn` section of `test/cpp_api_parity/parity-tracker.md`. "
"(Discovered while processing\n{}.)").format(module_full_name, pprint.pformat(test_params_dict))
for device in devices:
test_params = process_test_params_for_module(


@ -329,7 +329,7 @@ class TestMultiheadAttentionNN(NNTestCase):
key = torch.rand(batch_size, src_len, embed_dim) # [N, S, D]
value = key # [N, S, D]
attn_mask = torch.randint(0, 2, (batch_size, tgt_len, src_len)).float() # [N, T, S]
attn_mask = attn_mask.masked_fill(attn_mask == 0, float('-inf')).masked_fill(attn_mask == 1, 0.0)
attn_mask = attn_mask.masked_fill(attn_mask == 0, float('-inf')).masked_fill(attn_mask == 1, float(0.0))
mta_model = torch.nn.MultiheadAttention(embed_dim, num_heads)


@ -32,7 +32,8 @@ class SqueezeNet(nn.Module):
super().__init__()
if version not in [1.0, 1.1]:
raise ValueError(
f"Unsupported SqueezeNet version {version}:" "1.0 or 1.1 expected"
"Unsupported SqueezeNet version {version}:"
"1.0 or 1.1 expected".format(version=version)
)
self.num_classes = num_classes
if version == 1.0:


@ -82,8 +82,10 @@ class TestQuantizationDocs(QuantizationTestCase):
# want to make sure we are actually getting some code,
assert last_line_num - line_num_start > 3 or short_snippet, (
f"The code in {path_to_file} identified by {unique_identifier} seems suspiciously short:"
f"\n\n###code-start####\n{code}###code-end####"
"The code in {} identified by {} seems suspiciously short:"
"\n\n###code-start####\n{}###code-end####".format(
path_to_file, unique_identifier, code
)
)
return code


@ -806,11 +806,11 @@ class TestQuantizedOps(TestCase):
C_relu, C_relu_hat.q_scale(), C_relu_hat.q_zero_point(), dtype)
self.assertEqual(C_ref.dequantize(), C_hat.dequantize(),
msg=f"{binary_op_name}_scalar results don't match: "
f"{C_ref.dequantize()} vs {C_hat.dequantize()}")
msg="{}_scalar results don't match: "
"{} vs {}".format(binary_op_name, C_ref.dequantize(), C_hat.dequantize()))
self.assertEqual(C_relu_ref.dequantize(), C_relu_hat.dequantize(),
msg=f"{binary_op_name}_scalar_relu results don't match: "
f"{C_relu_ref.dequantize()} vs {C_relu_hat.dequantize()}")
msg="{}_scalar_relu results don't match: "
"{} vs {}".format(binary_op_name, C_relu_ref.dequantize(), C_relu_hat.dequantize()))
@unittest.skipIf(IS_MACOS, "skipping macos test")
@given(A=hu.tensor(shapes=hu.array_shapes(1, 4, 1, 5),


@ -67,7 +67,7 @@ class TestAutocastCPU(TestCase):
if (output is not None) and (output_method is not None):
self.assertTrue(type(output) == type(output_method))
comparison = compare(output, output_method)
self.assertTrue(comparison, f"torch.{op} result did not match Tensor.{op} result")
self.assertTrue(comparison, "torch.{0} result did not match Tensor.{0} result".format(op))
# Compare numerics to Python-side "autocasting" that (we expect) does the same thing
# as the C++-side autocasting, and should be bitwise accurate.


@ -168,9 +168,9 @@ class TestBinaryUfuncs(TestCase):
if _numel(l) <= 100 and _numel(r) <= 100:
msg = (
"Failed to produce expected results! Input lhs tensor was"
f" {l}, rhs tensor was {r}, torch result is {actual}, and reference result is"
f" {expected}."
)
" {}, rhs tensor was {}, torch result is {}, and reference result is"
" {}."
).format(l, r, actual, expected)
else:
msg = None


@ -150,14 +150,17 @@ class TestCppExtensionJIT(common.TestCase):
err = err.decode("ascii")
if not p.returncode == 0 or not err == '':
raise AssertionError(f"Flags: {flags}\nReturncode: {p.returncode}\nStderr: {err}\n"
f"Output: {output} ")
raise AssertionError("Flags: {}\nReturncode: {}\nStderr: {}\n"
"Output: {} ".format(flags, p.returncode,
err, output))
actual_arches = sorted(re.findall(r'sm_\d\d', output))
expected_arches = sorted(['sm_' + xx for xx in expected_values])
self.assertEqual(actual_arches, expected_arches,
msg=f"Flags: {flags}, Actual: {actual_arches}, Expected: {expected_arches}\n"
f"Stderr: {err}\nOutput: {output}")
msg="Flags: {}, Actual: {}, Expected: {}\n"
"Stderr: {}\nOutput: {}".format(
flags, actual_arches, expected_arches,
err, output))
temp_dir = tempfile.mkdtemp()
old_envvar = os.environ.get('TORCH_CUDA_ARCH_LIST', None)


@ -168,7 +168,7 @@ class TestCuda(TestCase):
def test_set_per_process_memory_fraction(self):
# test invalid fraction value.
with self.assertRaisesRegex(TypeError, "Invalid type"):
torch.cuda.set_per_process_memory_fraction(1)
torch.cuda.set_per_process_memory_fraction(int(1))
with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
torch.cuda.set_per_process_memory_fraction(-0.1)
with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
@ -1765,7 +1765,7 @@ torch.cuda.synchronize()
if (output is not None) and (output_method is not None):
self.assertTrue(type(output) == type(output_method))
comparison = compare(output, output_method)
self.assertTrue(comparison, f"torch.{op} result did not match Tensor.{op} result")
self.assertTrue(comparison, "torch.{0} result did not match Tensor.{0} result".format(op))
# Compare numerics to Python-side "autocasting" that (we expect) does the same thing
# as the C++-side autocasting, and should be bitwise accurate.


@ -152,10 +152,10 @@ class TestDispatch(TestCase):
# NB: this finally test asserts that if a registrations fails,
# the dispatcher is left in the same state *that it was before*!
check_invariants(
f"running ctors {ctor_order[:i]} and then failing to run ctor {op_ix} "
"running ctors {} and then failing to run ctor {} "
"(did this failure leave the dispatcher in a wedged state? "
"it shouldn't!)"
)
.format(ctor_order[:i], op_ix))
break
last_ctor = i
if expect_raises and len(active_ops) == len(ops):
@ -165,7 +165,7 @@ class TestDispatch(TestCase):
self.assertTrue(
False,
"expected exception to be raised, but nothing was raised "
f"(after running ctors {ctor_order})")
"(after running ctors {})".format(ctor_order))
# In the order specified by dtor_order, run deregistrations
for i, op_ix in enumerate(dtor_order):
# Trigger a destruction


@ -5810,19 +5810,19 @@ a")
raise RuntimeError('Unknown dtype')
if binary:
code = f'''
code = '''
graph(%3 : Tensor, %4 : Tensor):
%2 : {dtype_str}(*, *) = aten::{op}(%3, %4)
%1 : {dtype_str}(*, *) = aten::relu(%2)
%2 : {dtype}(*, *) = aten::{op}(%3, %4)
%1 : {dtype}(*, *) = aten::relu(%2)
return (%1)
'''
'''.format(op=op, dtype=dtype_str)
else:
code = f'''
code = '''
graph(%3 : Tensor):
%2 : {dtype_str}(*, *) = aten::{op}(%3)
%1 : {dtype_str}(*, *) = aten::relu(%2)
%2 : {dtype}(*, *) = aten::{op}(%3)
%1 : {dtype}(*, *) = aten::relu(%2)
return (%1)
'''
'''.format(op=op, dtype=dtype_str)
graph = parse_ir(code)
inputs = (2 if binary else 1) * [torch.rand(26, 2048, dtype=dtype)]
@ -14936,7 +14936,7 @@ dedent """
value = torch.rand((src_l, bsz, embed_size))
mask = (torch.triu(torch.ones(src_l, src_l)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, 0.0).to(torch.get_default_dtype())
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)).to(torch.get_default_dtype())
jit_out = jit_multihead_attn_forward(query, key, value,
embed_size, nhead,


@ -582,7 +582,7 @@ class TestOptimizer(TestCase):
self.assertTrue(
cloned.qualified_name.startswith('__torch__.'),
("Expected the cloned module's name to start with the string "
f"'__torch__.', but got: {cloned.qualified_name}"),
"'__torch__.', but got: {}").format(cloned.qualified_name),
)


@ -9436,8 +9436,8 @@ class TestConvolutionMPS(TestCaseMPS):
output = F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,
align_corners=align_corners)
self.assertEqual(output, groundtruth, atol=1e-5, rtol=0,
msg=f"groundtruth comparison failed for mode={mode}, "
f"padding_mode={padding_mode}")
msg="groundtruth comparison failed for mode={}, "
"padding_mode={}".format(mode, padding_mode))
class TestAdvancedIndexing(TestCaseMPS):
supported_dtypes = [torch.float32, torch.float16, torch.int64, torch.int32, torch.int16, torch.uint8]


@ -5959,8 +5959,8 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""")
output = F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,
align_corners=align_corners)
self.assertEqual(output, groundtruth, atol=1e-5, rtol=0,
msg=f"groundtruth comparison failed for mode={mode}, "
f"padding_mode={padding_mode}")
msg="groundtruth comparison failed for mode={}, "
"padding_mode={}".format(mode, padding_mode))
# See NOTE [ grid_sample CPU fallback ]
output = torch._grid_sampler_2d_cpu_fallback(
@ -6047,8 +6047,8 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""")
F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,
align_corners=align_corners).sum().backward()
self.assertEqual(grid.grad, groundtruth, atol=1e-5, rtol=0,
msg=f"gradient groundtruth comparison failed for mode={mode}, "
f"padding_mode={padding_mode}, input_requires_grad={input_requires_grad}")
msg="gradient groundtruth comparison failed for mode={}, "
"padding_mode={}, input_requires_grad={}".format(mode, padding_mode, input_requires_grad))
grid.grad.zero_()
# See NOTE [ grid_sample CPU fallback ]


@ -1098,8 +1098,10 @@ class TestCommon(TestCase):
RuntimeError,
msg=(
"inplace variant either incorrectly allowed "
f"resizing or you have marked the sample {sample.summary()}"
" incorrectly with `broadcasts_self=True"
"resizing or you have marked the sample {}"
" incorrectly with `broadcasts_self=True".format(
sample.summary()
)
),
):
variant_forward = variant(


@ -3503,8 +3503,8 @@ as the input tensor excluding its innermost dimension'):
expected = np.asarray(expected) # transform numpy scalars to numpy.ndarray instances
msg = ("Failed to produce expected results! Input tensor was"
f" {t}, torch result is {actual}, and reference result is"
f" {expected}.") if t.numel() < 10 else None
" {}, torch result is {}, and reference result is"
" {}.").format(t, actual, expected) if t.numel() < 10 else None
self.assertEqual(actual, expected, msg, exact_dtype=exact_dtype)


@ -451,8 +451,9 @@ class TestTensorCreation(TestCase):
other_dtype = torch.float64 if dtype == torch.float32 else torch.float32
a = torch.tensor([1, 2], device=device, dtype=dtype)
b = torch.tensor([3, 4], device=device, dtype=other_dtype)
error = f"Expected object of scalar type {dtype_name(dtype)} but got scalar type " \
f"{dtype_name(other_dtype)} for second argument"
error = "Expected object of scalar type {} but got scalar type " \
"{} for second argument".format(dtype_name(dtype),
dtype_name(other_dtype))
with self.assertRaisesRegex(RuntimeError, error):
op(a, b)
@ -471,8 +472,9 @@ class TestTensorCreation(TestCase):
b = torch.tensor([3, 4], device=device, dtype=dtype)
out = torch.zeros(2, device=device, dtype=dtype)
expected_dtype = torch.complex64 if dtype == torch.float32 else torch.complex128
error = f"Expected object of scalar type {complex_dtype_name(expected_dtype)} but got scalar type " \
f"{dtype_name(dtype)} for argument 'out'"
error = "Expected object of scalar type {} but got scalar type " \
"{} for argument 'out'".format(
complex_dtype_name(expected_dtype), dtype_name(dtype))
with self.assertRaisesRegex(RuntimeError, error):
op(a, b, out=out)


@ -8682,8 +8682,8 @@ tensor([[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
for common_args in [multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args]:
for k, v in common_args.items():
self.assertNotIn(v, desc, f'The argument description "{v}" in {func} can be '
f'replaced by {{{k}}}')
self.assertNotIn(v, desc, 'The argument description "{}" in {} can be '
'replaced by {{{}}}'.format(v, func, k))
def test_doc(self):
checked_types = (types.MethodType, types.FunctionType,
@ -8719,8 +8719,8 @@ tensor([[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
full_name = ns_name + '.' + name
if any(r.match(name) for r in skip_regexes):
self.assertFalse(has_doc,
f'New docs have been added for {full_name}, please remove '
'it from the skipped list in TestTorch.test_doc')
'New docs have been added for {}, please remove '
'it from the skipped list in TestTorch.test_doc'.format(full_name))
else:
self.assertTrue(has_doc, f'{full_name} is missing documentation')


@ -146,7 +146,7 @@ class TestTransformers(NNTestCase):
query = torch.rand(batch_size, tgt_len, embed_dim, device=device) # [N, T, D]
attn_mask = torch.randint(0, 2, (tgt_len, tgt_len)).cuda().float() # [T, T]
attn_mask = attn_mask.masked_fill(attn_mask == 0, float('-inf')).masked_fill(attn_mask == 1, 0.0)
attn_mask = attn_mask.masked_fill(attn_mask == 0, float('-inf')).masked_fill(attn_mask == 1, float(0.0))
attn_mask_4d = attn_mask.expand(batch_size, num_heads, tgt_len, tgt_len)


@ -101,9 +101,9 @@ class TestUnaryUfuncs(TestCase):
result.item(),
float("nan"),
msg=(
f"input of {lower_tensor.item()} outside lower domain boundary"
f" {low} produced {result.item()}, not nan!"
),
"input of {} outside lower domain boundary"
" {} produced {}, not nan!"
).format(lower_tensor.item(), low, result.item()),
)
if high is not None:
@ -120,9 +120,9 @@ class TestUnaryUfuncs(TestCase):
result.item(),
float("nan"),
msg=(
f"input of {higher_tensor.item()} outside upper domain boundary"
f" {high} produced {result.item()}, not nan!"
),
"input of {} outside upper domain boundary"
" {} produced {}, not nan!"
).format(higher_tensor.item(), high, result.item()),
)
# Helper for comparing torch tensors and numpy arrays
@ -245,9 +245,9 @@ class TestUnaryUfuncs(TestCase):
if t.numel() < 10:
msg = (
"Failed to produce expected results! Input tensor was"
f" {t}, torch result is {actual}, and reference result is"
f" {expected}."
)
" {}, torch result is {}, and reference result is"
" {}."
).format(t, actual, expected)
else:
msg = None


@ -6431,7 +6431,7 @@ class TestWhere:
e = float("-Infinity")
assert_equal(np.where(True, d, e).dtype, np.float32)
# also check upcast
e = 1e150
e = float(1e150)
assert_equal(np.where(True, d, e).dtype, np.float64)
def test_ndim(self):


@ -264,7 +264,9 @@ class TestPower:
a = t1(3)
b = t2(2)
result = a**b
msg = f"error with {t1!r} and {t2!r}:" f"got {result!r}, expected {9!r}"
msg = ("error with {!r} and {!r}:" "got {!r}, expected {!r}").format(
t1, t2, result, 9
)
if np.issubdtype(np.dtype(result), np.integer):
assert_(result == 9, msg)
else:


@ -485,8 +485,8 @@ class TestHistogramOptimBinNums:
assert_equal(
len(a),
numbins,
err_msg=f"For the {estimator} estimator "
f"with datasize of {testlen}",
err_msg="For the {} estimator "
"with datasize of {}".format(estimator, testlen),
)
def test_small(self):
@ -532,8 +532,8 @@ class TestHistogramOptimBinNums:
assert_equal(
len(a),
expbins,
err_msg=f"For the {estimator} estimator "
f"with datasize of {testlen}",
err_msg="For the {} estimator "
"with datasize of {}".format(estimator, testlen),
)
def test_incorrect_methods(self):
@ -566,7 +566,7 @@ class TestHistogramOptimBinNums:
assert_equal(
len(a),
numbins,
err_msg=f"{estimator} estimator, " "No Variance test",
err_msg="{} estimator, " "No Variance test".format(estimator),
)
def test_limited_variance(self):


@ -462,7 +462,7 @@ def gen_nn_functional(fm: FileManager) -> None:
"pdist",
"cosine_similarity",
]
imported_hints = [f"from .. import {_} as {_}" for _ in torch_imports]
imported_hints = ["from .. import {0} as {0}".format(_) for _ in torch_imports]
# Functions imported into `torch.nn.functional` from `torch._C._nn`
c_nn_imports = [
@ -479,7 +479,9 @@ def gen_nn_functional(fm: FileManager) -> None:
"one_hot",
"scaled_dot_product_attention",
]
imported_hints += [f"from .._C._nn import {_} as {_}" for _ in c_nn_imports]
imported_hints += [
"from .._C._nn import {0} as {0}".format(_) for _ in c_nn_imports
]
# This is from `torch._C._nn` but renamed
imported_hints.append("from .._C._nn import log_sigmoid\nlogsigmoid = log_sigmoid")
@ -873,13 +875,15 @@ def gen_pyi(
)
for binop in ["mul", "true_divide", "floor_divide"]:
unsorted_function_hints[binop].append(
f"def {binop}(input: Union[Tensor, Number], other: Union[Tensor, Number], "
"*, out: Optional[Tensor] = None) -> Tensor: ..."
"def {}(input: Union[Tensor, Number], other: Union[Tensor, Number], "
"*, out: Optional[Tensor] = None) -> Tensor: ...".format(binop)
)
for binop in ["add", "sub"]:
unsorted_function_hints[binop].append(
f"def {binop}(input: Union[Tensor, Number], other: Union[Tensor, Number], "
"*, alpha: Optional[Number] = 1, out: Optional[Tensor] = None) -> Tensor: ..."
"def {}(input: Union[Tensor, Number], other: Union[Tensor, Number], "
"*, alpha: Optional[Number] = 1, out: Optional[Tensor] = None) -> Tensor: ...".format(
binop
)
)
native_functions = parse_native_yaml(
@ -1082,8 +1086,8 @@ def gen_pyi(
binop += "_"
out_suffix = ""
unsorted_tensor_method_hints[binop].append(
f"def {binop}(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat]{out_suffix})"
" -> Tensor: ..."
"def {}(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat]{})"
" -> Tensor: ...".format(binop, out_suffix)
)
for binop in ["add", "sub"]:
for inplace in [False, True]:
@ -1092,9 +1096,9 @@ def gen_pyi(
binop += "_"
out_suffix = ""
unsorted_tensor_method_hints[binop].append(
f"def {binop}(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat], "
f"*, alpha: Optional[Number] = 1{out_suffix})"
" -> Tensor: ..."
"def {}(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat], "
"*, alpha: Optional[Number] = 1{})"
" -> Tensor: ...".format(binop, out_suffix)
)
simple_conversions = [
"byte",


@ -1733,8 +1733,8 @@ def _register_device_module(device_type, module):
device_type = torch.device(device_type).type
m = sys.modules[__name__]
if hasattr(m, device_type):
raise RuntimeError(f"The runtime module of '{device_type}' has already "
f"been registered with '{getattr(m, device_type)}'")
raise RuntimeError("The runtime module of '{}' has already "
"been registered with '{}'".format(device_type, getattr(m, device_type)))
setattr(m, device_type, module)
torch_module_name = '.'.join([__name__, device_type])
sys.modules[torch_module_name] = module


@ -614,8 +614,8 @@ def _lobpcg(
if m < 3 * n:
raise ValueError(
f"LPBPCG algorithm is not applicable when the number of A rows (={m})"
f" is smaller than 3 x the number of requested eigenpairs (={n})"
"LPBPCG algorithm is not applicable when the number of A rows (={})"
" is smaller than 3 x the number of requested eigenpairs (={})".format(m, n)
)
method = "ortho" if method is None else method
@ -1151,7 +1151,9 @@ class LOBPCG:
assert B is not None
raise ValueError(
"Overdetermined shape of U:"
f" #B-cols(={B.shape[-1]}) >= #U-cols(={U.shape[-1]}) + #V-cols(={V.shape[-1]}) must hold"
" #B-cols(={}) >= #U-cols(={}) + #V-cols(={}) must hold".format(
B.shape[-1], U.shape[-1], V.shape[-1]
)
)
self.ivars["ortho_i"] = i
self.ivars["ortho_j"] = j


@ -50,8 +50,8 @@ def single_ellipsis_index(names, fn_name):
ellipsis_indices = [i for i, name in enumerate(names) if is_ellipsis(name)]
if len(ellipsis_indices) >= 2:
raise RuntimeError(
f"{fn_name}: More than one Ellipsis ('...') found in names ("
f"{names}). This function supports up to one Ellipsis."
"{}: More than one Ellipsis ('...') found in names ("
"{}). This function supports up to one Ellipsis.".format(fn_name, names)
)
if len(ellipsis_indices) == 1:
return ellipsis_indices[0]
@ -97,8 +97,15 @@ def update_names_with_mapping(tensor, rename_map, inplace):
dim_map[old_dim] = new_dim
else:
raise RuntimeError(
f"{namer_api_name(inplace)}: Tried to rename dim '{old_dim}' to dim "
f"{new_dim} in Tensor[{tensor.names}] but dim '{old_dim}' does not exist"
(
"{api_name}: Tried to rename dim '{old_dim}' to dim "
"{new_dim} in Tensor[{dims}] but dim '{old_dim}' does not exist"
).format(
old_dim=old_dim,
new_dim=new_dim,
dims=tensor.names,
api_name=namer_api_name(inplace),
)
)
return tensor._update_names(tuple(dim_map.values()), inplace)
@ -142,10 +149,10 @@ def update_names(tensor, names, rename_map, inplace):
has_rename_pairs = bool(rename_map)
if has_names and has_rename_pairs:
raise RuntimeError(
f"{namer_api_name(inplace)}: This function takes either positional "
f"args or keyword args, but not both. Use tensor.{namer_api_name(inplace)}(*names) "
f"to name dims and tensor.{namer_api_name(inplace)}(**rename_map) to rename "
"dims."
"{api_name}: This function takes either positional "
"args or keyword args, but not both. Use tensor.{api_name}(*names) "
"to name dims and tensor.{api_name}(**rename_map) to rename "
"dims.".format(api_name=namer_api_name(inplace))
)
# Special case for tensor.rename(*[]), which is valid for a 0 dim tensor.


@ -1410,8 +1410,8 @@ def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
ret = nulp_diff(a, b, dtype)
if not np.all(ret <= maxulp):
raise AssertionError(
f"Arrays are not almost equal up to {maxulp:g} "
f"ULP (max difference is {np.max(ret):g} ULP)"
"Arrays are not almost equal up to {:g} "
"ULP (max difference is {:g} ULP)".format(maxulp, np.max(ret))
)
return ret


@ -646,8 +646,10 @@ class OpOverloadPacket:
# an object name different from the one the attribute
# query was performed on.
raise AttributeError(
f"'{str(self)}' can't have an overload name beginning with '__' and the "
f"underlying op {str(self._op)} has no attribute {key} either."
"'{}' can't have an overload name beginning with '__' and the "
"underlying op {} has no attribute {} either.".format(
str(self), str(self._op), key
)
) from None
try:


@ -1483,8 +1483,10 @@ def _slice_meta(
raise ValueError(msg)
if x > y:
msg = (
f"Attempting to slice a tensor but a start index in {start_indices} is greater than"
f" the length of its corresponding dimension in shape {a.shape}"
"Attempting to slice a tensor but a start index in {} is greater than"
" the length of its corresponding dimension in shape {}".format(
start_indices, a.shape
)
)
raise ValueError(msg)
@ -1494,14 +1496,16 @@ def _slice_meta(
raise ValueError(msg)
if x > y:
msg = (
f"Attempting to slice a tensor but a stop index in {limit_indices} is greater than the length of "
f" its corresponding dimension in shape {a.shape}"
"Attempting to slice a tensor but a stop index in {} is greater than the length of "
" its corresponding dimension in shape {}".format(
limit_indices, a.shape
)
)
raise ValueError(msg)
if x < z:
msg = (
f"Attempting to slice a tensor but a start index in {x} is greater than "
f" its corresponding stop index {z}"
"Attempting to slice a tensor but a start index in {} is greater than "
" its corresponding stop index {}".format(x, z)
)
for x in _strides:


@ -605,7 +605,9 @@ def margin_ranking_loss(
if input1.ndim != input2.ndim or input1.ndim != target.ndim:
raise RuntimeError(
"margin_ranking_loss : All input tensors should have same dimension but got sizes: "
f"input1: {input1.shape}, input2: {input2.shape}, target: {target.shape} "
"input1: {}, input2: {}, target: {} ".format(
input1.shape, input2.shape, target.shape
)
)
_check_reduction_value(reduction)
loss = torch.clamp_min(-target * (input1 - input2) + margin, 0)


@ -318,8 +318,8 @@ class LSTM(torch.nn.Module):
if num_layers == 1:
warnings.warn("dropout option adds dropout after all but last "
"recurrent layer, so non-zero dropout expects "
f"num_layers greater than 1, but got dropout={dropout} "
f"and num_layers={num_layers}")
"num_layers greater than 1, but got dropout={} "
"and num_layers={}".format(dropout, num_layers))
layers = [_LSTMLayer(self.input_size, self.hidden_size,
self.bias, batch_first=False,


@ -105,7 +105,7 @@ class Linear(nnq.Linear):
weight_observer = default_dynamic_qconfig.weight()
dtype = weight_observer.dtype
assert dtype in [torch.qint8, torch.float16], "The only supported dtypes for " \
f"dynamic quantized linear are qint8 and float16 got: {dtype}"
"dynamic quantized linear are qint8 and float16 got: {}".format(dtype)
weight_observer(mod.weight)
if dtype == torch.qint8:
qweight = _quantize_weight(mod.weight.float(), weight_observer)


@ -93,8 +93,8 @@ class RNNBase(torch.nn.Module):
if dropout > 0 and num_layers == 1: # type: ignore[operator]
warnings.warn("dropout option adds dropout after all but last "
"recurrent layer, so non-zero dropout expects "
f"num_layers greater than 1, but got dropout={dropout} and "
f"num_layers={num_layers}")
"num_layers greater than 1, but got dropout={} and "
"num_layers={}".format(dropout, num_layers))
if mode == 'LSTM':
gate_size = 4 * hidden_size


@ -977,8 +977,8 @@ def convert(
assert k in convert_node_name_to_qconfig, f'Expected key {k} in convert node_name_to_qconfig'
if convert_node_name_to_qconfig[k] is not None:
assert qconfig_equals(v, convert_node_name_to_qconfig[k]), \
f"Expected k {k} to have the same value in prepare and convert QConfigMappings, " \
f"but {v} was updated to {convert_node_name_to_qconfig[k]}"
"Expected k {} to have the same value in prepare and convert QConfigMappings, " \
"but {} was updated to {}".format(k, v, convert_node_name_to_qconfig[k])
node_name_to_qconfig = convert_node_name_to_qconfig
custom_module_classes = get_custom_module_class_keys(convert_custom_config.observed_to_quantized_mapping)


@ -189,7 +189,7 @@ def _create_obs_or_fq_from_qspec(
edge_or_node = quantization_spec.edge_or_node
assert edge_or_node in obs_or_fq_map, \
"please make sure only refer to edge or node that has " \
f"observer/fake_quant inserted: '{edge_or_node}' not in\n{obs_or_fq_map.keys()}"
"observer/fake_quant inserted: '{}' not in\n{}".format(edge_or_node, obs_or_fq_map.keys())
return obs_or_fq_map[edge_or_node]
elif isinstance(quantization_spec, DerivedQuantizationSpec):
# can't use asdict, so not calling get_observer_kwargs here


@ -226,10 +226,10 @@ def _get_quantized_qat_conv2d_bn_pattern(
)
else:
scaled_weight = torch.ops.quantized_decomposed.quantize_per_tensor(
scaled_weight, 1.0, 0, weight_quant_min, weight_quant_max, torch.int8,
scaled_weight, 1.0, int(0), weight_quant_min, weight_quant_max, torch.int8,
)
scaled_weight = torch.ops.quantized_decomposed.dequantize_per_tensor(
scaled_weight, 1.0, 0, weight_quant_min, weight_quant_max, torch.int8,
scaled_weight, 1.0, int(0), weight_quant_min, weight_quant_max, torch.int8,
)
if has_bias:
zero_bias = torch.zeros_like(kwargs["conv_bias"], dtype=x.dtype)
@ -283,10 +283,10 @@ def _get_folded_quantized_qat_conv2d_bn_pattern(
)
else:
conv_weight = torch.ops.quantized_decomposed.quantize_per_tensor(
conv_weight, 1.0, 0, weight_quant_min, weight_quant_max, torch.int8,
conv_weight, 1.0, int(0), weight_quant_min, weight_quant_max, torch.int8,
)
conv_weight = torch.ops.quantized_decomposed.dequantize_per_tensor(
conv_weight, 1.0, 0, weight_quant_min, weight_quant_max, torch.int8,
conv_weight, 1.0, int(0), weight_quant_min, weight_quant_max, torch.int8,
)
if has_bias:
x = F.conv2d(x, conv_weight, kwargs["conv_bias"])


@ -201,7 +201,7 @@ def get_swapped_custom_module_class(custom_module, custom_module_class_mapping,
quant_type = get_quant_type(qconfig)
class_mapping = custom_module_class_mapping.get(quant_type, {})
assert type(custom_module) in class_mapping, "did not find corresponding observed " \
f"module class for {type(custom_module)} in mapping: {class_mapping}"
"module class for {} in mapping: {}".format(type(custom_module), class_mapping)
return class_mapping[type(custom_module)]
def activation_dtype(qconfig):
@ -298,8 +298,8 @@ def get_quant_type(qconfig):
elif activation.dtype == torch.float16:
return QuantType.STATIC
raise Exception(f"Unrecognized dtype combination in get_quant_type: activation({activation.dtype}),"
f"weight({weight.dtype})")
raise Exception("Unrecognized dtype combination in get_quant_type: activation({}),"
"weight({})".format(activation.dtype, weight.dtype))
def check_min_max_valid(min_val: torch.Tensor, max_val: torch.Tensor) -> bool:
""" Checks if the given minimum and maximum values are valid, meaning that


@ -33,13 +33,17 @@ def _as_tuple(inp, arg_name=None, fn_name=None):
if not isinstance(el, torch.Tensor):
if is_inp_tuple:
raise TypeError(
f"The {arg_name} given to {fn_name} must be either a Tensor or a tuple of Tensors but the"
f" value at index {i} has type {type(el)}."
"The {} given to {} must be either a Tensor or a tuple of Tensors but the"
" value at index {} has type {}.".format(
arg_name, fn_name, i, type(el)
)
)
else:
raise TypeError(
f"The {arg_name} given to {fn_name} must be either a Tensor or a tuple of Tensors but the"
f" given {arg_name} has type {type(el)}."
"The {} given to {} must be either a Tensor or a tuple of Tensors but the"
" given {} has type {}.".format(
arg_name, fn_name, arg_name, type(el)
)
)
return is_inp_tuple, inp
@ -130,35 +134,37 @@ def _check_requires_grad(inputs, input_type, strict):
if inp is None:
# This can only be reached for grad_inputs.
raise RuntimeError(
f"The output of the user-provided function is independent of input {i}."
" This is not allowed in strict mode."
"The output of the user-provided function is independent of input {}."
" This is not allowed in strict mode.".format(i)
)
if not inp.requires_grad:
if input_type == "hessian":
raise RuntimeError(
f"The hessian of the user-provided function with respect to input {i}"
"The hessian of the user-provided function with respect to input {}"
" is independent of the input. This is not allowed in strict mode."
" You should ensure that your function is thrice differentiable and that"
" the hessian depends on the inputs."
" the hessian depends on the inputs.".format(i)
)
elif input_type == "jacobian":
raise RuntimeError(
"While computing the hessian, found that the jacobian of the user-provided"
f" function with respect to input {i} is independent of the input. This is not"
" function with respect to input {} is independent of the input. This is not"
" allowed in strict mode. You should ensure that your function is twice"
" differentiable and that the jacobian depends on the inputs (this would be"
" violated by a linear function for example)."
" violated by a linear function for example).".format(i)
)
elif input_type == "grad_inputs":
raise RuntimeError(
f"The gradient with respect to input {i} is independent of the inputs of the"
" user-provided function. This is not allowed in strict mode."
"The gradient with respect to input {} is independent of the inputs of the"
" user-provided function. This is not allowed in strict mode.".format(
i
)
)
else:
raise RuntimeError(
f"Output {i} of the user-provided function does not require gradients."
"Output {} of the user-provided function does not require gradients."
" The outputs must be computed in a differentiable manner from the input"
" when running in strict mode."
" when running in strict mode.".format(i)
)
@ -215,25 +221,27 @@ def _fill_in_zeros(grads, refs, strict, create_graph, stage):
if stage == "back":
raise RuntimeError(
"The output of the user-provided function is independent of "
f"input {i}. This is not allowed in strict mode."
"input {}. This is not allowed in strict mode.".format(i)
)
elif stage == "back_trick":
raise RuntimeError(
f"The gradient with respect to the input is independent of entry {i}"
"The gradient with respect to the input is independent of entry {}"
" in the grad_outputs when using the double backward trick to compute"
" forward mode gradients. This is not allowed in strict mode."
" forward mode gradients. This is not allowed in strict mode.".format(
i
)
)
elif stage == "double_back":
raise RuntimeError(
"The jacobian of the user-provided function is independent of "
f"input {i}. This is not allowed in strict mode."
"input {}. This is not allowed in strict mode.".format(i)
)
else:
raise RuntimeError(
"The hessian of the user-provided function is independent of "
f"entry {i} in the grad_jacobian. This is not allowed in strict "
"entry {} in the grad_jacobian. This is not allowed in strict "
"mode as it prevents from using the double backward trick to "
"replace forward mode AD."
"replace forward mode AD.".format(i)
)
grads_i = torch.zeros_like(refs[i])
@ -242,12 +250,16 @@ def _fill_in_zeros(grads, refs, strict, create_graph, stage):
if "double" not in stage:
raise RuntimeError(
"The jacobian of the user-provided function is independent of "
f"input {i}. This is not allowed in strict mode when create_graph=True."
"input {}. This is not allowed in strict mode when create_graph=True.".format(
i
)
)
else:
raise RuntimeError(
"The hessian of the user-provided function is independent of "
f"input {i}. This is not allowed in strict mode when create_graph=True."
"input {}. This is not allowed in strict mode when create_graph=True.".format(
i
)
)
res += (grads_i,)
@ -799,17 +811,17 @@ def jacobian(
if strict and create_graph and not vj_el.requires_grad:
msg = (
"The jacobian of the user-provided function is "
f"independent of input {i}. This is not allowed in "
"strict mode when create_graph=True."
"independent of input {}. This is not allowed in "
"strict mode when create_graph=True.".format(i)
)
raise RuntimeError(msg)
jac_i_el.append(vj_el)
else:
if strict:
msg = (
f"Output {i} of the user-provided function is "
f"independent of input {el_idx}. This is not allowed in "
"strict mode."
"Output {} of the user-provided function is "
"independent of input {}. This is not allowed in "
"strict mode.".format(i, el_idx)
)
raise RuntimeError(msg)
jac_i_el.append(torch.zeros_like(inp_el))


@ -246,14 +246,20 @@ class EventList(list):
# 's' and 'f' draw Flow arrows from
# the CPU launch to the GPU kernel
f.write(
f'{{"name": "{evt.trace_name}", '
'{{"name": "{}", '
'"ph": "s", '
f'"ts": {evt.time_range.start}, '
f'"tid": {evt.thread}, '
'"ts": {}, '
'"tid": {}, '
'"pid": "CPU functions", '
f'"id": {next_id}, '
f'"cat": "cpu_to_{device_name}", '
'"args": {{}}}}, '
'"id": {}, '
'"cat": "cpu_to_{}", '
'"args": {{}}}}, '.format(
evt.trace_name,
evt.time_range.start,
evt.thread,
next_id,
device_name,
)
)
# Note: use torch.profiler to get device kernel trace
next_id += 1


@ -94,8 +94,10 @@ class cuFFTPlanCacheManager:
index = torch.cuda._utils._get_device_index(device)
if index < 0 or index >= torch.cuda.device_count():
raise RuntimeError(
f"cufft_plan_cache: expected 0 <= device index < {torch.cuda.device_count()}, but got "
f"device with index {index}"
(
"cufft_plan_cache: expected 0 <= device index < {}, but got "
"device with index {}"
).format(torch.cuda.device_count(), index)
)
if len(self.caches) == 0:
self.caches.extend(


@ -1123,9 +1123,9 @@ def init_process_group(
if backend == Backend.MPI:
if world_size != -1 or rank != -1:
warnings.warn(
f"For MPI backend, world_size ({world_size}) and rank ({rank}) "
"For MPI backend, world_size ({}) and rank ({}) "
"are ignored since they are assigned by the "
"MPI runtime."
"MPI runtime.".format(world_size, rank)
)
default_pg, _ = _new_process_group_helper(


@ -500,8 +500,8 @@ class _RemoteModule(nn.Module):
and k not in _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING
):
raise AttributeError(
f"Attribute {k} must be either in ``_REMOTE_MODULE_PICKLED_ATTRIBUTES`` or "
"``_REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING``."
"Attribute {} must be either in ``_REMOTE_MODULE_PICKLED_ATTRIBUTES`` or "
"``_REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING``.".format(k)
)
def _install_generated_methods(self):
@ -729,9 +729,11 @@ def _remote_module_reducer(remote_module):
# Check if unpickled attributes are all in _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING.
elif k not in _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING:
print(
f"The new attribute ``{k}`` of RemoteModule is ignored during RPC pickling. "
"The new attribute ``{}`` of RemoteModule is ignored during RPC pickling. "
"To pickle this attribute, please add it to ``_REMOTE_MODULE_PICKLED_ATTRIBUTES``. "
"Otherwise, please explicitly add it to ``_REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING``.",
"Otherwise, please explicitly add it to ``_REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING``.".format(
k
),
file=sys.stderr,
)


@ -112,8 +112,8 @@ def _retrieve_device(module: nn.Module) -> torch.device:
device = parameter.device
elif device != parameter.device:
raise ValueError(
f'nn.Module: {module}, should have all parameters on a single device,'
' please use .to() to place the module on a single device')
'nn.Module: {}, should have all parameters on a single device,'
' please use .to() to place the module on a single device'.format(module))
return device if device is not None else torch.device("cpu")


@ -45,7 +45,9 @@ class Independent(Distribution):
if reinterpreted_batch_ndims > len(base_distribution.batch_shape):
raise ValueError(
"Expected reinterpreted_batch_ndims <= len(base_distribution.batch_shape), "
f"actual {reinterpreted_batch_ndims} vs {len(base_distribution.batch_shape)}"
"actual {} vs {}".format(
reinterpreted_batch_ndims, len(base_distribution.batch_shape)
)
)
shape = base_distribution.batch_shape + base_distribution.event_shape
event_dim = reinterpreted_batch_ndims + len(base_distribution.event_shape)


@ -205,9 +205,9 @@ class Dispatcher:
if not isinstance(typ, (type, list)):
str_sig = ', '.join(c.__name__ if isinstance(c, type)
else str(c) for c in signature)
raise TypeError(f"Tried to dispatch on non-type: {typ}\n"
f"In signature: <{str_sig}>\n"
f"In function: {self.name}")
raise TypeError("Tried to dispatch on non-type: {}\n"
"In signature: <{}>\n"
"In function: {}".format(typ, str_sig, self.name))
# handle variadic signatures
if isinstance(typ, list):
@ -272,7 +272,8 @@ class Dispatcher:
raise NotImplementedError(
"Matching functions for "
f"{self.name}: <{str_signature(types)}> found, but none completed successfully",) from e
"{}: <{}> found, but none completed successfully".format(
self.name, str_signature(types),),) from e
def __str__(self):
return f"<dispatched {self.name}>"


@ -310,17 +310,17 @@ def infer_concrete_type_builder(nn_module, share_types=True):
)
warnings.warn(
f"'{name}' was found in ScriptModule constants, "
f" but it is a non-constant {hint}. Consider removing it."
"'{}' was found in ScriptModule constants, "
" but it is a non-constant {}. Consider removing it.".format(name, hint)
)
continue
if not hasattr(nn_module, name):
# TODO: We should really error in this case, but its bc-breaking so
# we need to warn for at least one release
warnings.warn(
f"'{name}' was found in ScriptModule constants, "
"'{}' was found in ScriptModule constants, "
"but was not actually set in __init__. "
"Consider removing it."
"Consider removing it.".format(name)
)
continue
value = getattr(nn_module, name)
@ -370,8 +370,8 @@ def infer_concrete_type_builder(nn_module, share_types=True):
hint = (
"(This function exists as an attribute on the Python module, "
"but we failed to compile it to a TorchScript function. "
f"\nThe error stack is reproduced here:\n{e}"
)
"\nThe error stack is reproduced here:\n{}"
).format(e)
concrete_type_builder.add_failed_attribute(name, hint)
pass
@ -998,9 +998,9 @@ def try_compile_fn(fn, loc):
if not inspect.isfunction(fn) and not inspect.ismethod(fn):
raise RuntimeError(
f"`{fn}` is not a function. Recursive scripting only supports "
"`{}` is not a function. Recursive scripting only supports "
"Python functions or methods currently.\n"
f"Consider manually annotating `{fn}` with @torch.jit.script."
"Consider manually annotating `{}` with @torch.jit.script.".format(fn, fn)
)
# We don't have the actual scope where the function was defined, but we can


@ -257,7 +257,7 @@ class OrderedModuleDict(OrderedDictWrapper):
else:
raise RuntimeError(
"Cannot re-assign modules in a ScriptModule with non-scripted "
f"module, tried to replace existing module '{k}': {v}"
"module, tried to replace existing module '{}': {}".format(k, v)
)
def __getitem__(self, k):
@ -1402,7 +1402,7 @@ def _check_overload_defaults(impl_defaults, overload_defaults, loc):
loc,
"Default parameters on overloads do not affect the runtime so they "
"must equal to the default parameter on the implementation function. Found on "
f"parameter {name}",
"parameter {name}".format(name=name),
)
@ -1461,9 +1461,9 @@ def _check_directly_compile_overloaded(obj):
qual_name = _qualified_name(obj)
if _jit_internal._get_fn_overloads(qual_name) or _try_get_jit_cached_overloads(obj):
raise RuntimeError(
f"Function {qual_name} cannot be directly compiled because it"
"Function {} cannot be directly compiled because it"
" is overloaded. It must be used in a context of a function"
" where its inputs can determine which overload to call."
" where its inputs can determine which overload to call.".format(qual_name)
)


@ -254,8 +254,10 @@ def verify(model, args, loss_fn=torch.sum, devices=None):
out = (out,)
if loss_fn == torch.sum and len(out) != 1:
raise ValueError(
f"Model returns {len(out)} outputs, but default loss function "
"(torch.sum) can only handle a single output"
(
"Model returns {} outputs, but default loss function "
"(torch.sum) can only handle a single output"
).format(len(out))
)
out_vars, _ = _flatten(out)
saved_outs = [


@ -903,7 +903,9 @@ def _unpool_output_size(
if len(output_size) != len(kernel_size):
raise ValueError(
"output_size should be a sequence containing "
f"{len(kernel_size)} or {len(kernel_size) + 2} elements, but it has a length of '{len(output_size)}'"
"{} or {} elements, but it has a length of '{}'".format(
len(kernel_size), len(kernel_size) + 2, len(output_size)
)
)
for d in range(len(kernel_size)):
min_size = default_size[d] - stride[d]
@ -2354,8 +2356,8 @@ def embedding_bag(
if per_sample_weights is not None and input.size() != per_sample_weights.size():
raise ValueError(
f"embedding_bag: If per_sample_weights ({per_sample_weights.shape}) is not None, "
f"then it must have the same shape as the input ({input.shape})"
"embedding_bag: If per_sample_weights ({}) is not None, "
"then it must have the same shape as the input ({})".format(per_sample_weights.shape, input.shape)
)
if not weight.dim() == 2:
@ -2373,7 +2375,7 @@ def embedding_bag(
"if input is 2D, then offsets has to be None"
", as input is treated is a mini-batch of"
" fixed length sequences. However, found "
f"offsets of type {type_str}"
"offsets of type {}".format(type_str)
)
offsets = torch.arange(0, input.numel(), input.size(1), dtype=input.dtype, device=input.device)
@ -2414,7 +2416,7 @@ def embedding_bag(
raise NotImplementedError(
"embedding_bag: per_sample_weights was not None. "
"per_sample_weights is only supported for mode='sum' "
f"(got mode='{mode}'). Please open a feature request on GitHub."
"(got mode='{}'). Please open a feature request on GitHub.".format(mode)
)
ret, _, _, _ = torch.embedding_bag(
@ -3221,9 +3223,9 @@ def smooth_l1_loss(
)
if not (target.size() == input.size()):
warnings.warn(
f"Using a target size ({target.size()}) that is different to the input size ({input.size()}). "
"Using a target size ({}) that is different to the input size ({}). "
"This will likely lead to incorrect results due to broadcasting. "
"Please ensure they have the same size.",
"Please ensure they have the same size.".format(target.size(), input.size()),
stacklevel=2,
)
if size_average is not None or reduce is not None:
@ -3258,9 +3260,9 @@ def huber_loss(
delta=delta,
)
if not (target.size() == input.size()):
warnings.warn(f"Using a target size ({target.size()}) that is different to the input size ({input.size()}). "
warnings.warn("Using a target size ({}) that is different to the input size ({}). "
"This will likely lead to incorrect results due to broadcasting. "
"Please ensure they have the same size.",
"Please ensure they have the same size.".format(target.size(), input.size()),
stacklevel=2)
expanded_input, expanded_target = torch.broadcast_tensors(input, target)
@ -3286,9 +3288,9 @@ def l1_loss(
)
if not (target.size() == input.size()):
warnings.warn(
f"Using a target size ({target.size()}) that is different to the input size ({input.size()}). "
"Using a target size ({}) that is different to the input size ({}). "
"This will likely lead to incorrect results due to broadcasting. "
"Please ensure they have the same size.",
"Please ensure they have the same size.".format(target.size(), input.size()),
stacklevel=2,
)
if size_average is not None or reduce is not None:
@ -3317,9 +3319,9 @@ def mse_loss(
)
if not (target.size() == input.size()):
warnings.warn(
f"Using a target size ({target.size()}) that is different to the input size ({input.size()}). "
"Using a target size ({}) that is different to the input size ({}). "
"This will likely lead to incorrect results due to broadcasting. "
"Please ensure they have the same size.",
"Please ensure they have the same size.".format(target.size(), input.size()),
stacklevel=2,
)
if size_average is not None or reduce is not None:
@ -4042,8 +4044,8 @@ def interpolate(input: Tensor, size: Optional[int] = None, scale_factor: Optiona
raise NotImplementedError(
"Input Error: Only 3D, 4D and 5D input Tensors supported"
f" (got {input.dim()}D) for the modes: nearest | linear | bilinear | bicubic | trilinear | area | nearest-exact"
f" (got {mode})"
" (got {}D) for the modes: nearest | linear | bilinear | bicubic | trilinear | area | nearest-exact"
" (got {})".format(input.dim(), mode)
)
@ -4275,7 +4277,7 @@ def grid_sample(
raise ValueError(
"nn.functional.grid_sample(): expected padding_mode "
"to be 'zeros', 'border', or 'reflection', "
f"but got: '{padding_mode}'"
"but got: '{}'".format(padding_mode)
)
if mode == "bilinear":
@ -4383,7 +4385,7 @@ def affine_grid(theta: Tensor, size: List[int], align_corners: Optional[bool] =
raise NotImplementedError(
"affine_grid only supports 4D and 5D sizes, "
"for 2D and 3D affine transforms, respectively. "
f"Got size {size}."
"Got size {}.".format(size)
)
# check for empty span
if align_corners and min(spatial_size) == 1:


@ -543,13 +543,14 @@ def _make_deprecate(meth):
warnings.warn(f"nn.init.{old_name} is now deprecated in favor of nn.init.{new_name}.", stacklevel=2)
return meth(*args, **kwargs)
deprecated_init.__doc__ = fr"""
deprecated_init.__doc__ = r"""
{old_name}(...)
.. warning::
This method is now deprecated in favor of :func:`torch.nn.init.{new_name}`.
See :func:`~torch.nn.init.{new_name}` for details."""
See :func:`~torch.nn.init.{new_name}` for details.""".format(
old_name=old_name, new_name=new_name)
deprecated_init.__name__ = old_name
return deprecated_init


@ -224,9 +224,11 @@ class AdaptiveLogSoftmaxWithLoss(Module):
used_rows += row_indices.numel()
if used_rows != batch_size:
raise RuntimeError(f"Target values should be in [0, {self.n_classes - 1}], "
f"but values in range [{target.min().item()}, {target.max().item()}] "
"were found. ")
raise RuntimeError("Target values should be in [0, {}], "
"but values in range [{}, {}] "
"were found. ".format(self.n_classes - 1,
target.min().item(),
target.max().item()))
head_output = self.head(input)
head_logprob = log_softmax(head_output, dim=1)


@ -148,7 +148,8 @@ class Sequential(Module):
return ret
else:
raise ValueError('add operator supports only objects '
f'of Sequential class, but {str(type(other))} is given.')
'of Sequential class, but {} is given.'.format(
str(type(other))))
def pop(self, key: Union[int, slice]) -> Module:
v = self[key]
@ -163,7 +164,8 @@ class Sequential(Module):
return self
else:
raise ValueError('add operator supports only objects '
f'of Sequential class, but {str(type(other))} is given.')
'of Sequential class, but {} is given.'.format(
str(type(other))))
def __mul__(self, other: int) -> 'Sequential':
if not isinstance(other, int):


@ -656,9 +656,10 @@ class _ConvTransposeNd(_ConvNd):
min_size = min_sizes[i]
max_size = max_sizes[i]
if size < min_size or size > max_size:
raise ValueError(
f"requested an output size of {output_size}, but valid sizes range "
f"from {min_sizes} to {max_sizes} (for an input of {input.size()[2:]})")
raise ValueError((
"requested an output size of {}, but valid sizes range "
"from {} to {} (for an input of {})").format(
output_size, min_sizes, max_sizes, input.size()[2:]))
res = torch.jit.annotate(List[int], [])
for d in range(num_spatial_dims):


@ -461,8 +461,8 @@ class Module:
"".format(type(self).__name__, next(iter(kwargs))))
if self.call_super_init is False and bool(args):
raise TypeError(f"{type(self).__name__}.__init__() takes 1 positional argument but {len(args) + 1} were"
" given")
raise TypeError("{}.__init__() takes 1 positional argument but {} were"
" given".format(type(self).__name__, len(args) + 1))
"""
Calls super().__setattr__('a', a) instead of the typical self.a = a
@ -537,9 +537,9 @@ class Module:
elif hasattr(self, name) and name not in self._buffers:
raise KeyError(f"attribute '{name}' already exists")
elif tensor is not None and not isinstance(tensor, torch.Tensor):
raise TypeError(f"cannot assign '{torch.typename(tensor)}' object to buffer '{name}' "
raise TypeError("cannot assign '{}' object to buffer '{}' "
"(torch Tensor or None required)"
)
.format(torch.typename(tensor), name))
else:
for hook in _global_buffer_registration_hooks.values():
output = hook(self, name, tensor)
@ -580,15 +580,15 @@ class Module:
if param is None:
self._parameters[name] = None
elif not isinstance(param, Parameter):
raise TypeError(f"cannot assign '{torch.typename(param)}' object to parameter '{name}' "
raise TypeError("cannot assign '{}' object to parameter '{}' "
"(torch.nn.Parameter or None required)"
)
.format(torch.typename(param), name))
elif param.grad_fn:
raise ValueError(
f"Cannot assign non-leaf Tensor to parameter '{name}'. Model "
f"parameters must be created explicitly. To express '{name}' "
"Cannot assign non-leaf Tensor to parameter '{0}'. Model "
"parameters must be created explicitly. To express '{0}' "
"as a function of another Tensor, compute the value in "
"the forward() method.")
"the forward() method.".format(name))
else:
for hook in _global_parameter_registration_hooks.values():
output = hook(self, name, param)
@ -1143,7 +1143,7 @@ class Module:
if dtype is not None:
if not (dtype.is_floating_point or dtype.is_complex):
raise TypeError('nn.Module.to only accepts floating point or complex '
f'dtypes, but got desired dtype={dtype}')
'dtypes, but got desired dtype={}'.format(dtype))
if dtype.is_complex:
warnings.warn(
"Complex modules are a new feature under active development whose design may change, "
@ -1712,9 +1712,9 @@ class Module:
self.register_parameter(name, value)
elif params is not None and name in params:
if value is not None:
raise TypeError(f"cannot assign '{torch.typename(value)}' as parameter '{name}' "
raise TypeError("cannot assign '{}' as parameter '{}' "
"(torch.nn.Parameter or None expected)"
)
.format(torch.typename(value), name))
self.register_parameter(name, value)
else:
modules = self.__dict__.get('_modules')
@ -1730,9 +1730,9 @@ class Module:
modules[name] = value
elif modules is not None and name in modules:
if value is not None:
raise TypeError(f"cannot assign '{torch.typename(value)}' as child module '{name}' "
raise TypeError("cannot assign '{}' as child module '{}' "
"(torch.nn.Module or None expected)"
)
.format(torch.typename(value), name))
for hook in _global_module_registration_hooks.values():
output = hook(self, name, value)
if output is not None:
@ -1742,9 +1742,9 @@ class Module:
buffers = self.__dict__.get('_buffers')
if buffers is not None and name in buffers:
if value is not None and not isinstance(value, torch.Tensor):
raise TypeError(f"cannot assign '{torch.typename(value)}' as buffer '{name}' "
raise TypeError("cannot assign '{}' as buffer '{}' "
"(torch.Tensor or None expected)"
)
.format(torch.typename(value), name))
for hook in _global_buffer_registration_hooks.values():
output = hook(self, name, value)
if output is not None:
@ -2000,10 +2000,10 @@ class Module:
if key in state_dict:
input_param = state_dict[key]
if not torch.overrides.is_tensor_like(input_param):
error_msgs.append(f'While copying the parameter named "{key}", '
error_msgs.append('While copying the parameter named "{}", '
'expected torch.Tensor or Tensor-like object from checkpoint but '
f'received {type(input_param)}'
)
'received {}'
.format(key, type(input_param)))
continue
# This is used to avoid copying uninitialized parameters into
@ -2039,11 +2039,11 @@ class Module:
else:
param.copy_(input_param)
except Exception as ex:
error_msgs.append(f'While copying the parameter named "{key}", '
f'whose dimensions in the model are {param.size()} and '
f'whose dimensions in the checkpoint are {input_param.size()}, '
f'an exception occurred : {ex.args}.'
)
error_msgs.append('While copying the parameter named "{}", '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}, '
'an exception occurred : {}.'
.format(key, param.size(), input_param.size(), ex.args))
elif strict:
missing_keys.append(key)

View File

@ -70,8 +70,8 @@ class RNNBase(Module):
if dropout > 0 and num_layers == 1:
warnings.warn("dropout option adds dropout after all but last "
"recurrent layer, so non-zero dropout expects "
f"num_layers greater than 1, but got dropout={dropout} and "
f"num_layers={num_layers}")
"num_layers greater than 1, but got dropout={} and "
"num_layers={}".format(dropout, num_layers))
if not isinstance(hidden_size, int):
raise TypeError(f"hidden_size should be of type int, got: {type(hidden_size).__name__}")

View File

@ -169,8 +169,8 @@ class DataParallel(Module, Generic[T]):
for t in chain(self.module.parameters(), self.module.buffers()):
if t.device != self.src_device_obj:
raise RuntimeError("module must have its parameters and buffers "
f"on device {self.src_device_obj} (device_ids[0]) but found one of "
f"them on device: {t.device}")
"on device {} (device_ids[0]) but found one of "
"them on device: {}".format(self.src_device_obj, t.device))
inputs, module_kwargs = self.scatter(inputs, kwargs, self.device_ids)
# for forward function without any inputs, empty list and dict will be created
@ -249,8 +249,8 @@ def data_parallel(
for t in chain(module.parameters(), module.buffers()):
if t.device != src_device_obj:
raise RuntimeError("module must have its parameters and buffers "
f"on device {src_device_obj} (device_ids[0]) but found one of "
f"them on device: {t.device}")
"on device {} (device_ids[0]) but found one of "
"them on device: {}".format(src_device_obj, t.device))
inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
# for module without any inputs, empty list and dict will be created

View File

@ -697,7 +697,9 @@ class DistributedDataParallel(Module, Joinable):
self._log_and_throw(
ValueError,
"DistributedDataParallel's input module must be on "
f"the same type of devices, but input module parameters locate in {distinct_device_types}.",
"the same type of devices, but input module parameters locate in {}.".format(
distinct_device_types
),
)
self.device_type = list(distinct_device_types)[0]

View File

@ -71,8 +71,8 @@ def parallel_apply(
if t is None:
with lock:
results[i] = ExceptionWrapper(
where=f"in replica {i}, no device was provided and no tensor input was found; "
"device cannot be resolved")
where="in replica {}, no device was provided and no tensor input was found; "
"device cannot be resolved".format(i))
return
device = t.get_device()
if stream is None:

View File

@ -154,11 +154,11 @@ class UninitializedTensorMixin:
kwargs = {}
return super().__torch_function__(func, types, args, kwargs)
raise ValueError(
f'Attempted to use an uninitialized parameter in {func}. '
'Attempted to use an uninitialized parameter in {}. '
'This error happens when you are using a `LazyModule` or '
f'explicitly manipulating `torch.nn.parameter.{cls.__name__}` '
'explicitly manipulating `torch.nn.parameter.{}` '
'objects. When using LazyModules Call `forward` with a dummy batch '
'to initialize the parameters before calling torch functions')
'to initialize the parameters before calling torch functions'.format(func, cls.__name__))
def is_lazy(param):

View File

@ -379,7 +379,7 @@ class _SpectralNorm(Module):
if n_power_iterations <= 0:
raise ValueError('Expected n_power_iterations to be positive, but '
f'got n_power_iterations={n_power_iterations}')
'got n_power_iterations={}'.format(n_power_iterations))
self.dim = dim if dim >= 0 else dim + ndim
self.eps = eps
if ndim > 1:

View File

@ -288,7 +288,9 @@ class PruningContainer(BasePruningMethod):
elif method is not None and self._tensor_name != method._tensor_name:
raise ValueError(
"Can only add pruning methods acting on "
f"the parameter named '{self._tensor_name}' to PruningContainer {self}."
"the parameter named '{}' to PruningContainer {}.".format(
self._tensor_name, self
)
+ f" Found '{method._tensor_name}'"
)
# if all checks passed, add to _pruning_methods tuple
@ -1090,7 +1092,9 @@ def global_unstructured(parameters, pruning_method, importance_scores=None, **kw
if method.PRUNING_TYPE != "unstructured":
raise TypeError(
'Only "unstructured" PRUNING_TYPE supported for '
f"the `pruning_method`. Found method {pruning_method} of type {method.PRUNING_TYPE}"
"the `pruning_method`. Found method {} of type {}".format(
pruning_method, method.PRUNING_TYPE
)
)
container.add_pruning_method(method)
@ -1276,7 +1280,7 @@ def _validate_structured_pruning(t):
raise ValueError(
"Structured pruning can only be applied to "
"multidimensional tensors. Found tensor of shape "
f"{shape} with {len(shape)} dims"
"{} with {} dims".format(shape, len(shape))
)

View File

@ -327,8 +327,8 @@ def pad_packed_sequence(
if total_length < max_seq_length:
raise ValueError("Expected total_length to be at least the length "
"of the longest sequence in input, but got "
f"total_length={total_length} and max sequence length being {max_seq_length}"
)
"total_length={} and max sequence length being {}"
.format(total_length, max_seq_length))
max_seq_length = total_length
padded_output, lengths = _VF._pad_packed_sequence(
sequence.data, sequence.batch_sizes, batch_first, padding_value, max_seq_length)

View File

@ -29,7 +29,7 @@ class SpectralNorm:
self.dim = dim
if n_power_iterations <= 0:
raise ValueError('Expected n_power_iterations to be positive, but '
f'got n_power_iterations={n_power_iterations}')
'got n_power_iterations={}'.format(n_power_iterations))
self.n_power_iterations = n_power_iterations
self.eps = eps

View File

@ -40,7 +40,7 @@ class LRScheduler:
for i, group in enumerate(optimizer.param_groups):
if 'initial_lr' not in group:
raise KeyError("param 'initial_lr' is not specified "
f"in param_groups[{i}] when resuming an optimizer")
"in param_groups[{}] when resuming an optimizer".format(i))
self.base_lrs = [group['initial_lr'] for group in optimizer.param_groups]
self.last_epoch = last_epoch
@ -645,8 +645,8 @@ class SequentialLR(LRScheduler):
if (len(milestones) != len(schedulers) - 1):
raise ValueError(
"Sequential Schedulers expects number of schedulers provided to be one more "
f"than the number of milestone points, but got number of schedulers {len(schedulers)} and the "
f"number of milestones to be equal to {len(milestones)}"
"than the number of milestone points, but got number of schedulers {} and the "
"number of milestones to be equal to {}".format(len(schedulers), len(milestones))
)
self._schedulers = schedulers
self._milestones = milestones
@ -862,7 +862,7 @@ class ChainedScheduler(LRScheduler):
if (schedulers[scheduler_idx].optimizer != schedulers[0].optimizer):
raise ValueError(
"ChainedScheduler expects all schedulers to belong to the same optimizer, but "
f"got schedulers at index {0} and {scheduler_idx} to be different"
"got schedulers at index {} and {} to be different".format(0, scheduler_idx)
)
self._schedulers = list(schedulers)
self.optimizer = schedulers[0].optimizer

View File

@ -1577,9 +1577,9 @@ def handle_torch_function(
func_name = f'{public_api.__module__}.{public_api.__name__}'
msg = (
f"no implementation found for '{func_name}' on types that implement "
f'__torch_function__: {[type(arg) for arg in overloaded_args]}'
)
"no implementation found for '{}' on types that implement "
'__torch_function__: {}'
).format(func_name, [type(arg) for arg in overloaded_args])
if _is_torch_function_mode_enabled():
msg += f" nor in mode {_get_current_function_mode()}"
raise TypeError(msg)

View File

@ -122,11 +122,11 @@ class SobolEngine:
total_n = self.num_generated + n
if not (total_n & (total_n - 1) == 0):
raise ValueError("The balance properties of Sobol' points require "
f"n to be a power of 2. {self.num_generated} points have been "
f"previously generated, then: n={self.num_generated}+2**{m}={total_n}. "
"n to be a power of 2. {0} points have been "
"previously generated, then: n={0}+2**{1}={2}. "
"If you still want to do this, please use "
"'SobolEngine.draw()' instead."
)
.format(self.num_generated, m, total_n))
return self.draw(n=n, out=out, dtype=dtype)
def reset(self):
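An aside on the SobolEngine hunk just above: the restored .format() call reuses the positional field {0}, while the f-string spelling has to repeat the expression itself. A minimal standalone sketch of that difference — the plain locals below stand in for self.num_generated, m and total_n and are not taken from the diff:

# Standalone sketch: str.format can reference the same positional field ({0})
# more than once, whereas the equivalent f-string repeats the expression.
num_generated, m, total_n = 8, 3, 16

via_format = ("n to be a power of 2. {0} points have been "
              "previously generated, then: n={0}+2**{1}={2}.").format(num_generated, m, total_n)
via_fstring = (f"n to be a power of 2. {num_generated} points have been "
               f"previously generated, then: n={num_generated}+2**{m}={total_n}.")

assert via_format == via_fstring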

View File

@ -202,8 +202,10 @@ def check_module_version_greater_or_equal(module, req_version_tuple, error_if_ma
except Exception as e:
message = (
f"'{module.__name__}' module version string is malformed '{module.__version__}' and cannot be compared"
f" with tuple {str(req_version_tuple)}"
"'{}' module version string is malformed '{}' and cannot be compared"
" with tuple {}"
).format(
module.__name__, module.__version__, str(req_version_tuple)
)
if error_if_malformed:
raise RuntimeError(message) from e

View File

@ -1195,7 +1195,7 @@ class dtypes:
assert isinstance(arg, (list, tuple)), \
"When one dtype variant is a tuple or list, " \
"all dtype variants must be. " \
f"Received non-list non-tuple dtype {str(arg)}"
"Received non-list non-tuple dtype {}".format(str(arg))
assert all(isinstance(dtype, torch.dtype) for dtype in arg), f"Unknown dtype in {str(arg)}"
else:
assert all(isinstance(arg, torch.dtype) for arg in args), f"Unknown dtype in {str(args)}"

View File

@ -506,8 +506,9 @@ class parametrize(_TestParametrizer):
values = list(values) if len(self.arg_names) > 1 else [values]
if len(values) != len(self.arg_names):
raise RuntimeError(f'Expected # values == # arg names, but got: {len(values)} '
f'values and {len(self.arg_names)} names for test "{test.__name__}"')
raise RuntimeError('Expected # values == # arg names, but got: {} '
'values and {} names for test "{}"'.format(
len(values), len(self.arg_names), test.__name__))
param_kwargs = dict(zip(self.arg_names, values))
@ -3466,9 +3467,9 @@ This message can be suppressed by setting PYTORCH_PRINT_REPRO_ON_FAILURE=0"""
return accept_output("output")
else:
raise RuntimeError(
f"I got this output for {munged_id}{subname_output}:\n\n{s}\n\n"
"No expect file exists; to accept the current output, run:\n"
f"python {__main__.__file__} {munged_id} --accept") from None
("I got this output for {}{}:\n\n{}\n\n"
"No expect file exists; to accept the current output, run:\n"
"python {} {} --accept").format(munged_id, subname_output, s, __main__.__file__, munged_id)) from None
# a hack for JIT tests
if IS_WINDOWS:
@ -4071,9 +4072,10 @@ def check_test_defined_in_running_script(test_case):
if running_script_path is None:
return
test_case_class_file = os.path.abspath(os.path.realpath(inspect.getfile(test_case.__class__)))
assert test_case_class_file == running_script_path, f"Class of loaded TestCase \"{test_case.id()}\" " \
f"is not defined in the running script \"{running_script_path}\", but in \"{test_case_class_file}\". Did you " \
"accidentally import a unittest.TestCase from another file?"
assert test_case_class_file == running_script_path, "Class of loaded TestCase \"{}\" " \
"is not defined in the running script \"{}\", but in \"{}\". Did you " \
"accidentally import a unittest.TestCase from another file?".format(
test_case.id(), running_script_path, test_case_class_file)
def load_tests(loader, tests, pattern):
set_running_script_path()

View File

@ -362,7 +362,7 @@ def assert_deadline_disabled():
warning_message = (
"Your version of hypothesis is outdated. "
"To avoid `DeadlineExceeded` errors, please update. "
f"Current hypothesis version: {hypothesis.__version__}"
"Current hypothesis version: {}".format(hypothesis.__version__)
)
warnings.warn(warning_message)
else:

View File

@ -66,7 +66,7 @@ def get_execution_plan(graph_executor_state):
num_plans = len(execution_plans)
if num_plans != 1:
raise RuntimeError('This test assumes this GraphExecutor should '
f'only have one execution plan, got: {num_plans}')
'only have one execution plan, got: {}'.format(num_plans))
return execution_plans[0]
class _AssertRaisesRegexWithHighlightContext:

View File

@ -334,14 +334,17 @@ def augment_many_model_functions_with_bundled_inputs(
# Add to the high level helper methods
inputs_info = repr(info[function]) if info and function in info else '[]'
get_bundled_inputs_functions_and_info_template += f"""
get_bundled_inputs_functions_and_info_template += """
temp_dict : Dict[str,List[str]] = {{}}
info: List[str] = {inputs_info}
info: List[str] = {info}
temp_dict['info'] = info
temp_dict['get_inputs_function_name'] = ['get_all_bundled_inputs_for_{function_name}']
all_inputs['{function_name}'] = temp_dict
"""
temp_dict['get_inputs_function_name'] = ['get_all_bundled_inputs_for_{name}']
all_inputs['{name}'] = temp_dict
""".format(
name=function_name,
info=inputs_info,
)
# To ensure backwards compatibility and a streamlined api for forward these wrappers are provided
if function_name == 'forward':
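A short note on the template restored above: it mixes escaped braces ({{}}), which str.format leaves as literal braces, with named fields that the .format(...) call fills in. The sketch below is illustrative only; the template, name and info values are invented for the example and do not come from the diff:

# Standalone sketch of the escaping rules the restored template relies on:
# doubled braces emit a literal "{}", single-brace named fields are substituted.
template = """
temp_dict : Dict[str,List[str]] = {{}}
info: List[str] = {info}
all_inputs['{name}'] = temp_dict
"""

rendered = template.format(name="forward", info="['example']")
assert "temp_dict : Dict[str,List[str]] = {}" in rendered
assert "info: List[str] = ['example']" in rendered
assert "all_inputs['forward'] = temp_dict" in rendered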

View File

@ -1611,9 +1611,11 @@ def load_inline(name,
raise ValueError(f"Expected 'functions' to be a list or dict, but was {type(functions)}")
for function_name, docstring in functions.items():
if with_pytorch_error_handling:
module_def.append(f'm.def("{function_name}", torch::wrap_pybind_function({function_name}), "{docstring}");')
module_def.append(
'm.def("{0}", torch::wrap_pybind_function({0}), "{1}");'
.format(function_name, docstring))
else:
module_def.append(f'm.def("{function_name}", {function_name}, "{docstring}");')
module_def.append('m.def("{0}", {0}, "{1}");'.format(function_name, docstring))
module_def.append('}')
cpp_sources += module_def

View File

@ -315,7 +315,7 @@ class DataLoader(Generic[T_co]):
# See NOTE [ Custom Samplers and IterableDataset ]
raise ValueError(
"DataLoader with IterableDataset: expected unspecified "
f"batch_sampler option, but got batch_sampler={batch_sampler}")
"batch_sampler option, but got batch_sampler={}".format(batch_sampler))
else:
shuffle = bool(shuffle)
self._dataset_kind = _DatasetKind.Map
@ -397,19 +397,19 @@ class DataLoader(Generic[T_co]):
valid_start_methods = multiprocessing.get_all_start_methods()
if multiprocessing_context not in valid_start_methods:
raise ValueError(
'multiprocessing_context option '
f'should specify a valid start method in {valid_start_methods!r}, but got '
f'multiprocessing_context={multiprocessing_context!r}')
('multiprocessing_context option '
'should specify a valid start method in {!r}, but got '
'multiprocessing_context={!r}').format(valid_start_methods, multiprocessing_context))
multiprocessing_context = multiprocessing.get_context(multiprocessing_context)
if not isinstance(multiprocessing_context, python_multiprocessing.context.BaseContext):
raise TypeError('multiprocessing_context option should be a valid context '
'object or a string specifying the start method, but got '
f'multiprocessing_context={multiprocessing_context}')
raise TypeError(('multiprocessing_context option should be a valid context '
'object or a string specifying the start method, but got '
'multiprocessing_context={}').format(multiprocessing_context))
else:
raise ValueError('multiprocessing_context can only be used with '
'multi-process loading (num_workers > 0), but got '
f'num_workers={self.num_workers}')
raise ValueError(('multiprocessing_context can only be used with '
'multi-process loading (num_workers > 0), but got '
'num_workers={}').format(self.num_workers))
self.__multiprocessing_context = multiprocessing_context
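One more small aside, on the {!r} fields restored in the DataLoader hunk above: the !r conversion applies repr() in both spellings, so the two messages come out identical. A standalone sketch with made-up values (valid_start_methods and multiprocessing_context here are ordinary locals, not the DataLoader attributes):

# Standalone sketch: the !r conversion applies repr() to the field value
# in str.format and in f-strings alike.
valid_start_methods = ["fork", "spawn", "forkserver"]  # illustrative value
multiprocessing_context = "thread"                      # illustrative value

via_format = ("multiprocessing_context option should specify a valid start method "
              "in {!r}, but got multiprocessing_context={!r}").format(
                  valid_start_methods, multiprocessing_context)
via_fstring = ("multiprocessing_context option should specify a valid start method "
               f"in {valid_start_methods!r}, but got multiprocessing_context={multiprocessing_context!r}")

assert via_format == via_fstring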

View File

@ -70,7 +70,7 @@ class non_deterministic:
if isinstance(arg, Type): # type: ignore[arg-type]
if not issubclass(arg, IterDataPipe): # type: ignore[arg-type]
raise TypeError("Only `IterDataPipe` can be decorated with `non_deterministic`"
f", but {arg.__name__} is found")
", but {} is found".format(arg.__name__))
self.cls = arg # type: ignore[assignment]
# 2. Decorator has an argument of a function
# This class should behave differently given different inputs. Use this
@ -103,13 +103,13 @@ class non_deterministic:
res = self.deterministic_fn(*args, **kwargs) # type: ignore[call-arg, misc]
if not isinstance(res, bool):
raise TypeError("deterministic_fn of `non_deterministic` decorator is required "
f"to return a boolean value, but {type(res)} is found")
"to return a boolean value, but {} is found".format(type(res)))
global _determinism
if _determinism and res:
raise TypeError(f"{self.cls.__name__} is non-deterministic with the inputs, but you set " # type: ignore[union-attr]
raise TypeError("{} is non-deterministic with the inputs, but you set "
"'guaranteed_datapipes_determinism'. You can turn off determinism "
"for this DataPipe if that is acceptable for your application"
)
.format(self.cls.__name__)) # type: ignore[union-attr]
return self.cls(*args, **kwargs) # type: ignore[call-arg, misc]
@ -130,9 +130,9 @@ def argument_validation(f):
if not isinstance(value, IterDataPipe):
raise TypeError(f"Expected argument '{argument_name}' as a IterDataPipe, but found {type(value)}")
if not value.type.issubtype(hint.type):
raise TypeError(f"Expected type of argument '{argument_name}' as a subtype of "
f"hint {hint.type}, but found {value.type}"
)
raise TypeError("Expected type of argument '{}' as a subtype of "
"hint {}, but found {}"
.format(argument_name, hint.type, value.type))
return f(*args, **kwargs)

View File

@ -223,7 +223,7 @@ class WeightedRandomSampler(Sampler[int]):
weights_tensor = torch.as_tensor(weights, dtype=torch.double)
if len(weights_tensor.shape) != 1:
raise ValueError("weights should be a 1d sequence but given "
f"weights have shape {tuple(weights_tensor.shape)}")
"weights have shape {}".format(tuple(weights_tensor.shape)))
self.weights = weights_tensor
self.num_samples = num_samples

View File

@ -502,7 +502,7 @@ def hip_header_magic(input_string):
# Check if one of the following headers is already included.
headers = ["hip/hip_runtime.h", "hip/hip_runtime_api.h"]
if any(re.search(fr'#include ("{ext}"|<{ext}>)', output_string) for ext in headers):
if any(re.search(r'#include ("{0}"|<{0}>)'.format(ext), output_string) for ext in headers):
return output_string
# Rough logic to detect if we're inside device code

View File

@ -83,10 +83,10 @@ def warn_if_has_hooks(tensor):
for k in tensor._backward_hooks:
hook = tensor._backward_hooks[k]
if not hasattr(k, "__torch_unserializable__"):
warnings.warn(f"backward hook {repr(hook)} on tensor will not be "
warnings.warn("backward hook {} on tensor will not be "
"serialized. If this is expected, you can "
"decorate the function with @torch.utils.hooks.unserializable_hook "
"to suppress this warning")
"to suppress this warning".format(repr(hook)))
class BackwardHook:
"""
@ -140,7 +140,7 @@ class BackwardHook:
if len(out) != len(res):
raise RuntimeError("Backward hook returned an invalid number of grad_input, "
f"got {len(out)}, but expected {len(res)}")
"got {}, but expected {}".format(len(out), len(res)))
res = out
@ -209,7 +209,7 @@ class BackwardHook:
actual_len = len(hook_grad_outputs)
if actual_len != expected_len:
raise RuntimeError("Backward pre hook returned an invalid number of grad_output, "
f"got {actual_len}, but expected {expected_len}")
"got {}, but expected {}".format(actual_len, expected_len))
self.grad_outputs = hook_grad_outputs
# Special case if no input required gradients, this hook should call the user

View File

@ -95,9 +95,9 @@ def generate_mobile_module_lints(script_module: torch.jit.ScriptModule):
for name, param in script_module.named_parameters():
if param.requires_grad:
lint_list.append({"name": LintCode.REQUIRES_GRAD.name, "message": f"Param {name} requires grad, "
lint_list.append({"name": LintCode.REQUIRES_GRAD.name, "message": "Param {} requires grad, "
"please set torch.no_grad() to reduce memory usage and improve computation speed during "
"inference phase."})
"inference phase.".format(name)})
op_names = torch.jit.export_opnames(script_module)
for op_name in op_names: