mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-06 12:20:52 +01:00
[BE] Enable ruff's UP rules and autoformat test/ (#105434)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/105434 Approved by: https://github.com/albanD
This commit is contained in:
parent
7b56238551
commit
73e1455327
|
|
@ -55,7 +55,7 @@ def generate_callgrind_artifacts() -> None:
|
|||
"ones_with_data_exclusive": to_entry(stats_with_data.stmt_exclusive_stats),
|
||||
}
|
||||
|
||||
with open(CALLGRIND_ARTIFACTS, "wt") as f:
|
||||
with open(CALLGRIND_ARTIFACTS, "w") as f:
|
||||
json.dump(artifacts, f, indent=4)
|
||||
|
||||
|
||||
|
|
@ -70,7 +70,7 @@ def load_callgrind_artifacts() -> Tuple[benchmark_utils.CallgrindStats, benchmar
|
|||
testing are stored in raw string form for easier inspection and to avoid
|
||||
baking any implementation details into the artifact itself.
|
||||
"""
|
||||
with open(CALLGRIND_ARTIFACTS, "rt") as f:
|
||||
with open(CALLGRIND_ARTIFACTS) as f:
|
||||
artifacts = json.load(f)
|
||||
|
||||
pattern = re.compile(r"^\s*([0-9]+)\s(.+)$")
|
||||
|
|
|
|||
|
|
@ -34,7 +34,7 @@ def emit(initializer_parameter_map):
|
|||
print(" {")
|
||||
for parameter in sample:
|
||||
parameter_values = "{{{}}}".format(", ".join(map(str, parameter)))
|
||||
print(" torch::tensor({}),".format(parameter_values))
|
||||
print(f" torch::tensor({parameter_values}),")
|
||||
print(" },")
|
||||
print(" };")
|
||||
print("}\n")
|
||||
|
|
@ -63,7 +63,7 @@ def run(initializer):
|
|||
def main():
|
||||
initializer_parameter_map = {}
|
||||
for initializer in INITIALIZERS.keys():
|
||||
sys.stderr.write('Evaluating {} ...\n'.format(initializer))
|
||||
sys.stderr.write(f'Evaluating {initializer} ...\n')
|
||||
initializer_parameter_map[initializer] = run(initializer)
|
||||
|
||||
emit(initializer_parameter_map)
|
||||
|
|
|
|||
|
|
@ -98,7 +98,7 @@ def emit(optimizer_parameter_map):
|
|||
print(" {")
|
||||
for parameter in sample:
|
||||
parameter_values = "{{{}}}".format(", ".join(map(str, parameter)))
|
||||
print(" torch::tensor({}),".format(parameter_values))
|
||||
print(f" torch::tensor({parameter_values}),")
|
||||
print(" },")
|
||||
print(" };")
|
||||
print("}\n")
|
||||
|
|
@ -115,7 +115,7 @@ def main():
|
|||
|
||||
optimizer_parameter_map = {}
|
||||
for optimizer in OPTIMIZERS.keys():
|
||||
sys.stderr.write('Evaluating {} ...\n'.format(optimizer))
|
||||
sys.stderr.write(f'Evaluating {optimizer} ...\n')
|
||||
optimizer_parameter_map[optimizer] = run(
|
||||
optimizer, options.iterations, options.sample_every
|
||||
)
|
||||
|
|
|
|||
|
|
@ -90,7 +90,7 @@ def test_forward(unit_test_class, test_params):
|
|||
arg_dict_file_path = compute_temp_file_path(cpp_tmp_folder, functional_variant_name, 'arg_dict')
|
||||
serialize_arg_dict_as_script_module(test_params.arg_dict).save(arg_dict_file_path)
|
||||
|
||||
cpp_test_name = '{}_test_forward'.format(test_params.functional_variant_name)
|
||||
cpp_test_name = f'{test_params.functional_variant_name}_test_forward'
|
||||
cpp_test_fn = getattr(unit_test_class.functional_impl_check_cpp_module, cpp_test_name)
|
||||
|
||||
def run_cpp_test_fn_and_check_output():
|
||||
|
|
@ -194,7 +194,7 @@ def write_test_to_test_class(
|
|||
test_instance_class=test_instance_class,
|
||||
)
|
||||
try_remove_folder(test_params.cpp_tmp_folder)
|
||||
unit_test_name = 'test_torch_nn_functional_{}'.format(test_params.functional_variant_name)
|
||||
unit_test_name = f'test_torch_nn_functional_{test_params.functional_variant_name}'
|
||||
unit_test_class.functional_test_params_map[unit_test_name] = test_params
|
||||
|
||||
def test_fn(self):
|
||||
|
|
@ -227,7 +227,7 @@ def build_cpp_tests(unit_test_class, print_cpp_source=False):
|
|||
functions = []
|
||||
for test_name, test_params in unit_test_class.functional_test_params_map.items():
|
||||
cpp_sources += generate_test_cpp_sources(test_params=test_params, template=TORCH_NN_FUNCTIONAL_TEST_FORWARD)
|
||||
functions.append('{}_test_forward'.format(test_params.functional_variant_name))
|
||||
functions.append(f'{test_params.functional_variant_name}_test_forward')
|
||||
if print_cpp_source:
|
||||
print(cpp_sources)
|
||||
|
||||
|
|
|
|||
|
|
@ -146,7 +146,7 @@ def test_forward_backward(unit_test_class, test_params):
|
|||
script_module.save(module_file_path)
|
||||
serialize_arg_dict_as_script_module(test_params.arg_dict).save(arg_dict_file_path)
|
||||
|
||||
cpp_test_name = '{}_test_forward_backward'.format(test_params.module_variant_name)
|
||||
cpp_test_name = f'{test_params.module_variant_name}_test_forward_backward'
|
||||
cpp_test_fn = getattr(unit_test_class.module_impl_check_cpp_module, cpp_test_name)
|
||||
|
||||
def run_cpp_test_fn_and_check_output():
|
||||
|
|
@ -177,12 +177,12 @@ def test_forward_backward(unit_test_class, test_params):
|
|||
unit_test_class.assertTrue(
|
||||
key in cpp_grad_dict,
|
||||
msg=generate_error_msg(
|
||||
"\"Does module have a parameter named `{}` with {} gradient?\"".format(param_name, sparsity_str),
|
||||
f"\"Does module have a parameter named `{param_name}` with {sparsity_str} gradient?\"",
|
||||
False, True))
|
||||
unit_test_class.assertEqual(
|
||||
python_grad_dict[key], cpp_grad_dict[key],
|
||||
msg=generate_error_msg(
|
||||
"`{}`'s {} gradient (`{}`)".format(param_name, sparsity_str, key),
|
||||
f"`{param_name}`'s {sparsity_str} gradient (`{key}`)",
|
||||
cpp_grad_dict[key], python_grad_dict[key]))
|
||||
|
||||
run_cpp_test_fn_and_check_output()
|
||||
|
|
@ -251,7 +251,7 @@ def write_test_to_test_class(
|
|||
test_instance_class=test_instance_class,
|
||||
)
|
||||
try_remove_folder(test_params.cpp_tmp_folder)
|
||||
unit_test_name = 'test_torch_nn_{}'.format(test_params.module_variant_name)
|
||||
unit_test_name = f'test_torch_nn_{test_params.module_variant_name}'
|
||||
unit_test_class.module_test_params_map[unit_test_name] = test_params
|
||||
|
||||
def test_fn(self):
|
||||
|
|
@ -272,14 +272,14 @@ def generate_test_cpp_sources(test_params, template):
|
|||
|
||||
cpp_constructor_args = test_params.cpp_constructor_args
|
||||
if cpp_constructor_args != '':
|
||||
cpp_constructor_args = '({})'.format(cpp_constructor_args)
|
||||
cpp_constructor_args = f'({cpp_constructor_args})'
|
||||
|
||||
cpp_args_construction_stmts, cpp_forward_args_symbols = \
|
||||
compute_cpp_args_construction_stmts_and_forward_arg_symbols(test_params)
|
||||
|
||||
test_cpp_sources = template.substitute(
|
||||
module_variant_name=test_params.module_variant_name,
|
||||
module_qualified_name='torch::nn::{}'.format(test_params.module_name),
|
||||
module_qualified_name=f'torch::nn::{test_params.module_name}',
|
||||
cpp_args_construction_stmts=";\n ".join(cpp_args_construction_stmts),
|
||||
cpp_constructor_args=cpp_constructor_args,
|
||||
cpp_forward_args_symbols=", ".join(cpp_forward_args_symbols),
|
||||
|
|
@ -295,7 +295,7 @@ def build_cpp_tests(unit_test_class, print_cpp_source=False):
|
|||
for test_name, test_params in unit_test_class.module_test_params_map.items():
|
||||
cpp_sources += generate_test_cpp_sources(
|
||||
test_params=test_params, template=TORCH_NN_MODULE_TEST_FORWARD_BACKWARD)
|
||||
functions.append('{}_test_forward_backward'.format(test_params.module_variant_name))
|
||||
functions.append(f'{test_params.module_variant_name}_test_forward_backward')
|
||||
if print_cpp_source:
|
||||
print(cpp_sources)
|
||||
|
||||
|
|
|
|||
|
|
@ -35,22 +35,22 @@ def parse_parity_tracker_table(file_path):
|
|||
return str == 'Yes'
|
||||
else:
|
||||
raise RuntimeError(
|
||||
'{} is not a supported parity choice. The valid choices are "Yes" and "No".'.format(str))
|
||||
f'{str} is not a supported parity choice. The valid choices are "Yes" and "No".')
|
||||
|
||||
parity_tracker_dict = {}
|
||||
|
||||
with open(file_path, 'r') as f:
|
||||
with open(file_path) as f:
|
||||
all_text = f.read()
|
||||
packages = all_text.split('##')
|
||||
for package in packages[1:]:
|
||||
lines = [line.strip() for line in package.split('\n') if line.strip() != '']
|
||||
package_name = lines[0]
|
||||
if package_name in parity_tracker_dict:
|
||||
raise RuntimeError("Duplicated package name `{}` found in {}".format(package_name, file_path))
|
||||
raise RuntimeError(f"Duplicated package name `{package_name}` found in {file_path}")
|
||||
else:
|
||||
parity_tracker_dict[package_name] = {}
|
||||
for api_status in lines[3:]:
|
||||
api_name, has_impl_parity_str, has_doc_parity_str = [x.strip() for x in api_status.split('|')]
|
||||
api_name, has_impl_parity_str, has_doc_parity_str = (x.strip() for x in api_status.split('|'))
|
||||
parity_tracker_dict[package_name][api_name] = ParityStatus(
|
||||
has_impl_parity=parse_parity_choice(has_impl_parity_str),
|
||||
has_doc_parity=parse_parity_choice(has_doc_parity_str))
|
||||
|
|
|
|||
|
|
@ -150,7 +150,7 @@ def compile_cpp_code_inline(name, cpp_sources, functions):
|
|||
return cpp_module
|
||||
|
||||
def compute_temp_file_path(cpp_tmp_folder, variant_name, file_suffix):
|
||||
return os.path.join(cpp_tmp_folder, '{}_{}.pt'.format(variant_name, file_suffix))
|
||||
return os.path.join(cpp_tmp_folder, f'{variant_name}_{file_suffix}.pt')
|
||||
|
||||
def is_torch_nn_functional_test(test_params_dict):
|
||||
return 'wrap_functional' in str(test_params_dict.get('constructor', ''))
|
||||
|
|
@ -177,11 +177,11 @@ def add_test(unit_test_class, test_name, test_fn):
|
|||
|
||||
def set_cpp_tensors_requires_grad(cpp_tensor_stmts, python_tensors):
|
||||
assert len(cpp_tensor_stmts) == len(python_tensors)
|
||||
return ['{}.requires_grad_(true)'.format(tensor_stmt) if tensor.dtype != torch.long else tensor_stmt
|
||||
return [f'{tensor_stmt}.requires_grad_(true)' if tensor.dtype != torch.long else tensor_stmt
|
||||
for tensor_stmt, (_, tensor) in zip(cpp_tensor_stmts, python_tensors)]
|
||||
|
||||
def move_cpp_tensors_to_device(cpp_tensor_stmts, device):
|
||||
return ['{}.to("{}")'.format(tensor_stmt, device) for tensor_stmt in cpp_tensor_stmts]
|
||||
return [f'{tensor_stmt}.to("{device}")' for tensor_stmt in cpp_tensor_stmts]
|
||||
|
||||
def is_criterion_test(test_instance):
|
||||
return isinstance(test_instance, common_nn.CriterionTest)
|
||||
|
|
@ -209,7 +209,7 @@ def compute_cpp_args_construction_stmts_and_forward_arg_symbols(test_params):
|
|||
def add_cpp_forward_args(args):
|
||||
args_stmts = []
|
||||
for arg_name, _ in args:
|
||||
args_stmts.append('auto {} = arg_dict.at("{}")'.format(arg_name, arg_name))
|
||||
args_stmts.append(f'auto {arg_name} = arg_dict.at("{arg_name}")')
|
||||
cpp_forward_args_symbols.append(arg_name)
|
||||
return args_stmts
|
||||
|
||||
|
|
@ -223,7 +223,7 @@ def compute_cpp_args_construction_stmts_and_forward_arg_symbols(test_params):
|
|||
# Build the list of other arguments needed
|
||||
cpp_other_args_stmts = []
|
||||
for arg_name, _ in test_params.arg_dict['other']:
|
||||
cpp_other_args_stmts.append('auto {} = arg_dict.at("{}")'.format(arg_name, arg_name))
|
||||
cpp_other_args_stmts.append(f'auto {arg_name} = arg_dict.at("{arg_name}")')
|
||||
cpp_other_args_stmts = move_cpp_tensors_to_device(cpp_other_args_stmts, device)
|
||||
|
||||
cpp_args_construction_stmts = cpp_forward_input_args_stmts + cpp_forward_target_args_stmts + \
|
||||
|
|
@ -292,11 +292,11 @@ def compute_arg_dict(test_params_dict, test_instance):
|
|||
if arg_value == '_get_input()':
|
||||
arg_dict['other'].append(CppArg(name=arg_name, value=test_instance._get_input()))
|
||||
else:
|
||||
raise RuntimeError("`{}` has unsupported string value: {}".format(arg_name, arg_value))
|
||||
raise RuntimeError(f"`{arg_name}` has unsupported string value: {arg_value}")
|
||||
elif isinstance(arg_value, torch.Tensor):
|
||||
arg_dict['other'].append(CppArg(name=arg_name, value=arg_value))
|
||||
else:
|
||||
raise RuntimeError("`{}` has unsupported value: {}".format(arg_name, arg_value))
|
||||
raise RuntimeError(f"`{arg_name}` has unsupported value: {arg_value}")
|
||||
|
||||
return arg_dict
|
||||
|
||||
|
|
@ -351,4 +351,4 @@ def try_remove_folder(folder_path):
|
|||
try:
|
||||
shutil.rmtree(folder_path)
|
||||
except Exception as e:
|
||||
warnings.warn("Non-blocking folder removal fails with the following error:\n{}".format(str(e)))
|
||||
warnings.warn(f"Non-blocking folder removal fails with the following error:\n{str(e)}")
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@ def get_custom_backend_library_path():
|
|||
library_filename = "libcustom_backend.dylib"
|
||||
else:
|
||||
library_filename = "libcustom_backend.so"
|
||||
path = os.path.abspath("build/{}".format(library_filename))
|
||||
path = os.path.abspath(f"build/{library_filename}")
|
||||
assert os.path.exists(path), path
|
||||
return path
|
||||
|
||||
|
|
|
|||
|
|
@ -12,7 +12,7 @@ def get_custom_op_library_path():
|
|||
library_filename = "libcustom_ops.dylib"
|
||||
else:
|
||||
library_filename = "libcustom_ops.so"
|
||||
path = os.path.abspath("build/{}".format(library_filename))
|
||||
path = os.path.abspath(f"build/{library_filename}")
|
||||
assert os.path.exists(path), path
|
||||
return path
|
||||
|
||||
|
|
|
|||
|
|
@ -638,7 +638,7 @@ class TestSaveLoad(JitTestCase):
|
|||
# Validate that with no input specified the traced inputs are stored
|
||||
traced_module = torch.jit.trace(module, input_tensor)
|
||||
traced_inputs = list(traced_module.graph.inputs())
|
||||
self.assertEquals(traced_module._c._retrieve_traced_inputs()['forward'], [input_tensor])
|
||||
self.assertEqual(traced_module._c._retrieve_traced_inputs()['forward'], [input_tensor])
|
||||
with TemporaryFileName() as fname:
|
||||
path = pathlib.Path(fname)
|
||||
traced_module.save(path)
|
||||
|
|
@ -654,7 +654,7 @@ class TestSaveLoad(JitTestCase):
|
|||
# Validate that inputs aren't saved when requested not to
|
||||
traced_module = torch.jit.trace(module, input_tensor, _store_inputs=False)
|
||||
traced_inputs = list(traced_module.graph.inputs())
|
||||
self.assertEquals(len(traced_module._c._retrieve_traced_inputs()), 0)
|
||||
self.assertEqual(len(traced_module._c._retrieve_traced_inputs()), 0)
|
||||
|
||||
with TemporaryFileName() as fname:
|
||||
path = pathlib.Path(fname)
|
||||
|
|
@ -721,7 +721,7 @@ class TestSaveLoad(JitTestCase):
|
|||
|
||||
class Model(torch.nn.Module):
|
||||
def __init__(self):
|
||||
super(Model, self).__init__()
|
||||
super().__init__()
|
||||
self.x = "x" * (2 ** 32 + 1)
|
||||
|
||||
def forward(self, i) -> int:
|
||||
|
|
|
|||
|
|
@ -60,7 +60,7 @@ class NNPoolingModule(torch.nn.Module):
|
|||
nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5)),
|
||||
nn.LPPool2d(2, 3, stride=(2, 1)),
|
||||
nn.AdaptiveMaxPool2d((5, 7)),
|
||||
nn.AdaptiveAvgPool2d((7)),
|
||||
nn.AdaptiveAvgPool2d(7),
|
||||
]
|
||||
)
|
||||
|
||||
|
|
|
|||
|
|
@ -197,7 +197,7 @@ class TestLiteScriptModule(TestCase):
|
|||
)
|
||||
|
||||
def test_unsupported_classtype(self):
|
||||
class Foo():
|
||||
class Foo:
|
||||
def __init__(self):
|
||||
return
|
||||
|
||||
|
|
@ -313,7 +313,7 @@ class TestLiteScriptModule(TestCase):
|
|||
loaded = self.getScriptExportImportCopy(ft)
|
||||
_, lineno = inspect.getsourcelines(FooTest)
|
||||
|
||||
with self.assertRaisesRegex(RuntimeError, 'test_lite_script_module.py\", line {}'.format(lineno + 3)):
|
||||
with self.assertRaisesRegex(RuntimeError, f'test_lite_script_module.py\", line {lineno + 3}'):
|
||||
loaded(torch.rand(3, 4), torch.rand(30, 40))
|
||||
|
||||
def test_source_range_raise_exception(self):
|
||||
|
|
@ -357,8 +357,8 @@ class TestLiteScriptModule(TestCase):
|
|||
loaded(torch.rand(3, 4), torch.rand(3, 4), torch.rand(30, 40))
|
||||
except RuntimeError as e:
|
||||
error_message = f"{e}"
|
||||
self.assertTrue('test_lite_script_module.py\", line {}'.format(lineno + 3) in error_message)
|
||||
self.assertTrue('test_lite_script_module.py\", line {}'.format(lineno + 9) in error_message)
|
||||
self.assertTrue(f'test_lite_script_module.py\", line {lineno + 3}' in error_message)
|
||||
self.assertTrue(f'test_lite_script_module.py\", line {lineno + 9}' in error_message)
|
||||
self.assertTrue('top(FooTest3)' in error_message)
|
||||
|
||||
def test_source_range_no_debug_info(self):
|
||||
|
|
|
|||
|
|
@ -21,9 +21,9 @@ class TestLiteScriptModule(TestCase):
|
|||
upgrader_mobile_cpp_path = pytorch_caffe2_dir / "torch" / "csrc" / "jit" / "mobile" / "upgrader_mobile.cpp"
|
||||
with tempfile.TemporaryDirectory() as tmpdirname:
|
||||
write_cpp(tmpdirname, sorted_upgrader_list)
|
||||
with open(os.path.join(tmpdirname, 'upgrader_mobile.cpp'), 'r') as file_name:
|
||||
with open(os.path.join(tmpdirname, 'upgrader_mobile.cpp')) as file_name:
|
||||
actual_output = [line.strip() for line in file_name.readlines() if line]
|
||||
with open(str(upgrader_mobile_cpp_path), 'r') as file_name:
|
||||
with open(str(upgrader_mobile_cpp_path)) as file_name:
|
||||
expect_output = [line.strip() for line in file_name.readlines() if line]
|
||||
actual_output_filtered = list(filter(lambda token: len(token) != 0, actual_output))
|
||||
expect_output_filtered = list(filter(lambda token: len(token) != 0, expect_output))
|
||||
|
|
|
|||
|
|
@ -1,4 +1,3 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Owner(s): ["oncall: package/deploy"]
|
||||
|
||||
import os
|
||||
|
|
@ -207,7 +206,7 @@ class DirectoryReaderTest(PackageTestCase):
|
|||
filename = self.temp()
|
||||
with PackageExporter(filename) as he:
|
||||
he.save_text("main", "main", "my string")
|
||||
he.save_binary("main", "main_binary", "my string".encode("utf-8"))
|
||||
he.save_binary("main", "main_binary", b"my string")
|
||||
src = dedent(
|
||||
"""\
|
||||
import importlib
|
||||
|
|
@ -226,7 +225,7 @@ class DirectoryReaderTest(PackageTestCase):
|
|||
dir_importer = PackageImporter(Path(temp_dir) / Path(filename).name)
|
||||
m = dir_importer.import_module("main")
|
||||
self.assertEqual(m.t, "my string")
|
||||
self.assertEqual(m.b, "my string".encode("utf-8"))
|
||||
self.assertEqual(m.b, b"my string")
|
||||
|
||||
@skipIf(version_info < (3, 7), "ResourceReader API introduced in Python 3.7")
|
||||
def test_resource_access_by_path(self):
|
||||
|
|
@ -235,7 +234,7 @@ class DirectoryReaderTest(PackageTestCase):
|
|||
"""
|
||||
filename = self.temp()
|
||||
with PackageExporter(filename) as e:
|
||||
e.save_binary("string_module", "my_string", "my string".encode("utf-8"))
|
||||
e.save_binary("string_module", "my_string", b"my string")
|
||||
src = dedent(
|
||||
"""\
|
||||
import importlib.resources
|
||||
|
|
|
|||
|
|
@ -1,4 +1,3 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Owner(s): ["oncall: package/deploy"]
|
||||
|
||||
import inspect
|
||||
|
|
|
|||
|
|
@ -1,4 +1,3 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Owner(s): ["oncall: package/deploy"]
|
||||
|
||||
from io import BytesIO
|
||||
|
|
@ -105,7 +104,7 @@ class TestResources(PackageTestCase):
|
|||
buffer = BytesIO()
|
||||
with PackageExporter(buffer) as he:
|
||||
he.save_text("main", "main", "my string")
|
||||
he.save_binary("main", "main_binary", "my string".encode("utf-8"))
|
||||
he.save_binary("main", "main_binary", b"my string")
|
||||
src = dedent(
|
||||
"""\
|
||||
import importlib
|
||||
|
|
@ -120,7 +119,7 @@ class TestResources(PackageTestCase):
|
|||
hi = PackageImporter(buffer)
|
||||
m = hi.import_module("main")
|
||||
self.assertEqual(m.t, "my string")
|
||||
self.assertEqual(m.b, "my string".encode("utf-8"))
|
||||
self.assertEqual(m.b, b"my string")
|
||||
|
||||
def test_resource_access_by_path(self):
|
||||
"""
|
||||
|
|
@ -128,7 +127,7 @@ class TestResources(PackageTestCase):
|
|||
"""
|
||||
buffer = BytesIO()
|
||||
with PackageExporter(buffer) as he:
|
||||
he.save_binary("string_module", "my_string", "my string".encode("utf-8"))
|
||||
he.save_binary("string_module", "my_string", b"my string")
|
||||
src = dedent(
|
||||
"""\
|
||||
import importlib.resources
|
||||
|
|
|
|||
|
|
@ -1,7 +1,6 @@
|
|||
# Owner(s): ["oncall: profiler"]
|
||||
import collections
|
||||
import gc
|
||||
import io
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
|
|
@ -111,7 +110,7 @@ class TestProfilerCUDA(TestCase):
|
|||
for idx in range(1, len(last_rss)):
|
||||
max_diff = max(max_diff, last_rss[idx] - last_rss[idx - 1])
|
||||
self.assertTrue(not (is_increasing and max_diff > 100 * 1024),
|
||||
msg='memory usage is increasing, {}'.format(str(last_rss)))
|
||||
msg=f'memory usage is increasing, {str(last_rss)}')
|
||||
|
||||
def test_custom_module_input_op_ids(self):
|
||||
class MyFunc(torch.autograd.Function):
|
||||
|
|
@ -340,7 +339,7 @@ class TestExecutionTrace(TestCase):
|
|||
|
||||
def get_execution_trace_root(self, output_file_name):
|
||||
nodes = []
|
||||
with open(output_file_name, 'r') as f:
|
||||
with open(output_file_name) as f:
|
||||
et_graph = json.load(f)
|
||||
assert "nodes" in et_graph
|
||||
nodes = et_graph["nodes"]
|
||||
|
|
@ -575,7 +574,7 @@ class TestProfiler(TestCase):
|
|||
if kineto_available() and not IS_WINDOWS:
|
||||
with TemporaryFileName(mode="w+") as fname:
|
||||
p.export_chrome_trace(fname)
|
||||
with io.open(fname, 'r') as f:
|
||||
with open(fname) as f:
|
||||
events = json.load(f)["traceEvents"]
|
||||
|
||||
def extract(pattern: str):
|
||||
|
|
@ -870,7 +869,7 @@ class TestProfiler(TestCase):
|
|||
with record_function("test_user_scope_dealloc"):
|
||||
del x
|
||||
prof.export_chrome_trace(fname)
|
||||
with io.open(fname, 'r') as f:
|
||||
with open(fname) as f:
|
||||
trace = json.load(f)
|
||||
assert "traceEvents" in trace
|
||||
events = trace["traceEvents"]
|
||||
|
|
@ -974,7 +973,7 @@ class TestProfiler(TestCase):
|
|||
|
||||
def check_trace(fname):
|
||||
prof.export_chrome_trace(fname)
|
||||
with io.open(fname, 'r') as f:
|
||||
with open(fname) as f:
|
||||
trace = json.load(f)
|
||||
self.assertTrue("traceEvents" in trace)
|
||||
events = trace["traceEvents"]
|
||||
|
|
@ -1045,7 +1044,7 @@ class TestProfiler(TestCase):
|
|||
with profile(activities=[torch.profiler.ProfilerActivity.CPU], with_modules=True,) as prof:
|
||||
model(input_a, input_b)
|
||||
prof.export_chrome_trace(fname)
|
||||
with io.open(fname, 'r') as f:
|
||||
with open(fname) as f:
|
||||
trace = json.load(f)
|
||||
assert "traceEvents" in trace
|
||||
events = trace["traceEvents"]
|
||||
|
|
@ -1296,7 +1295,7 @@ class TestProfiler(TestCase):
|
|||
|
||||
with TemporaryFileName(mode="w+") as fname:
|
||||
p.export_stacks(fname)
|
||||
with io.open(fname, 'r') as f:
|
||||
with open(fname) as f:
|
||||
lines = f.readlines()
|
||||
assert len(lines) > 0, "Empty stacks file"
|
||||
for line in lines:
|
||||
|
|
@ -1383,7 +1382,7 @@ class TestProfiler(TestCase):
|
|||
|
||||
with TemporaryFileName(mode="w+") as fname:
|
||||
prof.export_chrome_trace(fname)
|
||||
with io.open(fname, 'r') as f:
|
||||
with open(fname) as f:
|
||||
trace = json.load(f)
|
||||
assert "test_key1" in trace
|
||||
assert trace["test_key1"] == "test_value1"
|
||||
|
|
@ -1399,7 +1398,7 @@ class TestProfiler(TestCase):
|
|||
prof.export_chrome_trace(fname)
|
||||
# read the trace and expect valid json
|
||||
# if the JSON generated by export_chrome_trace is not valid, this will throw and fail the test.
|
||||
with io.open(fname, 'r') as f:
|
||||
with open(fname) as f:
|
||||
json.load(f)
|
||||
|
||||
# test empty trace
|
||||
|
|
@ -1422,7 +1421,7 @@ class TestProfiler(TestCase):
|
|||
with TemporaryFileName(mode="w+") as fname:
|
||||
prof.export_chrome_trace(fname)
|
||||
# Now validate the json
|
||||
with io.open(fname, 'r') as f:
|
||||
with open(fname) as f:
|
||||
json.load(f)
|
||||
|
||||
def test_profiler_tracing(self):
|
||||
|
|
@ -1439,7 +1438,7 @@ class TestProfiler(TestCase):
|
|||
loss.backward()
|
||||
with TemporaryFileName(mode="w+") as fname:
|
||||
prof.export_chrome_trace(fname)
|
||||
with io.open(fname, 'r') as f:
|
||||
with open(fname) as f:
|
||||
j = json.load(f)
|
||||
events = j["traceEvents"]
|
||||
ts_to_name = {}
|
||||
|
|
@ -1481,7 +1480,7 @@ class TestProfiler(TestCase):
|
|||
|
||||
with TemporaryFileName(mode="w+") as fname:
|
||||
prof.export_chrome_trace(fname)
|
||||
with io.open(fname, 'r') as f:
|
||||
with open(fname) as f:
|
||||
j = json.load(f)
|
||||
events = j["traceEvents"]
|
||||
|
||||
|
|
@ -2452,7 +2451,7 @@ class TestTorchTidyProfiler(TestCase):
|
|||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class MockKinetoEvent():
|
||||
class MockKinetoEvent:
|
||||
_name: str
|
||||
_start_us: int
|
||||
_duration_us: int
|
||||
|
|
@ -2477,7 +2476,7 @@ class MockKinetoEvent():
|
|||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class MockProfilerEvent():
|
||||
class MockProfilerEvent:
|
||||
_name: str
|
||||
id: int
|
||||
start_time_ns: int
|
||||
|
|
@ -2646,7 +2645,7 @@ class TestExperimentalUtils(TestCase):
|
|||
json.dump([kineto_events, profiler_events], f)
|
||||
|
||||
assert (os.path.exists(json_file_path))
|
||||
with open(json_file_path, "r") as f:
|
||||
with open(json_file_path) as f:
|
||||
kineto_events, profiler_events = json.load(f)
|
||||
|
||||
cuda_events = [
|
||||
|
|
|
|||
|
|
@ -1,4 +1,3 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Owner(s): ["oncall: quantization"]
|
||||
|
||||
import sys
|
||||
|
|
@ -42,7 +41,7 @@ def get_filenames(self, subname):
|
|||
subname_output = ""
|
||||
if subname:
|
||||
base_name += "_" + subname
|
||||
subname_output = " ({})".format(subname)
|
||||
subname_output = f" ({subname})"
|
||||
|
||||
input_file = base_name + ".input.pt"
|
||||
state_dict_file = base_name + ".state_dict.pt"
|
||||
|
|
|
|||
|
|
@ -49,7 +49,7 @@ Prepare full precision model
|
|||
full_precision_model = float_model
|
||||
|
||||
top1, top5 = evaluate(full_precision_model, criterion, data_loader_test)
|
||||
print("Model #0 Evaluation accuracy on test dataset: %2.2f, %2.2f" % (top1.avg, top5.avg))
|
||||
print(f"Model #0 Evaluation accuracy on test dataset: {top1.avg:2.2f}, {top5.avg:2.2f}")
|
||||
|
||||
"""
|
||||
Prepare model PTQ for specified qconfig for torch.nn.Linear
|
||||
|
|
@ -69,7 +69,7 @@ prepared_model = prepare_ptq_linear(uniform_qconfig_8bit)
|
|||
quantized_model = convert_fx(prepared_model) # convert the calibrated model to a quantized model
|
||||
|
||||
top1, top5 = evaluate(quantized_model, criterion, data_loader_test)
|
||||
print("Model #1 Evaluation accuracy on test dataset (b=8, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))
|
||||
print(f"Model #1 Evaluation accuracy on test dataset (b=8, k=2): {top1.avg:2.2f}, {top5.avg:2.2f}")
|
||||
|
||||
"""
|
||||
Prepare model with uniform activation, uniform weight
|
||||
|
|
@ -80,7 +80,7 @@ prepared_model = prepare_ptq_linear(uniform_qconfig_4bit)
|
|||
quantized_model = convert_fx(prepared_model) # convert the calibrated model to a quantized model
|
||||
|
||||
top1, top5 = evaluate(quantized_model1, criterion, data_loader_test)
|
||||
print("Model #1 Evaluation accuracy on test dataset (b=4, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))
|
||||
print(f"Model #1 Evaluation accuracy on test dataset (b=4, k=2): {top1.avg:2.2f}, {top5.avg:2.2f}")
|
||||
|
||||
"""
|
||||
Prepare model with uniform activation, APoT weight
|
||||
|
|
@ -90,7 +90,7 @@ Prepare model with uniform activation, APoT weight
|
|||
prepared_model = prepare_ptq_linear(apot_weights_qconfig_8bit)
|
||||
|
||||
top1, top5 = evaluate(prepared_model, criterion, data_loader_test)
|
||||
print("Model #2 Evaluation accuracy on test dataset (b=8, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))
|
||||
print(f"Model #2 Evaluation accuracy on test dataset (b=8, k=2): {top1.avg:2.2f}, {top5.avg:2.2f}")
|
||||
|
||||
"""
|
||||
Prepare model with uniform activation, APoT weight
|
||||
|
|
@ -100,7 +100,7 @@ Prepare model with uniform activation, APoT weight
|
|||
prepared_model = prepare_ptq_linear(apot_weights_qconfig_4bit)
|
||||
|
||||
top1, top5 = evaluate(prepared_model, criterion, data_loader_test)
|
||||
print("Model #2 Evaluation accuracy on test dataset (b=4, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))
|
||||
print(f"Model #2 Evaluation accuracy on test dataset (b=4, k=2): {top1.avg:2.2f}, {top5.avg:2.2f}")
|
||||
|
||||
|
||||
"""
|
||||
|
|
@ -111,7 +111,7 @@ Prepare model with APoT activation and weight
|
|||
prepared_model = prepare_ptq_linear(apot_qconfig_8bit)
|
||||
|
||||
top1, top5 = evaluate(prepared_model, criterion, data_loader_test)
|
||||
print("Model #3 Evaluation accuracy on test dataset (b=8, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))
|
||||
print(f"Model #3 Evaluation accuracy on test dataset (b=8, k=2): {top1.avg:2.2f}, {top5.avg:2.2f}")
|
||||
|
||||
"""
|
||||
Prepare model with APoT activation and weight
|
||||
|
|
@ -121,11 +121,11 @@ Prepare model with APoT activation and weight
|
|||
prepared_model = prepare_ptq_linear(apot_qconfig_4bit)
|
||||
|
||||
top1, top5 = evaluate(prepared_model, criterion, data_loader_test)
|
||||
print("Model #3 Evaluation accuracy on test dataset (b=4, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))
|
||||
print(f"Model #3 Evaluation accuracy on test dataset (b=4, k=2): {top1.avg:2.2f}, {top5.avg:2.2f}")
|
||||
|
||||
"""
|
||||
Prepare eager mode quantized model
|
||||
"""
|
||||
eager_quantized_model = resnet18(pretrained=True, quantize=True).eval()
|
||||
top1, top5 = evaluate(eager_quantized_model, criterion, data_loader_test)
|
||||
print("Eager mode quantized model evaluation accuracy on test dataset: %2.2f, %2.2f" % (top1.avg, top5.avg))
|
||||
print(f"Eager mode quantized model evaluation accuracy on test dataset: {top1.avg:2.2f}, {top5.avg:2.2f}")
|
||||
|
|
|
|||
|
|
@ -40,7 +40,7 @@ b=8, k=2
|
|||
prepared_model = prepare_qat_linear(uniform_qconfig_8bit)
|
||||
|
||||
top1, top5 = evaluate(prepared_model, criterion, data_loader_test)
|
||||
print("Model #1 Evaluation accuracy on test dataset (b=8, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))
|
||||
print(f"Model #1 Evaluation accuracy on test dataset (b=8, k=2): {top1.avg:2.2f}, {top5.avg:2.2f}")
|
||||
|
||||
"""
|
||||
Prepare model with uniform activation, uniform weight
|
||||
|
|
@ -50,7 +50,7 @@ b=4, k=2
|
|||
prepared_model = prepare_qat_linear(uniform_qconfig_4bit)
|
||||
|
||||
top1, top5 = evaluate(prepared_model, criterion, data_loader_test)
|
||||
print("Model #1 Evaluation accuracy on test dataset (b=4, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))
|
||||
print(f"Model #1 Evaluation accuracy on test dataset (b=4, k=2): {top1.avg:2.2f}, {top5.avg:2.2f}")
|
||||
|
||||
"""
|
||||
Prepare model with uniform activation, APoT weight
|
||||
|
|
@ -60,7 +60,7 @@ Prepare model with uniform activation, APoT weight
|
|||
prepared_model = prepare_qat_linear(apot_weights_qconfig_8bit)
|
||||
|
||||
top1, top5 = evaluate(prepared_model, criterion, data_loader_test)
|
||||
print("Model #2 Evaluation accuracy on test dataset (b=8, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))
|
||||
print(f"Model #2 Evaluation accuracy on test dataset (b=8, k=2): {top1.avg:2.2f}, {top5.avg:2.2f}")
|
||||
|
||||
"""
|
||||
Prepare model with uniform activation, APoT weight
|
||||
|
|
@ -70,7 +70,7 @@ Prepare model with uniform activation, APoT weight
|
|||
prepared_model = prepare_qat_linear(apot_weights_qconfig_4bit)
|
||||
|
||||
top1, top5 = evaluate(prepared_model, criterion, data_loader_test)
|
||||
print("Model #2 Evaluation accuracy on test dataset (b=4, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))
|
||||
print(f"Model #2 Evaluation accuracy on test dataset (b=4, k=2): {top1.avg:2.2f}, {top5.avg:2.2f}")
|
||||
|
||||
|
||||
"""
|
||||
|
|
@ -81,7 +81,7 @@ Prepare model with APoT activation and weight
|
|||
prepared_model = prepare_qat_linear(apot_qconfig_8bit)
|
||||
|
||||
top1, top5 = evaluate(prepared_model, criterion, data_loader_test)
|
||||
print("Model #3 Evaluation accuracy on test dataset (b=8, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))
|
||||
print(f"Model #3 Evaluation accuracy on test dataset (b=8, k=2): {top1.avg:2.2f}, {top5.avg:2.2f}")
|
||||
|
||||
"""
|
||||
Prepare model with APoT activation and weight
|
||||
|
|
@ -91,4 +91,4 @@ Prepare model with APoT activation and weight
|
|||
prepared_model = prepare_qat_linear(apot_qconfig_4bit)
|
||||
|
||||
top1, top5 = evaluate(prepared_model, criterion, data_loader_test)
|
||||
print("Model #3 Evaluation accuracy on test dataset (b=4, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))
|
||||
print(f"Model #3 Evaluation accuracy on test dataset (b=4, k=2): {top1.avg:2.2f}, {top5.avg:2.2f}")
|
||||
|
|
|
|||
|
|
@ -1705,12 +1705,12 @@ class TestDynamicQuantizedModule(QuantizationTestCase):
|
|||
for layer in range(num_layers):
|
||||
for direction in range(num_directions):
|
||||
suffix = '_reverse' if direction == 1 else ''
|
||||
key_name1 = 'weight_ih_l{layer_idx}{suffix}'.format(layer_idx=layer, suffix=suffix)
|
||||
key_name2 = 'weight_hh_l{layer_idx}{suffix}'.format(layer_idx=layer, suffix=suffix)
|
||||
key_name1 = f'weight_ih_l{layer}{suffix}'
|
||||
key_name2 = f'weight_hh_l{layer}{suffix}'
|
||||
weight_keys.append(key_name1)
|
||||
weight_keys.append(key_name2)
|
||||
key_name1 = 'bias_ih_l{layer_idx}{suffix}'.format(layer_idx=layer, suffix=suffix)
|
||||
key_name2 = 'bias_hh_l{layer_idx}{suffix}'.format(layer_idx=layer, suffix=suffix)
|
||||
key_name1 = f'bias_ih_l{layer}{suffix}'
|
||||
key_name2 = f'bias_hh_l{layer}{suffix}'
|
||||
bias_keys.append(key_name1)
|
||||
bias_keys.append(key_name2)
|
||||
|
||||
|
|
|
|||
|
|
@ -398,7 +398,7 @@ class TestQuantizedOps(TestCase):
|
|||
dtype=torch_type)
|
||||
qY_hat = op(qX, negative_slope=alpha)
|
||||
self.assertEqual(qY.dequantize(), qY_hat.dequantize(),
|
||||
msg="F.leaky_relu failed ({} vs {})".format(qY, qY_hat))
|
||||
msg=f"F.leaky_relu failed ({qY} vs {qY_hat})")
|
||||
|
||||
"""Tests the correctness of the quantized::elu op."""
|
||||
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
|
||||
|
|
@ -423,7 +423,7 @@ class TestQuantizedOps(TestCase):
|
|||
|
||||
qY = torch.ao.nn.quantized.functional.elu(qX, output_scale, output_zero_point, alpha=alpha)
|
||||
self.assertEqual(qY, qY_hat,
|
||||
msg="F.elu failed ({} vs {})".format(qY, qY_hat))
|
||||
msg=f"F.elu failed ({qY} vs {qY_hat})")
|
||||
|
||||
|
||||
"""Tests the correctness of the quantized::celu op."""
|
||||
|
|
@ -449,7 +449,7 @@ class TestQuantizedOps(TestCase):
|
|||
# test regular
|
||||
qY = torch.ops.quantized.celu(qX, output_scale, output_zero_point, alpha=alpha)
|
||||
self.assertEqual(qY, qY_hat,
|
||||
msg="F.celu failed ({} vs {})".format(qY, qY_hat))
|
||||
msg=f"F.celu failed ({qY} vs {qY_hat})")
|
||||
|
||||
"""Tests the correctness of the quantized::gelu op."""
|
||||
def test_qgelu(self):
|
||||
|
|
@ -478,7 +478,7 @@ class TestQuantizedOps(TestCase):
|
|||
dtype=torch_type)
|
||||
qY_hat = op(qX)
|
||||
self.assertEqual(qY.dequantize(), qY_hat.dequantize(),
|
||||
msg="F.gelu failed ({} vs {})".format(qY, qY_hat))
|
||||
msg=f"F.gelu failed ({qY} vs {qY_hat})")
|
||||
|
||||
"""Tests the correctness of the quantized::prelu op."""
|
||||
def test_qprelu(self):
|
||||
|
|
@ -512,7 +512,7 @@ class TestQuantizedOps(TestCase):
|
|||
dtype=torch_type)
|
||||
qY_hat = qop(qX, qW, scale, zero_point)
|
||||
self.assertEqual(qY.dequantize(), qY_hat.dequantize(),
|
||||
msg="F.prelu failed ({} vs {})".format(qY, qY_hat))
|
||||
msg=f"F.prelu failed ({qY} vs {qY_hat})")
|
||||
|
||||
"""Tests the correctness of the quantized::qlayer_norm op."""
|
||||
@skipIfNoFBGEMM
|
||||
|
|
@ -636,7 +636,7 @@ class TestQuantizedOps(TestCase):
|
|||
dtype=torch_type)
|
||||
qY_hat = torch.tanh(qX)
|
||||
self.assertEqual(qY, qY_hat,
|
||||
msg="TanH failed: {} vs. {}".format(qY, qY_hat))
|
||||
msg=f"TanH failed: {qY} vs. {qY_hat}")
|
||||
|
||||
"""Tests the correctness of the quantized::threshold op."""
|
||||
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
|
||||
|
|
@ -666,7 +666,7 @@ class TestQuantizedOps(TestCase):
|
|||
|
||||
for name, op in ops_under_test.items():
|
||||
qY = op(qX, threshold, value)
|
||||
self.assertEqual(qY, qY_hat, msg="{} qthreshold failed".format(name))
|
||||
self.assertEqual(qY, qY_hat, msg=f"{name} qthreshold failed")
|
||||
|
||||
"""Tests the correctness of the quantized::clamp op."""
|
||||
@given(X=hu.tensor(shapes=hu.array_shapes(1, 8, 1, 8, max_numel=10**5),
|
||||
|
|
@ -691,7 +691,7 @@ class TestQuantizedOps(TestCase):
|
|||
|
||||
for name, op in ops_under_test.items():
|
||||
qY_clamp_hat = op(qX, min=min_val, max=max_val)
|
||||
self.assertEqual(qY_clamp, qY_clamp_hat, msg="{} qclamp failed".format(name))
|
||||
self.assertEqual(qY_clamp, qY_clamp_hat, msg=f"{name} qclamp failed")
|
||||
|
||||
if torch.backends.quantized.engine == 'fbgemm':
|
||||
with override_quantized_engine('fbgemm'):
|
||||
|
|
@ -706,9 +706,9 @@ class TestQuantizedOps(TestCase):
|
|||
|
||||
for name, op in ops_under_test.items():
|
||||
qY_min_clamp_hat = op(qX, min=min_val)
|
||||
self.assertEqual(qY_min_clamp, qY_min_clamp_hat, msg="{} qclamp failed".format(name))
|
||||
self.assertEqual(qY_min_clamp, qY_min_clamp_hat, msg=f"{name} qclamp failed")
|
||||
qY_max_clamp_hat = op(qX, max=max_val)
|
||||
self.assertEqual(qY_max_clamp, qY_max_clamp_hat, msg="{} qclamp failed".format(name))
|
||||
self.assertEqual(qY_max_clamp, qY_max_clamp_hat, msg=f"{name} qclamp failed")
|
||||
|
||||
"""Tests the correctness of the quantized::hardtanh op."""
|
||||
@skipIfNoFBGEMM
|
||||
|
|
@ -740,7 +740,7 @@ class TestQuantizedOps(TestCase):
|
|||
|
||||
for name, op in ops_under_test.items():
|
||||
qY_hat = op(qX, min_val, max_val)
|
||||
self.assertEqual(qY, qY_hat, msg="{} hardtanh failed".format(name))
|
||||
self.assertEqual(qY, qY_hat, msg=f"{name} hardtanh failed")
|
||||
|
||||
ops_under_test_inplace = {
|
||||
'inplace nn.quantized.functional.hardtanh':
|
||||
|
|
@ -752,7 +752,7 @@ class TestQuantizedOps(TestCase):
|
|||
for name, op_ in ops_under_test_inplace.items():
|
||||
qY_hat = qX.clone()
|
||||
op_(qY_hat, min_val, max_val, inplace=True)
|
||||
self.assertEqual(qY, qY_hat, msg="{} hardtanh failed".format(name))
|
||||
self.assertEqual(qY, qY_hat, msg=f"{name} hardtanh failed")
|
||||
|
||||
"""Tests the correctness of the quantized::hardswish op."""
|
||||
@override_qengines
|
||||
|
|
@ -789,7 +789,7 @@ class TestQuantizedOps(TestCase):
|
|||
qX, scale=Y_scale, zero_point=Y_zero_point)
|
||||
self.assertEqual(
|
||||
qY, qY_hat,
|
||||
msg="Hardswish failed: {} vs {}, {}".format(qY, qY_hat, torch.backends.quantized.engine))
|
||||
msg=f"Hardswish failed: {qY} vs {qY_hat}, {torch.backends.quantized.engine}")
|
||||
|
||||
"""Tests the correctness of the binary op + scalar."""
|
||||
def _test_binary_op_scalar_relu(self, A, b, binary_op_name, binary_op, quantized_op, quantized_op_relu):
|
||||
|
|
@ -1353,7 +1353,7 @@ class TestQuantizedOps(TestCase):
|
|||
a_hat = op(qa, kernel_size=kernel, stride=stride, padding=padding,
|
||||
dilation=dilation, ceil_mode=ceil_mode)
|
||||
self.assertEqual(a_ref, a_hat.dequantize(),
|
||||
msg="{} results are off".format(name))
|
||||
msg=f"{name} results are off")
|
||||
# Test the ops.quantized separately, because None is not treated.
|
||||
a_hat = torch.ops.quantized.max_pool1d(
|
||||
qa, kernel_size=_single(kernel),
|
||||
|
|
@ -1450,7 +1450,7 @@ class TestQuantizedOps(TestCase):
|
|||
a_hat = op(qa, kernel_size=kernel, stride=stride, padding=padding,
|
||||
dilation=dilation, ceil_mode=ceil_mode)
|
||||
self.assertEqual(a_ref, a_hat.dequantize(),
|
||||
msg="{} results are off".format(name))
|
||||
msg=f"{name} results are off")
|
||||
# Test the ops.quantized separately, because None is not treated.
|
||||
a_hat = torch.ops.quantized.max_pool2d(
|
||||
qa, kernel_size=_pair(kernel),
|
||||
|
|
@ -1503,7 +1503,7 @@ class TestQuantizedOps(TestCase):
|
|||
a_hat = op(qa, kernel_size=kernel, stride=stride, padding=padding,
|
||||
dilation=dilation, ceil_mode=ceil_mode)
|
||||
self.assertEqual(a_ref, a_hat.dequantize(),
|
||||
msg="{} results are off".format(name))
|
||||
msg=f"{name} results are off")
|
||||
|
||||
"""Tests max pool operation on NHWC quantized tensors."""
|
||||
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=4, max_dims=4,
|
||||
|
|
@ -1556,7 +1556,7 @@ class TestQuantizedOps(TestCase):
|
|||
dilation=dilation, ceil_mode=ceil_mode)
|
||||
self.assertTrue(a_hat.stride() != sorted(a_hat.stride()))
|
||||
self.assertEqual(a_ref, a_hat.dequantize(),
|
||||
msg="{} results are off".format(name))
|
||||
msg=f"{name} results are off")
|
||||
# Test the ops.quantized separately, because None is not treated.
|
||||
a_hat = torch.ops.quantized.max_pool2d(
|
||||
qa, kernel_size=_pair(kernel),
|
||||
|
|
@ -1612,7 +1612,7 @@ class TestQuantizedOps(TestCase):
|
|||
a_hat = op(qa, kernel_size=kernel, stride=stride, padding=padding,
|
||||
dilation=dilation, ceil_mode=ceil_mode)
|
||||
self.assertEqual(a_ref, a_hat.dequantize(),
|
||||
msg="{} results are off".format(name))
|
||||
msg=f"{name} results are off")
|
||||
|
||||
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=3, max_dims=4,
|
||||
min_side=5, max_side=10),
|
||||
|
|
@ -1960,16 +1960,16 @@ class TestQuantizedOps(TestCase):
|
|||
output_size = (output_size_d, output_size_h, output_size_w)
|
||||
|
||||
# Run reference on int_repr + round to avoid double rounding error.
|
||||
ref_op = getattr(torch.nn.functional, 'adaptive_avg_pool{}d'.format(dim))
|
||||
ref_op = getattr(torch.nn.functional, f'adaptive_avg_pool{dim}d')
|
||||
X_ref = ref_op(qX.int_repr().to(torch.float), output_size).round()
|
||||
|
||||
ops_under_test = {
|
||||
"nn.functional":
|
||||
getattr(torch.nn.functional, 'adaptive_avg_pool{}d'.format(dim)),
|
||||
getattr(torch.nn.functional, f'adaptive_avg_pool{dim}d'),
|
||||
"nn.quantized.functional":
|
||||
getattr(torch.ao.nn.quantized.functional, 'adaptive_avg_pool{}d'.format(dim)),
|
||||
getattr(torch.ao.nn.quantized.functional, f'adaptive_avg_pool{dim}d'),
|
||||
"ao.nn.quantized.functional":
|
||||
getattr(torch.ao.nn.quantized.functional, 'adaptive_avg_pool{}d'.format(dim))
|
||||
getattr(torch.ao.nn.quantized.functional, f'adaptive_avg_pool{dim}d')
|
||||
}
|
||||
|
||||
error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
|
||||
|
|
@ -2678,7 +2678,7 @@ class TestQuantizedOps(TestCase):
|
|||
self.assertEqual(
|
||||
qy.int_repr().numpy(),
|
||||
quantize_ref.int_repr().numpy(),
|
||||
msg="{} vs {}".format(qy, quantize_ref))
|
||||
msg=f"{qy} vs {quantize_ref}")
|
||||
|
||||
@skipIfNoFBGEMM
|
||||
def test_batch_norm(self):
|
||||
|
|
@ -2723,7 +2723,7 @@ class TestQuantizedOps(TestCase):
|
|||
quantize_ref = torch.quantize_per_tensor(float_ref, Y_scale, Y_zero_point, dtype_x)
|
||||
self.assertEqual(
|
||||
qy.int_repr().numpy(), quantize_ref.int_repr().numpy(),
|
||||
msg="{} vs {}".format(qy, quantize_ref))
|
||||
msg=f"{qy} vs {quantize_ref}")
|
||||
|
||||
@override_qengines
|
||||
def test_empty_batch(self):
|
||||
|
|
@ -6477,10 +6477,10 @@ class TestQNNPackOps(TestCase):
|
|||
qY_hat = torch.tanh(qX)
|
||||
self.assertEqual(
|
||||
qY, qY_hat,
|
||||
msg="QNNPACK TanH failed (FP ref), memory_format {}".format(memory_format))
|
||||
msg=f"QNNPACK TanH failed (FP ref), memory_format {memory_format}")
|
||||
self.assertEqual(
|
||||
qYserver, qY_hat,
|
||||
msg="QNNPACK TanH failed (FBGEMM ref), memory_format {}".format(memory_format))
|
||||
msg=f"QNNPACK TanH failed (FBGEMM ref), memory_format {memory_format}")
|
||||
|
||||
"""Tests the correctness of the quantized::qnnpack_sigmoid op."""
|
||||
@skipIfNoFBGEMM
|
||||
|
|
@ -6509,10 +6509,10 @@ class TestQNNPackOps(TestCase):
|
|||
qY_hat = torch.sigmoid(qX)
|
||||
self.assertEqual(
|
||||
qY, qY_hat,
|
||||
msg="QNNPACK Sigmoid failed (FP ref), memory_format {}".format(memory_format))
|
||||
msg=f"QNNPACK Sigmoid failed (FP ref), memory_format {memory_format}")
|
||||
self.assertEqual(
|
||||
qYserver, qY_hat,
|
||||
msg="QNNPACK Sigmoid failed (FBGEMM ref), memory_format {}".format(memory_format))
|
||||
msg=f"QNNPACK Sigmoid failed (FBGEMM ref), memory_format {memory_format}")
|
||||
|
||||
@skipIfNoFBGEMM
|
||||
def test_qnnpack_sigmoid_sweep(self):
|
||||
|
|
@ -6904,7 +6904,7 @@ class TestQNNPackOps(TestCase):
|
|||
qY_hat = torch.ao.nn.quantized.functional.hardtanh(qX, min_val, max_val)
|
||||
self.assertEqual(
|
||||
qY, qY_hat,
|
||||
msg="hardtanh failed:\nactual {}\nexpected {}\nmemory_format {}".format(qY_hat, qY, memory_format))
|
||||
msg=f"hardtanh failed:\nactual {qY_hat}\nexpected {qY}\nmemory_format {memory_format}")
|
||||
|
||||
"""Tests the correctness of the tensor comparators."""
|
||||
class TestComparatorOps(TestCase):
|
||||
|
|
@ -6933,12 +6933,12 @@ class TestComparatorOps(TestCase):
|
|||
result_ref = getattr(dqA, op)(dqB)
|
||||
result = getattr(qA, op)(qB)
|
||||
self.assertEqual(result_ref, result,
|
||||
msg="'tensor.{}(tensor)'' failed".format(op))
|
||||
msg=f"'tensor.{op}(tensor)'' failed")
|
||||
# Reversed broadcasting.
|
||||
result_ref = getattr(dqB, op)(dqA)
|
||||
result = getattr(qB, op)(qA)
|
||||
self.assertEqual(result_ref, result,
|
||||
msg="'tensor.{}(tensor)'' failed".format(op))
|
||||
msg=f"'tensor.{op}(tensor)'' failed")
|
||||
|
||||
@given(A=hu.tensor(shapes=((3, 4, 5),),
|
||||
qparams=hu.qparams()),
|
||||
|
|
@ -6958,22 +6958,22 @@ class TestComparatorOps(TestCase):
|
|||
for op in ops_under_test_reversible:
|
||||
result_ref = getattr(dqA, op)(b)
|
||||
result = getattr(qA, op)(b)
|
||||
note("result_ref 1: {}".format(result_ref))
|
||||
note("result 1: {}".format(result))
|
||||
note(f"result_ref 1: {result_ref}")
|
||||
note(f"result 1: {result}")
|
||||
self.assertEqual(result_ref, result,
|
||||
msg="'tensor.{}(scalar)'' failed".format(op))
|
||||
msg=f"'tensor.{op}(scalar)'' failed")
|
||||
# Reversed broadcasting.
|
||||
result_ref = getattr(b, op)(dqA)
|
||||
result = getattr(b, op)(qA)
|
||||
note("result_ref 2: {}".format(result_ref))
|
||||
note("result 2: {}".format(result))
|
||||
note(f"result_ref 2: {result_ref}")
|
||||
note(f"result 2: {result}")
|
||||
self.assertEqual(result_ref, result,
|
||||
msg="'scalar.{}(tensor)'' failed".format(op))
|
||||
msg=f"'scalar.{op}(tensor)'' failed")
|
||||
|
||||
for op in ops_under_test_nonreversible:
|
||||
result_ref = getattr(dqA, op)(b)
|
||||
result = getattr(qA, op)(b)
|
||||
note("result_ref 3: {}".format(result_ref))
|
||||
note("result 3: {}".format(result))
|
||||
note(f"result_ref 3: {result_ref}")
|
||||
note(f"result 3: {result}")
|
||||
self.assertEqual(result_ref, result,
|
||||
msg="'tensor.{}(scalar)'' failed".format(op))
|
||||
msg=f"'tensor.{op}(scalar)'' failed")
|
||||
|
|
|
|||
|
|
@ -832,7 +832,7 @@ class TestDistributed(QuantizationTestCase):
|
|||
self.assertEqual(
|
||||
buffer_ids_before,
|
||||
buffer_ids_after,
|
||||
msg="{}: Buffers must be modified in place".format(str(observer)))
|
||||
msg=f"{str(observer)}: Buffers must be modified in place")
|
||||
|
||||
def test_fake_quant_preserves_buffers(self):
|
||||
"""
|
||||
|
|
|
|||
|
|
@ -1,4 +1,3 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Owner(s): ["oncall: quantization"]
|
||||
|
||||
import torch
|
||||
|
|
|
|||
|
|
@ -3916,7 +3916,7 @@ class TestQuantizeFx(QuantizationTestCase):
|
|||
elif arg_node.op == "call_module":
|
||||
self.assertTrue(
|
||||
not _is_activation_post_process(getattr(model, arg_node.target)),
|
||||
"Arg: {0} of node: {1} is observed but is not a float tensor".format(
|
||||
"Arg: {} of node: {} is observed but is not a float tensor".format(
|
||||
arg_node, node
|
||||
),
|
||||
)
|
||||
|
|
@ -5519,19 +5519,19 @@ class TestQuantizeFx(QuantizationTestCase):
|
|||
self.addCleanup(cleanUp)
|
||||
|
||||
@_register_fusion_pattern("dummy_fusion")
|
||||
class DummyFusion():
|
||||
class DummyFusion:
|
||||
pass
|
||||
|
||||
@_register_quant_pattern("dummy_quant")
|
||||
class DummyQuant():
|
||||
class DummyQuant:
|
||||
pass
|
||||
|
||||
@_register_quant_pattern("dummy_quant2", default_fixed_qparams_range_0to1_observer)
|
||||
class DummyQuant2():
|
||||
class DummyQuant2:
|
||||
pass
|
||||
|
||||
@_register_quant_pattern("dummy_quant3", default_fixed_qparams_range_neg1to1_observer)
|
||||
class DummyQuant3():
|
||||
class DummyQuant3:
|
||||
pass
|
||||
|
||||
self.assertEqual(_DEFAULT_FUSION_PATTERNS["dummy_fusion"], DummyFusion)
|
||||
|
|
@ -8304,7 +8304,7 @@ class TestQuantizeFxOps(QuantizationTestCase):
|
|||
self.avg_pool1d = torch.nn.AvgPool1d(3)
|
||||
self.avg_pool2d = torch.nn.AvgPool2d(3)
|
||||
self.avg_pool3d = torch.nn.AvgPool3d(3)
|
||||
self.adaptive_avg_pool1d = torch.nn.AdaptiveAvgPool1d((1))
|
||||
self.adaptive_avg_pool1d = torch.nn.AdaptiveAvgPool1d(1)
|
||||
self.adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d((1, 1))
|
||||
self.adaptive_avg_pool3d = torch.nn.AdaptiveAvgPool3d((1, 1, 1))
|
||||
|
||||
|
|
|
|||
|
|
@ -1,4 +1,3 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Owner(s): ["oncall: quantization"]
|
||||
|
||||
# torch
|
||||
|
|
|
|||
|
|
@ -1,4 +1,3 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Owner(s): ["oncall: quantization"]
|
||||
|
||||
import torch
|
||||
|
|
|
|||
|
|
@ -1,4 +1,3 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Owner(s): ["oncall: quantization"]
|
||||
|
||||
# torch
|
||||
|
|
@ -1692,7 +1691,7 @@ class TestQuantizeJitOps(QuantizationTestCase):
|
|||
model = self.checkGraphModeOp(
|
||||
Conv(dim),
|
||||
self.img_data_dict[dim],
|
||||
"quantized::conv{}d".format(dim),
|
||||
f"quantized::conv{dim}d",
|
||||
tracing,
|
||||
)
|
||||
# make sure there is only one quantize_per_tensor for input
|
||||
|
|
@ -1701,7 +1700,7 @@ class TestQuantizeJitOps(QuantizationTestCase):
|
|||
model.graph
|
||||
)
|
||||
|
||||
FileCheck().check_not("quantized::conv{}d_prepack".format(dim)).run(
|
||||
FileCheck().check_not(f"quantized::conv{dim}d_prepack").run(
|
||||
model.graph
|
||||
)
|
||||
|
||||
|
|
@ -1743,17 +1742,17 @@ class TestQuantizeJitOps(QuantizationTestCase):
|
|||
ConvNdFunctionalRelu(dim),
|
||||
ConvNdInplaceFunctionalRelu(dim),
|
||||
]:
|
||||
conv_name = "conv{}d".format(dim)
|
||||
conv_name = f"conv{dim}d"
|
||||
m = self.checkGraphModeOp(
|
||||
orig_m,
|
||||
self.img_data_dict[dim],
|
||||
"quantized::conv{}d_relu(".format(dim),
|
||||
f"quantized::conv{dim}d_relu(",
|
||||
tracing=tracing,
|
||||
)
|
||||
|
||||
FileCheck().check_not("aten::conv{}d(".format(dim)).check_not(
|
||||
FileCheck().check_not(f"aten::conv{dim}d(").check_not(
|
||||
"aten::relu"
|
||||
).check_not("quantized::conv{}d(".format(dim)).check_not(
|
||||
).check_not(f"quantized::conv{dim}d(").check_not(
|
||||
"quantized::relu("
|
||||
).run(
|
||||
m.graph
|
||||
|
|
@ -2896,7 +2895,7 @@ class TestQuantizeJitOps(QuantizationTestCase):
|
|||
self.avg_pool1d = torch.nn.AvgPool1d(3)
|
||||
self.avg_pool2d = torch.nn.AvgPool2d(3)
|
||||
self.avg_pool3d = torch.nn.AvgPool3d(3)
|
||||
self.adaptive_avg_pool1d = torch.nn.AdaptiveAvgPool1d((1))
|
||||
self.adaptive_avg_pool1d = torch.nn.AdaptiveAvgPool1d(1)
|
||||
self.adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d((1, 1))
|
||||
self.adaptive_avg_pool3d = torch.nn.AdaptiveAvgPool3d((1, 1, 1))
|
||||
self.leaky_relu = torch.nn.LeakyReLU()
|
||||
|
|
|
|||
|
|
@ -1272,7 +1272,7 @@ def exclude_tests(
|
|||
not exact_match and test.startswith(exclude_test)
|
||||
) or test == exclude_test:
|
||||
if exclude_message is not None:
|
||||
print_to_stderr("Excluding {} {}".format(test, exclude_message))
|
||||
print_to_stderr(f"Excluding {test} {exclude_message}")
|
||||
selected_tests.remove(test)
|
||||
return selected_tests
|
||||
|
||||
|
|
@ -1424,7 +1424,7 @@ def get_selected_tests(options) -> List[ShardedTest]:
|
|||
# Download previous test times to make sharding decisions
|
||||
path = os.path.join(str(REPO_ROOT), TEST_TIMES_FILE)
|
||||
if os.path.exists(path):
|
||||
with open(path, "r") as f:
|
||||
with open(path) as f:
|
||||
test_file_times = cast(Dict[str, Any], json.load(f))
|
||||
else:
|
||||
test_file_times = {}
|
||||
|
|
|
|||
|
|
@ -1,4 +1,3 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Owner(s): ["module: unknown"]
|
||||
|
||||
from torch.testing._internal.common_utils import run_tests, IS_ARM64
|
||||
|
|
|
|||
|
|
@ -83,7 +83,7 @@ class TestAutocastCPU(TestCase):
|
|||
control = getattr(args[0].to(run_as_type), op)(*cast(args[1:], run_as_type), **add_kwargs)
|
||||
self.assertTrue(type(output_to_compare) == type(control))
|
||||
comparison = compare(output_to_compare, control)
|
||||
self.assertTrue(comparison, "torch.{} result did not match control".format(op))
|
||||
self.assertTrue(comparison, f"torch.{op} result did not match control")
|
||||
self.assertTrue(torch.is_autocast_cpu_enabled())
|
||||
self.assertFalse(torch.is_autocast_cpu_enabled())
|
||||
|
||||
|
|
|
|||
|
|
@ -121,7 +121,7 @@ class TestAutograd(TestCase):
|
|||
# Decorating class is deprecated and should not be used
|
||||
with self.assertWarnsRegex(UserWarning, "Decorating classes is deprecated"):
|
||||
@torch.no_grad()
|
||||
class Foo():
|
||||
class Foo:
|
||||
def __init__(self):
|
||||
assert not torch.is_grad_enabled()
|
||||
|
||||
|
|
@ -141,7 +141,7 @@ class TestAutograd(TestCase):
|
|||
|
||||
foo()
|
||||
|
||||
class Foo2():
|
||||
class Foo2:
|
||||
@torch.no_grad()
|
||||
def __init__(self):
|
||||
assert not torch.is_grad_enabled()
|
||||
|
|
@ -7505,7 +7505,7 @@ for shape in [(1,), ()]:
|
|||
#
|
||||
# grad_output -> grad_output.grad_fn -> graph -> hook -> grad_output
|
||||
#
|
||||
class TestCls():
|
||||
class TestCls:
|
||||
# Dummy class for the purpose of creating a weakref
|
||||
pass
|
||||
|
||||
|
|
@ -7570,7 +7570,7 @@ for shape in [(1,), ()]:
|
|||
def backward(ctx, grad):
|
||||
return grad
|
||||
|
||||
class Test():
|
||||
class Test:
|
||||
pass
|
||||
|
||||
count = [0]
|
||||
|
|
@ -9243,7 +9243,7 @@ class TestAutogradDeviceType(TestCase):
|
|||
if dtype.is_floating_point:
|
||||
f()
|
||||
else:
|
||||
with self.assertRaisesRegex(RuntimeError, 'floating point', msg="dt: {} device: {}".format(a.dtype, a.device)):
|
||||
with self.assertRaisesRegex(RuntimeError, 'floating point', msg=f"dt: {a.dtype} device: {a.device}"):
|
||||
f()
|
||||
|
||||
@onlyCUDA
|
||||
|
|
@ -10865,7 +10865,7 @@ class TestNestedCheckpoint(TestCase):
|
|||
|
||||
yield node
|
||||
|
||||
class Handle():
|
||||
class Handle:
|
||||
__slot__ = ["node_name"]
|
||||
|
||||
def __init__(self, node_name):
|
||||
|
|
|
|||
|
|
@ -168,8 +168,8 @@ class TestBinaryUfuncs(TestCase):
|
|||
if _numel(l) <= 100 and _numel(r) <= 100:
|
||||
msg = (
|
||||
"Failed to produce expected results! Input lhs tensor was"
|
||||
" {0}, rhs tensor was {1}, torch result is {2}, and reference result is"
|
||||
" {3}."
|
||||
" {}, rhs tensor was {}, torch result is {}, and reference result is"
|
||||
" {}."
|
||||
).format(l, r, actual, expected)
|
||||
else:
|
||||
msg = None
|
||||
|
|
@ -491,7 +491,7 @@ class TestBinaryUfuncs(TestCase):
|
|||
)
|
||||
|
||||
def _supported(dtypes):
|
||||
return all((x in supported_dtypes for x in dtypes))
|
||||
return all(x in supported_dtypes for x in dtypes)
|
||||
|
||||
# int x int type promotion
|
||||
if _supported((torch.int16, torch.int32, torch.int64)):
|
||||
|
|
@ -2424,21 +2424,21 @@ class TestBinaryUfuncs(TestCase):
|
|||
for i in range(750):
|
||||
self.assertTrue(
|
||||
torch.isnan(ma[i]),
|
||||
"max(a, b): {}, a: {}, b: {}".format(ma[i], a[i], b[i]),
|
||||
f"max(a, b): {ma[i]}, a: {a[i]}, b: {b[i]}",
|
||||
)
|
||||
self.assertTrue(
|
||||
torch.isnan(mi[i]),
|
||||
"min(a, b): {}, a: {}, b: {}".format(mi[i], a[i], b[i]),
|
||||
f"min(a, b): {mi[i]}, a: {a[i]}, b: {b[i]}",
|
||||
)
|
||||
|
||||
for i in range(750, 1000):
|
||||
self.assertFalse(
|
||||
torch.isnan(ma[i]),
|
||||
"max(a, b): {}, a: {}, b: {}".format(ma[i], a[i], b[i]),
|
||||
f"max(a, b): {ma[i]}, a: {a[i]}, b: {b[i]}",
|
||||
)
|
||||
self.assertFalse(
|
||||
torch.isnan(mi[i]),
|
||||
"min(a, b): {}, a: {}, b: {}".format(mi[i], a[i], b[i]),
|
||||
f"min(a, b): {mi[i]}, a: {a[i]}, b: {b[i]}",
|
||||
)
|
||||
|
||||
@dtypes(
|
||||
|
|
@ -4448,8 +4448,8 @@ def generate_not_implemented_tests(cls):
|
|||
return test
|
||||
|
||||
for op in tensor_binary_ops:
|
||||
test_name = "test_{}_not_implemented".format(op)
|
||||
assert not hasattr(cls, test_name), "{0} already in {1}".format(
|
||||
test_name = f"test_{op}_not_implemented"
|
||||
assert not hasattr(cls, test_name), "{} already in {}".format(
|
||||
test_name, cls.__name__
|
||||
)
|
||||
|
||||
|
|
|
|||
|
|
@ -23,7 +23,7 @@ class TestCompileBenchmarkUtil(TestCase):
|
|||
def test_training_and_inference(self):
|
||||
class ToyModel(torch.nn.Module):
|
||||
def __init__(self):
|
||||
super(ToyModel, self).__init__()
|
||||
super().__init__()
|
||||
self.weight = torch.nn.Parameter(torch.Tensor(2, 2))
|
||||
|
||||
def forward(self, x):
|
||||
|
|
|
|||
|
|
@ -230,7 +230,7 @@ class TestCppExtensionJIT(common.TestCase):
# expected values is length-2 tuple: (list of ELF, list of PTX)
# note: there should not be more than one PTX value
archflags = {
'': (['{}{}'.format(capability[0], capability[1]) for capability in capabilities], None),
'': ([f'{capability[0]}{capability[1]}' for capability in capabilities], None),
"Maxwell+Tegra;6.1": (['53', '61'], None),
"Volta": (['70'], ['70']),
}
@ -33,7 +33,7 @@ def remove_build_path():
shutil.rmtree(default_build_root, ignore_errors=True)


class DummyModule(object):
class DummyModule:

@staticmethod
def device_count() -> int:
@ -381,7 +381,7 @@ class TestCppExtensionOpenRgistration(common.TestCase):
foo_storage = foo_tensor.storage()
self.assertEqual(foo_storage.type(), "torch.storage.TypedStorage")

class CustomFloatStorage():
class CustomFloatStorage:
@property
def __module__(self):
return "torch." + torch._C._get_privateuse1_backend_name()
@ -1781,7 +1781,7 @@ torch.cuda.synchronize()
control = getattr(args[0].to(run_as_type), op)(*cast(args[1:], run_as_type), **add_kwargs)
self.assertTrue(type(output_to_compare) == type(control))
comparison = compare(output_to_compare, control)
self.assertTrue(comparison, "torch.{} result did not match control".format(op))
self.assertTrue(comparison, f"torch.{op} result did not match control")
self.assertTrue(torch.is_autocast_enabled())
self.assertFalse(torch.is_autocast_enabled())
@ -2727,7 +2727,7 @@ exit(2)
stat = stat + pool_string + ".current"
current = postcapture_stats[stat] - precapture_stats[stat]
self.assertEqual(current, expected, "Pre to post capture delta of " +
stat + " = {}, expected = {}, numel = {}".format(current, expected, numel))
stat + f" = {current}, expected = {expected}, numel = {numel}")

g.replay()
self.assertEqual(b.sum().item(), 6 * numel)
@ -2748,7 +2748,7 @@ exit(2)
stat = stat + pool_string + ".current"
current = postdel_stats[stat] - precapture_stats[stat]
self.assertEqual(current, expected, "Pre capture to post graph delete delta of " +
stat + " = {}, expected = {}, numel = {}".format(current, expected, numel))
stat + f" = {current}, expected = {expected}, numel = {numel}")

# del a, b before the next case is essential, otherwise overwriting a and b in the next case
# can throw off its allocation/deallocation counts.
@ -3068,10 +3068,10 @@ exit(2)
# mimicking `_test_graphed_optimizer` maladroitly to pass two param_groups to optimizer.__init__
n_warmup, n_replay = 3, 2
for optimizer, second_param_group_capturable in product((torch.optim.Adam, torch.optim.AdamW), (True, False)):
ref_p1, param1 = [torch.nn.Parameter(torch.ones(1, device="cuda")) for _ in range(2)]
ref_p2, param2 = [torch.nn.Parameter(torch.ones(1, device="cuda")) for _ in range(2)]
grads1, grads2 = [[torch.randn_like(param1) for _ in range(n_warmup + n_replay)] for _ in range(2)]
ref_grads1, ref_grads2 = [[t.clone() for t in tensors] for tensors in (grads1, grads2)]
ref_p1, param1 = (torch.nn.Parameter(torch.ones(1, device="cuda")) for _ in range(2))
ref_p2, param2 = (torch.nn.Parameter(torch.ones(1, device="cuda")) for _ in range(2))
grads1, grads2 = ([torch.randn_like(param1) for _ in range(n_warmup + n_replay)] for _ in range(2))
ref_grads1, ref_grads2 = ([t.clone() for t in tensors] for tensors in (grads1, grads2))
params = [
{"params": [param1], "capturable": True},
{"params": [param2], "capturable": second_param_group_capturable},
@ -3313,7 +3313,7 @@ class TestCudaMallocAsync(TestCase):
if not IS_WINDOWS:
with tempfile.NamedTemporaryFile() as f:
torch.cuda.memory._save_segment_usage(f.name)
with open(f.name, 'r') as f2:
with open(f.name) as f2:
self.assertTrue('test_cuda.py' in f2.read())

del x
@ -9,4 +9,4 @@ if torch.cuda.is_available():

current_dir = os.path.dirname(os.path.abspath(__file__))
filepath = os.path.join(current_dir, 'test_cuda.py')
exec(compile(open(filepath, 'r').read(), filepath, mode='exec'))
exec(compile(open(filepath).read(), filepath, mode='exec'))
@ -179,7 +179,7 @@ class TestDatasetRandomSplit(TestCase):
r"""Indices generated by random_split
should be of integer type
"""
class CustomDataset():
class CustomDataset:
def __init__(self, test_object, custom_list):
self.data = custom_list
self.test_object = test_object
@ -874,7 +874,7 @@ def _test_worker_info_init_fn(worker_id):
except RuntimeError as e:
assert str(e) == "Cannot assign attributes to WorkerInfo objects"
for k in ['id', 'num_workers', 'seed', 'dataset']:
assert "{}=".format(k) in repr(worker_info)
assert f"{k}=" in repr(worker_info)
dataset.value = [worker_id, os.getpid()]
@ -1960,11 +1960,11 @@ except RuntimeError as e:
continue

desc = []
desc.append('is_iterable_dataset={}'.format(is_iterable_dataset))
desc.append('use_workers={}'.format(use_workers))
desc.append('pin_memory={}'.format(pin_memory))
desc.append('hold_iter_reference={}'.format(hold_iter_reference))
desc.append('exit_method={}'.format(exit_method))
desc.append(f'is_iterable_dataset={is_iterable_dataset}')
desc.append(f'use_workers={use_workers}')
desc.append(f'pin_memory={pin_memory}')
desc.append(f'hold_iter_reference={hold_iter_reference}')
desc.append(f'exit_method={exit_method}')
desc = 'test_proper_exit with ' + ', '.join(desc)

# Event that the loader process uses to signal testing process
@ -1992,9 +1992,9 @@ except RuntimeError as e:
if not loader_setup_event.is_set():
fail_msg = desc + ': loader process failed to setup within given time'
if loader_p.exception is not None:
fail_msg += ', and had exception {}'.format(loader_p.exception)
fail_msg += f', and had exception {loader_p.exception}'
elif not loader_p.is_alive():
fail_msg += ', and exited with code {} but had no exception'.format(loader_p.exitcode)
fail_msg += f', and exited with code {loader_p.exitcode} but had no exception'
else:
fail_msg += ', and is still alive.'
if loader_p.is_alive():
@ -2013,18 +2013,18 @@ except RuntimeError as e:
if reason is None:
err_msg = desc
else:
err_msg = '{}: {}'.format(desc, reason)
err_msg = f'{desc}: {reason}'
err_msg += '\nLoader info:\n\t'
if loader_psutil_p.is_running():
err_msg += str(loader_psutil_p.as_dict(attrs=report_psutil_attrs))
# this may kill the process, needs to run after the above line
loader_p.print_traces_of_all_threads()
else:
err_msg += 'exited with code {}'.format(loader_p.exitcode)
err_msg += f'exited with code {loader_p.exitcode}'
if use_workers:
err_msg += '\nWorker(s) info:'
for idx, worker_psutil_p in enumerate(worker_psutil_ps):
err_msg += '\n\tWorker {}:\n\t\t'.format(idx)
err_msg += f'\n\tWorker {idx}:\n\t\t'
if worker_psutil_p.is_running():
err_msg += str(worker_psutil_p.as_dict(attrs=report_psutil_attrs))
# this may kill the process, needs to run after the above line
@ -2040,7 +2040,7 @@ except RuntimeError as e:
if loader_p.is_alive():
fail_reason = 'loader process did not terminate'
if loader_p.exception is not None:
fail(fail_reason + ', and had exception {}'.format(loader_p.exception))
fail(fail_reason + f', and had exception {loader_p.exception}')
else:
fail(fail_reason + ', and had no exception')
_, alive = psutil.wait_procs(worker_psutil_ps, timeout=(MP_STATUS_CHECK_INTERVAL + JOIN_TIMEOUT))
@ -2049,7 +2049,7 @@ except RuntimeError as e:
', '.join(str(p.pid) for p in alive)))
if exit_method is None:
if loader_p.exitcode != 0:
fail('loader process had nonzero exitcode {}'.format(loader_p.exitcode))
fail(f'loader process had nonzero exitcode {loader_p.exitcode}')
else:
if loader_p.exitcode == 0:
fail('loader process had zero exitcode')
@ -206,7 +206,7 @@ class TestStreamWrapper(TestCase):
if self.opened:
return "".join(self)
else:
raise IOError("Cannot read from un-opened file descriptor")
raise OSError("Cannot read from un-opened file descriptor")

def __iter__(self):
for i in range(5):
@ -285,7 +285,7 @@ class TestIterableDataPipeBasic(TestCase):
self.temp_sub_dir.cleanup()
self.temp_dir.cleanup()
except Exception as e:
warnings.warn("TestIterableDatasetBasic was not able to cleanup temp dir due to {}".format(str(e)))
warnings.warn(f"TestIterableDatasetBasic was not able to cleanup temp dir due to {str(e)}")

def test_listdirfiles_iterable_datapipe(self):
temp_dir = self.temp_dir.name
@ -101,15 +101,15 @@ class TestDispatch(TestCase):

# double underscore to make it less likely we conflict with something
# else
test_namespace = "__test{}__".format(self.namespace_index)
test_namespace = f"__test{self.namespace_index}__"

def check_invariants(actual_provenance):
C._dispatch_check_invariants(name)
# Normalize the test namespace so that expected outputs are stable
actual_state = C._dispatch_dump(
"{}::{}".format(test_namespace, name)).replace(test_namespace, "test")
f"{test_namespace}::{name}").replace(test_namespace, "test")
actual_table = C._dispatch_dump_table(
"{}::{}".format(test_namespace, name)).replace(test_namespace, "test")
f"{test_namespace}::{name}").replace(test_namespace, "test")
expected_state, expected_table, expected_provenance = results.setdefault(
frozenset(active_ops),
Result(actual_state, actual_table, actual_provenance)
@ -138,7 +138,7 @@ class TestDispatch(TestCase):
active_ops.add(op_ix)
try:
ops[op_ix](refs[op_ix])
check_invariants("running ctors {}".format(ctor_order[:i + 1]))
check_invariants(f"running ctors {ctor_order[:i + 1]}")
except RuntimeError as e:
if not expect_raises:
raise
@ -146,7 +146,7 @@ class TestDispatch(TestCase):
actual = actual.split("\nException raised from ")[0]
expected, _, expected_provenance = results.setdefault(
frozenset(active_ops),
Result(actual, "", "error after running ctors {}".format(ctor_order[:i + 1]))
Result(actual, "", f"error after running ctors {ctor_order[:i + 1]}")
)
self.assertMultiLineEqual(expected, actual, expected_provenance)
set_to_report = frozenset(active_ops)
@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Owner(s): ["module: tests"]

import torch
@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Owner(s): ["oncall: jit"]

import contextlib
@ -851,7 +851,7 @@ class FakeTensorConverterTest(TestCase):
self.assertEqual(out.device.type, "cpu")

def test_multiple_modes(self):
t = torch.rand(([4]))
t = torch.rand([4])
t2 = torch.rand([4])
with FakeTensorMode() as m:
with FakeTensorMode() as m2:
@ -1002,16 +1002,16 @@ class TestForeach(TestCase):
num_tensors_seen = 0
for (device, dtype), ([l1, l2, l3], indices) in grouped_tensors.items():
for t in itertools.chain(l1, l3):
self.assertEquals(t.device, device)
self.assertEquals(t.dtype, dtype)
self.assertEqual(t.device, device)
self.assertEqual(t.dtype, dtype)
num_tensors_seen += 1
self.assertEqual(len(l1), len(l2))
self.assertTrue(all(p is None for p in l2))
for i, index in enumerate(indices):
self.assertEquals(l1[i], list1[index])
self.assertEquals(l2[i], list2[index])
self.assertEquals(l3[i], list3[index])
self.assertEquals(num_tensors_seen, 2 * num_tensors_per_list)
self.assertEqual(l1[i], list1[index])
self.assertEqual(l2[i], list2[index])
self.assertEqual(l3[i], list3[index])
self.assertEqual(num_tensors_seen, 2 * num_tensors_per_list)


instantiate_device_type_tests(TestForeach, globals())
@ -34,15 +34,15 @@ class MyDummyFnOptimizer:
):

if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
raise ValueError(f"Invalid epsilon value: {eps}")
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
if not 0.0 < weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
raise ValueError(f"Invalid weight_decay value: {weight_decay}")

self.defaults = {
"lr": lr,
@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Owner(s): ["oncall: jit"]

import torch
@ -1186,7 +1185,7 @@ class TestJit(JitTestCase):

def test_script_backward_twice(self):
def checkBackwardTwiceScript(fn, inputs, retain_graph_=False):
class jit_profiling_executor_false():
class jit_profiling_executor_false:
def __enter__(self):
torch._C._jit_set_profiling_executor(False)
@ -2897,7 +2896,7 @@ graph(%Ra, %Rb):

with TemporaryFileName() as fname:
fn.save(fname)
with io.open(fname, 'rb') as f:
with open(fname, 'rb') as f:
self.assertTrue(torch.serialization._is_zipfile(f))

def test_python_bindings(self):
@ -3954,7 +3953,7 @@ def foo(x):
if idx < len(exprs):
return get_expr(idx)
else:
return 'v{}'.format(idx - len(exprs))
return f'v{idx - len(exprs)}'

for i in range(50):
n = None
@ -3963,12 +3962,12 @@ def foo(x):
n = template.count('{}')

if 'VAR' in template:
src_lines.append(' v{} = {}'.format(n_variables, select_expr_or_var()))
src_lines.append(f' v{n_variables} = {select_expr_or_var()}')
n_variables += 1
else:
exprs.append(template.format(*(select_expr_or_var() for _ in range(n))))

src_lines.append(' return ({})\n'.format(''.join('v{},'.format(i) for i in range(n_variables))))
src_lines.append(' return ({})\n'.format(''.join(f'v{i},' for i in range(n_variables))))
return '\n'.join(src_lines)

for i in range(100):
@ -4392,7 +4391,7 @@ def foo(x):
return torch.blargh(xyz)

_, lineno = inspect.getsourcelines(foobar)
with self.assertRaisesRegex(RuntimeError, "test_jit.py\", line {}".format(lineno + 1)):
with self.assertRaisesRegex(RuntimeError, f"test_jit.py\", line {lineno + 1}"):
scripted = torch.jit.script(foobar)

def test_file_line_error_class_defn(self):
@ -4401,7 +4400,7 @@ def foo(x):
return torch.blargh(xyz)

_, lineno = inspect.getsourcelines(FooBar)
with self.assertRaisesRegex(RuntimeError, "test_jit.py\", line {}".format(lineno + 2)):
with self.assertRaisesRegex(RuntimeError, f"test_jit.py\", line {lineno + 2}"):
torch.jit.script(FooBar)

def test_file_line_graph(self):
@ -4411,7 +4410,7 @@ def foo(x):
scripted = torch.jit.script(foobar)

_, lineno = inspect.getsourcelines(foobar)
fc = FileCheck().check('test_jit.py:{}:19'.format(lineno + 1))
fc = FileCheck().check(f'test_jit.py:{lineno + 1}:19')
fc.run(scripted.graph)
fc.run(str(scripted.graph))
@ -4431,7 +4430,7 @@ def foo(x):
scripted = torch.jit.load(bytesio)

_, lineno = inspect.getsourcelines(Scripted)
fc = FileCheck().check(':{}'.format(lineno + 3))
fc = FileCheck().check(f':{lineno + 3}')
fc.run(scripted.graph)
fc.run(str(scripted.graph))
@ -4453,7 +4452,7 @@ def foo(xyz):
scripted = torch.jit.trace(foobar, (torch.rand(3, 4)))

_, lineno = inspect.getsourcelines(foobar)
fc = FileCheck().check('test_jit.py:{}:0'.format(lineno + 1))
fc = FileCheck().check(f'test_jit.py:{lineno + 1}:0')
fc.run(scripted.graph)
fc.run(str(scripted.graph))
@ -4468,7 +4467,7 @@ def foo(xyz):
loaded = self.getExportImportCopy(ft)
_, lineno = inspect.getsourcelines(FooTest)

with self.assertRaisesRegex(RuntimeError, 'test_jit.py\", line {}'.format(lineno + 3)):
with self.assertRaisesRegex(RuntimeError, f'test_jit.py\", line {lineno + 3}'):
loaded(torch.rand(3, 4), torch.rand(30, 40))

def test_serialized_source_ranges_graph(self):
@ -4482,7 +4481,7 @@ def foo(xyz):
loaded = self.getExportImportCopy(ft)
_, lineno = inspect.getsourcelines(FooTest3)

fc = FileCheck().check('test_jit.py:{}'.format(lineno + 3))
fc = FileCheck().check(f'test_jit.py:{lineno + 3}')
fc.run(loaded.graph)

def test_serialized_source_ranges2(self):
@ -4494,7 +4493,7 @@ def foo(xyz):

_, lineno = inspect.getsourcelines(FooTest2)

with self.assertRaisesRegex(torch.jit.Error, 'test_jit.py\", line {}'.format(lineno + 3)):
with self.assertRaisesRegex(torch.jit.Error, f'test_jit.py\", line {lineno + 3}'):
ft = FooTest2()
loaded = self.getExportImportCopy(ft)
loaded()
@ -7240,7 +7239,7 @@ a")
scope = {}
execWrapper(code, globals(), scope)
cu = torch.jit.CompilationUnit(code)
message = 'with code `{} {} {}` and t={}'.format(args[0], op, args[1], tensor)
message = f'with code `{args[0]} {op} {args[1]}` and t={tensor}'
res1 = cu.func(tensor)
res2 = scope['func'](tensor)
self.assertEqual(res1, res2, msg=message + "\nres1=" + str(res1) + "\nres2=" + str(res2))
@ -7338,7 +7337,7 @@ a")
exec(code, globals(), scope)
cu = torch.jit.CompilationUnit(code)
torch._C._jit_pass_complete_shape_analysis(cu.func.graph, (), False)
FileCheck().check(expect).check("aten::{tensor_op}".format(tensor_op=op)).run(cu.func.graph)
FileCheck().check(expect).check(f"aten::{op}").run(cu.func.graph)

@torch.jit.script
def test_dtype(inp_dtype: torch.dtype):
@ -7545,7 +7544,7 @@ dedent """
devices = [t.device]
if t.device.type == 'cuda':
if t.device.index == -1:
devices.append('cuda:{}'.format(torch.cuda.current_device()))
devices.append(f'cuda:{torch.cuda.current_device()}')
elif t.device.index == torch.cuda.current_device():
devices.append('cuda')
for device in devices:
@ -8600,7 +8599,7 @@ dedent """
if(not tensor_type.is_floating_point or (dtype is not None and not dtype.is_floating_point)):
if op in ['mean', 'softmax', 'log_softmax']:
continue
return_line = "torch.tensor({}, dtype={}).{}({}dtype={})".format(tensor_data, tensor_type, op, str_args, dtype)
return_line = f"torch.tensor({tensor_data}, dtype={tensor_type}).{op}({str_args}dtype={dtype})"
# uncomment for debugging a failed test:
# print("testing {}".format(return_line))
code = template.format(return_line=return_line)
@ -8653,7 +8652,7 @@ dedent """

args = []
for dtype in dtypes:
args = args + ["torch.tensor({}, dtype={})".format(shape, dtype)]
args = args + [f"torch.tensor({shape}, dtype={dtype})"]
args = args + [1, 1.5]

def isBool(arg):
@ -8670,7 +8669,7 @@ dedent """
isinstance(first_arg, int) or
(isinstance(first_arg, str) and 'int' in first_arg))):
continue
return_line = "torch.{}({}, {})".format(op, first_arg, second_arg)
return_line = f"torch.{op}({first_arg}, {second_arg})"
# uncomment for debugging a failed test:
# print("testing {}".format(return_line))
code = template.format(first_arg, second_arg, op)
@ -10220,7 +10219,7 @@ dedent """
def fn3(x, y, z):
return fn_varargs(x, y, z)

x, y, z = [torch.randn(2, 2) for _ in range(3)]
x, y, z = (torch.randn(2, 2) for _ in range(3))
self.checkScript(fn1, (x, y, z), optimize=True)
self.checkScript(fn2, (x, y, z), optimize=True)
self.checkScript(fn3, (x, y, z), optimize=True)
@ -12964,7 +12963,7 @@ dedent """
returns = fn.schema.returns
self.assertEqual(str(args[0].type), pair[1])
self.assertEqual(str(args[1].type), "Tuple[Tensor, Tensor]")
self.assertEqual(str(returns[0].type), "Tuple[{}, {}]".format(pair[1], pair[1]))
self.assertEqual(str(returns[0].type), f"Tuple[{pair[1]}, {pair[1]}]")

def test_bad_multiline_annotations(self):
with self.assertRaisesRegex(RuntimeError, "Return type line"):
@ -13570,7 +13569,7 @@ dedent """
# type: (str) -> Tensor
return torch.tensor(ord(s)) # noqa: T484

s = u'\u00a3'.encode('utf8')[:1]
s = '\u00a3'.encode()[:1]
self.checkScript(index_str_to_tensor, (s,))

def test_chr(self):
@ -14707,7 +14706,7 @@ dedent """

over = Over()
self.assertEqual(over((x, x)), x + 5)
self.assertEqual(over((x)), x + 20)
self.assertEqual(over(x), x + 20)

class Unannotated(torch.nn.Module):
@torch.jit._overload_method # noqa: F811
@ -16130,7 +16129,7 @@ def add_nn_module_test(*args, **kwargs):

method_args = ', '.join(['self'] + actuals)
call_args_str = ', '.join(actuals)
call = "self.submodule({})".format(call_args_str)
call = f"self.submodule({call_args_str})"
script = script_method_template.format(method_args, call)

submodule_constants = []
@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Owner(s): ["oncall: jit"]

import unittest
@ -51,7 +50,7 @@ class TestFuser(JitTestCase):
allowed_nodes = {'prim::Constant', 'prim::FusionGroup', 'prim::BailoutTemplate',
'prim::BailOut', 'prim::TupleConstruct'} | set(except_for)
self.assertTrue(all(node.kind() in allowed_nodes for node in graph.nodes()),
'got {}'.format(graph))
f'got {graph}')
self.assertTrue([node.kind() for node in graph.nodes()].count('prim::FusionGroup') == 1)

def _test_fused_abs(self, device='cpu'):
@ -1676,7 +1676,7 @@ class TestTEFuser(JitTestCase):
self.assertEqual(ref, t(x))
except Exception as e:
raise RuntimeError(
"Failed: {} {} {} {}".format(dtype, op.__name__, device, scalar)
f"Failed: {dtype} {op.__name__} {device} {scalar}"
) from e

def test_binary_pow(self):
@ -847,7 +847,7 @@ for model_name, enabled in [
|
|||
return test
|
||||
|
||||
for dtype in [torch.bfloat16, torch.float32]:
|
||||
setattr(TestModel, 'test_vision_%s_%s' % (model_name, str(dtype).split("torch.")[1]), _wrapper(model_name, dtype))
|
||||
setattr(TestModel, 'test_vision_{}_{}'.format(model_name, str(dtype).split("torch.")[1]), _wrapper(model_name, dtype))
|
||||
|
||||
|
||||
instantiate_device_type_tests(TestFusionPattern, globals())
|
||||
|
|
|
|||
|
|
@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Owner(s): ["module: linear algebra"]

import torch
@ -1554,7 +1553,7 @@ class TestLinalg(TestCase):
|
|||
@precisionOverride({torch.cfloat: 5e-4})
|
||||
def test_norm_complex(self, device, dtype):
|
||||
def gen_error_message(input_size, ord, keepdim, dim=None):
|
||||
return "complex norm failed for input size %s, ord=%s, keepdim=%s, dim=%s" % (
|
||||
return "complex norm failed for input size {}, ord={}, keepdim={}, dim={}".format(
|
||||
input_size, ord, keepdim, dim)
|
||||
|
||||
vector_ords = [None, 0, 1, 2, 3, inf, -1, -2, -3, -inf]
|
||||
|
|
@ -2095,13 +2094,13 @@ class TestLinalg(TestCase):
|
|||
@skipCPUIfNoLapack
|
||||
def test_norm_old(self, device):
|
||||
def gen_error_message(input_size, p, keepdim, dim=None):
|
||||
return "norm failed for input size %s, p=%s, keepdim=%s, dim=%s" % (
|
||||
return "norm failed for input size {}, p={}, keepdim={}, dim={}".format(
|
||||
input_size, p, keepdim, dim)
|
||||
|
||||
# 'nuc' norm uses SVD, and thus its precsion is much lower than other norms.
|
||||
# test_svd takes @precisionOverride({torch.float: 1e-4, torch.cfloat: 2e-4}),
|
||||
# and here we are doing the same thing for nuc norm.
|
||||
class PrecisionContext(object):
|
||||
class PrecisionContext:
|
||||
def __init__(self, test, norm):
|
||||
self.norm = norm
|
||||
self.saved_overrides = getattr(test, 'precision_overrides', None)
|
||||
|
|
@ -2193,7 +2192,7 @@ class TestLinalg(TestCase):
|
|||
@skipCPUIfNoLapack
|
||||
def test_norm_complex_old(self, device):
|
||||
def gen_error_message(input_size, p, keepdim, dim=None):
|
||||
return "complex norm failed for input size %s, p=%s, keepdim=%s, dim=%s" % (
|
||||
return "complex norm failed for input size {}, p={}, keepdim={}, dim={}".format(
|
||||
input_size, p, keepdim, dim)
|
||||
|
||||
for keepdim in [False, True]:
|
||||
|
|
@ -4740,7 +4739,7 @@ class TestLinalg(TestCase):
|
|||
for p in [1, 2, 3, 4, inf]:
|
||||
res = x.renorm(p, 1, 1)
|
||||
expected = x / x.norm(p, 0, keepdim=True).clamp(min=1)
|
||||
self.assertEqual(res, expected, msg="renorm failed for {}-norm".format(p))
|
||||
self.assertEqual(res, expected, msg=f"renorm failed for {p}-norm")
|
||||
|
||||
@skipCPUIfNoLapack
|
||||
@skipCUDAIfNoCusolver
|
||||
|
|
@ -5608,7 +5607,7 @@ scipy_lobpcg | {:10.2e} | {:10.2e} | {:6} | N/A
|
|||
@dtypes(torch.float)
|
||||
def test_baddbmm_nan_input_with_zero_beta(self, device, dtype):
|
||||
for shape in [[3, 2, 2], [2, 20, 20]]:
|
||||
mat1, mat2 = [torch.randn(shape, dtype=dtype, device=device) for _ in range(2)]
|
||||
mat1, mat2 = (torch.randn(shape, dtype=dtype, device=device) for _ in range(2))
|
||||
inputs = [torch.randn(shape, dtype=dtype, device=device),
|
||||
torch.randn(shape, dtype=dtype, device=device).fill_(torch.nan)]
|
||||
outs = [None, torch.randn(shape, dtype=dtype, device=device),
|
||||
|
|
@ -6670,15 +6669,15 @@ scipy_lobpcg | {:10.2e} | {:10.2e} | {:6} | N/A
|
|||
|
||||
# Test det
|
||||
self.assertEqual(det, target_sdet * target_logabsdet.exp(),
|
||||
atol=1e-6, rtol=0, msg='{} (det)'.format(desc))
|
||||
atol=1e-6, rtol=0, msg=f'{desc} (det)')
|
||||
|
||||
# Test slogdet
|
||||
# Compare the overall value rather than individual parts because of
|
||||
# precision issues when det is near zero.
|
||||
self.assertEqual(sdet * logabsdet.exp(), target_sdet * target_logabsdet.exp(),
|
||||
atol=1e-6, rtol=0, msg='{} (slogdet)'.format(desc))
|
||||
atol=1e-6, rtol=0, msg=f'{desc} (slogdet)')
|
||||
self.assertEqual(linalg_sdet * linalg_logabsdet.exp(), target_sdet * target_logabsdet.exp(),
|
||||
atol=1e-6, rtol=0, msg='{} (linalg_slogdet)'.format(desc))
|
||||
atol=1e-6, rtol=0, msg=f'{desc} (linalg_slogdet)')
|
||||
|
||||
# Test logdet
|
||||
# Compare logdet against our own pytorch slogdet because they should
|
||||
|
|
@ -6686,10 +6685,10 @@ scipy_lobpcg | {:10.2e} | {:10.2e} | {:6} | N/A
|
|||
# slogdet implementations when det is near zero due to precision
|
||||
# issues.
|
||||
if sdet.item() < 0:
|
||||
self.assertTrue(logdet.item() != logdet.item(), '{} (logdet negative case)'.format(desc))
|
||||
self.assertTrue(logdet.item() != logdet.item(), f'{desc} (logdet negative case)')
|
||||
else:
|
||||
self.assertEqual(logdet.exp(), target_logabsdet.exp(),
|
||||
atol=1e-6, rtol=0, msg='{} (logdet non-negative case)'.format(desc))
|
||||
atol=1e-6, rtol=0, msg=f'{desc} (logdet non-negative case)')
|
||||
|
||||
eye = torch.eye(5, dtype=dtype, device=device)
|
||||
test_single_det(eye, (torch.ones((), dtype=dtype, device=device), torch.zeros((), dtype=dtype, device=device)), 'identity')
|
||||
|
|
|
|||
|
|
@ -14,7 +14,7 @@ class LoggingTest(TestCase):
|
|||
s = TestCase.runWithPytorchAPIUsageStderr("import torch")
|
||||
self.assertRegex(s, "PYTORCH_API_USAGE.*import")
|
||||
# import the shared library directly - it triggers static init but doesn't call anything
|
||||
s = TestCase.runWithPytorchAPIUsageStderr("from ctypes import CDLL; CDLL('{}')".format(torch._C.__file__))
|
||||
s = TestCase.runWithPytorchAPIUsageStderr(f"from ctypes import CDLL; CDLL('{torch._C.__file__}')")
|
||||
self.assertNotRegex(s, "PYTORCH_API_USAGE")
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Owner(s): ["module: linear algebra"]

import unittest
@ -133,7 +132,7 @@ class TestMatmulCuda(TestCase):
|
|||
(2, 1000, 1000, 1000),
|
||||
(1, 10000, 1000, 10000),
|
||||
(1, 10000, 10000, 10000)],
|
||||
name_fn=lambda batch_size, N, M, P: "{}_{}_{}_{}".format(batch_size, N, M, P),
|
||||
name_fn=lambda batch_size, N, M, P: f"{batch_size}_{N}_{M}_{P}",
|
||||
)
|
||||
def test_cublas_baddbmm_large_input(self, device, batch_size, N, M, P, dtype):
|
||||
cpu_dtype = dtype
|
||||
|
|
|
|||
|
|
@ -1343,7 +1343,7 @@ def print_op_str_if_not_supported(op_str):
|
|||
if __name__ == "__main__":
|
||||
COMPARE_XLA = os.getenv('PYTORCH_COMPARE_XLA', None)
|
||||
if COMPARE_XLA is not None:
|
||||
with open(COMPARE_XLA, "r") as f:
|
||||
with open(COMPARE_XLA) as f:
|
||||
d = yaml.load(f, Loader=YamlLoader)
|
||||
ops = d.get("full_codegen", []) + d.get("supported", []) + d.get("autograd", [])
|
||||
for op_str in ops:
|
||||
|
|
@ -1352,7 +1352,7 @@ if __name__ == "__main__":
|
|||
|
||||
COMPARE_TEXT = os.getenv('PYTORCH_COMPARE_TEXT', None)
|
||||
if COMPARE_TEXT is not None:
|
||||
with open(COMPARE_TEXT, "r") as f:
|
||||
with open(COMPARE_TEXT) as f:
|
||||
for op_str in f:
|
||||
print_op_str_if_not_supported(op_str.strip())
|
||||
sys.exit(0)
|
||||
|
|
|
|||
|
|
@ -46,7 +46,7 @@ class TestOptimizer(TestCase):
|
|||
|
||||
input_data = torch.rand((batch_size, input_channels, height, width))
|
||||
conv_weight = torch.rand((output_channels, input_channels_per_group, kernel_h, kernel_w))
|
||||
conv_bias = torch.rand((output_channels))
|
||||
conv_bias = torch.rand(output_channels)
|
||||
result = F.conv2d(input_data, conv_weight, conv_bias, strides, paddings, dilations, groups)
|
||||
weight_output_dim = 24
|
||||
linear_input_shape = result.shape[1]
|
||||
|
|
@ -56,9 +56,9 @@ class TestOptimizer(TestCase):
|
|||
def __init__(self):
|
||||
super().__init__()
|
||||
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
|
||||
self.conv_bias = torch.nn.Parameter(torch.rand((conv_bias_shape)))
|
||||
self.conv_bias = torch.nn.Parameter(torch.rand(conv_bias_shape))
|
||||
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
|
||||
self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)))
|
||||
self.linear_bias = torch.nn.Parameter(torch.rand(weight_output_dim))
|
||||
self.strides = strides
|
||||
self.paddings = paddings
|
||||
self.dilations = dilations
|
||||
|
|
@ -169,7 +169,7 @@ class TestOptimizer(TestCase):
|
|||
def __init__(self):
|
||||
super().__init__()
|
||||
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
|
||||
self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)))
|
||||
self.linear_bias = torch.nn.Parameter(torch.rand(weight_output_dim))
|
||||
|
||||
def forward(self, x):
|
||||
o = F.linear(x, self.linear_weight, self.linear_bias)
|
||||
|
|
@ -186,7 +186,7 @@ class TestOptimizer(TestCase):
|
|||
def __init__(self):
|
||||
super().__init__()
|
||||
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
|
||||
self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)))
|
||||
self.linear_bias = torch.nn.Parameter(torch.rand(weight_output_dim))
|
||||
|
||||
def forward(self, x):
|
||||
o = F.linear(x, self.linear_weight, self.linear_bias)
|
||||
|
|
@ -582,7 +582,7 @@ class TestOptimizer(TestCase):
|
|||
self.assertTrue(
|
||||
cloned.qualified_name.startswith('__torch__.'),
|
||||
("Expected the cloned module's name to start with the string "
|
||||
"'__torch__.', but got: {0}").format(cloned.qualified_name),
|
||||
"'__torch__.', but got: {}").format(cloned.qualified_name),
|
||||
)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Owner(s): ["module: mps"]

import io
@ -831,7 +830,7 @@ def skipMPSMemoryLeakCheckIf(condition):
|
|||
return fn
|
||||
return dec
|
||||
|
||||
class MpsMemoryLeakCheck():
|
||||
class MpsMemoryLeakCheck:
|
||||
def __init__(self, testcase, name=None):
|
||||
self.name = testcase.id() if name is None else name
|
||||
self.testcase = testcase
|
||||
|
|
@ -3744,7 +3743,7 @@ class TestLogical(TestCaseMPS):
|
|||
result_cpu = torch.logical_and(cpu_x, cpu_other)
|
||||
self.assertEqual(result, result_cpu)
|
||||
|
||||
helper(self._wrap_tensor([1, 1, 0, 0]), self._wrap_tensor(([1, 0, 0, 1])))
|
||||
helper(self._wrap_tensor([1, 1, 0, 0]), self._wrap_tensor([1, 0, 0, 1]))
|
||||
helper(
|
||||
self._wrap_tensor([1, 1, 0, 0], dtype=torch.float, requires_grad=True),
|
||||
self._wrap_tensor([1, 0, 0, 1], dtype=torch.float)
|
||||
|
|
@ -3768,7 +3767,7 @@ class TestLogical(TestCaseMPS):
|
|||
|
||||
self.assertEqual(result, result_cpu)
|
||||
|
||||
helper(self._wrap_tensor([1, 1, 0, 0]), self._wrap_tensor(([1, 0, 0, 1])))
|
||||
helper(self._wrap_tensor([1, 1, 0, 0]), self._wrap_tensor([1, 0, 0, 1]))
|
||||
helper(
|
||||
self._wrap_tensor([1, 1, 0, 0], dtype=torch.float, requires_grad=True),
|
||||
self._wrap_tensor([1, 0, 0, 1], dtype=torch.float)
|
||||
|
|
@ -3792,7 +3791,7 @@ class TestLogical(TestCaseMPS):
|
|||
|
||||
self.assertEqual(result, result_cpu)
|
||||
|
||||
helper(self._wrap_tensor([1, 1, 0, 0]), self._wrap_tensor(([1, 0, 0, 1])))
|
||||
helper(self._wrap_tensor([1, 1, 0, 0]), self._wrap_tensor([1, 0, 0, 1]))
|
||||
helper(
|
||||
self._wrap_tensor([1, 1, 0, 0], dtype=torch.float, requires_grad=True),
|
||||
self._wrap_tensor([1, 0, 0, 1], dtype=torch.float)
|
||||
|
|
@ -9251,7 +9250,7 @@ class TestConvolutionMPS(TestCaseMPS):
|
|||
[[3.0000004768, 6.5000000000, 5.0000, 4.6675000191, 9.2500],
|
||||
[1.0000000000, 7.1665000916, 5.0000, 5.0000000000, 9.2500]], device="mps").view(1, 1, 2, 5)
|
||||
else:
|
||||
raise AssertionError("missing groundtruth test for padding mode '{}'".format(padding_mode))
|
||||
raise AssertionError(f"missing groundtruth test for padding mode '{padding_mode}'")
|
||||
elif mode == 'nearest':
|
||||
if padding_mode == 'zeros':
|
||||
if align_corners:
|
||||
|
|
@ -9281,7 +9280,7 @@ class TestConvolutionMPS(TestCaseMPS):
|
|||
[[1., 8., 5., 7., 9.],
|
||||
[1., 8., 5., 8., 9.]], device="mps").view(1, 1, 2, 5)
|
||||
else:
|
||||
raise AssertionError("missing groundtruth test for padding mode '{}'".format(padding_mode))
|
||||
raise AssertionError(f"missing groundtruth test for padding mode '{padding_mode}'")
|
||||
elif mode == 'bicubic':
|
||||
if padding_mode == 'zeros':
|
||||
if align_corners:
|
||||
|
|
@ -9311,10 +9310,10 @@ class TestConvolutionMPS(TestCaseMPS):
|
|||
[[2.7993753, 6.6050020, 4.25, 4.7138715, 10.269531],
|
||||
[0.8125000, 7.2822485, 4.25, 5.0000052, 9.332031]], device="mps").view(1, 1, 2, 5)
|
||||
else:
|
||||
raise AssertionError("missing groundtruth test for padding mode '{}'".format(padding_mode))
|
||||
raise AssertionError(f"missing groundtruth test for padding mode '{padding_mode}'")
|
||||
|
||||
else:
|
||||
raise AssertionError("missing groundtruth test for interpolation mode '{}'".format(mode))
|
||||
raise AssertionError(f"missing groundtruth test for interpolation mode '{mode}'")
|
||||
output = F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,
|
||||
align_corners=align_corners)
|
||||
self.assertEqual(output, groundtruth, atol=1e-5, rtol=0,
|
||||
|
|
|
|||
|
|
@ -328,7 +328,7 @@ class TestNamedTensor(TestCase):
|
|||
def test_big_tensor_repr_has_names(self):
|
||||
def check_repr(named_tensor):
|
||||
unnamed_tensor = named_tensor.rename(None)
|
||||
names_tag = 'names={}'.format(named_tensor.names)
|
||||
names_tag = f'names={named_tensor.names}'
|
||||
self.assertIn(names_tag, repr(named_tensor))
|
||||
|
||||
check_repr(torch.randn(128, 3, 64, 64, names=('N', 'C', 'H', 'W')))
|
||||
|
|
@ -854,7 +854,7 @@ class TestNamedTensor(TestCase):
|
|||
out = testcase.lambd(tensor)
|
||||
except RuntimeError as err:
|
||||
# Get a better error message by catching the error and asserting.
|
||||
raise RuntimeError('{}: {}'.format(testcase.name, err)) from err
|
||||
raise RuntimeError(f'{testcase.name}: {err}') from err
|
||||
self.assertEqual(out.names, tensor.names,
|
||||
msg=testcase.name)
|
||||
|
||||
|
|
|
|||
|
|
@ -39,7 +39,7 @@ class TestNamedTupleAPI(TestCase):
|
|||
def test_native_functions_yaml(self):
|
||||
operators_found = set()
|
||||
regex = re.compile(r"^(\w*)(\(|\.)")
|
||||
with open(aten_native_yaml, 'r') as file:
|
||||
with open(aten_native_yaml) as file:
|
||||
for f in yaml.safe_load(file.read()):
|
||||
f = f['func']
|
||||
ret = f.split('->')[1].strip()
|
||||
|
|
|
|||
|
|
@ -403,7 +403,7 @@ class TestNestedTensor(TestCase):
|
|||
devices = [t.device]
|
||||
if t.device.type == 'cuda':
|
||||
if t.device.index == -1:
|
||||
devices.append('cuda:{}'.format(torch.cuda.current_device()))
|
||||
devices.append(f'cuda:{torch.cuda.current_device()}')
|
||||
elif t.device.index == torch.cuda.current_device():
|
||||
devices.append('cuda')
|
||||
for device in devices:
|
||||
|
|
|
|||
|
|
@ -5915,7 +5915,7 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""")
|
|||
[[3.0000004768, 6.5000000000, 5.0000, 4.6675000191, 9.2500],
|
||||
[1.0000000000, 7.1665000916, 5.0000, 5.0000000000, 9.2500]]).view(1, 1, 2, 5)
|
||||
else:
|
||||
raise AssertionError("missing groundtruth test for padding mode '{}'".format(padding_mode))
|
||||
raise AssertionError(f"missing groundtruth test for padding mode '{padding_mode}'")
|
||||
elif mode == 'nearest':
|
||||
if padding_mode == 'zeros':
|
||||
if align_corners:
|
||||
|
|
@ -5945,7 +5945,7 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""")
|
|||
[[1., 8., 5., 7., 9.],
|
||||
[1., 8., 5., 8., 9.]]).view(1, 1, 2, 5)
|
||||
else:
|
||||
raise AssertionError("missing groundtruth test for padding mode '{}'".format(padding_mode))
|
||||
raise AssertionError(f"missing groundtruth test for padding mode '{padding_mode}'")
|
||||
elif mode == 'bicubic':
|
||||
if padding_mode == 'zeros':
|
||||
if align_corners:
|
||||
|
|
@ -5975,10 +5975,10 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""")
|
|||
[[2.7993753, 6.6050020, 4.25, 4.7138715, 10.269531],
|
||||
[0.8125000, 7.2822485, 4.25, 5.0000052, 9.332031]]).view(1, 1, 2, 5)
|
||||
else:
|
||||
raise AssertionError("missing groundtruth test for padding mode '{}'".format(padding_mode))
|
||||
raise AssertionError(f"missing groundtruth test for padding mode '{padding_mode}'")
|
||||
|
||||
else:
|
||||
raise AssertionError("missing groundtruth test for interpolation mode '{}'".format(mode))
|
||||
raise AssertionError(f"missing groundtruth test for interpolation mode '{mode}'")
|
||||
output = F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,
|
||||
align_corners=align_corners)
|
||||
self.assertEqual(output, groundtruth, atol=1e-5, rtol=0,
|
||||
|
|
@ -6027,7 +6027,7 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""")
|
|||
[[[[-0., -0.], [-0., 0.], [-0., -0.], [-0., 0.]],
|
||||
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]]]]).view(1, 2, 4, 2)
|
||||
else:
|
||||
raise AssertionError("missing gradient groundtruth test for padding mode '{}'".format(padding_mode))
|
||||
raise AssertionError(f"missing gradient groundtruth test for padding mode '{padding_mode}'")
|
||||
elif mode == 'nearest':
|
||||
groundtruth = torch.tensor(
|
||||
[[[[-0., -0.], [-0., 0.], [-0., -0.], [-0., 0.]],
|
||||
|
|
@ -6062,9 +6062,9 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""")
|
|||
[[[[0., 0.], [0., 0.], [1.875, 0.], [1.875, 0.]],
|
||||
[[0., 0.], [0., 0.], [1.875, 0.], [1.875, 0.]]]]).view(1, 2, 4, 2)
|
||||
else:
|
||||
raise AssertionError("missing gradient groundtruth test for padding mode '{}'".format(padding_mode))
|
||||
raise AssertionError(f"missing gradient groundtruth test for padding mode '{padding_mode}'")
|
||||
else:
|
||||
raise AssertionError("missing gradient groundtruth test for interpolation mode '{}'".format(mode))
|
||||
raise AssertionError(f"missing gradient groundtruth test for interpolation mode '{mode}'")
|
||||
for input_requires_grad in [False, True]:
|
||||
input = input.requires_grad_(input_requires_grad)
|
||||
F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,
|
||||
|
|
@ -7747,7 +7747,7 @@ def _buildEquivalentAffineTransforms2d(device, input_size, output_size, angle_ra
|
|||
outtrans_ary)
|
||||
grid_ary = np.dot(np.dot(np.dot(reorder_ary, rotation_ary.T), outscale_ary), outtrans_ary)
|
||||
|
||||
transform_tensor = torch.from_numpy((rotation_ary)).to(device, torch.float32)
|
||||
transform_tensor = torch.from_numpy(rotation_ary).to(device, torch.float32)
|
||||
transform_tensor = transform_tensor[:2].unsqueeze(0)
|
||||
|
||||
return transform_tensor, transform_ary, grid_ary
|
||||
|
|
@ -7820,7 +7820,7 @@ def _buildEquivalentAffineTransforms3d(device, input_size, output_size, angle_ra
|
|||
outtrans_ary)
|
||||
grid_ary = np.dot(np.dot(np.dot(reorder_ary, np.linalg.inv(scipyRotation_ary)), outscale_ary), outtrans_ary)
|
||||
|
||||
transform_tensor = torch.from_numpy((torchRotation_ary)).to(device, torch.float32)
|
||||
transform_tensor = torch.from_numpy(torchRotation_ary).to(device, torch.float32)
|
||||
transform_tensor = transform_tensor[:3].unsqueeze(0)
|
||||
|
||||
return transform_tensor, transform_ary, grid_ary
|
||||
|
|
@ -10462,7 +10462,7 @@ class TestNNDeviceType(NNTestCase):
|
|||
out_double = F.grid_sample(data.double(), grid.double(), mode=mode, padding_mode='zeros',
|
||||
align_corners=align_corners)
|
||||
|
||||
self.assertEqual(out_half, out_double.half(), msg="grid_sample with mode = {} doesn't match".format(mode))
|
||||
self.assertEqual(out_half, out_double.half(), msg=f"grid_sample with mode = {mode} doesn't match")
|
||||
|
||||
helper((32, 64, 16, 16), (32, 8, 8, 2), True)
|
||||
helper((32, 64, 16, 16, 16), (32, 8, 8, 8, 3), True)
|
||||
|
|
|
|||
|
|
@ -508,7 +508,7 @@ class TestNNAPI(TestCase):
|
|||
for kind in ["float", "float-nhwc", "quant", "quant-nhwc"]:
|
||||
for case in cases:
|
||||
in_ch, out_ch, kernel, stride, padding, groups, bias, input_dim, name = case
|
||||
with self.subTest("{}-{}".format(kind, name)):
|
||||
with self.subTest(f"{kind}-{name}"):
|
||||
inp = torch.randn(input_dim)
|
||||
model = torch.nn.Conv2d(in_ch, out_ch, kernel, stride, padding, groups=groups, bias=bool(bias))
|
||||
output_size = model(inp).numel()
|
||||
|
|
|
|||
|
|
@ -51,7 +51,7 @@ class TestOpenMP_ParallelFor(TestCase):
|
|||
continue
|
||||
is_increasing = is_increasing and (last_rss[idx] > last_rss[idx - 1])
|
||||
self.assertTrue(not is_increasing,
|
||||
msg='memory usage is increasing, {}'.format(str(last_rss)))
|
||||
msg=f'memory usage is increasing, {str(last_rss)}')
|
||||
|
||||
def test_one_thread(self):
|
||||
"""Make sure there is no memory leak with one thread: issue gh-32284
|
||||
|
|
|
|||
|
|
@ -157,7 +157,7 @@ class TestCommon(TestCase):
|
|||
if isinstance(result, torch.Tensor):
|
||||
self.assertTrue(result.device == cuda_device)
|
||||
elif is_iterable_of_tensors(result):
|
||||
self.assertTrue(all((t.device == cuda_device for t in result)))
|
||||
self.assertTrue(all(t.device == cuda_device for t in result))
|
||||
else:
|
||||
self.skipTest(
|
||||
"Skipped! Only supports single tensor or iterable of tensor outputs."
|
||||
|
|
@ -219,7 +219,7 @@ class TestCommon(TestCase):
|
|||
self.assertTrue(False)
|
||||
|
||||
for file_name in files:
|
||||
with open(os.path.join(pytorch_dir, file_name), "r") as f:
|
||||
with open(os.path.join(pytorch_dir, file_name)) as f:
|
||||
lines = f.read()
|
||||
matches = regex.findall(lines)
|
||||
for match in matches:
|
||||
|
|
@ -723,7 +723,7 @@ class TestCommon(TestCase):
|
|||
return (out.stride(),)
|
||||
|
||||
# assumes (see above) that out is an iterable of tensors
|
||||
return tuple((t.stride() for t in out))
|
||||
return tuple(t.stride() for t in out)
|
||||
|
||||
# Extracts data pointers from a tensor or iterable of tensors into a tuple
|
||||
# NOTE: only extracts on the CPU and CUDA device types since some
|
||||
|
|
@ -736,7 +736,7 @@ class TestCommon(TestCase):
|
|||
return (out.data_ptr(),)
|
||||
|
||||
# assumes (see above) that out is an iterable of tensors
|
||||
return tuple((t.data_ptr() for t in out))
|
||||
return tuple(t.data_ptr() for t in out)
|
||||
|
||||
@suppress_warnings
|
||||
def _compare_out(transform, *, compare_strides_and_data_ptrs=True):
|
||||
|
|
@ -751,7 +751,7 @@ class TestCommon(TestCase):
|
|||
self.assertEqual(expected, out)
|
||||
|
||||
if compare_strides_and_data_ptrs:
|
||||
stride_msg = "Strides are not the same! Original strides were {0} and strides are now {1}".format(
|
||||
stride_msg = "Strides are not the same! Original strides were {} and strides are now {}".format(
|
||||
original_strides, final_strides
|
||||
)
|
||||
self.assertEqual(original_strides, final_strides, msg=stride_msg)
|
||||
|
|
@ -843,7 +843,7 @@ class TestCommon(TestCase):
|
|||
return (out.stride(),)
|
||||
|
||||
# assumes (see above) that out is an iterable of tensors
|
||||
return tuple((t.stride() for t in out))
|
||||
return tuple(t.stride() for t in out)
|
||||
|
||||
# Extracts data pointers from a tensor or iterable of tensors into a tuple
|
||||
# NOTE: only extracts on the CPU and CUDA device types since some
|
||||
|
|
@ -856,7 +856,7 @@ class TestCommon(TestCase):
|
|||
return (out.data_ptr(),)
|
||||
|
||||
# assumes (see above) that out is an iterable of tensors
|
||||
return tuple((t.data_ptr() for t in out))
|
||||
return tuple(t.data_ptr() for t in out)
|
||||
|
||||
def _compare_out(transform, *, compare_strides_and_data_ptrs=True):
|
||||
out = _apply_out_transform(transform, expected)
|
||||
|
|
@ -869,7 +869,7 @@ class TestCommon(TestCase):
|
|||
self.assertEqual(expected, out)
|
||||
|
||||
if compare_strides_and_data_ptrs:
|
||||
stride_msg = "Strides are not the same! Original strides were {0} and strides are now {1}".format(
|
||||
stride_msg = "Strides are not the same! Original strides were {} and strides are now {}".format(
|
||||
original_strides, final_strides
|
||||
)
|
||||
self.assertEqual(original_strides, final_strides, msg=stride_msg)
|
||||
|
|
@ -1390,20 +1390,20 @@ class TestCommon(TestCase):
|
|||
|
||||
# Partially supporting a dtype is not an error, but we print a warning
|
||||
if (len(partially_supported_forward) + len(partially_supported_backward)) > 0:
|
||||
msg = "Some dtypes for {0} on device type {1} are only partially supported!\n".format(
|
||||
msg = "Some dtypes for {} on device type {} are only partially supported!\n".format(
|
||||
op.name, device_type
|
||||
)
|
||||
if len(partially_supported_forward) > 0:
|
||||
msg = (
|
||||
msg
|
||||
+ "The following dtypes only worked on some samples during forward: {0}.\n".format(
|
||||
+ "The following dtypes only worked on some samples during forward: {}.\n".format(
|
||||
partially_supported_forward
|
||||
)
|
||||
)
|
||||
if len(partially_supported_backward) > 0:
|
||||
msg = (
|
||||
msg
|
||||
+ "The following dtypes only worked on some samples during backward: {0}.\n".format(
|
||||
+ "The following dtypes only worked on some samples during backward: {}.\n".format(
|
||||
partially_supported_backward
|
||||
)
|
||||
)
|
||||
|
|
@ -1426,34 +1426,34 @@ class TestCommon(TestCase):
|
|||
return
|
||||
|
||||
# Generates error msg
|
||||
msg = "The supported dtypes for {0} on device type {1} are incorrect!\n".format(
|
||||
msg = "The supported dtypes for {} on device type {} are incorrect!\n".format(
|
||||
op.name, device_type
|
||||
)
|
||||
if len(supported_but_unclaimed_forward) > 0:
|
||||
msg = (
|
||||
msg
|
||||
+ "The following dtypes worked in forward but are not listed by the OpInfo: {0}.\n".format(
|
||||
+ "The following dtypes worked in forward but are not listed by the OpInfo: {}.\n".format(
|
||||
supported_but_unclaimed_forward
|
||||
)
|
||||
)
|
||||
if len(supported_but_unclaimed_backward) > 0:
|
||||
msg = (
|
||||
msg
|
||||
+ "The following dtypes worked in backward but are not listed by the OpInfo: {0}.\n".format(
|
||||
+ "The following dtypes worked in backward but are not listed by the OpInfo: {}.\n".format(
|
||||
supported_but_unclaimed_backward
|
||||
)
|
||||
)
|
||||
if len(claimed_but_unsupported_forward) > 0:
|
||||
msg = (
|
||||
msg
|
||||
+ "The following dtypes did not work in forward but are listed by the OpInfo: {0}.\n".format(
|
||||
+ "The following dtypes did not work in forward but are listed by the OpInfo: {}.\n".format(
|
||||
claimed_but_unsupported_forward
|
||||
)
|
||||
)
|
||||
if len(claimed_but_unsupported_backward) > 0:
|
||||
msg = (
|
||||
msg
|
||||
+ "The following dtypes did not work in backward but are listed by the OpInfo: {0}.\n".format(
|
||||
+ "The following dtypes did not work in backward but are listed by the OpInfo: {}.\n".format(
|
||||
claimed_but_unsupported_backward
|
||||
)
|
||||
)
|
||||
|
|
|
|||
|
|
@ -190,7 +190,7 @@ class TestJit(JitCommonTestCase):
|
|||
_alias_ops = partial(ops, dtypes=OpDTypes.supported,
|
||||
allowed_dtypes=(torch.float,))
|
||||
|
||||
@_alias_ops((op for op in op_db if op.aliases))
|
||||
@_alias_ops(op for op in op_db if op.aliases)
|
||||
def test_jit_alias_remapping(self, device, dtype, op):
|
||||
# NOTE: only tests on first sample
|
||||
samples = op.sample_inputs(device, dtype, requires_grad=True)
|
||||
|
|
|
|||
|
|
@ -129,7 +129,7 @@ class DiagonalTensor:
|
|||
self._i = value
|
||||
|
||||
def __repr__(self):
|
||||
return "DiagonalTensor(N={}, value={})".format(self._N, self._i)
|
||||
return f"DiagonalTensor(N={self._N}, value={self._i})"
|
||||
|
||||
def __array__(self):
|
||||
return self._i * np.eye(self._N)
|
||||
|
|
@ -271,7 +271,7 @@ class SubDiagonalTensor(DiagonalTensor):
|
|||
handled_functions = HANDLED_FUNCTIONS_SUB_DIAGONAL
|
||||
|
||||
def __repr__(self):
|
||||
return "SubDiagonalTensor(N={}, value={})".format(self._N, self._i)
|
||||
return f"SubDiagonalTensor(N={self._N}, value={self._i})"
|
||||
|
||||
|
||||
@implements_sub_diagonal(torch.mean)
|
||||
|
|
@ -336,7 +336,7 @@ def generate_tensor_like_torch_implementations():
|
|||
for namespace, funcs in get_overridable_functions().items():
|
||||
for func in funcs:
|
||||
if func not in testing_overrides and func.__name__ not in testing_ignore:
|
||||
untested_funcs.append("{}.{}".format(namespace, func.__name__))
|
||||
untested_funcs.append(f"{namespace}.{func.__name__}")
|
||||
msg = (
|
||||
"The following functions are not tested for __torch_function__ "
|
||||
"support, please ensure there is an entry in the dict returned by "
|
||||
|
|
@ -750,7 +750,7 @@ def generate_tensor_like_override_tests(cls):
|
|||
if module:
|
||||
name = 'test_{}_{}'.format(module.replace('.', '_'), func.__name__)
|
||||
else:
|
||||
name = 'test_{}'.format(func.__name__)
|
||||
name = f'test_{func.__name__}'
|
||||
test_method.__name__ = name
|
||||
setattr(cls, name, test_method)
|
||||
|
||||
|
|
@ -1094,7 +1094,7 @@ class TestRNN(TestCase):
|
|||
class TestDisabledTorchFunction(TestCase):
|
||||
# Regression test for gh-64687
|
||||
def test_parameter_does_not_prevent_dispatch(self):
|
||||
class MyTensor():
|
||||
class MyTensor:
|
||||
@classmethod
|
||||
def __torch_function__(cls, func, types, args=(), kwargs=None):
|
||||
return "called"
|
||||
|
|
@ -1119,7 +1119,7 @@ class TestResolveName(TestCase):
|
|||
|
||||
class TestTorchFunctionWarning(TestCase):
|
||||
def test_warn_on_invalid_torch_function(self):
|
||||
class Bad1():
|
||||
class Bad1:
|
||||
def __torch_function__(self, *args, **kwargs):
|
||||
pass
|
||||
|
||||
|
|
|
|||
|
|
@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Owner(s): ["module: autograd"]

from torch.testing._internal.common_utils import TestCase, run_tests, IS_JETSON, IS_WINDOWS
|
|||
|
|
@ -417,7 +417,7 @@ class TestPythonRegistration(TestCase):
|
|||
|
||||
out = getattr(torch.ops, self.test_ns).sqsum.default(s0, s1)
|
||||
out_val = shape_env.evaluate_expr(out.node.expr)
|
||||
self.assertEquals(out_val, 13)
|
||||
self.assertEqual(out_val, 13)
|
||||
|
||||
def test_register_functional_op_error_cases(self):
|
||||
lib = Library(self.test_ns, "FRAGMENT")
|
||||
|
|
|
|||
|
|
@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Owner(s): ["oncall: quantization"]

import logging
|
|||
|
|
@ -1968,9 +1968,9 @@ class TestReductions(TestCase):
|
|||
a[2, 2] = nan
|
||||
actual = f(a.to(device)).cpu()
|
||||
expected = f(a).cpu()
|
||||
self.assertEqual(torch.isnan(actual), torch.isnan(expected), msg='nans for {}'.format(name))
|
||||
self.assertEqual(torch.isnan(actual), torch.isnan(expected), msg=f'nans for {name}')
|
||||
self.assertEqual(actual[~torch.isnan(actual)],
|
||||
expected[~torch.isnan(expected)], msg='nans for {}'.format(name))
|
||||
expected[~torch.isnan(expected)], msg=f'nans for {name}')
|
||||
|
||||
# TODO: make this test generic using OpInfos
|
||||
@onlyCUDA
|
||||
|
|
@ -2199,16 +2199,16 @@ class TestReductions(TestCase):
|
|||
fn_tuple(y, 1, keepdim=False, out=(values[:, 1], indices[:, 1]))
|
||||
values_expected, indices_expected = fn_tuple(y, 1, keepdim=False)
|
||||
self.assertEqual(values[:, 1], values_expected,
|
||||
msg='{} values with out= kwarg'.format(fn_name))
|
||||
msg=f'{fn_name} values with out= kwarg')
|
||||
self.assertEqual(indices[:, 1], indices_expected,
|
||||
msg='{} indices with out= kwarg'.format(fn_name))
|
||||
msg=f'{fn_name} indices with out= kwarg')
|
||||
continue
|
||||
|
||||
x = torch.randn(5, 3, device=device)
|
||||
y = torch.randn(5, 3, device=device)
|
||||
fn(y, 1, keepdim=False, out=x[:, 1])
|
||||
expected = fn(y, 1, keepdim=False)
|
||||
self.assertEqual(x[:, 1], expected, msg='{} with out= kwarg'.format(fn_name))
|
||||
self.assertEqual(x[:, 1], expected, msg=f'{fn_name} with out= kwarg')
|
||||
|
||||
@onlyCUDA
|
||||
@largeTensorTest('10GB')
|
||||
|
|
@ -3498,8 +3498,8 @@ as the input tensor excluding its innermost dimension'):
|
|||
expected = np.asarray(expected) # transform numpy scalars to numpy.ndarray instances
|
||||
|
||||
msg = ("Failed to produce expected results! Input tensor was"
|
||||
" {0}, torch result is {1}, and reference result is"
|
||||
" {2}.").format(t, actual, expected) if t.numel() < 10 else None
|
||||
" {}, torch result is {}, and reference result is"
|
||||
" {}.").format(t, actual, expected) if t.numel() < 10 else None
|
||||
|
||||
self.assertEqual(actual, expected, msg, exact_dtype=exact_dtype)
|
||||
|
||||
|
|
|
|||
|
|
@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Owner(s): ["module: scatter & gather ops"]

import random
|
|||
|
|
@@ -258,7 +258,7 @@ class TestSchemaCheck(JitTestCase):
@unittest.skipIf(not torch._C.has_spectral, "ATen not built with FFT.")
def test_schema_check_mode_functionality_kwarg_tensor(self):
x = torch.rand((3, 5))
w = torch.rand((4))
w = torch.rand(4)
expected = torch.stft(x, 4, win_length=4, window=w, return_complex=True)
with SchemaCheckMode():
actual = torch.stft(x, 4, win_length=4, window=w, return_complex=True)

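The `torch.rand((4))` change above relies on `(4)` being a parenthesized int rather than a 1-tuple, so dropping the extra parentheses cannot change the result. A small illustration, assuming any recent PyTorch:

    import torch

    assert (4) == 4 and (4,) != 4   # only the trailing comma makes a tuple
    a = torch.rand((4))             # old spelling: redundant parentheses
    b = torch.rand(4)               # new spelling emitted by the autoformat
    assert a.shape == b.shape == torch.Size([4])
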
@@ -183,7 +183,7 @@ class TestSegmentReductions(TestCase):
def test_simple_zero_length(self, device, dtypes):
val_dtype, length_type = dtypes
lengths = [0, 0]
data = torch.ones((0))
data = torch.ones(0)

for reduction in reductions:
for initial in [0, None]:

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Owner(s): ["module: serialization"]

import torch

@@ -508,7 +507,7 @@ class SerializationMixin:
torch.device('cuda', 0)
]
gpu_last_map_locations = [
'cuda:{}'.format(torch.cuda.device_count() - 1),
f'cuda:{torch.cuda.device_count() - 1}',
]

def check_map_locations(map_locations, tensor_class, intended_device):

@@ -815,7 +814,7 @@ class TestOldSerialization(TestCase, SerializationMixin):
# the warning module is the same, it is not raised again.
def _test_serialization_container(self, unique_key, filecontext_lambda):

tmpmodule_name = 'tmpmodule{}'.format(unique_key)
tmpmodule_name = f'tmpmodule{unique_key}'

def import_module(name, filename):
import importlib.util

@@ -33,12 +33,12 @@ class TestSortAndSelect(TestCase):
# see above
return ((b != b) | (a <= b)).all().item()
else:
error('unknown order "{}", must be "ascending" or "descending"'.format(order))
error(f'unknown order "{order}", must be "ascending" or "descending"')

are_ordered = True
for k in range(1, SIZE):
self.assertTrue(check_order(mxx[:, k - 1], mxx[:, k]),
'torch.sort ({}) values unordered for {}'.format(order, task))
f'torch.sort ({order}) values unordered for {task}')

seen = set()
indicesCorrect = True

@@ -51,7 +51,7 @@ class TestSortAndSelect(TestCase):
seen.clear()
for j in range(size):
self.assertEqual(x[k][ixx[k][j]], mxx[k][j],
msg='torch.sort ({}) indices wrong for {}'.format(order, task))
msg=f'torch.sort ({order}) indices wrong for {task}')
seen.add(ixx[k][j])
self.assertEqual(len(seen), size)

@@ -285,11 +285,11 @@ class TestSparse(TestSparseBase):
for shape, sparse_dim, nnz in shape_sparse_dim_nnz:
indices_shape = torch.Size((sparse_dim, nnz))
values_shape = torch.Size((nnz,) + shape[sparse_dim:])
printed.append("# shape: {}".format(torch.Size(shape)))
printed.append("# nnz: {}".format(nnz))
printed.append("# sparse_dim: {}".format(sparse_dim))
printed.append("# indices shape: {}".format(indices_shape))
printed.append("# values shape: {}".format(values_shape))
printed.append(f"# shape: {torch.Size(shape)}")
printed.append(f"# nnz: {nnz}")
printed.append(f"# sparse_dim: {sparse_dim}")
printed.append(f"# indices shape: {indices_shape}")
printed.append(f"# values shape: {values_shape}")

indices = torch.arange(indices_shape.numel(), dtype=self.index_tensor(0).dtype,
device=device).view(indices_shape)

@@ -308,7 +308,7 @@ class TestSparse(TestSparseBase):
else:
dtypes.append(torch.double)
for dtype in dtypes:
printed.append("########## {} ##########".format(dtype))
printed.append(f"########## {dtype} ##########")
x = sp_tensor.detach().to(dtype)
printed.append("# sparse tensor")
printed.append(str(x))

@@ -3382,8 +3382,7 @@ class TestSparse(TestSparseBase):
dtype=dtype, device=device)
else:
raise ValueError(
'`dim(=%s)` must be smaller than `sparse_dim(=%s) + dense_dim(=%s)`'
% (dim, sparse.sparse_dim(), sparse.dense_dim()))
f'`dim(={dim})` must be smaller than `sparse_dim(={sparse.sparse_dim()}) + dense_dim(={sparse.dense_dim()})`')

def softmax_jacobian_analytic(x, dim):
"""Return Jacobian of softmax using analytic formula

@@ -904,12 +904,14 @@ class TestSparseCompressed(TestCase):

def is_view_of(base, other):
# a shameless copy of TestViewOps.is_view_of
if ((not other._is_view() or
other is base or
other._base is not base or
base.device != other.device)):
if (
not other._is_view() or
other is base or
other._base is not base or
base.device != other.device
):
return False
if base.device.type == 'cpu' or base.device.type == 'cuda':
if base.device.type in ('cpu', 'cuda'):
if base.untyped_storage().data_ptr() != other.untyped_storage().data_ptr():
return False
return True

@@ -1815,8 +1817,8 @@ class TestSparseCSR(TestCase):
yd = xd.transpose(-2, -1)
zd = torch.rand(0, 0, device=device, dtype=dtype)

xls, yls, zls = [t.to_sparse(layout=lhs_layout) for t in (xd, yd, zd)]
xrs, yrs, zrs = [t.to_sparse(layout=rhs_layout) for t in (xd, yd, zd)]
xls, yls, zls = (t.to_sparse(layout=lhs_layout) for t in (xd, yd, zd))
xrs, yrs, zrs = (t.to_sparse(layout=rhs_layout) for t in (xd, yd, zd))

for ls, rs, ld, rd in [(xls, yrs, xd, yd), (xls, zrs, xd, zd), (zls, yrs, zd, yd), (zls, zrs, zd, zd)]:
res_sparse = ls @ rs

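The TestSparseCSR hunk above swaps list comprehensions for generator expressions on the right-hand side of tuple unpacking. Unpacking consumes the iterable either way, so behaviour is unchanged; the generator form just skips the intermediate list. A minimal sketch, independent of the sparse-layout code:

    xs = [1, 2, 3]
    a, b, c = [x * 2 for x in xs]   # old: builds a throwaway list first
    d, e, f = (x * 2 for x in xs)   # new: unpacks straight from the generator
    assert (a, b, c) == (d, e, f) == (2, 4, 6)
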
@@ -228,7 +228,7 @@ class TestSubclass(TestCase):
def __new__(
cls, t: torch.Tensor
):
r = super(NonRewrappingTensor, cls)._make_wrapper_subclass(
r = super()._make_wrapper_subclass(
cls, t.shape, dtype=t.dtype, requires_grad=t.requires_grad, device=t.device)
return r

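The TestSubclass hunk is the zero-argument `super()` rewrite. A minimal sketch of the equivalence on plain classes, deliberately not touching `_make_wrapper_subclass`, which is a private torch API:

    class Base:
        def __init__(self, name):
            self.name = name

    class Child(Base):
        def __init__(self, name):
            # super(Child, self).__init__(name)   # legacy two-argument form
            super().__init__(name)                # Python 3 infers the class and instance

    assert Child("x").name == "x"
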
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Owner(s): ["oncall: pt2"]

import itertools

@@ -229,12 +228,12 @@ class TestValueRanges(TestCase):
from sympy import floor, Eq
shape_0 = sympy.Symbol('shape_0', positive=True, integer=True)
new_expr = (
Eq(30 * floor(4 * (((shape_0 + 1) // 96)) *
(((shape_0 + 62017) // (((shape_0 + 1) // 96) + 646))) / 647 +
2584 * (((shape_0 + 62017) // (((shape_0 + 1) // 96) + 646))) / 647),
2880 * floor((((shape_0 + 1) // 96)) *
(((shape_0 + 62017) // (((shape_0 + 1) // 96) + 646))) / 15528 +
323 * (((shape_0 + 62017) // (((shape_0 + 1) // 96) + 646))) / 7764)))
Eq(30 * floor(4 * ((shape_0 + 1) // 96) *
((shape_0 + 62017) // (((shape_0 + 1) // 96) + 646)) / 647 +
2584 * ((shape_0 + 62017) // (((shape_0 + 1) // 96) + 646)) / 647),
2880 * floor(((shape_0 + 1) // 96) *
((shape_0 + 62017) // (((shape_0 + 1) // 96) + 646)) / 15528 +
323 * ((shape_0 + 62017) // (((shape_0 + 1) // 96) + 646)) / 7764)))
new_range_env = {shape_0: ValueRanges(lower=1, upper=190)}
self.assertTrue(new_expr.subs({shape_0: 95}))
self.assertIn(True, sympy_interp(ValueRangeAnalysis, new_range_env, new_expr))

@@ -170,21 +170,21 @@ class TestTensorCreation(TestCase):

single_roll = numbers.roll(1, 0)
expected = torch.tensor([8, 1, 2, 3, 4, 5, 6, 7], device=device)
self.assertEqual(single_roll, expected, msg="{} did not equal expected result".format(single_roll))
self.assertEqual(single_roll, expected, msg=f"{single_roll} did not equal expected result")

roll_backwards = numbers.roll(-2, 0)
expected = torch.tensor([3, 4, 5, 6, 7, 8, 1, 2], device=device)
self.assertEqual(roll_backwards, expected, msg="{} did not equal expected result".format(roll_backwards))
self.assertEqual(roll_backwards, expected, msg=f"{roll_backwards} did not equal expected result")

data = numbers.view(2, 2, 2)
rolled = data.roll(1, 0)
expected = torch.tensor([5, 6, 7, 8, 1, 2, 3, 4], device=device).view(2, 2, 2)
self.assertEqual(expected, rolled, msg="{} did not equal expected result: {}".format(rolled, expected))
self.assertEqual(expected, rolled, msg=f"{rolled} did not equal expected result: {expected}")

data = data.view(2, 4)
# roll a loop until back where started
loop_rolled = data.roll(2, 0).roll(4, 1)
self.assertEqual(data, loop_rolled, msg="{} did not equal the original: {}".format(loop_rolled, data))
self.assertEqual(data, loop_rolled, msg=f"{loop_rolled} did not equal the original: {data}")
# multiple inverse loops
self.assertEqual(data, data.roll(-20, 0).roll(-40, 1))
self.assertEqual(torch.tensor([8, 1, 2, 3, 4, 5, 6, 7], device=device), numbers.roll(1, 0))

@@ -196,7 +196,7 @@ class TestTensorCreation(TestCase):
expected = torch.tensor([4, 8, 1, 5, 2, 6, 3, 7]).view(4, 2)
rolled = strided.roll(1, 0)
self.assertEqual(expected, rolled,
msg="non contiguous tensor rolled to {} instead of {} ".format(rolled, expected))
msg=f"non contiguous tensor rolled to {rolled} instead of {expected} ")

# test roll with no dimension specified
expected = numbers.roll(1, 0).view(2, 4)

@@ -207,7 +207,7 @@ class TestTensorCreation(TestCase):
expected = torch.tensor([[7, 8, 5, 6], [3, 4, 1, 2]], device=device)
double_rolled = data.roll(shifts=(2, -1), dims=(1, 0))
self.assertEqual(double_rolled, expected,
msg="should be able to roll over two dimensions, got {}".format(double_rolled))
msg=f"should be able to roll over two dimensions, got {double_rolled}")

self.assertRaisesRegex(RuntimeError, "required", lambda: data.roll(shifts=(), dims=()))
self.assertRaisesRegex(RuntimeError, "required", lambda: data.roll(shifts=(), dims=1))

@@ -812,7 +812,7 @@ class TestTensorCreation(TestCase):
torch_fn(t)
# Test error for a single array
with self.assertRaisesRegex(TypeError, "must be tuple of Tensors, not Tensor"):
torch_fn((t))
torch_fn(t)

# Test 0-D
num_tensors = random.randint(1, 5)

@@ -521,7 +521,7 @@ def get_expected_file(function_ptr):
def read_expected_content(function_ptr):
expected_file = get_expected_file(function_ptr)
assert os.path.exists(expected_file), expected_file
with open(expected_file, "r") as f:
with open(expected_file) as f:
return f.read()

def compare_image_proto(actual_proto, function_ptr):

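The `open(expected_file, "r")` change works because `"r"` (and `"rt"`) is already `open`'s default mode, so dropping it is purely cosmetic; the same reasoning covers the `"wt"` to `"w"` hunks elsewhere in this commit. A small sketch using a temporary file:

    import os
    import tempfile

    path = os.path.join(tempfile.mkdtemp(), "expected.txt")
    with open(path, "w") as f:      # "wt" and "w" are equivalent: text mode is the default
        f.write("hello")
    with open(path) as f:           # same as open(path, "r") or open(path, "rt")
        assert f.read() == "hello"
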
@@ -591,7 +591,7 @@ class TestTensorExprFuser(BaseTestClass):

xs = [(torch.rand(4) * 3 + 1).to(torch.int32) for i in range(3)]
x, y, z = xs
xn, yn, zn = [t.numpy() for t in xs]
xn, yn, zn = (t.numpy() for t in xs)
traced = torch.jit.trace(test, (x, y, z))
res = warmup_and_run_forward(traced, x, y, z)
self.assertLastGraphAllFused()

@@ -1205,7 +1205,7 @@ class TestTensorExprFuser(BaseTestClass):

for test in (test_float, test_int):
for data_type in self.dtypes:
x, y, z = [torch.rand(4, dtype=data_type) for i in range(3)]
x, y, z = (torch.rand(4, dtype=data_type) for i in range(3))
a, b = 1, 2
test(x, y, z, a, b)
r = test(x, y, z, a, b)

@@ -1379,7 +1379,7 @@ class TestTensorExprFuser(BaseTestClass):
@torch.jit.script
def test(x, y, z):
return x * y * z
x, y, z = [torch.rand(4, 8).cuda() for _ in range(3)]
x, y, z = (torch.rand(4, 8).cuda() for _ in range(3))
ref = test(x, y, z)
_ = test(*[torch.rand(6, 8).cuda() for _ in range(3)])
res = test(x, y, z)

@@ -1390,7 +1390,7 @@ class TestTensorExprFuser(BaseTestClass):
y = torch.rand(1, 8).cuda()
z = torch.rand(4, 1).cuda()
res = test(x, y, z)
xn, yn, zn = [t.cpu().numpy() for t in (x, y, z)]
xn, yn, zn = (t.cpu().numpy() for t in (x, y, z))
np.testing.assert_allclose(res.cpu().numpy(), xn * yn * zn)

# Mismatched shapes shouldn't reach codegen.

@@ -392,9 +392,9 @@ graph(%a : Float(1, 3, 1, strides=[3, 1, 1], requires_grad=0, device=cpu)):

@unittest.skipIf(not LLVM_ENABLED, "LLVM backend not enabled")
def test_alloc_in_loop(self):
a, tmp, b = [
a, tmp, b = (
te.BufHandle(name, [1], torch.float32) for name in ["a", "tmp", "b"]
]
)
body = te.Block([tmp.store([0], a.load([0])), b.store([0], tmp.load([0]))])
for _ in range(4):
i = te.VarHandle("i", torch.int32)

@@ -402,7 +402,7 @@ graph(%a : Float(1, 3, 1, strides=[3, 1, 1], requires_grad=0, device=cpu)):
nest = te.LoopNest(body, [b])
nest.prepare_for_codegen()
f = te.construct_codegen("llvm", nest.simplify(), [a, b])
ta, tb = [torch.ones(1) for _ in range(2)]
ta, tb = (torch.ones(1) for _ in range(2))
f.call([ta.data_ptr(), tb.data_ptr()])

@@ -1483,7 +1483,7 @@ class TestMakeTensor(TestCase):
@parametrize("value_types", list(itertools.product([int, float], repeat=2)))
@supported_dtypes
def test_low_ge_high(self, dtype, device, low_high, value_types):
low, high = [value_type(value) for value, value_type in zip(low_high, value_types)]
low, high = (value_type(value) for value, value_type in zip(low_high, value_types))

if low == high and (dtype.is_floating_point or dtype.is_complex):
with self.assertWarnsRegex(

@@ -1561,7 +1561,7 @@ instantiate_device_type_tests(TestMakeTensor, globals())

def _get_test_names_for_test_class(test_cls):
""" Convenience function to get all test names for a given test class. """
test_names = ['{}.{}'.format(test_cls.__name__, key) for key in test_cls.__dict__
test_names = [f'{test_cls.__name__}.{key}' for key in test_cls.__dict__
if key.startswith('test_')]
return sorted(test_names)

@@ -1612,7 +1612,7 @@ class TestTestParametrization(TestCase):
def test_three_things_composition_custom_names(self, x, y, z):
pass

@parametrize("x,y", [(1, 2), (1, 3), (1, 4)], name_fn=lambda x, y: '{}__{}'.format(x, y))
@parametrize("x,y", [(1, 2), (1, 3), (1, 4)], name_fn=lambda x, y: f'{x}__{y}')
def test_two_things_custom_names_alternate(self, x, y):
pass

@@ -1767,7 +1767,7 @@ class TestTestParametrizationDeviceType(TestCase):

instantiate_device_type_tests(TestParametrized, locals(), only_for=device)

device_cls = locals()['TestParametrized{}'.format(device.upper())]
device_cls = locals()[f'TestParametrized{device.upper()}']
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_device_dtype_specific_{}_float32',
'{}.test_device_dtype_specific_{}_float64',

@@ -1791,7 +1791,7 @@ class TestTestParametrizationDeviceType(TestCase):

instantiate_device_type_tests(TestParametrized, locals(), only_for=device)

device_cls = locals()['TestParametrized{}'.format(device.upper())]
device_cls = locals()[f'TestParametrized{device.upper()}']
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_bar_{}',
'{}.test_foo_{}')

@@ -1834,7 +1834,7 @@ class TestTestParametrizationDeviceType(TestCase):

instantiate_device_type_tests(TestParametrized, locals(), only_for=device)

device_cls = locals()['TestParametrized{}'.format(device.upper())]
device_cls = locals()[f'TestParametrized{device.upper()}']
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_default_names_x_0_{}',
'{}.test_default_names_x_1_{}',

@@ -1862,13 +1862,13 @@ class TestTestParametrizationDeviceType(TestCase):
def test_three_things_composition_custom_names(self, device, x, y, z):
pass

@parametrize("x,y", [(1, 2), (1, 3), (1, 4)], name_fn=lambda x, y: '{}__{}'.format(x, y))
@parametrize("x,y", [(1, 2), (1, 3), (1, 4)], name_fn=lambda x, y: f'{x}__{y}')
def test_two_things_custom_names_alternate(self, device, x, y):
pass

instantiate_device_type_tests(TestParametrized, locals(), only_for=device)

device_cls = locals()['TestParametrized{}'.format(device.upper())]
device_cls = locals()[f'TestParametrized{device.upper()}']
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_custom_names_bias_{}',
'{}.test_custom_names_no_bias_{}',

@@ -1904,7 +1904,7 @@ class TestTestParametrizationDeviceType(TestCase):

instantiate_device_type_tests(TestParametrized, locals(), only_for=device)

device_cls = locals()['TestParametrized{}'.format(device.upper())]
device_cls = locals()[f'TestParametrized{device.upper()}']
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_custom_names_bias_{}',
'{}.test_custom_names_no_bias_{}',

@@ -1926,7 +1926,7 @@ class TestTestParametrizationDeviceType(TestCase):

instantiate_device_type_tests(TestParametrized, locals(), only_for=device)

device_cls = locals()['TestParametrized{}'.format(device.upper())]
device_cls = locals()[f'TestParametrized{device.upper()}']
expected_test_names = []
for op in op_db:
for dtype in op.supported_dtypes(torch.device(device).type):

@@ -1949,7 +1949,7 @@ class TestTestParametrizationDeviceType(TestCase):

instantiate_device_type_tests(TestParametrized, locals(), only_for=device)

device_cls = locals()['TestParametrized{}'.format(device.upper())]
device_cls = locals()[f'TestParametrized{device.upper()}']
expected_test_names = []
for module_info in module_db:
for dtype in module_info.dtypes:

@@ -2003,7 +2003,7 @@ class TestTestParametrizationDeviceType(TestCase):

device = self.device_type
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
device_cls = locals()[f'TestParametrized{device.upper()}']

for test_func, name in _get_test_funcs_for_test_class(device_cls):
should_apply = (name == 'test_op_param_test_op_x_2_cpu_float64' or

@@ -2050,7 +2050,7 @@ class TestTestParametrizationDeviceType(TestCase):

device = self.device_type
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
device_cls = locals()[f'TestParametrized{device.upper()}']

for test_func, name in _get_test_funcs_for_test_class(device_cls):
should_apply = (name == 'test_module_param_TestModule_x_2_cpu_float64' or

@@ -2071,7 +2071,7 @@ class TestTestParametrizationDeviceType(TestCase):

instantiate_device_type_tests(TestParametrized, locals(), only_for=device)

device_cls = locals()['TestParametrized{}'.format(device.upper())]
device_cls = locals()[f'TestParametrized{device.upper()}']
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_parametrized_x_0_{}_float32',
'{}.test_parametrized_x_0_{}_float64',

@@ -2226,7 +2226,7 @@ class TestImports(TestCase):

class TestOpInfos(TestCase):
def test_sample_input(self) -> None:
a, b, c, d, e = [object() for _ in range(5)]
a, b, c, d, e = (object() for _ in range(5))

# Construction with natural syntax
s = SampleInput(a, b, c, d=d, e=e)

@@ -2270,7 +2270,7 @@ class TestOpInfos(TestCase):
assert s.broadcasts_input

def test_sample_input_metadata(self) -> None:
a, b = [object() for _ in range(2)]
a, b = (object() for _ in range(2))
s1 = SampleInput(a, b=b)
self.assertIs(s1.output_process_fn_grad(None), None)
self.assertFalse(s1.broadcasts_input)

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Owner(s): ["module: tests"]

import torch

@@ -4882,26 +4881,26 @@ else:
x_c_clone = x_c.clone() if is_inplace else x_c
result_c = fn(x_c_clone, y_c)
result = fn(x_clone, y)
self.assertEqual(result, result_c, "Failed for '{}'".format(inspect.getsource(fn).strip()))
self.assertEqual(result, result_c, f"Failed for '{inspect.getsource(fn).strip()}'")
self.assertTrue(
result.is_contiguous(memory_format=memory_format),
"result of the '{}' is not in '{}' format".format(inspect.getsource(fn).strip(), memory_format))
f"result of the '{inspect.getsource(fn).strip()}' is not in '{memory_format}' format")

for fn in bias_fns:
result_c = fn(x_c, b_c)
result = fn(x, bias)
self.assertEqual(result, result_c, "Failed for '{}'".format(inspect.getsource(fn).strip()))
self.assertEqual(result, result_c, f"Failed for '{inspect.getsource(fn).strip()}'")
self.assertTrue(
result.is_contiguous(memory_format=memory_format),
"result of the '{}' is not in '{}' format".format(inspect.getsource(fn).strip(), memory_format))
f"result of the '{inspect.getsource(fn).strip()}' is not in '{memory_format}' format")

for fn in return_contig_fns:
result_c = fn(x_c, y_c)
result = fn(x, y)
self.assertEqual(result, result_c, "Failed for '{}'".format(inspect.getsource(fn).strip()))
self.assertEqual(result, result_c, f"Failed for '{inspect.getsource(fn).strip()}'")
self.assertTrue(
result.is_contiguous(memory_format=torch.contiguous_format),
"result of the '{}' is not in '{}' format".format(inspect.getsource(fn).strip(), torch.contiguous_format))
f"result of the '{inspect.getsource(fn).strip()}' is not in '{torch.contiguous_format}' format")

_test_helper(
torch.randn((4, 3, 8, 8), device=device).contiguous(memory_format=torch.channels_last),

@@ -6415,11 +6414,11 @@ class TestTorch(TestCase):

self.assertRaisesRegex(
RuntimeError,
"Tensor.__contains__ only supports Tensor or scalar, but you passed in a {}.".format(str),
f"Tensor.__contains__ only supports Tensor or scalar, but you passed in a {str}.",
lambda: "foo" in x)
self.assertRaisesRegex(
RuntimeError,
"Tensor.__contains__ only supports Tensor or scalar, but you passed in a {}.".format(type([1, 2])),
f"Tensor.__contains__ only supports Tensor or scalar, but you passed in a {type([1, 2])}.",
lambda: [1, 2] in x)

@skipIfTorchDynamo("TorchDynamo fails with unknown reason")

@@ -6705,9 +6704,9 @@ class TestTorch(TestCase):

# fail parse with > 1 element variables
self.assertRaises(TypeError, lambda: torch.ones(torch.tensor(3, 3)))
self.assertRaises(TypeError, lambda: torch.ones((torch.tensor(3, 3))))
self.assertRaises(TypeError, lambda: torch.ones(torch.tensor(3, 3)))
self.assertRaises(TypeError, lambda: torch.ones(np.array(3, 3)))
self.assertRaises(TypeError, lambda: torch.ones(np.array(3, 3)))
self.assertRaises(TypeError, lambda: torch.ones((np.array(3, 3))))

# fail parse with additional positional args after intlist arg
self.assertRaisesRegex(TypeError,

@@ -8220,7 +8219,7 @@ tensor([[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
for seed, expected_initial_seed in test_cases:
torch.manual_seed(seed)
actual_initial_seed = torch.initial_seed()
msg = "expected initial_seed() = %x after calling manual_seed(%x), but got %x instead" % (
msg = "expected initial_seed() = {:x} after calling manual_seed({:x}), but got {:x} instead".format(
expected_initial_seed, seed, actual_initial_seed)
self.assertEqual(expected_initial_seed, actual_initial_seed, msg=msg)
for invalid_seed in [min_int64 - 1, max_uint64 + 1]:

@@ -8328,7 +8327,7 @@ tensor([[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
devices = [t.device]
if t.device.type == 'cuda':
if t.device.index == -1:
devices.append('cuda:{}'.format(torch.cuda.current_device()))
devices.append(f'cuda:{torch.cuda.current_device()}')
elif t.device.index == torch.cuda.current_device():
devices.append('cuda')
for device in devices:

@@ -8445,7 +8444,7 @@ tensor([[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
self.assertTrue(t.grad is not None)

# Make sure invalid subclasses raise nice errors
class BadSubTensor():
class BadSubTensor:
member_var = object()

err_msg = "Creating a Tensor subclass from a class that does not inherit from Tensor"

@@ -8593,7 +8592,7 @@ tensor([[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
return "device(type='{type}', index={index})".format(
type=device.type, index=device.index)

return "device(type='{type}')".format(type=device.type)
return f"device(type='{device.type}')"

for device in device_set:
dev = torch.device(device)

@@ -8683,7 +8682,7 @@ tensor([[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args

with open(doc_file, "r", encoding="utf-8") as f:
with open(doc_file, encoding="utf-8") as f:
doc_strs = f.read()

matches = re.findall(

@@ -8714,7 +8713,7 @@ tensor([[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
skip_regexes = []
for r in skips:
if isinstance(r, str):
skip_regexes.append(re.compile('^{}$'.format(re.escape(r))))
skip_regexes.append(re.compile(f'^{re.escape(r)}$'))
else:
skip_regexes.append(r)

@@ -8739,7 +8738,7 @@ tensor([[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
'New docs have been added for {}, please remove '
'it from the skipped list in TestTorch.test_doc'.format(full_name))
else:
self.assertTrue(has_doc, '{} is missing documentation'.format(full_name))
self.assertTrue(has_doc, f'{full_name} is missing documentation')

# FIXME: All of the following should be marked as expected failures
# so that it is easier to tell when missing has been added.

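One hunk in the section above stops one step short of an f-string: the printf-style `%x` seed message becomes `str.format` with `{:x}`, keeping the hexadecimal rendering. A minimal sketch of that equivalence (the values are made up):

    expected, seed, actual = 0x2b, 0x2b, 0x2b
    old = "expected initial_seed() = %x after calling manual_seed(%x), but got %x instead" % (expected, seed, actual)
    new = "expected initial_seed() = {:x} after calling manual_seed({:x}), but got {:x} instead".format(expected, seed, actual)
    assert old == new   # both render the integers in hex
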
@@ -397,8 +397,8 @@ class TestTypePromotion(TestCase):
self.assertEqual(not second.is_contiguous(), non_contiguous)
result = op(first, second)
expected = op(first.to(common_dtype), second.to(common_dtype))
self.assertEqual(result.dtype, expected.dtype, msg='{} with {}, {}'.format(op.__name__, dt1, dt2))
self.assertEqual(result, expected, msg='{} with {}, {}'.format(op.__name__, dt1, dt2))
self.assertEqual(result.dtype, expected.dtype, msg=f'{op.__name__} with {dt1}, {dt2}')
self.assertEqual(result, expected, msg=f'{op.__name__} with {dt1}, {dt2}')

@float_double_default_dtype
def test_non_promoting_ops(self, device):

@@ -809,7 +809,7 @@ class TestTypePromotion(TestCase):
return

suffix = '_' if inplace else ''
err = "{} {}({}, {})".format(" coalesced" if coalesced else "uncoalesced", op_name + suffix, dtype1, dtype2)
err = f"{' coalesced' if coalesced else 'uncoalesced'} {op_name + suffix}({dtype1}, {dtype2})"

def op(t1, t2, suf=None):
suf = suffix if suf is None else suf

@@ -850,7 +850,7 @@ class TestTypePromotion(TestCase):
# Test op(dense, sparse)
if add_sub or op_name == 'mul':
if inplace:
e, d1, s1, d2, s2 = [x.clone() for x in test_tensors]
e, d1, s1, d2, s2 = (x.clone() for x in test_tensors)
dense_sparse = op(d1, s2)
dense_sparse = dense_sparse.to_dense() if dense_sparse.is_sparse else dense_sparse
self.assertEqual(e, dense_sparse, atol=precision, rtol=rtol, msg=err)

@@ -871,7 +871,7 @@ class TestTypePromotion(TestCase):
# Test op(sparse, scalar)
if not add_sub and not (self.device_type == 'cpu' and dtype1 == torch.half):
if inplace:
e, d1, s1, d2, s2 = [x.clone() for x in test_tensors]
e, d1, s1, d2, s2 = (x.clone() for x in test_tensors)
scalar = d2.view(d2.numel())[0].item()

sparse = op(s1, scalar)

@@ -984,21 +984,19 @@ class TestTypePromotion(TestCase):
# Note: These cases prettyprint the failing inputs to make
# debugging test failures easier.
if undesired_failure and same_result:
msg = ("Failure: {0} == {1}. "
"torch type was {2}. NumPy type was {3}. np_first is {4} "
"default type is {5}.").format(actual, expected,
torch_type, np_type,
np_first,
torch.get_default_dtype())
msg = (
f"Failure: {actual} == {expected}. torch type was {torch_type}. "
f"NumPy type was {np_type}. np_first is {np_first} default type is "
f"{torch.get_default_dtype()}."
)
self.fail(msg)

if not undesired_failure and not same_result:
msg = ("Failure: {0} != {1}. "
"torch type was {2}. NumPy type was {3}. np_first is {4} "
"default type is {5}.").format(actual, expected,
torch_type, np_type,
np_first,
torch.get_default_dtype())
msg = (
f"Failure: {actual} != {expected}. torch type was {torch_type}. "
f"NumPy type was {np_type}. np_first is {np_first} default type is "
f"{torch.get_default_dtype()}."
)
self.fail(msg)

@@ -102,8 +102,8 @@ class TestUnaryUfuncs(TestCase):
result.item(),
float("nan"),
msg=(
"input of {0} outside lower domain boundary"
" {1} produced {2}, not nan!"
"input of {} outside lower domain boundary"
" {} produced {}, not nan!"
).format(lower_tensor.item(), low, result.item()),
)

@@ -121,8 +121,8 @@ class TestUnaryUfuncs(TestCase):
result.item(),
float("nan"),
msg=(
"input of {0} outside upper domain boundary"
" {1} produced {2}, not nan!"
"input of {} outside upper domain boundary"
" {} produced {}, not nan!"
).format(higher_tensor.item(), high, result.item()),
)

@@ -162,7 +162,7 @@ class TestUnaryUfuncs(TestCase):
)
else:
self.fail(
"Expected dtype {0} but got {1}!".format(
"Expected dtype {} but got {}!".format(
expected.dtype, actual.dtype
)
)

@@ -248,8 +248,8 @@ class TestUnaryUfuncs(TestCase):
if t.numel() < 10:
msg = (
"Failed to produce expected results! Input tensor was"
" {0}, torch result is {1}, and reference result is"
" {2}."
" {}, torch result is {}, and reference result is"
" {}."
).format(t, actual, expected)
else:
msg = None

@@ -1146,7 +1146,7 @@ class TestUnaryUfuncs(TestCase):
self.assertEqual(res.imag, out.imag, atol=0.0, rtol=1e-6)

# test the log1p in tensor
inp_lst, out_lst = [list(elmt) for elmt in zip(*inouts)]
inp_lst, out_lst = (list(elmt) for elmt in zip(*inouts))
inp_tens = torch.tensor(inp_lst, dtype=dtype, device=device)
out_tens = torch.tensor(out_lst, dtype=dtype, device=device)
res_tens = torch.log1p(inp_tens)

@@ -545,11 +545,11 @@ class TestBottleneck(TestCase):

def _run_bottleneck(self, test_file, scriptargs=''):
curdir = os.path.dirname(os.path.abspath(__file__))
filepath = '{}/{}'.format(curdir, test_file)
filepath = f'{curdir}/{test_file}'
if scriptargs != '':
scriptargs = ' {}'.format(scriptargs)
scriptargs = f' {scriptargs}'
rc, out, err = self._run(
'{} -m torch.utils.bottleneck {}{}'.format(sys.executable, filepath, scriptargs))
f'{sys.executable} -m torch.utils.bottleneck {filepath}{scriptargs}')
return rc, out, err

def _check_run_args(self):

@@ -562,7 +562,7 @@ class TestBottleneck(TestCase):
self.assertEqual(rc, 0, atol=0, rtol=0, msg=self._fail_msg('Should pass args to script', out + err))

def _fail_msg(self, msg, output):
return '{}, output was:\n{}'.format(msg, output)
return f'{msg}, output was:\n{output}'

def _check_environment_summary(self, output):
results = re.search('Environment Summary', output)

@@ -603,7 +603,7 @@ class TestBottleneck(TestCase):
@unittest.skipIf(HAS_CUDA, 'CPU-only test')
def test_bottleneck_cpu_only(self):
rc, out, err = self._run_bottleneck('bottleneck_test/test.py')
self.assertEqual(rc, 0, msg='Run failed with\n{}'.format(err))
self.assertEqual(rc, 0, msg=f'Run failed with\n{err}')

self._check_run_args()
self._check_environment_summary(out)

@@ -614,7 +614,7 @@ class TestBottleneck(TestCase):
@unittest.skipIf(not HAS_CUDA, 'No CUDA')
def test_bottleneck_cuda(self):
rc, out, err = self._run_bottleneck('bottleneck_test/test_cuda.py')
self.assertEqual(rc, 0, msg='Run failed with\n{}'.format(err))
self.assertEqual(rc, 0, msg=f'Run failed with\n{err}')

self._check_run_args()
self._check_environment_summary(out)

@@ -740,7 +740,7 @@ class TestStandaloneCPPJIT(TestCase):
std::cout << x << std::endl;
}
""")
with open(src_path, "wt") as f:
with open(src_path, "w") as f:
f.write(src)

exec_path = torch.utils.cpp_extension.load(

@@ -31,7 +31,7 @@ class TestXNNPACKOps(TestCase):
input_data = torch.rand(data_shape)
weight = torch.rand((weight_output_dim, data_shape[-1]))
if use_bias:
bias = torch.rand((weight_output_dim))
bias = torch.rand(weight_output_dim)
else:
bias = None
ref_result = F.linear(input_data, weight, bias)

@@ -46,7 +46,7 @@ class TestXNNPACKOps(TestCase):
input_data = torch.rand(input_size)
weight = torch.rand((weight_output_dim, input_data.shape[-1]))
if use_bias:
bias = torch.rand((weight_output_dim))
bias = torch.rand(weight_output_dim)
else:
bias = None
ref_result = F.linear(input_data, weight, bias)

@@ -102,7 +102,7 @@ class TestXNNPACKOps(TestCase):
weight = torch.rand((output_channels, input_channels_per_group, kernel_h, kernel_w))
bias = None
if use_bias:
bias = torch.rand((output_channels))
bias = torch.rand(output_channels)

ref_result = F.conv2d(input_data, weight, bias,
strides, paddings, dilations, groups)

@@ -166,7 +166,7 @@ class TestXNNPACKOps(TestCase):
weight = torch.rand((input_channels, output_channels_per_group, kernel_h, kernel_w))
bias = None
if use_bias:
bias = torch.rand((output_channels))
bias = torch.rand(output_channels)

# Note that groups/dilation is in reverse order from conv2d
ref_result = F.conv_transpose2d(input_data, weight, bias,

@@ -209,7 +209,7 @@ class TestXNNPACKSerDes(TestCase):
data_shape = [batch_size] + list(data_shape)
weight = torch.rand((weight_output_dim, data_shape[-1]))
if use_bias:
bias = torch.rand((weight_output_dim))
bias = torch.rand(weight_output_dim)
else:
bias = None
scripted_linear = torch.jit.script(Linear(weight, bias))

@@ -304,7 +304,7 @@ class TestXNNPACKSerDes(TestCase):
weight = torch.rand((output_channels, input_channels_per_group, kernel_h, kernel_w))
bias = None
if use_bias:
bias = torch.rand((output_channels))
bias = torch.rand(output_channels)

scripted_conv2d = torch.jit.script(Conv2D(weight, bias,
strides, paddings, dilations, groups))

@@ -411,7 +411,7 @@ class TestXNNPACKSerDes(TestCase):
weight = torch.rand((input_channels, output_channels_per_group, kernel_h, kernel_w))
bias = None
if use_bias:
bias = torch.rand((output_channels))
bias = torch.rand(output_channels)

scripted_conv2d = torch.jit.script(Conv2DT(weight, bias,
strides, paddings,

@@ -525,7 +525,7 @@ class TestXNNPACKSerDes(TestCase):
conv_weight = torch.rand((output_channels, input_channels_per_group, kernel_h, kernel_w))
conv_bias = None
if use_bias:
conv_bias = torch.rand((output_channels))
conv_bias = torch.rand(output_channels)

# This is done just to find the output shape of the result
# so that the shape of weight for the following linear layer

@@ -537,7 +537,7 @@ class TestXNNPACKSerDes(TestCase):
linear_weight = torch.rand((linear_weight_output_dim, linear_input_shape))
linear_bias = None
if use_bias:
linear_bias = torch.rand((linear_weight_output_dim))
linear_bias = torch.rand(linear_weight_output_dim)

scripted_m = torch.jit.script(M(conv_weight, conv_bias, linear_weight,
linear_bias, strides, paddings, dilations, groups))

@@ -625,7 +625,7 @@ class TestXNNPACKRewritePass(TestCase):
def __init__(self):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand((weight_output_dim)), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(weight_output_dim), requires_grad=False)

def forward(self, x):
return F.linear(x, self.weight, self.bias)

@@ -712,7 +712,7 @@ class TestXNNPACKRewritePass(TestCase):

input_data = torch.rand((batch_size, input_channels, height, width))
conv_weight = torch.rand((output_channels, input_channels_per_group, kernel_h, kernel_w))
conv_bias = torch.rand((output_channels))
conv_bias = torch.rand(output_channels)
result = F.conv2d(input_data, conv_weight, conv_bias,
strides, paddings, dilations, groups)
linear_input_shape = result.shape[1]

@@ -722,9 +722,9 @@ class TestXNNPACKRewritePass(TestCase):
def __init__(self, activation_fn=F.relu):
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.conv_bias = torch.nn.Parameter(torch.rand((conv_bias_shape)), requires_grad=False)
self.conv_bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape), requires_grad=False)
self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)), requires_grad=False)
self.linear_bias = torch.nn.Parameter(torch.rand(weight_output_dim), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations

@@ -834,7 +834,7 @@ class TestXNNPACKRewritePass(TestCase):
def __init__(self):
super().__init__()
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape), requires_grad=False)
self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)), requires_grad=False)
self.linear_bias = torch.nn.Parameter(torch.rand(weight_output_dim), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations

@@ -862,7 +862,7 @@ class TestXNNPACKRewritePass(TestCase):
def __init__(self):
super().__init__()
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape), requires_grad=False)
self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)), requires_grad=False)
self.linear_bias = torch.nn.Parameter(torch.rand(weight_output_dim), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations

@@ -895,7 +895,7 @@ class TestXNNPACKRewritePass(TestCase):
def __init__(self):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand((weight_output_dim)), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(weight_output_dim), requires_grad=False)

def forward(self, x):
weight_t = self.weight.t()

@@ -905,7 +905,7 @@ class TestXNNPACKRewritePass(TestCase):
def __init__(self):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand((weight_output_dim)), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(weight_output_dim), requires_grad=False)

def forward(self, x):
weight_t = self.weight.t()

@@ -917,7 +917,7 @@ class TestXNNPACKRewritePass(TestCase):
def __init__(self):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand((weight_output_dim)), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(weight_output_dim), requires_grad=False)

def forward(self, x):
weight_t = self.weight.t()