[2/N] Apply py39 ruff fixes (#141938)

Fixes #ISSUE_NUMBER

Pull Request resolved: https://github.com/pytorch/pytorch/pull/141938
Approved by: https://github.com/ezyang
cyy 2024-12-05 06:26:03 +00:00 committed by PyTorch MergeBot
parent 653efe14e4
commit aa95618268
16 changed files with 53 additions and 62 deletions
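These changes mechanically rewrite typing.List / typing.Dict / typing.Tuple annotations to the built-in generics that PEP 585 made subscriptable in Python 3.9, dropping the now-unused typing imports. The ruff rules involved are presumably the pyupgrade-derived UP006/UP035 (an assumption; the commit message only says "py39 ruff fixes"). A minimal sketch of the pattern:

    # Before (pre-3.9 style): generic aliases imported from typing
    from typing import Dict, List, Tuple

    def pairs(xs: List[int]) -> Dict[str, Tuple[int, int]]:
        return {str(x): (x, x) for x in xs}

    # After (PEP 585, Python 3.9+): builtins are subscriptable at runtime,
    # so the typing imports can be dropped
    def pairs(xs: list[int]) -> dict[str, tuple[int, int]]:
        return {str(x): (x, x) for x in xs}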

View File

@@ -1063,13 +1063,6 @@ def gen_pyi(
         # NB: Keep this in sync with enum in aten/src/ATen/core/Reduction.h
         hint = hint.replace("at::Reduction::Mean", "1")
         hint = hint.replace(": Tensor = None", ": Optional[Tensor] = None")
-        # Match both:
-        # ": Union[Tensor, Tuple[Tensor, ...], List[Tensor]] = None"
-        # ": Union[Tuple[Tensor, ...], List[Tensor]] = None"
-        hint = hint.replace(
-            "Tuple[Tensor, ...], List[Tensor]] = None",
-            "Tuple[Tensor, ...], List[Tensor], None] = None",
-        )
         return hint

     docstrs = gather_docstrs()
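A reading of why this post-processing can be deleted (inferred from the torchgen/api/python.py hunks later in this commit, so treat it as an inference, not a statement from the author): argument_type_str_pyi now sets add_optional unconditionally for tensor-like list arguments, folding None into Optional[...] at generation time, and the generated hints now spell lowercase tuple[...]/list[...], which this uppercase pattern would no longer match anyway. What the deleted replace used to do, on a hypothetical hint string:

    hint = "tensors: Union[Tuple[Tensor, ...], List[Tensor]] = None"
    # patch None into the Union after the fact, so the `= None` default typechecks
    hint = hint.replace(
        "Tuple[Tensor, ...], List[Tensor]] = None",
        "Tuple[Tensor, ...], List[Tensor], None] = None",
    )
    # -> "tensors: Union[Tuple[Tensor, ...], List[Tensor], None] = None"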

View File

@@ -1,4 +1,4 @@
-from typing import Any, Dict, List, Optional, Tuple
+from typing import Any, Optional

 import numpy as np
 from sklearn.tree import _tree  # type: ignore[import-untyped]
@@ -34,10 +34,10 @@ class DecisionTree:
     does not seem to be easy with sklearn.
     """

-    def __init__(self, sklearn_tree: Any, feature_names: List[str]) -> None:
+    def __init__(self, sklearn_tree: Any, feature_names: list[str]) -> None:
         self.feature_names = feature_names
         self.root = self._convert_sklearn_tree(sklearn_tree.tree_)
-        self.classes_: List[str] = sklearn_tree.classes_
+        self.classes_: list[str] = sklearn_tree.classes_

     def _convert_sklearn_tree(
         self, sklearn_tree: Any, node_id: int = 0
@@ -193,9 +193,9 @@ class DecisionTree:
     def codegen(
         self,
-        dummy_col_2_col_val: Dict[str, Tuple[str, Any]],
-        lines: List[str],
-        unsafe_leaves: List[int],
+        dummy_col_2_col_val: dict[str, tuple[str, Any]],
+        lines: list[str],
+        unsafe_leaves: list[int],
     ) -> None:
         # generates python code for the decision tree
         def codegen_node(node: DecisionTreeNode, depth: int) -> None:
@@ -223,7 +223,7 @@ class DecisionTree:
             codegen_node(node.right, depth + 1)

         def handle_leaf(
-            node: DecisionTreeNode, indent: str, unsafe_leaves: List[int]
+            node: DecisionTreeNode, indent: str, unsafe_leaves: list[int]
         ) -> str:
             """
             This generates the code for a leaf node in the decision tree. If the leaf is unsafe, the learned heuristic

View File

@@ -2,7 +2,7 @@ import argparse
 import random
 import time
 from abc import abstractmethod
-from typing import Any, Tuple
+from typing import Any

 from tqdm import tqdm  # type: ignore[import-untyped]
@@ -71,7 +71,7 @@ class BenchmarkRunner:
     def run_benchmark(self, *args: Any) -> None: ...

     @abstractmethod
-    def create_input(self) -> Tuple[Any, ...]: ...
+    def create_input(self) -> tuple[Any, ...]: ...

     def main(self, num_samples: int, num_reps: int) -> None:
         for _ in tqdm(range(num_samples)):

View File

@@ -1,10 +1,10 @@
 import random
-from typing import Any, Tuple
+from typing import Any

 import torch


-def transpose_tensors(p_transpose_both: float = 0.05) -> Tuple[bool, bool]:
+def transpose_tensors(p_transpose_both: float = 0.05) -> tuple[bool, bool]:
     transpose_both = random.choices(
         [True, False], [p_transpose_both, 1 - p_transpose_both]
     )[0]
@@ -31,7 +31,7 @@ def get_mm_tensors(
     transpose_right: bool,
     dtype_left: Any,
     dtype_right: Any,
-) -> Tuple[Any, Any]:
+) -> tuple[Any, Any]:
     if transpose_left:
         a = torch.randn(k, m, dtype=dtype_left).t()
     else:

View File

@@ -1,14 +1,13 @@
 import sys
-from typing import List


-def merge_txt_files(file_list: List[str], output_file: str) -> None:
+def merge_txt_files(file_list: list[str], output_file: str) -> None:
     if not file_list:
         print("No input files provided.")
         return

-    metadata: List[str] = []
-    content: List[str] = []
+    metadata: list[str] = []
+    content: list[str] = []

     # Read metadata and content from all files
     for file_path in file_list:

View File

@@ -6,7 +6,7 @@ import sys

 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

-from typing import Any, Tuple
+from typing import Any

 from benchmark_runner import BenchmarkRunner  # type: ignore[import-not-found]
 from benchmark_utils import (  # type: ignore[import-not-found]
@@ -33,7 +33,7 @@ class BenchmarkRunnerMixedMM(BenchmarkRunner):  # type: ignore[misc, no-any-unimported]
     def __init__(self) -> None:
         super().__init__("mixed_mm")

-    def create_input(self) -> Tuple[Any, ...]:
+    def create_input(self) -> tuple[Any, ...]:
         dtype1, dtype2 = self.get_dtypes()
         m, k, n = self.get_m_k_n(dtype1)
         transpose_left, transpose_right = False, True
@@ -109,7 +109,7 @@ class BenchmarkRunnerMixedMM(BenchmarkRunner):  # type: ignore[misc, no-any-unimported]
         else:
             return get_random_between_pow2(1, 7)

-    def get_m_k_n(self, dtype: Any) -> Tuple[int, int, int]:
+    def get_m_k_n(self, dtype: Any) -> tuple[int, int, int]:
         numel_max = 2**31

         # repeat until tensors fit in memory

View File

@@ -24,7 +24,7 @@ class TestMixedMM(TestCase):
 # fmt: off
 # This file was generated by AutoHeuristic. Do not modify it manually!
 # To regenerate this file, take a look at the steps in the README.md file inside torchgen/_autoheuristic/mixed_mm/
-from typing import List, Optional, Tuple
+from typing import Optional

 from torch._inductor.autoheuristic.autoheuristic_utils import (
     AHContext,
@@ -39,7 +39,7 @@ from torch._inductor.autoheuristic.learnedheuristic_interface import (
 class MixedMMA100(LearnedHeuristicDecision):

     def __init__(self) -> None:
-        self.choices: List[Choice] = []
+        self.choices: list[Choice] = []
         self.fill_choices()

     def check_precondition(self, metadata: AHMetadata, context: AHContext,) -> bool:
@@ -84,7 +84,7 @@ class MixedMMA100(LearnedHeuristicDecision):
     def get_name(self) -> str:
         return 'mixed_mm'

-    def get_best_choices(self, context: AHContext) -> Optional[List[Tuple[float, int]]]:
+    def get_best_choices(self, context: AHContext) -> Optional[list[tuple[float, int]]]:
         if str(context.get_value('1LEQmLEQ16')) != 'True':
             if context.get_value('m') <= 32.5:
                 if context.get_value('n') <= 6976.0:
@@ -186,7 +186,7 @@ class MixedMMA100(LearnedHeuristicDecision):
 # fmt: off
 # This file was generated by AutoHeuristic. Do not modify it manually!
 # To regenerate this file, take a look at the steps in the README.md file inside torchgen/_autoheuristic/mixed_mm/
-from typing import List, Optional, Tuple
+from typing import Optional

 from torch._inductor.autoheuristic.autoheuristic_utils import (
     AHContext,
@@ -201,7 +201,7 @@ from torch._inductor.autoheuristic.learnedheuristic_interface import (
 class MixedMMH100(LearnedHeuristicDecision):

     def __init__(self) -> None:
-        self.choices: List[Choice] = []
+        self.choices: list[Choice] = []
         self.fill_choices()

     def check_precondition(self, metadata: AHMetadata, context: AHContext,) -> bool:
@@ -245,7 +245,7 @@ class MixedMMH100(LearnedHeuristicDecision):
     def get_name(self) -> str:
         return 'mixed_mm'

-    def get_best_choices(self, context: AHContext) -> Optional[List[Tuple[float, int]]]:
+    def get_best_choices(self, context: AHContext) -> Optional[list[tuple[float, int]]]:
         if context.get_value('arith_intensity') <= 15.988086223602295:
             if context.get_value('n') <= 25280.0:
                 if context.get_value('n') <= 1344.0:

View File

@@ -6,7 +6,7 @@ import sys

 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

-from typing import Any, Tuple
+from typing import Any

 from benchmark_runner import BenchmarkRunner  # type: ignore[import-not-found]
 from benchmark_utils import (  # type: ignore[import-not-found]
@@ -28,7 +28,7 @@ class BenchmarkRunnerMM(BenchmarkRunner):  # type: ignore[misc, no-any-unimported]
     def __init__(self) -> None:
         super().__init__("mm")

-    def create_input(self) -> Tuple[Any, ...]:
+    def create_input(self) -> tuple[Any, ...]:
         dtype = random.choices([torch.float32, torch.float16, torch.bfloat16])[0]
         set_precision(dtype)
         m, k, n = self.get_m_k_n(dtype)
@@ -100,7 +100,7 @@ class BenchmarkRunnerMM(BenchmarkRunner):  # type: ignore[misc, no-any-unimported]
             print(f"random_type {distr_type} not supported")
             sys.exit(1)

-    def get_m_k_n(self, dtype: Any) -> Tuple[int, int, int]:
+    def get_m_k_n(self, dtype: Any) -> tuple[int, int, int]:
         numel_max = 2**31

         # repeat until tensors fit in memory

View File

@@ -5,7 +5,7 @@ import sys

 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

-from typing import Any, Tuple
+from typing import Any

 from benchmark_runner import BenchmarkRunner  # type: ignore[import-not-found]
 from benchmark_utils import (  # type: ignore[import-not-found]
@@ -30,7 +30,7 @@ class BenchmarkRunnerPadMM(BenchmarkRunner):  # type: ignore[misc, no-any-unimported]
     def __init__(self) -> None:
         super().__init__("pad_mm")

-    def create_input(self) -> Tuple[Any, ...]:
+    def create_input(self) -> tuple[Any, ...]:
         dtype = self.get_dtype()
         set_precision(dtype)
         m, k, n = self.get_m_k_n(dtype)
@@ -113,7 +113,7 @@ class BenchmarkRunnerPadMM(BenchmarkRunner):  # type: ignore[misc, no-any-unimported]
     def is_aligned(self, dim: int, align_size: int) -> bool:
         return dim % align_size == 0

-    def get_m_k_n(self, dtype: Any) -> Tuple[int, int, int]:
+    def get_m_k_n(self, dtype: Any) -> tuple[int, int, int]:
         uniform = random.choices([True, False])[0]
         align_size = get_alignment_size_dtype(dtype)

View File

@@ -531,7 +531,7 @@ class AHTrainDecisionTree(AHTrain):
         """
         Generates the definition of the predict function.
        """
-        return "def get_best_choices(self, context: AHContext) -> Optional[List[Tuple[float, int]]]:"
+        return "def get_best_choices(self, context: AHContext) -> Optional[list[tuple[float, int]]]:"

     def codegen_boilerplate(
         self, heuristic_name, opt_name, threshold, shared_memory, device_capa, classes
@@ -545,7 +545,7 @@ class AHTrainDecisionTree(AHTrain):
 # fmt: off
 # This file was generated by AutoHeuristic. Do not modify it manually!
 # To regenerate this file, take a look at the steps in the README.md file inside torchgen/_autoheuristic/{opt_name}/
-from typing import List, Optional, Tuple
+from typing import Optional

 from torch._inductor.autoheuristic.autoheuristic_utils import (
     AHContext,
@@ -560,7 +560,7 @@ from torch._inductor.autoheuristic.learnedheuristic_interface import (
 class {heuristic_name}(LearnedHeuristicDecision):

     def __init__(self) -> None:
-        self.choices: List[Choice] = []
+        self.choices: list[Choice] = []
         self.fill_choices()

 {self.gen_precondition(opt_name, shared_memory, device_capa)}

View File

@@ -80,7 +80,7 @@ def process_ir_type(
     (3) making cpp-reference types into cpp-value types (e.g. vector instead of IntArrayRef)

     (1) converts at::Tensors to lazy::Values (which wrap lazy::Nodes, with which Lazy IR represents tensors.)
-    There is special handling for Optional[Tensor] or List[Tensor], etc- hence 'tensor-like'
+    There is special handling for Optional[Tensor] or list[Tensor], etc- hence 'tensor-like'

     This is incomplete- there are assertions in places that it's expected to need to add
     more types as the codegen is used with more operators.

View File

@@ -956,14 +956,13 @@ def argument_type_str_pyi(t: Type) -> str:
         ret = "Union[_int, _size]" if t.size is not None else "_size"
     elif t.is_tensor_like():
         # TODO: this doesn't seem right...
-        # Tensor?[] currently translates to Optional[Union[Tuple[Tensor, ...], List[Tensor]]]
-        # It should probably translate to   Union[Tuple[Optional[Tensor], ...], List[Optional[Tensor]]]
-        if isinstance(t.elem, OptionalType):
-            add_optional = True
+        # Tensor?[] currently translates to Optional[Union[tuple[Tensor, ...], list[Tensor]]]
+        # It should probably translate to   Union[tuple[Optional[Tensor], ...], list[Optional[Tensor]]]
+        add_optional = True
         ret = (
-            "Union[Tensor, Tuple[Tensor, ...], List[Tensor]]"
+            "Union[Tensor, tuple[Tensor, ...], list[Tensor]]"
             if t.size is not None
-            else "Union[Tuple[Tensor, ...], List[Tensor]]"
+            else "Union[tuple[Tensor, ...], list[Tensor]]"
         )
     elif str(t.elem) == "float":
         ret = "Sequence[_float]"
@@ -1001,7 +1000,7 @@ def return_type_str_pyi(t: Type) -> str:

     if isinstance(t, ListType):
         inner = return_type_str_pyi(t.elem)
-        return f"Tuple[{inner}, ...]"
+        return f"tuple[{inner}, ...]"

     return argument_type_str_pyi(t)
@@ -1014,7 +1013,7 @@ def returns_structseq_pyi(signature: PythonSignature) -> tuple[str, str] | None:
         # These types are structseq objects which act like named NamedTuples, but
         # the constructor acts like the constructor of tuple. Using typing.NamedTuple
         # does not allow us to override __init__.
-        seq_type = f"Tuple[{', '.join(python_returns)}]"
+        seq_type = f"tuple[{', '.join(python_returns)}]"
         structseq_def_lines = [
             f"class {structseq_name}({seq_type}):",
         ]
@@ -1038,12 +1037,12 @@ def returns_structseq_pyi(signature: PythonSignature) -> tuple[str, str] | None:
         structseq_def = "\n".join(structseq_def_lines)
         # Example:
         # structseq_def = (
-        #     "class max(Tuple[Tensor, Tensor]):\n"
+        #     "class max(tuple[Tensor, Tensor]):\n"
         #     "    @property\n"
         #     "    def values(self) -> Tensor: ...\n"
         #     "    @property\n"
         #     "    def indices(self) -> Tensor: ...\n"
-        #     "    def __new__(cls, sequence: Tuple[Tensor, Tensor]): ...\n"
+        #     "    def __new__(cls, sequence: tuple[Tensor, Tensor]): ...\n"
         #     "    n_fields: _int = 2",
         #     "    n_sequeunce_fields: _int = 2",
         #     "    n_unnamed_fields: _int = 0",
@@ -1060,7 +1059,7 @@ def returns_str_pyi(signature: PythonSignature) -> str:
     python_returns = [return_type_str_pyi(r.type) for r in signature.returns.returns]
     if len(python_returns) > 1:
-        return "Tuple[" + ", ".join(python_returns) + "]"
+        return "tuple[" + ", ".join(python_returns) + "]"
     if len(python_returns) == 1:
         return python_returns[0]
     return "None"

View File

@@ -2,7 +2,7 @@ from __future__ import annotations

 import contextlib
 import functools
-from typing import Any, Callable, List, Optional, Tuple, TYPE_CHECKING, TypeVar, Union
+from typing import Any, Callable, Optional, TYPE_CHECKING, TypeVar, Union

 import torchgen.local as local
 from torchgen.model import (
@@ -39,7 +39,7 @@ F2 = TypeVar(
     str,
 )

-F3 = TypeVar("F3", Tuple[NativeFunction, Any], List[NativeFunction])
+F3 = TypeVar("F3", tuple[NativeFunction, Any], list[NativeFunction])


 @contextlib.contextmanager
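The F3 change is a reminder that these rewrites are not stub-only cosmetics: TypeVar constraints are ordinary runtime expressions, unaffected by "from __future__ import annotations", so tuple[...] and list[...] here require an interpreter with PEP 585, i.e. Python 3.9+. A self-contained sketch (names are illustrative, not from torchgen):

    from typing import TypeVar

    # Built-in generics are real runtime objects on 3.9+, so they can be
    # passed to TypeVar without quoting or typing.Tuple/typing.List.
    PairOrList = TypeVar("PairOrList", tuple[int, str], list[int])

    def passthrough(x: PairOrList) -> PairOrList:
        return x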

View File

@@ -1,5 +1,5 @@
 # Represents all kernels used by an Executorch model.
-# It maintains a Dict[OperatorName, Dict[ETKernelKey, BackendMetadata]] structure.
+# It maintains a dict[OperatorName, dict[ETKernelKey, BackendMetadata]] structure.

 from __future__ import annotations

View File

@@ -1,4 +1,4 @@
-from typing import Any, Optional, Tuple, Union
+from typing import Any, Optional, Union

 from torchgen.model import (
     Annotation,
@@ -80,8 +80,8 @@ class FunctionSchemaGen:
     @staticmethod
     def from_example(
         op_name: str,
-        example_inputs: Tuple[Tuple[str, Any], ...],
-        example_outputs: Tuple[Any, ...],
+        example_inputs: tuple[tuple[str, Any], ...],
+        example_outputs: tuple[Any, ...],
     ) -> FunctionSchema:
         args = []
         for name, inp in example_inputs:

View File

@@ -5,7 +5,7 @@ import itertools
 import re
 from dataclasses import dataclass
 from enum import auto, Enum
-from typing import Callable, List, TYPE_CHECKING
+from typing import Callable, TYPE_CHECKING

 from torchgen.utils import assert_never, NamespaceHelper, OrderedSet
@@ -249,7 +249,7 @@ class _TorchDispatchModeKey(Enum):

 def codegen_per_backend_entries() -> str:
-    r: List[str] = []
+    r: list[str] = []
     for fk in FUNCTIONALITY_KEYS:
         r.extend(f" {fk}{bc} = auto()" for bc in BACKEND_COMPONENTS)
     return "\n".join(r)
@@ -1518,7 +1518,7 @@ class FunctionSchema:
                 and self.returns[0].annotation == self_a.argument.annotation
             )
         else:
-            # You can't method chain on non-tensor self arguments though (like a List[Tensor])
+            # You can't method chain on non-tensor self arguments though (like a list[Tensor])
             # so in all other cases we expect the return type to be none.
             assert len(self.returns) == 0