[BE][Easy][12/19] enforce style for empty lines in import segments in test/i*/ (#129763)

See https://github.com/pytorch/pytorch/pull/129751#issue-2380881501. Most changes are auto-generated by the linter.
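
For illustration, here is a hypothetical hunk of the kind this lint produces (made-up module names, not taken from this PR): stray blank lines inside an import segment are removed, while the blank line separating the imports from the first statement is kept.

```diff
 import os
-
 import sys
-
 import torch
 
 HAS_GPU = torch.cuda.is_available()
```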

Because the changes only add or remove blank lines, you can review these PRs with a whitespace-insensitive diff:

```bash
git diff --ignore-all-space --ignore-blank-lines HEAD~1
```
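
To narrow the review to the directories this PR touches (test/i*/, per the title), a pathspec can be appended, for example:

```bash
git diff --ignore-all-space --ignore-blank-lines HEAD~1 -- 'test/i*/'
```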

Pull Request resolved: https://github.com/pytorch/pytorch/pull/129763
Approved by: https://github.com/jansel
Authored by Xuehai Pan on 2024-07-18 14:46:35 +08:00; committed by PyTorch MergeBot
parent dfc3347c4a
commit 134bc4fc34
74 changed files with 71 additions and 88 deletions

View File

@@ -1,7 +1,6 @@
 import sys
 import torch
 from torch.testing._internal.inductor_utils import GPU_TYPE

View File

@@ -4,6 +4,7 @@
 # https://docs.google.com/document/d/18L9e7bZSBpJ7gGbwlUV13LasmjiEX2lree2pl-SdbCU/edit
 import os
 os.environ["TORCHDYNAMO_REPRO_AFTER"] = "dynamo"
 import torch
 import torch._dynamo as torchdynamo

View File

@@ -3,6 +3,7 @@ import subprocess
 from torch.testing._internal.common_methods_invocations import op_db
 if __name__ == "__main__":
     i = 0
     while i < len(op_db):

View File

@@ -20,7 +20,6 @@ from torch._inductor import config
 from torch._inductor.exc import CppWrapperCodeGenError
 from torch._inductor.runtime.runtime_utils import cache_dir
 from torch._inductor.test_case import TestCase
 from torch.export import Dim, export
 from torch.testing import FileCheck
 from torch.testing._internal import common_utils
@@ -37,10 +36,10 @@ from torch.testing._internal.common_utils import (
     skipIfRocm,
     TEST_WITH_ROCM,
 )
 from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda
 from torch.utils import _pytree as pytree
 if HAS_CUDA:
     import triton

View File

@@ -11,6 +11,7 @@ from torch.testing._internal import common_utils
 from torch.testing._internal.common_utils import IS_FBCODE
 from torch.testing._internal.triton_utils import HAS_CUDA
 try:
     try:
         from .test_torchinductor import copy_tests

View File

@@ -5,9 +5,7 @@ import torch._export
 import torch._inductor
 import torch.export._trace
 import torch.fx._pytree as fx_pytree
 from torch.testing._internal.common_utils import IS_FBCODE
 from torch.utils import _pytree as pytree

View File

@@ -3,9 +3,7 @@ import os
 import unittest
 import torch
 import torch._inductor.config as inductor_config
 from torch._inductor.autoheuristic.autoheuristic import (
     AHContext,
     AutoHeuristic,

View File

@@ -13,9 +13,9 @@ from torch.testing._internal.common_utils import (
     slowTest,
     TEST_WITH_ASAN,
 )
 from torch.testing._internal.inductor_utils import HAS_CPU, HAS_CUDA
 # Make the helper files in test/ importable
 pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
 sys.path.append(pytorch_test_dir)

View File

@@ -11,6 +11,7 @@ from torch import nn
 from torch._inductor import config as inductor_config
 from torch.testing._internal.common_cuda import TEST_CUDNN
 # Make the helper files in test/ importable
 pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
 sys.path.append(pytorch_test_dir)
@@ -18,6 +19,7 @@ sys.path.append(pytorch_test_dir)
 from torch.testing._internal.common_utils import IS_CI, IS_WINDOWS, TEST_WITH_ASAN
 from torch.testing._internal.inductor_utils import skipCUDAIf
 if IS_WINDOWS and IS_CI:
     sys.stderr.write(
         "Windows CI does not have necessary dependencies for test_torchinductor yet\n"
@@ -29,11 +31,13 @@ if IS_WINDOWS and IS_CI:
 from inductor.test_inductor_freezing import TestCase
 from inductor.test_torchinductor import check_model, check_model_gpu, copy_tests
 importlib.import_module("functorch")
 importlib.import_module("filelock")
 from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_CPU, HAS_GPU
 aten = torch.ops.aten

View File

@@ -6,13 +6,13 @@ import unittest
 import torch
 from torch._inductor import config
 from torch._inductor.test_case import run_tests, TestCase
 from torch.testing._internal.common_utils import (
     instantiate_parametrized_tests,
     parametrize,
 )
 from torch.testing._internal.inductor_utils import HAS_CPU, HAS_CUDA
 torch.set_float32_matmul_precision("high")
 if HAS_CUDA:
     torch.cuda.memory._set_allocator_settings("expandable_segments:False")

View File

@@ -40,6 +40,7 @@ from torch.testing._internal.inductor_utils import (
 )
 from torch.utils._triton import has_triton
 HAS_TRITON = has_triton()
 if HAS_TRITON:

View File

@@ -4,14 +4,12 @@ import contextlib
 import sympy
 import torch
 import torch._inductor.config as inductor_config
 from torch._inductor.codegen import triton_utils
 from torch._inductor.codegen.common import SizeArg
 from torch._inductor.graph import GraphLowering
 from torch._inductor.test_case import TestCase as InductorTestCase
 from torch._inductor.virtualized import V
 from torch.testing._internal.inductor_utils import HAS_CPU, HAS_GPU

View File

@@ -7,7 +7,6 @@ from torch._inductor.compile_worker.subproc_pool import (
     SubprocException,
     SubprocPool,
 )
 from torch._inductor.test_case import TestCase
 from torch.testing._internal.inductor_utils import HAS_CPU

View File

@@ -20,6 +20,7 @@ from torch._inductor.test_case import run_tests, TestCase
 from torch.testing._internal.inductor_utils import HAS_CPU, HAS_CUDA
 from torch.testing._internal.logging_utils import logs_to_string
 # note: these tests are not run on windows due to inductor_utils.HAS_CPU

View File

@@ -4,19 +4,17 @@ import sys
 import unittest
 import weakref
 from contextlib import ExitStack
 from copy import deepcopy
 from typing import NamedTuple
 import torch
 import torch._inductor
 import torch._inductor.cudagraph_trees
 import torch.optim.lr_scheduler
 from torch._inductor import config
 from torch._inductor.test_case import TestCase
 from torch.optim import (
     Adadelta,
     Adagrad,
@@ -31,7 +27,6 @@ from torch.optim import (
     SGD,
     SparseAdam,
 )
 from torch.optim.lr_scheduler import (
     ChainedScheduler,
     ConstantLR,
@@ -48,18 +43,15 @@ from torch.optim.lr_scheduler import (
     ReduceLROnPlateau,
     StepLR,
 )
 from torch.testing._internal.common_device_type import (
     instantiate_device_type_tests,
     skipCUDAIf,
 )
 from torch.testing._internal.common_optimizers import (
     _get_optim_inputs_including_global_cliquey_kwargs,
     optim_db,
     optims,
 )
 from torch.testing._internal.common_utils import parametrize
 from torch.testing._internal.inductor_utils import HAS_CPU, HAS_CUDA, has_triton
 from torch.testing._internal.triton_utils import requires_cuda

View File

@@ -3,9 +3,7 @@ import math
 import unittest
 import torch
 from torch._inductor import config
 from torch._inductor.test_case import run_tests, TestCase
 from torch.testing._internal.inductor_utils import HAS_CPU

View File

@@ -3,7 +3,6 @@ import itertools
 import torch
 import torch._dynamo.testing
 from torch._inductor.test_case import TestCase
 from torch.testing._internal.common_utils import (
     instantiate_parametrized_tests,

View File

@@ -6,11 +6,11 @@ from unittest import mock
 import torch
 from torch._inductor.runtime.hints import TRITON_MAX_BLOCK
 from torch._inductor.test_case import run_tests, TestCase
 from torch.testing._internal.common_utils import IS_LINUX
 from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU
 try:
     import triton
 except ImportError:
@@ -21,6 +21,7 @@ except ImportError:
 from torch._inductor import config
 from torch._inductor.runtime.coordinate_descent_tuner import CoordescTuner
 config.benchmark_kernel = True
 config.coordinate_descent_tuning = True

View File

@@ -1,9 +1,7 @@
 # Owner(s): ["module: inductor"]
 import torch
 from torch._inductor.codegen.aoti_hipify_utils import maybe_hipify_code_wrapper
 from torch._inductor.codegen.codegen_device_driver import cuda_kernel_driver
 from torch._inductor.test_case import run_tests, TestCase

View File

@@ -45,6 +45,7 @@ from torch.testing._internal.common_utils import (
 )
 from torch.utils._python_dispatch import TorchDispatchMode
 try:
     try:
         from . import test_torchinductor

View File

@@ -1,7 +1,6 @@
 # Owner(s): ["oncall: cpu inductor"]
 import contextlib
 import functools
 import sys
 import unittest
 from typing import Optional
@@ -20,9 +19,9 @@ from torch.testing._internal.common_device_type import (
     instantiate_device_type_tests,
 )
 from torch.testing._internal.common_quantization import _generate_qdq_quantized_model
 from torch.testing._internal.common_utils import IS_MACOS, parametrize, TEST_MKL
 try:
     try:
         from . import test_torchinductor

View File

@@ -29,9 +29,9 @@ from torch.testing._internal.common_utils import (
     skipIfRocm,
     TEST_WITH_ASAN,
 )
 from torch.testing._internal.inductor_utils import skipCUDAIf
 try:
     try:
         import triton

View File

@@ -4,7 +4,6 @@ import ctypes
 import unittest
 import torch
 from torch._inductor import config
 from torch._inductor.async_compile import AsyncCompile
 from torch._inductor.codecache import CUDACodeCache
@@ -12,6 +11,7 @@ from torch._inductor.codegen.cuda.cuda_env import nvcc_exist
 from torch._inductor.exc import CUDACompileError
 from torch._inductor.test_case import TestCase as InductorTestCase
 _SOURCE_CODE = r"""
 #include <stdio.h>

View File

@@ -8,7 +8,6 @@ import unittest
 import warnings
 import torch
 import torch._dynamo.config as dynamo_config
 import torch.nn as nn
 from torch._dynamo.utils import counters
@@ -19,7 +18,6 @@ from torch._inductor.cudagraph_utils import FunctionID
 from torch._inductor.test_case import TestCase as InductorTestCase
 from torch.fx.experimental.proxy_tensor import make_fx
 from torch.testing import FileCheck
 from torch.testing._internal.common_cuda import TEST_MULTIGPU
 from torch.testing._internal.common_utils import (
     instantiate_parametrized_tests,
@@ -33,6 +31,7 @@ from torch.testing._internal.common_utils import (
 )
 from torch.utils._python_dispatch import TorchDispatchMode
 if IS_WINDOWS and IS_CI:
     sys.stderr.write(
         "Windows CI does not have necessary dependencies for test_torchinductor yet\n"
@@ -46,6 +45,7 @@ importlib.import_module("filelock")
 from torch.testing._internal.inductor_utils import HAS_CPU, HAS_CUDA
 aten = torch.ops.aten
 requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda")
 requires_multigpu = functools.partial(

View File

@@ -6,7 +6,6 @@ import pathlib
 import sys
 import torch
 from torch.testing._internal.common_cuda import IS_JETSON, IS_WINDOWS
 from torch.testing._internal.common_utils import (
     run_tests,
@@ -15,6 +14,7 @@ from torch.testing._internal.common_utils import (
 )
 from torch.testing._internal.inductor_utils import HAS_CUDA
 pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
 sys.path.append(pytorch_test_dir)
@@ -26,6 +26,7 @@ REPO_ROOT = pathlib.Path(__file__).resolve().parent.parent.parent
 sys.path.insert(0, str(REPO_ROOT))
 from tools.stats.import_test_stats import get_disabled_tests
 # Make sure to remove REPO_ROOT after import is done
 sys.path.remove(str(REPO_ROOT))

View File

@@ -4,13 +4,11 @@ import unittest
 from functools import partial
 import torch
 from torch._inductor.ir import Pointwise
 from torch._inductor.lowering import make_pointwise, register_lowering
 from torch._inductor.test_case import TestCase as InductorTestCase
 from torch._inductor.virtualized import ops
 from torch.testing._internal.common_utils import skipIfRocm
 from torch.testing._internal.inductor_utils import HAS_CPU, HAS_CUDA

View File

@@ -7,13 +7,10 @@ import torch
 import torch._inductor.pattern_matcher as pattern_matcher
 import torch.fx as fx
 from torch._dynamo.utils import counters
 from torch._inductor import config
 from torch._inductor.lowering import lowerings as L
 from torch._inductor.pattern_matcher import Arg, CallFunction, PatternMatcherPass
 from torch._inductor.test_case import run_tests, TestCase
 from torch.testing._internal.common_utils import IS_LINUX
 from torch.testing._internal.inductor_utils import HAS_CPU

View File

@@ -19,9 +19,9 @@ from torch.testing._internal.common_utils import (
     instantiate_parametrized_tests,
     parametrize,
 )
 from torch.testing._internal.inductor_utils import HAS_CPU, HAS_CUDA
 torch.set_float32_matmul_precision("high")
 if HAS_CUDA:
     torch.cuda.memory._set_allocator_settings("expandable_segments:False")

View File

@@ -9,13 +9,13 @@ from torch._dynamo.utils import counters
 from torch._inductor.test_case import run_tests, TestCase
 from torch._inductor.utils import run_and_get_code
 from torch.testing import FileCheck
 from torch.testing._internal.common_utils import (
     instantiate_parametrized_tests,
     parametrize,
 )
 from torch.testing._internal.inductor_utils import HAS_CUDA
 requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda")

View File

@@ -3,13 +3,11 @@ import contextlib
 import torch
 from torch._inductor.dependencies import MemoryDep
 from torch._inductor.graph import GraphLowering
 from torch._inductor.ir import Buffer, FixedLayout, Pointwise
 from torch._inductor.test_case import TestCase as InductorTestCase
 from torch._inductor.utils import sympy_index_symbol
 from torch._inductor.virtualized import ops, V
 from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_CPU, HAS_GPU

View File

@@ -10,6 +10,7 @@ from torch._dynamo.testing import CompileCounter
 from torch.testing._internal.common_utils import IS_MACOS, skipIfRocm, skipIfXpu
 from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_CPU, requires_gpu
 # Fake distributed
 WORLD_SIZE = 2

View File

@@ -9,6 +9,7 @@ import unittest
 import torch
 from torch import nn
 # Make the helper files in test/ importable
 pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
 sys.path.append(pytorch_test_dir)
@@ -16,11 +17,10 @@ sys.path.append(pytorch_test_dir)
 from torch._dynamo.utils import counters
 from torch._inductor import config as inductor_config
 from torch._inductor.test_case import TestCase
 from torch.testing._internal.common_utils import IS_CI, IS_WINDOWS, TEST_WITH_ASAN
 from torch.testing._internal.inductor_utils import HAS_CPU, HAS_CUDA
 if IS_WINDOWS and IS_CI:
     sys.stderr.write(
         "Windows CI does not have necessary dependencies for test_torchinductor yet\n"

View File

@@ -9,6 +9,7 @@ import torch._dynamo
 import torch.utils.cpp_extension
 from torch._C import FileCheck
 try:
     from extension_backends.cpp.extension_codegen_backend import (
         ExtensionCppWrapperCodegen,
@@ -32,6 +33,7 @@ from torch._inductor.codegen.common import (
 )
 from torch.testing._internal.common_utils import IS_FBCODE, IS_MACOS
 try:
     try:
         from . import test_torchinductor

View File

@@ -5,12 +5,10 @@ import functools
 import string
 from collections import namedtuple
 from typing import Callable, Optional
 from unittest import expectedFailure, skip, skipUnless
 from unittest.mock import patch
 import torch
 from torch._dynamo.testing import CompileCounterWithBackend, normalize_gm
 from torch._higher_order_ops.flex_attention import flex_attention as flex_attention_hop
 from torch._inductor import metrics
@@ -33,6 +31,7 @@ from torch.testing._internal import common_utils
 from torch.testing._internal.common_cuda import PLATFORM_SUPPORTS_BF16
 from torch.utils._triton import has_triton
 # Skip tests if Triton is not available
 supported_platform = skipUnless(
     torch.cuda.is_available()

View File

@@ -4,12 +4,10 @@
 import functools
 from collections import namedtuple
 from typing import Callable, Optional
 from unittest import expectedFailure, skip, skipUnless
 from unittest.mock import patch
 import torch
 from torch._higher_order_ops.flex_attention import flex_attention as flex_attention_hop
 from torch._inductor.test_case import TestCase as InductorTestCase
 from torch._inductor.utils import run_and_get_code
@@ -28,6 +26,7 @@ from torch.testing._internal import common_utils
 from torch.testing._internal.common_cuda import PLATFORM_SUPPORTS_BF16
 from torch.utils._triton import has_triton
 # Skip tests if Triton is not available
 supported_platform = skipUnless(
     torch.cuda.is_available()

View File

@@ -4,19 +4,17 @@ import sys
 import unittest
 import torch
 import torch._inductor
 from torch._inductor.test_case import TestCase
 from torch.testing._internal.common_utils import (
     instantiate_parametrized_tests,
     IS_FBCODE,
     parametrize,
 )
 from torch.testing._internal.inductor_utils import HAS_CPU, HAS_CUDA
 from torch.testing._internal.triton_utils import requires_cuda
 aten = torch.ops.aten
 try:

View File

@@ -15,6 +15,7 @@ from torch.testing._internal.common_utils import (
 )
 from torch.testing._internal.inductor_utils import HAS_CUDA
 torch.set_float32_matmul_precision("high")

View File

@@ -14,6 +14,7 @@ from torch._inductor.fx_passes.pre_grad import (
 from torch._inductor.test_case import run_tests, TestCase
 from torch.fx.passes.shape_prop import ShapeProp
 PassFunc = Callable[[torch.fx.GraphModule, Any], torch.fx.GraphModule]

View File

@@ -13,6 +13,7 @@ from torch.testing._internal.common_cuda import PLATFORM_SUPPORTS_FUSED_ATTENTION
 from torch.testing._internal.common_utils import IS_LINUX, skipIfRocm
 from torch.testing._internal.inductor_utils import HAS_CUDA
 try:
     import pydot  # noqa: F401

View File

@@ -11,6 +11,7 @@ from torch._dynamo.utils import counters, optimus_scuba_log
 from torch._inductor.test_case import run_tests, TestCase
 from torch.testing._internal.inductor_utils import HAS_CUDA
 try:
     # importing this will register fbgemm lowerings for inductor
     import deeplearning.fbgemm.fbgemm_gpu.fb.inductor_lowerings  # noqa: F401

View File

@@ -13,7 +13,6 @@ from torch._inductor.codecache import HalideCodeCache
 from torch._inductor.runtime.hints import HalideInputSpec, HalideMeta
 from torch._inductor.test_case import run_tests, TestCase
 from torch._inductor.utils import parallel_num_threads
 from torch.testing._internal.common_utils import IS_CI, IS_MACOS, IS_WINDOWS
 from torch.testing._internal.inductor_utils import HAS_CPU
 from torch.utils._triton import has_triton

View File

@@ -5,12 +5,10 @@ import unittest
 import sympy
 import torch
 from torch._inductor.codegen.cpp import cexpr
 from torch._inductor.codegen.triton import texpr
 from torch._inductor.codegen.wrapper import pexpr
 from torch._inductor.runtime.runtime_utils import do_bench_gpu
 from torch._inductor.sizevars import SizeVarAllocator
 from torch._inductor.test_case import TestCase as InductorTestCase
 from torch._inductor.utils import run_and_get_triton_code
@@ -26,6 +24,7 @@ from torch.utils._sympy.functions import (
     RoundToInt,
 )
 DO_PERF_TEST = os.environ.get("DO_PERF_TEST") == "1"

View File

@@ -9,7 +9,6 @@ import unittest
 import weakref
 import torch
 from torch import nn
 from torch._inductor import config
 from torch._inductor.test_case import TestCase as InductorTestCase
@@ -18,6 +17,7 @@ from torch.testing import FileCheck
 from torch.testing._internal.common_cuda import SM80OrLater
 from torch.testing._internal.common_utils import skipIfRocm
 # Make the helper files in test/ importable
 pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
 sys.path.append(pytorch_test_dir)
@@ -29,6 +29,7 @@ from torch.testing._internal.common_utils import (
     TEST_WITH_ROCM,
 )
 if IS_WINDOWS and IS_CI:
     sys.stderr.write(
         "Windows CI does not have necessary dependencies for test_torchinductor yet\n"
@@ -39,11 +40,13 @@ if IS_WINDOWS and IS_CI:
 from inductor.test_torchinductor import check_model, check_model_cuda, copy_tests
 importlib.import_module("functorch")
 importlib.import_module("filelock")
 from torch.testing._internal.inductor_utils import HAS_CPU, HAS_CUDA
 aten = torch.ops.aten
 prims = torch.ops.prims
 requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda")

View File

@@ -5,11 +5,10 @@ import logging
 import torch
 from torch._inductor.runtime.runtime_utils import do_bench
 from torch._inductor.test_case import run_tests, TestCase
 from torch._inductor.utils import do_bench_using_profiling
 log = logging.getLogger(__name__)

View File

@@ -5,6 +5,7 @@ from torch._inductor.test_case import run_tests, TestCase
 from torch.testing._internal.common_utils import IS_LINUX
 from torch.testing._internal.inductor_utils import HAS_CUDA
 aten = torch.ops.aten

View File

@@ -11,6 +11,7 @@ from torch._inductor.test_case import run_tests, TestCase
 from torch.testing._internal.common_cuda import tf32_off
 from torch.testing._internal.inductor_utils import HAS_CUDA
 USE_DDP_WRAPPER = os.environ.get("USE_DDP_WRAPPER", "1") == "1"

View File

@@ -7,6 +7,7 @@ from torch._inductor import config as inductor_config, metrics
 from torch._inductor.test_case import run_tests, TestCase
 from torch.testing._internal.inductor_utils import HAS_CUDA
 if HAS_CUDA:
     torch.set_default_device("cuda")

View File

@@ -2,7 +2,6 @@
 import json
 import os
 import unittest
 from typing import Callable, List, Optional
 import torch
@@ -24,7 +23,6 @@ from torch._inductor.select_algorithm import (
     TritonTemplateCaller,
 )
 from torch._inductor.test_case import run_tests, TestCase
 from torch._inductor.utils import fresh_inductor_cache, run_and_get_code
 from torch._inductor.virtualized import V
 from torch.fx.experimental.proxy_tensor import make_fx
@@ -34,9 +32,9 @@ from torch.testing._internal.common_utils import (
     parametrize,
     skipIfRocm,
 )
 from torch.testing._internal.inductor_utils import HAS_CPU, HAS_CUDA
 torch.set_float32_matmul_precision("high")
 if HAS_CUDA:
     torch.cuda.memory._set_allocator_settings("expandable_segments:False")

View File

@@ -1,12 +1,12 @@
 # Owner(s): ["module: inductor"]
 import sys
 import unittest
 from torch.testing._internal.common_utils import IS_CI, IS_WINDOWS, skipIfRocm
 from torch.testing._internal.inductor_utils import HAS_CUDA
 if IS_WINDOWS and IS_CI:
     sys.stderr.write(
         "Windows CI does not have necessary dependencies for test_memory_planning yet\n"

View File

@@ -5,9 +5,9 @@ from torch._inductor.test_case import run_tests, TestCase
 from torch._inductor.utils import collect_defined_kernels
 from torch._inductor.wrapper_benchmark import get_kernel_category_by_source_code
 from torch.testing._internal.common_device_type import largeTensorTest
 from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU
 example_kernel = """
 @triton_heuristics.reduction(
     size_hints=[1024, 2048],

View File

@@ -11,6 +11,7 @@ from torch.testing._internal.common_utils import (
 )
 from torch.testing._internal.inductor_utils import HAS_CUDA
 requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda")

View File

@@ -6,7 +6,6 @@ import unittest
 import torch
 import torch.ao.quantization.quantizer.x86_inductor_quantizer as xiq
 from torch._dynamo import config as dynamo_config
 from torch._dynamo.utils import counters
 from torch._inductor import config, metrics

View File

@@ -12,6 +12,7 @@ from torch.testing._internal.common_nn import NNTestCase
 from torch.testing._internal.common_utils import IS_WINDOWS, parametrize, run_tests
 from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU
 default_atol = {
     torch.float16: 1e-3,
     torch.bfloat16: float("infinity"),

View File

@@ -11,6 +11,7 @@ from torch.testing._internal.common_cuda import TEST_MULTIGPU
 from torch.testing._internal.common_utils import IS_LINUX
 from torch.testing._internal.inductor_utils import HAS_CUDA
 requires_multigpu = functools.partial(
     unittest.skipIf, not TEST_MULTIGPU, "requires multiple cuda devices"
 )

View File

@@ -7,7 +7,6 @@ import unittest
 import torch
 from torch import nn
 from torch._dynamo.testing import reset_rng_state
 from torch._inductor import config, test_operators
 from torch._inductor.codegen.multi_kernel import MultiKernelCall
 from torch._inductor.test_case import TestCase

View File

@@ -2,7 +2,6 @@
 import unittest
 import torch
 import torch._inductor.config as inductor_config
 from torch._dynamo.testing import rand_strided
 from torch._inductor.fx_passes.pad_mm import (
@@ -11,7 +10,6 @@ from torch._inductor.fx_passes.pad_mm import (
     get_padded_length,
     should_pad_common,
 )
 from torch._inductor.test_case import run_tests, TestCase
 from torch._inductor.utils import fresh_inductor_cache, run_and_get_code
 from torch.testing import FileCheck

View File

@@ -1,6 +1,5 @@
 # Owner(s): ["module: inductor"]
 import copy
 import functools
 import os
 import unittest
@@ -17,6 +16,7 @@ from torch._inductor.utils import run_and_get_code
 from torch.testing._internal.common_utils import serialTest
 from torch.testing._internal.inductor_utils import HAS_CUDA
 DO_PERF_TEST = os.environ.get("DO_PERF_TEST") == "1"
 DO_ACC_TEST = os.environ.get("DO_ACC_TEST", "1") == "1"
 WITH_STACK = os.environ.get("WITH_STACK") == "1"

View File

@@ -11,7 +11,6 @@ import torch.nn.functional as F
 from torch._dynamo.utils import count_calls, counters
 from torch._higher_order_ops.out_dtype import out_dtype
 from torch._inductor.fx_passes import joint_graph
 from torch._inductor.pattern_matcher import (
     Arg,
     CallFunction,

View File

@@ -3,7 +3,6 @@ import contextlib
 from unittest.mock import patch
 import functorch
 import torch
 import torch._inductor.config as config
 import torch.autograd
@@ -25,10 +24,11 @@ from torch._inductor.test_case import TestCase as InductorTestCase
 #
 # That may still be aceeptable, but be aware that you are likely lowering
 # performance for that setting.
+#
 # Defines all the kernels for tests
 from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda
 if HAS_CUDA:
     from torch.testing._internal.triton_utils import add_kernel

View File

@@ -5,15 +5,13 @@ import unittest
 import torch
 import torch._inductor.test_case
 import torch._inductor.utils
 from torch._inductor import config
 from torch.profiler import ProfilerActivity
 from torch.testing._internal.common_utils import TemporaryFileName
 from torch.testing._internal.inductor_utils import HAS_CUDA
 from torch.utils._triton import has_triton
 HAS_TRITON = has_triton()

View File

@@ -12,6 +12,7 @@ from torch._inductor.runtime.runtime_utils import do_bench_gpu as do_bench
 from torch._inductor.test_case import TestCase
 from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU
 DO_PERF_TEST = os.environ.get("DO_PERF_TEST") == "1"

View File

@@ -11,10 +11,10 @@ from torch._dynamo.testing import expectedFailureDynamicWrapper
 from torch._dynamo.utils import counters
 from torch._inductor.autotune_process import TritonBenchmarkRequest
 from torch._inductor.test_case import run_tests, TestCase
 from torch.testing._internal.common_utils import IS_LINUX, skipIfRocm
 from torch.testing._internal.inductor_utils import HAS_CUDA
 aten = torch.ops.aten

View File

@@ -4,7 +4,6 @@ import unittest
 import torch
 import torch._logging
 from torch._inductor.test_case import TestCase
 from torch.testing._internal.common_utils import IS_LINUX
 from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_CUDA, HAS_GPU

View File

@@ -5,7 +5,6 @@ from unittest import skipIf
 import torch
 import torch.distributed as dist
 from torch._inductor import config, metrics
 from torch._inductor.comm_analysis import estimate_nccl_collective_runtime
 from torch._inductor.compile_fx import compile_fx, compile_fx_inner
@@ -13,6 +12,7 @@ from torch._inductor.test_case import TestCase as InductorTestCase
 from torch._inductor.utils import is_collective
 from torch.testing._internal.inductor_utils import HAS_CUDA
 aten = torch.ops.aten
 c10d = torch.ops.c10d_functional
 _c10d = torch.ops._c10d_functional

View File

@@ -6,7 +6,6 @@ import torch._inductor
 import torch._inductor.decomposition
 from torch._higher_order_ops.torchbind import enable_torchbind_tracing
 from torch._inductor.test_case import run_tests, TestCase
 from torch.testing._internal.torchbind_impls import init_torchbind_implementations

View File

@@ -26,7 +26,6 @@ from unittest.mock import patch
 import numpy as np
 import torch
 import torch._dynamo.config as dynamo_config
 import torch._inductor.aoti_eager
 import torch.nn as nn
@@ -67,7 +66,6 @@ from torch.testing._internal.common_cuda import (
     TEST_CUDNN,
     with_tf32_off,
 )
 from torch.testing._internal.common_device_type import (
     _has_sufficient_memory,
     expectedFailureXPU,
@@ -95,6 +93,7 @@ from torch.utils._python_dispatch import TorchDispatchMode
 from torch.utils._pytree import tree_flatten, tree_unflatten
 from torch.utils.weak import WeakTensorKeyDictionary
 DO_PERF_TEST = os.environ.get("DO_PERF_TEST") == "1"
 if IS_WINDOWS and IS_CI:
@@ -109,14 +108,12 @@ importlib.import_module("functorch")
 importlib.import_module("filelock")
 from torch._inductor import config, test_operators
 from torch._inductor.compile_fx import (
     compile_fx,
     compile_fx_inner,
     complex_memory_overlap,
 )
 from torch._inductor.utils import has_torchvision_roi_align
 from torch.testing._internal.common_utils import slowTest
 from torch.testing._internal.inductor_utils import (
     GPU_TYPE,
@@ -128,6 +125,7 @@ from torch.testing._internal.inductor_utils import (
     skipCUDAIf,
 )
 HAS_AVX2 = "fbgemm" in torch.backends.quantized.supported_engines
 aten = torch.ops.aten

View File

@@ -20,6 +20,7 @@ from torch.testing._internal.inductor_utils import (
     HAS_GPU,
 )
 if IS_WINDOWS and IS_CI:
     sys.stderr.write(
         "Windows CI does not have necessary dependencies for test_torchinductor_codegen_dynamic_shapes yet\n"

View File

@@ -1,7 +1,6 @@
 # Owner(s): ["module: inductor"]
 import contextlib
 import importlib
 import math
 import operator
 import os
@@ -37,6 +36,7 @@ from torch.testing._internal.common_utils import (
 )
 from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_CPU, HAS_GPU
 if IS_WINDOWS and IS_CI:
     sys.stderr.write(
         "Windows CI does not have necessary dependencies for test_torchinductor_dynamic_shapes yet\n"
@@ -56,6 +56,7 @@ from inductor.test_torchinductor import (
     TestFailure,
 )
 importlib.import_module("filelock")
 # xfail by default, set is_skip=True to skip

View File

@@ -11,7 +11,6 @@ from functools import partial
 from unittest.mock import patch
 import torch
 from torch._dispatch.python import enable_python_dispatcher
 from torch._inductor.test_case import run_tests, TestCase
 from torch._subclasses.fake_tensor import (
@@ -45,6 +44,7 @@ from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_CPU, HAS_CUDA
 from torch.utils._python_dispatch import TorchDispatchMode
 from torch.utils._pytree import tree_map
 try:
     try:
         from .test_torchinductor import check_model, check_model_gpu
@@ -263,6 +263,7 @@ intentionally_not_handled = {
 # We should eventually always turn it on
 import torch._functorch.config as functorch_config
 if not functorch_config.view_replay_for_aliased_outputs:
     intentionally_not_handled['("as_strided", "partial_views")'] = {
         b8,

View File

@@ -8,6 +8,7 @@ import torch
 import torch._dynamo
 import torch.utils.cpp_extension
 try:
     from extension_backends.triton.device_interface import DeviceInterface
     from extension_backends.triton.extension_codegen_backend import (
@@ -35,6 +36,7 @@ from torch._inductor.codegen.common import (
 from torch._inductor.utils import get_triton_code
 from torch.testing._internal.common_utils import IS_MACOS
 try:
     try:
         from . import test_torchinductor

View File

@@ -4,10 +4,10 @@ import sys
 import unittest
 import torch
 from torch.testing._internal.common_utils import IS_LINUX, skipIfXpu
 from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU
 try:
     import triton  # noqa: F401
 except ImportError:

View File

@@ -6,9 +6,7 @@ from unittest.mock import patch
 import torch
 import torch._dynamo.testing
 import torch._inductor.test_case
 from torch._higher_order_ops.triton_kernel_wrap import (
     generate_ttir,
     triton_kernel_wrapper_functional,
@@ -19,10 +17,11 @@ from torch._inductor.utils import run_and_get_code
 from torch._library import capture_triton
 from torch.testing._internal import common_utils
 from torch.testing._internal.common_utils import skipIfRocm, skipIfXpu, TEST_WITH_ROCM
-from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_CUDA, HAS_GPU, HAS_XPU
 # Defines all the kernels for tests
 from torch.testing._internal.triton_utils import *  # noqa: F403
+from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_CUDA, HAS_GPU, HAS_XPU
 if HAS_GPU:
     import triton

View File

@@ -7,6 +7,7 @@ import unittest
 import torch
 from torch.testing._internal.common_utils import IS_CI, IS_WINDOWS
 if IS_WINDOWS and IS_CI:
     sys.stderr.write(
         "Windows CI does not have necessary dependencies for test_xpu_basic yet\n"

View File

@@ -43,7 +43,6 @@ ISORT_SKIPLIST = re.compile(
     "test/dy*/**",
     # test/[e-h]*/**
     # test/i*/**
-    "test/i*/**",
     # test/j*/**
     "test/j*/**",
     # test/[k-p]*/**