diff --git a/test/export/test_export.py b/test/export/test_export.py
index 8896ef3983a..db67266632c 100644
--- a/test/export/test_export.py
+++ b/test/export/test_export.py
@@ -84,7 +84,7 @@ except ImportError:
 try:
     from . import testing
 except ImportError:
-    import testing
+    import testing  # @manual=fbcode//caffe2/test:test_export-library
 # The following import pattern matters as `test_export.export` is patched
 # in other files (like test_export_nonstrict.py). `torch.export.export`
 # will invalidate the patch.
diff --git a/test/export/test_export_nonstrict.py b/test/export/test_export_nonstrict.py
index c368eb069a2..99944e4841b 100644
--- a/test/export/test_export_nonstrict.py
+++ b/test/export/test_export_nonstrict.py
@@ -3,8 +3,8 @@ try:
     from . import test_export, testing
 except ImportError:
-    import test_export
-    import testing
+    import test_export  # @manual=fbcode//caffe2/test:test_export-library
+    import testing  # @manual=fbcode//caffe2/test:test_export-library

 from torch.export import export
diff --git a/test/export/test_export_training_ir_to_run_decomp.py b/test/export/test_export_training_ir_to_run_decomp.py
index 6f780c1fec8..3cab41c6d09 100644
--- a/test/export/test_export_training_ir_to_run_decomp.py
+++ b/test/export/test_export_training_ir_to_run_decomp.py
@@ -5,9 +5,9 @@ import torch
 try:
     from . import test_export, testing
 except ImportError:
-    import test_export
+    import test_export  # @manual=fbcode//caffe2/test:test_export-library

-    import testing
+    import testing  # @manual=fbcode//caffe2/test:test_export-library

 test_classes = {}
diff --git a/test/export/test_retraceability.py b/test/export/test_retraceability.py
index d3f914188cc..e7f243fd9fb 100644
--- a/test/export/test_retraceability.py
+++ b/test/export/test_retraceability.py
@@ -3,8 +3,8 @@ try:
     from . import test_export, testing
 except ImportError:
-    import test_export
-    import testing
+    import test_export  # @manual=fbcode//caffe2/test:test_export-library
+    import testing  # @manual=fbcode//caffe2/test:test_export-library

 from torch.export import export
diff --git a/test/export/test_serdes.py b/test/export/test_serdes.py
index 59b83f22c3d..a1ced9dd4e5 100644
--- a/test/export/test_serdes.py
+++ b/test/export/test_serdes.py
@@ -6,8 +6,8 @@ import io
 try:
     from . import test_export, testing
 except ImportError:
-    import test_export
-    import testing
+    import test_export  # @manual=fbcode//caffe2/test:test_export-library
+    import testing  # @manual=fbcode//caffe2/test:test_export-library

 from torch.export import export, load, save
diff --git a/test/export/testing.py b/test/export/testing.py
index 054e3a611df..3647d4c9edd 100644
--- a/test/export/testing.py
+++ b/test/export/testing.py
@@ -226,7 +226,7 @@ def _make_fn_with_mocked_export(fn, mocked_export_fn):
         try:
             from . import test_export
         except ImportError:
-            import test_export
+            import test_export  # @manual=fbcode//caffe2/test:test_export-library

         with patch(f"{test_export.__name__}.export", mocked_export_fn):
             return fn(*args, **kwargs)
diff --git a/test/inductor/custom_ops.cpp b/test/inductor/custom_ops.cpp
index 360a2d0b862..39c1098d95b 100644
--- a/test/inductor/custom_ops.cpp
+++ b/test/inductor/custom_ops.cpp
@@ -1,4 +1,4 @@
-#include
+#include // @manual=fbcode//caffe2:libtorch

 #include
 #include
diff --git a/test/inductor/test_aot_inductor.py b/test/inductor/test_aot_inductor.py
index 2c85c488b2b..4dee53c4dc4 100644
--- a/test/inductor/test_aot_inductor.py
+++ b/test/inductor/test_aot_inductor.py
@@ -45,7 +45,7 @@ from torch.utils import _pytree as pytree


 if HAS_CUDA:
-    import triton
+    import triton  # @manual

     from torch.testing._internal.triton_utils import (
         add_kernel,
@@ -76,14 +76,20 @@ try:
     )
     from .test_torchinductor import copy_tests, requires_multigpu, TestFailure
 except ImportError:
-    from test_aot_inductor_utils import AOTIRunnerUtil
-    from test_control_flow import (
+    from test_aot_inductor_utils import (
+        AOTIRunnerUtil,  # @manual=fbcode//caffe2/test/inductor:aot_inductor_utils-library
+    )
+    from test_control_flow import (  # @manual=fbcode//caffe2/test/inductor:control_flow-library
         CondModels,
         prepend_counters,
         prepend_predicates,
         WhileLoopModels,
     )
-    from test_torchinductor import copy_tests, requires_multigpu, TestFailure
+    from test_torchinductor import (  # @manual=fbcode//caffe2/test/inductor:test_inductor-library
+        copy_tests,
+        requires_multigpu,
+        TestFailure,
+    )
 except (unittest.SkipTest, ImportError) as e:
     if __name__ == "__main__":
         sys.exit(0)
diff --git a/test/inductor/test_aot_inductor_utils.py b/test/inductor/test_aot_inductor_utils.py
index 27433876c9c..425e87bcfbb 100644
--- a/test/inductor/test_aot_inductor_utils.py
+++ b/test/inductor/test_aot_inductor_utils.py
@@ -67,7 +67,7 @@ class AOTIRunnerUtil:
     @staticmethod
     def load_runner(device, so_path):
         if IS_FBCODE:
-            from .fb import test_aot_inductor_model_runner_pybind
+            from .fb import test_aot_inductor_model_runner_pybind  # @manual

             with tempfile.TemporaryDirectory() as temp_dir:
                 # copy *.so file to a unique path just before loading
diff --git a/test/inductor/test_benchmark_fusion.py b/test/inductor/test_benchmark_fusion.py
index 706c2d9ae76..9eb25aa305a 100644
--- a/test/inductor/test_benchmark_fusion.py
+++ b/test/inductor/test_benchmark_fusion.py
@@ -20,7 +20,11 @@ sys.path.append(pytorch_test_dir)
 import contextlib
 import unittest

-from inductor.test_torchinductor import check_model, check_model_cuda, copy_tests
+from inductor.test_torchinductor import (  # @manual=fbcode//caffe2/test/inductor:test_inductor-library
+    check_model,
+    check_model_cuda,
+    copy_tests,
+)

 from torch._inductor import config
 from torch._inductor.scheduler import Scheduler
diff --git a/test/inductor/test_binary_folding.py b/test/inductor/test_binary_folding.py
index a8b39c1bba0..20f613fc746 100644
--- a/test/inductor/test_binary_folding.py
+++ b/test/inductor/test_binary_folding.py
@@ -15,8 +15,14 @@ from torch.testing._internal.common_cuda import TEST_CUDNN

 pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
 sys.path.append(pytorch_test_dir)
-from inductor.test_inductor_freezing import TestCase
-from inductor.test_torchinductor import check_model, check_model_gpu, copy_tests
+from inductor.test_inductor_freezing import (
+    TestCase,  # @manual=fbcode//caffe2/test/inductor:inductor_freezing-library
+)
+from inductor.test_torchinductor import (  # @manual=fbcode//caffe2/test/inductor:test_inductor-library
+    check_model,
+    check_model_gpu,
+    copy_tests,
+)

 from torch.testing._internal.common_utils import TEST_WITH_ASAN
 from torch.testing._internal.inductor_utils import skipCUDAIf
diff --git a/test/inductor/test_ck_backend.py b/test/inductor/test_ck_backend.py
index 94e53ad1b35..dc507e42aeb 100644
--- a/test/inductor/test_ck_backend.py
+++ b/test/inductor/test_ck_backend.py
@@ -43,7 +43,7 @@ class TestCKBackend(TestCase):
         torch.random.manual_seed(1234)

         try:
-            import ck4inductor
+            import ck4inductor  # @manual

             self.ck_dir = os.path.dirname(ck4inductor.__file__)
             os.environ["TORCHINDUCTOR_CK_DIR"] = self.ck_dir
diff --git a/test/inductor/test_combo_kernels.py b/test/inductor/test_combo_kernels.py
index 0f997d01535..bccdacab2a6 100644
--- a/test/inductor/test_combo_kernels.py
+++ b/test/inductor/test_combo_kernels.py
@@ -20,7 +20,10 @@ try:
     try:
         from .test_torchinductor import check_model, check_model_cuda
     except ImportError:
-        from test_torchinductor import check_model, check_model_cuda
+        from test_torchinductor import (  # @manual=fbcode//caffe2/test/inductor:test_inductor-library
+            check_model,
+            check_model_cuda,
+        )
 except (unittest.SkipTest, ImportError) as e:
     sys.stderr.write(f"{type(e)}: {e}\n")
     if __name__ == "__main__":
diff --git a/test/inductor/test_compiled_optimizers.py b/test/inductor/test_compiled_optimizers.py
index abd62fbd584..b7fde0bb9fa 100644
--- a/test/inductor/test_compiled_optimizers.py
+++ b/test/inductor/test_compiled_optimizers.py
@@ -270,7 +270,10 @@ try:
     try:
         from .test_torchinductor import check_model, check_model_gpu
     except ImportError:
-        from test_torchinductor import check_model, check_model_gpu
+        from test_torchinductor import (  # @manual=fbcode//caffe2/test/inductor:test_inductor-library
+            check_model,
+            check_model_gpu,
+        )
 except (unittest.SkipTest, ImportError) as e:
     sys.stderr.write(f"{type(e)}: {e}\n")
     if __name__ == "__main__":
@@ -835,7 +838,7 @@ class CompiledOptimizerTests(TestCase):
         try:
             from . import s429861_repro
         except ImportError:
-            import s429861_repro
+            import s429861_repro  # @manual

         forward = s429861_repro.forward
diff --git a/test/inductor/test_coordinate_descent_tuner.py b/test/inductor/test_coordinate_descent_tuner.py
index dbe92859a08..bedb9a64727 100644
--- a/test/inductor/test_coordinate_descent_tuner.py
+++ b/test/inductor/test_coordinate_descent_tuner.py
@@ -12,7 +12,7 @@ from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU

 try:
-    import triton
+    import triton  # @manual
 except ImportError:
     if __name__ == "__main__":
         sys.exit(0)
diff --git a/test/inductor/test_cpu_cpp_wrapper.py b/test/inductor/test_cpu_cpp_wrapper.py
index 8063d0545ac..5dfe5487799 100644
--- a/test/inductor/test_cpu_cpp_wrapper.py
+++ b/test/inductor/test_cpu_cpp_wrapper.py
@@ -28,11 +28,11 @@ try:
         test_torchinductor_dynamic_shapes,
     )
 except ImportError:
-    import test_cpu_repro
-    import test_cpu_select_algorithm
-    import test_mkldnn_pattern_matcher
-    import test_torchinductor
-    import test_torchinductor_dynamic_shapes
+    import test_cpu_repro  # @manual=fbcode//caffe2/test/inductor:test_cpu_repro-library
+    import test_cpu_select_algorithm  # @manual=fbcode//caffe2/test/inductor:cpu_select_algorithm_cpu-library
+    import test_mkldnn_pattern_matcher  # @manual
+    import test_torchinductor  # @manual=fbcode//caffe2/test/inductor:test_inductor-library
+    import test_torchinductor_dynamic_shapes  # @manual=fbcode//caffe2/test/inductor:test_inductor-library_dynamic_shapes
 except unittest.SkipTest:
     if __name__ == "__main__":
         sys.exit(0)
diff --git a/test/inductor/test_cpu_repro.py b/test/inductor/test_cpu_repro.py
index f90b9da1f11..bc65658a1e2 100644
--- a/test/inductor/test_cpu_repro.py
+++ b/test/inductor/test_cpu_repro.py
@@ -42,7 +42,7 @@ try:
     try:
         from . import test_torchinductor
     except ImportError:
-        import test_torchinductor
+        import test_torchinductor  # @manual=fbcode//caffe2/test/inductor:test_inductor-library
 except unittest.SkipTest:
     if __name__ == "__main__":
         sys.exit(0)
diff --git a/test/inductor/test_cpu_select_algorithm.py b/test/inductor/test_cpu_select_algorithm.py
index 2034fa05653..320c51087b2 100644
--- a/test/inductor/test_cpu_select_algorithm.py
+++ b/test/inductor/test_cpu_select_algorithm.py
@@ -39,8 +39,8 @@ try:
     try:
         from . import test_cpu_repro, test_torchinductor
     except ImportError:
-        import test_cpu_repro
-        import test_torchinductor
+        import test_cpu_repro  # @manual=fbcode//caffe2/test/inductor:test_cpu_repro-library
+        import test_torchinductor  # @manual=fbcode//caffe2/test/inductor:test_inductor-library
 except unittest.SkipTest:
     if __name__ == "__main__":
         sys.exit(0)
diff --git a/test/inductor/test_cuda_cpp_wrapper.py b/test/inductor/test_cuda_cpp_wrapper.py
index 241bad352cc..2a01509b26c 100644
--- a/test/inductor/test_cuda_cpp_wrapper.py
+++ b/test/inductor/test_cuda_cpp_wrapper.py
@@ -26,11 +26,11 @@ try:
 except ImportError:
     import test_combo_kernels

-    import test_foreach
-    import test_pattern_matcher
-    import test_select_algorithm
-    import test_torchinductor
-    import test_torchinductor_dynamic_shapes
+    import test_foreach  # @manual=fbcode//caffe2/test/inductor:foreach-library
+    import test_pattern_matcher  # @manual=fbcode//caffe2/test/inductor:pattern_matcher-library
+    import test_select_algorithm  # @manual=fbcode//caffe2/test/inductor:select_algorithm-library
+    import test_torchinductor  # @manual=fbcode//caffe2/test/inductor:test_inductor-library
+    import test_torchinductor_dynamic_shapes  # @manual=fbcode//caffe2/test/inductor:test_inductor-library_dynamic_shapes
 except unittest.SkipTest:
     if __name__ == "__main__":
         sys.exit(0)
diff --git a/test/inductor/test_cuda_repro.py b/test/inductor/test_cuda_repro.py
index 29653ffcda3..852b56e6326 100644
--- a/test/inductor/test_cuda_repro.py
+++ b/test/inductor/test_cuda_repro.py
@@ -38,15 +38,15 @@ from torch.testing._internal.inductor_utils import skipCUDAIf

 try:
     try:
-        import triton
-        from triton import language as tl
+        import triton  # @manual
+        from triton import language as tl  # @manual
     except ImportError:
         raise unittest.SkipTest("requires triton")  # noqa: B904

     try:
         from . import test_torchinductor
     except ImportError:
-        import test_torchinductor
+        import test_torchinductor  # @manual=fbcode//caffe2/test/inductor:test_inductor-library
 except unittest.SkipTest:
     if __name__ == "__main__":
         sys.exit(0)
diff --git a/test/inductor/test_cudagraph_trees_expandable_segments.py b/test/inductor/test_cudagraph_trees_expandable_segments.py
index 9f57aa0d30a..aa1e85fd82d 100644
--- a/test/inductor/test_cudagraph_trees_expandable_segments.py
+++ b/test/inductor/test_cudagraph_trees_expandable_segments.py
@@ -18,12 +18,14 @@ if HAS_CUDA and not TEST_WITH_ASAN:
     try:
         from .test_cudagraph_trees import CudaGraphTreeTests
     except ImportError:
-        from test_cudagraph_trees import CudaGraphTreeTests  # noqa: F401
+        from test_cudagraph_trees import (  # noqa: F401 # @manual=fbcode//caffe2/test/inductor:cudagraph_trees-library
+            CudaGraphTreeTests,
+        )

 REPO_ROOT = pathlib.Path(__file__).resolve().parent.parent.parent
 sys.path.insert(0, str(REPO_ROOT))

-from tools.stats.import_test_stats import get_disabled_tests
+from tools.stats.import_test_stats import get_disabled_tests  # @manual


 # Make sure to remove REPO_ROOT after import is done
diff --git a/test/inductor/test_debug_trace.py b/test/inductor/test_debug_trace.py
index 304cba00110..701d4e6cd9f 100644
--- a/test/inductor/test_debug_trace.py
+++ b/test/inductor/test_debug_trace.py
@@ -18,7 +18,7 @@ try:
     try:
         from . import test_torchinductor
     except ImportError:
-        import test_torchinductor
+        import test_torchinductor  # @manual=fbcode//caffe2/test/inductor:test_inductor-library
 except unittest.SkipTest:
     if __name__ == "__main__":
         sys.exit(0)
diff --git a/test/inductor/test_efficient_conv_bn_eval.py b/test/inductor/test_efficient_conv_bn_eval.py
index c1864168272..90628a4c6a1 100644
--- a/test/inductor/test_efficient_conv_bn_eval.py
+++ b/test/inductor/test_efficient_conv_bn_eval.py
@@ -23,7 +23,9 @@ from torch.testing._internal.inductor_utils import HAS_CPU, HAS_CUDA
 importlib.import_module("functorch")
 importlib.import_module("filelock")

-from inductor.test_torchinductor import copy_tests
+from inductor.test_torchinductor import (
+    copy_tests,  # @manual=fbcode//caffe2/test/inductor:test_inductor-library
+)


 class ConvOp(nn.Module):
diff --git a/test/inductor/test_extension_backend.py b/test/inductor/test_extension_backend.py
index 22fea818b56..6f972e46a1d 100644
--- a/test/inductor/test_extension_backend.py
+++ b/test/inductor/test_extension_backend.py
@@ -11,7 +11,7 @@ from torch._C import FileCheck

 try:
-    from extension_backends.cpp.extension_codegen_backend import (
+    from extension_backends.cpp.extension_codegen_backend import (  # @manual=fbcode//caffe2/test/inductor/extension_backends:extension_codegen_backend # noqa: B950
         ExtensionCppWrapperCodegen,
         ExtensionScheduling,
         ExtensionWrapperCodegen,
@@ -38,7 +38,7 @@ try:
     try:
         from . import test_torchinductor
     except ImportError:
-        import test_torchinductor
+        import test_torchinductor  # @manual=fbcode//caffe2/test/inductor:test_inductor-library
 except unittest.SkipTest:
     if __name__ == "__main__":
         sys.exit(0)
diff --git a/test/inductor/test_foreach.py b/test/inductor/test_foreach.py
index 5d30af0f79d..e1ba38af845 100644
--- a/test/inductor/test_foreach.py
+++ b/test/inductor/test_foreach.py
@@ -21,7 +21,10 @@ try:
     try:
         from .test_torchinductor import check_model, check_model_cuda
     except ImportError:
-        from test_torchinductor import check_model, check_model_cuda
+        from test_torchinductor import (  # @manual=fbcode//caffe2/test/inductor:test_inductor-library
+            check_model,
+            check_model_cuda,
+        )
 except (unittest.SkipTest, ImportError) as e:
     sys.stderr.write(f"{type(e)}: {e}\n")
     if __name__ == "__main__":
diff --git a/test/inductor/test_halide.py b/test/inductor/test_halide.py
index 806d71b6605..a54a9d71ba8 100644
--- a/test/inductor/test_halide.py
+++ b/test/inductor/test_halide.py
@@ -27,7 +27,7 @@ if IS_WINDOWS and IS_CI:
     raise unittest.SkipTest("requires sympy/functorch/filelock")

 try:
-    import halide
+    import halide  # @manual

     HAS_HALIDE = halide is not None
 except ImportError:
@@ -37,7 +37,7 @@ except ImportError:
 try:
     from . import test_torchinductor
 except ImportError:
-    import test_torchinductor
+    import test_torchinductor  # @manual=fbcode//caffe2/test/inductor:test_inductor-library


 make_halide = config.patch(
diff --git a/test/inductor/test_inductor_freezing.py b/test/inductor/test_inductor_freezing.py
index 9ea952ca02b..88f5530b578 100644
--- a/test/inductor/test_inductor_freezing.py
+++ b/test/inductor/test_inductor_freezing.py
@@ -23,7 +23,11 @@ from torch.testing._internal.common_utils import skipIfRocm

 pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
 sys.path.append(pytorch_test_dir)
-from inductor.test_torchinductor import check_model, check_model_cuda, copy_tests
+from inductor.test_torchinductor import (  # @manual=fbcode//caffe2/test/inductor:test_inductor-library
+    check_model,
+    check_model_cuda,
+    copy_tests,
+)

 from torch.testing._internal.common_utils import TEST_WITH_ASAN, TEST_WITH_ROCM
diff --git a/test/inductor/test_inplacing_pass.py b/test/inductor/test_inplacing_pass.py
index 309e7936671..280bcb25c37 100644
--- a/test/inductor/test_inplacing_pass.py
+++ b/test/inductor/test_inplacing_pass.py
@@ -46,8 +46,8 @@ def sin_cos(x: torch.Tensor, out_sin: torch.Tensor, out_cos: torch.Tensor) -> No


 if HAS_GPU:
-    import triton
-    import triton.language as tl
+    import triton  # @manual
+    import triton.language as tl  # @manual

     @triton.jit
     def sin_kernel(
diff --git a/test/inductor/test_memory_planning.py b/test/inductor/test_memory_planning.py
index df125324e89..d3e07670492 100644
--- a/test/inductor/test_memory_planning.py
+++ b/test/inductor/test_memory_planning.py
@@ -84,7 +84,9 @@ class TestMemoryPlanning(TestCase):
         try:
             from .test_aot_inductor import AOTIRunnerUtil
         except ImportError:
-            from test_aot_inductor import AOTIRunnerUtil
+            from test_aot_inductor import (
+                AOTIRunnerUtil,  # @manual=fbcode//caffe2/test/inductor:test_aot_inductor-library
+            )

         f, args = self._generate(device="cuda")
         dim0_x = Dim("dim0_x", min=1, max=2048)
diff --git a/test/inductor/test_perf.py b/test/inductor/test_perf.py
index 302e246de62..7de94642f31 100644
--- a/test/inductor/test_perf.py
+++ b/test/inductor/test_perf.py
@@ -32,8 +32,8 @@ from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda


 if HAS_CUDA:
-    import triton
-    import triton.language as tl
+    import triton  # @manual
+    import triton.language as tl  # @manual

     from torch.testing._internal.triton_utils import add_kernel
diff --git a/test/inductor/test_profiler.py b/test/inductor/test_profiler.py
index e4af44c761d..016ee768f89 100644
--- a/test/inductor/test_profiler.py
+++ b/test/inductor/test_profiler.py
@@ -172,7 +172,7 @@ class DynamoProfilerTests(torch._inductor.test_case.TestCase):

     @unittest.skipIf(not HAS_TRITON, "requires cuda & triton")
     def test_inductor_profiling_triton_hooks(self):
-        from triton.compiler import CompiledKernel
+        from triton.compiler import CompiledKernel  # @manual

         hooks_called = {"enter": False, "exit": False}
diff --git a/test/inductor/test_torchinductor_codegen_dynamic_shapes.py b/test/inductor/test_torchinductor_codegen_dynamic_shapes.py
index bfadb09344f..729d368a1e5 100644
--- a/test/inductor/test_torchinductor_codegen_dynamic_shapes.py
+++ b/test/inductor/test_torchinductor_codegen_dynamic_shapes.py
@@ -20,14 +20,14 @@ importlib.import_module("filelock")
 # Make the helper files in test/ importable
 pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
 sys.path.append(pytorch_test_dir)
-from inductor.test_torchinductor import (
+from inductor.test_torchinductor import (  # @manual=fbcode//caffe2/test/inductor:test_inductor-library
     CommonTemplate,
     copy_tests,
     run_and_get_cpp_code,
     run_and_get_triton_code,
     TestFailure,
 )
-from inductor.test_torchinductor_dynamic_shapes import (
+from inductor.test_torchinductor_dynamic_shapes import (  # @manual
     make_dynamic_cls,
     test_failures as dynamic_shapes_test_failures,
 )
diff --git a/test/inductor/test_torchinductor_dynamic_shapes.py b/test/inductor/test_torchinductor_dynamic_shapes.py
index 2e631445919..5dee3e2956b 100644
--- a/test/inductor/test_torchinductor_dynamic_shapes.py
+++ b/test/inductor/test_torchinductor_dynamic_shapes.py
@@ -39,7 +39,7 @@ from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_CPU, HAS_GPU
 # Make the helper files in test/ importable
 pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
 sys.path.append(pytorch_test_dir)
-from inductor.test_torchinductor import (
+from inductor.test_torchinductor import (  # @manual=fbcode//caffe2/test/inductor:test_inductor-library
     check_model,
     check_model_gpu,
     CommonTemplate,
diff --git a/test/inductor/test_torchinductor_opinfo.py b/test/inductor/test_torchinductor_opinfo.py
index fdac0cc9cbe..2281acee967 100644
--- a/test/inductor/test_torchinductor_opinfo.py
+++ b/test/inductor/test_torchinductor_opinfo.py
@@ -49,7 +49,10 @@ try:
     try:
         from .test_torchinductor import check_model, check_model_gpu
     except ImportError:
-        from test_torchinductor import check_model, check_model_gpu
+        from test_torchinductor import (  # @manual=fbcode//caffe2/test/inductor:test_inductor-library
+            check_model,
+            check_model_gpu,
+        )
 except (unittest.SkipTest, ImportError) as e:
     sys.stderr.write(f"{type(e)}: {e}\n")
     if __name__ == "__main__":
diff --git a/test/inductor/test_triton_extension_backend.py b/test/inductor/test_triton_extension_backend.py
index b6e04bf9922..3d3fc29f3b3 100644
--- a/test/inductor/test_triton_extension_backend.py
+++ b/test/inductor/test_triton_extension_backend.py
@@ -10,8 +10,10 @@ import torch.utils.cpp_extension


 try:
-    from extension_backends.triton.device_interface import DeviceInterface
-    from extension_backends.triton.extension_codegen_backend import (
+    from extension_backends.triton.device_interface import (
+        DeviceInterface,  # @manual=fbcode//caffe2/test/inductor/extension_backends:extension_codegen_backend
+    )
+    from extension_backends.triton.extension_codegen_backend import (  # @manual=fbcode//caffe2/test/inductor/extension_backends:extension_codegen_backend # noqa: B950
         CPUDeviceOpOverrides,
         ExtensionScheduling,
         ExtensionWrapperCodegen,
@@ -41,7 +43,7 @@ try:
     try:
         from . import test_torchinductor
     except ImportError:
-        import test_torchinductor
+        import test_torchinductor  # @manual=fbcode//caffe2/test/inductor:test_inductor-library
 except unittest.SkipTest:
     if __name__ == "__main__":
         sys.exit(0)
diff --git a/test/inductor/test_triton_heuristics.py b/test/inductor/test_triton_heuristics.py
index c4add081aa4..24f322dfebb 100644
--- a/test/inductor/test_triton_heuristics.py
+++ b/test/inductor/test_triton_heuristics.py
@@ -9,8 +9,8 @@ from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU


 try:
-    import triton  # noqa: F401
-    import triton.language as tl
+    import triton  # noqa: F401 # @manual
+    import triton.language as tl  # @manual
 except ImportError:
     if __name__ == "__main__":
         sys.exit(0)
@@ -88,7 +88,7 @@ class TestTritonHeuristics(TestCase):
         self._test_artificial_zgrid()

     def _get_cos_kernel_caching_autotuner_args(self):
-        from triton.compiler.compiler import AttrsDescriptor
+        from triton.compiler.compiler import AttrsDescriptor  # @manual

         @triton.jit
         def triton_(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
diff --git a/test/inductor/test_triton_kernels.py b/test/inductor/test_triton_kernels.py
index 9d426926d7c..f0aea3f74e4 100644
--- a/test/inductor/test_triton_kernels.py
+++ b/test/inductor/test_triton_kernels.py
@@ -36,12 +36,12 @@ if HAS_GPU:

     if not TEST_WITH_ROCM:
         if HAS_CUDA:
-            from triton.language.extra.cuda.libdevice import (
+            from triton.language.extra.cuda.libdevice import (  # @manual
                 fast_dividef,
                 fast_dividef as my_fast_dividef,
             )
         elif HAS_XPU:
-            from triton.language.extra.intel.libdevice import (
+            from triton.language.extra.intel.libdevice import (  # @manual
                 fast_dividef,
                 fast_dividef as my_fast_dividef,
             )
@@ -2543,8 +2543,8 @@ class CustomOpTests(torch._inductor.test_case.TestCase):

     @requires_gpu
     def test_capture_triton_disabled_in_triton_op(self):
-        import triton
-        import triton.language as tl
+        import triton  # @manual
+        import triton.language as tl  # @manual

         @triton.jit
         def add_kernel(
diff --git a/test/inductor/test_xpu_basic.py b/test/inductor/test_xpu_basic.py
index acc197c35f2..f4bf30e4f2d 100644
--- a/test/inductor/test_xpu_basic.py
+++ b/test/inductor/test_xpu_basic.py
@@ -20,7 +20,10 @@ importlib.import_module("filelock")

 pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
 sys.path.append(pytorch_test_dir)
-from inductor.test_torchinductor import check_model_gpu, TestCase
+from inductor.test_torchinductor import (  # @manual=fbcode//caffe2/test/inductor:test_inductor-library
+    check_model_gpu,
+    TestCase,
+)


 # TODO: Remove this file.
diff --git a/test/profiler/test_cpp_thread.cpp b/test/profiler/test_cpp_thread.cpp
index 09d2fb42a24..ce60d9c816c 100644
--- a/test/profiler/test_cpp_thread.cpp
+++ b/test/profiler/test_cpp_thread.cpp
@@ -1,5 +1,5 @@
-#include
+#include // @manual

 #include
 #include
diff --git a/test/profiler/test_cpp_thread.py b/test/profiler/test_cpp_thread.py
index 109831b6634..5dd12277e18 100644
--- a/test/profiler/test_cpp_thread.py
+++ b/test/profiler/test_cpp_thread.py
@@ -26,7 +26,7 @@ def is_fbcode():


 if is_fbcode():
-    import caffe2.test.profiler_test_cpp_thread_lib as cpp
+    import caffe2.test.profiler_test_cpp_thread_lib as cpp  # @manual=//caffe2/test:profiler_test_cpp_thread_lib
 else:
     # cpp extensions use relative paths. Those paths are relative to
     # this file, so we'll change the working directory temporarily
diff --git a/test/profiler/test_cpp_thread_lib.pyi b/test/profiler/test_cpp_thread_lib.pyi
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/test/test_bundled_images.py b/test/test_bundled_images.py
index c91814af31a..1919e1cd4fe 100644
--- a/test/test_bundled_images.py
+++ b/test/test_bundled_images.py
@@ -4,7 +4,7 @@
 import io

-import cv2
+import cv2  # @manual
 import torch
 import torch.utils.bundled_inputs
diff --git a/test/test_cuda.py b/test/test_cuda.py
index 2195e71d6ef..a8e35c1c9a3 100644
--- a/test/test_cuda.py
+++ b/test/test_cuda.py
@@ -3345,7 +3345,7 @@ class TestCudaMallocAsync(TestCase):

     @unittest.skipIf(IS_ARM64 or not IS_LINUX, "x86 linux only cpp unwinding")
     def test_direct_traceback(self):
-        from torch._C._profiler import gather_traceback, symbolize_tracebacks
+        from torch._C._profiler import gather_traceback, symbolize_tracebacks  # @manual

         c = gather_traceback(True, True, True)
         (r,) = symbolize_tracebacks([c])
diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py
index 524095f741a..816b640eec8 100644
--- a/test/test_custom_ops.py
+++ b/test/test_custom_ops.py
@@ -20,7 +20,7 @@ from functorch import make_fx
 from torch import Tensor
 from torch._custom_op.impl import CustomOp, infer_schema
 from torch._library.infer_schema import tuple_to_list
-from torch._utils_internal import get_file_path_2
+from torch._utils_internal import get_file_path_2  # @manual
 from torch.testing._internal import custom_op_db
 from torch.testing._internal.common_cuda import TEST_CUDA
 from torch.testing._internal.common_device_type import (
diff --git a/test/test_public_bindings.py b/test/test_public_bindings.py
index 94d5f7804b6..e2d7d8236c6 100644
--- a/test/test_public_bindings.py
+++ b/test/test_public_bindings.py
@@ -10,7 +10,7 @@ import unittest
 from typing import Callable

 import torch
-from torch._utils_internal import get_file_path_2
+from torch._utils_internal import get_file_path_2  # @manual
 from torch.testing._internal.common_utils import (
     IS_JETSON,
     IS_MACOS,