diff --git a/test/inductor/test_aot_inductor_windows.py b/test/inductor/test_aot_inductor_windows.py
deleted file mode 100644
index 5a22bf7bb76..00000000000
--- a/test/inductor/test_aot_inductor_windows.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Owner(s): ["module: inductor"]
-import tempfile
-import unittest
-import zipfile
-
-import torch
-import torch._inductor.config
-from torch._environment import is_fbcode
-from torch._inductor.test_case import TestCase
-from torch.testing._internal.common_utils import IS_CI
-from torch.testing._internal.inductor_utils import HAS_GPU, requires_gpu
-
-
-class Simple(torch.nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.fc1 = torch.nn.Linear(10, 16)
-        self.relu = torch.nn.ReLU()
-        self.fc2 = torch.nn.Linear(16, 1)
-        self.sigmoid = torch.nn.Sigmoid()
-
-    def forward(self, x):
-        x = self.fc1(x)
-        x = self.relu(x)
-        x = self.fc2(x)
-        x = self.sigmoid(x)
-        return x
-
-
-class TestAOTInductorWindowsCrossCompilation(TestCase):
-    @requires_gpu()
-    def test_simple_so(self):
-        if is_fbcode() or IS_CI:
-            raise unittest.SkipTest("requires x86_64-w64-mingw32-gcc")
-
-        # TODO: enable in CI
-        with torch.no_grad():
-            device = "cuda"
-            model = Simple().to(device=device)
-            example_inputs = (torch.randn(8, 10, device=device),)
-            batch_dim = torch.export.Dim("batch", min=1, max=1024)
-            exported = torch.export.export(
-                model, example_inputs, dynamic_shapes={"x": {0: batch_dim}}
-            )
-            package_path = torch._inductor.aoti_compile_and_package(
-                exported,
-                inductor_configs={
-                    "aot_inductor.model_name_for_generated_files": "model",
-                    "aot_inductor.cross_target_platform": "windows",
-                    "aot_inductor.link_libtorch": False,
-                    # TODO: need to add aoti_shim_library_path for CI
-                    "aot_inductor.aoti_shim_library": "executorch",
-                    # no fallback ops
-                    "max_autotune": True,
-                    "max_autotune_gemm_backends": "TRITON,CPP",
-                    "max_autotune_conv_backends": "TRITON,CPP",
-                    "aot_inductor.embed_kernel_binary": True,
-                    # simplify things for now
-                    "aot_inductor.precompile_headers": False,
-                    "aot_inductor.package_constants_on_disk_format": "binary_blob",
-                    "aot_inductor.package_constants_in_so": False,
-                },
-            )
-
-            with tempfile.TemporaryDirectory() as tmpdir:
-                with zipfile.ZipFile(package_path, "r") as zf:
-                    zf.extractall(tmpdir)
-
-
-if __name__ == "__main__":
-    from torch._inductor.test_case import run_tests
-
-    if HAS_GPU:
-        run_tests(needs="filelock")
diff --git a/test/inductor/test_aoti_cross_compile_windows.py b/test/inductor/test_aoti_cross_compile_windows.py
index 04065add908..15904a8d9dc 100644
--- a/test/inductor/test_aoti_cross_compile_windows.py
+++ b/test/inductor/test_aoti_cross_compile_windows.py
@@ -9,6 +9,7 @@ from typing import Any, Optional
 
 import torch
 import torch._inductor.config
+from torch._environment import is_fbcode
 from torch._inductor.test_case import TestCase
 from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU, requires_gpu
 
@@ -77,6 +78,9 @@ class WindowsCrossCompilationTestFramework:
             "This test should run on Linux for cross-compilation"
         )
 
+        if is_fbcode():
+            raise unittest.SkipTest("requires x86_64-w64-mingw32-gcc")
+
         self.assertTrue("WINDOWS_CUDA_HOME" in os.environ)
 
         with torch.no_grad():
@@ -128,6 +132,9 @@ class WindowsCrossCompilationTestFramework:
         if platform.system() != "Windows":
             raise unittest.SkipTest("This test should run on Windows")
 
+        if is_fbcode():
+            raise unittest.SkipTest("requires x86_64-w64-mingw32-gcc")
+
         if not HAS_GPU:
             raise unittest.SkipTest("Test requires GPU")
 
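
Note: all three added hunks apply the same fbcode skip guard that previously lived only in the now-deleted test file. A minimal sketch of that guard pattern follows; the test class and method names are hypothetical, while `is_fbcode` and `TestCase` are the real imports used in this diff:

    # Sketch of the fbcode skip guard consolidated by this diff.
    # ExampleCrossCompileTest / test_requires_mingw are illustrative names.
    import unittest

    from torch._environment import is_fbcode
    from torch._inductor.test_case import TestCase


    class ExampleCrossCompileTest(TestCase):
        def test_requires_mingw(self):
            # Windows cross-compilation needs the MinGW toolchain
            # (x86_64-w64-mingw32-gcc), which is unavailable in fbcode.
            if is_fbcode():
                raise unittest.SkipTest("requires x86_64-w64-mingw32-gcc")
            # ... cross-compilation test body would go here ...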