Remove AOTI cross compilation time from internal CI (#165935)
Summary: as title

Test Plan: CI

Differential Revision: D85088451

Pull Request resolved: https://github.com/pytorch/pytorch/pull/165935
Approved by: https://github.com/desertfire
parent 3dfd0c7584
commit c40048472c
@@ -1,74 +0,0 @@
-# Owner(s): ["module: inductor"]
-import tempfile
-import unittest
-import zipfile
-
-import torch
-import torch._inductor.config
-from torch._environment import is_fbcode
-from torch._inductor.test_case import TestCase
-from torch.testing._internal.common_utils import IS_CI
-from torch.testing._internal.inductor_utils import HAS_GPU, requires_gpu
-
-
-class Simple(torch.nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.fc1 = torch.nn.Linear(10, 16)
-        self.relu = torch.nn.ReLU()
-        self.fc2 = torch.nn.Linear(16, 1)
-        self.sigmoid = torch.nn.Sigmoid()
-
-    def forward(self, x):
-        x = self.fc1(x)
-        x = self.relu(x)
-        x = self.fc2(x)
-        x = self.sigmoid(x)
-        return x
-
-
-class TestAOTInductorWindowsCrossCompilation(TestCase):
-    @requires_gpu()
-    def test_simple_so(self):
-        if is_fbcode() or IS_CI:
-            raise unittest.SkipTest("requires x86_64-w64-mingw32-gcc")
-
-        # TODO: enable in CI
-        with torch.no_grad():
-            device = "cuda"
-            model = Simple().to(device=device)
-            example_inputs = (torch.randn(8, 10, device=device),)
-            batch_dim = torch.export.Dim("batch", min=1, max=1024)
-            exported = torch.export.export(
-                model, example_inputs, dynamic_shapes={"x": {0: batch_dim}}
-            )
-            package_path = torch._inductor.aoti_compile_and_package(
-                exported,
-                inductor_configs={
-                    "aot_inductor.model_name_for_generated_files": "model",
-                    "aot_inductor.cross_target_platform": "windows",
-                    "aot_inductor.link_libtorch": False,
-                    # TODO: need to add aoti_shim_library_path for CI
-                    "aot_inductor.aoti_shim_library": "executorch",
-                    # no fallback ops
-                    "max_autotune": True,
-                    "max_autotune_gemm_backends": "TRITON,CPP",
-                    "max_autotune_conv_backends": "TRITON,CPP",
-                    "aot_inductor.embed_kernel_binary": True,
-                    # simplify things for now
-                    "aot_inductor.precompile_headers": False,
-                    "aot_inductor.package_constants_on_disk_format": "binary_blob",
-                    "aot_inductor.package_constants_in_so": False,
-                },
-            )
-
-            with tempfile.TemporaryDirectory() as tmpdir:
-                with zipfile.ZipFile(package_path, "r") as zf:
-                    zf.extractall(tmpdir)
-
-
-if __name__ == "__main__":
-    from torch._inductor.test_case import run_tests
-
-    if HAS_GPU:
-        run_tests(needs="filelock")
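For reference, the deleted test drives torch._inductor.aoti_compile_and_package with Windows cross-compilation options. Below is a minimal, hypothetical sketch (not part of this commit) of the same packaging API without cross-compilation, assuming a CUDA-capable host and a recent PyTorch build where torch._inductor.aoti_load_package is available, so the produced package can be loaded back and run in-process:

# Hypothetical sketch, not part of this commit: host-platform AOTI packaging.
import torch
import torch._inductor


class Simple(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = torch.nn.Linear(10, 1)

    def forward(self, x):
        return torch.sigmoid(self.fc(x))


with torch.no_grad():
    device = "cuda"
    model = Simple().to(device=device)
    example_inputs = (torch.randn(8, 10, device=device),)
    exported = torch.export.export(model, example_inputs)
    # Compile and package for the host platform (no cross_target_platform override).
    package_path = torch._inductor.aoti_compile_and_package(exported)
    # Load the packaged model back and run it in the current process.
    compiled = torch._inductor.aoti_load_package(package_path)
    out = compiled(*example_inputs)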
@@ -9,6 +9,7 @@ from typing import Any, Optional
 
 import torch
 import torch._inductor.config
+from torch._environment import is_fbcode
 from torch._inductor.test_case import TestCase
 from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU, requires_gpu
 
@@ -77,6 +78,9 @@ class WindowsCrossCompilationTestFramework:
                 "This test should run on Linux for cross-compilation"
             )
 
+        if is_fbcode():
+            raise unittest.SkipTest("requires x86_64-w64-mingw32-gcc")
+
         self.assertTrue("WINDOWS_CUDA_HOME" in os.environ)
 
         with torch.no_grad():
@@ -128,6 +132,9 @@ class WindowsCrossCompilationTestFramework:
         if platform.system() != "Windows":
             raise unittest.SkipTest("This test should run on Windows")
 
+        if is_fbcode():
+            raise unittest.SkipTest("requires x86_64-w64-mingw32-gcc")
+
         if not HAS_GPU:
             raise unittest.SkipTest("Test requires GPU")
 
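The is_fbcode() guards added above skip the cross-compilation tests in the internal environment, where the MinGW toolchain named in the skip message is unavailable. As a hypothetical alternative (an assumption, not something this commit does), a test could probe for the toolchain directly and skip when it is missing:

# Hypothetical helper, not part of this commit.
import shutil
import unittest


def skip_unless_mingw_available() -> None:
    # Skip when the Windows cross-compiler is not on PATH.
    if shutil.which("x86_64-w64-mingw32-gcc") is None:
        raise unittest.SkipTest("requires x86_64-w64-mingw32-gcc")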