Mirror of https://github.com/zebrajr/pytorch.git, synced 2025-12-06 12:20:52 +01:00
[inductor] Mark / restrict tests that only work if ATen is used for matmul (#166518)
These tests only work if max_autotune=False (the default), which for matmul means falling back to ATen. This PR just documents that restriction and makes it explicit in the tests.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/166518
Approved by: https://github.com/eellison
This commit is contained in:
parent
c01636e1bc
commit
b0831930ed
@@ -6474,7 +6474,11 @@ class CommonTemplate:
     # Constant folding was explicitly turned off due to issue #108388
     # Turn it back on for test
     @unittest.skipIf(config.triton.native_matmul, "native matmul has better precision")
-    @torch._inductor.config.patch(joint_graph_constant_folding=True)
+    @torch._inductor.config.patch(
+        joint_graph_constant_folding=True,
+        # Numerical accuracy failure for triton fp16
+        max_autotune_gemm_backends="ATEN",
+    )
     def test_remove_no_ops(self):
         def matmul_with_op(x, y, fn):
             return fn(x @ y)
@@ -6902,7 +6906,11 @@ def forward(self, arg0_1: "Sym(s77)", arg1_1: "Sym(s27)", arg2_1: "Sym(s53)", ar
         _, (code0, code1) = _run_and_get_stripped_kernels(b, x)
         self.assertEqual(code0, code1)

-    @config.patch(force_disable_caches=True)
+    @config.patch(
+        force_disable_caches=True,
+        # Test expects a single (fused) kernel to be generated
+        max_autotune_gemm_backends="ATEN",
+    )
     @skip_if_cpp_wrapper("run_and_get_kernels issue")
     @unittest.skipIf(config.triton.native_matmul, "matmul is now generated")
     def test_deterministic_codegen_with_suffix(self):
@@ -14154,6 +14162,8 @@ def forward(self, arg0_1: "Sym(s77)", arg1_1: "Sym(s27)", arg2_1: "Sym(s53)", ar
             code_disallowed = re.sub(r"AOT ID: .*", "AOT ID: ['test']", code_disallowed)
             return code_allowed != code_disallowed

+    # If matmul is implemented by triton there is more reuse
+    @config.patch(max_autotune_gemm_backends="ATEN")
     @unittest.skipIf(config.triton.native_matmul, "matmul is now generated")
     def test_allow_reuse_disable_if_exceed_peak(self):
         @torch.compile
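For readers applying the same restriction elsewhere, here is a minimal, self-contained sketch (not part of this commit) of the decorator pattern the diff uses: patching max_autotune_gemm_backends="ATEN" keeps matmul on the ATen fallback that the tests' expectations assume, while the skipIf guards builds where config.triton.native_matmul makes Inductor generate matmul kernels directly. The test class and test name below are hypothetical, and the getattr default is an assumption for builds that predate the native_matmul flag.

import unittest

import torch
import torch._inductor.config as inductor_config


class AtenMatmulOnlyExample(unittest.TestCase):  # hypothetical class, for illustration only
    # Skip when Inductor generates matmul natively; guarded with getattr because the
    # native_matmul flag may not exist on older builds (an assumption of this sketch).
    @unittest.skipIf(
        getattr(inductor_config.triton, "native_matmul", False),
        "native matmul is generated; ATen-specific expectations do not apply",
    )
    # Restrict GEMM autotuning to the ATen backend so the matmul stays an ATen call.
    @inductor_config.patch(max_autotune_gemm_backends="ATEN")
    def test_matmul_uses_aten_fallback(self):
        def f(x, y):
            return (x @ y).relu()

        compiled = torch.compile(f)
        x = torch.randn(8, 8)
        y = torch.randn(8, 8)
        # With matmul left to ATen, the compiled output should match eager closely.
        torch.testing.assert_close(compiled(x, y), f(x, y))


if __name__ == "__main__":
    unittest.main()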