[Fix XPU CI][Inductor UT] Fix test cases broken by community. (#160403)

Fixes #160243, Fixes #160244, Fixes #160245

Pull Request resolved: https://github.com/pytorch/pytorch/pull/160403
Approved by: https://github.com/janeyx99
Author: xinan.lin, 2025-08-12 00:38:40 -07:00 (committed by PyTorch MergeBot)
Parent: a354fa91e2
Commit: 5a9c4cfce4
2 changed files with 13 additions and 4 deletions

First changed file:

@@ -26,6 +26,7 @@ from torch.testing._internal.common_device_type import (
     OpDTypes,
     ops,
     skipCPUIf,
+    skipCUDAIf,
     skipXPUIf,
 )
 from torch.testing._internal.common_methods_invocations import op_db, skipOps
@@ -45,11 +46,11 @@ from torch.testing._internal.common_utils import (
 from torch.testing._internal.inductor_utils import (
     GPU_TYPE,
     HAS_CPU,
+    HAS_CUDA_AND_TRITON,
     has_triton,
     HAS_XPU_AND_TRITON,
     maybe_skip_size_asserts,
 )
-from torch.testing._internal.triton_utils import requires_cuda_and_triton
 from torch.utils._dtype_abbrs import dtype_abbrs
 from torch.utils._python_dispatch import TorchDispatchMode
 from torch.utils._pytree import tree_map
@@ -682,6 +683,14 @@ inductor_override_kwargs["xpu"] = {
     ("nn.functional.unfold", f16): {
         "reference_in_float": True,
     },
+    # Reference crash on Intel LTS2 driver.
+    ("nn.functional.interpolate.trilinear", f32): {
+        "check_gradient": False,
+    },
+    # Reference crash on Intel LTS2 driver.
+    ("nn.functional.interpolate.trilinear", f64): {
+        "check_gradient": False,
+    },
 }
 if TEST_WITH_ROCM:
     inductor_override_kwargs["cuda"].update(
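The two entries added above follow the existing per-(op, dtype) override pattern in the XPU table: gradient checking is turned off for trilinear interpolation because the reference computation crashes on the Intel LTS2 driver. A minimal, self-contained sketch of how such a table can be looked up is below; the `overrides_for` helper and the trimmed-down table are illustrative only, not the actual test harness code.

```python
# Illustrative sketch only, assuming the same (op name, dtype) -> kwargs layout
# used by the test file above; overrides_for() is a hypothetical helper.
import torch

f32, f64 = torch.float32, torch.float64

inductor_override_kwargs = {
    "xpu": {
        ("nn.functional.interpolate.trilinear", f32): {"check_gradient": False},
        ("nn.functional.interpolate.trilinear", f64): {"check_gradient": False},
    }
}


def overrides_for(device_type: str, op_name: str, dtype: torch.dtype) -> dict:
    # Look up the per-device table, falling back to no overrides.
    return inductor_override_kwargs.get(device_type, {}).get((op_name, dtype), {})


# Gradient checking is disabled for trilinear interpolation on XPU only;
# other devices keep the default behavior.
assert overrides_for("xpu", "nn.functional.interpolate.trilinear", f32) == {
    "check_gradient": False
}
assert overrides_for("cuda", "nn.functional.interpolate.trilinear", f32) == {}
```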
@@ -1125,7 +1134,7 @@ class TestInductorOpInfo(TestCase):
     @skipCUDAMemoryLeakCheckIf(
         True
     )  # inductor kernels failing this test intermittently
-    @requires_cuda_and_triton
+    @skipCUDAIf(not HAS_CUDA_AND_TRITON, "Skipped! Triton not found")
     @skipXPUIf(
         not HAS_XPU_AND_TRITON, "Skipped! Supported XPU compiler and Triton not found"
     )
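The decorator swap above is what un-breaks the XPU run: `@requires_cuda_and_triton` skips the test whenever CUDA plus Triton is unavailable, regardless of which device the test is instantiated for, while the `skipCUDAIf`/`skipXPUIf` pair skips each backend independently. A minimal sketch of that pattern follows; the class and test names are placeholders, and only the decorators and flags come from the diff.

```python
# Minimal sketch of the per-backend skip pattern (MyInductorTest and
# test_something are placeholder names, not from the PyTorch source).
from torch.testing._internal.common_device_type import (
    instantiate_device_type_tests,
    skipCUDAIf,
    skipXPUIf,
)
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.inductor_utils import (
    HAS_CUDA_AND_TRITON,
    HAS_XPU_AND_TRITON,
)


class MyInductorTest(TestCase):
    # Each backend is skipped only when its own toolchain is missing, so an
    # XPU machine without CUDA still runs the XPU variant of the test.
    @skipCUDAIf(not HAS_CUDA_AND_TRITON, "Skipped! Triton not found")
    @skipXPUIf(
        not HAS_XPU_AND_TRITON, "Skipped! Supported XPU compiler and Triton not found"
    )
    def test_something(self, device):
        pass  # placeholder body


instantiate_device_type_tests(MyInductorTest, globals(), only_for=("cuda", "xpu"))

if __name__ == "__main__":
    run_tests()
```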

Second changed file:

@@ -7078,10 +7078,10 @@ class DeviceCopy(ExternKernelOut):
         # x.get_stride() may be unimplemented if x's size is empty
         stride = x.get_stride()
         is_destination_pinned = (
-            x_device.type == "cuda" and device.type == "cpu" and non_blocking
+            is_gpu(x_device.type) and device.type == "cpu" and non_blocking
         )
         is_source_pinned = (
-            x_device.type == "cpu" and device.type == "cuda" and non_blocking
+            x_device.type == "cpu" and is_gpu(device.type) and non_blocking
         )
         if is_source_pinned and is_storage_and_layout(x):
             x.get_layout().is_pinned = True
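The second file's change generalizes the pinned-memory heuristic for `DeviceCopy` from hard-coded `== "cuda"` comparisons to an `is_gpu()` predicate, so non-blocking copies between the host and any GPU backend (including XPU) can mark the CPU-side layout as pinned. A self-contained sketch of what the new conditions compute is below; `GPU_TYPES` and `pin_flags` are stand-ins for illustration, not the inductor source.

```python
# Illustrative sketch: GPU_TYPES and pin_flags() are assumptions for this
# example, standing in for inductor's own is_gpu() helper and DeviceCopy logic.
GPU_TYPES = {"cuda", "xpu"}


def is_gpu(device_type: str) -> bool:
    # Any GPU backend counts, not just CUDA.
    return device_type in GPU_TYPES


def pin_flags(src_type: str, dst_type: str, non_blocking: bool) -> tuple[bool, bool]:
    # Returns (is_destination_pinned, is_source_pinned) for a device copy,
    # mirroring the two conditions in the diff above.
    is_destination_pinned = is_gpu(src_type) and dst_type == "cpu" and non_blocking
    is_source_pinned = src_type == "cpu" and is_gpu(dst_type) and non_blocking
    return is_destination_pinned, is_source_pinned


# A non-blocking host-to-XPU copy now pins the CPU source, exactly as a
# host-to-CUDA copy did before this change.
assert pin_flags("cpu", "xpu", non_blocking=True) == (False, True)
assert pin_flags("cpu", "cuda", non_blocking=True) == (False, True)
assert pin_flags("xpu", "cpu", non_blocking=True) == (True, False)
assert pin_flags("cpu", "xpu", non_blocking=False) == (False, False)
```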