[Inductor UT][Break XPU] Apply the CUDA tolerance changes introduced by #144579 to XPU. (#149862)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/149862
Approved by: https://github.com/desertfire, https://github.com/jansel
Author: xinan.lin
Date: 2025-04-10 16:38:48 -07:00
Committed by: PyTorch MergeBot
Parent: a22d3e778e
Commit: d186c933f8

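Each entry in the table below keys an op (optionally paired with a dtype such as f16, i.e. torch.float16) to comparison overrides — looser atol/rtol, reference_in_float, or check_gradient — that the Inductor opinfo tests apply when checking compiled output against eager mode. As a rough illustration of how such a table can be consumed (the helper name and lookup logic here are illustrative assumptions, not the actual harness code):

    import torch

    f16 = torch.float16

    # Hypothetical lookup: prefer an (op, dtype) entry, fall back to an op-only entry.
    def get_overrides(table, op_name, dtype):
        return table.get((op_name, dtype), table.get(op_name, {}))

    xpu_overrides = {("logsumexp", f16): {"atol": 1e-5, "rtol": 1e-2}}

    kwargs = get_overrides(xpu_overrides, "logsumexp", f16)
    actual = torch.randn(8, dtype=f16)
    expected = actual.clone()
    # Tolerances from the table are forwarded to the comparison.
    torch.testing.assert_close(
        actual, expected, atol=kwargs.get("atol", 1e-5), rtol=kwargs.get("rtol", 1e-3)
    )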

@@ -540,7 +540,7 @@ inductor_override_kwargs["xpu"] = {
     ("linalg.vecdot", f16): {"atol": 1e-5, "rtol": 2e-2},
     "log_normal": {"reference_in_float": True},
     ("logsumexp", f16): {"atol": 1e-5, "rtol": 1e-2},
-    ("masked.cumprod", f16): {"atol": 1e-5, "rtol": 5e-2},
+    ("masked.cumprod", f16): {"reference_in_float": True, "atol": 1e-5, "rtol": 5e-2},
     ("masked.cumsum", f16): {"atol": 1e-5, "rtol": 5e-3},
     ("masked.softmin", f16): {"atol": 1e-4, "rtol": 0.01},
     ("masked.softmax", f16): {"atol": 2e-4, "rtol": 0.01},
@@ -590,7 +590,6 @@ inductor_override_kwargs["xpu"] = {
         "rtol": 0.02,
     },
     ("remainder", f16): {"atol": 1e-4, "rtol": 0.005},
-    ("nn.functional.upsample_bilinear", f16): {"atol": 1e-5, "rtol": 0.002},
     ("sinc", f16): {"atol": 0.008, "rtol": 0.002},
     ("softmax", f16): {"atol": 1e-4, "rtol": 0.02},
     ("_softmax_backward_data", f16): {"atol": 0.008, "rtol": 0.002},
@@ -623,11 +622,44 @@ inductor_override_kwargs["xpu"] = {
     ("index_reduce.amax", f32): {"check_gradient": False},
     ("index_reduce.amax", f16): {"check_gradient": False},
     ("tanh", f16): {"atol": 1e-4, "rtol": 1e-2},
-    ("nn.functional.embedding_bag", f16): {"check_gradient": False},
     ("nn.functional.embedding_bag", f32): {"check_gradient": False},
     ("nn.functional.embedding_bag", f64): {"check_gradient": False},
-    ("_unsafe_masked_index", f16): {"atol": 1e-5, "rtol": 2e-3},
     ("_unsafe_masked_index_put_accumulate", f16): {"atol": 1e-5, "rtol": 5e-3},
+    ("_unsafe_masked_index", f16): {
+        "reference_in_float": True,
+        "atol": 3e-4,
+        "rtol": 2e-3,
+    },
+    ("nn.functional.interpolate.linear", f16): {"reference_in_float": True},
+    ("nn.functional.prelu", f16): {
+        "reference_in_float": True,
+        "atol": 1e-3,
+        "rtol": 4e-3,
+    },
+    ("addmm", f16): {"reference_in_float": True},
+    ("logaddexp", f16): {"reference_in_float": True},
+    ("std_mean", f16): {"reference_in_float": True},
+    ("hypot", f16): {"reference_in_float": True, "atol": 3e-4, "rtol": 2e-3},
+    ("cummin", f16): {"reference_in_float": True, "atol": 5e-5, "rtol": 2e-3},
+    ("unfold_copy", f16): {"reference_in_float": True, "atol": 2e-5, "rtol": 1e-2},
+    ("nn.functional.upsample_bilinear", f16): {
+        "reference_in_float": True,
+        "atol": 1e-4,
+        "rtol": 2e-3,
+    },
+    ("nn.functional.embedding_bag", f16): {
+        "check_gradient": False,
+        "atol": 1e-4,
+        "rtol": 1e-2,
+    },
+    ("nn.functional.max_pool2d", f16): {
+        "reference_in_float": True,
+        "atol": 1e-4,
+        "rtol": 2e-3,
+    },
+    ("nn.functional.unfold", f16): {
+        "reference_in_float": True,
+    },
 }
 # Test with one sample only for following ops
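Many of the added entries set "reference_in_float": True. The intent, following the CUDA table changes from #144579, is to compute the eager reference in a wider float dtype rather than in f16, so that rounding error in the reference itself does not dominate the comparison. A minimal sketch of that comparison pattern, with the caveat that the actual opinfo harness handles this flag internally and may differ in detail:

    import torch

    def check_against_float_reference(fn, x_f16, atol, rtol):
        # Reference computed in float32, in the spirit of "reference_in_float": True.
        ref = fn(x_f16.to(torch.float32))
        out = fn(x_f16)  # stands in for the compiled (Inductor) result
        torch.testing.assert_close(out.to(torch.float32), ref, atol=atol, rtol=rtol)

    # Example using tolerances borrowed from the ("tanh", f16) entry above.
    check_against_float_reference(
        torch.tanh, torch.randn(64, dtype=torch.float16), atol=1e-4, rtol=1e-2
    )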