diff --git a/torch/testing/_internal/distributed/distributed_test.py b/torch/testing/_internal/distributed/distributed_test.py
index df55573c16b..5bf0152630f 100644
--- a/torch/testing/_internal/distributed/distributed_test.py
+++ b/torch/testing/_internal/distributed/distributed_test.py
@@ -4064,8 +4064,8 @@ class DistributedTest:
             dist.barrier()
 
         @sandcastle_skip_if(
-            BACKEND not in DistTestCases.backend_feature["ddp"],
-            f"The {BACKEND} backend does not support DistributedDataParallel"
+            BACKEND == "nccl",
+            "Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259"
         )
         @skip_if_lt_x_gpu(2)
         @skip_if_rocm
@@ -4092,8 +4092,8 @@ class DistributedTest:
             )
 
         @sandcastle_skip_if(
-            BACKEND not in DistTestCases.backend_feature["ddp"],
-            f"The {BACKEND} backend does not support DistributedDataParallel"
+            BACKEND == "nccl",
+            "Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259"
         )
         @skip_if_lt_x_gpu(2)
         @skip_if_rocm
@@ -4113,8 +4113,8 @@ class DistributedTest:
             )
 
         @sandcastle_skip_if(
-            BACKEND not in DistTestCases.backend_feature["ddp"],
-            f"The {BACKEND} backend does not support DistributedDataParallel"
+            BACKEND == "nccl",
+            "Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259"
         )
         @skip_if_lt_x_gpu(2)
         @skip_if_rocm