[CI][CUDA][Distributed]Update test_composability.py (#148578)
The line world_size = int(os.getenv("WORLD_SIZE", 4)) later in the file indicates that the tests in this file require not just more than 1 GPU, but at least 4 GPUs. skip_if_lt_x_gpu(4) does not properly skip them on a platform with only 2 GPUs.
skip_if_lt_x_gpu appears to be broken here, potentially related to a similar issue: https://github.com/pytorch/pytorch/issues/146094
Pull Request resolved: https://github.com/pytorch/pytorch/pull/148578
Approved by: https://github.com/atalman
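
For context, here is a minimal sketch of the gating mismatch described above; it is illustrative only, not the verbatim contents of test_composability.py, and the test class and test name are hypothetical. The module-level world size comes from the WORLD_SIZE environment variable and defaults to 4, so a per-test decorator gating on fewer than 4 GPUs would let a 4-rank test launch (and fail) on a 2-GPU machine instead of skipping.

# Minimal sketch of the gating mismatch; illustrative only, not the
# verbatim contents of test_composability.py.
import os

import torch
import torch.distributed as dist
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu

# The file derives its world size from the environment, defaulting to 4,
# so every test here effectively needs at least 4 GPUs.
world_size = int(os.getenv("WORLD_SIZE", 4))


class ComposabilityTest:  # hypothetical class name, for illustration
    @skip_if_lt_x_gpu(4)
    def test_something(self):  # hypothetical test name
        # Spawns `world_size` ranks; on a 2-GPU machine this fails rather
        # than skips unless the surrounding gating also requires 4 GPUs.
        ...


if __name__ == "__main__":
    # The change in this PR: the __main__ guard now requires more than 3
    # visible GPUs, matching the default world_size of 4.
    if not (
        dist.is_available()
        and dist.is_nccl_available()
        and torch.cuda.device_count() > 3
    ):
        print("c10d NCCL not available or not enough GPUs, skipping tests")
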
This commit is contained in:
parent 786422a4d7
commit cc2decdb25
@@ -385,7 +385,7 @@ if __name__ == "__main__":
     if not (
         dist.is_available()
         and dist.is_nccl_available()
-        and torch.cuda.device_count() > 1
+        and torch.cuda.device_count() > 3
     ):
         print(
             "c10d NCCL not available or not enough GPUs, skipping tests",