mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-06 12:20:52 +01:00
[CUDA] Skip test on low vram machines (#156548)
I noticed some jobs error out after merging #155397 because the test requires >15GB of GPU memory to execute, and some of the machines it runs on have 8GB GPUs. This PR adds the skip option on those machines. CC: @eqy @ngimel Pull Request resolved: https://github.com/pytorch/pytorch/pull/156548 Approved by: https://github.com/eqy, https://github.com/malfet
This commit is contained in:
parent
e4ae60a413
commit
9e132b770e
|
|
@ -17,7 +17,7 @@ from unittest.mock import patch, MagicMock, ANY
|
|||
import math
|
||||
import itertools
|
||||
import torch.optim as optim
|
||||
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
|
||||
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU, largeTensorTest
|
||||
from typing import Optional
|
||||
import torch.utils.cpp_extension
|
||||
from torch.testing._internal.common_nn import NNTestCase
|
||||
|
|
@ -1931,6 +1931,7 @@ class TestSDPAFailureModes(NNTestCase):
|
|||
attn_bias=None, compute_log_sumexp=True,
|
||||
dropout_p=0.01)
|
||||
|
||||
@largeTensorTest("15GB", "cuda")
|
||||
@onlyCUDA
|
||||
def test_mem_eff_attention_large_seq_len_uniform_attention(self):
|
||||
device = torch.device("cuda")
|
||||
|
|
|
|||
Loading…
Reference in New Issue
Block a user