Change how cuda available memory is calculated in largeTensorTest decorator (#72207)
Summary:
Related PR: https://github.com/pytorch/pytorch/issues/45332
Related discussion: https://github.com/pytorch/pytorch/pull/45332#issuecomment-985996064
Pull Request resolved: https://github.com/pytorch/pytorch/pull/72207
Reviewed By: ngimel
Differential Revision: D34387921
Pulled By: mruberry
fbshipit-source-id: 2d842a25a5d3d1fc48917ba8fb29ff96d7bc2650
(cherry picked from commit 01a9e980c7)
This commit is contained in:
Parent: 491ee70e6e
Commit: 2051068233
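
Why the change: the old check estimated available memory as total_memory minus memory_allocated(device), which only counts allocations made by the current process through PyTorch's caching allocator. It is blind to memory held by other processes sharing the GPU and to CUDA context overhead, so it can report memory as available that the driver cannot actually hand out. torch.cuda.mem_get_info is a thin wrapper over cudaMemGetInfo and returns the driver's own (free, total) view of the device. Below is a minimal sketch contrasting the two estimates; the 256 MiB buffer and the 'cuda:0' device string are illustrative assumptions, not part of the commit:

import torch

if torch.cuda.is_available():
    device = 'cuda:0'
    # Hold some memory so the two estimates visibly diverge from 'total'.
    buf = torch.empty(256 * 1024 * 1024, dtype=torch.uint8, device=device)

    # Old estimate: total minus this process's allocations. Blind to
    # other processes on the GPU and to CUDA context overhead.
    props = torch.cuda.get_device_properties(device)
    old_free = props.total_memory - torch.cuda.memory_allocated(device)

    # New estimate: the driver's own view, via cudaMemGetInfo.
    new_free, total = torch.cuda.mem_get_info(device)

    print(f'old estimate: {old_free / 2**20:.0f} MiB free')
    print(f'driver says:  {new_free / 2**20:.0f} MiB free of {total / 2**20:.0f} MiB')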
@@ -829,7 +829,10 @@ def _has_sufficient_memory(device, size):
             return False
         gc.collect()
         torch.cuda.empty_cache()
-        return torch.cuda.get_device_properties(device).total_memory - torch.cuda.memory_allocated(device) >= size
+        # torch.cuda.mem_get_info, aka cudaMemGetInfo, returns a tuple of (free memory, total memory) of a GPU
+        if device == 'cuda':
+            device = 'cuda:0'
+        return torch.cuda.memory.mem_get_info(device)[0] >= size

     if device == 'xla':
         raise unittest.SkipTest('TODO: Memory availability checks for XLA?')
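
Call sites are unchanged: the largeTensorTest decorator consults _has_sufficient_memory before running a test and skips it when the device cannot satisfy the requested size. A hedged sketch of invoking the updated check directly; the 1 GiB threshold is an illustrative assumption, and the import path reflects where _has_sufficient_memory is defined (torch/testing/_internal/common_device_type.py):

import torch
from torch.testing._internal.common_device_type import _has_sufficient_memory

ONE_GIB = 1024 ** 3
if torch.cuda.is_available():
    # Bare 'cuda' is normalized to 'cuda:0' inside the check before
    # mem_get_info is called, so either spelling works here.
    print(_has_sufficient_memory('cuda', ONE_GIB))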
|||
Loading…
Reference in New Issue
Block a user