[inductor] Change minimum number of SMs to 60 to let Ada use Triton GEMM backend (#150888)
context: https://github.com/pytorch/pytorch/issues/150390#issuecomment-2790272814
Pull Request resolved: https://github.com/pytorch/pytorch/pull/150888
Approved by: https://github.com/jansel
commit 115a165f9b
parent 4161c752bb
@@ -1275,7 +1275,7 @@ def is_big_gpu(index_or_device: Union[int, torch.device] = 0) -> bool:
             return False
         return True
 
-    min_sms = 16 if device.type == "xpu" else 68  # 3080
+    min_sms = 16 if device.type == "xpu" else 60  # 3080
     avail_sms = prop.multi_processor_count
     if avail_sms < min_sms:
         log.warning(
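
For context, the following is a minimal standalone sketch (not the PyTorch source) of the SM-count gate this diff relaxes: it reads the device's multiprocessor count and compares it against the new 60-SM floor, mirroring the CUDA path of the is_big_gpu check shown above. The helper and constant names are illustrative; the real check also handles xpu devices (16-SM floor, per the diff) and feeds inductor's decision to enable the Triton GEMM backend.

import torch

# Illustrative sketch of the SM-count gate relaxed by this commit (CUDA only).
# The threshold mirrors the diff above; the helper name is made up.
MIN_SMS = 60  # lowered from 68 (RTX 3080) so mid-range Ada GPUs clear the bar

def has_enough_sms(index: int = 0) -> bool:
    if not torch.cuda.is_available():
        return False
    avail_sms = torch.cuda.get_device_properties(index).multi_processor_count
    if avail_sms < MIN_SMS:
        print(f"only {avail_sms} SMs available, {MIN_SMS} required")
        return False
    return True

if __name__ == "__main__":
    print("Triton GEMM eligible (by SM count):", has_enough_sms())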