mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-06 12:20:52 +01:00
[Proposal] Drop legacy CUDA support to slim down the wheels (#152069)
Proposal to drop legacy CUDA support to slim down the Windows wheels. With the latest release of 2.7.0 and the new Blackwell support we've seen yet another rise in wheel size, going from ~2.5GB with PyTorch 2.6.0 all the way to ~3.1GB with PyTorch 2.7.0 CUDA 12.8 on Python 3.12, and ~3.3GB with Python 3.13. Python 3.12, PyTorch 2.7.0, CUDA 12.8  Python 3.13, PyTorch 2.7.0, CUDA 12.8  These CI changes remove support for many GPUs which are now about 8 years old if not older, including GPUs like the GTX 960M, 950M, 940M, 930M and some other Quadro GPUs from as far back as April 2016, such as the Quadro M500M, as per [Nvidia's documentation](https://developer.nvidia.com/cuda-gpus). This change would also save on our bandwidth 😅 @seemethere Pull Request resolved: https://github.com/pytorch/pytorch/pull/152069 Approved by: https://github.com/seemethere, https://github.com/eqy, https://github.com/atalman
This commit is contained in:
parent
a811d3351b
commit
f38dae76ee
|
|
@ -37,7 +37,7 @@ IF "%CUDA_PATH_V124%"=="" (
|
|||
)
|
||||
|
||||
IF "%BUILD_VISION%" == "" (
|
||||
set TORCH_CUDA_ARCH_LIST=5.0;6.0;6.1;7.0;7.5;8.0;8.6;9.0
|
||||
set TORCH_CUDA_ARCH_LIST=6.1;7.0;7.5;8.0;8.6;9.0
|
||||
set TORCH_NVCC_FLAGS=-Xfatbin -compress-all
|
||||
) ELSE (
|
||||
set NVCC_FLAGS=-D__CUDA_NO_HALF_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=compute_80 -gencode=arch=compute_86,code=compute_86 -gencode=arch=compute_90,code=compute_90
|
||||
|
|
|
|||
|
|
@ -37,7 +37,7 @@ IF "%CUDA_PATH_V126%"=="" (
|
|||
)
|
||||
|
||||
IF "%BUILD_VISION%" == "" (
|
||||
set TORCH_CUDA_ARCH_LIST=5.0;6.0;6.1;7.0;7.5;8.0;8.6;9.0
|
||||
set TORCH_CUDA_ARCH_LIST=6.1;7.0;7.5;8.0;8.6;9.0
|
||||
set TORCH_NVCC_FLAGS=-Xfatbin -compress-all
|
||||
) ELSE (
|
||||
set NVCC_FLAGS=-D__CUDA_NO_HALF_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=compute_80 -gencode=arch=compute_86,code=compute_86 -gencode=arch=compute_90,code=compute_90
|
||||
|
|
|
|||
|
|
@ -37,7 +37,7 @@ IF "%CUDA_PATH_V128%"=="" (
|
|||
)
|
||||
|
||||
IF "%BUILD_VISION%" == "" (
|
||||
set TORCH_CUDA_ARCH_LIST=5.0;6.0;6.1;7.0;7.5;8.0;8.6;9.0;10.0;12.0
|
||||
set TORCH_CUDA_ARCH_LIST=6.1;7.0;7.5;8.0;8.6;9.0;10.0;12.0
|
||||
set TORCH_NVCC_FLAGS=-Xfatbin -compress-all
|
||||
) ELSE (
|
||||
set NVCC_FLAGS=-D__CUDA_NO_HALF_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=compute_80 -gencode=arch=compute_86,code=compute_86 -gencode=arch=compute_90,code=compute_90 -gencode=arch=compute_100,code=compute_100 -gencode=arch=compute_120,code=compute_120
|
||||
|
|
|
|||
Loading…
Reference in New Issue
Block a user