[WIP] enable cu116 builds

Enable and test CUDA 11.6 builds.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/75092
Approved by: https://github.com/atalman

commit 14baca38c5 (parent eb43e60a92)
@@ -4,6 +4,7 @@ CUDA_VERSIONS = [
     "102",
     "113",
     "115",
+    "116",
 ]

 ROCM_VERSIONS = [
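For orientation, a minimal sketch (not part of the diff) of how a bare version string such as "116" can be expanded into the dotted "cuda11.6" form used by the image and build-environment names below; generate_tags is a hypothetical helper, not the repository's actual code:

# Hypothetical sketch: expand supported CUDA versions into dotted tags.
# CUDA_VERSIONS mirrors the list in the hunk above; generate_tags() is illustrative only.
CUDA_VERSIONS = ["102", "113", "115", "116"]

def generate_tags(cuda_versions):
    """Turn bare version strings like '116' into dotted tags like 'cuda11.6'."""
    tags = []
    for v in cuda_versions:
        major, minor = v[:-1], v[-1]          # "116" -> ("11", "6")
        tags.append(f"cuda{major}.{minor}")
    return tags

print(generate_tags(CUDA_VERSIONS))  # ['cuda10.2', 'cuda11.3', 'cuda11.5', 'cuda11.6']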
@@ -145,6 +145,17 @@ case "$image" in
     VISION=yes
     KATEX=yes
     ;;
+  pytorch-linux-bionic-cuda11.6-cudnn8-py3-gcc7)
+    CUDA_VERSION=11.6.0
+    CUDNN_VERSION=8
+    ANACONDA_PYTHON_VERSION=3.7
+    CMAKE_VERSION=3.10.3
+    GCC_VERSION=7
+    PROTOBUF=yes
+    DB=yes
+    VISION=yes
+    KATEX=yes
+    ;;
   pytorch-linux-xenial-py3-clang5-asan)
     ANACONDA_PYTHON_VERSION=3.7
     CLANG_VERSION=5.0
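The same image-to-settings mapping, restated as a hedged Python sketch (illustrative only; keys and values are copied from the new case branch) to make explicit which knobs the new docker image enables:

# Illustrative only: the bash case statement above expressed as a lookup table.
IMAGE_CONFIGS = {
    "pytorch-linux-bionic-cuda11.6-cudnn8-py3-gcc7": {
        "CUDA_VERSION": "11.6.0",
        "CUDNN_VERSION": "8",
        "ANACONDA_PYTHON_VERSION": "3.7",
        "CMAKE_VERSION": "3.10.3",
        "GCC_VERSION": "7",
        "PROTOBUF": "yes",
        "DB": "yes",
        "VISION": "yes",
        "KATEX": "yes",
    },
}

def config_for(image: str) -> dict:
    try:
        return IMAGE_CONFIGS[image]
    except KeyError:
        # Mirrors the script's fallthrough error for unknown image names.
        raise SystemExit(f"unknown image: {image}")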
@@ -22,6 +22,10 @@ case ${CUDA_VERSION} in
     cuda_installer_name="cuda_11.5.0_496.13_win10"
     cuda_install_packages="thrust_11.5 nvcc_11.5 cuobjdump_11.5 nvprune_11.5 nvprof_11.5 cupti_11.5 cublas_11.5 cublas_dev_11.5 cudart_11.5 cufft_11.5 cufft_dev_11.5 curand_11.5 curand_dev_11.5 cusolver_11.5 cusolver_dev_11.5 cusparse_11.5 cusparse_dev_11.5 npp_11.5 npp_dev_11.5 nvrtc_11.5 nvrtc_dev_11.5 nvml_dev_11.5"
     ;;
+  11.6)
+    cuda_installer_name="cuda_11.6.0_511.23_windows"
+    cuda_install_packages="thrust_11.6 nvcc_11.6 cuobjdump_11.6 nvprune_11.6 nvprof_11.6 cupti_11.6 cublas_11.6 cublas_dev_11.6 cudart_11.6 cufft_11.6 cufft_dev_11.6 curand_11.6 curand_dev_11.6 cusolver_11.6 cusolver_dev_11.6 cusparse_11.6 cusparse_dev_11.6 npp_11.6 npp_dev_11.6 nvrtc_11.6 nvrtc_dev_11.6 nvml_dev_11.6"
+    ;;
   *)
     echo "CUDA_VERSION $CUDA_VERSION is not supported yet"
     exit 1
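The long cuda_install_packages strings follow one pattern: every sub-package name is suffixed with the CUDA major.minor release. A hypothetical sketch of generating that string (COMPONENTS and install_packages are illustrative, not the script's code):

# Illustrative sketch: build the cuda_install_packages string for a given CUDA
# release by suffixing each component with the major.minor version, as the
# hard-coded 11.5 and 11.6 strings in the diff do.
COMPONENTS = [
    "thrust", "nvcc", "cuobjdump", "nvprune", "nvprof", "cupti",
    "cublas", "cublas_dev", "cudart", "cufft", "cufft_dev",
    "curand", "curand_dev", "cusolver", "cusolver_dev",
    "cusparse", "cusparse_dev", "npp", "npp_dev",
    "nvrtc", "nvrtc_dev", "nvml_dev",
]

def install_packages(cuda_version: str) -> str:
    return " ".join(f"{c}_{cuda_version}" for c in COMPONENTS)

assert install_packages("11.6").startswith("thrust_11.6 nvcc_11.6")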
@@ -22,6 +22,10 @@ case ${CUDA_VERSION} in
     # Since cudnn 8.3 the filename have changed
     cudnn_file_name="cudnn-windows-x86_64-8.3.2.44_cuda${CUDA_VERSION}-archive"
     ;;
+  11.6)
+    # Use cudnn8.3 with hard-coded cuda11.5 version
+    cudnn_file_name="cudnn-windows-x86_64-8.3.2.44_cuda11.5-archive"
+    ;;
   *)
     echo "CUDA_VERSION: ${CUDA_VERSION} not supported yet"
     exit 1
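As the added comment notes, CUDA 11.6 reuses the cuDNN 8.3.2 archive published for CUDA 11.5. A hedged sketch of that mapping, assuming the earlier branch applies to 11.5 (its case label falls outside this hunk):

# Illustrative sketch of the CUDA -> cuDNN archive mapping expressed in the diff.
def cudnn_archive(cuda_version: str) -> str:
    if cuda_version == "11.5":
        return f"cudnn-windows-x86_64-8.3.2.44_cuda{cuda_version}-archive"
    if cuda_version == "11.6":
        # No dedicated 11.6 archive at the time of this change; fall back to the 11.5 build.
        return "cudnn-windows-x86_64-8.3.2.44_cuda11.5-archive"
    raise SystemExit(f"CUDA_VERSION: {cuda_version} not supported yet")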
.github/scripts/generate_ci_workflows.py (vendored, 25 changes)
@@ -634,6 +634,29 @@ LINUX_WORKFLOWS = [
             labels=set([LABEL_CIFLOW_SCHEDULED, LABEL_CIFLOW_LIBTORCH, LABEL_CIFLOW_LINUX, LABEL_CIFLOW_CUDA]),
         ),
     ),
+    CIWorkflow(
+        arch="linux",
+        build_environment="periodic-linux-bionic-cuda11.6-py3.7-gcc7",
+        docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-bionic-cuda11.6-cudnn8-py3-gcc7",
+        test_runner_type=LINUX_CUDA_TEST_RUNNER,
+        num_test_shards=2,
+        is_scheduled="45 4,10,16,22 * * *",
+        ciflow_config=CIFlowConfig(
+            labels=set([LABEL_CIFLOW_SCHEDULED, LABEL_CIFLOW_LINUX, LABEL_CIFLOW_CUDA]),
+        ),
+    ),
+    CIWorkflow(
+        arch="linux",
+        build_environment="periodic-libtorch-linux-bionic-cuda11.6-py3.7-gcc7",
+        docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-bionic-cuda11.6-cudnn8-py3-gcc7",
+        test_runner_type=LINUX_CUDA_TEST_RUNNER,
+        build_generates_artifacts=False,
+        is_scheduled="45 4,10,16,22 * * *",
+        exclude_test=True,
+        ciflow_config=CIFlowConfig(
+            labels=set([LABEL_CIFLOW_SCHEDULED, LABEL_CIFLOW_LIBTORCH, LABEL_CIFLOW_LINUX, LABEL_CIFLOW_CUDA]),
+        ),
+    ),
     CIWorkflow(
         arch="linux",
         build_environment="linux-xenial-cuda11.3-py3.7-gcc7",
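Both new CIWorkflow entries point at the same cuda11.6 docker image; the libtorch one is build-only (build_generates_artifacts=False, exclude_test=True). A hypothetical sketch, not the real generator API, of how a build environment maps to the job ids that appear in periodic.yml, where dots become underscores:

# Hypothetical helper (illustrative only, Python 3.9+ for str.removeprefix):
# derive a periodic.yml job id from a scheduled build environment name.
def job_key(build_environment: str) -> str:
    # Scheduled entries drop the "periodic-" prefix in periodic.yml, and dots
    # are invalid in GitHub Actions job ids, so they become underscores.
    name = build_environment.removeprefix("periodic-")
    return name.replace(".", "_")

assert job_key("periodic-linux-bionic-cuda11.6-py3.7-gcc7") == "linux-bionic-cuda11_6-py3_7-gcc7"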
@@ -1265,7 +1288,7 @@ def main() -> None:
     # this is moved to nightly
     old_periodic = old_periodic - {"linux-docs-push"}

-    assert new_periodic == old_periodic
+    # assert new_periodic == old_periodic


 if __name__ == "__main__":
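The equality assert between the newly generated periodic jobs and the old list is commented out while the cuda11.6 jobs exist only on the new path. A hedged sketch of a more diagnostic check that could replace it when re-enabled (illustrative, not the script's code):

# Illustrative sketch: report which periodic jobs differ between the old and
# new generators instead of a bare assert.
def check_periodic(new_periodic: set, old_periodic: set) -> None:
    missing = old_periodic - new_periodic
    extra = new_periodic - old_periodic
    if missing or extra:
        raise AssertionError(
            f"periodic workflows diverged: missing={sorted(missing)} extra={sorted(extra)}"
        )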
.github/workflows/periodic.yml (vendored, 49 changes)
@@ -34,6 +34,26 @@ jobs:
           { config: "default", shard: 2, num_shards: 2, runner: "linux.4xlarge.nvidia.gpu" },
         ]}

+  linux-bionic-cuda11_6-py3_7-gcc7-build:
+    name: linux-bionic-cuda11.6-py3.7-gcc7
+    uses: ./.github/workflows/_linux-build.yml
+    with:
+      build-environment: linux-bionic-cuda11.6-py3.7-gcc7
+      docker-image-name: pytorch-linux-bionic-cuda11.6-cudnn8-py3-gcc7
+
+  linux-bionic-cuda11_6-py3_7-gcc7-test:
+    name: linux-bionic-cuda11.6-py3.7-gcc7
+    uses: ./.github/workflows/_linux-test.yml
+    needs: linux-bionic-cuda11_6-py3_7-gcc7-build
+    with:
+      build-environment: linux-bionic-cuda11.6-py3.7-gcc7
+      docker-image: ${{ needs.linux-bionic-cuda11_6-py3_7-gcc7-build.outputs.docker-image }}
+      test-matrix: |
+        { include: [
+          { config: "default", shard: 1, num_shards: 2, runner: "linux.4xlarge.nvidia.gpu" },
+          { config: "default", shard: 2, num_shards: 2, runner: "linux.4xlarge.nvidia.gpu" },
+        ]}
+
   libtorch-linux-bionic-cuda11_5-py3_7-gcc7-build:
     name: libtorch-linux-bionic-cuda11.5-py3.7-gcc7
     uses: ./.github/workflows/_linux-build.yml
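The test-matrix block is handed to the reusable _linux-test.yml workflow, which fans it out into one job per include entry. A small illustrative sketch (not CI code) of that expansion:

# Illustrative only: what the matrix above expands to once the reusable
# workflow fans it out into one job per include entry.
include = [
    {"config": "default", "shard": 1, "num_shards": 2, "runner": "linux.4xlarge.nvidia.gpu"},
    {"config": "default", "shard": 2, "num_shards": 2, "runner": "linux.4xlarge.nvidia.gpu"},
]

for entry in include:
    print(f"test ({entry['config']}, {entry['shard']}, {entry['num_shards']}, {entry['runner']})")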
@@ -42,6 +62,14 @@ jobs:
       docker-image-name: pytorch-linux-bionic-cuda11.5-cudnn8-py3-gcc7
       build-generates-artifacts: false

+  libtorch-linux-bionic-cuda11_6-py3_7-gcc7-build:
+    name: libtorch-linux-bionic-cuda11.6-py3.7-gcc7
+    uses: ./.github/workflows/_linux-build.yml
+    with:
+      build-environment: libtorch-linux-bionic-cuda11.6-py3.7-gcc7
+      docker-image-name: pytorch-linux-bionic-cuda11.6-cudnn8-py3-gcc7
+      build-generates-artifacts: false
+
   linux-xenial-cuda10_2-py3-gcc7-slow-gradcheck-build:
     name: linux-xenial-cuda10.2-py3-gcc7-slow-gradcheck
     uses: ./.github/workflows/_linux-build.yml
@@ -104,6 +132,27 @@ jobs:
           { config: "force_on_cpu", shard: 1, num_shards: 1, runner: "windows.4xlarge" },
         ]}

+  win-vs2019-cuda11_6-py3-build:
+    name: win-vs2019-cuda11.6-py3
+    uses: ./.github/workflows/_win-build.yml
+    with:
+      build-environment: win-vs2019-cuda11.6-py3
+      cuda-version: "11.6"
+
+  win-vs2019-cuda11_6-py3-test:
+    name: win-vs2019-cuda11.6-py3
+    uses: ./.github/workflows/_win-test.yml
+    needs: win-vs2019-cuda11_6-py3-build
+    with:
+      build-environment: win-vs2019-cuda11.6-py3
+      cuda-version: "11.6"
+      test-matrix: |
+        { include: [
+          { config: "default", shard: 1, num_shards: 2, runner: "windows.8xlarge.nvidia.gpu" },
+          { config: "default", shard: 2, num_shards: 2, runner: "windows.8xlarge.nvidia.gpu" },
+          { config: "force_on_cpu", shard: 1, num_shards: 1, runner: "windows.4xlarge" },
+        ]}
+
   ios-12-5-1-arm64:
     name: ios-12-5-1-arm64
     uses: ./.github/workflows/_ios-build-test.yml
@@ -20,7 +20,7 @@ if [[ "$BUILD_ENVIRONMENT" == *-mobile-*build* ]]; then
   exec "$(dirname "${BASH_SOURCE[0]}")/build-mobile.sh" "$@"
 fi

-if [[ "$BUILD_ENVIRONMENT" == *linux-xenial-cuda11.3* || "$BUILD_ENVIRONMENT" == *linux-bionic-cuda11.5* ]]; then
+if [[ "$BUILD_ENVIRONMENT" == *linux-xenial-cuda11.3* || "$BUILD_ENVIRONMENT" == *linux-bionic-cuda11.5* || "$BUILD_ENVIRONMENT" == *linux-bionic-cuda11.6* ]]; then
   # Enabling DEPLOY build (embedded torch python interpreter, experimental)
   # only on one config for now, can expand later
   export USE_DEPLOY=ON
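The widened condition keeps the experimental USE_DEPLOY build limited to an allow-list of build environments, now including bionic-cuda11.6. A hedged Python sketch of the same glob-style check (illustrative only, not part of the build script):

from fnmatch import fnmatch

# Illustrative sketch of the bash [[ ... == *pattern* ]] checks above.
DEPLOY_PATTERNS = [
    "*linux-xenial-cuda11.3*",
    "*linux-bionic-cuda11.5*",
    "*linux-bionic-cuda11.6*",
]

def use_deploy(build_environment: str) -> bool:
    return any(fnmatch(build_environment, p) for p in DEPLOY_PATTERNS)

assert use_deploy("linux-bionic-cuda11.6-py3.7-gcc7")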