Revert "[CI] Move CUDA-11.6 to Python-3.10 configuration (#81233)"

This reverts commit 7ccf693cf6.

Reverted https://github.com/pytorch/pytorch/pull/81233 on behalf of https://github.com/janeyx99 because it should have been reverted along with #81372, which broke internal builds
PyTorch MergeBot 2022-07-18 17:15:50 +00:00
parent 1233c3c256
commit c96485804f
7 changed files with 13 additions and 25 deletions


@@ -141,7 +141,7 @@ case "$image" in
pytorch-linux-bionic-cuda11.6-cudnn8-py3-gcc7)
CUDA_VERSION=11.6.2
CUDNN_VERSION=8
-ANACONDA_PYTHON_VERSION=3.10
+ANACONDA_PYTHON_VERSION=3.7
GCC_VERSION=7
PROTOBUF=yes
DB=yes


@@ -76,10 +76,7 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
# DO NOT install cmake here as it would install a version newer than 3.13, but
# we want to pin to version 3.13.
CONDA_COMMON_DEPS="astunparse pyyaml mkl=2022.0.1 mkl-include=2022.0.1 setuptools cffi future six"
-if [ "$ANACONDA_PYTHON_VERSION" = "3.10" ]; then
-# Install llvm-8 as it is required to compile llvmlite-0.30.0 from source
-conda_install numpy=1.21.2 ${CONDA_COMMON_DEPS} llvmdev=8.0.0
-elif [ "$ANACONDA_PYTHON_VERSION" = "3.9" ]; then
+if [ "$ANACONDA_PYTHON_VERSION" = "3.9" ]; then
# Install llvm-8 as it is required to compile llvmlite-0.30.0 from source
conda_install numpy=1.19.2 ${CONDA_COMMON_DEPS} llvmdev=8.0.0
elif [ "$ANACONDA_PYTHON_VERSION" = "3.8" ]; then


@@ -100,7 +100,6 @@ networkx==2.6.3
numba==0.49.0 ; python_version < "3.9"
numba==0.54.1 ; python_version == "3.9"
-numba==0.55.2 ; python_version == "3.10"
#Description: Just-In-Time Compiler for Numerical Functions
#Pinned versions: 0.54.1, 0.49.0, <=0.49.1
#test that import: test_numba_integration.py
@@ -179,8 +178,7 @@ scikit-image
#Pinned versions: 0.20.3
#test that import:
-scipy==1.6.3 ; python_version < "3.10"
-scipy==1.8.1 ; python_version == "3.10"
+scipy==1.6.3
# Pin SciPy because of failing distribution tests (see #60347)
#Description: scientific python
#Pinned versions: 1.6.3
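
The `; python_version == "3.10"` suffixes in the two requirements hunks above are PEP 508 environment markers: pip evaluates each marker against the running interpreter and installs only the pin whose marker is true, which is how one requirements file carries per-Python-version pins. A minimal, illustrative sketch of that evaluation using the `packaging` library (the marker strings are copied from the hunks; the script itself is not part of this change):

# Illustrative only: how pip-style environment markers such as
# `scipy==1.6.3 ; python_version < "3.10"` are evaluated against the
# running interpreter. Requires the `packaging` package.
from packaging.markers import Marker

pins = {
    'numba==0.49.0': Marker('python_version < "3.9"'),
    'numba==0.54.1': Marker('python_version == "3.9"'),
    'scipy==1.6.3': Marker('python_version < "3.10"'),
}

for requirement, marker in pins.items():
    # Marker.evaluate() returns True only when the marker matches the
    # interpreter this script runs under.
    print(requirement, marker.evaluate())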


@@ -148,20 +148,20 @@ jobs:
{ config: "default", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
]}
-linux-bionic-cuda11_6-py3_10-gcc7-build:
-name: linux-bionic-cuda11.6-py3.10-gcc7
+linux-bionic-cuda11_6-py3_7-gcc7-build:
+name: linux-bionic-cuda11.6-py3.7-gcc7
uses: ./.github/workflows/_linux-build.yml
with:
-build-environment: linux-bionic-cuda11.6-py3.10-gcc7
+build-environment: linux-bionic-cuda11.6-py3.7-gcc7
docker-image-name: pytorch-linux-bionic-cuda11.6-cudnn8-py3-gcc7
-linux-bionic-cuda11_6-py3_10-gcc7-test:
-name: linux-bionic-cuda11.6-py3.10-gcc7
+linux-bionic-cuda11_6-py3_7-gcc7-test:
+name: linux-bionic-cuda11.6-py3.7-gcc7
uses: ./.github/workflows/_linux-test.yml
-needs: linux-bionic-cuda11_6-py3_10-gcc7-build
+needs: linux-bionic-cuda11_6-py3_7-gcc7-build
with:
-build-environment: linux-bionic-cuda11.6-py3.10-gcc7
-docker-image: ${{ needs.linux-bionic-cuda11_6-py3_10-gcc7-build.outputs.docker-image }}
+build-environment: linux-bionic-cuda11.6-py3.7-gcc7
+docker-image: ${{ needs.linux-bionic-cuda11_6-py3_7-gcc7-build.outputs.docker-image }}
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 4, runner: "linux.4xlarge.nvidia.gpu" },


@@ -75,9 +75,6 @@ class TestHash(JitTestCase):
self.checkScript(fn, (1.2345, float("inf")))
self.checkScript(fn, (float("inf"), float("inf")))
self.checkScript(fn, (1.2345, float('nan')))
-if sys.version_info < (3, 10):
-# Hash of two nans are not guaranteed to be equal. From https://docs.python.org/3/whatsnew/3.10.html :
-# Hashes of NaN values of both float type and decimal.Decimal type now depend on object identity.
self.checkScript(fn, (float("nan"), float("nan")))
self.checkScript(fn, (float("nan"), float("inf")))


@@ -3280,7 +3280,6 @@ class TestFX(JitTestCase):
.run(scripted.code)
@unittest.skipIf(IS_WINDOWS, "Python Windows bug? https://bugs.python.org/issue45108")
-@unittest.skipIf(sys.version_info >= (3, 10), "Does not work on Python-3.10")
def test_assert(self):
def f(x):
assert x > 1
@@ -4056,7 +4055,7 @@ class TestFunctionalTracing(JitTestCase):
def functional_test(self):
if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
-sys.version_info >= (3, 8) and sys.version_info < (3, 11):
+sys.version_info >= (3, 8) and sys.version_info < (3, 10):
exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
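
Both hunks in this file gate behavior on `sys.version_info`: the first removes a decorator-level skip for Python 3.10+, the second narrows an in-test version range. A hypothetical sketch of the two patterns (the test names and bodies below are placeholders, not PyTorch code):

# Hypothetical sketch of the version-gating patterns used in the hunks above.
import sys
import unittest


class VersionGatedExample(unittest.TestCase):
    # Pattern 1: skip the whole test at collection time on newer interpreters.
    @unittest.skipIf(sys.version_info >= (3, 10), "Does not work on Python-3.10")
    def test_only_below_3_10(self):
        self.assertTrue(True)

    # Pattern 2: branch inside the test on an interpreter version range.
    def test_version_range_branch(self):
        if (3, 8) <= sys.version_info[:2] < (3, 10):
            with self.assertRaisesRegex(ValueError, "boom"):
                raise ValueError("boom")
        else:
            self.assertTrue(True)


if __name__ == "__main__":
    unittest.main()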


@@ -7442,9 +7442,6 @@ a")
# tensor from empty list is type float in python and annotated type in torchscript
if "annotate" in li and "dtype" not in option:
continue
-# Skip unsigned tensor initializaton for signed values on 3.10
-if sys.version_info[:2] >= (3, 10) and "torch.uint8" in option and "-" in li:
-continue
code = tensor_template.format(list_create=li, tensor_op=op, options=option)
scope = {}
exec(code, globals(), scope)