[Reland] Add py-3.10 config (#82329)
This is a re-land of #81372 and #81233, with the exception that it does not force the range checks on older Python runtime versions, and as such should not affect the internal workloads that were the reason for the revert; see https://github.com/pytorch/pytorch/pull/81372#issuecomment-1187516464

- [Py3.10] Allow floats to be imported as Long (#81372)
- [CI] Move CUDA-11.6 to Python-3.10 configuration (#81233)
- Don't do anything about range checks for pre-py3.10

Pull Request resolved: https://github.com/pytorch/pytorch/pull/82329
Approved by: https://github.com/kit1980
This commit is contained in:
parent 3b6b27e9d7
commit d80fe49de0
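At its core, the reland keeps the Python-3.10 behavior change contained: floats may still be imported as integral scalar types, but on 3.10 the converted value is range-checked, while older runtimes keep the old unchecked cast. A minimal pure-Python sketch of those semantics (a hypothetical unpack_integral helper, not PyTorch's actual code path):

import sys

def unpack_integral(obj, lo, hi):
    # Hypothetical sketch of the semantics of the C++ unpackIntegral<T>
    # added in this commit: on 3.10+ floats are converted explicitly and
    # the result is range-checked; pre-3.10 keeps the permissive cast.
    if sys.version_info >= (3, 10):
        value = int(obj)
        if not lo <= value <= hi:
            raise OverflowError(f"{obj!r} does not fit in [{lo}, {hi}]")
        return value
    return int(obj)  # pre-3.10: unchecked, matching the old behavior

print(unpack_integral(1.0, 0, 255))    # 1
print(unpack_integral(-3, -128, 127))  # -3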
@@ -141,7 +141,7 @@ case "$image" in
   pytorch-linux-bionic-cuda11.6-cudnn8-py3-gcc7)
     CUDA_VERSION=11.6.2
     CUDNN_VERSION=8
-    ANACONDA_PYTHON_VERSION=3.7
+    ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=7
     PROTOBUF=yes
     DB=yes
@@ -76,7 +76,10 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
   # DO NOT install cmake here as it would install a version newer than 3.13, but
   # we want to pin to version 3.13.
   CONDA_COMMON_DEPS="astunparse pyyaml mkl=2022.0.1 mkl-include=2022.0.1 setuptools cffi future six"
-  if [ "$ANACONDA_PYTHON_VERSION" = "3.9" ]; then
+  if [ "$ANACONDA_PYTHON_VERSION" = "3.10" ]; then
+    # Install llvm-8 as it is required to compile llvmlite-0.30.0 from source
+    conda_install numpy=1.21.2 ${CONDA_COMMON_DEPS} llvmdev=8.0.0
+  elif [ "$ANACONDA_PYTHON_VERSION" = "3.9" ]; then
     # Install llvm-8 as it is required to compile llvmlite-0.30.0 from source
     conda_install numpy=1.19.2 ${CONDA_COMMON_DEPS} llvmdev=8.0.0
   elif [ "$ANACONDA_PYTHON_VERSION" = "3.8" ]; then
@@ -100,6 +100,7 @@ networkx==2.6.3

 numba==0.49.0 ; python_version < "3.9"
 numba==0.54.1 ; python_version == "3.9"
+numba==0.55.2 ; python_version == "3.10"
 #Description: Just-In-Time Compiler for Numerical Functions
 #Pinned versions: 0.54.1, 0.49.0, <=0.49.1
 #test that import: test_numba_integration.py
@@ -188,7 +189,8 @@ scikit-image
 #Pinned versions: 0.20.3
 #test that import:

-scipy==1.6.3
+scipy==1.6.3 ; python_version < "3.10"
+scipy==1.8.1 ; python_version == "3.10"
 # Pin SciPy because of failing distribution tests (see #60347)
 #Description: scientific python
 #Pinned versions: 1.6.3
.github/workflows/pull.yml (16 lines changed, vendored)
@@ -149,20 +149,20 @@ jobs:
           { config: "default", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
         ]}

-  linux-bionic-cuda11_6-py3_7-gcc7-build:
-    name: linux-bionic-cuda11.6-py3.7-gcc7
+  linux-bionic-cuda11_6-py3_10-gcc7-build:
+    name: linux-bionic-cuda11.6-py3.10-gcc7
     uses: ./.github/workflows/_linux-build.yml
     with:
-      build-environment: linux-bionic-cuda11.6-py3.7-gcc7
+      build-environment: linux-bionic-cuda11.6-py3.10-gcc7
       docker-image-name: pytorch-linux-bionic-cuda11.6-cudnn8-py3-gcc7

-  linux-bionic-cuda11_6-py3_7-gcc7-test:
-    name: linux-bionic-cuda11.6-py3.7-gcc7
+  linux-bionic-cuda11_6-py3_10-gcc7-test:
+    name: linux-bionic-cuda11.6-py3.10-gcc7
     uses: ./.github/workflows/_linux-test.yml
-    needs: linux-bionic-cuda11_6-py3_7-gcc7-build
+    needs: linux-bionic-cuda11_6-py3_10-gcc7-build
     with:
-      build-environment: linux-bionic-cuda11.6-py3.7-gcc7
-      docker-image: ${{ needs.linux-bionic-cuda11_6-py3_7-gcc7-build.outputs.docker-image }}
+      build-environment: linux-bionic-cuda11.6-py3.10-gcc7
+      docker-image: ${{ needs.linux-bionic-cuda11_6-py3_10-gcc7-build.outputs.docker-image }}
       test-matrix: |
         { include: [
           { config: "default", shard: 1, num_shards: 4, runner: "linux.4xlarge.nvidia.gpu" },
@@ -75,7 +75,10 @@ class TestHash(JitTestCase):
         self.checkScript(fn, (1.2345, float("inf")))
         self.checkScript(fn, (float("inf"), float("inf")))
         self.checkScript(fn, (1.2345, float('nan')))
-        self.checkScript(fn, (float("nan"), float("nan")))
+        if sys.version_info < (3, 10):
+            # Hashes of two NaNs are not guaranteed to be equal. From https://docs.python.org/3/whatsnew/3.10.html :
+            # "Hashes of NaN values of both float type and decimal.Decimal type now depend on object identity."
+            self.checkScript(fn, (float("nan"), float("nan")))
         self.checkScript(fn, (float("nan"), float("inf")))

     def test_hash_int(self):
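The guard is needed because CPython 3.10 changed NaN hashing: before 3.10, hash(float('nan')) was always 0, so two distinct NaNs hashed equal; from 3.10 on, the hash is derived from object identity. A quick demonstration:

a = float("nan")
b = float("nan")

# Python < 3.10: both hashes are 0, so they compare equal.
# Python >= 3.10: hashes depend on object identity, so two distinct
# NaN objects will (almost certainly) hash differently.
print(hash(a), hash(b), hash(a) == hash(b))
print(hash(a) == hash(a))  # the same object is still self-consistent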
@@ -3283,6 +3283,7 @@ class TestFX(JitTestCase):
             .run(scripted.code)

     @unittest.skipIf(IS_WINDOWS, "Python Windows bug? https://bugs.python.org/issue45108")
+    @unittest.skipIf(sys.version_info >= (3, 10), "Does not work on Python-3.10")
     def test_assert(self):
         def f(x):
             assert x > 1
@@ -4020,7 +4021,7 @@ class TestFunctionalTracing(JitTestCase):

         def functional_test(self):
             if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
-                    sys.version_info >= (3, 8) and sys.version_info < (3, 10):
+                    sys.version_info >= (3, 8) and sys.version_info < (3, 11):
                 exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
                 with self.assertRaisesRegex(exc, err):
                     symbolic_trace(fn)
@@ -7442,6 +7442,9 @@ a")
                 # tensor from empty list is type float in python and annotated type in torchscript
                 if "annotate" in li and "dtype" not in option:
                     continue
+                # Skip unsigned tensor initialization for signed values on 3.10
+                if sys.version_info[:2] >= (3, 10) and "torch.uint8" in option and "-" in li:
+                    continue
                 code = tensor_template.format(list_create=li, tensor_op=op, options=option)
                 scope = {}
                 exec(code, globals(), scope)
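This skip sidesteps inputs like a negative literal stored into a torch.uint8 tensor: before the patch such a value silently wrapped (e.g. -1 became 255), while on 3.10 the new range-checked conversion rejects it. A hedged probe, assuming Python 3.10+ and a PyTorch build containing this patch (the exact exception type is an assumption):

import sys
import torch

if sys.version_info[:2] >= (3, 10):
    try:
        torch.tensor([-1], dtype=torch.uint8)
    except RuntimeError as err:  # assumed exception type
        print("range check rejected -1 for uint8:", err)
else:
    # Pre-3.10 behavior is unchanged: -1 wraps around to 255.
    print(torch.tensor([-1], dtype=torch.uint8))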
@@ -207,11 +207,14 @@ class TestSparseCompressed(TestCase):
         # replaced with a N-list where N==len(densesize) and the
         # shape corresponds to densesize.

+        max_val = torch.iinfo(dtype).max if dtype in [torch.int16, torch.int8, torch.uint8] else None
+
         def list_add(lst, value):
             # recursively add a value to lst items
             if isinstance(lst, list):
                 return [list_add(item, value) for item in lst]
-            return lst + value
+            rc = lst + value
+            return rc if max_val is None else (rc % max_val)

         def stretch_values(value, bdim, values_item_shape):
             # replace a value with a new value that extends the
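The max_val modulo keeps the generated test values inside the dtype's representable range, so the stricter 3.10 conversion never sees an out-of-range scalar. The same idiom in isolation (a standalone sketch, not the test's surrounding machinery):

import torch

def list_add(lst, value, max_val=None):
    # Recursively add `value` to every leaf of a nested list, wrapping
    # modulo `max_val` so results stay representable in a small int dtype.
    if isinstance(lst, list):
        return [list_add(item, value, max_val) for item in lst]
    rc = lst + value
    return rc if max_val is None else rc % max_val

max_val = torch.iinfo(torch.uint8).max  # 255
print(list_add([[250, 3], [7]], 10, max_val))  # [[5, 13], [17]]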
@@ -1,6 +1,7 @@
 #pragma once

 #include <ATen/ATen.h>
+#include <c10/util/TypeCast.h>
 #include <torch/csrc/python_headers.h>

 #include <torch/csrc/Exceptions.h>
@@ -9,22 +10,36 @@
 namespace torch {
 namespace utils {

+template <typename T>
+inline T unpackIntegral(PyObject* obj, const char* type) {
+#if PY_VERSION_HEX >= 0x030a00f0
+  // In Python-3.10 floats can no longer be silently converted to integers
+  // Keep backward compatible behavior for now
+  if (PyFloat_Check(obj)) {
+    return c10::checked_convert<T>(THPUtils_unpackDouble(obj), type);
+  }
+  return c10::checked_convert<T>(THPUtils_unpackLong(obj), type);
+#else
+  return static_cast<T>(THPUtils_unpackLong(obj));
+#endif
+}
+
 inline void store_scalar(void* data, at::ScalarType scalarType, PyObject* obj) {
   switch (scalarType) {
     case at::kByte:
-      *(uint8_t*)data = (uint8_t)THPUtils_unpackLong(obj);
+      *(uint8_t*)data = unpackIntegral<uint8_t>(obj, "uint8");
       break;
     case at::kChar:
-      *(int8_t*)data = (int8_t)THPUtils_unpackLong(obj);
+      *(int8_t*)data = unpackIntegral<int8_t>(obj, "int8");
       break;
     case at::kShort:
-      *(int16_t*)data = (int16_t)THPUtils_unpackLong(obj);
+      *(int16_t*)data = unpackIntegral<int16_t>(obj, "int16");
       break;
     case at::kInt:
-      *(int32_t*)data = (int32_t)THPUtils_unpackLong(obj);
+      *(int32_t*)data = unpackIntegral<int32_t>(obj, "int32");
       break;
     case at::kLong:
-      *(int64_t*)data = THPUtils_unpackLong(obj);
+      *(int64_t*)data = unpackIntegral<int64_t>(obj, "int64");
       break;
     case at::kHalf:
       *(at::Half*)data =
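Taken together with store_scalar, unpackIntegral is what keeps float scalars importable into integer tensors on 3.10, now with bounds checking via c10::checked_convert. A hedged Python-level view of the intended behavior (assumes Python 3.10+ and a build containing this patch; the exception type is an assumption):

import torch

t = torch.zeros(3, dtype=torch.uint8)
t[0] = 7.0   # a float is still accepted and stored as an integer
print(t[0])  # tensor(7, dtype=torch.uint8)

try:
    t[1] = 1000.0  # out of uint8 range: the new check should reject this
except RuntimeError as err:  # assumed exception type
    print("range check fired:", err)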