#71946 Remove Python 3.6 references (#72211)

Summary:
Fixes https://github.com/pytorch/pytorch/issues/71946

This commit removes some bits of code that were hard coded for Python 3.6 support from the `.circleci` and `torch` folders. It should only be merged if https://github.com/pytorch/pytorch/issues/66462 is complete.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/72211

Reviewed By: dagitses, seemethere

Differential Revision: D33982604

Pulled By: musebc

fbshipit-source-id: 8f453bf9909df615addd59538adb369c65484044
This commit is contained in:
Brian Muse 2022-02-07 19:41:08 -08:00 committed by Facebook GitHub Bot
parent 1f3e13efba
commit 944a9970fe
13 changed files with 23 additions and 80 deletions

View File

@ -71,9 +71,9 @@ A **binary configuration** is a collection of
* release or nightly
* releases are stable, nightlies are beta and built every night
* python version
* linux: 3.5m, 3.6m, 3.7m (mu is wide unicode or something like that. It usually doesn't matter but you should know that it exists)
* macos: 3.6, 3.7, 3.8
* windows: 3.6, 3.7, 3.8
* linux: 3.7m (mu is wide unicode or something like that. It usually doesn't matter but you should know that it exists)
* macos: 3.7, 3.8
* windows: 3.7, 3.8
* cpu version
* cpu, cuda 9.0, cuda 10.0
* The supported cuda versions occasionally change
@ -428,7 +428,7 @@ docker run \
# possibly need are in .circleci/scripts/binary_populate_env.sh
# You should probably always export at least these 3 variables
export PACKAGE_TYPE=conda
export DESIRED_PYTHON=3.6
export DESIRED_PYTHON=3.7
export DESIRED_CUDA=cpu
# Call the entrypoint
@ -476,7 +476,7 @@ conda activate binary
# possibly need are in .circleci/scripts/binary_populate_env.sh
# You should probably always export at least these 3 variables
export PACKAGE_TYPE=conda
export DESIRED_PYTHON=3.6
export DESIRED_PYTHON=3.7
export DESIRED_CUDA=cpu
# Call the entrypoint you want

View File

@ -334,13 +334,12 @@ def instantiate_configs(only_slow_gradcheck):
build_only=build_only,
)
# run docs builds on "pytorch-linux-xenial-py3.6-gcc5.4". Docs builds
# run docs builds on "pytorch-linux-xenial-py3.7-gcc5.4". Docs builds
# should run on a CPU-only build that runs on all PRs.
# XXX should this be updated to a more modern build? Projects are
# beginning to drop python3.6
# XXX should this be updated to a more modern build?
if (
distro_name == "xenial"
and fc.find_prop("pyver") == "3.6"
and fc.find_prop("pyver") == "3.7"
and cuda_version is None
and parallel_backend is None
and not is_vulkan

View File

@ -26,7 +26,7 @@ def get_workflow_jobs(images=IMAGE_NAMES, only_slow_gradcheck=False):
"name": quote(f"docker-{image_name}"),
"image_name": quote(image_name),
})
if image_name == "pytorch-linux-xenial-py3.6-gcc5.4":
if image_name == "pytorch-linux-xenial-py3.7-gcc5.4":
# pushing documentation on tags requires CircleCI to also
# build all the dependencies on tags, including this docker image
parameters['filters'] = gen_filter_dict(branches_list=r"/.*/",

View File

@ -13,12 +13,7 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
CONDA_FILE="Miniconda2-latest-Linux-x86_64.sh"
;;
3)
if [ "$ANACONDA_PYTHON_VERSION" = "3.6" ]; then
# Latest release of Conda that still supports python-3.6
CONDA_FILE="Miniconda3-py37_4.10.3-Linux-x86_64.sh"
else
CONDA_FILE="Miniconda3-latest-Linux-x86_64.sh"
fi
CONDA_FILE="Miniconda3-latest-Linux-x86_64.sh"
;;
*)
echo "Unsupported ANACONDA_PYTHON_VERSION: $ANACONDA_PYTHON_VERSION"
@ -61,9 +56,7 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
pushd /opt/conda
# Track latest conda update
if [ "$ANACONDA_PYTHON_VERSION" != "3.6" ]; then
as_jenkins conda update -y -n base conda
fi
as_jenkins conda update -y -n base conda
# Install correct Python version
as_jenkins conda install -y python="$ANACONDA_PYTHON_VERSION"

View File

@ -1217,11 +1217,8 @@ def _isinstance(obj, target_type) -> bool:
if origin_type:
return container_checker(obj, target_type)
# Check to handle weird python type behaviors
# 1. python 3.6 returns None for origin of containers without
# contained type (intead of returning outer container type)
# 2. non-typed optional origin returns as none instead
# of as optional in 3.6-3.8
# Check to handle non-typed optional origin returns as none instead
# of as optional in 3.7-3.8
check_args_exist(target_type)
# handle non-containers

View File

@ -2,33 +2,9 @@
#include <torch/csrc/python_headers.h>
#if PY_VERSION_HEX < 0x03070000
// METH_FASTCALL was introduced in Python 3.7, so we wrap _PyCFunctionFast
// signatures for earlier versions.
template <PyObject* (*f)(PyObject*, PyObject *const *, Py_ssize_t)>
PyObject* maybe_wrap_fastcall(PyObject *module, PyObject *args) {
return f(
module,
// _PyTuple_ITEMS
// Because this is only a compat shim for Python 3.6, we don't have
// to worry about the representation changing.
((PyTupleObject *)args)->ob_item,
PySequence_Fast_GET_SIZE(args)
);
}
#define MAYBE_METH_FASTCALL METH_VARARGS
#define MAYBE_WRAP_FASTCALL(f) maybe_wrap_fastcall<f>
#else
#define MAYBE_METH_FASTCALL METH_FASTCALL
#define MAYBE_WRAP_FASTCALL(f) (PyCFunction)(void(*)(void))f
#endif
// PyPy 3.6 does not yet have PySlice_Unpack
#if PY_VERSION_HEX < 0x03060100 || defined(PYPY_VERSION)

View File

@ -297,9 +297,6 @@ class Join():
"""
ones = torch.ones(1, device=self._device)
dist.all_reduce(ones, group=self._process_group)
# NOTE: Raising `StopIteration` does not throw an error in Python 3.6
# and throws a `RuntimeError` in Python 3.7+ (PEP 479), so we just
# raise a `RuntimeError` here
raise RuntimeError(f"Rank {self._rank} exhausted all inputs.")
@staticmethod

View File

@ -14,7 +14,7 @@ import signal
import subprocess
import sys
import time
from contextlib import AbstractContextManager
from contextlib import nullcontext
from dataclasses import dataclass, field
from enum import IntFlag
from multiprocessing import synchronize
@ -334,22 +334,9 @@ class PContext(abc.ABC):
self._stderr_tail.stop()
class _nullcontext(AbstractContextManager):
# TODO remove and replace in favor of contextlib.nullcontext
# when torch drops support for python3.6
def __init__(self, enter_result=None):
self.enter_result = enter_result
def __enter__(self):
return self.enter_result
def __exit__(self, *excinfo):
pass
def get_std_cm(std_rd: str, redirect_fn):
if IS_WINDOWS or IS_MACOS or not std_rd:
return _nullcontext()
return nullcontext()
else:
return redirect_fn(std_rd)

View File

@ -1,3 +1,5 @@
from __future__ import annotations
from typing import cast, Callable, Generic, List, Optional, Type, TypeVar, Union
import torch
@ -90,8 +92,7 @@ class Future(torch._C.Future, Generic[T], metaclass=_PyFutureMeta):
"""
return super().value()
# Have to use string annotations because PEP-0563 is not available in 3.6
def then(self, callback): # type: (Callable[[Future[T]], S]) -> Future[S]
def then(self, callback: Callable[[Future[T]], S]) -> Future[S]:
r"""
Append the given callback function to this ``Future``, which will be run
when the ``Future`` is completed. Multiple callbacks can be added to
@ -154,8 +155,7 @@ class Future(torch._C.Future, Generic[T], metaclass=_PyFutureMeta):
"""
return cast(Future[S], super().then(callback))
# Have to use string annotations because PEP-0563 is not available in 3.6
def add_done_callback(self, callback): # type: (Callable[[Future[T]], None]) -> None
def add_done_callback(self, callback: Callable[[Future[T]], None]) -> None:
r"""
Append the given callback function to this ``Future``, which will be run
when the ``Future`` is completed. Multiple callbacks can be added to

View File

@ -49,11 +49,7 @@ def _type_repr(obj):
typically enough to uniquely identify a type. For everything
else, we fall back on repr(obj).
"""
# HACK: In Python 3.6, type aliases from ``typing`` are instances of ``type``, but in
# later Python versions, type aliases are not instances of ``type``!! We want
# all type aliases to fall through to ``repr``, so if we have a type that is
# in the module typing, don't go down this path.
if isinstance(obj, type) and obj.__module__ != 'typing':
if isinstance(obj, type):
if obj.__module__ == 'builtins':
return obj.__qualname__
return f'{obj.__module__}.{obj.__qualname__}'

View File

@ -8,6 +8,7 @@ from ..parameter import Parameter
import torch.utils.hooks as hooks
from torch import Tensor, device, dtype
import typing
from typing import Union, Tuple, Any, Callable, Iterator, Set, Optional, overload, TypeVar, Mapping, Dict, List
from ...utils.hooks import RemovableHandle
@ -1288,7 +1289,7 @@ class Module:
# TODO: Remove string escape once Python-3.6 no longer supported
# See https://github.com/python/mypy/issues/6904#issuecomment-496207426
@overload
def state_dict(self, prefix: str = ..., keep_vars: bool = ...) -> 'OrderedDict[str, Tensor]':
def state_dict(self, prefix: str = ..., keep_vars: bool = ...) -> typing.OrderedDict[str, Tensor]:
...
def state_dict(self, destination=None, prefix='', keep_vars=False):

View File

@ -1436,9 +1436,7 @@ has_torch_function_variadic = _add_docstr(
_has_torch_function_variadic,
r"""Special case of `has_torch_function` that skips tuple creation.
This uses the METH_FASTCALL protocol introduced in Python 3.7; for 3.6
and before it has roughly equivalent performance compared to
`has_torch_function`.
This uses the METH_FASTCALL protocol introduced in Python 3.7.
Instead of:
`has_torch_function((a, b))`

View File

@ -258,7 +258,6 @@ class _DataPipeMeta(GenericMeta):
return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]
# For Python > 3.6
cls.__origin__ = None
if 'type' in namespace:
return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]