# Mirror of https://github.com/zebrajr/pytorch.git (synced 2025-12-07 00:21:07 +01:00).
# Upstream commit note: some imports are skipped for now to keep the change tractable.
# lintrunner on CI raised missing-generic-type errors in all imported `.pyi` files
# (the MYPYINDUCTOR config sets `disallow_any_generics`), so `disable-error-code`
# comments were added to the affected files, with a few easy ones fixed outright.
# Pull Request: https://github.com/pytorch/pytorch/pull/113830
# Approved by: https://github.com/Skylion007 (ghstack dependencies: #113722, #113721)
# File stats from the mirror page: 68 lines, 1.8 KiB, Python.
# mypy: disable-error-code="type-arg"
from typing import List, Optional, overload, Sequence, Tuple, Union
from torch import memory_format, Tensor
from torch.types import _bool, _device, _dtype, _int, _size
# Defined in tools/autograd/templates/python_nn_functions.cpp
${c_nn_function_hints}
# Defined in aten/src/ATen/native/mkldnn/Linear.cpp
# Python binding for the MKL-DNN (oneDNN) fully-connected/linear kernel.
# `bias` may be None — the stub shows only that None is accepted; presumably
# no bias is added in that case (confirm against the C++ implementation).
def mkldnn_linear(input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor: ...
# Defined at aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp
# Reorders a conv2d weight tensor into the blocked layout MKL-DNN (oneDNN)
# expects, given the convolution hyper-parameters.
# Fix: the bare `List` annotations (silenced file-wide via
# `disable-error-code="type-arg"`) are narrowed to `List[int]`, matching the
# C++ binding's IntArrayRef parameters.
def mkldnn_reorder_conv2d_weight(
    self: Tensor,
    padding: List[int],
    stride: List[int],
    # NOTE: "dilatation" is the upstream (mis)spelling of "dilation"; it is
    # part of the keyword interface and must not be renamed here.
    dilatation: List[int],
    groups: int,
) -> Tensor: ...
# Reorders a conv3d weight tensor into the blocked layout MKL-DNN (oneDNN)
# expects; the 3-D counterpart of mkldnn_reorder_conv2d_weight
# (aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp).
# Fix: bare `List` annotations narrowed to `List[int]` — the defect the
# file-level `disable-error-code="type-arg"` comment was added to silence.
def mkldnn_reorder_conv3d_weight(
    self: Tensor,
    padding: List[int],
    stride: List[int],
    # NOTE: "dilatation" is the upstream (mis)spelling of "dilation"; keyword
    # callers depend on it, so it stays.
    dilatation: List[int],
    groups: int,
) -> Tensor: ...
# Defined in aten/src/ATen/native/mkldnn/Prelu.cpp
# PReLU (parametric ReLU) for MKL-DNN tensors. `weight` holds the learned
# negative-slope parameter(s) — presumably scalar or per-channel; verify
# against the C++ kernel, the stub shows only the signature.
def mkldnn_prelu(input: Tensor, weight: Tensor) -> Tensor: ...
# Defined at tools/autograd/templates/python_nn_functions.cpp
# Argument parser backing Tensor.to(); the three overloads mirror its calling
# conventions:
#   (device, dtype, non_blocking, copy, *, memory_format)
#   (dtype, non_blocking, copy, *, memory_format)          # device inferred
#   (other_tensor, non_blocking, copy, *, memory_format)   # copy other's props
# All forms normalize to a (device, dtype, non_blocking, memory_format) tuple.
# NOTE: the `memory_format` parameter name shadows the torch.memory_format
# type of the same name — legal in a stub, but read the annotations carefully.
@overload
def _parse_to(
    device: _device,
    dtype: _dtype,
    non_blocking: _bool,
    copy: _bool,
    *,
    memory_format: memory_format,
) -> Tuple[_device, _dtype, _bool, memory_format]: ...
@overload
def _parse_to(
    dtype: _dtype,
    non_blocking: _bool,
    copy: _bool,
    *,
    memory_format: memory_format,
) -> Tuple[_device, _dtype, _bool, memory_format]: ...
@overload
def _parse_to(
    tensor: Tensor,
    non_blocking: _bool,
    copy: _bool,
    *,
    memory_format: memory_format,
) -> Tuple[_device, _dtype, _bool, memory_format]: ...
# Defined in aten/src/ATen/native/PadSequence.cpp
# Pads a list of variable-length tensors to equal length with `padding_value`.
# When `batch_first` is False the result presumably has shape (T, B, ...),
# otherwise (B, T, ...) — confirm against the C++ implementation; the stub
# shows only the signature. The `...` default for `padding_value` means the
# default is supplied by the C++ binding (conventionally 0.0).
def pad_sequence(
    sequences: List[Tensor],
    batch_first: bool = False,
    padding_value: float = ...,
) -> Tensor: ...
# Flattens a list of dense tensors into one contiguous buffer tensor —
# counterpart of unflatten_dense_tensors; exact layout guarantees are defined
# by the C++ binding, which this stub only declares.
def flatten_dense_tensors(tensors: List[Tensor]) -> Tensor: ...
# Splits `flat` back into views shaped like the reference `tensors` —
# inverse of flatten_dense_tensors; semantics live in the C++ binding,
# this stub only declares the signature.
def unflatten_dense_tensors(flat: Tensor, tensors: List[Tensor]) -> List[Tensor]: ...