mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-07 12:21:27 +01:00
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/38157 This removes the error prone process of assembling `torch/__init__.pyi` (and frequently forgetting to expose things), since now we can simply rely on the true source file to get things done. Most of the old codegen in gen_pyi.py is now rerouted to various files: - `torch/_C/__init__.pyi` (the dumping pile of all misc bindings) - `torch/_C/_nn.pyi` (NN function bindings) - `torch/_C/_VariableFunctions.pyi` (torch function bindings) `torch.types` grew a bunch more definitions that previously were defined in `torch/__init__.pyi` Some miscellaneous changes - Fixed a bug where we treat single TensorList argument as implying varargs are accepted. This is actually only supported on IntList. This means we can correctly generate a stub for dequantize. - Add missing manual stub for nonzero - Switched torch/onnx/operators.py to directly refer to _C module, since apparently mypy doesn't think that methods prefixed with underscores get reexported. This may be a recurring theme; maybe we need to find a better way to solve it. Because I was really lazy, I dumped namedtuple definitions in both `torch._C` and `torch._C._VariableFunctions`. This is definitely wrong. Signed-off-by: Edward Z. Yang <ezyang@fb.com> Test Plan: Imported from OSS Differential Revision: D21497400 Pulled By: ezyang fbshipit-source-id: 07b126141c82efaca37be27c07255cb2b9b3f064
76 lines
2.2 KiB
Python
from collections import namedtuple
|
|
from typing import Any, Optional, overload, Union, TypeVar, Tuple, Sequence
|
|
from torch import Tensor
|
|
from torch.types import _dtype, _device
|
|
|
|
# Runtime base class for PackedSequence: a plain 4-field namedtuple holding the
# packed data tensor plus the batch-size / (un)sorting index tensors.
PackedSequence_ = namedtuple('PackedSequence', ['data', 'batch_sizes', 'sorted_indices', 'unsorted_indices'])
|
|
|
|
|
|
# Apply ``fn`` to ``optional`` — presumably a None-propagating (monadic) bind
# that returns None when ``optional`` is None; the implementation is not
# visible from this stub, hence the loose ``Any`` typing. TODO: confirm.
def bind(optional: Any, fn: Any): ...
|
|
|
|
|
|
# TypeVar used so that PackedSequence methods returning ``self``-like objects
# preserve the concrete (sub)class type for callers.
T = TypeVar('T')
|
|
|
|
|
|
class PackedSequence(PackedSequence_):
    """Typed stub for the PackedSequence wrapper over its namedtuple base.

    Conversion methods are annotated ``self: T ... -> T`` so that subclass
    instances keep their own type through device/dtype conversions.
    """

    def __new__(cls, data: Tensor, batch_sizes: Optional[Tensor] = ..., sorted_indices: Optional[Tensor] = ...,
                unsorted_indices: Optional[Tensor] = ...) -> PackedSequence: ...

    # Returns a copy whose storage is in pinned (page-locked) memory.
    def pin_memory(self: T) -> T: ...

    # Device conversions.
    def cuda(self: T, *args: Any, **kwargs: Any) -> T: ...

    def cpu(self: T) -> T: ...

    # Dtype conversions, mirroring the corresponding Tensor methods.
    def double(self: T) -> T: ...

    def float(self: T) -> T: ...

    def half(self: T) -> T: ...

    def long(self: T) -> T: ...

    def int(self: T) -> T: ...

    def short(self: T) -> T: ...

    def char(self: T) -> T: ...

    def byte(self: T) -> T: ...

    @overload
    def to(self: T, dtype: _dtype, non_blocking: bool = False, copy: bool = False) -> T: ...

    @overload
    def to(self: T, device: Optional[Union[_device, str]] = None, dtype: Optional[_dtype] = None,
           non_blocking: bool = False, copy: bool = False) -> T: ...

    # ``self`` is annotated ``T`` for consistency with the other overloads;
    # it was previously unannotated, leaving the ``-> T`` return unbound.
    @overload
    def to(self: T, other: Tensor, non_blocking: bool = False, copy: bool = False) -> T: ...

    @property
    def is_cuda(self) -> bool: ...

    def is_pinned(self) -> bool: ...
|
|
|
|
|
|
# Invert a permutation index tensor. NOTE(review): the implementation is not
# visible here — presumably None is passed through (hence Optional input) and
# the return is Optional[Tensor]; confirm before tightening the annotation.
def invert_permutation(permutation: Optional[Tensor]): ...
|
|
|
|
|
|
# Pack a padded tensor together with per-sequence lengths into a
# PackedSequence suitable for consumption by RNN modules.
def pack_padded_sequence(input: Tensor, lengths: Tensor, batch_first: bool = ...,
                         enforce_sorted: bool = ...) -> PackedSequence: ...
|
|
|
|
|
|
# Inverse of pack_padded_sequence: unpack into a padded tensor plus a tensor
# of original sequence lengths. The return is always exactly a 2-tuple, so it
# is annotated Tuple[Tensor, Tensor] rather than the imprecise
# Tuple[Tensor, ...] (which lets callers' unpacking go unchecked).
def pad_packed_sequence(sequence: PackedSequence, batch_first: bool = ..., padding_value: float = ...,
                        total_length: Optional[int] = ...) -> Tuple[Tensor, Tensor]: ...
|
|
|
|
|
|
# Stack variable-length Tensors into one padded Tensor. ``padding_value`` is a
# fill value, not a count: the runtime accepts floats (default 0.0), so the
# previous ``int`` annotation wrongly rejected fractional fill values.
def pad_sequence(sequences: Sequence[Tensor], batch_first: bool = ..., padding_value: float = ...) -> Tensor: ...
|
|
|
|
|
|
# Pack a list of variable-length Tensors directly (no pre-padding required).
def pack_sequence(sequences: Sequence[Tensor], enforce_sorted: bool = ...) -> PackedSequence: ...
|
|
|
|
|
|
# Assemble a PackedSequence directly from its four component tensors
# (matching the fields of the PackedSequence_ namedtuple).
def get_packed_sequence(data: Tensor, batch_sizes: Optional[Tensor], sorted_indices: Optional[Tensor],
                        unsorted_indices: Optional[Tensor]) -> PackedSequence: ...
|