add type annotations to torch.nn.modules.fold (#49479)

Summary:
closes gh-49478

Fixes https://github.com/pytorch/pytorch/issues/49478

Pull Request resolved: https://github.com/pytorch/pytorch/pull/49479

Reviewed By: mruberry

Differential Revision: D25723838

Pulled By: walterddr

fbshipit-source-id: 45c4cbd6f147b6dc4a5f5419c17578c49c201022
This commit is contained in:
Guilherme Leobas 2021-01-08 13:47:41 -08:00 committed by Facebook GitHub Bot
parent 2c4b6ec457
commit aa18d17455
2 changed files with 5 additions and 8 deletions

View File

@@ -76,9 +76,6 @@ ignore_errors = True
 [mypy-torch.nn.modules.conv]
 ignore_errors = True

-[mypy-torch.nn.modules.fold]
-ignore_errors = True
-
 [mypy-torch.nn.modules.module]
 ignore_errors = True

View File

@@ -1,7 +1,7 @@
 from torch import Tensor
 from torch.types import _size
 from typing import Any, Optional, Tuple, Dict, List, Callable, Sequence, Union
-from .common_types import _ratio_any_t, _size_1_t, _size_2_t, _size_3_t, _size_2_opt_t, _size_3_opt_t
+from .common_types import _ratio_any_t, _size_any_t, _size_1_t, _size_2_t, _size_3_t, _size_2_opt_t, _size_3_opt_t

 # 'TypedDict' is a new accepted type that represents a dictionary with a fixed set of allowed keys.
 # It is standards-track but not in `typing` yet. We leave this hear to be uncommented once the feature
@@ -335,12 +335,12 @@ def normalize(input: Tensor, p: float = ..., dim: int = ..., eps: float = ...,
 def assert_int_or_pair(arg: Any, arg_name: Any, message: Any) -> None: ...
-def unfold(input: Tensor, kernel_size: _size, dilation: _size = ..., padding: _size = ...,
-           stride: _size = ...) -> Tensor: ...
+def unfold(input: Tensor, kernel_size: _size_any_t, dilation: _size_any_t = ..., padding: _size_any_t = ...,
+           stride: _size_any_t = ...) -> Tensor: ...
-def fold(input: Tensor, output_size: _size, kernel_size: _size, dilation: _size = ..., padding: _size = ...,
-         stride: _size = ...) -> Tensor: ...
+def fold(input: Tensor, output_size: _size_any_t, kernel_size: _size_any_t, dilation: _size_any_t = ..., padding: _size_any_t = ...,
+         stride: _size_any_t = ...) -> Tensor: ...
 def multi_head_attention_forward(query: Tensor,