Summary:
Added stubs for:

* The `device` module
* The `cuda` module
* Parts of the `optim` module
* Began adding stubs for the `autograd` module. I'll annotate more later but `no_grad` and friends are probably the most used exports from it so it seemed like a good place to start.

This would close #16996, although comments on that issue reference other missing stubs so maybe it's worth keeping open as an umbrella issue. The big remaining missing package is `nn`.

Also added a `py.typed` file so mypy will pick up on the type stubs. That closes #17639.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/18511

Differential Revision: D14715053

Pulled By: ezyang

fbshipit-source-id: 9e4882ac997063650e6ce47604b3eaf1232c61c9
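As a quick illustration (not part of the commit), this is the kind of mistake the new stubs plus the `py.typed` marker let mypy report; the snippet and its values are made up for the example:

# Hypothetical snippet type-checked against the new stubs.
import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import StepLR

opt = SGD([torch.zeros(2, requires_grad=True)], lr=0.1)
# With the StepLR stub below, mypy flags this call:
# step_size has incompatible type "str"; expected "int".
sched = StepLR(opt, step_size="10", gamma=0.5)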
32 lines
1.5 KiB
Python
# Type stubs for torch.optim.lr_scheduler.
from typing import Any, Callable, Iterable, List, Optional, Union

from .optimizer import Optimizer

class _LRScheduler:
    def __init__(self, optimizer: Optimizer, last_epoch: int=...) -> None: ...
    def state_dict(self) -> dict: ...
    def load_state_dict(self, state_dict: dict) -> None: ...
    def get_lr(self) -> List[float]: ...
    def step(self, epoch: Optional[int]=...) -> None: ...

class LambdaLR(_LRScheduler):
    def __init__(self, optimizer: Optimizer, lr_lambda: Union[Callable[[int], float], List[Callable[[int], float]]], last_epoch: int=...) -> None: ...

class StepLR(_LRScheduler):
    def __init__(self, optimizer: Optimizer, step_size: int, gamma: float=..., last_epoch: int=...) -> None: ...

class MultiStepLR(_LRScheduler):
    def __init__(self, optimizer: Optimizer, milestones: Iterable[int], gamma: float=..., last_epoch: int=...) -> None: ...

class ExponentialLR(_LRScheduler):
    def __init__(self, optimizer: Optimizer, gamma: float, last_epoch: int=...) -> None: ...

class CosineAnnealingLR(_LRScheduler):
    def __init__(self, optimizer: Optimizer, T_max: int, eta_min: float=..., last_epoch: int=...) -> None: ...

class ReduceLROnPlateau:
    in_cooldown: bool
    def __init__(self, optimizer: Optimizer, mode: str=..., factor: float=..., patience: int=..., verbose: bool=..., threshold: float=..., threshold_mode: str=..., cooldown: int=..., min_lr: float=..., eps: float=...) -> None: ...
    def step(self, metrics: Any, epoch: Optional[int]=...) -> None: ...
    def state_dict(self) -> dict: ...
    def load_state_dict(self, state_dict: dict) -> None: ...
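For context, a minimal usage sketch (not from the PR) of how the scheduler API annotated above is driven; the toy parameter, lambda, and loop below are illustrative only:

import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import LambdaLR, ReduceLROnPlateau

param = torch.zeros(3, requires_grad=True)   # toy parameter
opt = SGD([param], lr=0.1)

# Epoch-based schedule: lr_lambda is a callable, which is why the stub
# annotates it as Callable[[int], float] (or a list of callables, one per
# param group).
sched = LambdaLR(opt, lr_lambda=lambda epoch: 0.95 ** epoch)
for epoch in range(5):
    loss = (param ** 2).sum()                # dummy objective
    opt.zero_grad()
    loss.backward()
    opt.step()
    sched.step()                             # advance the schedule

# Metric-based schedule: ReduceLROnPlateau is not an _LRScheduler subclass
# (matching the stub above); its step() takes the monitored metric.
plateau = ReduceLROnPlateau(opt, mode="min", factor=0.5, patience=2)
plateau.step(loss.item())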