Summary:
- ~optimizer isn't required for `SequentialLR` since it's already present in the schedulers. Trying to match its signature with `ChainedScheduler`.~
- ~`verbose` isn't really used anywhere, so removed it.~

Updated the missing docs and added a small check.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/69817
Reviewed By: ngimel
Differential Revision: D33069589
Pulled By: albanD
fbshipit-source-id: f015105a35a2ca39fe94c70acdfd55cdf5601419
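
The struck-through items above concern the constructor signatures of `SequentialLR` and `ChainedScheduler`. Below is a minimal sketch (not part of this commit) contrasting the two shapes as declared in the stub that follows, assuming PyTorch 1.10+; `model` and the chosen hyperparameter values are placeholders, and in real code you would build only one of the two wrappers per optimizer.

```python
import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import ConstantLR, ExponentialLR, ChainedScheduler, SequentialLR

model = torch.nn.Linear(4, 2)                  # placeholder module
optimizer = SGD(model.parameters(), lr=0.1)

# ChainedScheduler takes only the list of schedulers; every scheduler steps on
# each call and their multiplicative factors are combined.
chained = ChainedScheduler([
    ConstantLR(optimizer, factor=0.1, total_iters=5),
    ExponentialLR(optimizer, gamma=0.9),
])

# SequentialLR additionally takes the optimizer and the milestones at which
# control passes from one scheduler to the next.
sequential = SequentialLR(
    optimizer,
    schedulers=[ConstantLR(optimizer, factor=0.1, total_iters=5),
                ExponentialLR(optimizer, gamma=0.9)],
    milestones=[5],
)
```
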
from typing import Iterable, Any, Optional, Callable, Union, List

from .optimizer import Optimizer

class _LRScheduler:
    def __init__(self, optimizer: Optimizer, last_epoch: int=...) -> None: ...
    def state_dict(self) -> dict: ...
    def load_state_dict(self, state_dict: dict) -> None: ...
    def get_last_lr(self) -> List[float]: ...
    def get_lr(self) -> List[float]: ...  # one learning rate per param group, as with get_last_lr()
    def step(self, epoch: Optional[int]=...) -> None: ...

class LambdaLR(_LRScheduler):
    def __init__(self, optimizer: Optimizer, lr_lambda: Union[Callable[[int], float], List[Callable[[int], float]]], last_epoch: int=...) -> None: ...

class StepLR(_LRScheduler):
    def __init__(self, optimizer: Optimizer, step_size: int, gamma: float=..., last_epoch: int=...) -> None: ...

class MultiStepLR(_LRScheduler):
    def __init__(self, optimizer: Optimizer, milestones: Iterable[int], gamma: float=..., last_epoch: int=...) -> None: ...

class ConstantLR(_LRScheduler):
    def __init__(self, optimizer: Optimizer, factor: float=..., total_iters: int=..., last_epoch: int=...) -> None: ...

class LinearLR(_LRScheduler):
    def __init__(self, optimizer: Optimizer, start_factor: float=..., end_factor: float=..., total_iters: int=..., last_epoch: int=...) -> None: ...

class ExponentialLR(_LRScheduler):
    def __init__(self, optimizer: Optimizer, gamma: float, last_epoch: int=...) -> None: ...

class ChainedScheduler(_LRScheduler):
    def __init__(self, schedulers: List[_LRScheduler]) -> None: ...

class SequentialLR(_LRScheduler):
    def __init__(self, optimizer: Optimizer, schedulers: List[_LRScheduler], milestones: List[int], last_epoch: int=..., verbose: bool=...) -> None: ...

class CosineAnnealingLR(_LRScheduler):
    def __init__(self, optimizer: Optimizer, T_max: int, eta_min: float=..., last_epoch: int=...) -> None: ...

class ReduceLROnPlateau:
    in_cooldown: bool

    def __init__(self, optimizer: Optimizer, mode: str=..., factor: float=..., patience: int=..., verbose: bool=..., threshold: float=..., threshold_mode: str=..., cooldown: int=..., min_lr: float=..., eps: float=...) -> None: ...
    def step(self, metrics: Any, epoch: Optional[int]=...) -> None: ...
    def state_dict(self) -> dict: ...
    def load_state_dict(self, state_dict: dict) -> None: ...

class CyclicLR(_LRScheduler):
    def __init__(self, optimizer: Optimizer, base_lr: float=..., max_lr: float=..., step_size_up: int=..., step_size_down: int=..., mode: str=..., gamma: float=..., scale_fn: Optional[Callable[[float], float]]=..., scale_mode: str=..., cycle_momentum: bool=..., base_momentum: float=..., max_momentum: float=..., last_epoch: int=...) -> None: ...

class CosineAnnealingWarmRestarts(_LRScheduler):
    def __init__(self, optimizer: Optimizer, T_0: int=..., T_mult: int=..., eta_min: float=..., last_epoch: int=...) -> None: ...
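
For reference, a minimal usage sketch of the schedulers stubbed above (not part of this file), assuming PyTorch 1.10+. The model, data, and hyperparameter values are placeholders; note that `ReduceLROnPlateau` does not subclass `_LRScheduler` and its `step()` expects the monitored metric.

```python
import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau

model = torch.nn.Linear(4, 2)                  # placeholder module
optimizer = SGD(model.parameters(), lr=0.1)
scheduler = StepLR(optimizer, step_size=10, gamma=0.5)

for epoch in range(30):
    optimizer.zero_grad()
    loss = model(torch.randn(8, 4)).sum()      # stand-in for a real training loss
    loss.backward()
    optimizer.step()
    scheduler.step()                           # _LRScheduler subclasses take no metric
    print(epoch, scheduler.get_last_lr())      # most recent learning rates, one per param group

# ReduceLROnPlateau is driven by a monitored metric rather than the epoch count.
plateau = ReduceLROnPlateau(optimizer, mode="min", factor=0.1, patience=3)
plateau.step(0.25)                             # pass the validation metric to step()
```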