[BE] Remove more optim entries from docs coverage ignore list (#160194)
This PR privatizes ReduceLROnPlateau.is_better -> ReduceLROnPlateau._is_better because that API was never meant to be public. A GitHub search also suggests the API is not widely used: https://github.com/search?q=.is_better%28&type=code&p=2

If you do use this API and rely on it, please file an issue. In the meantime, you can access it through `_is_better(...)`.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/160194
Approved by: https://github.com/albanD, https://github.com/Skylion007
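For downstream code that called the old public name, the change is a rename at the call site. The sketch below is illustrative only; the model, optimizer, and metric value are placeholders and not part of this PR:

    import torch
    from torch.optim.lr_scheduler import ReduceLROnPlateau

    # Placeholder model/optimizer purely for illustration.
    model = torch.nn.Linear(10, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = ReduceLROnPlateau(optimizer, mode="min", patience=2)

    val_loss = 0.42  # stand-in for a real validation metric

    # Before this PR (previously public):
    # improved = scheduler.is_better(val_loss, scheduler.best)

    # After this PR (private; use only if you truly depend on it):
    improved = scheduler._is_better(val_loss, scheduler.best)

Since `_is_better` is now private, it may change without deprecation; filing an issue, as the PR asks, is the safer long-term path.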
This commit is contained in:
parent 8c41cb800a
commit 9b803cdbe2
@@ -1793,12 +1793,6 @@ coverage_ignore_functions = [
     # torch.optim.optimizer
     "register_optimizer_step_post_hook",
     "register_optimizer_step_pre_hook",
-    # torch.optim.swa_utils
-    "get_ema_avg_fn",
-    "get_ema_multi_avg_fn",
-    "get_swa_avg_fn",
-    "get_swa_multi_avg_fn",
-    "update_bn",
     # torch.overrides
     "enable_reentrant_dispatch",
     # torch.package.analyze.find_first_use_of_broken_modules
@@ -2909,31 +2903,6 @@ coverage_ignore_classes = [
     # torch.onnx.verification
     "OnnxBackend",
     "OnnxTestCaseRepro",
-    # torch.optim.adamax
-    "Adamax",
-    # torch.optim.adamw
-    "AdamW",
-    # torch.optim.asgd
-    "ASGD",
-    # torch.optim.lbfgs
-    "LBFGS",
-    # torch.optim.lr_scheduler
-    "ChainedScheduler",
-    "ConstantLR",
-    "CosineAnnealingLR",
-    "CosineAnnealingWarmRestarts",
-    "CyclicLR",
-    "ExponentialLR",
-    "LRScheduler",
-    "LambdaLR",
-    "LinearLR",
-    "MultiStepLR",
-    "MultiplicativeLR",
-    "OneCycleLR",
-    "PolynomialLR",
-    "ReduceLROnPlateau",
-    "SequentialLR",
-    "StepLR",
     # torch.optim.optimizer
     "Optimizer",
     # torch.overrides
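Both conf.py hunks shrink lists consumed by Sphinx's coverage builder: anything left in `coverage_ignore_functions` or `coverage_ignore_classes` is skipped by the docs-coverage check, so removing an entry forces the corresponding object to actually be documented. A minimal sketch of how sphinx.ext.coverage reads these options, assuming a generic Sphinx project rather than PyTorch's real conf.py:

    # conf.py -- minimal sphinx.ext.coverage setup (illustrative only)
    project = "example-project"
    extensions = ["sphinx.ext.autodoc", "sphinx.ext.coverage"]

    # Objects listed here are skipped by `sphinx-build -b coverage`.
    # Removing an entry means the object must now appear in the rendered docs.
    coverage_ignore_functions = [
        "register_optimizer_step_post_hook",
        "register_optimizer_step_pre_hook",
    ]
    coverage_ignore_classes = [
        "Optimizer",
    ]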
@@ -1344,7 +1344,7 @@ class ReduceLROnPlateau(LRScheduler):
             warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)
         self.last_epoch = epoch
 
-        if self.is_better(current, self.best):
+        if self._is_better(current, self.best):
             self.best = current
             self.num_bad_epochs = 0
         else:
@@ -1386,7 +1386,7 @@ class ReduceLROnPlateau(LRScheduler):
     def in_cooldown(self):  # noqa: D102
         return self.cooldown_counter > 0
 
-    def is_better(self, a, best):  # noqa: D102
+    def _is_better(self, a, best):  # noqa: D102
         if self.mode == "min" and self.threshold_mode == "rel":
             rel_epsilon = 1.0 - self.threshold
             return a < best * rel_epsilon
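The branch visible in this hunk is the `mode="min"`, `threshold_mode="rel"` case: a metric only counts as an improvement when it undercuts the best value by more than a relative margin. A standalone sketch of just that comparison (the helper name and the 1% threshold are made up for illustration):

    def rel_min_is_better(a: float, best: float, threshold: float) -> bool:
        # Mirrors the "min"/"rel" branch shown in the diff:
        # `a` must beat `best` by more than a relative factor of `threshold`.
        rel_epsilon = 1.0 - threshold
        return a < best * rel_epsilon

    best = 1.00
    print(rel_min_is_better(0.999, best, threshold=0.01))  # False: within 1% of best
    print(rel_min_is_better(0.98, best, threshold=0.01))   # True: more than 1% better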
@@ -1686,6 +1686,15 @@ class CyclicLR(LRScheduler):
 
     @override
     def state_dict(self) -> dict[str, Any]:  # noqa: D102
+        """Return the state of the scheduler as a :class:`dict`.
+
+        It contains an entry for every variable in self.__dict__ which
+        is not the optimizer.
+        The learning rate lambda functions will only be saved if they are callable objects
+        and not if they are functions or lambdas.
+
+        When saving or loading the scheduler, please make sure to also save or load the state of the optimizer.
+        """
         state = super().state_dict()
         # We are dropping the `_scale_fn_ref` attribute because it is a
         # `weakref.WeakMethod` and can't be pickled.
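The added docstring repeats the usual caveat that a scheduler's state_dict does not include the optimizer's state. A sketch of a checkpoint round trip that follows that advice, assuming an arbitrary model, file name, and CyclicLR hyperparameters:

    import torch
    from torch.optim.lr_scheduler import CyclicLR

    model = torch.nn.Linear(10, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    scheduler = CyclicLR(optimizer, base_lr=0.001, max_lr=0.01)

    # Save both states; the scheduler state alone is not enough to resume.
    torch.save(
        {"optimizer": optimizer.state_dict(), "scheduler": scheduler.state_dict()},
        "checkpoint.pt",
    )

    # ... later, after rebuilding `model`, `optimizer`, and `scheduler` ...
    checkpoint = torch.load("checkpoint.pt")
    optimizer.load_state_dict(checkpoint["optimizer"])
    scheduler.load_state_dict(checkpoint["scheduler"])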