[BE] Remove more optim entries from docs coverage ignore list (#160194)

This PR privatizes `ReduceLROnPlateau.is_better` -> `ReduceLROnPlateau._is_better`, because that API was never meant to be public. A GitHub code search also suggests the API is not widely used: https://github.com/search?q=.is_better%28&type=code&p=2

If you use this API and rely on it for some reason, please file an issue. In the meantime, you can access it as `_is_better(...)`.
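For downstream code, the migration is mechanical. A minimal sketch (the model/optimizer setup here is illustrative, not from this PR):

```python
import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = ReduceLROnPlateau(optimizer, mode="min")

val_loss, best = 0.5, 1.0
# Before this PR:
#   scheduler.is_better(val_loss, best)
# After this PR (same signature, now private):
print(scheduler._is_better(val_loss, best))  # True: 0.5 improves on 1.0
```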

Pull Request resolved: https://github.com/pytorch/pytorch/pull/160194
Approved by: https://github.com/albanD, https://github.com/Skylion007
Jane Xu, 2025-08-08 21:30:05 +00:00; committed by PyTorch MergeBot
parent 8c41cb800a
commit 9b803cdbe2
2 changed files with 11 additions and 33 deletions


@@ -1793,12 +1793,6 @@ coverage_ignore_functions = [
     # torch.optim.optimizer
     "register_optimizer_step_post_hook",
     "register_optimizer_step_pre_hook",
-    # torch.optim.swa_utils
-    "get_ema_avg_fn",
-    "get_ema_multi_avg_fn",
-    "get_swa_avg_fn",
-    "get_swa_multi_avg_fn",
-    "update_bn",
     # torch.overrides
     "enable_reentrant_dispatch",
     # torch.package.analyze.find_first_use_of_broken_modules
@@ -2909,31 +2903,6 @@ coverage_ignore_classes = [
     # torch.onnx.verification
     "OnnxBackend",
     "OnnxTestCaseRepro",
-    # torch.optim.adamax
-    "Adamax",
-    # torch.optim.adamw
-    "AdamW",
-    # torch.optim.asgd
-    "ASGD",
-    # torch.optim.lbfgs
-    "LBFGS",
-    # torch.optim.lr_scheduler
-    "ChainedScheduler",
-    "ConstantLR",
-    "CosineAnnealingLR",
-    "CosineAnnealingWarmRestarts",
-    "CyclicLR",
-    "ExponentialLR",
-    "LRScheduler",
-    "LambdaLR",
-    "LinearLR",
-    "MultiStepLR",
-    "MultiplicativeLR",
-    "OneCycleLR",
-    "PolynomialLR",
-    "ReduceLROnPlateau",
-    "SequentialLR",
-    "StepLR",
     # torch.optim.optimizer
     "Optimizer",
     # torch.overrides
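For context: these lists feed Sphinx's `sphinx.ext.coverage` builder, which skips anything they name, so deleting an entry means the coverage check will report that API if it lacks documentation. A minimal sketch of how such a conf.py is wired up (illustrative, not PyTorch's actual conf.py):

```python
# Sketch of a Sphinx conf.py using the coverage extension.
# Run with: sphinx-build -b coverage <sourcedir> <outdir>
extensions = ["sphinx.ext.coverage"]

# APIs listed here are exempt from the undocumented-API report;
# removing an entry (as this PR does) re-enables checking for it.
coverage_ignore_functions = [
    "register_optimizer_step_post_hook",
    "register_optimizer_step_pre_hook",
]
coverage_ignore_classes = [
    "Optimizer",
]
```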


@@ -1344,7 +1344,7 @@ class ReduceLROnPlateau(LRScheduler):
             warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)
         self.last_epoch = epoch
 
-        if self.is_better(current, self.best):
+        if self._is_better(current, self.best):
             self.best = current
             self.num_bad_epochs = 0
         else:
@@ -1386,7 +1386,7 @@ class ReduceLROnPlateau(LRScheduler):
     def in_cooldown(self):  # noqa: D102
         return self.cooldown_counter > 0
 
-    def is_better(self, a, best):  # noqa: D102
+    def _is_better(self, a, best):  # noqa: D102
         if self.mode == "min" and self.threshold_mode == "rel":
             rel_epsilon = 1.0 - self.threshold
             return a < best * rel_epsilon
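The hunk above shows only the min/rel branch. For readers following along, a standalone sketch of the comparison rule across all four mode × threshold_mode combinations (mirroring the structure of the surrounding code in lr_scheduler.py; the free-function form and example values are mine):

```python
def is_better(a: float, best: float, mode: str = "min",
              threshold: float = 1e-4, threshold_mode: str = "rel") -> bool:
    """Return True if metric `a` improves on `best` by more than `threshold`."""
    if mode == "min" and threshold_mode == "rel":
        return a < best * (1.0 - threshold)   # relative improvement when minimizing
    elif mode == "min" and threshold_mode == "abs":
        return a < best - threshold           # absolute improvement when minimizing
    elif mode == "max" and threshold_mode == "rel":
        return a > best * (threshold + 1.0)   # relative improvement when maximizing
    else:  # mode == "max" and threshold_mode == "abs"
        return a > best + threshold           # absolute improvement when maximizing

# With the defaults, the metric must drop below best * 0.9999 to count:
print(is_better(0.99990, 1.0))  # False
print(is_better(0.99985, 1.0))  # True
```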
@@ -1686,6 +1686,15 @@ class CyclicLR(LRScheduler):
 
     @override
     def state_dict(self) -> dict[str, Any]:  # noqa: D102
+        """Return the state of the scheduler as a :class:`dict`.
+
+        It contains an entry for every variable in self.__dict__ which
+        is not the optimizer.
+        The learning rate lambda functions will only be saved if they are callable objects
+        and not if they are functions or lambdas.
+
+        When saving or loading the scheduler, please make sure to also save or load the state of the optimizer.
+        """
         state = super().state_dict()
         # We are dropping the `_scale_fn_ref` attribute because it is a
         # `weakref.WeakMethod` and can't be pickled.
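The comment explains why `_scale_fn_ref` is excluded from the state: `weakref.WeakMethod` objects cannot be pickled, so they must be dropped before serialization and rebuilt on load. A minimal standalone sketch of that drop-and-rebuild pattern (class and attribute names are illustrative, not the PyTorch implementation):

```python
import weakref

class Scheduler:
    def __init__(self, gamma: float = 0.99) -> None:
        self.gamma = gamma
        # Weak reference to a bound method: pickling this raises an error.
        self._scale_fn_ref = weakref.WeakMethod(self._default_scale_fn)

    def _default_scale_fn(self, x: int) -> float:
        return self.gamma ** x

    def state_dict(self) -> dict:
        state = self.__dict__.copy()
        # Drop the unpicklable weakref before the state is serialized ...
        state.pop("_scale_fn_ref", None)
        return state

    def load_state_dict(self, state: dict) -> None:
        self.__dict__.update(state)
        # ... and recreate it after the picklable state is restored.
        self._scale_fn_ref = weakref.WeakMethod(self._default_scale_fn)

sched = Scheduler()
restored = Scheduler()
restored.load_state_dict(sched.state_dict())
assert restored._scale_fn_ref()(2) == 0.99 ** 2
```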