fix forced loglevel in pytorch oss code (#158820)

Differential Revision: [D78715806](https://our.internmc.facebook.com/intern/diff/D78715806/)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/158820
Approved by: https://github.com/Skylion007, https://github.com/pradeepfn
Teja 2025-07-23 13:08:32 -07:00 committed by PyTorch MergeBot
parent 7001d6fbc9
commit febf3c475e
2 changed files with 4 additions and 13 deletions
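Both files set up a module-level logger with `getLogger()` and forced it to `INFO`; since `getLogger()` with no argument returns the root logger, the `setLevel(logging.INFO)` call overrode whatever level the importing application had configured. A minimal sketch of the removed pattern next to the conventional library pattern (illustrative only, not the exact PyTorch modules):

```python
import logging

# Pattern removed by this commit: getLogger() with no name is the ROOT
# logger, so setLevel(INFO) here silently overrides the application's
# logging configuration as a side effect of importing the module.
logger = logging.getLogger()
logger.setLevel(logging.INFO)

# Conventional library pattern: a module-scoped logger with no setLevel,
# leaving the level and handlers to the application.
logger = logging.getLogger(__name__)
logger.debug("only emitted if the application enables DEBUG for this module")
```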

View File

@@ -16,10 +16,8 @@ Classes:
 """
 import abc
-import logging
 from concurrent.futures import Future, ThreadPoolExecutor
 from dataclasses import dataclass
-from logging import getLogger
 from typing import Any, TypeVar, Union
 
 import torch
@@ -30,9 +28,6 @@ from .types import STATE_DICT
 
 T = TypeVar("T")
 
-logger = getLogger()
-logger.setLevel(logging.INFO)
-
 
 class CheckpointStager(abc.ABC):
     """

View File

@@ -1,9 +1,8 @@
 # mypy: allow-untyped-defs
-import logging
 import types
+import warnings
 import weakref
 from copyreg import dispatch_table
-from logging import getLogger
 from typing import Any
 
 import torch
@@ -12,10 +11,6 @@ from torch.storage import UntypedStorage
 from torch.utils.weak import WeakIdKeyDictionary
 
-logger = getLogger()
-logger.setLevel(logging.INFO)
-
-
 
 class StateDictStager:
     """
     A class for optimizing storage objects during staging for async checkpointing.
@@ -33,9 +28,10 @@ class StateDictStager:
 
     def __init__(self, pin_memory: bool = False, share_memory: bool = False):
         if pin_memory and not torch.cuda.is_available():
-            logger.warning(
+            warnings.warn(
                 "Ignoring pin_memory flag for checkpoint staging as pinning memory"
-                "requires CUDA, but CUDA is not available. "
+                "requires CUDA, but CUDA is not available. ",
+                stacklevel=2,
             )
             self.pin_memory = False
         else:
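The second file also swaps the staging warning from `logger.warning` to `warnings.warn(..., stacklevel=2)`, so the reported source is the caller that constructed the stager rather than a line inside the library. A small standalone sketch of that behavior (the class here is a hypothetical stand-in, not the PyTorch implementation):

```python
import warnings


class FakeStager:
    def __init__(self, pin_memory: bool = False):
        if pin_memory:  # stand-in for the "CUDA not available" check
            # stacklevel=2 attributes the warning to the caller of
            # __init__, which is the line users actually need to change.
            warnings.warn(
                "Ignoring pin_memory flag: pinning memory requires CUDA, "
                "but CUDA is not available.",
                stacklevel=2,
            )
            pin_memory = False
        self.pin_memory = pin_memory


FakeStager(pin_memory=True)  # UserWarning is reported at this line
```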