### <samp>🤖 Generated by Copilot at 8aef78f</samp>

### Summary 📝🚀🛠️

This pull request updates several logging calls to use old-style string formatting with `%s` placeholders instead of f-strings in `torch/_dynamo/logging.py`, `torch/_functorch/compilers.py`, and `torch/fx/passes/pass_manager.py`, as part of a logging standardization effort. It also adds a `# noqa: F404` comment to the `import __future__` statement in `torch/overrides.py` to fix a flake8 warning.

> _`log` uses old style_
> _formatting strings with `%s`_
> _logging is faster_

### Walkthrough

* Standardize logging calls to use old-style string formatting with `%s` placeholders instead of f-string syntax, for performance and consistency ([link](https://github.com/pytorch/pytorch/pull/99799/files?diff=unified&w=0#diff-18807f7fd187b8bc8e69e93722566195b36d5bf269099b415a6f90b552228d6bL55-R55), [link](https://github.com/pytorch/pytorch/pull/99799/files?diff=unified&w=0#diff-fae8a66564055743ec031edb87eb22edeebf7fdebef9d21660d5e6a6252e5222L370-R373), [link](https://github.com/pytorch/pytorch/pull/99799/files?diff=unified&w=0#diff-5f3e37ded032f24e247dcf4a3be4b73ea0cf21382e342631742e5a04550202e1L72-R72))
* Suppress the flake8 warning for the `import __future__` statement in `torch/overrides.py` with a `# noqa: F404` comment ([link](https://github.com/pytorch/pytorch/pull/99799/files?diff=unified&w=0#diff-4f601fe7f31e875ee4354882c0bb490bc35e51d3d413d058cc5fda3be8ca9f15L23-R23))

Pull Request resolved: https://github.com/pytorch/pytorch/pull/99799
Approved by: https://github.com/Skylion007
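The performance point follows from standard `logging` behavior: with `%s` placeholders, interpolation is deferred until a handler actually emits the record, whereas an f-string is built eagerly even when the message is filtered out by the log level. A minimal sketch of the difference (the `step` and `msg` values here are hypothetical, not taken from the patch):

```python
import logging

log = logging.getLogger("torch._dynamo")
step, msg = 1, "tracing frame"  # hypothetical values for illustration

# f-string: the message string is built before logging checks the level,
# so a filtered-out DEBUG call still pays the formatting cost.
log.debug(f"Step {step}: {msg}")

# %s placeholders: the arguments are stored on the LogRecord and only
# interpolated if some handler actually emits the message.
log.debug("Step %s: %s", step, msg)
```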
58 lines · 1.5 KiB · Python
```python
import itertools
import logging

from torch.hub import _Faketqdm, tqdm

# Disable progress bar by default, not in dynamo config because otherwise get a circular import
disable_progress = True


# Return all loggers that torchdynamo/torchinductor is responsible for
def get_loggers():
    return [
        logging.getLogger("torch.fx.experimental.symbolic_shapes"),
        logging.getLogger("torch._dynamo"),
        logging.getLogger("torch._inductor"),
    ]


# Creates a logging function that logs a message with a step # prepended.
# get_step_logger should be lazily called (i.e. at runtime, not at module-load time)
# so that step numbers are initialized properly. e.g.:

# @functools.lru_cache(None)
# def _step_logger():
#     return get_step_logger(logging.getLogger(...))

# def fn():
#     _step_logger()(logging.INFO, "msg")

_step_counter = itertools.count(1)

# Update num_steps if more phases are added: Dynamo, AOT, Backend
# This is very inductor centric
# _inductor.utils.has_triton() gives a circular import error here

if not disable_progress:
    try:
        import triton  # noqa: F401

        num_steps = 3
    except ImportError:
        num_steps = 2
    pbar = tqdm(total=num_steps, desc="torch.compile()", delay=0)


def get_step_logger(logger):
    if not disable_progress:
        pbar.update(1)
        if not isinstance(pbar, _Faketqdm):
            pbar.set_postfix_str(f"{logger.name}")

    step = next(_step_counter)

    def log(level, msg):
        logger.log(level, "Step %s: %s", step, msg)

    return log
```
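For context, a minimal usage sketch of `get_step_logger`, assuming this file is `torch/_dynamo/logging.py` (the first file listed in the pull request) and that an INFO-level handler is configured; the logger names and messages are illustrative:

```python
import logging

from torch._dynamo.logging import get_step_logger

logging.basicConfig(level=logging.INFO)

# Each get_step_logger call consumes the next value from the shared
# _step_counter, so the returned callables prepend "Step 1:", "Step 2:", ...
dynamo_log = get_step_logger(logging.getLogger("torch._dynamo"))
dynamo_log(logging.INFO, "start tracing")         # logs "Step 1: start tracing"

inductor_log = get_step_logger(logging.getLogger("torch._inductor"))
inductor_log(logging.INFO, "compiling fx graph")  # logs "Step 2: compiling fx graph"
```

As the module comment notes, `get_step_logger` should be called lazily (e.g. behind `functools.lru_cache`) so that step numbers reflect the order in which phases actually run.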