add sticky cache pgo (#154418)

This is a reland of https://github.com/pytorch/pytorch/pull/154394, which hit a mergebot bug.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/154418
Approved by: https://github.com/malfet
bobrenjc93 2025-05-27 07:28:26 -07:00 committed by PyTorch MergeBot
parent 514409d032
commit 2560c1f3f0
2 changed files with 12 additions and 0 deletions


@@ -534,6 +534,9 @@ def get_cache_key() -> Optional[str]:
         )
         return f"{r}:{rank}:{tag}"
 
+    if r := torch.compiler.config.sticky_pgo_key:
+        return f"sticky:{r}:{rank}:{tag}"
+
     if (name_version := torch._utils_internal.get_mast_job_name_version()) is not None:
         mast_job_name, mast_job_version = name_version
         return f"mast:{mast_job_name}:{mast_job_version}:{rank}:{tag}"

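Note: to make the key precedence in the hunk above concrete, here is a small illustrative sketch, not the actual torch._dynamo.pgo.get_cache_key implementation. It assumes the same ordering visible in the diff: an explicit job_id wins, then the new sticky_pgo_key, then the MAST job name/version; the function name, argument names, and the example key string are made up for illustration.

from typing import Optional

# Illustrative only: mirrors the key precedence from the hunk above.
def sketch_cache_key(
    job_id: str,
    sticky_pgo_key: str,
    mast_name_version: Optional[tuple[str, str]],
    rank: Optional[int],
    tag: str,
) -> Optional[str]:
    if job_id:
        # Existing behavior: a user-specified job id namespaces the profile.
        return f"{job_id}:{rank}:{tag}"
    if sticky_pgo_key:
        # New in this PR: a sticky key that can be shared across different jobs.
        return f"sticky:{sticky_pgo_key}:{rank}:{tag}"
    if mast_name_version is not None:
        name, version = mast_name_version
        # Fallback: key derived from the MAST job name/version.
        return f"mast:{name}:{version}:{rank}:{tag}"
    return None

# sketch_cache_key("", "llm_family_v2", None, 0, "") -> "sticky:llm_family_v2:0:"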

@@ -74,5 +74,14 @@ This whitelist is dominant over all other flags dynamic=False, force_nn_module_p
 and force_parameter_static_shapes.
 """
 
+sticky_pgo_key: str = Config(
+    env_name_default="TORCH_COMPILE_STICKY_PGO_KEY", default=""
+)
+"""
+If you want to share PGO profiles across different jobs (and not just attempts), you can set
+this to a string that identifies the shared profile. This is useful if you want to share PGO profiles
+for models that are not identical, but are similar enough to share PGO profiles.
+"""
+
 install_config_module(sys.modules[__name__])
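Note: a minimal usage sketch, assuming a PyTorch build that includes this change. The key can be supplied either through the TORCH_COMPILE_STICKY_PGO_KEY environment variable (picked up as the Config default via env_name_default above, so it must be set before torch is imported) or by assigning torch.compiler.config.sticky_pgo_key directly; the key string "shared_model_family" is a made-up example.

import os

# Option 1: set the environment variable before importing torch, so the
# Config default picks it up via env_name_default.
os.environ["TORCH_COMPILE_STICKY_PGO_KEY"] = "shared_model_family"

import torch

# Option 2: assign the config directly. Jobs that use the same string share
# one PGO profile, rather than only retries/attempts of a single job.
torch.compiler.config.sticky_pgo_key = "shared_model_family"

@torch.compile
def f(x):
    return x * 2

f(torch.randn(4))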