This PR adds statically launchable CachingAutotuners to FXGraphCache's cache entry. Regular CachingAutotuners, with triton kernels attached to them, are poor candidates for caching: they are very large and take up a huge amount of space, since they track all of the various binary files along with assorted metadata. We could probably figure out which information we could delete from the kernel and still have it work, but with StaticCudaLauncher we no longer have to. Instead, we can cache every compiled triton kernel that is statically launchable. Because StaticTritonCompileResult is serializable and designed to have a very small memory footprint, we can save it into FXGraphCache without significantly increasing the cache size. We store it as part of CompiledFxGraph.triton_bundle. Then, on load, we repopulate the CachingAutotuner into our CompiledTritonKernels cache.

The upsides of this are many:
- We no longer need to call into a separate process on a cache hit.
- We can *guarantee* that the triton kernel we got from our cache entry is the one we use to launch again, so there are no worries about triton's own caching logic.
- Once we achieve feature parity and all torch.compile'd triton kernels are statically launchable, we can clean up a bunch of TritonBundler code and simplify the cache-hit logic.

Fixes #149449

Pull Request resolved: https://github.com/pytorch/pytorch/pull/149054
Approved by: https://github.com/oulgen
ghstack dependencies: #149657
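For intuition, here is a minimal sketch of the round trip described above, using the CompiledTritonKernels cache defined in the file below. The bundler-side helpers (collect_static_results, rebuild_autotuner_future) and the result attributes are hypothetical names for illustration, not the actual TritonBundler API:

    # Hedged sketch only: helper names are illustrative, not PyTorch internals.
    def bundle_for_cache(compiled_fx_graph):
        # Each StaticTritonCompileResult is small and picklable, so it can ride
        # along inside the FXGraphCache entry instead of the full CachingAutotuner.
        compiled_fx_graph.triton_bundle = collect_static_results()  # hypothetical helper

    def on_cache_hit(compiled_fx_graph):
        # Repopulate the in-memory kernel cache so launches reuse the exact
        # kernels that were cached, with no extra compile subprocess.
        for result in compiled_fx_graph.triton_bundle:
            # result.kernel_hash / rebuild_autotuner_future are hypothetical
            CompiledTritonKernels._cache[result.kernel_hash] = rebuild_autotuner_future(result)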
# mypy: allow-untyped-defs
from __future__ import annotations

import atexit
import functools
import logging
import multiprocessing
import os
import sys
from concurrent.futures import Future, ProcessPoolExecutor, ThreadPoolExecutor
from concurrent.futures.process import BrokenProcessPool
from functools import partial
from time import time, time_ns
from typing import Any, Callable, Optional, TYPE_CHECKING

import torch
from torch._dynamo.device_interface import get_registered_device_interfaces
from torch._dynamo.utils import (
    counters,
    dynamo_timed,
    get_metrics_context,
    set_feature_use,
)
from torch._inductor import config
from torch._inductor.codecache import (
    _load_triton_kernel_from_source,
    code_hash,
    CodeCacheFuture,
    CppCodeCache,
    CppPythonBindingsCodeCache,
    CUDACodeCache,
    HalideCodeCache,
    LambdaFuture,
    ROCmCodeCache,
    StaticAutotunerFuture,
    torch_key,
)
from torch._inductor.compile_worker.subproc_pool import AnyPool, SubprocPool
from torch._inductor.compile_worker.utils import _async_compile_initializer
from torch._inductor.runtime.compile_tasks import (
    _set_triton_ptxas_path,
    _worker_compile_triton,
)
from torch._inductor.utils import clear_on_fresh_inductor_cache
from torch._inductor.virtualized import V
from torch.hub import _Faketqdm, tqdm
from torch.utils._ordered_set import OrderedSet
from torch.utils._triton import has_triton_package


if TYPE_CHECKING:
    from torch._inductor.runtime.hints import HalideMeta
    from torch._inductor.runtime.triton_heuristics import CachingAutotuner


# timing metrics for time spent in the compilation
_cumulative_compile_time = 0.0
_t0: Optional[float] = None

kernel_code_log = torch._logging.getArtifactLogger(__name__, "kernel_code")

log = logging.getLogger(__name__)


def pre_fork_setup():
    """
    Setup that must be done prior to forking with a process pool.
    """
    # ensure properties have been calculated before processes
    # are forked
    caching_device_properties()

    # Computing the triton key can be slow. If we call it before fork,
    # it will be cached for the forked subprocesses.
    try:
        from triton.compiler.compiler import triton_key

        triton_key()
    except ImportError:
        # Triton might not be installed or might be an old version.
        pass


def caching_device_properties():
    for _, device_interface in get_registered_device_interfaces():
        if device_interface.is_available():
            device_interface.Worker.get_device_properties()


def _compile_start() -> None:
    global _t0
    if _t0 is None:
        _t0 = time()


def _compile_end() -> None:
    global _cumulative_compile_time, _t0
    if _t0 is not None:
        t1 = time()
        _cumulative_compile_time += t1 - _t0
        _t0 = None
        # print("CUMULATIVE COMPILE TIME", _cumulative_compile_time)


_IS_WINDOWS = sys.platform == "win32"

log = logging.getLogger(__name__)

# Used to keep track of all process pools invoked so far.
_pool_set = OrderedSet[AnyPool]()


def shutdown_compile_workers() -> None:
    """Shut down all outstanding compile-worker pools."""
    for pool in _pool_set:
        pool.shutdown()
    after_fork()


def after_fork():
    """Reset pools to initial state without shutting them down"""
    _pool_set.clear()
    AsyncCompile.process_pool.cache_clear()


try:
    os.register_at_fork(after_in_child=after_fork)
except AttributeError:
    pass  # register_at_fork does not exist on Windows


def get_compile_threads() -> int:
    """
    Temporary for internal rollout. Assign config.compile_threads lazily and return it.
    TODO: remove after rollout.
    """
    if config.compile_threads is None:
        config.compile_threads = config.decide_compile_threads()
    return config.compile_threads


@clear_on_fresh_inductor_cache
class CompiledTritonKernels:
    """
    In-memory cache for storing compiled triton kernels.

    Each triton kernel is keyed by the hash of its source code. Each value stored
    in the cache is a return value of AsyncCompile.triton().

    Currently, the cache stores Future objects, but it should be generalizable for any kernels.
    """

    _cache: dict[str, CodeCacheFuture] = {}

    @staticmethod
    def key(kernel_src: str):
        """
        Generates a cache key given a triton kernel's full source code.
        This source includes the inductor meta, compilation metadata, the kernel itself, etc.
        `kernel_src` should be the exact string passed as async_compile.triton()'s first argument.
        """
        # Hashes the kernel source with torch_key into a single hash key
        return code_hash(kernel_src, extra=torch_key())

    @staticmethod
    def save(kernel_src: str, future: CodeCacheFuture):
        """
        Saves a compiled triton kernel to the cache.
        TODO: We store a LambdaFuture as that's the callable returned by async_compile.triton,
        but the real type we want to return here is actually an abstract triton kernel.

        TODO: The source code here is not just the kernel's source code, but also includes the
        inductor preamble, etc., so the key could be less strict.
        """
        key = CompiledTritonKernels.key(kernel_src)
        CompiledTritonKernels._cache[key] = future

    @staticmethod
    def get(kernel_src: str) -> Optional[CodeCacheFuture]:
        key = CompiledTritonKernels.key(kernel_src)
        return CompiledTritonKernels._cache.get(key, None)

    @staticmethod
    def cache_clear():
        CompiledTritonKernels._cache = {}

    @staticmethod
    def remove_future(kernel_src: str) -> None:
        key = CompiledTritonKernels.key(kernel_src)

        # Delete the LambdaFuture if there is one
        if key in CompiledTritonKernels._cache:
            del CompiledTritonKernels._cache[key]

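
# Illustrative usage of the cache above, as exercised by AsyncCompile.triton()
# further down in this file (sketch only; the calls shown are the ones defined above):
#
#   key = CompiledTritonKernels.key(kernel_src)       # code_hash(kernel_src, extra=torch_key())
#   CompiledTritonKernels.save(kernel_src, future)    # remember the in-flight compile
#   hit = CompiledTritonKernels.get(kernel_src)       # identical source -> same future
#   CompiledTritonKernels.remove_future(kernel_src)   # dropped once the kernel is materialized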


class AsyncCompile:
    def __init__(self) -> None:
        pass

    @staticmethod
    @functools.lru_cache(1)
    def pool() -> ThreadPoolExecutor:
        assert get_compile_threads() > 1
        return ThreadPoolExecutor(get_compile_threads())

    @staticmethod
    def _get_ready():
        """No-op function to help mark when the subprocess pool is ready."""
        return "ready"

    @staticmethod
    @functools.lru_cache(1)
    def process_pool() -> AnyPool:
        assert get_compile_threads() > 1
        log.info(
            "Creating '%s' pool with %d workers",
            config.worker_start_method,
            get_compile_threads(),
        )

        pool: AnyPool
        if config.worker_start_method == "subprocess":
            # Wrapper around ProcessPoolExecutor that forks in a new process we control
            pool = SubprocPool(get_compile_threads())
        else:
            if config.worker_start_method == "spawn":
                # Avoid creating pools in the spawned subprocs themselves:
                os.environ["TORCH_WARM_POOL"] = "0"
            pre_fork_setup()
            ctx = multiprocessing.get_context(config.worker_start_method)
            pool = ProcessPoolExecutor(
                get_compile_threads(),
                mp_context=ctx,
                initializer=partial(_async_compile_initializer, os.getpid()),
            )
            # when this pool is created in a subprocess object, the normal exit handler
            # doesn't run, and we need to register our own handler.
            # exitpriority has to be high, because another one of the finalizers will
            # kill the worker thread that sends the shutdown message to the workers...
            multiprocessing.util.Finalize(None, pool.shutdown, exitpriority=sys.maxsize)

        # Set an attribute we can check to see if the pool is ready.
        pool.ready_future = pool.submit(AsyncCompile._get_ready)  # type: ignore[union-attr]
        _pool_set.add(pool)
        return pool

    @classmethod
    def warm_pool(cls) -> None:
        if get_compile_threads() <= 1:
            return
        _compile_start()
        # Pool is initialized on first access
        cls.process_pool()
        _compile_end()

    @classmethod
    def submit(cls, task: Callable[..., Any]) -> Any:
        if get_compile_threads() <= 1:
            return task()
        return cls.pool().submit(task)

    def use_process_pool(self):
        return (
            get_compile_threads() > 1 and self.process_pool().ready_future.done()  # type: ignore[union-attr]
        )
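
    # Note (illustrative, not exhaustive): the pool behavior above is driven by
    # knobs referenced in this file. A typical way to steer it, assuming the
    # defaults documented for your build, is:
    #
    #   import torch._inductor.config as inductor_config
    #   inductor_config.compile_threads = 8                 # >1 enables the pool paths
    #   inductor_config.worker_start_method = "subprocess"  # or "spawn" / "fork"
    #
    # Setting TORCHINDUCTOR_COMPILE_THREADS=1 (see the error message in
    # _wait_futures below) forces compilation back into the main process, which
    # is the usual first step when debugging a crashing compile worker.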

    def triton(self, kernel_name: str, source_code: str, device_str: str = "cuda"):
        """
        AsyncCompile.triton is more complicated than the other backends because
        we're trying to optimize compile time as much as possible for this hot callsite.

        First of all, the function is cached by CompiledTritonKernels; if there's a kernel
        already compiled, we grab it directly from the cache and return.

        Otherwise, if we have multiple compile threads, we kick off triton compilations on each
        worker process by giving it a kernel and source code to compile. The worker initializes
        a CachingAutotuner, runs triton compilation, and pickles the kernel back to us.
        We use TritonCompileResult to represent the objects being pickled back to us by each
        worker.

        Some perhaps non-obvious things that are pickled back to us:
        - Most of the time, we can avoid sending back CachingAutotuner.fn and other metadata,
          and do not have to pay the cost of loading the triton kernel on the parent. But certain
          cases, like coordesc tuning and dynamic_scale_rblock, require us to reload the function
          in the parent lazily when we need it.
        - The AutotuneCache, if enabled, is constructed on each worker per triton config
          and pickled back to us via `CachingAutotuner.save_cache_hook`.
        """
        load_kernel = functools.partial(
            _load_triton_kernel_from_source, kernel_name, source_code
        )

        def reload_kernel_in_parent():
            # Benchmark how often this happens
            with dynamo_timed("reload_kernel_in_parent"):
                return load_kernel()

        counters["inductor"]["async_compile_cache_miss"] += 1

        kernel_code_log.info("Triton Kernel:\n%s", source_code)
        _compile_start()

        if os.environ.get("TRITON_INTERPRET", "0") == "1":
            return getattr(
                torch._inductor.codecache.PyCodeCache.load(source_code), kernel_name
            )

        is_parallel = self.use_process_pool()
        set_feature_use("parallel_compile_post_warmup", is_parallel)

        compile_id = torch._guards.CompileContext.current_compile_id()
        is_backward = getattr(V.graph, "is_backward", False)

        if (future := CompiledTritonKernels.get(source_code)) is not None:
            counters["inductor"]["async_compile_cache_hit"] += 1
            # Set reload_kernel_from_src properly based on source_code
            if isinstance(future, StaticAutotunerFuture):
                future.reload_kernel_from_src = reload_kernel_in_parent
            if is_parallel:
                return future
            else:
                return future.result()

        if is_parallel:
            # We want to support changing these env vars after (and while) the
            # process pool is running, so pass them to the subprocess to reset.
            env_vars = ["TORCHINDUCTOR_CACHE_DIR", "TRITON_CACHE_DIR"]
            extra_env = {v: os.environ[v] for v in env_vars if v in os.environ}

            task = self.process_pool().submit(
                _worker_compile_triton,
                load_kernel,
                extra_env,
            )

            def get_result() -> CachingAutotuner:
                kernel, elapsed_us = task.result()
                # Now that we've compiled, we should clear the future
                # so it can't be used again
                kernel.set_compile_info(compile_id, is_backward)
                CompiledTritonKernels.remove_future(source_code)
                kernel.precompile(
                    warm_cache_only=False,
                    reload_kernel=reload_kernel_in_parent,
                    static_triton_bundle_key=CompiledTritonKernels.key(source_code),
                )
                get_metrics_context().add_top_n(
                    "triton_kernel_compile_times_us", kernel_name, elapsed_us
                )
                return kernel

            future = LambdaFuture(get_result, future=task)
            CompiledTritonKernels.save(source_code, future)
            return future
        else:
            with dynamo_timed(
                "async_compile.precompile",
                log_pt2_compile_event=True,
                dynamo_compile_column_us="triton_compile_time_us",
                log_waitcounter=True,
            ):
                start_ns = time_ns()
                _set_triton_ptxas_path()
                kernel = load_kernel()
                kernel.set_compile_info(compile_id, is_backward)
                kernel.precompile(
                    warm_cache_only=False,
                    static_triton_bundle_key=CompiledTritonKernels.key(source_code),
                )
                elapsed_us = (time_ns() - start_ns) // 1000
                get_metrics_context().add_top_n(
                    "triton_kernel_compile_times_us", kernel_name, elapsed_us
                )
                return kernel
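
    # For orientation: Inductor-generated wrapper code typically drives the
    # method above roughly as sketched below (kernel name and source are
    # illustrative). Each call returns either a compiled kernel or a future,
    # and the async_compile.wait(globals()) call emitted at the end of the
    # generated module resolves any futures in place before launch.
    #
    #   async_compile = AsyncCompile()
    #   triton_poi_fused_add_0 = async_compile.triton(
    #       "triton_poi_fused_add_0", triton_src, device_str="cuda"
    #   )
    #   ...
    #   async_compile.wait(globals())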

    def multi_kernel(self, *args, **kwargs) -> Any:
        from torch._inductor.codegen.multi_kernel import MultiKernelCall

        # no need to call this in parallel since the sub-kernels are already parallel tasks
        return MultiKernelCall(*args, **kwargs)

    def cpp(self, source_code: str):
        kernel_code_log.info("CPP Kernel:\n%s", source_code)
        if get_compile_threads() <= 1:
            return CppCodeCache.load(source_code).kernel
        else:
            get_result = CppCodeCache.load_async(source_code, submit_fn=self.submit)
            return LambdaFuture(lambda: get_result().kernel)

    def cpp_pybinding(self, argtypes: list[str], source_code: str):
        kernel_code_log.info("CPP+Bindings Kernel:\n%s", source_code)
        if get_compile_threads() <= 1:
            return CppPythonBindingsCodeCache.load_pybinding(argtypes, source_code)
        else:
            get_result = CppPythonBindingsCodeCache.load_pybinding_async(
                argtypes, source_code, submit_fn=self.submit
            )
            return LambdaFuture(get_result)

    def cuda(self, source_code, dst_file_ext, aot_compile=False):
        kernel_code_log.info("CUDA Kernel:\n%s", source_code)

        def task():
            if aot_compile:
                # We rely on JITInductor to compile the CUDA code,
                # so that we can load it into AOTInductor.
                CUDACodeCache.compile(source_code, "o")
            return CUDACodeCache.load(source_code, dst_file_ext)[0]

        return self.submit(task)

    def rocm(
        self,
        source_code,
        dst_file_ext,
        aot_compile=False,
    ):
        kernel_code_log.info("ROCm Kernel:\n%s", source_code)

        def task():
            if aot_compile:
                _ = ROCmCodeCache.compile(source_code, dst_file_ext="o")
            if config.rocm.generate_test_runner:
                _ = ROCmCodeCache.compile(source_code, dst_file_ext="exe")
            return ROCmCodeCache.load(source_code, dst_file_ext)[0]

        return self.submit(task)

    def halide(self, meta: HalideMeta, source_code: str):
        kernel_code_log.info("Halide Kernel:\n%r\n%s", meta, source_code)
        if get_compile_threads() <= 1:
            return HalideCodeCache.generate_halide(meta, source_code)
        else:
            get_result = HalideCodeCache.generate_halide_async(
                meta, source_code, submit_fn=self.submit
            )
            return LambdaFuture(get_result)

    def wait(self, scope: dict[str, Any]) -> None:
        if get_compile_threads() > 1:
            with dynamo_timed(
                "async_compile.wait",
                log_pt2_compile_event=True,
                dynamo_compile_column_us="triton_compile_time_us",
                log_waitcounter=True,
            ):
                self._wait_futures(scope)

        _compile_end()

    def _wait_futures(self, scope: dict[str, Any]) -> None:
        kernels = {
            key: value
            for key, value in scope.items()
            if isinstance(value, (Future, CodeCacheFuture))
        }
        pbar = tqdm(
            total=len(kernels),
            desc="Inductor Compilation",
            disable=config.disable_progress,
            delay=0,
        )
        for key, result in kernels.items():
            if config.verbose_progress and not isinstance(pbar, _Faketqdm):
                pbar.set_postfix_str(key)
            try:
                scope[key] = result.result()
            except BrokenProcessPool as e:
                raise RuntimeError(
                    "A compilation subprocess exited unexpectedly. This "
                    "is likely due to a crash. To facilitate debugging, "
                    "you can re-run with TORCHINDUCTOR_COMPILE_THREADS=1 "
                    "to cause compilation to occur in the main process."
                ) from e
            pbar.update(1)


if (
    os.environ.get("TORCH_TNT_IN_USE", "0") == "1"
    or os.environ.get("TORCH_WARM_POOL", "1") != "1"
    # The subprocess pool is only used for the Triton backend
    or not has_triton_package()
    # Skip for fbcode. We have internal reports of usages inside multiprocessing
    # pools that lead to a multiplicative number of compile subprocesses.
    or config.is_fbcode()
):
    pass
else:
    AsyncCompile.warm_pool()

# On exit, give the workers a chance to clean themselves up. Without this the
# resource_tracker can complain about leaked semaphores coming from the
# ProcessPoolExecutor:
# UserWarning: resource_tracker: There appear to be 5 leaked semaphore objects
# to clean up at shutdown
atexit.register(shutdown_compile_workers)