[dynamo] add benchmark for guard eval (#142430)

Benchmarks (median time reported by the script):
- 713.2us (Python 3.10)
- 598.8us (Python 3.12)
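
These figures are the medians the script prints; each run emits one line per benchmarked function, shaped roughly like the following (the warmup figure varies per machine and is left elided):

    compiled 713.2us (warmup=…s)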

Pull Request resolved: https://github.com/pytorch/pytorch/pull/142430
Approved by: https://github.com/jansel
ghstack dependencies: #142117
William Wen 2024-12-16 22:30:51 +00:00 committed by PyTorch MergeBot
parent 97ca09f692
commit c04f0bb7b9

@@ -0,0 +1,46 @@
import time
import timeit

import numpy as np

import torch
import torch._dynamo.config

# to satisfy linter complaining about undefined variable
foo = None

# Generate a function with 100 tensor arguments so that guard evaluation
# has many inputs to check on every call.
args = [f"x{i}" for i in range(100)]

fn_str = f"""\
def foo({", ".join(args)}):
    n = {" + ".join(arg + ".shape[0]" for arg in args)}
    return x0 + n
"""

exec(fn_str, globals())

# Allow enough cache entries for all 10 input sizes used below.
torch._dynamo.config.cache_size_limit = 16


def bench(name, fn):
    torch._dynamo.reset()
    # 10 sets of inputs with distinct shapes; with dynamic=False each shape
    # gets its own cache entry, so every call exercises guard evaluation.
    inps = [[torch.randn(i) for _ in range(100)] for i in range(10, 101, 10)]

    def run_fn():
        for inp in inps:
            fn(*inp)

    # Warmup: compile all 10 variants before timing.
    start = time.perf_counter()
    for _ in range(3):
        run_fn()
    end = time.perf_counter()

    results = timeit.repeat(lambda: run_fn(), number=1000, repeat=10)
    print(f"{name} {np.median(results) * 1000:.1f}us (warmup={end - start:.1f}s)")


def main():
    bench("compiled", torch.compile(foo, dynamic=False))  # type: ignore[F821]


if __name__ == "__main__":
    main()
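
The harness is easy to reuse for other variants. As a hedged sketch (the eager-mode baseline is an assumption for illustration, not part of this commit), main() could also time the uncompiled function to separate guard/dispatch overhead from the work itself:

def main():
    # Hypothetical extension, not in this commit: eager baseline for comparison.
    bench("eager", foo)  # type: ignore[F821]
    bench("compiled", torch.compile(foo, dynamic=False))  # type: ignore[F821]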