# Mirror of https://github.com/zebrajr/pytorch.git (synced 2025-12-07 12:21:27 +01:00)
#
# Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/54652
# This PR adds a fairly robust runner for the instruction count microbenchmarks.
# Key features are:
#   * Timeout and retry. (In rare cases, Callgrind will hang under heavy load.)
#   * Robust error handling and keyboard interrupt support.
#   * Benchmarks are pinned to cores. (Wall times still won't be great, but
#     it's something.)
#   * Progress printouts, including a rough ETA.
# Test Plan: Imported from OSS
# Reviewed By: pbelevich
# Differential Revision: D27537823
# Pulled By: robieta
# fbshipit-source-id: 699ac907281d28bf7ffa08594253716ca40204ba
"""Basic runner for the instruction count microbenchmarks.

The contents of this file are placeholders, and will be replaced by more
expressive and robust components (e.g. better runner and result display
components) in future iterations. However this allows us to exercise the
underlying benchmark generation infrastructure in the meantime.
"""
# Project-local imports: benchmark definitions (`BENCHMARKS`), the expansion
# step that turns them into concrete (label, autolabels, timer_args) triples
# (`materialize`), and the execution machinery that runs them.
from core.expand import materialize
from definitions.standard import BENCHMARKS
from execution.runner import Runner
from execution.work import WorkOrder
def main() -> None:
    """Materialize the standard benchmarks, run them, and print the results.

    NOTE(review): this is the placeholder runner described in the module
    docstring; the plain ``print`` display will be replaced by richer result
    components in later iterations.
    """
    # One WorkOrder per expanded benchmark. Callgrind can hang in rare cases
    # under heavy load, so each order carries a 10-minute timeout and up to
    # two retries.
    work_orders = tuple(
        WorkOrder(label, autolabels, timer_args, timeout=600, retries=2)
        for label, autolabels, timer_args in materialize(BENCHMARKS)
    )

    results = Runner(work_orders).run()

    # Crude display: one line per work order with its measured instruction
    # count, keyed back through the original `work_orders` tuple so output
    # order matches definition order.
    for work_order in work_orders:
        print(
            work_order.label,
            work_order.autolabels,
            work_order.timer_args.num_threads,
            results[work_order].instructions,
        )


if __name__ == "__main__":
    main()