[benchmarks] Set model name early to keep warmup and main model same (#159231)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/159231
Approved by: https://github.com/williamwen42
ghstack dependencies: #159209
This commit is contained in:
parent 2d1e92307d
commit 8c0c5c58c7
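The change is order-sensitive: previously the model's name was attached only after the experiment's results were collected, so the warmup pass ran against a model with no `name` attribute while later code saw one. Moving the assignment to just after `deepcopy_and_maybe_parallelize` means optimizer init, warmup, and the timed run all observe the same identifier. A minimal sketch of the pattern, assuming some downstream logic keys off the name (`Model` and `behavior_for` are hypothetical stand-ins, not the benchmark harness itself):

# Minimal sketch of the pattern this commit adopts; `Model` and
# `behavior_for` are hypothetical stand-ins, not the benchmark code.
class Model:
    pass

def behavior_for(model) -> str:
    # Stand-in for any logic that branches on the model's name
    # (per-model flags, result labeling, etc.).
    return getattr(model, "name", "<unnamed>")

def benchmark(name: str) -> None:
    model = Model()

    # Attach the identifier once, right after the model is built and
    # wrapped, rather than after the timed runs have finished.
    if not hasattr(model, "name"):
        model.name = name

    # Warmup and the timed run now observe the same name.
    assert behavior_for(model) == name  # warmup pass
    assert behavior_for(model) == name  # timed pass

benchmark("resnet50")

The diff applies this placement in both benchmarking paths below.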
@@ -2425,6 +2425,8 @@ class BenchmarkRunner:
         # Use distributed wrapping as necessary
         model = self.deepcopy_and_maybe_parallelize(model)
 
+        if not hasattr(model, name):
+            model.name = name
         self.init_optimizer(name, current_device, model.parameters())
 
         # The self.autocast context is needed for the model we export with aot_compile,
@@ -2528,8 +2530,6 @@ class BenchmarkRunner:
         result_summary = latency_experiment_summary(
             self.suite_name, self.args, model, timings, **experiment_kwargs
         )
-        if not hasattr(model, name):
-            model.name = name
         results.append(result_summary)
         return " ".join(map(str, results))
 
@@ -2586,6 +2586,9 @@ class BenchmarkRunner:
         # Use distributed wrapping as necessary
         model = self.deepcopy_and_maybe_parallelize(model)
 
+        if not hasattr(model, name):
+            model.name = name
+
         self.init_optimizer(name, current_device, model.parameters())
 
         # The self.autocast context is needed for the model we export with aot_compile,
@@ -2699,8 +2702,6 @@ class BenchmarkRunner:
             f"{ok:3}/{total:3} +{frames_third_pass} frames {compilation_time:3.0f}s"
         )
 
-        if not hasattr(model, name):
-            model.name = name
         results.append(experiment(model, example_inputs, **experiment_kwargs))
         return " ".join(map(str, results))
 