report p50 time instead of avg (#28722)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/28722

As titled: report the p50 (median) of the per-pass run times instead of the average from the final pass.

Test Plan:
```
buck run mode/opt caffe2/benchmarks/operator_benchmark:benchmark_all_test -- --operator sigmoid

# ----------------------------------------
# PyTorch/Caffe2 Operator Micro-benchmarks
# ----------------------------------------
# Tag : short

# Benchmarking PyTorch: sigmoid
iters: 200, 462.6029555220157
iters: 400, 441.04792759753764
iters: 800, 441.81562116136774
iters: 1600, 440.79964311094955
iters: 3200, 436.3108493271284
iters: 6400, 440.87966314691585
iters: 12800, 452.29464218209614
# Mode: Eager
# Name: sigmoid_M512_N512
# Input: M: 512, N: 512
Forward Execution Time (us) : 441.048
```
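As a sanity check on the new behavior: the reported 441.048 us is exactly the median of the seven per-pass values printed above, while the mean (~445.1 us) is pulled up by the slow first pass. A minimal reproduction, assuming only numpy:

```python
import numpy as np

# Per-pass run times (us) copied from the trace above.
time_trace = [
    462.6029555220157, 441.04792759753764, 441.81562116136774,
    440.79964311094955, 436.3108493271284, 440.87966314691585,
    452.29464218209614,
]

p50 = np.percentile(np.array(time_trace), 50)
print("p50  (us): %.3f" % p50)                  # 441.048 -- matches the report
print("mean (us): %.3f" % np.mean(time_trace))  # ~445.107, skewed by pass 1
```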

Reviewed By: hl475

Differential Revision: D18149525

fbshipit-source-id: 5fe70a35b790ee7ad3ff57c0cb0b1c29cb609b83
Author: Mingzhe Li (committed by Facebook Github Bot)
Date:   2019-10-25 17:20:29 -07:00
parent: 60d606094c
commit: e886450863


```diff
@@ -185,6 +185,7 @@ class BenchmarkRunner(object):
         The execution stops when the time becomes significant.
         """
         curr_test_total_time = 0
+        time_trace = []
         while True:
             # Wipe cache
             if self.args.wipe_cache:
@@ -196,6 +197,8 @@
             results_are_significant = self._iteration_result_is_significant(
                 iters, run_time_sec, curr_test_total_time, self.has_explicit_iteration_count)
+            report_run_time = 1e6 * run_time_sec / iters
+            time_trace.append(report_run_time)
             if results_are_significant:
                 # Print out the last 50 values when running with AI PEP
                 if self.args.ai_pep_format:
@@ -205,8 +208,7 @@
             # Re-estimate the hopefully-sufficient
             # iteration count, and run the benchmark again...
             iters = self._predict_num_iter_needed(iters)
-        reported_run_time_us = (1e6 * run_time_sec / iters)
+        reported_run_time_us = np.percentile(np.array(time_trace), 50)
         return reported_run_time_us
 
     def _check_keep(self, test_flag, cmd_flag):
```
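For readers outside the harness, here is a minimal, self-contained sketch of the measurement loop this diff produces. `run_one_pass`, `measure_p50_us`, and the iters-doubling heuristic are hypothetical stand-ins for `_launch_benchmark`'s internals (`_iteration_result_is_significant`, `_predict_num_iter_needed`); only the final `np.percentile(..., 50)` mirrors the actual change:

```python
# Sketch only: hypothetical names, not the real BenchmarkRunner harness.
import time
import numpy as np

def run_one_pass(fn, iters):
    """Time `iters` back-to-back calls of `fn`; return elapsed seconds."""
    start = time.perf_counter()
    for _ in range(iters):
        fn()
    return time.perf_counter() - start

def measure_p50_us(fn, start_iters=200, min_total_time_sec=1.0):
    iters = start_iters
    curr_test_total_time = 0.0
    time_trace = []  # per-call time (us) estimated on each pass
    while True:
        run_time_sec = run_one_pass(fn, iters)
        curr_test_total_time += run_time_sec
        time_trace.append(1e6 * run_time_sec / iters)
        if curr_test_total_time >= min_total_time_sec:  # crude significance check
            break
        iters *= 2  # crude stand-in for _predict_num_iter_needed
    # Report the median over all passes, not the average of the last one.
    return np.percentile(np.array(time_trace), 50)

if __name__ == "__main__":
    import torch  # assumes torch is installed
    x = torch.rand(512, 512)
    print("sigmoid p50 (us): %.3f" % measure_p50_us(lambda: torch.sigmoid(x)))
```

The design point: the median over all passes is robust to one-off slow passes (cold caches, frequency scaling) that drag an average, or a single final-pass reading, away from the typical per-call cost.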