Make speed_benchmark_torch report latency in us (#37953)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/37953

Earlier the output declared the latency unit as "us" (microseconds), but the values it reported were actually milliseconds. The measurement now uses microseconds so the unit and the values agree.
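For context, a minimal self-contained sketch of the timing pattern the fix lands on (the sleep stands in for module.forward(inputs), and the iteration count is illustrative, not the benchmark's actual default): each iteration is measured with std::chrono and cast to microseconds, so the recorded values match the "us" unit declared in the PyTorchObserver line.

#include <chrono>
#include <iostream>
#include <thread>
#include <vector>

int main() {
  using namespace std::chrono;
  std::vector<float> times;
  for (int i = 0; i < 5; ++i) {
    auto start = high_resolution_clock::now();
    // Stand-in for the real work, e.g. module.forward(inputs).
    std::this_thread::sleep_for(milliseconds(2));
    auto stop = high_resolution_clock::now();
    // The bug was casting to milliseconds here while labeling the value "us";
    // casting to microseconds makes the label and the value agree.
    auto duration = duration_cast<microseconds>(stop - start);
    times.push_back(duration.count());
  }
  for (auto t : times) {
    std::cout << "latency: " << t << " us" << std::endl;
  }
  return 0;
}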

Test Plan: buck run aibench:run_bench -- -b aibench/specifications/models/pytorch/fbnet/fbnet_mobile_inference.json --devices s9u --remote --framework pytorch --logger_level info --job_queue aibench_interactive --platform android/full_jit

Reviewed By: xcheng16

Differential Revision: D21349612

fbshipit-source-id: b97b6216eb0264123ff2c7852a0678b2008b0bf1
Author: Kimish Patel (2020-05-07 11:05:16 -07:00), committed by Facebook GitHub Bot
Parent: 85fccba224
Commit: dd64d26d74


@@ -211,23 +211,23 @@ int main(int argc, char** argv) {
       ".");
   caffe2::Timer timer;
   std::vector<float> times;
-  auto millis = timer.MilliSeconds();
+  auto micros = timer.MicroSeconds();
   for (int i = 0; i < FLAGS_iter; ++i) {
     auto start = high_resolution_clock::now();
     module.forward(inputs);
     auto stop = high_resolution_clock::now();
-    auto duration = duration_cast<milliseconds>(stop - start);
+    auto duration = duration_cast<microseconds>(stop - start);
     times.push_back(duration.count());
   }
-  millis = timer.MilliSeconds();
+  micros = timer.MicroSeconds();
   if (FLAGS_report_pep) {
     for (auto t : times) {
       std::cout << "PyTorchObserver {\"type\": \"NET\", \"unit\": \"us\", \"metric\": \"latency\", \"value\": \"" << t << "\"}" << std::endl;
     }
   }
-  std::cout << "Main run finished. Milliseconds per iter: "
-            << millis / FLAGS_iter
-            << ". Iters per second: " << 1000.0 * FLAGS_iter / millis
+  std::cout << "Main run finished. Microseconds per iter: "
+            << micros / FLAGS_iter
+            << ". Iters per second: " << 1000.0 * 1000 * FLAGS_iter / micros
             << std::endl;
   return 0;
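As a sanity check on the new arithmetic (a standalone sketch with made-up numbers, not part of the commit): with the total elapsed time now in microseconds, the per-iteration latency is micros / FLAGS_iter and the throughput is 1000.0 * 1000 * FLAGS_iter / micros, i.e. 1e6 * iterations / microseconds.

#include <cstdio>

int main() {
  // Hypothetical values for illustration: 50 iterations in 125,000 us total.
  const int iters = 50;
  const double micros = 125000.0;
  const double us_per_iter = micros / iters;                    // 2500 us/iter
  const double iters_per_sec = 1000.0 * 1000 * iters / micros;  // 400 iters/s
  std::printf("Microseconds per iter: %.1f. Iters per second: %.1f\n",
              us_per_iter, iters_per_sec);
  return 0;
}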