diff --git a/test/cpp/jit/test_backend_compiler_lib.cpp b/test/cpp/jit/test_backend_compiler_lib.cpp
index 1bdf255a0d5..372b08a392d 100644
--- a/test/cpp/jit/test_backend_compiler_lib.cpp
+++ b/test/cpp/jit/test_backend_compiler_lib.cpp
@@ -2,7 +2,10 @@
 #include
 #include
 #include
+
+#ifndef NO_PROFILING
 #include
+#endif
 
 namespace torch {
 namespace jit {
@@ -109,13 +112,17 @@ class BackendWithCompiler : public PyTorchBackendInterface {
     op_runtimes_us.reserve(handle.toList().size());
 
     c10::List<at::Tensor> output_list;
+#ifndef NO_PROFILING
     auto start_us = torch::profiler::impl::getTime() / 1000;
+#endif
     for (const auto& token : handle.toList()) {
       IValue val = token;
       auto instruction = val.toTupleRef().elements()[0].toStringRef();
       auto debug_handle = val.toTupleRef().elements()[1].toInt();
       double const_val = 1.0;
+#ifndef NO_PROFILING
       auto start_time_us = torch::profiler::impl::getTime() / 1000;
+#endif
       try {
         if (instruction.rfind("prim::Constant", 0) == 0) {
           // 15 is the length of 'prim::Constant#' the constant val comes after
@@ -158,10 +165,13 @@ class BackendWithCompiler : public PyTorchBackendInterface {
       } catch (c10::Error& e) {
         TORCH_DELEGATED_BACKEND_THROW(false, e.what(), debug_handle);
       }
+#ifndef NO_PROFILING
       auto end_time_us = torch::profiler::impl::getTime() / 1000;
       auto duration = end_time_us - start_time_us;
       op_runtimes_us.emplace_back(duration, debug_handle, instruction);
+#endif
     }
+#ifndef NO_PROFILING
     for (const auto& tup : op_runtimes_us) {
       RECORD_BACKEND_EVENT_TO_EDGE_PROFILER(
           start_us,
@@ -171,6 +181,7 @@ class BackendWithCompiler : public PyTorchBackendInterface {
           "test_backend");
       start_us = start_us + std::get<0>(tup);
     }
+#endif
     return c10::impl::toList(output_list);
   }
 };
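For context (not part of the patch): a minimal standalone sketch of the compile-time guard this diff applies. It uses std::chrono as a stand-in for torch::profiler::impl::getTime() so it compiles on its own, and assumes NO_PROFILING is passed as an ordinary preprocessor define (e.g. `g++ -std=c++17 -DNO_PROFILING guard_demo.cpp`); how the flag would be wired into PyTorch's own build is not shown in this diff.

// guard_demo.cpp -- timing code is kept only when NO_PROFILING is undefined.
#include <chrono>
#include <cstdio>

int do_work() {
  int acc = 0;
  for (int i = 0; i < 1000; ++i) {
    acc += i;  // placeholder for the backend's per-instruction work
  }
  return acc;
}

int main() {
#ifndef NO_PROFILING
  // Start timestamp in microseconds (stand-in for getTime() / 1000).
  auto start_us = std::chrono::duration_cast<std::chrono::microseconds>(
                      std::chrono::steady_clock::now().time_since_epoch())
                      .count();
#endif
  int result = do_work();
#ifndef NO_PROFILING
  // End timestamp and duration; this whole block disappears with -DNO_PROFILING.
  auto end_us = std::chrono::duration_cast<std::chrono::microseconds>(
                    std::chrono::steady_clock::now().time_since_epoch())
                    .count();
  std::printf("work took %lld us\n", static_cast<long long>(end_us - start_us));
#endif
  std::printf("result = %d\n", result);
  return 0;
}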