[Pytorch Edge] Lean Runtime Test
Summary: As far as I can tell, there's no CI that actually runs the lean_runtime; this change should add it. (Is this directory covered by CI?) Next up is to create some tests for min_runtime_lib. (Note: this ignores all push-blocking failures!)

Test Plan: buck test :lean_runtime_delegate_flatbuffer_test

Reviewed By: iseeyuan

Differential Revision: D34255148

fbshipit-source-id: b44693220e93869edd984bbcd17d33db4007a4ea
(cherry picked from commit 0a4a6b5bd2b4a1f8cce8bc1c4a22dad9539631c1)
This commit is contained in:
parent 3f7c17a2b9
commit faacb8ab36
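What follows is the diff to the BackendWithCompiler test backend: the profiler_edge.h include and every per-op timing call are wrapped in #ifndef NO_PROFILING guards, so a build that defines NO_PROFILING (such as the lean runtime) compiles without any profiler dependency.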
@@ -2,7 +2,10 @@
 #include <c10/core/TensorImpl.h>
 #include <torch/csrc/jit/backends/backend.h>
 #include <torch/csrc/jit/backends/backend_exception.h>
+
+#ifndef NO_PROFILING
 #include <torch/csrc/jit/mobile/profiler_edge.h>
+#endif
 
 namespace torch {
 namespace jit {
@@ -109,13 +112,17 @@ class BackendWithCompiler : public PyTorchBackendInterface {
     op_runtimes_us.reserve(handle.toList().size());
 
     c10::List<at::Tensor> output_list;
+#ifndef NO_PROFILING
     auto start_us = torch::profiler::impl::getTime() / 1000;
+#endif
     for (const auto& token : handle.toList()) {
       IValue val = token;
       auto instruction = val.toTupleRef().elements()[0].toStringRef();
       auto debug_handle = val.toTupleRef().elements()[1].toInt();
       double const_val = 1.0;
+#ifndef NO_PROFILING
       auto start_time_us = torch::profiler::impl::getTime() / 1000;
+#endif
       try {
         if (instruction.rfind("prim::Constant", 0) == 0) {
           // 15 is the length of 'prim::Constant#' the constant val comes after
@@ -158,10 +165,13 @@ class BackendWithCompiler : public PyTorchBackendInterface {
       } catch (c10::Error& e) {
         TORCH_DELEGATED_BACKEND_THROW(false, e.what(), debug_handle);
       }
+#ifndef NO_PROFILING
       auto end_time_us = torch::profiler::impl::getTime() / 1000;
       auto duration = end_time_us - start_time_us;
       op_runtimes_us.emplace_back(duration, debug_handle, instruction);
+#endif
     }
+#ifndef NO_PROFILING
     for (const auto& tup : op_runtimes_us) {
       RECORD_BACKEND_EVENT_TO_EDGE_PROFILER(
           start_us,
@@ -171,6 +181,7 @@ class BackendWithCompiler : public PyTorchBackendInterface {
           "test_backend");
       start_us = start_us + std::get<0>(tup);
     }
+#endif
     return c10::impl::toList(output_list);
   }
 };
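The pattern above is plain conditional compilation: defining NO_PROFILING at build time removes both the profiler include and the timing/recording code, so the lean runtime pays no binary-size or runtime cost for profiling. Below is a minimal, self-contained sketch of the same pattern; it is not the commit's code, getTimeUs() is a hypothetical stand-in for torch::profiler::impl::getTime() / 1000, and the op list is made up for illustration. Compile as-is to keep the timing, or with -DNO_PROFILING to strip it.

// Sketch of the NO_PROFILING guard pattern from the diff above.
// Hypothetical, simplified code; not taken from the PyTorch sources.
#include <chrono>
#include <cstdint>
#include <iostream>
#include <string>
#include <tuple>
#include <vector>

#ifndef NO_PROFILING
// Stand-in for torch::profiler::impl::getTime() / 1000 (microseconds).
static int64_t getTimeUs() {
  return std::chrono::duration_cast<std::chrono::microseconds>(
             std::chrono::steady_clock::now().time_since_epoch())
      .count();
}
#endif

int main() {
  // Pretend instruction stream; the real backend walks handle.toList().
  std::vector<std::string> ops = {"prim::Constant#1", "aten::add"};
  // (duration_us, debug_handle, instruction), mirroring op_runtimes_us.
  std::vector<std::tuple<int64_t, int64_t, std::string>> op_runtimes_us;

  int64_t debug_handle = 0;
  for (const auto& instruction : ops) {
#ifndef NO_PROFILING
    auto start_time_us = getTimeUs();
#endif
    // ... execute the instruction here ...
#ifndef NO_PROFILING
    auto end_time_us = getTimeUs();
    op_runtimes_us.emplace_back(
        end_time_us - start_time_us, debug_handle, instruction);
#endif
    ++debug_handle;
  }

#ifndef NO_PROFILING
  // The real backend forwards these to RECORD_BACKEND_EVENT_TO_EDGE_PROFILER.
  for (const auto& tup : op_runtimes_us) {
    std::cout << std::get<2>(tup) << " took " << std::get<0>(tup) << " us\n";
  }
#endif
  return 0;
}

With -DNO_PROFILING, the preprocessor drops the guarded regions before compilation, which is why the diff also guards the #include itself: the lean build does not even need profiler_edge.h to be present.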