pytorch/torch/csrc/jit/mobile/function.cpp
Kimish Patel 17a5c67796 Add support to dump unsupported ops. Add lite_interpreter_load test. (#34072)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/34072

This diff helps check all the ops not supported by lite_interpreter.
Helpful mainly to find all the ops that need to be added instead of adding them
one by one.

Test Plan:
buck run caffe2/binaries:lite_interpreter_model_load --
--model=<bytecode-model-path>

Reviewed By: iseeyuan

Differential Revision: D20194092

fbshipit-source-id: 0d596cd0204308027194af7ed738551d0c32a374
2020-03-04 13:18:12 -08:00

63 lines
1.9 KiB
C++

#include "function.h"
#include "interpreter.h"

#include <utility>

#include <ATen/core/op_registration/op_registration.h>
#include <torch/csrc/jit/runtime/instruction.h>
#include <torch/csrc/jit/runtime/vararg_functions.h>
namespace torch{
namespace jit{
char const * toString(OpCode op);
namespace mobile {
// Constructs a mobile function with the given qualified name and an empty
// code object to be populated by the append_* methods below.
// `name` is a sink parameter (taken by value), so move it into the member
// instead of copying it.
Function::Function(c10::QualifiedName name)
    : name_(std::move(name)), code_(std::make_shared<Code>()) {}
// Appends one bytecode instruction (opcode plus its two integer operands
// X and N) to this function's code object.
// Fails fast with a readable message if the opcode is not in the set the
// mobile (lite) interpreter supports, so unsupported models are rejected
// at load time rather than at execution time.
void Function::append_instruction(OpCode op, int X, int N) {
TORCH_CHECK(isOpSupportedInMobile(op), toString(op),
" is not supported in mobile module.");
code_->instructions_.emplace_back(op, X, N);
}
bool Function::append_operator(const std::string& name,
const std::string& overload_name) {
// Keep the original opname in code_
code_->op_names_.emplace_back(name, overload_name);
auto opname = code_->op_names_.back();
// Add "_" prefix to work around the double registration both of jit/generated
// and here. TODO: remove it when we have separate build for lite interpreter.
if (opname.name != "aten::Int") {
opname.name = "_" + opname.name;
}
auto op = c10::Dispatcher::singleton().findSchema(opname);
if (not op.has_value()) {
return false;
}
// TODO: operator.h now does not depend on Node* so we can also look up operators from
// that registry for use in mobile as a way to share implementations.
auto fn = [op](Stack& stack) {
c10::Dispatcher::singleton().callBoxed(*op, &stack);
};
code_->operators_.emplace_back(fn);
return true;
}
// Records a constant referenced by this function's bytecode; instructions
// refer to it by its index in the constants table.
void Function::append_constant(const c10::IValue& constant) {
  code_->constants_.emplace_back(constant);
}
// Records a type referenced by this function's bytecode; instructions
// refer to it by its index in the types table.
void Function::append_type(const at::TypePtr& type) {
  code_->types_.emplace_back(type);
}
// Sets the number of registers the interpreter must allocate when
// executing this function's bytecode.
void Function::set_register_size(size_t size) {
code_->register_size_ = size;
}
// Executes this function's bytecode on the given stack using a fresh
// interpreter state; returns the interpreter's completion status.
bool Function::run(Stack& stack) const {
  return InterpreterState(code_).run(stack);
}
} // namespace mobile
} // namespace jit
} // namespace torch