pytorch/torch/csrc/jit/mobile/code.h
Zhengxu Chen 12daa4f663 [jit][edge] Enable CALL instruction in lite interpreter. (#65964)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/65964

ghstack-source-id: 141425519

Test Plan: buck run xplat/caffe2:test_lite_interpreter

Reviewed By: cccclai

Differential Revision: D31326149

fbshipit-source-id: 8a599d92f3fa4e6c125100adb36d89592e71e547
2021-10-25 14:44:33 -07:00

#pragma once

#include <vector>

#include <ATen/core/ivalue.h>
#include <ATen/core/operator_name.h>
#include <torch/csrc/jit/runtime/instruction.h>

namespace torch {
namespace jit {
namespace mobile {

using Stack = std::vector<c10::IValue>;
using DebugHandle = int64_t;

class Function;

// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct Code {
  // Bytecode instructions executed in order by the mobile interpreter.
  std::vector<Instruction> instructions_;
  // One debug handle per instruction, used to map runtime failures back to
  // debug information.
  std::vector<DebugHandle> debug_handles_;
  // Names of the operators referenced by OP instructions.
  std::vector<c10::OperatorName> op_names_;
  // Resolved operator functors, indexed in the same order as op_names_.
  std::vector<std::function<void(Stack&)>> operators_;
  // Constant pool referenced by LOADC instructions.
  std::vector<c10::IValue> constants_;
  // Type pool referenced by instructions that need type information.
  std::vector<c10::TypePtr> types_;
  // Pointers to the functions invoked by CALL instructions.
  // TODO: After we actually export CALL instructions we can remove this.
  // We may need a two-stage importing scheme, where we first construct all
  // function objects and then append referenced function pointers. This
  // could be done in parseMethods().
  std::vector<mobile::Function*> functions_;
  size_t register_size_; // Aggregated output size.
};

} // namespace mobile
} // namespace jit
} // namespace torch
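
For orientation, below is a minimal, hypothetical sketch of how an interpreter loop could consume the fields of Code: OP instructions index into operators_ (paired with op_names_), LOADC indexes into constants_, and the CALL instruction enabled by this change indexes into functions_. This is not the real dispatch loop, which lives in torch/csrc/jit/mobile/interpreter.cpp and additionally handles registers, many more opcodes, and error reporting via debug_handles_; the run_sketch helper and the Function::run call interface shown here are simplifying assumptions.

// Hypothetical sketch only -- not the actual lite interpreter.
#include <torch/csrc/jit/mobile/code.h>

namespace torch {
namespace jit {
namespace mobile {

void run_sketch(const Code& code, Stack& stack) {
  size_t pc = 0;
  while (pc < code.instructions_.size()) {
    const Instruction& inst = code.instructions_[pc];
    switch (inst.op) {
      case OP:
        // X selects a resolved operator functor; op_names_[X] names it.
        code.operators_[inst.X](stack);
        ++pc;
        break;
      case LOADC:
        // X selects an entry in the constant pool.
        stack.emplace_back(code.constants_[inst.X]);
        ++pc;
        break;
      case CALL:
        // X selects the callee. The call interface here is an assumption for
        // illustration; the real mobile::Function API may differ.
        code.functions_[inst.X]->run(stack);
        ++pc;
        break;
      case RET:
        return;
      default:
        ++pc; // All other opcodes are omitted from this sketch.
    }
  }
}

} // namespace mobile
} // namespace jit
} // namespace torch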