pytorch/torch/csrc/jit/serialization/flatbuffer_serializer.h
Han Qi (qihqi) fed12ff680 [BE][flatbuffer] Remove code duplications and refactor (#79184)
Summary:
Remove code dup in import.cpp / export_modules.cpp such that
1. Only one copy of switching logic (detect flatbuffer / is_flatbuffer);
2. Move detection of whether flatbuffer support is included to runtime (so no more macros)

This also reverses the dependency direction: instead of import.cpp depending on flatbuffer_loader.cpp, flatbuffer_loader.cpp now depends on import.cpp.

Differential Revision: D36926217

Pull Request resolved: https://github.com/pytorch/pytorch/pull/79184
Approved by: https://github.com/zhxchen17
2022-06-20 16:37:38 +00:00

40 lines
1.3 KiB
C++

#pragma once
#include <ATen/core/qualified_name.h>
#include <flatbuffers/flatbuffers.h>
#include <string>
#include <vector>
#include <ATen/core/ivalue.h>
#include <ATen/core/jit_type.h>
#include <torch/csrc/jit/backends/backend_debug_handler.h>
#include <torch/csrc/jit/mobile/module.h>
#include <torch/csrc/jit/serialization/type_name_uniquer.h>
#include <torch/csrc/jit/serialization/mobile_bytecode_generated.h> // NOLINT
namespace torch {
namespace jit {
/// Serializes a mobile::Module to a flatbuffer file on disk.
///
/// @param module        the mobile module to serialize.
/// @param filename      destination path for the flatbuffer output file.
/// @param extra_files   additional (name -> payload) entries to embed in the
///                      output; defaults to none.
/// @param jit_sources   JIT source entries to embed alongside the mobile
///                      bytecode (presumably to allow round-tripping back to
///                      a full JIT module — confirm with flatbuffer_serializer.cpp).
/// @param jit_constants constants accompanying the embedded JIT sources;
///                      defaults to empty.
TORCH_API void save_mobile_module(
const mobile::Module& module,
const std::string& filename,
const ExtraFilesMap& extra_files = ExtraFilesMap(),
const ExtraFilesMap& jit_sources = ExtraFilesMap(),
const std::vector<IValue>& jit_constants = {});
/// Serializes a mobile::Module to an in-memory flatbuffer.
///
/// Same contract as save_mobile_module(), but instead of writing to a file
/// it returns the serialized bytes as a flatbuffers::DetachedBuffer, whose
/// lifetime (and the underlying allocation) is owned by the caller via the
/// returned object's destructor.
///
/// @param module        the mobile module to serialize.
/// @param extra_files   additional (name -> payload) entries to embed;
///                      defaults to none.
/// @param jit_sources   JIT source entries to embed alongside the mobile
///                      bytecode; defaults to none.
/// @param jit_constants constants accompanying the embedded JIT sources;
///                      defaults to empty.
/// @return the serialized module as a detached flatbuffer buffer.
TORCH_API flatbuffers::DetachedBuffer save_mobile_module_to_bytes(
const mobile::Module& module,
const ExtraFilesMap& extra_files = ExtraFilesMap(),
const ExtraFilesMap& jit_sources = ExtraFilesMap(),
const std::vector<IValue>& jit_constants = {});
// Makes the ability to load and save a Module as a flatbuffer file
// available to _load_for_mobile and friends (i.e. registers this
// serializer with the runtime dispatch added in #79184, so flatbuffer
// support is detected at runtime rather than via compile-time macros).
// This is NOT needed if using the other functions in this file directly.
//
// @return presumably whether registration succeeded / was newly performed —
//         confirm against the definition in flatbuffer_serializer.cpp.
TORCH_API bool register_flatbuffer_serializer();
} // namespace jit
} // namespace torch