mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-06 12:20:52 +01:00
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/29694 This PR adds preliminary support required to be able to run quantized pytorch models on a C2 backend. For quantized ops we use a custom domain name 'caffe2' to register the ops if they are in the "quantized" namespace. The change also adds JIT pass to unpack the quantized weights and insert the unpacked values into the graph. The actual tensor values are looked up from the params dict. Test Plan: python test/onnx/test_pytorch_onnx_caffe2.py TestQuantizedOps Imported from OSS Reviewed By: houseroad Differential Revision: D18467130 fbshipit-source-id: 53ebd8c43935f7d7e74305dad6c231a2247df176
20 lines
472 B
C++
#pragma once

#include <torch/csrc/jit/ir.h>
#include <torch/csrc/jit/script/module.h>

#include <map>
#include <memory>
#include <string>
#include <utility>
namespace torch {
|
|
namespace jit {
|
|
using ValueToParamPairMap =
|
|
std::map<Value*, std::pair<std::string, at::Tensor>>;
|
|
|
|
using ParamMap = std::map<std::string, at::Tensor>;
|
|
|
|
ValueToParamPairMap buildValueToParamsMap(Block* b, const ParamMap& paramsDict);
|
|
void eraseUnusedValuesFromMap(ValueToParamPairMap& valsToParamsMap);
|
|
|
|
} // namespace jit
|
|
} // namespace torch
|