Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/34762

Until now it has only been by luck that "caffe2/core/tensor.h" happened to be included before "caffe2/caffe2/quantization/server/fbgemm_pack_blob.h". Relying on include order like this is not safe, and this diff fixes it by making the header include its own dependencies.

Test Plan: unittest

Reviewed By: jianyuh

Differential Revision: D20455352

fbshipit-source-id: 777dae32a23d0ec75fd7e5e1627426b5a5f81f5a
48 lines
1.3 KiB
C++
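Concretely, the header below now pulls in <caffe2/core/tensor.h> itself. As an illustrative sketch of the failure mode the diff removes (this standalone translation unit is hypothetical, not part of the PR):

// Hypothetical translation unit that includes this header first, with
// nothing else. Before this diff, this could fail to compile: the blob
// struct holds a caffe2::Tensor member, but the header did not include
// <caffe2/core/tensor.h> on its own.
#include "caffe2/quantization/server/fbgemm_pack_blob.h"

int main() {
  // Instantiating the blob requires the complete caffe2::Tensor type.
  caffe2::Int8FCDNNLowPPackedWeightBlob blob;
  (void)blob;
  return 0;
}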
#pragma once

#include <cstdint>
#include <memory>
#include <vector>

#include <fbgemm/Fbgemm.h>

#include <caffe2/core/tensor.h>
#include "caffe2/quantization/server/dnnlowp.h"

namespace caffe2 {

/**
 * Packed weight matrix for DNNLOWP Int8FC operator
 */
struct Int8FCDNNLowPPackedWeightBlob {
  // Quantization parameters of the weight (one entry per quantization group)
  std::vector<dnnlowp::TensorQuantizationParams> qparams;
  // Per-column sums of the quantized weights, used to compensate for a
  // nonzero activation zero point
  std::shared_ptr<std::vector<std::int32_t>> column_offsets;

  // The original tensor before packing but only with meta information
  Tensor original_tensor{CPU};

  // Bias, stored as 32-bit integers
  std::shared_ptr<std::vector<std::int32_t>> bias;

  // Only for 32-bit accumulation
  std::shared_ptr<fbgemm::PackBMatrix<std::int8_t>> W;

  // Only for 16-bit accumulation
  // Dense matrix holding common values
  std::shared_ptr<fbgemm::PackBMatrix<std::int8_t, std::int16_t>> W_acc16;
  // Sparse matrix holding outliers
  std::shared_ptr<fbgemm::CompressedSparseColumn> W_outlier;
  // Weight values representable in this many bits stay in W_acc16;
  // larger-magnitude values are treated as outliers and go to W_outlier
  int nbits_in_non_outlier;
};

/**
 * Packed weight matrix for DNNLOWP Int8Conv operator
 */
struct Int8ConvDNNLowPPackedWeightBlob : public Int8FCDNNLowPPackedWeightBlob {
  // Only for 32-bit accumulation
  std::shared_ptr<fbgemm::PackedDepthWiseConvMatrix> W_depthwise;
  std::shared_ptr<fbgemm::PackWeightMatrixForGConv<std::int8_t>> W_gconv;
  std::shared_ptr<
      fbgemm::PackWeightMatrixForGConv<std::int8_t, std::int32_t, 3>>
      W_gconv3d;
};

} // namespace caffe2
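For orientation, here is a minimal sketch (not part of this diff) of how the 32-bit-accumulation fields of Int8FCDNNLowPPackedWeightBlob might be filled in for an FC layer. The function pack_fc_weights and its inputs W_q, K, N, w_scale, and w_zero_point are hypothetical; the fbgemm packing call roughly follows how DNNLOWP's FC operator prepacks weights, under the assumption of a single quantization group and a row-major N x K weight layout.

#include <cstdint>
#include <memory>
#include <vector>

#include <fbgemm/Fbgemm.h>

#include "caffe2/quantization/server/fbgemm_pack_blob.h"

// Hypothetical helper: packs an N x K int8 weight matrix W_q (row-major)
// into the blob's 32-bit-accumulation fields.
void pack_fc_weights(
    const std::int8_t* W_q,
    int K,
    int N,
    float w_scale,
    std::int32_t w_zero_point,
    caffe2::Int8FCDNNLowPPackedWeightBlob& blob) {
  // Single quantization group for the whole weight tensor.
  dnnlowp::TensorQuantizationParams qp;
  qp.scale = w_scale;
  qp.zero_point = w_zero_point;
  blob.qparams = {qp};

  // Column offsets: per-output-channel sums of the quantized weights,
  // needed to compensate for a nonzero activation zero point.
  blob.column_offsets = std::make_shared<std::vector<std::int32_t>>(N);
  for (int n = 0; n < N; ++n) {
    std::int32_t sum = 0;
    for (int k = 0; k < K; ++k) {
      sum += W_q[n * K + k]; // row-major N x K layout
    }
    (*blob.column_offsets)[n] = sum;
  }

  // Prepacked B matrix for fbgemm's int8 GEMM with 32-bit accumulation.
  // With matrix_op_t::Transpose, the source is the N x K matrix and the
  // leading dimension is K.
  blob.W = std::make_shared<fbgemm::PackBMatrix<std::int8_t>>(
      fbgemm::matrix_op_t::Transpose, K, N, W_q, /*ld=*/K);
}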