mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-07 12:21:27 +01:00
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/41934 The model exported from the online training workflow with int8 quantization contains FCs with 4 inputs. The extra input is the quant_param blob. This diff adjusts the bound_shape_inferencer and the int8 op schema to get shape info for the quant_param input. Test Plan: ``` buck test caffe2/caffe2/opt:bound_shape_inference_test ``` Reviewed By: yinghai Differential Revision: D22683554 fbshipit-source-id: 684d1433212a528120aba1c37d27e26b6a31b403 |
| Name | | |
|---|---|---|
| .. | ||
| CMakeLists.txt | ||
| init_qnnpack.cc | ||
| int8_add_op.cc | ||
| int8_add_op.h | ||
| int8_average_pool_op.cc | ||
| int8_average_pool_op.h | ||
| int8_channel_shuffle_op.cc | ||
| int8_channel_shuffle_op.h | ||
| int8_concat_op.cc | ||
| int8_concat_op.h | ||
| int8_conv_op_relu.cc | ||
| int8_conv_op.cc | ||
| int8_conv_op.h | ||
| int8_conv_transpose_op.cc | ||
| int8_conv_transpose_op.h | ||
| int8_dequantize_op.cc | ||
| int8_dequantize_op.h | ||
| int8_fc_op.cc | ||
| int8_fc_op.h | ||
| int8_flatten_op.cc | ||
| int8_flatten_op.h | ||
| int8_given_tensor_fill_op.cc | ||
| int8_given_tensor_fill_op.h | ||
| int8_leaky_relu_op.cc | ||
| int8_leaky_relu_op.h | ||
| int8_max_pool_op.cc | ||
| int8_max_pool_op.h | ||
| int8_quantize_op.cc | ||
| int8_quantize_op.h | ||
| int8_relu_op.cc | ||
| int8_relu_op.h | ||
| int8_reshape_op.cc | ||
| int8_reshape_op.h | ||
| int8_resize_nearest_op.cc | ||
| int8_resize_nearest_op.h | ||
| int8_roi_align_op_test.cc | ||
| int8_roi_align_op.cc | ||
| int8_roi_align_op.h | ||
| int8_sigmoid_op.cc | ||
| int8_sigmoid_op.h | ||
| int8_simd.h | ||
| int8_slice_op.cc | ||
| int8_slice_op.h | ||
| int8_softmax_op.cc | ||
| int8_softmax_op.h | ||
| int8_test_utils.h | ||
| int8_test.cc | ||
| int8_transpose_op.cc | ||
| int8_transpose_op.h | ||
| int8_utils.h | ||