mirror of
https://github.com/zebrajr/tensorflow.git
synced 2025-12-06 12:20:11 +01:00
Make the type of cell clip and projection clip more precise.
PiperOrigin-RevId: 267151703
This commit is contained in:
parent
8ba40bd9d4
commit
d1deb4212c
|
|
@@ -934,7 +934,7 @@ inline void LstmStepQuantized(
|
|||
int32_t layer_norm_output_scale_a, int32_t layer_norm_output_scale_b,
|
||||
const int32_t* input_bias_ptr, const int32_t* forget_bias_ptr,
|
||||
const int32_t* cell_bias_ptr, const int32_t* output_bias_ptr,
|
||||
int32 quantized_cell_clip, int32 quantized_proj_clip, int32_t cell_scale,
|
||||
int16_t quantized_cell_clip, int8_t quantized_proj_clip, int32_t cell_scale,
|
||||
const int32_t* inv_large_value,
|
||||
const int32_t* input_to_forget_effective_bias,
|
||||
const int32_t* recurrent_to_forget_effective_bias,
|
||||
|
|
|
|||
|
|
@@ -65,8 +65,8 @@ struct QuantizedLstmParameter {
|
|||
int32_t layer_norm_output_scale_a;
|
||||
int32_t layer_norm_output_scale_b;
|
||||
// Quantized clip value for cell and projection. Zero value means no clipping.
|
||||
int32_t quantized_cell_clip;
|
||||
int32_t quantized_proj_clip;
|
||||
int16_t quantized_cell_clip;
|
||||
int8_t quantized_proj_clip;
|
||||
int32_t hidden_zp;
|
||||
int32_t cell_scale;
|
||||
std::vector<int32_t> inv_large_value;
|
||||
|
|
|
|||
Loading…
Reference in New Issue
Block a user