Mirror of https://github.com/zebrajr/tensorflow.git (synced 2025-12-06 12:20:11 +01:00).

Merge pull request #49768 from geetachavan1/cherrypicks_EKTA4 — "Prevent division by 0."

This change is contained in commit 1b1824829f.
@@ -521,6 +521,7 @@ TfLiteStatus Prepare(KernelType kernel_type, TfLiteContext* context,
       // Only one scale factor per batch is typically necessary. See optimized
       // implementation for why we need to allocate for the height of the inputs
       // flattened to 2D.
+      TF_LITE_ENSURE(context, channels_in != 0);
       const int height = NumElements(input) / channels_in;
       int scaling_dims[1] = {height};
       if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) {
@@ -563,6 +564,7 @@ TfLiteStatus Prepare(KernelType kernel_type, TfLiteContext* context,
       input_offsets->type = kTfLiteInt32;
       input_offsets->allocation_type = kTfLiteArenaRw;
       // See above comment for the need to allocate for height of inputs.
+      TF_LITE_ENSURE(context, channels_in != 0);
       const int height = NumElements(input) / channels_in;
       const int input_offset_dims[1] = {height};
       if (!TfLiteIntArrayEqualsArray(input_offsets->dims, 1,
@@ -827,8 +829,9 @@ TfLiteStatus EvalHybridPerChannel(TfLiteContext* context, TfLiteNode* node,
   CalculateActivationRange(params->activation, &output_activation_min,
                            &output_activation_max);

-  const int input_size = NumElements(input) / SizeOfDimension(input, 0);
   const int batch_size = SizeOfDimension(input, 0);
+  TF_LITE_ENSURE(context, batch_size != 0);
+  const int input_size = NumElements(input) / batch_size;
   TfLiteTensor* quantized_input_tensor;
   TF_LITE_ENSURE_OK(context,
                     GetTemporarySafe(context, node, data->input_quantized_index,
@@ -921,8 +924,9 @@ TfLiteStatus EvalHybrid(TfLiteContext* context, TfLiteNode* node,
   CalculateActivationRange(params->activation, &output_activation_min,
                            &output_activation_max);

-  const int input_size = NumElements(input) / SizeOfDimension(input, 0);
   const int batch_size = SizeOfDimension(input, 0);
+  TF_LITE_ENSURE(context, batch_size != 0);
+  const int input_size = NumElements(input) / batch_size;

   const float* input_ptr = GetTensorData<float>(input);
   TfLiteTensor* quantized_input_tensor;
Loading…
Reference in New Issue
Block a user