Merge pull request #49768 from geetachavan1/cherrypicks_EKTA4

Prevent division by 0.
commit 1b1824829f
Author: Mihai Maruseac
Date:   2021-05-30 15:20:21 -07:00 (committed by GitHub)

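The fix follows the same pattern in all four hunks below: each `NumElements(input) / divisor` computation is preceded by a `TF_LITE_ENSURE` guard, so a malformed model carrying a zero-sized batch or channel dimension makes the kernel return an error status instead of crashing the process with an integer division by zero. A minimal sketch of the pattern outside the real kernel (the function `GuardedHeight` and its parameters are illustrative only; `NumElements` and `TF_LITE_ENSURE` are the TF Lite helpers used in the diff):

```cpp
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"

// Illustrative sketch, not the actual kernel code: check the divisor
// before dividing, so a zero-sized dimension yields kTfLiteError
// instead of a division-by-zero trap (SIGFPE on most platforms).
TfLiteStatus GuardedHeight(TfLiteContext* context, const TfLiteTensor* input,
                           int channels_in, int* height) {
  TF_LITE_ENSURE(context, channels_in != 0);
  *height = tflite::NumElements(input) / channels_in;
  return kTfLiteOk;
}
```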

@@ -521,6 +521,7 @@ TfLiteStatus Prepare(KernelType kernel_type, TfLiteContext* context,
     // Only one scale factor per batch is typically necessary. See optimized
     // implementation for why we need to allocate for the height of the inputs
     // flattened to 2D.
+    TF_LITE_ENSURE(context, channels_in != 0);
     const int height = NumElements(input) / channels_in;
     int scaling_dims[1] = {height};
     if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) {
@@ -563,6 +564,7 @@ TfLiteStatus Prepare(KernelType kernel_type, TfLiteContext* context,
     input_offsets->type = kTfLiteInt32;
     input_offsets->allocation_type = kTfLiteArenaRw;
     // See above comment for the need to allocate for height of inputs.
+    TF_LITE_ENSURE(context, channels_in != 0);
     const int height = NumElements(input) / channels_in;
     const int input_offset_dims[1] = {height};
     if (!TfLiteIntArrayEqualsArray(input_offsets->dims, 1,
@@ -827,8 +829,9 @@ TfLiteStatus EvalHybridPerChannel(TfLiteContext* context, TfLiteNode* node,
   CalculateActivationRange(params->activation, &output_activation_min,
                            &output_activation_max);
-  const int input_size = NumElements(input) / SizeOfDimension(input, 0);
   const int batch_size = SizeOfDimension(input, 0);
+  TF_LITE_ENSURE(context, batch_size != 0);
+  const int input_size = NumElements(input) / batch_size;
   TfLiteTensor* quantized_input_tensor;
   TF_LITE_ENSURE_OK(context,
                     GetTemporarySafe(context, node, data->input_quantized_index,
@@ -921,8 +924,9 @@ TfLiteStatus EvalHybrid(TfLiteContext* context, TfLiteNode* node,
   CalculateActivationRange(params->activation, &output_activation_min,
                            &output_activation_max);
-  const int input_size = NumElements(input) / SizeOfDimension(input, 0);
   const int batch_size = SizeOfDimension(input, 0);
+  TF_LITE_ENSURE(context, batch_size != 0);
+  const int input_size = NumElements(input) / batch_size;
   const float* input_ptr = GetTensorData<float>(input);
   TfLiteTensor* quantized_input_tensor;
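
For reference, `TF_LITE_ENSURE` (from `tensorflow/lite/c/common.h`) reports the failed condition through the context and returns `kTfLiteError` from the enclosing function. Paraphrased as a sketch, not the verbatim macro:

```cpp
// Paraphrased sketch of TF_LITE_ENSURE's behavior, not the verbatim macro:
// on a false condition, log the failure and bail out of the calling kernel
// function (Prepare/Eval) with kTfLiteError, which the interpreter then
// propagates to the caller instead of aborting the process.
#define TF_LITE_ENSURE_SKETCH(context, condition)                        \
  do {                                                                   \
    if (!(condition)) {                                                  \
      TF_LITE_KERNEL_LOG((context), "%s was not true.", #condition);     \
      return kTfLiteError;                                               \
    }                                                                    \
  } while (0)
```

This is why each guard can sit directly before its division: a failed check makes `Prepare`, `EvalHybridPerChannel`, or `EvalHybrid` return early with an error the runtime can surface.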