diff --git a/EdgeImpulse.EI-SDK.pdsc b/EdgeImpulse.EI-SDK.pdsc
index b401b55..10bbafc 100644
--- a/EdgeImpulse.EI-SDK.pdsc
+++ b/EdgeImpulse.EI-SDK.pdsc
@@ -5,13 +5,16 @@
EI-SDK
LICENSE-apache-2.0.txt
Edge Impulse SDK
- https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.54.4/
+ https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.54.8/
hello@edgeimpulse.com
https://github.com/edgeimpulse/edge-impulse-sdk-pack.git
-
+
EI-SDK
+
+ EI-SDK
+
EI-SDK
@@ -98,9 +101,6 @@
EI-SDK
-
-
- EI-SDK
@@ -146,7 +146,7 @@
-
+
Edge Impulse SDK
@@ -302,6 +302,7 @@
+
@@ -516,6 +517,7 @@
+
@@ -535,6 +537,7 @@
+
diff --git a/EdgeImpulse.pidx b/EdgeImpulse.pidx
index 2f4e788..3481163 100644
--- a/EdgeImpulse.pidx
+++ b/EdgeImpulse.pidx
@@ -2,8 +2,8 @@
EdgeImpulse
https://raw.githubusercontent.com/edgeimpulse/edge-impulse-sdk-pack/main/
- 2024-07-24 12:24:56
+ 2024-07-30 10:53:15
-
+
diff --git a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/core/api/flatbuffer_conversions.cpp b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/core/api/flatbuffer_conversions.cpp
index 31d4af9..3ac48cc 100644
--- a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/core/api/flatbuffer_conversions.cpp
+++ b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/core/api/flatbuffer_conversions.cpp
@@ -458,6 +458,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return ParseRsqrt(op, error_reporter, allocator, builtin_data);
}
+ case BuiltinOperator_SCATTER_ND: {
+ return ParseScatterNd(op, error_reporter, allocator, builtin_data);
+ }
+
case BuiltinOperator_SELECT_V2: {
return ParseSelectV2(op, error_reporter, allocator, builtin_data);
}
@@ -868,7 +872,6 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
case BuiltinOperator_NON_MAX_SUPPRESSION_V5:
case BuiltinOperator_RELU_N1_TO_1:
case BuiltinOperator_RELU_0_TO_1:
- case BuiltinOperator_SCATTER_ND:
case BuiltinOperator_SELECT:
case BuiltinOperator_SLICE:
case BuiltinOperator_TILE:
@@ -2022,6 +2025,14 @@ TfLiteStatus ParseRsqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
return kTfLiteOk;
}
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseScatterNd(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
diff --git a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/core/api/flatbuffer_conversions.h b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/core/api/flatbuffer_conversions.h
index b8e6019..fea9725 100644
--- a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/core/api/flatbuffer_conversions.h
+++ b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/core/api/flatbuffer_conversions.h
@@ -331,6 +331,9 @@ TfLiteStatus ParseRound(const Operator* op, ErrorReporter* error_reporter,
TfLiteStatus ParseRsqrt(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
+TfLiteStatus ParseScatterNd(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
TfLiteStatus ParseSelect(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
diff --git a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/reduce_common.h b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/reduce_common.h
new file mode 100644
index 0000000..6928981
--- /dev/null
+++ b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/reduce_common.h
@@ -0,0 +1,37 @@
+/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REDUCE_COMMON_H_
+#define TENSORFLOW_LITE_KERNELS_INTERNAL_REDUCE_COMMON_H_
+
+namespace tflite {
+namespace ops {
+namespace builtin {
+namespace reduce {
+
+enum ReduceType {
+ kSum,
+ kProd,
+ kMax,
+ kMin,
+ kAny,
+ kAll,
+};
+
+} // namespace reduce
+} // namespace builtin
+} // namespace ops
+} // namespace tflite
+
+#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REDUCE_COMMON_H_
diff --git a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/add.h b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/add.h
index ada6696..a2e05e4 100644
--- a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/add.h
+++ b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/add.h
@@ -194,6 +194,50 @@ inline void Add(const ArithmeticParams& params,
}
}
+inline void Add(const ArithmeticParams& params,
+ const RuntimeShape& input1_shape, const int32_t* input1_data,
+ const RuntimeShape& input2_shape, const int32_t* input2_data,
+ const RuntimeShape& output_shape, int32_t* output_data,
+ bool pot_scale = true) {
+ // if (!pot_scale) {
+ // AddGeneralParamScale(params, input1_shape, input1_data, input2_shape,
+ // input2_data, output_shape, output_data);
+ // return;
+ // }
+
+ TFLITE_DCHECK_LE(params.quantized_activation_min,
+ params.quantized_activation_max);
+
+ const int input1_shift = params.input1_shift;
+ const int flat_size =
+ MatchingElementsSize(input1_shape, input2_shape, output_shape);
+ const int32_t output_activation_min = params.quantized_activation_min;
+ const int32_t output_activation_max = params.quantized_activation_max;
+
+ TFLITE_DCHECK(input1_shift == 0 || params.input2_shift == 0);
+ TFLITE_DCHECK_LE(input1_shift, 0);
+ TFLITE_DCHECK_LE(params.input2_shift, 0);
+ const int32_t* not_shift_input =
+ input1_shift == 0 ? input1_data : input2_data;
+ const int32_t* shift_input = input1_shift == 0 ? input2_data : input1_data;
+ const int input_right_shift =
+ input1_shift == 0 ? -params.input2_shift : -input1_shift;
+
+ for (int i = 0; i < flat_size; i++) {
+ // F0 uses 0 integer bits, range [-1, 1].
+    using F0 = gemmlowp::FixedPoint<int32_t, 0>;
+
+ F0 input_ready_scaled = F0::FromRaw(not_shift_input[i]);
+ F0 scaled_input = F0::FromRaw(
+ gemmlowp::RoundingDivideByPOT(shift_input[i], input_right_shift));
+ F0 result = gemmlowp::SaturatingAdd(scaled_input, input_ready_scaled);
+ const int32_t raw_output = result.raw();
+ const int32_t clamped_output = std::min(
+ output_activation_max, std::max(output_activation_min, raw_output));
+ output_data[i] = clamped_output;
+ }
+}
+
template
inline typename std::enable_if::value, void>::type
BroadcastAdd4DSlow(const ArithmeticParams& params,
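The new int32_t overload above follows the power-of-two rescaling path: one operand must arrive unshifted, the other is rounding-right-shifted, and the saturated fixed-point sum is clamped to the activation range. A minimal sketch of driving it directly follows; the shape, data values, shift choices and the helper name Int32AddSketch are illustrative assumptions, not part of the SDK.

    // Hedged sketch, not part of the patch: exercising the int32_t reference Add
    // directly. In the kernel, ArithmeticParams is filled from the op's data.
    #include <cstdint>
    #include <limits>

    #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/add.h"

    void Int32AddSketch() {
      const tflite::RuntimeShape shape({1, 1, 1, 4});
      const int32_t input1[4] = {100, 200, 300, 400};
      const int32_t input2[4] = {10, 20, 30, 40};
      int32_t output[4] = {};

      tflite::ArithmeticParams params;
      params.input1_shift = 0;   // one operand must be unshifted
      params.input2_shift = -1;  // the other is rounding-right-shifted by 1
      params.quantized_activation_min = std::numeric_limits<int32_t>::min();
      params.quantized_activation_max = std::numeric_limits<int32_t>::max();

      // pot_scale=false mirrors how EvalAddQuantized() invokes this overload.
      tflite::reference_ops::Add(params, shape, input1, shape, input2, shape,
                                 output, /*pot_scale=*/false);
    }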
diff --git a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/scatter_nd.h b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/scatter_nd.h
new file mode 100644
index 0000000..48fd010
--- /dev/null
+++ b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/scatter_nd.h
@@ -0,0 +1,77 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SCATTER_ND_H_
+#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SCATTER_ND_H_
+
+#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h"
+#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h"
+
+namespace tflite {
+
+namespace reference_ops {
+
+template <typename IndicesT, typename UpdatesT>
+inline TfLiteStatus ScatterNd(const RuntimeShape& indices_shape,
+ const IndicesT* indices_data,
+ const RuntimeShape& updates_shape,
+ const UpdatesT* updates_data,
+ const RuntimeShape& output_shape,
+ UpdatesT* output_data) {
+ int n_slices = 1;
+ int slice_size = 1;
+ const int outer_dims = indices_shape.DimensionsCount() - 1;
+ const int indices_nd = indices_shape.Dims(outer_dims);
+ const int updates_dims = updates_shape.DimensionsCount();
+ for (int i = 0; i < outer_dims; ++i) {
+ n_slices *= indices_shape.Dims(i);
+ }
+ for (int i = outer_dims; i < updates_dims; ++i) {
+ slice_size *= updates_shape.Dims(i);
+ }
+
+ int output_flat_size = output_shape.FlatSize();
+ int remain_flat_size = output_flat_size;
+  std::vector<int> dims_to_count(indices_nd, 0);
+ for (int i = 0; i < indices_nd; ++i) {
+ dims_to_count[i] = remain_flat_size / output_shape.Dims(i);
+ remain_flat_size = dims_to_count[i];
+ }
+
+ if (n_slices * slice_size > updates_shape.FlatSize()) {
+ return kTfLiteError;
+ }
+ memset(output_data, 0, sizeof(UpdatesT) * output_flat_size);
+ for (int i = 0; i < n_slices; ++i) {
+ int to_pos = 0;
+ for (int j = 0; j < indices_nd; ++j) {
+ IndicesT idx = indices_data[i * indices_nd + j];
+ to_pos += idx * dims_to_count[j];
+ }
+ if (to_pos < 0 || to_pos + slice_size > output_flat_size) {
+ return kTfLiteError;
+ }
+ for (int j = 0; j < slice_size; j++) {
+ output_data[to_pos + j] += updates_data[i * slice_size + j];
+ }
+ }
+ return kTfLiteOk;
+}
+
+} // namespace reference_ops
+} // namespace tflite
+#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SCATTER_ND_H_
\ No newline at end of file
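For reference, the standard SCATTER_ND example worked through the routine above: indices of shape [4, 1] scatter four updates into an 8-element output, and duplicate indices would accumulate because the inner loop uses +=. The wrapper name ScatterNdSketch is illustrative, not an SDK symbol.

    // Hedged sketch, not part of the patch: expected result is
    // {0, 11, 0, 10, 9, 0, 0, 12}.
    #include <cstdint>

    #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/scatter_nd.h"

    TfLiteStatus ScatterNdSketch() {
      const tflite::RuntimeShape indices_shape({4, 1});
      const int32_t indices[] = {4, 3, 1, 7};
      const tflite::RuntimeShape updates_shape({4});
      const float updates[] = {9.f, 10.f, 11.f, 12.f};
      const tflite::RuntimeShape output_shape({8});
      float output[8];  // zero-filled by ScatterNd before accumulation

      return tflite::reference_ops::ScatterNd(indices_shape, indices,
                                              updates_shape, updates,
                                              output_shape, output);
    }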
diff --git a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/all_ops_resolver.cpp b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/all_ops_resolver.cpp
index e9d2d6f..dff04c9 100644
--- a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/all_ops_resolver.cpp
+++ b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/all_ops_resolver.cpp
@@ -91,6 +91,8 @@ AllOpsResolver::AllOpsResolver() {
AddQuantize();
AddReal();
AddReadVariable();
+ AddReduceAny();
+ AddReduceAll();
AddReduceMax();
AddReduceMin();
AddRelu();
@@ -101,6 +103,7 @@ AllOpsResolver::AllOpsResolver() {
AddRfft2D();
AddRound();
AddRsqrt();
+ AddScatterNd();
#ifndef TF_LITE_STATIC_MEMORY
AddSelect();
AddSelectV2();
diff --git a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/add.cpp b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/add.cpp
index 50c5470..2a8c2c8 100644
--- a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/add.cpp
+++ b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/add.cpp
@@ -243,6 +243,43 @@ TfLiteStatus EvalAddQuantized(TfLiteContext* context, TfLiteNode* node,
output);
break;
}
+ case kTfLiteInt32: {
+ tflite::ArithmeticParams op_params;
+ op_params.left_shift = data->left_shift;
+ op_params.input1_offset = data->input1_offset;
+ op_params.input1_multiplier = data->input1_multiplier;
+ op_params.input1_shift = data->input1_shift;
+ op_params.input2_offset = data->input2_offset;
+ op_params.input2_multiplier = data->input2_multiplier;
+ op_params.input2_shift = data->input2_shift;
+ op_params.output_offset = data->output_offset;
+ op_params.output_multiplier = data->output_multiplier;
+ op_params.output_shift = data->output_shift;
+ SetActivationParams(data->output_activation_min, data->output_activation_max,
+ &op_params);
+ bool need_broadcast = reference_ops::ProcessBroadcastShapes(
+ tflite::micro::GetTensorShape(input1),
+ tflite::micro::GetTensorShape(input2), &op_params);
+
+ if (need_broadcast) {
+        reference_ops::BroadcastAdd4DSlow(
+            op_params, tflite::micro::GetTensorShape(input1),
+            tflite::micro::GetTensorData<int32_t>(input1),
+            tflite::micro::GetTensorShape(input2),
+            tflite::micro::GetTensorData<int32_t>(input2),
+            tflite::micro::GetTensorShape(output),
+            tflite::micro::GetTensorData<int32_t>(output));
+ } else {
+        reference_ops::Add(op_params, tflite::micro::GetTensorShape(input1),
+                           tflite::micro::GetTensorData<int32_t>(input1),
+                           tflite::micro::GetTensorShape(input2),
+                           tflite::micro::GetTensorData<int32_t>(input2),
+                           tflite::micro::GetTensorShape(output),
+                           tflite::micro::GetTensorData<int32_t>(output),
+                           false);
+ }
+ break;
+ }
default:
MicroPrintf("Type %s (%d) not supported.",
TfLiteTypeGetName(output->type), output->type);
@@ -309,7 +346,7 @@ TfLiteStatus EvalAdd(TfLiteContext* context, TfLiteNode* node) {
if (output->type == kTfLiteFloat32) {
EvalAddFloat(context, node, params, data, input1, input2, output);
- } else if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) {
+ } else if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16 || output->type == kTfLiteInt32) {
TF_LITE_ENSURE_OK(context, EvalAddQuantized(context, node, params, data,
input1, input2, output));
} else {
@@ -1333,6 +1370,26 @@ TfLiteStatus EvalAddQuantized(TfLiteContext* context, TfLiteNode* node,
}
break;
}
+ case kTfLiteInt32: {
+ if (need_broadcast) {
+        reference_ops::BroadcastAdd4DSlow(
+            op_params, tflite::micro::GetTensorShape(input1),
+            tflite::micro::GetTensorData<int32_t>(input1),
+            tflite::micro::GetTensorShape(input2),
+            tflite::micro::GetTensorData<int32_t>(input2),
+            tflite::micro::GetTensorShape(output),
+            tflite::micro::GetTensorData<int32_t>(output));
+ } else {
+        reference_ops::Add(op_params, tflite::micro::GetTensorShape(input1),
+                           tflite::micro::GetTensorData<int32_t>(input1),
+                           tflite::micro::GetTensorShape(input2),
+                           tflite::micro::GetTensorData<int32_t>(input2),
+                           tflite::micro::GetTensorShape(output),
+                           tflite::micro::GetTensorData<int32_t>(output),
+                           false);
+ }
+ break;
+ }
default:
MicroPrintf("Type %s (%d) not supported.",
TfLiteTypeGetName(output->type), output->type);
@@ -1362,7 +1419,7 @@ TfLiteStatus AddEval(TfLiteContext* context, TfLiteNode* node) {
if (output->type == kTfLiteFloat32) {
EvalAdd(context, node, params, data, input1, input2, output);
- } else if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) {
+ } else if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16 || output->type == kTfLiteInt32) {
TF_LITE_ENSURE_OK(context, EvalAddQuantized(context, node, params, data,
input1, input2, output));
} else {
diff --git a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/arg_min_max.cpp b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/arg_min_max.cpp
index f781ab5..4c7afd9 100644
--- a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/arg_min_max.cpp
+++ b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/arg_min_max.cpp
@@ -74,6 +74,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node, bool is_arg_max) {
case kTfLiteInt8:
TF_LITE_ARG_MIN_MAX(int8_t, int32_t, int32_t);
break;
+ case kTfLiteInt32:
+ TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int32_t);
+ break;
default:
MicroPrintf(
"Only float32, uint8_t and int8_t are "
diff --git a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/cast.cpp b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/cast.cpp
index 19e545f..91e70fe 100644
--- a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/cast.cpp
+++ b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/cast.cpp
@@ -83,6 +83,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
tflite::micro::GetTensorShape(output));
switch (input->type) {
+ case kTfLiteBool:
+ return copyToTensor(context, input->data.b, output, num_elements);
case kTfLiteInt8:
return copyToTensor(context, input->data.int8, output, num_elements);
case kTfLiteInt16:
diff --git a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/gather.cpp b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/gather.cpp
index 4fb05d8..24b16a2 100644
--- a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/gather.cpp
+++ b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/gather.cpp
@@ -118,6 +118,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
switch (coords->type) {
case kTfLiteInt32:
+ case kTfLiteInt64:
break;
default:
MicroPrintf("Positions of type '%s' are not supported by gather.",
@@ -198,7 +199,6 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
tflite::micro::GetEvalInput(context, node, kInputPositions);
TfLiteEvalTensor* output =
tflite::micro::GetEvalOutput(context, node, kOutputTensor);
-
if (coords->type == kTfLiteInt32) {
switch (input->type) {
case kTfLiteFloat32:
@@ -214,6 +214,21 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
break;
}
}
+  else if (coords->type == kTfLiteInt64) {
+    switch (input->type) {
+      case kTfLiteFloat32:
+        return Gather<float, int64_t>(params, input, coords, output);
+        break;
+      case kTfLiteInt8:
+        return Gather<int8_t, int64_t>(params, input, coords, output);
+        break;
+      default:
+        MicroPrintf("Type '%s' is not supported by gather.",
+                    TfLiteTypeGetName(input->type));
+        return kTfLiteError;
+        break;
+    }
+  }
return kTfLiteOk;
}
} // namespace
diff --git a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/micro_ops.h b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/micro_ops.h
index fd28a32..32c5680 100644
--- a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/micro_ops.h
+++ b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/micro_ops.h
@@ -96,12 +96,15 @@ TfLiteRegistration Register_PRELU();
TfLiteRegistration Register_QUANTIZE();
TfLiteRegistration Register_READ_VARIABLE();
TfLiteRegistration Register_REAL();
+TfLiteRegistration Register_REDUCE_ANY();
+TfLiteRegistration Register_REDUCE_ALL();
TfLiteRegistration Register_REDUCE_MAX();
TfLiteRegistration Register_REDUCE_MIN();
TfLiteRegistration Register_RELU();
TfLiteRegistration Register_RELU6();
TfLiteRegistration Register_RESIZE_BILINEAR();
TfLiteRegistration Register_RFFT2D();
+TfLiteRegistration Register_SCATTER_ND();
#ifndef TF_LITE_STATIC_MEMORY
TfLiteRegistration Register_SELECT();
TfLiteRegistration Register_SELECT_V2();
diff --git a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce.cpp b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce.cpp
index b346282..de4659d 100644
--- a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce.cpp
+++ b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce.cpp
@@ -32,14 +32,9 @@ void* InitReduce(TfLiteContext* context, const char* buffer, size_t length) {
return context->AllocatePersistentBuffer(context, sizeof(OpDataReduce));
}
-TfLiteStatus PrepareMax(TfLiteContext* context, TfLiteNode* node) {
- return PrepareMinMaxHelper(context, node,
-                             static_cast<OpDataReduce*>(node->user_data));
-}
-
-TfLiteStatus PrepareMin(TfLiteContext* context, TfLiteNode* node) {
- return PrepareMinMaxHelper(context, node,
-                             static_cast<OpDataReduce*>(node->user_data));
+TfLiteStatus PrepareMaxOrMinOrAny(TfLiteContext* context, TfLiteNode* node) {
+ return PrepareReduceHelper(context, node,
+                             static_cast<OpDataReduce*>(node->user_data));
}
TfLiteStatus PrepareMeanOrSum(TfLiteContext* context, TfLiteNode* node) {
@@ -47,6 +42,16 @@ TfLiteStatus PrepareMeanOrSum(TfLiteContext* context, TfLiteNode* node) {
                               static_cast<OpDataReduce*>(node->user_data));
}
+TfLiteStatus EvalAny(TfLiteContext* context, TfLiteNode* node) {
+  OpDataReduce* op_data = static_cast<OpDataReduce*>(node->user_data);
+ return EvalReduceHelper(context, node, op_data, ReduceType::kAny);
+}
+
+TfLiteStatus EvalAll(TfLiteContext* context, TfLiteNode* node) {
+  OpDataReduce* op_data = static_cast<OpDataReduce*>(node->user_data);
+ return EvalReduceHelper(context, node, op_data, ReduceType::kAll);
+}
+
TfLiteStatus EvalMean(TfLiteContext* context, TfLiteNode* node) {
return EvalMeanHelper(context, node,
                         static_cast<OpDataReduce*>(node->user_data));
@@ -54,12 +59,12 @@ TfLiteStatus EvalMean(TfLiteContext* context, TfLiteNode* node) {
TfLiteStatus EvalMax(TfLiteContext* context, TfLiteNode* node) {
  OpDataReduce* op_data = static_cast<OpDataReduce*>(node->user_data);
- return EvalMaxHelper(context, node, op_data);
+ return EvalReduceHelper(context, node, op_data, ReduceType::kMax);
}
TfLiteStatus EvalMin(TfLiteContext* context, TfLiteNode* node) {
  OpDataReduce* op_data = static_cast<OpDataReduce*>(node->user_data);
- return EvalMinHelper(context, node, op_data);
+ return EvalReduceHelper(context, node, op_data, ReduceType::kMin);
}
TfLiteStatus EvalSum(TfLiteContext* context, TfLiteNode* node) {
@@ -72,11 +77,19 @@ TfLiteRegistration Register_MEAN() {
}
TfLiteRegistration Register_REDUCE_MAX() {
- return tflite::micro::RegisterOp(InitReduce, PrepareMax, EvalMax);
+ return tflite::micro::RegisterOp(InitReduce, PrepareMaxOrMinOrAny, EvalMax);
}
TfLiteRegistration Register_REDUCE_MIN() {
- return tflite::micro::RegisterOp(InitReduce, PrepareMin, EvalMin);
+ return tflite::micro::RegisterOp(InitReduce, PrepareMaxOrMinOrAny, EvalMin);
+}
+
+TfLiteRegistration Register_REDUCE_ANY() {
+ return tflite::micro::RegisterOp(InitReduce, PrepareMaxOrMinOrAny, EvalAny);
+}
+
+TfLiteRegistration Register_REDUCE_ALL() {
+ return tflite::micro::RegisterOp(InitReduce, PrepareMaxOrMinOrAny, EvalAll);
}
TfLiteRegistration Register_SUM() {
diff --git a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce.h b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce.h
index 6780df4..07e2923 100644
--- a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce.h
+++ b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce.h
@@ -21,6 +21,9 @@ limitations under the License.
#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h"
#include "edge-impulse-sdk/tensorflow/lite/c/common.h"
#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reduce_common.h"
+
+using ReduceType = tflite::ops::builtin::reduce::ReduceType;
namespace tflite {
@@ -37,23 +40,19 @@ struct OpDataReduce {
int output_zp;
float output_scale;
int num_output_elements;
- int num_axis;
};
-TfLiteStatus PrepareMinMaxHelper(TfLiteContext* context, TfLiteNode* node,
+TfLiteStatus PrepareReduceHelper(TfLiteContext* context, TfLiteNode* node,
OpDataReduce* op_data);
+TfLiteStatus EvalReduceHelper(TfLiteContext* context, TfLiteNode* node,
+ OpDataReduce* op_data, ReduceType reduce_type);
+
TfLiteStatus PrepareMeanOrSumHelper(TfLiteContext* context, TfLiteNode* node,
OpDataReduce* op_data);
-TfLiteStatus EvalMaxHelper(TfLiteContext* context, TfLiteNode* node,
- OpDataReduce* op_data);
-
-TfLiteStatus EvalMinHelper(TfLiteContext* context, TfLiteNode* node,
- OpDataReduce* op_data);
-
TfLiteStatus EvalMeanHelper(TfLiteContext* context, TfLiteNode* node,
- OpDataReduce* op_data);
+ OpDataReduce* op_data);
TfLiteStatus EvalSumHelper(TfLiteContext* context, TfLiteNode* node,
OpDataReduce* op_data);
@@ -62,6 +61,8 @@ void ReduceResolveAxis(const int* axis_data, int axis_count,
MeanParams* op_params);
TfLiteRegistration Register_MEAN();
+TfLiteRegistration Register_REDUCE_ANY();
+TfLiteRegistration Register_REDUCE_ALL();
TfLiteRegistration Register_REDUCE_MAX();
TfLiteRegistration Register_REDUCE_MIN();
TfLiteRegistration Register_SUM();
diff --git a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce_common.cpp b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce_common.cpp
index a2c5c38..f56353b 100644
--- a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce_common.cpp
+++ b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce_common.cpp
@@ -64,8 +64,8 @@ TfLiteStatus PrepareSimple(TfLiteContext* context, TfLiteNode* node,
return kTfLiteOk;
}
-TfLiteStatus PrepareMinMaxHelper(TfLiteContext* context, TfLiteNode* node,
- OpDataReduce* op_data) {
+TfLiteStatus PrepareReduceHelper(TfLiteContext* context, TfLiteNode* node,
+ OpDataReduce* op_data) {
TF_LITE_ENSURE_OK(context, PrepareSimple(context, node, &op_data->multiplier,
&op_data->shift));
@@ -95,7 +95,6 @@ TfLiteStatus PrepareMeanOrSumHelper(TfLiteContext* context, TfLiteNode* node,
MicroContext* micro_context = GetMicroContext(context);
TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, 0);
TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0);
- TfLiteTensor* axis = micro_context->AllocateTempInputTensor(node, 1);
if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) {
    const double real_multiplier = static_cast<double>(input->params.scale) /
                                   static_cast<double>(output->params.scale);
@@ -103,8 +102,6 @@ TfLiteStatus PrepareMeanOrSumHelper(TfLiteContext* context, TfLiteNode* node,
}
int output_size = NumElements(output);
- op_data->num_axis = NumElements(axis);
-
if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) {
context->RequestScratchBufferInArena(context, output_size * sizeof(int32_t),
&op_data->temp_buffer_idx);
@@ -120,7 +117,6 @@ TfLiteStatus PrepareMeanOrSumHelper(TfLiteContext* context, TfLiteNode* node,
// TODO(b/144955155): Support uint8_t(b/144955155) and int8_t(b/144955018)
micro_context->DeallocateTempTfLiteTensor(input);
micro_context->DeallocateTempTfLiteTensor(output);
- micro_context->DeallocateTempTfLiteTensor(axis);
return kTfLiteOk;
}
@@ -136,68 +132,6 @@ void ResolveAxis(const int* axis_data, int axis_count,
op_params->axis_count = axis_count;
}
-template
-TfLiteStatus QuantizedMeanOrSum(TfLiteContext* context, TfLiteNode* node,
- int* temp_index, int* resolved_axis,
- int32_t* temp_sum, OpDataReduce* op_data,
- bool compute_sum) {
- const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0);
- const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 1);
- TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0);
- TfLiteReducerParams* params =
-      static_cast<TfLiteReducerParams*>(node->builtin_data);
-
- bool result = reference_ops::QuantizedMeanOrSumExtraArgs(
- tflite::micro::GetTensorData(input), op_data->input_zp,
- op_data->input_scale, &input->dims->data[0], input->dims->size,
- tflite::micro::GetTensorData(output), op_data->output_scale,
- op_data->multiplier, op_data->shift, op_data->output_zp,
- &output->dims->data[0], output->dims->size,
- tflite::micro::GetTensorData(axis), op_data->num_axis,
- params->keep_dims, temp_index, resolved_axis, temp_sum, compute_sum);
- TF_LITE_ENSURE(context, result);
-
- return kTfLiteOk;
-}
-
-template
-TfLiteStatus Mean(TfLiteContext* context, TfLiteNode* node,
- OpDataReduce* op_data, int* temp_index, int* resolved_axis,
- U* temp_sum) {
- const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0);
- const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 1);
- TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0);
- TfLiteReducerParams* params =
-      static_cast<TfLiteReducerParams*>(node->builtin_data);
-
- reference_ops::Mean(
- tflite::micro::GetTensorData(input), &input->dims->data[0],
- input->dims->size, tflite::micro::GetTensorData(output),
- &output->dims->data[0], output->dims->size,
- tflite::micro::GetTensorData(axis), op_data->num_axis,
- params->keep_dims, temp_index, resolved_axis, temp_sum);
-
- return kTfLiteOk;
-}
-
-template
-TfLiteStatus EvalIntegerMean(TfLiteContext* context, TfLiteNode* node,
- int num_axis, OpDataReduce* op_data,
- int* temp_index, int* resolved_axis) {
-  int32_t* temp_sum = static_cast<int32_t*>(
- context->GetScratchBuffer(context, op_data->temp_buffer_idx));
-
- if (op_data->input_zp == op_data->output_zp &&
- op_data->input_scale == op_data->output_scale) {
- Mean(context, node, op_data, temp_index,
- resolved_axis, temp_sum);
- } else {
- QuantizedMeanOrSum(context, node, temp_index, resolved_axis,
- temp_sum, op_data, /*compute_sum=*/false);
- }
- return kTfLiteOk;
-}
-
TfLiteStatus EvalMeanHelper(TfLiteContext* context, TfLiteNode* node,
OpDataReduce* op_data) {
const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0);
@@ -210,19 +144,17 @@ TfLiteStatus EvalMeanHelper(TfLiteContext* context, TfLiteNode* node,
int temp_index[kMaxNumberOfAxis];
int resolved_axis[kMaxNumberOfReducedAxis];
- switch (input->type) {
- case kTfLiteFloat32: {
- tflite::MeanParams op_params;
-      ResolveAxis(tflite::micro::GetTensorData<int>(axis), num_axis,
- &op_params);
+ tflite::MeanParams op_params;
+  ResolveAxis(tflite::micro::GetTensorData<int>(axis), num_axis, &op_params);
- // Special case mean implementation exists for 4D mean across axes 1
- // and 2.
- bool special_case_4d_axes_1_and_2 =
- input->dims->size == 4 && op_params.axis_count == 2 &&
- ((op_params.axis[0] == 1 && op_params.axis[1] == 2) ||
- (op_params.axis[0] == 2 && op_params.axis[1] == 1));
+ // Special case mean implementation exists for 4D mean across axes 1 and 2.
+ bool special_case_4d_axes_1_and_2 =
+ input->dims->size == 4 && op_params.axis_count == 2 &&
+ ((op_params.axis[0] == 1 && op_params.axis[1] == 2) ||
+ (op_params.axis[0] == 2 && op_params.axis[1] == 1));
+ switch (input->type) {
+ case kTfLiteFloat32: {
// Defer to specialized implementation for 4D Mean across axes 1 & 2.
if (params->keep_dims && special_case_4d_axes_1_and_2) {
reference_ops::Mean(op_params, tflite::micro::GetTensorShape(input),
@@ -242,14 +174,77 @@ TfLiteStatus EvalMeanHelper(TfLiteContext* context, TfLiteNode* node,
}
} break;
    case kTfLiteInt8: {
-      TF_LITE_ENSURE_OK(
-          context, EvalIntegerMean<int8_t>(context, node, num_axis, op_data,
-                                           temp_index, resolved_axis));
+      // Defer to specialized implementation for 4D Mean across axes 1 & 2.
+      if (params->keep_dims && special_case_4d_axes_1_and_2) {
+        reference_integer_ops::Mean(
+            op_params, op_data->multiplier, op_data->shift,
+            tflite::micro::GetTensorShape(input),
+            tflite::micro::GetTensorData<int8_t>(input), op_data->input_zp,
+            tflite::micro::GetTensorShape(output),
+            tflite::micro::GetTensorData<int8_t>(output), op_data->output_zp);
+      } else if (op_data->input_zp == op_data->output_zp &&
+                 op_data->input_scale == op_data->output_scale) {
+        int32_t* temp_buffer = static_cast<int32_t*>(
+            context->GetScratchBuffer(context, op_data->temp_buffer_idx));
+        TF_LITE_ENSURE(
+            context,
+            reference_ops::Mean(
+                tflite::micro::GetTensorData<int8_t>(input), input->dims->data,
+                input->dims->size, tflite::micro::GetTensorData<int8_t>(output),
+                output->dims->data, output->dims->size,
+                tflite::micro::GetTensorData<int>(axis), num_axis,
+                params->keep_dims, temp_index, resolved_axis, temp_buffer));
+      } else {
+        int32_t* temp_buffer = static_cast<int32_t*>(
+            context->GetScratchBuffer(context, op_data->temp_buffer_idx));
+        TF_LITE_ENSURE(
+            context,
+            reference_ops::QuantizedMeanOrSum(
+                tflite::micro::GetTensorData<int8_t>(input), op_data->input_zp,
+                op_data->input_scale, input->dims->data, input->dims->size,
+                tflite::micro::GetTensorData<int8_t>(output),
+                op_data->output_zp, op_data->output_scale, output->dims->data,
+                output->dims->size, tflite::micro::GetTensorData<int>(axis),
+                num_axis, params->keep_dims, temp_index, resolved_axis,
+                temp_buffer, false));
+      }
} break;
    case kTfLiteInt16: {
-      TF_LITE_ENSURE_OK(
-          context, EvalIntegerMean<int16_t>(context, node, num_axis, op_data,
-                                            temp_index, resolved_axis));
+      // Defer to specialized implementation for 4D Mean across axes 1 & 2.
+      if (params->keep_dims && special_case_4d_axes_1_and_2) {
+        reference_integer_ops::Mean(
+            op_params, op_data->multiplier, op_data->shift,
+            tflite::micro::GetTensorShape(input),
+            tflite::micro::GetTensorData<int16_t>(input), op_data->input_zp,
+            tflite::micro::GetTensorShape(output),
+            tflite::micro::GetTensorData<int16_t>(output), op_data->output_zp);
+      } else if (op_data->input_zp == op_data->output_zp &&
+                 op_data->input_scale == op_data->output_scale) {
+        int32_t* temp_buffer = static_cast<int32_t*>(
+            context->GetScratchBuffer(context, op_data->temp_buffer_idx));
+        TF_LITE_ENSURE(
+            context,
+            reference_ops::Mean(tflite::micro::GetTensorData<int16_t>(input),
+                                input->dims->data, input->dims->size,
+                                tflite::micro::GetTensorData<int16_t>(output),
+                                output->dims->data, output->dims->size,
+                                tflite::micro::GetTensorData<int>(axis),
+                                num_axis, params->keep_dims, temp_index,
+                                resolved_axis, temp_buffer));
+      } else {
+        int32_t* temp_buffer = static_cast<int32_t*>(
+            context->GetScratchBuffer(context, op_data->temp_buffer_idx));
+        TF_LITE_ENSURE(
+            context,
+            reference_ops::QuantizedMeanOrSum(
+                tflite::micro::GetTensorData<int16_t>(input), op_data->input_zp,
+                op_data->input_scale, input->dims->data, input->dims->size,
+                tflite::micro::GetTensorData<int16_t>(output),
+                op_data->output_zp, op_data->output_scale, output->dims->data,
+                output->dims->size, tflite::micro::GetTensorData<int>(axis),
+                num_axis, params->keep_dims, temp_index, resolved_axis,
+                temp_buffer, false));
+      }
} break;
default:
TF_LITE_ENSURE_MSG(context, false,
@@ -259,7 +254,7 @@ TfLiteStatus EvalMeanHelper(TfLiteContext* context, TfLiteNode* node,
return kTfLiteOk;
}
-TfLiteStatus EvalMaxHelper(TfLiteContext* context, TfLiteNode* node,
+TfLiteStatus EvalSumHelper(TfLiteContext* context, TfLiteNode* node,
OpDataReduce* op_data) {
const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0);
const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 1);
@@ -268,14 +263,13 @@ TfLiteStatus EvalMaxHelper(TfLiteContext* context, TfLiteNode* node,
TfLiteReducerParams* params =
      static_cast<TfLiteReducerParams*>(node->builtin_data);
- // Interpret an axis tensor with null dimensions as a scalar
+ // Interpret an axis tensor with null dimensions as a scalar.
  int num_axis = static_cast<int>(ElementCount(*axis->dims));
-  int* temp_buffer = static_cast<int*>(
-      context->GetScratchBuffer(context, op_data->temp_buffer_idx));
-  int* resolved_axis = static_cast<int*>(
-      context->GetScratchBuffer(context, op_data->resolved_axis_idx));
+ int temp_index[kMaxNumberOfAxis];
+ int resolved_axis[kMaxNumberOfReducedAxis];
+
switch (input->type) {
- case kTfLiteFloat32:
+ case kTfLiteFloat32: {
TF_LITE_ENSURE(
context,
reference_ops::ReduceGeneric(
@@ -283,38 +277,114 @@ TfLiteStatus EvalMaxHelper(TfLiteContext* context, TfLiteNode* node,
              tflite::micro::GetTensorData<float>(input), input->dims->data,
              input->dims->size, tflite::micro::GetTensorData<float>(output),
output->dims->data, output->dims->size,
              tflite::micro::GetTensorData<int>(axis), num_axis,
- params->keep_dims, temp_buffer, resolved_axis,
-              std::numeric_limits<float>::lowest(),
+ params->keep_dims, temp_index, resolved_axis, /*init_value=*/0.f,
[](const float current, const float in) -> float {
- return (in > current) ? in : current;
+ return in + current;
}));
- break;
- case kTfLiteInt8:
-      TF_LITE_ENSURE_EQ(context, static_cast<double>(op_data->input_scale),
-                        static_cast<double>(op_data->output_scale));
- TF_LITE_ENSURE_EQ(context, op_data->input_zp, op_data->output_zp);
+ } break;
+    case kTfLiteInt8: {
+      int32_t* temp_buffer = static_cast<int32_t*>(
+          context->GetScratchBuffer(context, op_data->temp_buffer_idx));
       TF_LITE_ENSURE(
           context,
-          reference_ops::ReduceGeneric(
-              tflite::micro::GetTensorData<int8_t>(input), input->dims->data,
-              input->dims->size, tflite::micro::GetTensorData<int8_t>(output),
-              output->dims->data, output->dims->size,
+          reference_ops::QuantizedMeanOrSum(
+              tflite::micro::GetTensorData<int8_t>(input), op_data->input_zp,
+              op_data->input_scale, input->dims->data, input->dims->size,
+              tflite::micro::GetTensorData<int8_t>(output), op_data->output_zp,
+              op_data->output_scale, output->dims->data, output->dims->size,
              tflite::micro::GetTensorData<int>(axis), num_axis,
-              params->keep_dims, temp_buffer, resolved_axis,
-              std::numeric_limits<int8_t>::lowest(),
-              [](const int8_t current, const int8_t in) -> int8_t {
-                return (in > current) ? in : current;
-              }));
+              params->keep_dims, temp_index, resolved_axis, temp_buffer,
+              /*compute_sum=*/true));
+    } break;
+    case kTfLiteInt16: {
+      int32_t* temp_buffer = static_cast<int32_t*>(
+          context->GetScratchBuffer(context, op_data->temp_buffer_idx));
+      TF_LITE_ENSURE(
+          context,
+          reference_ops::QuantizedMeanOrSum(
+              tflite::micro::GetTensorData<int16_t>(input), op_data->input_zp,
+              op_data->input_scale, input->dims->data, input->dims->size,
+              tflite::micro::GetTensorData<int16_t>(output), op_data->output_zp,
+              op_data->output_scale, output->dims->data, output->dims->size,
+              tflite::micro::GetTensorData<int>(axis), num_axis,
+              params->keep_dims, temp_index, resolved_axis, temp_buffer,
+              /*compute_sum=*/true));
+    } break;
+ default:
+ MicroPrintf("Only float32, int8, and int16 types are supported.");
+ return kTfLiteError;
+ }
+ return kTfLiteOk;
+}
+
+template <typename T>
+TfLiteStatus GetReducerInitValue(ReduceType reduce_type, T& init_value) {
+  switch (reduce_type) {
+    case ReduceType::kSum:
+      init_value = T(0);
+      break;
+    case ReduceType::kProd:
+      init_value = static_cast<T>(1);
+      break;
+    case ReduceType::kMax:
+      init_value = std::numeric_limits<T>::lowest();
+      break;
+    case ReduceType::kMin:
+      init_value = std::numeric_limits<T>::max();
+ break;
+ case ReduceType::kAny:
+ init_value = false;
+ break;
+ case ReduceType::kAll:
+ init_value = true;
break;
default:
- MicroPrintf("Only float32 and int8 types are supported.");
+ MicroPrintf("GetReducerInitValue: Unsupported ReduceType: %d",
+ reduce_type);
return kTfLiteError;
}
return kTfLiteOk;
}
-TfLiteStatus EvalMinHelper(TfLiteContext* context, TfLiteNode* node,
- OpDataReduce* op_data) {
+template <typename T>
+T (*getReducerForProd())(const T current, const T in) {
+ return [](const T current, const T in) -> T { return in * current; };
+}
+
+// Specialize for the warning: int-in-bool-context
+template <>
+bool (*getReducerForProd<bool>())(const bool current, const bool in) {
+ return [](const bool current, const bool in) -> bool { return in && current; };
+}
+
+template <typename T>
+T (*GetReducer(ReduceType reduce_type))
+(const T current, const T in) {
+ switch (reduce_type) {
+ case ReduceType::kSum:
+ return [](const T current, const T in) -> T { return in + current; };
+ case ReduceType::kProd:
+      return getReducerForProd<T>();
+ case ReduceType::kMax:
+ return [](const T current, const T in) -> T {
+ return (in > current) ? in : current;
+ };
+ case ReduceType::kMin:
+ return [](const T current, const T in) -> T {
+ return (in < current) ? in : current;
+ };
+ case ReduceType::kAny:
+ return [](const T current, const T in) -> T { return in || current; };
+ case ReduceType::kAll:
+ return [](const T current, const T in) -> T { return in && current; };
+ default:
+ MicroPrintf("GetReducer: Unsupported ReduceType: %d", reduce_type);
+ }
+ return nullptr;
+}
+
+TfLiteStatus EvalReduceHelper(TfLiteContext* context, TfLiteNode* node,
+ OpDataReduce* op_data, ReduceType reduce_type) {
const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0);
const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 1);
TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0);
@@ -329,86 +399,59 @@ TfLiteStatus EvalMinHelper(TfLiteContext* context, TfLiteNode* node,
  int* resolved_axis = static_cast<int*>(
context->GetScratchBuffer(context, op_data->resolved_axis_idx));
switch (input->type) {
- case kTfLiteFloat32:
- TF_LITE_ENSURE(
- context,
- reference_ops::ReduceGeneric(
- tflite::micro::GetTensorData(input), input->dims->data,
- input->dims->size, tflite::micro::GetTensorData(output),
- output->dims->data, output->dims->size,
- tflite::micro::GetTensorData(axis), num_axis,
- params->keep_dims, temp_buffer, resolved_axis,
- std::numeric_limits::max(),
- [](const float current, const float in) -> float {
- return (in < current) ? in : current;
- }));
+    case kTfLiteBool: {
+      bool init_value;
+      TF_LITE_ENSURE_EQ(context, GetReducerInitValue(reduce_type, init_value),
+                        kTfLiteOk);
+      auto reducer = GetReducer<bool>(reduce_type);
+      TF_LITE_ENSURE(context, reducer != nullptr);
+      TF_LITE_ENSURE(context, reference_ops::ReduceGeneric(
+                                  tflite::micro::GetTensorData<bool>(input),
+                                  input->dims->data, input->dims->size,
+                                  tflite::micro::GetTensorData<bool>(output),
+                                  output->dims->data, output->dims->size,
+                                  tflite::micro::GetTensorData<int>(axis),
+                                  num_axis, params->keep_dims, temp_buffer,
+                                  resolved_axis, init_value, reducer));
break;
- case kTfLiteInt8:
+ }
+    case kTfLiteFloat32: {
+      float init_value;
+      TF_LITE_ENSURE_EQ(context, GetReducerInitValue(reduce_type, init_value),
+                        kTfLiteOk);
+      auto reducer = GetReducer<float>(reduce_type);
+      TF_LITE_ENSURE(context, reducer != nullptr);
+      TF_LITE_ENSURE(context, reference_ops::ReduceGeneric(
+                                  tflite::micro::GetTensorData<float>(input),
+                                  input->dims->data, input->dims->size,
+                                  tflite::micro::GetTensorData<float>(output),
+                                  output->dims->data, output->dims->size,
+                                  tflite::micro::GetTensorData<int>(axis),
+                                  num_axis, params->keep_dims, temp_buffer,
+                                  resolved_axis, init_value, reducer));
+ break;
+ }
+    case kTfLiteInt8: {
+      int8_t init_value;
+      TF_LITE_ENSURE_EQ(context, GetReducerInitValue(reduce_type, init_value),
+                        kTfLiteOk);
       TF_LITE_ENSURE_EQ(context, static_cast<double>(op_data->input_scale),
                         static_cast<double>(op_data->output_scale));
       TF_LITE_ENSURE_EQ(context, op_data->input_zp, op_data->output_zp);
-      TF_LITE_ENSURE(
-          context,
-          reference_ops::ReduceGeneric(
-              tflite::micro::GetTensorData<int8_t>(input), input->dims->data,
-              input->dims->size, tflite::micro::GetTensorData<int8_t>(output),
-              output->dims->data, output->dims->size,
-              tflite::micro::GetTensorData<int>(axis), num_axis,
-              params->keep_dims, temp_buffer, resolved_axis,
-              std::numeric_limits<int8_t>::max(),
-              [](const int8_t current, const int8_t in) -> int8_t {
-                return (in < current) ? in : current;
-              }));
+      auto reducer = GetReducer<int8_t>(reduce_type);
+      TF_LITE_ENSURE(context, reducer != nullptr);
+      TF_LITE_ENSURE(context, reference_ops::ReduceGeneric(
+                                  tflite::micro::GetTensorData<int8_t>(input),
+                                  input->dims->data, input->dims->size,
+                                  tflite::micro::GetTensorData<int8_t>(output),
+                                  output->dims->data, output->dims->size,
+                                  tflite::micro::GetTensorData<int>(axis),
+                                  num_axis, params->keep_dims, temp_buffer,
+                                  resolved_axis, init_value, reducer));
break;
+ }
default:
- MicroPrintf("Only float32 and int8 types are supported.");
- return kTfLiteError;
- }
- return kTfLiteOk;
-}
-
-TfLiteStatus EvalSumHelper(TfLiteContext* context, TfLiteNode* node,
- OpDataReduce* op_data) {
- const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0);
- const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 1);
- TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0);
- TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
- TfLiteReducerParams* params =
-      static_cast<TfLiteReducerParams*>(node->builtin_data);
-
- // Interpret an axis tensor with null dimensions as a scalar.
-  int num_axis = static_cast<int>(ElementCount(*axis->dims));
- int temp_index[kMaxNumberOfAxis];
- int resolved_axis[kMaxNumberOfReducedAxis];
-
- switch (input->type) {
- case kTfLiteFloat32: {
- TF_LITE_ENSURE(
- context,
- reference_ops::ReduceGeneric(
- tflite::micro::GetTensorData(input), input->dims->data,
- input->dims->size, tflite::micro::GetTensorData(output),
- output->dims->data, output->dims->size,
- tflite::micro::GetTensorData(axis), num_axis,
- params->keep_dims, temp_index, resolved_axis, /*init_value=*/0.f,
- [](const float current, const float in) -> float {
- return in + current;
- }));
- } break;
- case kTfLiteInt8: {
-      int32_t* temp_sum = static_cast<int32_t*>(
- context->GetScratchBuffer(context, op_data->temp_buffer_idx));
- QuantizedMeanOrSum(context, node, temp_index, resolved_axis,
- temp_sum, op_data, /*compute_sum=*/true);
- } break;
- case kTfLiteInt16: {
-      int32_t* temp_sum = static_cast<int32_t*>(
- context->GetScratchBuffer(context, op_data->temp_buffer_idx));
- QuantizedMeanOrSum(context, node, temp_index, resolved_axis,
- temp_sum, op_data, /*compute_sum=*/true);
- } break;
- default:
- MicroPrintf("Only float32, int8, and int16 types are supported.");
+ MicroPrintf("Only bool, float32 and int8 types are supported.");
return kTfLiteError;
}
return kTfLiteOk;
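The refactor above replaces the per-op EvalMaxHelper/EvalMinHelper bodies with one EvalReduceHelper that asks GetReducerInitValue() for an identity element and GetReducer() for a binary functor, then hands both to ReduceGeneric. The self-contained sketch below mirrors that pattern for the boolean REDUCE_ANY / REDUCE_ALL cases; the namespace sketch and the flat fold are illustrative, not SDK code.

    // Hedged sketch, not part of the patch: init value + functor dispatch,
    // folded over a flat buffer instead of ReduceGeneric's axis handling.
    #include <cstddef>

    namespace sketch {

    enum class ReduceType { kAny, kAll };

    inline bool ReduceBool(ReduceType type, const bool* data, size_t n) {
      // kAny starts from false (identity of OR), kAll from true (identity of AND).
      bool acc = (type == ReduceType::kAll);
      bool (*reducer)(bool, bool);
      if (type == ReduceType::kAny) {
        reducer = [](bool current, bool in) { return in || current; };
      } else {
        reducer = [](bool current, bool in) { return in && current; };
      }
      for (size_t i = 0; i < n; ++i) acc = reducer(acc, data[i]);
      return acc;
    }

    }  // namespace sketch

With data = {false, true, false}, ReduceBool(kAny, ...) yields true and ReduceBool(kAll, ...) yields false, matching what EvalAny/EvalAll produce for a full reduction.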
diff --git a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/scatter_nd.cpp b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/scatter_nd.cpp
new file mode 100644
index 0000000..912a3c1
--- /dev/null
+++ b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/scatter_nd.cpp
@@ -0,0 +1,194 @@
+/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include
+
+#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/scatter_nd.h"
+
+#include "edge-impulse-sdk/tensorflow/lite/core/c/common.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h"
+#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h"
+
+namespace tflite {
+namespace scatter_nd {
+
+constexpr int kIndices = 0;
+constexpr int kUpdates = 1;
+constexpr int kShape = 2;
+constexpr int kOutputTensor = 0;
+
+TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+
+ TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
+ TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
+
+ MicroContext* micro_context = GetMicroContext(context);
+
+ TfLiteTensor* indices =
+ micro_context->AllocateTempInputTensor(node, kIndices);
+ TfLiteTensor* updates =
+ micro_context->AllocateTempInputTensor(node, kUpdates);
+ TfLiteTensor* shape =
+ micro_context->AllocateTempInputTensor(node, kShape);
+ TF_LITE_ENSURE(context, indices != nullptr);
+ TF_LITE_ENSURE(context, updates != nullptr);
+ TF_LITE_ENSURE(context, shape != nullptr);
+
+ switch (updates->type) {
+ case kTfLiteFloat32:
+ case kTfLiteUInt8:
+ case kTfLiteBool:
+ case kTfLiteInt8:
+ case kTfLiteInt64:
+ case kTfLiteInt32:
+ break;
+ default:
+ TF_LITE_KERNEL_LOG(
+ context, "Updates of type '%s' are not supported by scatter_nd.",
+ TfLiteTypeGetName(updates->type));
+ return kTfLiteError;
+ }
+ if (indices->type != shape->type) {
+ TF_LITE_KERNEL_LOG(context, "Indices and shape must have the same type.");
+ return kTfLiteError;
+ }
+
+ TfLiteTensor* output =
+ micro_context->AllocateTempOutputTensor(node, kOutputTensor);
+ TF_LITE_ENSURE(context, output != nullptr);
+ output->type = updates->type;
+
+ TfLiteStatus ret = kTfLiteError;
+
+ if (IsDynamicTensor(output)) {
+ TF_LITE_KERNEL_LOG(context, "DynamicTensor is not yet supported by scatter_nd.");
+ return ret;
+ }
+
+ const int shape_rank = SizeOfDimension(shape, 0);
+  const auto* shape_data = GetTensorData<int32_t>(shape);
+
+ if (IsConstantOrPersistentTensor(shape)) {
+ switch (indices->type) {
+ case kTfLiteInt32:
+ // check if output tensor needs resizing
+ // throw an error if it does
+ if (output->dims->size != shape_rank) {
+ TF_LITE_KERNEL_LOG(context, "Tensor resizing is not supported by scatter_nd.");
+ return kTfLiteError;
+ }
+ for (int i = 0; i < shape_rank; i++) {
+ if (output->dims->data[i] != shape_data[i]) {
+ TF_LITE_KERNEL_LOG(context, "Tensor resizing is not supported by scatter_nd.");
+ return kTfLiteError;
+ }
+ }
+ break;
+ default:
+ TF_LITE_KERNEL_LOG(
+ context, "Indices of type '%s' are not supported by scatter_nd.",
+ TfLiteTypeGetName(indices->type));
+ return ret;
+ }
+ } else {
+ TF_LITE_KERNEL_LOG(context, "DynamicTensor is not yet supported by scatter_nd.");
+ return ret;
+ }
+
+ micro_context->DeallocateTempTfLiteTensor(indices);
+ micro_context->DeallocateTempTfLiteTensor(updates);
+ micro_context->DeallocateTempTfLiteTensor(shape);
+ micro_context->DeallocateTempTfLiteTensor(output);
+ return kTfLiteOk;
+}
+
+template <typename IndicesT, typename UpdatesT>
+TfLiteStatus ScatterNd(const TfLiteEvalTensor* indices, const TfLiteEvalTensor* updates,
+ TfLiteEvalTensor* output) {
+  return reference_ops::ScatterNd(
+      tflite::micro::GetTensorShape(indices), tflite::micro::GetTensorData<IndicesT>(indices),
+      tflite::micro::GetTensorShape(updates), tflite::micro::GetTensorData<UpdatesT>(updates),
+      tflite::micro::GetTensorShape(output), tflite::micro::GetTensorData<UpdatesT>(output));
+}
+
+template <typename IndicesT>
+TfLiteStatus EvalScatterNd(TfLiteContext* context, const TfLiteEvalTensor* indices,
+ const TfLiteEvalTensor* updates,
+ const TfLiteEvalTensor* shape, TfLiteEvalTensor* output) {
+
+  TfLiteStatus status = kTfLiteError;
+  switch (updates->type) {
+    case kTfLiteFloat32:
+      status = ScatterNd<IndicesT, float>(indices, updates, output);
+      break;
+    case kTfLiteUInt8:
+      status = ScatterNd<IndicesT, uint8_t>(indices, updates, output);
+      break;
+    case kTfLiteBool:
+      status = ScatterNd<IndicesT, bool>(indices, updates, output);
+      break;
+    case kTfLiteInt8:
+      status = ScatterNd<IndicesT, int8_t>(indices, updates, output);
+      break;
+    case kTfLiteInt32:
+      status = ScatterNd<IndicesT, int32_t>(indices, updates, output);
+      break;
+    case kTfLiteInt64:
+      status = ScatterNd<IndicesT, int64_t>(indices, updates, output);
+      break;
+    default:
+      TF_LITE_KERNEL_LOG(
+          context, "Updates of type '%s' are not supported by scatter_nd.",
+          TfLiteTypeGetName(updates->type));
+      return kTfLiteError;
+  }
+ if (status != kTfLiteOk) {
+ TF_LITE_KERNEL_LOG(context, "scatter_nd index out of bounds");
+ }
+ return status;
+}
+
+TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+
+
+ const TfLiteEvalTensor* indices =
+ tflite::micro::GetEvalInput(context, node, kIndices);
+ const TfLiteEvalTensor* updates =
+ tflite::micro::GetEvalInput(context, node, kUpdates);
+ const TfLiteEvalTensor* shape =
+ tflite::micro::GetEvalInput(context, node, kShape);
+ TfLiteEvalTensor* output =
+ tflite::micro::GetEvalOutput(context, node, kOutputTensor);
+
+ switch (indices->type) {
+ case kTfLiteInt32:
+      return EvalScatterNd<int32_t>(context, indices, updates, shape, output);
+ default:
+ TF_LITE_KERNEL_LOG(
+ context, "Indices of type '%s' are not supported by scatter_nd.",
+ TfLiteTypeGetName(indices->type));
+ return kTfLiteError;
+ }
+}
+
+} // namespace scatter_nd
+
+TfLiteRegistration Register_SCATTER_ND() {
+ return tflite::micro::RegisterOp(nullptr, scatter_nd::Prepare, scatter_nd::Eval);
+}
+
+} // namespace tflite
diff --git a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/select.cpp b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/select.cpp
index 68cf319..f213b34 100644
--- a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/select.cpp
+++ b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/select.cpp
@@ -117,7 +117,6 @@ TfLiteStatus SelectPrepare(TfLiteContext* context, TfLiteNode* node) {
micro_context->DeallocateTempTfLiteTensor(input_x);
micro_context->DeallocateTempTfLiteTensor(input_y);
micro_context->DeallocateTempTfLiteTensor(output);
-
return kTfLiteOk;
}
@@ -220,6 +219,10 @@ TfLiteStatus SelectEval(TfLiteContext* context, TfLiteNode* node) {
CallSelect(input_condition, input_x, input_y, output,
data->requires_broadcast);
break;
+ case kTfLiteInt32:
+      CallSelect<int32_t>(input_condition, input_x, input_y, output,
+ data->requires_broadcast);
+ break;
default:
MicroPrintf("Does not support type other than %s, but got %s",
"int8|int16|float32", TfLiteTypeGetName(input_x->type));
diff --git a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/micro_mutable_op_resolver.h b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/micro_mutable_op_resolver.h
index 798787a..17600db 100644
--- a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/micro_mutable_op_resolver.h
+++ b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/micro_mutable_op_resolver.h
@@ -441,6 +441,16 @@ class MicroMutableOpResolver : public MicroOpResolver {
tflite::Register_READ_VARIABLE(), ParseReadVariable);
}
+ TfLiteStatus AddReduceAny() {
+ return AddBuiltin(BuiltinOperator_REDUCE_ANY, Register_REDUCE_ANY(),
+ ParseReducer);
+ }
+
+ TfLiteStatus AddReduceAll() {
+ return AddBuiltin(BuiltinOperator_REDUCE_ALL, Register_REDUCE_ALL(),
+ ParseReducer);
+ }
+
TfLiteStatus AddReduceMax() {
return AddBuiltin(BuiltinOperator_REDUCE_MAX, Register_REDUCE_MAX(),
ParseReducer);
@@ -491,6 +501,11 @@ class MicroMutableOpResolver : public MicroOpResolver {
tflite::ops::micro::Register_RSQRT(), ParseRsqrt);
}
+ TfLiteStatus AddScatterNd() {
+ return AddBuiltin(BuiltinOperator_SCATTER_ND,
+ Register_SCATTER_ND(), ParseScatterNd);
+ }
+
#ifndef TF_LITE_STATIC_MEMORY
TfLiteStatus AddSelect() {
return AddBuiltin(BuiltinOperator_SELECT, Register_SELECT(),
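With the resolver hooks above in place, an application that does not use AllOpsResolver can register the new operators explicitly. A hedged sketch follows; the op count of 4, the function name, and the exact set of Add*() calls depend on the deployed model and are assumptions for illustration.

    // Hedged sketch, not part of the patch.
    #include "edge-impulse-sdk/tensorflow/lite/micro/micro_mutable_op_resolver.h"

    TfLiteStatus RegisterPatchedOpsSketch(
        tflite::MicroMutableOpResolver<4>& resolver) {
      TF_LITE_ENSURE_STATUS(resolver.AddScatterNd());  // added by this patch
      TF_LITE_ENSURE_STATUS(resolver.AddReduceAny());  // added by this patch
      TF_LITE_ENSURE_STATUS(resolver.AddReduceAll());  // added by this patch
      TF_LITE_ENSURE_STATUS(resolver.AddGather());     // now also accepts int64 positions
      return kTfLiteOk;
    }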