Skip to content

Commit

Permalink
SDK release v1.54.8
Browse files Browse the repository at this point in the history
  • Loading branch information
francovaro committed Jul 30, 2024
1 parent 23a4f73 commit 1cd3b47
Show file tree
Hide file tree
Showing 19 changed files with 747 additions and 220 deletions.
15 changes: 9 additions & 6 deletions EdgeImpulse.EI-SDK.pdsc
Original file line number Diff line number Diff line change
Expand Up @@ -5,13 +5,16 @@
<name>EI-SDK</name>
<license>LICENSE-apache-2.0.txt</license>
<description>Edge Impulse SDK</description>
<url>https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.54.4/</url>
<url>https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.54.8/</url>
<supportContact>hello@edgeimpulse.com</supportContact>
<repository type="git">https://github.com/edgeimpulse/edge-impulse-sdk-pack.git</repository>
<releases>
<release version="1.54.4" tag="v1.54.4" date="2024-07-24" url="https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.54.4/EdgeImpulse.EI-SDK.1.54.4.pack">
<release version="1.54.8" tag="v1.54.8" date="2024-07-30" url="https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.54.8/EdgeImpulse.EI-SDK.1.54.8.pack">
EI-SDK
</release>
<release version="1.54.4" tag="v1.54.4" date="2024-07-24" url="https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.54.4/EdgeImpulse.EI-SDK.1.54.4.pack">
EI-SDK
</release>
<release version="1.54.1" tag="v1.54.1" date="2024-07-18" url="https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.54.1/EdgeImpulse.EI-SDK.1.54.1.pack">
EI-SDK
</release>
Expand Down Expand Up @@ -98,9 +101,6 @@
</release>
<release version="1.49.14" tag="v1.49.14" date="2024-04-25" url="https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.49.14/EdgeImpulse.EI-SDK.1.49.14.pack">
EI-SDK
</release>
<release version="1.49.11" tag="v1.49.11" date="2024-04-22" url="https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.49.11/EdgeImpulse.EI-SDK.1.49.11.pack">
EI-SDK
</release>
</releases>
<keywords>
Expand Down Expand Up @@ -146,7 +146,7 @@
</packages>
</requirements>
<components>
<component Cclass="EdgeImpulse" Cgroup="SDK" Cversion="1.54.4">
<component Cclass="EdgeImpulse" Cgroup="SDK" Cversion="1.54.8">
<description>Edge Impulse SDK</description>
<!-- short component description -->
<files>
Expand Down Expand Up @@ -302,6 +302,7 @@
<file category="source" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/activations.cpp"/>
<file category="source" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/space_to_batch_nd.cpp"/>
<file category="source" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/logical.cpp"/>
<file category="source" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/scatter_nd.cpp"/>
<file category="source" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/log_softmax.cpp"/>
<file category="source" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/assign_variable.cpp"/>
<file category="source" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buffers.cpp"/>
Expand Down Expand Up @@ -516,6 +517,7 @@
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference_portable_tensor_utils_impl.h"/>
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/portable_tensor.h"/>
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/min.h"/>
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/reduce_common.h"/>
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h"/>
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/cppmath.h"/>
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h"/>
Expand All @@ -535,6 +537,7 @@
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/fill.h"/>
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/requantize.h"/>
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/leaky_relu.h"/>
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/scatter_nd.h"/>
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/floor_mod.h"/>
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/logistic.h"/>
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/fully_connected.h"/>
Expand Down
4 changes: 2 additions & 2 deletions EdgeImpulse.pidx
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,8 @@
<index schemaVersion="1.0.0" xs:noNamespaceSchemaLocation="PackIndex.xsd" xmlns:xs="http://www.w3.org/2001/XMLSchema-instance">
<vendor>EdgeImpulse</vendor>
<url>https://raw.githubusercontent.com/edgeimpulse/edge-impulse-sdk-pack/main/</url>
<timestamp>2024-07-24 12:24:56</timestamp>
<timestamp>2024-07-30 10:53:15</timestamp>
<pindex>
<pdsc url="https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.54.4/" vendor="EdgeImpulse" name="EI-SDK" version="1.54.4"/>
<pdsc url="https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.54.8/" vendor="EdgeImpulse" name="EI-SDK" version="1.54.8"/>
</pindex>
</index>
Original file line number Diff line number Diff line change
Expand Up @@ -458,6 +458,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return ParseRsqrt(op, error_reporter, allocator, builtin_data);
}

case BuiltinOperator_SCATTER_ND: {
return ParseScatterNd(op, error_reporter, allocator, builtin_data);
}

case BuiltinOperator_SELECT_V2: {
return ParseSelectV2(op, error_reporter, allocator, builtin_data);
}
Expand Down Expand Up @@ -868,7 +872,6 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
case BuiltinOperator_NON_MAX_SUPPRESSION_V5:
case BuiltinOperator_RELU_N1_TO_1:
case BuiltinOperator_RELU_0_TO_1:
case BuiltinOperator_SCATTER_ND:
case BuiltinOperator_SELECT:
case BuiltinOperator_SLICE:
case BuiltinOperator_TILE:
Expand Down Expand Up @@ -2022,6 +2025,14 @@ TfLiteStatus ParseRsqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
return kTfLiteOk;
}

// ParseScatterNd exists (rather than the switch-case in ParseOpData simply
// returning kTfLiteOk) so that it can take part in the selective kernel
// registration used by the micro OpResolver implementation.
//
// SCATTER_ND carries no builtin options in the flatbuffer, so every argument
// is intentionally unused and parsing always succeeds.
TfLiteStatus ParseScatterNd(const Operator* /*op*/,
                            ErrorReporter* /*error_reporter*/,
                            BuiltinDataAllocator* /*allocator*/,
                            void** /*builtin_data*/) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -331,6 +331,9 @@ TfLiteStatus ParseRound(const Operator* op, ErrorReporter* error_reporter,
TfLiteStatus ParseRsqrt(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);

TfLiteStatus ParseScatterNd(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);

TfLiteStatus ParseSelect(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REDUCE_COMMON_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REDUCE_COMMON_H_

namespace tflite {
namespace ops {
namespace builtin {
namespace reduce {

// Selects which reduction a shared reduce-kernel implementation performs.
// NOTE: the declaration order fixes the enumerators' integer values; do not
// reorder without auditing every use site.
enum ReduceType {
  kSum,   // sum of the reduced elements
  kProd,  // product of the reduced elements
  kMax,   // maximum of the reduced elements
  kMin,   // minimum of the reduced elements
  kAny,   // true if any reduced element is true (ReduceAny)
  kAll,   // true if all reduced elements are true (ReduceAll)
};

}  // namespace reduce
}  // namespace builtin
}  // namespace ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REDUCE_COMMON_H_
Original file line number Diff line number Diff line change
Expand Up @@ -194,6 +194,50 @@ inline void Add(const ArithmeticParams& params,
}
}

// Elementwise addition of two int32 tensors whose quantization scales are
// related by a power of two (POT). Results are saturating-added in fixed
// point and clamped to [quantized_activation_min, quantized_activation_max].
//
// NOTE(review): the `pot_scale` flag is currently ignored — the general
// (non-POT) scaling path was disabled when this overload was added, so
// callers passing `pot_scale = false` (as add.cpp does) still take the POT
// code path below. Confirm this is intended. The disabled path was:
//   if (!pot_scale) {
//     AddGeneralParamScale(params, input1_shape, input1_data, input2_shape,
//                          input2_data, output_shape, output_data);
//     return;
//   }
inline void Add(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const int32_t* input1_data,
                const RuntimeShape& input2_shape, const int32_t* input2_data,
                const RuntimeShape& output_shape, int32_t* output_data,
                bool pot_scale = true) {
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);

  const int input1_shift = params.input1_shift;
  const int flat_size =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);
  const int32_t output_activation_min = params.quantized_activation_min;
  const int32_t output_activation_max = params.quantized_activation_max;

  // Exactly one input may carry a (non-positive) shift; the other must have
  // shift 0. The shifted input is scaled down to match the unshifted one.
  TFLITE_DCHECK(input1_shift == 0 || params.input2_shift == 0);
  TFLITE_DCHECK_LE(input1_shift, 0);
  TFLITE_DCHECK_LE(params.input2_shift, 0);
  const int32_t* not_shift_input =
      input1_shift == 0 ? input1_data : input2_data;
  const int32_t* shift_input = input1_shift == 0 ? input2_data : input1_data;
  const int input_right_shift =
      input1_shift == 0 ? -params.input2_shift : -input1_shift;

  for (int i = 0; i < flat_size; i++) {
    // F0 uses 0 integer bits, range [-1, 1].
    // NOTE(review): raw values are interpreted through an int16 fixed-point
    // type even though the buffers are int32 — this mirrors the int16 Add
    // overload; confirm int32 inputs are pre-scaled into int16 raw range.
    using F0 = gemmlowp::FixedPoint<std::int16_t, 0>;

    F0 input_ready_scaled = F0::FromRaw(not_shift_input[i]);
    F0 scaled_input = F0::FromRaw(
        gemmlowp::RoundingDivideByPOT(shift_input[i], input_right_shift));
    F0 result = gemmlowp::SaturatingAdd(scaled_input, input_ready_scaled);
    const int32_t raw_output = result.raw();
    const int32_t clamped_output = std::min(
        output_activation_max, std::max(output_activation_min, raw_output));
    output_data[i] = clamped_output;
  }
}

template <typename T>
inline typename std::enable_if<!is_small_integer<T>::value, void>::type
BroadcastAdd4DSlow(const ArithmeticParams& params,
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SCATTER_ND_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SCATTER_ND_H_

// Direct includes for names used below (memset, std::vector) instead of
// relying on transitive inclusion through the project headers.
#include <cstring>
#include <vector>

#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h"
#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h"
#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h"
#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h"

namespace tflite {

namespace reference_ops {

// Reference implementation of the SCATTER_ND op.
//
// Zero-initializes `output_data` and then, for each of the `n_slices` index
// tuples in `indices_data`, accumulates (+=) the corresponding slice of
// `updates_data` into the output at the position the tuple addresses.
// Duplicate indices therefore sum their contributions.
//
// Returns kTfLiteError (instead of writing out of bounds) when the updates
// tensor is too small for the implied number of slices, or when an index
// tuple addresses a slice outside the output tensor; kTfLiteOk otherwise.
template <typename IndicesT, typename UpdatesT>
inline TfLiteStatus ScatterNd(const RuntimeShape& indices_shape,
                              const IndicesT* indices_data,
                              const RuntimeShape& updates_shape,
                              const UpdatesT* updates_data,
                              const RuntimeShape& output_shape,
                              UpdatesT* output_data) {
  int n_slices = 1;
  int slice_size = 1;
  // The last indices dimension is the length of each index tuple; all outer
  // dimensions enumerate the slices to scatter.
  const int outer_dims = indices_shape.DimensionsCount() - 1;
  const int indices_nd = indices_shape.Dims(outer_dims);
  const int updates_dims = updates_shape.DimensionsCount();
  for (int i = 0; i < outer_dims; ++i) {
    n_slices *= indices_shape.Dims(i);
  }
  for (int i = outer_dims; i < updates_dims; ++i) {
    slice_size *= updates_shape.Dims(i);
  }

  // dims_to_count[j] is the flat-output stride contributed by index
  // component j (row-major).
  int output_flat_size = output_shape.FlatSize();
  int remain_flat_size = output_flat_size;
  std::vector<int> dims_to_count(indices_nd, 0);
  for (int i = 0; i < indices_nd; ++i) {
    dims_to_count[i] = remain_flat_size / output_shape.Dims(i);
    remain_flat_size = dims_to_count[i];
  }

  // Guard: updates must provide at least one slice per index tuple.
  if (n_slices * slice_size > updates_shape.FlatSize()) {
    return kTfLiteError;
  }
  memset(output_data, 0, sizeof(UpdatesT) * output_flat_size);
  for (int i = 0; i < n_slices; ++i) {
    int to_pos = 0;
    for (int j = 0; j < indices_nd; ++j) {
      IndicesT idx = indices_data[i * indices_nd + j];
      to_pos += idx * dims_to_count[j];
    }
    // Guard: reject index tuples that address outside the output buffer.
    if (to_pos < 0 || to_pos + slice_size > output_flat_size) {
      return kTfLiteError;
    }
    for (int j = 0; j < slice_size; j++) {
      output_data[to_pos + j] += updates_data[i * slice_size + j];
    }
  }
  return kTfLiteOk;
}

}  // namespace reference_ops
}  // namespace tflite
#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SCATTER_ND_H_
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,8 @@ AllOpsResolver::AllOpsResolver() {
AddQuantize();
AddReal();
AddReadVariable();
AddReduceAny();
AddReduceAll();
AddReduceMax();
AddReduceMin();
AddRelu();
Expand All @@ -101,6 +103,7 @@ AllOpsResolver::AllOpsResolver() {
AddRfft2D();
AddRound();
AddRsqrt();
AddScatterNd();
#ifndef TF_LITE_STATIC_MEMORY
AddSelect();
AddSelectV2();
Expand Down
61 changes: 59 additions & 2 deletions edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/add.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -243,6 +243,43 @@ TfLiteStatus EvalAddQuantized(TfLiteContext* context, TfLiteNode* node,
output);
break;
}
case kTfLiteInt32: {
tflite::ArithmeticParams op_params;
op_params.left_shift = data->left_shift;
op_params.input1_offset = data->input1_offset;
op_params.input1_multiplier = data->input1_multiplier;
op_params.input1_shift = data->input1_shift;
op_params.input2_offset = data->input2_offset;
op_params.input2_multiplier = data->input2_multiplier;
op_params.input2_shift = data->input2_shift;
op_params.output_offset = data->output_offset;
op_params.output_multiplier = data->output_multiplier;
op_params.output_shift = data->output_shift;
SetActivationParams(data->output_activation_min, data->output_activation_max,
&op_params);
bool need_broadcast = reference_ops::ProcessBroadcastShapes(
tflite::micro::GetTensorShape(input1),
tflite::micro::GetTensorShape(input2), &op_params);

if (need_broadcast) {
reference_ops::BroadcastAdd4DSlow(
op_params, tflite::micro::GetTensorShape(input1),
tflite::micro::GetTensorData<int32_t>(input1),
tflite::micro::GetTensorShape(input2),
tflite::micro::GetTensorData<int32_t>(input2),
tflite::micro::GetTensorShape(output),
tflite::micro::GetTensorData<int32_t>(output));
} else {
reference_ops::Add(op_params, tflite::micro::GetTensorShape(input1),
tflite::micro::GetTensorData<int32_t>(input1),
tflite::micro::GetTensorShape(input2),
tflite::micro::GetTensorData<int32_t>(input2),
tflite::micro::GetTensorShape(output),
tflite::micro::GetTensorData<int32_t>(output),
false);
}
break;
}
default:
MicroPrintf("Type %s (%d) not supported.",
TfLiteTypeGetName(output->type), output->type);
Expand Down Expand Up @@ -309,7 +346,7 @@ TfLiteStatus EvalAdd(TfLiteContext* context, TfLiteNode* node) {

if (output->type == kTfLiteFloat32) {
EvalAddFloat(context, node, params, data, input1, input2, output);
} else if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) {
} else if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16 || output->type == kTfLiteInt32) {
TF_LITE_ENSURE_OK(context, EvalAddQuantized(context, node, params, data,
input1, input2, output));
} else {
Expand Down Expand Up @@ -1333,6 +1370,26 @@ TfLiteStatus EvalAddQuantized(TfLiteContext* context, TfLiteNode* node,
}
break;
}
case kTfLiteInt32: {
if (need_broadcast) {
reference_ops::BroadcastAdd4DSlow(
op_params, tflite::micro::GetTensorShape(input1),
tflite::micro::GetTensorData<int32_t>(input1),
tflite::micro::GetTensorShape(input2),
tflite::micro::GetTensorData<int32_t>(input2),
tflite::micro::GetTensorShape(output),
tflite::micro::GetTensorData<int32_t>(output));
} else {
reference_ops::Add(op_params, tflite::micro::GetTensorShape(input1),
tflite::micro::GetTensorData<int32_t>(input1),
tflite::micro::GetTensorShape(input2),
tflite::micro::GetTensorData<int32_t>(input2),
tflite::micro::GetTensorShape(output),
tflite::micro::GetTensorData<int32_t>(output),
false);
}
break;
}
default:
MicroPrintf("Type %s (%d) not supported.",
TfLiteTypeGetName(output->type), output->type);
Expand Down Expand Up @@ -1362,7 +1419,7 @@ TfLiteStatus AddEval(TfLiteContext* context, TfLiteNode* node) {

if (output->type == kTfLiteFloat32) {
EvalAdd(context, node, params, data, input1, input2, output);
} else if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) {
} else if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16 || output->type == kTfLiteInt32) {
TF_LITE_ENSURE_OK(context, EvalAddQuantized(context, node, params, data,
input1, input2, output));
} else {
Expand Down
Loading

0 comments on commit 1cd3b47

Please sign in to comment.