Finished the lib generation documentation

This commit is contained in:
QingChuanWS
2020-12-23 20:10:07 +08:00
parent 954de91ef4
commit 78bbdab4ec
34 changed files with 387 additions and 27245 deletions

View File

@@ -36,12 +36,12 @@ inline void InfiniteLoop() {
#else // TF_LITE_MCU_DEBUG_LOG
#include <stdio.h>
#include <cstdio>
#include <cstdlib>
#define DEBUG_LOG(x) \
do { \
printf("%s", (x)); \
fprintf(stderr, "%s", (x)); \
} while (0)
// Reports an error for an unsupported type used by op 'op_name' and returns kTfLiteError.

View File

@@ -1,22 +0,0 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_
#define TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_
extern const unsigned char g_keyword_scrambled_model_data[];
extern const unsigned int g_keyword_scrambled_model_data_length;
#endif // TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_

View File

@@ -13,15 +13,29 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Reference implementation of the DebugLog() function that's required for a
// platform to support the TensorFlow Lite for Microcontrollers library. This is
// the only function that's absolutely required to be available on a target
// device, since it's used for communicating test results back to the host so
// that we can verify the implementation is working correctly.
// It's designed to be as easy as possible to supply an implementation though.
// On platforms that have a POSIX stack or C library, it can be written as a
// single call to `fprintf(stderr, "%s", s)` to output a string to the error
// stream of the console, but if there's no OS or C library available, there's
// almost always an equivalent way to write out a string to some serial
// interface that can be used instead. For example on Arm M-series MCUs, calling
// the `bkpt #0xAB` assembler instruction will output the string in r1 to
// whatever debug serial connection is available. If you're running mbed, you
// can do the same by creating `Serial pc(USBTX, USBRX)` and then calling
// `pc.printf("%s", s)`.
// To add an equivalent function for your own platform, create your own
// implementation file, and place it in a subfolder named after the OS
// you're targeting. For example, see the Cortex M bare metal version in
// tensorflow/lite/micro/bluepill/debug_log.cc or the mbed one in
// tensorflow/lite/micro/mbed/debug_log.cc.
#include "tensorflow/lite/micro/debug_log.h"
#include "stdio.h"
#ifdef __cplusplus
extern "C" {
#endif
#include <cstdio>
void DebugLog(const char *s) { printf("%s", s); }
#ifdef __cplusplus
}
#endif
extern "C" void DebugLog(const char* s) { fprintf(stderr, "%s", s); }

View File

@@ -1,25 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/micro/examples/person_detection_experimental/detection_responder.h"
// This dummy implementation writes person and no person scores to the error
// console. Real applications will want to take some custom action instead, and
// should implement their own versions of this function.
void RespondToDetection(tflite::ErrorReporter* error_reporter,
int8_t person_score, int8_t no_person_score) {
TF_LITE_REPORT_ERROR(error_reporter, "person score:%d no person score %d",
person_score, no_person_score);
}

View File

@@ -1,34 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Provides an interface to take an action based on the output from the person
// detection model.
#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_DETECTION_RESPONDER_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_DETECTION_RESPONDER_H_
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
// Called every time the results of a person detection run are available. The
// `person_score` has the numerical confidence that the captured image contains
// a person, and `no_person_score` has the numerical confidence that the image
does not contain a person. Typically, if person_score > no_person_score, the
image is considered to contain a person. This threshold may be adjusted for
particular applications.
void RespondToDetection(tflite::ErrorReporter* error_reporter,
int8_t person_score, int8_t no_person_score);
#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_DETECTION_RESPONDER_H_

View File

@@ -1,26 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/micro/examples/person_detection_experimental/image_provider.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/model_settings.h"
TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
int image_height, int channels, int8_t* image_data,
uint8_t * hardware_input) {
for (int i = 0; i < image_width * image_height * channels; ++i) {
image_data[i] = hardware_input[i];
}
return kTfLiteOk;
}

View File

@@ -1,40 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_IMAGE_PROVIDER_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_IMAGE_PROVIDER_H_
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
// This is an abstraction around an image source like a camera, and is
// expected to return 8-bit sample data. The assumption is that this will be
// called in a low duty-cycle fashion in a low-power application. In these
// cases, the imaging sensor need not be run in a streaming mode, but rather can
// be idled in a relatively low-power mode between calls to GetImage(). The
// assumption is that the overhead and time of bringing the low-power sensor out
// of this standby mode is commensurate with the expected duty cycle of the
// application. The underlying sensor may actually be put into a streaming
// configuration, but the image buffer provided to GetImage should not be
// overwritten by the driver code until the next call to GetImage();
//
// The reference implementation can have no platform-specific dependencies, so
// it just returns a static image. For real applications, you should
// ensure there's a specialized implementation that accesses hardware APIs.
TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
int image_height, int channels, int8_t* image_data,
uint8_t * hardware_input);
#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_IMAGE_PROVIDER_H_

View File

@@ -1,27 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/micro/examples/person_detection_experimental/main_functions.h"
// This is the default main used on systems that have the standard C entry
// point. Other devices (for example FreeRTOS or ESP32) that have different
// requirements for entry code (like an app_main function) should specialize
// this main.cc file in a target-specific subfolder.
int main(int argc, char* argv[]) {
setup();
while (true) {
loop();
}
}

View File

@@ -1,119 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/micro/examples/person_detection_experimental/main_functions.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/detection_responder.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/image_provider.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/model_settings.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/person_detect_model_data.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"
// Globals, used for compatibility with Arduino-style sketches.
namespace {
tflite::ErrorReporter* error_reporter = nullptr;
const tflite::Model* model = nullptr;
tflite::MicroInterpreter* interpreter = nullptr;
TfLiteTensor* input = nullptr;
// In order to use optimized tensorflow lite kernels, a signed int8_t quantized
// model is preferred over the legacy unsigned model format. This means that
// throughout this project, input images must be converted from unsigned to
// signed format. The easiest and quickest way to convert from unsigned to
// signed 8-bit integers is to subtract 128 from the unsigned value to get a
// signed value.
// An area of memory to use for input, output, and intermediate arrays.
constexpr int kTensorArenaSize = 115 * 1024;
static uint8_t tensor_arena[kTensorArenaSize];
} // namespace
// The name of this function is important for Arduino compatibility.
void person_detect_init() {
// Set up logging. Google style is to avoid globals or statics because of
// lifetime uncertainty, but since this has a trivial destructor it's okay.
// NOLINTNEXTLINE(runtime-global-variables)
static tflite::MicroErrorReporter micro_error_reporter;
error_reporter = &micro_error_reporter;
// Map the model into a usable data structure. This doesn't involve any
// copying or parsing, it's a very lightweight operation.
model = tflite::GetModel(g_person_detect_model_data);
if (model->version() != TFLITE_SCHEMA_VERSION) {
TF_LITE_REPORT_ERROR(error_reporter,
"Model provided is schema version %d not equal "
"to supported version %d.",
model->version(), TFLITE_SCHEMA_VERSION);
return;
}
// Pull in only the operation implementations we need.
// This relies on a complete list of all the ops needed by this graph.
// An easier approach is to just use the AllOpsResolver, but this will
// incur some penalty in code space for op implementations that are not
// needed by this graph.
//
// tflite::AllOpsResolver resolver;
// NOLINTNEXTLINE(runtime-global-variables)
static tflite::MicroMutableOpResolver<5> micro_op_resolver;
micro_op_resolver.AddAveragePool2D();
micro_op_resolver.AddConv2D();
micro_op_resolver.AddDepthwiseConv2D();
micro_op_resolver.AddReshape();
micro_op_resolver.AddSoftmax();
// Build an interpreter to run the model with.
// NOLINTNEXTLINE(runtime-global-variables)
static tflite::MicroInterpreter static_interpreter(
model, micro_op_resolver, tensor_arena, kTensorArenaSize, error_reporter);
interpreter = &static_interpreter;
// Allocate memory from the tensor_arena for the model's tensors.
TfLiteStatus allocate_status = interpreter->AllocateTensors();
if (allocate_status != kTfLiteOk) {
TF_LITE_REPORT_ERROR(error_reporter, "AllocateTensors() failed");
return;
}
// Get information about the memory area to use for the model's input.
input = interpreter->input(0);
}
// The name of this function is important for Arduino compatibility.
int person_detect(uint8_t * hardware_input) {
// Get image from provider.
if (kTfLiteOk != GetImage(error_reporter, kNumCols, kNumRows, kNumChannels,
input->data.int8, hardware_input)) {
TF_LITE_REPORT_ERROR(error_reporter, "Image capture failed.");
}
// Run the model on this input and make sure it succeeds.
if (kTfLiteOk != interpreter->Invoke()) {
TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed.");
}
TfLiteTensor* output = interpreter->output(0);
// Process the inference results.
int8_t person_score = output->data.int8[kPersonIndex];
int8_t no_person_score = output->data.int8[kNotAPersonIndex];
RespondToDetection(error_reporter, person_score, no_person_score);
if (person_score >= no_person_score + 50) return 1;
return 0;
}

View File

@@ -1,30 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MAIN_FUNCTIONS_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MAIN_FUNCTIONS_H_
#include "tensorflow/lite/c/common.h"
// Initializes all data needed for the example. The name is important, and needs
// to be setup() for Arduino compatibility.
extern "C" void person_detect_init();
// Runs one iteration of data gathering and inference. This should be called
// repeatedly from the application code. The name needs to be loop() for Arduino
// compatibility.
extern "C" int person_detect(uint8_t * hardware_input);
#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MAIN_FUNCTIONS_H_

View File

@@ -1,21 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/micro/examples/person_detection_experimental/model_settings.h"
const char* kCategoryLabels[kCategoryCount] = {
"notperson",
"person",
};

View File

@@ -1,35 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MODEL_SETTINGS_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MODEL_SETTINGS_H_
// Keeping these as constant expressions allow us to allocate fixed-sized arrays
// on the stack for our working memory.
// All of these values are derived from the values used during model training,
// if you change your model you'll need to update these constants.
constexpr int kNumCols = 96;
constexpr int kNumRows = 96;
constexpr int kNumChannels = 1;
constexpr int kMaxImageSize = kNumCols * kNumRows * kNumChannels;
constexpr int kCategoryCount = 2;
constexpr int kPersonIndex = 1;
constexpr int kNotAPersonIndex = 0;
extern const char* kCategoryLabels[kCategoryCount];
#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MODEL_SETTINGS_H_

View File

@@ -1,27 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This is a standard TensorFlow Lite model file that has been converted into a
// C data array, so it can be easily compiled into a binary for devices that
// don't have a file system. It was created using the command:
// xxd -i person_detect.tflite > person_detect_model_data.cc
#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_PERSON_DETECT_MODEL_DATA_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_PERSON_DETECT_MODEL_DATA_H_
extern const unsigned char g_person_detect_model_data[];
extern const int g_person_detect_model_data_len;
#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_PERSON_DETECT_MODEL_DATA_H_

View File

@@ -1,378 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "mcu_init.h"
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {
namespace {
void TestReluFloat(const int* input_dims_data, const float* input_data,
const int* output_dims_data, const float* golden,
float* output_data) {
TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data);
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
const int output_elements_count = ElementCount(*output_dims);
constexpr int inputs_size = 1;
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input_data, input_dims),
CreateFloatTensor(output_data, output_dims),
};
int inputs_array_data[] = {1, 0};
TfLiteIntArray* inputs_array = IntArrayFromInts(inputs_array_data);
int outputs_array_data[] = {1, 1};
TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);
const TfLiteRegistration registration = ops::micro::Register_RELU();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
for (int i = 0; i < output_elements_count; ++i) {
TF_LITE_MICRO_EXPECT_NEAR(golden[i], output_data[i], 1e-5f);
}
}
void TestRelu6Float(const int* input_dims_data, const float* input_data,
const int* output_dims_data, const float* golden,
float* output_data) {
TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data);
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
const int output_elements_count = ElementCount(*output_dims);
constexpr int inputs_size = 1;
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input_data, input_dims),
CreateFloatTensor(output_data, output_dims),
};
int inputs_array_data[] = {1, 0};
TfLiteIntArray* inputs_array = IntArrayFromInts(inputs_array_data);
int outputs_array_data[] = {1, 1};
TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);
const TfLiteRegistration registration = ops::micro::Register_RELU6();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
for (int i = 0; i < output_elements_count; ++i) {
TF_LITE_MICRO_EXPECT_NEAR(golden[i], output_data[i], 1e-5f);
}
}
void TestReluUint8(const int* input_dims_data, const float* input_data,
uint8_t* input_data_quantized, const float input_scale,
const int input_zero_point, const float* golden,
uint8_t* golden_quantized, const int* output_dims_data,
const float output_scale, const int output_zero_point,
uint8_t* output_data) {
TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data);
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
const int output_elements_count = ElementCount(*output_dims);
constexpr int inputs_size = 1;
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_data_quantized, input_dims,
input_scale, input_zero_point),
CreateQuantizedTensor(output_data, output_dims, output_scale,
output_zero_point),
};
int inputs_array_data[] = {1, 0};
TfLiteIntArray* inputs_array = IntArrayFromInts(inputs_array_data);
int outputs_array_data[] = {1, 1};
TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);
const TfLiteRegistration registration = ops::micro::Register_RELU();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
AsymmetricQuantize(golden, golden_quantized, output_elements_count,
output_scale, output_zero_point);
for (int i = 0; i < output_elements_count; ++i) {
TF_LITE_MICRO_EXPECT_EQ(golden_quantized[i], output_data[i]);
}
}
void TestRelu6Uint8(const int* input_dims_data, const float* input_data,
uint8_t* input_data_quantized, const float input_scale,
const int input_zero_point, const float* golden,
uint8_t* golden_quantized, const int* output_dims_data,
const float output_scale, const int output_zero_point,
uint8_t* output_data) {
TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data);
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
const int output_elements_count = ElementCount(*output_dims);
constexpr int inputs_size = 1;
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_data_quantized, input_dims,
input_scale, input_zero_point),
CreateQuantizedTensor(output_data, output_dims, output_scale,
output_zero_point),
};
int inputs_array_data[] = {1, 0};
TfLiteIntArray* inputs_array = IntArrayFromInts(inputs_array_data);
int outputs_array_data[] = {1, 1};
TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);
const TfLiteRegistration registration = ops::micro::Register_RELU6();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
AsymmetricQuantize(golden, golden_quantized, output_elements_count,
output_scale, output_zero_point);
for (int i = 0; i < output_elements_count; ++i) {
TF_LITE_MICRO_EXPECT_EQ(golden_quantized[i], output_data[i]);
}
}
void TestReluInt8(const int* input_dims_data, const float* input_data,
int8_t* input_data_quantized, const float input_scale,
const int input_zero_point, const float* golden,
int8_t* golden_quantized, const int* output_dims_data,
const float output_scale, const int output_zero_point,
int8_t* output_data) {
TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data);
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
const int output_elements_count = ElementCount(*output_dims);
constexpr int inputs_size = 1;
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_data_quantized, input_dims,
input_scale, input_zero_point),
CreateQuantizedTensor(output_data, output_dims, output_scale,
output_zero_point),
};
int inputs_array_data[] = {1, 0};
TfLiteIntArray* inputs_array = IntArrayFromInts(inputs_array_data);
int outputs_array_data[] = {1, 1};
TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);
const TfLiteRegistration registration = ops::micro::Register_RELU();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
AsymmetricQuantize(golden, golden_quantized, output_elements_count,
output_scale, output_zero_point);
for (int i = 0; i < output_elements_count; ++i) {
TF_LITE_MICRO_EXPECT_EQ(golden_quantized[i], output_data[i]);
}
}
void TestRelu6Int8(const int* input_dims_data, const float* input_data,
int8_t* input_data_quantized, const float input_scale,
const int input_zero_point, const float* golden,
int8_t* golden_quantized, const int* output_dims_data,
const float output_scale, const int output_zero_point,
int8_t* output_data) {
TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data);
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
const int output_elements_count = ElementCount(*output_dims);
constexpr int inputs_size = 1;
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_data_quantized, input_dims,
input_scale, input_zero_point),
CreateQuantizedTensor(output_data, output_dims, output_scale,
output_zero_point),
};
int inputs_array_data[] = {1, 0};
TfLiteIntArray* inputs_array = IntArrayFromInts(inputs_array_data);
int outputs_array_data[] = {1, 1};
TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);
const TfLiteRegistration registration = ops::micro::Register_RELU6();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
AsymmetricQuantize(golden, golden_quantized, output_elements_count,
output_scale, output_zero_point);
for (int i = 0; i < output_elements_count; ++i) {
TF_LITE_MICRO_EXPECT_EQ(golden_quantized[i], output_data[i]);
}
}
} // namespace
} // namespace testing
} // namespace tflite
TF_LITE_MICRO_TESTS_BEGIN
TF_LITE_MICRO_TEST(SimpleReluTestFloat) {
const int output_elements_count = 10;
const int input_shape[] = {2, 1, 5};
const float input_data[] = {
1.0, 2.0, 3.0, 4.0, 5.0, -1.0, -2.0, -3.0, -4.0, -5.0,
};
const float golden[] = {1.0, 2.0, 3.0, 4.0, 5.0, 0, 0, 0, 0, 0};
const int output_shape[] = {2, 1, 5};
float output_data[output_elements_count];
tflite::testing::TestReluFloat(input_shape, input_data, output_shape, golden,
output_data);
}
TF_LITE_MICRO_TEST(SimpleRelu6TestFloat) {
const int output_elements_count = 10;
float output_data[output_elements_count];
const int input_shape[] = {2, 1, 5};
const float input_data[] = {4.0, 5.0, 6.0, 7.0, 8.0,
-4.0, -5.0, -6.0, -7.0, -8.0};
const int output_shape[] = {2, 1, 5};
const float golden[] = {
4.0, 5.0, 6.0, 6.0, 6.0, 0.0, 0.0, 0.0, 0.0, 0.0,
};
tflite::testing::TestRelu6Float(input_shape, input_data, output_shape, golden,
output_data);
}
TF_LITE_MICRO_TEST(SimpleReluTestUint8) {
const int elements_count = 10;
const int input_shape[] = {2, 1, 5};
const float input_data[] = {1, 2, 3, 4, 5, -1, -2, -3, -4, -5};
uint8_t input_quantized[elements_count];
const int output_shape[] = {2, 1, 5};
const float golden[] = {1, 2, 3, 4, 5, 0, 0, 0, 0, 0};
uint8_t golden_quantized[elements_count];
uint8_t output_data[elements_count];
const float input_scale = 0.5f;
const int input_zero_point = 127;
const float output_scale = 0.5f;
const int output_zero_point = 127;
tflite::testing::TestReluUint8(input_shape, input_data, input_quantized,
input_scale, input_zero_point, golden,
golden_quantized, output_shape, output_scale,
output_zero_point, output_data);
}
TF_LITE_MICRO_TEST(SimpleRelu6TestUint8) {
const int elements_count = 10;
const int input_shape[] = {2, 1, 5};
const float input_data[] = {4, 5, 6, 7, 8, -1, -2, -3, -4, -5};
uint8_t input_quantized[elements_count];
const int output_shape[] = {2, 1, 5};
const float golden[] = {4, 5, 6, 6, 6, 0, 0, 0, 0, 0};
uint8_t golden_quantized[elements_count];
uint8_t output_data[elements_count];
const float input_scale = 0.5f;
const int input_zero_point = 127;
const float output_scale = 0.5f;
const int output_zero_point = 127;
tflite::testing::TestRelu6Uint8(input_shape, input_data, input_quantized,
input_scale, input_zero_point, golden,
golden_quantized, output_shape, output_scale,
output_zero_point, output_data);
}
TF_LITE_MICRO_TEST(SimpleReluTestInt8) {
const int elements_count = 10;
const int input_shape[] = {2, 1, 5};
const float input_data[] = {1, 2, 3, 4, 5, -1, -2, -3, -4, -5};
int8_t input_quantized[elements_count];
const int output_shape[] = {2, 1, 5};
const float golden[] = {1, 2, 3, 4, 5, 0, 0, 0, 0, 0};
int8_t golden_quantized[elements_count];
int8_t output_data[elements_count];
const float input_scale = 0.5f;
const int input_zero_point = 0;
const float output_scale = 0.5f;
const int output_zero_point = 0;
tflite::testing::TestReluInt8(input_shape, input_data, input_quantized,
input_scale, input_zero_point, golden,
golden_quantized, output_shape, output_scale,
output_zero_point, output_data);
}
TF_LITE_MICRO_TEST(SimpleRelu6TestInt8) {
const int elements_count = 10;
const int input_shape[] = {2, 1, 5};
const float input_data[] = {4, 5, 6, 7, 8, -1, -2, -3, -4, -5};
int8_t input_quantized[elements_count];
const int output_shape[] = {2, 1, 5};
const float golden[] = {4, 5, 6, 6, 6, 0, 0, 0, 0, 0};
int8_t golden_quantized[elements_count];
int8_t output_data[elements_count];
const float input_scale = 0.5f;
const int input_zero_point = 127;
const float output_scale = 0.5f;
const int output_zero_point = 127;
tflite::testing::TestRelu6Int8(input_shape, input_data, input_quantized,
input_scale, input_zero_point, golden,
golden_quantized, output_shape, output_scale,
output_zero_point, output_data);
}
TF_LITE_MICRO_TESTS_END

View File

@@ -14,11 +14,11 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/lite/kernels/internal/reference/comparisons.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
namespace tflite {
namespace ops {

View File

@@ -159,7 +159,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
int num_dimensions = NumDimensions(input);
if (num_dimensions > 4) {
TF_LITE_KERNEL_LOG(context,
TF_LITE_KERNEL_LOG(
context,
"Op Concatenation does not currently support num dimensions >4 "
"Tensor has %d dimensions.",
num_dimensions);
@@ -209,7 +210,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
break;
}
default:
TF_LITE_KERNEL_LOG(context, "Op Concatenation does not currently support Type '%s'.",
TF_LITE_KERNEL_LOG(
context, "Op Concatenation does not currently support Type '%s'.",
TfLiteTypeGetName(output_type));
return kTfLiteError;
}
@@ -238,7 +240,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
break;
default:
TF_LITE_KERNEL_LOG(context, "Op Concatenation does not currently support Type '%s'.",
TF_LITE_KERNEL_LOG(
context, "Op Concatenation does not currently support Type '%s'.",
TfLiteTypeGetName(output_type));
return kTfLiteError;
}

View File

@@ -15,7 +15,7 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_MICRO_MICRO_MUTABLE_OP_RESOLVER_H_
#define TENSORFLOW_LITE_MICRO_MICRO_MUTABLE_OP_RESOLVER_H_
#include <stdio.h>
#include <cstdio>
#include <cstring>
#include "tensorflow/lite/c/common.h"

View File

@@ -22,7 +22,7 @@ limitations under the License.
#include <cinttypes>
#include <cstddef>
#include <cstdint>
#include <stdio.h>
#include <cstdio>
#include <vector>
#include "flatbuffers/flatbuffers.h" // from @flatbuffers

View File

@@ -74,26 +74,23 @@ extern tflite::ErrorReporter* reporter;
tflite::ErrorReporter* reporter; \
} \
\
int main(void) { \
int main(int argc, char** argv) { \
micro_test::tests_passed = 0; \
micro_test::tests_failed = 0; \
tflite::MicroErrorReporter error_reporter; \
micro_test::reporter = &error_reporter; \
HAL_Init(); \
SystemClock_Config(); \
board_init(); \
printf("Init Successful");
micro_test::reporter = &error_reporter;
#define TF_LITE_MICRO_TESTS_END \
micro_test::reporter->Report( \
"%d/%d tests passed", micro_test::tests_passed, \
(micro_test::tests_failed + micro_test::tests_passed)); \
if (micro_test::tests_failed == 0) { \
micro_test::reporter->Report("~~~ALL TESTS PASSED~~~\n"); \
return kTfLiteOk; \
} else { \
micro_test::reporter->Report("~~~SOME TESTS FAILED~~~\n"); \
return kTfLiteError; \
} \
while(1); \
}
// TODO(petewarden): I'm going to hell for what I'm doing to this poor for loop.

View File

@@ -33,12 +33,12 @@
#include "cmsis/CMSIS/NN/Include/arm_nnsupportfunctions.h"
// Work around for https://github.com/ARMmbed/mbed-os/issues/12568
#define __patched_SXTB16_RORn(op1, rotate) \
({ \
uint32_t result; \
__ASM ("sxtb16 %0, %1, ROR %2" : "=r" (result) : "r" (op1), "i" (rotate) ); \
result; \
})
__STATIC_FORCEINLINE uint32_t __patched_SXTB16_RORn(uint32_t op1, uint32_t rotate) {
uint32_t result;
__ASM ("sxtb16 %0, %1, ROR %2" : "=r" (result) : "r" (op1), "i" (rotate) );
return result;
}
/**
* @ingroup groupSupport
*/

View File

@@ -2735,16 +2735,16 @@ struct TypeTable {
// Weak linkage is culled by VS & doesn't work on cygwin.
// clang-format off
//#if !defined(_WIN32) && !defined(__CYGWIN__)
#if !defined(_WIN32) && !defined(__CYGWIN__)
//extern volatile __attribute__((weak)) const char *flatbuffer_version_string;
//volatile __attribute__((weak)) const char *flatbuffer_version_string =
// "FlatBuffers "
// FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MAJOR) "."
// FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MINOR) "."
// FLATBUFFERS_STRING(FLATBUFFERS_VERSION_REVISION);
extern volatile __attribute__((weak)) const char *flatbuffer_version_string;
volatile __attribute__((weak)) const char *flatbuffer_version_string =
"FlatBuffers "
FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MAJOR) "."
FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MINOR) "."
FLATBUFFERS_STRING(FLATBUFFERS_VERSION_REVISION);
//#endif // !defined(_WIN32) && !defined(__CYGWIN__)
#endif // !defined(_WIN32) && !defined(__CYGWIN__)
#define FLATBUFFERS_DEFINE_BITMASK_OPERATORS(E, T)\
inline E operator | (E lhs, E rhs){\

View File

@@ -1,11 +0,0 @@
Copyright (c) 2003-2010 Mark Borgerding
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,164 +0,0 @@
/*
Copyright (c) 2003-2010, Mark Borgerding
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* kiss_fft.h
defines kiss_fft_scalar as either short or a float type
and defines
typedef struct { kiss_fft_scalar r; kiss_fft_scalar i; }kiss_fft_cpx; */
#include "kiss_fft.h"
#include <limits.h>
#define MAXFACTORS 32
/* e.g. an fft of length 128 has 4 factors
as far as kissfft is concerned
4*4*4*2
*/
struct kiss_fft_state{
int nfft;
int inverse;
int factors[2*MAXFACTORS];
kiss_fft_cpx twiddles[1];
};
/*
Explanation of macros dealing with complex math:
C_MUL(m,a,b) : m = a*b
C_FIXDIV( c , div ) : if a fixed point impl., c /= div. noop otherwise
C_SUB( res, a,b) : res = a - b
C_SUBFROM( res , a) : res -= a
C_ADDTO( res , a) : res += a
* */
#ifdef FIXED_POINT
#if (FIXED_POINT==32)
# define FRACBITS 31
# define SAMPPROD int64_t
#define SAMP_MAX 2147483647
#else
# define FRACBITS 15
# define SAMPPROD int32_t
#define SAMP_MAX 32767
#endif
#define SAMP_MIN -SAMP_MAX
#if defined(CHECK_OVERFLOW)
# define CHECK_OVERFLOW_OP(a,op,b) \
if ( (SAMPPROD)(a) op (SAMPPROD)(b) > SAMP_MAX || (SAMPPROD)(a) op (SAMPPROD)(b) < SAMP_MIN ) { \
fprintf(stderr,"WARNING:overflow @ " __FILE__ "(%d): (%d " #op" %d) = %ld\n",__LINE__,(a),(b),(SAMPPROD)(a) op (SAMPPROD)(b) ); }
#endif
# define smul(a,b) ( (SAMPPROD)(a)*(b) )
# define sround( x ) (kiss_fft_scalar)( ( (x) + (1<<(FRACBITS-1)) ) >> FRACBITS )
# define S_MUL(a,b) sround( smul(a,b) )
# define C_MUL(m,a,b) \
do{ (m).r = sround( smul((a).r,(b).r) - smul((a).i,(b).i) ); \
(m).i = sround( smul((a).r,(b).i) + smul((a).i,(b).r) ); }while(0)
# define DIVSCALAR(x,k) \
(x) = sround( smul( x, SAMP_MAX/k ) )
# define C_FIXDIV(c,div) \
do { DIVSCALAR( (c).r , div); \
DIVSCALAR( (c).i , div); }while (0)
# define C_MULBYSCALAR( c, s ) \
do{ (c).r = sround( smul( (c).r , s ) ) ;\
(c).i = sround( smul( (c).i , s ) ) ; }while(0)
#else /* not FIXED_POINT*/
# define S_MUL(a,b) ( (a)*(b) )
#define C_MUL(m,a,b) \
do{ (m).r = (a).r*(b).r - (a).i*(b).i;\
(m).i = (a).r*(b).i + (a).i*(b).r; }while(0)
# define C_FIXDIV(c,div) /* NOOP */
# define C_MULBYSCALAR( c, s ) \
do{ (c).r *= (s);\
(c).i *= (s); }while(0)
#endif
#ifndef CHECK_OVERFLOW_OP
# define CHECK_OVERFLOW_OP(a,op,b) /* noop */
#endif
#define C_ADD( res, a,b)\
do { \
CHECK_OVERFLOW_OP((a).r,+,(b).r)\
CHECK_OVERFLOW_OP((a).i,+,(b).i)\
(res).r=(a).r+(b).r; (res).i=(a).i+(b).i; \
}while(0)
#define C_SUB( res, a,b)\
do { \
CHECK_OVERFLOW_OP((a).r,-,(b).r)\
CHECK_OVERFLOW_OP((a).i,-,(b).i)\
(res).r=(a).r-(b).r; (res).i=(a).i-(b).i; \
}while(0)
#define C_ADDTO( res , a)\
do { \
CHECK_OVERFLOW_OP((res).r,+,(a).r)\
CHECK_OVERFLOW_OP((res).i,+,(a).i)\
(res).r += (a).r; (res).i += (a).i;\
}while(0)
#define C_SUBFROM( res , a)\
do {\
CHECK_OVERFLOW_OP((res).r,-,(a).r)\
CHECK_OVERFLOW_OP((res).i,-,(a).i)\
(res).r -= (a).r; (res).i -= (a).i; \
}while(0)
#ifdef FIXED_POINT
# define KISS_FFT_COS(phase) floor(.5+SAMP_MAX * cos (phase))
# define KISS_FFT_SIN(phase) floor(.5+SAMP_MAX * sin (phase))
# define HALF_OF(x) ((x)>>1)
#elif defined(USE_SIMD)
# define KISS_FFT_COS(phase) _mm_set1_ps( cos(phase) )
# define KISS_FFT_SIN(phase) _mm_set1_ps( sin(phase) )
# define HALF_OF(x) ((x)*_mm_set1_ps(.5))
#else
# define KISS_FFT_COS(phase) (kiss_fft_scalar) cos(phase)
# define KISS_FFT_SIN(phase) (kiss_fft_scalar) sin(phase)
# define HALF_OF(x) ((x)*.5)
#endif
#define kf_cexp(x,phase) \
do{ \
(x)->r = KISS_FFT_COS(phase);\
(x)->i = KISS_FFT_SIN(phase);\
}while(0)
/* a debugging function */
#define pcpx(c)\
fprintf(stderr,"%g + %gi\n",(double)((c)->r),(double)((c)->i) )
#ifdef KISS_FFT_USE_ALLOCA
// define this to allow use of alloca instead of malloc for temporary buffers
// Temporary buffers are used in two case:
// 1. FFT sizes that have "bad" factors. i.e. not 2,3 and 5
// 2. "in-place" FFTs. Notice the quotes, since kissfft does not really do an in-place transform.
#include <alloca.h>
#define KISS_FFT_TMP_ALLOC(nbytes) alloca(nbytes)
#define KISS_FFT_TMP_FREE(ptr)
#else
#define KISS_FFT_TMP_ALLOC(nbytes) KISS_FFT_MALLOC(nbytes)
#define KISS_FFT_TMP_FREE(ptr) KISS_FFT_FREE(ptr)
#endif

View File

@@ -1,131 +0,0 @@
#ifndef KISS_FFT_H
#define KISS_FFT_H
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
ATTENTION!
If you would like a :
-- a utility that will handle the caching of fft objects
-- real-only (no imaginary time component ) FFT
-- a multi-dimensional FFT
-- a command-line utility to perform ffts
-- a command-line utility to perform fast-convolution filtering
Then see kfc.h kiss_fftr.h kiss_fftnd.h fftutil.c kiss_fastfir.c
in the tools/ directory.
*/
#ifdef USE_SIMD
# include <xmmintrin.h>
# define kiss_fft_scalar __m128
#define KISS_FFT_MALLOC(nbytes) _mm_malloc(nbytes,16)
#define KISS_FFT_FREE _mm_free
#else
#define KISS_FFT_MALLOC(X) (void*)(0) /* Patched. */
#define KISS_FFT_FREE(X) /* Patched. */
#endif
// Patched automatically by download_dependencies.sh so default is 16 bit.
#ifndef FIXED_POINT
#define FIXED_POINT (16)
#endif
// End patch.
#ifdef FIXED_POINT
#include <stdint.h> /* Patched. */
#include <sys/types.h>
# if (FIXED_POINT == 32)
# define kiss_fft_scalar int32_t
# else
# define kiss_fft_scalar int16_t
# endif
#else
# ifndef kiss_fft_scalar
/* default is float */
# define kiss_fft_scalar float
# endif
#endif
typedef struct {
kiss_fft_scalar r;
kiss_fft_scalar i;
}kiss_fft_cpx;
typedef struct kiss_fft_state* kiss_fft_cfg;
/*
* kiss_fft_alloc
*
* Initialize a FFT (or IFFT) algorithm's cfg/state buffer.
*
* typical usage: kiss_fft_cfg mycfg=kiss_fft_alloc(1024,0,NULL,NULL);
*
* The return value from fft_alloc is a cfg buffer used internally
* by the fft routine or NULL.
*
* If lenmem is NULL, then kiss_fft_alloc will allocate a cfg buffer using malloc.
* The returned value should be free()d when done to avoid memory leaks.
*
* The state can be placed in a user supplied buffer 'mem':
* If lenmem is not NULL and mem is not NULL and *lenmem is large enough,
* then the function places the cfg in mem and the size used in *lenmem
* and returns mem.
*
* If lenmem is not NULL and ( mem is NULL or *lenmem is not large enough),
* then the function returns NULL and places the minimum cfg
* buffer size in *lenmem.
* */
kiss_fft_cfg kiss_fft_alloc(int nfft,int inverse_fft,void * mem,size_t * lenmem);
/*
* kiss_fft(cfg,in_out_buf)
*
* Perform an FFT on a complex input buffer.
* for a forward FFT,
* fin should be f[0] , f[1] , ... ,f[nfft-1]
* fout will be F[0] , F[1] , ... ,F[nfft-1]
* Note that each element is complex and can be accessed like
f[k].r and f[k].i
* */
void kiss_fft(kiss_fft_cfg cfg,const kiss_fft_cpx *fin,kiss_fft_cpx *fout);
/*
A more generic version of the above function. It reads its input from every Nth sample.
* */
void kiss_fft_stride(kiss_fft_cfg cfg,const kiss_fft_cpx *fin,kiss_fft_cpx *fout,int fin_stride);
/* If kiss_fft_alloc allocated a buffer, it is one contiguous
buffer and can be simply free()d when no longer needed*/
#define kiss_fft_free free
/*
Cleans up some memory that gets managed internally. Not necessary to call, but it might clean up
your compiler output to call this before you exit.
*/
void kiss_fft_cleanup(void);
/*
* Returns the smallest integer k, such that k>=n and k has only "fast" factors (2,3,5)
*/
int kiss_fft_next_fast_size(int n);
/* for real ffts, we need an even size */
#define kiss_fftr_next_fast_size_real(n) \
(kiss_fft_next_fast_size( ((n)+1)>>1)<<1)
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -1,46 +0,0 @@
#ifndef KISS_FTR_H
#define KISS_FTR_H
#include "kiss_fft.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
Real optimized version can save about 45% cpu time vs. complex fft of a real seq.
*/
typedef struct kiss_fftr_state *kiss_fftr_cfg;
kiss_fftr_cfg kiss_fftr_alloc(int nfft,int inverse_fft,void * mem, size_t * lenmem);
/*
nfft must be even
If you don't care to allocate space, use mem = lenmem = NULL
*/
void kiss_fftr(kiss_fftr_cfg cfg,const kiss_fft_scalar *timedata,kiss_fft_cpx *freqdata);
/*
input timedata has nfft scalar points
output freqdata has nfft/2+1 complex points
*/
void kiss_fftri(kiss_fftr_cfg cfg,const kiss_fft_cpx *freqdata,kiss_fft_scalar *timedata);
/*
input freqdata has nfft/2+1 complex points
output timedata has nfft scalar points
*/
#define kiss_fftr_free free
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -17,7 +17,7 @@ limitations under the License.
#define RUY_RUY_PROFILER_INSTRUMENTATION_H_
#ifdef RUY_PROFILER
#include <stdio.h>
#include <cstdio>
#include <mutex>
#include <vector>
#endif

View File

@@ -1,50 +1,313 @@
# 1. Overview
## 1.1. TensorFlow Lite Micro
Refer to the AT component's user guide.md in the doc folder.
## 1.2. CMSIS-NN
## 1.3.
# 2. The TencentOS-tiny AT Framework
## 2.1. Overall Architecture
## 2.2. How It Works
## 2.3. TencentOS-tiny AT Framework Parameter Configuration
## 2.4. APIs Provided by the AT Framework
# 3. The TencentOS-tiny SAL Framework
## 3.1. What Is the SAL Framework
## 3.2. How the SAL Framework Works
## 3.3. Network Programming APIs Provided by the SAL Framework
# 4. Adding the TFlite_Micro Component to a Keil Project
## 4.1. Preparation Before Porting
## 4.3. Porting the SAL Framework
## 4.4. Porting the Communication Module Driver
## 4.5. Testing Network Communication
# 5. Building TFlite_Micro.lib
## Step1.
## Step2.
## Step3.

# Tensorflow Lite Micro Component User Guide
Authors: 刘恒言, 杨庆生, 邓可笈
Date:
Contact: bingshan45@163.com
# Overview
`Tensorflow Lite Micro` is the experimental port of `TensorFlow Lite` to MCUs; it is designed to run machine learning models on microcontrollers and other devices with only a few kilobytes of memory.
# 1. Building and Converting Models
Embedded devices have limited RAM and storage, which constrains the sizes of deep learning models. In addition, TensorFlow Lite Micro currently supports only a limited subset of deep learning operators, so not every model architecture is feasible.
This section describes the process of converting a TensorFlow model so that it can run on embedded devices. It also outlines the supported operations and gives some guidance on designing and training a model to fit within the memory limits.
## 1.1 Model Conversion
To convert a trained TensorFlow model into a Tensorflow Lite model that can run on embedded devices, use the [TensorFlow Lite converter Python API](https://tensorflow.google.cn/lite/microcontrollers/build_convert). It converts the model into the [`FlatBuffer`](https://google.github.io/flatbuffers/) format, reduces the model size, and modifies the model to use operations supported by TensorFlow Lite.
### 1.1.1 Quantization
To obtain the smallest possible model size, you should consider using [post-training quantization](https://tensorflow.google.cn/lite/performance/post_training_quantization). It reduces the precision of the numbers in your model, which shrinks the model size. However, this can reduce inference accuracy, especially for small models, so we need to analyze the model's accuracy before and after quantization to make sure the loss is acceptable.
The following Python snippet shows how to convert a model with post-training quantization:
```python
import tensorflow as tf
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
tflite_quant_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_quant_model)
```
### 1.1.2 Convert to a C Array
Many microcontroller platforms do not have native filesystem support. The easiest way to use a model from your program is to include it as a C array and compile it into your program.
The following unix command generates a C source file that contains the TensorFlow Lite model as a `char` array:
```bash
xxd -i converted_model.tflite > model_data.cc
```
Its output looks similar to the following:
```c
unsigned char converted_model_tflite[] = {
0x18, 0x00, 0x00, 0x00, 0x54, 0x46, 0x4c, 0x33, 0x00, 0x00, 0x0e, 0x00,
// <Lines omitted>
};
unsigned int converted_model_tflite_len = 18200;
```
Once the file has been generated, you can include it in your program. On embedded platforms, we need to declare the array as `const` for better memory efficiency; a sketch of such a declaration follows.
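A minimal sketch of a header exposing the generated array, assuming the array names produced by the `xxd` command above (the file name `model_data.h` is hypothetical):
```C++
// model_data.h -- declarations for the generated model array.
#ifndef MODEL_DATA_H_
#define MODEL_DATA_H_

// `const` lets the linker keep the model data in flash instead of RAM.
extern const unsigned char converted_model_tflite[];
extern const unsigned int converted_model_tflite_len;

#endif  // MODEL_DATA_H_
```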
## 1.2 Model Architecture and Training
When designing a model for microcontrollers, it is important to consider the model's size, its workload, and the operations it uses.
### 1.2.1 Model Size
A model must be small enough, in both binary size and runtime memory use, to fit within your target device's memory limits alongside the rest of your program.
To create a smaller model, you can use fewer and smaller layers in your architecture. However, small models are more prone to underfitting. For many problems this means it makes sense to try the largest model that still fits within your memory limits. Using larger models, however, also increases processor workload.
Note: the core runtime of TensorFlow Lite for Microcontrollers occupies 16 KB on a Cortex M3.
### 1.2.2 Workload
Workload is affected by the model's size and complexity. Large, complex models may lead to higher power consumption; depending on the application, the resulting increase in power draw and heat may become a problem.
### 1.2.3 Operation Support
TensorFlow Lite for Microcontrollers currently supports only a limited subset of TensorFlow operations, which restricts the model architectures it can run. We are working on expanding operation support, both in the reference implementations and in optimizations for specific architectures.
The supported operations can be seen in the file [`all_ops_resolver.cc`](https://github.com/QingChuanWS/tensorflow/tree/master/tensorflow/lite/micro/all_ops_resolver.cc).
## 1.3 Running Inference
The following sections walk through the [main_functions.cc](https://github.com/QingChuanWS/tensorflow/tree/master/tensorflow/lite/micro/examples/person_detection_experimental/main_functions.cc) file from the speech example shipped with the package and explain how it uses Tensorflow Lite for Microcontrollers to run inference.
### 1.3.1 Includes
To use the library, the following header files must be included:
```C++
#include "tensorflow/lite/micro/kernels/micro_ops.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"
```
- [`micro_ops.h`](https://github.com/QingChuanWS/tensorflow/tree/master/tensorflow/lite/micro/kernels/micro_ops.h) provides the operations that the interpreter uses to run the model.
- [`micro_error_reporter.h`](https://github.com/QingChuanWS/tensorflow/tree/master/tensorflow/lite/micro/micro_error_reporter.h) outputs debug information.
- [`micro_interpreter.h`](https://github.com/QingChuanWS/tensorflow/tree/master/tensorflow/lite/micro/micro_interpreter.h) contains the code that loads and runs the model.
- [`schema_generated.h`](https://github.com/QingChuanWS/tensorflow/tree/master/tensorflow/lite/schema/schema_generated.h) contains the schema for the TensorFlow Lite [`FlatBuffer`](https://google.github.io/flatbuffers/) model file format.
- [`version.h`](https://github.com/QingChuanWS/tensorflow/tree/master/tensorflow/lite/version.h) provides version information for the Tensorflow Lite schema.
The example also includes some other files; these are the most important:
```C++
#include "tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h"
#include "tensorflow/lite/micro/examples/micro_speech/micro_features/model.h"
```
- [`model.h`](https://github.com/QingChuanWS/tensorflow/tree/master/tensorflow/lite/micro_speech/micro_features/model.h) contains the model stored as a `char` array. See ["Build and convert models"](https://tensorflow.google.cn/lite/microcontrollers/build_convert) for how a Tensorflow Lite model is converted into this format.
- [`micro_model_settings.h`](https://github.com/QingChuanWS/tensorflow/tree/master/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h) defines various constants related to the model.
### 1.3.2 Set Up Logging
To set up logging, we create a `tflite::ErrorReporter` pointer from a pointer to a `tflite::MicroErrorReporter` instance:
```C++
tflite::MicroErrorReporter micro_error_reporter;
tflite::ErrorReporter* error_reporter = &micro_error_reporter;
```
This variable is passed into the interpreter, which allows it to write logs. Since microcontrollers often have a variety of logging mechanisms, the implementation of `tflite::MicroErrorReporter` is meant to be customized for your particular device; a sketch follows.
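For instance, on a bare-metal board the usual customization point is the `DebugLog()` function that `MicroErrorReporter` ultimately calls (see the `debug_log.cc` diff earlier in this commit). A minimal sketch, where `uart_send_byte()` is a hypothetical HAL routine:
```C++
#include "tensorflow/lite/micro/debug_log.h"

// Hypothetical HAL routine that transmits one byte over the board's UART.
extern "C" void uart_send_byte(char c);

// Route the library's log output to the UART instead of the C stdio stream.
extern "C" void DebugLog(const char* s) {
  while (*s) {
    uart_send_byte(*s++);
  }
}
```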
### 1.3.3 Load a Model
In the following code, the model is instantiated from a `char` array, `g_tiny_conv_micro_features_model_data` (see ["Build and convert models"](ModelConvert.md) to learn how it is built). We then check the model to make sure its schema version is compatible with the version we are using:
```C++
const tflite::Model* model =
::tflite::GetModel(g_tiny_conv_micro_features_model_data);
if (model->version() != TFLITE_SCHEMA_VERSION) {
error_reporter->Report(
"Model provided is schema version %d not equal "
"to supported version %d.\n",
model->version(), TFLITE_SCHEMA_VERSION);
return 1;
}
```
### 1.3.4 Instantiate the Operations Resolver
The interpreter needs a resolver instance to access the Tensorflow operations (implemented in [`micro_ops`](https://github.com/QingChuanWS/tensorflow/tree/master/tensorflow/lite/micro/kernels/micro_ops.h)). `AllOpsResolver` registers every available operation; the class can be extended to add custom operations to your project:
```C++
tflite::AllOpsResolver resolver;
```
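Alternatively, to save code space, register only the operators your graph actually needs; this is what the person detection example earlier in this commit does with `MicroMutableOpResolver`:
```C++
// Pull in only the op implementations required by this graph (taken from
// the person_detection_experimental main_functions.cc in this commit).
static tflite::MicroMutableOpResolver<5> micro_op_resolver;
micro_op_resolver.AddAveragePool2D();
micro_op_resolver.AddConv2D();
micro_op_resolver.AddDepthwiseConv2D();
micro_op_resolver.AddReshape();
micro_op_resolver.AddSoftmax();
```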
### 1.3.5 Allocate Memory
We need to preallocate a region of memory for the input, output, and intermediate arrays. This is provided as a `uint8_t` array of size `tensor_arena_size`, which is passed to a `tflite::SimpleTensorAllocator` instance:
```C++
const int tensor_arena_size = 10 * 1024;
uint8_t tensor_arena[tensor_arena_size];
tflite::SimpleTensorAllocator tensor_allocator(tensor_arena,
tensor_arena_size);
```
Note: the required memory size depends on the model you are using, and may need to be determined by experiment.
### 1.3.6 Instantiate the Interpreter
We create a `tflite::MicroInterpreter` instance, passing in the variables created earlier:
```C++
tflite::MicroInterpreter interpreter(model, resolver, &tensor_allocator,
error_reporter);
```
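Note: in more recent versions of the library, `tflite::SimpleTensorAllocator` has been replaced; the interpreter takes the tensor arena directly, and tensors must be allocated explicitly before use. A sketch under that newer API (an assumption about your library version, not the code this example uses):
```C++
tflite::MicroInterpreter interpreter(model, resolver, tensor_arena,
                                     tensor_arena_size, error_reporter);
// Reserve memory from the arena for the model's tensors.
if (interpreter.AllocateTensors() != kTfLiteOk) {
  error_reporter->Report("AllocateTensors() failed");
  return 1;
}
```
Some of those versions also expose `interpreter.arena_used_bytes()`, which is helpful when sizing the arena experimentally (see the note in 1.3.5).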
### 1.3.7 Validate the input shape
The `MicroInterpreter` instance can give us a pointer to the model's input tensor by calling `.input(0)`, where `0` denotes the first (and only) input tensor. We inspect this tensor to confirm that its shape and type are what we expect:
```C++
TfLiteTensor* model_input = interpreter.input(0);
if ((model_input->dims->size != 4) || (model_input->dims->data[0] != 1) ||
(model_input->dims->data[1] != kFeatureSliceCount) ||
(model_input->dims->data[2] != kFeatureSliceSize) ||
(model_input->type != kTfLiteUInt8)) {
error_reporter->Report("Bad input tensor parameters in model");
return 1;
}
```
In this snippet, the variables `kFeatureSliceCount` and `kFeatureSliceSize` relate to properties of the input and are defined in [`micro_model_settings.h`](https://github.com/QingChuanWS/tensorflow/tree/master/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h). The enum value `kTfLiteUInt8` refers to one of TensorFlow Lite's data types and is defined in [`common.h`](https://github.com/QingChuanWS/tensorflow/tree/master/tensorflow/lite/c/common.h).
### 1.3.8 Generate features
The data we feed into the model must be generated from the microcontroller's audio input. The `FeatureProvider` class, defined in [`feature_provider.h`](https://github.com/QingChuanWS/tensorflow/tree/master/tensorflow/lite/micro/examples/micro_speech/feature_provider.h), captures audio and converts it into the set of features that will be passed to the model. When the class is instantiated, we use the `TfLiteTensor` obtained earlier to pass in a pointer to the input array, which the `FeatureProvider` uses to populate the input data passed to the model:
```C++
FeatureProvider feature_provider(kFeatureElementCount,
model_input->data.uint8);
```
The following code makes the `FeatureProvider` generate a set of features from the most recent second of audio and populate the input tensor:
```C++
TfLiteStatus feature_status = feature_provider.PopulateFeatureData(
error_reporter, previous_time, current_time, &how_many_new_slices);
```
In this example, feature generation and inference happen inside a loop, so the device can continuously capture and process new audio.
When writing your own program, you will likely generate features some other way, but you must always populate the input tensor with data before running the model.
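A minimal sketch of such a copy (`my_feature_data` is a hypothetical buffer that your own sensor pipeline would fill):
```C++
// Hypothetical source buffer holding kFeatureElementCount quantized features.
extern const uint8_t my_feature_data[kFeatureElementCount];

for (int i = 0; i < kFeatureElementCount; ++i) {
  model_input->data.uint8[i] = my_feature_data[i];
}
```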
### 1.3.9 Run the model
To run the model, we call `Invoke()` on the `tflite::MicroInterpreter` instance:
```C++
TfLiteStatus invoke_status = interpreter.Invoke();
if (invoke_status != kTfLiteOk) {
error_reporter->Report("Invoke failed");
return 1;
}
```
We can check the returned `TfLiteStatus` to determine whether the run was successful. The possible values of `TfLiteStatus`, defined in [`common.h`](https://github.com/QingChuanWS/tensorflow/tree/master/tensorflow/lite/c/common.h), are `kTfLiteOk` and `kTfLiteError`.
### 1.3.10 Obtain the output
The model's output tensor can be obtained by calling `output(0)` on the `tflite::MicroInterpreter`, where `0` denotes the first (and only) output tensor.
In the example, the output is an array of probabilities that the input belongs to each of the classes: "yes", "no", "unknown", and "silence". Since they are laid out in a fixed order, we can use simple logic to determine the class with the highest probability:
```C++
TfLiteTensor* output = interpreter.output(0);
uint8_t top_category_score = 0;
int top_category_index = 0;
for (int category_index = 0; category_index < kCategoryCount;
++category_index) {
const uint8_t category_score = output->data.uint8[category_index];
if (category_score > top_category_score) {
top_category_score = category_score;
top_category_index = category_index;
}
}
```
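The winning index can then be mapped to a human-readable label; a short sketch, assuming the `kCategoryLabels` array that `micro_model_settings.h` defines alongside the other constants:
```C++
// Look up and log the winning category's label, e.g. "yes" or "silence".
const char* top_label = kCategoryLabels[top_category_index];
error_reporter->Report("Heard: %s", top_label);
```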
Elsewhere in the example, a more sophisticated algorithm is used to smooth recognition results across multiple frames; it is defined in [recognize_commands.h](https://github.com/QingChuanWS/tensorflow/tree/master/tensorflow/lite/micro/examples/micro_speech/recognize_commands.h). The same technique can be used to improve reliability when processing any continuous stream of data.
# 2. Building tensorflow_lite_micro.lib
## 2.1 Obtain the TensorFlow Lite Micro library
To build the library and run its tests from the main TensorFlow repository, execute the following commands.
First, clone the TensorFlow repository from GitHub to a convenient location:
```
git clone --depth 1 https://github.com/QingChuanWS/tensorflow.git
```
Because the official TensorFlow repository iterates quickly, the author has uploaded the TensorFlow version currently used by this component to a personal repository; this tutorial is mainly intended to help developers learn how to produce the .lib library.
Enter the directory created in the previous step:
```
cd tensorflow
```
The project's `Makefile` can generate standalone projects that contain all required source files and can be imported into mature embedded development environments. The currently supported environments are Arduino, Keil, Make, and Mbed.
Note: prebuilt projects are hosted for some of these environments. See [Supported platforms](https://tensorflow.google.cn/lite/microcontrollers/overview#supported_platforms) to download them.
To generate projects with Make, use the following command:
```
make -f tensorflow/lite/micro/tools/make/Makefile generate_projects
```
This takes a few minutes, since it downloads some large toolchain dependencies. When it finishes, you should see folders created under a path like `tensorflow/lite/micro/tools/make/gen/linux_x86_64/prj/` (the exact path depends on your host operating system). These folders contain the generated projects and source files; for example, `tensorflow/lite/micro/tools/make/gen/linux_x86_64/prj/hello_world/keil` contains the Keil uVision target for hello_world.
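If you only need the project for a single example, the Makefile also exposes per-example targets (the exact name is an assumption based on the `generate_<example>_<environment>_project` pattern this Makefile uses), for example:
```
make -f tensorflow/lite/micro/tools/make/Makefile generate_hello_world_keil_project
```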
Below, we take the hello_world project as an example and separate out the tflite_micro source files that are independent of the actual application, for the subsequent .lib generation.
Run the `lib_extra.py` script in this directory to generate the tflite micro source package; it accepts the following parameters:
| Parameter          | Meaning                                         |
| ------------------ | ----------------------------------------------- |
| --tensorflow_path  | Absolute path to the root of the downloaded tensorflow repository |
| --tflitemicro_path | Absolute path where the generated tensorflow lite micro sources will be placed |
After it runs successfully, it prints `---tensorflow lite micro source file extract successful---` and creates a `Source` folder under the given `tflitemicro_path` containing the generated TensorFlow Lite Micro source files.
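A typical invocation looks like this (both paths are placeholders for your own checkout and output locations):
```
python lib_extra.py --tensorflow_path=/path/to/tensorflow --tflitemicro_path=/path/to/output
```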
## 2.2 Add the source files to a Keil project and build the .lib
Create a new Keil project for the target chip (this example uses an Arm Cortex-M4). Copy the `tensorflow` and `third_party` folders from the `Source` directory into the root of the Keil project, and add all source files under `Source` (both .c and .cc), as shown below:
<div align=center>
<img src="image/lib文件目录.png" width=80% />
</div>
Also select the Arm Compiler version 6 toolchain and disable MicroLIB:
<div align=center>
<img src="image/编译器配置.png" width=80% />
</div>
Set the build target to a library and name it `tensorflow_lite_micro`:
<div align=center>
<img src="image/编译目标为.lib.png" width=80% />
</div>
Finally, configure the relevant preprocessor macros, include paths, and optimization level:
<div align=center>
<img src="image/配置include和优化等级等.png" width=80% />
</div>
After compiling and linking, the .lib library for Arm Cortex-M4 is generated in the `Objects` folder in the project root; tflite_micro libraries for other core types can be produced the same way.

Binary file not shown.

After

Width:  |  Height:  |  Size: 68 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 97 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 60 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 125 KiB

View File

@@ -0,0 +1,47 @@
# -*- coding: utf-8 -*-
import argparse
import os
import shutil

parser = argparse.ArgumentParser(description="Tensorflow Lite Micro Lib gen")
parser.add_argument("--tensorflow_path", default="/home/hyliu/ARM/tensorflow",
                    type=str, help="tensorflow repository root (input)")
parser.add_argument("--tflitemicro_path", default='/home/hyliu/ARM/',
                    type=str, help="tflitemicro output directory")
opt = parser.parse_args()

tflu_lib_path = os.path.join(opt.tflitemicro_path, 'Source')
if not os.path.isdir(tflu_lib_path):
    os.mkdir(tflu_lib_path)

tflite_micro_hello_path = os.path.join(
    opt.tensorflow_path,
    "tensorflow/lite/micro/tools/make/gen/linux_x86_64/prj/hello_world/keil")


def copydirs(from_dir, to_dir):
    """Recursively copy the tree rooted at from_dir into to_dir."""
    if not os.path.exists(to_dir):  # create the destination if it is missing
        os.makedirs(to_dir)
    for name in os.listdir(from_dir):  # files and subdirectories
        src = os.path.join(from_dir, name)
        dst = os.path.join(to_dir, name)
        if os.path.isdir(src):  # recurse into subdirectories
            copydirs(src, dst)
        else:  # copy regular files
            shutil.copy(src, dst)


copydirs(os.path.join(tflite_micro_hello_path, 'tensorflow'),
         os.path.join(tflu_lib_path, 'tensorflow'))
copydirs(os.path.join(tflite_micro_hello_path, 'third_party'),
         os.path.join(tflu_lib_path, 'third_party'))

cmsis = os.path.join(opt.tensorflow_path,
                     "tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS")
tflu_lib_tensorflow = os.path.join(tflu_lib_path, 'tensorflow')

# Copy headers that the generated Keil project is missing.
dsp_include = os.path.join(
    tflu_lib_tensorflow,
    "lite/micro/tools/make/downloads/cmsis/CMSIS/DSP/Include/")
shutil.copy(os.path.join(cmsis, "DSP/Include/arm_helium_utils.h"), dsp_include)
shutil.copy(os.path.join(cmsis, "Core/Include/cmsis_armclang.h"), dsp_include)
shutil.copy(os.path.join(cmsis, "Core/Include/cmsis_compiler.h"), dsp_include)

# Remove benchmarks and examples; they are not part of the library.
shutil.rmtree(os.path.join(tflu_lib_tensorflow, "lite/micro/benchmarks"))
shutil.rmtree(os.path.join(tflu_lib_tensorflow, "lite/micro/examples"))

print("---tensorflow lite micro source file extract successful---")