tflite_micro_person_detection_init
This commit is contained in:
@@ -0,0 +1,241 @@
|
||||
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
|
||||
// An ultra-lightweight testing framework designed for use with microcontroller
|
||||
// applications. Its only dependency is on TensorFlow Lite's ErrorReporter
|
||||
// interface, where log messages are output. This is designed to be usable even
|
||||
// when no standard C or C++ libraries are available, and without any dynamic
|
||||
// memory allocation or reliance on global constructors.
|
||||
//
|
||||
// To build a test, you use syntax similar to gunit, but with some extra
|
||||
// decoration to create a hidden 'main' function containing each of the tests to
|
||||
// be run. Your code should look something like:
|
||||
// ----------------------------------------------------------------------------
|
||||
// #include "path/to/this/header"
|
||||
//
|
||||
// TF_LITE_MICRO_TESTS_BEGIN
|
||||
//
|
||||
// TF_LITE_MICRO_TEST(SomeTest) {
|
||||
// TF_LITE_LOG_EXPECT_EQ(true, true);
|
||||
// }
|
||||
//
|
||||
// TF_LITE_MICRO_TESTS_END
|
||||
// ----------------------------------------------------------------------------
|
||||
// If you compile this for your platform, you'll get a normal binary that you
|
||||
// should be able to run. Executing it will output logging information like this
|
||||
// to stderr (or whatever equivalent is available and written to by
|
||||
// ErrorReporter):
|
||||
// ----------------------------------------------------------------------------
|
||||
// Testing SomeTest
|
||||
// 1/1 tests passed
|
||||
// ~~~ALL TESTS PASSED~~~
|
||||
// ----------------------------------------------------------------------------
|
||||
// This is designed to be human-readable, so you can just run tests manually,
|
||||
// but the string "~~~ALL TESTS PASSED~~~" should only appear if all of the
|
||||
// tests do pass. This makes it possible to integrate with automated test
|
||||
// systems by scanning the output logs and looking for that magic value.
|
||||
//
|
||||
// This framework is intended to be a rudimentary alternative to no testing at
|
||||
// all on systems that struggle to run more conventional approaches, so use with
|
||||
// caution!
|
||||
|
||||
#ifndef TENSORFLOW_LITE_MICRO_TESTING_MICRO_TEST_H_
|
||||
#define TENSORFLOW_LITE_MICRO_TESTING_MICRO_TEST_H_
|
||||
|
||||
#include "tensorflow/lite/c/common.h"
|
||||
#include "tensorflow/lite/micro/micro_error_reporter.h"
|
||||
|
||||
// Bookkeeping shared by all of the test macros below. These are only
// declarations: the single definition of each is emitted by
// TF_LITE_MICRO_TESTS_BEGIN inside the test binary, so no global
// constructors are required.
namespace micro_test {
extern int tests_passed;       // count of tests that finished with no failure
extern int tests_failed;       // count of tests with at least one failed EXPECT
extern bool is_test_complete;  // loop guard used by TF_LITE_MICRO_TEST
extern bool did_test_fail;     // set by the EXPECT/FAIL macros on failure
extern tflite::ErrorReporter* reporter;  // sink for all test log output
}  // namespace micro_test
|
||||
|
||||
// Defines the micro_test globals and opens the hidden 'main' function; each
// TF_LITE_MICRO_TEST placed after this macro becomes a statement inside main.
// This port also brings the board up (HAL, system clock, board peripherals)
// before any test runs.
// NOTE(review): HAL_Init/SystemClock_Config/board_init/printf are assumed to
// be declared by a platform header included by the test file -- confirm.
// NOTE(review): the trailing printf has no '\n'; on buffered stdio the
// message may not appear until a later flush.
#define TF_LITE_MICRO_TESTS_BEGIN              \
  namespace micro_test {                       \
  int tests_passed;                            \
  int tests_failed;                            \
  bool is_test_complete;                       \
  bool did_test_fail;                          \
  tflite::ErrorReporter* reporter;             \
  }                                            \
                                               \
  int main(void) {                             \
    micro_test::tests_passed = 0;              \
    micro_test::tests_failed = 0;              \
    tflite::MicroErrorReporter error_reporter; \
    micro_test::reporter = &error_reporter;    \
    HAL_Init();                                \
    SystemClock_Config();                      \
    board_init();                              \
    printf("Init Successful");
|
||||
|
||||
// Closes the hidden 'main' opened by TF_LITE_MICRO_TESTS_BEGIN: prints the
// pass/fail tally and the magic "~~~ALL TESTS PASSED~~~" string that
// automated harnesses scan for. The final while(1) spins forever so the log
// remains visible on a bare-metal target; main never returns.
#define TF_LITE_MICRO_TESTS_END                                \
    micro_test::reporter->Report(                              \
        "%d/%d tests passed", micro_test::tests_passed,        \
        (micro_test::tests_failed + micro_test::tests_passed)); \
    if (micro_test::tests_failed == 0) {                       \
      micro_test::reporter->Report("~~~ALL TESTS PASSED~~~\n"); \
    } else {                                                   \
      micro_test::reporter->Report("~~~SOME TESTS FAILED~~~\n"); \
    }                                                          \
    while(1);                                                  \
  }
|
||||
|
||||
// TODO(petewarden): I'm going to hell for what I'm doing to this poor for loop.
// Declares a single test. The for loop runs the test body exactly once:
// the init clause clears the per-test flags, the condition admits one
// iteration, and the increment clause (run after the body) marks the test
// complete and tallies it into tests_passed/tests_failed based on
// did_test_fail. This gives gunit-like syntax with no function registration.
#define TF_LITE_MICRO_TEST(name)                                           \
  micro_test::reporter->Report("Testing " #name);                          \
  for (micro_test::is_test_complete = false,                               \
      micro_test::did_test_fail = false;                                   \
       !micro_test::is_test_complete; micro_test::is_test_complete = true, \
      micro_test::tests_passed += (micro_test::did_test_fail) ? 0 : 1,     \
      micro_test::tests_failed += (micro_test::did_test_fail) ? 1 : 0)
|
||||
|
||||
// Fails the current test (without aborting it) when the expression x is
// falsy. x is evaluated exactly once; the do/while(false) wrapper makes the
// macro behave as a single statement.
#define TF_LITE_MICRO_EXPECT(x)                                                \
  do {                                                                         \
    if (!(x)) {                                                                \
      micro_test::reporter->Report(#x " failed at %s:%d", __FILE__, __LINE__); \
      micro_test::did_test_fail = true;                                        \
    }                                                                          \
  } while (false)
|
||||
|
||||
// TODO(b/139142772): this macro is used with types other than ints even though
// the printf specifier is %d.
// Fails the current test when x != y. Each argument is evaluated exactly
// once. The macro arguments are parenthesized when initializing the locals
// so that expressions containing low-precedence operators (e.g. ternaries)
// expand correctly; the original `auto vx = x;` form was unhygienic.
#define TF_LITE_MICRO_EXPECT_EQ(x, y)                                          \
  do {                                                                         \
    auto vx = (x);                                                             \
    auto vy = (y);                                                             \
    if ((vx) != (vy)) {                                                        \
      micro_test::reporter->Report(#x " == " #y " failed at %s:%d (%d vs %d)", \
                                   __FILE__, __LINE__, static_cast<int>(vx),   \
                                   static_cast<int>(vy));                      \
      micro_test::did_test_fail = true;                                        \
    }                                                                          \
  } while (false)
|
||||
|
||||
// Fails the current test when x == y. Arguments are evaluated exactly once.
#define TF_LITE_MICRO_EXPECT_NE(x, y)                                         \
  do {                                                                        \
    if ((x) == (y)) {                                                         \
      micro_test::reporter->Report(#x " != " #y " failed at %s:%d", __FILE__, \
                                   __LINE__);                                 \
      micro_test::did_test_fail = true;                                       \
    }                                                                         \
  } while (false)
|
||||
|
||||
// TODO(wangtz): Making it more generic once needed.
// Fails the current test when |arr1[idx1] - arr2[idx2]| > epsilon. The
// absolute difference is computed by branching on which element is larger,
// so it also works for unsigned element types. Note arr/idx arguments are
// evaluated more than once, so they must be side-effect free. `epsilon` is
// parenthesized in the comparison so that compound expressions (e.g. a
// ternary) expand correctly; the original left it bare.
#define TF_LITE_MICRO_ARRAY_ELEMENT_EXPECT_NEAR(arr1, idx1, arr2, idx2,     \
                                                epsilon)                    \
  do {                                                                      \
    auto delta = ((arr1)[(idx1)] > (arr2)[(idx2)])                          \
                     ? ((arr1)[(idx1)] - (arr2)[(idx2)])                    \
                     : ((arr2)[(idx2)] - (arr1)[(idx1)]);                   \
    if (delta > (epsilon)) {                                                \
      micro_test::reporter->Report(                                         \
          #arr1 "[%d] (%f) near " #arr2 "[%d] (%f) failed at %s:%d",        \
          static_cast<int>(idx1), static_cast<float>((arr1)[(idx1)]),       \
          static_cast<int>(idx2), static_cast<float>((arr2)[(idx2)]),       \
          __FILE__, __LINE__);                                              \
      micro_test::did_test_fail = true;                                     \
    }                                                                       \
  } while (false)
|
||||
|
||||
// Fails the current test when |x - y| > epsilon. x and y are evaluated
// exactly once; the absolute difference is computed by branching, so
// unsigned operands also work. `epsilon` is parenthesized in the comparison
// so that compound expressions expand correctly; the original left it bare.
#define TF_LITE_MICRO_EXPECT_NEAR(x, y, epsilon)                      \
  do {                                                                \
    auto vx = (x);                                                    \
    auto vy = (y);                                                    \
    auto delta = ((vx) > (vy)) ? ((vx) - (vy)) : ((vy) - (vx));       \
    if (delta > (epsilon)) {                                          \
      micro_test::reporter->Report(                                   \
          #x " (%f) near " #y " (%f) failed at %s:%d",                \
          static_cast<double>(vx), static_cast<double>(vy), __FILE__, \
          __LINE__);                                                  \
      micro_test::did_test_fail = true;                               \
    }                                                                 \
  } while (false)
|
||||
|
||||
// Fails the current test unless x > y. Arguments are evaluated exactly once.
#define TF_LITE_MICRO_EXPECT_GT(x, y)                                        \
  do {                                                                       \
    if ((x) <= (y)) {                                                        \
      micro_test::reporter->Report(#x " > " #y " failed at %s:%d", __FILE__, \
                                   __LINE__);                                \
      micro_test::did_test_fail = true;                                      \
    }                                                                        \
  } while (false)
|
||||
|
||||
// Fails the current test unless x < y. Arguments are evaluated exactly once.
#define TF_LITE_MICRO_EXPECT_LT(x, y)                                        \
  do {                                                                       \
    if ((x) >= (y)) {                                                        \
      micro_test::reporter->Report(#x " < " #y " failed at %s:%d", __FILE__, \
                                   __LINE__);                                \
      micro_test::did_test_fail = true;                                      \
    }                                                                        \
  } while (false)
|
||||
|
||||
// Fails the current test unless x >= y. Arguments are evaluated exactly once.
#define TF_LITE_MICRO_EXPECT_GE(x, y)                                         \
  do {                                                                        \
    if ((x) < (y)) {                                                          \
      micro_test::reporter->Report(#x " >= " #y " failed at %s:%d", __FILE__, \
                                   __LINE__);                                 \
      micro_test::did_test_fail = true;                                       \
    }                                                                         \
  } while (false)
|
||||
|
||||
// Fails the current test unless x <= y. Arguments are evaluated exactly once.
#define TF_LITE_MICRO_EXPECT_LE(x, y)                                         \
  do {                                                                        \
    if ((x) > (y)) {                                                          \
      micro_test::reporter->Report(#x " <= " #y " failed at %s:%d", __FILE__, \
                                   __LINE__);                                 \
      micro_test::did_test_fail = true;                                       \
    }                                                                         \
  } while (false)
|
||||
|
||||
// Fails the current test unless x is truthy. Evaluates x exactly once.
#define TF_LITE_MICRO_EXPECT_TRUE(x)                                   \
  do {                                                                 \
    if (!(x)) {                                                        \
      micro_test::reporter->Report(#x " was not true failed at %s:%d", \
                                   __FILE__, __LINE__);                \
      micro_test::did_test_fail = true;                                \
    }                                                                  \
  } while (false)
|
||||
|
||||
// Fails the current test unless x is falsy. Evaluates x exactly once.
#define TF_LITE_MICRO_EXPECT_FALSE(x)                                   \
  do {                                                                  \
    if (x) {                                                            \
      micro_test::reporter->Report(#x " was not false failed at %s:%d", \
                                   __FILE__, __LINE__);                 \
      micro_test::did_test_fail = true;                                 \
    }                                                                   \
  } while (false)
|
||||
|
||||
// Unconditionally fails the current test, logging msg with the location.
// Bug fix: the original format string was "FAIL: %s", so the __FILE__ and
// __LINE__ arguments were passed but never printed; the format now consumes
// all three arguments.
#define TF_LITE_MICRO_FAIL(msg)                                          \
  do {                                                                   \
    micro_test::reporter->Report("FAIL: %s at %s:%d", msg, __FILE__,     \
                                 __LINE__);                              \
    micro_test::did_test_fail = true;                                    \
  } while (false)
|
||||
|
||||
// Fails the current test when two NUL-terminated strings differ.
// Bug fix: the original loop terminated at the end of the *shorter* string,
// so a strict prefix (e.g. "abc" vs "abcdef") incorrectly passed, and a
// multi-character mismatch could be reported several times. We now scan to
// the first differing character (terminators included), which catches
// unequal lengths and reports at most once. The format string also now
// consumes the __FILE__/__LINE__ arguments, which were previously dropped.
#define TF_LITE_MICRO_EXPECT_STRING_EQ(string1, string2)                    \
  do {                                                                      \
    int i = 0;                                                              \
    while ((string1)[i] == (string2)[i] && (string1)[i] != '\0') {          \
      ++i;                                                                  \
    }                                                                       \
    if ((string1)[i] != (string2)[i]) {                                     \
      micro_test::reporter->Report("FAIL: %s did not match %s at %s:%d",    \
                                   string1, string2, __FILE__, __LINE__);   \
      micro_test::did_test_fail = true;                                     \
    }                                                                       \
  } while (false)
|
||||
|
||||
#endif // TENSORFLOW_LITE_MICRO_TESTING_MICRO_TEST_H_
|
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,23 @@
|
||||
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
|
||||
#ifndef TENSORFLOW_LITE_MICRO_TESTING_TEST_CONV_MODEL_H_
#define TENSORFLOW_LITE_MICRO_TESTING_TEST_CONV_MODEL_H_

// See generate_test_models.py for updating the contents of this model:
// Serialized bytes of a small convolution model used by the micro tests;
// the definitions live in a generated source file.
extern const unsigned char kTestConvModelData[];
// Length of kTestConvModelData in bytes.
extern const unsigned int kTestConvModelDataSize;

#endif  // TENSORFLOW_LITE_MICRO_TESTING_TEST_CONV_MODEL_H_
|
@@ -0,0 +1,240 @@
|
||||
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
|
||||
#include "tensorflow/lite/micro/testing/test_utils.h"
|
||||
|
||||
#include "tensorflow/lite/micro/simple_memory_allocator.h"
|
||||
|
||||
namespace tflite {
|
||||
namespace testing {
|
||||
|
||||
namespace {
|
||||
// TODO(b/141330728): Refactor out of test_utils.cc
// The variables below (and the AllocatePersistentBuffer function) are only
// needed for the kernel tests and benchmarks, i.e. where we do not have an
// interpreter object, and the fully featured MicroAllocator.
// Currently, these need to be sufficient for all the kernel_tests. If that
// becomes problematic, we can investigate allowing the arena_size to be
// specified for each call to PopulateContext.
constexpr size_t kArenaSize = 10000;  // bytes of backing store for all tests
uint8_t raw_arena_[kArenaSize];       // arena memory; never freed
// Recreated over raw_arena_ on every PopulateContext call.
SimpleMemoryAllocator* simple_memory_allocator_ = nullptr;
constexpr size_t kBufferAlignment = 16;  // alignment of every allocation

// We store the pointer to the ith scratch buffer to implement the Request/Get
// ScratchBuffer API for the tests. scratch_buffers_[i] will be the ith scratch
// buffer and will still be allocated from within raw_arena_.
constexpr int kNumScratchBuffers = 5;
uint8_t* scratch_buffers_[kNumScratchBuffers];
int scratch_buffer_count_ = 0;  // number of slots handed out so far
|
||||
|
||||
// Note that the context parameter in this function is only needed to match the
// signature of TfLiteContext::AllocatePersistentBuffer and isn't needed in the
// implementation because we are assuming a single global
// simple_memory_allocator_
// Returns `bytes` of 16-byte-aligned memory carved from the arena tail;
// the memory lives for the remainder of the test.
void* AllocatePersistentBuffer(TfLiteContext* context, size_t bytes) {
  TFLITE_DCHECK(simple_memory_allocator_ != nullptr);
  return simple_memory_allocator_->AllocateFromTail(bytes, kBufferAlignment);
}
|
||||
|
||||
// Test implementation of TfLiteContext::RequestScratchBufferInArena.
// Allocates `bytes` from the arena tail, records the pointer so that
// GetScratchBuffer can return it later, and writes the slot index to
// *buffer_index. Returns kTfLiteError once all kNumScratchBuffers slots are
// used, reporting through the ErrorReporter stashed in context->impl_ by
// PopulateContext.
TfLiteStatus RequestScratchBufferInArena(TfLiteContext* context, size_t bytes,
                                         int* buffer_index) {
  TFLITE_DCHECK(simple_memory_allocator_ != nullptr);
  TFLITE_DCHECK(buffer_index != nullptr);

  if (scratch_buffer_count_ == kNumScratchBuffers) {
    TF_LITE_REPORT_ERROR(
        static_cast<ErrorReporter*>(context->impl_),
        "Exceeded the maximum number of scratch tensors allowed (%d).",
        kNumScratchBuffers);
    return kTfLiteError;
  }

  // For tests, we allocate scratch buffers from the tail and keep them around
  // for the lifetime of model. This means that the arena size in the tests will
  // be more than what we would have if the scratch buffers could share memory.
  scratch_buffers_[scratch_buffer_count_] =
      simple_memory_allocator_->AllocateFromTail(bytes, kBufferAlignment);
  TFLITE_DCHECK(scratch_buffers_[scratch_buffer_count_] != nullptr);

  *buffer_index = scratch_buffer_count_++;
  return kTfLiteOk;
}
|
||||
|
||||
// Test implementation of TfLiteContext::GetScratchBuffer. Returns the
// pointer previously handed out by RequestScratchBufferInArena for
// buffer_index, or nullptr for an index that was never allocated. The
// context argument is unused.
void* GetScratchBuffer(TfLiteContext* context, int buffer_index) {
  TFLITE_DCHECK(scratch_buffer_count_ <= kNumScratchBuffers);
  const bool known_buffer = buffer_index < scratch_buffer_count_;
  return known_buffer ? static_cast<void*>(scratch_buffers_[buffer_index])
                      : nullptr;
}
|
||||
|
||||
// Test implementation of TfLiteContext::GetTensor: returns a pointer into
// the caller-provided tensors array (no bounds check is performed).
TfLiteTensor* GetTensor(const struct TfLiteContext* context, int subgraph_idx) {
  // TODO(b/160894903): Return this value from temp allocated memory.
  return &context->tensors[subgraph_idx];
}
|
||||
|
||||
} // namespace
|
||||
|
||||
uint8_t F2Q(float value, float min, float max) {
|
||||
int32_t result = ZeroPointFromMinMax<uint8_t>(min, max) +
|
||||
(value / ScaleFromMinMax<uint8_t>(min, max)) + 0.5f;
|
||||
if (result < std::numeric_limits<uint8_t>::min()) {
|
||||
result = std::numeric_limits<uint8_t>::min();
|
||||
}
|
||||
if (result > std::numeric_limits<uint8_t>::max()) {
|
||||
result = std::numeric_limits<uint8_t>::max();
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
// Converts a float value into a signed eight-bit quantized value.
// Implemented by quantizing to uint8 over the same range and shifting the
// result down by 128 (the int8 minimum), which moves the zero point.
int8_t F2QS(float value, float min, float max) {
  return F2Q(value, min, max) + std::numeric_limits<int8_t>::min();
}
|
||||
|
||||
// Converts a float value into a signed thirty-two-bit quantized value for
// the given scale. Results outside the int32_t range are saturated to the
// nearest limit; the fractional part is truncated toward zero.
int32_t F2Q32(float value, float scale) {
  double quantized = static_cast<double>(value / scale);
  if (quantized > std::numeric_limits<int32_t>::max()) {
    quantized = std::numeric_limits<int32_t>::max();
  } else if (quantized < std::numeric_limits<int32_t>::min()) {
    quantized = std::numeric_limits<int32_t>::min();
  }
  // Bug fix: cast to int32_t (the declared return type) rather than plain
  // int, which is not guaranteed to be 32 bits on every microcontroller
  // toolchain this code targets.
  return static_cast<int32_t>(quantized);
}
|
||||
|
||||
// TODO(b/141330728): Move this method elsewhere as part clean up.
// Fills a TfLiteContext with the test implementations above so kernel tests
// can run without a full interpreter: recreates the arena allocator, resets
// the scratch-buffer bookkeeping, installs the allocation/report callbacks,
// and zeroes out any variable tensors in the provided array.
void PopulateContext(TfLiteTensor* tensors, int tensors_size,
                     ErrorReporter* error_reporter, TfLiteContext* context) {
  simple_memory_allocator_ =
      SimpleMemoryAllocator::Create(error_reporter, raw_arena_, kArenaSize);
  TFLITE_DCHECK(simple_memory_allocator_ != nullptr);
  scratch_buffer_count_ = 0;

  context->tensors_size = tensors_size;
  context->tensors = tensors;
  // Stash the reporter where RequestScratchBufferInArena can retrieve it.
  context->impl_ = static_cast<void*>(error_reporter);
  context->GetExecutionPlan = nullptr;
  context->ResizeTensor = nullptr;
  context->ReportError = ReportOpError;
  context->AddTensors = nullptr;
  context->GetNodeAndRegistration = nullptr;
  context->ReplaceNodeSubsetsWithDelegateKernels = nullptr;
  context->recommended_num_threads = 1;
  context->GetExternalContext = nullptr;
  context->SetExternalContext = nullptr;

  context->GetTensor = GetTensor;
  context->GetEvalTensor = nullptr;

  context->AllocatePersistentBuffer = AllocatePersistentBuffer;
  context->RequestScratchBufferInArena = RequestScratchBufferInArena;
  context->GetScratchBuffer = GetScratchBuffer;

  // Variable tensors start each test from their reset (zeroed) state.
  for (int i = 0; i < tensors_size; ++i) {
    if (context->tensors[i].is_variable) {
      ResetVariableTensor(&context->tensors[i]);
    }
  }
}
|
||||
|
||||
// Wraps pre-quantized uint8 data in a TfLiteTensor, deriving scale and
// zero point from the (min, max) range. Does not take ownership of `data`
// or `dims`.
TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
                                   float min, float max, bool is_variable) {
  TfLiteTensor result;
  result.type = kTfLiteUInt8;
  result.data.uint8 = const_cast<uint8_t*>(data);
  result.dims = dims;
  result.params = {ScaleFromMinMax<uint8_t>(min, max),
                   ZeroPointFromMinMax<uint8_t>(min, max)};
  result.allocation_type = kTfLiteMemNone;
  result.bytes = ElementCount(*dims) * sizeof(uint8_t);
  // Bug fix: honor the caller's is_variable flag. The original hard-coded
  // `false`, silently ignoring the parameter, unlike every other overload.
  result.is_variable = is_variable;
  return result;
}
|
||||
|
||||
// Wraps pre-quantized int8 data in a TfLiteTensor, deriving scale and zero
// point from the (min, max) range. Does not take ownership of `data`/`dims`.
TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
                                   float min, float max, bool is_variable) {
  TfLiteTensor result;
  result.type = kTfLiteInt8;
  result.data.int8 = const_cast<int8_t*>(data);
  result.dims = dims;
  result.params = {ScaleFromMinMax<int8_t>(min, max),
                   ZeroPointFromMinMax<int8_t>(min, max)};
  result.allocation_type = kTfLiteMemNone;
  result.bytes = ElementCount(*dims) * sizeof(int8_t);
  result.is_variable = is_variable;
  return result;
}
|
||||
|
||||
// Symmetrically quantizes float `data` into the caller-provided
// `quantized_data` buffer and wraps it in a uint8 TfLiteTensor. The scale
// is written by SymmetricQuantize; the zero point is fixed at 128.
TfLiteTensor CreateQuantizedTensor(float* data, uint8_t* quantized_data,
                                   TfLiteIntArray* dims, bool is_variable) {
  TfLiteTensor result;
  SymmetricQuantize(data, dims, quantized_data, &result.params.scale);
  result.data.uint8 = quantized_data;
  result.type = kTfLiteUInt8;
  result.dims = dims;
  result.params.zero_point = 128;
  result.allocation_type = kTfLiteMemNone;
  result.bytes = ElementCount(*dims) * sizeof(uint8_t);
  result.is_variable = is_variable;
  return result;
}
|
||||
|
||||
// Symmetrically quantizes float `data` into the caller-provided
// `quantized_data` buffer and wraps it in an int8 TfLiteTensor. The scale
// is written by SignedSymmetricQuantize; the zero point is 0.
TfLiteTensor CreateQuantizedTensor(float* data, int8_t* quantized_data,
                                   TfLiteIntArray* dims, bool is_variable) {
  TfLiteTensor result;
  SignedSymmetricQuantize(data, dims, quantized_data, &result.params.scale);
  result.data.int8 = quantized_data;
  result.type = kTfLiteInt8;
  result.dims = dims;
  result.params.zero_point = 0;
  result.allocation_type = kTfLiteMemNone;
  result.bytes = ElementCount(*dims) * sizeof(int8_t);
  result.is_variable = is_variable;
  return result;
}
|
||||
|
||||
// Symmetrically quantizes float `data` into the caller-provided
// `quantized_data` buffer and wraps it in an int16 TfLiteTensor. The scale
// is written by SignedSymmetricQuantize; the zero point is 0.
TfLiteTensor CreateQuantizedTensor(float* data, int16_t* quantized_data,
                                   TfLiteIntArray* dims, bool is_variable) {
  TfLiteTensor result;
  SignedSymmetricQuantize(data, dims, quantized_data, &result.params.scale);
  result.data.i16 = quantized_data;
  result.type = kTfLiteInt16;
  result.dims = dims;
  result.params.zero_point = 0;
  result.allocation_type = kTfLiteMemNone;
  result.bytes = ElementCount(*dims) * sizeof(int16_t);
  result.is_variable = is_variable;
  return result;
}
|
||||
|
||||
// Wraps pre-quantized int32 data (typically bias values) in a TfLiteTensor
// with the given scale. Does not take ownership of `data` or `dims`.
TfLiteTensor CreateQuantized32Tensor(const int32_t* data, TfLiteIntArray* dims,
                                     float scale, bool is_variable) {
  TfLiteTensor result;
  result.type = kTfLiteInt32;
  result.data.i32 = const_cast<int32_t*>(data);
  result.dims = dims;
  // Quantized int32_t tensors always have a zero point of 0, since the range of
  // int32_t values is large, and because zero point costs extra cycles during
  // processing.
  result.params = {scale, 0};
  result.allocation_type = kTfLiteMemNone;
  result.bytes = ElementCount(*dims) * sizeof(int32_t);
  result.is_variable = is_variable;
  return result;
}
|
||||
|
||||
} // namespace testing
|
||||
} // namespace tflite
|
@@ -0,0 +1,116 @@
|
||||
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#ifndef TENSORFLOW_LITE_MICRO_TESTING_TEST_UTILS_H_
|
||||
#define TENSORFLOW_LITE_MICRO_TESTING_TEST_UTILS_H_
|
||||
|
||||
#include <cmath>
|
||||
#include <cstdint>
|
||||
#include <limits>
|
||||
|
||||
#include "tensorflow/lite/c/common.h"
|
||||
#include "tensorflow/lite/core/api/tensor_utils.h"
|
||||
#include "tensorflow/lite/micro/micro_utils.h"
|
||||
#include "tensorflow/lite/micro/test_helpers.h"
|
||||
#include "tensorflow/lite/micro/testing/micro_test.h"
|
||||
|
||||
namespace tflite {
|
||||
namespace testing {
|
||||
|
||||
// Note: These methods are deprecated, do not use. See b/141332970.
|
||||
|
||||
|
||||
// Derives the real-valued maximum of a quantization range from its zero
// point and scaling factor (i.e. the dequantized value of T's largest
// quantized code).
template <typename T>
inline float MaxFromZeroPointScale(const int zero_point, const float scale) {
  const int quantized_max = std::numeric_limits<T>::max();
  return (quantized_max - zero_point) * scale;
}
|
||||
|
||||
// Derives the real-valued minimum of a quantization range from its zero
// point and scaling factor (i.e. the dequantized value of T's smallest
// quantized code).
template <typename T>
inline float MinFromZeroPointScale(const int zero_point, const float scale) {
  const int quantized_min = std::numeric_limits<T>::min();
  return (quantized_min - zero_point) * scale;
}
|
||||
|
||||
// Derives the quantization scaling factor for type T from a real-valued
// (min, max) range: the real span divided by the number of quantized steps.
// The quantized span is computed in double to avoid integer overflow for
// 32-bit T.
template <typename T>
inline float ScaleFromMinMax(const float min, const float max) {
  const double quantized_span =
      (std::numeric_limits<T>::max() * 1.0) - std::numeric_limits<T>::min();
  return (max - min) / static_cast<float>(quantized_span);
}
|
||||
|
||||
// Derives the quantization zero point from a min and max range.
|
||||
template <typename T>
|
||||
inline int ZeroPointFromMinMax(const float min, const float max) {
|
||||
return static_cast<int>(std::numeric_limits<T>::min()) +
|
||||
static_cast<int>(-min / ScaleFromMinMax<T>(min, max) + 0.5f);
|
||||
}
|
||||
|
||||
// Converts a float value into an unsigned eight-bit quantized value.
uint8_t F2Q(float value, float min, float max);

// Converts a float value into a signed eight-bit quantized value.
int8_t F2QS(const float value, const float min, const float max);

// Converts a float value into a signed thirty-two-bit quantized value. Note
// that values close to max int and min int may see significant error due to
// a lack of floating point granularity for large values.
int32_t F2Q32(const float value, const float scale);

// TODO(b/141330728): Move this method elsewhere as part clean up.
// Installs the test allocator/report callbacks on `context` and resets any
// variable tensors; see the .cc implementation for details.
void PopulateContext(TfLiteTensor* tensors, int tensors_size,
                     ErrorReporter* error_reporter, TfLiteContext* context);

// The CreateQuantizedTensor overloads below wrap caller-owned buffers in a
// TfLiteTensor; none of them take ownership of `data` or `dims`.
TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
                                   float min, float max,
                                   bool is_variable = false);

TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
                                   float min, float max,
                                   bool is_variable = false);

TfLiteTensor CreateQuantizedTensor(float* data, uint8_t* quantized_data,
                                   TfLiteIntArray* dims,
                                   bool is_variable = false);

TfLiteTensor CreateQuantizedTensor(float* data, int8_t* quantized_data,
                                   TfLiteIntArray* dims,
                                   bool is_variable = false);

TfLiteTensor CreateQuantizedTensor(float* data, int16_t* quantized_data,
                                   TfLiteIntArray* dims,
                                   bool is_variable = false);

TfLiteTensor CreateQuantized32Tensor(const int32_t* data, TfLiteIntArray* dims,
                                     float scale, bool is_variable = false);
|
||||
|
||||
template <typename input_type = int32_t,
|
||||
TfLiteType tensor_input_type = kTfLiteInt32>
|
||||
inline TfLiteTensor CreateTensor(const input_type* data, TfLiteIntArray* dims,
|
||||
bool is_variable = false) {
|
||||
TfLiteTensor result;
|
||||
result.type = tensor_input_type;
|
||||
result.data.raw = reinterpret_cast<char*>(const_cast<input_type*>(data));
|
||||
result.dims = dims;
|
||||
result.allocation_type = kTfLiteMemNone;
|
||||
result.bytes = ElementCount(*dims) * sizeof(input_type);
|
||||
result.is_variable = is_variable;
|
||||
return result;
|
||||
}
|
||||
|
||||
} // namespace testing
|
||||
} // namespace tflite
|
||||
|
||||
#endif // TENSORFLOW_LITE_MICRO_TESTING_TEST_UTILS_H_
|
Reference in New Issue
Block a user