/*
 * main_functions.cc
 *
 *  Created on: Feb 27, 2022
 *      Author: pgj
 */

- #include "ImC/ai.h"
- #include "tensorflow/lite/micro/all_ops_resolver.h"
- #include "tensorflow/lite/micro/examples/hello_world/constants.h"
- #include "tensorflow/lite/micro/examples/hello_world/output_handler.h"
- #include "tensorflow/lite/micro/micro_error_reporter.h"
- #include "tensorflow/lite/micro/micro_interpreter.h"
- #include "tensorflow/lite/micro/recording_micro_interpreter.h"
- #include "tensorflow/lite/micro/system_setup.h"
- #include "tensorflow/lite/schema/schema_generated.h"
- #include "stm32l496xx.h"
- #include "ImC/new_model.h"
namespace {
tflite::ErrorReporter* error_reporter = nullptr;
const tflite::Model* model = nullptr;
tflite::MicroInterpreter* interpreter = nullptr;
TfLiteTensor* input = nullptr;
TfLiteTensor* output_tensor = nullptr;
}  // namespace
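
// setup(): maps the model flatbuffer, builds the TFLite Micro interpreter inside
// the caller-provided tensor arena, and returns a pointer to the raw input tensor
// buffer together with its quantization scale and zero point.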
uint8_t* setup(uint8_t* tensor_arena, int kTensorArenaSize, float* scale, int32_t* zero_point) {
  tflite::InitializeTarget();

  model = tflite::GetModel(resnet_quant_tflite);

  static tflite::MicroErrorReporter micro_error_reporter;
  error_reporter = &micro_error_reporter;

  static tflite::AllOpsResolver resolver;
  static tflite::MicroInterpreter static_interpreter(
      model, resolver, tensor_arena, kTensorArenaSize, error_reporter);
  interpreter = &static_interpreter;

  // Allocate the tensor buffers inside the caller-provided arena.
  TfLiteStatus allocate_status = interpreter->AllocateTensors();
  if (allocate_status != kTfLiteOk) {
    TF_LITE_REPORT_ERROR(error_reporter, "AllocateTensors() failed");
    return nullptr;
  }

  input = interpreter->input(0);
  output_tensor = interpreter->output(0);

  // Hand the input quantization parameters back to the caller.
  *scale = input->params.scale;
  *zero_point = input->params.zero_point;

  return input->data.uint8;
}
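
// 120x160 RGB frame defined elsewhere in the firmware (presumably filled by the
// camera capture path); preprocess_input() below, currently disabled, would
// quantize it into the model's int8 input.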
extern uint8_t dst_image[120][160][3];

/*
void preprocess_input() {
  printf("[AI ] input size: %d bytes \r\n", input->bytes);
  uint32_t index = 0;
  float temp = 0.0;
  for (uint32_t i = 0; i < 120; ++i) {
    for (uint32_t j = 0; j < 160; ++j) {
      for (uint32_t k = 0; k < 3; k++) {
        temp = (dst_image[i][j][k] / 255.0) / interpreter->input(0)->params.scale
               + interpreter->input(0)->params.zero_point;
        input->data.int8[index++] = (int8_t) temp;
      }
    }
  }
}
*/
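
// print_output(): takes the argmax over the float output tensor and reports whether
// the "person" class (index 1) won, printing the person-class score either way.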
void print_output() {
  uint32_t max_index = 0;
  float max_value = output_tensor->data.f[0];

  // The output tensor holds floats, so the element count is bytes / sizeof(float).
  for (uint32_t i = 0; i < output_tensor->bytes / sizeof(float); ++i) {
    if (output_tensor->data.f[i] > max_value) {
      max_value = output_tensor->data.f[i];
      max_index = i;
    }
    // printf("[AI ] class %d: %f\r\n", (uint8_t)i, output_tensor->data.f[i]);
  }

  // max_index is unsigned, so only the upper bound can be out of range.
  if (max_index >= 10) {
    max_index = (uint32_t)-1;  // forces the default branch below
  }

  switch (max_index) {
    case 1:
      printf("[AI ] OK Person, %f\r\n", output_tensor->data.f[1]);
      break;
    default:
      printf("[AI ] OK NOT person, %f\r\n", output_tensor->data.f[1]);
      break;
  }
}
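
// perform_inference(): runs one forward pass through the interpreter;
// returns 0 on success and -1 if Invoke() fails.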
int perform_inference() {
  TfLiteStatus invoke_status = interpreter->Invoke();
  if (invoke_status != kTfLiteOk) {
    TF_LITE_REPORT_ERROR(error_reporter, "[AI ] FAILED invoke\n");
    return -1;
  }
  return 0;
}
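
/*
 * Minimal usage sketch from the application side; the arena size, the frame-copy
 * step, and the call order shown here are assumptions, not code from this file.
 *
 *   static uint8_t tensor_arena[200 * 1024];  // assumed size, tune to the model
 *   float scale = 0.0f;
 *   int32_t zero_point = 0;
 *   uint8_t* model_input = setup(tensor_arena, sizeof(tensor_arena), &scale, &zero_point);
 *
 *   // ... quantize one 120x160x3 frame with scale/zero_point and copy it into model_input ...
 *
 *   if (perform_inference() == 0) {
 *     print_output();
 *   }
 */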