删除m55库,完善md文档

This commit is contained in:
QingChuanWS
2021-01-27 01:01:53 +08:00
20 changed files with 23826 additions and 27493 deletions

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -2,6 +2,18 @@
#define _OV2640_H
#include "sys.h"
#include "sccb.h"
//////////////////////////////////////////////////////////////////////////////////
//本程序只供学习使用,未经作者许可,不得用于其它任何用途
//ALIENTEK STM32F407开发板
//OV2640 驱动代码
//正点原子@ALIENTEK
//技术论坛:www.openedv.com
//创建日期:2014/5/14
//版本V1.0
//版权所有,盗版必究。
//Copyright(C) 广州市星翼电子科技有限公司 2014-2024
//All rights reserved
//////////////////////////////////////////////////////////////////////////////////
/*
* picture size
@@ -9,8 +21,8 @@
#define OV2640_PIXEL_WIDTH ((uint16_t)96)
#define OV2640_PIXEL_HEIGHT ((uint16_t)96)
//#define OV2640_PWDN PGout(9) //POWER DOWN<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>ź<EFBFBD>
//#define OV2640_RST PGout(15) //<EFBFBD><EFBFBD>λ<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>ź<EFBFBD>
//#define OV2640_PWDN PGout(9) //POWER DOWN控制信号
//#define OV2640_RST PGout(15) //复位控制信号
void OV2640_PWDN(uint8_t signal);
void OV2640_RST(uint8_t signal);
//////////////////////////////////////////////////////////////////////////////////
@@ -18,7 +30,7 @@ void OV2640_RST(uint8_t signal);
#define OV2640_PID 0X2642
//<EFBFBD><EFBFBD>ѡ<EFBFBD><EFBFBD>DSP<EFBFBD><EFBFBD>ַ(0XFF=0X00)ʱ,OV2640<EFBFBD><EFBFBD>DSP<EFBFBD>Ĵ<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>ַӳ<EFBFBD><EFBFBD><EFBFBD><EFBFBD>
//当选择DSP地址(0XFF=0X00)时,OV2640的DSP寄存器地址映射表
#define OV2640_DSP_R_BYPASS 0x05
#define OV2640_DSP_Qs 0x44
#define OV2640_DSP_CTRL 0x50
@@ -54,7 +66,7 @@ void OV2640_RST(uint8_t signal);
#define OV2640_DSP_P_STATUS 0xFE
#define OV2640_DSP_RA_DLMT 0xFF
//<EFBFBD><EFBFBD>ѡ<EFBFBD>񴫸<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>ַ(0XFF=0X01)ʱ,OV2640<EFBFBD><EFBFBD>DSP<EFBFBD>Ĵ<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>ַӳ<EFBFBD><EFBFBD><EFBFBD><EFBFBD>
//当选择传感器地址(0XFF=0X01)时,OV2640的传感器寄存器地址映射表
#define OV2640_SENSOR_GAIN 0x00
#define OV2640_SENSOR_COM1 0x03
#define OV2640_SENSOR_REG04 0x04

View File

@@ -2,17 +2,26 @@
#define __SCCB_H
#include "sys.h"
#include "gpio.h"
//////////////////////////////////////////////////////////////////////////////////
//本程序参考自网友guanfu_wang代码。
//ALIENTEK STM32F103开发板
//SCCB 驱动代码
//正点原子@ALIENTEK
//技术论坛:www.openedv.com
//创建日期:2015/4/16
//版本V1.0
//////////////////////////////////////////////////////////////////////////////////
//IO<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
#define SCCB_SDA_IN() {GPIOB->MODER&=~(3<<(5*2));GPIOB->MODER|=0<<5*2;} //PD7 <EFBFBD><EFBFBD><EFBFBD><EFBFBD>
#define SCCB_SDA_OUT() {GPIOB->MODER&=~(3<<(5*2));GPIOB->MODER|=1<<5*2;} //PD7 <EFBFBD><EFBFBD><EFBFBD><EFBFBD>
#define SCCB_ID 0X60 //OV2640<EFBFBD><EFBFBD>ID
//IO操作函数
#define SCCB_SDA_IN() {GPIOB->MODER&=~(3<<(5*2));GPIOB->MODER|=0<<5*2;} //PD7 输入
#define SCCB_SDA_OUT() {GPIOB->MODER&=~(3<<(5*2));GPIOB->MODER|=1<<5*2;} //PD7 输出
#define SCCB_ID 0X60 //OV2640ID
//IO<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
#define SCCB_SDA_IN() {GPIOB->MODER&=~(3<<(5*2));GPIOB->MODER|=0<<5*2;} //PD7 <EFBFBD><EFBFBD><EFBFBD><EFBFBD>
#define SCCB_SDA_OUT() {GPIOB->MODER&=~(3<<(5*2));GPIOB->MODER|=1<<5*2;} //PD7 <EFBFBD><EFBFBD><EFBFBD><EFBFBD>
//IO方向设置
#define SCCB_SDA_IN() {GPIOB->MODER&=~(3<<(5*2));GPIOB->MODER|=0<<5*2;} //PD7 输入
#define SCCB_SDA_OUT() {GPIOB->MODER&=~(3<<(5*2));GPIOB->MODER|=1<<5*2;} //PD7 输出
#define SCCB_ID 0X60 //OV2640<EFBFBD><EFBFBD>ID
#define SCCB_ID 0X60 //OV2640ID
void SCCB_Init(void);
void SCCB_Start(void);

View File

@@ -1,3 +1,32 @@
/*****************************************************************************
* | File : LCD_2IN4_Driver.c
* | Author : Waveshare team
* | Function : LCD driver
* | Info :
*----------------
* | This version: V1.0
* | Date : 2020-07-29
* | Info :
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
******************************************************************************/
#include "lcd_2inch4.h"
#include <string.h>
/*******************************************************************************
@@ -50,8 +79,8 @@ void LCD_2IN4_Init(void)
{
LCD_2IN4_Reset();
LCD_2IN4_SetBackLight(500);//<2F>򿪱<EFBFBD><F2BFAAB1><EFBFBD>
HAL_Delay(100);
LCD_2IN4_SetBackLight(500);
HAL_Delay(100);
//************* Start Initial Sequence **********//
LCD_2IN4_Write_Command(0x11); //Sleep out

View File

@@ -6,11 +6,22 @@
#include "usart.h"
#include "sccb.h"
#include "stdio.h"
//<2F><>ʼ<EFBFBD><CABC>OV2640
//<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>Ժ<EFBFBD><><C4AC><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>1600*1200<30>ߴ<EFBFBD><DFB4><EFBFBD>ͼƬ!!
//<EFBFBD><EFBFBD><EFBFBD><EFBFBD>ֵ:0,<2C>ɹ<EFBFBD>
// <20><><EFBFBD><EFBFBD>,<2C><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
//////////////////////////////////////////////////////////////////////////////////
//本程序只供学习使用,未经作者许可,不得用于其它任何用途
//ALIENTEK STM32F103开发板
//OV2640 驱动代码
//正点原子@ALIENTEK
//技术论坛:www.openedv.com
//创建日期:2015/4/16
//版本V1.0
//版权所有,盗版必究。
//Copyright(C) 广州市星翼电子科技有限公司 2014-2024
//All rights reserved
//////////////////////////////////////////////////////////////////////////////////
//初始化OV2640
//配置完以后,默认输出是1600*1200尺寸的图片!!
//返回值:0,成功
// 其他,错误代码
void OV2640_PWDN(uint8_t signal)
{
HAL_GPIO_WritePin(GPIOB, GPIO_PIN_15, (GPIO_PinState)signal);
@@ -25,38 +36,38 @@ uint8_t OV2640_Init(void)
{
uint16_t i=0;
uint16_t reg;
//<EFBFBD><EFBFBD><EFBFBD><EFBFBD>IO
//设置IO
GPIO_InitTypeDef GPIO_InitStructure;
__HAL_RCC_GPIOG_CLK_ENABLE(); //ʹ<EFBFBD><EFBFBD>GPIOBʱ<EFBFBD><EFBFBD>
__HAL_RCC_GPIOG_CLK_ENABLE(); //使能GPIOB时钟
HAL_GPIO_WritePin(GPIOB, GPIO_PIN_13|GPIO_PIN_15, GPIO_PIN_RESET);
//GPIOF9,F10<EFBFBD><EFBFBD>ʼ<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
//GPIOF9,F10初始化设置
GPIO_InitStructure.Pin = GPIO_PIN_13|GPIO_PIN_15;
GPIO_InitStructure.Mode = GPIO_MODE_OUTPUT_PP;
GPIO_InitStructure.Speed = GPIO_SPEED_FAST;
GPIO_InitStructure.Pull = GPIO_PULLUP;//<EFBFBD><EFBFBD><EFBFBD><EFBFBD>
HAL_GPIO_Init(GPIOB, &GPIO_InitStructure);//<EFBFBD><EFBFBD>ʼ<EFBFBD>
GPIO_InitStructure.Pull = GPIO_PULLUP;//上拉
HAL_GPIO_Init(GPIOB, &GPIO_InitStructure);//初始<EFBFBD>
OV2640_PWDN(0); //POWER ON
delay_ms(10);
OV2640_RST(0); //<EFBFBD><EFBFBD>λOV2640
OV2640_RST(0); //复位OV2640
delay_ms(10);
OV2640_RST(1); //<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>λ
SCCB_Init(); //<EFBFBD><EFBFBD>ʼ<EFBFBD><EFBFBD>SCCB <20><>IO<49><4F>
SCCB_WR_Reg(OV2640_DSP_RA_DLMT, 0x01); //<EFBFBD><EFBFBD><EFBFBD><EFBFBD>sensor<EFBFBD>Ĵ<EFBFBD><EFBFBD><EFBFBD>
SCCB_WR_Reg(OV2640_SENSOR_COM7, 0x80); //<EFBFBD><EFBFBD><EFBFBD><EFBFBD>λOV2640
OV2640_RST(1); //结束复位
SCCB_Init(); //初始化SCCB 的IO口
SCCB_WR_Reg(OV2640_DSP_RA_DLMT, 0x01); //操作sensor寄存器
SCCB_WR_Reg(OV2640_SENSOR_COM7, 0x80); //软复位OV2640
delay_ms(50);
reg=SCCB_RD_Reg(OV2640_SENSOR_MIDH); //<EFBFBD><EFBFBD>ȡ<EFBFBD><EFBFBD><EFBFBD><EFBFBD>ID <20>߰<EFBFBD>λ
reg=SCCB_RD_Reg(OV2640_SENSOR_MIDH); //读取厂家ID 高八位
reg<<=8;
reg|=SCCB_RD_Reg(OV2640_SENSOR_MIDL); //<EFBFBD><EFBFBD>ȡ<EFBFBD><EFBFBD><EFBFBD><EFBFBD>ID <20>Ͱ<EFBFBD>λ
reg|=SCCB_RD_Reg(OV2640_SENSOR_MIDL); //读取厂家ID 低八位
printf("OV2640_MID = %#X\n" , reg);
if(reg!=OV2640_MID)
{
printf("MID:%d\r\n",reg);
return 1;
}
reg=SCCB_RD_Reg(OV2640_SENSOR_PIDH); //<EFBFBD><EFBFBD>ȡ<EFBFBD><EFBFBD><EFBFBD><EFBFBD>ID <20>߰<EFBFBD>λ
reg=SCCB_RD_Reg(OV2640_SENSOR_PIDH); //读取厂家ID 高八位
reg<<=8;
reg|=SCCB_RD_Reg(OV2640_SENSOR_PIDL); //<EFBFBD><EFBFBD>ȡ<EFBFBD><EFBFBD><EFBFBD><EFBFBD>ID <20>Ͱ<EFBFBD>λ
reg|=SCCB_RD_Reg(OV2640_SENSOR_PIDL); //读取厂家ID 低八位
if(reg!=OV2640_PID)
{
printf("HID:%d\r\n",reg);
@@ -69,34 +80,34 @@ uint8_t OV2640_Init(void)
printf("OV2640_init SUCCESS\n");
return 0x00; //ok
}
//OV2640<EFBFBD>л<EFBFBD>ΪJPEGģʽ
//OV2640切换为JPEG模式
void OV2640_JPEG_Mode(void)
{
uint16_t i=0;
//<EFBFBD><EFBFBD><EFBFBD><EFBFBD>:YUV422<EFBFBD><EFBFBD>ʽ
//设置:YUV422格式
for(i=0;i<(sizeof(ov2640_yuv422_reg_tbl)/2);i++)
{
SCCB_WR_Reg(ov2640_yuv422_reg_tbl[i][0],ov2640_yuv422_reg_tbl[i][1]);
}
//<EFBFBD><EFBFBD><EFBFBD><EFBFBD>:<3A><><EFBFBD><EFBFBD>JPEG<45><47><EFBFBD><EFBFBD>
//设置:输出JPEG数据
for(i=0;i<(sizeof(ov2640_jpeg_reg_tbl)/2);i++)
{
SCCB_WR_Reg(ov2640_jpeg_reg_tbl[i][0],ov2640_jpeg_reg_tbl[i][1]);
}
}
//OV2640<EFBFBD>л<EFBFBD>ΪRGB565ģʽ
//OV2640切换为RGB565模式
void OV2640_RGB565_Mode(void)
{
uint16_t i=0;
//<EFBFBD><EFBFBD><EFBFBD><EFBFBD>:RGB565<EFBFBD><EFBFBD><EFBFBD><EFBFBD>
//设置:RGB565输出
for(i=0;i<(sizeof(ov2640_rgb565_reg_tbl)/2);i++)
{
SCCB_WR_Reg(ov2640_rgb565_reg_tbl[i][0],ov2640_rgb565_reg_tbl[i][1]);
}
printf("OV2640_RGB565 SET!\n");
}
//<EFBFBD>Զ<EFBFBD><EFBFBD>ع<EFBFBD><EFBFBD><EFBFBD><EFBFBD>ò<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><><D6A7>5<EFBFBD><35><EFBFBD>ȼ<EFBFBD>
//自动曝光设置参数表,支持5个等级
const static uint8_t OV2640_AUTOEXPOSURE_LEVEL[5][8]=
{
{
@@ -130,7 +141,7 @@ const static uint8_t OV2640_AUTOEXPOSURE_LEVEL[5][8]=
0x26,0x92,
},
};
//OV2640<EFBFBD>Զ<EFBFBD><EFBFBD>ع<EFBFBD><EFBFBD>ȼ<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
//OV2640自动曝光等级设置
//level:0~4
void OV2640_Auto_Exposure(uint8_t level)
{
@@ -141,12 +152,12 @@ void OV2640_Auto_Exposure(uint8_t level)
SCCB_WR_Reg(p[i*2],p[i*2+1]);
}
}
//<EFBFBD><EFBFBD>ƽ<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
//0:<EFBFBD>Զ<EFBFBD>
//1:̫<EFBFBD><EFBFBD>sunny
//2,<EFBFBD><EFBFBD><EFBFBD><EFBFBD>cloudy
//3,<EFBFBD><EFBFBD><EFBFBD>office
//4,<EFBFBD><EFBFBD><EFBFBD><EFBFBD>home
//白平衡设置
//0:自动
//1:太阳sunny
//2,阴天cloudy
//3,办公室office
//4,家里home
void OV2640_Light_Mode(uint8_t mode)
{
uint8_t regccval=0X5E;//Sunny
@@ -180,7 +191,7 @@ void OV2640_Light_Mode(uint8_t mode)
SCCB_WR_Reg(0XCD,regcdval);
SCCB_WR_Reg(0XCE,regceval);
}
//ɫ<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
//色度设置
//0:-2
//1:-1
//2,0
@@ -196,7 +207,7 @@ void OV2640_Color_Saturation(uint8_t sat)
SCCB_WR_Reg(0X7D,reg7dval);
SCCB_WR_Reg(0X7D,reg7dval);
}
//<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
//亮度设置
//0:(0X00)-2
//1:(0X10)-1
//2,(0X20) 0
@@ -211,7 +222,7 @@ void OV2640_Brightness(uint8_t bright)
SCCB_WR_Reg(0x7d, bright<<4);
SCCB_WR_Reg(0x7d, 0x00);
}
//<EFBFBD>Աȶ<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
//对比度设置
//0:-2
//1:-1
//2,0
@@ -219,7 +230,7 @@ void OV2640_Brightness(uint8_t bright)
//4,+2
void OV2640_Contrast(uint8_t contrast)
{
uint8_t reg7d0val=0X20;//Ĭ<EFBFBD><EFBFBD>Ϊ<EFBFBD><EFBFBD>ͨģʽ
uint8_t reg7d0val=0X20;//默认为普通模式
uint8_t reg7d1val=0X20;
switch(contrast)
{
@@ -249,43 +260,43 @@ void OV2640_Contrast(uint8_t contrast)
SCCB_WR_Reg(0x7d,reg7d1val);
SCCB_WR_Reg(0x7d,0x06);
}
//<EFBFBD><EFBFBD>Ч<EFBFBD><EFBFBD><EFBFBD><EFBFBD>
//0:<EFBFBD><EFBFBD>ͨģʽ
//1,<EFBFBD><EFBFBD>Ƭ
//2,<EFBFBD>ڰ<EFBFBD>
//3,ƫ<EFBFBD><EFBFBD>ɫ
//4,ƫ<EFBFBD><EFBFBD>ɫ
//5,ƫ<EFBFBD><EFBFBD>ɫ
//6,<EFBFBD><EFBFBD><EFBFBD><EFBFBD>
//特效设置
//0:普通模式
//1,负片
//2,黑白
//3,偏红色
//4,偏绿色
//5,偏蓝色
//6,复古
void OV2640_Special_Effects(uint8_t eft)
{
uint8_t reg7d0val=0X00;//Ĭ<EFBFBD><EFBFBD>Ϊ<EFBFBD><EFBFBD>ͨģʽ
uint8_t reg7d0val=0X00;//默认为普通模式
uint8_t reg7d1val=0X80;
uint8_t reg7d2val=0X80;
switch(eft)
{
case 1://<EFBFBD><EFBFBD>Ƭ
case 1://负片
reg7d0val=0X40;
break;
case 2://<EFBFBD>ڰ<EFBFBD>
case 2://黑白
reg7d0val=0X18;
break;
case 3://ƫ<EFBFBD><EFBFBD>ɫ
case 3://偏红色
reg7d0val=0X18;
reg7d1val=0X40;
reg7d2val=0XC0;
break;
case 4://ƫ<EFBFBD><EFBFBD>ɫ
case 4://偏绿色
reg7d0val=0X18;
reg7d1val=0X40;
reg7d2val=0X40;
break;
case 5://ƫ<EFBFBD><EFBFBD>ɫ
case 5://偏蓝色
reg7d0val=0X18;
reg7d1val=0XA0;
reg7d2val=0X40;
break;
case 6://<EFBFBD><EFBFBD><EFBFBD><EFBFBD>
case 6://复古
reg7d0val=0X18;
reg7d1val=0X40;
reg7d2val=0XA6;
@@ -298,9 +309,9 @@ void OV2640_Special_Effects(uint8_t eft)
SCCB_WR_Reg(0x7d,reg7d1val);
SCCB_WR_Reg(0x7d,reg7d2val);
}
//<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
//sw:0,<EFBFBD>رղ<EFBFBD><EFBFBD><EFBFBD>
// 1,<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><><D7A2>OV2640<34>IJ<EFBFBD><C4B2><EFBFBD><EFBFBD>ǵ<EFBFBD><C7B5><EFBFBD><EFBFBD><EFBFBD>ͼ<EFBFBD><CDBC><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>)
//彩条测试
//sw:0,关闭彩条
// 1,开启彩条(注意OV2640的彩条是叠加在图像上面的)
void OV2640_Color_Bar(uint8_t sw)
{
uint8_t reg;
@@ -310,9 +321,9 @@ void OV2640_Color_Bar(uint8_t sw)
if(sw)reg|=1<<1;
SCCB_WR_Reg(0X12,reg);
}
//<EFBFBD><EFBFBD><EFBFBD><EFBFBD>ͼ<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
//sx,sy,<EFBFBD><EFBFBD>ʼ<EFBFBD><EFBFBD>ַ
//width,height:<EFBFBD><EFBFBD><EFBFBD><EFBFBD>(<28><>Ӧ:horizontal)<EFBFBD>͸߶<EFBFBD>(<28><>Ӧ:vertical)
//设置图像输出窗口
//sx,sy,起始地址
//width,height:宽度(对应:horizontal)和高度(对应:vertical)
void OV2640_Window_Set(uint16_t sx,uint16_t sy,uint16_t width,uint16_t height)
{
uint16_t endx;
@@ -322,25 +333,25 @@ void OV2640_Window_Set(uint16_t sx,uint16_t sy,uint16_t width,uint16_t height)
endy=sy+height/2;
SCCB_WR_Reg(0XFF,0X01);
temp=SCCB_RD_Reg(0X03); //<EFBFBD><EFBFBD>ȡVref֮ǰ<EFBFBD><EFBFBD>ֵ
temp=SCCB_RD_Reg(0X03); //读取Vref之前的值
temp&=0XF0;
temp|=((endy&0X03)<<2)|(sy&0X03);
SCCB_WR_Reg(0X03,temp); //<EFBFBD><EFBFBD><EFBFBD><EFBFBD>Vref<EFBFBD><EFBFBD>start<EFBFBD><EFBFBD>end<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
SCCB_WR_Reg(0X19,sy>>2); //<EFBFBD><EFBFBD><EFBFBD><EFBFBD>Vref<EFBFBD><EFBFBD>start<EFBFBD><EFBFBD>
SCCB_WR_Reg(0X1A,endy>>2); //<EFBFBD><EFBFBD><EFBFBD><EFBFBD>Vref<EFBFBD><EFBFBD>end<EFBFBD>ĸ<EFBFBD>
SCCB_WR_Reg(0X03,temp); //设置Vrefstartend的最低2位
SCCB_WR_Reg(0X19,sy>>2); //设置Vrefstart高8位
SCCB_WR_Reg(0X1A,endy>>2); //设置Vrefend的高8位
temp=SCCB_RD_Reg(0X32); //<EFBFBD><EFBFBD>ȡHref֮ǰ<EFBFBD><EFBFBD>ֵ
temp=SCCB_RD_Reg(0X32); //读取Href之前的值
temp&=0XC0;
temp|=((endx&0X07)<<3)|(sx&0X07);
SCCB_WR_Reg(0X32,temp); //<EFBFBD><EFBFBD><EFBFBD><EFBFBD>Href<EFBFBD><EFBFBD>start<EFBFBD><EFBFBD>end<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
SCCB_WR_Reg(0X17,sx>>3); //<EFBFBD><EFBFBD><EFBFBD><EFBFBD>Href<EFBFBD><EFBFBD>start<EFBFBD><EFBFBD>
SCCB_WR_Reg(0X18,endx>>3); //<EFBFBD><EFBFBD><EFBFBD><EFBFBD>Href<EFBFBD><EFBFBD>end<EFBFBD>ĸ<EFBFBD>
SCCB_WR_Reg(0X32,temp); //设置Hrefstartend的最低3位
SCCB_WR_Reg(0X17,sx>>3); //设置Hrefstart高8位
SCCB_WR_Reg(0X18,endx>>3); //设置Hrefend的高8位
}
//<EFBFBD><EFBFBD><EFBFBD><EFBFBD>ͼ<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>С
//OV2640<EFBFBD><EFBFBD><EFBFBD><EFBFBD>ͼ<EFBFBD><EFBFBD><EFBFBD>Ĵ<EFBFBD>С(<28>ֱ<EFBFBD><D6B1><EFBFBD>),<2C><>ȫ<EFBFBD>ɸĺ<C9B8><C4BA><EFBFBD>ȷ<EFBFBD><C8B7>
//width,height:<EFBFBD><EFBFBD><EFBFBD><EFBFBD>(<28><>Ӧ:horizontal)<EFBFBD>͸߶<EFBFBD>(<28><>Ӧ:vertical),width<EFBFBD><EFBFBD>height<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>4<EFBFBD>ı<EFBFBD><EFBFBD><EFBFBD>
//<EFBFBD><EFBFBD><EFBFBD><EFBFBD>ֵ:0,<2C><><EFBFBD>óɹ<C3B3>
// <EFBFBD><EFBFBD><EFBFBD><EFBFBD>,<2C><><EFBFBD><EFBFBD>ʧ<EFBFBD><CAA7>
//设置图像输出大小
//OV2640输出图像的大小(分辨率),完全由改函数确定
//width,height:宽度(对应:horizontal)和高度(对应:vertical),width和height必须是4的倍数
//返回值:0,设置成功
// 其他,设置失败
uint8_t OV2640_OutSize_Set(uint16_t width,uint16_t height)
{
uint16_t outh;
@@ -352,23 +363,23 @@ uint8_t OV2640_OutSize_Set(uint16_t width,uint16_t height)
outh=height/4;
SCCB_WR_Reg(0XFF,0X00);
SCCB_WR_Reg(0XE0,0X04);
SCCB_WR_Reg(0X5A,outw&0XFF); //<EFBFBD><EFBFBD><EFBFBD><EFBFBD>OUTW<EFBFBD>ĵͰ<EFBFBD>λ
SCCB_WR_Reg(0X5B,outh&0XFF); //<EFBFBD><EFBFBD><EFBFBD><EFBFBD>OUTH<EFBFBD>ĵͰ<EFBFBD>λ
SCCB_WR_Reg(0X5A,outw&0XFF); //设置OUTW的低八位
SCCB_WR_Reg(0X5B,outh&0XFF); //设置OUTH的低八位
temp=(outw>>8)&0X03;
temp|=(outh>>6)&0X04;
SCCB_WR_Reg(0X5C,temp); //<EFBFBD><EFBFBD><EFBFBD><EFBFBD>OUTH/OUTW<EFBFBD>ĸ<EFBFBD>λ
SCCB_WR_Reg(0X5C,temp); //设置OUTH/OUTW的高位
SCCB_WR_Reg(0XE0,0X00);
return 0;
}
//<EFBFBD><EFBFBD><EFBFBD><EFBFBD>ͼ<EFBFBD>񿪴<EFBFBD><EFBFBD><EFBFBD>С
//<EFBFBD><EFBFBD>:OV2640_ImageSize_Setȷ<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>ֱ<EFBFBD><EFBFBD>ʴӴ<EFBFBD>С.
//<EFBFBD>ú<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>Χ<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>п<EFBFBD><EFBFBD><EFBFBD>,<2C><><EFBFBD><EFBFBD>OV2640_OutSize_Set<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
//ע<EFBFBD><EFBFBD>:<3A><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>Ŀ<EFBFBD><C4BF>Ⱥ͸߶<CDB8>,<2C><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>ڵ<EFBFBD><DAB5><EFBFBD>OV2640_OutSize_Set<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>Ŀ<EFBFBD><EFBFBD>Ⱥ͸߶<EFBFBD>
// OV2640_OutSize_Set<EFBFBD><EFBFBD><EFBFBD>õĿ<EFBFBD><EFBFBD>Ⱥ͸߶<EFBFBD>,<2C><><EFBFBD>ݱ<EFBFBD><DDB1><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>õĿ<C3B5><C4BF>Ⱥ͸߶<CDB8>,<2C><>DSP
// <EFBFBD>Զ<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>ű<EFBFBD><EFBFBD><EFBFBD>,<2C><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>ⲿ<EFBFBD>.
//width,height:<EFBFBD><EFBFBD><EFBFBD><EFBFBD>(<28><>Ӧ:horizontal)<EFBFBD>͸߶<EFBFBD>(<28><>Ӧ:vertical),width<EFBFBD><EFBFBD>height<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>4<EFBFBD>ı<EFBFBD><EFBFBD><EFBFBD>
//<EFBFBD><EFBFBD><EFBFBD><EFBFBD>ֵ:0,<2C><><EFBFBD>óɹ<C3B3>
// <EFBFBD><EFBFBD><EFBFBD><EFBFBD>,<2C><><EFBFBD><EFBFBD>ʧ<EFBFBD><CAA7>
//设置图像开窗大小
//由OV2640_ImageSize_Set确定传感器输出分辨率的大小.
//该函数则在这个范围上面进行开窗,用于OV2640_OutSize_Set的输出
//注意:本函数的宽度和高度,必须大于等于OV2640_OutSize_Set函数的宽度和高度
// OV2640_OutSize_Set设置的宽度和高度,根据本函数设置的宽度和高度,由DSP
// 自动计算缩放比例,输出给外部设备.
//width,height:宽度(对应:horizontal)和高度(对应:vertical),width和height必须是4的倍数
//返回值:0,设置成功
// 其他,设置失败
uint8_t OV2640_ImageWin_Set(uint16_t offx,uint16_t offy,uint16_t width,uint16_t height)
{
uint16_t hsize;
@@ -380,31 +391,31 @@ uint8_t OV2640_ImageWin_Set(uint16_t offx,uint16_t offy,uint16_t width,uint16_t
vsize=height/4;
SCCB_WR_Reg(0XFF,0X00);
SCCB_WR_Reg(0XE0,0X04);
SCCB_WR_Reg(0X51,hsize&0XFF); //<EFBFBD><EFBFBD><EFBFBD><EFBFBD>H_SIZE<EFBFBD>ĵͰ<EFBFBD>λ
SCCB_WR_Reg(0X52,vsize&0XFF); //<EFBFBD><EFBFBD><EFBFBD><EFBFBD>V_SIZE<EFBFBD>ĵͰ<EFBFBD>λ
SCCB_WR_Reg(0X53,offx&0XFF); //<EFBFBD><EFBFBD><EFBFBD><EFBFBD>offx<EFBFBD>ĵͰ<EFBFBD>λ
SCCB_WR_Reg(0X54,offy&0XFF); //<EFBFBD><EFBFBD><EFBFBD><EFBFBD>offy<EFBFBD>ĵͰ<EFBFBD>λ
SCCB_WR_Reg(0X51,hsize&0XFF); //设置H_SIZE的低八位
SCCB_WR_Reg(0X52,vsize&0XFF); //设置V_SIZE的低八位
SCCB_WR_Reg(0X53,offx&0XFF); //设置offx的低八位
SCCB_WR_Reg(0X54,offy&0XFF); //设置offy的低八位
temp=(vsize>>1)&0X80;
temp|=(offy>>4)&0X70;
temp|=(hsize>>5)&0X08;
temp|=(offx>>8)&0X07;
SCCB_WR_Reg(0X55,temp); //<EFBFBD><EFBFBD><EFBFBD><EFBFBD>H_SIZE/V_SIZE/OFFX,OFFY<EFBFBD>ĸ<EFBFBD>λ
SCCB_WR_Reg(0X57,(hsize>>2)&0X80); //<EFBFBD><EFBFBD><EFBFBD><EFBFBD>H_SIZE/V_SIZE/OFFX,OFFY<EFBFBD>ĸ<EFBFBD>λ
SCCB_WR_Reg(0X55,temp); //设置H_SIZE/V_SIZE/OFFX,OFFY的高位
SCCB_WR_Reg(0X57,(hsize>>2)&0X80); //设置H_SIZE/V_SIZE/OFFX,OFFY的高位
SCCB_WR_Reg(0XE0,0X00);
return 0;
}
//<EFBFBD>ú<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>ͼ<EFBFBD><EFBFBD><EFBFBD>ߴ<EFBFBD><EFBFBD><EFBFBD>С<><D2B2><EFBFBD><EFBFBD><EFBFBD><EFBFBD>ѡ<EFBFBD><D1A1>ʽ<EFBFBD><CABD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>ֱ<EFBFBD><D6B1><EFBFBD>
//该函数设置图像尺寸大小,也就是所选格式的输出分辨率
//UXGA:1600*1200,SVGA:800*600,CIF:352*288
//width,height:ͼ<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>Ⱥ<EFBFBD>ͼ<EFBFBD><EFBFBD><EFBFBD>߶<EFBFBD>
//<EFBFBD><EFBFBD><EFBFBD><EFBFBD>ֵ:0,<2C><><EFBFBD>óɹ<C3B3>
// <EFBFBD><EFBFBD><EFBFBD><EFBFBD>,<2C><><EFBFBD><EFBFBD>ʧ<EFBFBD><CAA7>
//width,height:图像宽度和图像高度
//返回值:0,设置成功
// 其他,设置失败
uint8_t OV2640_ImageSize_Set(uint16_t width,uint16_t height)
{
uint8_t temp;
SCCB_WR_Reg(0XFF,0X00);
SCCB_WR_Reg(0XE0,0X04);
SCCB_WR_Reg(0XC0,(width)>>3&0XFF); //<EFBFBD><EFBFBD><EFBFBD><EFBFBD>HSIZE<EFBFBD><EFBFBD>10:3λ
SCCB_WR_Reg(0XC1,(height)>>3&0XFF); //<EFBFBD><EFBFBD><EFBFBD><EFBFBD>VSIZE<EFBFBD><EFBFBD>10:3λ
SCCB_WR_Reg(0XC0,(width)>>3&0XFF); //设置HSIZE10:3
SCCB_WR_Reg(0XC1,(height)>>3&0XFF); //设置VSIZE10:3
temp=(width&0X07)<<3;
temp|=height&0X07;
temp|=(width>>4)&0X80;

View File

@@ -2,11 +2,11 @@
**作者:**
Github ID: [Derekduke](https://github.com/Derekduke) E-mail: dkeji627@gmail.com
Github: [Derekduke](https://github.com/Derekduke) E-mail: dkeji627@gmail.com
Github ID: [QingChuanWS](https://github.com/QingChuanWS) E-mail: bingshan45@163.com
Github: [QingChuanWS](https://github.com/QingChuanWS) E-mail: bingshan45@163.com
Github ID: [yangqings](https://github.com/yangqings) E-mail: yangqingsheng12@outlook.com
Github: [yangqings](https://github.com/yangqings) E-mail: yangqingsheng12@outlook.com
## 概述
@@ -50,7 +50,7 @@ Github ID: [yangqings](https://github.com/yangqings) E-mail: yangqingsheng12@ou
有三种方式获取tflite_micro
1. 从TencentOS tiny 代码仓库 `components\ai\tflite_micro`目录获取;
2. 以lib文件的形式使用tflite_micro组件lib文件`TencentOS-tiny\components\ai\tflite_micro`的ARM_CortexM4_lib、ARM_CortexM7_lib和ARM_CortexM55_lib文件夹
2. 以lib文件的形式使用tflite_micro组件lib文件`TencentOS-tiny\components\ai\tflite_micro`的ARM_CortexM4_lib、ARM_CortexM7_lib和ARM_CortexM55_lib文件夹
3. 从Tensorflow代码仓库获取TFlite_Micro的源码已经开源github仓库地址为https://github.com/tensorflow/tensorflow 可根据google TFLite Micro官方教程获得Tensorflow Lite Micro的全部源码。
如果没有tflite_micro开发经验建议以**第一种**或者**第二种**方式获取tflite_micro希望自行获取最新源码或者编译lib文件请参考`TencentOS-tiny\components\tflite_micro`目录的TFlite_Micro_Component_User_Guide.md文档本指南将直接使用TencentOS tiny 代码仓库内的tflite_micro组件。
@@ -61,16 +61,17 @@ Github ID: [yangqings](https://github.com/yangqings) E-mail: yangqingsheng12@ou
以下是整个例程的目录规划:
| 一级目录 | 二级目录 | 三级目录 | 说明 |
| :-------: | :--------------------------: | :----------: | :----------------------------------------------------------: |
| arch | arm | | TencentOS tiny适配的IP核架构含M核中断、调度、tick相关代码 |
| board | NUCLEO_STM32L496ZG | | 移植目标芯片的工程文件 |
| | | BSP | 板级支持包外设驱动代码在Hardware目录 |
| component | ai | tflite_micro | tflite_micro源码 |
| examples | tflitemicro_person_detection | | 行人检测demo示例 |
| kernel | core | | TencentOS tiny内核源码 |
| | pm | | TencentOS tiny低功耗模块源码 |
| osal | cmsis_os | | TencentOS tiny提供的cmsis os 适配 |
| 一级目录 | 二级目录 | 三级目录 | 说明 |
| :-------: | :--------------------------: | :-------------------: | :----------------------------------------------------------: |
| arch | arm | | TencentOS tiny适配的IP核架构含M核中断、调度、tick相关代码 |
| board | NUCLEO_STM32L496ZG | | 移植目标芯片的工程文件 |
| | | BSP | 板级支持包外设驱动代码在Hardware目录 |
| component | ai | tflite_micro | tflite_micro源码及有关库文件 |
| examples | tflitemicro_person_detection | | 行人检测demo示例 |
| | | tflu_person_detection | 行人检测实例代码 |
| kernel | core | | TencentOS tiny内核源码 |
| | pm | | TencentOS tiny低功耗模块源码 |
| osal | cmsis_os | | TencentOS tiny提供的cmsis os 适配 |
完成TencentOS tiny基础keil工程准备工作后在这个keil工程的基础上继续添加外设驱动代码。
@@ -191,7 +192,7 @@ void task1(void *arg)
### 1. tflite_micro组件加入到keil工程
由于 NUCLEO-L496ZG 芯片中的内核为 ARM Cortex M4所以本次我们可以直接使用 ARM Cortex M4 版本的tensorflow_lite_micro.lib 库来简化 tflite_micro 搭建流程。
由于NUCLEO-L496ZG芯片中的内核为ARM Cortex M4所以本次我们可以直接使用ARM Cortex M4版本的tensorflow_lite_micro.lib库来简化tflite_micro搭建流程。
#### 1.1 在project中加入新的文件夹tensorflow
@@ -209,9 +210,9 @@ void task1(void *arg)
其中retarget.c的路径为`TencentOS-tiny\components\ai\tflite_micro\KEIL\retarget.c`
tensorflow_lite_micro.lib的路径为`TencentOS-tiny\components\ai\tflite_micro\ARM_CortexM4_lib\tensorflow_lite_micro.lib`
tensorflow_lite_micro.lib的路径为`TencentOS-tiny\components\ai\tflite_micro\ARM_CortexM4_lib\tensorflow_lite_micro.lib`
其余.cc文件均在当前目录下的`tflu_person_detection`文件夹中。
其余.cc文件和.h均在`examples\tflu_person_detection\tflu_person_detection`文件夹中。
#### 1.3 关闭Keil的MicroLib库
@@ -237,13 +238,13 @@ TencentOS-tiny\components\ai\tflite_micro\ARM_CortexM4_lib\tensorflow\lite\micro
<img src="./image/tflu_STM32496宏.png" width=80% />
</div>
其中宏`NUCLEO_STM32L496ZG`是指定 Nucleo STM32L496hlpuart1 为系统 printf 函数的输出串口,具体定义在 Nucleo STM32L496BSP 文件夹中的`mcu_init.c`中。
其中宏`NUCLEO_STM32L496ZG`是指定Nucleo STM32L496hlpuart1为系统printf函数的输出串口具体定义在Nucleo STM32L496BSP文件夹中的`mcu_init.c`中。
### 2. 编写Person_Detection 任务函数
本例程的任务函数在
`TencentOS-tiny\examples\tflitemicro_person_detection\tflitemicro_person_detection.c`目录下
`TencentOS-tiny\examples\tflitemicro_person_detection\tflitemicro_person_detection.c`
#### 2.1 图像预处理
@@ -311,13 +312,13 @@ void task2(void *arg)
#### 2.3 运行效果
通过串行输出实时打印信息,移动摄像头,镜头没有对准行人时,输出如下:
通过串行输出实时打印信息,移动摄像头,没有对准行人时,输出如下:
<div align=center>
<img src="./image/reasult_no_person.png" width=70% />
</div>
头对准行人时,输出如下:
摄像头对准行人时,输出如下:
<div align=center>
<img src="./image/reasult_person.png" width=70% />

View File

@@ -103,7 +103,7 @@
<bEvRecOn>1</bEvRecOn>
<bSchkAxf>0</bSchkAxf>
<bTchkAxf>0</bTchkAxf>
<nTsel>0</nTsel>
<nTsel>6</nTsel>
<sDll></sDll>
<sDllPa></sDllPa>
<sDlgDll></sDlgDll>
@@ -114,7 +114,7 @@
<tDlgDll></tDlgDll>
<tDlgPa></tDlgPa>
<tIfile></tIfile>
<pMon>BIN\UL2CM3.DLL</pMon>
<pMon>STLink\ST-LINKIII-KEIL_SWO.dll</pMon>
</DebugOpt>
<TargetDriverDllRegistry>
<SetRegEntry>
@@ -622,7 +622,7 @@
<Group>
<GroupName>Drivers/CMSIS</GroupName>
<tvExp>0</tvExp>
<tvExp>1</tvExp>
<tvExpOptDlg>0</tvExpOptDlg>
<cbSel>0</cbSel>
<RteFlg>0</RteFlg>
@@ -642,7 +642,7 @@
<Group>
<GroupName>tos/arch</GroupName>
<tvExp>0</tvExp>
<tvExp>1</tvExp>
<tvExpOptDlg>0</tvExpOptDlg>
<cbSel>0</cbSel>
<RteFlg>0</RteFlg>
@@ -982,7 +982,7 @@
<Group>
<GroupName>tos/cmsis_os</GroupName>
<tvExp>0</tvExp>
<tvExp>1</tvExp>
<tvExpOptDlg>0</tvExpOptDlg>
<cbSel>0</cbSel>
<RteFlg>0</RteFlg>
@@ -1002,7 +1002,7 @@
<Group>
<GroupName>hal</GroupName>
<tvExp>0</tvExp>
<tvExp>1</tvExp>
<tvExpOptDlg>0</tvExpOptDlg>
<cbSel>0</cbSel>
<RteFlg>0</RteFlg>
@@ -1070,7 +1070,7 @@
<Group>
<GroupName>examples</GroupName>
<tvExp>0</tvExp>
<tvExp>1</tvExp>
<tvExpOptDlg>0</tvExpOptDlg>
<cbSel>0</cbSel>
<RteFlg>0</RteFlg>
@@ -1090,13 +1090,25 @@
<Group>
<GroupName>tensorflow</GroupName>
<tvExp>0</tvExp>
<tvExp>1</tvExp>
<tvExpOptDlg>0</tvExpOptDlg>
<cbSel>0</cbSel>
<RteFlg>0</RteFlg>
<File>
<GroupNumber>10</GroupNumber>
<FileNumber>71</FileNumber>
<FileType>1</FileType>
<tvExp>0</tvExp>
<tvExpOptDlg>0</tvExpOptDlg>
<bDave2>0</bDave2>
<PathWithFileName>..\..\..\..\components\ai\tflite_micro\KEIL\retarget.c</PathWithFileName>
<FilenameWithoutPath>retarget.c</FilenameWithoutPath>
<RteFlg>0</RteFlg>
<bShared>0</bShared>
</File>
<File>
<GroupNumber>10</GroupNumber>
<FileNumber>72</FileNumber>
<FileType>4</FileType>
<tvExp>0</tvExp>
<tvExpOptDlg>0</tvExpOptDlg>
@@ -1106,18 +1118,6 @@
<RteFlg>0</RteFlg>
<bShared>0</bShared>
</File>
<File>
<GroupNumber>10</GroupNumber>
<FileNumber>72</FileNumber>
<FileType>8</FileType>
<tvExp>0</tvExp>
<tvExpOptDlg>0</tvExpOptDlg>
<bDave2>0</bDave2>
<PathWithFileName>.\tflu_person_detection\detection_responder.cc</PathWithFileName>
<FilenameWithoutPath>detection_responder.cc</FilenameWithoutPath>
<RteFlg>0</RteFlg>
<bShared>0</bShared>
</File>
<File>
<GroupNumber>10</GroupNumber>
<FileNumber>73</FileNumber>
@@ -1125,8 +1125,8 @@
<tvExp>0</tvExp>
<tvExpOptDlg>0</tvExpOptDlg>
<bDave2>0</bDave2>
<PathWithFileName>.\tflu_person_detection\image_provider.cc</PathWithFileName>
<FilenameWithoutPath>image_provider.cc</FilenameWithoutPath>
<PathWithFileName>..\..\..\..\examples\tflitemicro_person_detection\tflu_person_detection\detection_responder.cc</PathWithFileName>
<FilenameWithoutPath>detection_responder.cc</FilenameWithoutPath>
<RteFlg>0</RteFlg>
<bShared>0</bShared>
</File>
@@ -1137,8 +1137,8 @@
<tvExp>0</tvExp>
<tvExpOptDlg>0</tvExpOptDlg>
<bDave2>0</bDave2>
<PathWithFileName>.\tflu_person_detection\main_functions.cc</PathWithFileName>
<FilenameWithoutPath>main_functions.cc</FilenameWithoutPath>
<PathWithFileName>..\..\..\..\examples\tflitemicro_person_detection\tflu_person_detection\image_provider.cc</PathWithFileName>
<FilenameWithoutPath>image_provider.cc</FilenameWithoutPath>
<RteFlg>0</RteFlg>
<bShared>0</bShared>
</File>
@@ -1149,8 +1149,8 @@
<tvExp>0</tvExp>
<tvExpOptDlg>0</tvExpOptDlg>
<bDave2>0</bDave2>
<PathWithFileName>.\tflu_person_detection\model_settings.cc</PathWithFileName>
<FilenameWithoutPath>model_settings.cc</FilenameWithoutPath>
<PathWithFileName>..\..\..\..\examples\tflitemicro_person_detection\tflu_person_detection\main_functions.cc</PathWithFileName>
<FilenameWithoutPath>main_functions.cc</FilenameWithoutPath>
<RteFlg>0</RteFlg>
<bShared>0</bShared>
</File>
@@ -1161,7 +1161,19 @@
<tvExp>0</tvExp>
<tvExpOptDlg>0</tvExpOptDlg>
<bDave2>0</bDave2>
<PathWithFileName>.\tflu_person_detection\person_detect_model_data.cc</PathWithFileName>
<PathWithFileName>..\..\..\..\examples\tflitemicro_person_detection\tflu_person_detection\model_settings.cc</PathWithFileName>
<FilenameWithoutPath>model_settings.cc</FilenameWithoutPath>
<RteFlg>0</RteFlg>
<bShared>0</bShared>
</File>
<File>
<GroupNumber>10</GroupNumber>
<FileNumber>77</FileNumber>
<FileType>8</FileType>
<tvExp>0</tvExp>
<tvExpOptDlg>0</tvExpOptDlg>
<bDave2>0</bDave2>
<PathWithFileName>..\..\..\..\examples\tflitemicro_person_detection\tflu_person_detection\person_detect_model_data.cc</PathWithFileName>
<FilenameWithoutPath>person_detect_model_data.cc</FilenameWithoutPath>
<RteFlg>0</RteFlg>
<bShared>0</bShared>

View File

@@ -339,7 +339,7 @@
<MiscControls></MiscControls>
<Define>USE_HAL_DRIVER,STM32L496xx,NUCLEO_STM32L496ZG</Define>
<Undefine></Undefine>
<IncludePath>..\..\BSP\Inc;..\..\..\..\platform\vendor_bsp\st\STM32L4xx_HAL_Driver\Inc;..\..\..\..\platform\vendor_bsp\st\STM32L4xx_HAL_Driver\Inc\Legacy;..\..\..\..\platform\vendor_bsp\st\CMSIS\Device\ST\STM32L4xx\Include;..\..\..\..\platform\vendor_bsp\st\CMSIS\Include;..\..\..\..\arch\arm\arm-v7m\common\include;..\..\..\..\arch\arm\arm-v7m\cortex-m4\armcc;..\..\..\..\kernel\core\include;..\..\..\..\kernel\pm\include;..\..\..\..\osal\cmsis_os;..\..\..\..\examples\hello_world;..\..\TOS_CONFIG;..\..\..\..\net\at\include;..\..\..\..\kernel\hal\include;..\..\BSP\Hardware\Inc;..\..\..\..\components\ai\tflite_micro\ARM_CortexM4_lib;..\..\..\..\components\ai\tflite_micro\ARM_CortexM4_lib\third_party\flatbuffers\include;..\..\..\..\components\ai\tflite_micro\ARM_CortexM4_lib\third_party\gemmlowp;..\..\..\..\components\ai\tflite_micro\ARM_CortexM4_lib\third_party\kissfft;..\..\..\..\components\ai\tflite_micro\ARM_CortexM4_lib\third_party\ruy;..\..\..\..\components\ai\tflite_micro\ARM_CortexM4_lib\tensorflow\lite\micro\tools\make\downloads</IncludePath>
<IncludePath>..\..\BSP\Inc;..\..\..\..\platform\vendor_bsp\st\STM32L4xx_HAL_Driver\Inc;..\..\..\..\platform\vendor_bsp\st\STM32L4xx_HAL_Driver\Inc\Legacy;..\..\..\..\platform\vendor_bsp\st\CMSIS\Device\ST\STM32L4xx\Include;..\..\..\..\platform\vendor_bsp\st\CMSIS\Include;..\..\..\..\arch\arm\arm-v7m\common\include;..\..\..\..\arch\arm\arm-v7m\cortex-m4\armcc;..\..\..\..\kernel\core\include;..\..\..\..\kernel\pm\include;..\..\..\..\osal\cmsis_os;..\..\..\..\examples\hello_world;..\..\TOS_CONFIG;..\..\..\..\net\at\include;..\..\..\..\kernel\hal\include;..\..\BSP\Hardware\Inc;..\..\..\..\components\ai\tflite_micro\ARM_CortexM4_lib;..\..\..\..\components\ai\tflite_micro\ARM_CortexM4_lib\third_party\flatbuffers\include;..\..\..\..\components\ai\tflite_micro\ARM_CortexM4_lib\third_party\gemmlowp;..\..\..\..\components\ai\tflite_micro\ARM_CortexM4_lib\third_party\kissfft;..\..\..\..\components\ai\tflite_micro\ARM_CortexM4_lib\third_party\ruy;..\..\..\..\components\ai\tflite_micro\ARM_CortexM4_lib\tensorflow\lite\micro\tools\make\downloads;..\..\..\..\examples\tflitemicro_person_detection\tflu_person_detection</IncludePath>
</VariousControls>
</Cads>
<Aads>
@@ -778,6 +778,11 @@
<Group>
<GroupName>tensorflow</GroupName>
<Files>
<File>
<FileName>retarget.c</FileName>
<FileType>1</FileType>
<FilePath>..\..\..\..\components\ai\tflite_micro\KEIL\retarget.c</FilePath>
</File>
<File>
<FileName>tensorflow_lite_micro_M4.lib</FileName>
<FileType>4</FileType>
@@ -786,27 +791,27 @@
<File>
<FileName>detection_responder.cc</FileName>
<FileType>8</FileType>
<FilePath>.\tflu_person_detection\detection_responder.cc</FilePath>
<FilePath>..\..\..\..\examples\tflitemicro_person_detection\tflu_person_detection\detection_responder.cc</FilePath>
</File>
<File>
<FileName>image_provider.cc</FileName>
<FileType>8</FileType>
<FilePath>.\tflu_person_detection\image_provider.cc</FilePath>
<FilePath>..\..\..\..\examples\tflitemicro_person_detection\tflu_person_detection\image_provider.cc</FilePath>
</File>
<File>
<FileName>main_functions.cc</FileName>
<FileType>8</FileType>
<FilePath>.\tflu_person_detection\main_functions.cc</FilePath>
<FilePath>..\..\..\..\examples\tflitemicro_person_detection\tflu_person_detection\main_functions.cc</FilePath>
</File>
<File>
<FileName>model_settings.cc</FileName>
<FileType>8</FileType>
<FilePath>.\tflu_person_detection\model_settings.cc</FilePath>
<FilePath>..\..\..\..\examples\tflitemicro_person_detection\tflu_person_detection\model_settings.cc</FilePath>
</File>
<File>
<FileName>person_detect_model_data.cc</FileName>
<FileType>8</FileType>
<FilePath>.\tflu_person_detection\person_detect_model_data.cc</FilePath>
<FilePath>..\..\..\..\examples\tflitemicro_person_detection\tflu_person_detection\person_detect_model_data.cc</FilePath>
</File>
</Files>
</Group>

View File

@@ -1,25 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/micro/examples/person_detection_experimental/detection_responder.h"
// This dummy implementation writes person and no person scores to the error
// console. Real applications will want to take some custom action instead, and
// should implement their own versions of this function.
void RespondToDetection(tflite::ErrorReporter* error_reporter,
int8_t person_score, int8_t no_person_score) {
TF_LITE_REPORT_ERROR(error_reporter, "person score:%d no person score %d",
person_score, no_person_score);
}

View File

@@ -1,34 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Provides an interface to take an action based on the output from the person
// detection model.
#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_DETECTION_RESPONDER_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_DETECTION_RESPONDER_H_
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
// Called every time the results of a person detection run are available. The
// `person_score` has the numerical confidence that the captured image contains
// a person, and `no_person_score` has the numerical confidence that the image
// does not contain a person. Typically if person_score > no person score, the
// image is considered to contain a person. This threshold may be adjusted for
// particular applications.
void RespondToDetection(tflite::ErrorReporter* error_reporter,
int8_t person_score, int8_t no_person_score);
#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_DETECTION_RESPONDER_H_

View File

@@ -1,26 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/micro/examples/person_detection_experimental/image_provider.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/model_settings.h"
TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
int image_height, int channels, int8_t* image_data,
uint8_t * hardware_input) {
for (int i = 0; i < image_width * image_height * channels; ++i) {
image_data[i] = hardware_input[i];
}
return kTfLiteOk;
}

View File

@@ -1,40 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_IMAGE_PROVIDER_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_IMAGE_PROVIDER_H_
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
// This is an abstraction around an image source like a camera, and is
// expected to return 8-bit sample data. The assumption is that this will be
// called in a low duty-cycle fashion in a low-power application. In these
// cases, the imaging sensor need not be run in a streaming mode, but rather can
// be idled in a relatively low-power mode between calls to GetImage(). The
// assumption is that the overhead and time of bringing the low-power sensor out
// of this standby mode is commensurate with the expected duty cycle of the
// application. The underlying sensor may actually be put into a streaming
// configuration, but the image buffer provided to GetImage should not be
// overwritten by the driver code until the next call to GetImage();
//
// The reference implementation can have no platform-specific dependencies, so
// it just returns a static image. For real applications, you should
// ensure there's a specialized implementation that accesses hardware APIs.
TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
int image_height, int channels, int8_t* image_data,
uint8_t * hardware_input);
#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_IMAGE_PROVIDER_H_

View File

@@ -1,119 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/micro/examples/person_detection_experimental/main_functions.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/detection_responder.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/image_provider.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/model_settings.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/person_detect_model_data.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"
// Globals, used for compatibility with Arduino-style sketches.
namespace {
tflite::ErrorReporter* error_reporter = nullptr;
const tflite::Model* model = nullptr;
tflite::MicroInterpreter* interpreter = nullptr;
TfLiteTensor* input = nullptr;
// In order to use optimized tensorflow lite kernels, a signed int8_t quantized
// model is preferred over the legacy unsigned model format. This means that
// throughout this project, input images must be converted from unisgned to
// signed format. The easiest and quickest way to convert from unsigned to
// signed 8-bit integers is to subtract 128 from the unsigned value to get a
// signed value.
// An area of memory to use for input, output, and intermediate arrays.
constexpr int kTensorArenaSize = 115 * 1024;
static uint8_t tensor_arena[kTensorArenaSize];
} // namespace
// The name of this function is important for Arduino compatibility.
void person_detect_init() {
// Set up logging. Google style is to avoid globals or statics because of
// lifetime uncertainty, but since this has a trivial destructor it's okay.
// NOLINTNEXTLINE(runtime-global-variables)
static tflite::MicroErrorReporter micro_error_reporter;
error_reporter = &micro_error_reporter;
// Map the model into a usable data structure. This doesn't involve any
// copying or parsing, it's a very lightweight operation.
model = tflite::GetModel(g_person_detect_model_data);
if (model->version() != TFLITE_SCHEMA_VERSION) {
TF_LITE_REPORT_ERROR(error_reporter,
"Model provided is schema version %d not equal "
"to supported version %d.",
model->version(), TFLITE_SCHEMA_VERSION);
return;
}
// Pull in only the operation implementations we need.
// This relies on a complete list of all the ops needed by this graph.
// An easier approach is to just use the AllOpsResolver, but this will
// incur some penalty in code space for op implementations that are not
// needed by this graph.
//
// tflite::AllOpsResolver resolver;
// NOLINTNEXTLINE(runtime-global-variables)
static tflite::MicroMutableOpResolver<5> micro_op_resolver;
micro_op_resolver.AddAveragePool2D();
micro_op_resolver.AddConv2D();
micro_op_resolver.AddDepthwiseConv2D();
micro_op_resolver.AddReshape();
micro_op_resolver.AddSoftmax();
// Build an interpreter to run the model with.
// NOLINTNEXTLINE(runtime-global-variables)
static tflite::MicroInterpreter static_interpreter(
model, micro_op_resolver, tensor_arena, kTensorArenaSize, error_reporter);
interpreter = &static_interpreter;
// Allocate memory from the tensor_arena for the model's tensors.
TfLiteStatus allocate_status = interpreter->AllocateTensors();
if (allocate_status != kTfLiteOk) {
TF_LITE_REPORT_ERROR(error_reporter, "AllocateTensors() failed");
return;
}
// Get information about the memory area to use for the model's input.
input = interpreter->input(0);
}
// The name of this function is important for Arduino compatibility.
int person_detect(uint8_t * hardware_input) {
// Get image from provider.
if (kTfLiteOk != GetImage(error_reporter, kNumCols, kNumRows, kNumChannels,
input->data.int8, hardware_input)) {
TF_LITE_REPORT_ERROR(error_reporter, "Image capture failed.");
}
// Run the model on this input and make sure it succeeds.
if (kTfLiteOk != interpreter->Invoke()) {
TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed.");
}
TfLiteTensor* output = interpreter->output(0);
// Process the inference results.
int8_t person_score = output->data.uint8[kPersonIndex];
int8_t no_person_score = output->data.uint8[kNotAPersonIndex];
RespondToDetection(error_reporter, person_score, no_person_score);
if(person_score >= no_person_score + 50) return 1;
else return 0;
}

View File

@@ -1,30 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MAIN_FUNCTIONS_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MAIN_FUNCTIONS_H_
#include "tensorflow/lite/c/common.h"
// Initializes all data needed for the example. The name is important, and needs
// to be setup() for Arduino compatibility.
extern "C" void person_detect_init();
// Runs one iteration of data gathering and inference. This should be called
// repeatedly from the application code. The name needs to be loop() for Arduino
// compatibility.
extern "C" int person_detect(uint8_t * hardware_input);
#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MAIN_FUNCTIONS_H_

View File

@@ -1,21 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/micro/examples/person_detection_experimental/model_settings.h"
const char* kCategoryLabels[kCategoryCount] = {
"notperson",
"person",
};

View File

@@ -1,35 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MODEL_SETTINGS_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MODEL_SETTINGS_H_
// Keeping these as constant expressions allow us to allocate fixed-sized arrays
// on the stack for our working memory.
// All of these values are derived from the values used during model training,
// if you change your model you'll need to update these constants.
constexpr int kNumCols = 96;
constexpr int kNumRows = 96;
constexpr int kNumChannels = 1;
constexpr int kMaxImageSize = kNumCols * kNumRows * kNumChannels;
constexpr int kCategoryCount = 2;
constexpr int kPersonIndex = 1;
constexpr int kNotAPersonIndex = 0;
extern const char* kCategoryLabels[kCategoryCount];
#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MODEL_SETTINGS_H_

View File

@@ -1,27 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This is a standard TensorFlow Lite model file that has been converted into a
// C data array, so it can be easily compiled into a binary for devices that
// don't have a file system. It was created using the command:
// xxd -i person_detect.tflite > person_detect_model_data.cc
#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_PERSON_DETECT_MODEL_DATA_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_PERSON_DETECT_MODEL_DATA_H_
extern const unsigned char g_person_detect_model_data[];
extern const int g_person_detect_model_data_len;
#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_PERSON_DETECT_MODEL_DATA_H_