first commit for opensource

supowang
2019-09-16 13:19:50 +08:00
parent 08ab013b8e
commit edb2879617
6303 changed files with 5472815 additions and 23 deletions

kernel/core/include/tos.h Normal file

@@ -0,0 +1,40 @@
#ifndef _TOS_H_
#define _TOS_H_
#include <tos_compiler.h>
#include <tos_err.h>
#include <tos_ktypes.h>
#include <tos_cpu_def.h>
#include <tos_config.h>
#include <tos_config_default.h>
#include <tos_cpu_types.h>
#include <port_config.h>
#include <port.h>
#include <tos_cpu.h>
#include <tos_config_check.h>
#include <tos_fault.h>
#include <tos_klib.h>
#include <tos_list.h>
#include <tos_pm.h>
#include <tos_pend.h>
#include <tos_sys.h>
#include <tos_fifo.h>
#include <tos_task.h>
#include <tos_robin.h>
#include <tos_msg.h>
#include <tos_queue.h>
#include <tos_mutex.h>
#include <tos_sem.h>
#include <tos_event.h>
#include <tos_timer.h>
#include <tos_time.h>
#include <tos_mmblk.h>
#include <tos_mmheap.h>
#include <tos_tick.h>
#include <tos_sched.h>
#include <tos_pm.h>
#include <tos_tickless.h>
#include <tos_global.h>
#endif /* _TOS_H_ */
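A minimal bring-up sketch using only the APIs declared in this commit; the task name, priority, and stack size are illustrative, and the clock/tick setup is assumed to be done by the port layer:

#include <tos.h>

#define DEMO_STK_SIZE 512u

k_stack_t demo_stk[DEMO_STK_SIZE];
k_task_t demo_task;

static void demo_entry(void *arg)
{
    (void)arg;
    while (K_TRUE) {
        tos_task_delay((k_tick_t)1000u); /* sleep for 1000 ticks */
    }
}

int main(void)
{
    tos_knl_init(); /* set up kernel objects, including the idle task */
    tos_task_create(&demo_task, "demo", demo_entry, K_NULL,
                    (k_prio_t)4u, demo_stk, sizeof(demo_stk), (k_timeslice_t)0u);
    tos_knl_start(); /* start multitask scheduling; does not return */
    return 0;
}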

kernel/core/include/tos_compiler.h Normal file

@@ -0,0 +1,110 @@
#ifndef _TOS_COMPILER_H_
#define _TOS_COMPILER_H_
// functions prefixed with __API__ are APIs for the user
#define __API__
// functions prefixed with __KERNEL__ are for kernel use only
#define __KERNEL__
// functions prefixed with __HOOK__ should be implemented by the user
#define __HOOK__
// functions prefixed with __DEBUG__ are for debugging only
#define __DEBUG__
// functions prefixed with __PORT__ are architecture dependent
#define __PORT__
/*------------------ RealView Compiler -----------------*/
#if defined(__CC_ARM)
#define __ASM__ __asm
#define __VOLATILE__ volatile
#define __INLINE__ inline
#define __STATIC__ static
#if (__ARMCC_VERSION < 5060750) // version threshold below which "static inline" is not supported; the exact cut-off is uncertain
#define __STATIC_INLINE__ static
#else
#define __STATIC_INLINE__ static inline
#endif
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#define __UNUSED__ __attribute__((__unused__))
#define __USED__ __attribute__((__used__))
#define __PACKED__ __attribute__((packed))
#define __ALIGNED__(x) __attribute__((aligned(x)))
#define __PURE__ __attribute__((__pure__))
#define __CONST__ __attribute__((__const__))
#define __NO_RETURN__ __attribute__((__noreturn__))
/*------------------ ARM Compiler V6 -------------------*/
#elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
#define __ASM__ __asm
#define __VOLATILE__ volatile
#define __INLINE__ inline
#define __STATIC__ static
#define __STATIC_INLINE__ static inline
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#define __UNUSED__ __attribute__((__unused__))
#define __USED__ __attribute__((__used__))
#define __PACKED__ __attribute__((packed))
#define __ALIGNED__(x) __attribute__((aligned(x)))
#define __PURE__ __attribute__((__pure__))
#define __CONST__ __attribute__((__const__))
#define __NO_RETURN__ __attribute__((__noreturn__))
#define __NAKED__ __attribute__((naked))
/*------------------ ICC Compiler ----------------------*/
#elif defined(__ICCARM__)
#define __ASM__ __asm
#define __VOLATILE__ volatile
#define __INLINE__ inline
#define __STATIC__ static
#define __STATIC_INLINE__ static inline
#define likely(x) (x)
#define unlikely(x) (x)
#define __UNUSED__
#define __USED__
#define __PACKED__
#define __ALIGNED__(x)
#define __PURE__
#define __CONST__
#define __NO_RETURN__
#define __NAKED__
/*------------------ GNU Compiler ----------------------*/
#elif defined(__GNUC__)
#define __ASM__ __asm
#define __VOLATILE__ volatile
#define __INLINE__ inline
#define __STATIC__ static
#define __STATIC_INLINE__ static inline
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#define __UNUSED__ __attribute__((__unused__))
#define __USED__ __attribute__((__used__))
#define __PACKED__ __attribute__((packed))
#define __ALIGNED__(x) __attribute__((aligned(x)))
#define __PURE__ __attribute__((__pure__))
#define __CONST__ __attribute__((__const__))
#define __NO_RETURN__ __attribute__((__noreturn__))
#define __NAKED__ __attribute__((naked))
#endif
#endif /* _TOS_COMPILER_H_ */
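A short sketch of how these portability macros are meant to be combined; the function and struct below are hypothetical:

#include <stdint.h>

__STATIC_INLINE__ int clamp_u8(int v)
{
    if (unlikely(v < 0)) { /* branch hinted as cold where the compiler supports it */
        return 0;
    }
    return likely(v <= 255) ? v : 255;
}

typedef struct pkt_hdr_st {
    uint8_t type;
    uint32_t len;
} __PACKED__ pkt_hdr_t; /* no padding between the two fields (where supported) */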

kernel/core/include/tos_config_check.h Normal file

@@ -0,0 +1,61 @@
#ifndef _TOS_CONFIG_CHECK_H_
#define _TOS_CONFIG_CHECK_H_
#if TOS_CFG_TASK_PRIO_MAX < 8u
#error "INVALID config, TOS_CFG_TASK_PRIO_MAX must be >= 8"
#endif
#if (TOS_CFG_QUEUE_EN > 0u) && (TOS_CFG_MSG_EN == 0u)
#error "INVALID config, must enable tos_msg to use tos_queue"
#endif
#if ((TOS_CFG_TIMER_EN > 0u) && !defined(TOS_CFG_TIMER_AS_PROC))
#error "UNDECLARED config, TOS_CFG_TIMER_AS_PROC"
#endif
#if (TOS_CFG_VFS_EN > 0u) && (TOS_CFG_MMHEAP_EN == 0u)
#error "INVALID config, must enable tos_mmheap to use tos_vfs"
#endif
#ifndef TOS_CFG_CPU_HRTIMER_EN
#error "UNDECLARED config, TOS_CFG_CPU_HRTIMER_EN should be declared in 'port.h'"
#elif (TOS_CFG_CPU_HRTIMER_EN > 0u) && !defined(TOS_CFG_CPU_HRTIMER_SIZE)
#error "UNDECLARED config, TOS_CFG_CPU_HRTIMER_SIZE should be declared in 'port.h'"
#elif ((TOS_CFG_CPU_HRTIMER_SIZE != CPU_WORD_SIZE_08) && \
(TOS_CFG_CPU_HRTIMER_SIZE != CPU_WORD_SIZE_16) && \
(TOS_CFG_CPU_HRTIMER_SIZE != CPU_WORD_SIZE_32) && \
(TOS_CFG_CPU_HRTIMER_SIZE != CPU_WORD_SIZE_64))
#error "INVALID config, TOS_CFG_CPU_HRTIMER_SIZE"
#endif
#ifndef TOS_CFG_CPU_LEAD_ZEROS_ASM_PRESENT
#error "UNDECLARED config, TOS_CFG_CPU_LEAD_ZEROS_ASM_PRESENT, should be declared in 'port.h'"
#endif
#ifndef TOS_CFG_CPU_STK_GROWTH
#error "UNDECLARED config, TOS_CFG_CPU_STK_GROWTH, should be declared in 'port.h'"
#elif ((TOS_CFG_CPU_STK_GROWTH != CPU_STK_GROWTH_ASCENDING) && \
(TOS_CFG_CPU_STK_GROWTH != CPU_STK_GROWTH_DESCENDING))
#error "INVALID config, TOS_CFG_CPU_STK_GROWTH"
#endif
#ifndef TOS_CFG_CPU_ADDR_SIZE
#error "UNDECLARED config, TOS_CFG_CPU_ADDR_SIZE, should be declared in 'port.h'"
#elif ((TOS_CFG_CPU_ADDR_SIZE != CPU_WORD_SIZE_08) && \
(TOS_CFG_CPU_ADDR_SIZE != CPU_WORD_SIZE_16) && \
(TOS_CFG_CPU_ADDR_SIZE != CPU_WORD_SIZE_32) && \
(TOS_CFG_CPU_ADDR_SIZE != CPU_WORD_SIZE_64))
#error "INVALID config, TOS_CFG_CPU_ADDR_SIZE"
#endif
#ifndef TOS_CFG_CPU_DATA_SIZE
#error "UNDECLARED config, TOS_CFG_CPU_DATA_SIZE, should be declared in 'port.h'"
#elif ((TOS_CFG_CPU_DATA_SIZE != CPU_WORD_SIZE_08) && \
(TOS_CFG_CPU_DATA_SIZE != CPU_WORD_SIZE_16) && \
(TOS_CFG_CPU_DATA_SIZE != CPU_WORD_SIZE_32) && \
(TOS_CFG_CPU_DATA_SIZE != CPU_WORD_SIZE_64))
#error "INVALID config, TOS_CFG_CPU_DATA_SIZE"
#endif
#endif /* _TOS_CONFIG_CHECK_H_ */

kernel/core/include/tos_config_default.h Normal file

@@ -0,0 +1,114 @@
#ifndef _TOS_CONFIG_DEFAULT_H_
#define _TOS_CONFIG_DEFAULT_H_
#ifndef TOS_CFG_TASK_STACK_DRAUGHT_DEPTH_DETACT_EN
#define TOS_CFG_TASK_STACK_DRAUGHT_DEPTH_DETACT_EN 0u
#endif
#ifndef TOS_CFG_ROUND_ROBIN_EN
#define TOS_CFG_ROUND_ROBIN_EN 0u
#endif
#ifndef TOS_CFG_EVENT_EN
#define TOS_CFG_EVENT_EN 0u
#endif
#ifndef TOS_CFG_MMHEAP_EN
#define TOS_CFG_MMHEAP_EN 0u
#endif
#ifndef TOS_CFG_MUTEX_EN
#define TOS_CFG_MUTEX_EN 0u
#endif
#ifndef TOS_CFG_QUEUE_EN
#define TOS_CFG_QUEUE_EN 0u
#endif
#ifndef TOS_CFG_SEM_EN
#define TOS_CFG_SEM_EN 0u
#endif
#if (TOS_CFG_QUEUE_EN > 0u) && !defined(TOS_CFG_MSG_EN)
#define TOS_CFG_MSG_EN 1u
#elif (TOS_CFG_QUEUE_EN == 0u) && !defined(TOS_CFG_MSG_EN)
#define TOS_CFG_MSG_EN 0u
#endif
#ifndef TOS_CFG_TIMER_EN
#define TOS_CFG_TIMER_EN 0u
#endif
#if (TOS_CFG_TIMER_EN > 0u) && !defined(TOS_CFG_TIMER_AS_PROC)
#define TOS_CFG_TIMER_AS_PROC 0u
#endif
#ifndef TOS_CFG_MSG_POOL_SIZE
#define TOS_CFG_MSG_POOL_SIZE 100u
#endif
#ifndef TOS_CFG_IDLE_TASK_STK_SIZE
#define TOS_CFG_IDLE_TASK_STK_SIZE 128u
#endif
#ifndef TOS_CFG_OBJECT_VERIFY_EN
#define TOS_CFG_OBJECT_VERIFY_EN 0u
#endif
#if (TOS_CFG_TIMER_AS_PROC == 0u) && !defined(TOS_CFG_TIMER_TASK_PRIO)
#define TOS_CFG_TIMER_TASK_PRIO (k_prio_t)(K_TASK_PRIO_IDLE - (k_prio_t)1u)
#endif
#if (TOS_CFG_TIMER_AS_PROC == 0u) && !defined(TOS_CFG_TIMER_TASK_STK_SIZE)
#define TOS_CFG_TIMER_TASK_STK_SIZE 128u
#endif
#ifndef TOS_CFG_CPU_SYSTICK_PRIO
#define TOS_CFG_CPU_SYSTICK_PRIO 0u
#endif
#ifndef TOS_CFG_CPU_TICK_PER_SECOND
#define TOS_CFG_CPU_TICK_PER_SECOND 1000u
#endif
#ifndef TOS_CFG_CPU_CLOCK
#define TOS_CFG_CPU_CLOCK 16000000u
#endif
#ifndef TOS_CFG_TASK_PRIO_MAX
#define TOS_CFG_TASK_PRIO_MAX 10u
#endif
#ifndef TOS_CFG_MMBLK_EN
#define TOS_CFG_MMBLK_EN 0u
#endif
#if (TOS_CFG_MMHEAP_EN > 0u) && !defined(TOS_CFG_MMHEAP_POOL_SIZE)
#define TOS_CFG_MMHEAP_POOL_SIZE 0x1000
#endif
#ifndef TOS_CFG_PWR_MGR_EN
#define TOS_CFG_PWR_MGR_EN 0u
#endif
#ifndef TOS_CFG_TICKLESS_EN
#define TOS_CFG_TICKLESS_EN 0u
#endif
#if (TOS_CFG_PWR_MGR_EN > 0u) || (TOS_CFG_TICKLESS_EN > 0u)
#if TOS_CFG_IDLE_TASK_STK_SIZE < 256
#undef TOS_CFG_IDLE_TASK_STK_SIZE
#define TOS_CFG_IDLE_TASK_STK_SIZE 256u
#endif
#endif
#ifndef TOS_CFG_FAULT_BACKTRACE_EN
#define TOS_CFG_FAULT_BACKTRACE_EN 0u
#endif
#endif /* _TOS_CONFIG_DEFAULT_H_ */
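These defaults only take effect for switches that tos_config.h leaves undefined; a hypothetical tos_config.h enabling a typical feature set could look like this (all values illustrative):

/* tos_config.h */
#define TOS_CFG_TASK_PRIO_MAX 10u
#define TOS_CFG_ROUND_ROBIN_EN 1u
#define TOS_CFG_MUTEX_EN 1u
#define TOS_CFG_SEM_EN 1u
#define TOS_CFG_QUEUE_EN 1u /* pulls in TOS_CFG_MSG_EN as 1u via the default above */
#define TOS_CFG_MMHEAP_EN 1u
#define TOS_CFG_MMHEAP_POOL_SIZE 0x2000
#define TOS_CFG_CPU_TICK_PER_SECOND 1000u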

kernel/core/include/tos_err.h Normal file

@@ -0,0 +1,83 @@
#ifndef _TOS_ERR_H_
#define _TOS_ERR_H_
typedef enum k_err_en {
K_ERR_NONE = 0u,
K_ERR_DELAY_ZERO = 100u,
K_ERR_DELAY_FOREVER,
K_ERR_EVENT_PEND_OPT_INVALID = 200u,
K_ERR_FIFO_FULL = 300u,
K_ERR_FIFO_EMPTY,
K_ERR_IN_IRQ = 400u,
K_ERR_KNL_NOT_RUNNING = 500u,
K_ERR_KNL_RUNNING,
K_ERR_LOCK_NESTING_OVERFLOW = 600u,
K_ERR_MMBLK_POOL_FULL = 700u,
K_ERR_MMBLK_POOL_EMPTY,
K_ERR_MMBLK_INVALID_BLK_SIZE,
K_ERR_MMBLK_INVALID_POOL_ADDR,
K_ERR_MMHEAP_INVALID_POOL_ADDR = 800u,
K_ERR_MMHEAP_INVALID_POOL_SIZE,
K_ERR_MSG_QUEUE_FULL = 900u,
K_ERR_MSG_QUEUE_EMPTY,
K_ERR_MUTEX_NOT_OWNER = 1000u,
K_ERR_MUTEX_NESTING,
K_ERR_MUTEX_NESTING_OVERFLOW,
K_ERR_OBJ_PTR_NULL = 1100u,
K_ERR_OBJ_INVALID,
K_ERR_PM_DEVICE_ALREADY_REG = 1200u,
K_ERR_PM_DEVICE_OVERFLOW = 1300u,
K_ERR_PM_WKUP_SOURCE_NOT_INSTALL = 1400u,
K_ERR_QUEUE_EMPTY = 1500u,
K_ERR_QUEUE_FULL,
K_ERR_PEND_NOWAIT = 1600u,
K_ERR_PEND_SCHED_LOCKED,
K_ERR_PEND_ABNORMAL,
K_ERR_PEND_TIMEOUT,
K_ERR_PEND_DESTROY,
K_ERR_PEND_OWNER_DIE,
K_ERR_SCHED_LOCKED = 1700u,
K_ERR_SCHED_NOT_LOCKED,
K_ERR_SEM_OVERFLOW = 1800u,
K_ERR_TASK_DESTROY_IDLE = 1900u,
K_ERR_TASK_NOT_DELAY,
K_ERR_TASK_PRIO_INVALID,
K_ERR_TASK_RESUME_SELF,
K_ERR_TASK_SUSPENDED,
K_ERR_TASK_SUSPEND_IDLE,
K_ERR_TASK_STK_OVERFLOW,
K_ERR_TASK_STK_SIZE_INVALID,
K_ERR_TICKLESS_WKUP_ALARM_NOT_INSTALLED = 2000u,
K_ERR_TICKLESS_WKUP_ALARM_NO_INIT,
K_ERR_TICKLESS_WKUP_ALARM_INIT_FAILED,
K_ERR_TIMER_INACTIVE = 2100u,
K_ERR_TIMER_DELAY_FOREVER,
K_ERR_TIMER_PERIOD_FOREVER,
K_ERR_TIMER_INVALID_DELAY,
K_ERR_TIMER_INVALID_PERIOD,
K_ERR_TIMER_INVALID_STATE,
K_ERR_TIMER_INVALID_OPT,
K_ERR_TIMER_STOPPED,
} k_err_t;
#endif /* _TOS_ERR_H_ */

kernel/core/include/tos_event.h Normal file

@@ -0,0 +1,116 @@
#ifndef _TOS_EVENT_H_
#define _TOS_EVENT_H_
#if TOS_CFG_EVENT_EN > 0
// pass this option to tos_event_pend to be woken up when ANY of the expected flags is set
#define TOS_OPT_EVENT_PEND_ANY (k_opt_t)0x0001
// pass this option to tos_event_pend to be woken up only when ALL of the expected flags are set
#define TOS_OPT_EVENT_PEND_ALL (k_opt_t)0x0002
// pass this option to tos_event_pend to clear the event's flags after they are read
/* ATTENTION:
 TOS_OPT_EVENT_PEND_CLR can be combined with either TOS_OPT_EVENT_PEND_ANY or TOS_OPT_EVENT_PEND_ALL:
 pass (TOS_OPT_EVENT_PEND_CLR | TOS_OPT_EVENT_PEND_ANY) or (TOS_OPT_EVENT_PEND_CLR | TOS_OPT_EVENT_PEND_ALL)
 to tos_event_pend.
 but (TOS_OPT_EVENT_PEND_ANY | TOS_OPT_EVENT_PEND_ALL) is invalid: we cannot wait for both any and all flags at once.
*/
#define TOS_OPT_EVENT_PEND_CLR (k_opt_t)0x0004
typedef enum opt_event_post_en {
OPT_EVENT_POST_KEP,
OPT_EVENT_POST_CLR,
} opt_event_post_t;
typedef struct k_event_st {
pend_obj_t pend_obj;
k_event_flag_t flag;
} k_event_t;
/**
* @brief Create an event.
* create an event.
*
* @attention None
*
* @param[in] event pointer to the handler of the event.
* @param[in] init_flag initial flag of the event.
*
* @return errcode
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_event_create(k_event_t *event, k_event_flag_t init_flag);
/**
* @brief Destroy an event.
* destroy an event.
*
* @attention None
*
* @param[in] event pointer to the handler of the event.
*
* @return errcode
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_event_destroy(k_event_t *event);
/**
* @brief Pend an event.
* pend an event.
*
 * @attention if opt is TOS_OPT_EVENT_PEND_ANY, it is enough that any of the flag_expect bits is set;
 *            if opt is TOS_OPT_EVENT_PEND_ALL, all of the flag_expect bits must be set.
*
* @param[in] event pointer to the handler of the event.
* @param[in] flag_expect the flag we expect from the event.
 * @param[out] flag_match the flags actually matched when we get what we expect.
 * @param[in] timeout how much time (in k_tick_t) we would like to wait.
* @param[in] opt option for pend.
*
* @return errcode
* @retval #K_ERR_EVENT_PEND_OPT_INVALID opt is invalid
 * @retval #K_ERR_PEND_NOWAIT we got nothing, and we chose not to wait.
 * @retval #K_ERR_PEND_SCHED_LOCKED we would wait, but the scheduler is locked.
 * @retval #K_ERR_PEND_TIMEOUT the wait timed out and we got nothing.
 * @retval #K_ERR_PEND_DESTROY the event we are pending on has been destroyed.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_event_pend(k_event_t *event, k_event_flag_t flag_expect, k_event_flag_t *flag_match, k_tick_t timeout, k_opt_t opt);
/**
* @brief Post an event.
* post an event.
*
 * @attention when posting with tos_event_post, the event's own flags are overwritten by the flags we post.
 *            e.g. if an event's own flag is 0x0001 and we post the flag 0x0030, after the post the event's flag
 *            is overwritten to 0x0030.
*
* @param[in] event pointer to the handler of the event.
* @param[in] flag the flag we post.
*
* @return errcode
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_event_post(k_event_t *event, k_event_flag_t flag);
/**
* @brief Post an event.
* post an event, and keep the original own flag of the event.
*
 * @attention the original flags of the event are kept.
 *            e.g. if an event's own flag is 0x0001 and we post the flag 0x0030, after the post the event's flag
 *            becomes 0x0031 (0x0030 | 0x0001), which means the event's original flags are kept.
*
* @param[in] event pointer to the handler of the event.
* @param[in] flag the flag we post.
*
* @return errcode
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_event_post_keep(k_event_t *event, k_event_flag_t flag);
#endif
#endif /* _TOS_EVENT_H_ */
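A sketch of the option combinations described above: one task waits for either of two flags and clears whatever it consumed, another task posts; flag values and function names are illustrative:

#define FLAG_RX_DONE (k_event_flag_t)0x0001
#define FLAG_TX_DONE (k_event_flag_t)0x0002

k_event_t demo_event;

void event_waiter(void)
{
    k_event_flag_t match;

    tos_event_create(&demo_event, (k_event_flag_t)0u);
    if (tos_event_pend(&demo_event, FLAG_RX_DONE | FLAG_TX_DONE, &match,
                       (k_tick_t)500u,
                       TOS_OPT_EVENT_PEND_ANY | TOS_OPT_EVENT_PEND_CLR) == K_ERR_NONE) {
        /* match tells us which of the two flags actually fired */
    }
}

void event_poster(void)
{
    tos_event_post_keep(&demo_event, FLAG_RX_DONE); /* OR the flag into the current flags */
}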

kernel/core/include/tos_fifo.h Normal file

@@ -0,0 +1,146 @@
#ifndef _TOS_FIFO_H_
#define _TOS_FIFO_H_
typedef struct k_fifo_st {
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
knl_obj_t knl_obj;
#endif
int beg;
int end;
size_t cnt;
uint8_t *buf;
size_t siz;
} k_fifo_t;
/**
* @brief Create a fifo.
* Create a fifo.
*
* @attention None
*
* @param[in] fifo pointer to the handler of the fifo.
* @param[in] buffer memory buffer provided to be as the inner buffer.
* @param[in] size size of the memory buffer.
*
* @return errno
* @retval #K_ERR_OBJ_PTR_NULL fifo is a NULL pointer.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_fifo_create(k_fifo_t *fifo, uint8_t *buffer, size_t size);
/**
* @brief Destroy a fifo.
* Destroy a fifo.
*
* @attention None
*
* @param[in] fifo pointer to the handler of the fifo.
*
* @return errno
* @retval #K_ERR_OBJ_PTR_NULL fifo is a NULL pointer.
* @retval #K_ERR_OBJ_INVALID not a valid pointer to a fifo.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_fifo_destroy(k_fifo_t *fifo);
/**
* @brief Push data into fifo.
* Push one single data into the fifo.
*
* @attention None
*
* @param[in] fifo pointer to the handler of the fifo.
* @param[in] data data to push into the fifo.
*
* @return errno
* @retval #K_ERR_FIFO_FULL the fifo is full.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_fifo_push(k_fifo_t *fifo, uint8_t data);
/**
* @brief Push stream into fifo.
* Push a stream data into the fifo.
*
* @attention None
*
* @param[in] fifo pointer to the handler of the fifo.
 * @param[in] stream stream to be pushed into the fifo.
 * @param[in] size size of the stream.
*
 * @return the actual number of bytes pushed into the fifo.
*/
__API__ int tos_fifo_push_stream(k_fifo_t *fifo, uint8_t *stream, size_t size);
/**
* @brief Pop data from fifo.
* Pop one single data from the fifo.
*
* @attention None
*
* @param[in] fifo pointer to the handler of the fifo.
 * @param[out] out a single-byte buffer to hold the data popped from the fifo.
*
* @return errno
* @retval #K_ERR_FIFO_EMPTY the fifo is empty.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_fifo_pop(k_fifo_t *fifo, uint8_t *out);
/**
* @brief Pop stream from fifo.
* Pop a stream data from the fifo.
*
* @attention None
*
* @param[in] fifo pointer to the handler of the fifo.
 * @param[out] buffer pointer to the buffer to receive the popped stream.
 * @param[in] size size of the buffer.
 *
 * @return the actual number of bytes popped from the fifo.
*/
__API__ int tos_fifo_pop_stream(k_fifo_t *fifo, uint8_t *buffer, size_t size);
/**
* @brief Flush fifo.
* Flush/reset the fifo.
*
* @attention None
*
* @param[in] fifo pointer to the handler of the fifo.
*
* @return None.
*/
__API__ void tos_fifo_flush(k_fifo_t *fifo);
/**
* @brief Whether the fifo is empty.
* Whether the fifo is empty.
*
* @attention None
*
* @param[in] fifo pointer to the handler of the fifo.
*
 * @return whether the fifo is empty.
 * @retval #0 the fifo is not empty.
 * @retval #Non-0 the fifo is empty.
*/
__API__ int tos_fifo_is_empty(k_fifo_t *fifo);
/**
* @brief Whether the fifo is full.
* Whether the fifo is full.
*
* @attention None
*
* @param[in] fifo pointer to the handler of the fifo.
*
* @return whether the fifo is full.
* @retval #0 the fifo is not full.
 * @retval #Non-0 the fifo is full.
*/
__API__ int tos_fifo_is_full(k_fifo_t *fifo);
#endif /* _TOS_FIFO_H_ */
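A hedged usage sketch of the fifo API above; buffer size and contents are arbitrary:

uint8_t fifo_buf[64];
k_fifo_t demo_fifo;

void fifo_demo(void)
{
    uint8_t out;

    tos_fifo_create(&demo_fifo, fifo_buf, sizeof(fifo_buf));
    tos_fifo_push(&demo_fifo, 0x5A);
    tos_fifo_push_stream(&demo_fifo, (uint8_t *)"abc", 3u);
    while (!tos_fifo_is_empty(&demo_fifo)) {
        tos_fifo_pop(&demo_fifo, &out); /* pops 0x5A, 'a', 'b', 'c' in order */
    }
    tos_fifo_destroy(&demo_fifo);
}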

kernel/core/include/tos_global.h Normal file

@@ -0,0 +1,85 @@
#ifndef _TOS_GLOBAL_H_
#define _TOS_GLOBAL_H_
/* interrupt nesting count */
extern k_nesting_t k_irq_nest_cnt;
/* schedule lock nesting count */
extern k_nesting_t k_sched_lock_nest_cnt;
/* kernel running state */
extern knl_state_t k_knl_state;
/* ready queue of tasks */
extern readyqueue_t k_rdyq;
/* ticks since boot up */
extern k_tick_t k_tick_count;
/* current task */
extern k_task_t *k_curr_task;
/* next task to run */
extern k_task_t *k_next_task;
/* idle task related stuff */
extern k_task_t k_idle_task;
extern k_stack_t k_idle_task_stk[];
extern k_stack_t *const k_idle_task_stk_addr;
extern size_t const k_idle_task_stk_size;
/* list holding all the tasks that are delayed or pending with a timeout */
extern k_list_t k_tick_list;
/* how many ticks will be triggered in a second */
extern k_tick_t k_cpu_tick_per_second;
/* how many cycles per tick */
extern k_cycle_t k_cpu_cycle_per_tick;
#if TOS_CFG_FAULT_BACKTRACE_EN > 0u
extern k_fault_log_writer_t k_fault_log_writer;
#endif
#if TOS_CFG_MMHEAP_EN > 0u
extern uint8_t k_mmheap_pool[] __ALIGNED__(4);
extern k_mmheap_ctl_t k_mmheap_ctl;
#endif
#if TOS_CFG_ROUND_ROBIN_EN > 0u
extern k_timeslice_t k_robin_default_timeslice;
extern k_robin_state_t k_robin_state;
#endif
#if TOS_CFG_TIMER_EN > 0u
/* list holding all the timers */
extern timer_ctl_t k_timer_ctl;
#if TOS_CFG_TIMER_AS_PROC == 0u
extern k_task_t k_timer_task;
extern k_stack_t k_timer_task_stk[];
extern k_prio_t const k_timer_task_prio;
extern k_stack_t *const k_timer_task_stk_addr;
extern size_t const k_timer_task_stk_size;
#endif
#endif
#if (TOS_CFG_MSG_EN > 0u)
extern k_list_t k_msg_freelist;
extern k_msg_t k_msg_pool[];
#endif
#if TOS_CFG_PWR_MGR_EN > 0u
extern pm_device_ctl_t k_pm_device_ctl;
extern idle_pwrmgr_mode_t k_idle_pwr_mgr_mode;
extern k_cpu_lpwr_mode_t k_cpu_lpwr_mode;
#endif
#if TOS_CFG_TICKLESS_EN > 0u
extern k_tickless_wkup_alarm_t *k_tickless_wkup_alarm[__LOW_POWER_MODE_DUMMY];
#endif
#endif /* _TOS_GLOBAL_H_ */

kernel/core/include/tos_klib.h Normal file

@@ -0,0 +1,38 @@
#ifndef _TOS_KLIB_H_
#define _TOS_KLIB_H_
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#define TOS_OFFSET_OF_FIELD(type, field) \
((uint32_t)&(((type *)0)->field))
#define TOS_CONTAINER_OF_FIELD(ptr, type, field) \
((type *)((uint8_t *)(ptr) - TOS_OFFSET_OF_FIELD(type, field)))
#define TOS_PTR_SANITY_CHECK(ptr) \
do { \
if (unlikely((ptr) == K_NULL)) { \
return K_ERR_OBJ_PTR_NULL; \
} \
} while(0)
#define TOS_IN_IRQ_CHECK() \
do { \
if (unlikely(knl_is_inirq())) { \
return K_ERR_IN_IRQ; \
} \
} while(0)
// currently we use the default MicroLIB supplied by MDK
#define tos_kprintf(...) printf(__VA_ARGS__)
#define tos_kprintln(...) \
    do { \
        printf(__VA_ARGS__); \
        printf("\n"); \
    } while (0)
#endif /* _TOS_KLIB_H_ */
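TOS_OFFSET_OF_FIELD and TOS_CONTAINER_OF_FIELD implement the classic offsetof/container_of pattern: given a pointer to a member, recover the enclosing struct. A small sketch with a hypothetical struct:

typedef struct foo_st {
    int id;
    k_list_t link;
} foo_t;

void container_demo(k_list_t *node)
{
    /* step back from the embedded link member to the enclosing foo_t */
    foo_t *foo = TOS_CONTAINER_OF_FIELD(node, foo_t, link);
    tos_kprintln("foo->id = %d", foo->id);
}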

kernel/core/include/tos_ktypes.h Normal file

@@ -0,0 +1,30 @@
#ifndef _TOS_KTYPES_H_
#define _TOS_KTYPES_H_
#include <stdint.h>
typedef uint8_t k_prio_t;
typedef uint8_t k_stack_t;
typedef uint8_t k_task_state_t;
typedef uint8_t k_nesting_t;
typedef uint16_t k_opt_t;
typedef uint16_t k_sem_cnt_t;
typedef uint32_t k_event_flag_t;
typedef uint32_t k_time_t;
typedef uint32_t k_timeslice_t;
typedef uint32_t k_cycle_t;
typedef uint64_t k_tick_t;
#define K_TRUE (1u)
#define K_FALSE (0u)
#ifndef K_NULL
#define K_NULL 0
#endif
#endif /* _TOS_KTYPES_H_ */

kernel/core/include/tos_list.h Normal file

@@ -0,0 +1,103 @@
#ifndef _TOS_LIST_H_
#define _TOS_LIST_H_
typedef struct k_list_node_st {
struct k_list_node_st *next;
struct k_list_node_st *prev;
} k_list_t;
#define TOS_LIST_NODE(node) \
{ &(node), &(node) }
#define TOS_LIST_DEFINE(list) \
k_list_t list = { &(list), &(list) }
#define TOS_LIST_ENTRY(list, type, field) \
TOS_CONTAINER_OF_FIELD(list, type, field)
#define TOS_LIST_FIRST_ENTRY(list, type, field) \
TOS_LIST_ENTRY((list)->next, type, field)
#define TOS_LIST_FIRST_ENTRY_OR_NULL(list, type, field) \
(tos_list_empty(list) ? K_NULL : TOS_LIST_FIRST_ENTRY(list, type, field))
#define TOS_LIST_FOR_EACH(curr, list) \
for (curr = (list)->next; curr != (list); curr = curr->next)
#define TOS_LIST_FOR_EACH_PREV(curr, list) \
for (curr = (list)->prev; curr != (list); curr = curr->prev)
#define TOS_LIST_FOR_EACH_SAFE(curr, next, list) \
for (curr = (list)->next, next = curr->next; curr != (list); \
curr = next, next = curr->next)
#define TOS_LIST_FOR_EACH_PREV_SAFE(curr, next, list) \
for (curr = (list)->prev, next = curr->prev; \
curr != (list); \
curr = next, next = curr->prev)
__STATIC_INLINE__ void _list_add(k_list_t *node, k_list_t *prev, k_list_t *next)
{
next->prev = node;
node->next = next;
node->prev = prev;
prev->next = node;
}
__STATIC_INLINE__ void _list_del(k_list_t *prev, k_list_t *next)
{
next->prev = prev;
prev->next = next;
}
__STATIC_INLINE__ void _list_del_entry(k_list_t *entry)
{
_list_del(entry->prev, entry->next);
}
__API__ __STATIC_INLINE__ void tos_list_init(k_list_t *list)
{
list->next = list;
list->prev = list;
}
__API__ __STATIC_INLINE__ void tos_list_add(k_list_t *node, k_list_t *list)
{
_list_add(node, list, list->next);
}
__API__ __STATIC_INLINE__ void tos_list_add_tail(k_list_t *node, k_list_t *list)
{
_list_add(node, list->prev, list);
}
__API__ __STATIC_INLINE__ void tos_list_del(k_list_t *entry)
{
_list_del(entry->prev, entry->next);
}
__API__ __STATIC_INLINE__ void tos_list_del_init(k_list_t *entry)
{
_list_del_entry(entry);
tos_list_init(entry);
}
__API__ __STATIC_INLINE__ void tos_list_move(k_list_t *node, k_list_t *list)
{
_list_del_entry(node);
tos_list_add(node, list);
}
__API__ __STATIC_INLINE__ void tos_list_move_tail(k_list_t *node, k_list_t *list)
{
_list_del_entry(node);
tos_list_add_tail(node, list);
}
__API__ __STATIC_INLINE__ int tos_list_empty(const k_list_t *list)
{
return list->next == list;
}
#endif /* _TOS_LIST_H_ */
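A sketch of the intrusive list in use: entries embed a k_list_t node, are chained through it, and are recovered with TOS_LIST_ENTRY while walking; the item type is hypothetical:

typedef struct item_st {
    int value;
    k_list_t node;
} item_t;

TOS_LIST_DEFINE(item_list);

void list_demo(void)
{
    static item_t a = { 1, TOS_LIST_NODE(a.node) };
    static item_t b = { 2, TOS_LIST_NODE(b.node) };
    k_list_t *curr;

    tos_list_add_tail(&a.node, &item_list);
    tos_list_add_tail(&b.node, &item_list);
    TOS_LIST_FOR_EACH(curr, &item_list) {
        item_t *it = TOS_LIST_ENTRY(curr, item_t, node);
        tos_kprintln("%d", it->value); /* prints 1, then 2 */
    }
}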

kernel/core/include/tos_mmblk.h Normal file

@@ -0,0 +1,90 @@
#ifndef _TOS_MMBLK_H_
#define _TOS_MMBLK_H_
#if TOS_CFG_MMBLK_EN > 0u
#define K_MMBLK_NEXT_BLK(blk_curr, blk_size) ((void *)((cpu_addr_t)(blk_curr) + (blk_size)))
#define K_MMBLK_ALIGN_MASK (sizeof(void *) - 1u)
typedef struct k_mmblk_pool_st {
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
knl_obj_t knl_obj;
#endif
void *pool_start;
void *free_list;
size_t blk_size;
size_t blk_max;
size_t blk_free;
} k_mmblk_pool_t;
/**
* @brief Create a memory manage block pool.
* Create a memory manage block pool.
*
* @attention None
*
* @param[in] mbp pointer to the memory block pool handler.
* @param[in] pool_start start address of the pool.
* @param[in] blk_num number of the blocks in the pool.
* @param[in] blk_size size of each block in the pool.
*
* @return errcode
* @retval #K_ERR_MMBLK_INVALID_POOL_ADDR start address of the pool is invalid.
* @retval #K_ERR_MMBLK_INVALID_BLK_SIZE size of the block is invalid.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_mmblk_pool_create(k_mmblk_pool_t *mbp, void *pool_start, size_t blk_num, size_t blk_size);
/**
* @brief Destroy a memory manage block pool.
* Destroy a memory manage block pool.
*
* @attention None
*
* @param[in] mbp pointer to the memory block pool handler.
*
* @return errcode
* @retval #K_ERR_OBJ_INVALID mbp is not a valid memory block pool handler.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_mmblk_pool_destroy(k_mmblk_pool_t *mbp);
/**
 * @brief Alloc a memory block.
 *        Alloc one block from the memory manage block pool.
 *
 * @attention None
 *
 * @param[in] mbp pointer to the memory block pool handler.
 * @param[out] blk pointer to receive the address of the allocated block.
 *
 * @return errcode
 * @retval #K_ERR_MMBLK_POOL_EMPTY the pool is empty.
 * @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_mmblk_alloc(k_mmblk_pool_t *mbp, void **blk);
/**
 * @brief Free a memory block.
 *        Give one block back to the memory manage block pool.
 *
 * @attention None
 *
 * @param[in] mbp pointer to the memory block pool handler.
 * @param[in] blk address of the block to be freed.
 *
 * @return errcode
 * @retval #K_ERR_MMBLK_POOL_FULL the pool is full.
 * @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_mmblk_free(k_mmblk_pool_t *mbp, void *blk);
#endif
#endif /* _TOS_MMBLK_H_ */
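A hedged sketch of the fixed-size block pool; the block count and size are illustrative (blk_size is kept a multiple of sizeof(void *) to satisfy K_MMBLK_ALIGN_MASK):

#define DEMO_BLK_NUM 8u
#define DEMO_BLK_SIZE 32u

uint8_t mmblk_buf[DEMO_BLK_NUM * DEMO_BLK_SIZE];
k_mmblk_pool_t demo_mmblk_pool;

void mmblk_demo(void)
{
    void *blk = K_NULL;

    tos_mmblk_pool_create(&demo_mmblk_pool, mmblk_buf, DEMO_BLK_NUM, DEMO_BLK_SIZE);
    if (tos_mmblk_alloc(&demo_mmblk_pool, &blk) == K_ERR_NONE) {
        /* ... use the fixed-size block ... */
        tos_mmblk_free(&demo_mmblk_pool, blk);
    }
}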

kernel/core/include/tos_mmheap.h Normal file

@@ -0,0 +1,207 @@
/*
** Two Level Segregated Fit memory allocator, version 3.1.
** Written by Matthew Conte
** http://tlsf.baisoku.org
**
** Based on the original documentation by Miguel Masmano:
** http://www.gii.upv.es/tlsf/main/docs
**
** This implementation was written to the specification
** of the document, therefore no GPL restrictions apply.
**
** Copyright (c) 2006-2016, Matthew Conte
** All rights reserved.
**
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions are met:
** * Redistributions of source code must retain the above copyright
** notice, this list of conditions and the following disclaimer.
** * Redistributions in binary form must reproduce the above copyright
** notice, this list of conditions and the following disclaimer in the
** documentation and/or other materials provided with the distribution.
** * Neither the name of the copyright holder nor the
** names of its contributors may be used to endorse or promote products
** derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
** WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
** DISCLAIMED. IN NO EVENT SHALL MATTHEW CONTE BE LIABLE FOR ANY
** DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
** (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
** ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _TOS_MMHEAP_H_
#define _TOS_MMHEAP_H_
#if TOS_CFG_MMHEAP_EN > 0u
/**
* log2 of number of linear subdivisions of block sizes. Larger
* values require more memory in the control structure. Values of
* 4 or 5 are typical.
*/
#define K_MMHEAP_SL_INDEX_COUNT_LOG2 5
/* All allocation sizes and addresses are aligned to 4 bytes. */
#define K_MMHEAP_ALIGN_SIZE_LOG2 2
#define K_MMHEAP_ALIGN_SIZE (1 << K_MMHEAP_ALIGN_SIZE_LOG2)
/*
* We support allocations of sizes up to (1 << K_MMHEAP_FL_INDEX_MAX) bits.
* However, because we linearly subdivide the second-level lists, and
* our minimum size granularity is 4 bytes, it doesn't make sense to
* create first-level lists for sizes smaller than K_MMHEAP_SL_INDEX_COUNT * 4,
* or (1 << (K_MMHEAP_SL_INDEX_COUNT_LOG2 + 2)) bytes, as there we will be
* trying to split size ranges into more slots than we have available.
* Instead, we calculate the minimum threshold size, and place all
* blocks below that size into the 0th first-level list.
*/
#define K_MMHEAP_FL_INDEX_MAX 30
#define K_MMHEAP_SL_INDEX_COUNT (1 << K_MMHEAP_SL_INDEX_COUNT_LOG2)
#define K_MMHEAP_FL_INDEX_SHIFT (K_MMHEAP_SL_INDEX_COUNT_LOG2 + K_MMHEAP_ALIGN_SIZE_LOG2)
#define K_MMHEAP_FL_INDEX_COUNT (K_MMHEAP_FL_INDEX_MAX - K_MMHEAP_FL_INDEX_SHIFT + 1)
#define K_MMHEAP_SMALL_BLOCK_SIZE (1 << K_MMHEAP_FL_INDEX_SHIFT)
#define K_MMHEAP_BLOCK_CURR_FREE (1 << 0)
#define K_MMHEAP_BLOCK_PREV_FREE (1 << 1)
#define K_MMHEAP_BLOCK_SIZE_MASK ~(K_MMHEAP_BLOCK_CURR_FREE | K_MMHEAP_BLOCK_PREV_FREE)
#define K_MMHEAP_BLOCK_STATE_MASK (K_MMHEAP_BLOCK_CURR_FREE | K_MMHEAP_BLOCK_PREV_FREE)
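A quick check of the arithmetic behind these constants, with K_MMHEAP_SL_INDEX_COUNT_LOG2 = 5 and 4-byte alignment:

/*
 * K_MMHEAP_SL_INDEX_COUNT   = 1 << 5     = 32 second-level lists per first-level bucket
 * K_MMHEAP_FL_INDEX_SHIFT   = 5 + 2      = 7
 * K_MMHEAP_FL_INDEX_COUNT   = 30 - 7 + 1 = 24 first-level buckets
 * K_MMHEAP_SMALL_BLOCK_SIZE = 1 << 7     = 128 bytes, so every request smaller than
 *                                          128 bytes lands in the 0th first-level list
 */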
/**
* Block structure.
*
* There are several implementation subtleties involved:
* - The prev_phys_block field is only valid if the previous block is free.
* - The prev_phys_block field is actually stored at the end of the
* previous block. It appears at the beginning of this structure only to
* simplify the implementation.
* - The next_free / prev_free fields are only valid if the block is free.
*/
typedef struct mmheap_blk_st {
struct mmheap_blk_st *prev_phys_blk;
size_t size;
struct mmheap_blk_st *next_free;
struct mmheap_blk_st *prev_free;
} mmheap_blk_t;
/**
* A free block must be large enough to store its header minus the size of
* the prev_phys_block field, and no larger than the number of addressable
* bits for FL_INDEX.
*/
#define K_MMHEAP_BLK_SIZE_MIN (sizeof(mmheap_blk_t) - sizeof(mmheap_blk_t *))
#define K_MMHEAP_BLK_SIZE_MAX (1 << K_MMHEAP_FL_INDEX_MAX)
#define K_MMHEAP_BLK_HEADER_OVERHEAD (sizeof(size_t))
#define K_MMHEAP_BLK_START_OFFSET (TOS_OFFSET_OF_FIELD(mmheap_blk_t, size) + sizeof(size_t))
/**
* memory heap control
*/
typedef struct k_mmheap_control_st {
mmheap_blk_t block_null; /**< Empty lists point at this block to indicate they are free. */
uint32_t fl_bitmap; /**< Bitmaps for free lists. */
uint32_t sl_bitmap[K_MMHEAP_FL_INDEX_COUNT];
mmheap_blk_t *blocks[K_MMHEAP_FL_INDEX_COUNT][K_MMHEAP_SL_INDEX_COUNT]; /**< Head of free lists. */
} k_mmheap_ctl_t;
/**
* @brief Add a pool.
 * Add an additional pool to the heap.
*
* @attention None
*
* @param[in] pool_start start address of the pool.
* @param[in] pool_size size of the pool.
*
* @return errcode
* @retval #K_ERR_MMHEAP_INVALID_POOL_ADDR start address of the pool is invalid.
* @retval #K_ERR_MMHEAP_INVALID_POOL_SIZE size of the pool is invalid.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_mmheap_pool_add(void *pool_start, size_t pool_size);
/**
* @brief Remove a pool.
* Remove a pool from the heap.
*
* @attention None
*
* @param[in] pool_start start address of the pool.
*
* @return None
*/
__API__ void tos_mmheap_pool_rmv(void *pool_start);
/**
* @brief Alloc memory.
* Allocate size bytes and returns a pointer to the allocated memory.
*
 * @attention size should be no bigger than K_MMHEAP_BLK_SIZE_MAX.
*
* @param[in] size size of the memory.
*
* @return the pointer to the allocated memory.
*/
__API__ void *tos_mmheap_alloc(size_t size);
__API__ void *tos_mmheap_calloc(size_t num, size_t size);
/**
* @brief Alloc start address aligned memory from the heap.
* Alloc aligned address and specified size memory from the heap.
*
* @attention
*
* @param[in] size size of the memory.
* @param[in] align address align mask of the memory.
*
* @return the pointer to the allocated memory.
*/
__API__ void *tos_mmheap_aligned_alloc(size_t size, size_t align);
/**
* @brief Realloc memory from the heap.
* Change the size of the memory block pointed to by ptr to size bytes.
*
* @attention
* <ul>
 * <li> if ptr is K_NULL, the call is equivalent to tos_mmheap_alloc(size), for all values of size.
 * <li> if size is equal to zero and ptr is not K_NULL, the call is equivalent to tos_mmheap_free(ptr).
* </ul>
*
* @param[in] ptr old pointer to the memory space.
* @param[in] size new size of the memory space.
*
* @return the new pointer to the allocated memory.
*/
__API__ void *tos_mmheap_realloc(void *ptr, size_t size);
/**
* @brief Free the memory.
* Free the memory space pointed to by ptr, which must have been returned by a previous call to tos_mmheap_alloc(), tos_mmheap_aligned_alloc(), or tos_mmheap_realloc().
*
* @attention
*
* @param[in] ptr pointer to the memory.
*
* @return None.
*/
__API__ void tos_mmheap_free(void *ptr);
__KERNEL__ k_err_t mmheap_init(void *pool_start, size_t pool_size);
#endif
#endif /* _TOS_MMHEAP_H_ */
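A hedged usage sketch; it assumes the default pool (k_mmheap_pool, see tos_global.h) has already been registered during kernel init:

void mmheap_demo(void)
{
    int *arr = (int *)tos_mmheap_alloc(16u * sizeof(int));
    if (arr != K_NULL) {
        int *bigger = (int *)tos_mmheap_realloc(arr, 32u * sizeof(int));
        if (bigger != K_NULL) {
            arr = bigger; /* the old pointer must not be used after a successful realloc */
        }
        tos_mmheap_free(arr);
    }
}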

kernel/core/include/tos_msg.h Normal file

@@ -0,0 +1,97 @@
#ifndef _TOS_MSG_H_
#define _TOS_MSG_H_
#define TOS_OPT_MSG_PUT_LIFO (k_opt_t)0x0001
#define TOS_OPT_MSG_PUT_FIFO (k_opt_t)0x0002
typedef struct k_message_st {
k_list_t list;
void *msg_addr;
size_t msg_size;
} k_msg_t;
typedef struct k_msg_queue_st {
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
knl_obj_t knl_obj;
#endif
k_list_t queue_head;
} k_msg_queue_t;
/**
* @brief Create a message queue.
* Initialize a message queue.
*
* @attention None
*
* @param[IN] msg_queue the pointer to the handler of the message queue.
*
* @return errcode.
* @retval #K_ERR_OBJ_PTR_NULL msg_queue is a null pointer
* @retval #K_ERR_NONE return successfully
*/
__API__ k_err_t tos_msg_queue_create(k_msg_queue_t *msg_queue);
/**
* @brief Destroy a message queue.
*
* @attention None
*
* @param[IN] msg_queue the pointer to the handler of the message queue.
*
* @return errcode.
* @retval #K_ERR_OBJ_PTR_NULL msg_queue is a null pointer
* @retval #K_ERR_OBJ_INVALID msg_queue is not a valid pointer to a message queue
* @retval #K_ERR_NONE return successfully
*/
__API__ k_err_t tos_msg_queue_destroy(k_msg_queue_t *msg_queue);
/**
* @brief Get a message.
* Get a message from the queue.
*
* @attention None
*
* @param[IN] msg_queue the pointer to the handler of the message queue.
 * @param[OUT] msg_addr the pointer to receive the address of the message.
 * @param[OUT] msg_size the pointer to receive the size of the message.
*
* @return errcode.
* @retval #K_ERR_QUEUE_EMPTY the queue is empty.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_msg_queue_get(k_msg_queue_t *msg_queue, void **msg_addr, size_t *msg_size);
/**
* @brief Put a message.
* Put a message to the queue.
*
* @attention None
*
* @param[IN] msg_queue the pointer to the handler of the message queue.
 * @param[IN] msg_addr the address of the message.
 * @param[IN] msg_size the size of the message.
* @param[IN] opt option for the function call.
*
* @return errcode.
* @retval #K_ERR_QUEUE_FULL the queue is full.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_msg_queue_put(k_msg_queue_t *msg_queue, void *msg_addr, size_t msg_size, k_opt_t opt);
/**
* @brief Flush all of the messages.
* Flush all of the messages in the queue.
*
* @attention None
*
* @param[IN] msg_queue the pointer to the handler of the message queue.
*
* @return None.
*/
__API__ void tos_msg_queue_flush(k_msg_queue_t *msg_queue);
__KERNEL__ void msgpool_init(void);
#endif /* _TOS_MSG_H_ */

kernel/core/include/tos_mutex.h Normal file

@@ -0,0 +1,97 @@
#ifndef _TOS_MUTEX_H_
#define _TOS_MUTEX_H_
#if TOS_CFG_MUTEX_EN > 0u
typedef struct k_mutex_st {
pend_obj_t pend_obj;
k_nesting_t pend_nesting;
k_task_t *owner;
k_prio_t owner_orig_prio;
k_list_t owner_list;
} k_mutex_t;
/**
* @brief Create a mutex.
* create a mutex.
*
* @attention None
*
* @param[in] mutex pointer to the handler of the mutex.
*
* @return errcode
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_mutex_create(k_mutex_t *mutex);
/**
* @brief Destroy a mutex.
* destroy a mutex.
*
* @attention None
*
* @param[in] mutex pointer to the handler of the mutex.
*
* @return errcode
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_mutex_destroy(k_mutex_t *mutex);
/**
* @brief Pend a mutex.
* pend a mutex.
*
* @attention The task will keep blocked until the mutex is obtained or a timeout comes.
*
* @param[in] mutex pointer to the handler of the mutex.
 * @param[in] timeout how much time (in k_tick_t) we would like to wait.
 *
 * @return errcode
 * @retval #K_ERR_MUTEX_NESTING_OVERFLOW we already own the mutex, and have nested the pend too many times.
 * @retval #K_ERR_MUTEX_NESTING we already own the mutex; this is a nested pend.
 * @retval #K_ERR_PEND_NOWAIT we got nothing, and we chose not to wait.
 * @retval #K_ERR_PEND_SCHED_LOCKED we would wait, but the scheduler is locked.
 * @retval #K_ERR_PEND_TIMEOUT the wait timed out and we got nothing.
 * @retval #K_ERR_PEND_DESTROY the mutex we are pending on has been destroyed.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_mutex_pend_timed(k_mutex_t *mutex, k_tick_t timeout);
/**
* @brief Pend a mutex.
* pend a mutex.
*
* @attention The task will keep blocked until the mutex is obtained.
*
* @param[in] mutex pointer to the handler of the mutex.
*
* @return errcode
 * @retval #K_ERR_MUTEX_NESTING_OVERFLOW we already own the mutex, and have nested the pend too many times.
 * @retval #K_ERR_MUTEX_NESTING we already own the mutex; this is a nested pend.
 * @retval #K_ERR_PEND_SCHED_LOCKED we would wait, but the scheduler is locked.
 * @retval #K_ERR_PEND_DESTROY the mutex we are pending on has been destroyed.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_mutex_pend(k_mutex_t *mutex);
/**
* @brief Post a mutex.
* post a mutex.
*
* @attention None
*
* @param[in] mutex pointer to the handler of the mutex.
*
* @return errcode
 * @retval #K_ERR_MUTEX_NOT_OWNER we are posting a mutex we do not own.
 * @retval #K_ERR_MUTEX_NESTING we are posting a mutex we own, but we are still inside a nested pend.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_mutex_post(k_mutex_t *mutex);
__KERNEL__ void mutex_release(k_mutex_t *mutex);
#endif
#endif /* _TOS_MUTEX_H_ */
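A minimal critical-section sketch with the mutex API above; the shared counter is illustrative:

k_mutex_t demo_mutex; /* tos_mutex_create(&demo_mutex) is assumed to run once during init */
static int shared_counter;

void counter_inc(void)
{
    if (tos_mutex_pend(&demo_mutex) == K_ERR_NONE) {
        ++shared_counter; /* protected region */
        tos_mutex_post(&demo_mutex);
    }
}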

kernel/core/include/tos_pend.h Normal file

@@ -0,0 +1,67 @@
#ifndef _TOS_PEND_H_
#define _TOS_PEND_H_
typedef struct k_task_st k_task_t;
/**
* The reason why we wakeup from a pend.
* when we wakeup, we need to know why.
*/
typedef enum pend_state_en {
PEND_STATE_NONE, /**< nothing. */
PEND_STATE_POST, /**< someone has post, we get what we want. */
PEND_STATE_TIMEOUT, /**< no post ever came before the time ran out. */
PEND_STATE_DESTROY, /**< someone has destroyed what we are pending for. */
PEND_STATE_OWNER_DIE, /**< the pend object owner task is destroyed. */
} pend_state_t;
// what we are pending on
/* actually, this is a kind of magic number, mainly for identifying whether the pend object
 is initialized, and whether the user passed the correct parameter.
*/
typedef enum pend_type_en {
PEND_TYPE_NONE = 0x0000,
PEND_TYPE_SEM = 0x1BEE,
PEND_TYPE_MUTEX = 0x2BEE,
PEND_TYPE_EVENT = 0x3BEE,
PEND_TYPE_QUEUE = 0x4BEE,
} pend_type_t;
typedef enum opt_post_en {
OPT_POST_ONE,
OPT_POST_ALL,
} opt_post_t;
typedef struct pend_object_st {
pend_type_t type;
k_list_t list;
} pend_obj_t;
__KERNEL__ void pend_object_init(pend_obj_t *object, pend_type_t type);
__KERNEL__ void pend_object_deinit(pend_obj_t *object);
__KERNEL__ int pend_object_verify(pend_obj_t *object, pend_type_t type);
__KERNEL__ int pend_is_nopending(pend_obj_t *object);
__KERNEL__ k_prio_t pend_highest_prio_get(pend_obj_t *object);
__KERNEL__ void pend_list_remove(k_task_t *task);
__KERNEL__ void pend_list_adjust(k_task_t *task);
__KERNEL__ k_err_t pend_state2errno(pend_state_t state);
__KERNEL__ void pend_task_wakeup(k_task_t *task, pend_state_t state);
__KERNEL__ void pend_task_block(k_task_t *task, pend_obj_t *object, k_tick_t timeout);
__KERNEL__ void pend_wakeup_one(pend_obj_t *object, pend_state_t state);
__KERNEL__ void pend_wakeup_all(pend_obj_t *object, pend_state_t state);
__KERNEL__ void pend_wakeup(pend_obj_t *object, pend_state_t state, opt_post_t opt);
#endif /* _TOS_PEND_H_ */

kernel/core/include/tos_queue.h Normal file

@@ -0,0 +1,102 @@
#ifndef _TOS_QUEUE_H_
#define _TOS_QUEUE_H_
#if TOS_CFG_QUEUE_EN > 0u
typedef struct k_queue_st {
pend_obj_t pend_obj;
k_msg_queue_t msg_queue;
} k_queue_t;
/**
* @brief Create a queue.
* create a queue.
*
* @attention None
*
* @param[in] queue pointer to the handler of the queue.
*
* @return errcode
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_queue_create(k_queue_t *queue);
/**
* @brief Destroy a queue.
* destroy a queue.
*
* @attention None
*
* @param[in] queue pointer to the handler of the queue.
*
* @return errcode
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_queue_destroy(k_queue_t *queue);
/**
* @brief Flush a queue.
* flush a queue, clear all the msg in the queue.
*
* @attention None
*
* @param[in] queue pointer to the handler of the queue.
*
* @return errcode
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_queue_flush(k_queue_t *queue);
/**
* @brief Pend a queue.
* pend a queue.
*
 * @attention we DO NOT perform a memcpy when msg_addr is returned; we just make *msg_addr point to an inner memory block.
 *            that means you DO NOT need to alloc a buffer for the message; msg_addr can just be a plain pointer.
*
* @param[in] queue pointer to the handler of the queue.
 * @param[out] msg_addr pointer to receive the address of the message.
 * @param[out] msg_size pointer to receive the size of the message.
 * @param[in] timeout how much time (in k_tick_t) we would like to wait.
*
* @return errcode
 * @retval #K_ERR_PEND_NOWAIT we got nothing, and we chose not to wait.
 * @retval #K_ERR_PEND_SCHED_LOCKED we would wait, but the scheduler is locked.
 * @retval #K_ERR_PEND_TIMEOUT the wait timed out and we got nothing.
 * @retval #K_ERR_PEND_DESTROY the queue we are pending on has been destroyed.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_queue_pend(k_queue_t *queue, void **msg_addr, size_t *msg_size, k_tick_t timeout);
/**
* @brief Post a queue.
* post a queue and wakeup one pending task.
*
 * @attention when tos_queue_post returns successfully, only one of the tasks waiting for the queue will be woken up.
*
 * @param[in] queue pointer to the handler of the queue.
 * @param[in] msg_addr address of the message to post.
 * @param[in] msg_size size of the message.
*
* @return errcode
* @retval #K_ERR_QUEUE_FULL the message pool is full.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_queue_post(k_queue_t *queue, void *msg_addr, size_t msg_size);
/**
* @brief Post a queue.
* post a queue and wakeup all the pending task.
*
 * @attention when tos_queue_post_all returns successfully, all of the tasks waiting for the queue will be woken up.
*
 * @param[in] queue pointer to the handler of the queue.
 * @param[in] msg_addr address of the message to post.
 * @param[in] msg_size size of the message.
*
* @return errcode
* @retval #K_ERR_QUEUE_FULL the message pool is full.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_queue_post_all(k_queue_t *queue, void *msg_addr, size_t msg_size);
#endif
#endif /* _TOS_QUEUE_H_ */
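A producer/consumer sketch highlighting the pointer-passing semantics noted above (no memcpy: the consumer receives the address of the poster's buffer); names are illustrative:

k_queue_t demo_queue; /* tos_queue_create(&demo_queue) is assumed to run once during init */
static char payload[] = "hello";

void queue_producer(void)
{
    tos_queue_post(&demo_queue, payload, sizeof(payload));
}

void queue_consumer(void)
{
    void *msg = K_NULL;
    size_t size;

    if (tos_queue_pend(&demo_queue, &msg, &size, (k_tick_t)1000u) == K_ERR_NONE) {
        tos_kprintln("%s", (char *)msg); /* msg points at the poster's payload buffer */
    }
}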

kernel/core/include/tos_robin.h Normal file

@@ -0,0 +1,42 @@
#ifndef _TOS_ROBIN_H_
#define _TOS_ROBIN_H_
#if TOS_CFG_ROUND_ROBIN_EN > 0u
typedef enum k_robin_state_en {
TOS_ROBIN_STATE_ENABLED,
TOS_ROBIN_STATE_DISABLED,
} k_robin_state_t;
/**
* @brief Set time slice.
* Set time slice of a task.
*
* @attention None
*
* @param[in] task pointer to the handler of the task.
 * @param[in] timeslice time slice of the task.
*
* @return None
*/
__API__ void tos_robin_timeslice_set(k_task_t *task, k_timeslice_t timeslice);
/**
* @brief Configure round robin.
* Set the round robin state and the default time slice of the task.
*
* @attention None
*
* @param[in] robin_state state of the round robin.
* @param[in] default_timeslice default time slice of the task.
*
* @return None
*/
__API__ void tos_robin_config(k_robin_state_t robin_state, k_timeslice_t default_timeslice);
__KERNEL__ void robin_sched(k_prio_t prio);
#endif
#endif /* _TOS_ROBIN_H_ */

kernel/core/include/tos_sched.h Normal file

@@ -0,0 +1,35 @@
#ifndef _TOS_SCHED_H_
#define _TOS_SCHED_H_
#define K_PRIO_TBL_SIZE ((TOS_CFG_TASK_PRIO_MAX + 31) / 32)
#define K_PRIO_TBL_SLOT_SIZE (32u)
#define K_PRIO_NDX(prio) ((prio) >> 5u) /* prio / 32u */
#define K_PRIO_BIT(prio) (1u << (K_PRIO_TBL_SLOT_SIZE - 1u - ((prio) & (K_PRIO_TBL_SLOT_SIZE - 1u))))
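A worked example of the bit layout above, assuming prio = 35 (so TOS_CFG_TASK_PRIO_MAX > 35):

/*
 * K_PRIO_NDX(35) = 35 >> 5 = 1                      -> second uint32_t slot of prio_mask
 * K_PRIO_BIT(35) = 1u << (31 - (35 & 31)) = 1u << 28
 * smaller priority numbers map to higher bits, so the highest ready priority can be
 * found with a count-leading-zeros instruction (see TOS_CFG_CPU_LEAD_ZEROS_ASM_PRESENT)
 */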
typedef struct readyqueue_st {
k_list_t task_list_head[TOS_CFG_TASK_PRIO_MAX];
uint32_t prio_mask[K_PRIO_TBL_SIZE];
k_prio_t highest_prio;
} readyqueue_t;
__KERNEL__ void readyqueue_init(void);
__KERNEL__ int readyqueue_is_prio_onlyone(k_prio_t prio);
__KERNEL__ k_task_t *readyqueue_first_task_get(k_prio_t prio);
__KERNEL__ k_task_t *readyqueue_highest_ready_task_get(void);
__KERNEL__ void readyqueue_add_head(k_task_t *task);
__KERNEL__ void readyqueue_add_tail(k_task_t *task);
__KERNEL__ void readyqueue_add(k_task_t *task);
__KERNEL__ void readyqueue_remove(k_task_t *task);
__KERNEL__ void readyqueue_move_head_to_tail(k_prio_t prio);
#endif /* _TOS_SCHED_H_ */

kernel/core/include/tos_sem.h Normal file

@@ -0,0 +1,86 @@
#ifndef _TOS_SEM_H_
#define _TOS_SEM_H_
#if TOS_CFG_SEM_EN > 0u
typedef struct k_sem_st {
pend_obj_t pend_obj;
k_sem_cnt_t count;
} k_sem_t;
/**
* @brief Create a semaphore.
* create a semaphore.
*
* @attention None
*
 * @param[in] sem pointer to the handler of the semaphore.
 * @param[in] init_count initial count of the semaphore.
*
* @return errcode
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_sem_create(k_sem_t *sem, k_sem_cnt_t init_count);
/**
* @brief Destroy a semaphore.
* destroy a semaphore.
*
* @attention None
*
 * @param[in] sem pointer to the handler of the semaphore.
*
* @return errcode
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_sem_destroy(k_sem_t *sem);
/**
* @brief Pend a semaphore.
* pend a semaphore.
*
* @attention None
*
* @param[in] sem pointer to the handler of the semaphore.
 * @param[in] timeout how much time (in k_tick_t) we would like to wait.
 *
 * @return errcode
 * @retval #K_ERR_PEND_NOWAIT we got nothing, and we chose not to wait.
 * @retval #K_ERR_PEND_SCHED_LOCKED we would wait, but the scheduler is locked.
 * @retval #K_ERR_PEND_TIMEOUT the wait timed out and we got nothing.
 * @retval #K_ERR_PEND_DESTROY the semaphore we are pending on has been destroyed.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_sem_pend(k_sem_t *sem, k_tick_t timeout);
/**
* @brief Post a semaphore.
* post a semaphore and wakeup one pending task.
*
 * @attention when tos_sem_post returns successfully, only one of the tasks waiting for the semaphore will be woken up.
*
* @param[in] sem pointer to the handler of the semaphore.
*
* @return errcode
 * @retval #K_ERR_SEM_OVERFLOW the semaphore count would overflow (too many posts).
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_sem_post(k_sem_t *sem);
/**
* @brief Post a semaphore.
* post a semaphore and wakeup all the pending task.
*
 * @attention when tos_sem_post_all returns successfully, all of the tasks waiting for the semaphore will be woken up.
*
* @param[in] sem pointer to the handler of the semaphore.
*
* @return errcode
 * @retval #K_ERR_SEM_OVERFLOW the semaphore count would overflow (too many posts).
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_sem_post_all(k_sem_t *sem);
#endif
#endif /* _TOS_SEM_H_ */
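A producer/consumer sketch with the semaphore API above; TOS_TIME_FOREVER is the wait-forever timeout referenced elsewhere in the kernel (declared in tos_time.h, not shown here):

k_sem_t demo_sem; /* tos_sem_create(&demo_sem, (k_sem_cnt_t)0u) is assumed to run once during init */

void sem_producer(void)
{
    tos_sem_post(&demo_sem); /* wake exactly one waiter */
}

void sem_consumer(void)
{
    if (tos_sem_pend(&demo_sem, TOS_TIME_FOREVER) == K_ERR_NONE) {
        /* one unit of work is available */
    }
}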

kernel/core/include/tos_sys.h Normal file

@@ -0,0 +1,143 @@
#ifndef _TOS_SYS_H_
#define _TOS_SYS_H_
#define K_NESTING_LIMIT_IRQ (k_nesting_t)250u
#define K_NESTING_LIMIT_SCHED_LOCK (k_nesting_t)250u
typedef enum knl_state_en {
KNL_STATE_STOPPED,
KNL_STATE_RUNNING,
} knl_state_t;
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
// some kind of magic number, mainly for identifying whether the object is initialized, and whether the user passed the correct parameter.
typedef enum knl_obj_type_en {
KNL_OBJ_TYPE_NONE = 0x0000,
KNL_OBJ_TYPE_TASK = 0xDAD1,
KNL_OBJ_TYPE_TIMER = 0xDAD2,
KNL_OBJ_TYPE_MSG_QUEUE = 0xDAD4,
KNL_OBJ_TYPE_MMBLK_POOL = 0xDAD8,
KNL_OBJ_TYPE_FIFO = 0xDAE1,
} knl_obj_type_t;
typedef struct knl_object_st {
knl_obj_type_t type;
} knl_obj_t;
#endif
/**
* @brief Initialize the kernel.
* initialize the tos tiny kernel.
*
* @attention None
*
* @param None
*
* @return errcode
* @retval Non-#K_ERR_NONE return failed.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_knl_init(void);
/**
* @brief Start the kernel.
* get the kernel start to run, which means start the multitask scheduling.
*
* @attention None
*
* @param None
*
* @return errcode
* @retval #K_ERR_KNL_RUNNING the kernel is already running.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_knl_start(void);
/**
* @brief Get the kernel state.
* whether the kernel is running.
*
* @attention None
*
* @param None
*
* @return whether the kernel is running.
* @retval Non-0 the kernel is running.
* @retval 0 the kernel is not running.
*/
__API__ int tos_knl_is_running(void);
/**
* @brief Kernel enter the interrupt.
 * this function should be called at the entrance of an interrupt handler.
*
* @attention None
*
* @param None
*
* @return None
*/
__API__ void tos_knl_irq_enter(void);
/**
* @brief Kernel exit the interrupt.
 * this function should be called at the exit of an interrupt handler.
*
* @attention None
*
* @param None
*
* @return None
*/
__API__ void tos_knl_irq_leave(void);
/**
* @brief Lock the scheduler.
* prevent the kernel from performing task schedule.
*
* @attention None
*
* @param None
*
* @return errcode
* @retval K_ERR_KNL_NOT_RUNNING the kernel is not running.
 * @retval K_ERR_LOCK_NESTING_OVERFLOW the schedule lock nesting overflowed.
* @retval K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_knl_sched_lock(void);
/**
* @brief Unlock the scheduler.
* re-enable the task schedule.
*
* @attention None
*
* @param None
*
* @return errcode
* @retval K_ERR_KNL_NOT_RUNNING the kernel is not running.
* @retval K_ERR_SCHED_NOT_LOCKED the scheduler is not locked.
* @retval K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_knl_sched_unlock(void);
#if TOS_CFG_TICKLESS_EN > 0u
__KERNEL__ k_tick_t knl_next_expires_get(void);
#endif
__KERNEL__ void knl_sched(void);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
__KERNEL__ int knl_object_verify(knl_obj_t *object, knl_obj_type_t type);
__KERNEL__ int knl_object_init(knl_obj_t *object, knl_obj_type_t type);
__KERNEL__ int knl_object_deinit(knl_obj_t *object);
#endif
__KERNEL__ int knl_is_sched_locked(void);
__KERNEL__ int knl_is_inirq(void);
__KERNEL__ int knl_is_idle(k_task_t *task);
__KERNEL__ int knl_is_self(k_task_t *task);
__KERNEL__ k_err_t knl_idle_init(void);
#endif /* _TOS_SYS_H_ */
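The irq enter/leave pair above brackets interrupt handlers so the kernel can defer any context switch until the outermost ISR exits; a hypothetical handler (the vector name is illustrative):

void EXTI0_IRQHandler(void)
{
    tos_knl_irq_enter();
    /* ... handle the hardware event, e.g. post a semaphore to a waiting task ... */
    tos_knl_irq_leave(); /* may trigger a context switch on exit from the outermost ISR */
}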

kernel/core/include/tos_task.h Normal file

@@ -0,0 +1,311 @@
#ifndef _TOS_TASK_H_
#define _TOS_TASK_H_
// task state is just a flag, indicating which manager list we are in.
// ready to schedule
// a task's pend_list is in readyqueue
#define K_TASK_STATE_READY (k_task_state_t)0x0000
// delayed, or pend for a timeout
// a task's tick_list is in k_tick_list
#define K_TASK_STATE_SLEEP (k_task_state_t)0x0001
// pend for something
// a task's pend_list is in some pend object's list
#define K_TASK_STATE_PEND (k_task_state_t)0x0002
// suspended
#define K_TASK_STATE_SUSPENDED (k_task_state_t)0x0004
// deleted
#define K_TASK_STATE_DELETED (k_task_state_t)0x0008
// actually we don't really need the TASK_STATEs below; if you understand the task states deeply, the code can be much more elegant.
// we are pending, and also waiting for a timeout (e.g. tos_sem_pend with a valid timeout, not TOS_TIME_FOREVER)
// both the task's tick_list and pend_list are non-empty
#define K_TASK_STATE_PENDTIMEOUT (k_task_state_t)(K_TASK_STATE_PEND | K_TASK_STATE_SLEEP)
// suspended when sleeping
#define K_TASK_STATE_SLEEP_SUSPENDED (k_task_state_t)(K_TASK_STATE_SLEEP | K_TASK_STATE_SUSPENDED)
// suspended when pending
#define K_TASK_STATE_PEND_SUSPENDED (k_task_state_t)(K_TASK_STATE_PEND | K_TASK_STATE_SUSPENDED)
// suspended when pendtimeout
#define K_TASK_STATE_PENDTIMEOUT_SUSPENDED (k_task_state_t)(K_TASK_STATE_PENDTIMEOUT | K_TASK_STATE_SUSPENDED)
// if you configure TOS_CFG_TASK_PRIO_MAX as 10, the priority range for the kernel is [0, 9]
// priority 9 (TOS_CFG_TASK_PRIO_MAX - 1) is reserved for the idle task, so the priority range available to you is [0, 8]
#define K_TASK_PRIO_IDLE (k_prio_t)(TOS_CFG_TASK_PRIO_MAX - (k_prio_t)1u)
#define K_TASK_PRIO_INVALID (k_prio_t)(TOS_CFG_TASK_PRIO_MAX)
typedef void (*k_task_entry_t)(void *arg);
/**
* task control block
*/
typedef struct k_task_st {
k_stack_t *sp; /**< task stack pointer. This lady always comes first; we use her in port_s.S for context switch. */
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
knl_obj_t knl_obj; /**< just for verification, test whether current object is really a task. */
#endif
char *name; /**< task name */
k_task_entry_t entry; /**< task entry */
void *arg; /**< argument for task entry */
k_task_state_t state; /**< just state */
k_prio_t prio; /**< just priority */
k_stack_t *stk_base; /**< task stack base address */
size_t stk_size; /**< stack size of the task */
k_tick_t tick_expires; /**< if we are in k_tick_list, how much time will we wait for? */
k_list_t tick_list; /**< list for hooking us to the k_tick_list */
k_list_t pend_list; /**< when we are ready, our pend_list is in readyqueue; when pend, in a certain pend object's list. */
#if TOS_CFG_MUTEX_EN > 0u
k_list_t mutex_own_list; /**< the list holding all the mutexes we own.
 when we die (tos_task_destroy), we have an obligation to wake up all the tasks pending on the mutexes we own;
 otherwise, those pending tasks may never get a chance to wake up. */
k_prio_t prio_pending; /**< when tos_task_prio_change is called, we may be the owner of a mutex.
 to avoid PRIORITY INVERSION, we must make sure our priority is higher than that of anyone pending on
 the mutex we hold. so, if the prio_new of tos_task_prio_change is not appropriate
 (it may cause priority inversion), we just record prio_new here and do the real priority
 change at the right time (mutex_old_owner_release) later. */
#endif
pend_obj_t *pending_obj; /**< if we are pending, which pend object's list we are in? */
pend_state_t pend_state; /**< why we wakeup from a pend */
#if TOS_CFG_ROUND_ROBIN_EN > 0u
k_timeslice_t timeslice_reload; /**< if current time slice is used up, use time_slice_reload to reload our time slice */
k_timeslice_t timeslice; /**< how much time slice left for us? */
#endif
#if TOS_CFG_MSG_EN > 0u
void *msg_addr; /**< if we pend a queue successfully, our msg_addr and msg_size will be set by the queue poster */
size_t msg_size;
#endif
#if TOS_CFG_EVENT_EN > 0u
k_opt_t opt_event_pend; /**< if we are pending an event, what's the option for the pending(TOS_OPT_EVENT_PEND_*)? */
k_event_flag_t flag_expect; /**< if we are pending an event, what event flags are we pending for? */
k_event_flag_t *flag_match; /**< if we pend an event successfully, flag_match will be set by the event poster, and will be returned
by tos_event_pend to the caller */
#endif
} k_task_t;
/**
* @brief Create a task.
* create a task.
*
* @attention None
*
* @param[in] task pointer to the handler of the task.
* @param[in] name name of the task.
* @param[in] entry running entry of the task.
* @param[in] arg argument for the entry of the task.
* @param[in] prio priority of the task.
* @param[in] stk_base stack base address of the task.
* @param[in] stk_size stack size of the task.
* @param[in] timeslice time slice of the task.
*
* @return errcode
* @retval #K_ERR_TASK_STK_SIZE_INVALID stack size is invalid.
* @retval #K_ERR_TASK_PRIO_INVALID priority is invalid.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_task_create(k_task_t *task,
char *name,
k_task_entry_t entry,
void *arg,
k_prio_t prio,
k_stack_t *stk_base,
size_t stk_size,
k_timeslice_t timeslice);
/**
* @brief Destroy a task.
* delete a task.
*
* @attention None
*
* @param[in] task pointer to the handler of the task to be deleted.
*
* @return errcode
* @retval #K_ERR_TASK_DESTROY_IDLE attempt to destroy idle task.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_task_destroy(k_task_t *task);
/**
* @brief Delay current task for ticks.
* Delay for a specified amount of ticks.
*
* @attention None
*
* @param[in] delay amount of ticks to delay.
*
* @return errcode
* @retval #K_ERR_DELAY_ZERO delay is zero.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_task_delay(k_tick_t delay);
/**
* @brief Resume task from delay.
* Resume a delayed task from delay.
*
* @attention None
*
* @param[in] task the pointer to the handler of the task.
*
* @return errcode
* @retval #K_ERR_TASK_NOT_DELAY task is not delayed.
* @retval #K_ERR_TASK_SUSPENDED task is suspended.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_task_delay_abort(k_task_t *task);
/**
* @brief Suspend a task.
* Bring a task to sleep.
*
* @attention None
*
 * @param[in] task pointer to the handler of the task to be suspended.
*
* @return errcode
* @retval #K_ERR_TASK_SUSPEND_IDLE attempt to suspend idle task.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_task_suspend(k_task_t *task);
/**
* @brief Resume a task.
* Bring a task to run.
*
* @attention None
*
 * @param[in] task pointer to the handler of the task to be resumed.
*
* @return errcode
* @retval #K_ERR_TASK_RESUME_SELF attempt to resume self-task.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_task_resume(k_task_t *task);
/**
* @brief Change task priority.
 * Change the priority of a task.
*
* @attention None
*
 * @param[in] task pointer to the handler of the task whose priority is to be changed.
* @param[in] prio_new new priority.
*
* @return errcode
* @retval #K_ERR_TASK_PRIO_INVALID new priority is invalid.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_task_prio_change(k_task_t *task, k_prio_t prio_new);
/**
 * @brief Yield the cpu.
 * Give up the cpu for the rest of this time slice, letting other ready tasks of the same priority run.
*
* @attention None
*
* @param None
*
* @return None
*/
__API__ void tos_task_yield(void);
#if TOS_CFG_TASK_STACK_DRAUGHT_DEPTH_DETACT_EN > 0u
/**
* @brief Get the maximum stack draught depth of a task.
*
* @attention None
*
* @param[in] task pointer to the handler of the task.
* @param[out] depth task stack draught depth.
*
* @return errcode
* @retval #K_ERR_NONE get depth successfully.
 * @retval #K_ERR_TASK_STK_OVERFLOW the task stack has overflowed.
*/
__API__ k_err_t tos_task_stack_draught_depth(k_task_t *task, int *depth);
#endif
__KERNEL__ __STATIC_INLINE__ int task_state_is_ready(k_task_t *task)
{
return task->state == K_TASK_STATE_READY;
}
__KERNEL__ __STATIC_INLINE__ int task_state_is_sleeping(k_task_t *task)
{
return task->state & K_TASK_STATE_SLEEP;
}
__KERNEL__ __STATIC_INLINE__ int task_state_is_pending(k_task_t *task)
{
return task->state & K_TASK_STATE_PEND;
}
__KERNEL__ __STATIC_INLINE__ int task_state_is_suspended(k_task_t *task)
{
return task->state & K_TASK_STATE_SUSPENDED;
}
__KERNEL__ __STATIC_INLINE__ void task_state_reset_pending(k_task_t *task)
{
task->state &= ~K_TASK_STATE_PEND;
}
__KERNEL__ __STATIC_INLINE__ void task_state_reset_sleeping(k_task_t *task)
{
task->state &= ~K_TASK_STATE_SLEEP;
}
__KERNEL__ __STATIC_INLINE__ void task_state_reset_suspended(k_task_t *task)
{
task->state &= ~K_TASK_STATE_SUSPENDED;
}
__KERNEL__ __STATIC_INLINE__ void task_state_set_suspended(k_task_t *task)
{
task->state |= K_TASK_STATE_SUSPENDED;
}
__KERNEL__ __STATIC_INLINE__ void task_state_set_pend(k_task_t *task)
{
task->state |= K_TASK_STATE_PEND;
}
__KERNEL__ __STATIC_INLINE__ void task_state_set_ready(k_task_t *task)
{
task->state = K_TASK_STATE_READY;
}
__KERNEL__ __STATIC_INLINE__ void task_state_set_deleted(k_task_t *task)
{
task->state = K_TASK_STATE_DELETED;
}
__KERNEL__ __STATIC_INLINE__ void task_state_set_sleeping(k_task_t *task)
{
task->state |= K_TASK_STATE_SLEEP;
}
#endif /* _TOS_TASK_H_ */

27
kernel/core/include/tos_tick.h Normal file

@@ -0,0 +1,27 @@
#ifndef _TOS_TICK_H_
#define _TOS_TICK_H_
/**
* @brief Systick interrupt handler.
* systick interrupt handler.
*
* @attention called from the systick interrupt entrance.
*
* @param None
*
* @return None
*/
__API__ void tos_tick_handler(void);
__KERNEL__ void tick_update(k_tick_t tick);
__KERNEL__ k_err_t tick_list_add(k_task_t *task, k_tick_t timeout);
__KERNEL__ void tick_list_remove(k_task_t *task);
#if TOS_CFG_TICKLESS_EN > 0u
__KERNEL__ k_tick_t tick_next_expires_get(void);
#endif
#endif /* _TOS_TICK_H_ */

93
kernel/core/include/tos_time.h Normal file

@@ -0,0 +1,93 @@
#ifndef _TOS_TIME_H_
#define _TOS_TIME_H_
// if you wanna pend for something forever, use TOS_TIME_FOREVER
#define TOS_TIME_FOREVER (k_tick_t)(-1)
// if you don't wanna wait when you pend nothing, use TOS_TIME_NOWAIT
#define TOS_TIME_NOWAIT (k_tick_t)0u
// those two are not for you, for kernel only.
#define K_TIME_MILLISEC_PER_SEC 1000u
#define K_TIME_MAX (k_tick_t)(TOS_TIME_FOREVER - 1)
/**
* @brief Get system tick.
* Get the number of ticks since boot.
*
* @attention None
*
* @param None
*
* @return tick count since boot
*/
__API__ k_tick_t tos_systick_get(void);
/**
* @brief Set system tick.
* Set the number of ticks.
*
* @attention None
*
* @param tick systick count to set
*
 * @return None
*/
__API__ void tos_systick_set(k_tick_t tick);
/**
* @brief Convert ticks to milliseconds.
* Convert tick to millisecond.
*
* @attention None
*
* @param[in] tick tick to convert.
*
 * @return milliseconds equivalent to the given ticks.
*/
__API__ k_time_t tos_tick2millisec(k_tick_t tick);
/**
* @brief Convert milliseconds to ticks.
* Convert milliseconds to ticks.
*
* @attention None
*
* @param[in] millisec millisecond to convert.
*
 * @return ticks equivalent to the given milliseconds.
*/
__API__ k_tick_t tos_millisec2tick(k_time_t millisec);
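/*
 * Editorial note: the conversion factor is TOS_CFG_CPU_TICK_PER_SECOND.
 * Assuming a 1000Hz tick (1ms period, an assumption for this example),
 * tos_millisec2tick(500) == 500; with a 100Hz tick (10ms period),
 * tos_millisec2tick(500) == 50 and tos_tick2millisec(50) == 500.
 */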
/**
* @brief Sleep current task.
 * Sleep for a specified number of milliseconds.
*
* @attention None
*
 * @param[in] millisec number of milliseconds to sleep.
*
* @return errcode
* @retval #K_ERR_DELAY_ZERO millisec is zero.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_sleep_ms(k_time_t millisec);
/**
* @brief Sleep current task.
* Sleep for a specified amount of time.
*
* @attention None
*
 * @param[in] hour number of hours.
 * @param[in] minute number of minutes.
 * @param[in] second number of seconds.
 * @param[in] millisec number of milliseconds.
*
* @return errcode
* @retval #K_ERR_DELAY_ZERO time is zero.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_sleep_hmsm(k_time_t hour, k_time_t minute, k_time_t second, k_time_t millisec);
#endif /* _TOS_TIME_H_ */

136
kernel/core/include/tos_timer.h Normal file

@@ -0,0 +1,136 @@
#ifndef _TOS_TIMER_H_
#define _TOS_TIMER_H_
#if TOS_CFG_TIMER_EN > 0u
// if we just want the timer to run only once, this option should be passed to tos_timer_create.
#define TOS_OPT_TIMER_ONESHOT (k_opt_t)(0x0001u)
// if we want the timer run periodically, this option should be passed to tos_timer_create.
#define TOS_OPT_TIMER_PERIODIC (k_opt_t)(0x0002u)
/**
* state for timer
*/
typedef enum timer_state_en {
TIMER_STATE_UNUSED, /**< the timer has been destroyed */
    TIMER_STATE_STOPPED, /**< the timer has been created but not started, or has just been stopped (tos_timer_stop) */
    TIMER_STATE_RUNNING, /**< the timer has been created and started */
    TIMER_STATE_COMPLETED /**< the timer has expired; this can only happen when the timer's opt is TOS_OPT_TIMER_ONESHOT */
} timer_state_t;
// callback function type for a timer
typedef void (*k_timer_callback_t)(void *arg);
/**
* timer control block
*/
typedef struct k_timer_st {
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
knl_obj_t knl_obj; /**< just for verification, test whether current object is really a timer */
#endif
k_timer_callback_t cb; /**< callback when time is up */
void *cb_arg; /**< argument for callback */
k_list_t list; /**< list for hooking us to the k_tick_list */
k_tick_t expires; /**< how much time left until time expires */
k_tick_t delay; /**< how much time from now to begin the first run of the timer */
    k_tick_t            period; /**< after the timer expires, how long to wait before starting the next round */
k_opt_t opt; /**< option for the timer, see TOS_OPT_TIMER_* */
timer_state_t state; /**< state for the timer, see TIMER_STATE_* */
} k_timer_t;
typedef struct timer_control_st {
k_tick_t next_expires;
k_list_t list;
} timer_ctl_t;
/**
* @brief Create a timer.
* Create a timer.
*
 * @attention I don't think a timer needs a name. If you do, help yourself.
*
* @param[in] tmr pointer to the handler of the timer.
 * @param[in] delay ticks to wait before the timer runs for the first time.
 * @param[in] period interval, in ticks, between successive runs of the timer.
* @param[in] callback callback function called when the timer expires.
* @param[in] cb_arg argument for the callback.
* @param[in] opt option for the function call.
*
* @return errcode
* @retval #K_ERR_TIMER_INVALID_PERIOD period is invalid.
* @retval #K_ERR_TIMER_INVALID_DELAY delay is invalid.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_timer_create(k_timer_t *tmr, k_tick_t delay, k_tick_t period,
k_timer_callback_t callback, void *cb_arg, k_opt_t opt);
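/*
 * Editorial usage sketch (not part of this commit): a periodic timer that
 * first fires after 10 ticks, then every 50 ticks. Names and tick counts
 * are illustrative assumptions.
 *
 *     k_timer_t demo_tmr;
 *
 *     void demo_tmr_cb(void *arg)
 *     {
 *         (void)arg;
 *         // runs each time the period expires
 *     }
 *
 *     tos_timer_create(&demo_tmr, 10u, 50u, demo_tmr_cb, K_NULL,
 *                      TOS_OPT_TIMER_PERIODIC);
 *     tos_timer_start(&demo_tmr);
 */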
/**
* @brief Delete a timer.
* Delete the timer.
*
* @attention None
*
* @param[in] tmr pointer to the handler of the timer.
*
* @return errcode
* @retval #K_ERR_TIMER_INACTIVE the timer is not active yet.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_timer_destroy(k_timer_t *tmr);
/**
* @brief Start a timer.
* Start the timer to run.
*
* @attention None
*
* @param[in] tmr pointer to the handler of the timer.
*
* @return errcode
* @retval #K_ERR_TIMER_INACTIVE the timer is not active yet.
* @retval #K_ERR_TIMER_INVALID_STATE state of the timer is invalid.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_timer_start(k_timer_t *tmr);
/**
* @brief Stop a timer.
* Stop the timer from running.
*
* @attention None
*
* @param[in] tmr pointer to the handler of the timer.
*
* @return errcode
* @retval #K_ERR_TIMER_INACTIVE the timer is not active yet.
 * @retval #K_ERR_TIMER_STOPPED the timer is already stopped.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_timer_stop(k_timer_t *tmr);
#if TOS_CFG_TIMER_AS_PROC > 0u
/**
* @brief Timer update function.
 * When the timer module is configured to run as a procedure (TOS_CFG_TIMER_AS_PROC) rather than a task, this is the driving entry.
*
* @attention None
*
* @param None
*
* @return None
*/
__KERNEL__ void timer_update(void);
#endif
__KERNEL__ k_err_t timer_init(void);
__KERNEL__ k_tick_t timer_next_expires_get(void);
#endif
#endif /* _TOS_TIMER_H_ */

168
kernel/core/tos_event.c Normal file

@@ -0,0 +1,168 @@
#include "tos.h"
#if TOS_CFG_EVENT_EN > 0
__API__ k_err_t tos_event_create(k_event_t *event, k_event_flag_t init_flag)
{
TOS_PTR_SANITY_CHECK(event);
pend_object_init(&event->pend_obj, PEND_TYPE_EVENT);
event->flag = init_flag;
return K_ERR_NONE;
}
__API__ k_err_t tos_event_destroy(k_event_t *event)
{
TOS_CPU_CPSR_ALLOC();
TOS_PTR_SANITY_CHECK(event);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!pend_object_verify(&event->pend_obj, PEND_TYPE_EVENT)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
if (!pend_is_nopending(&event->pend_obj)) {
pend_wakeup_all(&event->pend_obj, PEND_STATE_DESTROY);
}
pend_object_deinit(&event->pend_obj);
event->flag = (k_event_flag_t)0u;
TOS_CPU_INT_ENABLE();
knl_sched();
return K_ERR_NONE;
}
__STATIC__ int event_is_match(k_event_flag_t event, k_event_flag_t flag_expect, k_event_flag_t *flag_match, k_opt_t opt_pend)
{
if (opt_pend & TOS_OPT_EVENT_PEND_ALL) {
if ((event & flag_expect) == flag_expect) {
*flag_match = flag_expect;
return K_TRUE;
}
} else if (opt_pend & TOS_OPT_EVENT_PEND_ANY) {
if (event & flag_expect) {
*flag_match = event & flag_expect;
return K_TRUE;
}
}
return K_FALSE;
}
__API__ k_err_t tos_event_pend(k_event_t *event, k_event_flag_t flag_expect, k_event_flag_t *flag_match, k_tick_t timeout, k_opt_t opt_pend)
{
TOS_CPU_CPSR_ALLOC();
TOS_PTR_SANITY_CHECK(event);
TOS_PTR_SANITY_CHECK(flag_match);
TOS_IN_IRQ_CHECK();
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!pend_object_verify(&event->pend_obj, PEND_TYPE_EVENT)) {
return K_ERR_OBJ_INVALID;
}
#endif
if (!(opt_pend & TOS_OPT_EVENT_PEND_ALL) && !(opt_pend & TOS_OPT_EVENT_PEND_ANY)) {
return K_ERR_EVENT_PEND_OPT_INVALID;
}
if ((opt_pend & TOS_OPT_EVENT_PEND_ALL) && (opt_pend & TOS_OPT_EVENT_PEND_ANY)) {
return K_ERR_EVENT_PEND_OPT_INVALID;
}
TOS_CPU_INT_DISABLE();
if (event_is_match(event->flag, flag_expect, flag_match, opt_pend)) {
        if (opt_pend & TOS_OPT_EVENT_PEND_CLR) { // destroy the bridge after getting across the river
event->flag = (k_event_flag_t)0u;
}
TOS_CPU_INT_ENABLE();
return K_ERR_NONE;
}
if (timeout == TOS_TIME_NOWAIT) {
TOS_CPU_INT_ENABLE();
return K_ERR_PEND_NOWAIT;
}
if (knl_is_sched_locked()) {
TOS_CPU_INT_ENABLE();
return K_ERR_PEND_SCHED_LOCKED;
}
k_curr_task->flag_expect = flag_expect;
k_curr_task->flag_match = flag_match;
k_curr_task->opt_event_pend = opt_pend;
pend_task_block(k_curr_task, &event->pend_obj, timeout);
TOS_CPU_INT_ENABLE();
knl_sched();
k_curr_task->flag_expect = (k_event_flag_t)0u;
k_curr_task->flag_match = (k_event_flag_t *)K_NULL;
k_curr_task->opt_event_pend = (k_opt_t)0u;
return pend_state2errno(k_curr_task->pend_state);
}
__STATIC__ k_err_t event_do_post(k_event_t *event, k_event_flag_t flag, opt_event_post_t opt_post)
{
TOS_CPU_CPSR_ALLOC();
k_task_t *task;
k_list_t *curr, *next;
TOS_PTR_SANITY_CHECK(event);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!pend_object_verify(&event->pend_obj, PEND_TYPE_EVENT)) {
return K_ERR_OBJ_INVALID;
}
#endif
if (opt_post == OPT_EVENT_POST_KEP) {
event->flag |= flag;
} else {
event->flag = flag;
}
TOS_CPU_INT_DISABLE();
TOS_LIST_FOR_EACH_SAFE(curr, next, &event->pend_obj.list) {
task = TOS_LIST_ENTRY(curr, k_task_t, pend_list);
if (event_is_match(event->flag, task->flag_expect, task->flag_match, task->opt_event_pend)) {
pend_task_wakeup(TOS_LIST_ENTRY(curr, k_task_t, pend_list), PEND_STATE_POST);
            // if anyone pending on the event has set TOS_OPT_EVENT_PEND_CLR, then no wakeup for the others pending on the event.
if (task->opt_event_pend & TOS_OPT_EVENT_PEND_CLR) {
event->flag = (k_event_flag_t)0u;
break;
}
}
}
TOS_CPU_INT_ENABLE();
knl_sched();
return K_ERR_NONE;
}
__API__ k_err_t tos_event_post(k_event_t *event, k_event_flag_t flag)
{
return event_do_post(event, flag, OPT_EVENT_POST_CLR);
}
__API__ k_err_t tos_event_post_keep(k_event_t *event, k_event_flag_t flag)
{
return event_do_post(event, flag, OPT_EVENT_POST_KEP);
}
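/*
 * Editorial usage sketch (not part of this commit). The flag bits are
 * arbitrary; the waiter blocks until either bit is posted, and
 * TOS_OPT_EVENT_PEND_CLR clears the event once it is consumed.
 *
 *     #define DEMO_EVENT_RX   (k_event_flag_t)(1u << 0)
 *     #define DEMO_EVENT_TX   (k_event_flag_t)(1u << 1)
 *     k_event_t demo_event;   // tos_event_create(&demo_event, 0u) at init
 *
 *     void demo_waiter(void)
 *     {
 *         k_event_flag_t match;
 *         if (tos_event_pend(&demo_event, DEMO_EVENT_RX | DEMO_EVENT_TX,
 *                            &match, TOS_TIME_FOREVER,
 *                            TOS_OPT_EVENT_PEND_ANY | TOS_OPT_EVENT_PEND_CLR) == K_ERR_NONE) {
 *             // match holds whichever bits were actually posted
 *         }
 *     }
 *
 *     // poster side: tos_event_post(&demo_event, DEMO_EVENT_RX);
 */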
#endif

196
kernel/core/tos_fifo.c Normal file

@@ -0,0 +1,196 @@
#include "tos.h"
__STATIC_INLINE__ int fifo_next(k_fifo_t *fifo, int index)
{
return (index + 1) % fifo->siz;
}
__API__ k_err_t tos_fifo_create(k_fifo_t *fifo, uint8_t *buffer, size_t size)
{
TOS_PTR_SANITY_CHECK(fifo);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
knl_object_init(&fifo->knl_obj, KNL_OBJ_TYPE_FIFO);
#endif
fifo->beg = 0;
fifo->end = 0;
fifo->cnt = 0;
fifo->buf = buffer;
fifo->siz = size;
return K_ERR_NONE;
}
__API__ k_err_t tos_fifo_destroy(k_fifo_t *fifo)
{
TOS_PTR_SANITY_CHECK(fifo);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!knl_object_verify(&fifo->knl_obj, KNL_OBJ_TYPE_FIFO)) {
return K_ERR_OBJ_INVALID;
}
#endif
fifo->beg = 0;
fifo->end = 0;
fifo->cnt = 0;
fifo->buf = K_NULL;
fifo->siz = 0;
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
knl_object_deinit(&fifo->knl_obj);
#endif
return K_ERR_NONE;
}
__API__ k_err_t tos_fifo_push(k_fifo_t *fifo, uint8_t data)
{
TOS_CPU_CPSR_ALLOC();
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!knl_object_verify(&fifo->knl_obj, KNL_OBJ_TYPE_FIFO)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
if (tos_fifo_is_full(fifo)) {
TOS_CPU_INT_ENABLE();
return K_ERR_FIFO_FULL;
}
fifo->buf[fifo->end] = data;
fifo->end = fifo_next(fifo, fifo->end);
++fifo->cnt;
TOS_CPU_INT_ENABLE();
return K_ERR_NONE;
}
__API__ int tos_fifo_push_stream(k_fifo_t *fifo, uint8_t *stream, size_t size)
{
TOS_CPU_CPSR_ALLOC();
int i = 0;
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!knl_object_verify(&fifo->knl_obj, KNL_OBJ_TYPE_FIFO)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
while (!tos_fifo_is_full(fifo) && i < size) {
if (tos_fifo_push(fifo, stream[i]) != K_ERR_NONE) {
TOS_CPU_INT_ENABLE();
return i;
}
++i;
}
TOS_CPU_INT_ENABLE();
return i;
}
__API__ k_err_t tos_fifo_pop(k_fifo_t *fifo, uint8_t *out)
{
TOS_CPU_CPSR_ALLOC();
uint8_t data;
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!knl_object_verify(&fifo->knl_obj, KNL_OBJ_TYPE_FIFO)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
if (tos_fifo_is_empty(fifo)) {
TOS_CPU_INT_ENABLE();
return K_ERR_FIFO_EMPTY;
}
data = fifo->buf[fifo->beg];
fifo->beg = fifo_next(fifo, fifo->beg);
--fifo->cnt;
TOS_CPU_INT_ENABLE();
*out = data;
return K_ERR_NONE;
}
__API__ int tos_fifo_pop_stream(k_fifo_t *fifo, uint8_t *buffer, size_t size)
{
TOS_CPU_CPSR_ALLOC();
int i = 0;
uint8_t data;
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!knl_object_verify(&fifo->knl_obj, KNL_OBJ_TYPE_FIFO)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
while (!tos_fifo_is_empty(fifo) && i < size) {
if (tos_fifo_pop(fifo, &data) != K_ERR_NONE) {
TOS_CPU_INT_ENABLE();
return i;
}
buffer[i++] = data;
}
TOS_CPU_INT_ENABLE();
return i;
}
__API__ void tos_fifo_flush(k_fifo_t *fifo)
{
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!knl_object_verify(&fifo->knl_obj, KNL_OBJ_TYPE_FIFO)) {
return;
}
#endif
fifo->beg = 0;
fifo->end = 0;
fifo->cnt = 0;
}
__API__ int tos_fifo_is_empty(k_fifo_t *fifo)
{
TOS_CPU_CPSR_ALLOC();
int is_empty = 0;
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!knl_object_verify(&fifo->knl_obj, KNL_OBJ_TYPE_FIFO)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
is_empty = (fifo->cnt == 0);
TOS_CPU_INT_ENABLE();
return is_empty;
}
__API__ int tos_fifo_is_full(k_fifo_t *fifo)
{
TOS_CPU_CPSR_ALLOC();
int is_full = 0;
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!knl_object_verify(&fifo->knl_obj, KNL_OBJ_TYPE_FIFO)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
is_full = (fifo->cnt == fifo->siz);
TOS_CPU_INT_ENABLE();
return is_full;
}
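/*
 * Editorial usage sketch (not part of this commit): a 16-byte fifo backed by
 * a caller-provided buffer; names and sizes are illustrative.
 *
 *     k_fifo_t demo_fifo;
 *     uint8_t  demo_fifo_buf[16];
 *
 *     void demo_fifo_usage(void)
 *     {
 *         uint8_t ch;
 *         tos_fifo_create(&demo_fifo, demo_fifo_buf, sizeof(demo_fifo_buf));
 *         tos_fifo_push(&demo_fifo, 0x5A);
 *         if (tos_fifo_pop(&demo_fifo, &ch) == K_ERR_NONE) {
 *             // ch == 0x5A
 *         }
 *     }
 */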

69
kernel/core/tos_global.c Normal file

@@ -0,0 +1,69 @@
#include <tos.h>
k_nesting_t k_irq_nest_cnt = (k_nesting_t)0;
k_nesting_t k_sched_lock_nest_cnt = (k_nesting_t)0;
knl_state_t k_knl_state = KNL_STATE_STOPPED;
readyqueue_t k_rdyq;
k_tick_t k_tick_count = (k_tick_t)0u;
k_task_t *k_curr_task = K_NULL;
k_task_t *k_next_task = K_NULL;
k_task_t k_idle_task;
k_stack_t k_idle_task_stk[TOS_CFG_IDLE_TASK_STK_SIZE];
k_stack_t *const k_idle_task_stk_addr = &k_idle_task_stk[0];
size_t const k_idle_task_stk_size = TOS_CFG_IDLE_TASK_STK_SIZE;
k_tick_t k_cpu_tick_per_second = TOS_CFG_CPU_TICK_PER_SECOND;
k_cycle_t k_cpu_cycle_per_tick = (k_cycle_t)0u;
TOS_LIST_DEFINE(k_tick_list);
#if TOS_CFG_FAULT_BACKTRACE_EN > 0u
k_fault_log_writer_t k_fault_log_writer = fault_default_log_writer;
#endif
#if TOS_CFG_MMHEAP_EN > 0u
uint8_t k_mmheap_pool[TOS_CFG_MMHEAP_POOL_SIZE] __ALIGNED__(4);
k_mmheap_ctl_t k_mmheap_ctl;
#endif
#if TOS_CFG_ROUND_ROBIN_EN > 0u
k_timeslice_t k_robin_default_timeslice = TOS_CFG_CPU_TICK_PER_SECOND / 10;
k_robin_state_t k_robin_state = TOS_ROBIN_STATE_DISABLED;
#endif
#if TOS_CFG_TIMER_EN > 0u
timer_ctl_t k_timer_ctl = { TOS_TIME_FOREVER, TOS_LIST_NODE(k_timer_ctl.list) };
#if TOS_CFG_TIMER_AS_PROC == 0u
k_task_t k_timer_task;
k_stack_t k_timer_task_stk[TOS_CFG_TIMER_TASK_STK_SIZE];
k_prio_t const k_timer_task_prio = TOS_CFG_TIMER_TASK_PRIO;
k_stack_t *const k_timer_task_stk_addr = &k_timer_task_stk[0];
size_t const k_timer_task_stk_size = TOS_CFG_TIMER_TASK_STK_SIZE;
#endif /* TOS_CFG_TIMER_AS_PROC == 0u */
#endif
#if TOS_CFG_MSG_EN > 0u
TOS_LIST_DEFINE(k_msg_freelist);
k_msg_t k_msg_pool[TOS_CFG_MSG_POOL_SIZE];
#endif
#if TOS_CFG_PWR_MGR_EN > 0u
pm_device_ctl_t k_pm_device_ctl = { 0u };
/* default idle power manager mode is SLEEP */
idle_pwrmgr_mode_t k_idle_pwr_mgr_mode = IDLE_POWER_MANAGER_MODE_SLEEP;
/* default low power mode is SLEEP */
k_cpu_lpwr_mode_t k_cpu_lpwr_mode = TOS_LOW_POWER_MODE_SLEEP;
#endif
#if TOS_CFG_TICKLESS_EN > 0u
k_tickless_wkup_alarm_t *k_tickless_wkup_alarm[__LOW_POWER_MODE_DUMMY] = { K_NULL };
#endif

118
kernel/core/tos_mmblk.c Normal file

@@ -0,0 +1,118 @@
#include <tos.h>
#if TOS_CFG_MMBLK_EN > 0u
__API__ k_err_t tos_mmblk_pool_create(k_mmblk_pool_t *mbp, void *pool_start, size_t blk_num, size_t blk_size)
{
uint32_t i;
void *blk_curr;
void *blk_next;
TOS_IN_IRQ_CHECK();
TOS_PTR_SANITY_CHECK(pool_start);
if (((cpu_addr_t)pool_start & K_MMBLK_ALIGN_MASK) != 0u) {
return K_ERR_MMBLK_INVALID_POOL_ADDR;
}
if ((blk_size & K_MMBLK_ALIGN_MASK) != 0u) {
return K_ERR_MMBLK_INVALID_BLK_SIZE;
}
blk_curr = pool_start;
blk_next = K_MMBLK_NEXT_BLK(blk_curr, blk_size);
for (i = 0; i < blk_num - 1u; ++i) {
*(void **)blk_curr = blk_next;
blk_curr = blk_next;
blk_next = K_MMBLK_NEXT_BLK(blk_next, blk_size);
}
    *(void **)blk_curr = K_NULL;    /* terminate the list at the last block (writing through blk_next would step past the pool) */
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
knl_object_init(&mbp->knl_obj, KNL_OBJ_TYPE_MMBLK_POOL);
#endif
mbp->pool_start = pool_start;
mbp->free_list = pool_start;
mbp->blk_free = blk_num;
mbp->blk_max = blk_num;
mbp->blk_size = blk_size;
return K_ERR_NONE;
}
__API__ k_err_t tos_mmblk_pool_destroy(k_mmblk_pool_t *mbp)
{
TOS_PTR_SANITY_CHECK(mbp);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!knl_object_verify(&mbp->knl_obj, KNL_OBJ_TYPE_MMBLK_POOL)) {
return K_ERR_OBJ_INVALID;
}
#endif
mbp->pool_start = K_NULL;
mbp->free_list = K_NULL;
mbp->blk_free = 0;
mbp->blk_max = 0;
mbp->blk_size = 0;
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
knl_object_deinit(&mbp->knl_obj);
#endif
return K_ERR_NONE;
}
__API__ k_err_t tos_mmblk_alloc(k_mmblk_pool_t *mbp, void **blk)
{
TOS_CPU_CPSR_ALLOC();
TOS_PTR_SANITY_CHECK(mbp);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!knl_object_verify(&mbp->knl_obj, KNL_OBJ_TYPE_MMBLK_POOL)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
if (mbp->blk_free == 0) {
TOS_CPU_INT_ENABLE();
*blk = K_NULL;
return K_ERR_MMBLK_POOL_EMPTY;
}
*blk = mbp->free_list;
mbp->free_list = *(void **)mbp->free_list;
--mbp->blk_free;
TOS_CPU_INT_ENABLE();
return K_ERR_NONE;
}
__API__ k_err_t tos_mmblk_free(k_mmblk_pool_t *mbp, void *blk)
{
TOS_CPU_CPSR_ALLOC();
TOS_PTR_SANITY_CHECK(mbp);
TOS_PTR_SANITY_CHECK(blk);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!knl_object_verify(&mbp->knl_obj, KNL_OBJ_TYPE_MMBLK_POOL)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
if (mbp->blk_free >= mbp->blk_max) {
TOS_CPU_INT_ENABLE();
return K_ERR_MMBLK_POOL_FULL;
}
*(void **)blk = mbp->free_list;
mbp->free_list = blk;
++mbp->blk_free;
TOS_CPU_INT_ENABLE();
return K_ERR_NONE;
}
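/*
 * Editorial usage sketch (not part of this commit): a pool of 10 blocks of
 * 16 bytes each. Both the pool address and the block size must satisfy the
 * K_MMBLK alignment checked above; counts and sizes here are illustrative.
 *
 *     k_mmblk_pool_t demo_mbp;
 *     uint8_t demo_mbp_buf[10 * 16] __ALIGNED__(4);
 *
 *     void demo_mmblk_usage(void)
 *     {
 *         void *blk = K_NULL;
 *         tos_mmblk_pool_create(&demo_mbp, demo_mbp_buf, 10u, 16u);
 *         if (tos_mmblk_alloc(&demo_mbp, &blk) == K_ERR_NONE) {
 *             tos_mmblk_free(&demo_mbp, blk);
 *         }
 *     }
 */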
#endif

685
kernel/core/tos_mmheap.c Normal file

@@ -0,0 +1,685 @@
/*
** Two Level Segregated Fit memory allocator, version 3.1.
** Written by Matthew Conte
** http://tlsf.baisoku.org
**
** Based on the original documentation by Miguel Masmano:
** http://www.gii.upv.es/tlsf/main/docs
**
** This implementation was written to the specification
** of the document, therefore no GPL restrictions apply.
**
** Copyright (c) 2006-2016, Matthew Conte
** All rights reserved.
**
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions are met:
** * Redistributions of source code must retain the above copyright
** notice, this list of conditions and the following disclaimer.
** * Redistributions in binary form must reproduce the above copyright
** notice, this list of conditions and the following disclaimer in the
** documentation and/or other materials provided with the distribution.
** * Neither the name of the copyright holder nor the
** names of its contributors may be used to endorse or promote products
** derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
** WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
** DISCLAIMED. IN NO EVENT SHALL MATTHEW CONTE BE LIABLE FOR ANY
** DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
** (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
** ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <tos.h>
#if TOS_CFG_MMHEAP_EN > 0u
#if defined(TOS_CFG_CPU_LEAD_ZEROS_ASM_PRESENT) && (TOS_CFG_CPU_LEAD_ZEROS_ASM_PRESENT == 0u)
__STATIC__ int generic_fls(uint32_t x)
{
int r = 32;
if (!x)
return 0;
if (!(x & 0xffff0000u)) {
x <<= 16;
r -= 16;
}
if (!(x & 0xff000000u)) {
x <<= 8;
r -= 8;
}
if (!(x & 0xf0000000u)) {
x <<= 4;
r -= 4;
}
if (!(x & 0xc0000000u)) {
x <<= 2;
r -= 2;
}
if (!(x & 0x80000000u)) {
x <<= 1;
r -= 1;
}
return r;
}
#else
__STATIC__ int generic_fls(uint32_t x)
{
return 32 - tos_cpu_clz(x);
}
#endif
__STATIC__ int __ffs(uint32_t word)
{
return generic_fls(word & (~word + 1)) - 1;
}
__STATIC__ int __fls(uint32_t word)
{
return generic_fls(word) - 1;
}
/*
** TLSF utility functions. In most cases, these are direct translations of
** the documentation found in the white paper.
*/
__STATIC__ void mapping_insert(size_t size, int *fli, int *sli)
{
int fl, sl;
if (size < K_MMHEAP_SMALL_BLOCK_SIZE) {
/* Store small blocks in first list. */
fl = 0;
sl = (int)size / (K_MMHEAP_SMALL_BLOCK_SIZE / K_MMHEAP_SL_INDEX_COUNT);
} else {
fl = __fls(size);
sl = ((int)size >> (fl - K_MMHEAP_SL_INDEX_COUNT_LOG2)) ^ (1 << K_MMHEAP_SL_INDEX_COUNT_LOG2);
fl -= (K_MMHEAP_FL_INDEX_SHIFT - 1);
}
*fli = fl;
*sli = sl;
}
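/*
 * Editorial worked example (values assume the usual 32-bit TLSF constants
 * K_MMHEAP_SMALL_BLOCK_SIZE == 128, K_MMHEAP_SL_INDEX_COUNT_LOG2 == 5 and
 * K_MMHEAP_FL_INDEX_SHIFT == 7; check tos_mmheap.h for the real values):
 * for size == 460, __fls(460) == 8, so
 *     sl = (460 >> (8 - 5)) ^ (1 << 5) = 57 ^ 32 = 25
 *     fl = 8 - (7 - 1) = 2
 * i.e. a 460-byte free block is filed under blocks[2][25].
 */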
/* This version rounds up to the next block size (for allocations) */
__STATIC__ void mapping_search(size_t size, int *fli, int *sli)
{
size_t round;
if (size >= K_MMHEAP_SMALL_BLOCK_SIZE) {
round = (1 << (__fls(size) - K_MMHEAP_SL_INDEX_COUNT_LOG2)) - 1;
size += round;
}
mapping_insert(size, fli, sli);
}
__STATIC__ size_t blk_size(const mmheap_blk_t *blk)
{
return blk->size & K_MMHEAP_BLOCK_SIZE_MASK;
}
__STATIC__ void blk_set_size(mmheap_blk_t *blk, size_t size)
{
blk->size = size | (blk->size & K_MMHEAP_BLOCK_STATE_MASK);
}
__STATIC__ int blk_is_free(const mmheap_blk_t *blk)
{
return blk->size & K_MMHEAP_BLOCK_CURR_FREE;
}
__STATIC__ void blk_set_free(mmheap_blk_t *blk)
{
blk->size |= K_MMHEAP_BLOCK_CURR_FREE;
}
__STATIC__ void blk_set_used(mmheap_blk_t *blk)
{
blk->size &= ~K_MMHEAP_BLOCK_CURR_FREE;
}
__STATIC__ int blk_is_prev_free(const mmheap_blk_t *blk)
{
return blk->size & K_MMHEAP_BLOCK_PREV_FREE;
}
__STATIC__ void blk_set_prev_free(mmheap_blk_t *blk)
{
blk->size |= K_MMHEAP_BLOCK_PREV_FREE;
}
__STATIC__ void blk_set_prev_used(mmheap_blk_t *blk)
{
blk->size &= ~K_MMHEAP_BLOCK_PREV_FREE;
}
__STATIC__ mmheap_blk_t *blk_from_ptr(const void *ptr)
{
return (mmheap_blk_t *)((cpu_addr_t)ptr - K_MMHEAP_BLK_START_OFFSET);
}
__STATIC__ void *blk_to_ptr(const mmheap_blk_t *blk)
{
return (void *)((cpu_addr_t)blk + K_MMHEAP_BLK_START_OFFSET);
}
/* Return location of next block after block of given size. */
__STATIC__ mmheap_blk_t *offset_to_block(const void *ptr, int diff)
{
return (mmheap_blk_t *)((cpu_addr_t)ptr + diff);
}
/* Return location of previous block. */
__STATIC__ mmheap_blk_t *blk_prev(const mmheap_blk_t *blk)
{
return blk->prev_phys_blk;
}
/* Return location of next existing block. */
__STATIC__ mmheap_blk_t *blk_next(const mmheap_blk_t *blk)
{
mmheap_blk_t *next_blk;
next_blk = offset_to_block(blk_to_ptr(blk), blk_size(blk) - K_MMHEAP_BLK_HEADER_OVERHEAD);
return next_blk;
}
/* Link a new block with its physical neighbor, return the neighbor. */
__STATIC__ mmheap_blk_t *blk_link_next(mmheap_blk_t *blk)
{
mmheap_blk_t *next_blk;
next_blk = blk_next(blk);
next_blk->prev_phys_blk = blk;
return next_blk;
}
__STATIC__ void blk_mark_as_free(mmheap_blk_t *blk)
{
mmheap_blk_t *next_blk;
/* Link the block to the next block, first. */
next_blk = blk_link_next(blk);
blk_set_prev_free(next_blk);
blk_set_free(blk);
}
__STATIC__ void blk_mark_as_used(mmheap_blk_t *blk)
{
mmheap_blk_t *next_blk;
next_blk = blk_next(blk);
blk_set_prev_used(next_blk);
blk_set_used(blk);
}
__STATIC__ size_t align_up(size_t x, size_t align)
{
return (x + (align - 1)) & ~(align - 1);
}
__STATIC__ size_t align_down(size_t x, size_t align)
{
return x - (x & (align - 1));
}
__STATIC__ void *align_ptr(const void *ptr, size_t align)
{
    return (void *)(((cpu_addr_t)ptr + (align - 1)) & ~(align - 1));
}
/* Insert a free block into the free block list. */
__STATIC__ void insert_free_block(mmheap_blk_t *blk, int fl, int sl)
{
mmheap_blk_t *curr;
curr = k_mmheap_ctl.blocks[fl][sl];
blk->next_free = curr;
blk->prev_free = &k_mmheap_ctl.block_null;
curr->prev_free = blk;
/*
** Insert the new block at the head of the list, and mark the first-
** and second-level bitmaps appropriately.
*/
k_mmheap_ctl.blocks[fl][sl] = blk;
k_mmheap_ctl.fl_bitmap |= (1 << fl);
k_mmheap_ctl.sl_bitmap[fl] |= (1 << sl);
}
/* Remove a free block from the free list.*/
__STATIC__ void remove_free_block(mmheap_blk_t *blk, int fl, int sl)
{
mmheap_blk_t *prev_blk;
mmheap_blk_t *next_blk;
prev_blk = blk->prev_free;
next_blk = blk->next_free;
next_blk->prev_free = prev_blk;
prev_blk->next_free = next_blk;
/* If this block is the head of the free list, set new head. */
if (k_mmheap_ctl.blocks[fl][sl] == blk) {
k_mmheap_ctl.blocks[fl][sl] = next_blk;
/* If the new head is null, clear the bitmap. */
if (next_blk == &k_mmheap_ctl.block_null) {
k_mmheap_ctl.sl_bitmap[fl] &= ~(1 << sl);
/* If the second bitmap is now empty, clear the fl bitmap. */
if (!k_mmheap_ctl.sl_bitmap[fl]) {
k_mmheap_ctl.fl_bitmap &= ~(1 << fl);
}
}
}
}
/* Remove a given block from the free list. */
__STATIC__ void blk_remove(mmheap_blk_t *blk)
{
int fl, sl;
mapping_insert(blk_size(blk), &fl, &sl);
remove_free_block(blk, fl, sl);
}
/* Insert a given block into the free list. */
__STATIC__ void blk_insert(mmheap_blk_t *blk)
{
int fl, sl;
mapping_insert(blk_size(blk), &fl, &sl);
insert_free_block(blk, fl, sl);
}
__STATIC__ int blk_can_split(mmheap_blk_t *blk, size_t size)
{
return blk_size(blk) >= sizeof(mmheap_blk_t) + size;
}
/* Split a block into two, the second of which is free. */
__STATIC__ mmheap_blk_t *blk_split(mmheap_blk_t *blk, size_t size)
{
mmheap_blk_t *remaining;
size_t remain_size;
/* Calculate the amount of space left in the remaining block. */
remaining = offset_to_block(blk_to_ptr(blk), size - K_MMHEAP_BLK_HEADER_OVERHEAD);
remain_size = blk_size(blk) - (size + K_MMHEAP_BLK_HEADER_OVERHEAD);
blk_set_size(remaining, remain_size);
blk_set_size(blk, size);
blk_mark_as_free(remaining);
return remaining;
}
/* Absorb a free block's storage into an adjacent previous free block. */
__STATIC__ mmheap_blk_t *blk_absorb(mmheap_blk_t *prev_blk, mmheap_blk_t *blk)
{
prev_blk->size += blk_size(blk) + K_MMHEAP_BLK_HEADER_OVERHEAD;
blk_link_next(prev_blk);
return prev_blk;
}
/* Merge a just-freed block with an adjacent previous free block. */
__STATIC__ mmheap_blk_t *blk_merge_prev(mmheap_blk_t *blk)
{
mmheap_blk_t *prev_blk;
if (blk_is_prev_free(blk)) {
prev_blk = blk_prev(blk);
blk_remove(prev_blk);
blk = blk_absorb(prev_blk, blk);
}
return blk;
}
/* Merge a just-freed block with an adjacent free block. */
__STATIC__ mmheap_blk_t *blk_merge_next(mmheap_blk_t *blk)
{
mmheap_blk_t *next_blk;
next_blk = blk_next(blk);
if (blk_is_free(next_blk)) {
blk_remove(next_blk);
blk = blk_absorb(blk, next_blk);
}
return blk;
}
/* Trim any trailing block space off the end of a block, return to pool. */
__STATIC__ void blk_trim_free(mmheap_blk_t *blk, size_t size)
{
mmheap_blk_t *remaining_blk;
if (blk_can_split(blk, size)) {
remaining_blk = blk_split(blk, size);
blk_link_next(blk);
blk_set_prev_free(remaining_blk);
blk_insert(remaining_blk);
}
}
/* Trim any trailing block space off the end of a used block, return to pool. */
__STATIC__ void blk_trim_used(mmheap_blk_t *blk, size_t size)
{
mmheap_blk_t *remaining_blk;
if (blk_can_split(blk, size)) {
/* If the next block is free, we must coalesce. */
remaining_blk = blk_split(blk, size);
blk_set_prev_used(remaining_blk);
remaining_blk = blk_merge_next(remaining_blk);
blk_insert(remaining_blk);
}
}
__STATIC__ mmheap_blk_t *blk_trim_free_leading(mmheap_blk_t *blk, size_t size)
{
mmheap_blk_t *remaining_blk;
remaining_blk = blk;
if (blk_can_split(blk, size)) {
/* We want the 2nd block. */
remaining_blk = blk_split(blk, size - K_MMHEAP_BLK_HEADER_OVERHEAD);
blk_set_prev_free(remaining_blk);
blk_link_next(blk);
blk_insert(blk);
}
return remaining_blk;
}
__STATIC__ mmheap_blk_t *blk_search_suitable(int *fli, int *sli)
{
int fl, sl;
uint32_t sl_map, fl_map;
fl = *fli;
sl = *sli;
/*
** First, search for a block in the list associated with the given
** fl/sl index.
*/
sl_map = k_mmheap_ctl.sl_bitmap[fl] & (~0U << sl);
if (!sl_map) {
/* No block exists. Search in the next largest first-level list. */
fl_map = k_mmheap_ctl.fl_bitmap & (~0U << (fl + 1));
if (!fl_map) {
/* No free blocks available, memory has been exhausted. */
return 0;
}
fl = __ffs(fl_map);
*fli = fl;
sl_map = k_mmheap_ctl.sl_bitmap[fl];
}
sl = __ffs(sl_map);
*sli = sl;
/* Return the first block in the free list. */
return k_mmheap_ctl.blocks[fl][sl];
}
__STATIC__ mmheap_blk_t *blk_locate_free(size_t size)
{
int fl = 0, sl = 0;
mmheap_blk_t *blk = K_NULL;
if (!size) {
return K_NULL;
}
mapping_search(size, &fl, &sl);
/*
** mapping_search can futz with the size, so for excessively large sizes it can sometimes wind up
** with indices that are off the end of the block array.
** So, we protect against that here, since this is the only callsite of mapping_search.
** Note that we don't need to check sl, since it comes from a modulo operation that guarantees it's always in range.
*/
if (fl < K_MMHEAP_FL_INDEX_COUNT) {
blk = blk_search_suitable(&fl, &sl);
}
if (blk) {
remove_free_block(blk, fl, sl);
}
return blk;
}
/*
** Adjust an allocation size to be aligned to word size, and no smaller
** than internal minimum.
*/
__STATIC__ size_t adjust_request_size(size_t size, size_t align)
{
size_t adjust_size = 0;
if (!size) {
return 0;
}
adjust_size = align_up(size, align);
if (adjust_size > K_MMHEAP_BLK_SIZE_MAX) {
return 0;
}
    /* the aligned size must not exceed block_size_max or we'll go out of bounds on sl_bitmap */
return adjust_size > K_MMHEAP_BLK_SIZE_MIN ? adjust_size : K_MMHEAP_BLK_SIZE_MIN;
}
__STATIC__ void *blk_prepare_used(mmheap_blk_t *blk, size_t size)
{
if (!blk) {
return K_NULL;
}
blk_trim_free(blk, size);
blk_mark_as_used(blk);
return blk_to_ptr(blk);
}
__STATIC__ void mmheap_ctl_init(void)
{
int i, j;
k_mmheap_ctl.block_null.next_free = &k_mmheap_ctl.block_null;
k_mmheap_ctl.block_null.prev_free = &k_mmheap_ctl.block_null;
k_mmheap_ctl.fl_bitmap = 0;
for (i = 0; i < K_MMHEAP_FL_INDEX_COUNT; ++i) {
k_mmheap_ctl.sl_bitmap[i] = 0;
for (j = 0; j < K_MMHEAP_SL_INDEX_COUNT; ++j) {
k_mmheap_ctl.blocks[i][j] = &k_mmheap_ctl.block_null;
}
}
}
__KERNEL__ k_err_t mmheap_init(void *pool_start, size_t pool_size)
{
mmheap_ctl_init();
return tos_mmheap_pool_add(pool_start, pool_size);
}
__API__ void *tos_mmheap_alloc(size_t size)
{
size_t adjust_size;
mmheap_blk_t *blk;
adjust_size = adjust_request_size(size, K_MMHEAP_ALIGN_SIZE);
blk = blk_locate_free(adjust_size);
if (!blk) {
return K_NULL;
}
return blk_prepare_used(blk, adjust_size);
}
__API__ void *tos_mmheap_calloc(size_t num, size_t size)
{
void *ptr;
ptr = tos_mmheap_alloc(num * size);
if (ptr) {
memset(ptr, 0, num * size);
}
return ptr;
}
__API__ void *tos_mmheap_aligned_alloc(size_t size, size_t align)
{
mmheap_blk_t *blk;
void *ptr, *aligned, *next_aligned;
size_t adjust_size, aligned_size;
size_t gap_minimum, size_with_gap, gap, gap_remain, offset;
adjust_size = adjust_request_size(size, K_MMHEAP_ALIGN_SIZE);
gap_minimum = sizeof(mmheap_blk_t);
size_with_gap = adjust_request_size(adjust_size + align + gap_minimum, align);
aligned_size = (adjust_size && align > K_MMHEAP_ALIGN_SIZE) ? size_with_gap : adjust_size;
blk = blk_locate_free(aligned_size);
if (!blk) {
return K_NULL;
}
ptr = blk_to_ptr(blk);
aligned = align_ptr(ptr, align);
gap = (size_t)((cpu_addr_t)aligned - (cpu_addr_t)ptr);
if (gap && gap < gap_minimum) {
gap_remain = gap_minimum - gap;
offset = gap_remain > align ? gap_remain : align;
        next_aligned = (void *)((cpu_addr_t)aligned + offset);
aligned = align_ptr(next_aligned, align);
gap = (size_t)((cpu_addr_t)aligned - (cpu_addr_t)ptr);
}
if (gap) {
blk = blk_trim_free_leading(blk, gap);
}
return blk_prepare_used(blk, adjust_size);
}
__API__ void tos_mmheap_free(void *ptr)
{
mmheap_blk_t *blk;
if (!ptr) {
return;
}
blk = blk_from_ptr(ptr);
blk_mark_as_free(blk);
blk = blk_merge_prev(blk);
blk = blk_merge_next(blk);
blk_insert(blk);
}
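/*
 * Editorial usage sketch (not part of this commit): the default pool is added
 * by mmheap_init() during kernel init, so allocation works out of the box.
 *
 *     void *p = tos_mmheap_alloc(64u);
 *     if (p) {
 *         // ... use the 64 bytes ...
 *         tos_mmheap_free(p);
 *     }
 */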
__API__ void *tos_mmheap_realloc(void *ptr, size_t size)
{
void *p = 0;
mmheap_blk_t *curr_blk, *next_blk;
size_t curr_size, combined_size, adjust_size, min_size;
if (ptr && size == 0) {
tos_mmheap_free(ptr);
return K_NULL;
}
if (!ptr) {
return tos_mmheap_alloc(size);
}
curr_blk = blk_from_ptr(ptr);
next_blk = blk_next(curr_blk);
curr_size = blk_size(curr_blk);
combined_size = curr_size + blk_size(next_blk) + K_MMHEAP_BLK_HEADER_OVERHEAD;
adjust_size = adjust_request_size(size, K_MMHEAP_ALIGN_SIZE);
if (adjust_size > curr_size && (!blk_is_free(next_blk) || adjust_size > combined_size)) {
p = tos_mmheap_alloc(size);
if (p) {
min_size = curr_size < size ? curr_size : size;
memcpy(p, ptr, min_size);
tos_mmheap_free(ptr);
}
} else {
if (adjust_size > curr_size) {
blk_merge_next(curr_blk);
blk_mark_as_used(curr_blk);
}
blk_trim_used(curr_blk, adjust_size);
p = ptr;
}
return p;
}
__API__ k_err_t tos_mmheap_pool_add(void *pool_start, size_t pool_size)
{
mmheap_blk_t *curr_blk;
mmheap_blk_t *next_blk;
size_t size_aligned;
size_aligned = align_down(pool_size - 2 * K_MMHEAP_BLK_HEADER_OVERHEAD, K_MMHEAP_ALIGN_SIZE);
if (((cpu_addr_t)pool_start % K_MMHEAP_ALIGN_SIZE) != 0u) {
return K_ERR_MMHEAP_INVALID_POOL_ADDR;
}
if (size_aligned < K_MMHEAP_BLK_SIZE_MIN ||
size_aligned > K_MMHEAP_BLK_SIZE_MAX) {
return K_ERR_MMHEAP_INVALID_POOL_SIZE;
}
/*
** Create the main free block. Offset the start of the block slightly
** so that the prev_phys_block field falls outside of the pool -
** it will never be used.
*/
curr_blk = offset_to_block(pool_start, -K_MMHEAP_BLK_HEADER_OVERHEAD);
blk_set_size(curr_blk, size_aligned);
blk_set_free(curr_blk);
blk_set_prev_used(curr_blk);
blk_insert(curr_blk);
/* Split the block to create a zero-size sentinel block. */
next_blk = blk_link_next(curr_blk);
blk_set_size(next_blk, 0);
blk_set_used(next_blk);
blk_set_prev_free(next_blk);
return K_ERR_NONE;
}
__API__ void tos_mmheap_pool_rmv(void *pool_start)
{
int fl = 0, sl = 0;
mmheap_blk_t *blk;
blk = offset_to_block(pool_start, -K_MMHEAP_BLK_HEADER_OVERHEAD);
mapping_insert(blk_size(blk), &fl, &sl);
remove_free_block(blk, fl, sl);
}
#endif

142
kernel/core/tos_msg.c Normal file

@@ -0,0 +1,142 @@
#include <tos.h>
#if TOS_CFG_MSG_EN > 0u
__KERNEL__ void msgpool_init(void)
{
uint32_t i;
for (i = 0; i < TOS_CFG_MSG_POOL_SIZE; ++i) {
tos_list_init(&k_msg_pool[i].list);
tos_list_add(&k_msg_pool[i].list, &k_msg_freelist);
}
}
__STATIC__ k_msg_t *msgpool_alloc(void)
{
k_msg_t *msg = K_NULL;
if (tos_list_empty(&k_msg_freelist)) {
return K_NULL;
}
msg = TOS_LIST_FIRST_ENTRY(&k_msg_freelist, k_msg_t, list);
tos_list_del(&msg->list);
return msg;
}
__STATIC__ void msgpool_free(k_msg_t *msg)
{
tos_list_del(&msg->list);
tos_list_add(&msg->list, &k_msg_freelist);
}
__API__ void tos_msg_queue_flush(k_msg_queue_t *msg_queue)
{
TOS_CPU_CPSR_ALLOC();
k_list_t *curr, *next;
TOS_CPU_INT_DISABLE();
TOS_LIST_FOR_EACH_SAFE(curr, next, &msg_queue->queue_head) {
msgpool_free(TOS_LIST_ENTRY(curr, k_msg_t, list));
}
TOS_CPU_INT_ENABLE();
}
__API__ k_err_t tos_msg_queue_create(k_msg_queue_t *msg_queue)
{
TOS_PTR_SANITY_CHECK(msg_queue);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
knl_object_init(&msg_queue->knl_obj, KNL_OBJ_TYPE_MSG_QUEUE);
#endif
tos_list_init(&msg_queue->queue_head);
return K_ERR_NONE;
}
__API__ k_err_t tos_msg_queue_destroy(k_msg_queue_t *msg_queue)
{
TOS_PTR_SANITY_CHECK(msg_queue);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!knl_object_verify(&msg_queue->knl_obj, KNL_OBJ_TYPE_MSG_QUEUE)) {
return K_ERR_OBJ_INVALID;
}
#endif
tos_msg_queue_flush(msg_queue);
tos_list_init(&msg_queue->queue_head);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
knl_object_deinit(&msg_queue->knl_obj);
#endif
return K_ERR_NONE;
}
__API__ k_err_t tos_msg_queue_get(k_msg_queue_t *msg_queue, void **msg_addr, size_t *msg_size)
{
TOS_CPU_CPSR_ALLOC();
k_msg_t *msg;
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!knl_object_verify(&msg_queue->knl_obj, KNL_OBJ_TYPE_MSG_QUEUE)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
msg = TOS_LIST_FIRST_ENTRY_OR_NULL(&msg_queue->queue_head, k_msg_t, list);
if (!msg) {
TOS_CPU_INT_ENABLE();
return K_ERR_MSG_QUEUE_EMPTY;
}
*msg_addr = msg->msg_addr;
*msg_size = msg->msg_size;
msgpool_free(msg);
TOS_CPU_INT_ENABLE();
return K_ERR_NONE;
}
__API__ k_err_t tos_msg_queue_put(k_msg_queue_t *msg_queue, void *msg_addr, size_t msg_size, k_opt_t opt)
{
TOS_CPU_CPSR_ALLOC();
k_msg_t *msg;
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!knl_object_verify(&msg_queue->knl_obj, KNL_OBJ_TYPE_MSG_QUEUE)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
msg = msgpool_alloc();
if (!msg) {
TOS_CPU_INT_ENABLE();
return K_ERR_MSG_QUEUE_FULL;
}
msg->msg_addr = msg_addr;
msg->msg_size = msg_size;
if (opt & TOS_OPT_MSG_PUT_LIFO) {
tos_list_add(&msg->list, &msg_queue->queue_head);
} else {
tos_list_add_tail(&msg->list, &msg_queue->queue_head);
}
TOS_CPU_INT_ENABLE();
return K_ERR_NONE;
}
#endif

205
kernel/core/tos_mutex.c Normal file

@@ -0,0 +1,205 @@
#include "tos.h"
#if TOS_CFG_MUTEX_EN > 0u
__STATIC_INLINE__ void mutex_old_owner_release(k_mutex_t *mutex)
{
k_task_t *owner;
owner = mutex->owner;
tos_list_del(&mutex->owner_list);
mutex->owner = K_NULL;
// the right time comes! let's do it!
if (owner->prio_pending != K_TASK_PRIO_INVALID) {
tos_task_prio_change(owner, owner->prio_pending);
owner->prio_pending = K_TASK_PRIO_INVALID;
} else if (owner->prio != mutex->owner_orig_prio) {
tos_task_prio_change(owner, mutex->owner_orig_prio);
mutex->owner_orig_prio = K_TASK_PRIO_INVALID;
}
}
__STATIC_INLINE__ void mutex_fresh_owner_mark(k_mutex_t *mutex, k_task_t *task)
{
mutex->pend_nesting = (k_nesting_t)1u;
mutex->owner = task;
mutex->owner_orig_prio = task->prio;
tos_list_add(&mutex->owner_list, &task->mutex_own_list);
}
__STATIC_INLINE__ void mutex_new_owner_mark(k_mutex_t *mutex, k_task_t *task)
{
k_prio_t highest_pending_prio;
mutex_fresh_owner_mark(mutex, task);
// we own the mutex now, make sure our priority is higher than any one in the pend list.
highest_pending_prio = pend_highest_prio_get(&mutex->pend_obj);
if (task->prio > highest_pending_prio) {
tos_task_prio_change(task, highest_pending_prio);
}
}
__KERNEL__ void mutex_release(k_mutex_t *mutex)
{
mutex_old_owner_release(mutex);
pend_wakeup_all(&mutex->pend_obj, PEND_STATE_OWNER_DIE);
}
__API__ k_err_t tos_mutex_create(k_mutex_t *mutex)
{
TOS_PTR_SANITY_CHECK(mutex);
pend_object_init(&mutex->pend_obj, PEND_TYPE_MUTEX);
mutex->pend_nesting = (k_nesting_t)0u;
mutex->owner = K_NULL;
mutex->owner_orig_prio = K_TASK_PRIO_INVALID;
tos_list_init(&mutex->owner_list);
return K_ERR_NONE;
}
__API__ k_err_t tos_mutex_destroy(k_mutex_t *mutex)
{
TOS_CPU_CPSR_ALLOC();
TOS_PTR_SANITY_CHECK(mutex);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!pend_object_verify(&mutex->pend_obj, PEND_TYPE_MUTEX)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
if (!pend_is_nopending(&mutex->pend_obj)) {
pend_wakeup_all(&mutex->pend_obj, PEND_STATE_DESTROY);
}
pend_object_deinit(&mutex->pend_obj);
mutex->pend_nesting = (k_nesting_t)0u;
if (mutex->owner) {
mutex_old_owner_release(mutex);
}
TOS_CPU_INT_ENABLE();
knl_sched();
return K_ERR_NONE;
}
__API__ k_err_t tos_mutex_pend_timed(k_mutex_t *mutex, k_tick_t timeout)
{
TOS_CPU_CPSR_ALLOC();
k_err_t err;
TOS_PTR_SANITY_CHECK(mutex);
TOS_IN_IRQ_CHECK();
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!pend_object_verify(&mutex->pend_obj, PEND_TYPE_MUTEX)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
if (mutex->pend_nesting == (k_nesting_t)0u) { // first come
mutex_fresh_owner_mark(mutex, k_curr_task);
TOS_CPU_INT_ENABLE();
return K_ERR_NONE;
}
if (knl_is_self(mutex->owner)) { // come again
if (mutex->pend_nesting == (k_nesting_t)-1) {
TOS_CPU_INT_ENABLE();
return K_ERR_MUTEX_NESTING_OVERFLOW;
}
++mutex->pend_nesting;
TOS_CPU_INT_ENABLE();
return K_ERR_MUTEX_NESTING;
}
if (timeout == TOS_TIME_NOWAIT) { // no wait, return immediately
TOS_CPU_INT_ENABLE();
return K_ERR_PEND_NOWAIT;
}
if (knl_is_sched_locked()) {
TOS_CPU_INT_ENABLE();
return K_ERR_PEND_SCHED_LOCKED;
}
if (mutex->owner->prio > k_curr_task->prio) {
// PRIORITY INVERSION:
        // we are pending on a mutex whose owner has a lower (numerically bigger) priority.
        // raise the owner to the same priority as us.
tos_task_prio_change(mutex->owner, k_curr_task->prio);
}
pend_task_block(k_curr_task, &mutex->pend_obj, timeout);
TOS_CPU_INT_ENABLE();
knl_sched();
err = pend_state2errno(k_curr_task->pend_state);
if (err == K_ERR_NONE) {
// good, we are the owner now.
TOS_CPU_INT_DISABLE();
mutex_new_owner_mark(mutex, k_curr_task);
TOS_CPU_INT_ENABLE();
}
return err;
}
__API__ k_err_t tos_mutex_pend(k_mutex_t *mutex)
{
return tos_mutex_pend_timed(mutex, TOS_TIME_FOREVER);
}
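/*
 * Editorial usage sketch (not part of this commit): the usual
 * lock / touch shared data / unlock pattern.
 *
 *     k_mutex_t demo_mutex;   // tos_mutex_create(&demo_mutex) once, at init
 *
 *     void demo_critical_section(void)
 *     {
 *         if (tos_mutex_pend(&demo_mutex) == K_ERR_NONE) {
 *             // ... access the shared resource ...
 *             tos_mutex_post(&demo_mutex);
 *         }
 *     }
 */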
__API__ k_err_t tos_mutex_post(k_mutex_t *mutex)
{
TOS_CPU_CPSR_ALLOC();
TOS_PTR_SANITY_CHECK(mutex);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!pend_object_verify(&mutex->pend_obj, PEND_TYPE_MUTEX)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
if (!knl_is_self(mutex->owner)) {
TOS_CPU_INT_ENABLE();
return K_ERR_MUTEX_NOT_OWNER;
}
--mutex->pend_nesting;
if (mutex->pend_nesting > (k_nesting_t)0u) {
TOS_CPU_INT_ENABLE();
return K_ERR_MUTEX_NESTING;
}
mutex_old_owner_release(mutex);
if (pend_is_nopending(&mutex->pend_obj)) {
TOS_CPU_INT_ENABLE();
return K_ERR_NONE;
}
pend_wakeup_one(&mutex->pend_obj, PEND_STATE_POST);
TOS_CPU_INT_ENABLE();
knl_sched();
return K_ERR_NONE;
}
#endif

138
kernel/core/tos_pend.c Normal file

@@ -0,0 +1,138 @@
#include "tos.h"
__STATIC__ void pend_list_add(k_task_t *task, pend_obj_t *pend_obj)
{
k_list_t *curr, *pend_list;
k_task_t *iter;
pend_list = &pend_obj->list;
    /* keep the list sorted by priority: the boss (the task with the highest priority,
       numerically smallest) always comes first
     */
TOS_LIST_FOR_EACH(curr, pend_list) {
iter = TOS_LIST_ENTRY(curr, k_task_t, pend_list);
if (task->prio < iter->prio) {
break;
}
}
tos_list_add_tail(&task->pend_list, curr);
// remember me, you may use me someday
task->pending_obj = pend_obj;
task_state_set_pend(task);
}
__KERNEL__ k_prio_t pend_highest_prio_get(pend_obj_t *object)
{
k_task_t *task;
// we keep the task priority in descending order, so the first one is just fine.
task = TOS_LIST_FIRST_ENTRY_OR_NULL(&object->list, k_task_t, pend_list);
return task ? task->prio : K_TASK_PRIO_INVALID;
}
__KERNEL__ void pend_list_remove(k_task_t *task)
{
tos_list_del(&task->pend_list);
task->pending_obj = (pend_obj_t *)K_NULL;
task_state_reset_pending(task);
}
__KERNEL__ void pend_object_init(pend_obj_t *object, pend_type_t type)
{
object->type = type;
tos_list_init(&object->list);
}
__KERNEL__ void pend_object_deinit(pend_obj_t *object)
{
object->type = PEND_TYPE_NONE;
tos_list_init(&object->list);
}
__KERNEL__ int pend_is_nopending(pend_obj_t *object)
{
return tos_list_empty(&object->list);
}
__KERNEL__ void pend_list_adjust(k_task_t *task)
{
// we may be the boss, so re-enter the pend list
tos_list_del(&task->pend_list);
// the "someday" comes
pend_list_add(task, task->pending_obj);
}
__KERNEL__ int pend_object_verify(pend_obj_t *object, pend_type_t type)
{
return object->type == type;
}
__KERNEL__ k_err_t pend_state2errno(pend_state_t state)
{
if (state == PEND_STATE_POST) {
return K_ERR_NONE;
} else if (state == PEND_STATE_TIMEOUT) {
return K_ERR_PEND_TIMEOUT;
} else if (state == PEND_STATE_DESTROY) {
return K_ERR_PEND_DESTROY;
} else if (state == PEND_STATE_OWNER_DIE) {
return K_ERR_PEND_OWNER_DIE;
} else {
return K_ERR_PEND_ABNORMAL;
}
}
__KERNEL__ void pend_task_wakeup(k_task_t *task, pend_state_t state)
{
if (task_state_is_pending(task)) {
// mark why we wakeup
task->pend_state = state;
pend_list_remove(task);
}
if (task_state_is_sleeping(task)) {
tick_list_remove(task);
}
if (task_state_is_suspended(task)) {
return;
}
readyqueue_add(task);
}
__KERNEL__ void pend_task_block(k_task_t *task, pend_obj_t *object, k_tick_t timeout)
{
readyqueue_remove(task);
pend_list_add(task, object);
if (timeout != TOS_TIME_FOREVER) {
tick_list_add(task, timeout);
}
}
__KERNEL__ void pend_wakeup_one(pend_obj_t *object, pend_state_t state)
{
pend_task_wakeup(TOS_LIST_FIRST_ENTRY(&object->list, k_task_t, pend_list), state);
}
__KERNEL__ void pend_wakeup_all(pend_obj_t *object, pend_state_t state)
{
k_list_t *curr, *next;
TOS_LIST_FOR_EACH_SAFE(curr, next, &object->list) {
pend_task_wakeup(TOS_LIST_ENTRY(curr, k_task_t, pend_list), state);
}
}
__KERNEL__ void pend_wakeup(pend_obj_t *object, pend_state_t state, opt_post_t opt)
{
if (opt == OPT_POST_ONE) {
pend_wakeup_one(object, state);
} else {
pend_wakeup_all(object, state);
}
}

170
kernel/core/tos_queue.c Normal file

@@ -0,0 +1,170 @@
#include "tos.h"
#if TOS_CFG_QUEUE_EN > 0u
__API__ k_err_t tos_queue_create(k_queue_t *queue)
{
TOS_PTR_SANITY_CHECK(queue);
pend_object_init(&queue->pend_obj, PEND_TYPE_QUEUE);
tos_msg_queue_create(&queue->msg_queue);
return K_ERR_NONE;
}
__API__ k_err_t tos_queue_destroy(k_queue_t *queue)
{
TOS_CPU_CPSR_ALLOC();
TOS_PTR_SANITY_CHECK(queue);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!pend_object_verify(&queue->pend_obj, PEND_TYPE_QUEUE)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
if (!pend_is_nopending(&queue->pend_obj)) {
pend_wakeup_all(&queue->pend_obj, PEND_STATE_DESTROY);
}
pend_object_deinit(&queue->pend_obj);
tos_msg_queue_flush(&queue->msg_queue);
TOS_CPU_INT_ENABLE();
knl_sched();
return K_ERR_NONE;
}
__API__ k_err_t tos_queue_flush(k_queue_t *queue)
{
TOS_CPU_CPSR_ALLOC();
TOS_PTR_SANITY_CHECK(queue);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!pend_object_verify(&queue->pend_obj, PEND_TYPE_QUEUE)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
tos_msg_queue_flush(&queue->msg_queue);
TOS_CPU_INT_ENABLE();
return K_ERR_NONE;
}
__API__ k_err_t tos_queue_pend(k_queue_t *queue, void **msg_addr, size_t *msg_size, k_tick_t timeout)
{
TOS_CPU_CPSR_ALLOC();
k_err_t err;
TOS_PTR_SANITY_CHECK(queue);
TOS_PTR_SANITY_CHECK(msg_addr);
TOS_PTR_SANITY_CHECK(msg_size);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!pend_object_verify(&queue->pend_obj, PEND_TYPE_QUEUE)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
if (tos_msg_queue_get(&queue->msg_queue, msg_addr, msg_size) == K_ERR_NONE) {
TOS_CPU_INT_ENABLE();
return K_ERR_NONE;
}
if (timeout == TOS_TIME_NOWAIT) {
*msg_addr = K_NULL;
*msg_size = 0;
TOS_CPU_INT_ENABLE();
return K_ERR_PEND_NOWAIT;
}
if (knl_is_sched_locked()) {
TOS_CPU_INT_ENABLE();
return K_ERR_PEND_SCHED_LOCKED;
}
pend_task_block(k_curr_task, &queue->pend_obj, timeout);
TOS_CPU_INT_ENABLE();
knl_sched();
err = pend_state2errno(k_curr_task->pend_state);
if (err == K_ERR_NONE) {
*msg_addr = k_curr_task->msg_addr;
*msg_size = k_curr_task->msg_size;
k_curr_task->msg_addr = K_NULL;
k_curr_task->msg_size = 0;
}
return err;
}
__STATIC__ void queue_task_msg_recv(k_task_t *task, void *msg_addr, size_t msg_size)
{
task->msg_addr = msg_addr;
task->msg_size = msg_size;
pend_task_wakeup(task, PEND_STATE_POST);
}
__STATIC__ k_err_t queue_do_post(k_queue_t *queue, void *msg_addr, size_t msg_size, opt_post_t opt)
{
TOS_CPU_CPSR_ALLOC();
k_list_t *curr, *next;
TOS_PTR_SANITY_CHECK(queue);
TOS_PTR_SANITY_CHECK(msg_addr);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!pend_object_verify(&queue->pend_obj, PEND_TYPE_QUEUE)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
if (pend_is_nopending(&queue->pend_obj)) {
if (tos_msg_queue_put(&queue->msg_queue, msg_addr, msg_size, TOS_OPT_MSG_PUT_FIFO) != K_ERR_NONE) {
TOS_CPU_INT_ENABLE();
return K_ERR_QUEUE_FULL;
}
TOS_CPU_INT_ENABLE();
return K_ERR_NONE;
}
if (opt == OPT_POST_ONE) {
queue_task_msg_recv(TOS_LIST_FIRST_ENTRY(&queue->pend_obj.list, k_task_t, pend_list),
msg_addr, msg_size);
    } else { // OPT_POST_ALL
TOS_LIST_FOR_EACH_SAFE(curr, next, &queue->pend_obj.list) {
queue_task_msg_recv(TOS_LIST_ENTRY(curr, k_task_t, pend_list),
msg_addr, msg_size);
}
}
TOS_CPU_INT_ENABLE();
knl_sched();
return K_ERR_NONE;
}
__API__ k_err_t tos_queue_post(k_queue_t *queue, void *msg_addr, size_t msg_size)
{
return queue_do_post(queue, msg_addr, msg_size, OPT_POST_ONE);
}
__API__ k_err_t tos_queue_post_all(k_queue_t *queue, void *msg_addr, size_t msg_size)
{
return queue_do_post(queue, msg_addr, msg_size, OPT_POST_ALL);
}
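/*
 * Editorial producer/consumer sketch (not part of this commit). The queue
 * passes a pointer plus size, not a copy, so the message storage must stay
 * valid until the consumer has finished with it.
 *
 *     k_queue_t demo_queue;   // tos_queue_create(&demo_queue) once, at init
 *
 *     void demo_consumer(void)
 *     {
 *         void *msg_addr = K_NULL;
 *         size_t msg_size = 0;
 *         if (tos_queue_pend(&demo_queue, &msg_addr, &msg_size,
 *                            TOS_TIME_FOREVER) == K_ERR_NONE) {
 *             // process msg_size bytes at msg_addr
 *         }
 *     }
 *
 *     void demo_producer(void)
 *     {
 *         static char payload[] = "hello";
 *         tos_queue_post(&demo_queue, payload, sizeof(payload));
 *     }
 */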
#endif

92
kernel/core/tos_robin.c Normal file

@@ -0,0 +1,92 @@
#include "tos.h"
#if TOS_CFG_ROUND_ROBIN_EN > 0u
__API__ void tos_robin_config(k_robin_state_t robin_state, k_timeslice_t default_timeslice)
{
TOS_CPU_CPSR_ALLOC();
TOS_CPU_INT_DISABLE();
k_robin_state = robin_state;
if (default_timeslice > (k_timeslice_t)0u) {
k_robin_default_timeslice = default_timeslice;
} else {
k_robin_default_timeslice = TOS_CFG_CPU_TICK_PER_SECOND / 10;
}
TOS_CPU_INT_ENABLE();
}
__API__ void tos_robin_timeslice_set(k_task_t *task, k_timeslice_t timeslice)
{
TOS_CPU_CPSR_ALLOC();
if (!task) {
task = k_curr_task;
}
TOS_CPU_INT_DISABLE();
if (timeslice == (k_timeslice_t)0u) {
task->timeslice_reload = k_robin_default_timeslice;
} else {
task->timeslice_reload = timeslice;
}
if (task->timeslice_reload > task->timeslice) {
task->timeslice = task->timeslice_reload;
}
TOS_CPU_INT_ENABLE();
}
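/*
 * Editorial usage sketch (not part of this commit): enable round-robin with
 * a 5-tick default slice, then grant one task a longer 20-tick slice.
 * some_task is an illustrative placeholder.
 *
 *     tos_robin_config(TOS_ROBIN_STATE_ENABLED, (k_timeslice_t)5u);
 *     tos_robin_timeslice_set(&some_task, (k_timeslice_t)20u);
 */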
__KERNEL__ void robin_sched(k_prio_t prio)
{
TOS_CPU_CPSR_ALLOC();
k_task_t *task;
if (k_robin_state != TOS_ROBIN_STATE_ENABLED) {
return;
}
TOS_CPU_INT_DISABLE();
task = readyqueue_first_task_get(prio);
if (!task || knl_is_idle(task)) {
TOS_CPU_INT_ENABLE();
return;
}
if (readyqueue_is_prio_onlyone(prio)) {
TOS_CPU_INT_ENABLE();
return;
}
if (knl_is_sched_locked()) {
TOS_CPU_INT_ENABLE();
return;
}
if (task->timeslice > (k_timeslice_t)0u) {
--task->timeslice;
}
if (task->timeslice > (k_timeslice_t)0u) {
TOS_CPU_INT_ENABLE();
return;
}
readyqueue_move_head_to_tail(k_curr_task->prio);
task = readyqueue_first_task_get(prio);
if (task->timeslice_reload == (k_timeslice_t)0u) {
task->timeslice = k_robin_default_timeslice;
} else {
task->timeslice = task->timeslice_reload;
}
TOS_CPU_INT_ENABLE();
knl_sched();
}
#endif

171
kernel/core/tos_sched.c Normal file

@@ -0,0 +1,171 @@
#include <tos.h>
__STATIC__ k_prio_t readyqueue_prio_highest_get(void)
{
uint32_t *tbl;
k_prio_t prio;
prio = 0;
tbl = &k_rdyq.prio_mask[0];
while (*tbl == 0) {
prio += K_PRIO_TBL_SLOT_SIZE;
++tbl;
}
prio += tos_cpu_clz(*tbl);
return prio;
}
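/*
 * Editorial worked example (assuming 32-bit slots, i.e.
 * K_PRIO_TBL_SLOT_SIZE == 32): if prio_mask[0] == 0 and
 * prio_mask[1] == 0x00100000, the loop skips slot 0 (prio += 32) and
 * tos_cpu_clz(0x00100000) == 11, so the highest ready priority is 32 + 11 = 43.
 */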
__STATIC_INLINE__ void readyqueue_prio_insert(k_prio_t prio)
{
k_rdyq.prio_mask[K_PRIO_NDX(prio)] |= K_PRIO_BIT(prio);
}
__STATIC_INLINE__ void readyqueue_prio_remove(k_prio_t prio)
{
k_rdyq.prio_mask[K_PRIO_NDX(prio)] &= ~K_PRIO_BIT(prio);
}
__STATIC_INLINE__ void readyqueue_prio_mark(k_prio_t prio)
{
readyqueue_prio_insert(prio);
if (prio < k_rdyq.highest_prio) {
k_rdyq.highest_prio = prio;
}
}
/**
 * when this function is invoked, there must be at least one task in the task list of the given priority
*/
__KERNEL__ int readyqueue_is_prio_onlyone(k_prio_t prio)
{
k_list_t *task_list;
k_task_t *task;
task_list = &k_rdyq.task_list_head[prio];
task = TOS_LIST_FIRST_ENTRY(task_list, k_task_t, pend_list);
return task->pend_list.next == task_list;
}
__KERNEL__ k_task_t *readyqueue_first_task_get(k_prio_t prio)
{
k_list_t *task_list;
task_list = &k_rdyq.task_list_head[prio];
return TOS_LIST_FIRST_ENTRY_OR_NULL(task_list, k_task_t, pend_list);
}
__KERNEL__ k_task_t *readyqueue_highest_ready_task_get(void)
{
k_list_t *task_list;
task_list = &k_rdyq.task_list_head[k_rdyq.highest_prio];
return TOS_LIST_FIRST_ENTRY(task_list, k_task_t, pend_list);
}
__KERNEL__ void readyqueue_init(void)
{
uint8_t i;
k_rdyq.highest_prio = TOS_CFG_TASK_PRIO_MAX;
for (i = 0; i < TOS_CFG_TASK_PRIO_MAX; ++i) {
tos_list_init(&k_rdyq.task_list_head[i]);
}
for (i = 0; i < K_PRIO_TBL_SIZE; ++i) {
k_rdyq.prio_mask[i] = 0;
}
}
__DEBUG__ void readyqueue_walkthru(void)
{
uint8_t i;
k_task_t *task;
k_list_t *task_list, *curr;
tos_kprintf("==========================\n");
tos_kprintf("%d\n", k_rdyq.highest_prio);
for (i = 0; i < TOS_CFG_TASK_PRIO_MAX; ++i) {
task_list = &k_rdyq.task_list_head[i];
if (!tos_list_empty(task_list)) {
TOS_LIST_FOR_EACH(curr, task_list) {
task = TOS_LIST_ENTRY(curr, k_task_t, pend_list);
tos_kprintf("---- %d %d [%d] %s\n", task->prio, i, task->state, task->name);
}
}
}
tos_kprintf("\n\n");
}
__KERNEL__ void readyqueue_add_head(k_task_t *task)
{
k_prio_t task_prio;
k_list_t *task_list;
task_prio = task->prio;
task_list = &k_rdyq.task_list_head[task_prio];
if (tos_list_empty(task_list)) {
readyqueue_prio_mark(task_prio);
}
tos_list_add(&task->pend_list, task_list);
}
__KERNEL__ void readyqueue_add_tail(k_task_t *task)
{
k_prio_t task_prio;
k_list_t *task_list;
task_prio = task->prio;
task_list = &k_rdyq.task_list_head[task_prio];
if (tos_list_empty(task_list)) {
readyqueue_prio_mark(task_prio);
}
tos_list_add_tail(&task->pend_list, task_list);
}
__KERNEL__ void readyqueue_add(k_task_t *task)
{
if (task->prio == k_curr_task->prio) {
readyqueue_add_tail(task);
} else {
readyqueue_add_head(task);
}
}
__KERNEL__ void readyqueue_remove(k_task_t *task)
{
k_prio_t task_prio;
k_list_t *task_list;
task_prio = task->prio;
task_list = &k_rdyq.task_list_head[task_prio];
tos_list_del(&task->pend_list);
if (tos_list_empty(task_list)) {
readyqueue_prio_remove(task_prio);
}
if (task_prio == k_rdyq.highest_prio) {
k_rdyq.highest_prio = readyqueue_prio_highest_get();
}
}
__KERNEL__ void readyqueue_move_head_to_tail(k_prio_t prio)
{
k_list_t *task_list;
task_list = &k_rdyq.task_list_head[prio];
if (!tos_list_empty(task_list)) {
tos_list_move_tail(task_list->next, task_list);
}
}
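readyqueue_prio_highest_get() above scans the priority bitmap one 32-bit slot at a time, then finishes with a count-leading-zeros, so the most significant bit of slot 0 presumably stands for priority 0. A self-contained sketch of the same lookup technique, assuming 32-bit slots, MSB-first K_PRIO_BIT-style masks, and a portable clz in place of tos_cpu_clz():

#include <stdint.h>

#define PRIO_SLOT_SIZE  32u

static uint32_t prio_mask[2];                 /* enough for 64 priorities */

/* portable count-leading-zeros; tos_cpu_clz() would map to CLZ on Cortex-M */
static uint32_t clz32(uint32_t x)
{
    uint32_t n = 0;
    if (x == 0) { return 32; }
    while (!(x & 0x80000000u)) { x <<= 1; ++n; }
    return n;
}

static void prio_insert(uint32_t prio)
{
    prio_mask[prio / PRIO_SLOT_SIZE] |= 0x80000000u >> (prio % PRIO_SLOT_SIZE);
}

static uint32_t prio_highest(void)
{
    uint32_t i = 0;
    while (prio_mask[i] == 0) { ++i; }        /* assumes at least one bit set,
                                                 as the idle task is always ready */
    return i * PRIO_SLOT_SIZE + clz32(prio_mask[i]);
}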

kernel/core/tos_sem.c Normal file
@@ -0,0 +1,124 @@
#include "tos.h"
#if TOS_CFG_SEM_EN > 0u
__API__ k_err_t tos_sem_create(k_sem_t *sem, k_sem_cnt_t init_count)
{
TOS_PTR_SANITY_CHECK(sem);
pend_object_init(&sem->pend_obj, PEND_TYPE_SEM);
sem->count = init_count;
return K_ERR_NONE;
}
__API__ k_err_t tos_sem_destroy(k_sem_t *sem)
{
TOS_CPU_CPSR_ALLOC();
TOS_PTR_SANITY_CHECK(sem);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!pend_object_verify(&sem->pend_obj, PEND_TYPE_SEM)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
if (!pend_is_nopending(&sem->pend_obj)) {
pend_wakeup_all(&sem->pend_obj, PEND_STATE_DESTROY);
}
pend_object_deinit(&sem->pend_obj);
TOS_CPU_INT_ENABLE();
knl_sched();
return K_ERR_NONE;
}
__STATIC__ k_err_t sem_do_post(k_sem_t *sem, opt_post_t opt)
{
TOS_CPU_CPSR_ALLOC();
TOS_PTR_SANITY_CHECK(sem);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!pend_object_verify(&sem->pend_obj, PEND_TYPE_SEM)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
if (sem->count == (k_sem_cnt_t)-1) {
TOS_CPU_INT_ENABLE();
return K_ERR_SEM_OVERFLOW;
}
if (pend_is_nopending(&sem->pend_obj)) {
++sem->count;
TOS_CPU_INT_ENABLE();
return K_ERR_NONE;
}
pend_wakeup(&sem->pend_obj, PEND_STATE_POST, opt);
TOS_CPU_INT_ENABLE();
knl_sched();
return K_ERR_NONE;
}
__API__ k_err_t tos_sem_post(k_sem_t *sem)
{
return sem_do_post(sem, OPT_POST_ONE);
}
__API__ k_err_t tos_sem_post_all(k_sem_t *sem)
{
return sem_do_post(sem, OPT_POST_ALL);
}
__API__ k_err_t tos_sem_pend(k_sem_t *sem, k_tick_t timeout)
{
TOS_CPU_CPSR_ALLOC();
TOS_PTR_SANITY_CHECK(sem);
TOS_IN_IRQ_CHECK();
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!pend_object_verify(&sem->pend_obj, PEND_TYPE_SEM)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
if (sem->count > (k_sem_cnt_t)0u) {
--sem->count;
TOS_CPU_INT_ENABLE();
return K_ERR_NONE;
}
if (timeout == TOS_TIME_NOWAIT) { // no wait, return immediately
TOS_CPU_INT_ENABLE();
return K_ERR_PEND_NOWAIT;
}
if (knl_is_sched_locked()) {
TOS_CPU_INT_ENABLE();
return K_ERR_PEND_SCHED_LOCKED;
}
pend_task_block(k_curr_task, &sem->pend_obj, timeout);
TOS_CPU_INT_ENABLE();
knl_sched();
return pend_state2errno(k_curr_task->pend_state);
}
#endif // TOS_CFG_SEM_EN
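A minimal counting-semaphore sketch using only the APIs above; task creation and the startup call to tos_sem_create() are assumed to happen elsewhere:

k_sem_t demo_sem;   /* tos_sem_create(&demo_sem, 0u); at startup */

void consumer_entry(void *arg)
{
    arg = arg; // make compiler happy
    while (K_TRUE) {
        if (tos_sem_pend(&demo_sem, TOS_TIME_FOREVER) == K_ERR_NONE) {
            /* one unit of work is available; consume it */
        }
    }
}

void producer_notify(void)
{
    tos_sem_post(&demo_sem);   /* wakes one pender (OPT_POST_ONE) */
}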

kernel/core/tos_sys.c Normal file
@@ -0,0 +1,262 @@
#include <tos.h>
__API__ k_err_t tos_knl_init(void)
{
k_err_t err;
cpu_init();
readyqueue_init();
#if TOS_CFG_MMHEAP_EN > 0
mmheap_init(k_mmheap_pool, TOS_CFG_MMHEAP_POOL_SIZE);
#endif
#if TOS_CFG_MSG_EN > 0
msgpool_init();
#endif
err = knl_idle_init();
if (err != K_ERR_NONE) {
return err;
}
#if TOS_CFG_TIMER_EN > 0
err = timer_init();
if (err != K_ERR_NONE) {
return err;
}
#endif
#if TOS_CFG_PWR_MGR_EN > 0U
pm_init();
#endif
#if TOS_CFG_TICKLESS_EN > 0u
tickless_init();
#endif
return K_ERR_NONE;
}
__API__ void tos_knl_irq_enter(void)
{
if (!tos_knl_is_running()) {
return;
}
if (unlikely(k_irq_nest_cnt >= K_NESTING_LIMIT_IRQ)) {
return;
}
++k_irq_nest_cnt;
}
__API__ void tos_knl_irq_leave(void)
{
TOS_CPU_CPSR_ALLOC();
if (!tos_knl_is_running()) {
return;
}
TOS_CPU_INT_DISABLE();
if (!knl_is_inirq()) {
TOS_CPU_INT_ENABLE();
return;
}
--k_irq_nest_cnt;
if (knl_is_inirq()) {
TOS_CPU_INT_ENABLE();
return;
}
if (knl_is_sched_locked()) {
TOS_CPU_INT_ENABLE();
return;
}
k_next_task = readyqueue_highest_ready_task_get();
if (knl_is_self(k_next_task)) {
TOS_CPU_INT_ENABLE();
return;
}
cpu_irq_context_switch();
TOS_CPU_INT_ENABLE();
}
__API__ k_err_t tos_knl_sched_lock(void)
{
TOS_CPU_CPSR_ALLOC();
TOS_IN_IRQ_CHECK();
if (!tos_knl_is_running()) {
return K_ERR_KNL_NOT_RUNNING;
}
if (k_sched_lock_nest_cnt >= K_NESTING_LIMIT_SCHED_LOCK) {
return K_ERR_LOCK_NESTING_OVERFLOW;
}
TOS_CPU_INT_DISABLE();
++k_sched_lock_nest_cnt;
TOS_CPU_INT_ENABLE();
return K_ERR_NONE;
}
__API__ k_err_t tos_knl_sched_unlock(void)
{
TOS_CPU_CPSR_ALLOC();
TOS_IN_IRQ_CHECK();
if (!tos_knl_is_running()) {
return K_ERR_KNL_NOT_RUNNING;
}
if (!knl_is_sched_locked()) {
return K_ERR_SCHED_NOT_LOCKED;
}
TOS_CPU_INT_DISABLE();
--k_sched_lock_nest_cnt;
TOS_CPU_INT_ENABLE();
knl_sched();
return K_ERR_NONE;
}
__API__ k_err_t tos_knl_start(void)
{
if (tos_knl_is_running()) {
return K_ERR_KNL_RUNNING;
}
k_next_task = readyqueue_highest_ready_task_get();
k_curr_task = k_next_task;
k_knl_state = KNL_STATE_RUNNING;
cpu_sched_start();
return K_ERR_NONE;
}
__API__ int tos_knl_is_running(void)
{
return k_knl_state == KNL_STATE_RUNNING;
}
#if TOS_CFG_TICKLESS_EN > 0u
/**
 * @brief Get the remaining ticks of the first oncoming task.
 *
 * @return The remaining ticks until the next task (or software timer) is due to be scheduled.
 */
__KERNEL__ k_tick_t knl_next_expires_get(void)
{
k_tick_t tick_next_expires;
#if TOS_CFG_TIMER_EN > 0u
k_tick_t timer_next_expires;
#endif
tick_next_expires = tick_next_expires_get();
#if TOS_CFG_TIMER_EN > 0u
timer_next_expires = timer_next_expires_get();
#endif
#if TOS_CFG_TIMER_EN > 0u
return tick_next_expires < timer_next_expires ? tick_next_expires : timer_next_expires;
#else
return tick_next_expires;
#endif
}
#endif
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
__KERNEL__ int knl_object_verify(knl_obj_t *object, knl_obj_type_t type)
{
return object->type == type;
}
__KERNEL__ int knl_object_init(knl_obj_t *object, knl_obj_type_t type)
{
return object->type = type;
}
__KERNEL__ int knl_object_deinit(knl_obj_t *object)
{
return object->type = KNL_OBJ_TYPE_NONE;
}
#endif
__KERNEL__ void knl_sched(void)
{
TOS_CPU_CPSR_ALLOC();
if (knl_is_inirq()) {
return;
}
if (knl_is_sched_locked()) {
return;
}
TOS_CPU_INT_DISABLE();
k_next_task = readyqueue_highest_ready_task_get();
if (knl_is_self(k_next_task)) {
TOS_CPU_INT_ENABLE();
return;
}
cpu_context_switch();
TOS_CPU_INT_ENABLE();
}
__KERNEL__ int knl_is_sched_locked(void)
{
return k_sched_lock_nest_cnt > 0u;
}
__KERNEL__ int knl_is_inirq(void)
{
return k_irq_nest_cnt > 0u;
}
__KERNEL__ int knl_is_idle(k_task_t *task)
{
return task == &k_idle_task;
}
__KERNEL__ int knl_is_self(k_task_t *task)
{
return task == k_curr_task;
}
__STATIC__ void knl_idle_entry(void *arg)
{
arg = arg; // make compiler happy
while (K_TRUE) {
#if TOS_CFG_PWR_MGR_EN > 0u
pm_power_manager();
#endif
}
}
__KERNEL__ k_err_t knl_idle_init(void)
{
return tos_task_create(&k_idle_task, "idle",
knl_idle_entry, K_NULL,
K_TASK_PRIO_IDLE,
k_idle_task_stk_addr,
k_idle_task_stk_size,
0);
}
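Taken together, a typical bring-up is: tos_knl_init(), create at least one application task, then tos_knl_start(), which hands the CPU to the scheduler and does not return on success. A sketch with illustrative names and sizes (board/BSP clock and peripheral init assumed done elsewhere):

k_task_t app_task;
k_stack_t app_stk[1024];

void app_entry(void *arg)
{
    arg = arg; // make compiler happy
    while (K_TRUE) {
        tos_sleep_ms(1000);   /* see tos_time.c below */
    }
}

int main(void)
{
    /* board_init(); -- assumed BSP hook */
    tos_knl_init();                              /* ready queue, heap, idle task */
    tos_task_create(&app_task, "app", app_entry, K_NULL,
                    5u, app_stk, sizeof(app_stk), 0u);
    tos_knl_start();                             /* scheduler takes over here */
    return 0;
}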

kernel/core/tos_task.c Normal file
@@ -0,0 +1,416 @@
#include <tos.h>
__STATIC_INLINE__ void task_reset(k_task_t *task)
{
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
knl_object_deinit(&task->knl_obj);
#endif
tos_list_init(&task->tick_list);
tos_list_init(&task->pend_list);
#if TOS_CFG_MUTEX_EN > 0u
tos_list_init(&task->mutex_own_list);
task->prio_pending = K_TASK_PRIO_INVALID;
#endif
task->pend_state = PEND_STATE_NONE;
task->pending_obj = (pend_obj_t *)K_NULL;
#if TOS_CFG_MSG_EN > 0u
task->msg_addr = K_NULL;
task->msg_size = 0;
#endif
}
__STATIC__ void task_exit(void)
{
tos_task_destroy(K_NULL);
}
#if TOS_CFG_MUTEX_EN > 0u
__STATIC__ k_prio_t task_highest_pending_prio_get(k_task_t *task)
{
k_list_t *curr;
k_mutex_t *mutex;
k_prio_t prio, highest_prio_pending = K_TASK_PRIO_INVALID;
TOS_LIST_FOR_EACH(curr, &task->mutex_own_list) {
mutex = TOS_LIST_ENTRY(curr, k_mutex_t, owner_list);
prio = pend_highest_prio_get(&mutex->pend_obj);
if (prio < highest_prio_pending) {
highest_prio_pending = prio;
}
}
return highest_prio_pending;
}
__STATIC__ void task_mutex_release(k_task_t *task)
{
k_list_t *curr, *next;
TOS_LIST_FOR_EACH_SAFE(curr, next, &task->mutex_own_list) {
mutex_release(TOS_LIST_ENTRY(curr, k_mutex_t, owner_list));
}
}
#endif
__API__ k_err_t tos_task_create(k_task_t *task,
char *name,
k_task_entry_t entry,
void *arg,
k_prio_t prio,
k_stack_t *stk_base,
size_t stk_size,
k_timeslice_t timeslice)
{
TOS_CPU_CPSR_ALLOC();
TOS_IN_IRQ_CHECK();
TOS_PTR_SANITY_CHECK(task);
TOS_PTR_SANITY_CHECK(entry);
TOS_PTR_SANITY_CHECK(stk_base);
if (unlikely(stk_size < sizeof(cpu_context_t))) {
return K_ERR_TASK_STK_SIZE_INVALID;
}
if (unlikely(prio == K_TASK_PRIO_IDLE && !knl_is_idle(task))) {
return K_ERR_TASK_PRIO_INVALID;
}
if (unlikely(prio > K_TASK_PRIO_IDLE)) {
return K_ERR_TASK_PRIO_INVALID;
}
task_reset(task);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
knl_object_init(&task->knl_obj, KNL_OBJ_TYPE_TASK);
#endif
task->sp = cpu_task_stk_init((void *)entry, arg, (void *)task_exit, stk_base, stk_size);
task->entry = entry;
task->arg = arg;
task->name = name;
task->prio = prio;
task->stk_base = stk_base;
task->stk_size = stk_size;
#if TOS_CFG_ROUND_ROBIN_EN > 0u
task->timeslice_reload = timeslice;
if (timeslice == (k_timeslice_t)0u) {
task->timeslice = k_robin_default_timeslice;
} else {
task->timeslice = timeslice;
}
#endif
TOS_CPU_INT_DISABLE();
task_state_set_ready(task);
readyqueue_add_tail(task);
TOS_CPU_INT_ENABLE();
if (tos_knl_is_running()) {
knl_sched();
}
return K_ERR_NONE;
}
__API__ k_err_t tos_task_destroy(k_task_t *task)
{
TOS_CPU_CPSR_ALLOC();
TOS_IN_IRQ_CHECK();
if (unlikely(!task)) {
task = k_curr_task;
}
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!knl_object_verify(&task->knl_obj, KNL_OBJ_TYPE_TASK)) {
return K_ERR_OBJ_INVALID;
}
#endif
if (knl_is_idle(task)) {
return K_ERR_TASK_DESTROY_IDLE;
}
if (knl_is_self(task) && knl_is_sched_locked()) {
return K_ERR_SCHED_LOCKED;
}
TOS_CPU_INT_DISABLE();
#if TOS_CFG_MUTEX_EN > 0u
// when we die, wakeup all the people in this land.
if (!tos_list_empty(&task->mutex_own_list)) {
task_mutex_release(task);
}
#endif
if (task_state_is_ready(task)) { // that's simple, good kid
readyqueue_remove(task);
}
if (task_state_is_sleeping(task)) {
tick_list_remove(task);
}
if (task_state_is_pending(task)) {
pend_list_remove(task);
}
task_reset(task);
task_state_set_deleted(task);
TOS_CPU_INT_ENABLE();
knl_sched();
return K_ERR_NONE;
}
__API__ void tos_task_yield(void)
{
TOS_CPU_CPSR_ALLOC();
if (knl_is_inirq()) {
return;
}
TOS_CPU_INT_DISABLE();
readyqueue_remove(k_curr_task);
readyqueue_add_tail(k_curr_task);
TOS_CPU_INT_ENABLE();
knl_sched();
}
__API__ k_err_t tos_task_prio_change(k_task_t *task, k_prio_t prio_new)
{
TOS_CPU_CPSR_ALLOC();
#if TOS_CFG_MUTEX_EN > 0u
k_prio_t highest_pending_prio;
#endif
TOS_PTR_SANITY_CHECK(task);
TOS_IN_IRQ_CHECK();
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!knl_object_verify(&task->knl_obj, KNL_OBJ_TYPE_TASK)) {
return K_ERR_OBJ_INVALID;
}
#endif
if (unlikely(prio_new >= K_TASK_PRIO_IDLE)) {
return K_ERR_TASK_PRIO_INVALID;
}
TOS_CPU_INT_DISABLE();
if (task->prio == prio_new) { // just kidding
TOS_CPU_INT_ENABLE();
knl_sched();
return K_ERR_NONE;
}
#if TOS_CFG_MUTEX_EN > 0u
if (!tos_list_empty(&task->mutex_own_list)) {
highest_pending_prio = task_highest_pending_prio_get(task);
if (prio_new > highest_pending_prio) {
task->prio_pending = prio_new;
prio_new = highest_pending_prio;
}
}
#endif
if (task_state_is_pending(task)) {
task->prio = prio_new;
pend_list_adjust(task);
} else if (task_state_is_sleeping(task)) {
task->prio = prio_new;
} else if (task_state_is_ready(task)) { // good kid
readyqueue_remove(task);
/* ATTENTION:
must do the prio assignment after readyqueue_remove
otherwise the k_rdyq.highest_prio refresh in readyqueue_remove will be wrong.
*/
task->prio = prio_new;
if (knl_is_self(task)) {
readyqueue_add_head(task);
} else {
readyqueue_add_tail(task);
}
}
TOS_CPU_INT_ENABLE();
knl_sched();
return K_ERR_NONE;
}
__API__ k_err_t tos_task_suspend(k_task_t *task)
{
TOS_CPU_CPSR_ALLOC();
if (unlikely(!task)) {
task = k_curr_task;
}
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!knl_object_verify(&task->knl_obj, KNL_OBJ_TYPE_TASK)) {
return K_ERR_OBJ_INVALID;
}
#endif
if (knl_is_idle(task)) {
return K_ERR_TASK_SUSPEND_IDLE;
}
if (unlikely(knl_is_self(task)) && knl_is_sched_locked()) { // if not you, who?
return K_ERR_SCHED_LOCKED;
}
TOS_CPU_INT_DISABLE();
if (task_state_is_ready(task)) { // kill the good kid
readyqueue_remove(task);
}
task_state_set_suspended(task);
TOS_CPU_INT_ENABLE();
knl_sched();
return K_ERR_NONE;
}
__API__ k_err_t tos_task_resume(k_task_t *task)
{
TOS_CPU_CPSR_ALLOC();
TOS_PTR_SANITY_CHECK(task);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!knl_object_verify(&task->knl_obj, KNL_OBJ_TYPE_TASK)) {
return K_ERR_OBJ_INVALID;
}
#endif
if (unlikely(knl_is_self(task))) {
return K_ERR_TASK_RESUME_SELF;
}
TOS_CPU_INT_DISABLE();
if (!task_state_is_suspended(task)) {
TOS_CPU_INT_ENABLE();
knl_sched();
return K_ERR_NONE;
}
task_state_reset_suspended(task);
if (task_state_is_ready(task)) { // we are good kid now
readyqueue_add(task);
}
TOS_CPU_INT_ENABLE();
knl_sched();
return K_ERR_NONE;
}
__API__ k_err_t tos_task_delay(k_tick_t delay)
{
TOS_CPU_CPSR_ALLOC();
TOS_IN_IRQ_CHECK();
if (knl_is_sched_locked()) {
return K_ERR_SCHED_LOCKED;
}
if (unlikely(delay == (k_tick_t)0u)) {
tos_task_yield();
return K_ERR_NONE;
}
TOS_CPU_INT_DISABLE();
if (tick_list_add(k_curr_task, delay) != K_ERR_NONE) {
TOS_CPU_INT_ENABLE();
return K_ERR_DELAY_FOREVER;
}
readyqueue_remove(k_curr_task);
TOS_CPU_INT_ENABLE();
knl_sched();
return K_ERR_NONE;
}
__API__ k_err_t tos_task_delay_abort(k_task_t *task)
{
TOS_CPU_CPSR_ALLOC();
TOS_PTR_SANITY_CHECK(task);
TOS_IN_IRQ_CHECK();
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!knl_object_verify(&task->knl_obj, KNL_OBJ_TYPE_TASK)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
if (knl_is_self(task) || !task_state_is_sleeping(task)) {
TOS_CPU_INT_ENABLE();
return K_ERR_TASK_NOT_DELAY;
}
if (task_state_is_suspended(task)) {
TOS_CPU_INT_ENABLE();
return K_ERR_TASK_SUSPENDED;
}
tick_list_remove(task);
readyqueue_add(task);
TOS_CPU_INT_ENABLE();
knl_sched();
return K_ERR_NONE;
}
#if TOS_CFG_TASK_STACK_DRAUGHT_DEPTH_DETACT_EN > 0u
__API__ k_err_t tos_task_stack_draught_depth(k_task_t *task, int *depth)
{
TOS_CPU_CPSR_ALLOC();
k_err_t rc;
TOS_PTR_SANITY_CHECK(depth);
if (unlikely(!task)) {
task = k_curr_task;
}
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!knl_object_verify(&task->knl_obj, KNL_OBJ_TYPE_TASK)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
rc = cpu_task_stack_draught_depth(task->stk_base, task->stk_size, depth);
TOS_CPU_INT_ENABLE();
return rc;
}
#endif
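A short sketch of driving one task from another with the calls above; the worker is assumed to have been created elsewhere, and return codes are ignored for brevity:

extern k_task_t worker;   /* created elsewhere with tos_task_create() */

void supervisor_step(void)
{
    tos_task_prio_change(&worker, 3u);  /* requeues it if it is ready */
    tos_task_suspend(&worker);          /* park it until resumed */
    /* ... later ... */
    tos_task_resume(&worker);           /* back into the ready queue */
}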

kernel/core/tos_tick.c Normal file
@@ -0,0 +1,143 @@
#include <tos.h>
__STATIC__ void tick_task_place(k_task_t *task, k_tick_t timeout)
{
TOS_CPU_CPSR_ALLOC();
k_list_t *curr;
k_task_t *curr_task = K_NULL;
k_tick_t curr_expires, prev_expires = (k_tick_t)0u;
TOS_CPU_INT_DISABLE();
task->tick_expires = timeout;
TOS_LIST_FOR_EACH(curr, &k_tick_list) {
curr_task = TOS_LIST_ENTRY(curr, k_task_t, tick_list);
curr_expires = prev_expires + curr_task->tick_expires;
if (task->tick_expires < curr_expires) {
break;
}
prev_expires = curr_expires;
}
task->tick_expires -= prev_expires;
if (curr != &k_tick_list && curr_task) {
curr_task->tick_expires -= task->tick_expires;
}
tos_list_add_tail(&task->tick_list, curr);
TOS_CPU_INT_ENABLE();
}
__STATIC__ void tick_task_takeoff(k_task_t *task)
{
TOS_CPU_CPSR_ALLOC();
k_task_t *next;
TOS_CPU_INT_DISABLE();
next = TOS_LIST_FIRST_ENTRY_OR_NULL(&task->tick_list, k_task_t, tick_list);
if (next && task->tick_list.next != &k_tick_list) { // not the only one
if (next->tick_expires <= K_TIME_MAX - task->tick_expires) {
next->tick_expires += task->tick_expires;
} else {
next->tick_expires = K_TIME_MAX;
}
}
tos_list_del(&task->tick_list);
TOS_CPU_INT_ENABLE();
}
__KERNEL__ k_err_t tick_list_add(k_task_t *task, k_tick_t timeout)
{
if (timeout == TOS_TIME_NOWAIT) {
return K_ERR_DELAY_ZERO;
}
if (timeout == TOS_TIME_FOREVER) {
return K_ERR_DELAY_FOREVER;
}
task->tick_expires = timeout;
tick_task_place(task, timeout);
task_state_set_sleeping(task);
return K_ERR_NONE;
}
__KERNEL__ void tick_list_remove(k_task_t *task)
{
tick_task_takeoff(task);
task_state_reset_sleeping(task);
}
__KERNEL__ void tick_update(k_tick_t tick)
{
TOS_CPU_CPSR_ALLOC();
k_task_t *first, *task;
k_list_t *curr, *next;
TOS_CPU_INT_DISABLE();
k_tick_count += tick;
if (tos_list_empty(&k_tick_list)) {
TOS_CPU_INT_ENABLE();
return;
}
first = TOS_LIST_FIRST_ENTRY(&k_tick_list, k_task_t, tick_list);
if (first->tick_expires <= tick) {
first->tick_expires = (k_tick_t)0u;
} else {
first->tick_expires -= tick;
TOS_CPU_INT_ENABLE();
return;
}
TOS_LIST_FOR_EACH_SAFE(curr, next, &k_tick_list) {
task = TOS_LIST_ENTRY(curr, k_task_t, tick_list);
if (task->tick_expires > (k_tick_t)0u) {
break;
}
// we are pending on something, but tick's up, no longer waiting
pend_task_wakeup(task, PEND_STATE_TIMEOUT);
}
TOS_CPU_INT_ENABLE();
}
__KERNEL__ k_tick_t tick_next_expires_get(void)
{
TOS_CPU_CPSR_ALLOC();
k_tick_t next_expires;
k_task_t *first;
TOS_CPU_INT_DISABLE();
first = TOS_LIST_FIRST_ENTRY_OR_NULL(&k_tick_list, k_task_t, tick_list);
next_expires = first ? first->tick_expires : TOS_TIME_FOREVER;
TOS_CPU_INT_ENABLE();
return next_expires;
}
__API__ void tos_tick_handler(void)
{
if (unlikely(!tos_knl_is_running())) {
return;
}
tick_update((k_tick_t)1u);
#if TOS_CFG_TIMER_EN > 0u && TOS_CFG_TIMER_AS_PROC > 0u
timer_update();
#endif
#if TOS_CFG_ROUND_ROBIN_EN > 0u
robin_sched(k_curr_task->prio);
#endif
}
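tos_tick_handler() is meant to be called once per hardware tick from the port's timer interrupt, bracketed by the IRQ enter/leave pair from tos_sys.c so that a due context switch happens on interrupt exit. A Cortex-M-style sketch (the handler name follows the vendor startup file):

void SysTick_Handler(void)
{
    tos_knl_irq_enter();   /* bump k_irq_nest_cnt */
    tos_tick_handler();    /* advance k_tick_count, timers, round-robin */
    tos_knl_irq_leave();   /* pick k_next_task and switch if needed */
}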

kernel/core/tos_time.c Normal file
@@ -0,0 +1,49 @@
#include <tos.h>
__API__ k_tick_t tos_systick_get(void)
{
TOS_CPU_CPSR_ALLOC();
k_tick_t tick;
TOS_CPU_INT_DISABLE();
tick = k_tick_count;
TOS_CPU_INT_ENABLE();
return tick;
}
__API__ void tos_systick_set(k_tick_t tick)
{
TOS_CPU_CPSR_ALLOC();
TOS_CPU_INT_DISABLE();
k_tick_count = tick;
TOS_CPU_INT_ENABLE();
}
__API__ k_time_t tos_tick2millisec(k_tick_t tick)
{
return (k_time_t)(tick * K_TIME_MILLISEC_PER_SEC / TOS_CFG_CPU_TICK_PER_SECOND);
}
__API__ k_tick_t tos_millisec2tick(k_time_t ms)
{
return ((k_tick_t)ms * TOS_CFG_CPU_TICK_PER_SECOND / K_TIME_MILLISEC_PER_SEC);
}
__API__ k_err_t tos_sleep_ms(k_time_t ms)
{
return tos_task_delay(tos_millisec2tick(ms));
}
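/* note: the (millisec + 500u / TOS_CFG_CPU_TICK_PER_SECOND) term below
   effectively rounds the millisecond component to the nearest tick
   rather than truncating it */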
__STATIC_INLINE__ k_tick_t time_hmsm2tick(k_time_t hour, k_time_t minute, k_time_t second, k_time_t millisec)
{
return ((k_tick_t)hour * (k_tick_t)3600u + (k_tick_t)minute * (k_tick_t)60u +
(k_tick_t)second) * TOS_CFG_CPU_TICK_PER_SECOND +
(TOS_CFG_CPU_TICK_PER_SECOND * ((k_tick_t)millisec + (k_tick_t)500u / TOS_CFG_CPU_TICK_PER_SECOND)) / (k_tick_t)1000u;
}
__API__ k_err_t tos_sleep_hmsm(k_time_t hour, k_time_t minute, k_time_t second, k_time_t millisec)
{
return tos_task_delay(time_hmsm2tick(hour, minute, second, millisec));
}
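A worked example of the conversions above, assuming TOS_CFG_CPU_TICK_PER_SECOND is 100 (a 10 ms tick):

void sleep_demo(void)
{
    /* tos_millisec2tick(25) == 25 * 100 / 1000 == 2 ticks (truncated)
       tos_tick2millisec(2)  == 2 * 1000 / 100  == 20 ms */
    tos_sleep_ms(500);                    /* 50 ticks */
    tos_sleep_hmsm(0u, 0u, 1u, 500u);     /* 1.5 s */
}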

kernel/core/tos_timer.c Normal file
@@ -0,0 +1,329 @@
#include <tos.h>
#if TOS_CFG_TIMER_EN > 0u
__STATIC__ void timer_place(k_timer_t *tmr)
{
TOS_CPU_CPSR_ALLOC();
k_list_t *curr;
k_timer_t *iter = K_NULL;
TOS_CPU_INT_DISABLE();
tmr->expires += k_tick_count;
TOS_LIST_FOR_EACH(curr, &k_timer_ctl.list) {
iter = TOS_LIST_ENTRY(curr, k_timer_t, list);
if (tmr->expires < iter->expires) {
break;
}
}
tos_list_add_tail(&tmr->list, curr);
if (k_timer_ctl.list.next == &tmr->list) {
// we are the first guy now
k_timer_ctl.next_expires = tmr->expires;
#if TOS_CFG_TIMER_AS_PROC == 0u
if (task_state_is_sleeping(&k_timer_task)) {
tos_task_delay_abort(&k_timer_task);
}
#endif
}
#if TOS_CFG_TIMER_AS_PROC == 0u
if (task_state_is_suspended(&k_timer_task)) {
tos_task_resume(&k_timer_task);
}
#endif
TOS_CPU_INT_ENABLE();
}
__STATIC__ void timer_takeoff(k_timer_t *tmr)
{
TOS_CPU_CPSR_ALLOC();
k_timer_t *first, *next;
TOS_CPU_INT_DISABLE();
first = TOS_LIST_FIRST_ENTRY(&k_timer_ctl.list, k_timer_t, list);
tos_list_del(&tmr->list);
if (first == tmr) {
// if the first guy removed, we need to refresh k_timer_ctl.next_expires
next = TOS_LIST_FIRST_ENTRY_OR_NULL(&tmr->list, k_timer_t, list);
if (!next) {
// the only guy removed
k_timer_ctl.next_expires = TOS_TIME_FOREVER;
} else {
k_timer_ctl.next_expires = next->expires;
}
}
TOS_CPU_INT_ENABLE();
}
__STATIC_INLINE__ void timer_reset(k_timer_t *tmr)
{
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
knl_object_deinit(&tmr->knl_obj);
#endif
tmr->state = TIMER_STATE_UNUSED;
tmr->delay = (k_tick_t)0u;
tmr->expires = (k_tick_t)0u;
tmr->period = (k_tick_t)0u;
tmr->opt = (k_opt_t)0u;
tmr->cb = K_NULL;
tmr->cb_arg = K_NULL;
tos_list_init(&tmr->list);
}
__API__ k_err_t tos_timer_create(k_timer_t *tmr,
k_tick_t delay,
k_tick_t period,
k_timer_callback_t callback,
void *cb_arg,
k_opt_t opt)
{
TOS_PTR_SANITY_CHECK(tmr);
TOS_PTR_SANITY_CHECK(callback);
if (opt == TOS_OPT_TIMER_PERIODIC && period == (k_tick_t)0u) {
return K_ERR_TIMER_INVALID_PERIOD;
}
if (opt == TOS_OPT_TIMER_ONESHOT && delay == (k_tick_t)0u) {
return K_ERR_TIMER_INVALID_DELAY;
}
if (opt != TOS_OPT_TIMER_ONESHOT && opt != TOS_OPT_TIMER_PERIODIC) {
return K_ERR_TIMER_INVALID_OPT;
}
if (delay == TOS_TIME_FOREVER) {
return K_ERR_TIMER_DELAY_FOREVER;
}
if (period == TOS_TIME_FOREVER) {
return K_ERR_TIMER_PERIOD_FOREVER;
}
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
knl_object_init(&tmr->knl_obj, KNL_OBJ_TYPE_TIMER);
#endif
tmr->state = TIMER_STATE_STOPPED;
tmr->delay = delay;
tmr->expires = (k_tick_t)0u;
tmr->period = period;
tmr->opt = opt;
tmr->cb = callback;
tmr->cb_arg = cb_arg;
tos_list_init(&tmr->list);
return K_ERR_NONE;
}
__API__ k_err_t tos_timer_destroy(k_timer_t *tmr)
{
TOS_PTR_SANITY_CHECK(tmr);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!knl_object_verify(&tmr->knl_obj, KNL_OBJ_TYPE_TIMER)) {
return K_ERR_OBJ_INVALID;
}
#endif
if (tmr->state == TIMER_STATE_UNUSED) {
return K_ERR_TIMER_INACTIVE;
}
if (tmr->state == TIMER_STATE_RUNNING) {
timer_takeoff(tmr);
}
timer_reset(tmr);
return K_ERR_NONE;
}
__API__ k_err_t tos_timer_start(k_timer_t *tmr)
{
TOS_PTR_SANITY_CHECK(tmr);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!knl_object_verify(&tmr->knl_obj, KNL_OBJ_TYPE_TIMER)) {
return K_ERR_OBJ_INVALID;
}
#endif
if (tmr->state == TIMER_STATE_UNUSED) {
return K_ERR_TIMER_INACTIVE;
}
if (tmr->state == TIMER_STATE_RUNNING) {
timer_takeoff(tmr);
tmr->expires = tmr->delay;
timer_place(tmr);
return K_ERR_NONE;
}
if (tmr->state == TIMER_STATE_STOPPED ||
tmr->state == TIMER_STATE_COMPLETED) {
tmr->state = TIMER_STATE_RUNNING;
if (tmr->delay == (k_tick_t)0u) {
tmr->expires = tmr->period;
} else {
tmr->expires = tmr->delay;
}
timer_place(tmr);
return K_ERR_NONE;
}
return K_ERR_TIMER_INVALID_STATE;
}
__API__ k_err_t tos_timer_stop(k_timer_t *tmr)
{
TOS_PTR_SANITY_CHECK(tmr);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!knl_object_verify(&tmr->knl_obj, KNL_OBJ_TYPE_TIMER)) {
return K_ERR_OBJ_INVALID;
}
#endif
if (tmr->state == TIMER_STATE_UNUSED) {
return K_ERR_TIMER_INACTIVE;
}
if (tmr->state == TIMER_STATE_COMPLETED ||
tmr->state == TIMER_STATE_STOPPED) {
return K_ERR_TIMER_STOPPED;
}
if (tmr->state == TIMER_STATE_RUNNING) {
tmr->state = TIMER_STATE_STOPPED;
timer_takeoff(tmr);
}
return K_ERR_NONE;
}
__KERNEL__ k_tick_t timer_next_expires_get(void)
{
TOS_CPU_CPSR_ALLOC();
k_tick_t next_expires;
TOS_CPU_INT_DISABLE();
if (k_timer_ctl.next_expires == TOS_TIME_FOREVER) {
next_expires = TOS_TIME_FOREVER;
} else if (k_timer_ctl.next_expires <= k_tick_count) {
next_expires = (k_tick_t)0u;
} else {
next_expires = k_timer_ctl.next_expires - k_tick_count;
}
TOS_CPU_INT_ENABLE();
return next_expires;
}
#if TOS_CFG_TIMER_AS_PROC > 0u
__KERNEL__ void timer_update(void)
{
k_timer_t *tmr;
k_list_t *curr, *next;
if (k_timer_ctl.next_expires < k_tick_count) {
return;
}
tos_knl_sched_lock();
TOS_LIST_FOR_EACH_SAFE(curr, next, &k_timer_ctl.list) {
tmr = TOS_LIST_ENTRY(curr, k_timer_t, list);
if (tmr->expires > k_tick_count) {
break;
}
// time's up
timer_takeoff(tmr);
if (tmr->opt == TOS_OPT_TIMER_PERIODIC) {
tmr->expires = tmr->period;
timer_place(tmr);
} else {
tmr->state = TIMER_STATE_COMPLETED;
}
(*tmr->cb)(tmr->cb_arg);
}
tos_knl_sched_unlock();
}
#else /* TOS_CFG_TIMER_AS_PROC > 0u */
__STATIC__ void timer_task_entry(void *arg)
{
k_timer_t *tmr;
k_list_t *curr, *next;
k_tick_t next_expires;
arg = arg; // make compiler happy
while (K_TRUE) {
next_expires = timer_next_expires_get();
if (next_expires == TOS_TIME_FOREVER) {
tos_task_suspend(K_NULL);
} else if (next_expires > (k_tick_t)0u) {
tos_task_delay(next_expires);
}
tos_knl_sched_lock();
TOS_LIST_FOR_EACH_SAFE(curr, next, &k_timer_ctl.list) {
tmr = TOS_LIST_ENTRY(curr, k_timer_t, list);
if (tmr->expires > k_tick_count) { // not yet
break;
}
// time's up
timer_takeoff(tmr);
if (tmr->opt == TOS_OPT_TIMER_PERIODIC) {
tmr->expires = tmr->period;
timer_place(tmr);
} else {
tmr->state = TIMER_STATE_COMPLETED;
}
(*tmr->cb)(tmr->cb_arg);
}
tos_knl_sched_unlock();
}
}
#endif
__KERNEL__ k_err_t timer_init(void)
{
#if TOS_CFG_TIMER_AS_PROC > 0u
return K_ERR_NONE;
#else
return tos_task_create(&k_timer_task,
"timer",
timer_task_entry,
K_NULL,
k_timer_task_prio,
k_timer_task_stk_addr,
k_timer_task_stk_size,
0);
#endif
}
#endif
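A usage sketch for a periodic software timer with the APIs above; the 100-tick delay/period and the callback body are illustrative. Depending on TOS_CFG_TIMER_AS_PROC, the callback runs either in tick context or in the "timer" task:

k_timer_t blink_tmr;

void blink_cb(void *arg)
{
    arg = arg; // make compiler happy
    /* toggle an LED, poll a sensor, ... keep it short */
}

void blink_setup(void)
{
    tos_timer_create(&blink_tmr, 100u /* first delay */, 100u /* period */,
                     blink_cb, K_NULL, TOS_OPT_TIMER_PERIODIC);
    tos_timer_start(&blink_tmr);
}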