add countdownlatch and completion

SheldonDai
2019-10-10 14:15:25 +08:00
parent b6af64f40d
commit 04694f6013
15 changed files with 688 additions and 31 deletions

View File

@@ -43,6 +43,8 @@
#include <tos_mutex.h>
#include <tos_sem.h>
#include <tos_event.h>
#include <tos_countdownlatch.h>
#include <tos_completion.h>
#include <tos_timer.h>
#include <tos_time.h>
#include <tos_mmblk.h>

View File

@@ -0,0 +1,151 @@
/*----------------------------------------------------------------------------
* Tencent is pleased to support the open source community by making TencentOS
* available.
*
* Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
* If you have downloaded a copy of the TencentOS binary from Tencent, please
* note that the TencentOS binary is licensed under the BSD 3-Clause License.
*
* If you have downloaded a copy of the TencentOS source code from Tencent,
* please note that TencentOS source code is licensed under the BSD 3-Clause
* License, except for the third-party components listed below which are
* subject to different license terms. Your integration of TencentOS into your
* own projects may require compliance with the BSD 3-Clause License, as well
* as the other licenses applicable to the third-party components included
* within TencentOS.
*---------------------------------------------------------------------------*/
#ifndef _TOS_COMPLETION_H_
#define _TOS_COMPLETION_H_
#if TOS_CFG_COMPLETION_EN > 0u
typedef uint16_t completion_done_t;
typedef struct k_completion_st {
pend_obj_t pend_obj;
completion_done_t done;
} k_completion_t;
/**
* @brief Create a completion.
* create a completion.
*
* @attention None
*
* @param[in] completion pointer to the handler of the completion.
*
* @return errcode
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_completion_create(k_completion_t *completion);
/**
* @brief Destroy a completion.
* destroy a completion.
*
* @attention None
*
* @param[in] completion pointer to the handler of the completion.
*
* @return errcode
* @retval #K_ERR_OBJ_INVALID completion is not a valid pointer to completion
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_completion_destroy(k_completion_t *completion);
/**
* @brief Pend a completion.
* pend a completion.
*
* @attention None
*
* @param[in] completion pointer to the handler of the completion.
* @param[in] timeout how much time (in k_tick_t) we would like to wait.
*
* @return errcode
* @retval #K_ERR_PEND_NOWAIT we get nothing, and we don't want to wait.
* @retval #K_ERR_PEND_SCHED_LOCKED we can wait, but scheduler is locked.
* @retval #K_ERR_PEND_TIMEOUT the time we wait is up, we get nothing.
* @retval #K_ERR_PEND_DESTROY the completion we are pending is destroyed.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_completion_pend_timed(k_completion_t *completion, k_tick_t timeout);
/**
* @brief Pend a completion.
* pend a completion, waiting forever until it is done.
*
* @attention None
*
* @param[in] completion pointer to the handler of the completion.
*
* @return errcode
* @retval #K_ERR_PEND_SCHED_LOCKED we can wait, but scheduler is locked.
* @retval #K_ERR_PEND_DESTROY the completion we are pending is destroyed.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_completion_pend(k_completion_t *completion);
/**
* @brief Post a completion.
* post a completion and wakeup one pending task.
*
* @attention when tos_completion_post returns successfully, only one of the tasks waiting for the completion will be woken up.
*
* @param[in] completion pointer to the handler of the completion.
*
* @return errcode
* @retval #K_ERR_COMPLETION_OVERFLOW the completion has been posted too many times (the done counter would overflow).
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_completion_post(k_completion_t *completion);
/**
* @brief Post a completion.
* post a completion and wakeup all the pending task.
*
* @attention when tos_completion_post_all returns successfully, all of the tasks waiting for the completion will be woken up.
*
* @param[in] completion pointer to the handler of the completion.
*
* @return errcode
* @retval #K_ERR_COMPLETION_OVERFLOW the completion has been posted too many times (the done counter would overflow).
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_completion_post_all(k_completion_t *completion);
/**
* @brief Reset a completion.
* reset a completion to un-done.
*
* @attention None.
*
* @param[in] completion pointer to the handler of the completion.
*
* @return errcode
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_completion_reset(k_completion_t *completion);
/**
* @brief Test whether a completion is done.
* test whether a completion is done.
*
* @attention None
*
* @param[in] completion pointer to the handler of the completion.
*
* @return whether a completion is done
* @retval K_TRUE the completion is done.
* @retval K_FALSE the completion is not done.
*/
__API__ int tos_completion_is_done(k_completion_t *completion);
#endif
#endif
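
The header above only declares the API; in practice the calls are paired across two tasks. Below is a minimal usage sketch that is not part of this commit: the entry functions and the dma_done variable are illustrative names, task creation is omitted, and only the tos_completion_* calls come from this header.

#include "tos.h"

k_completion_t dma_done; /* illustrative name, not from the commit */

/* called once before either task runs, e.g. during system init */
void dma_done_init(void)
{
    tos_completion_create(&dma_done);
}

void consumer_entry(void *arg)
{
    (void)arg;

    /* block until the producer posts, or give up after 2000 ticks */
    if (tos_completion_pend_timed(&dma_done, (k_tick_t)2000u) == K_ERR_NONE) {
        /* the work finished in time */
    }
}

void producer_entry(void *arg)
{
    (void)arg;

    /* ... do the actual work ... */
    tos_completion_post(&dma_done);     /* wakes exactly one pending task */
    /* tos_completion_post_all(&dma_done) would wake every pending task */
}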

View File

@@ -69,6 +69,24 @@
/////////////////////////////////////////
/////////////////////////////////////////
// disable countdownlatch
#ifdef TOS_CFG_COUNTDOWNLATCH_EN
#undef TOS_CFG_COUNTDOWNLATCH_EN
#endif
#define TOS_CFG_COUNTDOWNLATCH_EN 0u
/////////////////////////////////////////
/////////////////////////////////////////
// disable completion
#ifdef TOS_CFG_COMPLETION_EN
#undef TOS_CFG_COMPLETION_EN
#endif
#define TOS_CFG_COMPLETION_EN 0u
/////////////////////////////////////////
/////////////////////////////////////////
// disable the "traditional" timer
#ifdef TOS_CFG_TIMER_EN
@@ -172,6 +190,14 @@
#define TOS_CFG_SEM_EN 0u
#endif
#ifndef TOS_CFG_COUNTDOWNLATCH_EN
#define TOS_CFG_COUNTDOWNLATCH_EN 0u
#endif
#ifndef TOS_CFG_COMPLETION_EN
#define TOS_CFG_COMPLETION_EN 0u
#endif
#ifndef TOS_CFG_MMHEAP_EN
#define TOS_CFG_MMHEAP_EN 0u
#endif
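
The two hunks above wire the new switches into the configuration machinery: the first block force-disables both primitives in a stripped-down configuration, the second provides the 0u fallback when a project leaves them undefined. A project that wants the primitives would define the macros itself; the sketch below is illustrative and assumes the usual per-project tos_config.h override.

/* sketch, not from this commit: enable both primitives in a project's tos_config.h */
#define TOS_CFG_COUNTDOWNLATCH_EN   1u
#define TOS_CFG_COMPLETION_EN       1u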

View File

@@ -0,0 +1,116 @@
/*----------------------------------------------------------------------------
* Tencent is pleased to support the open source community by making TencentOS
* available.
*
* Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
* If you have downloaded a copy of the TencentOS binary from Tencent, please
* note that the TencentOS binary is licensed under the BSD 3-Clause License.
*
* If you have downloaded a copy of the TencentOS source code from Tencent,
* please note that TencentOS source code is licensed under the BSD 3-Clause
* License, except for the third-party components listed below which are
* subject to different license terms. Your integration of TencentOS into your
* own projects may require compliance with the BSD 3-Clause License, as well
* as the other licenses applicable to the third-party components included
* within TencentOS.
*---------------------------------------------------------------------------*/
#ifndef _TOS_COUNTDOWNLATCH_H_
#define _TOS_COUNTDOWNLATCH_H_
#if TOS_CFG_COUNTDOWNLATCH_EN > 0u
typedef struct k_countdownlatch_st {
pend_obj_t pend_obj;
k_countdownlatch_cnt_t count;
} k_countdownlatch_t;
/**
* @brief Create a countdown-latch.
* create a countdown latch.
*
* @attention count is how many times the latch must be posted before a pender wakes up.
*
* @param[in] countdownlatch pointer to the handler of the countdown-latch.
* @param[in] count the number of posts the countdown-latch waits for.
*
* @return errcode
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_countdownlatch_create(k_countdownlatch_t *countdownlatch, k_countdownlatch_cnt_t count);
/**
* @brief Destroy a countdown-latch.
* destroy a countdown-latch.
*
* @attention None
*
* @param[in] countdownlatch pointer to the handler of the countdown-latch.
*
* @return errcode
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_countdownlatch_destroy(k_countdownlatch_t *countdownlatch);
/**
* @brief Pend a countdown-latch.
* pend a countdown-latch.
*
* @attention The task will remain blocked until the countdown-latch counts down to zero or the timeout expires.
*
* @param[in] countdownlatch pointer to the handler of the countdown-latch.
* @param[in] timeout how much time (in k_tick_t) we would like to wait.
*
* @return errcode
* @retval #K_ERR_PEND_NOWAIT we get nothing, and we don't want to wait.
* @retval #K_ERR_PEND_SCHED_LOCKED we can wait, but scheduler is locked.
* @retval #K_ERR_PEND_TIMEOUT the time we wait is up, we get nothing.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_countdownlatch_pend_timed(k_countdownlatch_t *countdownlatch, k_tick_t timeout);
/**
* @brief Pend a countdown-latch.
* pend a countdown latch.
*
* @attention the pender wakes up only after the post has been done (countdownlatch->count) times.
*
* @param[in] countdownlatch pointer to the handler of the countdown-latch.
*
* @return errcode
* @retval #K_ERR_PEND_SCHED_LOCKED the schedule is locked.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_countdownlatch_pend(k_countdownlatch_t *countdownlatch);
/**
* @brief Post a countdown-latch.
* post a countdown-latch.
*
* @attention the pender wakes up only after the post has been done (countdownlatch->count) times.
*
* @param[in] countdownlatch pointer to the handler of the countdown-latch.
*
* @return errcode
* @retval #K_ERR_COUNTDOWNLATCH_OVERFLOW the countdown-latch has already counted down to zero; we are posting too many times.
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_countdownlatch_post(k_countdownlatch_t *countdownlatch);
/**
* @brief Reset a countdown-latch.
* reset a countdown-latch's count.
*
* @attention None.
*
* @param[in] countdownlatch pointer to the handler of the countdown-latch.
*
* @return errcode
* @retval #K_ERR_NONE return successfully.
*/
__API__ k_err_t tos_countdownlatch_reset(k_countdownlatch_t *countdownlatch, k_countdownlatch_cnt_t count);
#endif
#endif
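
As with the completion header, a short usage sketch (not part of this commit) may help: a coordinator blocks until three workers have each posted once. The names are illustrative, task creation is omitted, and only the tos_countdownlatch_* calls come from this header.

#include "tos.h"

k_countdownlatch_t workers_done; /* illustrative name, not from the commit */

/* called once before the tasks run, e.g. during system init */
void workers_done_init(void)
{
    tos_countdownlatch_create(&workers_done, (k_countdownlatch_cnt_t)3u);
}

void coordinator_entry(void *arg)
{
    (void)arg;

    /* blocks until tos_countdownlatch_post has been called 3 times */
    tos_countdownlatch_pend(&workers_done);

    /* rearm the latch for the next round of work */
    tos_countdownlatch_reset(&workers_done, (k_countdownlatch_cnt_t)3u);
}

void worker_entry(void *arg)
{
    (void)arg;

    /* ... this worker's share of the job ... */
    tos_countdownlatch_post(&workers_done); /* counts the latch down by one */
}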

View File

@@ -21,6 +21,10 @@
typedef enum k_err_en {
K_ERR_NONE = 0u,
K_ERR_COMPLETION_OVERFLOW = 25u,
K_ERR_COUNTDOWNLATCH_OVERFLOW = 50u,
K_ERR_DELAY_ZERO = 100u,
K_ERR_DELAY_FOREVER,

View File

@@ -29,6 +29,7 @@ typedef uint8_t k_nesting_t;
typedef uint16_t k_opt_t;
typedef uint16_t k_sem_cnt_t;
typedef uint32_t k_event_flag_t;
typedef uint16_t k_countdownlatch_cnt_t;
typedef uint32_t k_time_t;
typedef uint32_t k_timeslice_t;

View File

@@ -25,7 +25,7 @@ typedef struct k_mutex_st {
k_nesting_t pend_nesting;
k_task_t *owner;
k_prio_t owner_orig_prio;
-k_list_t owner_list;
k_list_t owner_anchor;
} k_mutex_t;
/**

View File

@@ -37,11 +37,13 @@ typedef enum pend_state_en {
is initialized, or whether user pass the correct parameter.
*/
typedef enum pend_type_en {
PEND_TYPE_NONE = 0x0000,
PEND_TYPE_SEM = 0x1BEE,
PEND_TYPE_MUTEX = 0x2BEE,
PEND_TYPE_EVENT = 0x3BEE,
PEND_TYPE_QUEUE = 0x4BEE,
PEND_TYPE_COUNTDOWNLATCH = 0x5BEE,
PEND_TYPE_COMPLETION = 0x6BEE,
} pend_type_t;
typedef enum opt_post_en {
@@ -62,7 +64,9 @@ __KERNEL__ int pend_object_verify(pend_obj_t *object, pend_type_t type);
__KERNEL__ int pend_is_nopending(pend_obj_t *object);
-__KERNEL__ k_prio_t pend_highest_prio_get(pend_obj_t *object);
__KERNEL__ k_prio_t pend_highest_pending_prio_get(pend_obj_t *object);
__KERNEL__ k_task_t *pend_highest_pending_task_get(pend_obj_t *object);
__KERNEL__ void pend_list_remove(k_task_t *task);

View File

@@ -0,0 +1,189 @@
/*----------------------------------------------------------------------------
* Tencent is pleased to support the open source community by making TencentOS
* available.
*
* Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
* If you have downloaded a copy of the TencentOS binary from Tencent, please
* note that the TencentOS binary is licensed under the BSD 3-Clause License.
*
* If you have downloaded a copy of the TencentOS source code from Tencent,
* please note that TencentOS source code is licensed under the BSD 3-Clause
* License, except for the third-party components listed below which are
* subject to different license terms. Your integration of TencentOS into your
* own projects may require compliance with the BSD 3-Clause License, as well
* as the other licenses applicable to the third-party components included
* within TencentOS.
*---------------------------------------------------------------------------*/
#include "tos.h"
#if TOS_CFG_COMPLETION_EN > 0u
__API__ k_err_t tos_completion_create(k_completion_t *completion)
{
TOS_PTR_SANITY_CHECK(completion);
pend_object_init(&completion->pend_obj, PEND_TYPE_COMPLETION);
completion->done = (completion_done_t)0u;
return K_ERR_NONE;
}
__API__ k_err_t tos_completion_destroy(k_completion_t *completion)
{
TOS_CPU_CPSR_ALLOC();
TOS_PTR_SANITY_CHECK(completion);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!pend_object_verify(&completion->pend_obj, PEND_TYPE_COMPLETION)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
if (!pend_is_nopending(&completion->pend_obj)) {
pend_wakeup_all(&completion->pend_obj, PEND_STATE_DESTROY);
}
pend_object_deinit(&completion->pend_obj);
TOS_CPU_INT_ENABLE();
knl_sched();
return K_ERR_NONE;
}
__API__ k_err_t tos_completion_pend_timed(k_completion_t *completion, k_tick_t timeout)
{
TOS_CPU_CPSR_ALLOC();
TOS_PTR_SANITY_CHECK(completion);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!pend_object_verify(&completion->pend_obj, PEND_TYPE_COMPLETION)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
if (completion->done > (completion_done_t)0u) {
TOS_CPU_INT_ENABLE();
return K_ERR_NONE;
}
if (timeout == TOS_TIME_NOWAIT) { // no wait, return immediately
TOS_CPU_INT_ENABLE();
return K_ERR_PEND_NOWAIT;
}
if (knl_is_inirq()) {
TOS_CPU_INT_ENABLE();
return K_ERR_PEND_IN_IRQ;
}
if (knl_is_sched_locked()) {
TOS_CPU_INT_ENABLE();
return K_ERR_PEND_SCHED_LOCKED;
}
pend_task_block(k_curr_task, &completion->pend_obj, timeout);
TOS_CPU_INT_ENABLE();
knl_sched();
return pend_state2errno(k_curr_task->pend_state);
}
__API__ k_err_t tos_completion_pend(k_completion_t *completion)
{
return tos_completion_pend_timed(completion, TOS_TIME_FOREVER);
}
__STATIC__ k_err_t completion_do_post(k_completion_t *completion, opt_post_t opt)
{
TOS_CPU_CPSR_ALLOC();
TOS_PTR_SANITY_CHECK(completion);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!pend_object_verify(&completion->pend_obj, PEND_TYPE_COMPLETION)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
if (completion->done == (completion_done_t)-1) {
TOS_CPU_INT_ENABLE();
return K_ERR_COMPLETION_OVERFLOW;
}
++completion->done;
if (pend_is_nopending(&completion->pend_obj)) {
TOS_CPU_INT_ENABLE();
return K_ERR_NONE;
}
pend_wakeup(&completion->pend_obj, PEND_STATE_POST, opt);
TOS_CPU_INT_ENABLE();
knl_sched();
return K_ERR_NONE;
}
__API__ k_err_t tos_completion_post(k_completion_t *completion)
{
return completion_do_post(completion, OPT_POST_ONE);
}
__API__ k_err_t tos_completion_post_all(k_completion_t *completion)
{
return completion_do_post(completion, OPT_POST_ALL);
}
__API__ k_err_t tos_completion_reset(k_completion_t *completion)
{
TOS_CPU_CPSR_ALLOC();
TOS_PTR_SANITY_CHECK(completion);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!pend_object_verify(&completion->pend_obj, PEND_TYPE_COMPLETION)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
completion->done = (completion_done_t)0u;
TOS_CPU_INT_ENABLE();
return K_ERR_NONE;
}
__API__ int tos_completion_is_done(k_completion_t *completion)
{
TOS_CPU_CPSR_ALLOC();
int is_done = K_FALSE;
TOS_PTR_SANITY_CHECK(completion);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!pend_object_verify(&completion->pend_obj, PEND_TYPE_COMPLETION)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
is_done = (completion->done > (completion_done_t)0u ? K_TRUE : K_FALSE);
TOS_CPU_INT_ENABLE();
return is_done;
}
#endif
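
The implementation above also shows that tos_completion_is_done never blocks and tos_completion_reset simply rearms the done counter, so the pair suits polling contexts where pending is not allowed. A small sketch, not from this commit, reusing the illustrative dma_done completion from the earlier example:

#include "tos.h"

extern k_completion_t dma_done; /* the illustrative completion from the earlier sketch */

void poll_and_rearm(void)
{
    /* non-blocking check; usable where a pend is not an option */
    if (tos_completion_is_done(&dma_done) == K_TRUE) {
        /* consume the result, then rearm the completion for the next round */
        tos_completion_reset(&dma_done);
    }
}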

View File

@@ -0,0 +1,159 @@
/*----------------------------------------------------------------------------
* Tencent is pleased to support the open source community by making TencentOS
* available.
*
* Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
* If you have downloaded a copy of the TencentOS binary from Tencent, please
* note that the TencentOS binary is licensed under the BSD 3-Clause License.
*
* If you have downloaded a copy of the TencentOS source code from Tencent,
* please note that TencentOS source code is licensed under the BSD 3-Clause
* License, except for the third-party components listed below which are
* subject to different license terms. Your integration of TencentOS into your
* own projects may require compliance with the BSD 3-Clause License, as well
* as the other licenses applicable to the third-party components included
* within TencentOS.
*---------------------------------------------------------------------------*/
#include "tos.h"
#if TOS_CFG_COUNTDOWNLATCH_EN > 0u
__API__ k_err_t tos_countdownlatch_create(k_countdownlatch_t *countdownlatch, k_countdownlatch_cnt_t count)
{
TOS_PTR_SANITY_CHECK(countdownlatch);
pend_object_init(&countdownlatch->pend_obj, PEND_TYPE_COUNTDOWNLATCH);
countdownlatch->count = count;
return K_ERR_NONE;
}
__API__ k_err_t tos_countdownlatch_destroy(k_countdownlatch_t *countdownlatch)
{
TOS_CPU_CPSR_ALLOC();
TOS_PTR_SANITY_CHECK(countdownlatch);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!pend_object_verify(&countdownlatch->pend_obj, PEND_TYPE_COUNTDOWNLATCH)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
if (!pend_is_nopending(&countdownlatch->pend_obj)) {
pend_wakeup_all(&countdownlatch->pend_obj, PEND_STATE_DESTROY);
}
pend_object_deinit(&countdownlatch->pend_obj);
TOS_CPU_INT_ENABLE();
knl_sched();
return K_ERR_NONE;
}
__API__ k_err_t tos_countdownlatch_pend_timed(k_countdownlatch_t *countdownlatch, k_tick_t timeout)
{
TOS_CPU_CPSR_ALLOC();
TOS_PTR_SANITY_CHECK(countdownlatch);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!pend_object_verify(&countdownlatch->pend_obj, PEND_TYPE_COUNTDOWNLATCH)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
if (countdownlatch->count == (k_countdownlatch_cnt_t)0u) {
TOS_CPU_INT_ENABLE();
return K_ERR_NONE;
}
if (timeout == TOS_TIME_NOWAIT) { // no wait, return immediately
TOS_CPU_INT_ENABLE();
return K_ERR_PEND_NOWAIT;
}
if (knl_is_inirq()) {
TOS_CPU_INT_ENABLE();
return K_ERR_PEND_IN_IRQ;
}
if (knl_is_sched_locked()) {
TOS_CPU_INT_ENABLE();
return K_ERR_PEND_SCHED_LOCKED;
}
pend_task_block(k_curr_task, &countdownlatch->pend_obj, timeout);
TOS_CPU_INT_ENABLE();
knl_sched();
return pend_state2errno(k_curr_task->pend_state);
}
__API__ k_err_t tos_countdownlatch_pend(k_countdownlatch_t *countdownlatch)
{
return tos_countdownlatch_pend_timed(countdownlatch, TOS_TIME_FOREVER);
}
__API__ k_err_t tos_countdownlatch_post(k_countdownlatch_t *countdownlatch)
{
TOS_CPU_CPSR_ALLOC();
TOS_PTR_SANITY_CHECK(countdownlatch);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!pend_object_verify(&countdownlatch->pend_obj, PEND_TYPE_COUNTDOWNLATCH)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
if (countdownlatch->count == (k_countdownlatch_cnt_t)0) {
TOS_CPU_INT_ENABLE();
return K_ERR_COUNTDOWNLATCH_OVERFLOW;
}
--countdownlatch->count;
if (countdownlatch->count > (k_countdownlatch_cnt_t)0) {
TOS_CPU_INT_ENABLE();
return K_ERR_NONE;
}
pend_wakeup_one(&countdownlatch->pend_obj, PEND_STATE_POST);
TOS_CPU_INT_ENABLE();
knl_sched();
return K_ERR_NONE;
}
__API__ k_err_t tos_countdownlatch_reset(k_countdownlatch_t *countdownlatch, k_countdownlatch_cnt_t count)
{
TOS_CPU_CPSR_ALLOC();
TOS_PTR_SANITY_CHECK(countdownlatch);
#if TOS_CFG_OBJECT_VERIFY_EN > 0u
if (!pend_object_verify(&countdownlatch->pend_obj, PEND_TYPE_COUNTDOWNLATCH)) {
return K_ERR_OBJ_INVALID;
}
#endif
TOS_CPU_INT_DISABLE();
countdownlatch->count = count;
TOS_CPU_INT_ENABLE();
return K_ERR_NONE;
}
#endif

View File

@@ -25,8 +25,9 @@ __STATIC_INLINE__ void mutex_old_owner_release(k_mutex_t *mutex)
owner = mutex->owner;
-tos_list_del(&mutex->owner_list);
tos_list_del(&mutex->owner_anchor);
mutex->owner = K_NULL;
mutex->pend_nesting = (k_nesting_t)0u;
// the right time comes! let's do it!
if (owner->prio_pending != K_TASK_PRIO_INVALID) {
@@ -40,11 +41,11 @@ __STATIC_INLINE__ void mutex_old_owner_release(k_mutex_t *mutex)
__STATIC_INLINE__ void mutex_fresh_owner_mark(k_mutex_t *mutex, k_task_t *task)
{
mutex->pend_nesting = (k_nesting_t)1u;
mutex->owner = task;
mutex->owner_orig_prio = task->prio;
-mutex->pend_nesting = (k_nesting_t)1u;
-tos_list_add(&mutex->owner_list, &task->mutex_own_list);
tos_list_add(&mutex->owner_anchor, &task->mutex_own_list);
}
__STATIC_INLINE__ void mutex_new_owner_mark(k_mutex_t *mutex, k_task_t *task)
@@ -54,7 +55,7 @@ __STATIC_INLINE__ void mutex_new_owner_mark(k_mutex_t *mutex, k_task_t *task)
mutex_fresh_owner_mark(mutex, task);
// we own the mutex now, make sure our priority is higher than any one in the pend list.
-highest_pending_prio = pend_highest_prio_get(&mutex->pend_obj);
highest_pending_prio = pend_highest_pending_prio_get(&mutex->pend_obj);
if (task->prio > highest_pending_prio) {
tos_task_prio_change(task, highest_pending_prio);
}
@@ -76,7 +77,7 @@ __API__ k_err_t tos_mutex_create(k_mutex_t *mutex)
mutex->pend_nesting = (k_nesting_t)0u;
mutex->owner = K_NULL;
mutex->owner_orig_prio = K_TASK_PRIO_INVALID;
-tos_list_init(&mutex->owner_list);
tos_list_init(&mutex->owner_anchor);
return K_ERR_NONE;
}
@@ -101,7 +102,6 @@ __API__ k_err_t tos_mutex_destroy(k_mutex_t *mutex)
}
pend_object_deinit(&mutex->pend_obj);
-mutex->pend_nesting = (k_nesting_t)0u;
if (mutex->owner) {
mutex_old_owner_release(mutex);
@@ -116,7 +116,6 @@ __API__ k_err_t tos_mutex_destroy(k_mutex_t *mutex)
__API__ k_err_t tos_mutex_pend_timed(k_mutex_t *mutex, k_tick_t timeout)
{
TOS_CPU_CPSR_ALLOC();
-k_err_t err;
TOS_PTR_SANITY_CHECK(mutex);
TOS_IN_IRQ_CHECK();
@@ -166,16 +165,7 @@ __API__ k_err_t tos_mutex_pend_timed(k_mutex_t *mutex, k_tick_t timeout)
TOS_CPU_INT_ENABLE();
knl_sched();
-err = pend_state2errno(k_curr_task->pend_state);
-if (err == K_ERR_NONE) {
-// good, we are the owner now.
-TOS_CPU_INT_DISABLE();
-mutex_new_owner_mark(mutex, k_curr_task);
-TOS_CPU_INT_ENABLE();
-}
-return err;
return pend_state2errno(k_curr_task->pend_state);
}
__API__ k_err_t tos_mutex_pend(k_mutex_t *mutex)
@@ -186,6 +176,7 @@ __API__ k_err_t tos_mutex_pend(k_mutex_t *mutex)
__API__ k_err_t tos_mutex_post(k_mutex_t *mutex)
{
TOS_CPU_CPSR_ALLOC();
k_task_t *pending_task;
TOS_PTR_SANITY_CHECK(mutex);
TOS_IN_IRQ_CHECK();
@@ -215,6 +206,14 @@ __API__ k_err_t tos_mutex_post(k_mutex_t *mutex)
return K_ERR_NONE;
}
/* must do the mutex owner switch right here:
if the pender does not get a chance to be scheduled, the poster (the old owner) may obtain the mutex again immediately,
even though the pender is already ready (and logically inside the critical section).
we switch the owner right here so that the old owner cannot obtain the mutex again.
*/
pending_task = pend_highest_pending_task_get(&mutex->pend_obj);
mutex_new_owner_mark(mutex, pending_task);
pend_wakeup_one(&mutex->pend_obj, PEND_STATE_POST);
TOS_CPU_INT_ENABLE();
knl_sched();

View File

@@ -39,7 +39,7 @@ __STATIC__ void pend_list_add(k_task_t *task, pend_obj_t *pend_obj)
task_state_set_pend(task);
}
-__KERNEL__ k_prio_t pend_highest_prio_get(pend_obj_t *object)
__KERNEL__ k_prio_t pend_highest_pending_prio_get(pend_obj_t *object)
{
k_task_t *task;
@@ -48,6 +48,11 @@ __KERNEL__ k_prio_t pend_highest_prio_get(pend_obj_t *object)
return task ? task->prio : K_TASK_PRIO_INVALID;
}
__KERNEL__ k_task_t *pend_highest_pending_task_get(pend_obj_t *object)
{
return TOS_LIST_FIRST_ENTRY(&object->list, k_task_t, pend_list);
}
__KERNEL__ void pend_list_remove(k_task_t *task)
{
tos_list_del(&task->pend_list);

View File

@@ -78,8 +78,7 @@ __KERNEL__ k_task_t *readyqueue_highest_ready_task_get(void)
k_list_t *task_list;
task_list = &k_rdyq.task_list_head[k_rdyq.highest_prio];
-k_task_t *fuck = TOS_LIST_FIRST_ENTRY(task_list, k_task_t, pend_list);
-return fuck;
return TOS_LIST_FIRST_ENTRY(task_list, k_task_t, pend_list);
}
__KERNEL__ void readyqueue_init(void)

View File

@@ -53,8 +53,8 @@ __STATIC__ k_prio_t task_highest_pending_prio_get(k_task_t *task)
k_prio_t prio, highest_prio_pending = K_TASK_PRIO_INVALID;
TOS_LIST_FOR_EACH(curr, &task->mutex_own_list) {
-mutex = TOS_LIST_ENTRY(curr, k_mutex_t, owner_list);
-prio = pend_highest_prio_get(&mutex->pend_obj);
mutex = TOS_LIST_ENTRY(curr, k_mutex_t, owner_anchor);
prio = pend_highest_pending_prio_get(&mutex->pend_obj);
if (prio < highest_prio_pending) {
highest_prio_pending = prio;
}
@@ -67,7 +67,7 @@ __STATIC__ void task_mutex_release(k_task_t *task)
k_list_t *curr, *next;
TOS_LIST_FOR_EACH_SAFE(curr, next, &task->mutex_own_list) {
-mutex_release(TOS_LIST_ENTRY(curr, k_mutex_t, owner_list));
mutex_release(TOS_LIST_ENTRY(curr, k_mutex_t, owner_anchor));
}
}
#endif
@@ -358,6 +358,7 @@ __API__ k_err_t tos_task_delay(k_tick_t delay)
if (tick_list_add(k_curr_task, delay) != K_ERR_NONE) {
TOS_CPU_INT_ENABLE();
// if you want to delay your task forever, why not just suspend it?
return K_ERR_DELAY_FOREVER;
}

View File

@@ -113,6 +113,7 @@ __API__ k_err_t tos_timer_create(k_timer_t *tmr,
}
if (opt == TOS_OPT_TIMER_ONESHOT && delay == (k_tick_t)0u) {
// if you create a oneshot timer with a delay of 0, why not just call the timer callback directly?
return K_ERR_TIMER_INVALID_DELAY;
}