17 Commits

Author SHA1 Message Date
Mculover666
9a13cf1860 Revert "add onnx pack" 2021-09-07 12:58:23 +08:00
Mculover666
da4cfd716e Merge pull request #316 from Derekduke/master
add onnx pack
2021-09-07 11:10:35 +08:00
dkk0918
c55e48b61b add onnx pack 2021-09-07 11:02:26 +08:00
dkk0918
6fb59b9313 add onnx pack 2021-09-06 21:50:44 +08:00
daishengdong
427b13d14a bugfix for kv workspace locate
if only one fresh block is left at kv_init time, we should not just locate the workspace to this fresh one; otherwise we will probably get a KV_ERR_NO_WRITEABLE_BLK in the next kv_init (if we write something to this workspace). We try a gc here to locate the workspace to an in-use block rather than the fresh one.
2021-09-06 13:06:31 +08:00
Mculover666
c13e3d95e4 Merge pull request #315 from DavidLin1577/master
Update tos_fault.c
2021-09-03 14:18:27 +08:00
David Lin
c15fd1f17a Update tos_fault.c 2021-09-03 12:14:26 +08:00
David Lin
090018470d Update tos_fault.c 2021-09-03 12:13:36 +08:00
David Lin
ad41df0717 Update tos_fault.c 2021-09-03 12:13:01 +08:00
David Lin
8882ce6d12 Update tos_fault.c 2021-09-03 12:12:11 +08:00
David Lin
fb4674f65e Update tos_fault.c 2021-09-03 12:11:45 +08:00
David Lin
4d8f7a4e2c Update tos_fault.c 2021-09-03 12:11:05 +08:00
David Lin
35c97d196c Update tos_fault.c 2021-09-03 12:10:20 +08:00
Supowang
99302c1512 Update tos_kv.c 2021-08-19 17:47:06 +08:00
Arthur
f73f27f060 Merge branch 'master' of https://github.com/Tencent/TencentOS-tiny 2021-08-03 15:26:28 +08:00
Arthur
cb36ebae84 kv gc bugfix
if a power down or crash happened during the gc process, kv_init will fail with NO_WRITEABLE_BLK next time.
2021-08-03 12:19:19 +08:00
Mculover666
644e2bf2e9 Merge pull request #314 from Tencent/tos_evb_g0
doc: improve documentation of examples/tos_meets_rust
2021-07-27 15:33:22 +08:00
10 changed files with 270 additions and 52 deletions

View File

@@ -114,7 +114,7 @@ __STATIC__ void fault_dump_cpu_frame(fault_cpu_frame_t *cpu_frame)
__STATIC__ void fault_dump_stack(fault_info_t *info, size_t depth)
{
cpu_addr_t sp = info->sp_before_fault;;
cpu_addr_t sp = info->sp_before_fault;
k_fault_log_writer("\nTASK STACK DUMP:\n");
while (sp <= info->stack_limit && depth--) {

View File

@@ -114,7 +114,7 @@ __STATIC__ void fault_dump_cpu_frame(fault_cpu_frame_t *cpu_frame)
__STATIC__ void fault_dump_stack(fault_info_t *info, size_t depth)
{
cpu_addr_t sp = info->sp_before_fault;;
cpu_addr_t sp = info->sp_before_fault;
k_fault_log_writer("\nTASK STACK DUMP:\n");
while (sp <= info->stack_limit && depth--) {

View File

@@ -114,7 +114,7 @@ __STATIC__ void fault_dump_cpu_frame(fault_cpu_frame_t *cpu_frame)
__STATIC__ void fault_dump_stack(fault_info_t *info, size_t depth)
{
cpu_addr_t sp = info->sp_before_fault;;
cpu_addr_t sp = info->sp_before_fault;
k_fault_log_writer("\nTASK STACK DUMP:\n");
while (sp <= info->stack_limit && depth--) {

View File

@@ -114,7 +114,7 @@ __STATIC__ void fault_dump_cpu_frame(fault_cpu_frame_t *cpu_frame)
__STATIC__ void fault_dump_stack(fault_info_t *info, size_t depth)
{
cpu_addr_t sp = info->sp_before_fault;;
cpu_addr_t sp = info->sp_before_fault;
k_fault_log_writer("\nTASK STACK DUMP:\n");
while (sp <= info->stack_limit && depth--) {

View File

@@ -69,7 +69,7 @@ __STATIC__ void fault_dump_cpu_frame(fault_cpu_frame_t *cpu_frame)
__STATIC__ void fault_dump_stack(fault_info_t *info, size_t depth)
{
cpu_addr_t sp = info->sp_before_fault;;
cpu_addr_t sp = info->sp_before_fault;
k_fault_log_writer("\nTASK STACK DUMP:\n");
while (sp <= info->stack_limit && depth--) {

View File

@@ -114,7 +114,7 @@ __STATIC__ void fault_dump_cpu_frame(fault_cpu_frame_t *cpu_frame)
__STATIC__ void fault_dump_stack(fault_info_t *info, size_t depth)
{
cpu_addr_t sp = info->sp_before_fault;;
cpu_addr_t sp = info->sp_before_fault;
k_fault_log_writer("\nTASK STACK DUMP:\n");
while (sp <= info->stack_limit && depth--) {

View File

@@ -69,7 +69,7 @@ __STATIC__ void fault_dump_cpu_frame(fault_cpu_frame_t *cpu_frame)
__STATIC__ void fault_dump_stack(fault_info_t *info, size_t depth)
{
cpu_addr_t sp = info->sp_before_fault;;
cpu_addr_t sp = info->sp_before_fault;
k_fault_log_writer("\nTASK STACK DUMP:\n");
while (sp <= info->stack_limit && depth--) {

View File

@@ -81,7 +81,7 @@ typedef uint64_t kv_dword_t; // double word
#define KV_NO_WRITEABLE_BLK() (KV_MGR_BLK_NUM_INUSE == 0 && KV_MGR_BLK_NUM_FRESH == 0)
#define KV_ITEM_HDR_MAGIC 0xABCD1234DCBA4321
#define KV_ITEM_HDR_MAGIC 0x69745F6D61676963 /* "it_magic" */
#define KV_ITEM_DISCARDED 0x0F0F0F0F0F0F0F0F
#define KV_ITEM_IS_DISCARDED(item_hdr) ((item_hdr)->discarded_flag == KV_ITEM_DISCARDED)
#define KV_ITEM_IS_LEGAL(item_hdr) ((item_hdr)->magic == KV_ITEM_HDR_MAGIC)
@@ -100,8 +100,22 @@ typedef uint64_t kv_dword_t; // double word
#define KV_ITEM_SIZE_OF_BODY(item) KV_ITEM_BODY_SIZE(item->hdr.k_len, item->hdr.v_len)
#define KV_ITEM_ADDR_OF_BODY(item) (item->pos + KV_ITEM_HDR_SIZE)
#define KV_BLK_HDR_MAGIC 0x1234ABCD4321DCBA
#define KV_BLK_HDR_MAGIC 0x48445F4D41474943 /* "HD_MAGIC" */
#define KV_BLK_HDR_GC_SRC 0x6D61726B5F737263 /* "mark_src" */
#define KV_BLK_HDR_GC_DST 0x6D61726B5F647374 /* "mark_dst" */
#define KV_BLK_HDR_GC_DONE 0x6B67635F646F6E65 /* "kgc_done" */
#define KV_BLK_IS_LEGAL(blk_hdr) ((blk_hdr)->magic == KV_BLK_HDR_MAGIC)
// DO NOT use gc_src == KV_BLK_HDR_GC_SRC here, in case of an incomplete write of src magic due to a power down
#define KV_BLK_IS_GC_SRC(blk_hdr) ((blk_hdr)->gc_src != (kv_dword_t)-1)
// DO NOT use gc_dst == KV_BLK_HDR_GC_DST here, in case of an incomplete write of dst magic due to a power down
#define KV_BLK_IS_GC_DST(blk_hdr) ((blk_hdr)->gc_dst != (kv_dword_t)-1)
// DO NOT use gc_done == KV_BLK_HDR_GC_DONE here, in case of an incomplete write of done magic due to a power down
#define KV_BLK_IS_GC_DONE(blk_hdr) ((blk_hdr)->gc_done != (kv_dword_t)-1)
#define KV_BLK_IS_GC_DST_NOT_DONE(blk_hdr) (KV_BLK_IS_GC_DST(blk_hdr) && !KV_BLK_IS_GC_DONE(blk_hdr))
#define KV_BLK_INVALID ((uint32_t)-1)
#define KV_BLK_HDR_SIZE KV_ALIGNED_SIZE(sizeof(kv_blk_hdr_t))
#define KV_BLK_SIZE (KV_FLASH_SECTOR_SIZE)
@@ -113,7 +127,7 @@ typedef uint64_t kv_dword_t; // double word
#define KV_BLK_NEXT(blk_start) (blk_start + KV_BLK_SIZE >= KV_FLASH_END ? KV_FLASH_START : blk_start + KV_BLK_SIZE)
#define KV_BLK_FOR_EACH_FROM(cur_blk, start_blk) \
for (cur_blk = KV_BLK_NEXT(start_blk); \
for (cur_blk = KV_BLK_NEXT(start_blk); \
cur_blk != start_blk; \
cur_blk = KV_BLK_NEXT(cur_blk))
@@ -159,6 +173,9 @@ typedef struct kv_control_st {
typedef struct kv_block_header_st {
kv_wunit_t magic; /*< is this block formatted? */
kv_wunit_t gc_src; /*< is this block gc-ing(as a source block)? */
kv_wunit_t gc_dst; /*< is this block gc-ing(as a destination block)? */
kv_wunit_t gc_done; /*< is this block gc done(as a destination block)? */
} __PACKED__ kv_blk_hdr_t;
typedef struct kv_item_header_st {
@@ -179,7 +196,7 @@ typedef struct kv_item_st {
uint8_t *body; /*< item body: key/value buffer */
} kv_item_t;
__STATIC__ kv_ctl_t kv_ctl;
extern kv_ctl_t kv_ctl;
__STATIC_INLINE__ void kv_blk_freesz_set(uint32_t blk_start, uint32_t free_size)
{

View File

@@ -145,6 +145,20 @@ __STATIC__ uint32_t kv_blk_next_fresh(void)
return KV_BLK_INVALID;
}
__STATIC__ uint32_t kv_blk_get_a_fresh(void)
{
uint32_t cur_blk;
KV_BLK_FOR_EACH(cur_blk) {
if (kv_blk_is_fresh(cur_blk)) {
return cur_blk;
}
}
return KV_BLK_INVALID;
}
__STATIC__ uint32_t kv_blk_search_inuse(uint32_t item_size)
{
uint32_t cur_blk;
@@ -213,6 +227,36 @@ __STATIC__ uint32_t kv_blk_search_suitable(uint32_t item_size)
} while (K_TRUE);
}
__STATIC__ kv_err_t kv_blk_mark_gc_src(uint32_t blk_start)
{
if (kv_flash_wunit_modify(KV_ADDR_OF_FIELD(blk_start, kv_blk_hdr_t, gc_src),
KV_BLK_HDR_GC_SRC) != KV_ERR_NONE) {
return KV_ERR_FLASH_WRITE_FAILED;
}
return KV_ERR_NONE;
}
__STATIC__ kv_err_t kv_blk_mark_gc_dst(uint32_t blk_start)
{
if (kv_flash_wunit_modify(KV_ADDR_OF_FIELD(blk_start, kv_blk_hdr_t, gc_dst),
KV_BLK_HDR_GC_DST) != KV_ERR_NONE) {
return KV_ERR_FLASH_WRITE_FAILED;
}
return KV_ERR_NONE;
}
__STATIC__ kv_err_t kv_blk_mark_gc_done(uint32_t blk_start)
{
if (kv_flash_wunit_modify(KV_ADDR_OF_FIELD(blk_start, kv_blk_hdr_t, gc_done),
KV_BLK_HDR_GC_DONE) != KV_ERR_NONE) {
return KV_ERR_FLASH_WRITE_FAILED;
}
return KV_ERR_NONE;
}
__STATIC__ kv_err_t kv_item_hdr_write(uint32_t item_start, kv_item_hdr_t *item_hdr)
{
if (kv_flash_write(KV_ADDR_OF_FIELD(item_start, kv_item_hdr_t, checksum),
@@ -337,11 +381,13 @@ __STATIC__ kv_err_t kv_item_try_delete(kv_item_t *item)
}
// key changes, means another turn of gc happened, the previous block is filled with new item.
if (memcmp(prev_key, (void *)KV_ITEM_ADDR_OF_BODY(item), k_len) != 0) {
if (memcmp(prev_key, (void *)item->body, k_len) != 0) {
tos_mmheap_free(prev_key);
return KV_ERR_NONE;
}
tos_mmheap_free(prev_key);
// the previous item is still there, delete it.
return kv_item_delete_aux(prev_pos);
}
@@ -436,23 +482,24 @@ __STATIC__ int kv_item_is_moved(kv_item_t *item)
return is_moved;
}
__STATIC__ kv_err_t kv_item_do_gc(kv_item_t *item, const void *dummy)
__STATIC__ kv_err_t kv_item_do_gc(kv_item_t *item, const void *p_blk_dst)
{
kv_err_t err;
uint32_t blk_dst = *(uint32_t *)p_blk_dst;
err = kv_item_body_read(item);
if (err != KV_ERR_NONE) {
return err;
}
if (kv_item_write(KV_BLK_USABLE_ADDR(KV_MGR_WORKSPACE),
if (kv_item_write(KV_BLK_USABLE_ADDR(blk_dst),
&item->hdr, item->body,
KV_ITEM_SIZE_OF_BODY(item)) != KV_ERR_NONE) {
return KV_ERR_FLASH_WRITE_FAILED;
}
// reduce the free_size
kv_blk_freesz_reduce(KV_MGR_WORKSPACE, KV_ITEM_SIZE_OF_ITEM(item));
kv_blk_freesz_reduce(blk_dst, KV_ITEM_SIZE_OF_ITEM(item));
return KV_ERR_NEXT_LOOP;
}
@@ -550,7 +597,7 @@ __STATIC__ kv_err_t kv_item_do_recovery(kv_item_t *item, const void *dummy)
*/
__STATIC__ kv_err_t kv_item_walkthru(uint32_t blk_start,
kv_item_walker_t walker,
const void *patten,
const void *arg,
kv_item_t **item_out)
{
kv_err_t err;
@@ -616,10 +663,12 @@ __STATIC__ kv_err_t kv_item_walkthru(uint32_t blk_start,
// tell the item where he is, he does not know yet.
item->pos = cur_item;
err = walker(item, patten);
err = walker(item, arg);
if (err == KV_ERR_NONE) {
if (item_out) {
*item_out = item;
} else {
kv_item_free(item);
}
return KV_ERR_NONE;
} else if (err != KV_ERR_NEXT_LOOP) {
@@ -905,6 +954,26 @@ __STATIC__ int kv_mgr_blk_index_rebuild(void)
return is_rebuild_done;
}
__STATIC__ kv_err_t kv_try_gc(void)
{
uint32_t cur_blk, blk_dst;
blk_dst = kv_blk_get_a_fresh();
if (blk_dst == KV_BLK_INVALID) {
return KV_ERR_GC_NOTHING;
}
KV_BLK_FOR_EACH(cur_blk) {
if (kv_blk_is_dirty(cur_blk)) {
if (kv_do_gc(cur_blk, blk_dst, K_FALSE) == KV_ERR_NONE) {
return KV_ERR_NONE;
}
}
}
return KV_ERR_GC_NOTHING;
}
__STATIC__ kv_err_t kv_mgr_workspace_locate(void)
{
uint32_t cur_blk;
@@ -914,6 +983,12 @@ __STATIC__ kv_err_t kv_mgr_workspace_locate(void)
kv_mgr_blk_index_rebuild();
}
if (KV_MGR_BLK_NUM_INUSE == 0 && KV_MGR_BLK_NUM_FRESH == 1) {
/* if here, we cannot just give out the last fresh block, otherwise the kv will get into
KV_ERR_NO_WRITEABLE_BLK next time, try a gc here to get a "rescue" */
kv_try_gc();
}
if (KV_NO_WRITEABLE_BLK()) {
return KV_ERR_NO_WRITEABLE_BLK;
}
@@ -937,11 +1012,119 @@ __STATIC__ kv_err_t kv_mgr_workspace_locate(void)
return KV_ERR_NONE;
}
__STATIC__ void kv_mgr_ctl_build(void)
/*
* src dst
* 1. mark gc_src tag
* 2. mark gc_dst tag
* 3. copy data to dst
* 4. format src block
* 5. mark gc_done tag
*/
__STATIC__ kv_err_t kv_do_gc(uint32_t blk_src, uint32_t blk_dst, int is_handle_incomplete)
{
kv_err_t err;
// step 1
if (!is_handle_incomplete) {
// in handle incomplete case, gc_src already been marked before power down
err = kv_blk_mark_gc_src(blk_src);
if (err != KV_ERR_NONE) {
return err;
}
}
// step 2
err = kv_blk_mark_gc_dst(blk_dst);
if (err != KV_ERR_NONE) {
return err;
}
// step 3
err = kv_item_walkthru(blk_src, kv_item_do_gc, (const void *)&blk_dst, K_NULL);
if (err != KV_ERR_NONE) {
return err;
}
// step 4
kv_blk_reset_inuse(blk_src);
if (kv_blk_format(blk_src) != KV_ERR_NONE) {
kv_blk_set_bad(blk_src);
}
// step 5
err = kv_blk_mark_gc_done(blk_dst);
if (err != KV_ERR_NONE) {
return err;
}
kv_blk_reset_fresh(blk_dst);
if (!kv_blk_is_full(blk_dst)) {
kv_blk_set_inuse(blk_dst);
}
return KV_ERR_NONE;
}
struct blk_info {
int nr;
#define NR_ABNORMAL_BLK_MAX 3
uint32_t blks[NR_ABNORMAL_BLK_MAX];
};
/*
* a power down may happen during any process of a gc action, we should fix
* all incomplete gc when kv bootup.
*/
__STATIC__ int kv_handle_incomplete_gc(struct blk_info gc_src_blk, struct blk_info gc_dst_not_done_blk, uint32_t fresh_blk)
{
kv_err_t err;
uint32_t src_blk, dst_blk;
if (gc_src_blk.nr > 1 || gc_dst_not_done_blk.nr > 1) {
printf("warning: more than one gc_src[%d] or gc_dst_not_done[%d] block\n", gc_src_blk.nr, gc_dst_not_done_blk.nr);
return KV_ERR_BLK_STATUS_ERROR;
}
if (gc_src_blk.nr == 0) {
return KV_ERR_NONE;
}
src_blk = gc_src_blk.blks[0];
if (gc_dst_not_done_blk.nr == 0) {
if (fresh_blk == (uint32_t)-1) {
printf("warning: no gc_dst_not_done block, neither fresh block\n");
return KV_ERR_BLK_STATUS_ERROR;
} else {
printf("info: no gc_dst_not_done block, choose a fresh block[0x%x]\n", fresh_blk);
dst_blk = fresh_blk;
}
} else {
dst_blk = gc_dst_not_done_blk.blks[0];
/* format dst_blk first (make it fresh); since an incomplete gc happens with a very small
probability, for clean code we simply do a full retry here. */
err = kv_blk_format(dst_blk);
if (err != KV_ERR_NONE) {
kv_blk_set_bad(dst_blk);
return err;
}
}
return kv_do_gc(src_blk, dst_blk, K_TRUE);
}
__STATIC__ int kv_mgr_ctl_build(void)
{
uint32_t cur_blk;
kv_blk_hdr_t blk_hdr;
/* theoretically, there should be at most one gc_src/gc_dst_not_done block */
uint32_t fresh_blk = (uint32_t)-1;
struct blk_info gc_src_blk = { 0 }, gc_dst_not_done_blk = { 0 };
KV_BLK_FOR_EACH(cur_blk) {
if (kv_blk_hdr_read(cur_blk, &blk_hdr) != KV_ERR_NONE) {
// sth must be wrong seriously with this block
@@ -953,18 +1136,36 @@ __STATIC__ void kv_mgr_ctl_build(void)
if (kv_blk_format(cur_blk) != KV_ERR_NONE) {
// sth must be wrong seriously with this block
kv_blk_set_bad(cur_blk);
} else {
fresh_blk = cur_blk;
}
// we get a fresh block
continue;
}
if (KV_BLK_IS_GC_SRC(&blk_hdr)) {
gc_src_blk.blks[gc_src_blk.nr++] = cur_blk;
continue;
}
if (KV_BLK_IS_GC_DST_NOT_DONE(&blk_hdr)) {
gc_dst_not_done_blk.blks[gc_dst_not_done_blk.nr++] = cur_blk;
continue;
}
// do index building
if (kv_mgr_index_build(cur_blk) != KV_ERR_NONE) {
// sth goes wrong while index building, we give it a mark
kv_blk_set_hanging(cur_blk);
continue;
}
if (kv_blk_is_fresh(cur_blk)) {
fresh_blk = cur_blk;
}
}
return kv_handle_incomplete_gc(gc_src_blk, gc_dst_not_done_blk, fresh_blk);
}
__STATIC__ kv_err_t kv_mgr_ctl_init(void)
@@ -997,11 +1198,6 @@ __STATIC__ void kv_mgr_ctl_deinit(void)
memset(&kv_ctl, 0, sizeof(kv_ctl));
}
__STATIC__ kv_err_t kv_do_gc(uint32_t dirty_blk)
{
return kv_item_walkthru(dirty_blk, kv_item_do_gc, K_NULL, K_NULL);
}
/* on each turn of gc, we free some discarded items in the dirty block.
so we get more space to save the new item.
gc should be done only when necessary, if there is no situation of an item too big to save,
@@ -1009,7 +1205,7 @@ __STATIC__ kv_err_t kv_do_gc(uint32_t dirty_blk)
*/
__STATIC__ kv_err_t kv_gc(void)
{
uint32_t cur_blk, workspace_backup;
uint32_t cur_blk, blk_dst;
int is_gc_done = K_FALSE, is_rebuild_done = K_FALSE;
/* we give blocks with KV_BLK_FLAG_HANGING a chance to rebuild index */
@@ -1017,39 +1213,24 @@ __STATIC__ kv_err_t kv_gc(void)
is_rebuild_done = kv_mgr_blk_index_rebuild();
}
workspace_backup = KV_MGR_WORKSPACE;
// there must be at least one fresh block left, make workspace pointer to the fresh one
KV_MGR_WORKSPACE = kv_blk_next_fresh();
blk_dst = kv_blk_next_fresh();
if (blk_dst == KV_BLK_INVALID) {
/* kinda a bug here, KV_MGR_BLK_NUM_FRESH == 1 */
return KV_ERR_GC_NOTHING;
}
KV_BLK_FOR_EACH(cur_blk) {
if (kv_blk_is_dirty(cur_blk)) {
if (kv_do_gc(cur_blk) != KV_ERR_NONE) {
// cannot do gc for this block, give others a try
continue;
if (kv_do_gc(cur_blk, blk_dst, K_FALSE) == KV_ERR_NONE) {
is_gc_done = K_TRUE;
break;
}
kv_blk_reset_inuse(cur_blk);
if (kv_blk_format(cur_blk) != KV_ERR_NONE) {
kv_blk_set_bad(cur_blk);
}
kv_blk_reset_fresh(KV_MGR_WORKSPACE);
if (!kv_blk_is_full(KV_MGR_WORKSPACE)) {
kv_blk_set_inuse(KV_MGR_WORKSPACE);
}
is_gc_done = K_TRUE;
break;
}
}
if (!is_gc_done) {
// if do nothing, should restore the workspace;
KV_MGR_WORKSPACE = workspace_backup;
if (is_gc_done) {
KV_MGR_WORKSPACE = blk_dst;
}
return (is_gc_done || is_rebuild_done) ? KV_ERR_NONE : KV_ERR_GC_NOTHING;
@@ -1185,6 +1366,22 @@ __DEBUG__ kv_err_t tos_kv_walkthru(void)
continue;
}
if (KV_BLK_IS_GC_SRC(&blk_hdr)) {
printf("block is gc-src\n");
}
if (KV_BLK_IS_GC_DST(&blk_hdr)) {
printf("block is gc-dst\n");
}
if (KV_BLK_IS_GC_DONE(&blk_hdr)) {
printf("block is gc-done\n");
}
if (KV_BLK_IS_GC_DST_NOT_DONE(&blk_hdr)) {
printf("block is gc-dst but not done\n");
}
if (kv_block_walkthru(cur_blk) != KV_ERR_NONE) {
printf("block diagnosis failed\n");
continue;
@@ -1213,7 +1410,10 @@ __API__ kv_err_t tos_kv_init(uint32_t flash_start, uint32_t flash_end, kv_flash_
return err;
}
kv_mgr_ctl_build();
err = kv_mgr_ctl_build();
if (err != KV_ERR_NONE) {
return err;
}
return kv_mgr_workspace_locate();
}

View File

@@ -496,7 +496,7 @@ __STATIC__ size_t adjust_request_size(size_t size, size_t align)
}
adjust_size = align_up(size, align);
if ((adjust_size > K_MMHEAP_BLK_SIZE_MAX)||(!adjust_size)) {
if (!adjust_size || adjust_size > K_MMHEAP_BLK_SIZE_MAX) {
return 0;
}
@@ -590,9 +590,10 @@ __API__ void *tos_mmheap_alloc(size_t size)
size_t adjust_size;
mmheap_blk_t *blk;
if (size>K_MMHEAP_BLK_SIZE_MAX) {
if (size > K_MMHEAP_BLK_SIZE_MAX) {
return K_NULL;
}
adjust_size = adjust_request_size(size, K_MMHEAP_ALIGN_SIZE);
blk = blk_locate_free(adjust_size);
if (!blk) {