[smart] replace varea pgmgr and fixup of dfs mmap (#8184)

Signed-off-by: shell <smokewood@qq.com>
Signed-off-by: Shell <smokewood@qq.com>

parent adbb5fd94a
commit fb78a71020
|
@@ -35,10 +35,10 @@
static rt_mem_obj_t dfs_get_mem_obj(struct dfs_file *file);
static void *dfs_mem_obj_get_file(rt_mem_obj_t mem_obj);

static rt_varea_t _dfs_map_user_varea_data(struct rt_lwp *lwp, void *map_vaddr, size_t map_size, size_t attr, mm_flag_t flags, off_t pgoffset, void *data)
static void *_do_mmap(struct rt_lwp *lwp, void *map_vaddr, size_t map_size, size_t attr,
                      mm_flag_t flags, off_t pgoffset, void *data, rt_err_t *code)
{
    int ret = 0;
    rt_varea_t varea;
    void *vaddr = map_vaddr;
    rt_mem_obj_t mem_obj = dfs_get_mem_obj(data);

@@ -46,25 +46,21 @@ static rt_varea_t _dfs_map_user_varea_data(struct rt_lwp *lwp, void *map_vaddr,
                          attr, flags, mem_obj, pgoffset);
    if (ret != RT_EOK)
    {
        varea = RT_NULL;
    }
    else
    {
        varea = rt_aspace_query(lwp->aspace, vaddr);
    }

    if (ret != RT_EOK)
    {
        vaddr = RT_NULL;
        LOG_E("failed to map %lx with size %lx with errno %d", map_vaddr,
              map_size, ret);
    }

    return varea;
    if (code)
    {
        *code = ret;
    }

    return vaddr;
}

static rt_varea_t dfs_map_user_varea_data(struct dfs_mmap2_args *mmap2, void *data)
static void *_map_data_to_uspace(struct dfs_mmap2_args *mmap2, void *data, rt_err_t *code)
{
    rt_varea_t varea = RT_NULL;
    size_t offset = 0;
    void *map_vaddr = mmap2->addr;
    size_t map_size = mmap2->length;

@@ -82,10 +78,10 @@ static rt_varea_t dfs_map_user_varea_data(struct dfs_mmap2_args *mmap2, void *da
        k_flags = lwp_user_mm_flag_to_kernel(mmap2->flags);
        k_attr = lwp_user_mm_attr_to_kernel(mmap2->prot);

        varea = _dfs_map_user_varea_data(lwp, map_vaddr, map_size, k_attr, k_flags, mmap2->pgoffset, data);
        map_vaddr = _do_mmap(lwp, map_vaddr, map_size, k_attr, k_flags, mmap2->pgoffset, data, code);
    }

    return varea;
    return map_vaddr;
}

static void hint_free(rt_mm_va_hint_t hint)
|
||||
|
@@ -405,7 +401,8 @@ static void *dfs_mem_obj_get_file(rt_mem_obj_t mem_obj)

int dfs_file_mmap(struct dfs_file *file, struct dfs_mmap2_args *mmap2)
{
    int ret = -EINVAL;
    rt_err_t ret = -EINVAL;
    void *map_vaddr;

    LOG_I("mmap2 args addr: %p length: 0x%x prot: %d flags: 0x%x pgoffset: 0x%x",
          mmap2->addr, mmap2->length, mmap2->prot, mmap2->flags, mmap2->pgoffset);

@@ -414,19 +411,11 @@ int dfs_file_mmap(struct dfs_file *file, struct dfs_mmap2_args *mmap2)
    if (file->vnode->aspace)
    {
        /* create a va area in user space (lwp) */
        rt_varea_t varea = dfs_map_user_varea_data(mmap2, file);
        if (varea)
        map_vaddr = _map_data_to_uspace(mmap2, file, &ret);
        if (map_vaddr)
        {
            mmap2->ret = varea->start;
            LOG_I("%s varea: %p", __func__, varea);
            LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
                  varea->start, varea->size, varea->offset, varea->attr, varea->flag);
            mmap2->ret = map_vaddr;
            LOG_I("file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
            ret = RT_EOK;
        }
        else
        {
            ret = -ENOMEM;
        }
    }
    else
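For context on what the reworked error propagation in dfs_file_mmap() buys a caller, here is a minimal user-space sketch (not part of the change): the path, length, and logging are illustrative, and it assumes the lwp mmap2 syscall layer forwards dfs_file_mmap()'s return code into the caller's errno.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Map a file read-only and report the reason if the kernel rejects it. */
int map_file_readonly(const char *path, size_t length)
{
    int fd = open(path, O_RDONLY);
    if (fd < 0)
        return -1;

    void *va = mmap(NULL, length, PROT_READ, MAP_PRIVATE, fd, 0);
    if (va == MAP_FAILED)
    {
        /* errno can now reflect the code filled in along the dfs mmap path,
         * rather than a blanket ENOMEM (assuming the syscall layer forwards it) */
        printf("mmap(%s) failed: %s\n", path, strerror(errno));
        close(fd);
        return -1;
    }

    /* ... use the mapping ... */
    munmap(va, length);
    close(fd);
    return 0;
}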
|
||||
|
|
|
@@ -80,7 +80,6 @@ struct rt_lwp
#ifdef ARCH_MM_MMU
    size_t end_heap;
    rt_aspace_t aspace;
    struct rt_lwp_objs *lwp_obj;
#else
#ifdef ARCH_MM_MPU
    struct rt_mpu_info mpu_info;
|
||||
|
|
|
@@ -2780,7 +2780,6 @@ sysret_t sys_execve(const char *path, char *const argv[], char *const envp[])

#ifdef ARCH_MM_MMU
    _swap_lwp_data(lwp, new_lwp, struct rt_aspace *, aspace);
    _swap_lwp_data(lwp, new_lwp, struct rt_lwp_objs *, lwp_obj);

    _swap_lwp_data(lwp, new_lwp, size_t, end_heap);
#endif
|
||||
|
|
|
@@ -47,8 +47,6 @@

#define STACK_OBJ _null_object

static void _init_lwp_objs(struct rt_lwp_objs *lwp_objs, rt_aspace_t aspace);

static const char *_null_get_name(rt_varea_t varea)
{
    return "null";

@@ -123,13 +121,9 @@ int lwp_user_space_init(struct rt_lwp *lwp, rt_bool_t is_fork)
    int err = -RT_ENOMEM;
    const size_t flags = MMF_MAP_PRIVATE;

    lwp->lwp_obj = rt_malloc(sizeof(struct rt_lwp_objs));
    if (lwp->lwp_obj)
    {
        err = arch_user_space_init(lwp);
        if (err == RT_EOK)
        {
            _init_lwp_objs(lwp->lwp_obj, lwp->aspace);
            if (!is_fork)
            {
                stk_addr = (void *)USER_STACK_VSTART;

@@ -138,7 +132,6 @@ int lwp_user_space_init(struct rt_lwp *lwp, rt_bool_t is_fork)
                                     MMU_MAP_U_RWCB, flags, &STACK_OBJ, 0);
            }
        }
    }

    return err;
}
|
||||
|
@ -167,107 +160,8 @@ void lwp_aspace_switch(struct rt_thread *thread)
|
|||
void lwp_unmap_user_space(struct rt_lwp *lwp)
|
||||
{
|
||||
arch_user_space_free(lwp);
|
||||
rt_free(lwp->lwp_obj);
|
||||
}
|
||||
|
||||
static const char *_user_get_name(rt_varea_t varea)
|
||||
{
|
||||
char *name;
|
||||
if (varea->flag & MMF_TEXT)
|
||||
{
|
||||
name = "user.text";
|
||||
}
|
||||
else
|
||||
{
|
||||
if (varea->start == (void *)USER_STACK_VSTART)
|
||||
{
|
||||
name = "user.stack";
|
||||
}
|
||||
else if (varea->start >= (void *)USER_HEAP_VADDR &&
|
||||
varea->start < (void *)USER_HEAP_VEND)
|
||||
{
|
||||
name = "user.heap";
|
||||
}
|
||||
else
|
||||
{
|
||||
name = "user.data";
|
||||
}
|
||||
}
|
||||
return name;
|
||||
}
|
||||
|
||||
#define NO_AUTO_FETCH 0x1
|
||||
#define VAREA_CAN_AUTO_FETCH(varea) (!((rt_ubase_t)((varea)->data) & NO_AUTO_FETCH))
|
||||
|
||||
static void _user_do_page_fault(struct rt_varea *varea,
|
||||
struct rt_aspace_fault_msg *msg)
|
||||
{
|
||||
struct rt_lwp_objs *lwp_objs;
|
||||
lwp_objs = rt_container_of(varea->mem_obj, struct rt_lwp_objs, mem_obj);
|
||||
|
||||
if (lwp_objs->source)
|
||||
{
|
||||
char *paddr = rt_hw_mmu_v2p(lwp_objs->source, msg->fault_vaddr);
|
||||
if (paddr != ARCH_MAP_FAILED)
|
||||
{
|
||||
void *vaddr;
|
||||
vaddr = paddr - PV_OFFSET;
|
||||
|
||||
if (!(varea->flag & MMF_TEXT))
|
||||
{
|
||||
void *cp = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
|
||||
if (cp)
|
||||
{
|
||||
memcpy(cp, vaddr, ARCH_PAGE_SIZE);
|
||||
rt_varea_pgmgr_insert(varea, cp);
|
||||
msg->response.status = MM_FAULT_STATUS_OK;
|
||||
msg->response.vaddr = cp;
|
||||
msg->response.size = ARCH_PAGE_SIZE;
|
||||
}
|
||||
else
|
||||
{
|
||||
LOG_W("%s: page alloc failed at %p", __func__,
|
||||
varea->start);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
rt_page_t page = rt_page_addr2page(vaddr);
|
||||
page->ref_cnt += 1;
|
||||
rt_varea_pgmgr_insert(varea, vaddr);
|
||||
msg->response.status = MM_FAULT_STATUS_OK;
|
||||
msg->response.vaddr = vaddr;
|
||||
msg->response.size = ARCH_PAGE_SIZE;
|
||||
}
|
||||
}
|
||||
else if (!(varea->flag & MMF_TEXT))
|
||||
{
|
||||
/* if data segment not exist in source do a fallback */
|
||||
rt_mm_dummy_mapper.on_page_fault(varea, msg);
|
||||
}
|
||||
}
|
||||
else if (VAREA_CAN_AUTO_FETCH(varea))
|
||||
{
|
||||
/* if (!lwp_objs->source), no aspace as source data */
|
||||
rt_mm_dummy_mapper.on_page_fault(varea, msg);
|
||||
}
|
||||
}
|
||||
|
||||
static void _init_lwp_objs(struct rt_lwp_objs *lwp_objs, rt_aspace_t aspace)
|
||||
{
|
||||
if (lwp_objs)
|
||||
{
|
||||
/**
|
||||
* @brief one lwp_obj represent an base layout of page based memory in user space
|
||||
* This is useful on duplication. Where we only have a (lwp_objs and offset) to
|
||||
* provide identical memory. This is implemented by lwp_objs->source.
|
||||
*/
|
||||
lwp_objs->source = NULL;
|
||||
memcpy(&lwp_objs->mem_obj, &rt_mm_dummy_mapper, sizeof(struct rt_mem_obj));
|
||||
lwp_objs->mem_obj.get_name = _user_get_name;
|
||||
lwp_objs->mem_obj.on_page_fault = _user_do_page_fault;
|
||||
}
|
||||
}
|
||||
|
||||
static void *_lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size,
|
||||
int text)
|
||||
|
|
|
@@ -127,6 +127,7 @@ static void _anon_varea_open(struct rt_varea *varea)
static void _anon_varea_close(struct rt_varea *varea)
{
    rt_aspace_anon_ref_dec(varea->mem_obj);
    rt_mm_dummy_mapper.on_varea_close(varea);
}

static rt_err_t _anon_varea_expand(struct rt_varea *varea, void *new_vaddr, rt_size_t size)

@@ -154,14 +155,14 @@ static rt_err_t _anon_varea_merge(struct rt_varea *merge_to, struct rt_varea *me
rt_inline void _map_page_in_varea(rt_aspace_t asapce, rt_varea_t varea,
                                  struct rt_aspace_fault_msg *msg, char *fault_addr)
{
    if (rt_varea_map_page(varea, fault_addr, msg->response.vaddr) == RT_EOK)
    char *page_va = msg->response.vaddr;
    if (rt_varea_map_page(varea, fault_addr, page_va) == RT_EOK)
    {
        msg->response.status = MM_FAULT_STATUS_OK_MAPPED;
        rt_varea_pgmgr_insert(varea, page_va);
    }
    else
    {
        /* revoke the allocated page */
        rt_varea_pgmgr_pop(varea, msg->response.vaddr, ARCH_PAGE_SIZE);
        msg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
        LOG_W("%s: failed to map page into varea", __func__);
    }

@@ -201,6 +202,7 @@ static void *_get_page_from_backup(rt_aspace_t backup, rt_base_t offset_in_mobj)
        {
            rc = msg.response.vaddr;
        }
        rt_pages_free(msg.response.vaddr, 0);
    }
}
else

@@ -221,28 +223,33 @@ static void *_get_page_from_backup(rt_aspace_t backup, rt_base_t offset_in_mobj)
}

/* get the backup page in kernel for the address in user space */
static void _anon_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
static void _fetch_page_for_varea(struct rt_varea *varea, struct rt_aspace_fault_msg *msg, rt_bool_t need_map)
{
    void *paddr;
    char *frame_ka;
    rt_aspace_t from_aspace = varea->aspace;
    rt_aspace_t curr_aspace = varea->aspace;
    rt_aspace_t backup = _anon_obj_get_backup(varea->mem_obj);

    RDWR_LOCK(from_aspace);
    RDWR_LOCK(curr_aspace);

    /**
     * if the page is already mapped(this may caused by data race while other
     * thread success to take the lock and mapped the page before this), return okay
     */
    paddr = rt_hw_mmu_v2p(from_aspace, msg->fault_vaddr);
    paddr = rt_hw_mmu_v2p(curr_aspace, msg->fault_vaddr);
    if (paddr == ARCH_MAP_FAILED)
    {
        if (backup == from_aspace)
        if (backup == curr_aspace)
        {
            rt_mm_dummy_mapper.on_page_fault(varea, msg);
            if (msg->response.status != MM_FAULT_STATUS_UNRECOVERABLE)
            {
                /* if backup == curr_aspace, a page fetch always binding with a pte filling */
                _map_page_in_varea(backup, varea, msg, msg->fault_vaddr);
                if (msg->response.status != MM_FAULT_STATUS_UNRECOVERABLE)
                {
                    rt_pages_free(msg->response.vaddr, 0);
                }
            }
        }
        else

@@ -252,7 +259,14 @@ static void _anon_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg
            {
                msg->response.vaddr = frame_ka;
                msg->response.size = ARCH_PAGE_SIZE;
                _map_page_in_varea(from_aspace, varea, msg, msg->fault_vaddr);
                if (!need_map)
                {
                    msg->response.status = MM_FAULT_STATUS_OK;
                }
                else
                {
                    _map_page_in_varea(curr_aspace, varea, msg, msg->fault_vaddr);
                }
            }
        }
    }

@@ -260,7 +274,12 @@ static void _anon_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg
    {
        msg->response.status = MM_FAULT_STATUS_OK_MAPPED;
    }
    RDWR_UNLOCK(from_aspace);
    RDWR_UNLOCK(curr_aspace);
}

static void _anon_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
{
    _fetch_page_for_varea(varea, msg, RT_TRUE);
}

static void read_by_mte(rt_aspace_t aspace, struct rt_aspace_io_msg *iomsg)

@@ -271,9 +290,10 @@ static void read_by_mte(rt_aspace_t aspace, struct rt_aspace_io_msg *iomsg)

static void _anon_page_read(struct rt_varea *varea, struct rt_aspace_io_msg *iomsg)
{
    rt_aspace_t from_aspace = varea->aspace;
    rt_aspace_t curr_aspace = varea->aspace;
    rt_aspace_t backup = _anon_obj_get_backup(varea->mem_obj);

    if (rt_hw_mmu_v2p(from_aspace, iomsg->fault_vaddr) == ARCH_MAP_FAILED)
    if (rt_hw_mmu_v2p(curr_aspace, iomsg->fault_vaddr) == ARCH_MAP_FAILED)
    {
        struct rt_aspace_fault_msg msg;
        msg.fault_op = MM_FAULT_OP_READ;

@@ -282,15 +302,18 @@ static void _anon_page_read(struct rt_varea *varea, struct rt_aspace_io_msg *iom
        msg.off = iomsg->off;
        rt_mm_fault_res_init(&msg.response);

        _anon_page_fault(varea, &msg);
        _fetch_page_for_varea(varea, &msg, RT_FALSE);
        if (msg.response.status != MM_FAULT_STATUS_UNRECOVERABLE)
        {
            read_by_mte(from_aspace, iomsg);
            void *saved_fault_va = iomsg->fault_vaddr;
            iomsg->fault_vaddr = (void *)(iomsg->off << MM_PAGE_SHIFT);
            read_by_mte(backup, iomsg);
            iomsg->fault_vaddr = saved_fault_va;
        }
    }
    else
    {
        read_by_mte(from_aspace, iomsg);
        read_by_mte(curr_aspace, iomsg);
    }
}

@@ -303,8 +326,14 @@ static void write_by_mte(rt_aspace_t aspace, struct rt_aspace_io_msg *iomsg)
static void _anon_page_write(struct rt_varea *varea, struct rt_aspace_io_msg *iomsg)
{
    rt_aspace_t from_aspace = varea->aspace;
    rt_aspace_t backup = _anon_obj_get_backup(varea->mem_obj);

    if (rt_hw_mmu_v2p(from_aspace, iomsg->fault_vaddr) == ARCH_MAP_FAILED)
    if (from_aspace != backup)
    {
        /* varea in guest aspace cannot modify the page */
        iomsg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
    }
    else if (rt_hw_mmu_v2p(from_aspace, iomsg->fault_vaddr) == ARCH_MAP_FAILED)
    {
        struct rt_aspace_fault_msg msg;
        msg.fault_op = MM_FAULT_OP_WRITE;

@@ -313,15 +342,20 @@ static void _anon_page_write(struct rt_varea *varea, struct rt_aspace_io_msg *io
        msg.off = iomsg->off;
        rt_mm_fault_res_init(&msg.response);

        _anon_page_fault(varea, &msg);
        if (msg.response.status != MM_FAULT_STATUS_UNRECOVERABLE)
        _fetch_page_for_varea(varea, &msg, RT_TRUE);
        if (msg.response.status == MM_FAULT_STATUS_OK_MAPPED)
        {
            write_by_mte(from_aspace, iomsg);
            write_by_mte(backup, iomsg);
        }
        else
        {
            /* mapping failed, report an error */
            iomsg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
        }
    }
    else
    {
        write_by_mte(from_aspace, iomsg);
        write_by_mte(backup, iomsg);
    }
}
|
||||
|
||||
|
@@ -413,6 +447,7 @@ static int _override_map(rt_varea_t varea, rt_aspace_t aspace, void *fault_vaddr
        RT_ASSERT(rt_hw_mmu_v2p(aspace, msg->fault_vaddr) == (page + PV_OFFSET));
        rc = MM_FAULT_FIXABLE_TRUE;
        rt_varea_pgmgr_insert(map_varea, page);
        rt_pages_free(page, 0);
    }
    else
    {
|
||||
|
|
|
@@ -495,7 +495,6 @@ static inline void _varea_post_install(rt_varea_t varea, rt_aspace_t aspace,
    varea->mem_obj = mem_obj;
    varea->flag = flags;
    varea->offset = offset;
    varea->frames = NULL;

    if (varea->mem_obj && varea->mem_obj->on_varea_open)
        varea->mem_obj->on_varea_open(varea);

@@ -565,11 +564,11 @@ void _varea_uninstall_locked(rt_varea_t varea)

    if (varea->mem_obj && varea->mem_obj->on_varea_close)
        varea->mem_obj->on_varea_close(varea);

    else
    {
        rt_hw_mmu_unmap(aspace, varea->start, varea->size);
        rt_hw_tlb_invalidate_range(aspace, varea->start, varea->size, ARCH_PAGE_SIZE);

        rt_varea_pgmgr_pop_all(varea);
    }

    _aspace_bst_remove(aspace, varea);
}

@@ -960,7 +959,6 @@ static rt_err_t _split_varea(rt_varea_t existed, char *ex_end, char *unmap_start
    subset->mem_obj = existed->mem_obj;
    subset->flag = existed->flag & ~MMF_STATIC_ALLOC;
    subset->offset = existed->offset + rela_offset;
    subset->frames = NULL;

    error = existed->mem_obj->on_varea_split(existed, unmap_start, unmap_len, subset);
    if (error == RT_EOK)
|
||||
|
|
|
@@ -76,7 +76,6 @@ typedef struct rt_varea

    struct _aspace_node node;

    struct rt_page *frames;
    void *data;
} *rt_varea_t;
|
||||
|
||||
|
|
|
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *

@@ -7,6 +7,7 @@
 * Date           Author       Notes
 * 2022-11-30     WangXiaoyao  the first version
 * 2023-08-19     Shell        Support varea modification handler
 * 2023-10-13     Shell        Replace the page management algorithm of pgmgr
 */

#define DBG_TAG "mm.object"

@@ -30,79 +31,28 @@ static const char *get_name(rt_varea_t varea)
    return "dummy-mapper";
}

static rt_bool_t _varea_pgmgr_frame_is_member(rt_varea_t varea, rt_page_t frame)
{
    rt_page_t iter;
    rt_bool_t rc = RT_FALSE;

    if (varea->frames)
    {
        iter = varea->frames;
        do
        {
            if (iter == frame)
            {
                rc = RT_TRUE;
                break;
            }
            iter = iter->next;
        } while (iter);
    }

    return rc;
}

void rt_varea_pgmgr_insert(rt_varea_t varea, void *page_addr)
{
    rt_page_t page = rt_page_addr2page(page_addr);

    if (varea->frames == NULL)
    {
        varea->frames = page;
        page->pre = RT_NULL;
        page->next = RT_NULL;
    }
    else
    {
        page->pre = RT_NULL;
        varea->frames->pre = page;
        page->next = varea->frames;
        varea->frames = page;
    }
    /* each mapping of page frame in the varea is binding with a reference */
    rt_page_ref_inc(page_addr, 0);
}

/* resource recycling of page frames */
void rt_varea_pgmgr_pop_all(rt_varea_t varea)
{
    rt_page_t page = varea->frames;
    rt_aspace_t aspace = varea->aspace;
    char *end_addr = varea->start + varea->size;
    RT_ASSERT(!((long)end_addr & ARCH_PAGE_MASK));

    while (page)
    for (char *iter = varea->start; iter != end_addr; iter += ARCH_PAGE_SIZE)
    {
        rt_page_t next = page->next;
        void *pg_va = rt_page_page2addr(page);
        rt_pages_free(pg_va, 0);
        page = next;
        void *page_pa = rt_hw_mmu_v2p(aspace, iter);
        char *page_va = rt_kmem_p2v(page_pa);
        if (page_pa != ARCH_MAP_FAILED && page_va)
        {
            rt_hw_mmu_unmap(aspace, iter, ARCH_PAGE_SIZE);
            rt_pages_free(page_va, 0);
        }

    varea->frames = RT_NULL;
    }

void rt_varea_pgmgr_pop(rt_varea_t varea, void *vaddr, rt_size_t size)
{
    void *vend = (char *)vaddr + size;
    while (vaddr != vend)
    {
        rt_page_t page = rt_page_addr2page(vaddr);
        if (_varea_pgmgr_frame_is_member(varea, page))
        {
            if (page->pre)
                page->pre->next = page->next;
            if (page->next)
                page->next->pre = page->pre;
            if (varea->frames == page)
                varea->frames = page->next;
            rt_pages_free(vaddr, 0);
        }
        vaddr = (char *)vaddr + ARCH_PAGE_SIZE;
    }
}

@@ -120,8 +70,6 @@ static void on_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *ms
    msg->response.status = MM_FAULT_STATUS_OK;
    msg->response.size = ARCH_PAGE_SIZE;
    msg->response.vaddr = page;

    rt_varea_pgmgr_insert(varea, page);
}

static void on_varea_open(struct rt_varea *varea)

@@ -131,6 +79,8 @@ static void on_varea_open(struct rt_varea *varea)

static void on_varea_close(struct rt_varea *varea)
{
    /* unmap and dereference page frames in the varea region */
    rt_varea_pgmgr_pop_all(varea);
}

static rt_err_t on_varea_expand(struct rt_varea *varea, void *new_vaddr, rt_size_t size)

@@ -153,7 +103,7 @@ static void _remove_pages(rt_varea_t varea, void *rm_start, void *rm_end)
            page_va -= PV_OFFSET;
            LOG_D("%s: free page %p", __func__, page_va);
            rt_varea_unmap_page(varea, rm_start);
            rt_varea_pgmgr_pop(varea, page_va, ARCH_PAGE_SIZE);
            rt_pages_free(page_va, 0);
        }
        rm_start += ARCH_PAGE_SIZE;
    }

@@ -183,70 +133,15 @@ static rt_err_t on_varea_shrink(rt_varea_t varea, void *new_start, rt_size_t siz

static rt_err_t on_varea_split(struct rt_varea *existed, void *unmap_start, rt_size_t unmap_len, struct rt_varea *subset)
{
    void *sub_start = subset->start;
    void *sub_end = sub_start + subset->size;
    void *page_va;

    /* remove the resource in the unmap region, and do nothing for the subset */
    _remove_pages(existed, unmap_start, (char *)unmap_start + unmap_len);

    RT_ASSERT(!((rt_ubase_t)sub_start & ARCH_PAGE_MASK));
    RT_ASSERT(!((rt_ubase_t)sub_end & ARCH_PAGE_MASK));
    while (sub_start != sub_end)
    {
        page_va = rt_hw_mmu_v2p(existed->aspace, sub_start);

        if (page_va != ARCH_MAP_FAILED)
        {
            rt_page_t frame;
            page_va = rt_kmem_p2v(page_va);
            if (page_va)
            {
                frame = rt_page_addr2page(page_va);
                if (frame && _varea_pgmgr_frame_is_member(existed, frame))
                {
                    LOG_D("%s: free page %p", __func__, page_va);
                    rt_page_ref_inc(page_va, 0);
                    rt_varea_pgmgr_pop(existed, page_va, ARCH_PAGE_SIZE);
                    rt_varea_pgmgr_insert(subset, page_va);
                }
            }
        }
        sub_start += ARCH_PAGE_SIZE;
    }

    return RT_EOK;
}

static rt_err_t on_varea_merge(struct rt_varea *merge_to, struct rt_varea *merge_from)
{
    /* transport page */
    void *mr_start = merge_from->start;
    void *mr_end = mr_start + merge_from->size;
    void *page_va;

    RT_ASSERT(!((rt_ubase_t)mr_start & ARCH_PAGE_MASK));
    RT_ASSERT(!((rt_ubase_t)mr_end & ARCH_PAGE_MASK));
    while (mr_start != mr_end)
    {
        page_va = rt_hw_mmu_v2p(merge_from->aspace, mr_start);
        if (page_va != ARCH_MAP_FAILED)
        {
            rt_page_t frame;
            page_va = rt_kmem_p2v(page_va);
            if (page_va)
            {
                frame = rt_page_addr2page(page_va);
                if (frame && _varea_pgmgr_frame_is_member(merge_from, frame))
                {
                    LOG_D("%s: free page %p", __func__, page_va);
                    rt_page_ref_inc(page_va, 0);
                    rt_varea_pgmgr_pop(merge_from, page_va, ARCH_PAGE_SIZE);
                    rt_varea_pgmgr_insert(merge_to, page_va);
                }
            }
        }
        mr_start += ARCH_PAGE_SIZE;
    }
    /* do nothing for the migration */
    return RT_EOK;
}
|
||||
|
||||
|
|
|
@@ -134,7 +134,6 @@ static void _trace_alloc(rt_page_t page, void *caller, size_t size_bits)
{
    if (enable)
    {
        char *page_va = rt_page_page2addr(page);
        page->caller = caller;
        page->trace_size = size_bits;
        page->tl_prev = NULL;

@@ -33,7 +33,6 @@

#define PAGE_ANY_AVAILABLE 0x1ul


#ifdef RT_DEBUGING_PAGE_LEAK
#define DEBUG_FIELD struct { \
    /* trace list */ \

@@ -105,8 +105,6 @@ void _aspace_bst_insert(struct rt_aspace *aspace, struct rt_varea *varea);
 */
void _aspace_bst_remove(struct rt_aspace *aspace, struct rt_varea *varea);

void rt_varea_pgmgr_pop(rt_varea_t varea, void *vaddr, rt_size_t size);

void rt_varea_pgmgr_pop_all(rt_varea_t varea);

int rt_varea_fix_private_locked(rt_varea_t ex_varea, void *pa,
|
||||
|
|
|
@@ -6,8 +6,15 @@ src = []
CPPPATH = [cwd]

if GetDepend(['UTEST_MM_API_TC', 'RT_USING_SMART']):
    src += ['mm_api_tc.c', 'mm_libcpu_tc.c']
if GetDepend(['RT_USING_MEMBLOCK']):
    # deprecated test, will be rewrited in the future
    # src += ['mm_api_tc.c', 'mm_libcpu_tc.c']
    src += ['rt_ioremap.c']
    src += ['aspace_unmap_range_invalid_param.c', 'aspace_unmap_range_shrink.c']
    src += ['aspace_unmap_range_split.c', 'aspace_map_expand.c']
    src += ['lwp_mmap_expand.c', 'lwp_mmap_map_fixed.c', 'lwp_mmap_fix_private.c']
    src += ['lwp_mmap_fd.c', 'lwp_mmap_fd_map_fixed_merge.c', 'lwp_mmap_fd_map_fixed_split.c']

if GetDepend(['UTEST_MM_API_TC', 'RT_USING_MEMBLOCK']):
    src += ['mm_memblock_tc.c']

if GetDepend(['UTEST_MM_LWP_TC', 'RT_USING_SMART']):
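The sources above are selected through GetDepend(), which checks the project configuration. A hypothetical rtconfig.h fragment that switches these test groups on might look like the following; the macro names come from the SConscript above, while how they are normally set (menuconfig/Kconfig or hand-edited) depends on the BSP.

/* rtconfig.h fragment (hypothetical) -- configuration switches that the
 * SConscript above checks via GetDepend() */
#define RT_USING_SMART
#define UTEST_MM_API_TC
#define UTEST_MM_LWP_TC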
|
||||
|
|
|
@ -0,0 +1,119 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2023, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2023-08-22 Shell test case for aspace_map with varea_expand
|
||||
*/
|
||||
#include "common.h"
|
||||
#include "lwp_user_mm.h"
|
||||
#include <mm_aspace.h>
|
||||
|
||||
#include <rtthread.h>
|
||||
|
||||
static size_t flags = MMF_PREFETCH | MMF_MAP_FIXED;
|
||||
static size_t attr = MMU_MAP_K_RWCB;
|
||||
static rt_mem_obj_t mem_obj = &rt_mm_dummy_mapper;
|
||||
|
||||
static char *ex_vaddr = (void *)0x100000000;
|
||||
static size_t ex_offset = 1024;
|
||||
static size_t map_size = 0x3000;
|
||||
|
||||
static size_t former_vsz;
|
||||
static size_t former_vcount;
|
||||
|
||||
static struct rt_lwp *lwp;
|
||||
|
||||
static int _count_vsz(rt_varea_t varea, void *arg)
|
||||
{
|
||||
rt_base_t *pvsz = arg;
|
||||
*pvsz += 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static rt_base_t count_vcount(rt_aspace_t aspace)
|
||||
{
|
||||
rt_base_t vcount = 0;
|
||||
rt_aspace_traversal(aspace, _count_vsz, &vcount);
|
||||
return vcount;
|
||||
}
|
||||
|
||||
static void test_map_varea_expand(void)
|
||||
{
|
||||
char *next_va;
|
||||
size_t next_offset;
|
||||
|
||||
/* create an existed mapping */
|
||||
next_va = ex_vaddr;
|
||||
former_vsz = rt_aspace_count_vsz(lwp->aspace);
|
||||
former_vcount = count_vcount(lwp->aspace);
|
||||
utest_int_equal(
|
||||
RT_EOK,
|
||||
rt_aspace_map(lwp->aspace, (void *)&ex_vaddr, map_size, attr, flags, mem_obj, ex_offset)
|
||||
);
|
||||
uassert_true(next_va == ex_vaddr);
|
||||
utest_int_equal(former_vsz + map_size, rt_aspace_count_vsz(lwp->aspace));
|
||||
utest_int_equal(former_vcount + 1, count_vcount(lwp->aspace));
|
||||
former_vsz += map_size;
|
||||
former_vcount += 1;
|
||||
|
||||
/* test the RIGHT side expansion of varea by rt_aspace_map */
|
||||
next_va = ex_vaddr + map_size;
|
||||
next_offset = ex_offset + (map_size >> MM_PAGE_SHIFT);
|
||||
utest_int_equal(
|
||||
RT_EOK,
|
||||
rt_aspace_map(lwp->aspace, (void *)&next_va, map_size, attr, flags, mem_obj, next_offset)
|
||||
);
|
||||
uassert_true(next_va == (char *)ex_vaddr + map_size);
|
||||
utest_int_equal(former_vsz + map_size, rt_aspace_count_vsz(lwp->aspace));
|
||||
utest_int_equal(former_vcount, count_vcount(lwp->aspace));
|
||||
former_vsz += map_size;
|
||||
|
||||
/* test the LEFT side expansion of varea by rt_aspace_map */
|
||||
next_va = ex_vaddr - map_size;
|
||||
next_offset = ex_offset - (map_size >> MM_PAGE_SHIFT);
|
||||
utest_int_equal(
|
||||
RT_EOK,
|
||||
rt_aspace_map(lwp->aspace, (void *)&next_va, map_size, attr, flags, mem_obj, next_offset)
|
||||
);
|
||||
uassert_true(next_va == ex_vaddr - map_size);
|
||||
utest_int_equal(former_vsz + map_size, rt_aspace_count_vsz(lwp->aspace));
|
||||
utest_int_equal(former_vcount, count_vcount(lwp->aspace));
|
||||
former_vsz += map_size;
|
||||
|
||||
/* test the expand varea routine from rt_aspace_map_static */
|
||||
utest_int_equal(RT_EOK, rt_aspace_unmap_range(lwp->aspace, next_va, 3 * map_size));
|
||||
|
||||
/* test the expand varea routine from rt_aspace_map_phy */
|
||||
/* test the expand varea routine from rt_aspace_map_phy_static */
|
||||
/* these 2 from another file */
|
||||
}
|
||||
|
||||
static void aspace_map_tc(void)
|
||||
{
|
||||
CONSIST_HEAP(test_map_varea_expand());
|
||||
}
|
||||
|
||||
static rt_err_t utest_tc_init(void)
|
||||
{
|
||||
lwp = lwp_create(0);
|
||||
if (lwp)
|
||||
lwp_user_space_init(lwp, 1);
|
||||
else
|
||||
return -RT_ENOMEM;
|
||||
return RT_EOK;
|
||||
}
|
||||
|
||||
static rt_err_t utest_tc_cleanup(void)
|
||||
{
|
||||
lwp_ref_dec(lwp);
|
||||
return RT_EOK;
|
||||
}
|
||||
|
||||
static void testcase(void)
|
||||
{
|
||||
UTEST_UNIT_RUN(aspace_map_tc);
|
||||
}
|
||||
UTEST_TC_EXPORT(testcase, "testcases.mm.aspace_map.varea_expand", utest_tc_init, utest_tc_cleanup, 10);
|
|
@ -0,0 +1,84 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2023, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2023-08-17 Shell test case for aspace_unmap_range
|
||||
*/
|
||||
#include "common.h"
|
||||
#include "lwp_user_mm.h"
|
||||
#include <mm_aspace.h>
|
||||
|
||||
#include <rtthread.h>
|
||||
|
||||
static void *vaddr = (void *)0x100000000;
|
||||
static size_t existed_size = 0x5000;
|
||||
static char *unmap_start;
|
||||
static size_t unmap_size = 0x2000;
|
||||
static size_t former_vsz;
|
||||
static struct rt_lwp *lwp;
|
||||
static size_t flags = MMF_PREFETCH | MMF_MAP_FIXED;
|
||||
|
||||
static void test_unmap_range_invalid_param(void)
|
||||
{
|
||||
rt_mem_obj_t notsupp_object;
|
||||
|
||||
/* create an existed mapping */
|
||||
former_vsz = rt_aspace_count_vsz(lwp->aspace);
|
||||
uassert_true(!rt_aspace_map(lwp->aspace, &vaddr, existed_size, MMU_MAP_K_RWCB, flags, &rt_mm_dummy_mapper, 0));
|
||||
utest_int_equal(former_vsz + existed_size, rt_aspace_count_vsz(lwp->aspace));
|
||||
former_vsz += existed_size;
|
||||
|
||||
/* test unaligned vaddr start */
|
||||
unmap_start = (char *)vaddr - 0x1234;
|
||||
utest_int_equal(-RT_EINVAL, rt_aspace_unmap_range(lwp->aspace, unmap_start, unmap_size));
|
||||
utest_int_equal(former_vsz, rt_aspace_count_vsz(lwp->aspace));
|
||||
|
||||
/* test unaligned size */
|
||||
unmap_size = 0x2000;
|
||||
unmap_start = (char *)vaddr + existed_size - unmap_size;
|
||||
utest_int_equal(RT_EOK, rt_aspace_unmap_range(lwp->aspace, unmap_start, unmap_size - 0x123));
|
||||
utest_int_equal(former_vsz - unmap_size, rt_aspace_count_vsz(lwp->aspace));
|
||||
|
||||
/* create another mapping binding to mem_obj without proper handler */
|
||||
notsupp_object = rt_mem_obj_create(&rt_mm_dummy_mapper);
|
||||
notsupp_object->on_varea_shrink = RT_NULL;
|
||||
|
||||
utest_int_equal(
|
||||
RT_EOK,
|
||||
rt_aspace_map(lwp->aspace, (void *)&unmap_start, unmap_size, MMU_MAP_K_RWCB, flags, notsupp_object, 0)
|
||||
);
|
||||
|
||||
utest_int_equal(-RT_EPERM, rt_aspace_unmap_range(lwp->aspace, unmap_start, 0x1000));
|
||||
utest_int_equal(RT_EOK, rt_aspace_unmap_range(lwp->aspace, vaddr, existed_size));
|
||||
rt_free(notsupp_object);
|
||||
}
|
||||
|
||||
static void aspace_unmap_tc(void)
|
||||
{
|
||||
CONSIST_HEAP(test_unmap_range_invalid_param());
|
||||
}
|
||||
|
||||
static rt_err_t utest_tc_init(void)
|
||||
{
|
||||
lwp = lwp_create(0);
|
||||
if (lwp)
|
||||
lwp_user_space_init(lwp, 1);
|
||||
else
|
||||
return -RT_ENOMEM;
|
||||
return RT_EOK;
|
||||
}
|
||||
|
||||
static rt_err_t utest_tc_cleanup(void)
|
||||
{
|
||||
lwp_ref_dec(lwp);
|
||||
return RT_EOK;
|
||||
}
|
||||
|
||||
static void testcase(void)
|
||||
{
|
||||
UTEST_UNIT_RUN(aspace_unmap_tc);
|
||||
}
|
||||
UTEST_TC_EXPORT(testcase, "testcases.mm.aspace_unmap_range.invalid_param", utest_tc_init, utest_tc_cleanup, 10);
|
|
@ -0,0 +1,75 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2023, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2023-08-17 Shell test case for aspace_unmap_range
|
||||
*/
|
||||
#include "common.h"
|
||||
#include "lwp_user_mm.h"
|
||||
#include <mm_aspace.h>
|
||||
|
||||
#include <rtthread.h>
|
||||
|
||||
static void *vaddr = (void *)0x100000000;
|
||||
static size_t existed_size = 0x5000;
|
||||
static char *unmap_start;
|
||||
static char *unmap_end;
|
||||
static size_t former_vsz;
|
||||
static size_t unmap_size = 0x2000;
|
||||
static struct rt_lwp *lwp;
|
||||
|
||||
static void test_unmap_range_shrink(void)
|
||||
{
|
||||
/* create an existed mapping */
|
||||
former_vsz = rt_aspace_count_vsz(lwp->aspace);
|
||||
uassert_true(!rt_aspace_map(lwp->aspace, &vaddr, existed_size, MMU_MAP_K_RWCB, MMF_PREFETCH, &rt_mm_dummy_mapper, 0));
|
||||
utest_int_equal(former_vsz + existed_size, rt_aspace_count_vsz(lwp->aspace));
|
||||
former_vsz += existed_size;
|
||||
|
||||
/* test the shrink mode of unmap from LEFT side */
|
||||
unmap_start = (char *)vaddr - unmap_size/2;
|
||||
uassert_true(!rt_aspace_unmap_range(lwp->aspace, unmap_start, unmap_size));
|
||||
unmap_end = unmap_start + unmap_size;
|
||||
uassert_true(rt_hw_mmu_v2p(lwp->aspace, unmap_end) != ARCH_MAP_FAILED);
|
||||
utest_int_equal(former_vsz - (unmap_end - (char *)vaddr), rt_aspace_count_vsz(lwp->aspace));
|
||||
former_vsz -= unmap_end - (char *)vaddr;
|
||||
|
||||
/* test the shrink mode of unmap from RIGHT side */
|
||||
unmap_start = (char *)vaddr + existed_size - unmap_size / 2;
|
||||
uassert_true(!rt_aspace_unmap_range(lwp->aspace, unmap_start, unmap_size));
|
||||
uassert_true(rt_hw_mmu_v2p(lwp->aspace, unmap_start - 1) != ARCH_MAP_FAILED);
|
||||
utest_int_equal(former_vsz - (unmap_end - (char *)vaddr), rt_aspace_count_vsz(lwp->aspace));
|
||||
former_vsz -= unmap_end - (char *)vaddr;
|
||||
|
||||
utest_int_equal(RT_EOK, rt_aspace_unmap_range(lwp->aspace, vaddr, existed_size));
|
||||
}
|
||||
|
||||
static void aspace_unmap_tc(void)
|
||||
{
|
||||
CONSIST_HEAP(test_unmap_range_shrink());
|
||||
}
|
||||
|
||||
static rt_err_t utest_tc_init(void)
|
||||
{
|
||||
lwp = lwp_create(0);
|
||||
if (lwp)
|
||||
lwp_user_space_init(lwp, 1);
|
||||
else
|
||||
return -RT_ENOMEM;
|
||||
return RT_EOK;
|
||||
}
|
||||
|
||||
static rt_err_t utest_tc_cleanup(void)
|
||||
{
|
||||
lwp_ref_dec(lwp);
|
||||
return RT_EOK;
|
||||
}
|
||||
|
||||
static void testcase(void)
|
||||
{
|
||||
UTEST_UNIT_RUN(aspace_unmap_tc);
|
||||
}
|
||||
UTEST_TC_EXPORT(testcase, "testcases.mm.aspace_unmap_range.shrink", utest_tc_init, utest_tc_cleanup, 10);
|
|
@ -0,0 +1,65 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2023, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2023-08-17 Shell test case for aspace_unmap_range
|
||||
*/
|
||||
#include "common.h"
|
||||
#include "lwp_user_mm.h"
|
||||
#include <mm_aspace.h>
|
||||
|
||||
#include <rtthread.h>
|
||||
|
||||
static void *vaddr = (void *)0x100000000;
|
||||
static size_t existed_size = 0x5000;
|
||||
static char *unmap_start = (char *)0x100000000 + 0x3000;
|
||||
static size_t former_vsz;
|
||||
static size_t unmap_size = 0x1000;
|
||||
static struct rt_lwp *lwp;
|
||||
|
||||
static void test_unmap_range_split(void)
|
||||
{
|
||||
/* create an existed mapping */
|
||||
former_vsz = rt_aspace_count_vsz(lwp->aspace);
|
||||
uassert_true(!rt_aspace_map(lwp->aspace, &vaddr, existed_size, MMU_MAP_K_RWCB, MMF_PREFETCH, &rt_mm_dummy_mapper, 0));
|
||||
utest_int_equal(former_vsz + existed_size, rt_aspace_count_vsz(lwp->aspace));
|
||||
former_vsz += existed_size;
|
||||
|
||||
/* test the split mode of unmap */
|
||||
utest_int_equal(RT_EOK, rt_aspace_unmap_range(lwp->aspace, unmap_start, unmap_size));
|
||||
uassert_true(rt_hw_mmu_v2p(lwp->aspace, unmap_start - 1) != ARCH_MAP_FAILED);
|
||||
uassert_true(rt_hw_mmu_v2p(lwp->aspace, unmap_start + unmap_size) != ARCH_MAP_FAILED);
|
||||
utest_int_equal(former_vsz - unmap_size, rt_aspace_count_vsz(lwp->aspace));
|
||||
|
||||
utest_int_equal(RT_EOK, rt_aspace_unmap_range(lwp->aspace, vaddr, existed_size));
|
||||
}
|
||||
|
||||
static void aspace_unmap_tc(void)
|
||||
{
|
||||
CONSIST_HEAP(test_unmap_range_split());
|
||||
}
|
||||
|
||||
static rt_err_t utest_tc_init(void)
|
||||
{
|
||||
lwp = lwp_create(0);
|
||||
if (lwp)
|
||||
lwp_user_space_init(lwp, 1);
|
||||
else
|
||||
return -RT_ENOMEM;
|
||||
return RT_EOK;
|
||||
}
|
||||
|
||||
static rt_err_t utest_tc_cleanup(void)
|
||||
{
|
||||
lwp_ref_dec(lwp);
|
||||
return RT_EOK;
|
||||
}
|
||||
|
||||
static void testcase(void)
|
||||
{
|
||||
UTEST_UNIT_RUN(aspace_unmap_tc);
|
||||
}
|
||||
UTEST_TC_EXPORT(testcase, "testcases.mm.aspace_unmap_range.split", utest_tc_init, utest_tc_cleanup, 10);
|
|
@@ -35,6 +35,11 @@
extern rt_base_t rt_heap_lock(void);
extern void rt_heap_unlock(rt_base_t level);

#define __int_compare(a, b, operator) do{long _a = (long)(a); long _b = (long)(b); __utest_assert((_a) operator (_b), "Assertion Failed: (" #a ") "#operator" (" #b ")"); if (!((_a) operator (_b)))LOG_E("\t"#a"=%ld(0x%lx), "#b"=%ld(0x%lx)", _a, _a, _b, _b);} while (0)
#define utest_int_equal(a, b) __int_compare(a, b, ==)
#define utest_int_less(a, b) __int_compare(a, b, <)
#define utest_int_less_equal(a, b) __int_compare(a, b, <=)

/**
 * @brief During the operations, is heap still the same;
 */
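A short usage sketch (not part of the change) of the new comparison helpers: unlike a bare uassert_true(a == b), a failing utest_int_equal(a, b) also logs both operands in decimal and hex. The wrapper function below is hypothetical; rt_aspace_count_vsz() is the accessor used by the test cases elsewhere in this change.

/* sketch: how the helpers read inside a test body */
static void example_vsz_check(rt_aspace_t aspace, size_t former_vsz, size_t map_size)
{
    /* on mismatch, the macro prints something like:
     *   former_vsz + map_size=24576(0x6000), rt_aspace_count_vsz(aspace)=20480(0x5000) */
    utest_int_equal(former_vsz + map_size, rt_aspace_count_vsz(aspace));
    utest_int_less_equal(former_vsz, rt_aspace_count_vsz(aspace));
}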
|
||||
|
@@ -46,11 +51,19 @@ extern void rt_heap_unlock(rt_base_t level);
        statement; \
        rt_memory_info(&totala, &useda, &max_useda); \
        rt_heap_unlock(level); \
        uassert_true(total == totala); \
        uassert_true(used == useda); \
        uassert_true(max_used == max_useda); \
        utest_int_equal(total, totala); \
        utest_int_equal(used, useda); \
    } while (0)

#ifdef STANDALONE_TC
#define TC_ASSERT(expr) \
    ((expr) \
         ? 0 \
         : rt_kprintf("AssertFault(%d): %s\n", __LINE__, RT_STRINGIFY(expr)))
#else
#define TC_ASSERT(expr) uassert_true(expr)
#endif

rt_inline int memtest(volatile char *buf, int value, size_t buf_sz)
{
    int ret = 0;
|
||||
|
|
|
@ -0,0 +1,109 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2023, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2023-08-22 Shell test case for aspace_map with varea_expand
|
||||
*/
|
||||
#include "common.h"
|
||||
#include "lwp_user_mm.h"
|
||||
#include <mm_aspace.h>
|
||||
|
||||
#include <rtthread.h>
|
||||
|
||||
static long fd = -1;
|
||||
static long pgoffset = 0;
|
||||
static size_t flags = MAP_FIXED | MAP_ANONYMOUS;
|
||||
static size_t prot1 = PROT_READ | PROT_WRITE;
|
||||
|
||||
static char *ex_vaddr = (void *)0x100000000;
|
||||
static size_t map_size = 0x3000;
|
||||
|
||||
static size_t former_vsz;
|
||||
static size_t former_vcount;
|
||||
|
||||
static struct rt_lwp *lwp;
|
||||
|
||||
static int _count_vsz(rt_varea_t varea, void *arg)
|
||||
{
|
||||
rt_base_t *pvsz = arg;
|
||||
*pvsz += 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static rt_base_t count_vcount(rt_aspace_t aspace)
|
||||
{
|
||||
rt_base_t vcount = 0;
|
||||
rt_aspace_traversal(aspace, _count_vsz, &vcount);
|
||||
return vcount;
|
||||
}
|
||||
|
||||
static void test_mmap_expand(void)
|
||||
{
|
||||
char *next_va;
|
||||
|
||||
/* map new pages at ex_vaddr to anonymous */
|
||||
next_va = ex_vaddr;
|
||||
former_vsz = rt_aspace_count_vsz(lwp->aspace);
|
||||
former_vcount = count_vcount(lwp->aspace);
|
||||
next_va = lwp_mmap2(lwp, next_va, map_size, prot1, flags, fd, pgoffset);
|
||||
uassert_true(next_va == ex_vaddr);
|
||||
utest_int_equal(former_vsz + map_size, rt_aspace_count_vsz(lwp->aspace));
|
||||
utest_int_equal(former_vcount + 1, count_vcount(lwp->aspace));
|
||||
former_vsz += map_size;
|
||||
former_vcount += 1;
|
||||
|
||||
/* test the RIGHT side expansion of varea by lwp_mmap2 */
|
||||
next_va = ex_vaddr + map_size;
|
||||
uassert_true(
|
||||
lwp_mmap2(lwp, next_va, map_size, prot1, flags, fd, pgoffset)
|
||||
== next_va
|
||||
);
|
||||
utest_int_equal(former_vsz + map_size, rt_aspace_count_vsz(lwp->aspace));
|
||||
utest_int_equal(former_vcount, count_vcount(lwp->aspace));
|
||||
former_vsz += map_size;
|
||||
|
||||
/* test the LEFT side expansion of varea by rt_aspace_map */
|
||||
next_va = ex_vaddr - map_size;
|
||||
uassert_true(
|
||||
lwp_mmap2(lwp, next_va, map_size, prot1, flags, fd, pgoffset)
|
||||
== next_va
|
||||
);
|
||||
utest_int_equal(former_vsz + map_size, rt_aspace_count_vsz(lwp->aspace));
|
||||
utest_int_equal(former_vcount, count_vcount(lwp->aspace));
|
||||
former_vsz += map_size;
|
||||
|
||||
/* test other prot/offset/flags */
|
||||
|
||||
/* clear mapping */
|
||||
utest_int_equal(RT_EOK, rt_aspace_unmap_range(lwp->aspace, next_va, 3 * map_size));
|
||||
}
|
||||
|
||||
static void aspace_map_tc(void)
|
||||
{
|
||||
CONSIST_HEAP(test_mmap_expand());
|
||||
}
|
||||
|
||||
static rt_err_t utest_tc_init(void)
|
||||
{
|
||||
lwp = lwp_create(0);
|
||||
if (lwp)
|
||||
lwp_user_space_init(lwp, 1);
|
||||
else
|
||||
return -RT_ENOMEM;
|
||||
return RT_EOK;
|
||||
}
|
||||
|
||||
static rt_err_t utest_tc_cleanup(void)
|
||||
{
|
||||
lwp_ref_dec(lwp);
|
||||
return RT_EOK;
|
||||
}
|
||||
|
||||
static void testcase(void)
|
||||
{
|
||||
UTEST_UNIT_RUN(aspace_map_tc);
|
||||
}
|
||||
UTEST_TC_EXPORT(testcase, "testcases.lwp.mman.mmap_anon.expand", utest_tc_init, utest_tc_cleanup, 10);
|
|
@ -0,0 +1,140 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2023, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2023-08-17 Shell test case for aspace_map(MAP_FIXED)
|
||||
*/
|
||||
#include "common.h"
|
||||
#include "lwp_user_mm.h"
|
||||
#include "utest_assert.h"
|
||||
#include <mm_private.h>
|
||||
|
||||
#include <rtthread.h>
|
||||
|
||||
#define PAGE_SZ (1 << MM_PAGE_SHIFT)
|
||||
#define PAGE_COUNT ('z' - 'a' + 1)
|
||||
#define FILE_PATH "/test_mmap"
|
||||
#define FILE_SZ (PAGE_COUNT * PAGE_SZ)
|
||||
|
||||
static struct rt_lwp *lwp;
|
||||
static size_t former_vsz;
|
||||
static size_t former_vcount;
|
||||
static char page_sz_buf[PAGE_SZ];
|
||||
|
||||
static void *vaddr = (void *)0x100000000;
|
||||
static long pgoffset = 0;
|
||||
static size_t ex_prot = PROT_NONE;
|
||||
static size_t ex_flags = MAP_PRIVATE | MAP_ANONYMOUS;
|
||||
|
||||
static int _count_vsz(rt_varea_t varea, void *arg)
|
||||
{
|
||||
rt_base_t *pvsz = arg;
|
||||
*pvsz += 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static rt_base_t count_vcount(rt_aspace_t aspace)
|
||||
{
|
||||
rt_base_t vcount = 0;
|
||||
rt_aspace_traversal(aspace, _count_vsz, &vcount);
|
||||
return vcount;
|
||||
}
|
||||
|
||||
static rt_err_t _lwp_get_user(struct rt_lwp *lwp, char *vaddr, char *buffer)
|
||||
{
|
||||
rt_varea_t varea = _aspace_bst_search(lwp->aspace, vaddr);
|
||||
if (varea && varea->mem_obj && varea->mem_obj->page_read)
|
||||
{
|
||||
struct rt_aspace_io_msg io_msg;
|
||||
rt_mm_io_msg_init(&io_msg, MM_PA_TO_OFF(vaddr), vaddr, buffer);
|
||||
varea->mem_obj->page_read(varea, &io_msg);
|
||||
}
|
||||
return RT_EOK;
|
||||
}
|
||||
|
||||
static void _verify_file_content(const char *mmap_buf)
|
||||
{
|
||||
char ch = 'a';
|
||||
for (char *read_va = (char *)mmap_buf; read_va < mmap_buf + FILE_SZ; read_va += PAGE_SZ, ch++)
|
||||
{
|
||||
_lwp_get_user(lwp, read_va, page_sz_buf);
|
||||
utest_int_equal(RT_EOK, memtest(page_sz_buf, ch, PAGE_SZ));
|
||||
}
|
||||
}
|
||||
|
||||
static void test_mmap_fd(void)
|
||||
{
|
||||
former_vsz = rt_aspace_count_vsz(lwp->aspace);
|
||||
former_vcount = count_vcount(lwp->aspace);
|
||||
|
||||
/* create an existed mapping */
|
||||
long temp_fd;
|
||||
temp_fd = open(FILE_PATH, O_RDONLY);
|
||||
LOG_D("New fd=%ld path=%s", temp_fd, FILE_PATH);
|
||||
uassert_true(temp_fd >= 0);
|
||||
utest_int_equal(
|
||||
lwp_mmap2(lwp, vaddr, FILE_SZ, ex_prot, ex_flags, temp_fd, pgoffset),
|
||||
vaddr);
|
||||
close(temp_fd);
|
||||
|
||||
utest_int_equal(former_vsz + FILE_SZ, rt_aspace_count_vsz(lwp->aspace));
|
||||
utest_int_equal(former_vcount + 1, count_vcount(lwp->aspace));
|
||||
|
||||
_verify_file_content(vaddr);
|
||||
|
||||
/* create an override mapping */
|
||||
|
||||
/* close */
|
||||
|
||||
utest_int_equal(RT_EOK, rt_aspace_unmap_range(lwp->aspace, vaddr, FILE_SZ));
|
||||
}
|
||||
|
||||
static void testcase_main(void)
|
||||
{
|
||||
test_mmap_fd();
|
||||
}
|
||||
|
||||
static void _setup_file_content(long fd)
|
||||
{
|
||||
char ch = 'a';
|
||||
|
||||
for (size_t i = 0; i < PAGE_COUNT; i++, ch++)
|
||||
{
|
||||
memset(page_sz_buf, ch, PAGE_SZ);
|
||||
write(fd, page_sz_buf, PAGE_SZ);
|
||||
}
|
||||
}
|
||||
|
||||
static rt_err_t utest_tc_init(void)
|
||||
{
|
||||
/* setup file */
|
||||
long temp_file_des;
|
||||
temp_file_des = open(FILE_PATH, O_RDWR | O_CREAT, 0777);
|
||||
LOG_D("New fd=%ld path=%s", temp_file_des, FILE_PATH);
|
||||
if (temp_file_des < 0)
|
||||
return -RT_ERROR;
|
||||
_setup_file_content(temp_file_des);
|
||||
close(temp_file_des);
|
||||
|
||||
lwp = lwp_create(0);
|
||||
if (lwp)
|
||||
lwp_user_space_init(lwp, 1);
|
||||
else
|
||||
return -RT_ENOMEM;
|
||||
return RT_EOK;
|
||||
}
|
||||
|
||||
static rt_err_t utest_tc_cleanup(void)
|
||||
{
|
||||
lwp_ref_dec(lwp);
|
||||
return RT_EOK;
|
||||
}
|
||||
|
||||
static void testcase(void)
|
||||
{
|
||||
UTEST_UNIT_RUN(testcase_main);
|
||||
}
|
||||
UTEST_TC_EXPORT(testcase, "testcases.lwp.mman.mmap_fd.basic", utest_tc_init, utest_tc_cleanup, 10);
|
|
@ -0,0 +1,214 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2023, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2023-08-17 Shell test case for aspace_map(MAP_FIXED)
|
||||
*/
|
||||
#include "common.h"
|
||||
#include "lwp_user_mm.h"
|
||||
#include "utest_assert.h"
|
||||
#include <mm_private.h>
|
||||
|
||||
#include <rtthread.h>
|
||||
|
||||
#define PAGE_SZ (1 << MM_PAGE_SHIFT)
|
||||
#define PAGE_COUNT ('z' - 'a' + 1)
|
||||
#define FILE_PATH "/test_mmap"
|
||||
#define FILE_SZ (PAGE_COUNT * PAGE_SZ)
|
||||
|
||||
static struct rt_lwp *lwp;
|
||||
static size_t former_vsz;
|
||||
static size_t former_vcount;
|
||||
static char page_sz_buf[PAGE_SZ];
|
||||
|
||||
static void *ex_start = (void *)0x100000000;
|
||||
static size_t ex_size = 0x5000;
|
||||
static long pgoffset = 0;
|
||||
static size_t ex_prot = PROT_NONE;
|
||||
static size_t ex_flags = MAP_PRIVATE | MAP_ANONYMOUS;
|
||||
|
||||
static char *private0 = (char *)0x100000000;
|
||||
static char *private1 = (char *)0x100000000 + 0x1000;
|
||||
static char *private2 = (char *)0x100000000 + 0x2000;
|
||||
static char *private3 = (char *)0x100000000 + 0x3000;
|
||||
static char *private4 = (char *)0x100000000 + 0x4000;
|
||||
static size_t or_size = 0x1000;
|
||||
static size_t or_prot = PROT_READ | PROT_WRITE;
|
||||
static size_t or_flags = MAP_ANON | MAP_FIXED;
|
||||
|
||||
static long anon_fd = -1;
|
||||
|
||||
static int _count_vsz(rt_varea_t varea, void *arg)
|
||||
{
|
||||
rt_base_t *pvsz = arg;
|
||||
*pvsz += 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static rt_base_t count_vcount(rt_aspace_t aspace)
|
||||
{
|
||||
rt_base_t vcount = 0;
|
||||
rt_aspace_traversal(aspace, _count_vsz, &vcount);
|
||||
return vcount;
|
||||
}
|
||||
|
||||
static rt_err_t _lwp_get_user(struct rt_lwp *lwp, char *vaddr, char *buffer)
|
||||
{
|
||||
rt_varea_t varea = _aspace_bst_search(lwp->aspace, vaddr);
|
||||
if (varea && varea->mem_obj && varea->mem_obj->page_read)
|
||||
{
|
||||
struct rt_aspace_io_msg io_msg;
|
||||
rt_mm_io_msg_init(&io_msg, MM_PA_TO_OFF(vaddr), vaddr, buffer);
|
||||
varea->mem_obj->page_read(varea, &io_msg);
|
||||
}
|
||||
return RT_EOK;
|
||||
}
|
||||
|
||||
static void _verify_file_content(struct rt_lwp *lwp, const char *mmap_buf, int ch)
|
||||
{
|
||||
_lwp_get_user(lwp, (char *)mmap_buf, page_sz_buf);
|
||||
utest_int_equal(RT_EOK, memtest(page_sz_buf, ch, PAGE_SZ));
|
||||
}
|
||||
|
||||
static void test_mmap_fd_fixed(void)
|
||||
{
|
||||
former_vsz = rt_aspace_count_vsz(lwp->aspace);
|
||||
former_vcount = count_vcount(lwp->aspace);
|
||||
|
||||
/* create an existed mapping */
|
||||
long temp_fd;
|
||||
temp_fd = open(FILE_PATH, O_RDONLY);
|
||||
LOG_D("New fd=%ld path=%s", temp_fd, FILE_PATH);
|
||||
uassert_true(temp_fd >= 0);
|
||||
utest_int_equal(
|
||||
lwp_mmap2(lwp, ex_start, ex_size, ex_prot, ex_flags, anon_fd, pgoffset),
|
||||
ex_start);
|
||||
utest_int_equal(former_vsz + ex_size, rt_aspace_count_vsz(lwp->aspace));
|
||||
utest_int_equal(former_vcount + 1, count_vcount(lwp->aspace));
|
||||
former_vsz += ex_size;
|
||||
former_vcount += 1;
|
||||
_verify_file_content(lwp, private0, 0);
|
||||
_verify_file_content(lwp, private1, 0);
|
||||
_verify_file_content(lwp, private2, 0);
|
||||
_verify_file_content(lwp, private3, 0);
|
||||
_verify_file_content(lwp, private4, 0);
|
||||
|
||||
/* create an override mapping */
|
||||
utest_int_equal(
|
||||
lwp_mmap2(lwp, private2, or_size, or_prot, or_flags, temp_fd, 2),
|
||||
private2);
|
||||
utest_int_equal(former_vsz, rt_aspace_count_vsz(lwp->aspace));
|
||||
utest_int_equal(former_vcount + 2, count_vcount(lwp->aspace));
|
||||
former_vcount += 2;
|
||||
_verify_file_content(lwp, private0, 0);
|
||||
_verify_file_content(lwp, private1, 0);
|
||||
_verify_file_content(lwp, private2, 'c');
|
||||
_verify_file_content(lwp, private3, 0);
|
||||
_verify_file_content(lwp, private4, 0);
|
||||
|
||||
/* fix private from left most */
|
||||
utest_int_equal(
|
||||
lwp_mmap2(lwp, private0, or_size, or_prot, or_flags, temp_fd, 0),
|
||||
private0);
|
||||
utest_int_equal(former_vsz, rt_aspace_count_vsz(lwp->aspace));
|
||||
utest_int_equal(former_vcount + 1, count_vcount(lwp->aspace));
|
||||
former_vcount += 1;
|
||||
_verify_file_content(lwp, private0, 'a');
|
||||
_verify_file_content(lwp, private1, 0);
|
||||
_verify_file_content(lwp, private2, 'c');
|
||||
_verify_file_content(lwp, private3, 0);
|
||||
_verify_file_content(lwp, private4, 0);
|
||||
|
||||
/* fix private from right most */
|
||||
utest_int_equal(
|
||||
lwp_mmap2(lwp, private4, or_size, or_prot, or_flags, temp_fd, 4),
|
||||
private4);
|
||||
utest_int_equal(former_vsz, rt_aspace_count_vsz(lwp->aspace));
|
||||
utest_int_equal(former_vcount + 1, count_vcount(lwp->aspace));
|
||||
former_vcount += 1;
|
||||
_verify_file_content(lwp, private0, 'a');
|
||||
_verify_file_content(lwp, private1, 0);
|
||||
_verify_file_content(lwp, private2, 'c');
|
||||
_verify_file_content(lwp, private3, 0);
|
||||
_verify_file_content(lwp, private4, 'e');
|
||||
|
||||
/* fix private from left-middle */
|
||||
utest_int_equal(
|
||||
lwp_mmap2(lwp, private1, or_size, or_prot, or_flags, temp_fd, 1),
|
||||
private1);
|
||||
rt_aspace_print_all(lwp->aspace);
|
||||
utest_int_equal(former_vsz, rt_aspace_count_vsz(lwp->aspace));
|
||||
utest_int_equal(former_vcount - 1, count_vcount(lwp->aspace));
|
||||
former_vcount -= 1;
|
||||
_verify_file_content(lwp, private0, 'a');
|
||||
_verify_file_content(lwp, private1, 'b');
|
||||
_verify_file_content(lwp, private2, 'c');
|
||||
_verify_file_content(lwp, private3, 0);
|
||||
_verify_file_content(lwp, private4, 'e');
|
||||
|
||||
/* fix private from right-middle */
|
||||
utest_int_equal(
|
||||
lwp_mmap2(lwp, private3, or_size, or_prot, or_flags, temp_fd, 3),
|
||||
private3);
|
||||
utest_int_equal(former_vsz, rt_aspace_count_vsz(lwp->aspace));
|
||||
utest_int_equal(former_vcount - 1, count_vcount(lwp->aspace));
|
||||
former_vcount -= 1;
|
||||
_verify_file_content(lwp, private0, 'a');
|
||||
_verify_file_content(lwp, private1, 'b');
|
||||
_verify_file_content(lwp, private2, 'c');
|
||||
_verify_file_content(lwp, private3, 'd');
|
||||
_verify_file_content(lwp, private4, 'e');
|
||||
|
||||
/* close */
|
||||
close(temp_fd);
|
||||
utest_int_equal(RT_EOK, rt_aspace_unmap_range(lwp->aspace, ex_start, FILE_SZ));
|
||||
}
|
||||
|
||||
static void testcase_main(void)
|
||||
{
|
||||
test_mmap_fd_fixed();
|
||||
}
|
||||
|
||||
static void _setup_file_content(long fd)
|
||||
{
|
||||
char ch = 'a';
|
||||
for (size_t i = 0; i < PAGE_COUNT; i++, ch++)
|
||||
{
|
||||
memset(page_sz_buf, ch, PAGE_SZ);
|
||||
write(fd, page_sz_buf, PAGE_SZ);
|
||||
}
|
||||
}
|
||||
|
||||
static rt_err_t utest_tc_init(void)
|
||||
{
|
||||
/* setup file */
|
||||
long temp_file_des;
|
||||
temp_file_des = open(FILE_PATH, O_RDWR | O_CREAT, 0777);
|
||||
LOG_D("New fd=%ld path=%s", temp_file_des, FILE_PATH);
|
||||
if (temp_file_des < 0)
|
||||
return -RT_ERROR;
|
||||
_setup_file_content(temp_file_des);
|
||||
close(temp_file_des);
|
||||
|
||||
lwp = lwp_create(0);
|
||||
if (lwp)
|
||||
lwp_user_space_init(lwp, 1);
|
||||
else
|
||||
return -RT_ENOMEM;
|
||||
return RT_EOK;
|
||||
}
|
||||
|
||||
static rt_err_t utest_tc_cleanup(void)
|
||||
{
|
||||
lwp_ref_dec(lwp);
|
||||
return RT_EOK;
|
||||
}
|
||||
|
||||
static void testcase(void)
|
||||
{
|
||||
UTEST_UNIT_RUN(testcase_main);
|
||||
}
|
||||
UTEST_TC_EXPORT(testcase, "testcases.lwp.mman.mmap_fd.map_fixed_merge", utest_tc_init, utest_tc_cleanup, 10);
|
|
@ -0,0 +1,215 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2023, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2023-08-17 Shell test case for aspace_map(MAP_FIXED)
|
||||
*/
|
||||
#include "common.h"
|
||||
#include "lwp_user_mm.h"
|
||||
#include "utest_assert.h"
|
||||
#include <mm_private.h>
|
||||
|
||||
#include <rtthread.h>
|
||||
|
||||
#define PAGE_SZ (1 << MM_PAGE_SHIFT)
|
||||
#define PAGE_COUNT ('z' - 'a' + 1)
|
||||
#define FILE_PATH "/test_mmap"
|
||||
#define FILE_SZ (PAGE_COUNT * PAGE_SZ)
|
||||
|
||||
static struct rt_lwp *lwp;
|
||||
static size_t former_vsz;
|
||||
static size_t former_vcount;
|
||||
static char page_sz_buf[PAGE_SZ];
|
||||
|
||||
static void *ex_start = (void *)0x100000000;
|
||||
static size_t ex_size = 0x5000;
|
||||
static long pgoffset = 0;
|
||||
static size_t ex_prot = PROT_NONE;
|
||||
static size_t ex_flags = MAP_PRIVATE | MAP_ANONYMOUS;
|
||||
|
||||
static char *private0 = (char *)0x100000000;
|
||||
static char *private1 = (char *)0x100000000 + 0x1000;
|
||||
static char *private2 = (char *)0x100000000 + 0x2000;
|
||||
static char *private3 = (char *)0x100000000 + 0x3000;
|
||||
static char *private4 = (char *)0x100000000 + 0x4000;
|
||||
static size_t or_size = 0x1000;
|
||||
static size_t or_prot = PROT_READ | PROT_WRITE;
|
||||
static size_t or_flags = MAP_ANON | MAP_FIXED;
|
||||
|
||||
static long anon_fd = -1;
|
||||
|
||||
static int _count_vsz(rt_varea_t varea, void *arg)
|
||||
{
|
||||
rt_base_t *pvsz = arg;
|
||||
*pvsz += 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static rt_base_t count_vcount(rt_aspace_t aspace)
|
||||
{
|
||||
rt_base_t vcount = 0;
|
||||
rt_aspace_traversal(aspace, _count_vsz, &vcount);
|
||||
return vcount;
|
||||
}
|
||||
|
||||
static rt_err_t _lwp_get_user(struct rt_lwp *lwp, char *vaddr, char *buffer)
|
||||
{
|
||||
rt_varea_t varea = _aspace_bst_search(lwp->aspace, vaddr);
|
||||
if (varea && varea->mem_obj && varea->mem_obj->page_read)
|
||||
{
|
||||
struct rt_aspace_io_msg io_msg;
|
||||
rt_mm_io_msg_init(&io_msg, MM_PA_TO_OFF(vaddr), vaddr, buffer);
|
||||
varea->mem_obj->page_read(varea, &io_msg);
|
||||
}
|
||||
else
|
||||
return -RT_ERROR;
|
||||
return RT_EOK;
|
||||
}
|
||||
|
||||
static void _verify_file_content(struct rt_lwp *lwp, const char *mmap_buf, int ch)
|
||||
{
|
||||
utest_int_equal(RT_EOK, _lwp_get_user(lwp, (char *)mmap_buf, page_sz_buf));
|
||||
utest_int_equal(RT_EOK, memtest(page_sz_buf, ch, PAGE_SZ));
|
||||
}
|
||||
|
||||
static void test_mmap_fd_fixed(void)
{
    former_vsz = rt_aspace_count_vsz(lwp->aspace);
    former_vcount = count_vcount(lwp->aspace);

    /* create an existing mapping */
    long temp_fd;
    temp_fd = open(FILE_PATH, O_RDONLY);
    LOG_D("New fd=%ld path=%s", temp_fd, FILE_PATH);
    uassert_true(temp_fd >= 0);
    utest_int_equal(
        lwp_mmap2(lwp, ex_start, ex_size, ex_prot, ex_flags, temp_fd, pgoffset),
        ex_start);
    utest_int_equal(former_vsz + ex_size, rt_aspace_count_vsz(lwp->aspace));
    utest_int_equal(former_vcount + 1, count_vcount(lwp->aspace));
    _verify_file_content(lwp, private0, 'a');
    _verify_file_content(lwp, private1, 'b');
    _verify_file_content(lwp, private2, 'c');
    _verify_file_content(lwp, private3, 'd');
    _verify_file_content(lwp, private4, 'e');
    former_vsz += ex_size;
    former_vcount += 1;

    /* create an override mapping */
    utest_int_equal(
        lwp_mmap2(lwp, private2, or_size, or_prot, or_flags, anon_fd, pgoffset),
        private2);
    utest_int_equal(former_vsz, rt_aspace_count_vsz(lwp->aspace));
    utest_int_equal(former_vcount + 2, count_vcount(lwp->aspace));
    former_vcount += 2;
    _verify_file_content(lwp, private0, 'a');
    _verify_file_content(lwp, private1, 'b');
    _verify_file_content(lwp, private2, 0);
    _verify_file_content(lwp, private3, 'd');
    _verify_file_content(lwp, private4, 'e');

    /* fix private from the leftmost page */
    utest_int_equal(
        lwp_mmap2(lwp, private0, or_size, or_prot, or_flags, anon_fd, pgoffset),
        private0);
    utest_int_equal(former_vsz, rt_aspace_count_vsz(lwp->aspace));
    utest_int_equal(former_vcount + 1, count_vcount(lwp->aspace));
    former_vcount += 1;
    _verify_file_content(lwp, private0, 0);
    _verify_file_content(lwp, private1, 'b');
    _verify_file_content(lwp, private2, 0);
    _verify_file_content(lwp, private3, 'd');
    _verify_file_content(lwp, private4, 'e');

    /* fix private from the rightmost page */
    utest_int_equal(
        lwp_mmap2(lwp, private4, or_size, or_prot, or_flags, anon_fd, pgoffset),
        private4);
    utest_int_equal(former_vsz, rt_aspace_count_vsz(lwp->aspace));
    utest_int_equal(former_vcount + 1, count_vcount(lwp->aspace));
    former_vcount += 1;
    _verify_file_content(lwp, private0, 0);
    _verify_file_content(lwp, private1, 'b');
    _verify_file_content(lwp, private2, 0);
    _verify_file_content(lwp, private3, 'd');
    _verify_file_content(lwp, private4, 0);

    /* fix private from the left-middle page */
    utest_int_equal(
        lwp_mmap2(lwp, private1, or_size, or_prot, or_flags, anon_fd, pgoffset),
        private1);
    utest_int_equal(former_vsz, rt_aspace_count_vsz(lwp->aspace));
    utest_int_equal(former_vcount - 1, count_vcount(lwp->aspace));
    former_vcount -= 1;
    _verify_file_content(lwp, private0, 0);
    _verify_file_content(lwp, private1, 0);
    _verify_file_content(lwp, private2, 0);
    _verify_file_content(lwp, private3, 'd');
    _verify_file_content(lwp, private4, 0);

    /* fix private from the right-middle page */
    utest_int_equal(
        lwp_mmap2(lwp, private3, or_size, or_prot, or_flags, anon_fd, pgoffset),
        private3);
    utest_int_equal(former_vsz, rt_aspace_count_vsz(lwp->aspace));
    utest_int_equal(former_vcount - 1, count_vcount(lwp->aspace));
    former_vcount -= 1;
    _verify_file_content(lwp, private0, 0);
    _verify_file_content(lwp, private1, 0);
    _verify_file_content(lwp, private2, 0);
    _verify_file_content(lwp, private3, 0);
    _verify_file_content(lwp, private4, 0);

    /* close */
    close(temp_fd);
    utest_int_equal(RT_EOK, rt_aspace_unmap_range(lwp->aspace, ex_start, FILE_SZ));
}

static void testcase_main(void)
{
    test_mmap_fd_fixed();
}

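/* fill the test file with PAGE_COUNT pages, page i written with the character 'a' + i */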
static void _setup_file_content(long fd)
{
    char ch = 'a';
    for (size_t i = 0; i < PAGE_COUNT; i++, ch++)
    {
        memset(page_sz_buf, ch, PAGE_SZ);
        write(fd, page_sz_buf, PAGE_SZ);
    }
}

static rt_err_t utest_tc_init(void)
{
    /* setup file */
    long temp_file_des;
    temp_file_des = open(FILE_PATH, O_RDWR | O_CREAT, 0777);
    LOG_D("New fd=%ld path=%s", temp_file_des, FILE_PATH);
    if (temp_file_des < 0)
        return -RT_ERROR;
    _setup_file_content(temp_file_des);
    close(temp_file_des);

    lwp = lwp_create(0);
    if (lwp)
        lwp_user_space_init(lwp, 1);
    else
        return -RT_ENOMEM;
    return RT_EOK;
}

static rt_err_t utest_tc_cleanup(void)
{
    lwp_ref_dec(lwp);
    return RT_EOK;
}

static void testcase(void)
{
    UTEST_UNIT_RUN(testcase_main);
}
UTEST_TC_EXPORT(testcase, "testcases.lwp.mman.mmap_fd.map_fixed_split", utest_tc_init, utest_tc_cleanup, 10);

@ -0,0 +1,138 @@
/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-08-22     Shell        test case for aspace_map with varea_expand
 */
#include "common.h"
#include "lwp_user_mm.h"
#include "mm_fault.h"
#include <mm_aspace.h>

#include <rtthread.h>

static long fd = -1;
static long pgoffset = 0;
static size_t flags = MAP_FIXED | MAP_ANONYMOUS;
static size_t prot = PROT_READ | PROT_WRITE;

static char *ex_vaddr = (char *)0x100000000;
static size_t ex_size = 0x5000;
static char *private0 = (char *)0x100000000;
static char *private1 = (char *)0x100000000 + 0x1000;
static char *private2 = (char *)0x100000000 + 0x2000;
static char *private3 = (char *)0x100000000 + 0x3000;
static char *private4 = (char *)0x100000000 + 0x4000;

/**
 * todo: support prefetching pages, so that more than one page can be installed
 * into the private mapping at a time
 * static size_t priv_size = 0x1000;
 */

static size_t former_vsz;
static size_t former_vcount;

static struct rt_lwp *lwp;

static int _count_vsz(rt_varea_t varea, void *arg)
{
    rt_base_t *pvsz = arg;
    *pvsz += 1;
    return 0;
}

static rt_base_t count_vcount(rt_aspace_t aspace)
{
    rt_base_t vcount = 0;
    rt_aspace_traversal(aspace, _count_vsz, &vcount);
    return vcount;
}

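/* map an anonymous MAP_FIXED region, then raise write faults on individual pages
 * with rt_aspace_fault_try_fix() and track how the varea count changes as private
 * pages are installed (splits) and neighboring pieces are merged back */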
static void test_mmap_fix_private(void)
{
    char *next_va;
    struct rt_aspace_fault_msg msg;
    msg.fault_op = MM_FAULT_OP_WRITE;
    msg.fault_type = MM_FAULT_TYPE_ACCESS_FAULT;

    /* map new anonymous pages at ex_vaddr */
    next_va = ex_vaddr;
    former_vsz = rt_aspace_count_vsz(lwp->aspace);
    former_vcount = count_vcount(lwp->aspace);
    next_va = lwp_mmap2(lwp, next_va, ex_size, prot, flags, fd, pgoffset);
    uassert_true(next_va == ex_vaddr);
    utest_int_equal(former_vsz + ex_size, rt_aspace_count_vsz(lwp->aspace));
    utest_int_equal(former_vcount + 1, count_vcount(lwp->aspace));
    former_vsz += ex_size;
    former_vcount += 1;

    /* fix private in the middle */
    msg.fault_vaddr = private2;
    utest_int_equal(MM_FAULT_FIXABLE_TRUE, rt_aspace_fault_try_fix(lwp->aspace, &msg));
    utest_int_equal(former_vsz, rt_aspace_count_vsz(lwp->aspace));
    utest_int_equal(former_vcount + 2, count_vcount(lwp->aspace));
    former_vcount += 2;

    /* fix private from the leftmost page */
    msg.fault_vaddr = private0;
    utest_int_equal(MM_FAULT_FIXABLE_TRUE, rt_aspace_fault_try_fix(lwp->aspace, &msg));
    utest_int_equal(former_vsz, rt_aspace_count_vsz(lwp->aspace));
    utest_int_equal(former_vcount + 1, count_vcount(lwp->aspace));
    former_vcount += 1;

    /* fix private from the rightmost page */
    msg.fault_vaddr = private4;
    utest_int_equal(MM_FAULT_FIXABLE_TRUE, rt_aspace_fault_try_fix(lwp->aspace, &msg));
    utest_int_equal(former_vsz, rt_aspace_count_vsz(lwp->aspace));
    utest_int_equal(former_vcount + 1, count_vcount(lwp->aspace));
    former_vcount += 1;

    /* fix private from the left-middle page */
    msg.fault_vaddr = private1;
    utest_int_equal(MM_FAULT_FIXABLE_TRUE, rt_aspace_fault_try_fix(lwp->aspace, &msg));
    utest_int_equal(former_vsz, rt_aspace_count_vsz(lwp->aspace));
    utest_int_equal(former_vcount - 1, count_vcount(lwp->aspace));
    former_vcount -= 1;

    /* fix private from the right-middle page */
    msg.fault_vaddr = private3;
    utest_int_equal(MM_FAULT_FIXABLE_TRUE, rt_aspace_fault_try_fix(lwp->aspace, &msg));
    utest_int_equal(former_vsz, rt_aspace_count_vsz(lwp->aspace));
    utest_int_equal(former_vcount - 1, count_vcount(lwp->aspace));
    former_vcount -= 1;

    /* clear mapping */
    utest_int_equal(RT_EOK, rt_aspace_unmap_range(lwp->aspace, ex_vaddr, ex_size));
    rt_free(lwp->aspace->private_object);
    lwp->aspace->private_object = RT_NULL;
}

static void testcase_main(void)
{
    CONSIST_HEAP(test_mmap_fix_private());
}

static rt_err_t utest_tc_init(void)
{
    lwp = lwp_create(0);
    if (lwp)
        lwp_user_space_init(lwp, 1);
    else
        return -RT_ENOMEM;
    return RT_EOK;
}

static rt_err_t utest_tc_cleanup(void)
{
    lwp_ref_dec(lwp);
    return RT_EOK;
}

static void testcase(void)
{
    UTEST_UNIT_RUN(testcase_main);
}
UTEST_TC_EXPORT(testcase, "testcases.lwp.mman.mmap_anon.fix_private", utest_tc_init, utest_tc_cleanup, 10);

@ -0,0 +1,117 @@
/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-08-17     Shell        test case for aspace_map(MAP_FIXED)
 */
#include "common.h"
#include "lwp_user_mm.h"
#include "utest_assert.h"
#include <mm_aspace.h>

#include <rtthread.h>

static struct rt_lwp *lwp;
static size_t former_vsz;
static size_t former_vcount;

static void *vaddr = (void *)0x100000000;
static size_t ex_size = 0x5000;
static char *override_start;
static size_t override_size = 0x2000;
static long fd = -1;
static long pgoffset = 0;
static size_t ex_prot = PROT_NONE;
static size_t ex_flags = MAP_PRIVATE | MAP_ANONYMOUS;
static size_t override_prot = PROT_READ | PROT_WRITE;
static size_t override_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED;

static int _count_vsz(rt_varea_t varea, void *arg)
{
    rt_base_t *pvsz = arg;
    *pvsz += 1;
    return 0;
}

static rt_base_t count_vcount(rt_aspace_t aspace)
{
    rt_base_t vcount = 0;
    rt_aspace_traversal(aspace, _count_vsz, &vcount);
    return vcount;
}

static char put_data[] = "hello,world";

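/* create a PROT_NONE anonymous mapping, override two pages in the middle with a
 * MAP_FIXED read/write mapping, verify the varea accounting and that user data
 * can be written into the overridden range, then unmap the whole region */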
static void test_map_fixed(void)
{
    void *effect_override;

    former_vsz = rt_aspace_count_vsz(lwp->aspace);
    former_vcount = count_vcount(lwp->aspace);

    /* create an existing mapping */
    vaddr = lwp_mmap2(lwp, vaddr, ex_size, ex_prot, ex_flags, fd, pgoffset);
    uassert_true((long)vaddr > 0);
    utest_int_equal(former_vsz + ex_size, rt_aspace_count_vsz(lwp->aspace));
    utest_int_equal(former_vcount + 1, count_vcount(lwp->aspace));
    former_vsz += ex_size;
    former_vcount += 1;

    /* fix private in the middle */
    override_start = (char *)vaddr + 0x1000;
    effect_override = lwp_mmap2(lwp, override_start, override_size, override_prot, override_flags, fd, pgoffset);
    uassert_true(effect_override == override_start);
    utest_int_equal(former_vsz, rt_aspace_count_vsz(lwp->aspace));
    utest_int_equal(former_vcount + 2, count_vcount(lwp->aspace));
    utest_int_equal(
        lwp_data_put(lwp, effect_override, put_data, sizeof(put_data)),
        sizeof(put_data)
    );

    utest_int_equal(RT_EOK, rt_aspace_unmap_range(lwp->aspace, vaddr, ex_size));
}

static void aspace_unmap_tc(void)
{
    test_map_fixed();
}

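/* heap usage is sampled under rt_heap_lock() in utest_tc_init() and compared again
 * in utest_tc_cleanup() to make sure the test does not leak kernel heap memory */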
static rt_size_t total, used, max_used;
static rt_size_t totala, useda, max_useda;
static rt_ubase_t level;
static rt_err_t utest_tc_init(void)
{
    lwp = lwp_create(0);
    if (lwp)
        lwp_user_space_init(lwp, 1);
    else
        return -RT_ENOMEM;

    /* stats */
    level = rt_heap_lock();
    rt_memory_info(&total, &used, &max_used);

    return RT_EOK;
}

static rt_err_t utest_tc_cleanup(void)
{
    lwp_ref_dec(lwp);

    /* check */
    rt_memory_info(&totala, &useda, &max_useda);
    rt_heap_unlock(level);
    utest_int_equal(total, totala);
    utest_int_less_equal(useda, used);

    return RT_EOK;
}

static void testcase(void)
{
    UTEST_UNIT_RUN(aspace_unmap_tc);
}
UTEST_TC_EXPORT(testcase, "testcases.lwp.mman.mmap_anon.fix_private", utest_tc_init, utest_tc_cleanup, 10);

@ -40,9 +40,9 @@ static rt_err_t utest_tc_cleanup(void)

static void testcase(void)
{
    UTEST_UNIT_RUN(aspace_tc);
    UTEST_UNIT_RUN(ioremap_tc);
    UTEST_UNIT_RUN(flag_tc);
    aspace_tc();
    ioremap_tc();
    flag_tc();
}
UTEST_TC_EXPORT(testcase, "testcases.mm.api_tc", utest_tc_init, utest_tc_cleanup, 20);

@ -13,6 +13,7 @@
#include "lwp_arch.h"
#include "lwp_user_mm.h"
#include "mm_aspace.h"
#include "mm_flag.h"
#include "mmu.h"

/**
@ -62,7 +63,7 @@ static void test_user_map_varea(void)
    uassert_true(varea->attr == (MMU_MAP_U_RWCB));
    uassert_true(varea->size == buf_sz);
    uassert_true(varea->aspace == lwp->aspace);
    uassert_true(varea->flag == 0);
    uassert_true(varea->flag == MMF_MAP_PRIVATE);
    uassert_true(varea->start != 0);
    uassert_true(varea->start >= (void *)USER_VADDR_START && varea->start < (void *)USER_VADDR_TOP);

@ -86,7 +87,7 @@ static void test_user_map_varea_ext(void)
    uassert_true(varea->attr == (MMU_MAP_U_RW));
    uassert_true(varea->size == buf_sz);
    uassert_true(varea->aspace == lwp->aspace);
    uassert_true(varea->flag == 0);
    uassert_true(varea->flag == MMF_MAP_PRIVATE);
    uassert_true(varea->start != 0);
    uassert_true(varea->start >= (void *)USER_VADDR_START && varea->start < (void *)USER_VADDR_TOP);

@ -0,0 +1,43 @@
/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-12-14     WangXiaoyao  the first version
 * 2023-03-20     WangXiaoyao  Format & add more testcases for API under mm_aspace.h
 */

#include "common.h"

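/* allocate one physical page, remap it with rt_ioremap_cached() and verify the
 * remapped address reads the same value as the kernel's direct mapping of the
 * page, then release both the I/O mapping and the page */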
void ioremap_tc(void)
{
    const size_t bufsz = 0x1000;
    void *paddr = (void *)rt_pages_alloc(rt_page_bits(bufsz)) + PV_OFFSET;
    int *vaddr;
    vaddr = rt_ioremap_cached(paddr, bufsz);
    if (vaddr)
    {
        TC_ASSERT(*vaddr == *(int *)(paddr - PV_OFFSET));

        rt_iounmap(vaddr);
        rt_pages_free(paddr - PV_OFFSET, 0);
    }
}

static rt_err_t utest_tc_init(void)
{
    return RT_EOK;
}

static rt_err_t utest_tc_cleanup(void)
{
    return RT_EOK;
}

static void test_main(void)
{
    CONSIST_HEAP(ioremap_tc());
}
UTEST_TC_EXPORT(test_main, "testcases.mm.ioremap", utest_tc_init, utest_tc_cleanup, 20);

@ -11,6 +11,8 @@
#define __TEST_ASPACE_API_H__

#include "common.h"
#include "mm_aspace.h"
#include "mm_flag.h"
#include "test_aspace_api_internal.h"
#include "test_synchronization.h"

@ -122,11 +124,11 @@ static void aspace_map_tc(void)
     * in _rt_aspace_map:_varea_install
     * not covering an existing varea if a named mapping is mandatory
     */
    vaddr = (void *)((rt_ubase_t)aspace_map_tc & ~ARCH_PAGE_MASK);
    CONSIST_HEAP(
        uassert_true(
            rt_aspace_map(&rt_kernel_space, &vaddr, 0x1000, MMU_MAP_K_RWCB, MMF_MAP_FIXED, &rt_mm_dummy_mapper, 0)));
    uassert_true(vaddr == RT_NULL);
    // vaddr = (void *)((rt_ubase_t)aspace_map_tc & ~ARCH_PAGE_MASK);
    // CONSIST_HEAP(
    //     uassert_true(
    //         rt_aspace_map(&rt_kernel_space, &vaddr, 0x1000, MMU_MAP_K_RWCB, 0, &rt_mm_dummy_mapper, 0)));
    // uassert_true(vaddr == RT_NULL);

    /**
     * @brief Requirement:
@ -36,8 +36,8 @@ static void test_find_free(void)
    uassert_true(!rt_aspace_map(&rt_kernel_space, &vaddr, 0x1000, MMU_MAP_K_RWCB, MMF_MAP_FIXED, &rt_mm_dummy_mapper, 0));
    uassert_true(vaddr == top_page);
    /* type 1, on failure */
    uassert_true(rt_aspace_map(&rt_kernel_space, &vaddr, 0x1000, MMU_MAP_K_RWCB, MMF_MAP_FIXED, &rt_mm_dummy_mapper, 0));
    uassert_true(!vaddr);
    // uassert_true(rt_aspace_map(&rt_kernel_space, &vaddr, 0x1000, MMU_MAP_K_RWCB, MMF_MAP_FIXED, &rt_mm_dummy_mapper, 0));
    // uassert_true(!vaddr);

    /* type 2, on success */
    vaddr = top_page;

@ -35,7 +35,7 @@ void test_bst_adpt(void)
    uassert_true(!!lwp);
    uassert_true(!lwp_user_space_init(lwp, 0));
    aspace = lwp->aspace;
    mem_obj = &lwp->lwp_obj->mem_obj;
    mem_obj = &rt_mm_dummy_mapper;
    uassert_true(!!aspace);
    uassert_true(!!mem_obj);

@ -46,9 +46,9 @@ void test_bst_adpt(void)
        !rt_aspace_map(aspace, &target_va, map_size, MMU_MAP_K_RWCB, flags, mem_obj, 0));
    /* 2 wrappers */
    uassert_true(
        !rt_aspace_map(aspace, &prev_va, map_size - 1, MMU_MAP_K_RWCB, flags, mem_obj, 0));
        !rt_aspace_map(aspace, &prev_va, map_size, MMU_MAP_K_RWCB, flags, mem_obj, 0));
    uassert_true(
        !rt_aspace_map(aspace, &next_va, map_size - 1, MMU_MAP_K_RWCB, flags, mem_obj, 0));
        !rt_aspace_map(aspace, &next_va, map_size, MMU_MAP_K_RWCB, flags, mem_obj, 0));

    /* _aspace_bst_search */
    uassert_true(!!_aspace_bst_search(aspace, target_va));

@ -13,9 +13,10 @@ bsp_path = Dir('#').abspath
if not os.path.exists(bsp_path + "/link.lds"):
    Env['LINKFLAGS'] = Env['LINKFLAGS'].replace('link.lds', cwd + "/link.lds")
    # fix the linker with crtx.o
    Env['LINKFLAGS'] += ' -nostartfiles'
    Preprocessing("link.lds.S", ".lds", CPPPATH=[bsp_path])

Env['LINKFLAGS'] += ' -nostartfiles'

# add common code files
group = group + SConscript(os.path.join('common', 'SConscript'))