From 9ba6cec663428bad57204e2e3373b48c70efe61f Mon Sep 17 00:00:00 2001
From: Shell
Date: Wed, 8 May 2024 09:23:31 +0800
Subject: [PATCH] [libcpu/arm64] dealing with mem region out of kernel space
 (#8847)

* [ofw] dealing with mem region out of kernel space

- Fix parameter checking in _not_in_range(): a NULL start address is no
  longer exempt from range checking when the mapping is fixed
- Split page installation into a deferred stage to avoid mapping beyond
  ARCH_EARLY_MAP_SIZE

Signed-off-by: Shell

* fixup: restrict vstart to exclude RT_NULL

---------

Signed-off-by: Shell
---
 components/mm/mm_aspace.c             | 34 ++++++++++--------
 libcpu/aarch64/common/mmu.c           |  3 +-
 libcpu/aarch64/common/mmu.h           |  4 +++
 libcpu/aarch64/common/setup.c         | 51 ++++++++++++++++++++++++---
 libcpu/aarch64/cortex-a/entry_point.S |  2 +-
 5 files changed, 73 insertions(+), 21 deletions(-)

diff --git a/components/mm/mm_aspace.c b/components/mm/mm_aspace.c
index 8370bea8d7..acea6cc080 100644
--- a/components/mm/mm_aspace.c
+++ b/components/mm/mm_aspace.c
@@ -643,11 +643,11 @@ static rt_varea_t _varea_create(void *start, rt_size_t size)
 #define _IS_OVERFLOW(start, length) ((length) > (0ul - (uintptr_t)(start)))
 #define _IS_OVERSIZE(start, length, limit_s, limit_sz) (((length) + (rt_size_t)((char *)(start) - (char *)(limit_start))) > (limit_size))
 
-static inline int _not_in_range(void *start, rt_size_t length,
+static inline int _not_in_range(rt_size_t flags, void *start, rt_size_t length,
                                 void *limit_start, rt_size_t limit_size)
 {
     /* assuming (base + length) will not overflow except (0) */
-    int rc = start != RT_NULL
+    int rc = (flags & MMF_MAP_FIXED || start != RT_NULL)
                  ? (_IS_OVERFLOW(start, length) || start < limit_start ||
                     _IS_OVERSIZE(start, length, limit_start, limit_size))
                  : length > limit_size;
@@ -684,7 +684,7 @@ int rt_aspace_map(rt_aspace_t aspace, void **addr, rt_size_t length,
         LOG_I("%s(%p, %p, %lx, %lx, %lx, %p, %lx): Invalid input",
               __func__, aspace, addr, length, attr, flags, mem_obj, offset);
     }
-    else if (_not_in_range(*addr, length, aspace->start, aspace->size))
+    else if (_not_in_range(flags, *addr, length, aspace->start, aspace->size))
     {
         err = -RT_EINVAL;
         LOG_I("%s(addr:%p, len:%lx): out of range", __func__, *addr, length);
@@ -716,7 +716,7 @@ int rt_aspace_map_static(rt_aspace_t aspace, rt_varea_t varea, void **addr,
     int err;
     if (!aspace || !varea || !addr || !mem_obj || length == 0 ||
-        _not_in_range(*addr, length, aspace->start, aspace->size))
+        _not_in_range(flags, *addr, length, aspace->start, aspace->size))
     {
         err = -RT_EINVAL;
         LOG_W("%s: Invalid input", __func__);
     }
@@ -766,9 +766,9 @@ int _mm_aspace_map_phy(rt_aspace_t aspace, rt_varea_t varea,
         LOG_W("%s: not aligned", __func__);
         err = -RT_EINVAL;
     }
-    else if (_not_in_range(hint->limit_start, hint->limit_range_size, aspace->start,
+    else if (_not_in_range(hint->flags, hint->limit_start, hint->limit_range_size, aspace->start,
                            aspace->size) ||
-             _not_in_range(hint->prefer, hint->map_size, aspace->start,
+             _not_in_range(hint->flags, hint->prefer, hint->map_size, aspace->start,
                            aspace->size))
     {
         LOG_W("%s: not in range", __func__);
@@ -892,7 +892,7 @@ int rt_aspace_unmap(rt_aspace_t aspace, void *addr)
         LOG_I("%s: Invalid input", __func__);
         error = -RT_EINVAL;
     }
-    else if (_not_in_range(addr, 1, aspace->start, aspace->size))
+    else if (_not_in_range(MMF_MAP_FIXED, addr, 1, aspace->start, aspace->size))
     {
         LOG_I("%s: %lx not in range of aspace[%lx:%lx]", __func__, addr,
               aspace->start, (char *)aspace->start + aspace->size);
@@ -1041,7 +1041,7 @@ int rt_aspace_unmap_range(rt_aspace_t aspace, void *addr, size_t length)
LOG_I("%s: Invalid input", __func__); error = -RT_EINVAL; } - else if (_not_in_range(addr, length, aspace->start, aspace->size)) + else if (_not_in_range(MMF_MAP_FIXED, addr, length, aspace->start, aspace->size)) { LOG_I("%s: %lx not in range of aspace[%lx:%lx]", __func__, addr, aspace->start, (char *)aspace->start + aspace->size); @@ -1069,7 +1069,7 @@ int rt_aspace_unmap_range(rt_aspace_t aspace, void *addr, size_t length) } void *rt_aspace_mremap_range(rt_aspace_t aspace, void *old_address, size_t old_size, - size_t new_size, int flags, void *new_address) + size_t new_size, int flags, void *new_address) { void *ret = RT_NULL; @@ -1077,7 +1077,8 @@ void *rt_aspace_mremap_range(rt_aspace_t aspace, void *old_address, size_t old_s { LOG_I("%s: Invalid input", __func__); } - else if (_not_in_range(old_address, old_size, aspace->start, aspace->size)) + else if (_not_in_range(MMF_MAP_FIXED, old_address, old_size, + aspace->start, aspace->size)) { LOG_I("%s: %lx not in range of aspace[%lx:%lx]", __func__, old_address, aspace->start, (char *)aspace->start + aspace->size); @@ -1282,8 +1283,8 @@ int rt_aspace_load_page(rt_aspace_t aspace, void *addr, rt_size_t npage) err = -RT_ENOENT; } else if ((char *)addr >= end || (rt_size_t)addr & ARCH_PAGE_MASK || - _not_in_range(addr, npage << ARCH_PAGE_SHIFT, varea->start, - varea->size)) + _not_in_range(MMF_MAP_FIXED, addr, npage << ARCH_PAGE_SHIFT, + varea->start, varea->size)) { LOG_W("%s: Unaligned parameter or out of range", __func__); err = -RT_EINVAL; @@ -1310,7 +1311,8 @@ int rt_varea_map_page(rt_varea_t varea, void *vaddr, void *page) LOG_W("%s: page is not in kernel space", __func__); err = -RT_ERROR; } - else if (_not_in_range(vaddr, ARCH_PAGE_SIZE, varea->start, varea->size)) + else if (_not_in_range(MMF_MAP_FIXED, vaddr, ARCH_PAGE_SIZE, + varea->start, varea->size)) { LOG_W("%s(%p,%lx): not in range of varea(%p,%lx)", __func__, vaddr, ARCH_PAGE_SIZE, varea->start, varea->size); @@ -1349,7 +1351,8 @@ int rt_varea_map_range(rt_varea_t varea, void *vaddr, void *paddr, rt_size_t len LOG_W("%s(%p,%p,%p,%lx): invalid input", __func__, varea, vaddr, paddr, length); err = -RT_EINVAL; } - else if (_not_in_range(vaddr, length, varea->start, varea->size)) + else if (_not_in_range(MMF_MAP_FIXED, vaddr, length, + varea->start, varea->size)) { LOG_W("%s(%p,%lx): not in range of varea(%p,%lx)", __func__, vaddr, length, varea->start, varea->size); @@ -1382,7 +1385,8 @@ int rt_varea_unmap_range(rt_varea_t varea, void *vaddr, rt_size_t length) LOG_W("%s(%p,%p,%lx): invalid input", __func__, varea, vaddr, length); err = -RT_EINVAL; } - else if (_not_in_range(vaddr, length, varea->start, varea->size)) + else if (_not_in_range(MMF_MAP_FIXED, vaddr, length, + varea->start, varea->size)) { LOG_W("%s(%p,%lx): not in range of varea(%p,%lx)", __func__, vaddr, length, varea->start, varea->size); diff --git a/libcpu/aarch64/common/mmu.c b/libcpu/aarch64/common/mmu.c index 9583b9d207..106705c677 100644 --- a/libcpu/aarch64/common/mmu.c +++ b/libcpu/aarch64/common/mmu.c @@ -51,8 +51,9 @@ #define MMU_TBL_PAGE_4k_LEVEL 3 #define MMU_TBL_LEVEL_NR 4 +/* restrict virtual address on usage of RT_NULL */ #ifndef KERNEL_VADDR_START -#define KERNEL_VADDR_START ARCH_TEXT_OFFSET +#define KERNEL_VADDR_START 0x1000 #endif volatile unsigned long MMUTable[512] __attribute__((aligned(4 * 1024))); diff --git a/libcpu/aarch64/common/mmu.h b/libcpu/aarch64/common/mmu.h index f780c5eb97..e2599d2d30 100644 --- a/libcpu/aarch64/common/mmu.h +++ b/libcpu/aarch64/common/mmu.h @@ -89,6 +89,10 @@ 
 
 #define ARCH_MAP_FAILED ((void *)0x1ffffffffffff)
 
+#define ARCH_EARLY_MAP_SIZE (0x40000000)
+/* this is big enough even for a 16TB first-time mapping */
+#define ARCH_PAGE_INIT_THRESHOLD (0x10000000)
+
 #ifndef __ASSEMBLY__
 
 struct rt_aspace;
diff --git a/libcpu/aarch64/common/setup.c b/libcpu/aarch64/common/setup.c
index 5afcebfe8f..27c647bc6e 100644
--- a/libcpu/aarch64/common/setup.c
+++ b/libcpu/aarch64/common/setup.c
@@ -284,6 +284,15 @@ void rt_hw_common_setup(void)
             .end = (rt_size_t)kernel_end,
         }, RT_TRUE);
 
+#ifndef RT_USING_SMART
+    rt_fdt_commit_memregion_early(&(rt_region_t)
+    {
+        .name = "null",
+        .start = (rt_size_t)RT_NULL,
+        .end = (rt_size_t)RT_NULL + ARCH_PAGE_SIZE,
+    }, RT_TRUE);
+#endif /* !RT_USING_SMART */
+
     if (rt_fdt_prefetch(fdt_ptr))
     {
         /* Platform cannot be initialized */
@@ -332,7 +341,10 @@ void rt_hw_common_setup(void)
     if (!rt_fdt_commit_memregion_request(&mem_region, &mem_region_nr, RT_FALSE))
     {
         rt_ubase_t best_offset = ~0UL;
-        rt_region_t *usable_mem_region = mem_region, *page_region = RT_NULL, init_page_region = { 0 };
+        rt_region_t *usable_mem_region = mem_region, *page_region = RT_NULL;
+        rt_region_t init_page_region = { 0 };
+        rt_region_t defer_hi = { 0 };
+        rt_err_t error;
 
         LOG_I("Usable memory:");
 
@@ -369,6 +381,16 @@ void rt_hw_common_setup(void)
 
         RT_ASSERT(page_region != RT_NULL);
 
+        /* cap the first install at ARCH_PAGE_INIT_THRESHOLD so the early
+           mapping never exceeds ARCH_EARLY_MAP_SIZE */
+        if (page_region->end - page_region->start > ARCH_PAGE_INIT_THRESHOLD)
+        {
+            defer_hi.name = page_region->name;
+            defer_hi.end = page_region->end;
+            defer_hi.start = RT_ALIGN_DOWN(page_region->start + ARCH_PAGE_INIT_THRESHOLD,
+                                           ARCH_SECTION_SIZE);
+            page_region->end = defer_hi.start;
+        }
 
         init_page_region.start = page_region->start - PV_OFFSET;
         init_page_region.end = page_region->end - PV_OFFSET;
 
@@ -389,13 +411,34 @@ void rt_hw_common_setup(void)
 
         mem_region = usable_mem_region;
 
+        if (defer_hi.start)
+        {
+            /* convert to virtual address */
+            init_page_region.start = defer_hi.start - PV_OFFSET;
+            init_page_region.end = defer_hi.end - PV_OFFSET;
+            error = rt_page_install(init_page_region);
+
+            if (error)
+            {
+                LOG_W("Deferred page installation FAILED:");
+                LOG_W("  %-*s [%p, %p]", RT_NAME_MAX,
+                      defer_hi.name, defer_hi.start, defer_hi.end);
+            }
+            else
+            {
+                LOG_I("Deferred page installation SUCCEEDED:");
+                LOG_I("  %-*s [%p, %p]", RT_NAME_MAX,
+                      defer_hi.name, defer_hi.start, defer_hi.end);
+            }
+        }
+
         for (int i = 0; i < mem_region_nr; ++i, ++mem_region)
         {
             if (mem_region != page_region && mem_region->name)
             {
-                mem_region->start -= PV_OFFSET;
-                mem_region->end -= PV_OFFSET;
-                rt_page_install(*mem_region);
+                init_page_region.start = mem_region->start - PV_OFFSET;
+                init_page_region.end = mem_region->end - PV_OFFSET;
+                rt_page_install(init_page_region);
             }
         }
     }
diff --git a/libcpu/aarch64/cortex-a/entry_point.S b/libcpu/aarch64/cortex-a/entry_point.S
index b93978ea6e..361c9d4f67 100644
--- a/libcpu/aarch64/cortex-a/entry_point.S
+++ b/libcpu/aarch64/cortex-a/entry_point.S
@@ -278,7 +278,7 @@ init_mmu_early:
     get_phy x1, .early_tbl1_page
     get_pvoff x2 x3
 
-    ldr     x2, =0x40000000            /* Map 1G memory for kernel space */
+    ldr     x2, =ARCH_EARLY_MAP_SIZE   /* map ARCH_EARLY_MAP_SIZE (1G) for kernel space */
     bl      rt_hw_mem_setup_early
 
     b       enable_mmu_early
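
Note on the deferred split in setup.c: a memory region larger than ARCH_PAGE_INIT_THRESHOLD
is trimmed so that only its low part is installed while the MMU still covers just
ARCH_EARLY_MAP_SIZE, and the section-aligned remainder is installed in a second, deferred
pass. The standalone C program below is a minimal sketch of that split in isolation;
region_t, PAGE_INIT_THRESHOLD, SECTION_SIZE and split_for_early_install() are hypothetical
stand-ins for rt_region_t, ARCH_PAGE_INIT_THRESHOLD, ARCH_SECTION_SIZE and the inline logic
in rt_hw_common_setup(), not RT-Thread APIs:

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-ins; values mirror the patch's constants but are illustrative */
#define PAGE_INIT_THRESHOLD 0x10000000UL /* 256 MiB first-pass cap */
#define SECTION_SIZE        0x00200000UL /* 2 MiB section alignment */
#define ALIGN_DOWN(v, a)    ((v) & ~((a) - 1UL))

typedef struct region {
    const char *name;
    uintptr_t start;
    uintptr_t end;
} region_t;

/* trim `lo` to at most PAGE_INIT_THRESHOLD bytes and return the
 * section-aligned remainder in `hi`; returns 1 when a split happened */
static int split_for_early_install(region_t *lo, region_t *hi)
{
    if (lo->end - lo->start <= PAGE_INIT_THRESHOLD)
        return 0; /* small enough to install in one pass */

    hi->name  = lo->name;
    hi->end   = lo->end;
    hi->start = ALIGN_DOWN(lo->start + PAGE_INIT_THRESHOLD, SECTION_SIZE);
    lo->end   = hi->start; /* low part keeps [start, hi->start) */
    return 1;
}

int main(void)
{
    region_t mem = { "ram", 0x40000000UL, 0x140000000UL }; /* a 4 GiB region */
    region_t hi = { 0 };

    if (split_for_early_install(&mem, &hi))
        printf("early:    [%#lx, %#lx)\ndeferred: [%#lx, %#lx)\n",
               (unsigned long)mem.start, (unsigned long)mem.end,
               (unsigned long)hi.start, (unsigned long)hi.end);
    return 0;
}

In the patch itself, the low part seeds the page allocator via rt_page_install() while only
ARCH_EARLY_MAP_SIZE is mapped; the deferred part is installed afterwards, and the
RT_ALIGN_DOWN(..., ARCH_SECTION_SIZE) step keeps the split point section-aligned so the
first-pass mapping never overshoots the boundary.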