[libcpu/arm64] dealing with mem region out of kernel space (#8847)

* [ofw] dealing with mem region out of kernel space

- Fix parameter checking in _not_in_range() so that a NULL start address
  is no longer exempted from the range check for fixed (MMF_MAP_FIXED)
  mappings
- Split page installation into an early pass and a deferred stage so that
  early setup never maps beyond ARCH_EARLY_MAP_SIZE

Signed-off-by: Shell <smokewood@qq.com>

* fixup: restrict vstart to keep RT_NULL out of the mapped kernel range

---------

Signed-off-by: Shell <smokewood@qq.com>
Shell 2024-05-08 09:23:31 +08:00 committed by GitHub
parent 6977cf9101
commit 9ba6cec663
5 changed files with 73 additions and 21 deletions


@@ -643,11 +643,11 @@ static rt_varea_t _varea_create(void *start, rt_size_t size)
 #define _IS_OVERFLOW(start, length) ((length) > (0ul - (uintptr_t)(start)))
 #define _IS_OVERSIZE(start, length, limit_s, limit_sz) (((length) + (rt_size_t)((char *)(start) - (char *)(limit_start))) > (limit_size))
 
-static inline int _not_in_range(void *start, rt_size_t length,
+static inline int _not_in_range(rt_size_t flags, void *start, rt_size_t length,
                                 void *limit_start, rt_size_t limit_size)
 {
     /* assuming (base + length) will not overflow except (0) */
-    int rc = start != RT_NULL
+    int rc = (flags & MMF_MAP_FIXED || start != RT_NULL)
                  ? (_IS_OVERFLOW(start, length) || start < limit_start ||
                     _IS_OVERSIZE(start, length, limit_start, limit_size))
                  : length > limit_size;
@@ -684,7 +684,7 @@ int rt_aspace_map(rt_aspace_t aspace, void **addr, rt_size_t length,
         LOG_I("%s(%p, %p, %lx, %lx, %lx, %p, %lx): Invalid input",
               __func__, aspace, addr, length, attr, flags, mem_obj, offset);
     }
-    else if (_not_in_range(*addr, length, aspace->start, aspace->size))
+    else if (_not_in_range(flags, *addr, length, aspace->start, aspace->size))
     {
         err = -RT_EINVAL;
         LOG_I("%s(addr:%p, len:%lx): out of range", __func__, *addr, length);
@@ -716,7 +716,7 @@ int rt_aspace_map_static(rt_aspace_t aspace, rt_varea_t varea, void **addr,
     int err;
 
     if (!aspace || !varea || !addr || !mem_obj || length == 0 ||
-        _not_in_range(*addr, length, aspace->start, aspace->size))
+        _not_in_range(flags, *addr, length, aspace->start, aspace->size))
     {
         err = -RT_EINVAL;
         LOG_W("%s: Invalid input", __func__);
@@ -766,9 +766,9 @@ int _mm_aspace_map_phy(rt_aspace_t aspace, rt_varea_t varea,
         LOG_W("%s: not aligned", __func__);
         err = -RT_EINVAL;
     }
-    else if (_not_in_range(hint->limit_start, hint->limit_range_size, aspace->start,
+    else if (_not_in_range(hint->flags, hint->limit_start, hint->limit_range_size, aspace->start,
                            aspace->size) ||
-             _not_in_range(hint->prefer, hint->map_size, aspace->start,
+             _not_in_range(hint->flags, hint->prefer, hint->map_size, aspace->start,
                            aspace->size))
     {
         LOG_W("%s: not in range", __func__);
@@ -892,7 +892,7 @@ int rt_aspace_unmap(rt_aspace_t aspace, void *addr)
         LOG_I("%s: Invalid input", __func__);
         error = -RT_EINVAL;
     }
-    else if (_not_in_range(addr, 1, aspace->start, aspace->size))
+    else if (_not_in_range(MMF_MAP_FIXED, addr, 1, aspace->start, aspace->size))
     {
         LOG_I("%s: %lx not in range of aspace[%lx:%lx]", __func__, addr,
               aspace->start, (char *)aspace->start + aspace->size);
@@ -1041,7 +1041,7 @@ int rt_aspace_unmap_range(rt_aspace_t aspace, void *addr, size_t length)
         LOG_I("%s: Invalid input", __func__);
         error = -RT_EINVAL;
     }
-    else if (_not_in_range(addr, length, aspace->start, aspace->size))
+    else if (_not_in_range(MMF_MAP_FIXED, addr, length, aspace->start, aspace->size))
     {
         LOG_I("%s: %lx not in range of aspace[%lx:%lx]", __func__, addr,
               aspace->start, (char *)aspace->start + aspace->size);
@@ -1069,7 +1069,7 @@ int rt_aspace_unmap_range(rt_aspace_t aspace, void *addr, size_t length)
 }
 
 void *rt_aspace_mremap_range(rt_aspace_t aspace, void *old_address, size_t old_size,
-        size_t new_size, int flags, void *new_address)
+                             size_t new_size, int flags, void *new_address)
 {
     void *ret = RT_NULL;
@@ -1077,7 +1077,8 @@ void *rt_aspace_mremap_range(rt_aspace_t aspace, void *old_address, size_t old_s
     {
         LOG_I("%s: Invalid input", __func__);
     }
-    else if (_not_in_range(old_address, old_size, aspace->start, aspace->size))
+    else if (_not_in_range(MMF_MAP_FIXED, old_address, old_size,
+                           aspace->start, aspace->size))
     {
         LOG_I("%s: %lx not in range of aspace[%lx:%lx]", __func__, old_address,
               aspace->start, (char *)aspace->start + aspace->size);
@@ -1282,8 +1283,8 @@ int rt_aspace_load_page(rt_aspace_t aspace, void *addr, rt_size_t npage)
         err = -RT_ENOENT;
     }
     else if ((char *)addr >= end || (rt_size_t)addr & ARCH_PAGE_MASK ||
-             _not_in_range(addr, npage << ARCH_PAGE_SHIFT, varea->start,
-                           varea->size))
+             _not_in_range(MMF_MAP_FIXED, addr, npage << ARCH_PAGE_SHIFT,
+                           varea->start, varea->size))
     {
         LOG_W("%s: Unaligned parameter or out of range", __func__);
         err = -RT_EINVAL;
@@ -1310,7 +1311,8 @@ int rt_varea_map_page(rt_varea_t varea, void *vaddr, void *page)
         LOG_W("%s: page is not in kernel space", __func__);
         err = -RT_ERROR;
     }
-    else if (_not_in_range(vaddr, ARCH_PAGE_SIZE, varea->start, varea->size))
+    else if (_not_in_range(MMF_MAP_FIXED, vaddr, ARCH_PAGE_SIZE,
+                           varea->start, varea->size))
     {
         LOG_W("%s(%p,%lx): not in range of varea(%p,%lx)", __func__,
               vaddr, ARCH_PAGE_SIZE, varea->start, varea->size);
@@ -1349,7 +1351,8 @@ int rt_varea_map_range(rt_varea_t varea, void *vaddr, void *paddr, rt_size_t len
         LOG_W("%s(%p,%p,%p,%lx): invalid input", __func__, varea, vaddr, paddr, length);
         err = -RT_EINVAL;
     }
-    else if (_not_in_range(vaddr, length, varea->start, varea->size))
+    else if (_not_in_range(MMF_MAP_FIXED, vaddr, length,
+                           varea->start, varea->size))
     {
         LOG_W("%s(%p,%lx): not in range of varea(%p,%lx)", __func__,
               vaddr, length, varea->start, varea->size);
@@ -1382,7 +1385,8 @@ int rt_varea_unmap_range(rt_varea_t varea, void *vaddr, rt_size_t length)
         LOG_W("%s(%p,%p,%lx): invalid input", __func__, varea, vaddr, length);
         err = -RT_EINVAL;
     }
-    else if (_not_in_range(vaddr, length, varea->start, varea->size))
+    else if (_not_in_range(MMF_MAP_FIXED, vaddr, length,
+                           varea->start, varea->size))
     {
         LOG_W("%s(%p,%lx): not in range of varea(%p,%lx)", __func__,
               vaddr, length, varea->start, varea->size);
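
The behavioural change in _not_in_range() can be modelled outside the kernel. The standalone sketch below is illustrative only: MMF_MAP_FIXED, RT_NULL and the limit values are stubbed with assumed numbers, and the predicate is reproduced to show that a NULL start address is now range-checked (and rejected) whenever a fixed mapping is requested, while a NULL hint with ordinary flags still only has its length validated.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-ins for the kernel definitions; the values are illustrative. */
#define MMF_MAP_FIXED 0x08ul
#define RT_NULL       NULL

#define _IS_OVERFLOW(start, length) ((length) > (0ul - (uintptr_t)(start)))
#define _IS_OVERSIZE(start, length, limit_start, limit_size) \
    (((length) + (size_t)((char *)(start) - (char *)(limit_start))) > (limit_size))

/* Same shape as the patched predicate: nonzero means "reject the request".
 * A fixed mapping is always range-checked, even when the requested address
 * is NULL (address 0); a NULL hint without MMF_MAP_FIXED only checks length. */
static int _not_in_range(size_t flags, void *start, size_t length,
                         void *limit_start, size_t limit_size)
{
    return (flags & MMF_MAP_FIXED || start != RT_NULL)
               ? (_IS_OVERFLOW(start, length) || start < limit_start ||
                  _IS_OVERSIZE(start, length, limit_start, limit_size))
               : length > limit_size;
}

int main(void)
{
    void  *limit_start = (void *)0xffff000000000000ul; /* made-up kernel range */
    size_t limit_size  = 0x0000100000000000ul;

    /* NULL hint, no MMF_MAP_FIXED: "map it anywhere", accepted (prints 0). */
    printf("%d\n", _not_in_range(0, RT_NULL, 0x1000, limit_start, limit_size));
    /* NULL with MMF_MAP_FIXED: a literal request for address 0, rejected (1). */
    printf("%d\n", _not_in_range(MMF_MAP_FIXED, RT_NULL, 0x1000, limit_start, limit_size));
    /* Fixed mapping inside the limits is still accepted (prints 0). */
    printf("%d\n", _not_in_range(MMF_MAP_FIXED, (char *)limit_start + 0x1000, 0x1000,
                                 limit_start, limit_size));
    return 0;
}

This also matches the call sites above: rt_aspace_map() and rt_aspace_map_static() forward the caller's flags, so an anonymous NULL hint keeps its old meaning, while the unmap, mremap, load_page and varea paths pass MMF_MAP_FIXED literally because they always operate on a concrete address.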


@@ -51,8 +51,9 @@
 #define MMU_TBL_PAGE_4k_LEVEL 3
 #define MMU_TBL_LEVEL_NR 4
 
+/* restrict virtual address on usage of RT_NULL */
 #ifndef KERNEL_VADDR_START
-#define KERNEL_VADDR_START ARCH_TEXT_OFFSET
+#define KERNEL_VADDR_START 0x1000
 #endif
 
 volatile unsigned long MMUTable[512] __attribute__((aligned(4 * 1024)));
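
Raising the default KERNEL_VADDR_START (used when the build does not define it) from ARCH_TEXT_OFFSET to 0x1000 keeps the page containing address 0 out of the kernel virtual range, so a pointer value of 0 (RT_NULL) can only ever mean "no address". A minimal sketch of the invariant, assuming 4 KiB pages; vaddr_in_kernel_range() is a hypothetical helper for illustration, not kernel API:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; 4 KiB pages are assumed for this port. */
#define ARCH_PAGE_SIZE     0x1000ul
#define KERNEL_VADDR_START 0x1000ul   /* new default from this patch */

/* Keeping the start at or above one page size guarantees that page 0 is
 * never part of the kernel virtual range, so address 0 stays reserved
 * for RT_NULL. */
static_assert(KERNEL_VADDR_START >= ARCH_PAGE_SIZE,
              "page 0 must stay outside the kernel virtual range");

/* Hypothetical helper: a kernel virtual address below the start is bogus. */
static int vaddr_in_kernel_range(uintptr_t vaddr)
{
    return vaddr >= KERNEL_VADDR_START;
}

int main(void)
{
    printf("%d %d\n", vaddr_in_kernel_range(0x0),
                      vaddr_in_kernel_range(0x2000)); /* prints: 0 1 */
    return 0;
}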


@@ -89,6 +89,10 @@ struct mem_desc
 #define ARCH_MAP_FAILED ((void *)0x1ffffffffffff)
 
 #define ARCH_EARLY_MAP_SIZE (0x40000000)
+
+/* this is big enough for even 16TB first-time mapping */
+#define ARCH_PAGE_INIT_THRESHOLD (0x10000000)
+
 #ifndef __ASSEMBLY__
 
 struct rt_aspace;


@@ -284,6 +284,15 @@ void rt_hw_common_setup(void)
             .end = (rt_size_t)kernel_end,
         }, RT_TRUE);
 
+#ifndef RT_USING_SMART
+    rt_fdt_commit_memregion_early(&(rt_region_t)
+        {
+            .name = "null",
+            .start = (rt_size_t)RT_NULL,
+            .end = (rt_size_t)RT_NULL + ARCH_PAGE_SIZE,
+        }, RT_TRUE);
+#endif /* !RT_USING_SMART */
+
     if (rt_fdt_prefetch(fdt_ptr))
     {
         /* Platform cannot be initialized */
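
On builds without RT_USING_SMART the hunk above additionally commits a one-page region named "null" covering address 0, in the same way the kernel image region is committed just above it, so that page is accounted for up front and never ends up as ordinary usable memory, keeping RT_NULL unambiguous. The sketch below only models the bookkeeping idea (a reserved-region list consulted before a page is released to the allocator); the structures and the second, kernel-image entry are invented for the demo and are not the rt_fdt API.

#include <stdio.h>
#include <stddef.h>

/* Illustrative model only. */
#define PAGE_SIZE 0x1000ul

struct region { const char *name; unsigned long start, end; };

/* Reserved regions committed early; the one-page "null" entry mirrors the
 * region added by this patch, the "kernel" span is made up. */
static const struct region reserved[] = {
    { "null",   0x0ul,        0x0ul + PAGE_SIZE },
    { "kernel", 0x40080000ul, 0x40200000ul      },
};

/* A page may be handed to the page allocator only if it overlaps no
 * reserved region. */
static int page_is_usable(unsigned long page_start)
{
    for (size_t i = 0; i < sizeof(reserved) / sizeof(reserved[0]); ++i)
    {
        if (page_start < reserved[i].end &&
            page_start + PAGE_SIZE > reserved[i].start)
            return 0;
    }
    return 1;
}

int main(void)
{
    /* page 0 is reserved (prints 0), the next page is usable (prints 1) */
    printf("%d %d\n", page_is_usable(0x0ul), page_is_usable(0x1000ul));
    return 0;
}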
@@ -332,7 +341,10 @@ void rt_hw_common_setup(void)
     if (!rt_fdt_commit_memregion_request(&mem_region, &mem_region_nr, RT_FALSE))
     {
         rt_ubase_t best_offset = ~0UL;
-        rt_region_t *usable_mem_region = mem_region, *page_region = RT_NULL, init_page_region = { 0 };
+        rt_region_t *usable_mem_region = mem_region, *page_region = RT_NULL;
+        rt_region_t init_page_region = { 0 };
+        rt_region_t defer_hi = { 0 };
+        rt_err_t error;
 
         LOG_I("Usable memory:");
@@ -369,6 +381,16 @@ void rt_hw_common_setup(void)
         RT_ASSERT(page_region != RT_NULL);
 
+        /* don't map more than ARCH_EARLY_MAP_SIZE */
+        if (page_region->end - page_region->start > ARCH_PAGE_INIT_THRESHOLD)
+        {
+            defer_hi.name = page_region->name;
+            defer_hi.end = page_region->end;
+            defer_hi.start = RT_ALIGN_DOWN(page_region->start + ARCH_PAGE_INIT_THRESHOLD,
+                                           ARCH_SECTION_SIZE);
+            page_region->end = defer_hi.start;
+        }
+
         init_page_region.start = page_region->start - PV_OFFSET;
         init_page_region.end = page_region->end - PV_OFFSET;
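
A worked example of the split above, assuming the port's values ARCH_PAGE_INIT_THRESHOLD = 0x10000000 (256 MiB) and ARCH_SECTION_SIZE = 0x200000 (2 MiB), with a hypothetical 1 GiB RAM region at 0x40000000: the first pass keeps [0x40000000, 0x50000000), which stays well under ARCH_EARLY_MAP_SIZE, and [0x50000000, 0x80000000) is recorded in defer_hi for the second rt_page_install() pass. The same arithmetic as a standalone sketch:

#include <stdio.h>

/* Values assumed from the aarch64 port; RT_ALIGN_DOWN is a stand-in macro. */
#define ARCH_PAGE_INIT_THRESHOLD 0x10000000ul   /* cap for the first pass  */
#define ARCH_SECTION_SIZE        0x00200000ul   /* 2 MiB section alignment */
#define RT_ALIGN_DOWN(v, a)      ((v) & ~((a) - 1ul))

struct region { unsigned long start, end; };

int main(void)
{
    /* A hypothetical 1 GiB RAM region reported by the device tree. */
    struct region page_region = { 0x40000000ul, 0x80000000ul };
    struct region defer_hi    = { 0, 0 };

    if (page_region.end - page_region.start > ARCH_PAGE_INIT_THRESHOLD)
    {
        defer_hi.end   = page_region.end;
        defer_hi.start = RT_ALIGN_DOWN(page_region.start + ARCH_PAGE_INIT_THRESHOLD,
                                       ARCH_SECTION_SIZE);
        page_region.end = defer_hi.start;
    }

    printf("early:    [%#lx, %#lx)\n", page_region.start, page_region.end);
    printf("deferred: [%#lx, %#lx)\n", defer_hi.start, defer_hi.end);
    /* prints:
     *   early:    [0x40000000, 0x50000000)
     *   deferred: [0x50000000, 0x80000000)
     */
    return 0;
}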
@@ -389,13 +411,34 @@ void rt_hw_common_setup(void)
         mem_region = usable_mem_region;
 
+        if (defer_hi.start)
+        {
+            /* to virt address */
+            init_page_region.start = defer_hi.start - PV_OFFSET;
+            init_page_region.end = defer_hi.end - PV_OFFSET;
+            error = rt_page_install(init_page_region);
+
+            if (error)
+            {
+                LOG_W("Deferred page installation FAILED:");
+                LOG_W(" %-*.s [%p, %p]", RT_NAME_MAX,
+                      defer_hi.name, defer_hi.start, defer_hi.end);
+            }
+            else
+            {
+                LOG_I("Deferred page installation SUCCEED:");
+                LOG_I(" %-*.s [%p, %p]", RT_NAME_MAX,
+                      defer_hi.name, defer_hi.start, defer_hi.end);
+            }
+        }
+
         for (int i = 0; i < mem_region_nr; ++i, ++mem_region)
         {
             if (mem_region != page_region && mem_region->name)
             {
-                mem_region->start -= PV_OFFSET;
-                mem_region->end -= PV_OFFSET;
-                rt_page_install(*mem_region);
+                init_page_region.start = mem_region->start - PV_OFFSET;
+                init_page_region.end = mem_region->end - PV_OFFSET;
+                rt_page_install(init_page_region);
             }
         }
     }


@@ -278,7 +278,7 @@ init_mmu_early:
     get_phy x1, .early_tbl1_page
     get_pvoff x2 x3
-    ldr x2, =0x40000000 /* Map 1G memory for kernel space */
+    ldr x2, =ARCH_EARLY_MAP_SIZE /* Map 1G memory for kernel space */
 
     bl rt_hw_mem_setup_early
 
    b enable_mmu_early