[libcpu][aarch64] memory setup using memblock (#9092)
* memory setup using memblock
* map pages later
This commit is contained in:
parent ac2664438d
commit b785ef9ed7
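The change replaces the ad-hoc _memregion[] bookkeeping in fdt.c with the memblock API. A rough sketch of the resulting boot flow, condensed from the hunks below (the addresses and the helper name are illustrative, not taken from any real board):

    #include <mm_memblock.h>

    /* Sketch only: the real values are computed in rt_hw_common_setup()
     * from &_start/&_end plus the new ARCH_HEAP_SIZE / ARCH_INIT_PAGE_SIZE options. */
    static void boot_memory_setup_sketch(void)
    {
        rt_size_t kernel_start = 0x40080000, kernel_end = 0x40200000;
        rt_size_t heap_end = kernel_end + ARCH_HEAP_SIZE;
        rt_size_t page_end = heap_end + ARCH_INIT_PAGE_SIZE;

        /* 1. reserve what the boot image already occupies */
        rt_memblock_reserve_memory("kernel", kernel_start, kernel_end, MEMBLOCK_NONE);
        rt_memblock_reserve_memory("memheap", kernel_end, heap_end, MEMBLOCK_NONE);
        rt_memblock_reserve_memory("init-page", heap_end, page_end, MEMBLOCK_NONE);

        /* 2. rt_fdt_scan_memory() adds every device-tree memory node */
        rt_memblock_add_memory("memory@40000000", 0x40000000, 0x80000000, RT_FALSE);

        /* 3. map the remaining free ranges and hand them to the page allocator
         *    (the "map pages later" part of the commit) */
        rt_memblock_setup_memory_environment();
    }
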
@@ -21,12 +21,5 @@ extern size_t MMUTable[];
void rt_hw_board_init(void)
{
    rt_fdt_commit_memregion_early(&(rt_region_t)
    {
        .name = "memheap",
        .start = (rt_size_t)rt_kmem_v2p(HEAP_BEGIN),
        .end = (rt_size_t)rt_kmem_v2p(HEAP_END),
    }, RT_TRUE);

    rt_hw_common_setup();
}

@@ -21,10 +21,10 @@
#include <msh.h>
#endif
#include <ioremap.h>
#include <mm_memblock.h>

#ifdef RT_USING_OFW
#define bootargs_select rt_ofw_bootargs_select
#define memregion_request rt_fdt_commit_memregion_request
#else
#error Platform have not kernel parameters select interfaces!
#endif

@@ -41,23 +41,16 @@ static int rootfs_mnt_init(void)
    if (!dev || !fstype)
    {
        const char *name = "initrd";
        rt_size_t mem_region_nr;
        rt_region_t *mem_region;
        rt_uint64_t initrd_start = 0, initrd_end = 0;
        struct rt_mmblk_reg *iter = RT_NULL;

        if (!memregion_request(&mem_region, &mem_region_nr, RT_TRUE))
        rt_slist_for_each_entry(iter, &(rt_memblock_get_reserved()->reg_list), node)
        {
            while (mem_region_nr-- > 0)
            if (rt_strcmp(iter->memreg.name, name) == 0)
            {
                if (mem_region->name == name || !rt_strcmp(mem_region->name, name))
                {
                    initrd_start = mem_region->start;
                    initrd_end = mem_region->end;

                    break;
                }

                mem_region++;
                initrd_start = iter->memreg.start;
                initrd_end = iter->memreg.end;
                break;
            }
        }

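With the _memregion table gone, consumers such as rootfs_mnt_init() now walk the memblock reserved list directly. The same lookup, condensed from the hunk above (assumes a region was committed under the name "initrd" during FDT scanning):

    rt_uint64_t initrd_start = 0, initrd_end = 0;
    struct rt_mmblk_reg *iter = RT_NULL;

    /* iterate the reserved regions kept by memblock and pick the one named "initrd" */
    rt_slist_for_each_entry(iter, &(rt_memblock_get_reserved()->reg_list), node)
    {
        if (rt_strcmp(iter->memreg.name, "initrd") == 0)
        {
            initrd_start = iter->memreg.start;
            initrd_end   = iter->memreg.end;
            break;
        }
    }
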
@@ -3,6 +3,7 @@ menuconfig RT_USING_OFW
    select RT_USING_ADT
    select RT_USING_ADT_REF
    select RT_USING_ADT_BITMAP
    select RT_USING_MEMBLOCK
    depends on RT_USING_DM
    default n

@@ -15,6 +15,8 @@
#include <drivers/ofw_raw.h>
#include <drivers/core/dm.h>

#include <mm_memblock.h>

#define DBG_TAG "rtdm.ofw"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
@@ -29,9 +31,6 @@ RT_OFW_SYMBOL_TYPE_RANGE(earlycon, struct rt_fdt_earlycon_id, _earlycon_start =
#define ARCH_INIT_MEMREGION_NR 128
#endif

static rt_region_t _memregion[ARCH_INIT_MEMREGION_NR] rt_section(".bss.noclean.memregion");
static int _memregion_front_idx = 0, _memregion_last_idx = RT_ARRAY_SIZE(_memregion) - 1;

static void *_fdt = RT_NULL;
static rt_phandle _phandle_min;
static rt_phandle _phandle_max;
@@ -140,71 +139,6 @@ rt_bool_t rt_fdt_device_is_available(void *fdt, int nodeoffset)
    return ret;
}

rt_err_t rt_fdt_commit_memregion_early(rt_region_t *region, rt_bool_t is_reserved)
{
    rt_err_t err = RT_EOK;

    if (region && region->name)
    {
        if (_memregion_front_idx < _memregion_last_idx)
        {
            int idx;

            if (!_memregion_front_idx && _memregion_last_idx == RT_ARRAY_SIZE(_memregion) - 1)
            {
                for (int i = 0; i < RT_ARRAY_SIZE(_memregion); ++i)
                {
                    _memregion[i].name = RT_NULL;
                }
            }

            idx = is_reserved ? _memregion_last_idx-- : _memregion_front_idx++;

            rt_memcpy(&_memregion[idx], region, sizeof(*region));
        }
        else
        {
            err = -RT_EEMPTY;
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

rt_err_t rt_fdt_commit_memregion_request(rt_region_t **out_region, rt_size_t *out_nr, rt_bool_t is_reserved)
{
    rt_err_t err = RT_EOK;

    if (out_region && out_nr)
    {
        if (is_reserved)
        {
            *out_region = &_memregion[_memregion_last_idx + 1];
            *out_nr = RT_ARRAY_SIZE(_memregion) - 1 - _memregion_last_idx;
        }
        else
        {
            *out_region = &_memregion[0];
            *out_nr = _memregion_front_idx;
        }

        if (*out_nr == 0)
        {
            err = -RT_EEMPTY;
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

rt_err_t rt_fdt_prefetch(void *fdt)
{
    rt_err_t err = -RT_ERROR;
@@ -256,26 +190,6 @@ rt_err_t rt_fdt_scan_root(void)
    return err;
}

rt_inline rt_err_t commit_memregion(const char *name, rt_uint64_t base, rt_uint64_t size, rt_bool_t is_reserved)
{
    return rt_fdt_commit_memregion_early(&(rt_region_t)
    {
        .name = name,
        .start = (rt_size_t)base,
        .end = (rt_size_t)(base + size),
    }, is_reserved);
}

static rt_err_t reserve_memregion(const char *name, rt_uint64_t base, rt_uint64_t size)
{
    if (commit_memregion(name, base, size, RT_TRUE) == -RT_EEMPTY)
    {
        LOG_W("Reserved memory: %p - %p%s", base, base + size, " unable to record");
    }

    return RT_EOK;
}

static rt_err_t fdt_reserved_mem_check_root(int nodeoffset)
{
    rt_err_t err = RT_EOK;
@@ -331,8 +245,9 @@ static rt_err_t fdt_reserved_memory_reg(int nodeoffset, const char *uname)
            continue;
        }

        rt_bool_t is_nomap = fdt_getprop(_fdt, nodeoffset, "no-map", RT_NULL) ? RT_TRUE : RT_FALSE;
        base = rt_fdt_translate_address(_fdt, nodeoffset, base);
        reserve_memregion(fdt_get_name(_fdt, nodeoffset, RT_NULL), base, size);
        rt_memblock_reserve_memory(uname, base, base + size, is_nomap);

        len -= t_len;
    }
@@ -371,7 +286,7 @@ static void fdt_scan_reserved_memory(void)

            if (err == -RT_EEMPTY && fdt_getprop(_fdt, child, "size", RT_NULL))
            {
                reserve_memregion(fdt_get_name(_fdt, child, RT_NULL), 0, 0);
                LOG_E("Allocating reserved memory in setup is not yet supported");
            }
        }
    }
@@ -385,7 +300,6 @@ static void fdt_scan_reserved_memory(void)
static rt_err_t fdt_scan_memory(void)
{
    int nodeoffset, no;
    rt_region_t *region;
    rt_uint64_t base, size;
    rt_err_t err = -RT_EEMPTY;

@@ -399,11 +313,9 @@ static rt_err_t fdt_scan_memory(void)
            break;
        }

        reserve_memregion("memreserve", base, size);
        rt_memblock_reserve_memory("memreserve", base, base + size, MEMBLOCK_NONE);
    }

    no = 0;

    fdt_for_each_subnode(nodeoffset, _fdt, 0)
    {
        int len;
@@ -441,7 +353,8 @@ static rt_err_t fdt_scan_memory(void)
            continue;
        }

        err = commit_memregion(name, base, size, RT_FALSE);
        bool is_hotpluggable = fdt_getprop(_fdt, nodeoffset, "hotpluggable", RT_NULL) ? RT_TRUE : RT_FALSE;
        err = rt_memblock_add_memory(name, base, base + size, is_hotpluggable);

        if (!err)
        {
@@ -451,8 +364,6 @@ static rt_err_t fdt_scan_memory(void)
        {
            LOG_W("Memory node(%d) ranges: %p - %p%s", no, base, base + size, " unable to record");
        }

        ++no;
    }
}

@@ -461,95 +372,6 @@ static rt_err_t fdt_scan_memory(void)
        fdt_scan_reserved_memory();
    }

    region = &_memregion[0];

    for (no = 0; region->name; ++region)
    {
        /* We need check the memory region now. */
        for (int i = RT_ARRAY_SIZE(_memregion) - 1; i > no; --i)
        {
            rt_region_t *res_region = &_memregion[i];

            if (!res_region->name)
            {
                break;
            }

            /*
             *  +--------+                            +--------+
             *  | memory |                            | memory |
             *  +--------+  +----------+ +----------+ +--------+
             *              | reserved | | reserved |
             *              +----------+ +----------+
             */
            if (res_region->start >= region->end || res_region->end <= region->start)
            {
                /* No adjustments needed */
                continue;
            }

            /*
             * case 0:                      case 1:
             * +------------------+             +----------+
             * |      memory      |             |  memory  |
             * +---+----------+---+         +---+----------+---+
             *     | reserved |             |     reserved     |
             *     +----------+             +---+----------+---+
             *
             * case 2:                      case 3:
             * +------------------+             +------------------+
             * |      memory      |             |      memory      |
             * +--------------+---+------+  +------+---+--------------+
             *                | reserved |  | reserved |
             *                +----------+  +----------+
             */
            if (res_region->start > region->start)
            {
                if (res_region->end < region->end)
                {
                    /* case 0 */
                    rt_size_t new_size = region->end - res_region->end;

                    region->end = res_region->start;

                    /* Commit part next block */
                    err = commit_memregion(region->name, res_region->end, new_size, RT_FALSE);

                    if (!err)
                    {
                        ++no;

                        /* Scan again */
                        region = &_memregion[0];
                        --region;

                        break;
                    }
                }
                else
                {
                    /* case 2 */
                    region->end = res_region->start;
                }
            }
            else
            {
                if (res_region->end < region->end)
                {
                    /* case 3 */
                    region->start = res_region->end;
                }
                else
                {
                    /* case 1 */
                    region->name = RT_NULL;

                    break;
                }
            }
        }
    }

    return err;
}

@@ -649,7 +471,7 @@ static rt_err_t fdt_scan_initrd(rt_uint64_t *ranges, const char *name, const cha

        if (!err)
        {
            commit_memregion("initrd", ranges[0], ranges[1] - ranges[0], RT_TRUE);
            rt_memblock_reserve_memory("initrd", ranges[0], ranges[1], MEMBLOCK_NONE);
        }
    }
    else if (!ranges)

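Net effect of the fdt.c changes: the scan helpers no longer fill the private _memregion[] array; memory nodes go through rt_memblock_add_memory() and reserved ranges (reserved-memory children, /memreserve/ entries, the initrd) go through rt_memblock_reserve_memory(). For a hypothetical board with 1 GiB of RAM at 0x40000000 and a 1 MiB carveout, the scan boils down to calls of this shape (names and addresses invented for illustration):

    /* memory node: [base, base + size), hotplug flag taken from the "hotpluggable" property */
    rt_memblock_add_memory("memory@40000000", 0x40000000UL, 0x80000000UL, RT_FALSE);

    /* reserved-memory child: flag depends on whether the node carries "no-map" */
    rt_memblock_reserve_memory("carveout@48000000", 0x48000000UL, 0x48100000UL, MEMBLOCK_NONE);
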
@@ -112,9 +112,9 @@ rt_inline void _reg_remove_after(struct rt_mmblk_reg *prev)

/* adding overlapped regions is banned */
static rt_err_t _memblock_add_range(struct rt_memblock *memblock,
                                    char *name, rt_size_t start, rt_size_t end, mm_flag_t flag)
                                    const char *name, rt_size_t start, rt_size_t end, mm_flag_t flag)
{
    struct rt_mmblk_reg *reg, *reg_next;
    struct rt_mmblk_reg *reg = RT_NULL, *reg_next = RT_NULL;
    rt_slist_t sentinel;
    rt_region_t new_region;

@@ -153,7 +153,7 @@ static rt_err_t _memblock_add_range(struct rt_memblock *memblock,
    return _reg_insert_after(reg, &new_region, flag);
}

rt_err_t rt_memblock_add_memory(char *name, rt_size_t start, rt_size_t end, mmblk_flag_t flags)
rt_err_t rt_memblock_add_memory(const char *name, rt_size_t start, rt_size_t end, mmblk_flag_t flags)
{
    LOG_D("add physical address range [%p-%p) with flag 0x%x" \
          " to overall memory regions\n", base, base + size, flag);
@@ -161,7 +161,7 @@ rt_err_t rt_memblock_add_memory(char *name, rt_size_t start, rt_size_t end, mmbl
    return _memblock_add_range(&mmblk_memory, name, start, end, flags);
}

rt_err_t rt_memblock_reserve_memory(char *name, rt_size_t start, rt_size_t end, mmblk_flag_t flags)
rt_err_t rt_memblock_reserve_memory(const char *name, rt_size_t start, rt_size_t end, mmblk_flag_t flags)
{
    LOG_D("add physical address range [%p-%p) to reserved memory regions\n",\
          base, base + size);
@@ -319,7 +319,7 @@ static void _next_free_region(struct rt_mmblk_reg **m, struct rt_mmblk_reg **r,
/* merge normal memory regions */
static void _memblock_merge_memory(void)
{
    struct rt_mmblk_reg *reg;
    struct rt_mmblk_reg *reg = RT_NULL;

    rt_slist_for_each_entry(reg, &(mmblk_memory.reg_list), node)
    {
@@ -333,43 +333,62 @@
    }
}

/* free all available memory to buddy system */
static void _memblock_free_all(void)
void rt_memblock_setup_memory_environment(void)
{
    rt_region_t reg;
    struct rt_mmblk_reg *iter = RT_NULL, *start_reg = RT_NULL, *end_reg = RT_NULL;
    rt_region_t reg = {0};
    rt_size_t mem = 0;
    struct rt_mmblk_reg *m, *r;
    void *err;

    _memblock_merge_memory();

    LOG_I("System memory:");

    rt_slist_for_each_entry(iter, &(mmblk_memory.reg_list), node)
    {
        LOG_I("  %-*.s [%p, %p]", RT_NAME_MAX, iter->memreg.name, iter->memreg.start, iter->memreg.end);
    }

    LOG_I("Reserved memory:");

    rt_slist_for_each_entry(iter, &(mmblk_reserved.reg_list), node)
    {
        LOG_I("  %-*.s [%p, %p]", RT_NAME_MAX, iter->memreg.name, iter->memreg.start, iter->memreg.end);

        if (iter->flags != MEMBLOCK_NONE)
        {
            _memblock_separate_range(&mmblk_memory, iter->memreg.start, iter->memreg.end, &start_reg, &end_reg);
            _memblock_set_flag(start_reg, end_reg, iter->flags);
        }
    }

    /* install usable memory to system page */
    for_each_free_region(m, r, MEMBLOCK_NONE, &reg.start, &reg.end)
    {
        reg.start = RT_ALIGN(reg.start, ARCH_PAGE_SIZE);
        reg.end = RT_ALIGN_DOWN(reg.end, ARCH_PAGE_SIZE);

        if (reg.start >= reg.end)
            continue;

        LOG_I("physical memory region [%p-%p] installed to system page", reg.start, reg.end);

        reg.start -= PV_OFFSET;
        reg.end -= PV_OFFSET;
        rt_page_install(reg);

        LOG_D("region [%p-%p) added to buddy system\n", reg.start, reg.end);
        struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
                                     .limit_start = rt_kernel_space.start,
                                     .limit_range_size = rt_kernel_space.size,
                                     .map_size = reg.end - reg.start,
                                     .prefer = (void *)reg.start};

        rt_aspace_map_phy(&rt_kernel_space, &hint, MMU_MAP_K_RWCB, (reg.start + PV_OFFSET) >> MM_PAGE_SHIFT, &err);
        rt_page_install(reg);
        mem += reg.end - reg.start;
    }

    LOG_D("0x%lx(%ld) bytes memory added to buddy system\n", mem, mem);
}

void rt_memblock_setup_memory_environment(void)
{
    struct rt_mmblk_reg *reg, *start_reg, *end_reg;
    rt_err_t err = RT_EOK;

    _memblock_merge_memory();
    rt_slist_for_each_entry(reg, &(mmblk_reserved.reg_list), node)
    {
        if (reg->flags != MEMBLOCK_NONE)
        {
            err = _memblock_separate_range(&mmblk_memory, reg->memreg.start, reg->memreg.end, &start_reg, &end_reg);
            RT_ASSERT(err == RT_EOK);

            _memblock_set_flag(start_reg, end_reg, reg->flags);
        }
    }
    _memblock_free_all();
    LOG_I("%ld MB memory installed to system page", mem/1000000);
}

#ifdef UTEST_MM_API_TC

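One detail in the new rt_memblock_setup_memory_environment(): before a free range is mapped and installed, its start is rounded up and its end rounded down to a page boundary, so partial pages at the edges of a free range are dropped rather than handed to the buddy system. A quick numeric check of that trimming (4 KiB pages assumed):

    /* RT_ALIGN rounds up, RT_ALIGN_DOWN rounds down, both to a power-of-two boundary */
    rt_size_t start = RT_ALIGN(0x40001234, 0x1000);      /* -> 0x40002000 */
    rt_size_t end   = RT_ALIGN_DOWN(0x400FFFF0, 0x1000); /* -> 0x400FF000 */
    /* if (start >= end) the whole range is skipped, exactly as in the loop above */
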
@@ -56,7 +56,7 @@ struct rt_memblock
 * @param end the size of the physical address range
 * @param flags the flags of the region
 */
rt_err_t rt_memblock_add_memory(char *name, rt_size_t start, rt_size_t end, mmblk_flag_t flags);
rt_err_t rt_memblock_add_memory(const char *name, rt_size_t start, rt_size_t end, mmblk_flag_t flags);

/**
 * @brief Add a physical address range to the reserved memory region
@@ -68,7 +68,7 @@ rt_err_t rt_memblock_add_memory(char *name, rt_size_t start, rt_size_t end, mmbl
 * @param end the size of the physical address range
 * @param flags the flags of the region
 */
rt_err_t rt_memblock_reserve_memory(char *name, rt_size_t start, rt_size_t end, mmblk_flag_t flags);
rt_err_t rt_memblock_reserve_memory(const char *name, rt_size_t start, rt_size_t end, mmblk_flag_t flags);

/**
 * @brief To conclude the management of memory by the memblock.

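The only change in the header is const-qualifying the name parameter, which lets call sites pass string literals (as fdt.c and setup.c now do) without casts or warnings. A minimal usage sketch under the new prototypes, with made-up addresses; end is exclusive (base + size), matching the call sites in this patch:

    rt_memblock_add_memory("memory@40000000", 0x40000000, 0x80000000, MEMBLOCK_NONE);
    rt_memblock_reserve_memory("fdt", 0x48000000, 0x48020000, MEMBLOCK_NONE);
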
@@ -17,6 +17,12 @@ if ARCH_ARMV8 && ARCH_CPU_64BIT
        select ARCH_USING_HW_THREAD_SELF
        default y if RT_USING_OFW
        default n
    config ARCH_HEAP_SIZE
        hex "Size of system heap"
        default 0x4000000
    config ARCH_INIT_PAGE_SIZE
        hex "Size of init page region"
        default 0x200000
endmenu
endif

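The two new options size the boot-time heap and the initial page pool that rt_hw_common_setup() carves out right behind the kernel image. With the defaults above, the generated rtconfig.h would contain lines along these lines (illustrative; the exact output depends on the build system):

    #define ARCH_HEAP_SIZE      0x4000000 /* 64 MiB system heap */
    #define ARCH_INIT_PAGE_SIZE 0x200000  /* 2 MiB init page region */
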
@@ -828,14 +828,9 @@ void rt_hw_mem_setup_early(unsigned long *tbl0, unsigned long *tbl1,
    int ret;
    unsigned long count = (size + ARCH_SECTION_MASK) >> ARCH_SECTION_SHIFT;
    unsigned long normal_attr = MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_MEM);

#ifdef RT_USING_SMART
    unsigned long va = KERNEL_VADDR_START;
#else
    extern unsigned char _start;
    unsigned long va = (unsigned long) &_start;
    unsigned long va = (unsigned long) &_start - pv_off;
    va = RT_ALIGN_DOWN(va, 0x200000);
#endif

    /* setup pv off */
    rt_kmem_pvoff_set(pv_off);

@@ -25,6 +25,7 @@
#include <rtdevice.h>
#include <gic.h>
#include <gicv3.h>
#include <mm_memblock.h>

#define SIZE_KB 1024
#define SIZE_MB (1024 * SIZE_KB)
@@ -198,38 +199,15 @@ rt_inline void cpu_info_init(void)
#endif /* RT_USING_HWTIMER */
}

rt_inline rt_bool_t is_kernel_aspace(const char *name)
{
    static char * const names[] =
    {
        "kernel",
        "memheap",
    };

    if (!name)
    {
        return RT_FALSE;
    }

    for (int i = 0; i < RT_ARRAY_SIZE(names); ++i)
    {
        if (!rt_strcmp(names[i], name))
        {
            return RT_TRUE;
        }
    }

    return RT_FALSE;
}

void rt_hw_common_setup(void)
{
    rt_size_t mem_region_nr;
    rt_region_t *mem_region;
    rt_size_t page_best_start;
    rt_region_t platform_mem_region;
    rt_size_t kernel_start, kernel_end;
    rt_size_t heap_start, heap_end;
    rt_size_t init_page_start, init_page_end;
    rt_size_t fdt_start, fdt_end;
    rt_region_t init_page_region = { 0 };
    rt_region_t platform_mem_region = { 0 };
    static struct mem_desc platform_mem_desc;
    void *kernel_start, *kernel_end, *memheap_start = RT_NULL, *memheap_end = RT_NULL;

    system_vectors_init();

@@ -239,61 +217,42 @@ void rt_hw_common_setup(void)
    rt_hw_mmu_map_init(&rt_kernel_space, (void*)0xffffd0000000, 0x10000000, MMUTable, 0);
#endif

    kernel_start = rt_kmem_v2p((void *)&_start) - 64;
    kernel_end = rt_kmem_v2p((void *)&_end);
    kernel_start = RT_ALIGN_DOWN((rt_size_t)rt_kmem_v2p((void *)&_start) - 64, ARCH_PAGE_SIZE);
    kernel_end = RT_ALIGN((rt_size_t)rt_kmem_v2p((void *)&_end), ARCH_PAGE_SIZE);
    heap_start = kernel_end;
    heap_end = RT_ALIGN(heap_start + ARCH_HEAP_SIZE, ARCH_PAGE_SIZE);
    init_page_start = heap_end;
    init_page_end = RT_ALIGN(init_page_start + ARCH_INIT_PAGE_SIZE, ARCH_PAGE_SIZE);
    fdt_start = init_page_end;
    fdt_end = RT_ALIGN(fdt_start + fdt_size, ARCH_PAGE_SIZE);

    if (!rt_fdt_commit_memregion_request(&mem_region, &mem_region_nr, RT_TRUE))
    {
        const char *name = "memheap";
    platform_mem_region.start = kernel_start;
    platform_mem_region.end = fdt_end;

        while (mem_region_nr --> 0)
        {
            if (mem_region->name == name || !rt_strcmp(mem_region->name, name))
            {
                memheap_start = (void *)mem_region->start;
                memheap_end = (void *)mem_region->end;
    rt_memblock_reserve_memory("kernel", kernel_start, kernel_end, MEMBLOCK_NONE);
    rt_memblock_reserve_memory("memheap", heap_start, heap_end, MEMBLOCK_NONE);
    rt_memblock_reserve_memory("init-page", init_page_start, init_page_end, MEMBLOCK_NONE);
    rt_memblock_reserve_memory("fdt", fdt_start, fdt_end, MEMBLOCK_NONE);

                break;
            }
            mem_region++;
        }
    }
    rt_memmove((void *)(fdt_start - PV_OFFSET), (void *)(fdt_ptr - PV_OFFSET), fdt_size);
    fdt_ptr = (void *)fdt_start;

    page_best_start = (rt_size_t)(memheap_end ? : kernel_end);
    rt_system_heap_init((void *)(heap_start - PV_OFFSET), (void *)(heap_end - PV_OFFSET));

    if (memheap_end && fdt_ptr > kernel_start)
    {
        rt_memmove(memheap_end - PV_OFFSET, fdt_ptr - PV_OFFSET, fdt_size);
    init_page_region.start = init_page_start - PV_OFFSET;
    init_page_region.end = init_page_end - PV_OFFSET;
    rt_page_init(init_page_region);

        fdt_ptr = memheap_end;
    /* create MMU mapping of kernel memory */
    platform_mem_region.start = RT_ALIGN_DOWN(platform_mem_region.start, ARCH_PAGE_SIZE);
    platform_mem_region.end = RT_ALIGN(platform_mem_region.end, ARCH_PAGE_SIZE);

        page_best_start = (rt_size_t)fdt_ptr + fdt_size;
    }
    platform_mem_desc.paddr_start = platform_mem_region.start;
    platform_mem_desc.vaddr_start = platform_mem_region.start - PV_OFFSET;
    platform_mem_desc.vaddr_end = platform_mem_region.end - PV_OFFSET - 1;
    platform_mem_desc.attr = NORMAL_MEM;

    rt_fdt_commit_memregion_early(&(rt_region_t)
    {
        .name = "fdt",
        .start = (rt_size_t)fdt_ptr,
        .end = (rt_size_t)(fdt_ptr + fdt_size),
    }, RT_TRUE);

    fdt_ptr -= PV_OFFSET;

    rt_fdt_commit_memregion_early(&(rt_region_t)
    {
        .name = "kernel",
        .start = (rt_size_t)kernel_start,
        .end = (rt_size_t)kernel_end,
    }, RT_TRUE);

#ifndef RT_USING_SMART
    rt_fdt_commit_memregion_early(&(rt_region_t)
    {
        .name = "null",
        .start = (rt_size_t)RT_NULL,
        .end = (rt_size_t)RT_NULL + ARCH_PAGE_SIZE,
    }, RT_TRUE);
#endif /* !RT_USING_SMART */
    rt_hw_mmu_setup(&rt_kernel_space, &platform_mem_desc, 1);

    if (rt_fdt_prefetch(fdt_ptr))
    {
@@ -307,143 +266,9 @@

    rt_fdt_scan_memory();

    if (memheap_start && memheap_end)
    {
        rt_system_heap_init(memheap_start - PV_OFFSET, memheap_end - PV_OFFSET);
    }
    rt_memblock_setup_memory_environment();

    platform_mem_region.start = ~0UL;
    platform_mem_region.end = 0;

    if (!rt_fdt_commit_memregion_request(&mem_region, &mem_region_nr, RT_TRUE))
    {
        LOG_I("Reserved memory:");

        while (mem_region_nr --> 0)
        {
            if (is_kernel_aspace(mem_region->name))
            {
                if (platform_mem_region.start > mem_region->start)
                {
                    platform_mem_region.start = mem_region->start;
                }

                if (platform_mem_region.end < mem_region->end)
                {
                    platform_mem_region.end = mem_region->end;
                }
            }

            LOG_I("  %-*.s [%p, %p]", RT_NAME_MAX, mem_region->name, mem_region->start, mem_region->end);

            ++mem_region;
        }
    }

    if (!rt_fdt_commit_memregion_request(&mem_region, &mem_region_nr, RT_FALSE))
    {
        rt_ubase_t best_offset = ~0UL;
        rt_region_t *usable_mem_region = mem_region, *page_region = RT_NULL;
        rt_region_t init_page_region = { 0 };
        rt_region_t defer_hi = { 0 };
        rt_err_t error;

        LOG_I("Usable memory:");

        for (int i = 0; i < mem_region_nr; ++i, ++mem_region)
        {
            if (!mem_region->name)
            {
                continue;
            }

            if (platform_mem_region.start > mem_region->start)
            {
                platform_mem_region.start = mem_region->start;
            }

            if (platform_mem_region.end < mem_region->end)
            {
                platform_mem_region.end = mem_region->end;
            }

            if (mem_region->start >= page_best_start &&
                mem_region->start - page_best_start < best_offset &&
                /* MUST >= 1MB */
                mem_region->end - mem_region->start >= SIZE_MB)
            {
                page_region = mem_region;

                best_offset = page_region->start - page_best_start;
            }

            LOG_I("  %-*.s [%p, %p]", RT_NAME_MAX, mem_region->name, mem_region->start, mem_region->end);

        }

        RT_ASSERT(page_region != RT_NULL);

        /* don't map more than ARCH_EARLY_MAP_SIZE */
        if (page_region->end - page_region->start > ARCH_PAGE_INIT_THRESHOLD)
        {
            defer_hi.name = page_region->name;
            defer_hi.end = page_region->end;
            defer_hi.start = RT_ALIGN_DOWN(page_region->start + ARCH_PAGE_INIT_THRESHOLD,
                                           ARCH_SECTION_SIZE);
            page_region->end = defer_hi.start;
        }

        init_page_region.start = page_region->start - PV_OFFSET;
        init_page_region.end = page_region->end - PV_OFFSET;

        rt_page_init(init_page_region);

        platform_mem_region.start = RT_ALIGN(platform_mem_region.start, ARCH_PAGE_SIZE);
        platform_mem_region.end = RT_ALIGN_DOWN(platform_mem_region.end, ARCH_PAGE_SIZE);
        RT_ASSERT(platform_mem_region.end - platform_mem_region.start != 0);

        platform_mem_desc.paddr_start = platform_mem_region.start;
        platform_mem_desc.vaddr_start = platform_mem_region.start - PV_OFFSET;
        platform_mem_desc.vaddr_end = platform_mem_region.end - PV_OFFSET - 1;
        platform_mem_desc.attr = NORMAL_MEM;

        rt_hw_mmu_setup(&rt_kernel_space, &platform_mem_desc, 1);

        rt_fdt_earlycon_kick(FDT_EARLYCON_KICK_UPDATE);

        mem_region = usable_mem_region;

        if (defer_hi.start)
        {
            /* to virt address */
            init_page_region.start = defer_hi.start - PV_OFFSET;
            init_page_region.end = defer_hi.end - PV_OFFSET;
            error = rt_page_install(init_page_region);

            if (error)
            {
                LOG_W("Deferred page installation FAILED:");
                LOG_W("  %-*.s [%p, %p]", RT_NAME_MAX,
                      defer_hi.name, defer_hi.start, defer_hi.end);
            }
            else
            {
                LOG_I("Deferred page installation SUCCEED:");
                LOG_I("  %-*.s [%p, %p]", RT_NAME_MAX,
                      defer_hi.name, defer_hi.start, defer_hi.end);
            }
        }

        for (int i = 0; i < mem_region_nr; ++i, ++mem_region)
        {
            if (mem_region != page_region && mem_region->name)
            {
                init_page_region.start = mem_region->start - PV_OFFSET;
                init_page_region.end = mem_region->end - PV_OFFSET;
                rt_page_install(init_page_region);
            }
        }
    }
    rt_fdt_earlycon_kick(FDT_EARLYCON_KICK_UPDATE);

    rt_fdt_unflatten();

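Taken together, rt_hw_common_setup() now reserves one contiguous physical run right after the kernel image, sized by the new Kconfig options, and copies the device tree to its end before scanning memory; a sketch of that layout (boundaries page aligned, sizes configuration dependent):

    /*
     * kernel_start                                                      fdt_end
     * |  kernel image  |  memheap        |  init pages          |  fdt      |
     * |  _start.._end  |  ARCH_HEAP_SIZE |  ARCH_INIT_PAGE_SIZE |  fdt_size |
     *
     * The whole [kernel_start, fdt_end) range is reserved in memblock before
     * rt_fdt_scan_memory() runs, and platform_mem_region spans exactly this
     * range for the early mapping done by rt_hw_mmu_setup().
     */
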