[rt-smart] PV_OFFSET as a variable (#6904)
* [rt-smart/mem] remove pv_offset
* [rt-smart] list kernel space command
* [rt-smart] restore ioremap region
* [revert] restore kernel space isolation
* [rt-smart/pv_off] code format
* [rt-smart] add get_pvoff()
* [pvoffset] pvoff as constant for C codes
* [pvoff] pvoff as interfaces
parent fc1aced665
commit 2d09749086
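In short, the PV offset (the offset between the kernel's physical load address and its link-time virtual address) is no longer a Kconfig constant: it is stored in a variable in the new components/mm/mm_kmem.c and read through rt_kmem_pvoff(), while mm_aspace.h redefines the PV_OFFSET macro as (rt_kmem_pvoff()) so existing C code keeps compiling. A minimal sketch of how the new interface is meant to be used; only rt_kmem_pvoff(), rt_kmem_pvoff_set() and PV_OFFSET come from this commit, the example_* names are illustrative:

#include <rtthread.h>
#include <mm_aspace.h>   /* PV_OFFSET now expands to rt_kmem_pvoff() */

/* Early boot code records the runtime offset once; this is what the
 * patched rt_hw_mem_setup_early() / start_gcc.S paths now do. */
void example_record_pvoff(rt_ubase_t pv_off)
{
    rt_kmem_pvoff_set(pv_off);
}

/* Later, kernel code translates a kernel virtual address to its physical
 * address through the runtime value instead of a build-time macro, as
 * rt_hw_mmu_setup() does for mem_desc entries marked ARCH_MAP_FAILED. */
rt_size_t example_kernel_virt_to_phys(void *vaddr)
{
    return (rt_size_t)vaddr + PV_OFFSET;
}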
@@ -92,7 +92,6 @@ CONFIG_RT_USING_CACHE=y
 # CONFIG_RT_USING_CPU_FFS is not set
 CONFIG_ARCH_MM_MMU=y
 CONFIG_KERNEL_VADDR_START=0x40000000
-CONFIG_PV_OFFSET=0x0
 CONFIG_ARCH_RISCV=y
 CONFIG_ARCH_RISCV64=y
 
@@ -94,7 +94,7 @@ stack_size = 4096
 
 stack_lds = open('link_stacksize.lds', 'w')
 if GetDepend('__STACKSIZE__'): stack_size = GetDepend('__STACKSIZE__')
-stack_lds.write('__STACKSIZE__ = %d;' % stack_size)
+stack_lds.write('__STACKSIZE__ = %d;\n' % stack_size)
 stack_lds.close()
 
 # make a building
@@ -44,8 +44,8 @@ rt_region_t init_page_region =
 extern volatile rt_size_t MMUTable[__SIZE(VPN2_BIT)] __attribute__((aligned(4 * 1024)));
 
 struct mem_desc platform_mem_desc[] = {
-    {KERNEL_VADDR_START, KERNEL_VADDR_START + 0x4000000 - 1, KERNEL_VADDR_START + PV_OFFSET, NORMAL_MEM},
-    {0x1000, 0x3ffff000 - 1, 0x1000 + PV_OFFSET, DEVICE_MEM},
+    {KERNEL_VADDR_START, KERNEL_VADDR_START + 0x4000000 - 1, (rt_size_t)ARCH_MAP_FAILED, NORMAL_MEM},
+    {0x1000, 0x3ffff000 - 1, (rt_size_t)ARCH_MAP_FAILED, DEVICE_MEM},
 };
 
 #define NUM_MEM_DESC (sizeof(platform_mem_desc) / sizeof(platform_mem_desc[0]))
@@ -17,14 +17,14 @@
 extern unsigned int __bss_start;
 extern unsigned int __bss_end;
 
 #define RAM_SIZE (64 * 1024 * 1024)
 #define RAM_BASE (0x40000000)
-#define RAM_END (RAM_BASE + RAM_SIZE - PV_OFFSET)
+#define RAM_END_VADDR (KERNEL_VADDR_START + RAM_SIZE)
 
 #define RT_HW_HEAP_BEGIN ((void *)&__bss_end)
 #define RT_HW_HEAP_END ((void *)(((rt_size_t)RT_HW_HEAP_BEGIN) + 16 * 1024 * 1024))
 #define RT_HW_PAGE_START (RT_HW_HEAP_END)
-#define RT_HW_PAGE_END ((void *)RAM_END)
+#define RT_HW_PAGE_END ((void *)RAM_END_VADDR)
 
 void rt_hw_board_init(void);
 void rt_init_user_mem(struct rt_thread *thread, const char *name, unsigned long *entry);
@@ -55,7 +55,6 @@
 #define RT_USING_CACHE
 #define ARCH_MM_MMU
 #define KERNEL_VADDR_START 0x40000000
-#define PV_OFFSET 0x0
 #define ARCH_RISCV
 #define ARCH_RISCV64
 
@@ -26,7 +26,7 @@
 
 #ifdef RT_USING_SMART
 struct mem_desc platform_mem_desc[] = {
-    {KERNEL_VADDR_START, KERNEL_VADDR_START + 0x0fffffff, KERNEL_VADDR_START + PV_OFFSET, NORMAL_MEM}
+    {KERNEL_VADDR_START, KERNEL_VADDR_START + 0x0fffffff, (rt_size_t)ARCH_MAP_FAILED, NORMAL_MEM}
 };
 #else
 struct mem_desc platform_mem_desc[] = {
@@ -11,7 +11,6 @@
  * add smp ipi init
  */
 
-#include "mm_aspace.h"
 #include <rthw.h>
 #include <rtthread.h>
 #include <mmu.h>
@@ -21,6 +20,7 @@
 #endif
 
 #include "board.h"
+#include <mm_aspace.h>
 #include <mm_page.h>
 #include <interrupt.h>
 
@@ -33,7 +33,7 @@
 
 #ifdef RT_USING_SMART
 struct mem_desc platform_mem_desc[] = {
-    {KERNEL_VADDR_START, KERNEL_VADDR_START + 0x0fffffff, KERNEL_VADDR_START + PV_OFFSET, NORMAL_MEM}
+    {KERNEL_VADDR_START, KERNEL_VADDR_START + 0x0fffffff, (rt_size_t)ARCH_MAP_FAILED, NORMAL_MEM}
 };
 #else
 
@@ -26,7 +26,6 @@ extern unsigned char __bss_end;
 #else
 #define HEAP_END ((void *)HEAP_BEGIN + 64 * 1024 * 1024)
 #define KERNEL_VADDR_START 0x40000000
-#define PV_OFFSET 0
 #endif
 
 void rt_hw_board_init(void);
@@ -31,7 +31,7 @@ stack_size = 4096
 
 stack_lds = open('link_stacksize.lds', 'w')
 if GetDepend('__STACKSIZE__'): stack_size = GetDepend('__STACKSIZE__')
-stack_lds.write('__STACKSIZE__ = %d;' % stack_size)
+stack_lds.write('__STACKSIZE__ = %d;\n' % stack_size)
 stack_lds.close()
 
 # make a building
@@ -35,7 +35,7 @@ rt_region_t init_page_region = {(rt_size_t)RT_HW_PAGE_START, (rt_size_t)RT_HW_PA
 extern size_t MMUTable[];
 
 struct mem_desc platform_mem_desc[] = {
-    {KERNEL_VADDR_START, KERNEL_VADDR_START + 0x10000000 - 1, KERNEL_VADDR_START + PV_OFFSET, NORMAL_MEM},
+    {KERNEL_VADDR_START, (rt_size_t)RT_HW_PAGE_END - 1, (rt_size_t)ARCH_MAP_FAILED, NORMAL_MEM},
 };
 
 #define NUM_MEM_DESC (sizeof(platform_mem_desc) / sizeof(platform_mem_desc[0]))
@@ -54,11 +54,17 @@ void primary_cpu_entry(void)
 
 #define IOREMAP_SIZE (1ul << 30)
 
+#ifndef ARCH_KERNEL_IN_HIGH_VA
+#define IOREMAP_VEND USER_VADDR_START
+#else
+#define IOREMAP_VEND 0ul
+#endif
+
 void rt_hw_board_init(void)
 {
 #ifdef RT_USING_SMART
     /* init data structure */
-    rt_hw_mmu_map_init(&rt_kernel_space, (void *)(USER_VADDR_START - IOREMAP_SIZE), IOREMAP_SIZE, (rt_size_t *)MMUTable, 0);
+    rt_hw_mmu_map_init(&rt_kernel_space, (void *)(IOREMAP_VEND - IOREMAP_SIZE), IOREMAP_SIZE, (rt_size_t *)MMUTable, PV_OFFSET);
 
     /* init page allocator */
     rt_page_init(init_page_region);
@@ -31,10 +31,6 @@
 #include <mmu.h>
 #include <tlb.h>
 
-#ifndef RT_USING_SMART
-#define PV_OFFSET 0
-#endif
-
 static void _aspace_unmap(rt_aspace_t aspace, void *addr, rt_size_t length);
 static void *_find_free(rt_aspace_t aspace, void *prefer, rt_size_t req_size,
                         void *limit_start, rt_size_t limit_size,
@@ -488,6 +484,7 @@ int rt_aspace_map_phy_static(rt_aspace_t aspace, rt_varea_t varea,
         varea->start = hint->prefer;
         varea->size = hint->map_size;
         hint->flags |= MMF_MAP_FIXED;
+        LOG_D("%s: start %p size %p phy at %p", __func__, varea->start, varea->size, pa_off << MM_PAGE_SHIFT);
         err = _mm_aspace_map_phy(aspace, varea, hint, attr, pa_off, ret_va);
     }
     else
@@ -743,8 +740,16 @@ int rt_aspace_traversal(rt_aspace_t aspace,
 
 static int _dump(rt_varea_t varea, void *arg)
 {
-    rt_kprintf("%s[%p - %p]\n", varea->mem_obj->get_name(varea), varea->start,
-               varea->start + varea->size);
+    if (varea->mem_obj && varea->mem_obj->get_name)
+    {
+        rt_kprintf("[%p - %p] %s\n", varea->start, varea->start + varea->size,
+                   varea->mem_obj->get_name(varea));
+    }
+    else
+    {
+        rt_kprintf("[%p - %p] phy-map\n", varea->start, varea->start + varea->size);
+        rt_kprintf("\t\\_ paddr = %p\n", varea->offset << MM_PAGE_SHIFT);
+    }
     return 0;
 }
 
@@ -20,6 +20,7 @@
 
 #define MM_PAGE_SHIFT 12
 #define MM_PA_TO_OFF(pa) ((uintptr_t)(pa) >> MM_PAGE_SHIFT)
+#define PV_OFFSET (rt_kmem_pvoff())
 
 #ifndef RT_USING_SMP
 typedef rt_spinlock_t mm_spinlock;
@@ -201,4 +202,8 @@ void rt_varea_free_pages(rt_varea_t varea);
 
 void rt_varea_offload_page(rt_varea_t varea, void *vaddr, rt_size_t size);
 
+rt_ubase_t rt_kmem_pvoff(void);
+
+void rt_kmem_pvoff_set(rt_ubase_t pvoff);
+
 #endif /* __MM_ASPACE_H__ */
components/mm/mm_kmem.c (new file, 32 lines)
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-11-14     WangXiaoyao  the first version
+ */
+
+#include "mm_aspace.h"
+#include "mm_private.h"
+
+static void list_kernel_space(void)
+{
+    rt_aspace_print_all(&rt_kernel_space);
+}
+MSH_CMD_EXPORT(list_kernel_space, List varea in kernel space);
+
+void rt_kmem_list_varea(void) __attribute__((alias("list_kernel_space")));
+
+static rt_ubase_t rt_pv_offset;
+
+rt_ubase_t rt_kmem_pvoff(void)
+{
+    return rt_pv_offset;
+}
+
+void rt_kmem_pvoff_set(rt_ubase_t pvoff)
+{
+    rt_pv_offset = pvoff;
+}
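The new file above also backs the "[rt-smart] list kernel space command" item in the commit message: list_kernel_space is exported to msh and aliased to rt_kmem_list_varea() for C callers. A hedged sketch of triggering the same dump from C; the extern declaration is written locally here because this commit does not show it being added to a public header:

#include <rtthread.h>

/* Declared locally for the sketch; the symbol is provided by mm_kmem.c
 * as an alias of the static list_kernel_space() command handler. */
extern void rt_kmem_list_varea(void);

static void example_dump_kernel_aspace(void)
{
    /* Prints each varea of rt_kernel_space via rt_aspace_print_all(),
     * the same output as running "list_kernel_space" in the msh shell. */
    rt_kmem_list_varea();
}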
@@ -40,8 +40,6 @@ CT_ASSERT(order_huge_pg, RT_PAGE_MAX_ORDER > ARCH_PAGE_SHIFT - 2);
 CT_ASSERT(size_width, sizeof(rt_size_t) == sizeof(rt_size_t));
 #endif /* ARCH_CPU_64BIT */
 
-#else
-#define PV_OFFSET 0
 #endif /* RT_USING_SMART */
 
 static rt_size_t init_mpr_align_start;
@@ -87,13 +87,6 @@ config KERNEL_VADDR_START
     default 0x80000000 if ARCH_RISCV
     depends on ARCH_MM_MMU
 
-config PV_OFFSET
-    hex "The offset of kernel physical address and virtural address"
-    default 0x1000040000000 if ARCH_ARMV8
-    default 0xa0000000 if ARCH_ARM
-    default 0x0 if ARCH_RISCV
-    depends on ARCH_MM_MMU
-
 config RT_IOREMAP_LATE
     bool "Support to create IO mapping in the kernel address space after system initlalization."
     default n
@@ -309,6 +309,9 @@ void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
                              mdesc->vaddr_start + 1,
                          .prefer = (void *)mdesc->vaddr_start};
 
+        if (mdesc->paddr_start == (rt_size_t)ARCH_MAP_FAILED)
+            mdesc->paddr_start = mdesc->vaddr_start + PV_OFFSET;
+
         rt_aspace_map_phy_static(aspace, &mdesc->varea, &hint, attr,
                                  mdesc->paddr_start >> MM_PAGE_SHIFT, &err);
         mdesc++;
@@ -656,10 +659,14 @@ int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
     return err;
 }
 
-void rt_hw_mmu_setup_early(unsigned long *tbl0, unsigned long *tbl1,
+void rt_hw_mem_setup_early(unsigned long *tbl0, unsigned long *tbl1,
                            unsigned long size, unsigned long pv_off)
 {
     int ret;
+
+    /* setup pv off */
+    rt_kmem_pvoff_set(pv_off);
+
     unsigned long va = KERNEL_VADDR_START;
     unsigned long count = (size + ARCH_SECTION_MASK) >> ARCH_SECTION_SHIFT;
     unsigned long normal_attr = MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_MEM);
@@ -74,7 +74,7 @@ struct mem_desc
 struct rt_aspace;
 
 void rt_hw_mmu_ktbl_set(unsigned long tbl);
-void rt_hw_mmu_setup_early(unsigned long *tbl0, unsigned long *tbl1,
+void rt_hw_mem_setup_early(unsigned long *tbl0, unsigned long *tbl1,
                            unsigned long size, unsigned long pv_off);
 void rt_hw_mmu_setup(struct rt_aspace *aspace, struct mem_desc *mdesc,
                      int desc_nr);
@@ -45,6 +45,7 @@ void check_user_fault(struct rt_hw_exp_stack *regs, uint32_t pc_adj, char *info)
 #ifdef LWP_USING_CORE_DUMP
         lwp_core_dump(regs, pc_adj);
 #endif
+        backtrace((unsigned long)regs->pc, (unsigned long)regs->x30, (unsigned long)regs->x29);
         sys_exit(-1);
     }
 }
@@ -78,12 +78,12 @@ __start:
 
     eret                /* exception return. from EL2. continue from .L__in_el1 */
 
+.macro GET_PHY reg, symbol
+    adrp \reg, \symbol
+    add \reg, \reg, #:lo12:\symbol
+.endm
+
 .L__in_el1:
-#ifdef RT_USING_LWP
-    ldr x9, =PV_OFFSET
-#else
-    mov x9, #0
-#endif
     mov sp, x1          /* in EL1. Set sp to _start */
 
     /* Set CPACR_EL1 (Architecture Feature Access Control Register) to avoid trap from SIMD or float point instruction */
@@ -91,15 +91,13 @@ __start:
     msr cpacr_el1, x1
 
     /* clear bss */
-    adrp x1, __bss_start    /* get bss start address */
-    add x1, x1, #:lo12:__bss_start
-    adrp x2, __bss_end
-    add x1, x1, #:lo12:__bss_end
-    sub x2, x2, x1          /* get bss size */
+    GET_PHY x1, __bss_start
+    GET_PHY x2, __bss_end
+    sub x2, x2, x1          /* get bss size */
 
     and x3, x2, #7          /* x3 is < 7 */
     ldr x4, =~0x7
     and x2, x2, x4          /* mask ~7 */
 
 .L__clean_bss_loop:
     cbz x2, .L__clean_bss_loop_1
@@ -116,7 +114,7 @@ __start:
 .L__jump_to_entry:          /* jump to C code, should not return */
     bl mmu_tcr_init
 
-    adr x1, __start
+    adr x1, __start         /* install early page table */
     ldr x0, =~0x1fffff
     and x0, x1, x0
    add x1, x0, #0x1000
@@ -125,11 +123,13 @@ __start:
     msr ttbr1_el1, x1
     dsb sy
 
-    ldr x2, =0x40000000     /* map 1G memory for kernel space */
-#ifdef RT_USING_LWP
-    ldr x3, =PV_OFFSET
+#ifdef RT_USING_SMART
+    ldr x2, =__start
+    GET_PHY x3, __start
+    sub x3, x3, x2
 #endif
-    bl rt_hw_mmu_setup_early
+    ldr x2, =0x40000000     /* map 1G memory for kernel space */
+    bl rt_hw_mem_setup_early
 
     ldr x30, =after_mmu_enable  /* set LR to after_mmu_enable function, it's a v_addr */
 
@@ -152,7 +152,7 @@ __start:
     ret
 
 after_mmu_enable:
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     mrs x0, tcr_el1         /* disable ttbr0, only using kernel space */
     orr x0, x0, #(1 << 7)
     msr tcr_el1, x0
@@ -172,7 +172,7 @@ after_mmu_enable:
  * secondary cpu
  */
 
-.globl _secondary_cpu_entry
+.global _secondary_cpu_entry
 _secondary_cpu_entry:
     bl rt_hw_cpu_id_set
     adr x1, __start
@@ -225,10 +225,6 @@ _secondary_cpu_entry:
     eret                    /* exception return. from EL2. continue from .L__in_el1 */
 
 .L__in_el1_cpux:
-    adr x19, .L__in_el1_cpux
-    ldr x8, =.L__in_el1_cpux
-    sub x19, x19, x8        /* get PV_OFFSET */
-
     mrs x0, tpidr_el1
     /* each cpu init stack is 8k */
     sub x1, x1, x0, lsl #13
@@ -244,7 +240,7 @@ _secondary_cpu_entry:
 
     bl mmu_tcr_init
 
-    adr x1, __start
+    adr x1, __start         /* GET & setup early page table */
     ldr x0, =~0x1fffff
     and x0, x1, x0
     add x1, x0, #0x1000
|
|||||||
#include "ioremap.h"
|
#include "ioremap.h"
|
||||||
#else
|
#else
|
||||||
#define KERNEL_VADDR_START 0
|
#define KERNEL_VADDR_START 0
|
||||||
#define PV_OFFSET 0
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* level1 page table, each entry for 1MB memory. */
|
/* level1 page table, each entry for 1MB memory. */
|
||||||
@@ -59,6 +58,8 @@ void rt_hw_init_mmu_table(struct mem_desc *mdesc, rt_uint32_t size)
     /* set page table */
     for(; size > 0; size--)
     {
+        if (mdesc->paddr_start == (rt_uint32_t)ARCH_MAP_FAILED)
+            mdesc->paddr_start = mdesc->vaddr_start + PV_OFFSET;
         rt_hw_mmu_setmtt(mdesc->vaddr_start, mdesc->vaddr_end,
                          mdesc->paddr_start, mdesc->attr);
         mdesc++;
@@ -102,7 +102,10 @@ continue:
     isb
 
 #ifdef RT_USING_SMART
-    ldr r5, =PV_OFFSET
+    /* load r5 with PV_OFFSET */
+    ldr r7, =_reset
+    adr r5, _reset
+    sub r5, r5, r7
 
     mov r7, #0x100000
     sub r7, #1
@@ -166,6 +169,9 @@ bss_loop:
     strlo r0,[r1],#4        /* clear 4 bytes */
     blo bss_loop            /* loop until done */
 
+    mov r0, r5
+    bl rt_kmem_pvoff_set
+
 #ifdef RT_USING_SMP
     mrc p15, 0, r1, c1, c0, 1
     mov r0, #(1<<6)
@@ -173,7 +179,10 @@ bss_loop:
     mcr p15, 0, r1, c1, c0, 1   /* enable smp */
 #endif
 
-    /* initialize the mmu table and enable mmu */
+    /**
+     * void rt_hw_init_mmu_table(struct mem_desc *mdesc, rt_uint32_t size)
+     * initialize the mmu table and enable mmu
+     */
     ldr r0, =platform_mem_desc
     ldr r1, =platform_mem_desc_size
     ldr r1, [r1]
@@ -595,16 +604,19 @@ vector_resv:
     bl rt_hw_trap_resv
     b .
 
-#ifdef RT_USING_SMP
 .global rt_hw_clz
 rt_hw_clz:
     clz r0, r0
     bx lr
 
+#ifdef RT_USING_SMP
+
 .global rt_secondary_cpu_entry
 rt_secondary_cpu_entry:
 #ifdef RT_USING_SMART
-    ldr r5, =PV_OFFSET
+    ldr r0, =_reset
+    adr r5, _reset
+    sub r5, r5, r0
 
     ldr lr, =after_enable_mmu_n
     ldr r0, =init_mtbl
@@ -30,7 +30,6 @@
 #endif
 
 #ifndef RT_USING_SMART
-#define PV_OFFSET 0
 #define USER_VADDR_START 0
 #endif
 
@@ -533,6 +532,9 @@ void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
                              mdesc->vaddr_start + 1,
                          .prefer = (void *)mdesc->vaddr_start};
 
+        if (mdesc->paddr_start == (rt_size_t)ARCH_MAP_FAILED)
+            mdesc->paddr_start = mdesc->vaddr_start + PV_OFFSET;
+
         rt_aspace_map_phy_static(aspace, &mdesc->varea, &hint, attr,
                                  mdesc->paddr_start >> MM_PAGE_SHIFT, &err);
         mdesc++;
@@ -25,12 +25,12 @@
 #include <tlb.h>
 
 #ifdef RT_USING_SMART
+#include <board.h>
 #include <ioremap.h>
 #include <lwp_user_mm.h>
 #endif
 
 #ifndef RT_USING_SMART
-#define PV_OFFSET 0
 #define USER_VADDR_START 0
 #endif
 
@@ -476,6 +476,9 @@ void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
                              mdesc->vaddr_start + 1,
                          .prefer = (void *)mdesc->vaddr_start};
 
+        if (mdesc->paddr_start == (rt_size_t)ARCH_MAP_FAILED)
+            mdesc->paddr_start = mdesc->vaddr_start + PV_OFFSET;
+
         rt_aspace_map_phy_static(aspace, &mdesc->varea, &hint, attr,
                                  mdesc->paddr_start >> MM_PAGE_SHIFT, &err);
         mdesc++;