4
0
mirror of https://github.com/RT-Thread/rt-thread.git synced 2025-01-31 23:20:34 +08:00

修改:启动过程和 __start 解耦;对于 RT_AMP_SLAVE,跳过 CPU 非 0 核的等待 (Change: decouple the boot process from __start; under RT_AMP_SLAVE, skip the wait loop for non-zero CPU cores)

This commit is contained in:
wangqinglin 2023-06-05 14:18:00 +08:00 committed by guo
parent 0c29bed30f
commit 216d72ef1c
2 changed files with 30 additions and 19 deletions

View File

@ -535,10 +535,8 @@ unsigned long get_free_page(void)
{ {
if (!__init_page_array) if (!__init_page_array)
{ {
unsigned long temp_page_start; extern unsigned char __bss_end;
asm volatile("mov %0, sp" : "=r"(temp_page_start)); __init_page_array = (struct page_table *) RT_ALIGN((unsigned long) &__bss_end, 0x1000);
__init_page_array =
(struct page_table *)(temp_page_start & ~(ARCH_SECTION_MASK));
__page_off = 2; /* 0, 1 for ttbr0, ttrb1 */ __page_off = 2; /* 0, 1 for ttbr0, ttrb1 */
} }
__page_off++; __page_off++;
@ -778,7 +776,13 @@ void rt_hw_mem_setup_early(unsigned long *tbl0, unsigned long *tbl1,
/* setup pv off */ /* setup pv off */
rt_kmem_pvoff_set(pv_off); rt_kmem_pvoff_set(pv_off);
#ifdef RT_USING_SMART
unsigned long va = KERNEL_VADDR_START; unsigned long va = KERNEL_VADDR_START;
#else
extern unsigned char __start;
unsigned long va = (unsigned long) &__start;
#endif
unsigned long count = (size + ARCH_SECTION_MASK) >> ARCH_SECTION_SHIFT; unsigned long count = (size + ARCH_SECTION_MASK) >> ARCH_SECTION_SHIFT;
unsigned long normal_attr = MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_MEM); unsigned long normal_attr = MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_MEM);

View File

@ -21,15 +21,17 @@ __start:
mrs x0, tpidr_el1 mrs x0, tpidr_el1
cbz x0, .L__cpu_0 /* .L prefix is the local label in ELF */ cbz x0, .L__cpu_0 /* .L prefix is the local label in ELF */
#ifndef RT_AMP_SLAVE
/* cpu id > 0, stop */ /* cpu id > 0, stop */
/* cpu id == 0 will also goto here after returned from entry() if possible */ /* cpu id == 0 will also goto here after returned from entry() if possible */
.L__current_cpu_idle: .L__current_cpu_idle:
wfe wfe
b .L__current_cpu_idle b .L__current_cpu_idle
#endif
.L__cpu_0: .L__cpu_0:
/* set stack before our code, Define stack pointer for current exception level */ /* set stack before our code, Define stack pointer for current exception level */
adr x1, __start adr x1, .el_stack_top
/* set up EL1 */ /* set up EL1 */
mrs x0, CurrentEL /* CurrentEL Register. bit 2, 3. Others reserved */ mrs x0, CurrentEL /* CurrentEL Register. bit 2, 3. Others reserved */
@ -117,9 +119,7 @@ __start:
.L__jump_to_entry: /* jump to C code, should not return */ .L__jump_to_entry: /* jump to C code, should not return */
bl mmu_tcr_init bl mmu_tcr_init
adr x1, __start /* install early page table */ adr x0, .early_mmu_table /* install early page table */
ldr x0, =~0x1fffff
and x0, x1, x0
add x1, x0, #0x1000 add x1, x0, #0x1000
msr ttbr0_el1, x0 msr ttbr0_el1, x0
@ -134,7 +134,7 @@ __start:
mov x3,0 mov x3,0
#endif #endif
ldr x2, =0x40000000 /* map 1G memory for kernel space */ ldr x2, =0x01000000 /* map 16M memory for kernel space */
bl rt_hw_mem_setup_early bl rt_hw_mem_setup_early
ldr x30, =after_mmu_enable /* set LR to after_mmu_enable function, it's a v_addr */ ldr x30, =after_mmu_enable /* set LR to after_mmu_enable function, it's a v_addr */
@ -168,7 +168,7 @@ after_mmu_enable:
mov x0, #1 mov x0, #1
msr spsel, x0 msr spsel, x0
adr x1, __start adr x1, .el_stack_top
mov sp, x1 /* sp_el1 set to _start */ mov sp, x1 /* sp_el1 set to _start */
b rtthread_startup b rtthread_startup
@ -181,7 +181,7 @@ after_mmu_enable:
.global _secondary_cpu_entry .global _secondary_cpu_entry
_secondary_cpu_entry: _secondary_cpu_entry:
bl rt_hw_cpu_id_set bl rt_hw_cpu_id_set
adr x1, __start adr x1, .el_stack_top
/* set up EL1 */ /* set up EL1 */
mrs x0, CurrentEL /* CurrentEL Register. bit 2, 3. Others reserved */ mrs x0, CurrentEL /* CurrentEL Register. bit 2, 3. Others reserved */
@ -246,9 +246,7 @@ _secondary_cpu_entry:
bl mmu_tcr_init bl mmu_tcr_init
adr x1, __start /* GET & setup early page table */ adr x0, .early_mmu_table /* GET & setup early page table */
ldr x0, =~0x1fffff
and x0, x1, x0
add x1, x0, #0x1000 add x1, x0, #0x1000
msr ttbr0_el1, x0 msr ttbr0_el1, x0
@ -288,9 +286,18 @@ after_mmu_enable_cpux:
msr spsel, x0 msr spsel, x0
mrs x0, tpidr_el1 mrs x0, tpidr_el1
/* each cpu init stack is 8k */ /* each cpu init stack is 8k */
adr x1, __start adr x1, .el_stack_top
sub x1, x1, x0, lsl #13 sub x1, x1, x0, lsl #13
mov sp, x1 /* in EL1. Set sp to _start */ mov sp, x1 /* in EL1. Set sp to _start */
b rt_hw_secondary_cpu_bsp_start b rt_hw_secondary_cpu_bsp_start
#endif #endif
.align 12
.early_mmu_table:
.space (4096 * 2)
.align 12
.el_stack:
.space (8192)
.el_stack_top: