[format] cleanup libcpu/aarch64 (#8950)

* [dfs] fixup compiler warning

Signed-off-by: Shell <smokewood@qq.com>

* [format] libcpu/arm64: separate context switching codes

Signed-off-by: Shell <smokewood@qq.com>

* [format] libcpu/arm64: separate vector

Signed-off-by: Shell <smokewood@qq.com>

* [format] libcpu/arm64: separate context_gcc.S

Signed-off-by: Shell <smokewood@qq.com>

* [format] libcpu/arm64: moving headers to include directory

Signed-off-by: Shell <smokewood@qq.com>

* style: replace tab with space

---------

Signed-off-by: Shell <smokewood@qq.com>
Shell 2024-06-28 00:23:09 +08:00 committed by GitHub
parent 3d8c27bcc9
commit 90917cc75a
38 changed files with 1064 additions and 804 deletions


@@ -494,7 +494,7 @@ int dfs_tmpfs_stat(struct dfs_filesystem *fs,
     if (d_file == NULL)
         return -ENOENT;

-    st->st_dev = (rt_device_t)dfs_filesystem_lookup(fs->path);
+    st->st_dev = (dev_t)dfs_filesystem_lookup(fs->path);
     st->st_mode = S_IFREG | S_IRUSR | S_IRGRP | S_IROTH |
                   S_IWUSR | S_IWGRP | S_IWOTH;
     if (d_file->type == TMPFS_TYPE_DIR)
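The hunk above is the "[dfs] fixup compiler warning" part of the series: st_dev is an integer type (dev_t), while dfs_filesystem_lookup() returns a pointer, so the old (rt_device_t) cast still left an implicit pointer-to-integer assignment for gcc to warn about. A minimal C sketch of the pattern, using simplified stand-in types rather than the actual dfs source:

#include <sys/types.h>   /* dev_t */

struct rt_device { int id; };
typedef struct rt_device *rt_device_t;

struct stat_like { dev_t st_dev; };

/* stand-in for dfs_filesystem_lookup(): returns a pointer-sized handle */
static rt_device_t lookup_stub(void)
{
    static struct rt_device dev;
    return &dev;
}

int main(void)
{
    struct stat_like st;

    /* old code: the (rt_device_t) cast is a no-op on a pointer, so the
     * assignment is still pointer-to-integer and gcc warns:
     *     st.st_dev = (rt_device_t)lookup_stub();
     * fixed code: cast to the integer type of the field itself
     * (dev_t is wide enough on typical LP64 targets): */
    st.st_dev = (dev_t)lookup_stub();
    return st.st_dev ? 0 : 1;
}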


@@ -14,11 +14,11 @@
 #define __ASSEMBLY__
 #endif

-#include "rtconfig.h"
-#include "asm-generic.h"
-#include "asm-fpu.h"
-#include "armv8.h"
-#include "lwp_arch.h"
+#include <rtconfig.h>
+#include <asm-generic.h>
+#include <asm-fpu.h>
+#include <armv8.h>
+#include <lwp_arch.h>

 /*********************
  * SPSR BIT          *


@@ -6,7 +6,14 @@ Import('rtconfig')
 cwd = GetCurrentDir()
 src = Glob('*.c') + Glob('*.cpp') + Glob('*.S')
-CPPPATH = [cwd]
+CPPPATH = [cwd, cwd + '/include']
+
+if GetDepend('RT_USING_SMP'):
+    core_model = 'mp'
+else:
+    core_model = 'up'
+
+src += Glob(core_model + '/*.S')

 if GetDepend('RT_USING_OFW') == False:
     SrcRemove(src, ['setup.c', 'cpu_psci.c', 'psci.c'])


@@ -1,79 +0,0 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 Jesven the first version
* 2023-07-13 GuEe-GUI append Q16 ~ Q31
*/
.macro SAVE_FPU, reg
STR Q0, [\reg, #-0x10]!
STR Q1, [\reg, #-0x10]!
STR Q2, [\reg, #-0x10]!
STR Q3, [\reg, #-0x10]!
STR Q4, [\reg, #-0x10]!
STR Q5, [\reg, #-0x10]!
STR Q6, [\reg, #-0x10]!
STR Q7, [\reg, #-0x10]!
STR Q8, [\reg, #-0x10]!
STR Q9, [\reg, #-0x10]!
STR Q10, [\reg, #-0x10]!
STR Q11, [\reg, #-0x10]!
STR Q12, [\reg, #-0x10]!
STR Q13, [\reg, #-0x10]!
STR Q14, [\reg, #-0x10]!
STR Q15, [\reg, #-0x10]!
STR Q16, [\reg, #-0x10]!
STR Q17, [\reg, #-0x10]!
STR Q18, [\reg, #-0x10]!
STR Q19, [\reg, #-0x10]!
STR Q20, [\reg, #-0x10]!
STR Q21, [\reg, #-0x10]!
STR Q22, [\reg, #-0x10]!
STR Q23, [\reg, #-0x10]!
STR Q24, [\reg, #-0x10]!
STR Q25, [\reg, #-0x10]!
STR Q26, [\reg, #-0x10]!
STR Q27, [\reg, #-0x10]!
STR Q28, [\reg, #-0x10]!
STR Q29, [\reg, #-0x10]!
STR Q30, [\reg, #-0x10]!
STR Q31, [\reg, #-0x10]!
.endm
.macro RESTORE_FPU, reg
LDR Q31, [\reg], #0x10
LDR Q30, [\reg], #0x10
LDR Q29, [\reg], #0x10
LDR Q28, [\reg], #0x10
LDR Q27, [\reg], #0x10
LDR Q26, [\reg], #0x10
LDR Q25, [\reg], #0x10
LDR Q24, [\reg], #0x10
LDR Q23, [\reg], #0x10
LDR Q22, [\reg], #0x10
LDR Q21, [\reg], #0x10
LDR Q20, [\reg], #0x10
LDR Q19, [\reg], #0x10
LDR Q18, [\reg], #0x10
LDR Q17, [\reg], #0x10
LDR Q16, [\reg], #0x10
LDR Q15, [\reg], #0x10
LDR Q14, [\reg], #0x10
LDR Q13, [\reg], #0x10
LDR Q12, [\reg], #0x10
LDR Q11, [\reg], #0x10
LDR Q10, [\reg], #0x10
LDR Q9, [\reg], #0x10
LDR Q8, [\reg], #0x10
LDR Q7, [\reg], #0x10
LDR Q6, [\reg], #0x10
LDR Q5, [\reg], #0x10
LDR Q4, [\reg], #0x10
LDR Q3, [\reg], #0x10
LDR Q2, [\reg], #0x10
LDR Q1, [\reg], #0x10
LDR Q0, [\reg], #0x10
.endm


@@ -5,7 +5,7 @@
 *
 * Change Logs:
 * Date           Author       Notes
-* 2019-03-29     quanzhao     the first version
+* 2019-03-29     quanzhao     the first version
 */
 #include <rthw.h>
 #include <rtdef.h>


@@ -1,642 +0,0 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 Jesven the first version
* 2023-06-24 WangXiaoyao Support backtrace for user thread
* 2024-01-06 Shell Fix barrier on irq_disable/enable
* 2024-01-18 Shell fix implicit dependency of cpuid management
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include "rtconfig.h"
#include "asm-generic.h"
#include "asm-fpu.h"
#include "armv8.h"
#ifndef RT_USING_SMP
.section .data
rt_interrupt_from_thread: .zero 8
rt_interrupt_to_thread: .zero 8
rt_thread_switch_interrupt_flag: .zero 8
#endif
.text
/**
* #ifdef RT_USING_OFW
* void rt_hw_cpu_id_set(long cpuid)
* #else
* void rt_hw_cpu_id_set(void)
* #endif
*/
.type rt_hw_cpu_id_set, @function
rt_hw_cpu_id_set:
#ifdef ARCH_USING_GENERIC_CPUID
.globl rt_hw_cpu_id_set
#else /* !ARCH_USING_GENERIC_CPUID */
.weak rt_hw_cpu_id_set
#endif /* ARCH_USING_GENERIC_CPUID */
#ifndef RT_USING_OFW
mrs x0, mpidr_el1 /* MPIDR_EL1: Multi-Processor Affinity Register */
#ifdef ARCH_ARM_CORTEX_A55
lsr x0, x0, #8
#endif /* ARCH_ARM_CORTEX_A55 */
and x0, x0, #15
#endif /* !RT_USING_OFW */
#ifdef ARCH_USING_HW_THREAD_SELF
msr tpidrro_el0, x0
#else /* !ARCH_USING_HW_THREAD_SELF */
msr tpidr_el1, x0
#endif /* ARCH_USING_HW_THREAD_SELF */
ret
/*
int rt_hw_cpu_id(void)
*/
.type rt_hw_cpu_id, @function
rt_hw_cpu_id:
#ifdef ARCH_USING_GENERIC_CPUID
.globl rt_hw_cpu_id
#else /* !ARCH_USING_GENERIC_CPUID */
.weak rt_hw_cpu_id
#endif /* ARCH_USING_GENERIC_CPUID */
#if RT_CPUS_NR > 1
#ifdef ARCH_USING_GENERIC_CPUID
mrs x0, tpidrro_el0
#else /* !ARCH_USING_GENERIC_CPUID */
mrs x0, tpidr_el1
#endif /* ARCH_USING_GENERIC_CPUID */
#else /* RT_CPUS_NR == 1 */
mov x0, xzr
#endif
ret
/*
void rt_hw_set_process_id(size_t id)
*/
.global rt_hw_set_process_id
rt_hw_set_process_id:
msr CONTEXTIDR_EL1, x0
ret
/*
*enable gtimer
*/
.globl rt_hw_gtimer_enable
rt_hw_gtimer_enable:
MOV X0,#1
MSR CNTP_CTL_EL0,X0
RET
/*
*set gtimer CNTP_TVAL_EL0 value
*/
.globl rt_hw_set_gtimer_val
rt_hw_set_gtimer_val:
MSR CNTP_TVAL_EL0,X0
RET
/*
*get gtimer CNTP_TVAL_EL0 value
*/
.globl rt_hw_get_gtimer_val
rt_hw_get_gtimer_val:
MRS X0,CNTP_TVAL_EL0
RET
.globl rt_hw_get_cntpct_val
rt_hw_get_cntpct_val:
MRS X0, CNTPCT_EL0
RET
/*
*get gtimer frq value
*/
.globl rt_hw_get_gtimer_frq
rt_hw_get_gtimer_frq:
MRS X0,CNTFRQ_EL0
RET
START_POINT(_thread_start)
blr x19
mov x29, #0
blr x20
b . /* never here */
START_POINT_END(_thread_start)
.macro SAVE_CONTEXT
/* Save the entire context. */
SAVE_FPU SP
STP X0, X1, [SP, #-0x10]!
STP X2, X3, [SP, #-0x10]!
STP X4, X5, [SP, #-0x10]!
STP X6, X7, [SP, #-0x10]!
STP X8, X9, [SP, #-0x10]!
STP X10, X11, [SP, #-0x10]!
STP X12, X13, [SP, #-0x10]!
STP X14, X15, [SP, #-0x10]!
STP X16, X17, [SP, #-0x10]!
STP X18, X19, [SP, #-0x10]!
STP X20, X21, [SP, #-0x10]!
STP X22, X23, [SP, #-0x10]!
STP X24, X25, [SP, #-0x10]!
STP X26, X27, [SP, #-0x10]!
STP X28, X29, [SP, #-0x10]!
MRS X28, FPCR
MRS X29, FPSR
STP X28, X29, [SP, #-0x10]!
MRS X29, SP_EL0
STP X29, X30, [SP, #-0x10]!
MRS X3, SPSR_EL1
MRS X2, ELR_EL1
STP X2, X3, [SP, #-0x10]!
MOV X0, SP /* Move SP into X0 for saving. */
.endm
.macro SAVE_CONTEXT_FROM_EL1
/* Save the entire context. */
SAVE_FPU SP
STP X0, X1, [SP, #-0x10]!
STP X2, X3, [SP, #-0x10]!
STP X4, X5, [SP, #-0x10]!
STP X6, X7, [SP, #-0x10]!
STP X8, X9, [SP, #-0x10]!
STP X10, X11, [SP, #-0x10]!
STP X12, X13, [SP, #-0x10]!
STP X14, X15, [SP, #-0x10]!
STP X16, X17, [SP, #-0x10]!
STP X18, X19, [SP, #-0x10]!
STP X20, X21, [SP, #-0x10]!
STP X22, X23, [SP, #-0x10]!
STP X24, X25, [SP, #-0x10]!
STP X26, X27, [SP, #-0x10]!
STP X28, X29, [SP, #-0x10]!
MRS X28, FPCR
MRS X29, FPSR
STP X28, X29, [SP, #-0x10]!
MRS X29, SP_EL0
STP X29, X30, [SP, #-0x10]!
MOV X19, #((3 << 6) | 0x4 | 0x1) /* el1h, disable interrupt */
MOV X18, X30
STP X18, X19, [SP, #-0x10]!
.endm
#ifdef RT_USING_SMP
.macro RESTORE_CONTEXT
/* Set the SP to point to the stack of the task being restored. */
MOV SP, X0
LDP X2, X3, [SP], #0x10 /* SPSR and ELR. */
TST X3, #0x1f
MSR SPSR_EL1, X3
MSR ELR_EL1, X2
LDP X29, X30, [SP], #0x10
MSR SP_EL0, X29
LDP X28, X29, [SP], #0x10
MSR FPCR, X28
MSR FPSR, X29
LDP X28, X29, [SP], #0x10
LDP X26, X27, [SP], #0x10
LDP X24, X25, [SP], #0x10
LDP X22, X23, [SP], #0x10
LDP X20, X21, [SP], #0x10
LDP X18, X19, [SP], #0x10
LDP X16, X17, [SP], #0x10
LDP X14, X15, [SP], #0x10
LDP X12, X13, [SP], #0x10
LDP X10, X11, [SP], #0x10
LDP X8, X9, [SP], #0x10
LDP X6, X7, [SP], #0x10
LDP X4, X5, [SP], #0x10
LDP X2, X3, [SP], #0x10
LDP X0, X1, [SP], #0x10
RESTORE_FPU SP
#ifdef RT_USING_SMART
BEQ arch_ret_to_user
#endif
ERET
.endm
#else
.macro RESTORE_CONTEXT
/* Set the SP to point to the stack of the task being restored. */
MOV SP, X0
#ifdef RT_USING_SMART
BL rt_thread_self
MOV X19, X0
BL lwp_aspace_switch
MOV X0, X19
BL lwp_user_setting_restore
#endif
LDP X2, X3, [SP], #0x10 /* SPSR and ELR. */
TST X3, #0x1f
MSR SPSR_EL1, X3
MSR ELR_EL1, X2
LDP X29, X30, [SP], #0x10
MSR SP_EL0, X29
LDP X28, X29, [SP], #0x10
MSR FPCR, X28
MSR FPSR, X29
LDP X28, X29, [SP], #0x10
LDP X26, X27, [SP], #0x10
LDP X24, X25, [SP], #0x10
LDP X22, X23, [SP], #0x10
LDP X20, X21, [SP], #0x10
LDP X18, X19, [SP], #0x10
LDP X16, X17, [SP], #0x10
LDP X14, X15, [SP], #0x10
LDP X12, X13, [SP], #0x10
LDP X10, X11, [SP], #0x10
LDP X8, X9, [SP], #0x10
LDP X6, X7, [SP], #0x10
LDP X4, X5, [SP], #0x10
LDP X2, X3, [SP], #0x10
LDP X0, X1, [SP], #0x10
RESTORE_FPU SP
#ifdef RT_USING_SMART
BEQ arch_ret_to_user
#endif
ERET
.endm
#endif
.macro RESTORE_CONTEXT_WITHOUT_MMU_SWITCH
/* the SP is already ok */
LDP X2, X3, [SP], #0x10 /* SPSR and ELR. */
TST X3, #0x1f
MSR SPSR_EL1, X3
MSR ELR_EL1, X2
LDP X29, X30, [SP], #0x10
MSR SP_EL0, X29
LDP X28, X29, [SP], #0x10
MSR FPCR, X28
MSR FPSR, X29
LDP X28, X29, [SP], #0x10
LDP X26, X27, [SP], #0x10
LDP X24, X25, [SP], #0x10
LDP X22, X23, [SP], #0x10
LDP X20, X21, [SP], #0x10
LDP X18, X19, [SP], #0x10
LDP X16, X17, [SP], #0x10
LDP X14, X15, [SP], #0x10
LDP X12, X13, [SP], #0x10
LDP X10, X11, [SP], #0x10
LDP X8, X9, [SP], #0x10
LDP X6, X7, [SP], #0x10
LDP X4, X5, [SP], #0x10
LDP X2, X3, [SP], #0x10
LDP X0, X1, [SP], #0x10
RESTORE_FPU SP
#ifdef RT_USING_SMART
BEQ arch_ret_to_user
#endif
ERET
.endm
.macro SAVE_USER_CTX
MRS X1, SPSR_EL1
AND X1, X1, 0xf
CMP X1, XZR
BNE 1f
BL lwp_uthread_ctx_save
LDP X0, X1, [SP]
1:
.endm
.macro RESTORE_USER_CTX, ctx
LDR X1, [\ctx, #CONTEXT_OFFSET_SPSR_EL1]
AND X1, X1, 0x1f
CMP X1, XZR
BNE 1f
BL lwp_uthread_ctx_restore
1:
.endm
#ifdef RT_USING_SMP
#define rt_hw_interrupt_disable rt_hw_local_irq_disable
#define rt_hw_interrupt_enable rt_hw_local_irq_enable
#endif
.text
.global rt_hw_interrupt_is_disabled
rt_hw_interrupt_is_disabled:
MRS X0, DAIF
TST X0, #0xc0
CSET X0, NE
RET
/*
* rt_base_t rt_hw_interrupt_disable();
*/
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
MRS X0, DAIF
AND X0, X0, #0xc0
CMP X0, #0xc0
/* branch if bits not both set(zero) */
BNE 1f
RET
1:
MSR DAIFSet, #3
DSB NSH
ISB
RET
/*
* void rt_hw_interrupt_enable(rt_base_t level);
*/
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
AND X0, X0, #0xc0
CMP X0, #0xc0
/* branch if one of the bits not set(zero) */
BNE 1f
RET
1:
ISB
DSB NSH
AND X0, X0, #0xc0
MRS X1, DAIF
BIC X1, X1, #0xc0
ORR X0, X0, X1
MSR DAIF, X0
RET
.text
#ifdef RT_USING_SMP
/*
* void rt_hw_context_switch_to(rt_uint3 to, struct rt_thread *to_thread);
* X0 --> to (thread stack)
* X1 --> to_thread
*/
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
LDR X0, [X0]
MOV SP, X0
MOV X0, X1
BL rt_cpus_lock_status_restore
#ifdef RT_USING_SMART
BL rt_thread_self
BL lwp_user_setting_restore
#endif
B rt_hw_context_switch_exit
/*
* void rt_hw_context_switch(rt_uint32 from, rt_uint32 to, struct rt_thread *to_thread);
* X0 --> from (from_thread stack)
* X1 --> to (to_thread stack)
* X2 --> to_thread
*/
.globl rt_hw_context_switch
rt_hw_context_switch:
SAVE_CONTEXT_FROM_EL1
mov X3, SP
str X3, [X0] // store sp in preempted tasks TCB
ldr X0, [X1] // get new task stack pointer
mov SP, X0
/* backup thread self */
mov x19, x2
mov x0, x19
bl rt_cpus_lock_status_restore
#ifdef RT_USING_SMART
mov x0, x19
bl lwp_user_setting_restore
#endif
b rt_hw_context_switch_exit
/*
* void rt_hw_context_switch_interrupt(context, from sp, to sp, tp tcb)
* X0 :interrupt context
* X1 :addr of from_thread's sp
* X2 :addr of to_thread's sp
* X3 :to_thread's tcb
*/
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
STP X0, X1, [SP, #-0x10]!
STP X2, X3, [SP, #-0x10]!
STP X29, X30, [SP, #-0x10]!
#ifdef RT_USING_SMART
BL rt_thread_self
BL lwp_user_setting_save
#endif
LDP X29, X30, [SP], #0x10
LDP X2, X3, [SP], #0x10
LDP X0, X1, [SP], #0x10
STR X0, [X1]
LDR X0, [X2]
MOV SP, X0
MOV X0, X3
MOV X19, X0
BL rt_cpus_lock_status_restore
MOV X0, X19
#ifdef RT_USING_SMART
BL lwp_user_setting_restore
#endif
B rt_hw_context_switch_exit
.globl vector_fiq
vector_fiq:
B .
START_POINT(vector_irq)
SAVE_CONTEXT
STP X0, X1, [SP, #-0x10]! /* X0 is thread sp */
BL rt_interrupt_enter
LDP X0, X1, [SP]
#ifdef RT_USING_SMART
SAVE_USER_CTX
#endif
BL rt_hw_trap_irq
#ifdef RT_USING_SMART
LDP X0, X1, [SP]
RESTORE_USER_CTX X0
#endif
BL rt_interrupt_leave
LDP X0, X1, [SP], #0x10
BL rt_scheduler_do_irq_switch
B rt_hw_context_switch_exit
START_POINT_END(vector_irq)
.global rt_hw_context_switch_exit
rt_hw_context_switch_exit:
CLREX
MOV X0, SP
RESTORE_CONTEXT
#else /* !RT_USING_SMP */
/*
* void rt_hw_context_switch_to(rt_ubase_t to);
* X0 --> to sp
*/
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
CLREX
LDR X0, [X0]
RESTORE_CONTEXT
/*
* void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to);
* X0 --> from sp
* X1 --> to sp
* X2 --> to thread
*/
.globl rt_hw_context_switch
rt_hw_context_switch:
CLREX
SAVE_CONTEXT_FROM_EL1
MOV X2, SP
STR X2, [X0] // store sp in preempted tasks TCB
LDR X0, [X1] // get new task stack pointer
RESTORE_CONTEXT
/*
* void rt_hw_context_switch_interrupt(rt_ubase_t from, rt_ubase_t to, rt_thread_t from_thread, rt_thread_t to_thread);
*/
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
CLREX
LDR X6, =rt_thread_switch_interrupt_flag
LDR X7, [X6]
CMP X7, #1
B.EQ _reswitch
LDR X4, =rt_interrupt_from_thread // set rt_interrupt_from_thread
STR X0, [X4]
MOV X7, #1 // set rt_thread_switch_interrupt_flag to 1
STR X7, [X6]
STP X1, X30, [SP, #-0x10]!
#ifdef RT_USING_SMART
MOV X0, X2
BL lwp_user_setting_save
#endif
LDP X1, X30, [SP], #0x10
_reswitch:
LDR X6, =rt_interrupt_to_thread // set rt_interrupt_to_thread
STR X1, [X6]
RET
.text
// -- Exception handlers ----------------------------------
.align 8
.globl vector_fiq
vector_fiq:
SAVE_CONTEXT
STP X0, X1, [SP, #-0x10]!
BL rt_hw_trap_fiq
LDP X0, X1, [SP], #0x10
RESTORE_CONTEXT
.globl rt_interrupt_enter
.globl rt_interrupt_leave
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
// -------------------------------------------------------------------
.align 8
.globl vector_irq
vector_irq:
SAVE_CONTEXT
STP X0, X1, [SP, #-0x10]! /* X0 is thread sp */
BL rt_interrupt_enter
BL rt_hw_trap_irq
BL rt_interrupt_leave
LDP X0, X1, [SP], #0x10
// if rt_thread_switch_interrupt_flag set, jump to
// rt_hw_context_switch_interrupt_do and don't return
LDR X1, =rt_thread_switch_interrupt_flag
LDR X2, [X1]
CMP X2, #1
B.NE vector_irq_exit
MOV X2, #0 // clear flag
STR X2, [X1]
LDR X3, =rt_interrupt_from_thread
LDR X4, [X3]
STR x0, [X4] // store sp in preempted tasks's TCB
LDR x3, =rt_interrupt_to_thread
LDR X4, [X3]
LDR x0, [X4] // get new task's stack pointer
RESTORE_CONTEXT
vector_irq_exit:
MOV SP, X0
RESTORE_CONTEXT_WITHOUT_MMU_SWITCH
#endif /* RT_USING_SMP */
// -------------------------------------------------
START_POINT(vector_exception)
SAVE_CONTEXT
STP X0, X1, [SP, #-0x10]!
#ifdef RT_USING_SMART
SAVE_USER_CTX
#endif
BL rt_hw_trap_exception
#ifdef RT_USING_SMART
LDP X0, X1, [SP]
RESTORE_USER_CTX X0
#endif
LDP X0, X1, [SP], #0x10
MOV SP, X0
RESTORE_CONTEXT_WITHOUT_MMU_SWITCH
START_POINT_END(vector_exception)
START_POINT(vector_serror)
SAVE_CONTEXT
#ifdef RT_USING_SMART
SAVE_USER_CTX
#endif
STP X0, X1, [SP, #-0x10]!
BL rt_hw_trap_serror
b .
START_POINT_END(vector_serror)


@@ -20,7 +20,6 @@
 #define DBG_LVL DBG_INFO
 #include <rtdbg.h>

 #ifdef RT_USING_SMP
 #define REPORT_ERR(retval) LOG_E("got error code %d in %s(), %s:%d", (retval), __func__, __FILE__, __LINE__)


@@ -4,87 +4,248 @@
 * SPDX-License-Identifier: Apache-2.0
 *
 * Date           Author       Notes
-* 2018-10-06     ZhaoXiaowei  the first version
-* 2024-04-28     Shell        add generic spinlock implementation
+* 2018-10-06     ZhaoXiaowei  the first version (cpu_gcc.S)
+* 2021-05-18     Jesven       the first version (context_gcc.S)
+* 2024-01-06     Shell        Fix barrier on irq_disable/enable
+* 2024-01-18     Shell        fix implicit dependency of cpuid management
+* 2024-03-28     Shell        Move cpu codes from context_gcc.S
 */

+#ifndef __ASSEMBLY__
+#define __ASSEMBLY__
+#endif
+
+#include "rtconfig.h"
+#include "asm-generic.h"
+#include "asm-fpu.h"
+#include "armv8.h"
+
+#ifdef RT_USING_SMP
+#define rt_hw_interrupt_disable rt_hw_local_irq_disable
+#define rt_hw_interrupt_enable  rt_hw_local_irq_enable
+#endif /* RT_USING_SMP */
+
 .text
/**
* #ifdef RT_USING_OFW
* void rt_hw_cpu_id_set(long cpuid)
* #else
* void rt_hw_cpu_id_set(void)
* #endif
*/
.type rt_hw_cpu_id_set, @function
rt_hw_cpu_id_set:
#ifdef ARCH_USING_GENERIC_CPUID
.globl rt_hw_cpu_id_set
#else /* !ARCH_USING_GENERIC_CPUID */
.weak rt_hw_cpu_id_set
#endif /* ARCH_USING_GENERIC_CPUID */
#ifndef RT_USING_OFW
mrs x0, mpidr_el1 /* MPIDR_EL1: Multi-Processor Affinity Register */
#ifdef ARCH_ARM_CORTEX_A55
lsr x0, x0, #8
#endif /* ARCH_ARM_CORTEX_A55 */
and x0, x0, #15
#endif /* !RT_USING_OFW */
#ifdef ARCH_USING_HW_THREAD_SELF
msr tpidrro_el0, x0
#else /* !ARCH_USING_HW_THREAD_SELF */
msr tpidr_el1, x0
#endif /* ARCH_USING_HW_THREAD_SELF */
ret
/*
int rt_hw_cpu_id(void)
*/
.type rt_hw_cpu_id, @function
rt_hw_cpu_id:
#ifdef ARCH_USING_GENERIC_CPUID
.globl rt_hw_cpu_id
#else /* !ARCH_USING_GENERIC_CPUID */
.weak rt_hw_cpu_id
#endif /* ARCH_USING_GENERIC_CPUID */
#if RT_CPUS_NR > 1
#ifdef ARCH_USING_GENERIC_CPUID
mrs x0, tpidrro_el0
#else /* !ARCH_USING_GENERIC_CPUID */
mrs x0, tpidr_el1
#endif /* ARCH_USING_GENERIC_CPUID */
#else /* RT_CPUS_NR == 1 */
mov x0, xzr
#endif
ret
/*
void rt_hw_set_process_id(size_t id)
*/
.global rt_hw_set_process_id
rt_hw_set_process_id:
msr CONTEXTIDR_EL1, x0
ret
/*
* enable gtimer
*/
.globl rt_hw_gtimer_enable
rt_hw_gtimer_enable:
mov x0, #1
msr CNTP_CTL_EL0, x0
ret
/*
* set gtimer CNTP_TVAL_EL0 value
*/
.globl rt_hw_set_gtimer_val
rt_hw_set_gtimer_val:
msr CNTP_TVAL_EL0, x0
ret
/*
* get gtimer CNTP_TVAL_EL0 value
*/
.globl rt_hw_get_gtimer_val
rt_hw_get_gtimer_val:
mrs x0, CNTP_TVAL_EL0
ret
.globl rt_hw_get_cntpct_val
rt_hw_get_cntpct_val:
mrs x0, CNTPCT_EL0
ret
/*
* get gtimer frq value
*/
.globl rt_hw_get_gtimer_frq
rt_hw_get_gtimer_frq:
mrs x0, CNTFRQ_EL0
ret
.global rt_hw_interrupt_is_disabled
rt_hw_interrupt_is_disabled:
mrs x0, DAIF
tst x0, #0xc0
cset x0, NE
ret
/*
* rt_base_t rt_hw_interrupt_disable();
*/
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
mrs x0, DAIF
and x0, x0, #0xc0
cmp x0, #0xc0
/* branch if the two bits are not both set */
bne 1f
ret
1:
msr DAIFSet, #3
dsb nsh
isb
ret
/*
* void rt_hw_interrupt_enable(rt_base_t level);
*/
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
and x0, x0, #0xc0
cmp x0, #0xc0
/* branch if one of the bits is not set */
bne 1f
ret
1:
isb
dsb nsh
and x0, x0, #0xc0
mrs x1, DAIF
bic x1, x1, #0xc0
orr x0, x0, x1
msr DAIF, x0
ret
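rt_hw_interrupt_disable above returns only the I and F bits (mask 0xc0) of DAIF, and it skips the barriers when interrupts are already masked; rt_hw_interrupt_enable is its mirror image. A C rendering of the same logic with inline assembly, assuming EL1 and GCC-style asm (function names are mine, not the kernel API):

#include <stdint.h>

/* Sketch of rt_hw_interrupt_disable()/rt_hw_interrupt_enable():
 * save-and-mask, then conditional restore of DAIF.I/F (bits 7:6).
 * aarch64/EL1 only; illustrative, not the kernel's C interface. */
static inline uint64_t irq_save(void)
{
    uint64_t daif;
    __asm__ volatile ("mrs %0, daif" : "=r"(daif));
    if ((daif & 0xc0) != 0xc0) {
        /* not fully masked yet: mask IRQ+FIQ, then order the change */
        __asm__ volatile ("msr daifset, #3\n\tdsb nsh\n\tisb" ::: "memory");
    }
    return daif & 0xc0;   /* previous I/F bits, as the asm returns in x0 */
}

static inline void irq_restore(uint64_t level)
{
    uint64_t daif;
    if ((level & 0xc0) == 0xc0)
        return;           /* interrupts were masked before: stay masked */
    __asm__ volatile ("isb\n\tdsb nsh" ::: "memory");
    __asm__ volatile ("mrs %0, daif" : "=r"(daif));
    daif = (daif & ~0xc0ull) | (level & 0xc0);
    __asm__ volatile ("msr daif, %0" :: "r"(daif) : "memory");
}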
 .globl rt_hw_get_current_el
 rt_hw_get_current_el:
-    MRS X0, CurrentEL
-    CMP X0, 0xc
-    B.EQ 3f
-    CMP X0, 0x8
-    B.EQ 2f
-    CMP X0, 0x4
-    B.EQ 1f
-    LDR X0, =0
-    B 0f
+    mrs x0, CurrentEL
+    cmp x0, 0xc
+    b.eq 3f
+    cmp x0, 0x8
+    b.eq 2f
+    cmp x0, 0x4
+    b.eq 1f
+    ldr x0, =0
+    b 0f
 3:
-    LDR X0, =3
-    B 0f
+    ldr x0, =3
+    b 0f
 2:
-    LDR X0, =2
-    B 0f
+    ldr x0, =2
+    b 0f
 1:
-    LDR X0, =1
-    B 0f
+    ldr x0, =1
+    b 0f
 0:
-    RET
+    ret

 .globl rt_hw_set_current_vbar
 rt_hw_set_current_vbar:
-    MRS X1, CurrentEL
-    CMP X1, 0xc
-    B.EQ 3f
-    CMP X1, 0x8
-    B.EQ 2f
-    CMP X1, 0x4
-    B.EQ 1f
-    B 0f
+    mrs x1, CurrentEL
+    cmp x1, 0xc
+    b.eq 3f
+    cmp x1, 0x8
+    b.eq 2f
+    cmp x1, 0x4
+    b.eq 1f
+    b 0f
 3:
-    MSR VBAR_EL3,X0
-    B 0f
+    msr VBAR_EL3,x0
+    b 0f
 2:
-    MSR VBAR_EL2,X0
-    B 0f
+    msr VBAR_EL2,x0
+    b 0f
 1:
-    MSR VBAR_EL1,X0
-    B 0f
+    msr VBAR_EL1,x0
+    b 0f
 0:
-    RET
+    ret

 .globl rt_hw_set_elx_env
 rt_hw_set_elx_env:
-    MRS X1, CurrentEL
-    CMP X1, 0xc
-    B.EQ 3f
-    CMP X1, 0x8
-    B.EQ 2f
-    CMP X1, 0x4
-    B.EQ 1f
-    B 0f
+    mrs x1, CurrentEL
+    cmp x1, 0xc
+    b.eq 3f
+    cmp x1, 0x8
+    b.eq 2f
+    cmp x1, 0x4
+    b.eq 1f
+    b 0f
 3:
-    MRS X0, SCR_EL3
-    ORR X0, X0, #0xF /* SCR_EL3.NS|IRQ|FIQ|EA */
-    MSR SCR_EL3, X0
-    B 0f
+    mrs x0, SCR_EL3
+    orr x0, x0, #0xf /* SCR_EL3.NS|IRQ|FIQ|EA */
+    msr SCR_EL3, x0
+    b 0f
 2:
-    MRS X0, HCR_EL2
-    ORR X0, X0, #0x38
-    MSR HCR_EL2, X0
-    B 0f
+    mrs x0, HCR_EL2
+    orr x0, x0, #0x38
+    msr HCR_EL2, x0
+    b 0f
 1:
-    B 0f
+    b 0f
 0:
-    RET
+    ret

 .globl rt_cpu_vector_set_base
 rt_cpu_vector_set_base:
-    MSR VBAR_EL1,X0
-    RET
+    msr VBAR_EL1, x0
+    ret

 /**

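rt_hw_cpu_id_set above stashes the logical CPU id in TPIDRRO_EL0 (with ARCH_USING_HW_THREAD_SELF) or TPIDR_EL1, and rt_hw_cpu_id reads it back, so the id never has to be re-derived from MPIDR_EL1. A read-side sketch in C (aarch64 inline asm; illustrative only, and note TPIDR_EL1 is accessible only at EL1):

#include <stdint.h>
#include <stdio.h>

/* Read back the id stored by rt_hw_cpu_id_set. The register choice
 * mirrors the assembly above; TPIDRRO_EL0 is readable from EL0 too. */
static inline uint64_t cpu_id_read(void)
{
    uint64_t id;
#ifdef ARCH_USING_HW_THREAD_SELF
    __asm__ volatile ("mrs %0, tpidrro_el0" : "=r"(id));
#else
    __asm__ volatile ("mrs %0, tpidr_el1" : "=r"(id));
#endif
    return id;
}

int main(void)
{
    printf("cpu id: %llu\n", (unsigned long long)cpu_id_read());
    return 0;
}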

@@ -0,0 +1,83 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 Jesven the first version
* 2023-07-13 GuEe-GUI append Q16 ~ Q31
*/
#ifndef __ARM64_ASM_FPU_H__
#define __ARM64_ASM_FPU_H__
.macro SAVE_FPU, reg
str q0, [\reg, #-0x10]!
str q1, [\reg, #-0x10]!
str q2, [\reg, #-0x10]!
str q3, [\reg, #-0x10]!
str q4, [\reg, #-0x10]!
str q5, [\reg, #-0x10]!
str q6, [\reg, #-0x10]!
str q7, [\reg, #-0x10]!
str q8, [\reg, #-0x10]!
str q9, [\reg, #-0x10]!
str q10, [\reg, #-0x10]!
str q11, [\reg, #-0x10]!
str q12, [\reg, #-0x10]!
str q13, [\reg, #-0x10]!
str q14, [\reg, #-0x10]!
str q15, [\reg, #-0x10]!
str q16, [\reg, #-0x10]!
str q17, [\reg, #-0x10]!
str q18, [\reg, #-0x10]!
str q19, [\reg, #-0x10]!
str q20, [\reg, #-0x10]!
str q21, [\reg, #-0x10]!
str q22, [\reg, #-0x10]!
str q23, [\reg, #-0x10]!
str q24, [\reg, #-0x10]!
str q25, [\reg, #-0x10]!
str q26, [\reg, #-0x10]!
str q27, [\reg, #-0x10]!
str q28, [\reg, #-0x10]!
str q29, [\reg, #-0x10]!
str q30, [\reg, #-0x10]!
str q31, [\reg, #-0x10]!
.endm
.macro RESTORE_FPU, reg
ldr q31, [\reg], #0x10
ldr q30, [\reg], #0x10
ldr q29, [\reg], #0x10
ldr q28, [\reg], #0x10
ldr q27, [\reg], #0x10
ldr q26, [\reg], #0x10
ldr q25, [\reg], #0x10
ldr q24, [\reg], #0x10
ldr q23, [\reg], #0x10
ldr q22, [\reg], #0x10
ldr q21, [\reg], #0x10
ldr q20, [\reg], #0x10
ldr q19, [\reg], #0x10
ldr q18, [\reg], #0x10
ldr q17, [\reg], #0x10
ldr q16, [\reg], #0x10
ldr q15, [\reg], #0x10
ldr q14, [\reg], #0x10
ldr q13, [\reg], #0x10
ldr q12, [\reg], #0x10
ldr q11, [\reg], #0x10
ldr q10, [\reg], #0x10
ldr q9, [\reg], #0x10
ldr q8, [\reg], #0x10
ldr q7, [\reg], #0x10
ldr q6, [\reg], #0x10
ldr q5, [\reg], #0x10
ldr q4, [\reg], #0x10
ldr q3, [\reg], #0x10
ldr q2, [\reg], #0x10
ldr q1, [\reg], #0x10
ldr q0, [\reg], #0x10
.endm
#endif /* __ARM64_ASM_FPU_H__ */
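SAVE_FPU stores Q0..Q31 with pre-indexed pushes of 16 bytes each, so the macro lowers SP by 512 bytes, and RESTORE_FPU pops in exactly the reverse order. A C sketch of the resulting layout (the struct and its names are mine, for illustration):

#include <assert.h>
#include <stdint.h>

/* One 128-bit NEON register as saved by "str qN, [reg, #-0x10]!" */
typedef struct { uint64_t lo, hi; } q_reg_t;

/* Layout below the original SP after SAVE_FPU: Q31 ends up at the
 * lowest address because it is pushed last. */
struct fpu_frame {
    q_reg_t q[32];   /* q[0] holds Q31 ... q[31] holds Q0 */
};

int main(void)
{
    static_assert(sizeof(struct fpu_frame) == 32 * 0x10,
                  "SAVE_FPU moves SP down by 512 bytes");
    return 0;
}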


@@ -13,7 +13,7 @@
 /* use to mark a start point where every task start from */
 #define START_POINT(funcname)               \
     .global funcname;                       \
-    .type funcname, %function;              \
+    .type funcname, %function;              \
 funcname:                                   \
     .cfi_sections .debug_frame, .eh_frame;  \
     .cfi_startproc;                         \


@@ -0,0 +1,77 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-03-28 Shell Move vector handling codes from context_gcc.S
*/
#ifndef __ARM64_INC_CONTEXT_H__
#define __ARM64_INC_CONTEXT_H__
.macro SAVE_CONTEXT_SWITCH
/* Save the entire context. */
SAVE_FPU sp
stp x0, x1, [sp, #-0x10]!
stp x2, x3, [sp, #-0x10]!
stp x4, x5, [sp, #-0x10]!
stp x6, x7, [sp, #-0x10]!
stp x8, x9, [sp, #-0x10]!
stp x10, x11, [sp, #-0x10]!
stp x12, x13, [sp, #-0x10]!
stp x14, x15, [sp, #-0x10]!
stp x16, x17, [sp, #-0x10]!
stp x18, x19, [sp, #-0x10]!
stp x20, x21, [sp, #-0x10]!
stp x22, x23, [sp, #-0x10]!
stp x24, x25, [sp, #-0x10]!
stp x26, x27, [sp, #-0x10]!
stp x28, x29, [sp, #-0x10]!
mrs x28, fpcr
mrs x29, fpsr
stp x28, x29, [sp, #-0x10]!
mrs x29, sp_el0
stp x29, x30, [sp, #-0x10]!
mov x19, #((3 << 6) | 0x4 | 0x1) /* el1h, disable interrupt */
mov x18, x30
stp x18, x19, [sp, #-0x10]!
.endm
.macro _RESTORE_CONTEXT_SWITCH
ldp x2, x3, [sp], #0x10 /* SPSR and ELR. */
tst x3, #0x1f
msr spsr_el1, x3
msr elr_el1, x2
ldp x29, x30, [sp], #0x10
msr sp_el0, x29
ldp x28, x29, [sp], #0x10
msr fpcr, x28
msr fpsr, x29
ldp x28, x29, [sp], #0x10
ldp x26, x27, [sp], #0x10
ldp x24, x25, [sp], #0x10
ldp x22, x23, [sp], #0x10
ldp x20, x21, [sp], #0x10
ldp x18, x19, [sp], #0x10
ldp x16, x17, [sp], #0x10
ldp x14, x15, [sp], #0x10
ldp x12, x13, [sp], #0x10
ldp x10, x11, [sp], #0x10
ldp x8, x9, [sp], #0x10
ldp x6, x7, [sp], #0x10
ldp x4, x5, [sp], #0x10
ldp x2, x3, [sp], #0x10
ldp x0, x1, [sp], #0x10
RESTORE_FPU sp
#ifdef RT_USING_SMART
beq arch_ret_to_user
#endif
eret
.endm
#endif /* __ARM64_INC_CONTEXT_H__ */
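SAVE_CONTEXT_SWITCH lays the frame out bottom-up: the synthesized SPSR/return-address pair lands at the final SP so that _RESTORE_CONTEXT_SWITCH can pop it first and eret. A C picture of the frame (struct and field names are mine):

#include <assert.h>
#include <stdint.h>

typedef struct { uint64_t lo, hi; } q_reg_t;

/* Frame built by SAVE_CONTEXT_SWITCH, lowest address (final SP) first. */
struct switch_frame {
    uint64_t pc;        /* saved x30 copy, restored into ELR_EL1 for eret */
    uint64_t spsr;      /* (3 << 6) | 0x4 | 0x1: EL1h, IRQ/FIQ masked */
    uint64_t sp_el0;
    uint64_t x30;
    uint64_t fpcr, fpsr;
    uint64_t gpr[30];   /* pairs upward: x28,x29, x26,x27, ..., x0,x1 */
    q_reg_t  q[32];     /* q[0] holds Q31 ... q[31] holds Q0 */
};

int main(void)
{
    static_assert(sizeof(struct switch_frame) == 800,
                  "SAVE_CONTEXT_SWITCH moves SP down by 800 bytes");
    return 0;
}

Because the saved SPSR masks IRQ and FIQ, a thread switched out via rt_hw_context_switch resumes with interrupts disabled until the scheduler path re-enables them.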


@@ -0,0 +1,114 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-03-28 Shell Move vector handling codes from context_gcc.S
*/
#ifndef __ARM64_INC_VECTOR_H__
#define __ARM64_INC_VECTOR_H__
#include "asm-generic.h"
#include <rtconfig.h>
#include <asm-fpu.h>
#include <armv8.h>
.macro SAVE_IRQ_CONTEXT
/* Save the entire context. */
SAVE_FPU sp
stp x0, x1, [sp, #-0x10]!
stp x2, x3, [sp, #-0x10]!
stp x4, x5, [sp, #-0x10]!
stp x6, x7, [sp, #-0x10]!
stp x8, x9, [sp, #-0x10]!
stp x10, x11, [sp, #-0x10]!
stp x12, x13, [sp, #-0x10]!
stp x14, x15, [sp, #-0x10]!
stp x16, x17, [sp, #-0x10]!
stp x18, x19, [sp, #-0x10]!
stp x20, x21, [sp, #-0x10]!
stp x22, x23, [sp, #-0x10]!
stp x24, x25, [sp, #-0x10]!
stp x26, x27, [sp, #-0x10]!
stp x28, x29, [sp, #-0x10]!
mrs x28, fpcr
mrs x29, fpsr
stp x28, x29, [sp, #-0x10]!
mrs x29, sp_el0
stp x29, x30, [sp, #-0x10]!
mrs x3, spsr_el1
mrs x2, elr_el1
stp x2, x3, [sp, #-0x10]!
mov x0, sp /* Move SP into X0 for saving. */
.endm
#ifdef RT_USING_SMP
#include "../mp/context_gcc.h"
#else
#include "../up/context_gcc.h"
#endif
.macro RESTORE_IRQ_CONTEXT_WITHOUT_MMU_SWITCH
/* the SP is already ok */
ldp x2, x3, [sp], #0x10 /* SPSR and ELR. */
tst x3, #0x1f
msr spsr_el1, x3
msr elr_el1, x2
ldp x29, x30, [sp], #0x10
msr sp_el0, x29
ldp x28, x29, [sp], #0x10
msr fpcr, x28
msr fpsr, x29
ldp x28, x29, [sp], #0x10
ldp x26, x27, [sp], #0x10
ldp x24, x25, [sp], #0x10
ldp x22, x23, [sp], #0x10
ldp x20, x21, [sp], #0x10
ldp x18, x19, [sp], #0x10
ldp x16, x17, [sp], #0x10
ldp x14, x15, [sp], #0x10
ldp x12, x13, [sp], #0x10
ldp x10, x11, [sp], #0x10
ldp x8, x9, [sp], #0x10
ldp x6, x7, [sp], #0x10
ldp x4, x5, [sp], #0x10
ldp x2, x3, [sp], #0x10
ldp x0, x1, [sp], #0x10
RESTORE_FPU sp
#ifdef RT_USING_SMART
beq arch_ret_to_user
#endif
eret
.endm
.macro SAVE_USER_CTX
mrs x1, spsr_el1
and x1, x1, 0xf
cmp x1, xzr
bne 1f
bl lwp_uthread_ctx_save
ldp x0, x1, [sp]
1:
.endm
.macro RESTORE_USER_CTX, ctx
ldr x1, [\ctx, #CONTEXT_OFFSET_SPSR_EL1]
and x1, x1, 0x1f
cmp x1, xzr
bne 1f
bl lwp_uthread_ctx_restore
1:
.endm
#endif /* __ARM64_INC_VECTOR_H__ */
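Both user-context macros test the saved SPSR_EL1 mode field: a value of 0 means the exception came from EL0, and only then are the lwp_uthread_ctx_save/restore hooks called (SAVE_USER_CTX masks with 0xf, RESTORE_USER_CTX with 0x1f, which also covers the M[4] AArch32 bit). The same test as a C predicate (sketch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* SPSR_EL1.M[3:0] == 0 encodes EL0t, i.e. the exception was taken
 * while running user code. */
static bool exception_from_el0(uint64_t spsr_el1)
{
    return (spsr_el1 & 0x1f) == 0;
}

int main(void)
{
    printf("%d %d\n", exception_from_el0(0x0),   /* EL0t -> 1 */
                      exception_from_el0(0x5));  /* EL1h -> 0 */
    return 0;
}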


@@ -0,0 +1,109 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 Jesven the first version
* 2023-06-24 Shell Support backtrace for user thread
* 2024-01-06 Shell Fix barrier on irq_disable/enable
* 2024-03-28 Shell Move vector handling codes from context_gcc.S
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include "context_gcc.h"
#include "../include/vector_gcc.h"
#include <rtconfig.h>
#include <asm-generic.h>
#include <asm-fpu.h>
#include <armv8.h>
.section .text
.globl rt_hw_context_switch_to
/*
* void rt_hw_context_switch_to(rt_ubase_t to, struct rt_thread *to_thread);
* X0 --> to (thread stack)
* X1 --> to_thread
*/
rt_hw_context_switch_to:
ldr x0, [x0]
mov sp, x0
mov x0, x1
bl rt_cpus_lock_status_restore
#ifdef RT_USING_SMART
bl rt_thread_self
bl lwp_user_setting_restore
#endif
b rt_hw_context_switch_exit
.globl rt_hw_context_switch
/*
* void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread);
* X0 --> from (from_thread stack)
* X1 --> to (to_thread stack)
* X2 --> to_thread
*/
rt_hw_context_switch:
SAVE_CONTEXT_SWITCH
mov x3, sp
str x3, [x0] // store sp in preempted task's TCB
ldr x0, [x1] // get new task stack pointer
mov sp, x0
/* backup thread self */
mov x19, x2
mov x0, x19
bl rt_cpus_lock_status_restore
#ifdef RT_USING_SMART
mov x0, x19
bl lwp_user_setting_restore
#endif
b rt_hw_context_switch_exit
.globl rt_hw_context_switch_interrupt
/*
* void rt_hw_context_switch_interrupt(context, from sp, to sp, to tcb)
* X0 :interrupt context
* X1 :addr of from_thread's sp
* X2 :addr of to_thread's sp
* X3 :to_thread's tcb
*/
rt_hw_context_switch_interrupt:
stp x0, x1, [sp, #-0x10]!
stp x2, x3, [sp, #-0x10]!
stp x29, x30, [sp, #-0x10]!
#ifdef RT_USING_SMART
bl rt_thread_self
bl lwp_user_setting_save
#endif
ldp x29, x30, [sp], #0x10
ldp x2, x3, [sp], #0x10
ldp x0, x1, [sp], #0x10
str x0, [x1]
ldr x0, [x2]
mov sp, x0
mov x0, x3
mov x19, x0
bl rt_cpus_lock_status_restore
mov x0, x19
#ifdef RT_USING_SMART
bl lwp_user_setting_restore
#endif
b rt_hw_context_switch_exit
.global rt_hw_context_switch_exit
rt_hw_context_switch_exit:
clrex
mov x0, sp
RESTORE_CONTEXT_SWITCH


@@ -0,0 +1,66 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-03-25     Shell        Trimming unnecessary ops and
* improving the performance of ctx switch
*/
#ifndef __ARM64_CONTEXT_H__
#define __ARM64_CONTEXT_H__
#include "../include/context_gcc.h"
#include <rtconfig.h>
#include <asm-generic.h>
#include <asm-fpu.h>
#include <armv8.h>
.macro RESTORE_CONTEXT_SWITCH
/* Set the SP to point to the stack of the task being restored. */
mov sp, x0
_RESTORE_CONTEXT_SWITCH
.endm
.macro RESTORE_IRQ_CONTEXT
/* Set the SP to point to the stack of the task being restored. */
mov sp, x0
ldp x2, x3, [sp], #0x10 /* SPSR and ELR. */
tst x3, #0x1f
msr spsr_el1, x3
msr elr_el1, x2
ldp x29, x30, [sp], #0x10
msr sp_el0, x29
ldp x28, x29, [sp], #0x10
msr fpcr, x28
msr fpsr, x29
ldp x28, x29, [sp], #0x10
ldp x26, x27, [sp], #0x10
ldp x24, x25, [sp], #0x10
ldp x22, x23, [sp], #0x10
ldp x20, x21, [sp], #0x10
ldp x18, x19, [sp], #0x10
ldp x16, x17, [sp], #0x10
ldp x14, x15, [sp], #0x10
ldp x12, x13, [sp], #0x10
ldp x10, x11, [sp], #0x10
ldp x8, x9, [sp], #0x10
ldp x6, x7, [sp], #0x10
ldp x4, x5, [sp], #0x10
ldp x2, x3, [sp], #0x10
ldp x0, x1, [sp], #0x10
RESTORE_FPU sp
#ifdef RT_USING_SMART
beq arch_ret_to_user
#endif
eret
.endm
#endif /* __ARM64_CONTEXT_H__ */


@@ -0,0 +1,48 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-03-28 Shell Move vector handling codes from context_gcc.S
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include "../include/vector_gcc.h"
#include "context_gcc.h"
.globl vector_fiq
vector_fiq:
b .
START_POINT(vector_irq)
SAVE_IRQ_CONTEXT
stp x0, x1, [sp, #-0x10]! /* X0 is thread sp */
bl rt_interrupt_enter
ldp x0, x1, [sp]
#ifdef RT_USING_SMART
SAVE_USER_CTX
#endif /* RT_USING_SMART */
bl rt_hw_trap_irq
#ifdef RT_USING_SMART
ldp x0, x1, [sp]
RESTORE_USER_CTX x0
#endif /* RT_USING_SMART */
bl rt_interrupt_leave
ldp x0, x1, [sp], #0x10
bl rt_scheduler_do_irq_switch
mov x0, sp
RESTORE_IRQ_CONTEXT
START_POINT_END(vector_irq)


@@ -0,0 +1,28 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 Jesven the first version
* 2023-06-24 Shell Support debug frame for user thread
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include "rtconfig.h"
#include "asm-generic.h"
#include "asm-fpu.h"
#include "armv8.h"
.section .text
START_POINT(_thread_start)
blr x19
mov x29, #0
blr x20
b . /* never here */
START_POINT_END(_thread_start)

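_thread_start is the common thread trampoline: x19 carries the entry function and x20 the exit hook, presumably planted in the initial frame by the thread stack setup (an assumption, that code is not in this diff). Roughly, in C:

/* Rough C equivalent of _thread_start (sketch; the real registers are
 * x19 = entry, x20 = exit hook, with x0/x29 prepared elsewhere). */
typedef void (*thread_entry_t)(void *);
typedef void (*thread_exit_t)(void);

void thread_start(thread_entry_t entry, void *param, thread_exit_t texit)
{
    entry(param);   /* blr x19 */
    texit();        /* blr x20 -- expected not to return */
    for (;;) ;      /* b .    -- never here */
}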

@@ -7,7 +7,7 @@
 * Date           Author       Notes
 */

 .global Reset_Handler
-.section ".start", "ax"
+.section ".start", "ax"
 Reset_Handler:
     nop


@@ -0,0 +1,97 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 Jesven the first version
* 2023-06-24 Shell Support backtrace for user thread
* 2024-01-06 Shell Fix barrier on irq_disable/enable
* 2024-03-28 Shell Move vector handling codes from context_gcc.S
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include "context_gcc.h"
#include "../include/vector_gcc.h"
#include <rtconfig.h>
#include <asm-generic.h>
#include <asm-fpu.h>
#include <armv8.h>
/**
* Context switch status
*/
.section .bss
rt_interrupt_from_thread:
.quad 0
rt_interrupt_to_thread:
.quad 0
rt_thread_switch_interrupt_flag:
.quad 0
.section .text
/*
* void rt_hw_context_switch_to(rt_ubase_t to);
* X0 --> to sp
*/
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
clrex
ldr x0, [x0]
RESTORE_CONTEXT_SWITCH
/*
* void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to);
* X0 --> from sp
* X1 --> to sp
* X2 --> to thread
*/
.globl rt_hw_context_switch
rt_hw_context_switch:
clrex
SAVE_CONTEXT_SWITCH
mov x2, sp
str x2, [x0] // store sp in preempted task's TCB
ldr x0, [x1] // get new task stack pointer
RESTORE_CONTEXT_SWITCH
/*
* void rt_hw_context_switch_interrupt(rt_ubase_t from, rt_ubase_t to, rt_thread_t from_thread, rt_thread_t to_thread);
*/
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
clrex
ldr x6, =rt_thread_switch_interrupt_flag
ldr x7, [x6]
cmp x7, #1
b.eq _reswitch
/* set rt_interrupt_from_thread */
ldr x4, =rt_interrupt_from_thread
str x0, [x4]
/* set rt_thread_switch_interrupt_flag to 1 */
mov x7, #1
str x7, [x6]
stp x1, x30, [sp, #-0x10]!
#ifdef RT_USING_SMART
mov x0, x2
bl lwp_user_setting_save
#endif
ldp x1, x30, [sp], #0x10
_reswitch:
ldr x6, =rt_interrupt_to_thread // set rt_interrupt_to_thread
str x1, [x6]
ret

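In the UP port the switch requested from interrupt context is deferred: rt_hw_context_switch_interrupt only latches where to switch, and vector_irq performs the real switch on exception exit. A C sketch of the bookkeeping (variable names are stand-ins for the .bss quadwords above):

#include <stdint.h>

static uint64_t *rt_from_sp_slot;  /* stands in for rt_interrupt_from_thread */
static uint64_t *rt_to_sp_slot;    /* stands in for rt_interrupt_to_thread */
static int      rt_switch_flag;    /* stands in for rt_thread_switch_interrupt_flag */

/* Mirrors rt_hw_context_switch_interrupt: latch 'from' once per pending
 * switch, always retarget 'to'; vector_irq consumes and clears the flag. */
static void context_switch_interrupt(uint64_t *from_sp, uint64_t *to_sp)
{
    if (!rt_switch_flag) {
        rt_switch_flag = 1;
        rt_from_sp_slot = from_sp;
    }
    rt_to_sp_slot = to_sp;
}

int main(void)
{
    uint64_t a = 0, b = 0, c = 0;
    context_switch_interrupt(&a, &b);
    context_switch_interrupt(&b, &c);   /* 'from' stays latched at &a */
    return (rt_from_sp_slot == &a && rt_to_sp_slot == &c) ? 0 : 1;
}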

@@ -0,0 +1,79 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-03-25     Shell        Trimming unnecessary ops and
* improving the performance of ctx switch
*/
#ifndef __ARM64_CONTEXT_H__
#define __ARM64_CONTEXT_H__
#include "../include/context_gcc.h"
#include <rtconfig.h>
#include <asm-generic.h>
#include <asm-fpu.h>
#include <armv8.h>
.macro RESTORE_CONTEXT_SWITCH
/* Set the SP to point to the stack of the task being restored. */
mov sp, x0
#ifdef RT_USING_SMART
bl rt_thread_self
mov x19, x0
bl lwp_aspace_switch
mov x0, x19
bl lwp_user_setting_restore
#endif /* RT_USING_SMART */
_RESTORE_CONTEXT_SWITCH
.endm
.macro RESTORE_IRQ_CONTEXT
/* Set the SP to point to the stack of the task being restored. */
MOV SP, X0
#ifdef RT_USING_SMART
BL rt_thread_self
MOV X19, X0
BL lwp_aspace_switch
MOV X0, X19
BL lwp_user_setting_restore
#endif
LDP X2, X3, [SP], #0x10 /* SPSR and ELR. */
TST X3, #0x1f
MSR SPSR_EL1, X3
MSR ELR_EL1, X2
LDP X29, X30, [SP], #0x10
MSR SP_EL0, X29
LDP X28, X29, [SP], #0x10
MSR FPCR, X28
MSR FPSR, X29
LDP X28, X29, [SP], #0x10
LDP X26, X27, [SP], #0x10
LDP X24, X25, [SP], #0x10
LDP X22, X23, [SP], #0x10
LDP X20, X21, [SP], #0x10
LDP X18, X19, [SP], #0x10
LDP X16, X17, [SP], #0x10
LDP X14, X15, [SP], #0x10
LDP X12, X13, [SP], #0x10
LDP X10, X11, [SP], #0x10
LDP X8, X9, [SP], #0x10
LDP X6, X7, [SP], #0x10
LDP X4, X5, [SP], #0x10
LDP X2, X3, [SP], #0x10
LDP X0, X1, [SP], #0x10
RESTORE_FPU SP
#ifdef RT_USING_SMART
BEQ arch_ret_to_user
#endif
ERET
.endm
#endif /* __ARM64_CONTEXT_H__ */


@@ -0,0 +1,75 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-03-28 Shell Move vector handling codes from context_gcc.S
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include "../include/vector_gcc.h"
#include "context_gcc.h"
#include <rtconfig.h>
#include <asm-generic.h>
#include <asm-fpu.h>
#include <armv8.h>
.section .text
.align 8
.globl vector_fiq
vector_fiq:
SAVE_IRQ_CONTEXT
stp x0, x1, [sp, #-0x10]!
bl rt_hw_trap_fiq
ldp x0, x1, [sp], #0x10
RESTORE_IRQ_CONTEXT
.globl rt_interrupt_enter
.globl rt_interrupt_leave
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.align 8
.globl vector_irq
vector_irq:
SAVE_IRQ_CONTEXT
stp x0, x1, [sp, #-0x10]! /* X0 is thread sp */
bl rt_interrupt_enter
bl rt_hw_trap_irq
bl rt_interrupt_leave
ldp x0, x1, [sp], #0x10
/**
* if rt_thread_switch_interrupt_flag set, jump to
* rt_hw_context_switch_interrupt_do and don't return
*/
ldr x1, =rt_thread_switch_interrupt_flag
ldr x2, [x1]
cmp x2, #1
b.ne vector_irq_exit
mov x2, #0 // clear flag
str x2, [x1]
ldr x3, =rt_interrupt_from_thread
ldr x4, [x3]
str x0, [x4] // store sp in preempted task's tcb
ldr x3, =rt_interrupt_to_thread
ldr x4, [x3]
ldr x0, [x4] // get new task's stack pointer
RESTORE_IRQ_CONTEXT
vector_irq_exit:
RESTORE_IRQ_CONTEXT_WITHOUT_MMU_SWITCH


@@ -3,12 +3,19 @@
 *
 * SPDX-License-Identifier: Apache-2.0
 *
+* Change Logs:
 * Date           Author       Notes
 * 2018-10-06     ZhaoXiaowei  the first version
+* 2024-03-28     Shell        Move vector handling codes from context_gcc.S
 */
-.text
+#ifndef __ASSEMBLY__
+#define __ASSEMBLY__
+#endif
+
+#include <rtconfig.h>
+
+.text
 .globl system_vectors
 .globl vector_exception
 .globl vector_irq

@@ -21,40 +28,71 @@ system_vectors:
 /* Exception from CurrentEL (EL1) with SP_EL0 (SPSEL=1) */
 .org (VBAR + 0x00 + 0)
-    B vector_serror /* Synchronous */
+    b vector_serror /* Synchronous */
 .org (VBAR + 0x80 + 0)
-    B vector_serror /* IRQ/vIRQ */
+    b vector_serror /* IRQ/vIRQ */
 .org (VBAR + 0x100 + 0)
-    B vector_serror /* FIQ/vFIQ */
+    b vector_serror /* FIQ/vFIQ */
 .org (VBAR + 0x180 + 0)
-    B vector_serror /* Error/vError */
+    b vector_serror /* Error/vError */

 /* Exception from CurrentEL (EL1) with SP_ELn */
 .org (VBAR + 0x200 + 0)
-    B vector_exception /* Synchronous */
+    b vector_exception /* Synchronous */
 .org (VBAR + 0x280 + 0)
-    B vector_irq /* IRQ/vIRQ */
+    b vector_irq /* IRQ/vIRQ */
 .org (VBAR + 0x300 + 0)
-    B vector_fiq /* FIQ/vFIQ */
+    b vector_fiq /* FIQ/vFIQ */
 .org (VBAR + 0x380 + 0)
-    B vector_serror
+    b vector_serror

 /* Exception from lower EL, aarch64 */
 .org (VBAR + 0x400 + 0)
-    B vector_exception
+    b vector_exception
 .org (VBAR + 0x480 + 0)
-    B vector_irq
+    b vector_irq
 .org (VBAR + 0x500 + 0)
-    B vector_fiq
+    b vector_fiq
 .org (VBAR + 0x580 + 0)
-    B vector_serror
+    b vector_serror

 /* Exception from lower EL, aarch32 */
 .org (VBAR + 0x600 + 0)
-    B vector_serror
+    b vector_serror
 .org (VBAR + 0x680 + 0)
-    B vector_serror
+    b vector_serror
 .org (VBAR + 0x700 + 0)
-    B vector_serror
+    b vector_serror
 .org (VBAR + 0x780 + 0)
-    B vector_serror
+    b vector_serror
#include "include/vector_gcc.h"
START_POINT(vector_exception)
SAVE_IRQ_CONTEXT
stp x0, x1, [sp, #-0x10]!
#ifdef RT_USING_SMART
SAVE_USER_CTX
#endif
bl rt_hw_trap_exception
#ifdef RT_USING_SMART
ldp x0, x1, [sp]
RESTORE_USER_CTX x0
#endif
ldp x0, x1, [sp], #0x10
RESTORE_IRQ_CONTEXT_WITHOUT_MMU_SWITCH
START_POINT_END(vector_exception)
START_POINT(vector_serror)
SAVE_IRQ_CONTEXT
#ifdef RT_USING_SMART
SAVE_USER_CTX
#endif
stp x0, x1, [sp, #-0x10]!
bl rt_hw_trap_serror
b .
START_POINT_END(vector_serror)
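For reference, the .org table in system_vectors earlier in this file follows the fixed AArch64 layout: four groups of four entries, each entry 0x80 bytes apart and each group 0x200 apart. A small C helper that reproduces the offsets (illustrative):

#include <stdio.h>

/* 4 groups (current EL w/ SP0, current EL w/ SPx, lower EL aarch64,
 * lower EL aarch32) x 4 kinds (sync, IRQ, FIQ, SError). */
enum vec_kind { SYNC, IRQ, FIQ, SERROR };

static unsigned vbar_offset(unsigned group, enum vec_kind kind)
{
    return group * 0x200 + (unsigned)kind * 0x80;
}

int main(void)
{
    /* IRQ taken from a lower EL (aarch64): matches ".org (VBAR + 0x480)" */
    printf("0x%x\n", vbar_offset(2, IRQ));
    return 0;
}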