2024-08-05 20:57:09 +08:00
commit 46d9ee7795
3020 changed files with 1725767 additions and 0 deletions


@@ -0,0 +1,265 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 Jesven first version
* 2023-07-16 Shell Move part of the codes to C from asm in signal handling
* 2023-08-03 Shell Support of syscall restart (SA_RESTART)
* 2023-10-16 Shell Support a new backtrace framework
*/
#include <armv8.h>
#include <rthw.h>
#include <rtthread.h>
#include <stdlib.h>
#include <string.h>
#include <lwp_internal.h>
#ifdef ARCH_MM_MMU
#define DBG_TAG "lwp.arch"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <lwp_arch.h>
#include <lwp_user_mm.h>
extern size_t MMUTable[];
int arch_user_space_init(struct rt_lwp *lwp)
{
size_t *mmu_table;
mmu_table = rt_hw_mmu_pgtbl_create();
if (mmu_table)
{
lwp->end_heap = USER_HEAP_VADDR;
lwp->aspace = rt_aspace_create(
(void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
if (!lwp->aspace)
{
return -RT_ERROR;
}
}
else
{
return -RT_ENOMEM;
}
return 0;
}
void *arch_kernel_mmu_table_get(void)
{
return (void *)NULL;
}
void arch_user_space_free(struct rt_lwp *lwp)
{
if (lwp)
{
RT_ASSERT(lwp->aspace);
void *pgtbl = lwp->aspace->page_table;
rt_aspace_delete(lwp->aspace);
/* must be freed after the aspace is deleted, since pgtbl is required for unmapping */
rt_pages_free(pgtbl, 0);
lwp->aspace = NULL;
}
else
{
LOG_W("%s: NULL lwp as parameter", __func__);
RT_ASSERT(0);
}
}
int arch_expand_user_stack(void *addr)
{
int ret = 0;
size_t stack_addr = (size_t)addr;
stack_addr &= ~ARCH_PAGE_MASK;
if ((stack_addr >= (size_t)USER_STACK_VSTART) &&
(stack_addr < (size_t)USER_STACK_VEND))
{
void *map =
lwp_map_user(lwp_self(), (void *)stack_addr, ARCH_PAGE_SIZE, 0);
if (map || lwp_user_accessable(addr, 1))
{
ret = 1;
}
}
return ret;
}
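/*
 * arch_expand_user_stack() is presumably invoked from the page-fault path:
 * it maps a single ARCH_PAGE_SIZE page at the faulting address when that
 * address falls inside [USER_STACK_VSTART, USER_STACK_VEND), and returns 1
 * if the page is (or already was) accessible, 0 otherwise.
 */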
#endif
int arch_set_thread_context(void (*exit)(void), void *new_thread_stack,
void *user_stack, void **thread_sp)
{
struct rt_hw_exp_stack *syscall_frame;
struct rt_hw_exp_stack *thread_frame;
struct rt_hw_exp_stack *ori_syscall = rt_thread_self()->user_ctx.ctx;
RT_ASSERT(ori_syscall != RT_NULL);
thread_frame = (void *)((long)new_thread_stack - sizeof(struct rt_hw_exp_stack));
syscall_frame = (void *)((long)new_thread_stack - 2 * sizeof(struct rt_hw_exp_stack));
memcpy(syscall_frame, ori_syscall, sizeof(*syscall_frame));
syscall_frame->sp_el0 = (long)user_stack;
syscall_frame->x0 = 0;
thread_frame->cpsr = ((3 << 6) | 0x4 | 0x1);
thread_frame->pc = (long)exit;
thread_frame->x0 = 0;
*thread_sp = syscall_frame;
return 0;
}
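/*
 * Kernel-stack layout built by arch_set_thread_context() above (addresses
 * grow downward from new_thread_stack):
 *
 *   new_thread_stack -> +---------------------------+  (high address)
 *                       | thread_frame              |  pc = exit, x0 = 0,
 *                       | (struct rt_hw_exp_stack)  |  cpsr = EL1h, IRQ/FIQ
 *                       +---------------------------+  masked
 *                       | syscall_frame             |  copy of the parent's
 *                       | (struct rt_hw_exp_stack)  |  frame, sp_el0 = user
 *   *thread_sp -------> +---------------------------+  stack, x0 = 0
 */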
#define ALIGN_BYTES (16)
/* the layout is part of the ABI, don't change it */
struct signal_ucontext
{
rt_int64_t sigreturn;
lwp_sigset_t save_sigmask;
siginfo_t si;
rt_align(ALIGN_BYTES)
struct rt_hw_exp_stack frame;
};
RT_STATIC_ASSERT(abi_offset_compatible, offsetof(struct signal_ucontext, si) == UCTX_ABI_OFFSET_TO_SI);
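/*
 * An assumed user-side view (illustrative, not part of this file): a
 * SA_SIGINFO handler receives pointers derived from this layout,
 *
 *   void handler(int signo, siginfo_t *si, void *ucontext);
 *
 * where si sits UCTX_ABI_OFFSET_TO_SI bytes above the frame base, exactly
 * what the static assert above pins down and what arch_thread_signal_enter
 * in the assembly relies on when it computes the x1 argument.
 */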
void *arch_signal_ucontext_get_frame(struct signal_ucontext *uctx)
{
return &uctx->frame;
}
/* internal use only */
void arch_syscall_prepare_signal(rt_base_t rc, struct rt_hw_exp_stack *exp_frame)
{
long x0 = exp_frame->x0;
exp_frame->x0 = rc;
exp_frame->x7 = x0;
return ;
}
void arch_syscall_restart(void *sp, void *ksp);
void arch_syscall_set_errno(void *eframe, int expected, int code)
{
struct rt_hw_exp_stack *exp_frame = eframe;
if (exp_frame->x0 == -expected)
exp_frame->x0 = -code;
return ;
}
void arch_signal_check_erestart(void *eframe, void *ksp)
{
struct rt_hw_exp_stack *exp_frame = eframe;
long rc = exp_frame->x0;
long sys_id = exp_frame->x8;
(void)sys_id;
if (rc == -ERESTART)
{
LOG_D("%s(rc=%ld,sys_id=%ld,pid=%d)", __func__, rc, sys_id, lwp_self()->pid);
LOG_D("%s: restart rc = %ld", lwp_get_syscall_name(sys_id), rc);
exp_frame->x0 = exp_frame->x7;
arch_syscall_restart(eframe, ksp);
}
return ;
}
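/*
 * Restart flow in brief: arch_syscall_prepare_signal() stashed the original
 * x0 (the first syscall argument) in x7 before the return value overwrote
 * x0. If the syscall reported -ERESTART, the code above puts the original
 * argument back and hands control to arch_syscall_restart(), which restores
 * the saved register context and branches back to vector_exception so the
 * syscall is issued again.
 */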
static void arch_signal_post_action(struct signal_ucontext *new_sp, rt_base_t kernel_sp)
{
arch_signal_check_erestart(&new_sp->frame, (void *)kernel_sp);
return ;
}
void *arch_signal_ucontext_restore(rt_base_t user_sp, rt_base_t kernel_sp)
{
struct signal_ucontext *new_sp;
new_sp = (void *)user_sp;
if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
{
lwp_thread_signal_mask(rt_thread_self(), LWP_SIG_MASK_CMD_SET_MASK, &new_sp->save_sigmask, RT_NULL);
arch_signal_post_action(new_sp, kernel_sp);
}
else
{
LOG_I("User frame corrupted during signal handling\nexiting...");
sys_exit_group(EXIT_FAILURE);
}
return (char *)&new_sp->frame + sizeof(struct rt_hw_exp_stack);
}
void *arch_signal_ucontext_save(rt_base_t user_sp, siginfo_t *psiginfo,
struct rt_hw_exp_stack *exp_frame,
lwp_sigset_t *save_sig_mask)
{
struct signal_ucontext *new_sp;
new_sp = (void *)((user_sp - sizeof(struct signal_ucontext)) & ~0xf);
if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
{
/* push psiginfo */
if (psiginfo)
{
lwp_memcpy(&new_sp->si, psiginfo, sizeof(*psiginfo));
}
/* exp frame is already aligned as AAPCS64 requires */
lwp_memcpy(&new_sp->frame, exp_frame, sizeof(*exp_frame));
/* copy the save_sig_mask */
lwp_memcpy(&new_sp->save_sigmask, save_sig_mask, sizeof(lwp_sigset_t));
/* copy lwp_sigreturn */
const size_t lwp_sigreturn_bytes = 8;
extern void lwp_sigreturn(void);
/* ensure that the sigreturn trampoline starts at the outermost boundary */
lwp_memcpy(&new_sp->sigreturn, &lwp_sigreturn, lwp_sigreturn_bytes);
}
else
{
LOG_I("%s: User stack overflow", __func__);
sys_exit_group(EXIT_FAILURE);
}
return new_sp;
}
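/*
 * Resulting user-stack frame (a 16-byte aligned struct signal_ucontext,
 * lowest address first): the 8-byte sigreturn trampoline, the saved signal
 * mask, the siginfo, then the aligned copy of the exception frame. The
 * trampoline sits at the lowest address so the handler's return address can
 * point straight at it.
 */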
int arch_backtrace_uthread(rt_thread_t thread)
{
struct rt_hw_backtrace_frame frame;
struct rt_hw_exp_stack *stack;
if (thread && thread->lwp)
{
stack = thread->user_ctx.ctx;
if ((long)stack > (unsigned long)thread->stack_addr
&& (long)stack < (unsigned long)thread->stack_addr + thread->stack_size)
{
frame.pc = stack->pc;
frame.fp = stack->x29;
lwp_backtrace_frame(thread, &frame);
return 0;
}
else
return -1;
}
return -1;
}
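For illustration, a minimal user-space sketch (hypothetical program, not part of this commit) that exercises the machinery above: signal delivery goes through arch_signal_ucontext_save()/restore(), and SA_RESTART drives the -ERESTART path so the interrupted read() is re-issued:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void handler(int signo, siginfo_t *si, void *ucontext)
{
    /* si and ucontext point into the signal_ucontext frame that the
     * kernel pushed onto this thread's user stack */
    (void)signo; (void)si; (void)ucontext;
    write(STDOUT_FILENO, "signal\n", 7);
}

int main(void)
{
    struct sigaction sa = { 0 };
    sa.sa_sigaction = handler;
    sa.sa_flags = SA_SIGINFO | SA_RESTART; /* SA_RESTART -> restart on -ERESTART */
    sigemptyset(&sa.sa_mask);
    sigaction(SIGINT, &sa, NULL);

    char buf[64];
    /* if SIGINT arrives while blocked here, the handler runs and the
     * read() is restarted instead of failing with EINTR */
    ssize_t n = read(STDIN_FILENO, buf, sizeof(buf));
    printf("read returned %zd\n", n);
    return 0;
}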


@@ -0,0 +1,76 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 Jesven first version
*/
#ifndef LWP_ARCH_H__
#define LWP_ARCH_H__
#include <rtconfig.h>
#ifdef ARCH_MM_MMU
#define USER_VADDR_TOP 0x0001000000000000UL
#define USER_HEAP_VADDR (0x0000ffff40000000UL)
#define USER_HEAP_VEND USER_STACK_VSTART
#define USER_STACK_VSTART 0x0000ffff70000000UL
#define USER_STACK_VEND (USER_STACK_VSTART + 0x10000000)
#define USER_ARG_VADDR USER_STACK_VEND
#define LDSO_LOAD_VADDR 0x60000000UL
#define USER_VADDR_START 0x00200000UL
#define USER_LOAD_VADDR USER_VADDR_START
#define UCTX_ABI_OFFSET_TO_SI 16
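/*
 * Derived user address map (values computed from the macros above):
 *
 *   0x0000000000200000  USER_VADDR_START / USER_LOAD_VADDR
 *   0x0000000060000000  LDSO_LOAD_VADDR
 *   0x0000ffff40000000  USER_HEAP_VADDR .. USER_HEAP_VEND (= stack start)
 *   0x0000ffff70000000  USER_STACK_VSTART
 *   0x0000ffff80000000  USER_STACK_VEND / USER_ARG_VADDR
 *   0x0001000000000000  USER_VADDR_TOP
 */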
#ifndef __ASSEMBLY__
#include <lwp.h>
#include <lwp_arch_comm.h>
#ifdef __cplusplus
extern "C" {
#endif
unsigned long rt_hw_ffz(unsigned long x);
rt_inline void icache_invalid_all(void)
{
__asm__ volatile ("ic ialluis\n\tisb sy":::"memory");
}
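/*
 * "ic ialluis" invalidates the instruction cache to the point of unification
 * for the Inner Shareable domain; the trailing "isb sy" makes the
 * invalidation visible to subsequent instruction fetches. This is used after
 * copying trampoline code into user-accessible pages.
 */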
/**
* @brief Save signal-related context to user stack
*
* @param user_sp the current user sp
* @param psiginfo pointer to the siginfo
* @param exp_frame exception frame to resume former execution
* @param save_sig_mask signal mask to restore when the handler returns
* @return void* the new user sp
*/
void *arch_signal_ucontext_save(rt_base_t user_sp, siginfo_t *psiginfo,
struct rt_hw_exp_stack *exp_frame,
lwp_sigset_t *save_sig_mask);
/**
* @brief Restore the signal mask after return
*
* @param user_sp sp of user
* @param kernel_sp current kernel sp holding the exception frame
* @return void* pointer just past the saved exception frame on the user stack
*/
void *arch_signal_ucontext_restore(rt_base_t user_sp, rt_base_t kernel_sp);
void arch_syscall_restart(void *sp, void *ksp);
#ifdef __cplusplus
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* ARCH_MM_MMU */
#endif /*LWP_ARCH_H__*/


@@ -0,0 +1,588 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 Jesven first version
* 2023-07-16 Shell Move part of the codes to C from asm in signal handling
* 2023-08-03 Shell Support of syscall restart (SA_RESTART)
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include <rtconfig.h>
#include <asm-generic.h>
#include <asm-fpu.h>
#include <armv8.h>
#include <lwp_arch.h>
/*********************
* SPSR BIT *
*********************/
#define SPSR_Mode(v) ((v) << 0)
#define SPSR_A64 (0 << 4)
#define SPSR_RESERVED_5 (0 << 5)
#define SPSR_FIQ_MASKED(v) ((v) << 6)
#define SPSR_IRQ_MASKED(v) ((v) << 7)
#define SPSR_SERROR_MASKED(v) ((v) << 8)
#define SPSR_D_MASKED(v) ((v) << 9)
#define SPSR_RESERVED_10_19 (0 << 10)
#define SPSR_IL(v) ((v) << 20)
#define SPSR_SS(v) ((v) << 21)
#define SPSR_RESERVED_22_27 (0 << 22)
#define SPSR_V(v) ((v) << 28)
#define SPSR_C(v) ((v) << 29)
#define SPSR_Z(v) ((v) << 30)
#define SPSR_N(v) ((v) << 31)
/**************************************************/
.text
/*
* void arch_start_umode(args, text, ustack, kstack);
*/
.global arch_start_umode
.type arch_start_umode, %function
arch_start_umode:
mov sp, x3
mov x4, #(SPSR_Mode(0) | SPSR_A64)
msr daifset, #3
dsb sy
mrs x30, sp_el0
/* user stack top */
msr sp_el0, x2
mov x3, x2
msr spsr_el1, x4
msr elr_el1, x1
eret
/*
* void arch_crt_start_umode(args, text, ustack, kstack);
*/
.global arch_crt_start_umode
.type arch_crt_start_umode, %function
arch_crt_start_umode:
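/*
 * Copy the lwp_thread_return trampoline (three instructions, defined near
 * the bottom of this file) to just below the user stack top; "mrs x30,
 * sp_el0" below then makes it the initial link register, so a return from
 * the user entry lands in the exit(0) syscall.
 */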
sub x4, x2, #0x10
adr x2, lwp_thread_return
ldr x5, [x2]
str x5, [x4]
ldr x5, [x2, #4]
str x5, [x4, #4]
ldr x5, [x2, #8]
str x5, [x4, #8]
mov x5, x4
dc cvau, x5
add x5, x5, #8
dc cvau, x5
dsb sy
ic ialluis
dsb sy
msr sp_el0, x4
mov sp, x3
mov x4, #(SPSR_Mode(0) | SPSR_A64)
msr daifset, #3
dsb sy
mrs x30, sp_el0
msr spsr_el1, x4
msr elr_el1, x1
eret
.global arch_get_user_sp
arch_get_user_sp:
mrs x0, sp_el0
ret
.global arch_fork_exit
.global arch_clone_exit
arch_fork_exit:
arch_clone_exit:
b arch_syscall_exit
/*
void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
*/
.global lwp_exec_user
lwp_exec_user:
mov sp, x1
mov x4, #(SPSR_Mode(0) | SPSR_A64)
ldr x3, =0x0000ffff80000000
msr daifset, #3
msr spsr_el1, x4
msr elr_el1, x2
eret
/*
* void SVC_Handler(regs);
* since this routine resets the SP, we treat it as a start point
*/
START_POINT(SVC_Handler)
/* x0 is initial sp */
mov sp, x0
msr daifclr, #3 /* enable interrupt */
bl rt_thread_self
bl lwp_user_setting_save
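/*
 * Dispatch on the syscall number in x8: 0xe000 is the sigreturn trampoline
 * (arch_signal_quit), 0xf000 is the debug-return trampoline (ret_from_user);
 * anything else is looked up in the syscall table.
 */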
ldp x8, x9, [sp, #(CONTEXT_OFFSET_X8)]
and x0, x8, #0xf000
cmp x0, #0xe000
beq arch_signal_quit
cmp x0, #0xf000
beq ret_from_user
uxtb x0, w8
bl lwp_get_sys_api
cmp x0, xzr
mov x30, x0
beq arch_syscall_exit
ldp x0, x1, [sp, #(CONTEXT_OFFSET_X0)]
ldp x2, x3, [sp, #(CONTEXT_OFFSET_X2)]
ldp x4, x5, [sp, #(CONTEXT_OFFSET_X4)]
ldp x6, x7, [sp, #(CONTEXT_OFFSET_X6)]
blr x30
/* jump explicitly to keep this code position independent */
b arch_syscall_exit
START_POINT_END(SVC_Handler)
.global arch_syscall_exit
arch_syscall_exit:
/**
* @brief back up the original x0, which is required to restart the syscall,
* then set up the syscall return value in the stack frame
*/
mov x1, sp
bl arch_syscall_prepare_signal
msr daifset, #3
ldp x2, x3, [sp], #0x10 /* SPSR and ELR. */
msr spsr_el1, x3
msr elr_el1, x2
ldp x29, x30, [sp], #0x10
msr sp_el0, x29
ldp x28, x29, [sp], #0x10
msr fpcr, x28
msr fpsr, x29
ldp x28, x29, [sp], #0x10
ldp x26, x27, [sp], #0x10
ldp x24, x25, [sp], #0x10
ldp x22, x23, [sp], #0x10
ldp x20, x21, [sp], #0x10
ldp x18, x19, [sp], #0x10
ldp x16, x17, [sp], #0x10
ldp x14, x15, [sp], #0x10
ldp x12, x13, [sp], #0x10
ldp x10, x11, [sp], #0x10
ldp x8, x9, [sp], #0x10
ldp x6, x7, [sp], #0x10
ldp x4, x5, [sp], #0x10
ldp x2, x3, [sp], #0x10
ldp x0, x1, [sp], #0x10
RESTORE_FPU sp
/* the sp is reset to the outermost level; irq and fiq are disabled */
START_POINT(arch_ret_to_user)
msr daifset, #3
/* save exception frame */
SAVE_FPU sp
stp x0, x1, [sp, #-0x10]!
stp x2, x3, [sp, #-0x10]!
stp x4, x5, [sp, #-0x10]!
stp x6, x7, [sp, #-0x10]!
stp x8, x9, [sp, #-0x10]!
stp x10, x11, [sp, #-0x10]!
stp x12, x13, [sp, #-0x10]!
stp x14, x15, [sp, #-0x10]!
stp x16, x17, [sp, #-0x10]!
stp x18, x19, [sp, #-0x10]!
stp x20, x21, [sp, #-0x10]!
stp x22, x23, [sp, #-0x10]!
stp x24, x25, [sp, #-0x10]!
stp x26, x27, [sp, #-0x10]!
stp x28, x29, [sp, #-0x10]!
mrs x0, fpcr
mrs x1, fpsr
stp x0, x1, [sp, #-0x10]!
stp x29, x30, [sp, #-0x10]!
/* pre-action */
bl lwp_check_debug
bl lwp_check_exit_request
cbz w0, 1f
/* exit on event */
msr daifclr, #3
mov x0, xzr
b sys_exit
1:
/* check if dbg ops exist */
ldr x0, =rt_dbg_ops
ldr x0, [x0]
cbz x0, 3f
bl dbg_thread_in_debug
mov x1, #(1 << 21)
mrs x2, spsr_el1
cbz w0, 2f
orr x2, x2, x1
msr spsr_el1, x2
b 3f
2:
bic x2, x2, x1
msr spsr_el1, x2
3:
/**
* push two dummy words to simulate an interrupt exception frame
* Note: in kernel state, the context switch does not save this context
*/
mrs x0, spsr_el1
mrs x1, elr_el1
stp x1, x0, [sp, #-0x10]!
mov x0, sp
msr daifclr, #3
bl lwp_thread_signal_catch
msr daifset, #3
ldp x1, x0, [sp], #0x10
msr spsr_el1, x0
msr elr_el1, x1
/* restore the exception frame, then check debug */
ldp x29, x30, [sp], #0x10
ldp x0, x1, [sp], #0x10
msr fpcr, x0
msr fpsr, x1
ldp x28, x29, [sp], #0x10
ldp x26, x27, [sp], #0x10
ldp x24, x25, [sp], #0x10
ldp x22, x23, [sp], #0x10
ldp x20, x21, [sp], #0x10
ldp x18, x19, [sp], #0x10
ldp x16, x17, [sp], #0x10
ldp x14, x15, [sp], #0x10
ldp x12, x13, [sp], #0x10
ldp x10, x11, [sp], #0x10
ldp x8, x9, [sp], #0x10
ldp x6, x7, [sp], #0x10
ldp x4, x5, [sp], #0x10
ldp x2, x3, [sp], #0x10
ldp x0, x1, [sp], #0x10
RESTORE_FPU sp
stp x0, x1, [sp, #-0x10]!
ldr x0, =rt_dbg_ops
ldr x0, [x0]
cmp x0, xzr
ldp x0, x1, [sp], #0x10
beq 1f
/* save */
SAVE_FPU sp
stp x0, x1, [sp, #-0x10]!
stp x2, x3, [sp, #-0x10]!
stp x4, x5, [sp, #-0x10]!
stp x6, x7, [sp, #-0x10]!
stp x8, x9, [sp, #-0x10]!
stp x10, x11, [sp, #-0x10]!
stp x12, x13, [sp, #-0x10]!
stp x14, x15, [sp, #-0x10]!
stp x16, x17, [sp, #-0x10]!
stp x18, x19, [sp, #-0x10]!
stp x20, x21, [sp, #-0x10]!
stp x22, x23, [sp, #-0x10]!
stp x24, x25, [sp, #-0x10]!
stp x26, x27, [sp, #-0x10]!
stp x28, x29, [sp, #-0x10]!
mrs x0, fpcr
mrs x1, fpsr
stp x0, x1, [sp, #-0x10]!
stp x29, x30, [sp, #-0x10]!
mrs x0, elr_el1
bl dbg_attach_req
/* restore */
ldp x29, x30, [sp], #0x10
ldp x0, x1, [sp], #0x10
msr fpcr, x0
msr fpsr, x1
ldp x28, x29, [sp], #0x10
ldp x26, x27, [sp], #0x10
ldp x24, x25, [sp], #0x10
ldp x22, x23, [sp], #0x10
ldp x20, x21, [sp], #0x10
ldp x18, x19, [sp], #0x10
ldp x16, x17, [sp], #0x10
ldp x14, x15, [sp], #0x10
ldp x12, x13, [sp], #0x10
ldp x10, x11, [sp], #0x10
ldp x8, x9, [sp], #0x10
ldp x6, x7, [sp], #0x10
ldp x4, x5, [sp], #0x10
ldp x2, x3, [sp], #0x10
ldp x0, x1, [sp], #0x10
RESTORE_FPU sp
1:
eret
START_POINT_END(arch_ret_to_user)
.global lwp_check_debug
lwp_check_debug:
ldr x0, =rt_dbg_ops
ldr x0, [x0]
cbnz x0, 1f
ret
1:
stp x29, x30, [sp, #-0x10]!
bl dbg_check_suspend
cbz w0, lwp_check_debug_quit
mrs x2, sp_el0
sub x2, x2, #0x10
mov x3, x2
msr sp_el0, x2
ldr x0, =lwp_debugreturn
ldr w1, [x0]
str w1, [x2]
ldr w1, [x0, #4]
str w1, [x2, #4]
dc cvau, x2
add x2, x2, #4
dc cvau, x2
dsb sy
isb sy
ic ialluis
isb sy
mrs x0, elr_el1
mrs x1, spsr_el1
stp x0, x1, [sp, #-0x10]!
msr elr_el1, x3 /* lwp_debugreturn */
mov x1, #(SPSR_Mode(0) | SPSR_A64)
orr x1, x1, #(1 << 21)
msr spsr_el1, x1
eret
ret_from_user:
/* sp_el0 += 16 to drop the lwp_debugreturn trampoline from the user stack */
mrs x0, sp_el0
add x0, x0, #0x10
msr sp_el0, x0
/* now in EL1; sp is CONTEXT_SIZE (0x220) below its original (empty) position */
mov x0, sp
add x0, x0, #0x220
mov sp, x0
ldp x0, x1, [sp], #0x10 /* x1 is origin spsr_el1 */
msr elr_el1, x0 /* x0 is origin elr_el1 */
msr spsr_el1, x1
lwp_check_debug_quit:
ldp x29, x30, [sp], #0x10
ret
.global arch_syscall_restart
arch_syscall_restart:
msr daifset, 3
mov sp, x1
/* drop exception frame in user stack */
msr sp_el0, x0
/* restore previous exception frame */
msr spsel, #0
ldp x2, x3, [sp], #0x10
msr elr_el1, x2
msr spsr_el1, x3
ldp x29, x30, [sp], #0x10
ldp x28, x29, [sp], #0x10
msr fpcr, x28
msr fpsr, x29
ldp x28, x29, [sp], #0x10
ldp x26, x27, [sp], #0x10
ldp x24, x25, [sp], #0x10
ldp x22, x23, [sp], #0x10
ldp x20, x21, [sp], #0x10
ldp x18, x19, [sp], #0x10
ldp x16, x17, [sp], #0x10
ldp x14, x15, [sp], #0x10
ldp x12, x13, [sp], #0x10
ldp x10, x11, [sp], #0x10
ldp x8, x9, [sp], #0x10
ldp x6, x7, [sp], #0x10
ldp x4, x5, [sp], #0x10
ldp x2, x3, [sp], #0x10
ldp x0, x1, [sp], #0x10
RESTORE_FPU sp
msr spsel, #1
b vector_exception
arch_signal_quit:
/* drop current exception frame */
add sp, sp, #CONTEXT_SIZE
mov x1, sp
mrs x0, sp_el0
bl arch_signal_ucontext_restore
add x0, x0, #-CONTEXT_SIZE
msr sp_el0, x0
/**
* Note: since we are about to reset spsr and a reschedule would
* corrupt it, we disable IRQs for a short period here
*/
msr daifset, #3
/* restore previous exception frame */
msr spsel, #0
ldp x2, x3, [sp], #0x10
msr elr_el1, x2
msr spsr_el1, x3
ldp x29, x30, [sp], #0x10
ldp x28, x29, [sp], #0x10
msr fpcr, x28
msr fpsr, x29
ldp x28, x29, [sp], #0x10
ldp x26, x27, [sp], #0x10
ldp x24, x25, [sp], #0x10
ldp x22, x23, [sp], #0x10
ldp x20, x21, [sp], #0x10
ldp x18, x19, [sp], #0x10
ldp x16, x17, [sp], #0x10
ldp x14, x15, [sp], #0x10
ldp x12, x13, [sp], #0x10
ldp x10, x11, [sp], #0x10
ldp x8, x9, [sp], #0x10
ldp x6, x7, [sp], #0x10
ldp x4, x5, [sp], #0x10
ldp x2, x3, [sp], #0x10
ldp x0, x1, [sp], #0x10
RESTORE_FPU sp
msr spsel, #1
b arch_ret_to_user
/**
* rt_noreturn
* void arch_thread_signal_enter(
* int signo, -> x0
* siginfo_t *psiginfo, -> x1
* void *exp_frame, -> x2
* void *entry_uaddr, -> x3
* lwp_sigset_t *save_sig_mask, -> x4
* )
*/
.global arch_thread_signal_enter
arch_thread_signal_enter:
mov x19, x0
mov x20, x2 /* exp_frame */
mov x21, x3
/**
* move the exception frame to the user stack
*/
mrs x0, sp_el0
mov x3, x4
/* arch_signal_ucontext_save(user_sp, psiginfo, exp_frame, save_sig_mask); */
bl arch_signal_ucontext_save
mov x22, x0
/* get and save the pointer to the user frame */
bl arch_signal_ucontext_get_frame
mov x2, x0
mov x0, x22
dc cvau, x0
dsb sy
ic ialluis
dsb sy
/**
* Brief: Prepare the environment for the signal handler
*/
/**
* reset the cpsr
* and drop exp frame on kernel stack, reset kernel sp
*
* Note: since we are about to reset spsr and a reschedule would
* corrupt it, we disable IRQs for a short period here
*/
msr daifset, #3
ldr x1, [x20, #CONTEXT_OFFSET_SPSR_EL1]
msr spsr_el1, x1
add sp, x20, #CONTEXT_SIZE
/** reset user sp */
msr sp_el0, x0
/** set the return address to the sigreturn trampoline */
mov x30, x0
cbnz x21, 1f
mov x21, x30
1:
/** set the entry address of signal handler */
msr elr_el1, x21
/* siginfo is above the return address */
add x1, x30, UCTX_ABI_OFFSET_TO_SI
/* uframe is saved in x2 */
mov x0, x19
/**
 * handler(signo, psi, ucontext);
 */
eret
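/*
 * The trampolines below are copied into user-accessible memory (by
 * lwp_check_debug, arch_signal_ucontext_save and arch_crt_start_umode
 * respectively) and re-enter the kernel via svc: lwp_debugreturn (0xf000)
 * and lwp_sigreturn (0xe000) hit the special checks at the top of
 * SVC_Handler, while lwp_thread_return issues the exit syscall (0x01).
 */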
lwp_debugreturn:
mov x8, 0xf000
svc #0
.global lwp_sigreturn
lwp_sigreturn:
mov x8, #0xe000
svc #0
lwp_thread_return:
mov x0, xzr
mov x8, #0x01
svc #0
.globl arch_get_tidr
arch_get_tidr:
mrs x0, tpidr_el0
ret
.global arch_set_thread_area
arch_set_thread_area:
.globl arch_set_tidr
arch_set_tidr:
msr tpidr_el0, x0
ret