2024-08-05 20:57:09 +08:00
commit 46d9ee7795
3020 changed files with 1725767 additions and 0 deletions

View File

@@ -0,0 +1,28 @@
#include <rtthread.h>
#include <stdint.h>
#include <string.h>
#include <elf.h>
#ifdef ARCH_MM_MMU
#include <mmu.h>
#include <page.h>
#endif
#define Elf_Word Elf64_Word
#define Elf_Addr Elf64_Addr
#define Elf_Half Elf64_Half
#define Elf_Ehdr Elf64_Ehdr
#define Elf_Phdr Elf64_Phdr
#define Elf_Shdr Elf64_Shdr
typedef struct
{
Elf_Word st_name;
Elf_Addr st_value;
Elf_Word st_size;
unsigned char st_info;
unsigned char st_other;
Elf_Half st_shndx;
} Elf_sym;
void arch_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf_sym *dynsym)
{
}

View File

@@ -0,0 +1,265 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 Jesven first version
* 2023-07-16 Shell Move part of the codes to C from asm in signal handling
* 2023-10-16 Shell Support a new backtrace framework
* 2023-08-03 Shell Support of syscall restart (SA_RESTART)
*/
#include <armv8.h>
#include <rthw.h>
#include <rtthread.h>
#include <stdlib.h>
#include <string.h>
#include <lwp_internal.h>
#ifdef ARCH_MM_MMU
#define DBG_TAG "lwp.arch"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <lwp_arch.h>
#include <lwp_user_mm.h>
extern size_t MMUTable[];
int arch_user_space_init(struct rt_lwp *lwp)
{
size_t *mmu_table;
mmu_table = rt_hw_mmu_pgtbl_create();
if (mmu_table)
{
lwp->end_heap = USER_HEAP_VADDR;
lwp->aspace = rt_aspace_create(
(void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
if (!lwp->aspace)
{
return -RT_ERROR;
}
}
else
{
return -RT_ENOMEM;
}
return 0;
}
void *arch_kernel_mmu_table_get(void)
{
return (void *)NULL;
}
void arch_user_space_free(struct rt_lwp *lwp)
{
if (lwp)
{
RT_ASSERT(lwp->aspace);
void *pgtbl = lwp->aspace->page_table;
rt_aspace_delete(lwp->aspace);
/* must be freed after aspace delete, pgtbl is required for unmap */
rt_pages_free(pgtbl, 0);
lwp->aspace = NULL;
}
else
{
LOG_W("%s: NULL lwp as parameter", __func__);
RT_ASSERT(0);
}
}
int arch_expand_user_stack(void *addr)
{
int ret = 0;
size_t stack_addr = (size_t)addr;
stack_addr &= ~ARCH_PAGE_MASK;
if ((stack_addr >= (size_t)USER_STACK_VSTART) &&
(stack_addr < (size_t)USER_STACK_VEND))
{
void *map =
lwp_map_user(lwp_self(), (void *)stack_addr, ARCH_PAGE_SIZE, 0);
if (map || lwp_user_accessable(addr, 1))
{
ret = 1;
}
}
return ret;
}
#endif
int arch_set_thread_context(void (*exit)(void), void *new_thread_stack,
void *user_stack, void **thread_sp)
{
struct rt_hw_exp_stack *syscall_frame;
struct rt_hw_exp_stack *thread_frame;
struct rt_hw_exp_stack *ori_syscall = rt_thread_self()->user_ctx.ctx;
RT_ASSERT(ori_syscall != RT_NULL);
thread_frame = (void *)((long)new_thread_stack - sizeof(struct rt_hw_exp_stack));
syscall_frame = (void *)((long)new_thread_stack - 2 * sizeof(struct rt_hw_exp_stack));
memcpy(syscall_frame, ori_syscall, sizeof(*syscall_frame));
syscall_frame->sp_el0 = (long)user_stack;
syscall_frame->x0 = 0;
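/* cpsr for the new thread: FIQ/IRQ masked (bits 6-7), M[3:0] = 0x5, i.e. EL1h */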
thread_frame->cpsr = ((3 << 6) | 0x4 | 0x1);
thread_frame->pc = (long)exit;
thread_frame->x0 = 0;
*thread_sp = syscall_frame;
return 0;
}
#define ALIGN_BYTES (16)
/* the layout is part of the ABI, don't change it */
struct signal_ucontext
{
rt_int64_t sigreturn;
lwp_sigset_t save_sigmask;
siginfo_t si;
rt_align(ALIGN_BYTES)
struct rt_hw_exp_stack frame;
};
RT_STATIC_ASSERT(abi_offset_compatible, offsetof(struct signal_ucontext, si) == UCTX_ABI_OFFSET_TO_SI);
void *arch_signal_ucontext_get_frame(struct signal_ucontext *uctx)
{
return &uctx->frame;
}
/* internal use only */
void arch_syscall_prepare_signal(rt_base_t rc, struct rt_hw_exp_stack *exp_frame)
{
long x0 = exp_frame->x0;
exp_frame->x0 = rc;
exp_frame->x7 = x0;
return ;
}
void arch_syscall_restart(void *sp, void *ksp);
void arch_syscall_set_errno(void *eframe, int expected, int code)
{
struct rt_hw_exp_stack *exp_frame = eframe;
if (exp_frame->x0 == -expected)
exp_frame->x0 = -code;
return ;
}
void arch_signal_check_erestart(void *eframe, void *ksp)
{
struct rt_hw_exp_stack *exp_frame = eframe;
long rc = exp_frame->x0;
long sys_id = exp_frame->x8;
(void)sys_id;
if (rc == -ERESTART)
{
LOG_D("%s(rc=%ld,sys_id=%ld,pid=%d)", __func__, rc, sys_id, lwp_self()->pid);
LOG_D("%s: restart rc = %ld", lwp_get_syscall_name(sys_id), rc);
exp_frame->x0 = exp_frame->x7;
arch_syscall_restart(eframe, ksp);
}
return ;
}
static void arch_signal_post_action(struct signal_ucontext *new_sp, rt_base_t kernel_sp)
{
arch_signal_check_erestart(&new_sp->frame, (void *)kernel_sp);
return ;
}
void *arch_signal_ucontext_restore(rt_base_t user_sp, rt_base_t kernel_sp)
{
struct signal_ucontext *new_sp;
new_sp = (void *)user_sp;
if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
{
lwp_thread_signal_mask(rt_thread_self(), LWP_SIG_MASK_CMD_SET_MASK, &new_sp->save_sigmask, RT_NULL);
arch_signal_post_action(new_sp, kernel_sp);
}
else
{
LOG_I("User frame corrupted during signal handling\nexiting...");
sys_exit_group(EXIT_FAILURE);
}
return (char *)&new_sp->frame + sizeof(struct rt_hw_exp_stack);
}
void *arch_signal_ucontext_save(rt_base_t user_sp, siginfo_t *psiginfo,
struct rt_hw_exp_stack *exp_frame,
lwp_sigset_t *save_sig_mask)
{
struct signal_ucontext *new_sp;
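/* carve the ucontext out of the user stack, rounded down to 16-byte alignment per AAPCS64 */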
new_sp = (void *)((user_sp - sizeof(struct signal_ucontext)) & ~0xf);
if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
{
/* push psiginfo */
if (psiginfo)
{
lwp_memcpy(&new_sp->si, psiginfo, sizeof(*psiginfo));
}
/* exp frame is already aligned as required by AAPCS64 */
lwp_memcpy(&new_sp->frame, exp_frame, sizeof(*exp_frame));
/* copy the save_sig_mask */
lwp_memcpy(&new_sp->save_sigmask, save_sig_mask, sizeof(lwp_sigset_t));
/* copy lwp_sigreturn */
const size_t lwp_sigreturn_bytes = 8;
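/* lwp_sigreturn is two 4-byte instructions (mov + svc), hence 8 bytes */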
extern void lwp_sigreturn(void);
/* -> ensure that the sigreturn starts at the outermost boundary */
lwp_memcpy(&new_sp->sigreturn, &lwp_sigreturn, lwp_sigreturn_bytes);
}
else
{
LOG_I("%s: User stack overflow", __func__);
sys_exit_group(EXIT_FAILURE);
}
return new_sp;
}
int arch_backtrace_uthread(rt_thread_t thread)
{
struct rt_hw_backtrace_frame frame;
struct rt_hw_exp_stack *stack;
if (thread && thread->lwp)
{
stack = thread->user_ctx.ctx;
if ((long)stack > (unsigned long)thread->stack_addr
&& (long)stack < (unsigned long)thread->stack_addr + thread->stack_size)
{
frame.pc = stack->pc;
frame.fp = stack->x29;
lwp_backtrace_frame(thread, &frame);
return 0;
}
else
return -1;
}
return -1;
}

View File

@@ -0,0 +1,76 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 Jesven first version
*/
#ifndef LWP_ARCH_H__
#define LWP_ARCH_H__
#include <rtconfig.h>
#ifdef ARCH_MM_MMU
#define USER_VADDR_TOP 0x0001000000000000UL
#define USER_HEAP_VADDR (0x0000ffff40000000UL)
#define USER_HEAP_VEND USER_STACK_VSTART
#define USER_STACK_VSTART 0x0000ffff70000000UL
#define USER_STACK_VEND (USER_STACK_VSTART + 0x10000000)
#define USER_ARG_VADDR USER_STACK_VEND
#define LDSO_LOAD_VADDR 0x60000000UL
#define USER_VADDR_START 0x00200000UL
#define USER_LOAD_VADDR USER_VADDR_START
#define UCTX_ABI_OFFSET_TO_SI 16
#ifndef __ASSEMBLY__
#include <lwp.h>
#include <lwp_arch_comm.h>
#ifdef __cplusplus
extern "C" {
#endif
unsigned long rt_hw_ffz(unsigned long x);
rt_inline void icache_invalid_all(void)
{
__asm__ volatile ("ic ialluis\n\tisb sy":::"memory");
}
/**
* @brief Save signal-related context to user stack
*
* @param user_sp the current sp of user
* @param exp_frame exception frame to resume former execution
* @param psiginfo pointer to the siginfo
* @param elr pc of former execution
* @param spsr program status of former execution
* @return void* the new user sp
*/
void *arch_signal_ucontext_save(rt_base_t user_sp, siginfo_t *psiginfo,
struct rt_hw_exp_stack *exp_frame,
lwp_sigset_t *save_sig_mask);
/**
* @brief Restore the signal mask after return
*
* @param user_sp sp of user
* @return void*
*/
void *arch_signal_ucontext_restore(rt_base_t user_sp, rt_base_t kernel_sp);
void arch_syscall_restart(void *sp, void *ksp);
#ifdef __cplusplus
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* ARCH_MM_MMU */
#endif /*LWP_ARCH_H__*/

View File

@@ -0,0 +1,588 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 Jesven first version
* 2023-07-16 Shell Move part of the codes to C from asm in signal handling
* 2023-08-03 Shell Support of syscall restart (SA_RESTART)
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include <rtconfig.h>
#include <asm-generic.h>
#include <asm-fpu.h>
#include <armv8.h>
#include <lwp_arch.h>
/*********************
* SPSR BIT *
*********************/
#define SPSR_Mode(v) ((v) << 0)
#define SPSR_A64 (0 << 4)
#define SPSR_RESERVED_5 (0 << 5)
#define SPSR_FIQ_MASKED(v) ((v) << 6)
#define SPSR_IRQ_MASKED(v) ((v) << 7)
#define SPSR_SERROR_MASKED(v) ((v) << 8)
#define SPSR_D_MASKED(v) ((v) << 9)
#define SPSR_RESERVED_10_19 (0 << 10)
#define SPSR_IL(v) ((v) << 20)
#define SPSR_SS(v) ((v) << 21)
#define SPSR_RESERVED_22_27 (0 << 22)
#define SPSR_V(v) ((v) << 28)
#define SPSR_C(v) ((v) << 29)
#define SPSR_Z(v) ((v) << 30)
#define SPSR_N(v) ((v) << 31)
/**************************************************/
.text
/*
* void arch_start_umode(args, text, ustack, kstack);
*/
.global arch_start_umode
.type arch_start_umode, % function
arch_start_umode:
mov sp, x3
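/* build the SPSR for EL0t (user mode), AArch64 execution state */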
mov x4, #(SPSR_Mode(0) | SPSR_A64)
msr daifset, #3
dsb sy
mrs x30, sp_el0
/* user stack top */
msr sp_el0, x2
mov x3, x2
msr spsr_el1, x4
msr elr_el1, x1
eret
/*
* void arch_crt_start_umode(args, text, ustack, kstack);
*/
.global arch_crt_start_umode
.type arch_crt_start_umode, % function
arch_crt_start_umode:
sub x4, x2, #0x10
adr x2, lwp_thread_return
ldr x5, [x2]
str x5, [x4]
ldr x5, [x2, #4]
str x5, [x4, #4]
ldr x5, [x2, #8]
str x5, [x4, #8]
mov x5, x4
dc cvau, x5
add x5, x5, #8
dc cvau, x5
dsb sy
ic ialluis
dsb sy
msr sp_el0, x4
mov sp, x3
mov x4, #(SPSR_Mode(0) | SPSR_A64)
msr daifset, #3
dsb sy
mrs x30, sp_el0
msr spsr_el1, x4
msr elr_el1, x1
eret
.global arch_get_user_sp
arch_get_user_sp:
mrs x0, sp_el0
ret
.global arch_fork_exit
.global arch_clone_exit
arch_fork_exit:
arch_clone_exit:
b arch_syscall_exit
/*
void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
*/
.global lwp_exec_user
lwp_exec_user:
mov sp, x1
mov x4, #(SPSR_Mode(0) | SPSR_A64)
ldr x3, =0x0000ffff80000000
msr daifset, #3
msr spsr_el1, x4
msr elr_el1, x2
eret
/*
* void SVC_Handler(regs);
* since this routine resets the SP, we take it as a start point
*/
START_POINT(SVC_Handler)
/* x0 is initial sp */
mov sp, x0
msr daifclr, #3 /* enable interrupt */
bl rt_thread_self
bl lwp_user_setting_save
ldp x8, x9, [sp, #(CONTEXT_OFFSET_X8)]
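/* x8 holds the syscall number; 0xe000 (sigreturn) and 0xf000 (debug return) are reserved traps */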
and x0, x8, #0xf000
cmp x0, #0xe000
beq arch_signal_quit
cmp x0, #0xf000
beq ret_from_user
uxtb x0, w8
bl lwp_get_sys_api
cmp x0, xzr
mov x30, x0
beq arch_syscall_exit
ldp x0, x1, [sp, #(CONTEXT_OFFSET_X0)]
ldp x2, x3, [sp, #(CONTEXT_OFFSET_X2)]
ldp x4, x5, [sp, #(CONTEXT_OFFSET_X4)]
ldp x6, x7, [sp, #(CONTEXT_OFFSET_X6)]
blr x30
/* jump explicitly; make this code position-independent */
b arch_syscall_exit
START_POINT_END(SVC_Handler)
.global arch_syscall_exit
arch_syscall_exit:
/**
* @brief back up former x0 which is required to restart syscall, then setup
* syscall return value in stack frame
*/
mov x1, sp
bl arch_syscall_prepare_signal
msr daifset, #3
ldp x2, x3, [sp], #0x10 /* SPSR and ELR. */
msr spsr_el1, x3
msr elr_el1, x2
ldp x29, x30, [sp], #0x10
msr sp_el0, x29
ldp x28, x29, [sp], #0x10
msr fpcr, x28
msr fpsr, x29
ldp x28, x29, [sp], #0x10
ldp x26, x27, [sp], #0x10
ldp x24, x25, [sp], #0x10
ldp x22, x23, [sp], #0x10
ldp x20, x21, [sp], #0x10
ldp x18, x19, [sp], #0x10
ldp x16, x17, [sp], #0x10
ldp x14, x15, [sp], #0x10
ldp x12, x13, [sp], #0x10
ldp x10, x11, [sp], #0x10
ldp x8, x9, [sp], #0x10
ldp x6, x7, [sp], #0x10
ldp x4, x5, [sp], #0x10
ldp x2, x3, [sp], #0x10
ldp x0, x1, [sp], #0x10
RESTORE_FPU sp
/* the sp is reset to the outermost level, irq and fiq are disabled */
START_POINT(arch_ret_to_user)
msr daifset, #3
/* save exception frame */
SAVE_FPU sp
stp x0, x1, [sp, #-0x10]!
stp x2, x3, [sp, #-0x10]!
stp x4, x5, [sp, #-0x10]!
stp x6, x7, [sp, #-0x10]!
stp x8, x9, [sp, #-0x10]!
stp x10, x11, [sp, #-0x10]!
stp x12, x13, [sp, #-0x10]!
stp x14, x15, [sp, #-0x10]!
stp x16, x17, [sp, #-0x10]!
stp x18, x19, [sp, #-0x10]!
stp x20, x21, [sp, #-0x10]!
stp x22, x23, [sp, #-0x10]!
stp x24, x25, [sp, #-0x10]!
stp x26, x27, [sp, #-0x10]!
stp x28, x29, [sp, #-0x10]!
mrs x0, fpcr
mrs x1, fpsr
stp x0, x1, [sp, #-0x10]!
stp x29, x30, [sp, #-0x10]!
/* pre-action */
bl lwp_check_debug
bl lwp_check_exit_request
cbz w0, 1f
/* exit on event */
msr daifclr, #3
mov x0, xzr
b sys_exit
1:
/* check if dbg ops exist */
ldr x0, =rt_dbg_ops
ldr x0, [x0]
cbz x0, 3f
bl dbg_thread_in_debug
mov x1, #(1 << 21)
mrs x2, spsr_el1
cbz w0, 2f
orr x2, x2, x1
msr spsr_el1, x2
b 3f
2:
bic x2, x2, x1
msr spsr_el1, x2
3:
/**
* push 2 dummy words to simulate an exception frame of an interrupt
* Note: in kernel state, the context switch doesn't save the context
*/
mrs x0, spsr_el1
mrs x1, elr_el1
stp x1, x0, [sp, #-0x10]!
mov x0, sp
msr daifclr, #3
bl lwp_thread_signal_catch
msr daifset, #3
ldp x1, x0, [sp], #0x10
msr spsr_el1, x0
msr elr_el1, x1
/* check debug */
/* restore exception frame */
ldp x29, x30, [sp], #0x10
ldp x0, x1, [sp], #0x10
msr fpcr, x0
msr fpsr, x1
ldp x28, x29, [sp], #0x10
ldp x26, x27, [sp], #0x10
ldp x24, x25, [sp], #0x10
ldp x22, x23, [sp], #0x10
ldp x20, x21, [sp], #0x10
ldp x18, x19, [sp], #0x10
ldp x16, x17, [sp], #0x10
ldp x14, x15, [sp], #0x10
ldp x12, x13, [sp], #0x10
ldp x10, x11, [sp], #0x10
ldp x8, x9, [sp], #0x10
ldp x6, x7, [sp], #0x10
ldp x4, x5, [sp], #0x10
ldp x2, x3, [sp], #0x10
ldp x0, x1, [sp], #0x10
RESTORE_FPU sp
stp x0, x1, [sp, #-0x10]!
ldr x0, =rt_dbg_ops
ldr x0, [x0]
cmp x0, xzr
ldp x0, x1, [sp], #0x10
beq 1f
/* save */
SAVE_FPU sp
stp x0, x1, [sp, #-0x10]!
stp x2, x3, [sp, #-0x10]!
stp x4, x5, [sp, #-0x10]!
stp x6, x7, [sp, #-0x10]!
stp x8, x9, [sp, #-0x10]!
stp x10, x11, [sp, #-0x10]!
stp x12, x13, [sp, #-0x10]!
stp x14, x15, [sp, #-0x10]!
stp x16, x17, [sp, #-0x10]!
stp x18, x19, [sp, #-0x10]!
stp x20, x21, [sp, #-0x10]!
stp x22, x23, [sp, #-0x10]!
stp x24, x25, [sp, #-0x10]!
stp x26, x27, [sp, #-0x10]!
stp x28, x29, [sp, #-0x10]!
mrs x0, fpcr
mrs x1, fpsr
stp x0, x1, [sp, #-0x10]!
stp x29, x30, [sp, #-0x10]!
mrs x0, elr_el1
bl dbg_attach_req
/* restore */
ldp x29, x30, [sp], #0x10
ldp x0, x1, [sp], #0x10
msr fpcr, x0
msr fpsr, x1
ldp x28, x29, [sp], #0x10
ldp x26, x27, [sp], #0x10
ldp x24, x25, [sp], #0x10
ldp x22, x23, [sp], #0x10
ldp x20, x21, [sp], #0x10
ldp x18, x19, [sp], #0x10
ldp x16, x17, [sp], #0x10
ldp x14, x15, [sp], #0x10
ldp x12, x13, [sp], #0x10
ldp x10, x11, [sp], #0x10
ldp x8, x9, [sp], #0x10
ldp x6, x7, [sp], #0x10
ldp x4, x5, [sp], #0x10
ldp x2, x3, [sp], #0x10
ldp x0, x1, [sp], #0x10
RESTORE_FPU sp
1:
eret
START_POINT_END(arch_ret_to_user)
.global lwp_check_debug
lwp_check_debug:
ldr x0, =rt_dbg_ops
ldr x0, [x0]
cbnz x0, 1f
ret
1:
stp x29, x30, [sp, #-0x10]!
bl dbg_check_suspend
cbz w0, lwp_check_debug_quit
mrs x2, sp_el0
sub x2, x2, #0x10
mov x3, x2
msr sp_el0, x2
ldr x0, =lwp_debugreturn
ldr w1, [x0]
str w1, [x2]
ldr w1, [x0, #4]
str w1, [x2, #4]
dc cvau, x2
add x2, x2, #4
dc cvau, x2
dsb sy
isb sy
ic ialluis
isb sy
mrs x0, elr_el1
mrs x1, spsr_el1
stp x0, x1, [sp, #-0x10]!
msr elr_el1, x3 /* lwp_debugreturn */
mov x1, #(SPSR_Mode(0) | SPSR_A64)
orr x1, x1, #(1 << 21)
msr spsr_el1, x1
eret
ret_from_user:
/* sp_el0 += 16 to drop the lwp_debugreturn instructions */
mrs x0, sp_el0
add x0, x0, #0x10
msr sp_el0, x0
/* now is el1, sp is pos(empty) - sizeof(context) */
mov x0, sp
add x0, x0, #0x220
mov sp, x0
ldp x0, x1, [sp], #0x10 /* x1 is origin spsr_el1 */
msr elr_el1, x0 /* x0 is origin elr_el1 */
msr spsr_el1, x1
lwp_check_debug_quit:
ldp x29, x30, [sp], #0x10
ret
.global arch_syscall_restart
arch_syscall_restart:
msr daifset, 3
mov sp, x1
/* drop exception frame in user stack */
msr sp_el0, x0
/* restore previous exception frame */
msr spsel, #0
ldp x2, x3, [sp], #0x10
msr elr_el1, x2
msr spsr_el1, x3
ldp x29, x30, [sp], #0x10
ldp x28, x29, [sp], #0x10
msr fpcr, x28
msr fpsr, x29
ldp x28, x29, [sp], #0x10
ldp x26, x27, [sp], #0x10
ldp x24, x25, [sp], #0x10
ldp x22, x23, [sp], #0x10
ldp x20, x21, [sp], #0x10
ldp x18, x19, [sp], #0x10
ldp x16, x17, [sp], #0x10
ldp x14, x15, [sp], #0x10
ldp x12, x13, [sp], #0x10
ldp x10, x11, [sp], #0x10
ldp x8, x9, [sp], #0x10
ldp x6, x7, [sp], #0x10
ldp x4, x5, [sp], #0x10
ldp x2, x3, [sp], #0x10
ldp x0, x1, [sp], #0x10
RESTORE_FPU sp
msr spsel, #1
b vector_exception
arch_signal_quit:
/* drop current exception frame */
add sp, sp, #CONTEXT_SIZE
mov x1, sp
mrs x0, sp_el0
bl arch_signal_ucontext_restore
add x0, x0, #-CONTEXT_SIZE
msr sp_el0, x0
/**
* Note: Since we will reset spsr, but the reschedule will
* corrupt the spsr, we disable irq for a short period here
*/
msr daifset, #3
/* restore previous exception frame */
msr spsel, #0
ldp x2, x3, [sp], #0x10
msr elr_el1, x2
msr spsr_el1, x3
ldp x29, x30, [sp], #0x10
ldp x28, x29, [sp], #0x10
msr fpcr, x28
msr fpsr, x29
ldp x28, x29, [sp], #0x10
ldp x26, x27, [sp], #0x10
ldp x24, x25, [sp], #0x10
ldp x22, x23, [sp], #0x10
ldp x20, x21, [sp], #0x10
ldp x18, x19, [sp], #0x10
ldp x16, x17, [sp], #0x10
ldp x14, x15, [sp], #0x10
ldp x12, x13, [sp], #0x10
ldp x10, x11, [sp], #0x10
ldp x8, x9, [sp], #0x10
ldp x6, x7, [sp], #0x10
ldp x4, x5, [sp], #0x10
ldp x2, x3, [sp], #0x10
ldp x0, x1, [sp], #0x10
RESTORE_FPU sp
msr spsel, #1
b arch_ret_to_user
/**
* rt_noreturn
* void arch_thread_signal_enter(
* int signo, -> x0
* siginfo_t *psiginfo, -> x1
* void *exp_frame, -> x2
* void *entry_uaddr, -> x3
* lwp_sigset_t *save_sig_mask, -> x4
* )
*/
.global arch_thread_signal_enter
arch_thread_signal_enter:
mov x19, x0
mov x20, x2 /* exp_frame */
mov x21, x3
/**
* move exception frame to user stack
*/
mrs x0, sp_el0
mov x3, x4
/* arch_signal_ucontext_save(user_sp, psiginfo, exp_frame, save_sig_mask); */
bl arch_signal_ucontext_save
mov x22, x0
/* get and saved pointer to uframe */
bl arch_signal_ucontext_get_frame
mov x2, x0
mov x0, x22
dc cvau, x0
dsb sy
ic ialluis
dsb sy
/**
* Brief: Prepare the environment for signal handler
*/
/**
* reset the cpsr
* and drop exp frame on kernel stack, reset kernel sp
*
* Note: Since we will reset spsr, but the reschedule will
* corrupt the spsr, we disable irq for a short period here
*/
msr daifset, #3
ldr x1, [x20, #CONTEXT_OFFSET_SPSR_EL1]
msr spsr_el1, x1
add sp, x20, #CONTEXT_SIZE
/** reset user sp */
msr sp_el0, x0
/** set the return address to the sigreturn */
mov x30, x0
cbnz x21, 1f
mov x21, x30
1:
/** set the entry address of signal handler */
msr elr_el1, x21
/* siginfo is above the return address */
add x1, x30, UCTX_ABI_OFFSET_TO_SI
/* uframe is saved in x2 */
mov x0, x19
/**
* handler(signo, psi, ucontext);
*
*/
eret
lwp_debugreturn:
mov x8, 0xf000
svc #0
.global lwp_sigreturn
lwp_sigreturn:
mov x8, #0xe000
svc #0
lwp_thread_return:
mov x0, xzr
mov x8, #0x01
svc #0
.globl arch_get_tidr
arch_get_tidr:
mrs x0, tpidr_el0
ret
.global arch_set_thread_area
arch_set_thread_area:
.globl arch_set_tidr
arch_set_tidr:
msr tpidr_el0, x0
ret

View File

@@ -0,0 +1,121 @@
#include "mm_aspace.h"
#include <rtthread.h>
#include <stdint.h>
#include <string.h>
#include <lwp_elf.h>
#ifdef ARCH_MM_MMU
#include <mmu.h>
#include <page.h>
#endif
typedef struct
{
Elf32_Word st_name;
Elf32_Addr st_value;
Elf32_Word st_size;
unsigned char st_info;
unsigned char st_other;
Elf32_Half st_shndx;
} Elf32_sym;
#ifdef ARCH_MM_MMU
void arch_elf_reloc(rt_aspace_t aspace, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf32_sym *dynsym)
{
size_t rel_off;
void* addr;
if (rel_dyn_size && !dynsym)
{
return;
}
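/* each Elf32_Rel entry is 8 bytes: r_offset (v1) followed by r_info (v2) */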
for (rel_off = 0; rel_off < rel_dyn_size; rel_off += 8)
{
uint32_t v1, v2;
/*
memcpy(&v1, rel_dyn_start + rel_off, 4);
memcpy(&v2, rel_dyn_start + rel_off + 4, 4);
*/
addr = rt_hw_mmu_v2p(aspace, (void*)((char*)rel_dyn_start + rel_off));
addr = (void*)((char*)addr - PV_OFFSET);
memcpy(&v1, addr, 4);
addr = rt_hw_mmu_v2p(aspace, (void*)((char*)rel_dyn_start + rel_off + 4));
addr = (void*)((char*)addr - PV_OFFSET);
memcpy(&v2, addr, 4);
addr = rt_hw_mmu_v2p(aspace, (void*)((char*)text_start + v1));
addr = (void*)((char*)addr - PV_OFFSET);
if ((v2 & 0xff) == R_ARM_RELATIVE)
{
// *(uint32_t*)(text_start + v1) += (uint32_t)text_start;
*(uint32_t*)addr += (uint32_t)text_start;
}
else if ((v2 & 0xff) == R_ARM_ABS32)
{
uint32_t t;
t = (v2 >> 8);
if (t) /* 0 is UDF */
{
// *(uint32_t*)(text_start + v1) = (uint32_t)(text_start + dynsym[t].st_value);
*(uint32_t*)addr = (uint32_t)((char*)text_start + dynsym[t].st_value);
}
}
}
/* modify got */
if (got_size)
{
uint32_t *got_item = (uint32_t*)got_start;
for (rel_off = 0; rel_off < got_size; rel_off += 4, got_item++)
{
//*got_item += (uint32_t)text_start;
addr = rt_hw_mmu_v2p(aspace, got_item);
addr = (void*)((char*)addr - PV_OFFSET);
*(uint32_t *)addr += (uint32_t)text_start;
}
}
}
#else
void arch_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf32_sym *dynsym)
{
size_t rel_off;
if (rel_dyn_size && !dynsym)
{
return;
}
for (rel_off = 0; rel_off < rel_dyn_size; rel_off += 8)
{
uint32_t v1, v2;
memcpy(&v1, (void*)((char*)rel_dyn_start + rel_off), 4);
memcpy(&v2, (void*)((char*)rel_dyn_start + rel_off + 4), 4);
if ((v2 & 0xff) == R_ARM_RELATIVE)
{
*(uint32_t*)((char*)text_start + v1) += (uint32_t)text_start;
}
else if ((v2 & 0xff) == R_ARM_ABS32)
{
uint32_t t;
t = (v2 >> 8);
if (t) /* 0 is UDF */
{
*(uint32_t*)((char*)text_start + v1) = (uint32_t)((char*)text_start + dynsym[t].st_value);
}
}
}
/* modify got */
if (got_size)
{
uint32_t *got_item = (uint32_t*)got_start;
for (rel_off = 0; rel_off < got_size; rel_off += 4, got_item++)
{
*got_item += (uint32_t)text_start;
}
}
}
#endif

View File

@@ -0,0 +1,254 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-10-28 Jesven first version
* 2023-07-16 Shell Move part of the codes to C from asm in signal handling
*/
#include <rthw.h>
#include <rtthread.h>
#include <stddef.h>
#include <stdlib.h>
#ifdef ARCH_MM_MMU
#define DBG_TAG "lwp.arch"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <lwp_arch.h>
#include <lwp_user_mm.h>
int arch_user_space_init(struct rt_lwp *lwp)
{
size_t *mmu_table;
mmu_table = rt_hw_mmu_pgtbl_create();
if (!mmu_table)
{
return -RT_ENOMEM;
}
lwp->end_heap = USER_HEAP_VADDR;
lwp->aspace = rt_aspace_create((void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
if (!lwp->aspace)
{
return -RT_ERROR;
}
return 0;
}
static struct rt_varea kuser_varea;
void arch_kuser_init(rt_aspace_t aspace, void *vectors)
{
int err;
const size_t kuser_size = 0x1000;
extern char __kuser_helper_start[];
extern char __kuser_helper_end[];
rt_base_t start = (rt_base_t)__kuser_helper_start;
rt_base_t end = (rt_base_t)__kuser_helper_end;
int kuser_sz = end - start;
err = rt_aspace_map_static(aspace, &kuser_varea, &vectors, kuser_size,
MMU_MAP_U_RO, MMF_MAP_FIXED | MMF_PREFETCH,
&rt_mm_dummy_mapper, 0);
if (err != 0)
while (1)
; // early failed
lwp_memcpy((void *)((char *)vectors + 0x1000 - kuser_sz), __kuser_helper_start, kuser_sz);
/*
* vectors + 0xfe0 = __kuser_get_tls
* vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
*/
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)((char *)vectors + 0x1000 - kuser_sz), kuser_sz);
rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, (void *)((char *)vectors + 0x1000 - kuser_sz), kuser_sz);
}
void arch_user_space_free(struct rt_lwp *lwp)
{
if (lwp)
{
RT_ASSERT(lwp->aspace);
void *pgtbl = lwp->aspace->page_table;
rt_aspace_delete(lwp->aspace);
/* must be freed after aspace delete, pgtbl is required for unmap */
rt_hw_mmu_pgtbl_delete(pgtbl);
lwp->aspace = RT_NULL;
}
else
{
LOG_W("%s: NULL lwp as parameter", __func__);
RT_ASSERT(0);
}
}
int arch_expand_user_stack(void *addr)
{
int ret = 0;
size_t stack_addr = (size_t)addr;
stack_addr &= ~ARCH_PAGE_MASK;
if ((stack_addr >= (size_t)USER_STACK_VSTART) && (stack_addr < (size_t)USER_STACK_VEND))
{
void *map = lwp_map_user(lwp_self(), (void *)stack_addr, ARCH_PAGE_SIZE, 0);
if (map || lwp_user_accessable(addr, 1))
{
ret = 1;
}
}
return ret;
}
#define ALIGN_BYTES 8
#define lwp_sigreturn_bytes 8
struct signal_regs {
rt_base_t lr;
rt_base_t spsr;
rt_base_t r0_to_r12[13];
rt_base_t ip;
};
struct signal_ucontext
{
rt_base_t sigreturn[lwp_sigreturn_bytes / sizeof(rt_base_t)];
lwp_sigset_t save_sigmask;
siginfo_t si;
rt_align(8)
struct signal_regs frame;
};
void *arch_signal_ucontext_restore(rt_base_t user_sp)
{
struct signal_ucontext *new_sp;
rt_base_t ip;
new_sp = (void *)user_sp;
if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
{
lwp_thread_signal_mask(rt_thread_self(), LWP_SIG_MASK_CMD_SET_MASK, &new_sp->save_sigmask, RT_NULL);
ip = new_sp->frame.ip;
/* let user restore its lr from frame.ip */
new_sp->frame.ip = new_sp->frame.lr;
/* kernel will pick eip from frame.lr */
new_sp->frame.lr = ip;
}
else
{
LOG_I("User frame corrupted during signal handling\nexiting...");
sys_exit_group(EXIT_FAILURE);
}
return (void *)&new_sp->frame;
}
void *arch_signal_ucontext_save(rt_base_t lr, siginfo_t *psiginfo,
struct signal_regs *exp_frame, rt_base_t user_sp,
lwp_sigset_t *save_sig_mask)
{
rt_base_t spsr;
struct signal_ucontext *new_sp;
new_sp = (void *)(user_sp - sizeof(struct signal_ucontext));
if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
{
/* push psiginfo */
if (psiginfo)
{
lwp_memcpy(&new_sp->si, psiginfo, sizeof(*psiginfo));
}
lwp_memcpy(&new_sp->frame.r0_to_r12, exp_frame, sizeof(new_sp->frame.r0_to_r12) + sizeof(rt_base_t));
new_sp->frame.lr = lr;
__asm__ volatile("mrs %0, spsr":"=r"(spsr));
new_sp->frame.spsr = spsr;
/* copy the save_sig_mask */
lwp_memcpy(&new_sp->save_sigmask, save_sig_mask, sizeof(lwp_sigset_t));
/* copy lwp_sigreturn */
extern void lwp_sigreturn(void);
/* -> ensure that the sigreturn starts at the outermost boundary */
lwp_memcpy(&new_sp->sigreturn, &lwp_sigreturn, lwp_sigreturn_bytes);
}
else
{
LOG_I("%s: User stack overflow", __func__);
sys_exit_group(EXIT_FAILURE);
}
return new_sp;
}
void arch_syscall_set_errno(void *eframe, int expected, int code)
{
/* not supported */
return ;
}
void *arch_kernel_mmu_table_get(void)
{
return rt_kernel_space.page_table;
}
#ifdef LWP_ENABLE_ASID
#define MAX_ASID_BITS 8
#define MAX_ASID (1 << MAX_ASID_BITS)
static uint64_t global_generation = 1;
static char asid_valid_bitmap[MAX_ASID];
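/*
* Generation-based ASID allocation: when every ASID is taken, bump the
* global generation, clear the bitmap so all lwps must re-allocate, and
* flush the entire TLB (see the rollover path below).
*/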
unsigned int arch_get_asid(struct rt_lwp *lwp)
{
if (lwp == RT_NULL)
{
// kernel
return 0;
}
if (lwp->generation == global_generation)
{
return lwp->asid;
}
if (lwp->asid && !asid_valid_bitmap[lwp->asid])
{
asid_valid_bitmap[lwp->asid] = 1;
return lwp->asid;
}
for (unsigned i = 1; i < MAX_ASID; i++)
{
if (asid_valid_bitmap[i] == 0)
{
asid_valid_bitmap[i] = 1;
lwp->generation = global_generation;
lwp->asid = i;
return lwp->asid;
}
}
global_generation++;
memset(asid_valid_bitmap, 0, MAX_ASID * sizeof(char));
asid_valid_bitmap[1] = 1;
lwp->generation = global_generation;
lwp->asid = 1;
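/* ASID space rolled over: invalidate the entire unified TLB (TLBIALL) */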
asm volatile ("mcr p15, 0, r0, c8, c7, 0\ndsb\nisb" ::: "memory");
return lwp->asid;
}
#endif
#endif

View File

@@ -0,0 +1,55 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#ifndef LWP_ARCH_H__
#define LWP_ARCH_H__
#include <lwp.h>
#include <lwp_arch_comm.h>
#ifdef ARCH_MM_MMU
#define USER_VADDR_TOP 0xC0000000UL
#define USER_HEAP_VEND 0xB0000000UL
#define USER_HEAP_VADDR 0x80000000UL
#define USER_STACK_VSTART 0x70000000UL
#define USER_STACK_VEND USER_HEAP_VADDR
#define LDSO_LOAD_VADDR 0x60000000UL
#define USER_VADDR_START 0x00010000UL
#define USER_LOAD_VADDR USER_VADDR_START
#ifdef __cplusplus
extern "C" {
#endif
rt_inline unsigned long rt_hw_ffz(unsigned long x)
{
return __builtin_ffsl(~x) - 1;
}
rt_inline void icache_invalid_all(void)
{
__asm__ volatile ("mcr p15, 0, r0, c7, c5, 0\ndsb\nisb":::"memory");//iciallu
}
unsigned int arch_get_asid(struct rt_lwp *lwp);
struct signal_regs;
void *arch_signal_ucontext_restore(rt_base_t user_sp);
void *arch_signal_ucontext_save(rt_base_t lr, siginfo_t *psiginfo,
struct signal_regs *exp_frame, rt_base_t user_sp,
lwp_sigset_t *save_sig_mask);
#ifdef __cplusplus
}
#endif
#endif
#endif /*LWP_ARCH_H__*/

View File

@@ -0,0 +1,470 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-12-10 Jesven first version
* 2023-07-16 Shell Move part of the codes to C from asm in signal handling
*/
#include "rtconfig.h"
#include "asm-generic.h"
#define Mode_USR 0x10
#define Mode_FIQ 0x11
#define Mode_IRQ 0x12
#define Mode_SVC 0x13
#define Mode_MON 0x16
#define Mode_ABT 0x17
#define Mode_UDF 0x1B
#define Mode_SYS 0x1F
#define A_Bit 0x100
#define I_Bit 0x80 @; when I bit is set, IRQ is disabled
#define F_Bit 0x40 @; when F bit is set, FIQ is disabled
#define T_Bit 0x20
.cpu cortex-a9
.syntax unified
.text
/*
* void arch_start_umode(args, text, ustack, kstack);
*/
.global arch_start_umode
.type arch_start_umode, % function
arch_start_umode:
mrs r9, cpsr
bic r9, #0x1f
orr r9, #Mode_USR
cpsid i
msr spsr, r9
mov sp, r3
/* set user stack top */
cps #Mode_SYS
mov sp, r2
cps #Mode_SVC
mov r3, r2
/* set data address. */
movs pc, r1
/*
* void arch_crt_start_umode(args, text, ustack, kstack);
*/
.global arch_crt_start_umode
.type arch_crt_start_umode, % function
arch_crt_start_umode:
cps #Mode_SYS
sub sp, r2, #16
ldr r2, =lwp_thread_return
ldr r4, [r2]
str r4, [sp]
ldr r4, [r2, #4]
str r4, [sp, #4]
ldr r4, [r2, #8]
str r4, [sp, #8]
mov r4, sp
mcr p15, 0, r4, c7, c11, 1 ;//dc cmvau
add r4, #4
mcr p15, 0, r4, c7, c11, 1 ;//dc cmvau
add r4, #4
mcr p15, 0, r4, c7, c11, 1 ;//dc cmvau
dsb
isb
mcr p15, 0, r4, c7, c5, 0 ;//iciallu
dsb
isb
mov lr, sp
cps #Mode_SVC
mrs r9, cpsr
bic r9, #0x1f
orr r9, #Mode_USR
cpsid i
msr spsr, r9
mov sp, r3
/* set data address. */
movs pc, r1
/*
void arch_set_thread_context(void *exit_addr, void *new_thread_stack, void *user_stack, void **thread_sp);
*/
.global arch_set_thread_context
arch_set_thread_context:
sub r1, #(10 * 4 + 4 * 4) /* {r4 - r12, lr} , {r4, r5, spsr, u_pc} */
stmfd r1!, {r0}
mov r12, #0
stmfd r1!, {r12}
stmfd r1!, {r1 - r12}
stmfd r1!, {r12} /* new thread return value */
mrs r12, cpsr
orr r12, #(1 << 7) /* disable irq */
stmfd r1!, {r12} /* spsr */
mov r12, #0
stmfd r1!, {r12} /* now user lr is 0 */
stmfd r1!, {r2} /* user sp */
#ifdef RT_USING_FPU
stmfd r1!, {r12} /* not use fpu */
#endif
str r1, [r3]
mov pc, lr
.global arch_get_user_sp
arch_get_user_sp:
cps #Mode_SYS
mov r0, sp
cps #Mode_SVC
mov pc, lr
.global sys_fork
.global sys_vfork
.global arch_fork_exit
sys_fork:
sys_vfork:
push {r4 - r12, lr}
bl _sys_fork
arch_fork_exit:
pop {r4 - r12, lr}
b arch_syscall_exit
.global sys_clone
.global arch_clone_exit
sys_clone:
push {r4 - r12, lr}
bl _sys_clone
arch_clone_exit:
pop {r4 - r12, lr}
b arch_syscall_exit
/*
void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
*/
.global lwp_exec_user
lwp_exec_user:
cpsid i
mov sp, r1
mov lr, r2
mov r2, #Mode_USR
msr spsr_cxsf, r2
ldr r3, =0x80000000
b arch_ret_to_user
/*
* void SVC_Handler(void);
*/
.global vector_swi
.type vector_swi, % function
START_POINT(vector_swi)
push {lr}
mrs lr, spsr
push {r4, r5, lr}
cpsie i
push {r0 - r3, r12}
bl rt_thread_self
bl lwp_user_setting_save
and r0, r7, #0xf000
cmp r0, #0xe000
beq arch_signal_quit
cmp r0, #0xf000
beq ret_from_user
and r0, r7, #0xff
bl lwp_get_sys_api
cmp r0, #0 /* r0 = api */
mov lr, r0
pop {r0 - r3, r12}
beq arch_syscall_exit
blx lr
START_POINT_END(vector_swi)
.global arch_syscall_exit
arch_syscall_exit:
cpsid i
pop {r4, r5, lr}
msr spsr_cxsf, lr
pop {lr}
.global arch_ret_to_user
arch_ret_to_user:
/* save all context for signal handler */
push {r0-r12, lr}
bl lwp_check_debug
bl lwp_check_exit_request
cmp r0, #0
beq 1f
mov r0, #0
b sys_exit
1:
mov r0, sp
/* r0 -> exp frame */
bl lwp_thread_signal_catch
ldr r0, =rt_dbg_ops
ldr r0, [r0]
cmp r0, #0
beq 2f
mov r0, lr
bl dbg_attach_req
2:
pop {r0-r12, lr}
movs pc, lr
#ifdef RT_USING_SMART
.global lwp_check_debug
lwp_check_debug:
ldr r0, =rt_dbg_ops
ldr r0, [r0]
cmp r0, #0
bne 1f
bx lr
1:
push {lr}
bl dbg_check_suspend
cmp r0, #0
beq lwp_check_debug_quit
cps #Mode_SYS
sub sp, #8
ldr r0, =lwp_debugreturn
ldr r1, [r0]
str r1, [sp]
ldr r1, [r0, #4]
str r1, [sp, #4]
mov r1, sp
mcr p15, 0, r1, c7, c11, 1 ;//dc cmvau
add r1, #4
mcr p15, 0, r1, c7, c11, 1 ;//dc cmvau
dsb
isb
mcr p15, 0, r0, c7, c5, 0 ;//iciallu
dsb
isb
mov r0, sp /* lwp_debugreturn */
cps #Mode_SVC
mrs r1, spsr
push {r1}
mov r1, #Mode_USR
msr spsr_cxsf, r1
movs pc, r0
ret_from_user:
cps #Mode_SYS
add sp, #8
cps #Mode_SVC
/*
pop {r0 - r3, r12}
pop {r4 - r6, lr}
*/
add sp, #(4*9)
pop {r4}
msr spsr_cxsf, r4
lwp_check_debug_quit:
pop {pc}
arch_signal_quit:
cpsid i
/* drop context of signal handler */
pop {r0 - r3, r12}
pop {r4, r5, lr}
pop {lr}
/* restore context */
cps #Mode_SYS
mov r0, sp
cps #Mode_SVC
bl arch_signal_ucontext_restore
/* lr <- *(&frame.ip) */
ldr lr, [r0]
cps #Mode_SYS
mov sp, r0
/* drop ip in the frame and restore cpsr */
pop {r0}
pop {r0}
msr spsr_cxsf, r0
pop {r0-r12, lr}
cps #Mode_SVC
b arch_ret_to_user
/**
* rt_noreturn
* void arch_thread_signal_enter(
* int signo, -> r0
* siginfo_t *psiginfo, -> r1
* void *exp_frame, -> r2
* void *entry_uaddr, -> r3
* lwp_sigset_t *save_sig_mask, -> ??
* )
*/
.global arch_thread_signal_enter
arch_thread_signal_enter:
mov r4, r0
mov r5, r3
mov r6, r2
cps #Mode_SYS
mov r0, lr
mov r3, sp
cps #Mode_SVC
bl arch_signal_ucontext_save
/* drop volatile frame {r0-r12, lr} */
add sp, r6, #14*4
/* reset user sp */
cps #Mode_SYS
mov sp, r0
mov lr, r0
cps #Mode_SVC
/* r1,r2 <- new_user_sp */
mov r1, r0
mov r2, r0
mcr p15, 0, r0, c7, c11, 1 ;//dc cmvau
add r0, #4
mcr p15, 0, r0, c7, c11, 1 ;//dc cmvau
dsb
isb
mcr p15, 0, r1, c7, c5, 0 ;//iciallu
dsb
isb
/* r0 <- signo */
mov r0, r4
/* r4 <- &sigreturn */
mov r4, r2
/* lr <- user_handler() */
mov lr, r5
cmp lr, #0
moveq lr, r4
/* r1 <- siginfo */
mov r1, r2
add r1, #8
/* handler(signo, siginfo, ucontext) */
movs pc, lr
lwp_debugreturn:
mov r7, #0xf000
svc #0
.global lwp_sigreturn
lwp_sigreturn:
mov r7, #0xe000
svc #0
lwp_thread_return:
mov r0, #0
mov r7, #0x01
svc #0
#endif
.global check_vfp
check_vfp:
#ifdef RT_USING_FPU
vmrs r0, fpexc
ubfx r0, r0, #30, #1
#else
mov r0, #0
#endif
mov pc, lr
.global get_vfp
get_vfp:
#ifdef RT_USING_FPU
vstmia r0!, {d0-d15}
vstmia r0!, {d16-d31}
vmrs r1, fpscr
str r1, [r0]
#endif
mov pc, lr
.globl arch_get_tidr
arch_get_tidr:
mrc p15, 0, r0, c13, c0, 3
bx lr
.global arch_set_thread_area
arch_set_thread_area:
.globl arch_set_tidr
arch_set_tidr:
mcr p15, 0, r0, c13, c0, 3
bx lr
/* kuser support */
.macro kuser_pad, sym, size
.if (. - \sym) & 3
.rept 4 - (. - \sym) & 3
.byte 0
.endr
.endif
.rept (\size - (. - \sym)) / 4
.word 0xe7fddef1
.endr
.endm
.align 5
.globl __kuser_helper_start
__kuser_helper_start:
__kuser_cmpxchg64: @ 0xffff0f60
stmfd sp!, {r4, r5, r6, lr}
ldmia r0, {r4, r5} @ load old val
ldmia r1, {r6, lr} @ load new val
1: ldmia r2, {r0, r1} @ load current val
eors r3, r0, r4 @ compare with oldval (1)
eorseq r3, r1, r5 @ compare with oldval (2)
2: stmiaeq r2, {r6, lr} @ store newval if eq
rsbs r0, r3, #0 @ set return val and C flag
ldmfd sp!, {r4, r5, r6, pc}
kuser_pad __kuser_cmpxchg64, 64
__kuser_memory_barrier: @ 0xffff0fa0
dmb
mov pc, lr
kuser_pad __kuser_memory_barrier, 32
__kuser_cmpxchg: @ 0xffff0fc0
1: ldr r3, [r2] @ load current val
subs r3, r3, r0 @ compare with oldval
2: streq r1, [r2] @ store newval if eq
rsbs r0, r3, #0 @ set return val and C flag
mov pc, lr
kuser_pad __kuser_cmpxchg, 32
__kuser_get_tls: @ 0xffff0fe0
mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
mov pc, lr
ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
kuser_pad __kuser_get_tls, 16
.rep 3
.word 0 @ 0xffff0ff0 software TLS value, then
.endr @ pad up to __kuser_helper_version
__kuser_helper_version: @ 0xffff0ffc
.word ((__kuser_helper_end - __kuser_helper_start) >> 5)
.globl __kuser_helper_end
__kuser_helper_end:

View File

@@ -0,0 +1,11 @@
# RT-Thread building script for component
from building import *
cwd = GetCurrentDir()
src = Glob('*.c') + Glob('*.S')
CPPPATH = [cwd]
group = DefineGroup('lwp-riscv', src, depend = ['RT_USING_SMART'], CPPPATH = CPPPATH)
Return('group')

View File

@@ -0,0 +1,357 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2020-11-18 Jesven first version
* 2021-02-03 lizhirui port to riscv64
* 2021-02-06 lizhirui add thread filter
* 2021-02-19 lizhirui port to new version of rt-smart
* 2021-03-02 lizhirui add an auxiliary function for interrupt
* 2021-03-04 lizhirui delete thread filter
* 2021-03-04 lizhirui modify for new version of rt-smart
* 2021-11-22 JasonHu add lwp_set_thread_context
* 2021-11-30 JasonHu add clone/fork support
* 2023-07-16 Shell Move part of the codes to C from asm in signal handling
* 2023-10-16 Shell Support a new backtrace framework
*/
#include <rthw.h>
#include <rtthread.h>
#include <stddef.h>
#ifdef ARCH_MM_MMU
#define DBG_TAG "lwp.arch"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <lwp_internal.h>
#include <lwp_arch.h>
#include <lwp_user_mm.h>
#include <page.h>
#include <cpuport.h>
#include <encoding.h>
#include <stack.h>
#include <cache.h>
extern rt_ubase_t MMUTable[];
void *lwp_copy_return_code_to_user_stack()
{
void lwp_thread_return();
void lwp_thread_return_end();
rt_thread_t tid = rt_thread_self();
if (tid->user_stack != RT_NULL)
{
rt_size_t size = (rt_size_t)lwp_thread_return_end - (rt_size_t)lwp_thread_return;
rt_size_t userstack = (rt_size_t)tid->user_stack + tid->user_stack_size - size;
lwp_memcpy((void *)userstack, lwp_thread_return, size);
return (void *)userstack;
}
return RT_NULL;
}
rt_ubase_t lwp_fix_sp(rt_ubase_t cursp)
{
void lwp_thread_return();
void lwp_thread_return_end();
if (cursp == 0)
{
return 0;
}
return cursp - ((rt_size_t)lwp_thread_return_end - (rt_size_t)lwp_thread_return);
}
rt_thread_t rt_thread_sp_to_thread(void *spmember_addr)
{
return (rt_thread_t)(((rt_ubase_t)spmember_addr) - (offsetof(struct rt_thread, sp)));
}
void *get_thread_kernel_stack_top(rt_thread_t thread)
{
return (void *)(((rt_size_t)thread->stack_addr) + ((rt_size_t)thread->stack_size));
}
void *arch_get_user_sp(void)
{
/* user sp saved in interrupt context */
rt_thread_t self = rt_thread_self();
rt_uint8_t *stack_top = (rt_uint8_t *)self->stack_addr + self->stack_size;
struct rt_hw_stack_frame *frame = (struct rt_hw_stack_frame *)(stack_top - sizeof(struct rt_hw_stack_frame));
return (void *)frame->user_sp_exc_stack;
}
int arch_user_space_init(struct rt_lwp *lwp)
{
rt_ubase_t *mmu_table;
mmu_table = rt_hw_mmu_pgtbl_create();
if (!mmu_table)
{
return -RT_ENOMEM;
}
lwp->end_heap = USER_HEAP_VADDR;
lwp->aspace = rt_aspace_create(
(void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
if (!lwp->aspace)
{
return -RT_ERROR;
}
return 0;
}
void *arch_kernel_mmu_table_get(void)
{
return (void *)((char *)MMUTable);
}
void arch_user_space_free(struct rt_lwp *lwp)
{
if (lwp)
{
RT_ASSERT(lwp->aspace);
void *pgtbl = lwp->aspace->page_table;
rt_aspace_delete(lwp->aspace);
/* must be freed after aspace delete, pgtbl is required for unmap */
rt_hw_mmu_pgtbl_delete(pgtbl);
lwp->aspace = RT_NULL;
}
else
{
LOG_W("%s: NULL lwp as parameter", __func__);
RT_ASSERT(0);
}
}
long _sys_clone(void *arg[]);
long sys_clone(void *arg[])
{
return _sys_clone(arg);
}
long _sys_fork(void);
long sys_fork(void)
{
return _sys_fork();
}
long _sys_vfork(void);
long sys_vfork(void)
{
return _sys_fork();
}
/**
* set exec context for fork/clone.
*/
int arch_set_thread_context(void (*exit)(void), void *new_thread_stack,
void *user_stack, void **thread_sp)
{
RT_ASSERT(exit != RT_NULL);
RT_ASSERT(user_stack != RT_NULL);
RT_ASSERT(new_thread_stack != RT_NULL);
RT_ASSERT(thread_sp != RT_NULL);
struct rt_hw_stack_frame *syscall_frame;
struct rt_hw_stack_frame *thread_frame;
rt_uint8_t *stk;
rt_uint8_t *syscall_stk;
stk = (rt_uint8_t *)new_thread_stack;
/* reserve syscall context, all the registers are copied from the parent */
stk -= CTX_REG_NR * REGBYTES;
syscall_stk = stk;
syscall_frame = (struct rt_hw_stack_frame *)stk;
/* modify user sp */
syscall_frame->user_sp_exc_stack = (rt_ubase_t)user_stack;
/* skip ecall */
syscall_frame->epc += 4;
/* child return value is 0 */
syscall_frame->a0 = 0;
syscall_frame->a1 = 0;
/* reset thread area */
rt_thread_t thread = rt_container_of((unsigned long)thread_sp, struct rt_thread, sp);
syscall_frame->tp = (rt_ubase_t)thread->thread_idr;
#ifdef ARCH_USING_NEW_CTX_SWITCH
extern void *_rt_hw_stack_init(rt_ubase_t *sp, rt_ubase_t ra, rt_ubase_t sstatus);
rt_ubase_t sstatus = read_csr(sstatus) | SSTATUS_SPP;
sstatus &= ~SSTATUS_SIE;
/* compatible to RESTORE_CONTEXT */
stk = (void *)_rt_hw_stack_init((rt_ubase_t *)stk, (rt_ubase_t)exit, sstatus);
#else
/* build temp thread context */
stk -= sizeof(struct rt_hw_stack_frame);
thread_frame = (struct rt_hw_stack_frame *)stk;
int i;
for (i = 0; i < sizeof(struct rt_hw_stack_frame) / sizeof(rt_ubase_t); i++)
{
((rt_ubase_t *)thread_frame)[i] = 0xdeadbeef;
}
/* set pc for thread */
thread_frame->epc = (rt_ubase_t)exit;
/* set old exception mode as supervisor, because in kernel */
thread_frame->sstatus = read_csr(sstatus) | SSTATUS_SPP;
thread_frame->sstatus &= ~SSTATUS_SIE; /* must disable interrupt */
/* set stack as syscall stack */
thread_frame->user_sp_exc_stack = (rt_ubase_t)syscall_stk;
#endif /* ARCH_USING_NEW_CTX_SWITCH */
/* save new stack top */
*thread_sp = (void *)stk;
/**
* The stack for child thread:
*
* +------------------------+ --> kernel stack top
* | syscall stack |
* | |
* | @sp | --> `user_stack`
* | @epc | --> user ecall addr + 4 (skip ecall)
* | @a0&a1 | --> 0 (for child return 0)
* | |
* +------------------------+ --> temp thread stack top
* | temp thread stack | ^
* | | |
* | @sp | ---------/
* | @epc | --> `exit` (arch_clone_exit/arch_fork_exit)
* | |
* +------------------------+ --> thread sp
*/
return 0;
}
#define ALIGN_BYTES (16)
struct signal_ucontext
{
rt_int64_t sigreturn;
lwp_sigset_t save_sigmask;
siginfo_t si;
rt_align(16)
struct rt_hw_stack_frame frame;
};
void *arch_signal_ucontext_restore(rt_base_t user_sp)
{
struct signal_ucontext *new_sp;
new_sp = (void *)user_sp;
if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
{
lwp_thread_signal_mask(rt_thread_self(), LWP_SIG_MASK_CMD_SET_MASK, &new_sp->save_sigmask, RT_NULL);
}
else
{
LOG_I("User frame corrupted during signal handling\nexiting...");
sys_exit_group(EXIT_FAILURE);
}
return (void *)&new_sp->frame;
}
void *arch_signal_ucontext_save(int signo, siginfo_t *psiginfo,
struct rt_hw_stack_frame *exp_frame, rt_base_t user_sp,
lwp_sigset_t *save_sig_mask)
{
struct signal_ucontext *new_sp;
new_sp = (void *)(user_sp - sizeof(struct signal_ucontext));
if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
{
/* push psiginfo */
if (psiginfo)
{
lwp_memcpy(&new_sp->si, psiginfo, sizeof(*psiginfo));
}
lwp_memcpy(&new_sp->frame, exp_frame, sizeof(*exp_frame));
/* copy the save_sig_mask */
lwp_memcpy(&new_sp->save_sigmask, save_sig_mask, sizeof(lwp_sigset_t));
/* copy lwp_sigreturn */
const size_t lwp_sigreturn_bytes = 8;
extern void lwp_sigreturn(void);
/* -> ensure that the sigreturn starts at the outermost boundary */
lwp_memcpy(&new_sp->sigreturn, &lwp_sigreturn, lwp_sigreturn_bytes);
/**
* synchronize dcache & icache if target is
* a Harvard Architecture machine, otherwise
* do nothing
*/
rt_hw_sync_cache_local(&new_sp->sigreturn, 8);
}
else
{
LOG_I("%s: User stack overflow", __func__);
sys_exit_group(EXIT_FAILURE);
}
return new_sp;
}
void arch_syscall_set_errno(void *eframe, int expected, int code)
{
/* not supported */
return ;
}
/**
* void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
*/
void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
{
arch_start_umode(args, user_entry, (void *)USER_STACK_VEND, kernel_stack);
}
#endif /* ARCH_MM_MMU */
int arch_backtrace_uthread(rt_thread_t thread)
{
struct rt_hw_backtrace_frame frame;
struct rt_hw_stack_frame *stack;
if (thread && thread->lwp)
{
stack = thread->user_ctx.ctx;
if ((long)stack > (unsigned long)thread->stack_addr
&& (long)stack < (unsigned long)thread->stack_addr + thread->stack_size)
{
frame.pc = stack->epc;
frame.fp = stack->s0_fp;
lwp_backtrace_frame(thread, &frame);
return 0;
}
else
return -1;
}
return -1;
}

View File

@@ -0,0 +1,82 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#ifndef LWP_ARCH_H__
#define LWP_ARCH_H__
#include <rthw.h>
#include <lwp.h>
#include <lwp_arch_comm.h>
#ifdef ARCH_MM_MMU
#ifdef ARCH_MM_MMU_32BIT_LIMIT
#define USER_HEAP_VADDR 0xF0000000UL
#define USER_HEAP_VEND 0xFE000000UL
#define USER_STACK_VSTART 0xE0000000UL
#define USER_STACK_VEND USER_HEAP_VADDR
#define USER_VADDR_START 0xC0000000UL
#define USER_VADDR_TOP 0xFF000000UL
#define USER_LOAD_VADDR 0xD0000000UL
#define LDSO_LOAD_VADDR USER_LOAD_VADDR
#elif defined(ARCH_REMAP_KERNEL)
#define USER_VADDR_START 0x00001000UL
#define USER_VADDR_TOP 0x003ffffff000UL
#define USER_STACK_VSTART 0x000270000000UL
#define USER_STACK_VEND USER_HEAP_VADDR
#define USER_HEAP_VADDR 0x000300000000UL
#define USER_HEAP_VEND USER_VADDR_TOP
#define USER_LOAD_VADDR 0x200000000
#define LDSO_LOAD_VADDR 0x200000000
#else
#define USER_HEAP_VADDR 0x300000000UL
#define USER_HEAP_VEND USER_STACK_VSTART
#define USER_STACK_VSTART 0x370000000UL
#define USER_STACK_VEND 0x400000000UL
#define USER_VADDR_START 0x200000000UL
#define USER_VADDR_TOP 0xfffffffffffff000UL
#define USER_LOAD_VADDR 0x200000000UL
#define LDSO_LOAD_VADDR 0x200000000UL
#endif
/* these attributes are CPU specific and should be defined in riscv_mmu.h */
#ifndef MMU_MAP_U_RWCB
#define MMU_MAP_U_RWCB 0
#endif
#ifndef MMU_MAP_U_RW
#define MMU_MAP_U_RW 0
#endif
#ifdef __cplusplus
extern "C" {
#endif
rt_inline unsigned long rt_hw_ffz(unsigned long x)
{
return __builtin_ffsl(~x) - 1;
}
rt_inline void icache_invalid_all(void)
{
rt_hw_cpu_icache_invalidate_all();
}
struct rt_hw_stack_frame;
void *arch_signal_ucontext_restore(rt_base_t user_sp);
void *arch_signal_ucontext_save(int signo, siginfo_t *psiginfo,
struct rt_hw_stack_frame *exp_frame, rt_base_t user_sp,
lwp_sigset_t *save_sig_mask);
#ifdef __cplusplus
}
#endif
#endif
#endif /*LWP_ARCH_H__*/

View File

@@ -0,0 +1,303 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-12-10 Jesven first version
* 2021-02-03 lizhirui port to riscv64
* 2021-02-19 lizhirui port to new version of rt-smart
* 2022-11-08 Wangxiaoyao Cleanup codes;
* Support new context switch
* 2023-07-16 Shell Move part of the codes to C from asm in signal handling
*/
#include "rtconfig.h"
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif /* __ASSEMBLY__ */
#include "cpuport.h"
#include "encoding.h"
#include "stackframe.h"
#include "asm-generic.h"
.section .text.lwp
/*
* void arch_start_umode(args, text, ustack, kstack);
*/
.global arch_start_umode
.type arch_start_umode, % function
arch_start_umode:
// load kstack for user process
csrw sscratch, a3
li t0, SSTATUS_SPP | SSTATUS_SIE // set user mode, disable interrupts
csrc sstatus, t0
li t0, SSTATUS_SPIE // enable interrupts on return to user mode
csrs sstatus, t0
csrw sepc, a1
mv sp, a2
sret//enter user mode
/*
* void arch_crt_start_umode(args, text, ustack, kstack);
*/
.global arch_crt_start_umode
.type arch_crt_start_umode, % function
arch_crt_start_umode:
li t0, SSTATUS_SPP | SSTATUS_SIE // set user mode, disable interrupts
csrc sstatus, t0
li t0, SSTATUS_SPIE // enable interrupts on return to user mode
csrs sstatus, t0
csrw sepc, a1
mv s0, a0
mv s1, a1
mv s2, a2
mv s3, a3
mv a0, s2
call lwp_copy_return_code_to_user_stack
mv a0, s2
call lwp_fix_sp
mv sp, a0//user_sp
mv ra, a0//return address
mv a0, s0//args
csrw sscratch, s3
sret//enter user mode
/**
* Unify exit point from kernel mode to enter user space
* we handle following things here:
* 1. restoring user mode debug state (not supported yet)
* 2. handling thread's exit request
* 3. handling POSIX signal
* 4. restoring user context
* 5. jump to user mode
*/
.global arch_ret_to_user
arch_ret_to_user:
// TODO: we don't support kernel gdb server in risc-v yet
// so we don't check the debug state here or handle debugging business
call lwp_check_exit_request
beqz a0, 1f
mv a0, x0
call sys_exit
1:
mv a0, sp
call lwp_thread_signal_catch
ret_to_user_exit:
RESTORE_ALL
// `RESTORE_ALL` also resets sp to the user sp and sets up sscratch
sret
/**
* Restore the user context from the exception frame stored in the ustack,
* and handle pending signals
*/
arch_signal_quit:
LOAD a0, FRAME_OFF_SP(sp)
call arch_signal_ucontext_restore
/* reset kernel sp to the stack */
addi sp, sp, CTX_REG_NR * REGBYTES
STORE sp, FRAME_OFF_SP(a0)
/* return value is user sp */
mv sp, a0
/* restore user sp before enter trap */
addi a0, sp, CTX_REG_NR * REGBYTES
csrw sscratch, a0
RESTORE_ALL
SAVE_ALL
j arch_ret_to_user
/**
* rt_noreturn
* void arch_thread_signal_enter(
* int signo, -> a0
* siginfo_t *psiginfo, -> a1
* void *exp_frame, -> a2
* void *entry_uaddr, -> a3
* lwp_sigset_t *save_sig_mask, -> a4
* )
*/
.global arch_thread_signal_enter
arch_thread_signal_enter:
mv s3, a2
mv s2, a0
mv s1, a3
LOAD t0, FRAME_OFF_SP(a2)
mv a3, t0
call arch_signal_ucontext_save
/** restore kernel sp */
addi sp, s3, CTX_REG_NR * REGBYTES
/**
* set register RA to the user signal handler
* set sp to user sp & save kernel sp in sscratch
*/
mv ra, a0
csrw sscratch, sp
mv sp, a0
/**
* s1 is signal_handler,
* s1 = !s1 ? lwp_sigreturn : s1;
*/
bnez s1, 1f
mv s1, ra
1:
/* enter user mode and enable interrupt when return to user mode */
li t0, SSTATUS_SPP
csrc sstatus, t0
li t0, SSTATUS_SPIE
csrs sstatus, t0
/* sepc <- signal_handler */
csrw sepc, s1
/* a0 <- signal id */
mv a0, s2
/* a1 <- siginfo */
addi a1, sp, 16
/* dummy a2 */
mv a2, a1
/* restore user GP */
LOAD gp, FRAME_OFF_GP(s3)
/**
* handler(signo, psi, ucontext);
*/
sret
.align 3
lwp_debugreturn:
li a7, 0xff
ecall
.align 3
.global lwp_sigreturn
lwp_sigreturn:
li a7, 0xfe
ecall
.align 3
lwp_sigreturn_end:
.align 3
.global lwp_thread_return
lwp_thread_return:
li a0, 0
li a7, 1
ecall
.align 3
.global lwp_thread_return_end
lwp_thread_return_end:
.globl arch_get_tidr
arch_get_tidr:
mv a0, tp
ret
.global arch_set_thread_area
arch_set_thread_area:
.globl arch_set_tidr
arch_set_tidr:
mv tp, a0
ret
.global arch_clone_exit
.global arch_fork_exit
arch_fork_exit:
arch_clone_exit:
j arch_syscall_exit
START_POINT(syscall_entry)
#ifndef ARCH_USING_NEW_CTX_SWITCH
//swap to thread kernel stack
csrr t0, sstatus
andi t0, t0, 0x100
beqz t0, __restore_sp_from_tcb
__restore_sp_from_sscratch: // from kernel
csrr t0, sscratch
j __move_stack_context
__restore_sp_from_tcb: // from user
jal rt_thread_self
jal get_thread_kernel_stack_top
mv t0, a0
__move_stack_context:
mv t1, sp//src
mv sp, t0//switch stack
addi sp, sp, -CTX_REG_NR * REGBYTES
//copy context
li s0, CTX_REG_NR//cnt
mv t2, sp//dst
copy_context_loop:
LOAD t0, 0(t1)
STORE t0, 0(t2)
addi s0, s0, -1
addi t1, t1, 8
addi t2, t2, 8
bnez s0, copy_context_loop
#endif /* ARCH_USING_NEW_CTX_SWITCH */
/* fetch SYSCALL ID */
LOAD a7, 17 * REGBYTES(sp)
addi a7, a7, -0xfe
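/* after the subtraction, zero means the 0xfe sigreturn trap issued by lwp_sigreturn */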
beqz a7, arch_signal_quit
#ifdef ARCH_MM_MMU
/* save setting when syscall enter */
call rt_thread_self
call lwp_user_setting_save
#endif
mv a0, sp
OPEN_INTERRUPT
call syscall_handler
j arch_syscall_exit
START_POINT_END(syscall_entry)
.global arch_syscall_exit
arch_syscall_exit:
CLOSE_INTERRUPT
#if defined(ARCH_MM_MMU)
LOAD s0, FRAME_OFF_SSTATUS(sp)
andi s0, s0, 0x100
bnez s0, dont_ret_to_user
j arch_ret_to_user
#endif
dont_ret_to_user:
#ifdef ARCH_MM_MMU
/* restore setting when syscall exit */
call rt_thread_self
call lwp_user_setting_restore
/* after restoring reg tp, the saved context must be updated as well */
STORE tp, 4 * REGBYTES(sp)
#endif
//restore context
RESTORE_ALL
csrw sscratch, zero
sret

View File

@@ -0,0 +1,109 @@
#include "mm_aspace.h"
#include <rtthread.h>
#include <stdint.h>
#include <string.h>
#include <elf.h>
#ifdef ARCH_MM_MMU
#include <mmu.h>
#include <page.h>
#endif
typedef struct
{
Elf64_Word st_name;
Elf64_Addr st_value;
Elf64_Word st_size;
unsigned char st_info;
unsigned char st_other;
Elf64_Half st_shndx;
} Elf64_sym;
#ifdef ARCH_MM_MMU
void arch_elf_reloc(rt_aspace_t aspace, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf64_sym *dynsym)
{
size_t rel_off;
void* addr;
if (rel_dyn_size && !dynsym)
{
return;
}
for (rel_off = 0; rel_off < rel_dyn_size; rel_off += 8)
{
uint32_t v1, v2;
addr = rt_hw_mmu_v2p(aspace, (void *)(((rt_size_t)rel_dyn_start) + rel_off));
memcpy(&v1, addr, 4);
addr = rt_hw_mmu_v2p(aspace, (void *)(((rt_size_t)rel_dyn_start) + rel_off + 4));
memcpy(&v2, addr, 4);
addr = rt_hw_mmu_v2p(aspace, (void *)((rt_size_t)text_start + v1));
if ((v2 & 0xff) == R_ARM_RELATIVE)
{
*(rt_size_t*)addr += (rt_size_t)text_start;
}
else if ((v2 & 0xff) == R_ARM_ABS32)
{
uint32_t t;
t = (v2 >> 8);
if (t) /* 0 is UDF */
{
*(rt_size_t*)addr = (((rt_size_t)text_start) + dynsym[t].st_value);
}
}
}
/* modify got */
if (got_size)
{
uint32_t *got_item = (uint32_t*)got_start;
for (rel_off = 0; rel_off < got_size; rel_off += 4, got_item++)
{
addr = rt_hw_mmu_v2p(aspace, got_item);
*(rt_size_t *)addr += (rt_size_t)text_start;
}
}
}
#else
void arch_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf64_sym *dynsym)
{
size_t rel_off;
if (rel_dyn_size && !dynsym)
{
return;
}
for (rel_off = 0; rel_off < rel_dyn_size; rel_off += 8)
{
uint32_t v1, v2;
memcpy(&v1, ((rt_uint8_t *)rel_dyn_start) + rel_off, 4);
memcpy(&v2, ((rt_uint8_t *)rel_dyn_start) + rel_off + 4, 4);
if ((v2 & 0xff) == R_ARM_RELATIVE)
{
*(uint32_t*)(((rt_size_t)text_start) + v1) += (uint32_t)text_start;
}
else if ((v2 & 0xff) == R_ARM_ABS32)
{
uint32_t t;
t = (v2 >> 8);
if (t) /* 0 is UDF */
{
*(uint32_t*)(((rt_size_t)text_start) + v1) = (uint32_t)(((rt_size_t)text_start) + dynsym[t].st_value);
}
}
}
/* modify got */
if (got_size)
{
uint32_t *got_item = (uint32_t*)got_start;
for (rel_off = 0; rel_off < got_size; rel_off += 4, got_item++)
{
*got_item += (uint32_t)text_start;
}
}
}
#endif

View File

@@ -0,0 +1,11 @@
# RT-Thread building script for component
from building import *
cwd = GetCurrentDir()
src = Glob('*.c') + Glob('*.S')
CPPPATH = [cwd]
group = DefineGroup('lwp-x86-i386', src, depend = ['RT_USING_SMART'], CPPPATH = CPPPATH)
Return('group')

View File

@@ -0,0 +1,371 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-7-14 JasonHu first version
*/
#include <rthw.h>
#include <stddef.h>
#include <rtconfig.h>
#include <rtdbg.h>
#ifdef ARCH_MM_MMU
#include <stackframe.h>
#include <interrupt.h>
#include <segment.h>
#include <mmu.h>
#include <page.h>
#include <lwp_mm_area.h>
#include <lwp_user_mm.h>
#include <lwp_arch.h>
#ifdef RT_USING_SIGNALS
#include <lwp_signal.h>
#endif /* RT_USING_SIGNALS */
extern size_t g_mmu_table[];
int arch_expand_user_stack(void *addr)
{
int ret = 0;
size_t stack_addr = (size_t)addr;
stack_addr &= ~PAGE_OFFSET_MASK;
if ((stack_addr >= (size_t)USER_STACK_VSTART) && (stack_addr < (size_t)USER_STACK_VEND))
{
void *map = lwp_map_user(lwp_self(), (void *)stack_addr, PAGE_SIZE, RT_FALSE);
if (map || lwp_user_accessable(addr, 1))
{
ret = 1; /* map success */
}
else /* map failed, send signal SIGSEGV */
{
#ifdef RT_USING_SIGNALS
dbg_log(DBG_ERROR, "[fault] thread %s mapped addr %p failed!\n", rt_thread_self()->parent.name, addr);
lwp_thread_kill(rt_thread_self(), SIGSEGV);
ret = 1; /* return 1 so we go back to the interrupt path, which then checks for exit */
#endif
}
}
else /* not stack, send signal SIGSEGV */
{
#ifdef RT_USING_SIGNALS
dbg_log(DBG_ERROR, "[fault] thread %s access unmapped addr %p!\n", rt_thread_self()->parent.name, addr);
lwp_thread_kill(rt_thread_self(), SIGSEGV);
ret = 1; /* return 1 so we go back to the interrupt path, which then checks for exit */
#endif
}
return ret;
}
void *get_thread_kernel_stack_top(rt_thread_t thread)
{
return RT_NULL;
}
/**
* not supported on i386; that's OK
*/
void *arch_get_user_sp()
{
return RT_NULL;
}
int arch_user_space_init(struct rt_lwp *lwp)
{
rt_size_t *mmu_table;
mmu_table = (rt_size_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
if (!mmu_table)
{
return -1;
}
rt_memset(mmu_table, 0, ARCH_PAGE_SIZE);
lwp->end_heap = USER_HEAP_VADDR;
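    /*
     * The i386 page directory holds 1024 4-byte entries; copying
     * ARCH_PAGE_SIZE / 4 bytes keeps the first 256 entries, i.e. the kernel
     * window below USER_VADDR_START (0x40000000), and the remaining three
     * quarters are cleared for user mappings.
     */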
memcpy(mmu_table, g_mmu_table, ARCH_PAGE_SIZE / 4);
memset((rt_uint8_t *)mmu_table + ARCH_PAGE_SIZE / 4, 0, ARCH_PAGE_SIZE / 4 * 3);
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);
if (rt_hw_mmu_map_init(&lwp->mmu_info, (void*)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table, PV_OFFSET) < 0)
{
rt_pages_free(mmu_table, 0);
return -1;
}
return 0;
}
void *arch_kernel_mmu_table_get(void)
{
return (void *)((char *)g_mmu_table);
}
void arch_user_space_vtable_free(struct rt_lwp *lwp)
{
if (lwp && lwp->mmu_info.vtable)
{
rt_pages_free(lwp->mmu_info.vtable, 0);
lwp->mmu_info.vtable = NULL;
}
}
void arch_set_thread_area(void *p)
{
rt_hw_seg_tls_set((rt_ubase_t) p);
rt_thread_t cur = rt_thread_self();
cur->thread_idr = p; /* update thread idr after first set */
}
void *arch_get_tidr(void)
{
rt_thread_t cur = rt_thread_self();
if (!cur->lwp) /* no lwp, don't get thread idr from tls seg */
return NULL;
return (void *)rt_hw_seg_tls_get(); /* get thread idr from tls seg */
}
void arch_set_tidr(void *p)
{
rt_thread_t cur = rt_thread_self();
if (!cur->lwp) /* no lwp, don't set thread idr to tls seg */
return;
rt_hw_seg_tls_set((rt_ubase_t) p); /* set tls seg addr as thread idr */
}
static void lwp_user_stack_init(rt_hw_stack_frame_t *frame)
{
frame->ds = frame->es = USER_DATA_SEL;
frame->cs = USER_CODE_SEL;
frame->ss = USER_STACK_SEL;
frame->gs = USER_TLS_SEL;
frame->fs = 0; /* unused */
frame->edi = frame->esi = \
frame->ebp = frame->esp_dummy = 0;
frame->eax = frame->ebx = \
frame->ecx = frame->edx = 0;
frame->error_code = 0;
frame->vec_no = 0;
frame->eflags = (EFLAGS_MBS | EFLAGS_IF_1 | EFLAGS_IOPL_3);
}
extern void lwp_switch_to_user(void *frame);
/**
 * User entry: build the initial stack frame.
 * At the end of exec we must enter user mode. On x86 we place the stack
 * pointer, argument and text entry in a stack frame, pop them into the
 * registers, and finally use iret to switch from kernel mode to user mode.
 */
void arch_start_umode(void *args, const void *text, void *ustack, void *k_stack)
{
rt_uint8_t *stk = k_stack;
stk -= sizeof(struct rt_hw_stack_frame);
struct rt_hw_stack_frame *frame = (struct rt_hw_stack_frame *)stk;
lwp_user_stack_init(frame);
frame->esp = (rt_uint32_t)ustack - 32;
frame->ebx = (rt_uint32_t)args;
frame->eip = (rt_uint32_t)text;
lwp_switch_to_user(frame);
/* should never return */
}
void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
{
arch_start_umode(args, (const void *)user_entry, (void *)USER_STACK_VEND, kernel_stack);
}
extern void lwp_thread_return();
extern void lwp_thread_return_end();
static void *lwp_copy_return_code_to_user_stack(void *ustack)
{
size_t size = (size_t)lwp_thread_return_end - (size_t)lwp_thread_return;
void *retcode = (void *)((size_t)ustack - size);
memcpy(retcode, (void *)lwp_thread_return, size);
return retcode;
}
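/*
 * This plain memcpy is safe because the lwp_thread_return stub is
 * position-independent (two movl immediates and an int $0x80), so it runs
 * correctly at any user-stack address.
 */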
/**
 * When sys_thread_create creates a thread, execution comes here once the
 * thread has started. Like arch_start_umode we enter user mode, but we must
 * also arrange for the thread to exit. A thread entry looks like:
 * void func(void *arg)
 * {
 *     ...
 * }
 * When func returns, exit code must run to terminate the thread, otherwise
 * the program runs off the end of the stack. So we copy the exit code to
 * user space and return into it when func returns.
 */
void arch_crt_start_umode(void *args, const void *text, void *ustack, void *k_stack)
{
RT_ASSERT(ustack != NULL);
rt_uint8_t *stk;
stk = (rt_uint8_t *)((rt_uint8_t *)k_stack + sizeof(rt_ubase_t));
stk = (rt_uint8_t *)RT_ALIGN_DOWN(((rt_ubase_t)stk), sizeof(rt_ubase_t));
stk -= sizeof(struct rt_hw_stack_frame);
struct rt_hw_stack_frame *frame = (struct rt_hw_stack_frame *)stk;
lwp_user_stack_init(frame);
/* make user thread stack */
unsigned long *retcode = lwp_copy_return_code_to_user_stack(ustack); /* copy ret code */
unsigned long *retstack = (unsigned long *)RT_ALIGN_DOWN(((rt_ubase_t)retcode), sizeof(rt_ubase_t));
/**
* x86 call stack
*
* retcode here
*
* arg n
* arg n - 1
* ...
* arg 2
* arg 1
* arg 0
 * eip (caller return addr, points to retcode)
* esp
*/
*(--retstack) = (unsigned long) args; /* arg */
*(--retstack) = (unsigned long) retcode; /* ret eip */
frame->esp = (rt_uint32_t)retstack;
frame->eip = (rt_uint32_t)text;
lwp_switch_to_user(frame);
/* should never return */
}
rt_thread_t rt_thread_sp_to_thread(void *spmember_addr)
{
return (rt_thread_t)(((rt_ubase_t)spmember_addr) - (offsetof(struct rt_thread, sp)));
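/*
 * Minimal usage sketch (assumes a valid rt_thread): recover the thread from
 * the address of its `sp` member, which is what the scheduler hands around.
 */
#if 0
static void sp_to_thread_example(struct rt_thread *thread)
{
    rt_thread_t t = rt_thread_sp_to_thread(&thread->sp);
    RT_ASSERT(t == thread);
}
#endif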
}
/**
 * Set the exec context for fork/clone.
 * user_stack is unused here.
 */
void arch_set_thread_context(void *exit_addr, void *new_thread_stack, void *user_stack, void **thread_sp)
{
/**
 * The thread kernel stack was set as tss.esp0, so when an interrupt or
 * syscall occurs the stack frame is stored at the top of the kernel stack,
 * and we can locate the frame from the kernel stack top.
 */
rt_hw_stack_frame_t *frame = (rt_hw_stack_frame_t *)((rt_ubase_t)new_thread_stack - sizeof(rt_hw_stack_frame_t));
frame->eax = 0; /* child return 0 */
rt_hw_context_t *context = (rt_hw_context_t *) (((rt_uint32_t *)frame) - HW_CONTEXT_MEMBER_NR);
context->eip = (void *)exit_addr; /* when the thread starts, jump to the interrupt exit path to enter user mode */
context->ebp = context->ebx = context->esi = context->edi = 0;
/**
 * Set sp to the address of the first member of rt_hw_context; when the
 * scheduler switches to this thread, the context is popped from this stack.
 */
*thread_sp = (void *)&context->ebp;
/**
 * After the context is set, the stack looks like this:
 *
 * -----------
 * stack frame | eax = 0
 * -----------
 * context (HW_CONTEXT_MEMBER_NR members only) | eip = rt_hw_intr_exit
 * -----------
 * thread sp | to  <- rt_hw_context_switch(from, to)
 * -----------
 */
}
#ifdef RT_USING_SIGNALS
#define SIGNAL_RET_CODE_SIZE 16
struct rt_signal_frame
{
char *ret_addr; /* return addr when handler return */
int signo; /* signal for user handler arg */
rt_hw_stack_frame_t frame; /* save kernel signal stack */
char ret_code[SIGNAL_RET_CODE_SIZE]; /* save return code */
};
typedef struct rt_signal_frame rt_signal_frame_t;
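/*
 * Layout note: lwp_try_do_signal() sets the user esp to the start of this
 * struct, so on handler entry esp points at ret_addr. A plain cdecl handler
 * therefore sees signo as its first argument, and its `ret` pops ret_addr
 * and jumps into ret_code.
 */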
extern void lwp_signal_return();
extern void lwp_signal_return_end();
void lwp_try_do_signal(rt_hw_stack_frame_t *frame)
{
if (!lwp_signal_check())
return;
/* 1. backup signal mask */
int signal = lwp_signal_backup((void *) frame->esp, (void *) frame->eip, (void *) frame->eflags);
/* 2. get signal handler */
lwp_sighandler_t handler = lwp_sighandler_get(signal);
if (handler == RT_NULL) /* no handler, ignore */
{
lwp_signal_restore();
return;
}
rt_base_t level = rt_hw_interrupt_disable();
/* 3. backup frame */
rt_signal_frame_t *sig_frame = (rt_signal_frame_t *)((frame->esp - sizeof(rt_signal_frame_t)) & -8UL);
memcpy(&sig_frame->frame, frame, sizeof(rt_hw_stack_frame_t));
sig_frame->signo = signal;
/**
 * 4. copy the user return code onto the user stack
 *
 * The current frame is saved on the user stack, which then looks like:
 *
 * ----------
 * user code stack
 * ----------+ -> esp before entering the kernel
 * signal frame
 * ----------+ -> esp while the signal handler runs
 * signal handler stack
 * ----------
 */
size_t ret_code_size = (size_t)lwp_signal_return_end - (size_t)lwp_signal_return;
memcpy(sig_frame->ret_code, (void *)lwp_signal_return, ret_code_size);
sig_frame->ret_addr = sig_frame->ret_code;
/* 5. jump to the user handler: update the frame's register info */
lwp_user_stack_init(frame);
frame->eip = (rt_uint32_t) handler;
frame->esp = (rt_uint32_t) sig_frame;
rt_hw_interrupt_enable(level);
}
void lwp_signal_do_return(rt_hw_stack_frame_t *frame)
{
/**
 * ASSUME: on x86 each stack push/pop moves 4 bytes, so STACK_ELEM_SIZE = sizeof(int) = 4.
 * When the signal handler returns, the stack points at the bottom of the
 * signal frame, but `ret` pops eip from esp, so esp += STACK_ELEM_SIZE and
 * thus esp = (signal frame) + STACK_ELEM_SIZE.
 * Hence (signal frame) = esp - STACK_ELEM_SIZE.
 */
rt_signal_frame_t *sig_frame = (rt_signal_frame_t *)(frame->esp - sizeof(rt_uint32_t));
memcpy(frame, &sig_frame->frame, sizeof(rt_hw_stack_frame_t));
/**
 * Restore the signal info. rt_user_context is not used here;
 * the stack frame is restored from sig_frame instead.
 */
lwp_signal_restore();
}
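/*
 * Return-path sketch: the handler's `ret` lands in ret_code, whose
 * `int $0x80` with id 0xe000 (SIGNAL_RETURN_SYSCAL_ID) traps back into the
 * kernel; the syscall path is then expected to call lwp_signal_do_return()
 * with the trap frame so the context saved in sig_frame is restored.
 */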
#endif /* RT_USING_SIGNALS */
#endif /* ARCH_MM_MMU */

View File

@@ -0,0 +1,49 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-07-18 JasonHu first version
*/
#ifndef LWP_ARCH_H__
#define LWP_ARCH_H__
#include <lwp.h>
#include <lwp_arch_comm.h>
#include <stackframe.h>
#ifdef ARCH_MM_MMU
#define USER_VADDR_TOP 0xFFFFF000UL
#define USER_HEAP_VEND 0xE0000000UL
#define USER_HEAP_VADDR 0x90000000UL
#define USER_STACK_VSTART 0x80000000UL
#define USER_STACK_VEND USER_HEAP_VADDR
#define LDSO_LOAD_VADDR 0x70000000UL
#define USER_VADDR_START 0x40000000UL
#define USER_LOAD_VADDR USER_VADDR_START
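/*
 * Resulting 32-bit layout, derived from the macros above:
 *
 *   0xFFFFF000  USER_VADDR_TOP
 *   0xE0000000  USER_HEAP_VEND
 *   0x90000000  USER_HEAP_VADDR = USER_STACK_VEND
 *   0x80000000  USER_STACK_VSTART
 *   0x70000000  LDSO_LOAD_VADDR
 *   0x40000000  USER_VADDR_START = USER_LOAD_VADDR
 */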
#define SIGNAL_RETURN_SYSCAL_ID 0xe000
#ifdef __cplusplus
extern "C" {
#endif
rt_thread_t rt_thread_sp_to_thread(void *spmember_addr);
void lwp_signal_do_return(rt_hw_stack_frame_t *frame);
rt_inline unsigned long rt_hw_ffz(unsigned long x)
{
return __builtin_ffsl(~x) - 1;
}
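/*
 * Example: rt_hw_ffz(0xFFFFFFFB) == 2, the lowest zero bit; __builtin_ffsl()
 * is 1-based, hence the -1.
 */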
#ifdef __cplusplus
}
#endif
#endif /* ARCH_MM_MMU */
#endif /*LWP_ARCH_H__*/

View File

@@ -0,0 +1,73 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-7-14 JasonHu first version
*/
#include "rtconfig.h"
.section .text.lwp
/*
* void lwp_switch_to_user(frame);
*/
.global lwp_switch_to_user
lwp_switch_to_user:
movl 0x4(%esp), %esp
addl $4, %esp // skip intr no
popal
popl %gs
popl %fs
popl %es
popl %ds
addl $4, %esp // skip error_code
iret // return into user mode
.extern arch_syscall_exit
.global sys_fork
.global sys_vfork
.global arch_fork_exit
sys_fork:
sys_vfork:
jmp _sys_fork
arch_fork_exit:
jmp arch_syscall_exit
.global sys_clone
.global arch_clone_exit
sys_clone:
jmp _sys_clone
arch_clone_exit:
jmp arch_syscall_exit
/**
 * thread return code, copied onto the user stack by
 * lwp_copy_return_code_to_user_stack()
 */
.align 4
.global lwp_thread_return
lwp_thread_return:
movl $1, %eax // eax = 1, sys_exit
movl $0, %ebx
int $0x80
.align 4
.global lwp_thread_return_end
lwp_thread_return_end:
#ifdef RT_USING_SIGNALS
/**
 * signal return code, copied onto the user stack by lwp_try_do_signal()
 */
.align 4
.global lwp_signal_return
lwp_signal_return:
movl $0xe000, %eax // SIGNAL_RETURN_SYSCAL_ID: special syscall id for signal return
int $0x80
.align 4
.global lwp_signal_return_end
lwp_signal_return_end:
#endif /* RT_USING_SIGNALS */

View File

@@ -0,0 +1,41 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-07-28 JasonHu first version
*/
#include <rtthread.h>
#include <stdint.h>
#include <string.h>
#include <elf.h>
#ifdef ARCH_MM_MMU
#include <mmu.h>
#include <page.h>
#endif
typedef struct
{
Elf32_Word st_name;
Elf32_Addr st_value;
Elf32_Word st_size;
unsigned char st_info;
unsigned char st_other;
Elf32_Half st_shndx;
} Elf32_sym;
#ifdef ARCH_MM_MMU
void arch_elf_reloc(rt_mmu_info *m_info, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf32_sym *dynsym)
{
}
#else
void arch_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf32_sym *dynsym)
{
}
#endif