[smart] fixup arm64 aspace switch (#8925)

* [smart] fixup arm64 aspace switch

Signed-off-by: Shell <smokewood@qq.com>

* fixup: arm add arch_kernel_mmu_table_get

---------

Signed-off-by: Shell <smokewood@qq.com>
Shell 2024-05-10 09:17:19 +08:00 committed by GitHub
parent 5712ff1a32
commit 017b2a5a44
4 changed files with 62 additions and 47 deletions

@@ -198,6 +198,11 @@ void arch_syscall_set_errno(void *eframe, int expected, int code)
     return ;
 }
 
+void *arch_kernel_mmu_table_get(void)
+{
+    return rt_kernel_space.page_table;
+}
+
 #ifdef LWP_ENABLE_ASID
 #define MAX_ASID_BITS 8
 #define MAX_ASID (1 << MAX_ASID_BITS)

@@ -136,23 +136,27 @@ int lwp_user_space_init(struct rt_lwp *lwp, rt_bool_t is_fork)
 void lwp_aspace_switch(struct rt_thread *thread)
 {
     struct rt_lwp *lwp = RT_NULL;
-    rt_aspace_t aspace;
-    void *from_tbl;
+    rt_aspace_t to_aspace;
+    void *from_tbl, *to_table;
 
     if (thread->lwp)
     {
         lwp = (struct rt_lwp *)thread->lwp;
-        aspace = lwp->aspace;
+        to_aspace = lwp->aspace;
+        to_table = to_aspace->page_table;
     }
     else
     {
-        aspace = &rt_kernel_space;
+        to_aspace = &rt_kernel_space;
+        /* the page table is arch dependent but not aspace->page_table */
+        to_table = arch_kernel_mmu_table_get();
     }
 
     /* must fetch the effective page table to avoid hot update */
     from_tbl = rt_hw_mmu_tbl_get();
-    if (aspace->page_table != from_tbl)
+    if (to_table != from_tbl)
     {
-        rt_hw_aspace_switch(aspace);
+        rt_hw_aspace_switch(to_aspace);
     }
 }
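
Taken together with the first hunk, the point of the fix is that the comparison deciding whether a hardware switch is needed must use the page table the MMU is actually walking. For kernel threads that table now comes from the new arch hook rather than from rt_kernel_space.page_table, which on some ports is a different object. What follows is a compilable sketch of the corrected logic, with stub types and invented addresses standing in for RT-Thread's real definitions; it is illustrative only, not the upstream code.

    #include <stdio.h>

    /* Stubs standing in for RT-Thread types; all values are invented. */
    typedef struct rt_aspace { void *page_table; } *rt_aspace_t;
    struct rt_lwp    { rt_aspace_t aspace; };
    struct rt_thread { struct rt_lwp *lwp; };

    static struct rt_aspace rt_kernel_space = { .page_table = (void *)0x1000 };

    /* Arch hook from the first hunk: the table the hardware really walks
     * for kernel mappings. It intentionally differs from
     * rt_kernel_space.page_table here to show why the old check misfired. */
    static void *arch_kernel_mmu_table_get(void) { return (void *)0x2000; }
    static void *rt_hw_mmu_tbl_get(void)         { return (void *)0x2000; }

    static void rt_hw_aspace_switch(rt_aspace_t aspace)
    {
        printf("switching to table %p\n", aspace->page_table);
    }

    static void lwp_aspace_switch(struct rt_thread *thread)
    {
        rt_aspace_t to_aspace;
        void *from_tbl, *to_table;

        if (thread->lwp)
        {
            to_aspace = thread->lwp->aspace;
            to_table  = to_aspace->page_table;
        }
        else
        {
            to_aspace = &rt_kernel_space;
            /* the key fix: ask the arch layer for the kernel table */
            to_table  = arch_kernel_mmu_table_get();
        }

        from_tbl = rt_hw_mmu_tbl_get();
        if (to_table != from_tbl)   /* old code compared to_aspace->page_table */
            rt_hw_aspace_switch(to_aspace);
    }

    int main(void)
    {
        struct rt_thread kernel_thread = { .lwp = NULL };
        lwp_aspace_switch(&kernel_thread);  /* no output: tables already match */
        return 0;
    }

With the old comparison, the kernel thread above would trigger a spurious rt_hw_aspace_switch on every restore, because rt_kernel_space.page_table (0x1000 in this sketch) never equals the live table (0x2000).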

@@ -195,7 +195,7 @@ START_POINT_END(_thread_start)
     LDP X2, X3, [SP], #0x10
     LDP X0, X1, [SP], #0x10
     RESTORE_FPU SP
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     BEQ arch_ret_to_user
 #endif
     ERET
@@ -204,7 +204,7 @@ START_POINT_END(_thread_start)
 .macro RESTORE_CONTEXT
     /* Set the SP to point to the stack of the task being restored. */
     MOV SP, X0
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     BL rt_thread_self
     MOV X19, X0
     BL lwp_aspace_switch
@@ -238,7 +238,7 @@ START_POINT_END(_thread_start)
     LDP X2, X3, [SP], #0x10
     LDP X0, X1, [SP], #0x10
     RESTORE_FPU SP
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     BEQ arch_ret_to_user
 #endif
     ERET
@@ -274,7 +274,7 @@ START_POINT_END(_thread_start)
     LDP X2, X3, [SP], #0x10
     LDP X0, X1, [SP], #0x10
     RESTORE_FPU SP
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     BEQ arch_ret_to_user
 #endif
     ERET
@@ -368,15 +368,14 @@ rt_hw_context_switch_to:
     MOV SP, X0
     MOV X0, X1
     BL rt_cpus_lock_status_restore
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     BL rt_thread_self
     BL lwp_user_setting_restore
 #endif
     B rt_hw_context_switch_exit
 
 /*
- * void rt_hw_context_switch(rt_uint32 from, rt_uint32
-to, struct rt_thread *to_thread);
+ * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to, struct rt_thread *to_thread);
  * X0 --> from (from_thread stack)
  * X1 --> to (to_thread stack)
  * X2 --> to_thread
@@ -384,17 +383,21 @@ to, struct rt_thread *to_thread);
 .globl rt_hw_context_switch
 rt_hw_context_switch:
     SAVE_CONTEXT_FROM_EL1
-    MOV X3, SP
-    STR X3, [X0]        // store sp in preempted task's TCB
-    LDR X0, [X1]        // get new task stack pointer
-    MOV SP, X0
-    MOV X0, X2
-    BL rt_cpus_lock_status_restore
-#ifdef RT_USING_LWP
-    BL rt_thread_self
-    BL lwp_user_setting_restore
+    mov X3, SP
+    str X3, [X0]        // store sp in preempted task's TCB
+    ldr X0, [X1]        // get new task stack pointer
+    mov SP, X0
+
+    /* backup thread self */
+    mov x19, x2
+
+    mov x0, x19
+    bl rt_cpus_lock_status_restore
+#ifdef RT_USING_SMART
+    mov x0, x19
+    bl lwp_user_setting_restore
 #endif
-    B rt_hw_context_switch_exit
+    b rt_hw_context_switch_exit
 
 /*
  * void rt_hw_context_switch_interrupt(context, from sp, to sp, tp tcb)
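
A note on the rewritten rt_hw_context_switch body above: the incoming to_thread pointer arrives in x2 and is parked in x19 before the helper calls, because x19 is callee-saved under the AArch64 procedure call standard (AAPCS64) and therefore survives bl rt_cpus_lock_status_restore, letting the code reload it into x0 for lwp_user_setting_restore. A rough C analogue of that register discipline, with invented names:

    #include <stdio.h>

    struct thread { const char *name; };

    static void cpus_lock_status_restore(struct thread *t)
    {
        printf("restore lock status for %s\n", t->name);
    }

    static void user_setting_restore(struct thread *t)
    {
        printf("restore user settings for %s\n", t->name);
    }

    static void context_switch_tail(struct thread *to_thread)
    {
        /* "saved" plays the role of x19: storage the callees must
         * preserve, so the pointer is still valid for the second call */
        struct thread *saved = to_thread;

        cpus_lock_status_restore(saved);
        user_setting_restore(saved);
    }

    int main(void)
    {
        struct thread to = { "to_thread" };
        context_switch_tail(&to);
        return 0;
    }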
@@ -408,7 +411,7 @@ rt_hw_context_switch_interrupt:
     STP X0, X1, [SP, #-0x10]!
     STP X2, X3, [SP, #-0x10]!
     STP X29, X30, [SP, #-0x10]!
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     BL rt_thread_self
     BL lwp_user_setting_save
 #endif
@@ -422,7 +425,7 @@ rt_hw_context_switch_interrupt:
     MOV X19, X0
     BL rt_cpus_lock_status_restore
     MOV X0, X19
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     BL lwp_user_setting_restore
 #endif
     B rt_hw_context_switch_exit
@@ -437,11 +440,11 @@ START_POINT(vector_irq)
     BL rt_interrupt_enter
     LDP X0, X1, [SP]
 
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     SAVE_USER_CTX
 #endif
     BL rt_hw_trap_irq
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     LDP X0, X1, [SP]
     RESTORE_USER_CTX X0
 #endif
@@ -505,7 +508,7 @@ rt_hw_context_switch_interrupt:
     MOV X7, #1          // set rt_thread_switch_interrupt_flag to 1
     STR X7, [X6]
     STP X1, X30, [SP, #-0x10]!
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     MOV X0, X2
     BL lwp_user_setting_save
 #endif
@@ -579,12 +582,12 @@ vector_irq_exit:
 START_POINT(vector_exception)
     SAVE_CONTEXT
     STP X0, X1, [SP, #-0x10]!
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     SAVE_USER_CTX
 #endif
 
     BL rt_hw_trap_exception
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     LDP X0, X1, [SP]
     RESTORE_USER_CTX X0
 #endif
@@ -596,7 +599,7 @@ START_POINT_END(vector_exception)
 
 START_POINT(vector_serror)
     SAVE_CONTEXT
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     SAVE_USER_CTX
 #endif
     STP X0, X1, [SP, #-0x10]!

@@ -912,25 +912,28 @@ rt_err_t rt_thread_suspend_to_list(rt_thread_t thread, rt_list_t *susp_list, int
 }
 
 #ifdef RT_USING_SMART
-    rt_sched_unlock(slvl);
-
-    /* check pending signals for thread before suspend */
-    if (lwp_thread_signal_suspend_check(thread, suspend_flag) == 0)
+    if (thread->lwp)
     {
-        /* not to suspend */
-        return -RT_EINTR;
-    }
-
-    rt_sched_lock(&slvl);
-    if (stat == RT_THREAD_READY)
-    {
-        stat = rt_sched_thread_get_stat(thread);
-
-        if (stat != RT_THREAD_READY)
+        rt_sched_unlock(slvl);
+
+        /* check pending signals for thread before suspend */
+        if (lwp_thread_signal_suspend_check(thread, suspend_flag) == 0)
         {
-            /* status updated while we check for signal */
-            rt_sched_unlock(slvl);
-            return -RT_ERROR;
+            /* not to suspend */
+            return -RT_EINTR;
        }
+
+        rt_sched_lock(&slvl);
+        if (stat == RT_THREAD_READY)
+        {
+            stat = rt_sched_thread_get_stat(thread);
+
+            if (stat != RT_THREAD_READY)
+            {
+                /* status updated while we check for signal */
+                rt_sched_unlock(slvl);
+                return -RT_ERROR;
+            }
+        }
     }
 #endif
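
The thread.c hunk scopes the pending-signal check (and the READY re-check after re-taking the scheduler lock) to threads that actually belong to a user process: a pure kernel thread has thread->lwp == RT_NULL and no LWP signal state to consult. A reduced sketch of the guard, with invented stand-ins for the real APIs and error codes:

    #include <stdio.h>
    #include <stddef.h>

    #define SKETCH_EOK    0
    #define SKETCH_EINTR  1   /* stands in for RT-Thread's -RT_EINTR */

    struct rt_lwp;
    struct rt_thread { struct rt_lwp *lwp; };

    /* stand-in for lwp_thread_signal_suspend_check(): returns 0 when a
     * pending signal must abort the suspend */
    static int signal_suspend_check(struct rt_thread *t, int flag)
    {
        (void)t; (void)flag;
        return 1;
    }

    static int suspend_prologue(struct rt_thread *thread, int suspend_flag)
    {
        if (thread->lwp)  /* only user threads can be interrupted by a signal */
        {
            if (signal_suspend_check(thread, suspend_flag) == 0)
                return -SKETCH_EINTR;  /* "not to suspend" */
            /* the real code then re-takes the sched lock and re-checks
             * that the thread is still READY before suspending */
        }
        return SKETCH_EOK;
    }

    int main(void)
    {
        struct rt_thread kernel_thread = { .lwp = NULL };
        printf("%d\n", suspend_prologue(&kernel_thread, 0));  /* 0: check skipped */
        return 0;
    }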