[smart] fixup arm64 aspace switch (#8925)

* [smart] fixup arm64 aspace switch

Signed-off-by: Shell <smokewood@qq.com>

* fixup: arm: add arch_kernel_mmu_table_get

---------

Signed-off-by: Shell <smokewood@qq.com>
Author: Shell <smokewood@qq.com> (committed by GitHub)
Date:   2024-05-10 09:17:19 +08:00
commit  017b2a5a44
parent  5712ff1a32
4 changed files with 62 additions and 47 deletions

File 1 of 4

@@ -198,6 +198,11 @@ void arch_syscall_set_errno(void *eframe, int expected, int code)
     return ;
 }
 
+void *arch_kernel_mmu_table_get(void)
+{
+    return rt_kernel_space.page_table;
+}
+
 #ifdef LWP_ENABLE_ASID
 #define MAX_ASID_BITS 8
 #define MAX_ASID (1 << MAX_ASID_BITS)
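
Note: on AArch64 this accessor is a thin wrapper, because the kernel's hardware page table and rt_kernel_space.page_table happen to coincide. The point of routing through an arch hook rather than reading the aspace field directly is that ports where the two differ (the follow-up "arm" fixup named in the commit message) can return whatever table the MMU actually holds for kernel mode.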

File 2 of 4

@@ -136,23 +136,27 @@ int lwp_user_space_init(struct rt_lwp *lwp, rt_bool_t is_fork)
 void lwp_aspace_switch(struct rt_thread *thread)
 {
     struct rt_lwp *lwp = RT_NULL;
-    rt_aspace_t aspace;
-    void *from_tbl;
+    rt_aspace_t to_aspace;
+    void *from_tbl, *to_table;
 
     if (thread->lwp)
     {
         lwp = (struct rt_lwp *)thread->lwp;
-        aspace = lwp->aspace;
+        to_aspace = lwp->aspace;
+        to_table = to_aspace->page_table;
     }
     else
     {
-        aspace = &rt_kernel_space;
+        to_aspace = &rt_kernel_space;
+        /* the kernel page table is arch dependent and not necessarily
+         * aspace->page_table */
+        to_table = arch_kernel_mmu_table_get();
     }
 
+    /* fetch the page table currently in effect to avoid a redundant hot update */
     from_tbl = rt_hw_mmu_tbl_get();
-    if (aspace->page_table != from_tbl)
+    if (to_table != from_tbl)
     {
-        rt_hw_aspace_switch(aspace);
+        rt_hw_aspace_switch(to_aspace);
     }
 }
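
The hunk above is the heart of the fix: compare the page table the hardware currently holds against the one about to be installed, and only switch when they differ. Below is a minimal, self-contained sketch of that compare-before-switch pattern; every name in it is an illustrative stand-in, not an RT-Thread API.

/* Sketch of the compare-before-switch pattern (illustrative names only). */
#include <stdio.h>

static char kernel_tbl, user_tbl;   /* stand-ins for page-table roots */
static void *hw_tbl = &kernel_tbl;  /* stands in for the TTBRx register */

static void *hw_mmu_tbl_get(void) { return hw_tbl; }

static void hw_aspace_switch(void *to_tbl)
{
    hw_tbl = to_tbl;                /* real code would also do TLB maintenance */
    printf("switched page table\n");
}

static void aspace_switch(void *to_tbl)
{
    /* read the table currently in effect so an unchanged target is a no-op */
    if (hw_mmu_tbl_get() != to_tbl)
        hw_aspace_switch(to_tbl);
}

int main(void)
{
    aspace_switch(&kernel_tbl);     /* already installed: nothing happens */
    aspace_switch(&user_tbl);       /* differs: performs the switch */
    return 0;
}

The bug sat on the to_table side of that comparison: for kernel threads the old code compared against rt_kernel_space.page_table even on ports where the hardware holds a different kernel table, so the short-circuit check could misfire.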

File 3 of 4

@@ -195,7 +195,7 @@ START_POINT_END(_thread_start)
     LDP X2, X3, [SP], #0x10
     LDP X0, X1, [SP], #0x10
     RESTORE_FPU SP
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     BEQ arch_ret_to_user
 #endif
     ERET
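
This hunk, and the matching #ifdef changes through the rest of the file, swap RT_USING_LWP for RT_USING_SMART. RT_USING_SMART is the option RT-Smart builds actually define, so the old guard appears to be a leftover name under which these blocks (arch_ret_to_user and the lwp_* hooks) were being compiled out.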
@@ -204,7 +204,7 @@ START_POINT_END(_thread_start)
 .macro RESTORE_CONTEXT
     /* Set the SP to point to the stack of the task being restored. */
     MOV SP, X0
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     BL rt_thread_self
     MOV X19, X0
     BL lwp_aspace_switch
@@ -238,7 +238,7 @@ START_POINT_END(_thread_start)
     LDP X2, X3, [SP], #0x10
     LDP X0, X1, [SP], #0x10
     RESTORE_FPU SP
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     BEQ arch_ret_to_user
 #endif
     ERET
@@ -274,7 +274,7 @@ START_POINT_END(_thread_start)
     LDP X2, X3, [SP], #0x10
     LDP X0, X1, [SP], #0x10
     RESTORE_FPU SP
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     BEQ arch_ret_to_user
 #endif
     ERET
@@ -368,15 +368,14 @@ rt_hw_context_switch_to:
     MOV SP, X0
     MOV X0, X1
     BL rt_cpus_lock_status_restore
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     BL rt_thread_self
     BL lwp_user_setting_restore
 #endif
     B rt_hw_context_switch_exit
 
 /*
- * void rt_hw_context_switch(rt_uint32 from, rt_uint32
-to, struct rt_thread *to_thread);
+ * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to, struct rt_thread *to_thread);
  * X0 --> from (from_thread stack)
  * X1 --> to (to_thread stack)
  * X2 --> to_thread
@@ -384,17 +383,21 @@ to, struct rt_thread *to_thread);
 .globl rt_hw_context_switch
 rt_hw_context_switch:
     SAVE_CONTEXT_FROM_EL1
-    MOV X3, SP
-    STR X3, [X0]        // store sp in preempted tasks TCB
-    LDR X0, [X1]        // get new task stack pointer
-    MOV SP, X0
-    MOV X0, X2
-    BL rt_cpus_lock_status_restore
-#ifdef RT_USING_LWP
-    BL rt_thread_self
-    BL lwp_user_setting_restore
+    mov X3, SP
+    str X3, [X0]        // store sp in preempted tasks TCB
+    ldr X0, [X1]        // get new task stack pointer
+    mov SP, X0
+
+    /* backup thread self */
+    mov x19, x2
+
+    mov x0, x19
+    bl rt_cpus_lock_status_restore
+#ifdef RT_USING_SMART
+    mov x0, x19
+    bl lwp_user_setting_restore
 #endif
-    B rt_hw_context_switch_exit
+    b rt_hw_context_switch_exit
 
 /*
  * void rt_hw_context_switch_interrupt(context, from sp, to sp, tp tcb)
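
A note on the rewritten body above: to_thread arrives in x2 and is copied once into x19 because, under the AAPCS64 calling convention, x19 is callee-saved while x0-x18 may be clobbered by the two bl calls; re-materializing x0 from x19 before each call keeps a valid thread pointer for both rt_cpus_lock_status_restore and lwp_user_setting_restore.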
@@ -408,7 +411,7 @@ rt_hw_context_switch_interrupt:
     STP X0, X1, [SP, #-0x10]!
     STP X2, X3, [SP, #-0x10]!
     STP X29, X30, [SP, #-0x10]!
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     BL rt_thread_self
     BL lwp_user_setting_save
 #endif
@@ -422,7 +425,7 @@ rt_hw_context_switch_interrupt:
     MOV X19, X0
     BL rt_cpus_lock_status_restore
     MOV X0, X19
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     BL lwp_user_setting_restore
 #endif
     B rt_hw_context_switch_exit
@@ -437,11 +440,11 @@ START_POINT(vector_irq)
     BL rt_interrupt_enter
     LDP X0, X1, [SP]
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     SAVE_USER_CTX
 #endif
 
     BL rt_hw_trap_irq
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     LDP X0, X1, [SP]
     RESTORE_USER_CTX X0
 #endif
@@ -505,7 +508,7 @@ rt_hw_context_switch_interrupt:
     MOV X7, #1              // set rt_thread_switch_interrupt_flag to 1
     STR X7, [X6]
     STP X1, X30, [SP, #-0x10]!
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     MOV X0, X2
     BL lwp_user_setting_save
 #endif
@@ -579,12 +582,12 @@ vector_irq_exit:
 START_POINT(vector_exception)
     SAVE_CONTEXT
     STP X0, X1, [SP, #-0x10]!
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     SAVE_USER_CTX
 #endif
 
     BL rt_hw_trap_exception
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     LDP X0, X1, [SP]
     RESTORE_USER_CTX X0
 #endif
@@ -596,7 +599,7 @@ START_POINT_END(vector_exception)
 
 START_POINT(vector_serror)
     SAVE_CONTEXT
-#ifdef RT_USING_LWP
+#ifdef RT_USING_SMART
     SAVE_USER_CTX
 #endif
     STP X0, X1, [SP, #-0x10]!

File 4 of 4

@@ -912,6 +912,8 @@ rt_err_t rt_thread_suspend_to_list(rt_thread_t thread, rt_list_t *susp_list, int
     }
 
 #ifdef RT_USING_SMART
+    if (thread->lwp)
+    {
     rt_sched_unlock(slvl);
 
     /* check pending signals for thread before suspend */
@@ -933,6 +935,7 @@ rt_err_t rt_thread_suspend_to_list(rt_thread_t thread, rt_list_t *susp_list, int
             return -RT_ERROR;
         }
     }
+    }
 #endif
 
     /* change thread stat */
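
The added if (thread->lwp) guard restricts the RT_USING_SMART pending-signal check to user threads; kernel threads carry no lwp, yet previously went down the same path. A toy model of the guard follows, using illustrative names rather than the kernel's real types:

#include <stddef.h>
#include <stdio.h>

struct toy_thread { void *lwp; };    /* NULL for kernel threads */

static int pending_signal(struct toy_thread *t)
{
    (void)t;
    return 0;                        /* pretend no signal is pending */
}

static int toy_suspend(struct toy_thread *t)
{
    if (t->lwp)                      /* the guard this hunk introduces */
    {
        if (pending_signal(t))
            return -1;               /* abort the suspend, as the real code does */
    }
    printf("suspended\n");
    return 0;
}

int main(void)
{
    struct toy_thread kthread = { NULL };
    struct toy_thread uthread = { &uthread };
    toy_suspend(&kthread);           /* skips the signal check */
    toy_suspend(&uthread);           /* runs the signal check */
    return 0;
}

Note that the wrapped block keeps its original indentation in the real diff, which is why the second hunk shows only a bare closing brace added before #endif.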