4
0
mirror of https://github.com/RT-Thread/rt-thread.git synced 2025-01-20 04:43:48 +08:00
Shell 40cd8cce99 feat: Added rt_interrupt_context* family for nested interrupt handling
These changes introduce the rt_interrupt_context family, providing a
mechanism for managing nested interrupts. The context management
ensures proper storage and retrieval of interrupt states, improving
reliability in nested interrupt scenarios by enabling context tracking
across different interrupt levels. This enhancement is essential for
platforms where nested interrupt handling is crucial, such as in
real-time or multi-threaded applications.

Changes:
- Defined rt_interrupt_context structure with context and node fields
  in `rtdef.h` to support nested interrupts.
- Added rt_slist_pop function in `rtservice.h` for simplified node
  removal in singly linked lists.
- Declared rt_interrupt_context_push, rt_interrupt_context_pop, and
  rt_interrupt_context_get functions in `rtthread.h` to manage the
  interrupt/exception stack.
- Modified AArch64 CPU support in `cpuport.h` to include
  rt_hw_show_register for debugging registers.
- Refactored `_rt_hw_trap_irq` in `trap.c` for context-aware IRQ
  handling, with stack push/pop logic to handle nested contexts.
- Implemented interrupt context push, pop, and retrieval logic in
  `irq.c` to manage context at the CPU level.

Signed-off-by: Shell <smokewood@qq.com>
2024-11-03 10:08:45 +08:00

399 lines
10 KiB
C

/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-20 Bernard first version
*/
#include <rtthread.h>
#include <rthw.h>
#include <board.h>
#include <armv8.h>
#include "interrupt.h"
#include "mm_aspace.h"
#define DBG_TAG "libcpu.trap"
#define DBG_LVL DBG_LOG
#include <rtdbg.h>
#ifdef RT_USING_FINSH
extern long list_thread(void);
#endif
#ifdef RT_USING_LWP
#include <lwp.h>
#include <lwp_arch.h>
#ifdef LWP_USING_CORE_DUMP
#include <lwp_core_dump.h>
#endif
/* Report a fault, backtrace the user thread, and terminate the faulting
 * process when the exception originated from user mode (EL0). */
static void _check_fault(struct rt_hw_exp_stack *regs, uint32_t pc_adj, char *info)
{
    rt_thread_t current;
    uint32_t from_user = !(regs->cpsr & 0x1f); /* EL0 when mode bits are all clear */

    if (from_user)
    {
        rt_kprintf("%s! pc = 0x%x\n", info, regs->pc - pc_adj);
    }

    /* user stack backtrace */
    current = rt_thread_self();
    if (current && current->lwp)
    {
        arch_backtrace_uthread(current);
    }

    if (from_user)
    {
#ifdef LWP_USING_CORE_DUMP
        lwp_core_dump(regs, pc_adj);
#endif
        /* kill the whole process group of the faulting task */
        sys_exit_group(-1);
    }
}
/* Map the abort's fault status code (ESR.FSC) onto an rt_mm fault type. */
rt_inline int _get_type(unsigned long esr)
{
    switch (ARM64_ESR_EXTRACT_FSC(esr))
    {
    case ARM64_FSC_TRANSLATION_FAULT_LEVEL_0:
    case ARM64_FSC_TRANSLATION_FAULT_LEVEL_1:
    case ARM64_FSC_TRANSLATION_FAULT_LEVEL_2:
    case ARM64_FSC_TRANSLATION_FAULT_LEVEL_3:
        /* no translation at some table level: demand-paging candidate */
        return MM_FAULT_TYPE_PAGE_FAULT;
    case ARM64_FSC_PERMISSION_FAULT_LEVEL_0:
    case ARM64_FSC_PERMISSION_FAULT_LEVEL_1:
    case ARM64_FSC_PERMISSION_FAULT_LEVEL_2:
    case ARM64_FSC_PERMISSION_FAULT_LEVEL_3:
        /* mapping exists but the access rights are insufficient */
        return MM_FAULT_TYPE_RWX_PERM;
    case ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_0:
    case ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_1:
    case ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_2:
    case ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_3:
        /* access flag fault, not handle currently */
        /* fallthrough */
    default:
        return MM_FAULT_TYPE_GENERIC;
    }
}
/* Return nonzero when IRQs were masked in the saved PSTATE
 * (DAIF.I is bit 7 of the saved cpsr). */
rt_inline long _irq_is_disable(long cpsr)
{
    return (cpsr & 0x80) ? 1 : 0;
}
/**
 * Try to repair an abort raised by user code by resolving the faulting
 * address through the process (lwp) address space — e.g. paging in a
 * missing mapping or fixing up permissions.
 *
 * @param esr  ESR_EL1 syndrome value describing the abort
 * @param regs exception frame captured on entry
 *
 * @return 1 when the fault was fixed and execution may resume, 0 otherwise
 */
static int user_fault_fixable(unsigned long esr, struct rt_hw_exp_stack *regs)
{
    rt_ubase_t level;
    enum rt_mm_fault_op fault_op;
    enum rt_mm_fault_type fault_type; /* only meaningful when fault_op != 0 */
    struct rt_lwp *lwp;
    void *dfar;
    int ret = 0;
    unsigned char ec = ARM64_ESR_EXTRACT_EC(esr);
    rt_bool_t is_write = ARM64_ABORT_WNR(esr);

    /* classify the abort from the exception class */
    switch (ec)
    {
    case ARM64_EC_INST_ABORT_FROM_LO_EXCEPTION:
        /* instruction fetch abort taken from EL0 */
        fault_op = MM_FAULT_OP_EXECUTE;
        fault_type = _get_type(esr);
        break;
    case ARM64_EC_INST_ABORT_WITHOUT_A_CHANGE:
    case ARM64_EC_DATA_ABORT_FROM_LO_EXCEPTION:
    case ARM64_EC_DATA_ABORT_WITHOUT_A_CHANGE:
        fault_op = is_write ? MM_FAULT_OP_WRITE : MM_FAULT_OP_READ;
        fault_type = _get_type(esr);
        break;
    default:
        /* non-fixable */
        fault_op = 0;
        break;
    }

    /* page fault exception only allow from user space */
    lwp = lwp_self();
    if (lwp && fault_op)
    {
        /* FAR_EL1 holds the faulting virtual address */
        __asm__ volatile("mrs %0, far_el1":"=r"(dfar));
        struct rt_aspace_fault_msg msg = {
            .fault_op = fault_op,
            .fault_type = fault_type,
            .fault_vaddr = dfar,
        };
        lwp_user_setting_save(rt_thread_self());
        /* save DAIF, then unmask IRQ/FIQ so the fix-up path may block */
        __asm__ volatile("mrs %0, daif\nmsr daifclr, 0x3\nisb\n":"=r"(level));
        if (rt_aspace_fault_try_fix(lwp->aspace, &msg))
        {
            ret = 1;
        }
        /* restore the original interrupt mask */
        __asm__ volatile("msr daif, %0\nisb\n"::"r"(level));
    }
    return ret;
}
#endif
/**
 * Dump the general-purpose registers, SP_EL0, SPSR and the exception PC
 * from a saved exception frame to the console.
 *
 * @param regs the saved exception register frame
 */
void rt_hw_show_register(struct rt_hw_exp_stack *regs)
{
    /* fixed typo in user-visible banner: "Execption" -> "Exception" */
    rt_kprintf("Exception:\n");
    rt_kprintf("X00:0x%16.16p X01:0x%16.16p X02:0x%16.16p X03:0x%16.16p\n", (void *)regs->x0, (void *)regs->x1, (void *)regs->x2, (void *)regs->x3);
    rt_kprintf("X04:0x%16.16p X05:0x%16.16p X06:0x%16.16p X07:0x%16.16p\n", (void *)regs->x4, (void *)regs->x5, (void *)regs->x6, (void *)regs->x7);
    rt_kprintf("X08:0x%16.16p X09:0x%16.16p X10:0x%16.16p X11:0x%16.16p\n", (void *)regs->x8, (void *)regs->x9, (void *)regs->x10, (void *)regs->x11);
    rt_kprintf("X12:0x%16.16p X13:0x%16.16p X14:0x%16.16p X15:0x%16.16p\n", (void *)regs->x12, (void *)regs->x13, (void *)regs->x14, (void *)regs->x15);
    rt_kprintf("X16:0x%16.16p X17:0x%16.16p X18:0x%16.16p X19:0x%16.16p\n", (void *)regs->x16, (void *)regs->x17, (void *)regs->x18, (void *)regs->x19);
    rt_kprintf("X20:0x%16.16p X21:0x%16.16p X22:0x%16.16p X23:0x%16.16p\n", (void *)regs->x20, (void *)regs->x21, (void *)regs->x22, (void *)regs->x23);
    rt_kprintf("X24:0x%16.16p X25:0x%16.16p X26:0x%16.16p X27:0x%16.16p\n", (void *)regs->x24, (void *)regs->x25, (void *)regs->x26, (void *)regs->x27);
    rt_kprintf("X28:0x%16.16p X29:0x%16.16p X30:0x%16.16p\n", (void *)regs->x28, (void *)regs->x29, (void *)regs->x30);
    rt_kprintf("SP_EL0:0x%16.16p\n", (void *)regs->sp_el0);
    rt_kprintf("SPSR  :0x%16.16p\n", (void *)regs->cpsr);
    rt_kprintf("EPC   :0x%16.16p\n", (void *)regs->pc);
}
#ifndef RT_USING_PIC
/**
 * Low-level IRQ dispatch: identify the pending interrupt source, update the
 * optional statistics counters and invoke the registered handler.
 *
 * @param irq_context interrupt context pushed by rt_hw_trap_irq();
 *                    not consumed on this path, kept for interface symmetry
 */
static void _rt_hw_trap_irq(rt_interrupt_context_t irq_context)
{
#ifdef SOC_BCM283x
    extern rt_uint8_t core_timer_flag;
    void *param;
    uint32_t irq;
    rt_isr_handler_t isr_func;
    extern struct rt_irq_desc isr_table[];
    uint32_t value = 0;

    /* snapshot the basic pending register (low 10 bits are sources) */
    value = IRQ_PEND_BASIC & 0x3ff;

    /* per-core timer: routed through this core's IRQ source register */
    if(core_timer_flag != 0)
    {
        uint32_t cpu_id = rt_hw_cpu_id();
        uint32_t int_source = CORE_IRQSOURCE(cpu_id);
        if (int_source & 0x0f)
        {
            if (int_source & 0x08)
            {
                isr_func = isr_table[IRQ_ARM_TIMER].handler;
#ifdef RT_USING_INTERRUPT_INFO
                isr_table[IRQ_ARM_TIMER].counter++;
#endif
                if (isr_func)
                {
                    param = isr_table[IRQ_ARM_TIMER].param;
                    isr_func(IRQ_ARM_TIMER, param);
                }
            }
        }
    }
    /* local interrupt*/
    if (value)
    {
        if (value & (1 << 8))
        {
            /* bank 1 pending: map to IRQ numbers 0..31 */
            value = IRQ_PEND1;
            irq = __rt_ffs(value) - 1;
        }
        else if (value & (1 << 9))
        {
            /* bank 2 pending: map to IRQ numbers 32..63 */
            value = IRQ_PEND2;
            irq = __rt_ffs(value) + 31;
        }
        else
        {
            /* basic pending bits: map to IRQ numbers 64+ */
            value &= 0x0f;
            irq = __rt_ffs(value) + 63;
        }
        /* get interrupt service routine */
        isr_func = isr_table[irq].handler;
#ifdef RT_USING_INTERRUPT_INFO
        isr_table[irq].counter++;
#endif
        if (isr_func)
        {
            /* Interrupt for myself. */
            param = isr_table[irq].param;
            /* turn to interrupt service routine */
            isr_func(irq, param);
        }
    }
#else
    void *param;
    int ir, ir_self;
    rt_isr_handler_t isr_func;
    extern struct rt_irq_desc isr_table[];

    ir = rt_hw_interrupt_get_irq();
    if (ir == 1023)
    {
        /* Spurious interrupt: no ack needed, just return */
        return;
    }
    /* bit 10~12 is cpuid, bit 0~9 is interrupt id */
    ir_self = ir & 0x3ffUL;
    /* get interrupt service routine */
    isr_func = isr_table[ir_self].handler;
#ifdef RT_USING_INTERRUPT_INFO
    isr_table[ir_self].counter++;
#ifdef RT_USING_SMP
    isr_table[ir_self].cpu_counter[rt_hw_cpu_id()]++;
#endif
#endif
    if (isr_func)
    {
        /* Interrupt for myself. */
        param = isr_table[ir_self].param;
        /* turn to interrupt service routine */
        isr_func(ir_self, param);
    }
    /* end of interrupt */
    rt_hw_interrupt_ack(ir);
#endif
}
#else
/**
 * IRQ dispatch for PIC-framework builds: delegate to the generic PIC
 * trap handler.
 *
 * @param this_ctx interrupt context pushed by rt_hw_trap_irq(); unused
 *                 here but kept so both dispatcher variants share one
 *                 call shape
 */
static void _rt_hw_trap_irq(struct rt_interrupt_context *this_ctx)
{
    rt_pic_do_traps();
}
#endif
void rt_hw_trap_irq(struct rt_hw_exp_stack *regs)
{
struct rt_interrupt_context this_ctx = {
.context = regs,
.node = RT_SLIST_OBJECT_INIT(this_ctx.node),
};
rt_interrupt_context_push(&this_ctx);
_rt_hw_trap_irq(&this_ctx);
rt_interrupt_context_pop();
}
#ifdef RT_USING_SMART
#define DBG_CHECK_EVENT(regs, esr) dbg_check_event(regs, esr)
#else
#define DBG_CHECK_EVENT(regs, esr) (0)
#endif
#ifndef RT_USING_PIC
/**
 * FIQ entry: look up the pending interrupt and invoke its registered
 * handler, then signal end-of-interrupt to the controller.
 */
void rt_hw_trap_fiq(void)
{
    void *param;
    int ir, ir_self;
    rt_isr_handler_t isr_func;
    extern struct rt_irq_desc isr_table[];

    ir = rt_hw_interrupt_get_irq();
    /* bit 10~12 is cpuid, bit 0~9 is interrupt id */
    ir_self = ir & 0x3ffUL;
    /* get interrupt service routine */
    isr_func = isr_table[ir_self].handler;
    /* guard against an unregistered FIQ source (NULL handler), matching
     * the check done on the IRQ dispatch path */
    if (isr_func)
    {
        param = isr_table[ir_self].param;
        /* turn to interrupt service routine */
        isr_func(ir_self, param);
    }
    /* end of interrupt */
    rt_hw_interrupt_ack(ir);
}
#else
/**
 * FIQ entry for PIC-framework builds: delegate to the generic PIC trap
 * handler.
 */
void rt_hw_trap_fiq(void)
{
    rt_pic_do_traps();
}
#endif
void print_exception(unsigned long esr, unsigned long epc);
void SVC_Handler(struct rt_hw_exp_stack *regs);
/**
 * Synchronous exception entry: dispatch debugger events and 64-bit
 * syscalls, try to fix user-space faults, otherwise dump state and halt.
 *
 * @param regs exception frame saved by the vector stub
 */
void rt_hw_trap_exception(struct rt_hw_exp_stack *regs)
{
    unsigned long esr;
    unsigned char ec;

    asm volatile("mrs %0, esr_el1":"=r"(esr));
    /* exception class is ESR_EL1[31:26] */
    ec = (unsigned char)((esr >> 26) & 0x3fU);

    if (DBG_CHECK_EVENT(regs, esr))
    {
        /* consumed by the debug agent */
        return;
    }
    else if (ec == 0x15) /* is 64bit syscall ? */
    {
        SVC_Handler(regs);
        /* never return here */
    }
#ifdef RT_USING_SMART
    /**
     * Note: check_user_stack will take lock and it will possibly be a dead-lock
     * if exception comes from kernel.
     */
    if ((regs->cpsr & 0x1f) == 0)
    {
        /* exception taken from EL0 (user mode): try to fix it up */
        if (user_fault_fixable(esr, regs))
            return;
    }
    else
    {
        /* exception from kernel mode: fixing up may block, so refuse when
         * the scheduler cannot run */
        if (_irq_is_disable(regs->cpsr))
        {
            LOG_E("Kernel fault from interrupt/critical section");
        }
        if (rt_critical_level() != 0)
        {
            LOG_E("scheduler is not available");
        }
        else if (user_fault_fixable(esr, regs))
            return;
    }
#endif
    /* unrecoverable: report, backtrace, and stop the CPU */
    print_exception(esr, regs->pc);
    rt_hw_show_register(regs);
    LOG_E("current thread: %s\n", rt_thread_self()->parent.name);
#ifdef RT_USING_FINSH
    list_thread();
#endif
#ifdef RT_USING_LWP
    /* restore normal execution environment */
    __asm__ volatile("msr daifclr, 0x3\ndmb ishst\nisb\n");
    _check_fault(regs, 0, "user fault");
#endif
    struct rt_hw_backtrace_frame frame = {.fp = regs->x29, .pc = regs->pc};
    rt_backtrace_frame(rt_thread_self(), &frame);
    rt_hw_cpu_shutdown();
}
/**
 * SError (asynchronous abort) entry: unrecoverable, so dump the register
 * frame and the thread list, then stop the CPU.
 *
 * @param regs exception frame saved by the vector stub
 */
void rt_hw_trap_serror(struct rt_hw_exp_stack *regs)
{
    rt_kprintf("SError\n");
    rt_hw_show_register(regs);
    rt_kprintf("current: %s\n", rt_thread_self()->parent.name);
#ifdef RT_USING_FINSH
    list_thread();
#endif
    rt_hw_cpu_shutdown();
}