/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2013-07-20     Bernard      first version
 */

#include <rtthread.h>
#include <rthw.h>
#include <board.h>

#include <armv8.h>
#include "interrupt.h"
#include "mm_aspace.h"

#define DBG_TAG "libcpu.trap"
#define DBG_LVL DBG_LOG
#include <rtdbg.h>

#ifdef RT_USING_FINSH
extern long list_thread(void);
#endif

#ifdef RT_USING_LWP
#include <lwp.h>
#include <lwp_arch.h>

#ifdef LWP_USING_CORE_DUMP
#include <lwp_core_dump.h>
#endif
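
/**
 * report a fatal fault, back-trace the faulting thread, and terminate the
 * user process when the exception was raised from user mode
 */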
static void _check_fault(struct rt_hw_exp_stack *regs, uint32_t pc_adj, char *info)
{
    uint32_t mode = regs->cpsr;

    if ((mode & 0x1f) == 0x00)
    {
        /* exception was taken from EL0 (user mode) */
        rt_kprintf("%s! pc = 0x%x\n", info, regs->pc - pc_adj);

        /* user stack backtrace */
#ifdef RT_USING_LWP
        {
            rt_thread_t th;

            th = rt_thread_self();
            if (th && th->lwp)
            {
                arch_backtrace_uthread(th);
            }
        }
#endif

#ifdef LWP_USING_CORE_DUMP
        lwp_core_dump(regs, pc_adj);
#endif
        sys_exit_group(-1);
    }
    else
    {
        /* user stack backtrace */
#ifdef RT_USING_LWP
        {
            rt_thread_t th;

            th = rt_thread_self();
            if (th && th->lwp)
            {
                arch_backtrace_uthread(th);
            }
        }
#endif

        /* kernel stack backtrace */
        struct rt_hw_backtrace_frame frame = {
            .fp = regs->x29,
            .pc = regs->pc
        };
        rt_backtrace_frame(&frame);
    }
}
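
/**
 * map the fault status code (FSC, ESR_EL1[5:0]) of a synchronous abort
 * to an rt_mm fault type
 */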
rt_inline int _get_type(unsigned long esr)
{
    int ret;
    int fsc = esr & 0x3f;
    switch (fsc)
    {
        case 0x4:
        case 0x5:
        case 0x6:
        case 0x7:
            /* translation fault, level 0-3 */
            ret = MM_FAULT_TYPE_PAGE_FAULT;
            break;
        case 0xc:
        case 0xd:
        case 0xe:
        case 0xf:
            /* permission fault, level 0-3 */
            ret = MM_FAULT_TYPE_ACCESS_FAULT;
            break;
        case 0x8:
        case 0x9:
        case 0xa:
        case 0xb:
            /* access flag fault */
        default:
            ret = MM_FAULT_TYPE_GENERIC;
    }
    return ret;
}
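
/**
 * check whether IRQs were masked (PSTATE.I set) in the saved cpsr
 */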
rt_inline long _irq_is_disable(long cpsr)
{
    return !!(cpsr & 0x80);
}
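
/**
 * try to resolve a synchronous abort raised from user space by handing it
 * to the lwp address space (rt_aspace); returns 1 if the fault was fixed
 */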
static int user_fault_fixable(unsigned long esr, struct rt_hw_exp_stack *regs)
{
    rt_ubase_t level;
    unsigned char ec;
    void *dfar;
    int ret = 0;
    enum rt_mm_fault_op fault_op;
    enum rt_mm_fault_type fault_type;
    struct rt_lwp *lwp;

    ec = (unsigned char)((esr >> 26) & 0x3fU);

    switch (ec)
    {
        case 0x20: /* instruction abort from lower EL */
            fault_op = MM_FAULT_OP_EXECUTE;
            fault_type = _get_type(esr);
            break;
        case 0x21: /* instruction abort from current EL */
        case 0x24: /* data abort from lower EL */
        case 0x25: /* data abort from current EL */
            fault_op = MM_FAULT_OP_WRITE;
            fault_type = _get_type(esr);
            break;
        default:
            fault_op = 0;
            break;
    }

    /* fixing a page fault is only allowed for user space (lwp) */
    lwp = lwp_self();
    if (lwp && fault_op)
    {
        __asm__ volatile("mrs %0, far_el1":"=r"(dfar));
        struct rt_aspace_fault_msg msg = {
            .fault_op = fault_op,
            .fault_type = fault_type,
            .fault_vaddr = dfar,
        };

        lwp_user_setting_save(rt_thread_self());
        /* unmask IRQ/FIQ while the fault is being fixed, then restore DAIF */
        __asm__ volatile("mrs %0, daif\nmsr daifclr, 0x3\nisb\n":"=r"(level));
        if (rt_aspace_fault_try_fix(lwp->aspace, &msg))
        {
            ret = 1;
        }
        __asm__ volatile("msr daif, %0\nisb\n"::"r"(level));
    }
    return ret;
}
#endif

/**
 * this function shows the CPU registers
 *
 * @param regs pointer to the saved register frame
 */
void rt_hw_show_register(struct rt_hw_exp_stack *regs)
{
    rt_kprintf("Exception:\n");
    rt_kprintf("X00:0x%16.16p X01:0x%16.16p X02:0x%16.16p X03:0x%16.16p\n", (void *)regs->x0, (void *)regs->x1, (void *)regs->x2, (void *)regs->x3);
    rt_kprintf("X04:0x%16.16p X05:0x%16.16p X06:0x%16.16p X07:0x%16.16p\n", (void *)regs->x4, (void *)regs->x5, (void *)regs->x6, (void *)regs->x7);
    rt_kprintf("X08:0x%16.16p X09:0x%16.16p X10:0x%16.16p X11:0x%16.16p\n", (void *)regs->x8, (void *)regs->x9, (void *)regs->x10, (void *)regs->x11);
    rt_kprintf("X12:0x%16.16p X13:0x%16.16p X14:0x%16.16p X15:0x%16.16p\n", (void *)regs->x12, (void *)regs->x13, (void *)regs->x14, (void *)regs->x15);
    rt_kprintf("X16:0x%16.16p X17:0x%16.16p X18:0x%16.16p X19:0x%16.16p\n", (void *)regs->x16, (void *)regs->x17, (void *)regs->x18, (void *)regs->x19);
    rt_kprintf("X20:0x%16.16p X21:0x%16.16p X22:0x%16.16p X23:0x%16.16p\n", (void *)regs->x20, (void *)regs->x21, (void *)regs->x22, (void *)regs->x23);
    rt_kprintf("X24:0x%16.16p X25:0x%16.16p X26:0x%16.16p X27:0x%16.16p\n", (void *)regs->x24, (void *)regs->x25, (void *)regs->x26, (void *)regs->x27);
    rt_kprintf("X28:0x%16.16p X29:0x%16.16p X30:0x%16.16p\n", (void *)regs->x28, (void *)regs->x29, (void *)regs->x30);
    rt_kprintf("SP_EL0:0x%16.16p\n", (void *)regs->sp_el0);
    rt_kprintf("SPSR  :0x%16.16p\n", (void *)regs->cpsr);
    rt_kprintf("EPC   :0x%16.16p\n", (void *)regs->pc);
}
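
/**
 * IRQ trap entry: find the pending interrupt and dispatch it to the
 * handler registered in isr_table[]
 */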
void rt_hw_trap_irq(void)
{
#ifdef SOC_BCM283x
    extern rt_uint8_t core_timer_flag;
    void *param;
    uint32_t irq;
    rt_isr_handler_t isr_func;
    extern struct rt_irq_desc isr_table[];
    uint32_t value = 0;
    value = IRQ_PEND_BASIC & 0x3ff;

    if (core_timer_flag != 0)
    {
        uint32_t cpu_id = rt_hw_cpu_id();
        uint32_t int_source = CORE_IRQSOURCE(cpu_id);
        if (int_source & 0x0f)
        {
            if (int_source & 0x08)
            {
                isr_func = isr_table[IRQ_ARM_TIMER].handler;
#ifdef RT_USING_INTERRUPT_INFO
                isr_table[IRQ_ARM_TIMER].counter++;
#endif
                if (isr_func)
                {
                    param = isr_table[IRQ_ARM_TIMER].param;
                    isr_func(IRQ_ARM_TIMER, param);
                }
            }
        }
    }

    /* local interrupt */
    if (value)
    {
        if (value & (1 << 8))
        {
            value = IRQ_PEND1;
            irq = __rt_ffs(value) - 1;
        }
        else if (value & (1 << 9))
        {
            value = IRQ_PEND2;
            irq = __rt_ffs(value) + 31;
        }
        else
        {
            value &= 0x0f;
            irq = __rt_ffs(value) + 63;
        }

        /* get interrupt service routine */
        isr_func = isr_table[irq].handler;
#ifdef RT_USING_INTERRUPT_INFO
        isr_table[irq].counter++;
#endif
        if (isr_func)
        {
            /* Interrupt for myself. */
            param = isr_table[irq].param;
            /* turn to interrupt service routine */
            isr_func(irq, param);
        }
    }
#else
    void *param;
    int ir, ir_self;
    rt_isr_handler_t isr_func;
    extern struct rt_irq_desc isr_table[];

    ir = rt_hw_interrupt_get_irq();

    if (ir == 1023)
    {
        /* spurious interrupt */
        return;
    }

    /* bit 10~12 is cpuid, bit 0~9 is interrupt id */
    ir_self = ir & 0x3ffUL;

    /* get interrupt service routine */
    isr_func = isr_table[ir_self].handler;
#ifdef RT_USING_INTERRUPT_INFO
    isr_table[ir_self].counter++;
#ifdef RT_USING_SMP
    isr_table[ir_self].cpu_counter[rt_hw_cpu_id()]++;
#endif
#endif
    if (isr_func)
    {
        /* Interrupt for myself. */
        param = isr_table[ir_self].param;
        /* turn to interrupt service routine */
        isr_func(ir_self, param);
    }

    /* end of interrupt */
    rt_hw_interrupt_ack(ir);
#endif
}

#ifdef RT_USING_SMART
#define DBG_CHECK_EVENT(regs, esr) dbg_check_event(regs, esr)
#else
#define DBG_CHECK_EVENT(regs, esr) (0)
#endif
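
/**
 * FIQ trap entry: dispatch the pending interrupt to the handler
 * registered in isr_table[]
 */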
void rt_hw_trap_fiq(void)
{
    void *param;
    int ir, ir_self;
    rt_isr_handler_t isr_func;
    extern struct rt_irq_desc isr_table[];

    ir = rt_hw_interrupt_get_irq();

    /* bit 10~12 is cpuid, bit 0~9 is interrupt id */
    ir_self = ir & 0x3ffUL;

    /* get interrupt service routine */
    isr_func = isr_table[ir_self].handler;
    param = isr_table[ir_self].param;

    /* turn to interrupt service routine */
    isr_func(ir_self, param);

    /* end of interrupt */
    rt_hw_interrupt_ack(ir);
}

void print_exception(unsigned long esr, unsigned long epc);
void SVC_Handler(struct rt_hw_exp_stack *regs);
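
/**
 * synchronous exception trap entry: route debug events and 64-bit
 * syscalls, try to fix user-space faults, and otherwise report the
 * exception and shut the system down
 */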
void rt_hw_trap_exception(struct rt_hw_exp_stack *regs)
{
    unsigned long esr;
    unsigned char ec;

    asm volatile("mrs %0, esr_el1":"=r"(esr));
    ec = (unsigned char)((esr >> 26) & 0x3fU);

    if (DBG_CHECK_EVENT(regs, esr))
    {
        return;
    }
    else if (ec == 0x15) /* is it a 64-bit syscall? */
    {
        SVC_Handler(regs);
        /* never return here */
    }

#ifdef RT_USING_SMART
    /**
     * Note: check_user_stack takes a lock, which can dead-lock if the
     * exception comes from the kernel.
     */
    if ((regs->cpsr & 0x1f) == 0)
    {
        /* exception from EL0: try to fix the user-space fault */
        if (user_fault_fixable(esr, regs))
            return;
    }
    else
    {
        if (_irq_is_disable(regs->cpsr))
        {
            LOG_E("Kernel fault from interrupt/critical section");
        }
        if (rt_critical_level() != 0)
        {
            LOG_E("scheduler is not available");
        }
        else if (user_fault_fixable(esr, regs))
            return;
    }
#endif
    print_exception(esr, regs->pc);
    rt_hw_show_register(regs);
    LOG_E("current thread: %s\n", rt_thread_self()->parent.name);

#ifdef RT_USING_FINSH
    list_thread();
#endif

#ifdef RT_USING_LWP
    _check_fault(regs, 0, "user fault");
#endif

    struct rt_hw_backtrace_frame frame = {.fp = regs->x29, .pc = regs->pc};
    rt_backtrace_frame(&frame);
    rt_hw_cpu_shutdown();
}
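
/**
 * SError trap entry: report the CPU state and shut the system down
 */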
void rt_hw_trap_serror(struct rt_hw_exp_stack *regs)
{
    rt_kprintf("SError\n");
    rt_hw_show_register(regs);
    rt_kprintf("current: %s\n", rt_thread_self()->parent.name);
#ifdef RT_USING_FINSH
    list_thread();
#endif
    rt_hw_cpu_shutdown();
}