commit 46d9ee7795
2024-08-05 20:57:09 +08:00
3020 changed files with 1725767 additions and 0 deletions

View File

@@ -0,0 +1,26 @@
# RT-Thread building script for component
from building import *
Import('rtconfig')
cwd = GetCurrentDir()
src = Glob('*.c') + Glob('*.cpp')
CPPPATH = [cwd]
if rtconfig.PLATFORM in ['armcc', 'armclang']:
src += Glob('*_rvds.S')
if rtconfig.PLATFORM in ['gcc']:
src += Glob('*_init.S')
src += Glob('*_gcc.S')
if rtconfig.PLATFORM in ['iccarm']:
src += Glob('*_iar.S')
if not GetDepend('RT_USING_MEM_PROTECTION') and not GetDepend('RT_USING_HW_STACK_GUARD'):
SrcRemove(src, 'mpu.c')
group = DefineGroup('libcpu', src, depend = [''], CPPPATH = CPPPATH)
Return('group')
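
The script strips mpu.c whenever neither RT_USING_MEM_PROTECTION nor RT_USING_HW_STACK_GUARD is enabled; the C sources below are gated by the same configuration macros. A minimal sketch of that gating pattern on the C side (illustrative only, not part of the port):

/* Illustrative sketch: the same macros that drive SrcRemove() above
 * gate the MPU code at compile time. */
#include <rtconfig.h>

#if defined(RT_USING_MEM_PROTECTION) || defined(RT_USING_HW_STACK_GUARD)
/* mpu.c is compiled and its APIs are available */
#endif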

View File

@@ -0,0 +1,262 @@
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2009-10-11 Bernard first version
* 2012-01-01 aozima support context switch load/store FPU register.
* 2013-06-18 aozima add restore MSP feature.
* 2013-06-23 aozima support lazy stack optimized.
* 2018-07-24 aozima enhance the hard fault exception handler.
*/
/**
* @addtogroup cortex-m4
*/
/*@{*/
#include <rtconfig.h>
.cpu cortex-m4
.syntax unified
.thumb
.text
.equ SCB_VTOR, 0xE000ED08 /* Vector Table Offset Register */
.equ NVIC_INT_CTRL, 0xE000ED04 /* interrupt control state register */
.equ NVIC_SYSPRI2, 0xE000ED20 /* system priority register (2) */
.equ NVIC_PENDSV_PRI, 0xFFFF0000 /* PendSV and SysTick priority value (lowest) */
.equ NVIC_PENDSVSET, 0x10000000 /* value to trigger PendSV exception */
/*
* rt_base_t rt_hw_interrupt_disable();
*/
.global rt_hw_interrupt_disable
.type rt_hw_interrupt_disable, %function
rt_hw_interrupt_disable:
MRS r0, PRIMASK
CPSID I
BX LR
/*
* void rt_hw_interrupt_enable(rt_base_t level);
*/
.global rt_hw_interrupt_enable
.type rt_hw_interrupt_enable, %function
rt_hw_interrupt_enable:
MSR PRIMASK, r0
BX LR
/*
* void rt_hw_context_switch(rt_uint32_t from, rt_uint32_t to);
* r0 --> from
* r1 --> to
*/
.global rt_hw_context_switch_interrupt
.type rt_hw_context_switch_interrupt, %function
.global rt_hw_context_switch
.type rt_hw_context_switch, %function
rt_hw_context_switch_interrupt:
rt_hw_context_switch:
/* set rt_thread_switch_interrupt_flag to 1 */
LDR r2, =rt_thread_switch_interrupt_flag
LDR r3, [r2]
CMP r3, #1
BEQ _reswitch
MOV r3, #1
STR r3, [r2]
LDR r2, =rt_interrupt_from_thread /* set rt_interrupt_from_thread */
STR r0, [r2]
_reswitch:
LDR r2, =rt_interrupt_to_thread /* set rt_interrupt_to_thread */
STR r1, [r2]
LDR r0, =NVIC_INT_CTRL /* trigger the PendSV exception (causes context switch) */
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
BX LR
/* r0 --> switch from thread stack
* r1 --> switch to thread stack
* psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
*/
.global PendSV_Handler
.type PendSV_Handler, %function
PendSV_Handler:
/* disable interrupt to protect context switch */
MRS r2, PRIMASK
CPSID I
/* get rt_thread_switch_interrupt_flag */
LDR r0, =rt_thread_switch_interrupt_flag
LDR r1, [r0]
CBZ r1, pendsv_exit /* pendsv already handled */
/* clear rt_thread_switch_interrupt_flag to 0 */
MOV r1, #0x00
STR r1, [r0]
LDR r0, =rt_interrupt_from_thread
LDR r1, [r0]
CBZ r1, switch_to_thread /* skip register save at the first time */
MRS r1, psp /* get from thread stack pointer */
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
TST lr, #0x10 /* if(!EXC_RETURN[4]) */
IT EQ
VSTMDBEQ r1!, {d8 - d15} /* push FPU register s16~s31 */
#endif
STMFD r1!, {r4 - r11} /* push r4 - r11 register */
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
MOV r4, #0x00 /* flag = 0 */
TST lr, #0x10 /* if(!EXC_RETURN[4]) */
IT EQ
MOVEQ r4, #0x01 /* flag = 1 */
STMFD r1!, {r4} /* push flag */
#endif
LDR r0, [r0]
STR r1, [r0] /* update from thread stack pointer */
switch_to_thread:
LDR r1, =rt_interrupt_to_thread
LDR r1, [r1]
LDR r1, [r1] /* load thread stack pointer */
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
LDMFD r1!, {r3} /* pop flag */
#endif
LDMFD r1!, {r4 - r11} /* pop r4 - r11 register */
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
CMP r3, #0 /* if(flag_r3 != 0) */
IT NE
VLDMIANE r1!, {d8 - d15} /* pop FPU register s16~s31 */
#endif
MSR psp, r1 /* update stack pointer */
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
ORR lr, lr, #0x10 /* lr |= (1 << 4), clean FPCA. */
CMP r3, #0 /* if(flag_r3 != 0) */
IT NE
BICNE lr, lr, #0x10 /* lr &= ~(1 << 4), set FPCA. */
#endif
#if defined (RT_USING_MEM_PROTECTION)
PUSH {r0-r3, r12, lr}
BL rt_thread_self
BL rt_hw_mpu_table_switch
POP {r0-r3, r12, lr}
#endif
pendsv_exit:
/* restore interrupt */
MSR PRIMASK, r2
ORR lr, lr, #0x04
BX lr
/*
* void rt_hw_context_switch_to(rt_uint32_t to);
* r0 --> to
*/
.global rt_hw_context_switch_to
.type rt_hw_context_switch_to, %function
rt_hw_context_switch_to:
LDR r1, =rt_interrupt_to_thread
STR r0, [r1]
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
/* CLEAR CONTROL.FPCA */
MRS r2, CONTROL /* read */
BIC r2, #0x04 /* modify */
MSR CONTROL, r2 /* write-back */
#endif
/* set from thread to 0 */
LDR r1, =rt_interrupt_from_thread
MOV r0, #0x0
STR r0, [r1]
/* set interrupt flag to 1 */
LDR r1, =rt_thread_switch_interrupt_flag
MOV r0, #1
STR r0, [r1]
/* set the PendSV and SysTick exception priority */
LDR r0, =NVIC_SYSPRI2
LDR r1, =NVIC_PENDSV_PRI
LDR.W r2, [r0,#0x00] /* read */
ORR r1,r1,r2 /* modify */
STR r1, [r0] /* write-back */
LDR r0, =NVIC_INT_CTRL /* trigger the PendSV exception (causes context switch) */
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
/* restore MSP */
LDR r0, =SCB_VTOR
LDR r0, [r0]
LDR r0, [r0]
NOP
MSR msp, r0
/* enable interrupts at processor level */
CPSIE F
CPSIE I
/* ensure PendSV exception taken place before subsequent operation */
DSB
ISB
/* never reach here! */
/* compatible with old version */
.global rt_hw_interrupt_thread_switch
.type rt_hw_interrupt_thread_switch, %function
rt_hw_interrupt_thread_switch:
BX lr
NOP
.global HardFault_Handler
.type HardFault_Handler, %function
HardFault_Handler:
/* get current context */
MRS r0, msp /* get fault context from handler. */
TST lr, #0x04 /* if(!EXC_RETURN[2]) */
BEQ _get_sp_done
MRS r0, psp /* get fault context from thread. */
_get_sp_done:
STMFD r0!, {r4 - r11} /* push r4 - r11 register */
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
STMFD r0!, {lr} /* push dummy for flag */
#endif
STMFD r0!, {lr} /* push exec_return register */
TST lr, #0x04 /* if(!EXC_RETURN[2]) */
BEQ _update_msp
MSR psp, r0 /* update stack pointer to PSP. */
B _update_done
_update_msp:
MSR msp, r0 /* update stack pointer to MSP. */
_update_done:
PUSH {LR}
BL rt_hw_hard_fault_exception
POP {LR}
ORR lr, lr, #0x04
BX lr
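
Taken together, rt_hw_context_switch() and PendSV_Handler implement a deferred switch: the call itself only records where to save and load the thread stack pointers and pends PendSV; the register save/restore happens later, at the lowest exception priority. A C-level sketch of the trigger half (function name is hypothetical; the ICSR address and PENDSVSET value come from the .equ lines above):

/* Sketch of what rt_hw_context_switch does above; from/to are the
 * addresses of the threads' saved stack-pointer slots. */
extern rt_uint32_t rt_interrupt_from_thread;
extern rt_uint32_t rt_interrupt_to_thread;
extern rt_uint32_t rt_thread_switch_interrupt_flag;

static void context_switch_sketch(rt_uint32_t from, rt_uint32_t to)
{
    if (rt_thread_switch_interrupt_flag != 1)
    {
        rt_thread_switch_interrupt_flag = 1;
        rt_interrupt_from_thread = from;   /* where PendSV stores the old PSP */
    }
    rt_interrupt_to_thread = to;           /* always retarget the switch */
    *(volatile rt_uint32_t *)0xE000ED04 = 0x10000000; /* ICSR: set PENDSVSET */
    /* PendSV then saves r4-r11 (plus s16-s31 if an FPU frame was stacked)
     * on the old PSP and restores them from the new one. */
}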

View File

@@ -0,0 +1,257 @@
;/*
; * Copyright (c) 2006-2018, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2009-01-17 Bernard first version
; * 2009-09-27 Bernard add protection when context switch occurs
; * 2012-01-01 aozima support context switch load/store FPU register.
; * 2013-06-18 aozima add restore MSP feature.
; * 2013-06-23 aozima support lazy stack optimized.
; * 2018-07-24 aozima enhance the hard fault exception handler.
; */
;/**
; * @addtogroup cortex-m4
; */
;/*@{*/
SCB_VTOR EQU 0xE000ED08 ; Vector Table Offset Register
NVIC_INT_CTRL EQU 0xE000ED04 ; interrupt control state register
NVIC_SYSPRI2 EQU 0xE000ED20 ; system priority register (2)
NVIC_PENDSV_PRI EQU 0xFFFF0000 ; PendSV and SysTick priority value (lowest)
NVIC_PENDSVSET EQU 0x10000000 ; value to trigger PendSV exception
SECTION .text:CODE(2)
THUMB
REQUIRE8
PRESERVE8
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
;/*
; * rt_base_t rt_hw_interrupt_disable();
; */
EXPORT rt_hw_interrupt_disable
rt_hw_interrupt_disable:
MRS r0, PRIMASK
CPSID I
BX LR
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; */
EXPORT rt_hw_interrupt_enable
rt_hw_interrupt_enable:
MSR PRIMASK, r0
BX LR
;/*
; * void rt_hw_context_switch(rt_uint32_t from, rt_uint32_t to);
; * r0 --> from
; * r1 --> to
; */
EXPORT rt_hw_context_switch_interrupt
EXPORT rt_hw_context_switch
rt_hw_context_switch_interrupt:
rt_hw_context_switch:
; set rt_thread_switch_interrupt_flag to 1
LDR r2, =rt_thread_switch_interrupt_flag
LDR r3, [r2]
CMP r3, #1
BEQ _reswitch
MOV r3, #1
STR r3, [r2]
LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
STR r0, [r2]
_reswitch
LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
STR r1, [r2]
LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch)
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
BX LR
; r0 --> switch from thread stack
; r1 --> switch to thread stack
; psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
EXPORT PendSV_Handler
PendSV_Handler:
; disable interrupt to protect context switch
MRS r2, PRIMASK
CPSID I
; get rt_thread_switch_interrupt_flag
LDR r0, =rt_thread_switch_interrupt_flag
LDR r1, [r0]
CBZ r1, pendsv_exit ; pendsv already handled
; clear rt_thread_switch_interrupt_flag to 0
MOV r1, #0x00
STR r1, [r0]
LDR r0, =rt_interrupt_from_thread
LDR r1, [r0]
CBZ r1, switch_to_thread ; skip register save at the first time
MRS r1, psp ; get from thread stack pointer
#if defined ( __ARMVFP__ )
TST lr, #0x10 ; if(!EXC_RETURN[4])
BNE skip_push_fpu
VSTMDB r1!, {d8 - d15} ; push FPU register s16~s31
skip_push_fpu
#endif
STMFD r1!, {r4 - r11} ; push r4 - r11 register
#if defined ( __ARMVFP__ )
MOV r4, #0x00 ; flag = 0
TST lr, #0x10 ; if(!EXC_RETURN[4])
BNE push_flag
MOV r4, #0x01 ; flag = 1
push_flag
;STMFD r1!, {r4} ; push flag
SUB r1, r1, #0x04
STR r4, [r1]
#endif
LDR r0, [r0]
STR r1, [r0] ; update from thread stack pointer
switch_to_thread
LDR r1, =rt_interrupt_to_thread
LDR r1, [r1]
LDR r1, [r1] ; load thread stack pointer
#if defined ( __ARMVFP__ )
LDMFD r1!, {r3} ; pop flag
#endif
LDMFD r1!, {r4 - r11} ; pop r4 - r11 register
#if defined ( __ARMVFP__ )
CBZ r3, skip_pop_fpu
VLDMIA r1!, {d8 - d15} ; pop FPU register s16~s31
skip_pop_fpu
#endif
MSR psp, r1 ; update stack pointer
#if defined ( __ARMVFP__ )
ORR lr, lr, #0x10 ; lr |= (1 << 4), clean FPCA.
CBZ r3, return_without_fpu ; if(flag_r3 != 0)
BIC lr, lr, #0x10 ; lr &= ~(1 << 4), set FPCA.
return_without_fpu
#endif
pendsv_exit
; restore interrupt
MSR PRIMASK, r2
ORR lr, lr, #0x04
BX lr
;/*
; * void rt_hw_context_switch_to(rt_uint32_t to);
; * r0 --> to
; */
EXPORT rt_hw_context_switch_to
rt_hw_context_switch_to:
LDR r1, =rt_interrupt_to_thread
STR r0, [r1]
#if defined ( __ARMVFP__ )
; CLEAR CONTROL.FPCA
MRS r2, CONTROL ; read
BIC r2, r2, #0x04 ; modify
MSR CONTROL, r2 ; write-back
#endif
; set from thread to 0
LDR r1, =rt_interrupt_from_thread
MOV r0, #0x0
STR r0, [r1]
; set interrupt flag to 1
LDR r1, =rt_thread_switch_interrupt_flag
MOV r0, #1
STR r0, [r1]
; set the PendSV and SysTick exception priority
LDR r0, =NVIC_SYSPRI2
LDR r1, =NVIC_PENDSV_PRI
LDR.W r2, [r0,#0x00] ; read
ORR r1,r1,r2 ; modify
STR r1, [r0] ; write-back
LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch)
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
; restore MSP
LDR r0, =SCB_VTOR
LDR r0, [r0]
LDR r0, [r0]
NOP
MSR msp, r0
; enable interrupts at processor level
CPSIE F
CPSIE I
; ensure PendSV exception taken place before subsequent operation
DSB
ISB
; never reach here!
; compatible with old version
EXPORT rt_hw_interrupt_thread_switch
rt_hw_interrupt_thread_switch:
BX lr
IMPORT rt_hw_hard_fault_exception
EXPORT HardFault_Handler
HardFault_Handler:
; get current context
MRS r0, msp ; get fault context from handler.
TST lr, #0x04 ; if(!EXC_RETURN[2])
BEQ _get_sp_done
MRS r0, psp ; get fault context from thread.
_get_sp_done
STMFD r0!, {r4 - r11} ; push r4 - r11 register
;STMFD r0!, {lr} ; push exec_return register
#if defined ( __ARMVFP__ )
SUB r0, r0, #0x04 ; push dummy for flag
STR lr, [r0]
#endif
SUB r0, r0, #0x04
STR lr, [r0]
TST lr, #0x04 ; if(!EXC_RETURN[2])
BEQ _update_msp
MSR psp, r0 ; update stack pointer to PSP.
B _update_done
_update_msp
MSR msp, r0 ; update stack pointer to MSP.
_update_done
PUSH {lr}
BL rt_hw_hard_fault_exception
POP {lr}
ORR lr, lr, #0x04
BX lr
END
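
All three toolchain ports export the same PRIMASK-based pair, which kernel and driver code use for short critical sections. Typical usage:

/* Typical critical section built on the routines above. */
rt_base_t level;

level = rt_hw_interrupt_disable();   /* returns old PRIMASK, then CPSID I */
/* ... touch data shared with interrupt handlers ... */
rt_hw_interrupt_enable(level);       /* restores PRIMASK; safe to nest */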

View File

@@ -0,0 +1,257 @@
;/*
; * Copyright (c) 2006-2018, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2009-01-17 Bernard first version.
; * 2012-01-01 aozima support context switch load/store FPU register.
; * 2013-06-18 aozima add restore MSP feature.
; * 2013-06-23 aozima support lazy stack optimized.
; * 2018-07-24 aozima enhance the hard fault exception handler.
; */
;/**
; * @addtogroup cortex-m4
; */
;/*@{*/
SCB_VTOR EQU 0xE000ED08 ; Vector Table Offset Register
NVIC_INT_CTRL EQU 0xE000ED04 ; interrupt control state register
NVIC_SYSPRI2 EQU 0xE000ED20 ; system priority register (2)
NVIC_PENDSV_PRI EQU 0xFFFF0000 ; PendSV and SysTick priority value (lowest)
NVIC_PENDSVSET EQU 0x10000000 ; value to trigger PendSV exception
AREA |.text|, CODE, READONLY, ALIGN=2
THUMB
REQUIRE8
PRESERVE8
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
;/*
; * rt_base_t rt_hw_interrupt_disable();
; */
rt_hw_interrupt_disable PROC
EXPORT rt_hw_interrupt_disable
MRS r0, PRIMASK
CPSID I
BX LR
ENDP
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; */
rt_hw_interrupt_enable PROC
EXPORT rt_hw_interrupt_enable
MSR PRIMASK, r0
BX LR
ENDP
;/*
; * void rt_hw_context_switch(rt_uint32_t from, rt_uint32_t to);
; * r0 --> from
; * r1 --> to
; */
rt_hw_context_switch_interrupt
EXPORT rt_hw_context_switch_interrupt
rt_hw_context_switch PROC
EXPORT rt_hw_context_switch
; set rt_thread_switch_interrupt_flag to 1
LDR r2, =rt_thread_switch_interrupt_flag
LDR r3, [r2]
CMP r3, #1
BEQ _reswitch
MOV r3, #1
STR r3, [r2]
LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
STR r0, [r2]
_reswitch
LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
STR r1, [r2]
LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch)
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
BX LR
ENDP
; r0 --> switch from thread stack
; r1 --> switch to thread stack
; psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
PendSV_Handler PROC
EXPORT PendSV_Handler
; disable interrupt to protect context switch
MRS r2, PRIMASK
CPSID I
; get rt_thread_switch_interrupt_flag
LDR r0, =rt_thread_switch_interrupt_flag
LDR r1, [r0]
CBZ r1, pendsv_exit ; pendsv already handled
; clear rt_thread_switch_interrupt_flag to 0
MOV r1, #0x00
STR r1, [r0]
LDR r0, =rt_interrupt_from_thread
LDR r1, [r0]
CBZ r1, switch_to_thread ; skip register save at the first time
MRS r1, psp ; get from thread stack pointer
IF {FPU} != "SoftVFP"
TST lr, #0x10 ; if(!EXC_RETURN[4])
VSTMFDEQ r1!, {d8 - d15} ; push FPU register s16~s31
ENDIF
STMFD r1!, {r4 - r11} ; push r4 - r11 register
IF {FPU} != "SoftVFP"
MOV r4, #0x00 ; flag = 0
TST lr, #0x10 ; if(!EXC_RETURN[4])
MOVEQ r4, #0x01 ; flag = 1
STMFD r1!, {r4} ; push flag
ENDIF
LDR r0, [r0]
STR r1, [r0] ; update from thread stack pointer
switch_to_thread
LDR r1, =rt_interrupt_to_thread
LDR r1, [r1]
LDR r1, [r1] ; load thread stack pointer
IF {FPU} != "SoftVFP"
LDMFD r1!, {r3} ; pop flag
ENDIF
LDMFD r1!, {r4 - r11} ; pop r4 - r11 register
IF {FPU} != "SoftVFP"
CMP r3, #0 ; if(flag_r3 != 0)
VLDMFDNE r1!, {d8 - d15} ; pop FPU register s16~s31
ENDIF
MSR psp, r1 ; update stack pointer
IF {FPU} != "SoftVFP"
ORR lr, lr, #0x10 ; lr |= (1 << 4), clean FPCA.
CMP r3, #0 ; if(flag_r3 != 0)
BICNE lr, lr, #0x10 ; lr &= ~(1 << 4), set FPCA.
ENDIF
pendsv_exit
; restore interrupt
MSR PRIMASK, r2
ORR lr, lr, #0x04
BX lr
ENDP
;/*
; * void rt_hw_context_switch_to(rt_uint32_t to);
; * r0 --> to
; * this function performs the first thread switch
; */
rt_hw_context_switch_to PROC
EXPORT rt_hw_context_switch_to
; set to thread
LDR r1, =rt_interrupt_to_thread
STR r0, [r1]
IF {FPU} != "SoftVFP"
; CLEAR CONTROL.FPCA
MRS r2, CONTROL ; read
BIC r2, #0x04 ; modify
MSR CONTROL, r2 ; write-back
ENDIF
; set from thread to 0
LDR r1, =rt_interrupt_from_thread
MOV r0, #0x0
STR r0, [r1]
; set interrupt flag to 1
LDR r1, =rt_thread_switch_interrupt_flag
MOV r0, #1
STR r0, [r1]
; set the PendSV and SysTick exception priority
LDR r0, =NVIC_SYSPRI2
LDR r1, =NVIC_PENDSV_PRI
LDR.W r2, [r0,#0x00] ; read
ORR r1,r1,r2 ; modify
STR r1, [r0] ; write-back
; trigger the PendSV exception (causes context switch)
LDR r0, =NVIC_INT_CTRL
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
; restore MSP
LDR r0, =SCB_VTOR
LDR r0, [r0]
LDR r0, [r0]
MSR msp, r0
; enable interrupts at processor level
CPSIE F
CPSIE I
; ensure PendSV exception taken place before subsequent operation
DSB
ISB
; never reach here!
ENDP
; compatible with old version
rt_hw_interrupt_thread_switch PROC
EXPORT rt_hw_interrupt_thread_switch
BX lr
ENDP
IMPORT rt_hw_hard_fault_exception
EXPORT HardFault_Handler
EXPORT MemManage_Handler
HardFault_Handler PROC
MemManage_Handler
; get current context
TST lr, #0x04 ; if(!EXC_RETURN[2])
ITE EQ
MRSEQ r0, msp ; [2]=0 ==> Z=1, get fault context from handler.
MRSNE r0, psp ; [2]=1 ==> Z=0, get fault context from thread.
STMFD r0!, {r4 - r11} ; push r4 - r11 register
IF {FPU} != "SoftVFP"
STMFD r0!, {lr} ; push dummy for flag
ENDIF
STMFD r0!, {lr} ; push exec_return register
TST lr, #0x04 ; if(!EXC_RETURN[2])
ITE EQ
MSREQ msp, r0 ; [2]=0 ==> Z=1, update stack pointer to MSP.
MSRNE psp, r0 ; [2]=1 ==> Z=0, update stack pointer to PSP.
PUSH {lr}
BL rt_hw_hard_fault_exception
POP {lr}
ORR lr, lr, #0x04
BX lr
ENDP
ALIGN 4
END
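
The fault handlers above key everything off two EXC_RETURN bits: bit 2 selects which stack holds the fault frame, and bit 4 indicates whether an extended FPU frame was stacked. A small C sketch of that decoding (illustrative only):

/* Illustrative decoding of the EXC_RETURN tests in the handlers above. */
static void decode_exc_return(rt_uint32_t exc_return)
{
    int frame_on_psp = (exc_return & (1U << 2)) != 0; /* bit 2: 1 = PSP, 0 = MSP */
    int fpu_frame    = (exc_return & (1U << 4)) == 0; /* bit 4: 0 = FP frame stacked */
    (void)frame_on_psp;
    (void)fpu_frame;
}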

View File

@@ -0,0 +1,96 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-04-02 tanek first implementation
* 2019-04-27 misonyo update to cortex-m7 series
*/
#include <rtthread.h>
#include <rthw.h>
#include <rtdef.h>
#include <board.h>
/* The L1-caches on all Cortex®-M7s are divided into lines of 32 bytes. */
#define L1CACHE_LINESIZE_BYTE (32)
#ifdef RT_USING_CACHE
void rt_hw_cpu_icache_enable(void)
{
SCB_EnableICache();
}
void rt_hw_cpu_icache_disable(void)
{
SCB_DisableICache();
}
rt_base_t rt_hw_cpu_icache_status(void)
{
return 0;
}
void rt_hw_cpu_icache_ops(int ops, void* addr, int size)
{
rt_uint32_t address = (rt_uint32_t)addr & (rt_uint32_t) ~(L1CACHE_LINESIZE_BYTE - 1);
/* extend the size to cover the leading partial cache line */
rt_int32_t size_byte = size + ((rt_uint32_t)addr - address);
rt_uint32_t linesize = L1CACHE_LINESIZE_BYTE;
if (ops & RT_HW_CACHE_INVALIDATE)
{
__DSB();
while (size_byte > 0)
{
SCB->ICIMVAU = address;
address += linesize;
size_byte -= linesize;
}
__DSB();
__ISB();
}
}
void rt_hw_cpu_dcache_enable(void)
{
SCB_EnableDCache();
}
void rt_hw_cpu_dcache_disable(void)
{
SCB_DisableDCache();
}
rt_base_t rt_hw_cpu_dcache_status(void)
{
return 0;
}
void rt_hw_cpu_dcache_ops(int ops, void* addr, int size)
{
rt_uint32_t startAddr = (rt_uint32_t)addr & (rt_uint32_t)~(L1CACHE_LINESIZE_BYTE - 1);
rt_uint32_t size_byte = size + (rt_uint32_t)addr - startAddr;
rt_uint32_t clean_invalid = RT_HW_CACHE_FLUSH | RT_HW_CACHE_INVALIDATE;
if ((ops & clean_invalid) == clean_invalid)
{
SCB_CleanInvalidateDCache_by_Addr((rt_uint32_t *)startAddr, size_byte);
}
else if (ops & RT_HW_CACHE_FLUSH)
{
SCB_CleanDCache_by_Addr((rt_uint32_t *)startAddr, size_byte);
}
else if (ops & RT_HW_CACHE_INVALIDATE)
{
SCB_InvalidateDCache_by_Addr((rt_uint32_t *)startAddr, size_byte);
}
else
{
RT_ASSERT(0);
}
}
#endif /* RT_USING_CACHE */
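
Typical use pairs these operations with DMA: flush before a device reads a buffer the CPU wrote, invalidate before the CPU reads a buffer a device wrote. A sketch (function and buffer names are placeholders):

/* Illustrative DMA pairing; names and lengths are placeholders. */
static void dma_cache_example(void *tx_buf, int tx_len, void *rx_buf, int rx_len)
{
    /* CPU wrote tx_buf; push the dirty lines out to memory. */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, tx_buf, tx_len);
    /* ... start DMA transmit; later, a DMA receive completes ... */

    /* Device wrote rx_buf; drop stale cache lines before reading. */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, rx_buf, rx_len);
}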

View File

@@ -0,0 +1,523 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-10-21 Bernard the first version.
* 2011-10-27 aozima update for cortex-M4 FPU.
* 2011-12-31 aozima fixed stack align issues.
* 2012-01-01 aozima support context switch load/store FPU register.
* 2012-12-11 lgnq fixed the coding style.
* 2012-12-23 aozima stack addr align to 8byte.
* 2012-12-29 Bernard Add exception hook.
* 2013-06-23 aozima support lazy stack optimized.
* 2018-07-24 aozima enhancement hard fault exception handler.
* 2019-07-03 yangjie add __rt_ffs() for armclang.
*/
#include <rtthread.h>
#ifdef RT_USING_HW_STACK_GUARD
#include <mprotect.h>
#endif
#if /* ARMCC */ ( (defined ( __CC_ARM ) && defined ( __TARGET_FPU_VFP )) \
/* Clang */ || (defined ( __clang__ ) && defined ( __VFP_FP__ ) && !defined(__SOFTFP__)) \
/* IAR */ || (defined ( __ICCARM__ ) && defined ( __ARMVFP__ )) \
/* GNU */ || (defined ( __GNUC__ ) && defined ( __VFP_FP__ ) && !defined(__SOFTFP__)) )
#define USE_FPU 1
#else
#define USE_FPU 0
#endif
/* exception and interrupt handler table */
rt_uint32_t rt_interrupt_from_thread;
rt_uint32_t rt_interrupt_to_thread;
rt_uint32_t rt_thread_switch_interrupt_flag;
/* exception hook */
static rt_err_t (*rt_exception_hook)(void *context) = RT_NULL;
struct exception_stack_frame
{
rt_uint32_t r0;
rt_uint32_t r1;
rt_uint32_t r2;
rt_uint32_t r3;
rt_uint32_t r12;
rt_uint32_t lr;
rt_uint32_t pc;
rt_uint32_t psr;
};
struct stack_frame
{
#if USE_FPU
rt_uint32_t flag;
#endif /* USE_FPU */
/* r4 ~ r11 register */
rt_uint32_t r4;
rt_uint32_t r5;
rt_uint32_t r6;
rt_uint32_t r7;
rt_uint32_t r8;
rt_uint32_t r9;
rt_uint32_t r10;
rt_uint32_t r11;
struct exception_stack_frame exception_stack_frame;
};
struct exception_stack_frame_fpu
{
rt_uint32_t r0;
rt_uint32_t r1;
rt_uint32_t r2;
rt_uint32_t r3;
rt_uint32_t r12;
rt_uint32_t lr;
rt_uint32_t pc;
rt_uint32_t psr;
#if USE_FPU
/* FPU register */
rt_uint32_t S0;
rt_uint32_t S1;
rt_uint32_t S2;
rt_uint32_t S3;
rt_uint32_t S4;
rt_uint32_t S5;
rt_uint32_t S6;
rt_uint32_t S7;
rt_uint32_t S8;
rt_uint32_t S9;
rt_uint32_t S10;
rt_uint32_t S11;
rt_uint32_t S12;
rt_uint32_t S13;
rt_uint32_t S14;
rt_uint32_t S15;
rt_uint32_t FPSCR;
rt_uint32_t NO_NAME;
#endif
};
struct stack_frame_fpu
{
rt_uint32_t flag;
/* r4 ~ r11 register */
rt_uint32_t r4;
rt_uint32_t r5;
rt_uint32_t r6;
rt_uint32_t r7;
rt_uint32_t r8;
rt_uint32_t r9;
rt_uint32_t r10;
rt_uint32_t r11;
#if USE_FPU
/* FPU register s16 ~ s31 */
rt_uint32_t s16;
rt_uint32_t s17;
rt_uint32_t s18;
rt_uint32_t s19;
rt_uint32_t s20;
rt_uint32_t s21;
rt_uint32_t s22;
rt_uint32_t s23;
rt_uint32_t s24;
rt_uint32_t s25;
rt_uint32_t s26;
rt_uint32_t s27;
rt_uint32_t s28;
rt_uint32_t s29;
rt_uint32_t s30;
rt_uint32_t s31;
#endif
struct exception_stack_frame_fpu exception_stack_frame;
};
rt_uint8_t *rt_hw_stack_init(void *tentry,
void *parameter,
rt_uint8_t *stack_addr,
void *texit)
{
struct stack_frame *stack_frame;
rt_uint8_t *stk;
unsigned long i;
stk = stack_addr + sizeof(rt_uint32_t);
stk = (rt_uint8_t *)RT_ALIGN_DOWN((rt_uint32_t)stk, 8);
stk -= sizeof(struct stack_frame);
stack_frame = (struct stack_frame *)stk;
/* init all register */
for (i = 0; i < sizeof(struct stack_frame) / sizeof(rt_uint32_t); i ++)
{
((rt_uint32_t *)stack_frame)[i] = 0xdeadbeef;
}
stack_frame->exception_stack_frame.r0 = (unsigned long)parameter; /* r0 : argument */
stack_frame->exception_stack_frame.r1 = 0; /* r1 */
stack_frame->exception_stack_frame.r2 = 0; /* r2 */
stack_frame->exception_stack_frame.r3 = 0; /* r3 */
stack_frame->exception_stack_frame.r12 = 0; /* r12 */
stack_frame->exception_stack_frame.lr = (unsigned long)texit; /* lr */
stack_frame->exception_stack_frame.pc = (unsigned long)tentry; /* entry point, pc */
stack_frame->exception_stack_frame.psr = 0x01000000L; /* PSR */
#if USE_FPU
stack_frame->flag = 0;
#endif /* USE_FPU */
/* return task's current stack address */
return stk;
}
#ifdef RT_USING_HW_STACK_GUARD
void rt_hw_stack_guard_init(rt_thread_t thread)
{
rt_mem_region_t stack_top_region, stack_bottom_region;
rt_ubase_t stack_bottom = (rt_ubase_t)thread->stack_addr;
rt_ubase_t stack_top = (rt_ubase_t)((rt_uint8_t *)thread->stack_addr + thread->stack_size);
rt_ubase_t stack_bottom_region_start = RT_ALIGN(stack_bottom, MPU_MIN_REGION_SIZE);
rt_ubase_t stack_top_region_start = RT_ALIGN_DOWN(stack_top - MPU_MIN_REGION_SIZE, MPU_MIN_REGION_SIZE);
stack_top_region.start = (void *)stack_top_region_start;
stack_top_region.size = MPU_MIN_REGION_SIZE;
stack_top_region.attr = RT_MEM_REGION_P_NA_U_NA;
stack_bottom_region.start = (void *)stack_bottom_region_start;
stack_bottom_region.size = MPU_MIN_REGION_SIZE;
stack_bottom_region.attr = RT_MEM_REGION_P_NA_U_NA;
rt_mprotect_add_region(thread, &stack_top_region);
rt_mprotect_add_region(thread, &stack_bottom_region);
thread->stack_buf = thread->stack_addr;
thread->stack_addr = (void *)(stack_bottom_region_start + MPU_MIN_REGION_SIZE);
thread->stack_size = (rt_uint32_t)(stack_top_region_start - stack_bottom_region_start - MPU_MIN_REGION_SIZE);
}
#endif
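/* Worked example of the carving above, with MPU_MIN_REGION_SIZE = 32 and
 * placeholder values stack_addr = 0x20000000, stack_size = 1024:
 *   bottom guard: [0x20000000, 0x20000020)  no access
 *   top guard:    [0x200003E0, 0x20000400)  no access
 *   usable stack: 0x20000020 .. 0x200003E0  (960 bytes)
 * Overrunning either end of the stack now faults in MemManage_Handler
 * instead of silently corrupting neighbouring memory. */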
/**
* This function sets the hook that is invoked during fault exception handling.
*
* @param exception_handle the exception handling hook function.
*/
void rt_hw_exception_install(rt_err_t (*exception_handle)(void *context))
{
rt_exception_hook = exception_handle;
}
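/* For example, an application hook can log the fault and return a value
 * other than RT_EOK so the default register dump below still runs
 * (hook body is illustrative): */
static rt_err_t my_fault_hook(void *context)
{
    struct exception_stack_frame *frame = (struct exception_stack_frame *)context;
    rt_kprintf("fault at pc: 0x%08x\n", frame->pc);
    return -RT_ERROR; /* anything but RT_EOK keeps the default handling */
}
/* during startup: rt_hw_exception_install(my_fault_hook); */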
#define SCB_CFSR (*(volatile const unsigned *)0xE000ED28) /* Configurable Fault Status Register */
#define SCB_HFSR (*(volatile const unsigned *)0xE000ED2C) /* HardFault Status Register */
#define SCB_MMAR (*(volatile const unsigned *)0xE000ED34) /* MemManage Fault Address register */
#define SCB_BFAR (*(volatile const unsigned *)0xE000ED38) /* Bus Fault Address Register */
#define SCB_AIRCR (*(volatile unsigned long *)0xE000ED0C) /* Application Interrupt and Reset Control Register */
#define SCB_RESET_VALUE 0x05FA0004 /* writing this value to SCB_AIRCR resets the cpu */
#define SCB_CFSR_MFSR (*(volatile const unsigned char*)0xE000ED28) /* Memory-management Fault Status Register */
#define SCB_CFSR_BFSR (*(volatile const unsigned char*)0xE000ED29) /* Bus Fault Status Register */
#define SCB_CFSR_UFSR (*(volatile const unsigned short*)0xE000ED2A) /* Usage Fault Status Register */
#ifdef RT_USING_FINSH
static void usage_fault_track(void)
{
rt_kprintf("usage fault:\n");
rt_kprintf("SCB_CFSR_UFSR:0x%02X ", SCB_CFSR_UFSR);
if(SCB_CFSR_UFSR & (1<<0))
{
/* [0]:UNDEFINSTR */
rt_kprintf("UNDEFINSTR ");
}
if(SCB_CFSR_UFSR & (1<<1))
{
/* [1]:INVSTATE */
rt_kprintf("INVSTATE ");
}
if(SCB_CFSR_UFSR & (1<<2))
{
/* [2]:INVPC */
rt_kprintf("INVPC ");
}
if(SCB_CFSR_UFSR & (1<<3))
{
/* [3]:NOCP */
rt_kprintf("NOCP ");
}
if(SCB_CFSR_UFSR & (1<<8))
{
/* [8]:UNALIGNED */
rt_kprintf("UNALIGNED ");
}
if(SCB_CFSR_UFSR & (1<<9))
{
/* [9]:DIVBYZERO */
rt_kprintf("DIVBYZERO ");
}
rt_kprintf("\n");
}
static void bus_fault_track(void)
{
rt_kprintf("bus fault:\n");
rt_kprintf("SCB_CFSR_BFSR:0x%02X ", SCB_CFSR_BFSR);
if(SCB_CFSR_BFSR & (1<<0))
{
/* [0]:IBUSERR */
rt_kprintf("IBUSERR ");
}
if(SCB_CFSR_BFSR & (1<<1))
{
/* [1]:PRECISERR */
rt_kprintf("PRECISERR ");
}
if(SCB_CFSR_BFSR & (1<<2))
{
/* [2]:IMPRECISERR */
rt_kprintf("IMPRECISERR ");
}
if(SCB_CFSR_BFSR & (1<<3))
{
/* [3]:UNSTKERR */
rt_kprintf("UNSTKERR ");
}
if(SCB_CFSR_BFSR & (1<<4))
{
/* [4]:STKERR */
rt_kprintf("STKERR ");
}
if(SCB_CFSR_BFSR & (1<<7))
{
rt_kprintf("SCB->BFAR:%08X\n", SCB_BFAR);
}
else
{
rt_kprintf("\n");
}
}
static void mem_manage_fault_track(void)
{
rt_kprintf("mem manage fault:\n");
rt_kprintf("SCB_CFSR_MFSR:0x%02X ", SCB_CFSR_MFSR);
if(SCB_CFSR_MFSR & (1<<0))
{
/* [0]:IACCVIOL */
rt_kprintf("IACCVIOL ");
}
if(SCB_CFSR_MFSR & (1<<1))
{
/* [1]:DACCVIOL */
rt_kprintf("DACCVIOL ");
}
if(SCB_CFSR_MFSR & (1<<3))
{
/* [3]:MUNSTKERR */
rt_kprintf("MUNSTKERR ");
}
if(SCB_CFSR_MFSR & (1<<4))
{
/* [4]:MSTKERR */
rt_kprintf("MSTKERR ");
}
if(SCB_CFSR_MFSR & (1<<7))
{
/* [7]:MMARVALID */
rt_kprintf("SCB->MMAR:%08X\n", SCB_MMAR);
}
else
{
rt_kprintf("\n");
}
}
static void hard_fault_track(void)
{
if(SCB_HFSR & (1UL<<1))
{
/* [1]:VECTBL, Indicates hard fault is caused by failed vector fetch. */
rt_kprintf("failed vector fetch\n");
}
if(SCB_HFSR & (1UL<<30))
{
/* [30]:FORCED, Indicates hard fault is taken because of bus fault,
memory management fault, or usage fault. */
if(SCB_CFSR_BFSR)
{
bus_fault_track();
}
if(SCB_CFSR_MFSR)
{
mem_manage_fault_track();
}
if(SCB_CFSR_UFSR)
{
usage_fault_track();
}
}
if(SCB_HFSR & (1UL<<31))
{
/* [31]:DEBUGEVT, Indicates hard fault is triggered by debug event. */
rt_kprintf("debug event\n");
}
}
#endif /* RT_USING_FINSH */
struct exception_info
{
rt_uint32_t exc_return;
struct stack_frame stack_frame;
};
void rt_hw_hard_fault_exception(struct exception_info *exception_info)
{
#if defined(RT_USING_FINSH) && defined(MSH_USING_BUILT_IN_COMMANDS)
extern long list_thread(void);
#endif
struct exception_stack_frame *exception_stack = &exception_info->stack_frame.exception_stack_frame;
struct stack_frame *context = &exception_info->stack_frame;
if (rt_exception_hook != RT_NULL)
{
rt_err_t result;
result = rt_exception_hook(exception_stack);
if (result == RT_EOK) return;
}
rt_kprintf("psr: 0x%08x\n", context->exception_stack_frame.psr);
rt_kprintf("r00: 0x%08x\n", context->exception_stack_frame.r0);
rt_kprintf("r01: 0x%08x\n", context->exception_stack_frame.r1);
rt_kprintf("r02: 0x%08x\n", context->exception_stack_frame.r2);
rt_kprintf("r03: 0x%08x\n", context->exception_stack_frame.r3);
rt_kprintf("r04: 0x%08x\n", context->r4);
rt_kprintf("r05: 0x%08x\n", context->r5);
rt_kprintf("r06: 0x%08x\n", context->r6);
rt_kprintf("r07: 0x%08x\n", context->r7);
rt_kprintf("r08: 0x%08x\n", context->r8);
rt_kprintf("r09: 0x%08x\n", context->r9);
rt_kprintf("r10: 0x%08x\n", context->r10);
rt_kprintf("r11: 0x%08x\n", context->r11);
rt_kprintf("r12: 0x%08x\n", context->exception_stack_frame.r12);
rt_kprintf(" lr: 0x%08x\n", context->exception_stack_frame.lr);
rt_kprintf(" pc: 0x%08x\n", context->exception_stack_frame.pc);
if (exception_info->exc_return & (1 << 2))
{
rt_kprintf("hard fault on thread: %s\r\n\r\n", rt_thread_self()->parent.name);
#if defined(RT_USING_FINSH) && defined(MSH_USING_BUILT_IN_COMMANDS)
list_thread();
#endif
}
else
{
rt_kprintf("hard fault on handler\r\n\r\n");
}
if ( (exception_info->exc_return & 0x10) == 0)
{
rt_kprintf("FPU active!\r\n");
}
#ifdef RT_USING_FINSH
hard_fault_track();
#endif /* RT_USING_FINSH */
while (1);
}
/**
* reset CPU
*/
void rt_hw_cpu_reset(void)
{
SCB_AIRCR = SCB_RESET_VALUE;
}
#ifdef RT_USING_CPU_FFS
/**
* This function finds the first bit set (beginning with the least significant bit)
* in value and returns the index of that bit.
*
* Bits are numbered starting at 1 (the least significant bit). A return value of
* zero from any of these functions means that the argument was zero.
*
* @return return the index of the first bit set. If value is 0, then this function
* shall return 0.
*/
#if defined(__CC_ARM)
__asm int __rt_ffs(int value)
{
CMP r0, #0x00
BEQ exit
RBIT r0, r0
CLZ r0, r0
ADDS r0, r0, #0x01
exit
BX lr
}
#elif defined(__clang__)
int __rt_ffs(int value)
{
__asm volatile(
"CMP %1, #0x00 \n"
"BEQ 1f \n"
"RBIT %1, %1 \n"
"CLZ %0, %1 \n"
"ADDS %0, %0, #0x01 \n"
"1: \n"
: "=r"(value)
: "r"(value)
);
return value;
}
#elif defined(__IAR_SYSTEMS_ICC__)
int __rt_ffs(int value)
{
if (value == 0) return value;
asm("RBIT %0, %1" : "=r"(value) : "r"(value));
asm("CLZ %0, %1" : "=r"(value) : "r"(value));
asm("ADDS %0, %1, #0x01" : "=r"(value) : "r"(value));
return value;
}
#elif defined(__GNUC__)
int __rt_ffs(int value)
{
return __builtin_ffs(value);
}
#endif
#endif
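
A few examples of the 1-based contract documented above (illustrative asserts):

/* __rt_ffs examples: bit positions are 1-based, zero maps to zero. */
static void rt_ffs_examples(void)
{
    RT_ASSERT(__rt_ffs(0x0) == 0);  /* zero input -> 0 */
    RT_ASSERT(__rt_ffs(0x1) == 1);  /* least significant bit -> index 1 */
    RT_ASSERT(__rt_ffs(0x8) == 4);  /* 0b1000 -> index 4 */
    RT_ASSERT(__rt_ffs(0x6) == 2);  /* lowest set bit wins */
}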

View File

@@ -0,0 +1,23 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#ifndef CPUPORT_H__
#define CPUPORT_H__
#ifdef RT_USING_SMP
typedef union {
unsigned long slock;
struct __arch_tickets {
unsigned short owner;
unsigned short next;
} tickets;
} rt_hw_spinlock_t;
#endif
#endif /*CPUPORT_H__*/
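
The union implements a ticket lock: next hands out tickets, owner is the ticket currently being served, and the lock is free when the two match. A portable C sketch of the semantics (illustration only; a real port takes the ticket atomically with exclusive load/store):

static void ticket_lock_sketch(rt_hw_spinlock_t *lock)
{
    unsigned short ticket = lock->tickets.next++; /* take a ticket (atomic in a real port) */
    while (lock->tickets.owner != ticket)
        ;                                         /* spin until our number is called */
}

static void ticket_unlock_sketch(rt_hw_spinlock_t *lock)
{
    lock->tickets.owner++;                        /* serve the next waiter */
}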

View File

@@ -0,0 +1,299 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-09-25 tangzz98 the first version
*/
#include <rtdef.h>
#include <mprotect.h>
#define DBG_ENABLE
#define DBG_SECTION_NAME "MEMORY PROTECTION"
#define DBG_LEVEL DBG_ERROR
#include <rtdbg.h>
#define MEM_REGION_TO_MPU_INDEX(thread, region) ((((rt_size_t)region - (rt_size_t)(thread->mem_regions)) / sizeof(rt_mem_region_t)) + NUM_STATIC_REGIONS)
extern rt_mem_region_t *rt_mprotect_find_free_region(rt_thread_t thread);
extern rt_mem_region_t *rt_mprotect_find_region(rt_thread_t thread, rt_mem_region_t *region);
static rt_hw_mpu_exception_hook_t mem_manage_hook = RT_NULL;
rt_weak rt_uint32_t rt_hw_mpu_region_default_attr(rt_mem_region_t *region)
{
static rt_uint32_t default_mem_attr[] =
{
NORMAL_OUTER_INNER_WRITE_THROUGH_NON_SHAREABLE,
NORMAL_OUTER_INNER_WRITE_BACK_WRITE_READ_ALLOCATE_NON_SHAREABLE,
DEVICE_NON_SHAREABLE,
NORMAL_OUTER_INNER_WRITE_BACK_WRITE_READ_ALLOCATE_NON_SHAREABLE,
NORMAL_OUTER_INNER_WRITE_THROUGH_NON_SHAREABLE,
DEVICE_SHAREABLE,
DEVICE_NON_SHAREABLE
};
rt_uint32_t attr = 0U;
if ((rt_uint32_t)region->start >= 0xE0000000U)
{
attr = ((rt_uint32_t)region->start >= 0xE0100000U) ? STRONGLY_ORDERED_SHAREABLE : DEVICE_SHAREABLE;
}
else
{
attr = default_mem_attr[((rt_uint32_t)region->start & ~0xFFFFFFFU) >> 29U];
}
return attr;
}
static rt_uint32_t _mpu_rasr(rt_mem_region_t *region)
{
rt_uint32_t rasr = 0U;
if ((region->attr.rasr & RESERVED) == RESERVED)
{
rasr |= rt_hw_mpu_region_default_attr(region);
rasr |= region->attr.rasr & (MPU_RASR_XN_Msk | MPU_RASR_AP_Msk);
}
else
{
rasr |= region->attr.rasr & MPU_RASR_ATTRS_Msk;
}
rasr |= ((32U - __builtin_clz(region->size - 1U) - 2U + 1U) << MPU_RASR_SIZE_Pos) & MPU_RASR_SIZE_Msk; /* SIZE field encodes log2(size) - 1 */
rasr |= MPU_RASR_ENABLE_Msk;
return rasr;
}
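/* Worked check of the SIZE encoding above for a 32-byte region:
 * size - 1 = 31, __builtin_clz(31) = 27, so 32 - 27 - 2 + 1 = 4,
 * which matches the architectural encoding log2(32) - 1 = 4. */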
rt_bool_t rt_hw_mpu_region_valid(rt_mem_region_t *region)
{
if (region->size < MPU_MIN_REGION_SIZE)
{
LOG_E("Region size is too small");
return RT_FALSE;
}
if ((region->size & (region->size - 1U)) != 0U)
{
LOG_E("Region size is not power of 2");
return RT_FALSE;
}
if (((rt_uint32_t)region->start & (region->size - 1U)) != 0U)
{
LOG_E("Region is not naturally aligned");
return RT_FALSE;
}
return RT_TRUE;
}
rt_err_t rt_hw_mpu_init(void)
{
extern rt_mem_region_t static_regions[NUM_STATIC_REGIONS];
rt_uint8_t num_mpu_regions;
rt_uint8_t num_dynamic_regions;
rt_uint8_t index;
num_mpu_regions = (rt_uint8_t)((MPU->TYPE & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos);
if (num_mpu_regions == 0U)
{
LOG_E("Hardware does not support MPU");
return RT_ERROR;
}
if (num_mpu_regions != NUM_MEM_REGIONS)
{
LOG_E("Incorrect setting of NUM_MEM_REGIONS");
LOG_E("NUM_MEM_REGIONS = %d, hardware support %d MPU regions", NUM_MEM_REGIONS, num_mpu_regions);
return RT_ERROR;
}
num_dynamic_regions = NUM_DYNAMIC_REGIONS + NUM_EXCLUSIVE_REGIONS;
if (num_dynamic_regions + NUM_STATIC_REGIONS > num_mpu_regions)
{
LOG_E("Insufficient MPU regions: %d hardware MPU regions", num_mpu_regions);
#ifdef RT_USING_HW_STACK_GUARD
LOG_E("Current configuration requires %d static regions + %d configurable regions + %d exclusive regions + %d stack guard regions", NUM_STATIC_REGIONS, NUM_CONFIGURABLE_REGIONS, NUM_EXCLUSIVE_REGIONS, 2);
#else
LOG_E("Current configuration requires %d static regions + %d configurable regions + %d exclusive regions", NUM_STATIC_REGIONS, NUM_CONFIGURABLE_REGIONS, NUM_EXCLUSIVE_REGIONS);
#endif
return RT_ERROR;
}
ARM_MPU_Disable();
for (index = 0U; index < NUM_STATIC_REGIONS; index++)
{
if (rt_hw_mpu_region_valid(&(static_regions[index])) == RT_FALSE)
{
return RT_ERROR;
}
static_regions[index].attr.rasr = _mpu_rasr(&(static_regions[index]));
ARM_MPU_SetRegion(ARM_MPU_RBAR(index, (rt_uint32_t)static_regions[index].start), static_regions[index].attr.rasr);
}
/* Enable background region. */
ARM_MPU_Enable(MPU_CTRL_PRIVDEFENA_Msk);
return RT_EOK;
}
rt_err_t rt_hw_mpu_add_region(rt_thread_t thread, rt_mem_region_t *region)
{
rt_uint8_t index;
rt_mem_region_t *free_region;
if (rt_hw_mpu_region_valid(region) == RT_FALSE)
{
return RT_ERROR;
}
region->attr.rasr = _mpu_rasr(region);
if (thread == RT_NULL)
{
return RT_EOK;
}
rt_enter_critical();
free_region = rt_mprotect_find_free_region(thread);
if (free_region == RT_NULL)
{
rt_exit_critical();
LOG_E("Insufficient regions");
return RT_ERROR;
}
rt_memcpy(free_region, region, sizeof(rt_mem_region_t));
if (thread == rt_thread_self())
{
index = MEM_REGION_TO_MPU_INDEX(thread, free_region);
ARM_MPU_SetRegion(ARM_MPU_RBAR(index, (rt_uint32_t)region->start), region->attr.rasr);
}
rt_exit_critical();
return RT_EOK;
}
rt_err_t rt_hw_mpu_delete_region(rt_thread_t thread, rt_mem_region_t *region)
{
rt_uint8_t index;
rt_enter_critical();
rt_mem_region_t *found_region = rt_mprotect_find_region(thread, region);
if (found_region == RT_NULL)
{
rt_exit_critical();
LOG_E("Region not found");
return RT_ERROR;
}
rt_memset(found_region, 0, sizeof(rt_mem_region_t));
if (thread == rt_thread_self())
{
index = MEM_REGION_TO_MPU_INDEX(thread, found_region);
ARM_MPU_ClrRegion(index);
}
rt_exit_critical();
return RT_EOK;
}
rt_err_t rt_hw_mpu_update_region(rt_thread_t thread, rt_mem_region_t *region)
{
rt_uint8_t index;
if (rt_hw_mpu_region_valid(region) == RT_FALSE)
{
return RT_ERROR;
}
region->attr.rasr = _mpu_rasr(region);
rt_enter_critical();
rt_mem_region_t *old_region = rt_mprotect_find_region(thread, region);
if (old_region == RT_NULL)
{
rt_exit_critical();
LOG_E("Region not found");
return RT_ERROR;
}
rt_memcpy(old_region, region, sizeof(rt_mem_region_t));
if (thread == rt_thread_self())
{
index = MEM_REGION_TO_MPU_INDEX(thread, old_region);
ARM_MPU_SetRegion(ARM_MPU_RBAR(index, (rt_uint32_t)region->start), region->attr.rasr);
}
rt_exit_critical();
return RT_EOK;
}
rt_err_t rt_hw_mpu_exception_set_hook(rt_hw_mpu_exception_hook_t hook)
{
mem_manage_hook = hook;
return RT_EOK;
}
void rt_hw_mpu_table_switch(rt_thread_t thread)
{
extern rt_mem_exclusive_region_t exclusive_regions[NUM_EXCLUSIVE_REGIONS];
rt_uint8_t i;
rt_uint8_t index = NUM_STATIC_REGIONS;
if (thread->mem_regions != RT_NULL)
{
for (i = 0U; i < NUM_DYNAMIC_REGIONS; i++)
{
if (((rt_mem_region_t *)thread->mem_regions)[i].size != 0U)
{
ARM_MPU_SetRegion(ARM_MPU_RBAR(index, (rt_uint32_t)(((rt_mem_region_t *)thread->mem_regions)[i].start)), ((rt_mem_region_t *)thread->mem_regions)[i].attr.rasr);
index += 1U;
}
}
}
for (i = 0U; i < NUM_EXCLUSIVE_REGIONS; i++)
{
if ((exclusive_regions[i].owner != RT_NULL) && (exclusive_regions[i].owner != thread))
{
ARM_MPU_SetRegion(ARM_MPU_RBAR(index, (rt_uint32_t)(exclusive_regions[i].region.start)), exclusive_regions[i].region.attr.rasr);
index += 1U;
}
}
for ( ; index < NUM_MEM_REGIONS; index++)
{
ARM_MPU_ClrRegion(index);
}
}
void MemManage_Handler(void)
{
extern rt_mem_region_t static_regions[NUM_STATIC_REGIONS];
extern rt_mem_exclusive_region_t exclusive_regions[NUM_EXCLUSIVE_REGIONS];
rt_mem_exception_info_t info;
rt_int8_t i;
rt_memset(&info, 0, sizeof(rt_mem_exception_info_t));
info.thread = rt_thread_self();
if (SCB->CFSR & SCB_CFSR_MMARVALID_Msk)
{
info.addr = (void *)(SCB->MMFAR);
for (i = NUM_EXCLUSIVE_REGIONS - 1; i >= 0; i--)
{
if ((exclusive_regions[i].owner != RT_NULL) && ((exclusive_regions[i].owner != rt_thread_self())) && ADDR_IN_REGION(info.addr, (rt_mem_region_t *)&(exclusive_regions[i])))
{
rt_memcpy(&(info.region), &(exclusive_regions[i]), sizeof(rt_mem_region_t));
break;
}
}
if (info.region.size == 0U)
{
if (info.thread->mem_regions != RT_NULL)
{
for (i = NUM_DYNAMIC_REGIONS - 1; i >= 0; i--)
{
if ((((rt_mem_region_t *)info.thread->mem_regions)[i].size != 0U) && ADDR_IN_REGION(info.addr, &(((rt_mem_region_t *)info.thread->mem_regions)[i])))
{
rt_memcpy(&(info.region), &(((rt_mem_region_t *)info.thread->mem_regions)[i]), sizeof(rt_mem_region_t));
break;
}
}
}
if (info.region.size == 0U)
{
for (i = NUM_STATIC_REGIONS - 1; i >= 0; i--)
{
if (ADDR_IN_REGION(info.addr, &(static_regions[i])))
{
rt_memcpy(&(info.region), &(static_regions[i]), sizeof(rt_mem_region_t));
break;
}
}
}
}
}
info.mmfsr = (SCB->CFSR & SCB_CFSR_MEMFAULTSR_Msk) >> SCB_CFSR_MEMFAULTSR_Pos;
if (mem_manage_hook != RT_NULL)
{
mem_manage_hook(&info);
}
while (1);
}

View File

@@ -0,0 +1,104 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-09-25 tangzz98 the first version
*/
#ifndef __MPU_H__
#define __MPU_H__
#ifdef RT_USING_MEM_PROTECTION
#include <board.h>
#define MPU_MIN_REGION_SIZE 32U
/* MPU attributes for configuring data region permission */
/* Privileged No Access, Unprivileged No Access */
#define P_NA_U_NA (((0x0 << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk) | MPU_RASR_XN_Msk)
/* Privileged Read Write, Unprivileged No Access */
#define P_RW_U_NA (((0x1 << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk) | MPU_RASR_XN_Msk)
/* Privileged Read Write, Unprivileged Read Only */
#define P_RW_U_RO (((0x2 << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk) | MPU_RASR_XN_Msk)
/* Privileged Read Write, Unprivileged Read Write */
#define P_RW_U_RW (((0x3 << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk) | MPU_RASR_XN_Msk)
/* Privileged Read Only, Unprivileged No Access */
#define P_RO_U_NA (((0x5 << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk) | MPU_RASR_XN_Msk)
/* Privileged Read Only, Unprivileged Read Only */
#define P_RO_U_RO (((0x6 << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk) | MPU_RASR_XN_Msk)
/* MPU attributes for configuring code region permission */
/* Privileged Read Write Execute, Unprivileged Read Write Execute */
#define P_RWX_U_RWX ((0x3 << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk)
/* Privileged Read Write Execute, Unprivileged Read Execute */
#define P_RWX_U_RX ((0x2 << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk)
/* Privileged Read Write Execute, Unprivileged No Access */
#define P_RWX_U_NA ((0x1 << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk)
/* Privileged Read Execute, Unprivileged Read Execute */
#define P_RX_U_RX ((0x6 << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk)
/* Privileged Read Execute, Unprivileged No Access */
#define P_RX_U_NA ((0x5 << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk)
/* MPU attributes for configuring memory type, cacheability and shareability */
#define STRONGLY_ORDERED_SHAREABLE MPU_RASR_S_Msk
#define DEVICE_SHAREABLE (MPU_RASR_B_Msk | MPU_RASR_S_Msk)
#define NORMAL_OUTER_INNER_WRITE_THROUGH_SHAREABLE \
(MPU_RASR_C_Msk | MPU_RASR_S_Msk)
#define NORMAL_OUTER_INNER_WRITE_THROUGH_NON_SHAREABLE MPU_RASR_C_Msk
#define NORMAL_OUTER_INNER_WRITE_BACK_SHAREABLE \
(MPU_RASR_C_Msk | MPU_RASR_B_Msk | MPU_RASR_S_Msk)
#define NORMAL_OUTER_INNER_WRITE_BACK_NON_SHAREABLE \
(MPU_RASR_C_Msk | MPU_RASR_B_Msk)
#define NORMAL_OUTER_INNER_NON_CACHEABLE_SHAREABLE \
((1 << MPU_RASR_TEX_Pos) | MPU_RASR_S_Msk)
#define NORMAL_OUTER_INNER_NON_CACHEABLE_NON_SHAREABLE \
(1 << MPU_RASR_TEX_Pos)
#define NORMAL_OUTER_INNER_WRITE_BACK_WRITE_READ_ALLOCATE_SHAREABLE \
((1 << MPU_RASR_TEX_Pos) |\
MPU_RASR_C_Msk | MPU_RASR_B_Msk | MPU_RASR_S_Msk)
#define NORMAL_OUTER_INNER_WRITE_BACK_WRITE_READ_ALLOCATE_NON_SHAREABLE \
((1 << MPU_RASR_TEX_Pos) | MPU_RASR_C_Msk | MPU_RASR_B_Msk)
#define DEVICE_NON_SHAREABLE (2 << MPU_RASR_TEX_Pos)
#define RESERVED ((2 << MPU_RASR_TEX_Pos) | MPU_RASR_B_Msk)
typedef struct
{
rt_thread_t thread; /* Thread that triggered exception */
void *addr; /* Address of faulting memory access */
rt_mem_region_t region; /* Configurations of the memory region containing the address */
rt_uint8_t mmfsr; /* Content of MemManage Status Register */
} rt_mem_exception_info_t;
typedef void (*rt_hw_mpu_exception_hook_t)(rt_mem_exception_info_t *);
#define RT_ARM_MEM_ATTR(perm, type) ((rt_mem_attr_t){ (perm) | (type)})
/* Convenient macros for configuring data region attributes with default memory type */
#define RT_MEM_REGION_P_NA_U_NA RT_ARM_MEM_ATTR(P_NA_U_NA, RESERVED)
#define RT_MEM_REGION_P_RW_U_RW RT_ARM_MEM_ATTR(P_RW_U_RW, RESERVED)
#define RT_MEM_REGION_P_RW_U_RO RT_ARM_MEM_ATTR(P_RW_U_RO, RESERVED)
#define RT_MEM_REGION_P_RW_U_NA RT_ARM_MEM_ATTR(P_RW_U_NA, RESERVED)
#define RT_MEM_REGION_P_RO_U_RO RT_ARM_MEM_ATTR(P_RO_U_RO, RESERVED)
#define RT_MEM_REGION_P_RO_U_NA RT_ARM_MEM_ATTR(P_RO_U_NA, RESERVED)
/* Convenient macros for configuring code region attributes with default memory type */
#define RT_MEM_REGION_P_RWX_U_RWX RT_ARM_MEM_ATTR(P_RWX_U_RWX, RESERVED)
#define RT_MEM_REGION_P_RWX_U_RX RT_ARM_MEM_ATTR(P_RWX_U_RX, RESERVED)
#define RT_MEM_REGION_P_RWX_U_NA RT_ARM_MEM_ATTR(P_RWX_U_NA, RESERVED)
#define RT_MEM_REGION_P_RX_U_RX RT_ARM_MEM_ATTR(P_RX_U_RX, RESERVED)
#define RT_MEM_REGION_P_RX_U_NA RT_ARM_MEM_ATTR(P_RX_U_NA, RESERVED)
rt_bool_t rt_hw_mpu_region_valid(rt_mem_region_t *region);
rt_err_t rt_hw_mpu_init(void);
rt_err_t rt_hw_mpu_add_region(rt_thread_t thread, rt_mem_region_t *region);
rt_err_t rt_hw_mpu_delete_region(rt_thread_t thread, rt_mem_region_t *region);
rt_err_t rt_hw_mpu_update_region(rt_thread_t thread, rt_mem_region_t *region);
rt_err_t rt_hw_mpu_exception_set_hook(rt_hw_mpu_exception_hook_t hook);
#endif /* RT_USING_MEM_PROTECTION */
#endif /* __MPU_H__ */
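
Putting the header together, a thread-specific privileged-only region can be built from the convenience attributes above; the start address and size below are placeholders, and the region must be a power-of-two size, naturally aligned, and at least MPU_MIN_REGION_SIZE, as rt_hw_mpu_region_valid() enforces:

/* Illustrative region setup; the address and size are placeholders. */
rt_mem_region_t region;

region.start = (void *)0x20000000;      /* naturally aligned         */
region.size  = 1024;                    /* power of two, >= 32 bytes */
region.attr  = RT_MEM_REGION_P_RW_U_NA; /* privileged RW, user none  */

if (rt_hw_mpu_add_region(rt_thread_self(), &region) != RT_EOK)
{
    /* no free region slot for this thread */
}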

View File

@@ -0,0 +1,29 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-09-25 tangzz98 the first version
*/
#ifndef __MPUTYPE_H__
#define __MPUTYPE_H__
#ifdef RT_USING_MEM_PROTECTION
#ifdef RT_USING_HW_STACK_GUARD
#define NUM_DYNAMIC_REGIONS (2 + NUM_CONFIGURABLE_REGIONS)
#else
#define NUM_DYNAMIC_REGIONS (NUM_CONFIGURABLE_REGIONS)
#endif
typedef struct
{
rt_uint32_t rasr;
} rt_mem_attr_t;
#endif /* RT_USING_MEM_PROTECTION */
#endif /* __MPUTYPE_H__ */