feat: kernel/libcpu: fit into ilp32d
This commit is contained in:
parent b6f1b16d7a
commit dfd8ccf262
@@ -172,8 +172,8 @@ void rt_hw_context_switch_interrupt(rt_ubase_t from, rt_ubase_t to, rt_thread_t
  * Hardware Layer Backtrace Service
  */
 struct rt_hw_backtrace_frame {
-    rt_base_t fp;
-    rt_base_t pc;
+    rt_uintptr_t fp;
+    rt_uintptr_t pc;
 };
 
 rt_err_t rt_hw_backtrace_frame_get(rt_thread_t thread, struct rt_hw_backtrace_frame *frame);
@@ -33,8 +33,6 @@ extern "C" {
  */
 
 typedef int                             rt_bool_t;      /**< boolean type */
-typedef signed long                     rt_base_t;      /**< Nbit CPU related data type */
-typedef unsigned long                   rt_ubase_t;     /**< Nbit unsigned CPU related data type */
 
 #ifndef RT_USING_ARCH_DATA_TYPE
 #ifdef RT_USING_LIBC

@@ -63,12 +61,24 @@ typedef unsigned long long              rt_uint64_t;    /**< 64bit unsigned integer type */
 #endif /* RT_USING_LIBC */
 #endif /* RT_USING_ARCH_DATA_TYPE */
 
+#ifdef ARCH_CPU_64BIT
+typedef rt_int64_t                      rt_base_t;      /**< Nbit CPU related data type */
+typedef rt_uint64_t                     rt_ubase_t;     /**< Nbit unsigned CPU related data type */
+#else
+typedef rt_int32_t                      rt_base_t;      /**< Nbit CPU related data type */
+typedef rt_uint32_t                     rt_ubase_t;     /**< Nbit unsigned CPU related data type */
+#endif
+
 #if defined(RT_USING_LIBC) && !defined(RT_USING_NANO)
 typedef size_t                          rt_size_t;      /**< Type for size number */
 typedef ssize_t                         rt_ssize_t;     /**< Used for a count of bytes or an error indication */
+typedef intptr_t                        rt_intptr_t;    /**< Type for signed pointer length integer */
+typedef uintptr_t                       rt_uintptr_t;   /**< Type for unsigned pointer length integer */
 #else
 typedef rt_ubase_t                      rt_size_t;      /**< Type for size number */
 typedef rt_base_t                       rt_ssize_t;     /**< Used for a count of bytes or an error indication */
+typedef rt_base_t                       rt_intptr_t;    /**< Type for signed pointer length integer */
+typedef rt_ubase_t                      rt_uintptr_t;   /**< Type for unsigned pointer length integer */
 #endif /* defined(RT_USING_LIBC) && !defined(RT_USING_NANO) */
 
 typedef rt_base_t                       rt_err_t;       /**< Type for error number */
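Note: the net effect of the rtdef.h hunk above is that rt_base_t/rt_ubase_t now track the CPU word via ARCH_CPU_64BIT instead of `long`, while the new rt_intptr_t/rt_uintptr_t track pointer width. A minimal sketch of why the distinction matters; the type names mirror the hunk, and the assert assumes a plain rv32/ilp32d build where ARCH_CPU_64BIT is not defined:

    #include <stdint.h>

    /* Hypothetical check for an ilp32d build (ARCH_CPU_64BIT undefined). */
    typedef int32_t   my_base_t;     /* CPU word: 32-bit on ilp32d      */
    typedef uint32_t  my_ubase_t;
    typedef intptr_t  my_intptr_t;   /* pointer width: also 32-bit here */
    typedef uintptr_t my_uintptr_t;

    _Static_assert(sizeof(my_ubase_t) == sizeof(void *),
                   "on ilp32d, word width and pointer width coincide");
    /* On rv64ilp32 (64-bit CPU, 32-bit pointers) the two ladders diverge,
     * which is why pointer casts throughout this commit move to rt_uintptr_t. */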
@@ -67,7 +67,7 @@ rt_inline rt_err_t _bt_uaddr(rt_lwp_t lwp, rt_ubase_t *fp, struct rt_hw_backtrace_frame *frame)
 rt_err_t rt_hw_backtrace_frame_unwind(rt_thread_t thread, struct rt_hw_backtrace_frame *frame)
 {
     rt_err_t rc = -RT_ERROR;
-    rt_ubase_t *fp = (rt_ubase_t *)frame->fp;
+    rt_uintptr_t *fp = (rt_uintptr_t *)frame->fp;
 
     if (fp && !((long)fp & 0x7))
     {
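Note: the unwinder treats frame->fp as a pointer into the stack and checks 8-byte alignment before dereferencing (`!((long)fp & 0x7)`). A hedged sketch of the usual RISC-V frame-pointer walk this supports; the fp[-1]/fp[-2] slots assume the standard RISC-V frame layout (saved ra just below fp, caller's fp below that), not a quote of the RT-Thread implementation:

    /* Hedged sketch: one unwind step over a RISC-V fp chain. */
    struct bt_frame { unsigned long fp, pc; };

    static int unwind_step(struct bt_frame *f)
    {
        unsigned long *fp = (unsigned long *)f->fp;

        if (!fp || ((unsigned long)fp & 0x7))  /* null or unaligned: stop */
            return -1;
        f->pc = fp[-1];   /* return address saved just below fp   */
        f->fp = fp[-2];   /* caller's frame pointer below that    */
        return 0;
    }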
@@ -15,56 +15,56 @@
 #include "cpuport.h"
 #include "stackframe.h"
 
-.macro PUSH_8 reg
+.macro PUSH_REG reg
     addi sp, sp, -REGBYTES
     STORE \reg, (sp)
 .endm
 
-.macro POP_8 reg
+.macro POP_REG reg
     LOAD \reg, (sp)
     addi sp, sp, REGBYTES
 .endm
 
 .macro RESERVE_CONTEXT
-    PUSH_8 tp
-    PUSH_8 ra
-    PUSH_8 s0
-    PUSH_8 s1
-    PUSH_8 s2
-    PUSH_8 s3
-    PUSH_8 s4
-    PUSH_8 s5
-    PUSH_8 s6
-    PUSH_8 s7
-    PUSH_8 s8
-    PUSH_8 s9
-    PUSH_8 s10
-    PUSH_8 s11
+    PUSH_REG tp
+    PUSH_REG ra
+    PUSH_REG s0
+    PUSH_REG s1
+    PUSH_REG s2
+    PUSH_REG s3
+    PUSH_REG s4
+    PUSH_REG s5
+    PUSH_REG s6
+    PUSH_REG s7
+    PUSH_REG s8
+    PUSH_REG s9
+    PUSH_REG s10
+    PUSH_REG s11
     csrr s11, sstatus
     li s10, (SSTATUS_SPP)
     or s11, s11, s10
-    PUSH_8 s11
+    PUSH_REG s11
    addi sp, sp, -REGBYTES
 .endm
 
 .macro RESTORE_CONTEXT
     addi sp, sp, REGBYTES
-    POP_8 s11
+    POP_REG s11
     csrw sstatus, s11
-    POP_8 s11
-    POP_8 s10
-    POP_8 s9
-    POP_8 s8
-    POP_8 s7
-    POP_8 s6
-    POP_8 s5
-    POP_8 s4
-    POP_8 s3
-    POP_8 s2
-    POP_8 s1
-    POP_8 s0
-    POP_8 ra
-    POP_8 tp
+    POP_REG s11
+    POP_REG s10
+    POP_REG s9
+    POP_REG s8
+    POP_REG s7
+    POP_REG s6
+    POP_REG s5
+    POP_REG s4
+    POP_REG s3
+    POP_REG s2
+    POP_REG s1
+    POP_REG s0
+    POP_REG ra
+    POP_REG tp
     csrw sepc, ra
 .endm
 
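Note: the rename from PUSH_8/POP_8 to PUSH_REG/POP_REG reflects that a slot is no longer literally 8 bytes: it follows REGBYTES with the matching STORE/LOAD mnemonics, which the port headers select per XLEN. A sketch of that selection; REGBYTES/STORE/LOAD are the names used by the macros above, but the exact conditional is assumed, not quoted from cpuport.h:

    /* Hedged sketch of the usual RISC-V port-header selection,
     * consumed by the assembler through the C preprocessor. */
    #if __riscv_xlen == 64
    #define REGBYTES 8
    #define STORE    sd   /* store doubleword */
    #define LOAD     ld
    #else
    #define REGBYTES 4
    #define STORE    sw   /* store word */
    #define LOAD     lw
    #endif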
@@ -16,7 +16,6 @@
 #include "stack.h"
 #include <sbi.h>
 #include <encoding.h>
-#include "regtypes.h"
 
 #define K_SSTATUS_DEFAULT (SSTATUS_SPP | SSTATUS_SPIE | SSTATUS_SUM | SSTATUS_FS)
 
@@ -36,7 +35,7 @@ volatile rt_ubase_t rt_interrupt_to_thread = 0;
  */
 volatile rt_ubase_t rt_thread_switch_interrupt_flag = 0;
 
-void *_rt_hw_stack_init(rt_uintreg_t *sp, rt_uintreg_t ra, rt_uintreg_t sstatus)
+void *_rt_hw_stack_init(rt_ubase_t *sp, rt_ubase_t ra, rt_ubase_t sstatus)
 {
     (*--sp) = 0;    /* tp */
     (*--sp) = ra;   /* ra */
@@ -81,17 +80,17 @@ rt_uint8_t *rt_hw_stack_init(void *tentry,
                              rt_uint8_t *stack_addr,
                              void *texit)
 {
-    rt_uintreg_t *sp = (rt_uintreg_t *)stack_addr;
+    rt_ubase_t *sp = (rt_ubase_t *)stack_addr;
     // we use a strict alignment requirement for Q extension
-    sp = (rt_uintreg_t *)RT_ALIGN_DOWN((rt_uintreg_t)sp, 16);
+    sp = (rt_ubase_t *)RT_ALIGN_DOWN((rt_ubase_t)sp, 16);
 
-    (*--sp) = (rt_uintreg_t)tentry;
-    (*--sp) = (rt_uintreg_t)parameter;
-    (*--sp) = (rt_uintreg_t)texit;
+    (*--sp) = (rt_ubase_t)tentry;
+    (*--sp) = (rt_ubase_t)parameter;
+    (*--sp) = (rt_ubase_t)texit;
 
     /* compatible to RESTORE_CONTEXT */
     extern void _rt_thread_entry(void);
-    return (rt_uint8_t *)_rt_hw_stack_init(sp, (rt_uintreg_t)_rt_thread_entry, K_SSTATUS_DEFAULT);
+    return (rt_uint8_t *)_rt_hw_stack_init(sp, (rt_ubase_t)_rt_thread_entry, K_SSTATUS_DEFAULT);
 }
 
 /*
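Note: taken together, rt_hw_stack_init seeds a fake saved context at the top of a new thread's stack so that the first RESTORE_CONTEXT "returns" into _rt_thread_entry. A rough picture of the layout built above; the tentry/parameter/texit slot order is from the hunk, while the frame _rt_hw_stack_init adds below them is abbreviated:

    /*  high addresses
     *  +------------------------+ <- stack_addr, aligned down to 16
     *  | (rt_ubase_t)tentry     |   consumed later by the entry shim
     *  | (rt_ubase_t)parameter  |
     *  | (rt_ubase_t)texit      |
     *  +------------------------+
     *  | sstatus, ra = _rt_thread_entry, tp, s0..s11, ...
     *  |   (saved context laid out to match RESTORE_CONTEXT)
     *  +------------------------+ <- returned sp (becomes thread->sp)
     *  low addresses
     */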
@@ -28,38 +28,38 @@
  */
 
 #ifdef ENABLE_FPU
-#define FPU_CTX_F0_OFF  REGBYTES * 0   /* offsetof(fpu_context_t, fpustatus.f[0])  - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F1_OFF  REGBYTES * 1   /* offsetof(fpu_context_t, fpustatus.f[1])  - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F2_OFF  REGBYTES * 2   /* offsetof(fpu_context_t, fpustatus.f[2])  - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F3_OFF  REGBYTES * 3   /* offsetof(fpu_context_t, fpustatus.f[3])  - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F4_OFF  REGBYTES * 4   /* offsetof(fpu_context_t, fpustatus.f[4])  - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F5_OFF  REGBYTES * 5   /* offsetof(fpu_context_t, fpustatus.f[5])  - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F6_OFF  REGBYTES * 6   /* offsetof(fpu_context_t, fpustatus.f[6])  - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F7_OFF  REGBYTES * 7   /* offsetof(fpu_context_t, fpustatus.f[7])  - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F8_OFF  REGBYTES * 8   /* offsetof(fpu_context_t, fpustatus.f[8])  - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F9_OFF  REGBYTES * 9   /* offsetof(fpu_context_t, fpustatus.f[9])  - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F10_OFF REGBYTES * 10  /* offsetof(fpu_context_t, fpustatus.f[10]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F11_OFF REGBYTES * 11  /* offsetof(fpu_context_t, fpustatus.f[11]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F12_OFF REGBYTES * 12  /* offsetof(fpu_context_t, fpustatus.f[12]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F13_OFF REGBYTES * 13  /* offsetof(fpu_context_t, fpustatus.f[13]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F14_OFF REGBYTES * 14  /* offsetof(fpu_context_t, fpustatus.f[14]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F15_OFF REGBYTES * 15  /* offsetof(fpu_context_t, fpustatus.f[15]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F16_OFF REGBYTES * 16  /* offsetof(fpu_context_t, fpustatus.f[16]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F17_OFF REGBYTES * 17  /* offsetof(fpu_context_t, fpustatus.f[17]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F18_OFF REGBYTES * 18  /* offsetof(fpu_context_t, fpustatus.f[18]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F19_OFF REGBYTES * 19  /* offsetof(fpu_context_t, fpustatus.f[19]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F20_OFF REGBYTES * 20  /* offsetof(fpu_context_t, fpustatus.f[20]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F21_OFF REGBYTES * 21  /* offsetof(fpu_context_t, fpustatus.f[21]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F22_OFF REGBYTES * 22  /* offsetof(fpu_context_t, fpustatus.f[22]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F23_OFF REGBYTES * 23  /* offsetof(fpu_context_t, fpustatus.f[23]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F24_OFF REGBYTES * 24  /* offsetof(fpu_context_t, fpustatus.f[24]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F25_OFF REGBYTES * 25  /* offsetof(fpu_context_t, fpustatus.f[25]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F26_OFF REGBYTES * 26  /* offsetof(fpu_context_t, fpustatus.f[26]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F27_OFF REGBYTES * 27  /* offsetof(fpu_context_t, fpustatus.f[27]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F28_OFF REGBYTES * 28  /* offsetof(fpu_context_t, fpustatus.f[28]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F29_OFF REGBYTES * 29  /* offsetof(fpu_context_t, fpustatus.f[29]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F30_OFF REGBYTES * 30  /* offsetof(fpu_context_t, fpustatus.f[30]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F31_OFF REGBYTES * 31  /* offsetof(fpu_context_t, fpustatus.f[31]) - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F0_OFF  (REGBYTES * 0)   /* offsetof(fpu_context_t, fpustatus.f[0])  - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F1_OFF  (REGBYTES * 1)   /* offsetof(fpu_context_t, fpustatus.f[1])  - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F2_OFF  (REGBYTES * 2)   /* offsetof(fpu_context_t, fpustatus.f[2])  - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F3_OFF  (REGBYTES * 3)   /* offsetof(fpu_context_t, fpustatus.f[3])  - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F4_OFF  (REGBYTES * 4)   /* offsetof(fpu_context_t, fpustatus.f[4])  - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F5_OFF  (REGBYTES * 5)   /* offsetof(fpu_context_t, fpustatus.f[5])  - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F6_OFF  (REGBYTES * 6)   /* offsetof(fpu_context_t, fpustatus.f[6])  - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F7_OFF  (REGBYTES * 7)   /* offsetof(fpu_context_t, fpustatus.f[7])  - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F8_OFF  (REGBYTES * 8)   /* offsetof(fpu_context_t, fpustatus.f[8])  - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F9_OFF  (REGBYTES * 9)   /* offsetof(fpu_context_t, fpustatus.f[9])  - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F10_OFF (REGBYTES * 10)  /* offsetof(fpu_context_t, fpustatus.f[10]) - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F11_OFF (REGBYTES * 11)  /* offsetof(fpu_context_t, fpustatus.f[11]) - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F12_OFF (REGBYTES * 12)  /* offsetof(fpu_context_t, fpustatus.f[12]) - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F13_OFF (REGBYTES * 13)  /* offsetof(fpu_context_t, fpustatus.f[13]) - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F14_OFF (REGBYTES * 14)  /* offsetof(fpu_context_t, fpustatus.f[14]) - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F15_OFF (REGBYTES * 15)  /* offsetof(fpu_context_t, fpustatus.f[15]) - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F16_OFF (REGBYTES * 16)  /* offsetof(fpu_context_t, fpustatus.f[16]) - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F17_OFF (REGBYTES * 17)  /* offsetof(fpu_context_t, fpustatus.f[17]) - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F18_OFF (REGBYTES * 18)  /* offsetof(fpu_context_t, fpustatus.f[18]) - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F19_OFF (REGBYTES * 19)  /* offsetof(fpu_context_t, fpustatus.f[19]) - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F20_OFF (REGBYTES * 20)  /* offsetof(fpu_context_t, fpustatus.f[20]) - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F21_OFF (REGBYTES * 21)  /* offsetof(fpu_context_t, fpustatus.f[21]) - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F22_OFF (REGBYTES * 22)  /* offsetof(fpu_context_t, fpustatus.f[22]) - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F23_OFF (REGBYTES * 23)  /* offsetof(fpu_context_t, fpustatus.f[23]) - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F24_OFF (REGBYTES * 24)  /* offsetof(fpu_context_t, fpustatus.f[24]) - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F25_OFF (REGBYTES * 25)  /* offsetof(fpu_context_t, fpustatus.f[25]) - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F26_OFF (REGBYTES * 26)  /* offsetof(fpu_context_t, fpustatus.f[26]) - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F27_OFF (REGBYTES * 27)  /* offsetof(fpu_context_t, fpustatus.f[27]) - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F28_OFF (REGBYTES * 28)  /* offsetof(fpu_context_t, fpustatus.f[28]) - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F29_OFF (REGBYTES * 29)  /* offsetof(fpu_context_t, fpustatus.f[29]) - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F30_OFF (REGBYTES * 30)  /* offsetof(fpu_context_t, fpustatus.f[30]) - offsetof(fpu_context_t, fpustatus.f[0]) */
+#define FPU_CTX_F31_OFF (REGBYTES * 31)  /* offsetof(fpu_context_t, fpustatus.f[31]) - offsetof(fpu_context_t, fpustatus.f[0]) */
 #endif /* ENABLE_FPU */
 
 /**
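Note: the only change in the FPU block is wrapping each offset in parentheses, which is standard macro hygiene: an unparenthesized REGBYTES * n can re-associate when the expansion lands next to another operator of equal precedence. A small illustrative case, not taken from the source:

    #define REGBYTES 8
    #define F2_OFF_BAD   REGBYTES * 2    /* expands bare            */
    #define F2_OFF_GOOD  (REGBYTES * 2)  /* expands as one operand  */

    /* 100 % F2_OFF_BAD  -> 100 % REGBYTES * 2 -> (100 % 8) * 2 == 8
     * 100 % F2_OFF_GOOD -> 100 % (REGBYTES * 2)              == 4
     */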
@@ -15,7 +15,6 @@
 
 #include <rthw.h>
 #include "stack.h"
-#include "regtypes.h"
 
 enum
 {
@@ -42,6 +41,6 @@ int rt_hw_plic_irq_disable(int irq_number);
 void rt_hw_interrupt_init(void);
 void rt_hw_interrupt_mask(int vector);
 rt_isr_handler_t rt_hw_interrupt_install(int vector, rt_isr_handler_t handler, void *param, const char *name);
-void handle_trap(rt_uintreg_t xcause, rt_uintreg_t xtval, rt_uintreg_t xepc, struct rt_hw_stack_frame *sp);
+void handle_trap(rt_ubase_t xcause, rt_ubase_t xtval, rt_ubase_t xepc, struct rt_hw_stack_frame *sp);
 
 #endif
@@ -61,28 +61,28 @@ void *rt_hw_mmu_tbl_get()
 static int _map_one_page(struct rt_aspace *aspace, void *va, void *pa,
                          size_t attr)
 {
-    rt_size_t l1_off, l2_off, l3_off;
-    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;
+    rt_ubase_t l1_off, l2_off, l3_off;
+    rt_ubase_t *mmu_l1, *mmu_l2, *mmu_l3;
 
     l1_off = GET_L1((size_t)va);
     l2_off = GET_L2((size_t)va);
     l3_off = GET_L3((size_t)va);
 
-    mmu_l1 = ((rt_size_t *)aspace->page_table) + l1_off;
+    mmu_l1 = ((rt_ubase_t *)aspace->page_table) + l1_off;
 
     if (PTE_USED(*mmu_l1))
     {
-        mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), PV_OFFSET);
+        mmu_l2 = (rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), PV_OFFSET);
     }
     else
     {
-        mmu_l2 = (rt_size_t *)rt_pages_alloc(0);
+        mmu_l2 = (rt_ubase_t *)rt_pages_alloc(0);
 
         if (mmu_l2)
         {
             rt_memset(mmu_l2, 0, PAGE_SIZE);
             rt_hw_cpu_dcache_clean(mmu_l2, PAGE_SIZE);
-            *mmu_l1 = COMBINEPTE((rt_size_t)VPN_TO_PPN(mmu_l2, PV_OFFSET),
+            *mmu_l1 = COMBINEPTE((rt_ubase_t)VPN_TO_PPN(mmu_l2, PV_OFFSET),
                                  PAGE_DEFAULT_ATTR_NEXT);
             rt_hw_cpu_dcache_clean(mmu_l1, sizeof(*mmu_l1));
         }
@@ -96,18 +96,18 @@ static int _map_one_page(struct rt_aspace *aspace, void *va, void *pa,
     {
         RT_ASSERT(!PAGE_IS_LEAF(*(mmu_l2 + l2_off)));
         mmu_l3 =
-            (rt_size_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)), PV_OFFSET);
+            (rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)), PV_OFFSET);
     }
     else
     {
-        mmu_l3 = (rt_size_t *)rt_pages_alloc(0);
+        mmu_l3 = (rt_ubase_t *)rt_pages_alloc(0);
 
         if (mmu_l3)
         {
             rt_memset(mmu_l3, 0, PAGE_SIZE);
             rt_hw_cpu_dcache_clean(mmu_l3, PAGE_SIZE);
             *(mmu_l2 + l2_off) =
-                COMBINEPTE((rt_size_t)VPN_TO_PPN(mmu_l3, PV_OFFSET),
+                COMBINEPTE((rt_ubase_t)VPN_TO_PPN(mmu_l3, PV_OFFSET),
                            PAGE_DEFAULT_ATTR_NEXT);
             rt_hw_cpu_dcache_clean(mmu_l2, sizeof(*mmu_l2));
             // declares a reference to parent page table
@@ -122,7 +122,7 @@ static int _map_one_page(struct rt_aspace *aspace, void *va, void *pa,
     RT_ASSERT(!PTE_USED(*(mmu_l3 + l3_off)));
     // declares a reference to parent page table
     rt_page_ref_inc((void *)mmu_l3, 0);
-    *(mmu_l3 + l3_off) = COMBINEPTE((rt_size_t)pa, attr);
+    *(mmu_l3 + l3_off) = COMBINEPTE((rt_ubase_t)pa, attr);
     rt_hw_cpu_dcache_clean(mmu_l3 + l3_off, sizeof(*(mmu_l3 + l3_off)));
     return 0;
 }
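Note: these _map_one_page hunks walk a three-level page table: GET_L1/GET_L2/GET_L3 pick 9-bit VPN fields out of the virtual address, and each miss allocates the next-level table. A self-contained sketch of that index extraction for an Sv39-style layout; the constants are assumed here, the real ones live in the port's MMU header:

    /* Hedged sketch: Sv39-style VPN extraction (4 KiB pages, 9-bit indexes). */
    #define PAGE_SHIFT 12
    #define IDX_WIDTH  9
    #define IDX_MASK   ((1UL << IDX_WIDTH) - 1)

    static inline unsigned long vpn_index(unsigned long va, int level)
    {
        /* level 0 = top (L1), level 2 = leaf (L3) */
        return (va >> (PAGE_SHIFT + (2 - level) * IDX_WIDTH)) & IDX_MASK;
    }
    /* e.g. va = 0x40203000 -> L1 index 1, L2 index 1, L3 index 3 */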
@@ -165,7 +165,7 @@ void *rt_hw_mmu_map(struct rt_aspace *aspace, void *v_addr, void *p_addr,
     return NULL;
 }
 
-static void _unmap_pte(rt_size_t *pentry, rt_size_t *lvl_entry[], int level)
+static void _unmap_pte(rt_ubase_t *pentry, rt_ubase_t *lvl_entry[], int level)
 {
     int loop_flag = 1;
     while (loop_flag)
@@ -195,26 +195,26 @@ static void _unmap_pte(rt_size_t *pentry, rt_size_t *lvl_entry[], int level)
 
 static size_t _unmap_area(struct rt_aspace *aspace, void *v_addr, size_t size)
 {
-    rt_size_t loop_va = __UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);
+    rt_ubase_t loop_va = __UMASKVALUE((rt_ubase_t)v_addr, PAGE_OFFSET_MASK);
     size_t unmapped = 0;
 
     int i = 0;
-    rt_size_t lvl_off[3];
-    rt_size_t *lvl_entry[3];
-    lvl_off[0] = (rt_size_t)GET_L1(loop_va);
-    lvl_off[1] = (rt_size_t)GET_L2(loop_va);
-    lvl_off[2] = (rt_size_t)GET_L3(loop_va);
+    rt_ubase_t lvl_off[3];
+    rt_ubase_t *lvl_entry[3];
+    lvl_off[0] = (rt_ubase_t)GET_L1(loop_va);
+    lvl_off[1] = (rt_ubase_t)GET_L2(loop_va);
+    lvl_off[2] = (rt_ubase_t)GET_L3(loop_va);
     unmapped = 1 << (ARCH_PAGE_SHIFT + ARCH_INDEX_WIDTH * 2ul);
 
-    rt_size_t *pentry;
-    lvl_entry[i] = ((rt_size_t *)aspace->page_table + lvl_off[i]);
+    rt_ubase_t *pentry;
+    lvl_entry[i] = ((rt_ubase_t *)aspace->page_table + lvl_off[i]);
     pentry = lvl_entry[i];
 
     // find leaf page table entry
     while (PTE_USED(*pentry) && !PAGE_IS_LEAF(*pentry))
     {
         i += 1;
-        lvl_entry[i] = ((rt_size_t *)PPN_TO_VPN(GET_PADDR(*pentry), PV_OFFSET) +
+        lvl_entry[i] = ((rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*pentry), PV_OFFSET) +
                         lvl_off[i]);
         pentry = lvl_entry[i];
         unmapped >>= ARCH_INDEX_WIDTH;
@@ -277,8 +277,8 @@ static inline void _init_region(void *vaddr, size_t size)
 #define KERN_SPACE_SIZE ((size_t)USER_VADDR_START - 0x1000)
 #endif
 
-int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_size_t size,
-                       rt_size_t *vtable, rt_size_t pv_off)
+int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_ubase_t size,
+                       rt_ubase_t *vtable, rt_ubase_t pv_off)
 {
     size_t l1_off, va_s, va_e;
     rt_base_t level;
@@ -288,8 +288,8 @@ int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_size_t size,
         return -1;
     }
 
-    va_s = (rt_size_t)v_address;
-    va_e = ((rt_size_t)v_address) + size - 1;
+    va_s = (rt_ubase_t)v_address;
+    va_e = ((rt_ubase_t)v_address) + size - 1;
 
     if (va_e < va_s)
     {
@@ -330,15 +330,15 @@ static inline uintptr_t _get_level_size(int level)
     return 1ul << (ARCH_PAGE_SHIFT + (max_level - level) * ARCH_INDEX_WIDTH);
 }
 
-static rt_size_t *_query(struct rt_aspace *aspace, void *vaddr, int *level)
+static rt_ubase_t *_query(struct rt_aspace *aspace, void *vaddr, int *level)
 {
-    rt_size_t l1_off, l2_off, l3_off;
-    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;
-    rt_size_t pa;
+    rt_ubase_t l1_off, l2_off, l3_off;
+    rt_ubase_t *mmu_l1, *mmu_l2, *mmu_l3;
+    rt_ubase_t pa;
 
-    l1_off = GET_L1((rt_size_t)vaddr);
-    l2_off = GET_L2((rt_size_t)vaddr);
-    l3_off = GET_L3((rt_size_t)vaddr);
+    l1_off = GET_L1((rt_uintptr_t)vaddr);
+    l2_off = GET_L2((rt_uintptr_t)vaddr);
+    l3_off = GET_L3((rt_uintptr_t)vaddr);
 
     if (!aspace)
     {
@@ -346,7 +346,7 @@ static rt_size_t *_query(struct rt_aspace *aspace, void *vaddr, int *level)
         return RT_NULL;
     }
 
-    mmu_l1 = ((rt_size_t *)aspace->page_table) + l1_off;
+    mmu_l1 = ((rt_ubase_t *)aspace->page_table) + l1_off;
 
     if (PTE_USED(*mmu_l1))
     {
@@ -356,7 +356,7 @@ static rt_size_t *_query(struct rt_aspace *aspace, void *vaddr, int *level)
             return mmu_l1;
         }
 
-        mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), PV_OFFSET);
+        mmu_l2 = (rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), PV_OFFSET);
 
         if (PTE_USED(*(mmu_l2 + l2_off)))
         {
@@ -366,7 +366,7 @@ static rt_size_t *_query(struct rt_aspace *aspace, void *vaddr, int *level)
                 return mmu_l2 + l2_off;
             }
 
-            mmu_l3 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)),
+            mmu_l3 = (rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)),
                                              PV_OFFSET);
 
             if (PTE_USED(*(mmu_l3 + l3_off)))
@@ -383,7 +383,7 @@ static rt_size_t *_query(struct rt_aspace *aspace, void *vaddr, int *level)
 void *rt_hw_mmu_v2p(struct rt_aspace *aspace, void *vaddr)
 {
     int level;
-    uintptr_t *pte = _query(aspace, vaddr, &level);
+    rt_ubase_t *pte = _query(aspace, vaddr, &level);
     uintptr_t paddr;
 
     if (pte)
@@ -398,17 +398,17 @@ void *rt_hw_mmu_v2p(struct rt_aspace *aspace, void *vaddr)
     return (void *)paddr;
 }
 
-static int _noncache(uintptr_t *pte)
+static int _noncache(rt_base_t *pte)
 {
     return 0;
 }
 
-static int _cache(uintptr_t *pte)
+static int _cache(rt_base_t *pte)
 {
     return 0;
 }
 
-static int (*control_handler[MMU_CNTL_DUMMY_END])(uintptr_t *pte) = {
+static int (*control_handler[MMU_CNTL_DUMMY_END])(rt_base_t *pte) = {
     [MMU_CNTL_CACHE] = _cache,
     [MMU_CNTL_NONCACHE] = _noncache,
 };
@@ -420,14 +420,14 @@ int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
     int err = -RT_EINVAL;
     void *vend = vaddr + size;
 
-    int (*handler)(uintptr_t * pte);
+    int (*handler)(rt_base_t *pte);
     if (cmd >= 0 && cmd < MMU_CNTL_DUMMY_END)
     {
         handler = control_handler[cmd];
 
         while (vaddr < vend)
         {
-            uintptr_t *pte = _query(aspace, vaddr, &level);
+            rt_base_t *pte = _query(aspace, vaddr, &level);
             void *range_end = vaddr + _get_level_size(level);
             RT_ASSERT(range_end <= vend);
 
@@ -487,7 +487,7 @@ void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
                              mdesc->vaddr_start + 1,
                              .prefer = (void *)mdesc->vaddr_start};
 
-        if (mdesc->paddr_start == (rt_size_t)ARCH_MAP_FAILED)
+        if (mdesc->paddr_start == (rt_uintptr_t)ARCH_MAP_FAILED)
             mdesc->paddr_start = mdesc->vaddr_start + PV_OFFSET;
 
         rt_aspace_map_phy_static(aspace, &mdesc->varea, &hint, attr,
@@ -499,13 +499,13 @@ void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
     rt_page_cleanup();
 }
 
-#define SATP_BASE ((size_t)SATP_MODE << SATP_MODE_OFFSET)
+#define SATP_BASE ((rt_ubase_t)SATP_MODE << SATP_MODE_OFFSET)
 void rt_hw_mem_setup_early(void)
 {
-    rt_size_t pv_off;
-    rt_size_t ps = 0x0;
-    rt_size_t vs = 0x0;
-    rt_size_t *early_pgtbl = (size_t *)(((size_t)&__bss_end + 4095) & ~0xfff);
+    rt_ubase_t pv_off;
+    rt_ubase_t ps = 0x0;
+    rt_ubase_t vs = 0x0;
+    rt_ubase_t *early_pgtbl = (rt_ubase_t *)(((size_t)&__bss_end + 4095) & ~0xfff);
 
     /* calculate pv_offset */
     void *symb_pc;
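Note: rt_hw_mem_setup_early places the early page table at the first 4 KiB boundary after __bss_end via `(addr + 4095) & ~0xfff`. That is the general power-of-two align-up idiom, shown here with its align-down counterpart and a worked value:

    /* Align up / down to a power-of-two boundary. */
    #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))
    #define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))

    /* ALIGN_UP(0x80001234, 0x1000)   == 0x80002000
     * ALIGN_DOWN(0x80001234, 0x1000) == 0x80001000 */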
@@ -539,8 +539,8 @@ void rt_hw_mem_setup_early(void)
     vs = ps - pv_off;
 
     /* relocate region */
-    rt_size_t vs_idx = GET_L1(vs);
-    rt_size_t ve_idx = GET_L1(vs + 0x80000000);
+    rt_ubase_t vs_idx = GET_L1(vs);
+    rt_ubase_t ve_idx = GET_L1(vs + 0x80000000);
     for (size_t i = vs_idx; i < ve_idx; i++)
     {
         early_pgtbl[i] = COMBINEPTE(ps, PAGE_ATTR_RWX | PTE_G | PTE_V);
@@ -557,7 +557,7 @@ void rt_hw_mem_setup_early(void)
 
 void *rt_hw_mmu_pgtbl_create(void)
 {
-    size_t *mmu_table;
+    rt_ubase_t *mmu_table;
     mmu_table = (rt_ubase_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
     if (!mmu_table)
     {
@@ -30,7 +30,7 @@ struct mem_desc
 {
     rt_size_t vaddr_start;
     rt_size_t vaddr_end;
-    rt_size_t paddr_start;
+    rt_ubase_t paddr_start;
     rt_size_t attr;
     struct rt_varea varea;
 };
@@ -43,8 +43,8 @@ struct mem_desc
 #define GET_PPN(pte) \
     (__PARTBIT(pte, PTE_PPN_SHIFT, PHYSICAL_ADDRESS_WIDTH_BITS - PTE_PPN_SHIFT))
 #define GET_PADDR(pte) (GET_PPN(pte) << PAGE_OFFSET_BIT)
-#define VPN_TO_PPN(vaddr, pv_off) (((rt_size_t)(vaddr)) + (pv_off))
-#define PPN_TO_VPN(paddr, pv_off) (((rt_size_t)(paddr)) - (pv_off))
+#define VPN_TO_PPN(vaddr, pv_off) (((rt_uintptr_t)(vaddr)) + (pv_off))
+#define PPN_TO_VPN(paddr, pv_off) (((rt_uintptr_t)(paddr)) - (pv_off))
 #define COMBINEVADDR(l1_off, l2_off, l3_off) \
     (((l1_off) << VPN2_SHIFT) | ((l2_off) << VPN1_SHIFT) | \
      ((l3_off) << VPN0_SHIFT))
@@ -57,11 +57,11 @@ struct mem_desc
 #define MMU_MAP_ERROR_CONFLICT -4
 
 void *rt_hw_mmu_tbl_get(void);
-int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_size_t size,
-                       rt_size_t *vtable, rt_size_t pv_off);
+int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_ubase_t size,
+                       rt_ubase_t *vtable, rt_ubase_t pv_off);
 void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr);
-void rt_hw_mmu_kernel_map_init(rt_aspace_t aspace, rt_size_t vaddr_start,
-                               rt_size_t size);
+void rt_hw_mmu_kernel_map_init(rt_aspace_t aspace, rt_ubase_t vaddr_start,
+                               rt_ubase_t size);
 void *rt_hw_mmu_map(rt_aspace_t aspace, void *v_addr, void *p_addr, size_t size,
                     size_t attr);
 void rt_hw_mmu_unmap(rt_aspace_t aspace, void *v_addr, size_t size);
@@ -114,7 +114,7 @@ void plic_complete(int irq)
 
 void plic_set_ie(rt_uint32_t word_index, rt_uint32_t val)
 {
-    volatile void *plic_ie = (void *)(rt_size_t)(plic_base + PLIC_ENABLE_BASE + 0x80 + word_index * 4);
+    volatile void *plic_ie = (void *)(rt_ubase_t)(plic_base + PLIC_ENABLE_BASE + 0x80 + word_index * 4);
     writel(val, plic_ie);
 }
 
@@ -1,19 +0,0 @@
-/*
- * Copyright (c) 2006-2024, RT-Thread Development Team
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Change Logs:
- * Date           Author       Notes
- * 2024-07-15     WangShun     The first version
- */
-
-#ifndef REGTYPES_H__
-#define REGTYPES_H__
-#include <rtconfig.h>
-#if defined(RT_USING_RV64ILP32)
-typedef unsigned long long rt_uintreg_t;
-#else
-typedef unsigned long rt_uintreg_t;
-#endif
-#endif /* REGTYPES_H__ */
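Note: deleting regtypes.h removes the separate rt_uintreg_t register-width alias: with the rtdef.h ladder earlier in this commit, rt_ubase_t already names the machine word and rt_uintptr_t the pointer, so a third alias is redundant. What the deleted mapping did (quoted from the removed file) versus its replacement (inferred from the other hunks, not a quote):

    /* Before (deleted regtypes.h): register width had its own alias. */
    #if defined(RT_USING_RV64ILP32)
    typedef unsigned long long rt_uintreg_t;  /* 64-bit regs, 32-bit longs */
    #else
    typedef unsigned long      rt_uintreg_t;
    #endif
    /* After: call sites use rt_ubase_t (word) or rt_uintptr_t (pointer). */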
@@ -6,26 +6,23 @@
  * Change Logs:
  * Date           Author       Notes
  * 2021-01-30     lizhirui     first version
  * 2024-08-28     RT-Thread    Fit into rv64ilp32 ABI
  */
 
 #ifndef __RISCV_H__
 #define __RISCV_H__
 
 #include <encoding.h>
 #include <rtconfig.h>
 
 #if defined(RT_USING_RV64ILP32)
 /* using unsigned long long for the case of rv64ilp32 */
 #define __SIZE(bit) (1ULL << (bit))
 #define __MASK(bit) (__SIZE(bit) - 1ULL)
 #else
 #define __SIZE(bit) (1UL << (bit))
 #define __MASK(bit) (__SIZE(bit) - 1UL)
 #endif /* RT_USING_RV64ILP32 */
 
 #define __UMASK(bit) (~(__MASK(bit)))
 #define __MASKVALUE(value,maskvalue) ((value) & (maskvalue))
 #define __UMASKVALUE(value,maskvalue) ((value) & (~(maskvalue)))
-#define __CHECKUPBOUND(value,bit_count) (!(((rt_size_t)value) & (~__MASK(bit_count))))
-#define __CHECKALIGN(value,start_bit) (!(((rt_size_t)value) & (__MASK(start_bit))))
+#define __CHECKUPBOUND(value,bit_count) (!(((rt_ubase_t)value) & (~__MASK(bit_count))))
+#define __CHECKALIGN(value,start_bit) (!(((rt_ubase_t)value) & (__MASK(start_bit))))
 
 #define __PARTBIT(value,start_bit,length) (((value) >> (start_bit)) & __MASK(length))
 
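Note: the __SIZE/__MASK helpers underpin all the field extraction in this port, and __PARTBIT(value, start, len) pulls a len-bit field starting at bit start. A worked example:

    /* __MASK(12)              == 0xFFF
     * __UMASK(12)             == ~0xFFF  (clears the page offset)
     * __PARTBIT(0xABCD, 4, 8) == (0xABCD >> 4) & 0xFF == 0xBC
     * __CHECKALIGN(p, 12)     -> nonzero iff p is 4 KiB aligned
     */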
@@ -88,7 +88,7 @@
 #define ARCH_INDEX_SIZE (1ul << ARCH_INDEX_WIDTH)
 #define ARCH_INDEX_MASK (ARCH_INDEX_SIZE - 1)
 
-#define ARCH_MAP_FAILED ((void *)0x8000000000000000)
+#define ARCH_MAP_FAILED ((void *)-1)
 
 void mmu_set_pagetable(rt_ubase_t addr);
 void mmu_enable_user_page_access(void);
@@ -26,7 +26,7 @@
 #include "riscv_mmu.h"
 #include "stack.h"
 
-typedef rt_size_t (*syscallfunc_t)(rt_size_t, rt_size_t, rt_size_t, rt_size_t, rt_size_t, rt_size_t, rt_size_t);
+typedef rt_ubase_t (*syscallfunc_t)(rt_ubase_t, rt_ubase_t, rt_ubase_t, rt_ubase_t, rt_ubase_t, rt_ubase_t, rt_ubase_t);
 
 void syscall_handler(struct rt_hw_stack_frame *regs)
 {
@@ -61,7 +61,7 @@ void dump_regs(struct rt_hw_stack_frame *regs)
     rt_kprintf("\t%s\n", (regs->sstatus & SSTATUS_SPP) ? "Last Privilege is Supervisor Mode" : "Last Privilege is User Mode");
     rt_kprintf("\t%s\n", (regs->sstatus & SSTATUS_SUM) ? "Permit to Access User Page" : "Not Permit to Access User Page");
     rt_kprintf("\t%s\n", (regs->sstatus & (1 << 19)) ? "Permit to Read Executable-only Page" : "Not Permit to Read Executable-only Page");
-    rt_uintreg_t satp_v = read_csr(satp);
+    rt_ubase_t satp_v = read_csr(satp);
     rt_kprintf("satp = 0x%p\n", satp_v);
     rt_kprintf("\tCurrent Page Table(Physical) = 0x%p\n", __MASKVALUE(satp_v, __MASK(44)) << PAGE_OFFSET_BIT);
     rt_kprintf("\tCurrent ASID = 0x%p\n", __MASKVALUE(satp_v >> 44, __MASK(16)) << PAGE_OFFSET_BIT);
@@ -150,9 +150,9 @@ static const char *get_exception_msg(int id)
 
 #ifdef RT_USING_SMART
 #include "lwp.h"
-void handle_user(rt_size_t scause, rt_size_t stval, rt_size_t sepc, struct rt_hw_stack_frame *sp)
+void handle_user(rt_ubase_t scause, rt_ubase_t stval, rt_ubase_t sepc, struct rt_hw_stack_frame *sp)
 {
-    rt_size_t id = __MASKVALUE(scause, __MASK(63UL));
+    rt_ubase_t id = __MASKVALUE(scause, __MASK(63UL));
     struct rt_lwp *lwp;
 
     /* user page fault */
@@ -275,9 +275,9 @@ static int illegal_inst_recoverable(rt_ubase_t stval, struct rt_hw_stack_frame *sp)
 #endif
 
 static void handle_nested_trap_panic(
-    rt_size_t cause,
-    rt_size_t tval,
-    rt_size_t epc,
+    rt_ubase_t cause,
+    rt_ubase_t tval,
+    rt_ubase_t epc,
     struct rt_hw_stack_frame *eframe)
 {
     LOG_E("\n-------- [SEVER ERROR] --------");
@@ -291,10 +291,10 @@ static void handle_nested_trap_panic(
 #define PAGE_FAULT (id == EP_LOAD_PAGE_FAULT || id == EP_STORE_PAGE_FAULT)
 
 /* Trap entry */
-void handle_trap(rt_uintreg_t scause, rt_uintreg_t stval, rt_uintreg_t sepc, struct rt_hw_stack_frame *sp)
+void handle_trap(rt_ubase_t scause, rt_ubase_t stval, rt_ubase_t sepc, struct rt_hw_stack_frame *sp)
 {
     ENTER_TRAP;
-    rt_uintreg_t id = __MASKVALUE(scause, __MASK(63UL));
+    rt_ubase_t id = __MASKVALUE(scause, __MASK(63UL));
     const char *msg;
 
     /* supervisor external interrupt */
@@ -316,7 +316,7 @@ void handle_trap(rt_uintreg_t scause, rt_uintreg_t stval, rt_uintreg_t sepc, struct rt_hw_stack_frame *sp)
     {
         // trap cannot nested when handling another trap / interrupt
         CHECK_NESTED_PANIC(scause, stval, sepc, sp);
-        rt_uintreg_t id = __MASKVALUE(scause, __MASK(63UL));
+        rt_ubase_t id = __MASKVALUE(scause, __MASK(63UL));
         const char *msg;
 
         if (scause >> 63)
@@ -769,7 +769,7 @@ rt_err_t rt_sem_control(rt_sem_t sem, int cmd, void *arg)
         rt_ubase_t value;
 
         /* get value */
-        value = (rt_ubase_t)arg;
+        value = (rt_uintptr_t)arg;
         level = rt_spin_lock_irqsave(&(sem->spinlock));
 
         /* resume all waiting thread */
@@ -787,7 +787,7 @@ rt_err_t rt_sem_control(rt_sem_t sem, int cmd, void *arg)
         rt_ubase_t max_value;
         rt_bool_t need_schedule = RT_FALSE;
 
-        max_value = (rt_uint16_t)((rt_ubase_t)arg);
+        max_value = (rt_uint16_t)((rt_uintptr_t)arg);
         if (max_value > RT_SEM_VALUE_MAX || max_value < 1)
         {
             return -RT_EINVAL;
@@ -93,8 +93,8 @@ rt_weak void rt_hw_cpu_shutdown(void)
 
 #ifdef __GNUC__
 #define RT_HW_BACKTRACE_FRAME_GET_SELF(frame) do { \
-    (frame)->fp = (rt_base_t)__builtin_frame_address(0U); \
-    (frame)->pc = ({__label__ pc; pc: (rt_base_t)&&pc;}); \
+    (frame)->fp = (rt_uintptr_t)__builtin_frame_address(0U); \
+    (frame)->pc = ({__label__ pc; pc: (rt_uintptr_t)&&pc;}); \
 } while (0)
 
 #else
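Note: RT_HW_BACKTRACE_FRAME_GET_SELF captures the caller's own frame: __builtin_frame_address(0) reads the current frame pointer, and the GNU local-label/&&label extension yields the current code address. A hedged usage sketch; the macro itself is from the hunk above, while the surrounding call sequence is illustrative:

    struct rt_hw_backtrace_frame frame;

    RT_HW_BACKTRACE_FRAME_GET_SELF(&frame);
    /* frame.fp / frame.pc now describe this function's own frame and
     * can seed rt_hw_backtrace_frame_unwind() to walk the callers. */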
@@ -545,7 +545,7 @@ rt_err_t rt_backtrace_thread(rt_thread_t thread)
 
 static void cmd_backtrace(int argc, char** argv)
 {
-    rt_ubase_t pid;
+    rt_uintptr_t pid;
     char *end_ptr;
 
     if (argc != 2)
@@ -778,8 +778,8 @@ rt_inline void _slab_info(rt_size_t *total,
  */
 void rt_system_heap_init_generic(void *begin_addr, void *end_addr)
 {
-    rt_ubase_t begin_align = RT_ALIGN((rt_ubase_t)begin_addr, RT_ALIGN_SIZE);
-    rt_ubase_t end_align   = RT_ALIGN_DOWN((rt_ubase_t)end_addr, RT_ALIGN_SIZE);
+    rt_uintptr_t begin_align = RT_ALIGN((rt_uintptr_t)begin_addr, RT_ALIGN_SIZE);
+    rt_uintptr_t end_align   = RT_ALIGN_DOWN((rt_uintptr_t)end_addr, RT_ALIGN_SIZE);
 
     RT_ASSERT(end_align > begin_align);
 
@@ -988,17 +988,17 @@ rt_weak void *rt_malloc_align(rt_size_t size, rt_size_t align)
     if (ptr != RT_NULL)
     {
         /* the allocated memory block is aligned */
-        if (((rt_ubase_t)ptr & (align - 1)) == 0)
+        if (((rt_uintptr_t)ptr & (align - 1)) == 0)
         {
-            align_ptr = (void *)((rt_ubase_t)ptr + align);
+            align_ptr = (void *)((rt_uintptr_t)ptr + align);
         }
         else
         {
-            align_ptr = (void *)(((rt_ubase_t)ptr + (align - 1)) & ~(align - 1));
+            align_ptr = (void *)(((rt_uintptr_t)ptr + (align - 1)) & ~(align - 1));
         }
 
         /* set the pointer before alignment pointer to the real pointer */
-        *((rt_ubase_t *)((rt_ubase_t)align_ptr - sizeof(void *))) = (rt_ubase_t)ptr;
+        *((rt_uintptr_t *)((rt_uintptr_t)align_ptr - sizeof(void *))) = (rt_uintptr_t)ptr;
 
         ptr = align_ptr;
     }
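Note: rt_malloc_align over-allocates, bumps the pointer up to the requested boundary, and stashes the original pointer in the word just below the aligned address so rt_free_align can recover it. Usage is straightforward; the sizes here are arbitrary:

    /* Get 256 bytes aligned to a 64-byte boundary. */
    void *buf = rt_malloc_align(256, 64);
    if (buf)
    {
        RT_ASSERT(((rt_uintptr_t)buf & 63) == 0);
        /* ... use buf ... */
        rt_free_align(buf);   /* reads back the stashed real pointer */
    }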
@@ -1019,7 +1019,7 @@ rt_weak void rt_free_align(void *ptr)
 
     /* NULL check */
     if (ptr == RT_NULL) return;
-    real_ptr = (void *) * (rt_ubase_t *)((rt_ubase_t)ptr - sizeof(void *));
+    real_ptr = (void *) * (rt_uintptr_t *)((rt_uintptr_t)ptr - sizeof(void *));
     rt_free(real_ptr);
 }
 RTM_EXPORT(rt_free_align);
src/mem.c
@@ -58,7 +58,7 @@
 
 struct rt_small_mem_item
 {
-    rt_ubase_t              pool_ptr;         /**< small memory object addr */
+    rt_uintptr_t            pool_ptr;         /**< small memory object addr */
     rt_size_t               next;             /**< next free item */
     rt_size_t               prev;             /**< prev free item */
 #ifdef RT_USING_MEMTRACE
@@ -82,19 +82,19 @@ struct rt_small_mem
     rt_size_t                   mem_size_aligned;   /**< aligned memory size */
 };
 
-#define MIN_SIZE (sizeof(rt_ubase_t) + sizeof(rt_size_t) + sizeof(rt_size_t))
+#define MIN_SIZE (sizeof(rt_uintptr_t) + sizeof(rt_size_t) + sizeof(rt_size_t))
 
 #define MEM_MASK ((~(rt_size_t)0) - 1)
 
-#define MEM_USED(_mem)  ((((rt_base_t)(_mem)) & MEM_MASK) | 0x1)
-#define MEM_FREED(_mem) ((((rt_base_t)(_mem)) & MEM_MASK) | 0x0)
+#define MEM_USED(_mem)  ((((rt_uintptr_t)(_mem)) & MEM_MASK) | 0x1)
+#define MEM_FREED(_mem) ((((rt_uintptr_t)(_mem)) & MEM_MASK) | 0x0)
 #define MEM_ISUSED(_mem)   \
-                      (((rt_base_t)(((struct rt_small_mem_item *)(_mem))->pool_ptr)) & (~MEM_MASK))
+                      (((rt_uintptr_t)(((struct rt_small_mem_item *)(_mem))->pool_ptr)) & (~MEM_MASK))
 #define MEM_POOL(_mem)     \
-    ((struct rt_small_mem *)(((rt_base_t)(((struct rt_small_mem_item *)(_mem))->pool_ptr)) & (MEM_MASK)))
+    ((struct rt_small_mem *)(((rt_uintptr_t)(((struct rt_small_mem_item *)(_mem))->pool_ptr)) & (MEM_MASK)))
 #define MEM_SIZE(_heap, _mem)      \
-    (((struct rt_small_mem_item *)(_mem))->next - ((rt_ubase_t)(_mem) - \
-    (rt_ubase_t)((_heap)->heap_ptr)) - RT_ALIGN(sizeof(struct rt_small_mem_item), RT_ALIGN_SIZE))
+    (((struct rt_small_mem_item *)(_mem))->next - ((rt_uintptr_t)(_mem) - \
+    (rt_uintptr_t)((_heap)->heap_ptr)) - RT_ALIGN(sizeof(struct rt_small_mem_item), RT_ALIGN_SIZE))
 
 #define MIN_SIZE_ALIGNED     RT_ALIGN(MIN_SIZE, RT_ALIGN_SIZE)
 #define SIZEOF_STRUCT_MEM    RT_ALIGN(sizeof(struct rt_small_mem_item), RT_ALIGN_SIZE)
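Note: MEM_MASK clears the low bit of a pointer-sized value, so pool_ptr can carry the owning pool's address and a used/free flag in one field: MEM_USED sets bit 0, MEM_FREED leaves it clear, and MEM_POOL masks the flag back off. A worked example with an invented address:

    /* small_mem at 0x20001000 (RT_ALIGN_SIZE-aligned, so bit 0 is spare) */
    item->pool_ptr = MEM_USED(small_mem);   /* 0x20001001              */
    MEM_ISUSED(item);                       /* -> 0x1 (nonzero: used)  */
    MEM_POOL(item);                         /* -> 0x20001000           */
    item->pool_ptr = MEM_FREED(small_mem);  /* 0x20001000              */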
@@ -173,12 +173,12 @@ rt_smem_t rt_smem_init(const char *name,
 {
     struct rt_small_mem_item *mem;
     struct rt_small_mem *small_mem;
-    rt_ubase_t start_addr, begin_align, end_align, mem_size;
+    rt_uintptr_t start_addr, begin_align, end_align, mem_size;
 
-    small_mem = (struct rt_small_mem *)RT_ALIGN((rt_ubase_t)begin_addr, RT_ALIGN_SIZE);
-    start_addr = (rt_ubase_t)small_mem + sizeof(*small_mem);
-    begin_align = RT_ALIGN((rt_ubase_t)start_addr, RT_ALIGN_SIZE);
-    end_align = RT_ALIGN_DOWN((rt_ubase_t)begin_addr + size, RT_ALIGN_SIZE);
+    small_mem = (struct rt_small_mem *)RT_ALIGN((rt_uintptr_t)begin_addr, RT_ALIGN_SIZE);
+    start_addr = (rt_uintptr_t)small_mem + sizeof(*small_mem);
+    begin_align = RT_ALIGN((rt_uintptr_t)start_addr, RT_ALIGN_SIZE);
+    end_align = RT_ALIGN_DOWN((rt_uintptr_t)begin_addr + size, RT_ALIGN_SIZE);
 
     /* alignment addr */
     if ((end_align > (2 * SIZEOF_STRUCT_MEM)) &&
@@ -190,7 +190,7 @@ rt_smem_t rt_smem_init(const char *name,
     else
     {
         rt_kprintf("mem init, error begin address 0x%x, and end address 0x%x\n",
-                   (rt_ubase_t)begin_addr, (rt_ubase_t)begin_addr + size);
+                   (rt_uintptr_t)begin_addr, (rt_uintptr_t)begin_addr + size);
 
         return RT_NULL;
     }
@@ -207,7 +207,7 @@ rt_smem_t rt_smem_init(const char *name,
     small_mem->heap_ptr = (rt_uint8_t *)begin_align;
 
     LOG_D("mem init, heap begin address 0x%x, size %d",
-          (rt_ubase_t)small_mem->heap_ptr, small_mem->mem_size_aligned);
+          (rt_uintptr_t)small_mem->heap_ptr, small_mem->mem_size_aligned);
 
     /* initialize the start of the heap */
     mem = (struct rt_small_mem_item *)small_mem->heap_ptr;
@@ -372,13 +372,13 @@ void *rt_smem_alloc(rt_smem_t m, rt_size_t size)
 
             RT_ASSERT(((small_mem->lfree == small_mem->heap_end) || (!MEM_ISUSED(small_mem->lfree))));
         }
-        RT_ASSERT((rt_ubase_t)mem + SIZEOF_STRUCT_MEM + size <= (rt_ubase_t)small_mem->heap_end);
-        RT_ASSERT((rt_ubase_t)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM) % RT_ALIGN_SIZE == 0);
-        RT_ASSERT((((rt_ubase_t)mem) & (RT_ALIGN_SIZE - 1)) == 0);
+        RT_ASSERT((rt_uintptr_t)mem + SIZEOF_STRUCT_MEM + size <= (rt_uintptr_t)small_mem->heap_end);
+        RT_ASSERT((rt_uintptr_t)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM) % RT_ALIGN_SIZE == 0);
+        RT_ASSERT((((rt_uintptr_t)mem) & (RT_ALIGN_SIZE - 1)) == 0);
 
         LOG_D("allocate memory at 0x%x, size: %d",
-              (rt_ubase_t)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM),
-              (rt_ubase_t)(mem->next - ((rt_uint8_t *)mem - small_mem->heap_ptr)));
+              (rt_uintptr_t)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM),
+              (rt_uintptr_t)(mem->next - ((rt_uint8_t *)mem - small_mem->heap_ptr)));
 
         /* return the memory data except mem struct */
         return (rt_uint8_t *)mem + SIZEOF_STRUCT_MEM;
@@ -431,7 +431,7 @@ void *rt_smem_realloc(rt_smem_t m, void *rmem, rt_size_t newsize)
     if (rmem == RT_NULL)
         return rt_smem_alloc(&small_mem->parent, newsize);
 
-    RT_ASSERT((((rt_ubase_t)rmem) & (RT_ALIGN_SIZE - 1)) == 0);
+    RT_ASSERT((((rt_uintptr_t)rmem) & (RT_ALIGN_SIZE - 1)) == 0);
     RT_ASSERT((rt_uint8_t *)rmem >= (rt_uint8_t *)small_mem->heap_ptr);
     RT_ASSERT((rt_uint8_t *)rmem < (rt_uint8_t *)small_mem->heap_end);
 
@@ -502,7 +502,7 @@ void rt_smem_free(void *rmem)
     if (rmem == RT_NULL)
         return;
 
-    RT_ASSERT((((rt_ubase_t)rmem) & (RT_ALIGN_SIZE - 1)) == 0);
+    RT_ASSERT((((rt_uintptr_t)rmem) & (RT_ALIGN_SIZE - 1)) == 0);
 
     /* Get the corresponding struct rt_small_mem_item ... */
     mem = (struct rt_small_mem_item *)((rt_uint8_t *)rmem - SIZEOF_STRUCT_MEM);
@@ -517,8 +517,8 @@ void rt_smem_free(void *rmem)
     RT_ASSERT(MEM_POOL(&small_mem->heap_ptr[mem->next]) == small_mem);
 
     LOG_D("release memory 0x%x, size: %d",
-          (rt_ubase_t)rmem,
-          (rt_ubase_t)(mem->next - ((rt_uint8_t *)mem - small_mem->heap_ptr)));
+          (rt_uintptr_t)rmem,
+          (rt_uintptr_t)(mem->next - ((rt_uint8_t *)mem - small_mem->heap_ptr)));
 
     /* ... and is now unused. */
     mem->pool_ptr = MEM_FREED(small_mem);
@@ -578,7 +578,7 @@ static int memcheck(int argc, char *argv[])
         /* check mem */
         for (mem = (struct rt_small_mem_item *)m->heap_ptr; mem != m->heap_end; mem = (struct rt_small_mem_item *)&m->heap_ptr[mem->next])
         {
-            position = (rt_ubase_t)mem - (rt_ubase_t)m->heap_ptr;
+            position = (rt_uintptr_t)mem - (rt_uintptr_t)m->heap_ptr;
             if (position < 0) goto __exit;
             if (position > (int)m->mem_size_aligned) goto __exit;
             if (MEM_POOL(mem) != m) goto __exit;
@@ -40,7 +40,7 @@
 #define RT_MEMHEAP_MINIALLOC    RT_ALIGN(12, RT_ALIGN_SIZE)
 
 #define RT_MEMHEAP_SIZE         RT_ALIGN(sizeof(struct rt_memheap_item), RT_ALIGN_SIZE)
-#define MEMITEM_SIZE(item)      ((rt_ubase_t)item->next - (rt_ubase_t)item - RT_MEMHEAP_SIZE)
+#define MEMITEM_SIZE(item)      ((rt_uintptr_t)item->next - (rt_uintptr_t)item - RT_MEMHEAP_SIZE)
 #define MEMITEM(ptr)            (struct rt_memheap_item*)((rt_uint8_t*)ptr - RT_MEMHEAP_SIZE)
 
 static void _remove_next_ptr(volatile struct rt_memheap_item *next_ptr)
@@ -899,10 +899,10 @@ static int memheapcheck(int argc, char *argv[])
             break;
         }
         /* check next and prev */
-        if (!((rt_ubase_t)item->next <= (rt_ubase_t)((rt_ubase_t)heap->start_addr + heap->pool_size) &&
-              (rt_ubase_t)item->prev >= (rt_ubase_t)heap->start_addr) &&
-              (rt_ubase_t)item->next == RT_ALIGN((rt_ubase_t)item->next, RT_ALIGN_SIZE) &&
-              (rt_ubase_t)item->prev == RT_ALIGN((rt_ubase_t)item->prev, RT_ALIGN_SIZE))
+        if (!((rt_uintptr_t)item->next <= (rt_uintptr_t)((rt_uintptr_t)heap->start_addr + heap->pool_size) &&
+              (rt_uintptr_t)item->prev >= (rt_uintptr_t)heap->start_addr) &&
+              (rt_uintptr_t)item->next == RT_ALIGN((rt_uintptr_t)item->next, RT_ALIGN_SIZE) &&
+              (rt_uintptr_t)item->prev == RT_ALIGN((rt_uintptr_t)item->prev, RT_ALIGN_SIZE))
         {
             has_bad = RT_TRUE;
             break;
@@ -244,13 +244,13 @@ void rt_scheduler_stack_check(struct rt_thread *thread)
 
 #ifndef RT_USING_HW_STACK_GUARD
 #ifdef ARCH_CPU_STACK_GROWS_UPWARD
-    if (*((rt_uint8_t *)((rt_ubase_t)thread->stack_addr + thread->stack_size - 1)) != '#' ||
+    if (*((rt_uint8_t *)((rt_uintptr_t)thread->stack_addr + thread->stack_size - 1)) != '#' ||
 #else
     if (*((rt_uint8_t *)thread->stack_addr) != '#' ||
 #endif /* ARCH_CPU_STACK_GROWS_UPWARD */
-        (rt_ubase_t)thread->sp <= (rt_ubase_t)thread->stack_addr ||
-        (rt_ubase_t)thread->sp >
-        (rt_ubase_t)thread->stack_addr + (rt_ubase_t)thread->stack_size)
+        (rt_uintptr_t)thread->sp <= (rt_uintptr_t)thread->stack_addr ||
+        (rt_uintptr_t)thread->sp >
+        (rt_uintptr_t)thread->stack_addr + (rt_uintptr_t)thread->stack_size)
     {
         rt_base_t dummy = 1;
 
@@ -261,9 +261,9 @@ void rt_scheduler_stack_check(struct rt_thread *thread)
 #endif /* RT_USING_HW_STACK_GUARD */
 #ifdef ARCH_CPU_STACK_GROWS_UPWARD
 #ifndef RT_USING_HW_STACK_GUARD
-    else if ((rt_ubase_t)thread->sp > ((rt_ubase_t)thread->stack_addr + thread->stack_size))
+    else if ((rt_uintptr_t)thread->sp > ((rt_uintptr_t)thread->stack_addr + thread->stack_size))
 #else
-    if ((rt_ubase_t)thread->sp > ((rt_ubase_t)thread->stack_addr + thread->stack_size))
+    if ((rt_uintptr_t)thread->sp > ((rt_uintptr_t)thread->stack_addr + thread->stack_size))
 #endif
     {
         LOG_W("warning: %s stack is close to the top of stack address.\n",
@@ -271,9 +271,9 @@ void rt_scheduler_stack_check(struct rt_thread *thread)
     }
 #else
 #ifndef RT_USING_HW_STACK_GUARD
-    else if ((rt_ubase_t)thread->sp <= ((rt_ubase_t)thread->stack_addr + 32))
+    else if ((rt_uintptr_t)thread->sp <= ((rt_uintptr_t)thread->stack_addr + 32))
 #else
-    if ((rt_ubase_t)thread->sp <= ((rt_ubase_t)thread->stack_addr + 32))
+    if ((rt_uintptr_t)thread->sp <= ((rt_uintptr_t)thread->stack_addr + 32))
 #endif
     {
         LOG_W("warning: %s stack is close to end of stack address.\n",
@@ -181,7 +181,7 @@ void rt_system_scheduler_start(void)
 
     /* switch to new thread */
 
-    rt_hw_context_switch_to((rt_ubase_t)&to_thread->sp);
+    rt_hw_context_switch_to((rt_uintptr_t)&to_thread->sp);
 
     /* never come back */
 }
@@ -275,8 +275,8 @@ void rt_schedule(void)
 
             RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook, (from_thread));
 
-            rt_hw_context_switch((rt_ubase_t)&from_thread->sp,
-                                 (rt_ubase_t)&to_thread->sp);
+            rt_hw_context_switch((rt_uintptr_t)&from_thread->sp,
+                                 (rt_uintptr_t)&to_thread->sp);
 
             /* enable interrupt */
             rt_hw_interrupt_enable(level);
@@ -306,8 +306,8 @@ void rt_schedule(void)
             {
                 LOG_D("switch in interrupt");
 
-                rt_hw_context_switch_interrupt((rt_ubase_t)&from_thread->sp,
-                                               (rt_ubase_t)&to_thread->sp, from_thread, to_thread);
+                rt_hw_context_switch_interrupt((rt_uintptr_t)&from_thread->sp,
+                                               (rt_uintptr_t)&to_thread->sp, from_thread, to_thread);
             }
         }
         else
@@ -72,9 +72,9 @@ static void _signal_entry(void *parameter)
     RT_SCHED_CTX(tid).stat &= ~RT_THREAD_STAT_SIGNAL;
 
 #ifdef RT_USING_SMP
-    rt_hw_context_switch_to((rt_base_t)&parameter, tid);
+    rt_hw_context_switch_to((rt_uintptr_t)&parameter, tid);
 #else
-    rt_hw_context_switch_to((rt_ubase_t)&(tid->sp));
+    rt_hw_context_switch_to((rt_uintptr_t)&(tid->sp));
 #endif /* RT_USING_SMP */
 }
 
src/slab.c
@@ -136,7 +136,7 @@
 #define PAGE_TYPE_LARGE     0x02
 
 #define btokup(addr) \
-    (&slab->memusage[((rt_ubase_t)(addr) - slab->heap_start) >> RT_MM_PAGE_BITS])
+    (&slab->memusage[((rt_uintptr_t)(addr) - slab->heap_start) >> RT_MM_PAGE_BITS])
 
 /**
  * Base structure of slab memory object
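Note: btokup maps any address inside the slab heap to its per-page memusage bookkeeping slot by page index. A worked example with an invented heap base:

    /* heap_start = 0x80100000, RT_MM_PAGE_BITS = 12 (4 KiB pages)
     * addr       = 0x80103A30
     * (addr - heap_start) >> 12 == 0x3A30 >> 12 == 3
     * btokup(addr) -> &slab->memusage[3]  (the page addr lives in)
     */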
@@ -194,8 +194,8 @@ struct rt_slab_page
 struct rt_slab
 {
     struct rt_memory            parent;         /**< inherit from rt_memory */
-    rt_ubase_t                  heap_start;     /**< memory start address */
-    rt_ubase_t                  heap_end;       /**< memory end address */
+    rt_uintptr_t                heap_start;     /**< memory start address */
+    rt_uintptr_t                heap_end;       /**< memory end address */
     struct rt_slab_memusage    *memusage;
     struct rt_slab_zone        *zone_array[RT_SLAB_NZONES];    /* linked list of zones NFree > 0 */
     struct rt_slab_zone        *zone_free;      /* whole zones that have become free */
@@ -261,7 +261,7 @@ void rt_slab_page_free(rt_slab_t m, void *addr, rt_size_t npages)
     struct rt_slab *slab = (struct rt_slab *)m;
 
     RT_ASSERT(addr != RT_NULL);
-    RT_ASSERT((rt_ubase_t)addr % RT_MM_PAGE_SIZE == 0);
+    RT_ASSERT((rt_uintptr_t)addr % RT_MM_PAGE_SIZE == 0);
     RT_ASSERT(npages != 0);
 
     n = (struct rt_slab_page *)addr;
@@ -324,18 +324,18 @@ static void rt_slab_page_init(struct rt_slab *slab, void *addr, rt_size_t npages)
 rt_slab_t rt_slab_init(const char *name, void *begin_addr, rt_size_t size)
 {
     rt_uint32_t limsize, npages;
-    rt_ubase_t start_addr, begin_align, end_align;
+    rt_uintptr_t start_addr, begin_align, end_align;
     struct rt_slab *slab;
 
-    slab = (struct rt_slab *)RT_ALIGN((rt_ubase_t)begin_addr, RT_ALIGN_SIZE);
-    start_addr = (rt_ubase_t)slab + sizeof(*slab);
+    slab = (struct rt_slab *)RT_ALIGN((rt_uintptr_t)begin_addr, RT_ALIGN_SIZE);
+    start_addr = (rt_uintptr_t)slab + sizeof(*slab);
     /* align begin and end addr to page */
-    begin_align = RT_ALIGN((rt_ubase_t)start_addr, RT_MM_PAGE_SIZE);
-    end_align = RT_ALIGN_DOWN((rt_ubase_t)begin_addr + size, RT_MM_PAGE_SIZE);
+    begin_align = RT_ALIGN((rt_uintptr_t)start_addr, RT_MM_PAGE_SIZE);
+    end_align = RT_ALIGN_DOWN((rt_uintptr_t)begin_addr + size, RT_MM_PAGE_SIZE);
     if (begin_align >= end_align)
     {
         rt_kprintf("slab init errr. wrong address[0x%x - 0x%x]\n",
-                   (rt_ubase_t)begin_addr, (rt_ubase_t)begin_addr + size);
+                   (rt_uintptr_t)begin_addr, (rt_uintptr_t)begin_addr + size);
         return RT_NULL;
     }
 
@@ -378,7 +378,7 @@ rt_slab_t rt_slab_init(const char *name, void *begin_addr, rt_size_t size)
     slab->memusage = rt_slab_page_alloc((rt_slab_t)(&slab->parent), limsize / RT_MM_PAGE_SIZE);
 
     LOG_D("slab->memusage 0x%x, size 0x%x",
-          (rt_ubase_t)slab->memusage, limsize);
+          (rt_uintptr_t)slab->memusage, limsize);
     return &slab->parent;
 }
 RTM_EXPORT(rt_slab_init);
@@ -411,7 +411,7 @@ RTM_EXPORT(rt_slab_detach);
 rt_inline int zoneindex(rt_size_t *bytes)
 {
     /* unsigned for shift opt */
-    rt_ubase_t n = (rt_ubase_t)(*bytes);
+    rt_uintptr_t n = (rt_uintptr_t)(*bytes);
 
     if (n < 128)
     {
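Note: zoneindex buckets a request size into one of the RT_SLAB_NZONES chunk classes; keeping n unsigned lets the compiler turn the divisions into shifts (hence the `/* unsigned for shift opt */` comment). A sketch of the classing idea for the small range; the exact breakpoints are in the elided body, so this is illustrative:

    /* Illustrative only: requests below 128 bytes round up to an
     * 8-byte-granular class. */
    rt_size_t n = 100;
    rt_size_t chunk = (n + 7) & ~7;   /* 104 */
    int zone = chunk / 8 - 1;         /* 12  */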
@@ -519,7 +519,7 @@ void *rt_slab_alloc(rt_slab_t m, rt_size_t size)
         LOG_D("alloc a large memory 0x%x, page cnt %d, kup %d",
               size,
               size >> RT_MM_PAGE_BITS,
-              ((rt_ubase_t)chunk - slab->heap_start) >> RT_MM_PAGE_BITS);
+              ((rt_uintptr_t)chunk - slab->heap_start) >> RT_MM_PAGE_BITS);
         /* mem stat */
         slab->parent.used += size;
         if (slab->parent.used > slab->parent.max)
@@ -605,7 +605,7 @@ void *rt_slab_alloc(rt_slab_t m, rt_size_t size)
         }
 
         LOG_D("alloc a new zone: 0x%x",
-              (rt_ubase_t)z);
+              (rt_uintptr_t)z);
 
         /* set message usage */
         for (off = 0, kup = btokup(z); off < slab->zone_page_cnt; off ++)
@@ -686,7 +686,7 @@ void *rt_slab_realloc(rt_slab_t m, void *ptr, rt_size_t size)
     * Get the original allocation's zone. If the new request winds up
     * using the same chunk size we do not have to do anything.
     */
-    kup = btokup((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK);
+    kup = btokup((rt_uintptr_t)ptr & ~RT_MM_PAGE_MASK);
     if (kup->type == PAGE_TYPE_LARGE)
     {
         rt_size_t osize;
@@ -701,7 +701,7 @@ void *rt_slab_realloc(rt_slab_t m, void *ptr, rt_size_t size)
     }
     else if (kup->type == PAGE_TYPE_SMALL)
     {
-        z = (struct rt_slab_zone *)(((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK) -
+        z = (struct rt_slab_zone *)(((rt_uintptr_t)ptr & ~RT_MM_PAGE_MASK) -
                                     kup->size * RT_MM_PAGE_SIZE);
         RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);
 
@@ -749,19 +749,19 @@ void rt_slab_free(rt_slab_t m, void *ptr)
     /* get memory usage */
 #if (DBG_LVL == DBG_LOG)
     {
-        rt_ubase_t addr = ((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK);
+        rt_uintptr_t addr = ((rt_uintptr_t)ptr & ~RT_MM_PAGE_MASK);
         LOG_D("free a memory 0x%x and align to 0x%x, kup index %d",
-              (rt_ubase_t)ptr,
-              (rt_ubase_t)addr,
-              ((rt_ubase_t)(addr) - slab->heap_start) >> RT_MM_PAGE_BITS);
+              (rt_uintptr_t)ptr,
+              (rt_uintptr_t)addr,
+              ((rt_uintptr_t)(addr) - slab->heap_start) >> RT_MM_PAGE_BITS);
     }
 #endif /* DBG_LVL == DBG_LOG */
 
-    kup = btokup((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK);
+    kup = btokup((rt_uintptr_t)ptr & ~RT_MM_PAGE_MASK);
     /* release large allocation */
     if (kup->type == PAGE_TYPE_LARGE)
     {
-        rt_ubase_t size;
+        rt_uintptr_t size;
 
         /* clear page counter */
         size = kup->size;
@@ -770,7 +770,7 @@ void rt_slab_free(rt_slab_t m, void *ptr)
         slab->parent.used -= size * RT_MM_PAGE_SIZE;
 
         LOG_D("free large memory block 0x%x, page count %d",
-              (rt_ubase_t)ptr, size);
+              (rt_uintptr_t)ptr, size);
 
         /* free this page */
         rt_slab_page_free(m, ptr, size);
@@ -779,7 +779,7 @@ void rt_slab_free(rt_slab_t m, void *ptr)
     }
 
     /* zone case. get out zone. */
-    z = (struct rt_slab_zone *)(((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK) -
+    z = (struct rt_slab_zone *)(((rt_uintptr_t)ptr & ~RT_MM_PAGE_MASK) -
                                 kup->size * RT_MM_PAGE_SIZE);
     RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);
 
@@ -811,7 +811,7 @@ void rt_slab_free(rt_slab_t m, void *ptr)
         struct rt_slab_zone **pz;
 
         LOG_D("free zone %#x, zoneindex %d",
-              (rt_ubase_t)z, z->z_zoneindex);
+              (rt_uintptr_t)z, z->z_zoneindex);
 
         /* remove zone from zone array list */
         for (pz = &slab->zone_array[z->z_zoneindex]; z != *pz; pz = &(*pz)->z_next)