feat: remove redundant code under virt64, c906

Changes:

- create a new folder named `common64` and move all of the common rv64 code
  into it
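
The resulting libcpu layout, roughly (a sketch; only the directories touched
by this commit are shown):

libcpu/risc-v/
├── common/      # code shared by every RISC-V port
├── common64/    # new: code shared by the rv64 ports in common64_arch
├── virt64/      # QEMU virt64 port, now only port-specific files
└── c906/        # T-Head C906 port, now only port-specific files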

Signed-off-by: Shell <smokewood@qq.com>
Shell 2024-09-12 11:14:12 +08:00 committed by Meco Man
parent 2d026a316a
commit 57d002b25e
55 changed files with 16 additions and 4614 deletions

View File

@@ -5,17 +5,16 @@ from building import *
 Import('rtconfig')
 
+common64_arch = ['virt64', 'c906']
 cwd = GetCurrentDir()
 group = []
 list = os.listdir(cwd)
 
 # add common code files
-if rtconfig.CPU == "virt64" :
-    group = group
-elif rtconfig.CPU == "c906" :
-    group = group
+if rtconfig.CPU in common64_arch :
+    group += SConscript(os.path.join('common64', 'SConscript'))
 else :
-    group = group + SConscript(os.path.join('common', 'SConscript'))
+    group += SConscript(os.path.join('common', 'SConscript'))
 
 # cpu porting code files
 if rtconfig.CPU == "c906":

View File

@@ -0,0 +1,12 @@
from building import *

cwd = GetCurrentDir()
src = Glob('*.c') + Glob('*.cpp') + Glob('*_gcc.S')
CPPPATH = [cwd]

if not GetDepend('ARCH_USING_ASID'):
    SrcRemove(src, ['asid.c'])

group = DefineGroup('libcpu', src, depend = [''], CPPPATH = CPPPATH)

Return('group')

View File

@@ -5,9 +5,6 @@ cwd = GetCurrentDir()
 src = Glob('*.c') + Glob('*.cpp') + Glob('*_gcc.S')
 CPPPATH = [cwd]
 
-if not GetDepend('ARCH_USING_ASID'):
-    SrcRemove(src, ['asid.c'])
-
 if not GetDepend('ARCH_RISCV_VECTOR'):
     SrcRemove(src, ['vector_gcc.S'])

View File

@@ -1,85 +0,0 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-07-12 RT-Thread first version.
*/
#define DBG_TAG "hw.asid"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <rtthread.h>
#include <board.h>
#include <cache.h>
#include <mm_aspace.h>
#include <mm_page.h>
#include <mmu.h>
#include <riscv_mmu.h>
#include <tlb.h>
static rt_uint8_t ASID_BITS = 0;
static rt_uint32_t next_asid;
static rt_uint64_t global_asid_generation;
#define ASID_MASK ((1 << ASID_BITS) - 1)
#define ASID_FIRST_GENERATION (1 << ASID_BITS)
#define MAX_ASID ASID_FIRST_GENERATION
void rt_hw_asid_init(void)
{
unsigned int satp_reg = read_csr(satp);
satp_reg |= (((rt_uint64_t)0xffff) << PPN_BITS);
write_csr(satp, satp_reg);
unsigned short valid_asid_bit = ((read_csr(satp) >> PPN_BITS) & 0xffff);
// The maximal value of ASIDLEN is 9 for Sv32, or 16 for Sv39, Sv48, and Sv57
for (unsigned i = 0; i < 16; i++)
{
if (!(valid_asid_bit & 0x1))
{
break;
}
valid_asid_bit >>= 1;
ASID_BITS++;
}
global_asid_generation = ASID_FIRST_GENERATION;
next_asid = 1;
}
static rt_uint64_t _asid_acquire(rt_aspace_t aspace)
{
if ((aspace->asid ^ global_asid_generation) >> ASID_BITS) // not same generation
{
if (next_asid != MAX_ASID)
{
aspace->asid = global_asid_generation | next_asid;
next_asid++;
}
else
{
// scroll to next generation
global_asid_generation += ASID_FIRST_GENERATION;
next_asid = 1;
rt_hw_tlb_invalidate_all_local();
aspace->asid = global_asid_generation | next_asid;
next_asid++;
}
}
return aspace->asid & ASID_MASK;
}
void rt_hw_asid_switch_pgtbl(struct rt_aspace *aspace, rt_ubase_t pgtbl)
{
rt_uint64_t asid = _asid_acquire(aspace);
write_csr(satp, (((size_t)SATP_MODE) << SATP_MODE_OFFSET) |
(asid << PPN_BITS) |
((rt_ubase_t)pgtbl >> PAGE_OFFSET_BIT));
asm volatile("sfence.vma x0,%0"::"r"(asid):"memory");
}
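
To see the generation check in _asid_acquire in isolation, here is a minimal
self-contained sketch (ASID_BITS is fixed at 16 for illustration; the real
code probes it from satp in rt_hw_asid_init):

#include <stdint.h>
#include <stdio.h>

#define ASID_BITS 16
#define ASID_FIRST_GENERATION (1ULL << ASID_BITS)

int main(void)
{
    /* the generation counter lives above ASID_BITS, the ASID below it */
    uint64_t generation = 2 * ASID_FIRST_GENERATION; /* current generation */
    uint64_t fresh = generation | 0x12;              /* allocated in this generation */
    uint64_t stale = ASID_FIRST_GENERATION | 0x12;   /* survived a rollover */

    /* non-zero high bits after the XOR mean the aspace needs a new ASID */
    printf("fresh needs realloc: %d\n", (fresh ^ generation) >> ASID_BITS != 0); /* 0 */
    printf("stale needs realloc: %d\n", (stale ^ generation) >> ASID_BITS != 0); /* 1 */
    return 0;
}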

View File

@@ -1,26 +0,0 @@
/*
* Copyright (c) 2006-2023 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-03-12 WangXiaoyao the first version
*/
#ifndef __ASM_GENERIC_H__
#define __ASM_GENERIC_H__
/* used to mark the start point that every task starts from */
#define START_POINT(funcname) \
.global funcname; \
.type funcname, %function; \
funcname: \
.cfi_sections .debug_frame, .eh_frame; \
.cfi_startproc; \
.cfi_undefined ra
#define START_POINT_END(name) \
.cfi_endproc; \
.size name, .-name;
#endif /* __ASM_GENERIC_H__ */

View File

@@ -1,127 +0,0 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-10-18 Shell Add backtrace support
*/
#define DBG_TAG "hw.backtrace"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <rtthread.h>
#include <mm_aspace.h>
#include "riscv_mmu.h"
#include "stack.h"
#define WORD sizeof(rt_base_t)
#define ARCH_CONTEXT_FETCH(pctx, id) (*(((unsigned long *)pctx) + (id)))
rt_inline rt_err_t _bt_kaddr(rt_ubase_t *fp, struct rt_hw_backtrace_frame *frame)
{
rt_err_t rc;
frame->fp = *(fp - 2);
frame->pc = *(fp - 1);
if ((rt_ubase_t)fp == frame->fp)
{
rc = -RT_ERROR;
}
else
{
rc = RT_EOK;
}
return rc;
}
#ifdef RT_USING_SMART
#include <lwp_arch.h>
#include <lwp_user_mm.h>
rt_inline rt_err_t _bt_uaddr(rt_lwp_t lwp, rt_ubase_t *fp, struct rt_hw_backtrace_frame *frame)
{
rt_err_t rc;
if (lwp_data_get(lwp, &frame->fp, fp - 2, WORD) != WORD)
{
rc = -RT_EFAULT;
}
else if (lwp_data_get(lwp, &frame->pc, fp - 1, WORD) != WORD)
{
rc = -RT_EFAULT;
}
else if ((rt_ubase_t)fp == frame->fp)
{
rc = -RT_ERROR;
}
else
{
frame->pc -= 0;
rc = RT_EOK;
}
return rc;
}
#endif /* RT_USING_SMART */
rt_err_t rt_hw_backtrace_frame_unwind(rt_thread_t thread, struct rt_hw_backtrace_frame *frame)
{
rt_err_t rc = -RT_ERROR;
rt_uintptr_t *fp = (rt_uintptr_t *)frame->fp;
if (fp && !((long)fp & 0x7))
{
#ifdef RT_USING_SMART
if (thread->lwp)
{
void *lwp = thread->lwp;
void *this_lwp = lwp_self();
if (this_lwp == lwp && rt_hw_mmu_v2p(((rt_lwp_t)lwp)->aspace, fp) != ARCH_MAP_FAILED)
{
rc = _bt_kaddr(fp, frame);
}
else if (lwp_user_accessible_ext(lwp, (void *)fp, WORD))
{
rc = _bt_uaddr(lwp, fp, frame);
}
else
{
rc = -RT_EFAULT;
}
}
else
#endif
if ((rt_kmem_v2p(fp) != ARCH_MAP_FAILED))
{
rc = _bt_kaddr(fp, frame);
}
else
{
rc = -RT_EINVAL;
}
}
else
{
rc = -RT_EFAULT;
}
return rc;
}
rt_err_t rt_hw_backtrace_frame_get(rt_thread_t thread, struct rt_hw_backtrace_frame *frame)
{
rt_err_t rc;
if (!thread || !frame)
{
rc = -RT_EINVAL;
}
else
{
rt_hw_switch_frame_t sframe = thread->sp;
frame->pc = sframe->regs[RT_HW_SWITCH_CONTEXT_RA];
frame->fp = sframe->regs[RT_HW_SWITCH_CONTEXT_S0];
rc = RT_EOK;
}
return rc;
}
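
As a usage sketch (the caller below is hypothetical, not part of this
commit), the two functions compose into a simple unwind loop:

/* hypothetical helper: walk at most 32 frames of the given thread */
void dump_backtrace(rt_thread_t thread)
{
    struct rt_hw_backtrace_frame frame;

    if (rt_hw_backtrace_frame_get(thread, &frame) != RT_EOK)
        return;

    for (int depth = 0; depth < 32; depth++)
    {
        rt_kprintf("#%d pc=%p fp=%p\n", depth, (void *)frame.pc, (void *)frame.fp);
        if (rt_hw_backtrace_frame_unwind(thread, &frame) != RT_EOK)
            break; /* first frame reached, or fp left the valid range */
    }
}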

View File

@@ -1,115 +0,0 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018/10/28 Bernard The unify RISC-V porting implementation
* 2018/12/27 Jesven Add SMP support
* 2021/02/02 lizhirui Add userspace support
* 2022/10/22 Shell Support User mode RVV;
* Trimming process switch context
*/
#include "cpuport.h"
#include "stackframe.h"
#define _REG_IDX(name) RT_HW_SWITCH_CONTEXT_##name
#define REG_IDX(name) _REG_IDX(name)
.macro SAVE_REG reg, index
STORE \reg, \index*REGBYTES(sp)
.endm
.macro LOAD_REG reg, index
LOAD \reg, \index*REGBYTES(sp)
.endm
.macro RESERVE_CONTEXT
addi sp, sp, -(RT_HW_SWITCH_CONTEXT_SIZE * REGBYTES)
SAVE_REG tp, REG_IDX(TP)
SAVE_REG ra, REG_IDX(RA)
SAVE_REG s0, REG_IDX(S0)
SAVE_REG s1, REG_IDX(S1)
SAVE_REG s2, REG_IDX(S2)
SAVE_REG s3, REG_IDX(S3)
SAVE_REG s4, REG_IDX(S4)
SAVE_REG s5, REG_IDX(S5)
SAVE_REG s6, REG_IDX(S6)
SAVE_REG s7, REG_IDX(S7)
SAVE_REG s8, REG_IDX(S8)
SAVE_REG s9, REG_IDX(S9)
SAVE_REG s10, REG_IDX(S10)
SAVE_REG s11, REG_IDX(S11)
csrr s11, sstatus
li s10, (SSTATUS_SPP)
or s11, s11, s10
SAVE_REG s11, REG_IDX(SSTATUS)
.endm
.macro RESTORE_CONTEXT
LOAD_REG s11, REG_IDX(SSTATUS)
csrw sstatus, s11
LOAD_REG s11, REG_IDX(S11)
LOAD_REG s10, REG_IDX(S10)
LOAD_REG s9, REG_IDX(S9)
LOAD_REG s8, REG_IDX(S8)
LOAD_REG s7, REG_IDX(S7)
LOAD_REG s6, REG_IDX(S6)
LOAD_REG s5, REG_IDX(S5)
LOAD_REG s4, REG_IDX(S4)
LOAD_REG s3, REG_IDX(S3)
LOAD_REG s2, REG_IDX(S2)
LOAD_REG s1, REG_IDX(S1)
LOAD_REG s0, REG_IDX(S0)
LOAD_REG ra, REG_IDX(RA)
LOAD_REG tp, REG_IDX(TP)
addi sp, sp, RT_HW_SWITCH_CONTEXT_SIZE * REGBYTES
csrw sepc, ra
.endm
/*
* void rt_hw_context_switch_to(rt_ubase_t to);
*
* a0 --> to SP pointer
*/
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
LOAD sp, (a0)
jal rt_thread_self
mv s1, a0
#ifdef RT_USING_SMART
jal lwp_aspace_switch
#endif
RESTORE_CONTEXT
sret
/*
* void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to);
*
* a0 --> from SP pointer
* a1 --> to SP pointer
*
* It should only be used on local interrupt disable
*/
.globl rt_hw_context_switch
rt_hw_context_switch:
RESERVE_CONTEXT
STORE sp, (a0)
// restore to thread SP
LOAD sp, (a1)
// restore Address Space
jal rt_thread_self
mv s1, a0
#ifdef RT_USING_SMART
jal lwp_aspace_switch
#endif
RESTORE_CONTEXT
sret

View File

@@ -1,133 +0,0 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018/10/28 Bernard The unify RISC-V porting code.
* 2021-02-11 lizhirui add gp support
* 2021-11-19 JasonHu add fpu support
*/
#include <rthw.h>
#include <rtthread.h>
#include "cpuport.h"
#include "stack.h"
#include <sbi.h>
#include <encoding.h>
#ifdef ARCH_RISCV_FPU
#define K_SSTATUS_DEFAULT (SSTATUS_SPP | SSTATUS_SPIE | SSTATUS_SUM | SSTATUS_FS)
#else
#define K_SSTATUS_DEFAULT (SSTATUS_SPP | SSTATUS_SPIE | SSTATUS_SUM)
#endif
#ifdef RT_USING_SMART
#include <lwp_arch.h>
#endif
/**
* @brief "from" thread used by interrupt context switch
*
*/
volatile rt_ubase_t rt_interrupt_from_thread = 0;
/**
* @brief "to" thread used by interrupt context switch
*
*/
volatile rt_ubase_t rt_interrupt_to_thread = 0;
/**
* @brief flag to indicate whether the context switch happens in interrupt
*
*/
volatile rt_ubase_t rt_thread_switch_interrupt_flag = 0;
void *_rt_hw_stack_init(rt_ubase_t *sp, rt_ubase_t ra, rt_ubase_t sstatus)
{
rt_hw_switch_frame_t frame = (rt_hw_switch_frame_t)
((rt_ubase_t)sp - sizeof(struct rt_hw_switch_frame));
rt_memset(frame, 0, sizeof(struct rt_hw_switch_frame));
frame->regs[RT_HW_SWITCH_CONTEXT_RA] = ra;
frame->regs[RT_HW_SWITCH_CONTEXT_SSTATUS] = sstatus;
return (void *)frame;
}
int rt_hw_cpu_id(void)
{
return 0;
}
/**
* This function will initialize the thread stack. We assume that when
* the scheduler restores this new thread, the restored context will
* jump to the thread entry.
*
* s0-s11, ra, sstatus, a0
* @param tentry the entry of thread
* @param parameter the parameter of entry
* @param stack_addr the beginning stack address
* @param texit the function will be called when thread exit
*
* @return stack address
*/
rt_uint8_t *rt_hw_stack_init(void *tentry,
void *parameter,
rt_uint8_t *stack_addr,
void *texit)
{
rt_ubase_t *sp = (rt_ubase_t *)stack_addr;
// we use a strict alignment requirement for Q extension
sp = (rt_ubase_t *)RT_ALIGN_DOWN((rt_ubase_t)sp, 16);
(*--sp) = (rt_ubase_t)tentry;
(*--sp) = (rt_ubase_t)parameter;
(*--sp) = (rt_ubase_t)texit;
--sp; /* alignment */
/* compatible to RESTORE_CONTEXT */
extern void _rt_thread_entry(void);
return (rt_uint8_t *)_rt_hw_stack_init(sp, (rt_ubase_t)_rt_thread_entry, K_SSTATUS_DEFAULT);
}
/*
* #ifdef RT_USING_SMP
* void rt_hw_context_switch_interrupt(void *context, rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread);
* #else
* void rt_hw_context_switch_interrupt(rt_ubase_t from, rt_ubase_t to);
* #endif
*/
#ifndef RT_USING_SMP
void rt_hw_context_switch_interrupt(rt_ubase_t from, rt_ubase_t to, rt_thread_t from_thread, rt_thread_t to_thread)
{
if (rt_thread_switch_interrupt_flag == 0)
rt_interrupt_from_thread = from;
rt_interrupt_to_thread = to;
rt_thread_switch_interrupt_flag = 1;
return;
}
#endif /* end of RT_USING_SMP */
/** shutdown CPU */
void rt_hw_cpu_shutdown(void)
{
rt_uint32_t level;
rt_kprintf("shutdown...\n");
level = rt_hw_interrupt_disable();
sbi_shutdown();
while (1)
;
}
void rt_hw_set_process_id(int pid)
{
// TODO
}

View File

@@ -1,52 +0,0 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-10-03 Bernard The first version
*/
#ifndef CPUPORT_H__
#define CPUPORT_H__
#include <rtconfig.h>
#include <opcode.h>
#ifdef RT_USING_SMP
typedef union {
unsigned long slock;
struct __arch_tickets {
unsigned short owner;
unsigned short next;
} tickets;
} rt_hw_spinlock_t;
#endif
#ifndef __ASSEMBLY__
#include <rtdef.h>
rt_inline void rt_hw_dsb(void)
{
__asm__ volatile("fence":::"memory");
}
rt_inline void rt_hw_dmb(void)
{
__asm__ volatile("fence":::"memory");
}
rt_inline void rt_hw_isb(void)
{
__asm__ volatile(OPC_FENCE_I:::"memory");
}
int rt_hw_cpu_id(void);
#endif
#endif
#ifdef RISCV_U_MODE
#define RISCV_USER_ENTRY 0xFFFFFFE000000000ULL
#endif
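
A short usage sketch of the barriers above (the producer/flag pattern is
illustrative, not from this commit):

/* hypothetical producer publishing data to an observer on another hart */
static volatile int shared_data;
static volatile int shared_ready;

void publish(int value)
{
    shared_data = value;
    rt_hw_dmb();      /* order the data store before the flag store */
    shared_ready = 1;
}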

View File

@@ -1,25 +0,0 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-19 RT-Thread the first version
*/
#include "cpuport.h"
#include "stackframe.h"
#include "asm-generic.h"
START_POINT(_rt_thread_entry)
LOAD ra, REGBYTES(sp) /* thread exit */
addi sp, sp, 2 * REGBYTES
LOAD a0, (sp) /* parameter */
LOAD t0, REGBYTES(sp) /* tentry */
addi sp, sp, 2 * REGBYTES
mv s1, ra
jalr t0
jalr s1
j . /* never here */
START_POINT_END(_rt_thread_entry)
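
Reading cpuport.c together with this stub: a sketch of the initial stack
that rt_hw_stack_init builds and _rt_thread_entry consumes (layout inferred
from the code above):

high addr ->  tentry      /* LOADed into t0, entered via the first jalr */
              parameter   /* LOADed into a0 */
              texit       /* LOADed into ra, kept in s1 for the final jalr */
              (padding)   /* 16-byte alignment slot */
low addr  ->  struct rt_hw_switch_frame  /* ra = _rt_thread_entry,
                                            sstatus = K_SSTATUS_DEFAULT */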

File diff suppressed because it is too large

View File

@@ -1,73 +0,0 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-10 RT-Thread the first version
*/
#ifndef __EXT_CONTEXT_H__
#define __EXT_CONTEXT_H__
#include <rtconfig.h>
#ifdef ARCH_RISCV_FPU
/* 32 fpu register */
#define CTX_FPU_REG_NR 32
#else
#define CTX_FPU_REG_NR 0
#endif /* ARCH_RISCV_FPU */
#ifdef __ASSEMBLY__
/**
* ==================================
* RISC-V D ISA (Floating)
* ==================================
*/
#ifdef ARCH_RISCV_FPU
#define FPU_CTX_F0_OFF (REGBYTES * 0) /* offsetof(fpu_context_t, fpustatus.f[0]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F1_OFF (REGBYTES * 1) /* offsetof(fpu_context_t, fpustatus.f[1]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F2_OFF (REGBYTES * 2) /* offsetof(fpu_context_t, fpustatus.f[2]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F3_OFF (REGBYTES * 3) /* offsetof(fpu_context_t, fpustatus.f[3]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F4_OFF (REGBYTES * 4) /* offsetof(fpu_context_t, fpustatus.f[4]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F5_OFF (REGBYTES * 5) /* offsetof(fpu_context_t, fpustatus.f[5]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F6_OFF (REGBYTES * 6) /* offsetof(fpu_context_t, fpustatus.f[6]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F7_OFF (REGBYTES * 7) /* offsetof(fpu_context_t, fpustatus.f[7]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F8_OFF (REGBYTES * 8) /* offsetof(fpu_context_t, fpustatus.f[8]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F9_OFF (REGBYTES * 9) /* offsetof(fpu_context_t, fpustatus.f[9]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F10_OFF (REGBYTES * 10) /* offsetof(fpu_context_t, fpustatus.f[10]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F11_OFF (REGBYTES * 11) /* offsetof(fpu_context_t, fpustatus.f[11]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F12_OFF (REGBYTES * 12) /* offsetof(fpu_context_t, fpustatus.f[12]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F13_OFF (REGBYTES * 13) /* offsetof(fpu_context_t, fpustatus.f[13]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F14_OFF (REGBYTES * 14) /* offsetof(fpu_context_t, fpustatus.f[14]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F15_OFF (REGBYTES * 15) /* offsetof(fpu_context_t, fpustatus.f[15]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F16_OFF (REGBYTES * 16) /* offsetof(fpu_context_t, fpustatus.f[16]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F17_OFF (REGBYTES * 17) /* offsetof(fpu_context_t, fpustatus.f[17]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F18_OFF (REGBYTES * 18) /* offsetof(fpu_context_t, fpustatus.f[18]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F19_OFF (REGBYTES * 19) /* offsetof(fpu_context_t, fpustatus.f[19]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F20_OFF (REGBYTES * 20) /* offsetof(fpu_context_t, fpustatus.f[20]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F21_OFF (REGBYTES * 21) /* offsetof(fpu_context_t, fpustatus.f[21]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F22_OFF (REGBYTES * 22) /* offsetof(fpu_context_t, fpustatus.f[22]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F23_OFF (REGBYTES * 23) /* offsetof(fpu_context_t, fpustatus.f[23]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F24_OFF (REGBYTES * 24) /* offsetof(fpu_context_t, fpustatus.f[24]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F25_OFF (REGBYTES * 25) /* offsetof(fpu_context_t, fpustatus.f[25]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F26_OFF (REGBYTES * 26) /* offsetof(fpu_context_t, fpustatus.f[26]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F27_OFF (REGBYTES * 27) /* offsetof(fpu_context_t, fpustatus.f[27]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F28_OFF (REGBYTES * 28) /* offsetof(fpu_context_t, fpustatus.f[28]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F29_OFF (REGBYTES * 29) /* offsetof(fpu_context_t, fpustatus.f[29]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F30_OFF (REGBYTES * 30) /* offsetof(fpu_context_t, fpustatus.f[30]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#define FPU_CTX_F31_OFF (REGBYTES * 31) /* offsetof(fpu_context_t, fpustatus.f[31]) - offsetof(fpu_context_t, fpustatus.f[0]) */
#endif /* ARCH_RISCV_FPU */
#endif /* __ASSEMBLY__ */
#ifdef ARCH_RISCV_VECTOR
#include "rvv_context.h"
#else /* !ARCH_RISCV_VECTOR */
#define CTX_VECTOR_REG_NR 0
#endif /* ARCH_RISCV_VECTOR */
#endif /* __EXT_CONTEXT_H__ */

View File

@@ -1,97 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018/10/02 Bernard The first version
* 2018/12/27 Jesven Add SMP schedule
* 2021/02/02 lizhirui Add userspace support
* 2021/12/24 JasonHu Add user setting save/restore
* 2022/10/22 Shell Support kernel mode RVV;
* Rewrite trap handling routine
*/
#include "cpuport.h"
#include "encoding.h"
#include "stackframe.h"
.align 2
.global trap_entry
.global debug_check_sp
trap_entry:
// distinguish exceptions from kernel or user mode
csrrw sp, sscratch, sp
bnez sp, _save_context
// BE REALLY careful with sscratch:
// if it's wrong, we could loop here forever,
// or access random memory and only see things go totally
// wrong much later, without ever knowing why
_from_kernel:
csrr sp, sscratch
j _save_context
_save_context:
SAVE_ALL
// clear sscratch to say 'now in kernel mode'
csrw sscratch, zero
RESTORE_SYS_GP
// now we are ready to enter the interrupt / exception handler
_distinguish_syscall:
csrr t0, scause
#ifdef RT_USING_SMART
// TODO swap 8 with config macro name
li t1, 8
beq t0, t1, syscall_entry
// syscall never return here
#endif
_handle_interrupt_and_exception:
mv a0, t0
csrrc a1, stval, zero
csrr a2, sepc
// sp as exception frame pointer
mv a3, sp
call handle_trap
_interrupt_exit:
la s0, rt_thread_switch_interrupt_flag
lw s2, 0(s0)
beqz s2, _resume_execution
sw zero, 0(s0)
_context_switch:
la t0, rt_interrupt_from_thread
LOAD a0, 0(t0)
la t0, rt_interrupt_to_thread
LOAD a1, 0(t0)
csrr t0, sstatus
andi t0, t0, ~SSTATUS_SPIE
csrw sstatus, t0
jal rt_hw_context_switch
_resume_execution:
#ifdef RT_USING_SMART
LOAD t0, FRAME_OFF_SSTATUS(sp)
andi t0, t0, SSTATUS_SPP
beqz t0, arch_ret_to_user
#endif
_resume_kernel:
RESTORE_ALL
csrw sscratch, zero
sret
.global rt_hw_interrupt_enable
rt_hw_interrupt_enable:
csrs sstatus, a0 /* restore to old csr */
jr ra
.global rt_hw_interrupt_disable
rt_hw_interrupt_disable:
csrrci a0, sstatus, 2 /* clear SIE */
jr ra

View File

@@ -1,52 +0,0 @@
/*
* Copyright (c) 2019-2020, Xim
*
* SPDX-License-Identifier: Apache-2.0
*
*/
#ifndef ARCH_IO_H
#define ARCH_IO_H
#include <rtthread.h>
#define RISCV_FENCE(p, s) \
__asm__ __volatile__ ("fence " #p "," #s : : : "memory")
/* These barriers need to enforce ordering on both devices or memory. */
#define mb() RISCV_FENCE(iorw,iorw)
#define rmb() RISCV_FENCE(ir,ir)
#define wmb() RISCV_FENCE(ow,ow)
#define __arch_getl(a) (*(unsigned int *)(a))
#define __arch_putl(v, a) (*(unsigned int *)(a) = (v))
#define dmb() mb()
#define __iormb() rmb()
#define __iowmb() wmb()
static inline void writel(uint32_t val, volatile void *addr)
{
__iowmb();
__arch_putl(val, addr);
}
static inline uint32_t readl(const volatile void *addr)
{
uint32_t val;
val = __arch_getl(addr);
__iormb();
return val;
}
static inline void write_reg(
uint32_t val, volatile void *addr, unsigned offset)
{
writel(val, (void *)((rt_size_t)addr + offset));
}
static inline uint32_t read_reg(
const volatile void *addr, unsigned offset)
{
return readl((void *)((rt_size_t)addr + offset));
}
#endif // ARCH_IO_H
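
A usage sketch of the helpers above (the device base and register offset
are hypothetical):

/* hypothetical UART mapped at 0x10000000 with a TX register at offset 0 */
#define UART0_BASE ((volatile void *)0x10000000UL)
#define UART_TX_OFF 0x0U

static void uart_putc(char c)
{
    /* write_reg() issues wmb() first, so earlier memory writes are
       visible to the device before this store lands */
    write_reg((uint32_t)c, UART0_BASE, UART_TX_OFF);
}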

View File

@@ -1,595 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-01-30 lizhirui first version
* 2022-12-13 WangXiaoyao Port to new mm
* 2023-10-12 Shell Add permission control API
*/
#include <rtthread.h>
#include <stddef.h>
#include <stdint.h>
#define DBG_TAG "hw.mmu"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <board.h>
#include <cache.h>
#include <mm_aspace.h>
#include <mm_page.h>
#include <mmu.h>
#include <riscv_mmu.h>
#include <tlb.h>
#ifdef RT_USING_SMART
#include <board.h>
#include <ioremap.h>
#include <lwp_user_mm.h>
#endif
#ifndef RT_USING_SMART
#define USER_VADDR_START 0
#endif
static size_t _unmap_area(struct rt_aspace *aspace, void *v_addr, size_t size);
static void *current_mmu_table = RT_NULL;
volatile __attribute__((aligned(4 * 1024)))
rt_ubase_t MMUTable[__SIZE(VPN2_BIT)];
#ifdef ARCH_USING_ASID
void rt_hw_aspace_switch(rt_aspace_t aspace)
{
uintptr_t page_table = (uintptr_t)rt_kmem_v2p(aspace->page_table);
current_mmu_table = aspace->page_table;
rt_hw_asid_switch_pgtbl(aspace, page_table);
}
#else /* !ARCH_USING_ASID */
void rt_hw_aspace_switch(rt_aspace_t aspace)
{
uintptr_t page_table = (uintptr_t)rt_kmem_v2p(aspace->page_table);
current_mmu_table = aspace->page_table;
write_csr(satp, (((size_t)SATP_MODE) << SATP_MODE_OFFSET) |
((rt_ubase_t)page_table >> PAGE_OFFSET_BIT));
rt_hw_tlb_invalidate_all_local();
}
void rt_hw_asid_init(void)
{
}
#endif /* ARCH_USING_ASID */
void *rt_hw_mmu_tbl_get()
{
return current_mmu_table;
}
static int _map_one_page(struct rt_aspace *aspace, void *va, void *pa,
size_t attr)
{
rt_ubase_t l1_off, l2_off, l3_off;
rt_ubase_t *mmu_l1, *mmu_l2, *mmu_l3;
l1_off = GET_L1((size_t)va);
l2_off = GET_L2((size_t)va);
l3_off = GET_L3((size_t)va);
mmu_l1 = ((rt_ubase_t *)aspace->page_table) + l1_off;
if (PTE_USED(*mmu_l1))
{
mmu_l2 = (rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), PV_OFFSET);
}
else
{
mmu_l2 = (rt_ubase_t *)rt_pages_alloc(0);
if (mmu_l2)
{
rt_memset(mmu_l2, 0, PAGE_SIZE);
rt_hw_cpu_dcache_clean(mmu_l2, PAGE_SIZE);
*mmu_l1 = COMBINEPTE((rt_ubase_t)VPN_TO_PPN(mmu_l2, PV_OFFSET),
PAGE_DEFAULT_ATTR_NEXT);
rt_hw_cpu_dcache_clean(mmu_l1, sizeof(*mmu_l1));
}
else
{
return -1;
}
}
if (PTE_USED(*(mmu_l2 + l2_off)))
{
RT_ASSERT(!PAGE_IS_LEAF(*(mmu_l2 + l2_off)));
mmu_l3 =
(rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)), PV_OFFSET);
}
else
{
mmu_l3 = (rt_ubase_t *)rt_pages_alloc(0);
if (mmu_l3)
{
rt_memset(mmu_l3, 0, PAGE_SIZE);
rt_hw_cpu_dcache_clean(mmu_l3, PAGE_SIZE);
*(mmu_l2 + l2_off) =
COMBINEPTE((rt_ubase_t)VPN_TO_PPN(mmu_l3, PV_OFFSET),
PAGE_DEFAULT_ATTR_NEXT);
rt_hw_cpu_dcache_clean(mmu_l2, sizeof(*mmu_l2));
// declares a reference to parent page table
rt_page_ref_inc((void *)mmu_l2, 0);
}
else
{
return -1;
}
}
RT_ASSERT(!PTE_USED(*(mmu_l3 + l3_off)));
// declares a reference to parent page table
rt_page_ref_inc((void *)mmu_l3, 0);
*(mmu_l3 + l3_off) = COMBINEPTE((rt_ubase_t)pa, attr);
rt_hw_cpu_dcache_clean(mmu_l3 + l3_off, sizeof(*(mmu_l3 + l3_off)));
return 0;
}
/** rt_hw_mmu_map will never override an existing page table entry */
void *rt_hw_mmu_map(struct rt_aspace *aspace, void *v_addr, void *p_addr,
size_t size, size_t attr)
{
int ret = -1;
void *unmap_va = v_addr;
size_t npages = size >> ARCH_PAGE_SHIFT;
// TODO trying with HUGEPAGE here
while (npages--)
{
MM_PGTBL_LOCK(aspace);
ret = _map_one_page(aspace, v_addr, p_addr, attr);
MM_PGTBL_UNLOCK(aspace);
if (ret != 0)
{
/* error, undo map */
while (unmap_va != v_addr)
{
MM_PGTBL_LOCK(aspace);
_unmap_area(aspace, unmap_va, ARCH_PAGE_SIZE);
MM_PGTBL_UNLOCK(aspace);
unmap_va += ARCH_PAGE_SIZE;
}
break;
}
v_addr += ARCH_PAGE_SIZE;
p_addr += ARCH_PAGE_SIZE;
}
if (ret == 0)
{
return unmap_va;
}
return NULL;
}
static void _unmap_pte(rt_ubase_t *pentry, rt_ubase_t *lvl_entry[], int level)
{
int loop_flag = 1;
while (loop_flag)
{
loop_flag = 0;
*pentry = 0;
rt_hw_cpu_dcache_clean(pentry, sizeof(*pentry));
// we don't handle level 0, which is maintained by caller
if (level > 0)
{
void *page = (void *)((rt_ubase_t)pentry & ~ARCH_PAGE_MASK);
// decrease reference from child page to parent
rt_pages_free(page, 0);
int free = rt_page_ref_get(page, 0);
if (free == 1)
{
rt_pages_free(page, 0);
pentry = lvl_entry[--level];
loop_flag = 1;
}
}
}
}
static size_t _unmap_area(struct rt_aspace *aspace, void *v_addr, size_t size)
{
rt_ubase_t loop_va = __UMASKVALUE((rt_ubase_t)v_addr, PAGE_OFFSET_MASK);
size_t unmapped = 0;
int i = 0;
rt_ubase_t lvl_off[3];
rt_ubase_t *lvl_entry[3];
lvl_off[0] = (rt_ubase_t)GET_L1(loop_va);
lvl_off[1] = (rt_ubase_t)GET_L2(loop_va);
lvl_off[2] = (rt_ubase_t)GET_L3(loop_va);
unmapped = 1 << (ARCH_PAGE_SHIFT + ARCH_INDEX_WIDTH * 2ul);
rt_ubase_t *pentry;
lvl_entry[i] = ((rt_ubase_t *)aspace->page_table + lvl_off[i]);
pentry = lvl_entry[i];
// find leaf page table entry
while (PTE_USED(*pentry) && !PAGE_IS_LEAF(*pentry))
{
i += 1;
lvl_entry[i] = ((rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*pentry), PV_OFFSET) +
lvl_off[i]);
pentry = lvl_entry[i];
unmapped >>= ARCH_INDEX_WIDTH;
}
// clear the leaf PTE and release page-table pages that become empty
if (PTE_USED(*pentry))
{
_unmap_pte(pentry, lvl_entry, i);
}
return unmapped;
}
/** unmap is different from map in that it can handle multiple pages */
void rt_hw_mmu_unmap(struct rt_aspace *aspace, void *v_addr, size_t size)
{
// caller guarantee that v_addr & size are page aligned
if (!aspace->page_table)
{
return;
}
size_t unmapped = 0;
while (size > 0)
{
MM_PGTBL_LOCK(aspace);
unmapped = _unmap_area(aspace, v_addr, size);
MM_PGTBL_UNLOCK(aspace);
// when unmapped == 0, the region does not exist in the pgtbl
if (!unmapped || unmapped > size) break;
size -= unmapped;
v_addr += unmapped;
}
}
#ifdef RT_USING_SMART
static inline void _init_region(void *vaddr, size_t size)
{
rt_ioremap_start = vaddr;
rt_ioremap_size = size;
rt_mpr_start = rt_ioremap_start - rt_mpr_size;
LOG_D("rt_ioremap_start: %p, rt_mpr_start: %p", rt_ioremap_start,
rt_mpr_start);
}
#else
static inline void _init_region(void *vaddr, size_t size)
{
rt_mpr_start = vaddr - rt_mpr_size;
}
#endif
#if defined(RT_USING_SMART) && defined(ARCH_REMAP_KERNEL)
#define KERN_SPACE_START ((void *)KERNEL_VADDR_START)
#define KERN_SPACE_SIZE (0xfffffffffffff000UL - KERNEL_VADDR_START + 0x1000)
#else
#define KERN_SPACE_START ((void *)0x1000)
#define KERN_SPACE_SIZE ((size_t)USER_VADDR_START - 0x1000)
#endif
int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_ubase_t size,
rt_ubase_t *vtable, rt_ubase_t pv_off)
{
size_t l1_off, va_s, va_e;
rt_base_t level;
if ((!aspace) || (!vtable))
{
return -1;
}
va_s = (rt_ubase_t)v_address;
va_e = ((rt_ubase_t)v_address) + size - 1;
if (va_e < va_s)
{
return -1;
}
// convert address to PPN2 index
va_s = GET_L1(va_s);
va_e = GET_L1(va_e);
if (va_s == 0)
{
return -1;
}
// vtable initialization check
for (l1_off = va_s; l1_off <= va_e; l1_off++)
{
size_t v = vtable[l1_off];
if (v)
{
return -1;
}
}
rt_aspace_init(&rt_kernel_space, KERN_SPACE_START, KERN_SPACE_SIZE, vtable);
_init_region(v_address, size);
return 0;
}
const static int max_level =
(ARCH_VADDR_WIDTH - ARCH_PAGE_SHIFT) / ARCH_INDEX_WIDTH;
static inline uintptr_t _get_level_size(int level)
{
return 1ul << (ARCH_PAGE_SHIFT + (max_level - level) * ARCH_INDEX_WIDTH);
}
static rt_ubase_t *_query(struct rt_aspace *aspace, void *vaddr, int *level)
{
rt_ubase_t l1_off, l2_off, l3_off;
rt_ubase_t *mmu_l1, *mmu_l2, *mmu_l3;
rt_ubase_t pa;
l1_off = GET_L1((rt_uintptr_t)vaddr);
l2_off = GET_L2((rt_uintptr_t)vaddr);
l3_off = GET_L3((rt_uintptr_t)vaddr);
if (!aspace)
{
LOG_W("%s: no aspace", __func__);
return RT_NULL;
}
mmu_l1 = ((rt_ubase_t *)aspace->page_table) + l1_off;
if (PTE_USED(*mmu_l1))
{
if (*mmu_l1 & PTE_XWR_MASK)
{
*level = 1;
return mmu_l1;
}
mmu_l2 = (rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), PV_OFFSET);
if (PTE_USED(*(mmu_l2 + l2_off)))
{
if (*(mmu_l2 + l2_off) & PTE_XWR_MASK)
{
*level = 2;
return mmu_l2 + l2_off;
}
mmu_l3 = (rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)),
PV_OFFSET);
if (PTE_USED(*(mmu_l3 + l3_off)))
{
*level = 3;
return mmu_l3 + l3_off;
}
}
}
return RT_NULL;
}
void *rt_hw_mmu_v2p(struct rt_aspace *aspace, void *vaddr)
{
int level;
rt_ubase_t *pte = _query(aspace, vaddr, &level);
uintptr_t paddr;
if (pte)
{
paddr = GET_PADDR(*pte);
paddr |= ((intptr_t)vaddr & (_get_level_size(level) - 1));
}
else
{
LOG_D("%s: failed at %p", __func__, vaddr);
paddr = (uintptr_t)ARCH_MAP_FAILED;
}
return (void *)paddr;
}
static int _noncache(rt_base_t *pte)
{
return 0;
}
static int _cache(rt_base_t *pte)
{
return 0;
}
static int (*control_handler[MMU_CNTL_DUMMY_END])(rt_base_t *pte) = {
[MMU_CNTL_CACHE] = _cache,
[MMU_CNTL_NONCACHE] = _noncache,
};
int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
enum rt_mmu_cntl cmd)
{
int level;
int err = -RT_EINVAL;
void *vend = vaddr + size;
int (*handler)(rt_base_t *pte);
if (cmd >= 0 && cmd < MMU_CNTL_DUMMY_END)
{
handler = control_handler[cmd];
while (vaddr < vend)
{
rt_base_t *pte = _query(aspace, vaddr, &level);
void *range_end = vaddr + _get_level_size(level);
RT_ASSERT(range_end <= vend);
if (pte)
{
err = handler(pte);
RT_ASSERT(err == RT_EOK);
}
vaddr = range_end;
}
}
else
{
err = -RT_ENOSYS;
}
return err;
}
/**
* @brief Set up the page table for kernel space. It is a fixed map:
* none of the mappings can be changed after initialization.
*
* Memory regions in struct mem_desc must be page aligned,
* otherwise the mapping fails and no error is reported.
*
* @param aspace
* @param mdesc
* @param desc_nr
*/
void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
{
void *err;
for (size_t i = 0; i < desc_nr; i++)
{
size_t attr;
switch (mdesc->attr)
{
case NORMAL_MEM:
attr = MMU_MAP_K_RWCB;
break;
case NORMAL_NOCACHE_MEM:
attr = MMU_MAP_K_RWCB;
break;
case DEVICE_MEM:
attr = MMU_MAP_K_DEVICE;
break;
default:
attr = MMU_MAP_K_DEVICE;
}
struct rt_mm_va_hint hint = {
.flags = MMF_MAP_FIXED,
.limit_start = aspace->start,
.limit_range_size = aspace->size,
.map_size = mdesc->vaddr_end - mdesc->vaddr_start + 1,
.prefer = (void *)mdesc->vaddr_start};
if (mdesc->paddr_start == (rt_uintptr_t)ARCH_MAP_FAILED)
mdesc->paddr_start = mdesc->vaddr_start + PV_OFFSET;
rt_aspace_map_phy_static(aspace, &mdesc->varea, &hint, attr,
mdesc->paddr_start >> MM_PAGE_SHIFT, &err);
mdesc++;
}
rt_hw_asid_init();
rt_hw_aspace_switch(&rt_kernel_space);
rt_page_cleanup();
}
#define SATP_BASE ((rt_ubase_t)SATP_MODE << SATP_MODE_OFFSET)
void rt_hw_mem_setup_early(void)
{
rt_ubase_t pv_off;
rt_ubase_t ps = 0x0;
rt_ubase_t vs = 0x0;
rt_ubase_t *early_pgtbl = (rt_ubase_t *)(((size_t)&__bss_end + 4095) & ~0xfff);
/* calculate pv_offset */
void *symb_pc;
void *symb_linker;
__asm__ volatile("la %0, _start\n" : "=r"(symb_pc));
__asm__ volatile("la %0, _start_link_addr\n" : "=r"(symb_linker));
symb_linker = *(void **)symb_linker;
pv_off = symb_pc - symb_linker;
rt_kmem_pvoff_set(pv_off);
if (pv_off)
{
if (pv_off & (1ul << (ARCH_INDEX_WIDTH * 2 + ARCH_PAGE_SHIFT)))
{
LOG_E("%s: not aligned virtual address. pv_offset %p", __func__,
pv_off);
RT_ASSERT(0);
}
/**
* identity mapping:
* the PC is still in the lower region before relocating to high memory
*/
for (size_t i = 0; i < __SIZE(PPN0_BIT); i++)
{
early_pgtbl[i] = COMBINEPTE(ps, MMU_MAP_EARLY);
ps += L1_PAGE_SIZE;
}
/* relocate text region */
__asm__ volatile("la %0, _start\n" : "=r"(ps));
ps &= ~(L1_PAGE_SIZE - 1);
vs = ps - pv_off;
/* relocate region */
rt_ubase_t vs_idx = GET_L1(vs);
rt_ubase_t ve_idx = GET_L1(vs + 0x80000000);
for (size_t i = vs_idx; i < ve_idx; i++)
{
early_pgtbl[i] = COMBINEPTE(ps, MMU_MAP_EARLY);
ps += L1_PAGE_SIZE;
}
/* apply new mapping */
asm volatile("sfence.vma x0, x0");
write_csr(satp, SATP_BASE | ((size_t)early_pgtbl >> PAGE_OFFSET_BIT));
asm volatile("sfence.vma x0, x0");
}
/* return to lower text section */
}
void *rt_hw_mmu_pgtbl_create(void)
{
rt_ubase_t *mmu_table;
mmu_table = (rt_ubase_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
if (!mmu_table)
{
return RT_NULL;
}
rt_memcpy(mmu_table, rt_kernel_space.page_table, ARCH_PAGE_SIZE);
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);
return mmu_table;
}
void rt_hw_mmu_pgtbl_delete(void *pgtbl)
{
rt_pages_free(pgtbl, 0);
}
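
As a worked example of the three-level walk that _map_one_page and _query
implement — in Sv39, GET_L1/GET_L2/GET_L3 each extract 9 bits of the virtual
page number (the shift amounts below assume the usual Sv39 values
VPN2_SHIFT=30, VPN1_SHIFT=21, VPN0_SHIFT=12):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t va = 0x0000003F80201000ULL; /* arbitrary Sv39 address */
    uint64_t l1 = (va >> 30) & 0x1FF;    /* VPN[2]: index into the root table */
    uint64_t l2 = (va >> 21) & 0x1FF;    /* VPN[1]: index into the mid table  */
    uint64_t l3 = (va >> 12) & 0x1FF;    /* VPN[0]: index into the leaf table */
    printf("L1=%llu L2=%llu L3=%llu offset=0x%llx\n",
           (unsigned long long)l1, (unsigned long long)l2,
           (unsigned long long)l3, (unsigned long long)(va & 0xFFF));
    return 0;
}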

View File

@@ -1,77 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-01-30 lizhirui first version
* 2023-10-12 Shell Add permission control API
*/
#ifndef __MMU_H__
#define __MMU_H__
#include "riscv.h"
#include "riscv_mmu.h"
#include <mm_aspace.h>
#include <stddef.h>
/* RAM, Flash, or ROM */
#define NORMAL_MEM 0
/* normal nocache memory mapping type */
#define NORMAL_NOCACHE_MEM 1
/* MMIO region */
#define DEVICE_MEM 2
typedef size_t rt_pte_t;
struct mem_desc
{
rt_size_t vaddr_start;
rt_size_t vaddr_end;
rt_ubase_t paddr_start;
rt_size_t attr;
struct rt_varea varea;
};
#define GET_PF_ID(addr) ((addr) >> PAGE_OFFSET_BIT)
#define GET_PF_OFFSET(addr) __MASKVALUE(addr, PAGE_OFFSET_MASK)
#define GET_L1(addr) __PARTBIT(addr, VPN2_SHIFT, VPN2_BIT)
#define GET_L2(addr) __PARTBIT(addr, VPN1_SHIFT, VPN1_BIT)
#define GET_L3(addr) __PARTBIT(addr, VPN0_SHIFT, VPN0_BIT)
#define GET_PPN(pte) \
(__PARTBIT(pte, PTE_PPN_SHIFT, PHYSICAL_ADDRESS_WIDTH_BITS - PTE_PPN_SHIFT))
#define GET_PADDR(pte) (GET_PPN(pte) << PAGE_OFFSET_BIT)
#define VPN_TO_PPN(vaddr, pv_off) (((rt_uintptr_t)(vaddr)) + (pv_off))
#define PPN_TO_VPN(paddr, pv_off) (((rt_uintptr_t)(paddr)) - (pv_off))
#define COMBINEVADDR(l1_off, l2_off, l3_off) \
(((l1_off) << VPN2_SHIFT) | ((l2_off) << VPN1_SHIFT) | \
((l3_off) << VPN0_SHIFT))
#define COMBINEPTE(paddr, attr) \
((((paddr) >> PAGE_OFFSET_BIT) << PTE_PPN_SHIFT) | (attr))
#define MMU_MAP_ERROR_VANOTALIGN -1
#define MMU_MAP_ERROR_PANOTALIGN -2
#define MMU_MAP_ERROR_NOPAGE -3
#define MMU_MAP_ERROR_CONFLICT -4
void *rt_hw_mmu_tbl_get(void);
int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_ubase_t size,
rt_ubase_t *vtable, rt_ubase_t pv_off);
void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr);
void rt_hw_mmu_kernel_map_init(rt_aspace_t aspace, rt_ubase_t vaddr_start,
rt_ubase_t size);
void *rt_hw_mmu_map(rt_aspace_t aspace, void *v_addr, void *p_addr, size_t size,
size_t attr);
void rt_hw_mmu_unmap(rt_aspace_t aspace, void *v_addr, size_t size);
void rt_hw_aspace_switch(rt_aspace_t aspace);
void *rt_hw_mmu_v2p(rt_aspace_t aspace, void *vaddr);
int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
enum rt_mmu_cntl cmd);
void *rt_hw_mmu_pgtbl_create(void);
void rt_hw_mmu_pgtbl_delete(void *pgtbl);
#endif

View File

@@ -1,32 +0,0 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-01-30 lizhirui first version
* 2024-08-28 RT-Thread Fit into rv64ilp32 ABI
*/
#ifndef __RISCV_H__
#define __RISCV_H__
#include <encoding.h>
/* using unsigned long long for the case of rv64ilp32 */
#define __SIZE(bit) (1ULL << (bit))
#define __MASK(bit) (__SIZE(bit) - 1ULL)
#define __UMASK(bit) (~(__MASK(bit)))
#define __MASKVALUE(value,maskvalue) ((value) & (maskvalue))
#define __UMASKVALUE(value,maskvalue) ((value) & (~(maskvalue)))
#define __CHECKUPBOUND(value,bit_count) (!(((rt_ubase_t)value) & (~__MASK(bit_count))))
#define __CHECKALIGN(value,start_bit) (!(((rt_ubase_t)value) & (__MASK(start_bit))))
#define __PARTBIT(value,start_bit,length) (((value) >> (start_bit)) & __MASK(length))
#define __ALIGNUP(value,bit) (((value) + __MASK(bit)) & __UMASK(bit))
#define __ALIGNDOWN(value,bit) ((value) & __UMASK(bit))
#endif
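
A few concrete values for the helpers above, as a quick check:
__SIZE(12) = 0x1000, __MASK(12) = 0xFFF, __ALIGNUP(0x1234, 12) = 0x2000,
__ALIGNDOWN(0x1234, 12) = 0x1000, and
__PARTBIT(0xABCD, 4, 8) = (0xABCD >> 4) & 0xFF = 0xBC.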

View File

@@ -1,115 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2019 Western Digital Corporation or its affiliates.
*
* Authors:
* Anup Patel <anup.patel@wdc.com>
*/
#ifndef __RISCV_IO_H__
#define __RISCV_IO_H__
static inline uint32_t __raw_hartid(void)
{
extern int boot_hartid;
return boot_hartid;
}
static inline void __raw_writeb(rt_uint8_t val, volatile void *addr)
{
asm volatile("sb %0, 0(%1)" : : "r"(val), "r"(addr));
}
static inline void __raw_writew(rt_uint16_t val, volatile void *addr)
{
asm volatile("sh %0, 0(%1)" : : "r"(val), "r"(addr));
}
static inline void __raw_writel(rt_uint32_t val, volatile void *addr)
{
asm volatile("sw %0, 0(%1)" : : "r"(val), "r"(addr));
}
#if __riscv_xlen != 32
static inline void __raw_writeq(rt_uint64_t val, volatile void *addr)
{
asm volatile("sd %0, 0(%1)" : : "r"(val), "r"(addr));
}
#endif
static inline rt_uint8_t __raw_readb(const volatile void *addr)
{
rt_uint8_t val;
asm volatile("lb %0, 0(%1)" : "=r"(val) : "r"(addr));
return val;
}
static inline rt_uint16_t __raw_readw(const volatile void *addr)
{
rt_uint16_t val;
asm volatile("lh %0, 0(%1)" : "=r"(val) : "r"(addr));
return val;
}
static inline rt_uint32_t __raw_readl(const volatile void *addr)
{
rt_uint32_t val;
asm volatile("lw %0, 0(%1)" : "=r"(val) : "r"(addr));
return val;
}
#if __riscv_xlen != 32
static inline rt_uint64_t __raw_readq(const volatile void *addr)
{
rt_uint64_t val;
asm volatile("ld %0, 0(%1)" : "=r"(val) : "r"(addr));
return val;
}
#endif
/* FIXME: These are now the same as asm-generic */
/* clang-format off */
#define __io_rbr() do {} while (0)
#define __io_rar() do {} while (0)
#define __io_rbw() do {} while (0)
#define __io_raw() do {} while (0)
#define readb_relaxed(c) ({ rt_uint8_t __v; __io_rbr(); __v = __raw_readb(c); __io_rar(); __v; })
#define readw_relaxed(c) ({ rt_uint16_t __v; __io_rbr(); __v = __raw_readw(c); __io_rar(); __v; })
#define readl_relaxed(c) ({ rt_uint32_t __v; __io_rbr(); __v = __raw_readl(c); __io_rar(); __v; })
#define writeb_relaxed(v,c) ({ __io_rbw(); __raw_writeb((v),(c)); __io_raw(); })
#define writew_relaxed(v,c) ({ __io_rbw(); __raw_writew((v),(c)); __io_raw(); })
#define writel_relaxed(v,c) ({ __io_rbw(); __raw_writel((v),(c)); __io_raw(); })
#if __riscv_xlen != 32
#define readq_relaxed(c) ({ rt_uint64_t __v; __io_rbr(); __v = __raw_readq(c); __io_rar(); __v; })
#define writeq_relaxed(v,c) ({ __io_rbw(); __raw_writeq((v),(c)); __io_raw(); })
#endif
#define __io_br() do {} while (0)
#define __io_ar() __asm__ __volatile__ ("fence i,r" : : : "memory");
#define __io_bw() __asm__ __volatile__ ("fence w,o" : : : "memory");
#define __io_aw() do {} while (0)
#define readb(c) ({ rt_uint8_t __v; __io_br(); __v = __raw_readb(c); __io_ar(); __v; })
#define readw(c) ({ rt_uint16_t __v; __io_br(); __v = __raw_readw(c); __io_ar(); __v; })
#define readl(c) ({ rt_uint32_t __v; __io_br(); __v = __raw_readl(c); __io_ar(); __v; })
#define writeb(v,c) ({ __io_bw(); __raw_writeb((v),(c)); __io_aw(); })
#define writew(v,c) ({ __io_bw(); __raw_writew((v),(c)); __io_aw(); })
#define writel(v,c) ({ __io_bw(); __raw_writel((v),(c)); __io_aw(); })
#if __riscv_xlen != 32
#define readq(c) ({ rt_uint64_t __v; __io_br(); __v = __raw_readq(c); __io_ar(); __v; })
#define writeq(v,c) ({ __io_bw(); __raw_writeq((v),(c)); __io_aw(); })
#endif
#endif

View File

@@ -1,29 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-01-30 lizhirui first version
*/
#include <rthw.h>
#include <rtthread.h>
#include <stdint.h>
#include <riscv.h>
#include <string.h>
#include <stdlib.h>
#include "riscv_mmu.h"
void mmu_enable_user_page_access(void)
{
set_csr(sstatus, SSTATUS_SUM);
}
void mmu_disable_user_page_access(void)
{
clear_csr(sstatus, SSTATUS_SUM);
}

View File

@@ -1,264 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 Bernard port from FreeBSD
*/
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2019 Mitchell Horne <mhorne@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "sbi.h"
#include <rtthread.h>
#include <stdbool.h>
/* SBI Implementation-Specific Definitions */
#define OPENSBI_VERSION_MAJOR_OFFSET 16
#define OPENSBI_VERSION_MINOR_MASK 0xFFFF
unsigned long sbi_spec_version;
unsigned long sbi_impl_id;
unsigned long sbi_impl_version;
static bool has_time_extension = false;
static bool has_ipi_extension = false;
static bool has_rfnc_extension = false;
static struct sbi_ret sbi_get_spec_version(void)
{
return (SBI_CALL0(SBI_EXT_ID_BASE, SBI_BASE_GET_SPEC_VERSION));
}
static struct sbi_ret sbi_get_impl_id(void)
{
return (SBI_CALL0(SBI_EXT_ID_BASE, SBI_BASE_GET_IMPL_ID));
}
static struct sbi_ret sbi_get_impl_version(void)
{
return (SBI_CALL0(SBI_EXT_ID_BASE, SBI_BASE_GET_IMPL_VERSION));
}
void sbi_print_version(void)
{
uint32_t major;
uint32_t minor;
/* For legacy SBI implementations. */
if (sbi_spec_version == 0)
{
rt_kprintf("SBI: Unknown (Legacy) Implementation\n");
rt_kprintf("SBI Specification Version: 0.1\n");
return;
}
switch (sbi_impl_id)
{
case (SBI_IMPL_ID_BBL):
rt_kprintf("SBI: Berkely Boot Loader %lu\n", sbi_impl_version);
break;
case (SBI_IMPL_ID_XVISOR):
rt_kprintf("SBI: eXtensible Versatile hypervISOR %lu\n",
sbi_impl_version);
break;
case (SBI_IMPL_ID_KVM):
rt_kprintf("SBI: Kernel-based Virtual Machine %lu\n", sbi_impl_version);
break;
case (SBI_IMPL_ID_RUSTSBI):
rt_kprintf("SBI: RustSBI %lu\n", sbi_impl_version);
break;
case (SBI_IMPL_ID_DIOSIX):
rt_kprintf("SBI: Diosix %lu\n", sbi_impl_version);
break;
case (SBI_IMPL_ID_OPENSBI):
major = sbi_impl_version >> OPENSBI_VERSION_MAJOR_OFFSET;
minor = sbi_impl_version & OPENSBI_VERSION_MINOR_MASK;
rt_kprintf("SBI: OpenSBI v%u.%u\n", major, minor);
break;
default:
rt_kprintf("SBI: Unrecognized Implementation: %lu\n", sbi_impl_id);
break;
}
major = (sbi_spec_version & SBI_SPEC_VERS_MAJOR_MASK) >>
SBI_SPEC_VERS_MAJOR_OFFSET;
minor = (sbi_spec_version & SBI_SPEC_VERS_MINOR_MASK);
rt_kprintf("SBI Specification Version: %u.%u\n", major, minor);
}
void sbi_set_timer(uint64_t val)
{
struct sbi_ret ret;
/* Use the TIME legacy replacement extension, if available. */
if (has_time_extension)
{
ret = SBI_CALL1(SBI_EXT_ID_TIME, SBI_TIME_SET_TIMER, val);
RT_ASSERT(ret.error == SBI_SUCCESS);
}
else
{
(void)SBI_CALL1(SBI_SET_TIMER, 0, val);
}
}
void sbi_send_ipi(const unsigned long *hart_mask)
{
struct sbi_ret ret;
/* Use the IPI legacy replacement extension, if available. */
if (has_ipi_extension)
{
ret = SBI_CALL2(SBI_EXT_ID_IPI, SBI_IPI_SEND_IPI, *hart_mask, 0);
RT_ASSERT(ret.error == SBI_SUCCESS);
}
else
{
(void)SBI_CALL1(SBI_SEND_IPI, 0, (uint64_t)hart_mask);
}
}
void sbi_remote_fence_i(const unsigned long *hart_mask)
{
struct sbi_ret ret;
/* Use the RFENCE legacy replacement extension, if available. */
if (has_rfnc_extension)
{
ret =
SBI_CALL2(SBI_EXT_ID_RFNC, SBI_RFNC_REMOTE_FENCE_I, *hart_mask, 0);
RT_ASSERT(ret.error == SBI_SUCCESS);
}
else
{
(void)SBI_CALL1(SBI_REMOTE_FENCE_I, 0, (uint64_t)hart_mask);
}
}
int sbi_remote_sfence_vma(const unsigned long *hart_mask,
const unsigned long hart_mask_base,
unsigned long start, unsigned long size)
{
struct sbi_ret ret = {.error = SBI_SUCCESS};
/* Use the RFENCE legacy replacement extension, if available. */
if (has_rfnc_extension)
{
ret = SBI_CALL4(SBI_EXT_ID_RFNC, SBI_RFNC_REMOTE_SFENCE_VMA, *hart_mask,
hart_mask_base, start, size);
}
else
{
(void)SBI_CALL3(SBI_REMOTE_SFENCE_VMA, 0, (uint64_t)hart_mask, start,
size);
}
return ret.error;
}
void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
unsigned long start, unsigned long size,
unsigned long asid)
{
struct sbi_ret ret;
/* Use the RFENCE legacy replacement extension, if available. */
if (has_rfnc_extension)
{
ret = SBI_CALL5(SBI_EXT_ID_RFNC, SBI_RFNC_REMOTE_SFENCE_VMA_ASID,
*hart_mask, 0, start, size, asid);
RT_ASSERT(ret.error == SBI_SUCCESS);
}
else
{
(void)SBI_CALL4(SBI_REMOTE_SFENCE_VMA_ASID, 0, (uint64_t)hart_mask,
start, size, asid);
}
}
int sbi_hsm_hart_start(unsigned long hart, unsigned long start_addr,
unsigned long priv)
{
struct sbi_ret ret;
ret = SBI_CALL3(SBI_EXT_ID_HSM, SBI_HSM_HART_START, hart, start_addr, priv);
return (ret.error != 0 ? (int)ret.error : 0);
}
void sbi_hsm_hart_stop(void)
{
(void)SBI_CALL0(SBI_EXT_ID_HSM, SBI_HSM_HART_STOP);
}
int sbi_hsm_hart_status(unsigned long hart)
{
struct sbi_ret ret;
ret = SBI_CALL1(SBI_EXT_ID_HSM, SBI_HSM_HART_STATUS, hart);
return (ret.error != 0 ? (int)ret.error : (int)ret.value);
}
void sbi_init(void)
{
struct sbi_ret sret;
/*
* Get the spec version. For legacy SBI implementations this will
* return an error, otherwise it is guaranteed to succeed.
*/
sret = sbi_get_spec_version();
if (sret.error != 0)
{
/* We are running a legacy SBI implementation. */
sbi_spec_version = 0;
return;
}
/* Set the SBI implementation info. */
sbi_spec_version = sret.value;
sbi_impl_id = sbi_get_impl_id().value;
sbi_impl_version = sbi_get_impl_version().value;
/* Probe for legacy replacement extensions. */
if (sbi_probe_extension(SBI_EXT_ID_TIME) != 0)
has_time_extension = true;
if (sbi_probe_extension(SBI_EXT_ID_IPI) != 0)
has_ipi_extension = true;
if (sbi_probe_extension(SBI_EXT_ID_RFNC) != 0)
has_rfnc_extension = true;
}
void rt_hw_console_output(const char *str)
{
while (*str)
{
sbi_console_putchar(*str++);
}
}
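
A minimal boot-time usage sketch of the layer above (the caller and the
timer value are illustrative, not part of this commit):

/* hypothetical early-boot hook */
void board_sbi_setup(void)
{
    sbi_init();          /* probe the spec version and replacement extensions */
    sbi_print_version(); /* e.g. "SBI: OpenSBI v1.3" */
    /* sbi_set_timer() takes an absolute time; a value already in the
       past (such as 0) raises a timer interrupt as soon as it is enabled */
    sbi_set_timer(0);
}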

View File

@@ -1,244 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 Bernard port from FreeBSD
*/
/*-
* Copyright (c) 2016-2017 Ruslan Bukin <br@bsdpad.com>
* All rights reserved.
* Copyright (c) 2019 Mitchell Horne <mhorne@FreeBSD.org>
*
* Portions of this software were developed by SRI International and the
* University of Cambridge Computer Laboratory under DARPA/AFRL contract
* FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Portions of this software were developed by the University of Cambridge
* Computer Laboratory as part of the CTSRD Project, with support from the
* UK Higher Education Innovation Fund (HEIF).
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_SBI_H_
#define _MACHINE_SBI_H_
#include <stdint.h>
#include <rtdef.h>
/* SBI Specification Version */
#define SBI_SPEC_VERS_MAJOR_OFFSET 24
#define SBI_SPEC_VERS_MAJOR_MASK (0x7F << SBI_SPEC_VERS_MAJOR_OFFSET)
#define SBI_SPEC_VERS_MINOR_OFFSET 0
#define SBI_SPEC_VERS_MINOR_MASK (0xFFFFFF << SBI_SPEC_VERS_MINOR_OFFSET)
/* SBI Implementation IDs */
#define SBI_IMPL_ID_BBL 0
#define SBI_IMPL_ID_OPENSBI 1
#define SBI_IMPL_ID_XVISOR 2
#define SBI_IMPL_ID_KVM 3
#define SBI_IMPL_ID_RUSTSBI 4
#define SBI_IMPL_ID_DIOSIX 5
/* SBI Error Codes */
#define SBI_SUCCESS 0
#define SBI_ERR_FAILURE -1
#define SBI_ERR_NOT_SUPPORTED -2
#define SBI_ERR_INVALID_PARAM -3
#define SBI_ERR_DENIED -4
#define SBI_ERR_INVALID_ADDRESS -5
#define SBI_ERR_ALREADY_AVAILABLE -6
/* SBI Base Extension */
#define SBI_EXT_ID_BASE 0x10
#define SBI_BASE_GET_SPEC_VERSION 0
#define SBI_BASE_GET_IMPL_ID 1
#define SBI_BASE_GET_IMPL_VERSION 2
#define SBI_BASE_PROBE_EXTENSION 3
#define SBI_BASE_GET_MVENDORID 4
#define SBI_BASE_GET_MARCHID 5
#define SBI_BASE_GET_MIMPID 6
/* Timer (TIME) Extension */
#define SBI_EXT_ID_TIME 0x54494D45
#define SBI_TIME_SET_TIMER 0
/* IPI (IPI) Extension */
#define SBI_EXT_ID_IPI 0x735049
#define SBI_IPI_SEND_IPI 0
/* RFENCE (RFNC) Extension */
#define SBI_EXT_ID_RFNC 0x52464E43
#define SBI_RFNC_REMOTE_FENCE_I 0
#define SBI_RFNC_REMOTE_SFENCE_VMA 1
#define SBI_RFNC_REMOTE_SFENCE_VMA_ASID 2
#define SBI_RFNC_REMOTE_HFENCE_GVMA_VMID 3
#define SBI_RFNC_REMOTE_HFENCE_GVMA 4
#define SBI_RFNC_REMOTE_HFENCE_VVMA_ASID 5
#define SBI_RFNC_REMOTE_HFENCE_VVMA 6
/* Hart State Management (HSM) Extension */
#define SBI_EXT_ID_HSM 0x48534D
#define SBI_HSM_HART_START 0
#define SBI_HSM_HART_STOP 1
#define SBI_HSM_HART_STATUS 2
#define SBI_HSM_STATUS_STARTED 0
#define SBI_HSM_STATUS_STOPPED 1
#define SBI_HSM_STATUS_START_PENDING 2
#define SBI_HSM_STATUS_STOP_PENDING 3
/* Legacy Extensions */
#define SBI_SET_TIMER 0
#define SBI_CONSOLE_PUTCHAR 1
#define SBI_CONSOLE_GETCHAR 2
#define SBI_CLEAR_IPI 3
#define SBI_SEND_IPI 4
#define SBI_REMOTE_FENCE_I 5
#define SBI_REMOTE_SFENCE_VMA 6
#define SBI_REMOTE_SFENCE_VMA_ASID 7
#define SBI_SHUTDOWN 8
#define SBI_CALL0(e, f) SBI_CALL5(e, f, 0, 0, 0, 0, 0)
#define SBI_CALL1(e, f, p1) SBI_CALL5(e, f, p1, 0, 0, 0, 0)
#define SBI_CALL2(e, f, p1, p2) SBI_CALL5(e, f, p1, p2, 0, 0, 0)
#define SBI_CALL3(e, f, p1, p2, p3) SBI_CALL5(e, f, p1, p2, p3, 0, 0)
#define SBI_CALL4(e, f, p1, p2, p3, p4) SBI_CALL5(e, f, p1, p2, p3, p4, 0)
#define SBI_CALL5(e, f, p1, p2, p3, p4, p5) sbi_call(e, f, p1, p2, p3, p4, p5)
/*
* Documentation available at
* https://github.com/riscv/riscv-sbi-doc/blob/master/riscv-sbi.adoc
*/
struct sbi_ret
{
long error;
long value;
};
rt_inline struct sbi_ret
sbi_call(uint64_t arg7, uint64_t arg6, uint64_t arg0, uint64_t arg1,
uint64_t arg2, uint64_t arg3, uint64_t arg4)
{
struct sbi_ret ret;
register uintptr_t a0 __asm("a0") = (uintptr_t)(arg0);
register uintptr_t a1 __asm("a1") = (uintptr_t)(arg1);
register uintptr_t a2 __asm("a2") = (uintptr_t)(arg2);
register uintptr_t a3 __asm("a3") = (uintptr_t)(arg3);
register uintptr_t a4 __asm("a4") = (uintptr_t)(arg4);
register uintptr_t a6 __asm("a6") = (uintptr_t)(arg6);
register uintptr_t a7 __asm("a7") = (uintptr_t)(arg7);
__asm __volatile(\
"ecall" \
: "+r"(a0), "+r"(a1) \
: "r"(a2), "r"(a3), "r"(a4), "r"(a6), "r"(a7) \
: "memory");
ret.error = a0;
ret.value = a1;
return (ret);
}
/* Base extension functions and variables. */
extern unsigned long sbi_spec_version;
extern unsigned long sbi_impl_id;
extern unsigned long sbi_impl_version;
static __inline long
sbi_probe_extension(long id)
{
return (SBI_CALL1(SBI_EXT_ID_BASE, SBI_BASE_PROBE_EXTENSION, id).value);
}
/* TIME extension functions. */
void sbi_set_timer(uint64_t val);
/* IPI extension functions. */
void sbi_send_ipi(const unsigned long *hart_mask);
/* RFENCE extension functions. */
void sbi_remote_fence_i(const unsigned long *hart_mask);
int sbi_remote_sfence_vma(const unsigned long *hart_mask,
const unsigned long hart_mask_base,
unsigned long start, unsigned long size);
void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask, unsigned long start,
unsigned long size, unsigned long asid);
/* Hart State Management extension functions. */
/*
* Start execution on the specified hart at physical address start_addr. The
* register a0 will contain the hart's ID, and a1 will contain the value of
* priv.
*/
int sbi_hsm_hart_start(unsigned long hart, unsigned long start_addr, unsigned long priv);
/*
* Stop execution on the current hart. Interrupts should be disabled, or this
* function may return.
*/
void sbi_hsm_hart_stop(void);
/*
* Get the execution status of the specified hart. The status will be one of:
* - SBI_HSM_STATUS_STARTED
* - SBI_HSM_STATUS_STOPPED
* - SBI_HSM_STATUS_START_PENDING
* - SBI_HSM_STATUS_STOP_PENDING
*/
int sbi_hsm_hart_status(unsigned long hart);
/* Legacy extension functions. */
static __inline void
sbi_console_putchar(int ch)
{
(void)SBI_CALL1(SBI_CONSOLE_PUTCHAR, 0, ch);
}
static __inline int
sbi_console_getchar(void)
{
/*
* XXX: The "error" is returned here because legacy SBI functions
* continue to return their value in a0.
*/
return (SBI_CALL0(SBI_CONSOLE_GETCHAR, 0).error);
}
static __inline void
sbi_shutdown(void)
{
(void)SBI_CALL0(SBI_SHUTDOWN, 0);
}
void sbi_print_version(void);
void sbi_init(void);
#endif /* !_MACHINE_SBI_H_ */

View File

@@ -1,70 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-01-30 lizhirui first version
* 2021-11-18 JasonHu add fpu member
* 2022-10-22 Shell Support kernel mode RVV
*/
#ifndef __STACK_H__
#define __STACK_H__
#include "stackframe.h"
#include <rtthread.h>
typedef struct rt_hw_switch_frame
{
uint64_t regs[RT_HW_SWITCH_CONTEXT_SIZE];
} *rt_hw_switch_frame_t;
struct rt_hw_stack_frame
{
rt_ubase_t epc; /* sepc - exception program counter */
rt_ubase_t ra; /* x1 - ra - return address for jumps */
rt_ubase_t sstatus; /* sstatus - supervisor status register */
rt_ubase_t gp; /* x3 - gp - global pointer */
rt_ubase_t tp; /* x4 - tp - thread pointer */
rt_ubase_t t0; /* x5 - t0 - temporary register 0 */
rt_ubase_t t1; /* x6 - t1 - temporary register 1 */
rt_ubase_t t2; /* x7 - t2 - temporary register 2 */
rt_ubase_t s0_fp; /* x8 - s0/fp - saved register 0 or frame pointer */
rt_ubase_t s1; /* x9 - s1 - saved register 1 */
rt_ubase_t a0; /* x10 - a0 - return value or function argument 0 */
rt_ubase_t a1; /* x11 - a1 - return value or function argument 1 */
rt_ubase_t a2; /* x12 - a2 - function argument 2 */
rt_ubase_t a3; /* x13 - a3 - function argument 3 */
rt_ubase_t a4; /* x14 - a4 - function argument 4 */
rt_ubase_t a5; /* x15 - a5 - function argument 5 */
rt_ubase_t a6; /* x16 - a6 - function argument 6 */
rt_ubase_t a7; /* x17 - a7 - function argument 7 */
rt_ubase_t s2; /* x18 - s2 - saved register 2 */
rt_ubase_t s3; /* x19 - s3 - saved register 3 */
rt_ubase_t s4; /* x20 - s4 - saved register 4 */
rt_ubase_t s5; /* x21 - s5 - saved register 5 */
rt_ubase_t s6; /* x22 - s6 - saved register 6 */
rt_ubase_t s7; /* x23 - s7 - saved register 7 */
rt_ubase_t s8; /* x24 - s8 - saved register 8 */
rt_ubase_t s9; /* x25 - s9 - saved register 9 */
rt_ubase_t s10; /* x26 - s10 - saved register 10 */
rt_ubase_t s11; /* x27 - s11 - saved register 11 */
rt_ubase_t t3; /* x28 - t3 - temporary register 3 */
rt_ubase_t t4; /* x29 - t4 - temporary register 4 */
rt_ubase_t t5; /* x30 - t5 - temporary register 5 */
rt_ubase_t t6; /* x31 - t6 - temporary register 6 */
rt_ubase_t user_sp_exc_stack; /* sscratch - user mode sp/exception stack */
rt_ubase_t __padding; /* align to 16 bytes */
#ifdef ARCH_RISCV_FPU
rt_ubase_t f[CTX_FPU_REG_NR]; /* f0~f31 */
#endif /* ARCH_RISCV_FPU */
#ifdef ARCH_RISCV_VECTOR
rt_ubase_t v[CTX_VECTOR_REG_NR];
#endif /* ARCH_RISCV_VECTOR */
};
#endif


@@ -1,312 +0,0 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-02-02 lizhirui first version
* 2021-02-11 lizhirui fixed gp save/store bug
* 2021-11-18 JasonHu add fpu registers save/restore
* 2022-10-22 Shell Support kernel mode RVV
*/
#ifndef __STACKFRAME_H__
#define __STACKFRAME_H__
#include <rtconfig.h>
#include "encoding.h"
#include "ext_context.h"
/* bytes of register width */
#ifdef ARCH_CPU_64BIT
#define STORE sd
#define LOAD ld
#define FSTORE fsd
#define FLOAD fld
#define REGBYTES 8
#else
/* only 64-bit XLEN is supported */
#error "Unsupported XLEN"
#endif
/* 33 general registers + 1 padding */
#define CTX_GENERAL_REG_NR 34
/* all context registers */
#define CTX_REG_NR (CTX_GENERAL_REG_NR + CTX_FPU_REG_NR + CTX_VECTOR_REG_NR)
#define BYTES(idx) ((idx) * REGBYTES)
#define FRAME_OFF_SSTATUS BYTES(2)
#define FRAME_OFF_SP BYTES(32)
#define FRAME_OFF_GP BYTES(3)
/* switch frame */
#define RT_HW_SWITCH_CONTEXT_SSTATUS 0
#define RT_HW_SWITCH_CONTEXT_S11 1
#define RT_HW_SWITCH_CONTEXT_S10 2
#define RT_HW_SWITCH_CONTEXT_S9 3
#define RT_HW_SWITCH_CONTEXT_S8 4
#define RT_HW_SWITCH_CONTEXT_S7 5
#define RT_HW_SWITCH_CONTEXT_S6 6
#define RT_HW_SWITCH_CONTEXT_S5 7
#define RT_HW_SWITCH_CONTEXT_S4 8
#define RT_HW_SWITCH_CONTEXT_S3 9
#define RT_HW_SWITCH_CONTEXT_S2 10
#define RT_HW_SWITCH_CONTEXT_S1 11
#define RT_HW_SWITCH_CONTEXT_S0 12
#define RT_HW_SWITCH_CONTEXT_RA 13
#define RT_HW_SWITCH_CONTEXT_TP 14
#define RT_HW_SWITCH_CONTEXT_ALIGNMENT 15 // Padding for alignment
#define RT_HW_SWITCH_CONTEXT_SIZE 16 // Total size of the structure
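/*
 * Usage sketch (illustrative, not original code): the indices above select a
 * slot inside struct rt_hw_switch_frame (see stack.h), so the address a
 * suspended thread will resume at can be read as:
 *
 *   rt_hw_switch_frame_t frame = (rt_hw_switch_frame_t)thread->sp;
 *   rt_ubase_t resume_ra = frame->regs[RT_HW_SWITCH_CONTEXT_RA];
 */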
#ifdef __ASSEMBLY__
.macro SAVE_ALL
#ifdef ARCH_RISCV_FPU
/* reserve float registers */
addi sp, sp, -CTX_FPU_REG_NR * REGBYTES
#endif /* ARCH_RISCV_FPU */
#ifdef ARCH_RISCV_VECTOR
/* reserve vector registers */
addi sp, sp, -CTX_VECTOR_REG_NR * REGBYTES
#endif /* ARCH_RISCV_VECTOR */
/* save general registers */
addi sp, sp, -CTX_GENERAL_REG_NR * REGBYTES
STORE x1, 1 * REGBYTES(sp)
csrr x1, sstatus
STORE x1, FRAME_OFF_SSTATUS(sp)
csrr x1, sepc
STORE x1, 0 * REGBYTES(sp)
STORE x3, 3 * REGBYTES(sp)
STORE x4, 4 * REGBYTES(sp) /* save tp */
STORE x5, 5 * REGBYTES(sp)
STORE x6, 6 * REGBYTES(sp)
STORE x7, 7 * REGBYTES(sp)
STORE x8, 8 * REGBYTES(sp)
STORE x9, 9 * REGBYTES(sp)
STORE x10, 10 * REGBYTES(sp)
STORE x11, 11 * REGBYTES(sp)
STORE x12, 12 * REGBYTES(sp)
STORE x13, 13 * REGBYTES(sp)
STORE x14, 14 * REGBYTES(sp)
STORE x15, 15 * REGBYTES(sp)
STORE x16, 16 * REGBYTES(sp)
STORE x17, 17 * REGBYTES(sp)
STORE x18, 18 * REGBYTES(sp)
STORE x19, 19 * REGBYTES(sp)
STORE x20, 20 * REGBYTES(sp)
STORE x21, 21 * REGBYTES(sp)
STORE x22, 22 * REGBYTES(sp)
STORE x23, 23 * REGBYTES(sp)
STORE x24, 24 * REGBYTES(sp)
STORE x25, 25 * REGBYTES(sp)
STORE x26, 26 * REGBYTES(sp)
STORE x27, 27 * REGBYTES(sp)
STORE x28, 28 * REGBYTES(sp)
STORE x29, 29 * REGBYTES(sp)
STORE x30, 30 * REGBYTES(sp)
STORE x31, 31 * REGBYTES(sp)
csrr t0, sscratch
STORE t0, 32 * REGBYTES(sp)
#ifdef ARCH_RISCV_FPU
/* backup sp and adjust sp to save float registers */
mv t1, sp
addi t1, t1, CTX_GENERAL_REG_NR * REGBYTES
li t0, SSTATUS_FS
csrs sstatus, t0
FSTORE f0, FPU_CTX_F0_OFF(t1)
FSTORE f1, FPU_CTX_F1_OFF(t1)
FSTORE f2, FPU_CTX_F2_OFF(t1)
FSTORE f3, FPU_CTX_F3_OFF(t1)
FSTORE f4, FPU_CTX_F4_OFF(t1)
FSTORE f5, FPU_CTX_F5_OFF(t1)
FSTORE f6, FPU_CTX_F6_OFF(t1)
FSTORE f7, FPU_CTX_F7_OFF(t1)
FSTORE f8, FPU_CTX_F8_OFF(t1)
FSTORE f9, FPU_CTX_F9_OFF(t1)
FSTORE f10, FPU_CTX_F10_OFF(t1)
FSTORE f11, FPU_CTX_F11_OFF(t1)
FSTORE f12, FPU_CTX_F12_OFF(t1)
FSTORE f13, FPU_CTX_F13_OFF(t1)
FSTORE f14, FPU_CTX_F14_OFF(t1)
FSTORE f15, FPU_CTX_F15_OFF(t1)
FSTORE f16, FPU_CTX_F16_OFF(t1)
FSTORE f17, FPU_CTX_F17_OFF(t1)
FSTORE f18, FPU_CTX_F18_OFF(t1)
FSTORE f19, FPU_CTX_F19_OFF(t1)
FSTORE f20, FPU_CTX_F20_OFF(t1)
FSTORE f21, FPU_CTX_F21_OFF(t1)
FSTORE f22, FPU_CTX_F22_OFF(t1)
FSTORE f23, FPU_CTX_F23_OFF(t1)
FSTORE f24, FPU_CTX_F24_OFF(t1)
FSTORE f25, FPU_CTX_F25_OFF(t1)
FSTORE f26, FPU_CTX_F26_OFF(t1)
FSTORE f27, FPU_CTX_F27_OFF(t1)
FSTORE f28, FPU_CTX_F28_OFF(t1)
FSTORE f29, FPU_CTX_F29_OFF(t1)
FSTORE f30, FPU_CTX_F30_OFF(t1)
FSTORE f31, FPU_CTX_F31_OFF(t1)
/* clear the FS field */
csrc sstatus, t0
/* marking FS as Clean also clears sstatus.SD */
li t0, SSTATUS_FS_CLEAN
csrs sstatus, t0
#endif /* ARCH_RISCV_FPU */
#ifdef ARCH_RISCV_VECTOR
csrr t0, sstatus
andi t0, t0, SSTATUS_VS
beqz t0, 0f
/* push vector frame */
addi t1, sp, (CTX_GENERAL_REG_NR + CTX_FPU_REG_NR) * REGBYTES
SAVE_VECTOR t1
0:
#endif /* ARCH_RISCV_VECTOR */
.endm
/**
* @brief Restore All General Registers, for interrupt handling
*
*/
.macro RESTORE_ALL
#ifdef ARCH_RISCV_VECTOR
// load the saved sstatus to check the vector state
ld t0, 2 * REGBYTES(sp)
// nothing to restore unless the vector context is Clean or Dirty
andi t0, t0, SSTATUS_VS_CLEAN
beqz t0, 0f
/* locate and restore the vector frame */
addi t1, sp, (CTX_GENERAL_REG_NR + CTX_FPU_REG_NR) * REGBYTES
RESTORE_VECTOR t1
0:
#endif /* ARCH_RISCV_VECTOR */
#ifdef ARCH_RISCV_FPU
/* restore float register */
addi t2, sp, CTX_GENERAL_REG_NR * REGBYTES
li t0, SSTATUS_FS
csrs sstatus, t0
FLOAD f0, FPU_CTX_F0_OFF(t2)
FLOAD f1, FPU_CTX_F1_OFF(t2)
FLOAD f2, FPU_CTX_F2_OFF(t2)
FLOAD f3, FPU_CTX_F3_OFF(t2)
FLOAD f4, FPU_CTX_F4_OFF(t2)
FLOAD f5, FPU_CTX_F5_OFF(t2)
FLOAD f6, FPU_CTX_F6_OFF(t2)
FLOAD f7, FPU_CTX_F7_OFF(t2)
FLOAD f8, FPU_CTX_F8_OFF(t2)
FLOAD f9, FPU_CTX_F9_OFF(t2)
FLOAD f10, FPU_CTX_F10_OFF(t2)
FLOAD f11, FPU_CTX_F11_OFF(t2)
FLOAD f12, FPU_CTX_F12_OFF(t2)
FLOAD f13, FPU_CTX_F13_OFF(t2)
FLOAD f14, FPU_CTX_F14_OFF(t2)
FLOAD f15, FPU_CTX_F15_OFF(t2)
FLOAD f16, FPU_CTX_F16_OFF(t2)
FLOAD f17, FPU_CTX_F17_OFF(t2)
FLOAD f18, FPU_CTX_F18_OFF(t2)
FLOAD f19, FPU_CTX_F19_OFF(t2)
FLOAD f20, FPU_CTX_F20_OFF(t2)
FLOAD f21, FPU_CTX_F21_OFF(t2)
FLOAD f22, FPU_CTX_F22_OFF(t2)
FLOAD f23, FPU_CTX_F23_OFF(t2)
FLOAD f24, FPU_CTX_F24_OFF(t2)
FLOAD f25, FPU_CTX_F25_OFF(t2)
FLOAD f26, FPU_CTX_F26_OFF(t2)
FLOAD f27, FPU_CTX_F27_OFF(t2)
FLOAD f28, FPU_CTX_F28_OFF(t2)
FLOAD f29, FPU_CTX_F29_OFF(t2)
FLOAD f30, FPU_CTX_F30_OFF(t2)
FLOAD f31, FPU_CTX_F31_OFF(t2)
/* clear the FS field */
csrc sstatus, t0
/* marking FS as Clean also clears sstatus.SD */
li t0, SSTATUS_FS_CLEAN
csrs sstatus, t0
#endif /* ARCH_RISCV_FPU */
/* restore general registers */
addi t0, sp, CTX_REG_NR * REGBYTES
csrw sscratch, t0
/* restore the saved epc into sepc */
LOAD x1, 0 * REGBYTES(sp)
csrw sepc, x1
LOAD x1, 2 * REGBYTES(sp)
csrw sstatus, x1
LOAD x1, 1 * REGBYTES(sp)
LOAD x3, 3 * REGBYTES(sp)
LOAD x4, 4 * REGBYTES(sp) /* restore tp */
LOAD x5, 5 * REGBYTES(sp)
LOAD x6, 6 * REGBYTES(sp)
LOAD x7, 7 * REGBYTES(sp)
LOAD x8, 8 * REGBYTES(sp)
LOAD x9, 9 * REGBYTES(sp)
LOAD x10, 10 * REGBYTES(sp)
LOAD x11, 11 * REGBYTES(sp)
LOAD x12, 12 * REGBYTES(sp)
LOAD x13, 13 * REGBYTES(sp)
LOAD x14, 14 * REGBYTES(sp)
LOAD x15, 15 * REGBYTES(sp)
LOAD x16, 16 * REGBYTES(sp)
LOAD x17, 17 * REGBYTES(sp)
LOAD x18, 18 * REGBYTES(sp)
LOAD x19, 19 * REGBYTES(sp)
LOAD x20, 20 * REGBYTES(sp)
LOAD x21, 21 * REGBYTES(sp)
LOAD x22, 22 * REGBYTES(sp)
LOAD x23, 23 * REGBYTES(sp)
LOAD x24, 24 * REGBYTES(sp)
LOAD x25, 25 * REGBYTES(sp)
LOAD x26, 26 * REGBYTES(sp)
LOAD x27, 27 * REGBYTES(sp)
LOAD x28, 28 * REGBYTES(sp)
LOAD x29, 29 * REGBYTES(sp)
LOAD x30, 30 * REGBYTES(sp)
LOAD x31, 31 * REGBYTES(sp)
/* restore user sp */
LOAD sp, 32 * REGBYTES(sp)
.endm
.macro RESTORE_SYS_GP
.option push
.option norelax
la gp, __global_pointer$
.option pop
.endm
.macro OPEN_INTERRUPT
csrsi sstatus, 2
.endm
.macro CLOSE_INTERRUPT
csrci sstatus, 2
.endm
#endif /* __ASSEMBLY__ */
#endif /* __STACKFRAME_H__ */


@@ -1,133 +0,0 @@
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018/10/01 Bernard The first version
* 2018/12/27 Jesven Add SMP support
* 2020/6/12 Xim Port to QEMU and remove SMP support
* 2024-06-30 Shell Support of kernel remapping
*/
#include <encoding.h>
#include <cpuport.h>
.data
.global boot_hartid /* global variable boot_hartid in .data section */
boot_hartid:
.word 0xdeadbeef
.global _start
.section ".start", "ax"
_start:
j 1f
.word 0xdeadbeef
.align 3
.global g_wake_up
g_wake_up:
.dword 1
.dword 0
1:
/* save hartid */
la t0, boot_hartid /* address of the global variable boot_hartid */
mv t1, a0 /* the hartid is passed to S-mode in the a0 register */
sw t1, (t0) /* store the low 32 bits of t1 at the address held in t0 */
/* clear Interrupt Registers */
csrw sie, 0
csrw sip, 0
/* set Trap Vector Base Address Register */
la t0, trap_entry
csrw stvec, t0
li x1, 0
li x2, 0
li x3, 0
li x4, 0
li x5, 0
li x6, 0
li x7, 0
li x8, 0
li x9, 0
li x10,0
li x11,0
li x12,0
li x13,0
li x14,0
li x15,0
li x16,0
li x17,0
li x18,0
li x19,0
li x20,0
li x21,0
li x22,0
li x23,0
li x24,0
li x25,0
li x26,0
li x27,0
li x28,0
li x29,0
li x30,0
li x31,0
/* disable the FPU and the vector unit */
li t0, SSTATUS_FS + SSTATUS_VS
csrc sstatus, t0
li t0, SSTATUS_SUM
csrs sstatus, t0
.option push
.option norelax
la gp, __global_pointer$
.option pop
/* removed SMP support here */
la sp, __stack_start__
li t0, __STACKSIZE__
add sp, sp, t0
/**
* sscratch is always zero in kernel mode
*/
csrw sscratch, zero
call init_bss
#ifdef ARCH_MM_MMU
call rt_hw_mem_setup_early
call rt_kmem_pvoff
/* a0 := pvoff */
beq a0, zero, 1f
/* relocate pc */
la x1, _after_pc_relocation
sub x1, x1, a0
ret
_after_pc_relocation:
/* relocate gp */
sub gp, gp, a0
/* relocate context: sp */
la sp, __stack_start__
li t0, __STACKSIZE__
add sp, sp, t0
/* reset s0-fp */
mv s0, zero
/* relocate stvec */
la t0, trap_entry
csrw stvec, t0
1:
#endif
call sbi_init
call primary_cpu_entry
_never_return_here:
j .
.global _start_link_addr
_start_link_addr:
.dword __text_start


@@ -1,62 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-02-03 lizhirui first version
* 2022-11-10 WangXiaoyao Add readable syscall tracing
*/
#include <rthw.h>
#include <rtthread.h>
#ifdef RT_USING_SMART
#define DBG_TAG "syscall"
#define DBG_LVL DBG_WARNING
#include <rtdbg.h>
#include <stdint.h>
#include <mmu.h>
#include <page.h>
#include <lwp_user_mm.h>
#include "riscv_mmu.h"
#include "stack.h"
typedef rt_ubase_t (*syscallfunc_t)(rt_ubase_t, rt_ubase_t, rt_ubase_t, rt_ubase_t, rt_ubase_t, rt_ubase_t, rt_ubase_t);
void syscall_handler(struct rt_hw_stack_frame *regs)
{
const char *syscall_name;
int syscallid = regs->a7;
if (syscallid == 0)
{
LOG_E("syscall id = 0!\n");
while (1)
;
}
syscallfunc_t syscallfunc = (syscallfunc_t)lwp_get_sys_api(syscallid);
if (syscallfunc == RT_NULL)
{
LOG_E("unsupported syscall!\n");
sys_exit_group(-1);
}
#if DBG_LVL >= DBG_INFO
syscall_name = lwp_get_syscall_name(syscallid);
#endif
LOG_I("[0x%lx] %s(0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx)", rt_thread_self(), syscall_name,
regs->a0, regs->a1, regs->a2, regs->a3, regs->a4, regs->a5, regs->a6);
regs->a0 = syscallfunc(regs->a0, regs->a1, regs->a2, regs->a3, regs->a4, regs->a5, regs->a6);
regs->a7 = 0;
regs->epc += 4; // skip ecall instruction
LOG_I("[0x%lx] %s ret: 0x%lx", rt_thread_self(), syscall_name, regs->a0);
}
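/*
 * Worked example (illustrative): a user-mode `ecall` with a7 holding the
 * syscall number and a0..a6 holding the arguments lands here; the handler
 * dispatches through lwp_get_sys_api(), writes the return value to a0,
 * clears a7 and advances epc past the 4-byte ecall instruction.
 */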
#endif /* RT_USING_SMART */


@@ -1,76 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018/10/28 Bernard The unify RISC-V porting code.
* 2024/07/08 Shell Using CPUTIME as tick
*/
#include <rthw.h>
#include <rtthread.h>
#include <drivers/cputime.h>
#include <encoding.h>
#include "sbi.h"
#ifdef RT_USING_KTIME
#include <ktime.h>
#endif
static volatile unsigned long tick_cycles = 0;
int tick_isr(void)
{
rt_tick_increase();
sbi_set_timer(clock_cpu_gettime() + tick_cycles);
return 0;
}
/* the BSP must configure the clock base frequency */
RT_STATIC_ASSERT(defined_clockbase_freq, CPUTIME_TIMER_FREQ != 0);
/* Set up and enable the timer interrupt */
int rt_hw_tick_init(void)
{
/* calculate the tick cycles */
tick_cycles = CPUTIME_TIMER_FREQ / RT_TICK_PER_SECOND;
/* Clear the Supervisor-Timer bit in SIE */
clear_csr(sie, SIP_STIP);
/* Init riscv timer */
riscv_cputime_init();
/* Set timer */
sbi_set_timer(clock_cpu_gettime() + tick_cycles);
#ifdef RT_USING_KTIME
rt_ktime_cputimer_init();
#endif
/* Enable the Supervisor-Timer bit in SIE */
set_csr(sie, SIP_STIP);
return 0;
}
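/*
 * Worked example (illustrative numbers): with CPUTIME_TIMER_FREQ = 10 MHz
 * and RT_TICK_PER_SECOND = 1000, tick_cycles = 10000000 / 1000 = 10000, so
 * every timer event is programmed 10000 counter cycles after the previous
 * one, i.e. 1 ms apart.
 */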
/**
* This function will delay for the given number of microseconds.
*
* @param us the delay time in microseconds
*/
void rt_hw_us_delay(rt_uint32_t us)
{
unsigned long start_time;
unsigned long end_time;
unsigned long run_time;
start_time = clock_cpu_gettime();
end_time = start_time + us * (CPUTIME_TIMER_FREQ / 1000000);
do
{
run_time = clock_cpu_gettime();
} while(run_time < end_time);
}
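/*
 * Worked example (illustrative): at CPUTIME_TIMER_FREQ = 10 MHz the counter
 * advances 10 cycles per microsecond, so rt_hw_us_delay(100) busy-waits
 * until roughly 1000 cycles have elapsed.
 */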


@@ -1,17 +0,0 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018/10/28 Bernard The unify RISC-V porting code.
*/
#ifndef TICK_H__
#define TICK_H__
int tick_isr(void);
int rt_hw_tick_init(void);
#endif


@@ -1,61 +0,0 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-11-28 WangXiaoyao the first version
*/
#ifndef __TLB_H__
#define __TLB_H__
#include <stddef.h>
#include <stdint.h>
#include <rtthread.h>
#include <mm_aspace.h>
#include "sbi.h"
#include "riscv_mmu.h"
#define HANDLE_FAULT(ret) \
if (__builtin_expect((ret) != SBI_SUCCESS, 0)) \
LOG_W("%s failed", __FUNCTION__);
static inline void rt_hw_tlb_invalidate_all(void)
{
uintptr_t mask = -1ul;
HANDLE_FAULT(sbi_remote_sfence_vma(&mask, -1ul, 0, mask));
}
static inline void rt_hw_tlb_invalidate_all_local(void)
{
__asm__ volatile("sfence.vma" ::: "memory");
}
static inline void rt_hw_tlb_invalidate_aspace(rt_aspace_t aspace)
{
// TODO ASID
rt_hw_tlb_invalidate_all_local();
}
static inline void rt_hw_tlb_invalidate_page(rt_aspace_t aspace, void *start)
{
__asm__ volatile("sfence.vma %0, zero" ::"r"(start) : "memory");
}
static inline void rt_hw_tlb_invalidate_range(rt_aspace_t aspace, void *start,
size_t size, size_t stride)
{
// huge pages are treated as normal pages
if (size <= ARCH_PAGE_SIZE)
{
rt_hw_tlb_invalidate_page(aspace, start);
}
else
{
rt_hw_tlb_invalidate_aspace(aspace);
}
}
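/*
 * Usage sketch (illustrative, not original code): after changing the mapping
 * of a single 4 KiB page, only that entry needs to be flushed:
 *
 *   rt_hw_tlb_invalidate_range(aspace, vaddr, ARCH_PAGE_SIZE, ARCH_PAGE_SIZE);
 */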
#endif /* __TLB_H__ */


@@ -1,386 +0,0 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-12-08 RT-Thread first version
*/
#include <rthw.h>
#include <rtthread.h>
#include <stdint.h>
#include <mm_fault.h>
#include <mmu.h>
#include <encoding.h>
#include <stack.h>
#include <sbi.h>
#include <riscv.h>
#include <interrupt.h>
#include <plic.h>
#include <tick.h>
#ifdef RT_USING_SMART
#include <lwp_arch.h>
#endif
#define DBG_TAG "libcpu.trap"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
void dump_regs(struct rt_hw_stack_frame *regs)
{
rt_kprintf("--------------Dump Registers-----------------\n");
rt_kprintf("Function Registers:\n");
rt_kprintf("\tra(x1) = 0x%p\tuser_sp = 0x%p\n", regs->ra,
regs->user_sp_exc_stack);
rt_kprintf("\tgp(x3) = 0x%p\ttp(x4) = 0x%p\n", regs->gp, regs->tp);
rt_kprintf("Temporary Registers:\n");
rt_kprintf("\tt0(x5) = 0x%p\tt1(x6) = 0x%p\n", regs->t0, regs->t1);
rt_kprintf("\tt2(x7) = 0x%p\n", regs->t2);
rt_kprintf("\tt3(x28) = 0x%p\tt4(x29) = 0x%p\n", regs->t3, regs->t4);
rt_kprintf("\tt5(x30) = 0x%p\tt6(x31) = 0x%p\n", regs->t5, regs->t6);
rt_kprintf("Saved Registers:\n");
rt_kprintf("\ts0/fp(x8) = 0x%p\ts1(x9) = 0x%p\n", regs->s0_fp, regs->s1);
rt_kprintf("\ts2(x18) = 0x%p\ts3(x19) = 0x%p\n", regs->s2, regs->s3);
rt_kprintf("\ts4(x20) = 0x%p\ts5(x21) = 0x%p\n", regs->s4, regs->s5);
rt_kprintf("\ts6(x22) = 0x%p\ts7(x23) = 0x%p\n", regs->s6, regs->s7);
rt_kprintf("\ts8(x24) = 0x%p\ts9(x25) = 0x%p\n", regs->s8, regs->s9);
rt_kprintf("\ts10(x26) = 0x%p\ts11(x27) = 0x%p\n", regs->s10, regs->s11);
rt_kprintf("Function Arguments Registers:\n");
rt_kprintf("\ta0(x10) = 0x%p\ta1(x11) = 0x%p\n", regs->a0, regs->a1);
rt_kprintf("\ta2(x12) = 0x%p\ta3(x13) = 0x%p\n", regs->a2, regs->a3);
rt_kprintf("\ta4(x14) = 0x%p\ta5(x15) = 0x%p\n", regs->a4, regs->a5);
rt_kprintf("\ta6(x16) = 0x%p\ta7(x17) = 0x%p\n", regs->a6, regs->a7);
rt_kprintf("sstatus = 0x%p\n", regs->sstatus);
rt_kprintf("\t%s\n", (regs->sstatus & SSTATUS_SIE)
? "Supervisor Interrupt Enabled"
: "Supervisor Interrupt Disabled");
rt_kprintf("\t%s\n", (regs->sstatus & SSTATUS_SPIE)
? "Last Time Supervisor Interrupt Enabled"
: "Last Time Supervisor Interrupt Disabled");
rt_kprintf("\t%s\n", (regs->sstatus & SSTATUS_SPP)
? "Last Privilege is Supervisor Mode"
: "Last Privilege is User Mode");
rt_kprintf("\t%s\n", (regs->sstatus & SSTATUS_SUM)
? "Permit to Access User Page"
: "Not Permit to Access User Page");
rt_kprintf("\t%s\n", (regs->sstatus & (1 << 19))
? "Permit to Read Executable-only Page"
: "Not Permit to Read Executable-only Page");
rt_ubase_t satp_v = read_csr(satp);
rt_kprintf("satp = 0x%p\n", satp_v);
rt_kprintf("\tCurrent Page Table(Physical) = 0x%p\n",
__MASKVALUE(satp_v, __MASK(44)) << PAGE_OFFSET_BIT);
rt_kprintf("\tCurrent ASID = 0x%p\n", __MASKVALUE(satp_v >> 44, __MASK(16))
<< PAGE_OFFSET_BIT);
const char *mode_str = "Unknown Address Translation/Protection Mode";
switch (__MASKVALUE(satp_v >> 60, __MASK(4)))
{
case 0:
mode_str = "No Address Translation/Protection Mode";
break;
case 8:
mode_str = "Page-based 39-bit Virtual Addressing Mode";
break;
case 9:
mode_str = "Page-based 48-bit Virtual Addressing Mode";
break;
}
rt_kprintf("\tMode = %s\n", mode_str);
rt_kprintf("-----------------Dump OK---------------------\n");
}
static const char *Exception_Name[] = {"Instruction Address Misaligned",
"Instruction Access Fault",
"Illegal Instruction",
"Breakpoint",
"Load Address Misaligned",
"Load Access Fault",
"Store/AMO Address Misaligned",
"Store/AMO Access Fault",
"Environment call from U-mode",
"Environment call from S-mode",
"Reserved-10",
"Reserved-11",
"Instruction Page Fault",
"Load Page Fault",
"Reserved-14",
"Store/AMO Page Fault"};
static const char *Interrupt_Name[] = {
"User Software Interrupt",
"Supervisor Software Interrupt",
"Reversed-2",
"Reversed-3",
"User Timer Interrupt",
"Supervisor Timer Interrupt",
"Reversed-6",
"Reversed-7",
"User External Interrupt",
"Supervisor External Interrupt",
"Reserved-10",
"Reserved-11",
};
#ifndef RT_USING_SMP
static volatile int nested = 0;
#define ENTER_TRAP nested += 1
#define EXIT_TRAP nested -= 1
#define CHECK_NESTED_PANIC(cause, tval, epc, eframe) \
if (nested != 1) handle_nested_trap_panic(cause, tval, epc, eframe)
#endif /* RT_USING_SMP */
static const char *get_exception_msg(int id)
{
const char *msg;
if (id < sizeof(Exception_Name) / sizeof(const char *))
{
msg = Exception_Name[id];
}
else
{
msg = "Unknown Exception";
}
return msg;
}
#ifdef RT_USING_SMART
#include "lwp.h"
void handle_user(rt_ubase_t scause, rt_ubase_t stval, rt_ubase_t sepc,
struct rt_hw_stack_frame *sp)
{
rt_ubase_t id = __MASKVALUE(scause, __MASK(63UL));
struct rt_lwp *lwp;
/* user page fault */
enum rt_mm_fault_op fault_op;
enum rt_mm_fault_type fault_type;
switch (id)
{
case EP_LOAD_PAGE_FAULT:
fault_op = MM_FAULT_OP_READ;
fault_type = MM_FAULT_TYPE_GENERIC_MMU;
break;
case EP_LOAD_ACCESS_FAULT:
fault_op = MM_FAULT_OP_READ;
fault_type = MM_FAULT_TYPE_BUS_ERROR;
break;
case EP_LOAD_ADDRESS_MISALIGNED:
fault_op = MM_FAULT_OP_READ;
fault_type = MM_FAULT_TYPE_BUS_ERROR;
break;
case EP_STORE_PAGE_FAULT:
fault_op = MM_FAULT_OP_WRITE;
fault_type = MM_FAULT_TYPE_GENERIC_MMU;
break;
case EP_STORE_ACCESS_FAULT:
fault_op = MM_FAULT_OP_WRITE;
fault_type = MM_FAULT_TYPE_BUS_ERROR;
break;
case EP_STORE_ADDRESS_MISALIGNED:
fault_op = MM_FAULT_OP_WRITE;
fault_type = MM_FAULT_TYPE_BUS_ERROR;
break;
case EP_INSTRUCTION_PAGE_FAULT:
fault_op = MM_FAULT_OP_EXECUTE;
fault_type = MM_FAULT_TYPE_GENERIC_MMU;
break;
case EP_INSTRUCTION_ACCESS_FAULT:
fault_op = MM_FAULT_OP_EXECUTE;
fault_type = MM_FAULT_TYPE_BUS_ERROR;
break;
case EP_INSTRUCTION_ADDRESS_MISALIGNED:
fault_op = MM_FAULT_OP_EXECUTE;
fault_type = MM_FAULT_TYPE_BUS_ERROR;
break;
default:
fault_op = 0;
}
if (fault_op)
{
rt_base_t saved_stat;
lwp = lwp_self();
struct rt_aspace_fault_msg msg = {
.fault_op = fault_op,
.fault_type = fault_type,
.fault_vaddr = (void *)stval,
};
__asm__ volatile("csrrsi %0, sstatus, 2" : "=r"(saved_stat));
if (lwp && rt_aspace_fault_try_fix(lwp->aspace, &msg))
{
__asm__ volatile("csrw sstatus, %0" ::"r"(saved_stat));
return;
}
__asm__ volatile("csrw sstatus, %0" ::"r"(saved_stat));
}
LOG_E("[FATAL ERROR] Exception %ld:%s\n", id, get_exception_msg(id));
LOG_E("scause:0x%p,stval:0x%p,sepc:0x%p\n", scause, stval, sepc);
dump_regs(sp);
rt_thread_t cur_thr = rt_thread_self();
struct rt_hw_backtrace_frame frame = {.fp = sp->s0_fp, .pc = sepc};
rt_kprintf("fp = %p\n", frame.fp);
lwp_backtrace_frame(cur_thr, &frame);
LOG_E("User Fault, killing thread: %s", cur_thr->parent.name);
EXIT_TRAP;
sys_exit_group(-1);
}
#endif
#ifdef ARCH_RISCV_VECTOR
static void vector_enable(struct rt_hw_stack_frame *sp)
{
sp->sstatus |= SSTATUS_VS_INITIAL;
}
/**
* detect vector/FP instruction support; vector and FP opcodes are not
* distinguished here
*/
static int illegal_inst_recoverable(rt_ubase_t stval,
struct rt_hw_stack_frame *sp)
{
// the low 7 bits are the opcode
int opcode = stval & 0x7f;
int csr = (stval & 0xFFF00000) >> 20;
// ref riscv-v-spec-1.0, [Vector Instruction Formats]
int width = ((stval & 0x7000) >> 12) - 1;
int flag = 0;
switch (opcode)
{
case 0x57: // V
case 0x27: // scalar FLOAT
case 0x07:
case 0x73: // CSR
flag = 1;
break;
}
if (flag)
{
vector_enable(sp);
}
return flag;
}
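/*
 * Worked example (illustrative): the first vector instruction a thread
 * executes, e.g. `vadd.vv` (opcode 0x57 in the low 7 bits), traps as an
 * illegal instruction while VS is Off; the handler flips VS to Initial and
 * returns without advancing sepc, so the instruction is simply retried.
 */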
#endif
static void handle_nested_trap_panic(rt_ubase_t cause, rt_ubase_t tval,
rt_ubase_t epc,
struct rt_hw_stack_frame *eframe)
{
LOG_E("\n-------- [SEVER ERROR] --------");
LOG_E("Nested trap detected");
LOG_E("scause:0x%p,stval:0x%p,sepc:0x%p\n", cause, tval, epc);
dump_regs(eframe);
rt_hw_cpu_shutdown();
}
#define IN_USER_SPACE (stval >= USER_VADDR_START && stval < USER_VADDR_TOP)
#define PAGE_FAULT (id == EP_LOAD_PAGE_FAULT || id == EP_STORE_PAGE_FAULT)
/* Trap entry */
void handle_trap(rt_ubase_t scause, rt_ubase_t stval, rt_ubase_t sepc,
struct rt_hw_stack_frame *sp)
{
ENTER_TRAP;
rt_ubase_t id = __MASKVALUE(scause, __MASK(63UL));
const char *msg;
/* supervisor external interrupt */
if ((SCAUSE_INTERRUPT & scause) &&
SCAUSE_S_EXTERNAL_INTR == (scause & 0xff))
{
rt_interrupt_enter();
plic_handle_irq();
rt_interrupt_leave();
}
else if ((SCAUSE_INTERRUPT | SCAUSE_S_TIMER_INTR) == scause)
{
/* supervisor timer */
rt_interrupt_enter();
tick_isr();
rt_interrupt_leave();
}
else
{
if (SCAUSE_INTERRUPT & scause)
{
if (id < sizeof(Interrupt_Name) / sizeof(const char *))
{
msg = Interrupt_Name[id];
}
else
{
msg = "Unknown Interrupt";
}
LOG_E("Unhandled Interrupt %ld:%s\n", id, msg);
}
else
{
#ifdef ARCH_RISCV_VECTOR
if (scause == 0x2)
{
if (!(sp->sstatus & SSTATUS_VS) &&
illegal_inst_recoverable(stval, sp))
goto _exit;
}
#endif /* ARCH_RISCV_VECTOR */
#ifdef RT_USING_SMART
if (!(sp->sstatus & 0x100) || (PAGE_FAULT && IN_USER_SPACE))
{
handle_user(scause, stval, sepc, sp);
// if handle_user() returns, execution goes back to U-mode
goto _exit;
}
#endif
// handle kernel exception:
rt_kprintf("Unhandled Exception %ld:%s\n", id,
get_exception_msg(id));
}
// traps must not nest while another trap / interrupt is being handled
CHECK_NESTED_PANIC(scause, stval, sepc, sp);
rt_kprintf("scause:0x%p,stval:0x%p,sepc:0x%p\n", scause, stval, sepc);
dump_regs(sp);
rt_thread_t cur_thr = rt_thread_self();
rt_kprintf("--------------Thread list--------------\n");
rt_kprintf("current thread: %s\n", cur_thr->parent.name);
rt_kprintf("--------------Backtrace--------------\n");
struct rt_hw_backtrace_frame frame = {.fp = sp->s0_fp, .pc = sepc};
#ifdef RT_USING_SMART
if (!(sp->sstatus & 0x100))
{
lwp_backtrace_frame(cur_thr, &frame);
}
else
#endif
{
rt_backtrace_frame(cur_thr, &frame);
}
while (1)
;
}
_exit:
EXIT_TRAP;
return;
}