feat: libcpu/risc-v: unify MMU-related work

These changes standardize memory management across the 'virt64' and 'c906'
ports so that address spaces and page tables are handled through one common
code path.

Changes:
- Creation of ASID management files (`asid.c`) for both the 'c906' and
  'virt64' ports, tagging TLB entries per address space so that a switch
  no longer requires a full local TLB flush.
- Extensive updates to MMU configuration and handling in the `mmu.c` and
  `mmu.h` files, including moving page-table entry and address types from
  `rt_size_t` to `rt_ubase_t`.
- Addition of `rt_hw_asid_init()` and `rt_hw_asid_switch_pgtbl()` to manage
  ASID allocation and page-table switching (see the sketch after this list).
- For c906, aligned the early memory setup with the one from virt64.
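
For orientation, a minimal sketch (not the kernel's own code) of how
`rt_hw_asid_switch_pgtbl()` in the new `asid.c` packs the `satp` CSR,
assuming the Sv39/RV64 field layout; the constant names below are
illustrative stand-ins for the kernel's `SATP_MODE_OFFSET`, `PPN_BITS`,
and `PAGE_OFFSET_BIT`:

```c
/* Sv39/RV64 satp layout per the RISC-V privileged spec:
 * satp[63:60] MODE, satp[59:44] ASID, satp[43:0] PPN. */
#include <stdint.h>

#define SV39_MODE   8ull  /* stand-in for SATP_MODE_SV39 */
#define MODE_SHIFT  60    /* stand-in for SATP_MODE_OFFSET */
#define ASID_SHIFT  44    /* stand-in for PPN_BITS */
#define PAGE_SHIFT  12    /* stand-in for PAGE_OFFSET_BIT */

static inline uint64_t satp_pack(uint64_t asid, uint64_t pgtbl_pa)
{
    /* the root page table's physical page number fills the low bits */
    return (SV39_MODE << MODE_SHIFT) | (asid << ASID_SHIFT) |
           (pgtbl_pa >> PAGE_SHIFT);
}
```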

Signed-off-by: Shell <smokewood@qq.com>
Author:    Shell
Date:      2024-09-02 17:58:56 +08:00
Committer: Meco Man
Parent:    7528645f59
Commit:    a00aaab2ba
10 changed files with 433 additions and 308 deletions

View File

@ -0,0 +1,85 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-07-12 RT-Thread first version.
*/
#define DBG_TAG "hw.asid"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <rtthread.h>
#include <board.h>
#include <cache.h>
#include <mm_aspace.h>
#include <mm_page.h>
#include <mmu.h>
#include <riscv_mmu.h>
#include <tlb.h>
static rt_uint8_t ASID_BITS = 0;
static rt_uint32_t next_asid;
static rt_uint64_t global_asid_generation;
#define ASID_MASK ((1 << ASID_BITS) - 1)
#define ASID_FIRST_GENERATION (1 << ASID_BITS)
#define MAX_ASID ASID_FIRST_GENERATION
void rt_hw_asid_init(void)
{
rt_ubase_t satp_reg = read_csr(satp); /* satp is SXLEN bits wide; a 32-bit temporary would truncate the MODE/ASID fields */
satp_reg |= (((rt_uint64_t)0xffff) << PPN_BITS);
write_csr(satp, satp_reg);
unsigned short valid_asid_bit = ((read_csr(satp) >> PPN_BITS) & 0xffff);
// The maximal value of ASIDLEN is 9 for Sv32, or 16 for Sv39, Sv48, and Sv57
for (unsigned i = 0; i < 16; i++)
{
if (!(valid_asid_bit & 0x1))
{
break;
}
valid_asid_bit >>= 1;
ASID_BITS++;
}
global_asid_generation = ASID_FIRST_GENERATION;
next_asid = 1;
}
static rt_uint64_t _asid_acquire(rt_aspace_t aspace)
{
if ((aspace->asid ^ global_asid_generation) >> ASID_BITS) // not same generation
{
if (next_asid != MAX_ASID)
{
aspace->asid = global_asid_generation | next_asid;
next_asid++;
}
else
{
// roll over to the next generation
global_asid_generation += ASID_FIRST_GENERATION;
next_asid = 1;
rt_hw_tlb_invalidate_all_local();
aspace->asid = global_asid_generation | next_asid;
next_asid++;
}
}
return aspace->asid & ASID_MASK;
}
void rt_hw_asid_switch_pgtbl(struct rt_aspace *aspace, rt_ubase_t pgtbl)
{
rt_uint64_t asid = _asid_acquire(aspace);
write_csr(satp, (((size_t)SATP_MODE) << SATP_MODE_OFFSET) |
(asid << PPN_BITS) |
((rt_ubase_t)pgtbl >> PAGE_OFFSET_BIT));
asm volatile("sfence.vma x0,%0"::"r"(asid):"memory");
}
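
A note on `_asid_acquire()` above: the bits of `aspace->asid` above
`ASID_BITS` act as a generation tag. Assuming the hardware reports
`ASID_BITS == 16`, `ASID_FIRST_GENERATION` and `MAX_ASID` are both 0x10000:
generation 0x10000 hands out the values 0x10001 through 0x1ffff; once
`next_asid` reaches `MAX_ASID`, the generation rolls over to 0x20000,
`rt_hw_tlb_invalidate_all_local()` flushes the stale translations once, and
allocation restarts at 0x20001. The hardware only ever sees the low bits
(`aspace->asid & ASID_MASK`); an aspace still holding an ASID from an older
generation fails the XOR test at the top of the function and is issued a
fresh one.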

View File

@ -15,7 +15,7 @@
#include <stdint.h>
#define DBG_TAG "hw.mmu"
#define DBG_LVL DBG_WARNING
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <board.h>
@ -44,78 +44,15 @@ volatile __attribute__((aligned(4 * 1024)))
rt_ubase_t MMUTable[__SIZE(VPN2_BIT)];
#ifdef ARCH_USING_ASID
static rt_uint8_t ASID_BITS = 0;
static rt_uint32_t next_asid;
static rt_uint64_t global_asid_generation;
#define ASID_MASK ((1 << ASID_BITS) - 1)
#define ASID_FIRST_GENERATION (1 << ASID_BITS)
#define MAX_ASID ASID_FIRST_GENERATION
static void _asid_init()
{
unsigned int satp_reg = read_csr(satp);
satp_reg |= (((rt_uint64_t)0xffff) << PPN_BITS);
write_csr(satp, satp_reg);
unsigned short valid_asid_bit = ((read_csr(satp) >> PPN_BITS) & 0xffff);
// The maximal value of ASIDLEN, is 9 for Sv32 or 16 for Sv39, Sv48, and Sv57
for (unsigned i = 0; i < 16; i++)
{
if (!(valid_asid_bit & 0x1))
{
break;
}
valid_asid_bit >>= 1;
ASID_BITS++;
}
global_asid_generation = ASID_FIRST_GENERATION;
next_asid = 1;
}
static rt_uint64_t _asid_check_switch(rt_aspace_t aspace)
{
if ((aspace->asid ^ global_asid_generation) >> ASID_BITS) // not same generation
{
if (next_asid != MAX_ASID)
{
aspace->asid = global_asid_generation | next_asid;
next_asid++;
}
else
{
// scroll to next generation
global_asid_generation += ASID_FIRST_GENERATION;
next_asid = 1;
rt_hw_tlb_invalidate_all_local();
aspace->asid = global_asid_generation | next_asid;
next_asid++;
}
}
return aspace->asid & ASID_MASK;
}
void rt_hw_aspace_switch(rt_aspace_t aspace)
{
uintptr_t page_table = (uintptr_t)rt_kmem_v2p(aspace->page_table);
current_mmu_table = aspace->page_table;
rt_uint64_t asid = _asid_check_switch(aspace);
write_csr(satp, (((size_t)SATP_MODE) << SATP_MODE_OFFSET) |
(asid << PPN_BITS) |
((rt_ubase_t)page_table >> PAGE_OFFSET_BIT));
asm volatile("sfence.vma x0,%0"::"r"(asid):"memory");
rt_hw_asid_switch_pgtbl(aspace, page_table);
}
#define ASID_INIT() _asid_init()
#else /* ARCH_USING_ASID */
#define ASID_INIT()
#else /* !ARCH_USING_ASID */
void rt_hw_aspace_switch(rt_aspace_t aspace)
{
uintptr_t page_table = (uintptr_t)rt_kmem_v2p(aspace->page_table);
@ -126,6 +63,9 @@ void rt_hw_aspace_switch(rt_aspace_t aspace)
rt_hw_tlb_invalidate_all_local();
}
void rt_hw_asid_init(void)
{
}
#endif /* ARCH_USING_ASID */
void *rt_hw_mmu_tbl_get()
@ -136,28 +76,28 @@ void *rt_hw_mmu_tbl_get()
static int _map_one_page(struct rt_aspace *aspace, void *va, void *pa,
size_t attr)
{
rt_size_t l1_off, l2_off, l3_off;
rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;
rt_ubase_t l1_off, l2_off, l3_off;
rt_ubase_t *mmu_l1, *mmu_l2, *mmu_l3;
l1_off = GET_L1((size_t)va);
l2_off = GET_L2((size_t)va);
l3_off = GET_L3((size_t)va);
mmu_l1 = ((rt_size_t *)aspace->page_table) + l1_off;
mmu_l1 = ((rt_ubase_t *)aspace->page_table) + l1_off;
if (PTE_USED(*mmu_l1))
{
mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), PV_OFFSET);
mmu_l2 = (rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), PV_OFFSET);
}
else
{
mmu_l2 = (rt_size_t *)rt_pages_alloc(0);
mmu_l2 = (rt_ubase_t *)rt_pages_alloc(0);
if (mmu_l2)
{
rt_memset(mmu_l2, 0, PAGE_SIZE);
rt_hw_cpu_dcache_clean(mmu_l2, PAGE_SIZE);
*mmu_l1 = COMBINEPTE((rt_size_t)VPN_TO_PPN(mmu_l2, PV_OFFSET),
*mmu_l1 = COMBINEPTE((rt_ubase_t)VPN_TO_PPN(mmu_l2, PV_OFFSET),
PAGE_DEFAULT_ATTR_NEXT);
rt_hw_cpu_dcache_clean(mmu_l1, sizeof(*mmu_l1));
}
@ -171,18 +111,18 @@ static int _map_one_page(struct rt_aspace *aspace, void *va, void *pa,
{
RT_ASSERT(!PAGE_IS_LEAF(*(mmu_l2 + l2_off)));
mmu_l3 =
(rt_size_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)), PV_OFFSET);
(rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)), PV_OFFSET);
}
else
{
mmu_l3 = (rt_size_t *)rt_pages_alloc(0);
mmu_l3 = (rt_ubase_t *)rt_pages_alloc(0);
if (mmu_l3)
{
rt_memset(mmu_l3, 0, PAGE_SIZE);
rt_hw_cpu_dcache_clean(mmu_l3, PAGE_SIZE);
*(mmu_l2 + l2_off) =
COMBINEPTE((rt_size_t)VPN_TO_PPN(mmu_l3, PV_OFFSET),
COMBINEPTE((rt_ubase_t)VPN_TO_PPN(mmu_l3, PV_OFFSET),
PAGE_DEFAULT_ATTR_NEXT);
rt_hw_cpu_dcache_clean(mmu_l2, sizeof(*mmu_l2));
// declares a reference to parent page table
@ -197,7 +137,7 @@ static int _map_one_page(struct rt_aspace *aspace, void *va, void *pa,
RT_ASSERT(!PTE_USED(*(mmu_l3 + l3_off)));
// declares a reference to parent page table
rt_page_ref_inc((void *)mmu_l3, 0);
*(mmu_l3 + l3_off) = COMBINEPTE((rt_size_t)pa, attr);
*(mmu_l3 + l3_off) = COMBINEPTE((rt_ubase_t)pa, attr);
rt_hw_cpu_dcache_clean(mmu_l3 + l3_off, sizeof(*(mmu_l3 + l3_off)));
return 0;
}
@ -240,7 +180,7 @@ void *rt_hw_mmu_map(struct rt_aspace *aspace, void *v_addr, void *p_addr,
return NULL;
}
static void _unmap_pte(rt_size_t *pentry, rt_size_t *lvl_entry[], int level)
static void _unmap_pte(rt_ubase_t *pentry, rt_ubase_t *lvl_entry[], int level)
{
int loop_flag = 1;
while (loop_flag)
@ -270,26 +210,26 @@ static void _unmap_pte(rt_size_t *pentry, rt_size_t *lvl_entry[], int level)
static size_t _unmap_area(struct rt_aspace *aspace, void *v_addr, size_t size)
{
rt_size_t loop_va = __UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);
rt_ubase_t loop_va = __UMASKVALUE((rt_ubase_t)v_addr, PAGE_OFFSET_MASK);
size_t unmapped = 0;
int i = 0;
rt_size_t lvl_off[3];
rt_size_t *lvl_entry[3];
lvl_off[0] = (rt_size_t)GET_L1(loop_va);
lvl_off[1] = (rt_size_t)GET_L2(loop_va);
lvl_off[2] = (rt_size_t)GET_L3(loop_va);
rt_ubase_t lvl_off[3];
rt_ubase_t *lvl_entry[3];
lvl_off[0] = (rt_ubase_t)GET_L1(loop_va);
lvl_off[1] = (rt_ubase_t)GET_L2(loop_va);
lvl_off[2] = (rt_ubase_t)GET_L3(loop_va);
unmapped = 1 << (ARCH_PAGE_SHIFT + ARCH_INDEX_WIDTH * 2ul);
rt_size_t *pentry;
lvl_entry[i] = ((rt_size_t *)aspace->page_table + lvl_off[i]);
rt_ubase_t *pentry;
lvl_entry[i] = ((rt_ubase_t *)aspace->page_table + lvl_off[i]);
pentry = lvl_entry[i];
// find leaf page table entry
while (PTE_USED(*pentry) && !PAGE_IS_LEAF(*pentry))
{
i += 1;
lvl_entry[i] = ((rt_size_t *)PPN_TO_VPN(GET_PADDR(*pentry), PV_OFFSET) +
lvl_entry[i] = ((rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*pentry), PV_OFFSET) +
lvl_off[i]);
pentry = lvl_entry[i];
unmapped >>= ARCH_INDEX_WIDTH;
@ -321,8 +261,7 @@ void rt_hw_mmu_unmap(struct rt_aspace *aspace, void *v_addr, size_t size)
MM_PGTBL_UNLOCK(aspace);
// when unmapped == 0, region not exist in pgtbl
if (!unmapped || unmapped > size)
break;
if (!unmapped || unmapped > size) break;
size -= unmapped;
v_addr += unmapped;
@ -335,7 +274,8 @@ static inline void _init_region(void *vaddr, size_t size)
rt_ioremap_start = vaddr;
rt_ioremap_size = size;
rt_mpr_start = rt_ioremap_start - rt_mpr_size;
LOG_D("rt_ioremap_start: %p, rt_mpr_start: %p", rt_ioremap_start, rt_mpr_start);
LOG_D("rt_ioremap_start: %p, rt_mpr_start: %p", rt_ioremap_start,
rt_mpr_start);
}
#else
static inline void _init_region(void *vaddr, size_t size)
@ -352,8 +292,8 @@ static inline void _init_region(void *vaddr, size_t size)
#define KERN_SPACE_SIZE ((size_t)USER_VADDR_START - 0x1000)
#endif
int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_size_t size,
rt_size_t *vtable, rt_size_t pv_off)
int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_ubase_t size,
rt_ubase_t *vtable, rt_ubase_t pv_off)
{
size_t l1_off, va_s, va_e;
rt_base_t level;
@ -363,8 +303,8 @@ int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_size_t size,
return -1;
}
va_s = (rt_size_t)v_address;
va_e = ((rt_size_t)v_address) + size - 1;
va_s = (rt_ubase_t)v_address;
va_e = ((rt_ubase_t)v_address) + size - 1;
if (va_e < va_s)
{
@ -405,15 +345,15 @@ static inline uintptr_t _get_level_size(int level)
return 1ul << (ARCH_PAGE_SHIFT + (max_level - level) * ARCH_INDEX_WIDTH);
}
static rt_size_t *_query(struct rt_aspace *aspace, void *vaddr, int *level)
static rt_ubase_t *_query(struct rt_aspace *aspace, void *vaddr, int *level)
{
rt_size_t l1_off, l2_off, l3_off;
rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;
rt_size_t pa;
rt_ubase_t l1_off, l2_off, l3_off;
rt_ubase_t *mmu_l1, *mmu_l2, *mmu_l3;
rt_ubase_t pa;
l1_off = GET_L1((rt_size_t)vaddr);
l2_off = GET_L2((rt_size_t)vaddr);
l3_off = GET_L3((rt_size_t)vaddr);
l1_off = GET_L1((rt_uintptr_t)vaddr);
l2_off = GET_L2((rt_uintptr_t)vaddr);
l3_off = GET_L3((rt_uintptr_t)vaddr);
if (!aspace)
{
@ -421,7 +361,7 @@ static rt_size_t *_query(struct rt_aspace *aspace, void *vaddr, int *level)
return RT_NULL;
}
mmu_l1 = ((rt_size_t *)aspace->page_table) + l1_off;
mmu_l1 = ((rt_ubase_t *)aspace->page_table) + l1_off;
if (PTE_USED(*mmu_l1))
{
@ -431,7 +371,7 @@ static rt_size_t *_query(struct rt_aspace *aspace, void *vaddr, int *level)
return mmu_l1;
}
mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), PV_OFFSET);
mmu_l2 = (rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), PV_OFFSET);
if (PTE_USED(*(mmu_l2 + l2_off)))
{
@ -441,7 +381,7 @@ static rt_size_t *_query(struct rt_aspace *aspace, void *vaddr, int *level)
return mmu_l2 + l2_off;
}
mmu_l3 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)),
mmu_l3 = (rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)),
PV_OFFSET);
if (PTE_USED(*(mmu_l3 + l3_off)))
@ -458,7 +398,7 @@ static rt_size_t *_query(struct rt_aspace *aspace, void *vaddr, int *level)
void *rt_hw_mmu_v2p(struct rt_aspace *aspace, void *vaddr)
{
int level;
uintptr_t *pte = _query(aspace, vaddr, &level);
rt_ubase_t *pte = _query(aspace, vaddr, &level);
uintptr_t paddr;
if (pte)
@ -468,23 +408,23 @@ void *rt_hw_mmu_v2p(struct rt_aspace *aspace, void *vaddr)
}
else
{
LOG_I("%s: failed at %p", __func__, vaddr);
LOG_D("%s: failed at %p", __func__, vaddr);
paddr = (uintptr_t)ARCH_MAP_FAILED;
}
return (void *)paddr;
}
static int _noncache(uintptr_t *pte)
static int _noncache(rt_base_t *pte)
{
return 0;
}
static int _cache(uintptr_t *pte)
static int _cache(rt_base_t *pte)
{
return 0;
}
static int (*control_handler[MMU_CNTL_DUMMY_END])(uintptr_t *pte) = {
static int (*control_handler[MMU_CNTL_DUMMY_END])(rt_base_t *pte) = {
[MMU_CNTL_CACHE] = _cache,
[MMU_CNTL_NONCACHE] = _noncache,
};
@ -496,14 +436,14 @@ int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
int err = -RT_EINVAL;
void *vend = vaddr + size;
int (*handler)(uintptr_t * pte);
int (*handler)(rt_base_t *pte);
if (cmd >= 0 && cmd < MMU_CNTL_DUMMY_END)
{
handler = control_handler[cmd];
while (vaddr < vend)
{
uintptr_t *pte = _query(aspace, vaddr, &level);
rt_base_t *pte = _query(aspace, vaddr, &level);
void *range_end = vaddr + _get_level_size(level);
RT_ASSERT(range_end <= vend);
@ -556,14 +496,14 @@ void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
attr = MMU_MAP_K_DEVICE;
}
struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
struct rt_mm_va_hint hint = {
.flags = MMF_MAP_FIXED,
.limit_start = aspace->start,
.limit_range_size = aspace->size,
.map_size = mdesc->vaddr_end -
mdesc->vaddr_start + 1,
.map_size = mdesc->vaddr_end - mdesc->vaddr_start + 1,
.prefer = (void *)mdesc->vaddr_start};
if (mdesc->paddr_start == (rt_size_t)ARCH_MAP_FAILED)
if (mdesc->paddr_start == (rt_uintptr_t)ARCH_MAP_FAILED)
mdesc->paddr_start = mdesc->vaddr_start + PV_OFFSET;
rt_aspace_map_phy_static(aspace, &mdesc->varea, &hint, attr,
@ -571,38 +511,19 @@ void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
mdesc++;
}
ASID_INIT();
rt_hw_asid_init();
rt_hw_aspace_switch(&rt_kernel_space);
rt_page_cleanup();
}
void rt_hw_mmu_kernel_map_init(rt_aspace_t aspace, rt_size_t vaddr_start, rt_size_t size)
{
rt_size_t paddr_start =
__UMASKVALUE(VPN_TO_PPN(vaddr_start, PV_OFFSET), PAGE_OFFSET_MASK);
rt_size_t va_s = GET_L1(vaddr_start);
rt_size_t va_e = GET_L1(vaddr_start + size - 1);
rt_size_t i;
for (i = va_s; i <= va_e; i++)
{
MMUTable[i] =
COMBINEPTE(paddr_start, PAGE_ATTR_RWX | PTE_G | PTE_V | PTE_CACHE |
PTE_SHARE | PTE_BUF | PTE_A | PTE_D);
paddr_start += L1_PAGE_SIZE;
}
rt_hw_tlb_invalidate_all_local();
}
#define SATP_BASE ((size_t)SATP_MODE << SATP_MODE_OFFSET)
#define SATP_BASE ((rt_ubase_t)SATP_MODE << SATP_MODE_OFFSET)
void rt_hw_mem_setup_early(void)
{
rt_size_t pv_off;
rt_size_t ps = 0x0;
rt_size_t vs = 0x0;
rt_size_t *early_pgtbl = (size_t *)(((size_t)&__bss_end + 4095) & ~0xfff);
rt_ubase_t pv_off;
rt_ubase_t ps = 0x0;
rt_ubase_t vs = 0x0;
rt_ubase_t *early_pgtbl = (rt_ubase_t *)(((size_t)&__bss_end + 4095) & ~0xfff);
/* calculate pv_offset */
void *symb_pc;
@ -617,7 +538,8 @@ void rt_hw_mem_setup_early(void)
{
if (pv_off & (1ul << (ARCH_INDEX_WIDTH * 2 + ARCH_PAGE_SHIFT)))
{
LOG_E("%s: not aligned virtual address. pv_offset %p", __func__, pv_off);
LOG_E("%s: not aligned virtual address. pv_offset %p", __func__,
pv_off);
RT_ASSERT(0);
}
@ -627,8 +549,7 @@ void rt_hw_mem_setup_early(void)
*/
for (size_t i = 0; i < __SIZE(PPN0_BIT); i++)
{
early_pgtbl[i] = COMBINEPTE(ps, PAGE_ATTR_RWX | PTE_G | PTE_V | PTE_CACHE |
PTE_SHARE | PTE_BUF | PTE_A | PTE_D);
early_pgtbl[i] = COMBINEPTE(ps, MMU_MAP_EARLY);
ps += L1_PAGE_SIZE;
}
@ -638,12 +559,11 @@ void rt_hw_mem_setup_early(void)
vs = ps - pv_off;
/* relocate region */
rt_size_t vs_idx = GET_L1(vs);
rt_size_t ve_idx = GET_L1(vs + 0x80000000);
rt_ubase_t vs_idx = GET_L1(vs);
rt_ubase_t ve_idx = GET_L1(vs + 0x80000000);
for (size_t i = vs_idx; i < ve_idx; i++)
{
early_pgtbl[i] = COMBINEPTE(ps, PAGE_ATTR_RWX | PTE_G | PTE_V | PTE_CACHE |
PTE_SHARE | PTE_BUF | PTE_A | PTE_D);
early_pgtbl[i] = COMBINEPTE(ps, MMU_MAP_EARLY);
ps += L1_PAGE_SIZE;
}
@ -657,7 +577,7 @@ void rt_hw_mem_setup_early(void)
void *rt_hw_mmu_pgtbl_create(void)
{
size_t *mmu_table;
rt_ubase_t *mmu_table;
mmu_table = (rt_ubase_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
if (!mmu_table)
{

View File

@ -30,7 +30,7 @@ struct mem_desc
{
rt_size_t vaddr_start;
rt_size_t vaddr_end;
rt_size_t paddr_start;
rt_ubase_t paddr_start;
rt_size_t attr;
struct rt_varea varea;
};
@ -43,8 +43,8 @@ struct mem_desc
#define GET_PPN(pte) \
(__PARTBIT(pte, PTE_PPN_SHIFT, PHYSICAL_ADDRESS_WIDTH_BITS - PTE_PPN_SHIFT))
#define GET_PADDR(pte) (GET_PPN(pte) << PAGE_OFFSET_BIT)
#define VPN_TO_PPN(vaddr, pv_off) (((rt_size_t)(vaddr)) + (pv_off))
#define PPN_TO_VPN(paddr, pv_off) (((rt_size_t)(paddr)) - (pv_off))
#define VPN_TO_PPN(vaddr, pv_off) (((rt_uintptr_t)(vaddr)) + (pv_off))
#define PPN_TO_VPN(paddr, pv_off) (((rt_uintptr_t)(paddr)) - (pv_off))
#define COMBINEVADDR(l1_off, l2_off, l3_off) \
(((l1_off) << VPN2_SHIFT) | ((l2_off) << VPN1_SHIFT) | \
((l3_off) << VPN0_SHIFT))
@ -57,11 +57,11 @@ struct mem_desc
#define MMU_MAP_ERROR_CONFLICT -4
void *rt_hw_mmu_tbl_get(void);
int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_size_t size,
rt_size_t *vtable, rt_size_t pv_off);
int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_ubase_t size,
rt_ubase_t *vtable, rt_ubase_t pv_off);
void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr);
void rt_hw_mmu_kernel_map_init(rt_aspace_t aspace, rt_size_t vaddr_start,
rt_size_t size);
void rt_hw_mmu_kernel_map_init(rt_aspace_t aspace, rt_ubase_t vaddr_start,
rt_ubase_t size);
void *rt_hw_mmu_map(rt_aspace_t aspace, void *v_addr, void *p_addr, size_t size,
size_t attr);
void rt_hw_mmu_unmap(rt_aspace_t aspace, void *v_addr, size_t size);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
@ -43,6 +43,7 @@
#define PPN1_BIT 9
#define PPN2_SHIFT (PPN1_SHIFT + PPN1_BIT)
#define PPN2_BIT 26
#define PPN_BITS (PPN0_BIT + PPN1_BIT + PPN2_BIT)
#define L1_PAGE_SIZE __SIZE(PAGE_OFFSET_BIT + VPN0_BIT + VPN1_BIT)
#define L2_PAGE_SIZE __SIZE(PAGE_OFFSET_BIT + VPN0_BIT)
@ -64,8 +65,11 @@
#define PAGE_ATTR_CB (PTE_BUF | PTE_CACHE)
#define PAGE_ATTR_DEV (PTE_SO)
#define PAGE_DEFAULT_ATTR_LEAF (PTE_SHARE | PTE_BUF | PTE_CACHE | PTE_A | PTE_D | PTE_G | PTE_U | PAGE_ATTR_RWX | PTE_V)
#define PAGE_DEFAULT_ATTR_NEXT (PTE_SHARE | PTE_BUF | PTE_CACHE | PTE_A | PTE_D | PTE_G | PTE_V)
#define PAGE_DEFAULT_ATTR_LEAF \
(PTE_SHARE | PTE_BUF | PTE_CACHE | PTE_A | PTE_D | PTE_G | PTE_U | \
PAGE_ATTR_RWX | PTE_V)
#define PAGE_DEFAULT_ATTR_NEXT \
(PTE_SHARE | PTE_BUF | PTE_CACHE | PTE_A | PTE_D | PTE_G | PTE_V)
#define PAGE_IS_LEAF(pte) __MASKVALUE(pte, PAGE_ATTR_RWX)
@ -82,8 +86,6 @@
#define SATP_MODE_SV57 10
#define SATP_MODE_SV64 11
#define PPN_BITS 44
#define ARCH_VADDR_WIDTH 39
#define SATP_MODE SATP_MODE_SV39
@ -91,9 +93,12 @@
#define MMU_MAP_K_RWCB PTE_WRAP(PAGE_ATTR_CB | PTE_G | PAGE_ATTR_RWX | PTE_V)
#define MMU_MAP_K_RW PTE_WRAP(PTE_G | PAGE_ATTR_RWX | PTE_V)
#define MMU_MAP_U_RWCB PTE_WRAP(PAGE_ATTR_CB | PTE_U | PAGE_ATTR_RWX | PTE_V)
#define MMU_MAP_U_ROCB PTE_WRAP(PAGE_ATTR_CB | PTE_U | PAGE_ATTR_READONLY | PTE_V)
#define MMU_MAP_U_ROCB \
PTE_WRAP(PAGE_ATTR_CB | PTE_U | PAGE_ATTR_READONLY | PTE_V)
#define MMU_MAP_U_RWCB_XN PTE_WRAP(PAGE_ATTR_CB | PTE_U | PAGE_ATTR_XN | PTE_V)
#define MMU_MAP_U_RW PTE_WRAP(PTE_U | PAGE_ATTR_RWX | PTE_V)
#define MMU_MAP_EARLY \
PTE_WRAP(PAGE_ATTR_RWX | PTE_G | PTE_V | PTE_CACHE | PTE_SHARE | PTE_BUF)
#define MMU_MAP_TRACE(attr) (attr)
#define PTE_XWR_MASK 0xe
@ -118,6 +123,10 @@ void mmu_disable_user_page_access(void);
#define RT_HW_MMU_PROT_USER 16
#define RT_HW_MMU_PROT_CACHE 32
void rt_hw_asid_init(void);
struct rt_aspace;
void rt_hw_asid_switch_pgtbl(struct rt_aspace *aspace, rt_ubase_t pgtbl);
/**
* @brief Remove permission from attribution
*

View File

@ -20,7 +20,7 @@
#define HANDLE_FAULT(ret) \
if (__builtin_expect((ret) != SBI_SUCCESS, 0)) \
LOG_W("%s failed\n", __FUNCTION__);
LOG_W("%s failed", __FUNCTION__);
static inline void rt_hw_tlb_invalidate_all(void)
{

View File

@ -0,0 +1,85 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-07-12 RT-Thread first version.
*/
#define DBG_TAG "hw.asid"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <rtthread.h>
#include <board.h>
#include <cache.h>
#include <mm_aspace.h>
#include <mm_page.h>
#include <mmu.h>
#include <riscv_mmu.h>
#include <tlb.h>
static rt_uint8_t ASID_BITS = 0;
static rt_uint32_t next_asid;
static rt_uint64_t global_asid_generation;
#define ASID_MASK ((1 << ASID_BITS) - 1)
#define ASID_FIRST_GENERATION (1 << ASID_BITS)
#define MAX_ASID ASID_FIRST_GENERATION
void rt_hw_asid_init(void)
{
rt_ubase_t satp_reg = read_csr(satp); /* satp is SXLEN bits wide; a 32-bit temporary would truncate the MODE/ASID fields */
satp_reg |= (((rt_uint64_t)0xffff) << PPN_BITS);
write_csr(satp, satp_reg);
unsigned short valid_asid_bit = ((read_csr(satp) >> PPN_BITS) & 0xffff);
// The maximal value of ASIDLEN is 9 for Sv32, or 16 for Sv39, Sv48, and Sv57
for (unsigned i = 0; i < 16; i++)
{
if (!(valid_asid_bit & 0x1))
{
break;
}
valid_asid_bit >>= 1;
ASID_BITS++;
}
global_asid_generation = ASID_FIRST_GENERATION;
next_asid = 1;
}
static rt_uint64_t _asid_acquire(rt_aspace_t aspace)
{
if ((aspace->asid ^ global_asid_generation) >> ASID_BITS) // not same generation
{
if (next_asid != MAX_ASID)
{
aspace->asid = global_asid_generation | next_asid;
next_asid++;
}
else
{
// roll over to the next generation
global_asid_generation += ASID_FIRST_GENERATION;
next_asid = 1;
rt_hw_tlb_invalidate_all_local();
aspace->asid = global_asid_generation | next_asid;
next_asid++;
}
}
return aspace->asid & ASID_MASK;
}
void rt_hw_asid_switch_pgtbl(struct rt_aspace *aspace, rt_ubase_t pgtbl)
{
rt_uint64_t asid = _asid_acquire(aspace);
write_csr(satp, (((size_t)SATP_MODE) << SATP_MODE_OFFSET) |
(asid << PPN_BITS) |
((rt_ubase_t)pgtbl >> PAGE_OFFSET_BIT));
asm volatile("sfence.vma x0,%0"::"r"(asid):"memory");
}

View File

@ -43,6 +43,16 @@ static void *current_mmu_table = RT_NULL;
volatile __attribute__((aligned(4 * 1024)))
rt_ubase_t MMUTable[__SIZE(VPN2_BIT)];
#ifdef ARCH_USING_ASID
void rt_hw_aspace_switch(rt_aspace_t aspace)
{
uintptr_t page_table = (uintptr_t)rt_kmem_v2p(aspace->page_table);
current_mmu_table = aspace->page_table;
rt_hw_asid_switch_pgtbl(aspace, page_table);
}
#else /* !ARCH_USING_ASID */
void rt_hw_aspace_switch(rt_aspace_t aspace)
{
uintptr_t page_table = (uintptr_t)rt_kmem_v2p(aspace->page_table);
@ -53,6 +63,11 @@ void rt_hw_aspace_switch(rt_aspace_t aspace)
rt_hw_tlb_invalidate_all_local();
}
void rt_hw_asid_init(void)
{
}
#endif /* ARCH_USING_ASID */
void *rt_hw_mmu_tbl_get()
{
return current_mmu_table;
@ -246,8 +261,7 @@ void rt_hw_mmu_unmap(struct rt_aspace *aspace, void *v_addr, size_t size)
MM_PGTBL_UNLOCK(aspace);
// when unmapped == 0, region not exist in pgtbl
if (!unmapped || unmapped > size)
break;
if (!unmapped || unmapped > size) break;
size -= unmapped;
v_addr += unmapped;
@ -260,7 +274,8 @@ static inline void _init_region(void *vaddr, size_t size)
rt_ioremap_start = vaddr;
rt_ioremap_size = size;
rt_mpr_start = rt_ioremap_start - rt_mpr_size;
LOG_D("rt_ioremap_start: %p, rt_mpr_start: %p", rt_ioremap_start, rt_mpr_start);
LOG_D("rt_ioremap_start: %p, rt_mpr_start: %p", rt_ioremap_start,
rt_mpr_start);
}
#else
static inline void _init_region(void *vaddr, size_t size)
@ -393,6 +408,7 @@ void *rt_hw_mmu_v2p(struct rt_aspace *aspace, void *vaddr)
}
else
{
LOG_D("%s: failed at %p", __func__, vaddr);
paddr = (uintptr_t)ARCH_MAP_FAILED;
}
return (void *)paddr;
@ -480,11 +496,11 @@ void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
attr = MMU_MAP_K_DEVICE;
}
struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
struct rt_mm_va_hint hint = {
.flags = MMF_MAP_FIXED,
.limit_start = aspace->start,
.limit_range_size = aspace->size,
.map_size = mdesc->vaddr_end -
mdesc->vaddr_start + 1,
.map_size = mdesc->vaddr_end - mdesc->vaddr_start + 1,
.prefer = (void *)mdesc->vaddr_start};
if (mdesc->paddr_start == (rt_uintptr_t)ARCH_MAP_FAILED)
@ -495,6 +511,8 @@ void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
mdesc++;
}
rt_hw_asid_init();
rt_hw_aspace_switch(&rt_kernel_space);
rt_page_cleanup();
}
@ -520,16 +538,18 @@ void rt_hw_mem_setup_early(void)
{
if (pv_off & (1ul << (ARCH_INDEX_WIDTH * 2 + ARCH_PAGE_SHIFT)))
{
LOG_E("%s: not aligned virtual address. pv_offset %p", __func__, pv_off);
LOG_E("%s: not aligned virtual address. pv_offset %p", __func__,
pv_off);
RT_ASSERT(0);
}
/**
* identity mapping:
* the PC is still in the lower region before relocating to high memory
*/
for (size_t i = 0; i < __SIZE(PPN0_BIT); i++)
{
early_pgtbl[i] = COMBINEPTE(ps, PAGE_ATTR_RWX | PTE_G | PTE_V);
early_pgtbl[i] = COMBINEPTE(ps, MMU_MAP_EARLY);
ps += L1_PAGE_SIZE;
}
@ -543,7 +563,7 @@ void rt_hw_mem_setup_early(void)
rt_ubase_t ve_idx = GET_L1(vs + 0x80000000);
for (size_t i = vs_idx; i < ve_idx; i++)
{
early_pgtbl[i] = COMBINEPTE(ps, PAGE_ATTR_RWX | PTE_G | PTE_V);
early_pgtbl[i] = COMBINEPTE(ps, MMU_MAP_EARLY);
ps += L1_PAGE_SIZE;
}

View File

@ -11,10 +11,10 @@
#include <rthw.h>
#include <rtthread.h>
#include <riscv.h>
#include <stdint.h>
#include <stdlib.h>
#include <riscv.h>
#include <string.h>
#include <stdlib.h>
#include "riscv_mmu.h"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
@ -35,6 +35,7 @@
#define PPN1_BIT 9
#define PPN2_SHIFT (PPN1_SHIFT + PPN1_BIT)
#define PPN2_BIT 26
#define PPN_BITS (PPN0_BIT + PPN1_BIT + PPN2_BIT)
#define L1_PAGE_SIZE __SIZE(PAGE_OFFSET_BIT + VPN0_BIT + VPN1_BIT)
#define L2_PAGE_SIZE __SIZE(PAGE_OFFSET_BIT + VPN0_BIT)
@ -78,6 +79,7 @@
#define MMU_MAP_U_RWCB (PTE_U | PTE_X | PTE_W | PTE_R | PTE_V)
#define MMU_MAP_U_RWCB_XN (PTE_U | PTE_W | PTE_R | PTE_V)
#define MMU_MAP_U_RW (PTE_U | PTE_X | PTE_W | PTE_R | PTE_V)
#define MMU_MAP_EARLY (PAGE_ATTR_RWX | PTE_G | PTE_V)
#define PTE_XWR_MASK 0xe
@ -101,6 +103,10 @@ void mmu_disable_user_page_access(void);
#define RT_HW_MMU_PROT_USER 16
#define RT_HW_MMU_PROT_CACHE 32
void rt_hw_asid_init(void);
struct rt_aspace;
void rt_hw_asid_switch_pgtbl(struct rt_aspace *aspace, rt_ubase_t pgtbl);
/**
* @brief Remove permission from attribution
*