add ls2k mmu

bigmagic 2020-09-04 09:31:42 +08:00
parent bd95f3a94f
commit 1556ba8e7f
8 changed files with 1467 additions and 4 deletions


@@ -24,6 +24,41 @@ rt_ubase_t rt_interrupt_from_thread;
rt_ubase_t rt_interrupt_to_thread;
rt_ubase_t rt_thread_switch_interrupt_flag;
const char *exception_name[] = {
"Interrupt",
"(X)TLB Modify Exception",
"(X)TLB Read/Fetch Exception",
"(X)TLB Write Exception",
"Address Read/Fetch Exception",
"Address Write Exception",
"",
"",
"Syscall",
"Breakpoint",
"Reversed Instruction Exception",
"Coprocessor Unit Invalid",
"Overflow",
"Trap",
"FPU Exception in Vector Instruction",
"FPU Exception",
"Loongson Custom Exception",
"",
"",
"(X)TLB Read Denied Exception",
"(X)TLB Execute Denied Exception",
"Vector Module Disabled Exception",
"",
"",
"",
"",
"",
"",
"",
"",
"Cache Error Exception",
""
};
rt_base_t rt_hw_interrupt_disable(void)
{
rt_base_t status = read_c0_status();
@@ -87,10 +122,11 @@ void cache_error_handler(void)
static void unhandled_exception_handle(struct pt_regs *regs)
{
rt_kprintf("Unknown Exception, EPC: 0x%08x, CAUSE: 0x%08x\n", read_c0_epc(),
read_c0_cause());
rt_kprintf("ST0: 0x%08x ", regs->cp0_status);
rt_kprintf("ErrorPC: 0x%08x\n", read_c0_errorepc());
rt_kprintf("Unknown Exception, EPC: 0x%p, CAUSE: 0x%08x\n", read_c0_epc(), read_c0_cause());
rt_kprintf("Exception Name:%s\n",exception_name[(read_c0_cause() >> 2) & 0x1f]);
rt_kprintf("ExeCode = 0x%08x,BadAddr = 0x%p\n",(read_c0_cause() >> 2) & 0x1f,mmu_tlb_get_bad_vaddr());
rt_kprintf("ST0: 0x%08x ",regs->cp0_status);
rt_kprintf("ErrorPC: 0x%p\n",read_c0_errorepc());
mips_dump_regs(regs);
rt_hw_cpu_shutdown();
}
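The rewritten handler derives the printed name from ExcCode = Cause[6:2], the same value used to index exception_name above. A minimal restatement of that decode, for reference:

    uint32_t exc_code = (read_c0_cause() >> 2) & 0x1f;  /* Cause.ExcCode */
    rt_kprintf("Exception Name:%s\n", exception_name[exc_code]);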

libcpu/mips/gs264/cache.c (new file, 70 lines)

@@ -0,0 +1,70 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-03-29 quanzhao the first version
* 2020-07-26 lizhirui porting to ls2k
*/
#include <rthw.h>
#include <rtdef.h>
rt_inline rt_uint64_t rt_cpu_icache_line_size(void)
{
    /* no CTR-style register exists on this core; return the 64-byte line
       size configured in g_mips_core (the original code read an
       uninitialized variable left over from the ARM version of this file) */
    return 64;
}
rt_inline rt_uint64_t rt_cpu_dcache_line_size(void)
{
    return 64;
}
void rt_hw_cpu_icache_invalidate(void *addr, int size)
{
    rt_uint64_t line_size = rt_cpu_icache_line_size();
    rt_uint64_t start_addr = (rt_uint64_t)addr;
    rt_uint64_t end_addr = (rt_uint64_t)addr + size + line_size - 1;
    /* stub: the line-aligned range is computed, but no cache op is issued yet */
}
void rt_hw_cpu_dcache_invalidate(void *addr, int size)
{
    rt_uint64_t line_size = rt_cpu_dcache_line_size();
    rt_uint64_t start_addr = (rt_uint64_t)addr;
    rt_uint64_t end_addr = (rt_uint64_t)addr + size + line_size - 1;
    /* stub: see the note after this file */
}
void rt_hw_cpu_dcache_clean(void *addr, int size)
{
    rt_uint64_t line_size = rt_cpu_dcache_line_size();
    rt_uint64_t start_addr = (rt_uint64_t)addr;
    rt_uint64_t end_addr = (rt_uint64_t)addr + size + line_size - 1;
    /* stub: see the note after this file */
}
void rt_hw_cpu_icache_ops(int ops, void *addr, int size)
{
if (ops == RT_HW_CACHE_INVALIDATE)
rt_hw_cpu_icache_invalidate(addr, size);
}
void rt_hw_cpu_dcache_ops(int ops, void *addr, int size)
{
if (ops == RT_HW_CACHE_FLUSH)
rt_hw_cpu_dcache_clean(addr, size);
else if (ops == RT_HW_CACHE_INVALIDATE)
rt_hw_cpu_dcache_invalidate(addr, size);
}
rt_base_t rt_hw_cpu_icache_status(void)
{
return 0;
}
rt_base_t rt_hw_cpu_dcache_status(void)
{
return 0;
}
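The three maintenance routines above compute a line-aligned range but issue no cache operation; presumably this relies on the Loongson 2K's hardware-maintained cache coherency. If explicit maintenance were ever needed, the computed start_addr/end_addr suggest a loop of the following shape (a sketch only, with the actual MIPS cache op left as a comment):

    for (; start_addr < end_addr; start_addr += line_size)
    {
        /* e.g. __asm__ volatile("cache 0x11, 0(%0)" :: "r"(start_addr)); // Hit_Invalidate_D */
    }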

libcpu/mips/gs264/cache.h (new file, 24 lines)

@@ -0,0 +1,24 @@
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-03-25 quanzhao the first version
*/
#ifndef __CACHE_H__
#define __CACHE_H__
unsigned long rt_cpu_get_smp_id(void);
void rt_cpu_mmu_disable(void);
void rt_cpu_mmu_enable(void);
void rt_cpu_tlb_set(volatile unsigned long*);
void rt_cpu_dcache_clean_flush(void);
void rt_cpu_icache_flush(void);
void rt_cpu_vector_set_base(unsigned int addr);
#endif


@@ -0,0 +1,222 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2020-07-26 lizhirui the first version
*/
#include <string.h>
#include <stdlib.h>
#include "mips.h"
#include "mips_mmu.h"
void mmu_init()
{
uint32_t status = read_c0_status();
status |= 0x07 << 5;//ux = 1,sx = 1,kx = 1
write_c0_status(status);
mmu_clear_tlb();
mmu_clear_itlb();
}
void mmu_set_cpu_mode(cpu_mode_t cpu_mode)
{
uint32_t status = read_c0_status();
status &= ~(0x03 << 3);
status |= ((uint32_t)cpu_mode & 0x03) << 3;
write_c0_status(status);
}
cpu_mode_t mmu_get_cpu_mode()
{
uint32_t status = read_c0_status();
return (cpu_mode_t)((status >> 3) & 0x03);
}
void mmu_clear_tlb()
{
    uint32_t max_tlb_index = mmu_get_max_tlb_index();
    uint32_t entry;
    tlb_item_t tlb_item;
    for(entry = 0;entry <= max_tlb_index;entry++)
    {
        mmu_tlb_item_init(&tlb_item);
        mmu_tlb_write_indexed(entry,&tlb_item);
    }
}
void mmu_clear_itlb()
{
    uint32_t diag = read_c0_diag();
    write_c0_diag(diag | (0x01 << 2));//set the ITLB flush bit
    read_c0_entrylo0();//dummy CP0 read, see the note in mmu_tlb_find()
}
uint32_t mmu_get_max_tlb_index()
{
uint32_t config1 = read_c0_config1();
return ((config1 >> 25) & 0x3F);
}
void mmu_tlb_write_indexed(uint32_t index,tlb_item_t *tlb_item)
{
tlb_item -> entry_lo[0].g |= tlb_item -> entry_lo[1].g;
tlb_item -> entry_lo[1].g |= tlb_item -> entry_lo[0].g;
mmu_tlb_set_index(index);
write_c0_entrylo0(reg_type_convert(tlb_item -> entry_lo[0],uint64_t));
write_c0_entrylo1(reg_type_convert(tlb_item -> entry_lo[1],uint64_t));
write_c0_entryhi(reg_type_convert(tlb_item -> entry_hi,uint64_t));
write_c0_pagemask(reg_type_convert(tlb_item -> page_mask,uint64_t));
tlb_write_indexed();
read_c0_entrylo0();
}
void mmu_tlb_write_random(tlb_item_t *tlb_item)
{
tlb_item -> entry_lo[0].g |= tlb_item -> entry_lo[1].g;
tlb_item -> entry_lo[1].g |= tlb_item -> entry_lo[0].g;
write_c0_entrylo0(reg_type_convert(tlb_item -> entry_lo[0],uint64_t));
write_c0_entrylo1(reg_type_convert(tlb_item -> entry_lo[1],uint64_t));
write_c0_entryhi(reg_type_convert(tlb_item -> entry_hi,uint64_t));
write_c0_pagemask(reg_type_convert(tlb_item -> page_mask,uint64_t));
tlb_write_random();
read_c0_entrylo0();
}
void mmu_tlb_read(uint32_t index,tlb_item_t *tlb_item)
{
mmu_tlb_set_index(index);
tlb_read();
uint64_t entrylo[2];
uint64_t entryhi;
uint64_t page_mask;
entrylo[0] = read_c0_entrylo0();
entrylo[1] = read_c0_entrylo1();
entryhi = read_c0_entryhi();
page_mask = read_c0_pagemask();
tlb_item -> entry_lo[0] = reg_type_convert(entrylo[0],entry_lo_t);
tlb_item -> entry_lo[1] = reg_type_convert(entrylo[1],entry_lo_t);
tlb_item -> entry_hi = reg_type_convert(entryhi,entry_hi_t);
tlb_item -> page_mask = reg_type_convert(page_mask,page_mask_t);
}
uint32_t mmu_tlb_find(uint64_t vpn,uint32_t asid,uint32_t *index)
{
entry_hi_t entry_hi;
entry_hi.r = (vpn >> 62) & 0x03;
entry_hi.vpn2 = (vpn >> 13) & 0x7FFFFFFU;
entry_hi.asid = asid & 0xFFU;
tlb_item_t tlb_item;
//mmu_tlb_read(6,&tlb_item);
//tlb_dump();
mmu_tlb_item_init(&tlb_item);
tlb_item.entry_lo[0].g = tlb_item.entry_lo[1].g = 1;
read_c0_entrylo0();//some MMU register must be read before tlbp, otherwise the probe fails under QEMU; the reason is unknown
write_c0_entrylo0(reg_type_convert(tlb_item.entry_lo[0],uint64_t));
write_c0_entrylo1(reg_type_convert(tlb_item.entry_lo[1],uint64_t));
write_c0_entryhi(reg_type_convert(entry_hi,uint64_t));
//__asm__ __volatile__("ehb");
//read_c0_entryhi();
//rt_kprintf("entry_hi = %p\n",read_c0_entryhi());
tlb_probe();
*index = mmu_tlb_get_index();
return mmu_tlb_is_matched();
}
void mmu_tlb_item_init(tlb_item_t *tlb_item)
{
memset(tlb_item,0,sizeof(tlb_item_t));
tlb_item -> entry_lo[0].c = 0x03;
tlb_item -> entry_lo[1].c = 0x03;
}
void mmu_set_map(uint64_t vpn,uint64_t ppn,page_mask_enum_t page_mask,uint32_t asid,uint32_t global)
{
uint64_t page_mask_v = (uint64_t)page_mask;
/*if(page_mask_v & (1 << 13))
{
page_mask_v |= (1 << 12);
}*/
uint64_t lb = lowbit((~(page_mask_v)) << 12);
uint64_t pn_remained = ((~(page_mask_v)) << 12) | lb;
vpn &= pn_remained;
ppn &= pn_remained;
uint64_t odd_vpn = vpn | lb;
uint64_t even_vpn = vpn & (~lb);
uint32_t index;
tlb_item_t tlb_item,tlb2_item;
mmu_tlb_item_init(&tlb_item);
mmu_tlb_item_init(&tlb2_item);
tlb_item.page_mask.mask = page_mask;
if(mmu_tlb_find(vpn & (~lb),asid,&index))
{
mmu_tlb_read(index,&tlb_item);
mmu_tlb_write_indexed(index,&tlb2_item);
}
entry_lo_t *entry_lo = &tlb_item.entry_lo[vpn == even_vpn ? 0 : 1];
tlb_item.entry_lo[0].g = tlb_item.entry_lo[1].g = global;
entry_lo -> d = 1;
entry_lo -> ri = 0;
entry_lo -> xi = 0;
entry_lo -> v = 1;
entry_lo -> pfn = ppn >> 12;
tlb_item.entry_hi.r = (vpn >> 62) & 0x03;
tlb_item.entry_hi.vpn2 = (vpn >> 13) & 0x7FFFFFFU;
tlb_item.entry_hi.asid = asid & 0xFFU;
mmu_tlb_write_random(&tlb_item);
}
uint32_t mmu_tlb_get_random()
{
return read_c0_random();
}
uint32_t mmu_tlb_get_index()
{
return read_c0_index() & 0x3F;
}
void mmu_tlb_set_index(uint32_t index)
{
write_c0_index(index & 0x3F);
}
uint32_t mmu_tlb_is_matched()
{
return (read_c0_index() & 0x80000000) == 0;
}
uint64_t mmu_tlb_get_bad_vaddr()
{
return read_c0_badvaddr();
}
void tlb_dump()
{
uint32_t max_index = mmu_get_max_tlb_index();
//uint32_t max_index = 10;
uint32_t entry;
tlb_item_t tlb_item;
for(entry = 0;entry <= max_index;entry++)
{
mmu_tlb_read(entry,&tlb_item);
//mmu_tlb_write_indexed(entry,&tlb_item);
//mmu_tlb_read(entry,&tlb_item);
rt_kprintf("vpn = 0x%p,ppn0 = 0x%p,ppn1 = 0x%p\n",(uint64_t)tlb_item.entry_hi.vpn2 << 13 | (uint64_t)tlb_item.entry_hi.asid << 62,(uint64_t)tlb_item.entry_lo[0].pfn << 12,(uint64_t)tlb_item.entry_lo[1].pfn << 12);
rt_kprintf("v = %d,d = %d,g = %d,ri = %d,xi = %d,c = %d\n",tlb_item.entry_lo[0].v,tlb_item.entry_lo[0].d,tlb_item.entry_lo[0].g,tlb_item.entry_lo[0].ri,tlb_item.entry_lo[0].xi,tlb_item.entry_lo[0].c);
rt_kprintf("v = %d,d = %d,g = %d,ri = %d,xi = %d,c = %d\n",tlb_item.entry_lo[1].v,tlb_item.entry_lo[1].d,tlb_item.entry_lo[1].g,tlb_item.entry_lo[1].ri,tlb_item.entry_lo[1].xi,tlb_item.entry_lo[1].c);
}
}
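A usage sketch for the API above (addresses and ASID are illustrative, not values from this commit): after mmu_init(), a single mmu_set_map() call fills one TLB entry whose even/odd pair covers two pages of the chosen size:

    void map_demo(void)
    {
        mmu_init();  /* set UX/SX/KX, clear TLB and ITLB */
        /* map virtual 0x01000000 to physical 0x90000000 with 16KB pages;
           asid = 0 and global = 1, so the ASID is ignored on lookup */
        mmu_set_map(0x0000000001000000ULL, 0x0000000090000000ULL,
                    PAGE_MASK_16KB, 0, 1);
        tlb_dump();  /* print the resulting TLB contents */
    }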


@@ -0,0 +1,100 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2020-07-26 lizhirui the first version
*/
#ifndef __MIPS_MMU_H__
#define __MIPS_MMU_H__
typedef enum cpu_mode_t
{
CPU_MODE_KERNEL = 0x00,
CPU_MODE_SUPERVISOR = 0x01,
CPU_MODE_USER = 0x02
}cpu_mode_t;
typedef enum page_mask_enum_t
{
PAGE_MASK_4KB = 0x00,
PAGE_MASK_16KB = 0x03,
PAGE_MASK_64KB = 0x0F,
PAGE_MASK_256KB = 0x3F,
PAGE_MASK_1MB = 0xFF,
PAGE_MASK_4MB = 0x3FF,
PAGE_MASK_16MB = 0xFFF,
PAGE_MASK_64MB = 0x3FFF,
PAGE_MASK_256MB = 0xFFFF,
PAGE_MASK_1GB = 0x3FFFF
}page_mask_enum_t;
typedef struct page_mask_t
{
uint64_t : 11;
uint64_t : 2;
uint64_t mask : 18;
uint64_t : 33;
}page_mask_t;
typedef struct entry_lo_t
{
uint64_t g : 1;
uint64_t v : 1;
uint64_t d : 1;
uint64_t c : 3;
uint64_t pfn : 24;
uint64_t pfnx : 3;
uint64_t : 29;
uint64_t xi : 1;
uint64_t ri : 1;
}entry_lo_t;
typedef struct entry_hi_t
{
uint64_t asid : 8;
uint64_t : 5;
uint64_t vpn2 : 27;
uint64_t : 22;
uint64_t r : 2;
}entry_hi_t;
typedef struct tlb_item_t
{
entry_lo_t entry_lo[2];
entry_hi_t entry_hi;
page_mask_t page_mask;
}tlb_item_t;
#define read_c0_diag() __read_32bit_c0_register($22, 0)
#define write_c0_diag(val) __write_32bit_c0_register($22, 0, val)
#define read_c0_badvaddr() __read_64bit_c0_register($8, 0)
#define read_c0_random() __read_32bit_c0_register($1, 0)
#define reg_type_convert(variable,new_type) *((new_type *)(&variable))
#define lowbit(x) ((x) & (-(x)))
void mmu_init();
void mmu_set_cpu_mode(cpu_mode_t cpu_mode);
cpu_mode_t mmu_get_cpu_mode();
void mmu_clear_tlb();
void mmu_clear_itlb();
uint32_t mmu_get_max_tlb_index();
void mmu_tlb_write_indexed(uint32_t index,tlb_item_t *tlb_item);
void mmu_tlb_write_random(tlb_item_t *tlb_item);
void mmu_tlb_read(uint32_t index,tlb_item_t *tlb_item);
uint32_t mmu_tlb_find(uint64_t vpn,uint32_t asid,uint32_t *index);
void mmu_tlb_item_init(tlb_item_t *tlb_item);
void mmu_set_map(uint64_t vpn,uint64_t ppn,page_mask_enum_t page_mask,uint32_t asid,uint32_t global);
uint32_t mmu_tlb_get_random();
uint32_t mmu_tlb_get_index();
void mmu_tlb_set_index(uint32_t index);
uint32_t mmu_tlb_is_matched();
uint64_t mmu_tlb_get_bad_vaddr();
void tlb_dump();
#endif


@@ -0,0 +1,79 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2020-07-28 lizhirui first version
*/
#include <rtthread.h>
#include <mips.h>
mips32_core_cfg_t g_mips_core =
{
64, /* icache_line_size */
128, /* icache_lines_per_way */
4, /* icache_ways */
32768, /* icache_size */
64, /* dcache_line_size */
128, /* dcache_lines_per_way */
4, /* dcache_ways */
32768, /* dcache_size */
64, /* max_tlb_entries */
};
static rt_uint16_t m_pow(rt_uint16_t b, rt_uint16_t n)
{
rt_uint16_t rets = 1;
while (n--)
rets *= b;
return rets;
}
static rt_uint16_t m_log2(rt_uint16_t b)
{
rt_uint16_t rets = 0;
while (b != 1)
{
b /= 2;
rets++;
}
return rets;
}
/**
* read core attribute
*/
void mips32_cfg_init(void)
{
rt_uint16_t val;
rt_uint32_t cp0_config1;
cp0_config1 = read_c0_config();/* Config0: bit 31 (M) means Config1 is implemented */
if (cp0_config1 & 0x80000000)
{
cp0_config1 = read_c0_config1();
val = (cp0_config1 & (7<<22))>>22;
g_mips_core.icache_lines_per_way = 64 * m_pow(2, val);
val = (cp0_config1 & (7<<19))>>19;
g_mips_core.icache_line_size = 2 * m_pow(2, val);
val = (cp0_config1 & (7<<16))>>16;
g_mips_core.icache_ways = val + 1;
val = (cp0_config1 & (7<<13))>>13;
g_mips_core.dcache_lines_per_way = 64 * m_pow(2, val);
val = (cp0_config1 & (7<<10))>>10;
g_mips_core.dcache_line_size = 2 * m_pow(2, val);
val = (cp0_config1 & (7<<7))>>7;
g_mips_core.dcache_ways = val + 1;
val = (cp0_config1 & (0x3F<<25))>>25;
g_mips_core.max_tlb_entries = val + 1;
}
}
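A worked decode against the GS264 defaults above (the Config1 value is hypothetical, built from IS = 1, IL = 5, IA = 3):

    rt_uint32_t cfg1 = (1u << 22) | (5u << 19) | (3u << 16);
    /* lines per way: 64 * m_pow(2, (cfg1 >> 22) & 7) = 64 * 2  = 128 */
    /* line size:     2  * m_pow(2, (cfg1 >> 19) & 7) = 2  * 32 = 64  */
    /* ways:          ((cfg1 >> 16) & 7) + 1          = 4             */
    /* total I-cache: 128 lines * 64 bytes * 4 ways   = 32768 bytes   */

which matches the icache fields of g_mips_core.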

libcpu/mips/gs264/mmu.c (new file, 819 lines)

@@ -0,0 +1,819 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2012-01-10 bernard porting to AM1808
* 2020-07-26 lizhirui porting to ls2k
*/
#include <rtthread.h>
#include <rthw.h>
#include <board.h>
#include "cache.h"
#include "mips_mmu.h"
#include "mmu.h"
void *current_mmu_table = RT_NULL;
void *mmu_table_get()
{
return current_mmu_table;
}
void switch_mmu(void *mmu_table)
{
current_mmu_table = mmu_table;
mmu_clear_tlb();
mmu_clear_itlb();
}
/* dump 2nd level page table */
void rt_hw_cpu_dump_page_table_2nd(rt_uint32_t *ptb)
{
int i;
int fcnt = 0;
for (i = 0; i < 256; i++)
{
rt_uint32_t pte2 = ptb[i];
if ((pte2 & 0x3) == 0)
{
if (fcnt == 0)
rt_kprintf(" ");
rt_kprintf("%04x: ", i);
fcnt++;
if (fcnt == 16)
{
rt_kprintf("fault\n");
fcnt = 0;
}
continue;
}
if (fcnt != 0)
{
rt_kprintf("fault\n");
fcnt = 0;
}
rt_kprintf(" %04x: %x: ", i, pte2);
if ((pte2 & 0x3) == 0x1)
{
rt_kprintf("L,ap:%x,xn:%d,texcb:%02x\n",
((pte2 >> 7) | (pte2 >> 4))& 0xf,
(pte2 >> 15) & 0x1,
((pte2 >> 10) | (pte2 >> 2)) & 0x1f);
}
else
{
rt_kprintf("S,ap:%x,xn:%d,texcb:%02x\n",
((pte2 >> 7) | (pte2 >> 4))& 0xf, pte2 & 0x1,
((pte2 >> 4) | (pte2 >> 2)) & 0x1f);
}
}
}
void rt_hw_cpu_dump_page_table(rt_uint32_t *ptb)
{
int i;
int fcnt = 0;
rt_kprintf("page table@%p\n", ptb);
for (i = 0; i < 1024*4; i++)
{
rt_uint32_t pte1 = ptb[i];
if ((pte1 & 0x3) == 0)
{
rt_kprintf("%03x: ", i);
fcnt++;
if (fcnt == 16)
{
rt_kprintf("fault\n");
fcnt = 0;
}
continue;
}
if (fcnt != 0)
{
rt_kprintf("fault\n");
fcnt = 0;
}
rt_kprintf("%03x: %08x: ", i, pte1);
if ((pte1 & 0x3) == 0x3)
{
rt_kprintf("LPAE\n");
}
else if ((pte1 & 0x3) == 0x1)
{
rt_kprintf("pte,ns:%d,domain:%d\n",
(pte1 >> 3) & 0x1, (pte1 >> 5) & 0xf);
/*
*rt_hw_cpu_dump_page_table_2nd((void*)((pte1 & 0xfffffc000)
* - 0x80000000 + 0xC0000000));
*/
}
else if (pte1 & (1 << 18))
{
rt_kprintf("super section,ns:%d,ap:%x,xn:%d,texcb:%02x\n",
(pte1 >> 19) & 0x1,
((pte1 >> 13) | (pte1 >> 10))& 0xf,
(pte1 >> 4) & 0x1,
((pte1 >> 10) | (pte1 >> 2)) & 0x1f);
}
else
{
rt_kprintf("section,ns:%d,ap:%x,"
"xn:%d,texcb:%02x,domain:%d\n",
(pte1 >> 19) & 0x1,
((pte1 >> 13) | (pte1 >> 10))& 0xf,
(pte1 >> 4) & 0x1,
(((pte1 & (0x7 << 12)) >> 10) |
((pte1 & 0x0c) >> 2)) & 0x1f,
(pte1 >> 5) & 0xf);
}
}
}
/* level1 page table, each entry for 1MB memory. */
volatile unsigned long MMUTable[4*1024] __attribute__((aligned(16*1024)));
void rt_hw_mmu_setmtt(rt_uint32_t vaddrStart,
rt_uint32_t vaddrEnd,
rt_uint32_t paddrStart,
rt_uint32_t attr)
{
volatile rt_uint32_t *pTT;
volatile int i, nSec;
pTT = (rt_uint32_t *)MMUTable + (vaddrStart >> 20);
nSec = (vaddrEnd >> 20) - (vaddrStart >> 20);
for(i = 0; i <= nSec; i++)
{
*pTT = attr | (((paddrStart >> 20) + i) << 20);
pTT++;
}
}
unsigned long rt_hw_set_domain_register(unsigned long domain_val)
{
    /* domain access control is an ARM CP15 feature; this is a stub on MIPS */
    unsigned long old_domain = 0;
    //asm volatile ("mrc p15, 0, %0, c3, c0\n" : "=r" (old_domain));
    //asm volatile ("mcr p15, 0, %0, c3, c0\n" : :"r" (domain_val) : "memory");
    return old_domain;
}
void rt_hw_cpu_dcache_clean(void *addr, int size);
void rt_hw_init_mmu_table(struct mem_desc *mdesc, rt_uint32_t size)
{
/* set page table */
for(; size > 0; size--)
{
rt_hw_mmu_setmtt(mdesc->vaddr_start, mdesc->vaddr_end,
mdesc->paddr_start, mdesc->attr);
mdesc++;
}
rt_hw_cpu_dcache_clean((void*)MMUTable, sizeof MMUTable);
}
void rt_hw_mmu_init(void)
{
rt_cpu_dcache_clean_flush();
rt_cpu_icache_flush();
rt_hw_cpu_dcache_disable();
rt_hw_cpu_icache_disable();
rt_cpu_mmu_disable();
/*rt_hw_cpu_dump_page_table(MMUTable);*/
rt_hw_set_domain_register(0x55555555);
rt_cpu_tlb_set(MMUTable);
rt_cpu_mmu_enable();
rt_hw_cpu_icache_enable();
rt_hw_cpu_dcache_enable();
}
/*
mem map
*/
void rt_hw_cpu_dcache_clean(void *addr, int size);
int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void* v_address, size_t size, size_t *vtable, size_t pv_off)
{
size_t l1_off, va_s, va_e;
rt_base_t level;
if (!mmu_info || !vtable)
{
return -1;
}
va_s = (size_t)v_address;
va_e = (size_t)v_address + size - 1;
if ( va_e < va_s)
{
return -1;
}
va_s >>= ARCH_SECTION_SHIFT;
va_e >>= ARCH_SECTION_SHIFT;
if (va_s == 0)
{
return -1;
}
level = rt_hw_interrupt_disable();
for (l1_off = va_s; l1_off <= va_e; l1_off++)
{
size_t v = vtable[l1_off];
if (v & ARCH_MMU_USED_MASK)
{
rt_kprintf("Error:vtable[%d] = 0x%p(is not zero),va_s = 0x%p,va_e = 0x%p!\n",l1_off,v,va_s,va_e);
rt_hw_interrupt_enable(level);
return -1;
}
}
mmu_info->vtable = vtable;
mmu_info->vstart = va_s;
mmu_info->vend = va_e;
mmu_info->pv_off = pv_off;
rt_hw_interrupt_enable(level);
return 0;
}
static size_t find_vaddr(rt_mmu_info *mmu_info, int pages)
{
size_t l1_off, l2_off;
size_t *mmu_l1, *mmu_l2;
size_t find_off = 0;
size_t find_va = 0;
int n = 0;
if (!pages)
{
return 0;
}
if (!mmu_info)
{
return 0;
}
for (l1_off = mmu_info->vstart; l1_off <= mmu_info->vend; l1_off++)
{
mmu_l1 = (size_t*)mmu_info->vtable + l1_off;
if (*mmu_l1 & ARCH_MMU_USED_MASK)
{
mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
for (l2_off = 0; l2_off < (ARCH_SECTION_SIZE/ARCH_PAGE_SIZE); l2_off++)
{
if (*(mmu_l2 + l2_off) & ARCH_MMU_USED_MASK)
{
/* in use */
n = 0;
}
else
{
if (!n)
{
find_va = l1_off;
find_off = l2_off;
}
n++;
if (n >= pages)
{
return (find_va << ARCH_SECTION_SHIFT) + (find_off << ARCH_PAGE_SHIFT);
}
}
}
}
else
{
if (!n)
{
find_va = l1_off;
find_off = 0;
}
n += (ARCH_SECTION_SIZE/ARCH_PAGE_SIZE);
if (n >= pages)
{
return (find_va << ARCH_SECTION_SHIFT) + (find_off << ARCH_PAGE_SHIFT);
}
}
}
return 0;
}
#ifdef RT_USING_USERSPACE
static int check_vaddr(rt_mmu_info *mmu_info, void *va, int pages)
{
size_t loop_va = (size_t)va & ~ARCH_PAGE_MASK;
size_t l1_off, l2_off;
size_t *mmu_l1, *mmu_l2;
if (!pages)
{
return -1;
}
if (!mmu_info)
{
return -1;
}
while (pages--)
{
l1_off = (loop_va >> ARCH_SECTION_SHIFT);
l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
mmu_l1 = (size_t*)mmu_info->vtable + l1_off;
if (*mmu_l1 & ARCH_MMU_USED_MASK)
{
mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
if (*(mmu_l2 + l2_off) & ARCH_MMU_USED_MASK)
{
return -1;
}
}
loop_va += ARCH_PAGE_SIZE;
}
return 0;
}
#endif
static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t npages)
{
size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
size_t l1_off, l2_off;
size_t *mmu_l1, *mmu_l2;
size_t *ref_cnt;
if (!mmu_info)
{
return;
}
while (npages--)
{
l1_off = (loop_va >> ARCH_SECTION_SHIFT);
if (l1_off < mmu_info->vstart || l1_off > mmu_info->vend)
{
return;
}
l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
mmu_l1 = (size_t*)mmu_info->vtable + l1_off;
if (*mmu_l1 & ARCH_MMU_USED_MASK)
{
mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
}
else
{
return;
}
if (*(mmu_l2 + l2_off) & ARCH_MMU_USED_MASK)
{
*(mmu_l2 + l2_off) = 0;
/* cache maintain */
rt_hw_cpu_dcache_clean(mmu_l2 + l2_off, 4);
ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE/ARCH_PAGE_SIZE);
(*ref_cnt)--;
if (!*ref_cnt)
{
#ifdef RT_USING_USERSPACE
rt_pages_free(mmu_l2, 0);
#else
rt_free_align(mmu_l2);
#endif
*mmu_l1 = 0;
/* cache maintain */
rt_hw_cpu_dcache_clean(mmu_l1, 4);
}
}
loop_va += ARCH_PAGE_SIZE;
}
}
static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void* v_addr, void* p_addr, size_t npages, size_t attr)
{
size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
size_t loop_pa = (size_t)p_addr & ~ARCH_PAGE_MASK;
size_t l1_off, l2_off;
size_t *mmu_l1, *mmu_l2;
size_t *ref_cnt;
if (!mmu_info)
{
return -1;
}
while (npages--)
{
l1_off = (loop_va >> ARCH_SECTION_SHIFT);
l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
mmu_l1 = (size_t*)mmu_info->vtable + l1_off;
if (*mmu_l1 & ARCH_MMU_USED_MASK)
{
mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
}
else
{
#ifdef RT_USING_USERSPACE
mmu_l2 = (size_t*)rt_pages_alloc(0);
#else
mmu_l2 = (size_t*)rt_malloc_align(ARCH_PAGE_TBL_SIZE * 2, ARCH_PAGE_TBL_SIZE);
#endif
if (mmu_l2)
{
rt_memset(mmu_l2, 0, ARCH_PAGE_TBL_SIZE * 2);
/* cache maintain */
rt_hw_cpu_dcache_clean(mmu_l2, ARCH_PAGE_TBL_SIZE);
*mmu_l1 = (((size_t)mmu_l2 + mmu_info->pv_off) | 0x1);
/* cache maintain */
rt_hw_cpu_dcache_clean(mmu_l1, 4);
}
else
{
/* error, unmap and quit */
__rt_hw_mmu_unmap(mmu_info, v_addr, npages);
return -1;
}
}
ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE/ARCH_PAGE_SIZE);
(*ref_cnt)++;
*(mmu_l2 + l2_off) = (loop_pa | attr);
/* cache maintain */
rt_hw_cpu_dcache_clean(mmu_l2 + l2_off, 4);
loop_va += ARCH_PAGE_SIZE;
loop_pa += ARCH_PAGE_SIZE;
}
return 0;
}
static void rt_hw_cpu_tlb_invalidate(void)
{
mmu_clear_tlb();
mmu_clear_itlb();
}
#ifdef RT_USING_USERSPACE
void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr)
{
size_t pa_s, pa_e;
size_t vaddr;
int pages;
int ret;
if (!size)
{
return 0;
}
pa_s = (size_t)p_addr;
pa_e = (size_t)p_addr + size - 1;
pa_s >>= ARCH_PAGE_SHIFT;
pa_e >>= ARCH_PAGE_SHIFT;
pages = pa_e - pa_s + 1;
if (v_addr)
{
vaddr = (size_t)v_addr;
pa_s = (size_t)p_addr;
if ((vaddr & ARCH_PAGE_MASK) != (pa_s & ARCH_PAGE_MASK))
{
return 0;
}
vaddr &= ~ARCH_PAGE_MASK;
if (check_vaddr(mmu_info, (void*)vaddr, pages) != 0)
{
return 0;
}
}
else
{
vaddr = find_vaddr(mmu_info, pages);
}
if (vaddr) {
ret = __rt_hw_mmu_map(mmu_info, (void*)vaddr, p_addr, pages, attr);
if (ret == 0)
{
rt_hw_cpu_tlb_invalidate();
return (void*)(vaddr + ((size_t)p_addr & ARCH_PAGE_MASK));
}
}
return 0;
}
#else
void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t attr)
{
size_t pa_s, pa_e;
size_t vaddr;
int pages;
int ret;
pa_s = (size_t)p_addr;
pa_e = (size_t)p_addr + size - 1;
pa_s >>= ARCH_PAGE_SHIFT;
pa_e >>= ARCH_PAGE_SHIFT;
pages = pa_e - pa_s + 1;
vaddr = find_vaddr(mmu_info, pages);
if (vaddr) {
ret = __rt_hw_mmu_map(mmu_info, (void*)vaddr, p_addr, pages, attr);
if (ret == 0)
{
rt_hw_cpu_tlb_invalidate();
return (void*)(vaddr + ((size_t)p_addr & ARCH_PAGE_MASK));
}
}
return 0;
}
#endif
#ifdef RT_USING_USERSPACE
static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void* v_addr, size_t npages, size_t attr)
{
size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
size_t loop_pa;
size_t l1_off, l2_off;
size_t *mmu_l1, *mmu_l2;
size_t *ref_cnt;
if (!mmu_info)
{
return -1;
}
while (npages--)
{
loop_pa = (size_t)rt_pages_alloc(0) + mmu_info->pv_off;
if (!loop_pa)
goto err;
//rt_kprintf("vaddr = %08x is mapped to paddr = %08x\n",v_addr,loop_pa);
l1_off = (loop_va >> ARCH_SECTION_SHIFT);
l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
mmu_l1 = (size_t*)mmu_info->vtable + l1_off;
if (*mmu_l1 & ARCH_MMU_USED_MASK)
{
mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
}
else
{
//mmu_l2 = (size_t*)rt_malloc_align(ARCH_PAGE_TBL_SIZE * 2, ARCH_PAGE_TBL_SIZE);
mmu_l2 = (size_t*)rt_pages_alloc(0);
if (mmu_l2)
{
rt_memset(mmu_l2, 0, ARCH_PAGE_TBL_SIZE * 2);
/* cache maintain */
rt_hw_cpu_dcache_clean(mmu_l2, ARCH_PAGE_TBL_SIZE);
*mmu_l1 = (((size_t)mmu_l2 + mmu_info->pv_off) | 0x1);
/* cache maintain */
rt_hw_cpu_dcache_clean(mmu_l1, 4);
}
else
goto err;
}
ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE/ARCH_PAGE_SIZE);
(*ref_cnt)++;
//loop_pa += mmu_info->pv_off;
*(mmu_l2 + l2_off) = (loop_pa | attr);
/* cache maintain */
rt_hw_cpu_dcache_clean(mmu_l2 + l2_off, 4);
loop_va += ARCH_PAGE_SIZE;
}
return 0;
err:
{
/* error, unmap and quit */
int i;
void *va, *pa;
va = (void*)((size_t)v_addr & ~ARCH_PAGE_MASK);
for (i = 0; i < npages; i++)
{
pa = rt_hw_mmu_v2p(mmu_info, va);
pa -= mmu_info->pv_off;
rt_pages_free(pa, 0);
va += ARCH_PAGE_SIZE;
}
__rt_hw_mmu_unmap(mmu_info, v_addr, npages);
return -1;
}
}
void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr)
{
size_t vaddr;
size_t offset;
int pages;
int ret;
if (!size)
{
return 0;
}
offset = (size_t)v_addr & ARCH_PAGE_MASK;
size += (offset + ARCH_PAGE_SIZE - 1);
pages = (size >> ARCH_PAGE_SHIFT);
if (v_addr)
{
vaddr = (size_t)v_addr;
vaddr &= ~ARCH_PAGE_MASK;
if (check_vaddr(mmu_info, (void*)vaddr, pages) != 0)
{
return 0;
}
}
else
{
vaddr = find_vaddr(mmu_info, pages);
}
if (vaddr) {
ret = __rt_hw_mmu_map_auto(mmu_info, (void*)vaddr, pages, attr);
if (ret == 0)
{
rt_hw_cpu_tlb_invalidate();
return (void*)vaddr + offset;
}
}
return 0;
}
#endif
void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t size)
{
size_t va_s, va_e;
int pages;
va_s = (size_t)v_addr;
va_e = (size_t)v_addr + size - 1;
va_s >>= ARCH_PAGE_SHIFT;
va_e >>= ARCH_PAGE_SHIFT;
pages = va_e - va_s + 1;
__rt_hw_mmu_unmap(mmu_info, v_addr, pages);
rt_hw_cpu_tlb_invalidate();
}
//va --> pa
void *rt_hw_kernel_virt_to_phys(void *v_addr, size_t size)
{
void *p_addr = 0;
/* not implemented yet; always returns 0 */
return p_addr;
}
//pa --> va
void *rt_hw_kernel_phys_to_virt(void *p_addr, size_t size)
{
void *v_addr = 0;
#ifdef RT_USING_USERSPACE
extern rt_mmu_info mmu_info;
v_addr = rt_hw_mmu_map(&mmu_info, 0, p_addr, size, MMU_MAP_K_RW);
#else
v_addr = p_addr;
#endif
return v_addr;
}
#ifdef RT_USING_USERSPACE
void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr)
{
void *ret;
rt_base_t level;
level = rt_hw_interrupt_disable();
ret = _rt_hw_mmu_map(mmu_info, v_addr, p_addr, size, attr);
rt_hw_interrupt_enable(level);
return ret;
}
void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr)
{
void *ret;
rt_base_t level;
level = rt_hw_interrupt_disable();
ret = _rt_hw_mmu_map_auto(mmu_info, v_addr, size, attr);
rt_hw_interrupt_enable(level);
return ret;
}
#endif
void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t size)
{
rt_base_t level;
level = rt_hw_interrupt_disable();
_rt_hw_mmu_unmap(mmu_info, v_addr, size);
rt_hw_interrupt_enable(level);
}
void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr)
{
size_t l1_off, l2_off;
size_t *mmu_l1, *mmu_l2;
size_t tmp;
size_t pa;
l1_off = (size_t)v_addr >> ARCH_SECTION_SHIFT;
if (!mmu_info)
{
return (void*)0;
}
mmu_l1 = (size_t*)mmu_info->vtable + l1_off;
tmp = *mmu_l1;
switch (tmp & ARCH_MMU_USED_MASK)
{
case 0: /* not used */
break;
case 1: /* page table */
mmu_l2 = (size_t *)((tmp & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
l2_off = (((size_t)v_addr & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
pa = *(mmu_l2 + l2_off);
if (pa & ARCH_MMU_USED_MASK)
{
if ((pa & ARCH_MMU_USED_MASK) == 1)
{
/* large page, not supported */
break;
}
pa &= ~(ARCH_PAGE_MASK);
pa += ((size_t)v_addr & ARCH_PAGE_MASK);
return (void*)pa;
}
break;
case 2:
case 3:
/* section */
if (tmp & ARCH_TYPE_SUPERSECTION)
{
/* super section, not supported */
break;
}
pa = (tmp & ~ARCH_SECTION_MASK);
pa += ((size_t)v_addr & ARCH_SECTION_MASK);
return (void*)pa;
}
return (void*)0;
}
void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr)
{
void *ret;
rt_base_t level;
level = rt_hw_interrupt_disable();
ret = _rt_hw_mmu_v2p(mmu_info, v_addr);
rt_hw_interrupt_enable(level);
return ret;
}
#ifdef RT_USING_USERSPACE
void init_mm_setup(unsigned int *mtbl, unsigned int size, unsigned int pv_off) {
unsigned int va;
for (va = 0; va < 0x1000; va++) {
unsigned int vaddr = (va << 20);
if (vaddr >= KERNEL_VADDR_START && vaddr - KERNEL_VADDR_START < size) {
mtbl[va] = ((va << 20) + pv_off) | NORMAL_MEM;
} else if (vaddr >= (KERNEL_VADDR_START + pv_off) && vaddr - (KERNEL_VADDR_START + pv_off) < size) {
mtbl[va] = (va << 20) | NORMAL_MEM;
} else {
mtbl[va] = 0;
}
}
}
#endif
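A minimal usage sketch for a build without RT_USING_USERSPACE (the table, range, and device address are illustrative assumptions):

    static size_t kernel_vtable[4 * 1024] __attribute__((aligned(16 * 1024)));
    static rt_mmu_info kmmu;

    void mmu_demo(void)
    {
        /* manage virtual range 0x80000000..0x8FFFFFFF with no phys/virt offset */
        if (rt_hw_mmu_map_init(&kmmu, (void *)0x80000000, 0x10000000,
                               kernel_vtable, 0) == 0)
        {
            /* find_vaddr() picks a free page and maps the device window there */
            void *va = rt_hw_mmu_map(&kmmu, (void *)0x1fe00000, 0x1000,
                                     MMU_MAP_K_DEVICE);
            if (va)
                rt_kprintf("device mapped at %p\n", va);
        }
    }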

libcpu/mips/gs264/mmu.h (new file, 113 lines)

@@ -0,0 +1,113 @@
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-03-25 quanzhao the first version
* 2020-07-26 lizhirui porting to ls2k
*/
#ifndef __MMU_H_
#define __MMU_H_
#include <rtthread.h>
#define DESC_SEC (0x2)
#define MEMWBWA ((1<<12)|(3<<2)) /* write back, write allocate */
#define MEMWB (3<<2) /* write back, no write allocate */
#define MEMWT (2<<2) /* write through, no write allocate */
#define SHAREDEVICE (1<<2) /* shared device */
#define STRONGORDER (0<<2) /* strong ordered */
#define XN (1<<4) /* eXecute Never */
#ifdef RT_USING_USERSPACE
#define AP_RW (1<<10) /* supervisor=RW, user=No */
#define AP_RO ((1<<10) |(1 << 15)) /* supervisor=RO, user=No */
#else
#define AP_RW (3<<10) /* supervisor=RW, user=RW */
#define AP_RO (2<<10) /* supervisor=RW, user=RO */
#endif
#define SHARED (1<<16) /* shareable */
#define DOMAIN_FAULT (0x0)
#define DOMAIN_CHK (0x1)
#define DOMAIN_NOTCHK (0x3)
#define DOMAIN0 (0x0<<5)
#define DOMAIN1 (0x1<<5)
#define DOMAIN0_ATTR (DOMAIN_CHK<<0)
#define DOMAIN1_ATTR (DOMAIN_FAULT<<2)
/* device mapping type */
#define DEVICE_MEM (SHARED|AP_RW|DOMAIN0|SHAREDEVICE|DESC_SEC|XN)
/* normal memory mapping type */
#define NORMAL_MEM (SHARED|AP_RW|DOMAIN0|MEMWBWA|DESC_SEC)
#define STRONG_ORDER_MEM (SHARED|AP_RO|XN|DESC_SEC)
struct mem_desc
{
rt_uint32_t vaddr_start;
rt_uint32_t vaddr_end;
rt_uint32_t paddr_start;
rt_uint32_t attr;
};
#define MMU_MAP_MTBL_XN (1<<0)
#define MMU_MAP_MTBL_A (1<<1)
#define MMU_MAP_MTBL_B (1<<2)
#define MMU_MAP_MTBL_C (1<<3)
#define MMU_MAP_MTBL_AP01(x) ((x)<<4)
#define MMU_MAP_MTBL_TEX(x) ((x)<<6)
#define MMU_MAP_MTBL_AP2(x) ((x)<<9)
#define MMU_MAP_MTBL_SHARE (1<<10)
#define MMU_MAP_K_RO (MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(1)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE)
#define MMU_MAP_K_RWCB (MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE)
#define MMU_MAP_K_RW (MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_SHARE)
#define MMU_MAP_K_DEVICE (MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_SHARE)
#define MMU_MAP_U_RO (MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(1)|MMU_MAP_MTBL_AP01(2)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE)
#define MMU_MAP_U_RWCB (MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(3)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE)
#define MMU_MAP_U_RW (MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(3)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_SHARE)
#define MMU_MAP_U_DEVICE (MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(3)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_SHARE)
#define ARCH_SECTION_SHIFT 20
#define ARCH_SECTION_SIZE (1 << ARCH_SECTION_SHIFT)
#define ARCH_SECTION_MASK (ARCH_SECTION_SIZE - 1)
#define ARCH_PAGE_SHIFT 14
#define ARCH_PAGE_SIZE (1 << ARCH_PAGE_SHIFT)
#define ARCH_PAGE_MASK (ARCH_PAGE_SIZE - 1)
#define ARCH_PAGE_TBL_SHIFT 10
#define ARCH_PAGE_TBL_SIZE (1 << ARCH_PAGE_TBL_SHIFT)
#define ARCH_PAGE_TBL_MASK (ARCH_PAGE_TBL_SIZE - 1)
#define ARCH_MMU_USED_MASK 3
#define ARCH_TYPE_SUPERSECTION (1 << 18)
typedef struct
{
size_t *vtable;
size_t vstart;
size_t vend;
size_t pv_off;
} rt_mmu_info;
void *mmu_table_get();
void switch_mmu(void *mmu_table);
int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void* v_address, size_t size, size_t *vtable, size_t pv_off);
#ifdef RT_USING_USERSPACE
void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr);
void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr);
#else
void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t attr);
#endif
void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t size);
void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr);
void *rt_hw_kernel_phys_to_virt(void *p_addr, size_t size);
void *rt_hw_kernel_virt_to_phys(void *v_addr, size_t size);
#endif
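As a worked example of how the attribute macros compose: MMU_MAP_K_RW is MMU_MAP_MTBL_A | MMU_MAP_MTBL_AP01(1) | MMU_MAP_MTBL_SHARE (the AP2(0) and TEX(0) terms contribute nothing), i.e. (1<<1) | (1<<4) | (1<<10) = 0x412; OR-ing in MMU_MAP_MTBL_B | MMU_MAP_MTBL_C (0x4 | 0x8) gives MMU_MAP_K_RWCB = 0x41E, the write-back cacheable variant.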