feat: support ARCH_REMAP_KERNEL on libcpu/c906 (#9123)

feat: [libcpu/c906] support ARCH_REMAP_KERNEL

This change enables remapping of the kernel image to a high virtual
address region on the c906 platform.

Changes:
- Introduced new configuration options `ARCH_REMAP_KERNEL` and
  `ARCH_USING_ASID` under the `ARCH_RISCV64` section.
- Updated MMU initialization and switching functions to incorporate
  remapping handling.
- Modified page table setup for proper memory attribute settings.
- Added support for early memory setup and kernel remapping.
- Added conditional compilation for ASID support in the `rt_aspace` struct,
  since ASID is currently not enabled for most architectures.
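
For context on what remapping means here: with ARCH_REMAP_KERNEL the kernel is linked at a high virtual base but still loaded at a low physical address, and the whole scheme rests on a single physical-to-virtual offset. A minimal sketch of the address arithmetic, assuming the RT-Thread convention that the offset is physical minus virtual (the helper names below are illustrative, not part of this patch):

#include <stdint.h>

/* Assumed convention: paddr = vaddr + pv_off, matching what
 * rt_kmem_v2p() computes. Filled in during early boot. */
static uintptr_t pv_off;

static inline uintptr_t v2p(uintptr_t vaddr) { return vaddr + pv_off; }
static inline uintptr_t p2v(uintptr_t paddr) { return paddr - pv_off; }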

Signed-off-by: Shell <smokewood@qq.com>
Shell, 2024-07-11 11:00:04 +08:00, committed by GitHub
commit beee77f372 (parent 6180dabab3)
GPG Key ID: B5690EEEBB952194

4 changed files with 85 additions and 17 deletions

File 1 of 4:

@@ -49,7 +49,11 @@ typedef struct rt_aspace
     struct rt_mutex bst_lock;
     struct rt_mem_obj *private_object;

+#ifdef ARCH_USING_ASID
     rt_uint64_t asid;
+#endif /* ARCH_USING_ASID */
 } *rt_aspace_t;

 typedef struct rt_varea

File 2 of 4:

@@ -262,13 +262,17 @@ config ARCH_RISCV64
     select ARCH_CPU_64BIT
     bool

+if ARCH_RISCV64
 config ARCH_REMAP_KERNEL
     bool
     depends on RT_USING_SMART
     help
         Remapping kernel image to high virtual address region
+endif
+
+config ARCH_USING_ASID
+    bool
+    depends on RT_USING_SMART
+    help
+        Using ASID support from architecture

 config ARCH_IA32
     bool

File 3 of 4:

@@ -43,6 +43,7 @@ static void *current_mmu_table = RT_NULL;
 volatile __attribute__((aligned(4 * 1024)))
 rt_ubase_t MMUTable[__SIZE(VPN2_BIT)];

+#ifdef ARCH_USING_ASID
 static rt_uint8_t ASID_BITS = 0;
 static rt_uint32_t next_asid;
 static rt_uint64_t global_asid_generation;
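
The three statics guarded above drive generation-based ASID allocation; the allocator itself predates this patch, the new #ifdef merely compiles it out where ASID is unused. A sketch of how such a scheme typically works, simplified and with the constants assumed for illustration (not the verbatim RT-Thread implementation):

#include <stdint.h>

/* The upper bits of an aspace's cached ASID record the generation
 * it was allocated in; a mismatch means the ASID is stale. */
#define ASID_BITS             16
#define ASID_FIRST_GENERATION (1ULL << ASID_BITS)
#define MAX_ASID              ASID_FIRST_GENERATION

static uint64_t global_asid_generation = ASID_FIRST_GENERATION;
static uint32_t next_asid = 1;

static uint64_t asid_acquire(uint64_t *cached_asid)
{
    if ((*cached_asid ^ global_asid_generation) >> ASID_BITS)
    {
        /* cached ASID is from an older generation: allocate afresh */
        if (next_asid == MAX_ASID)
        {
            /* space exhausted: open a new generation and flush
             * stale translations, e.g. rt_hw_tlb_invalidate_all_local() */
            global_asid_generation += ASID_FIRST_GENERATION;
            next_asid = 1;
        }
        *cached_asid = global_asid_generation | next_asid++;
    }
    return *cached_asid & (ASID_FIRST_GENERATION - 1);
}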
@@ -109,6 +110,24 @@ void rt_hw_aspace_switch(rt_aspace_t aspace)
     asm volatile("sfence.vma x0,%0"::"r"(asid):"memory");
 }

+#define ASID_INIT() _asid_init()
+
+#else /* ARCH_USING_ASID */
+
+#define ASID_INIT()
+
+void rt_hw_aspace_switch(rt_aspace_t aspace)
+{
+    uintptr_t page_table = (uintptr_t)rt_kmem_v2p(aspace->page_table);
+    current_mmu_table = aspace->page_table;
+
+    write_csr(satp, (((size_t)SATP_MODE) << SATP_MODE_OFFSET) |
+              ((rt_ubase_t)page_table >> PAGE_OFFSET_BIT));
+    rt_hw_tlb_invalidate_all_local();
+}
+
+#endif /* ARCH_USING_ASID */

 void *rt_hw_mmu_tbl_get()
 {
     return current_mmu_table;
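
For reference, the non-ASID path above leaves the satp ASID field at zero, while the ASID path fills it in; both compose the same RV64 satp layout. Field positions are per the RISC-V privileged spec; the helper below is illustrative, the real code uses the SATP_MODE/SATP_MODE_OFFSET macros:

#include <stdint.h>

/* RV64 satp: MODE[63:60] | ASID[59:44] | PPN[43:0], PPN = paddr >> 12.
 * On the c906 the mode is Sv39 (MODE = 8). Illustrative only. */
static inline uint64_t satp_make(uint64_t mode, uint64_t asid,
                                 uint64_t pgtbl_pa)
{
    return (mode << 60) | ((asid & 0xffffULL) << 44) | (pgtbl_pa >> 12);
}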
@@ -552,7 +571,7 @@ void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
         mdesc++;
     }

-    _asid_init();
+    ASID_INIT();

     rt_hw_aspace_switch(&rt_kernel_space);
     rt_page_cleanup();
@@ -601,13 +620,15 @@ void rt_hw_mem_setup_early(void)
         LOG_E("%s: not aligned virtual address. pv_offset %p", __func__, pv_off);
         RT_ASSERT(0);
     }

     /**
      * identical mapping,
      * PC are still at lower region before relocating to high memory
      */
     for (size_t i = 0; i < __SIZE(PPN0_BIT); i++)
     {
-        early_pgtbl[i] = COMBINEPTE(ps, PAGE_ATTR_RWX | PTE_G | PTE_V);
+        early_pgtbl[i] = COMBINEPTE(ps, PAGE_ATTR_RWX | PTE_G | PTE_V | PTE_CACHE |
+                                            PTE_SHARE | PTE_BUF | PTE_A | PTE_D);
         ps += L1_PAGE_SIZE;
     }
@@ -621,7 +642,8 @@ void rt_hw_mem_setup_early(void)
     rt_size_t ve_idx = GET_L1(vs + 0x80000000);

     for (size_t i = vs_idx; i < ve_idx; i++)
     {
-        early_pgtbl[i] = COMBINEPTE(ps, PAGE_ATTR_RWX | PTE_G | PTE_V);
+        early_pgtbl[i] = COMBINEPTE(ps, PAGE_ATTR_RWX | PTE_G | PTE_V | PTE_CACHE |
+                                            PTE_SHARE | PTE_BUF | PTE_A | PTE_D);
         ps += L1_PAGE_SIZE;
     }
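
Both loops above now set the T-Head extended memory attributes on top of the standard bits, so the early 1 GiB mappings become cacheable/bufferable/shareable instead of taking the C906's default attributes. The bit positions involved, with the high bits assuming the usual T-Head MAEE layout (the low bits are standard RISC-V):

/* Standard RV64 PTE bits (RISC-V privileged spec) */
#define PTE_V     (1UL << 0)   /* valid */
#define PTE_G     (1UL << 5)   /* global */
#define PTE_A     (1UL << 6)   /* accessed */
#define PTE_D     (1UL << 7)   /* dirty */

/* T-Head C906 extended attributes (assumed MAEE bit layout) */
#define PTE_SHARE (1UL << 60)  /* shareable */
#define PTE_BUF   (1UL << 61)  /* bufferable */
#define PTE_CACHE (1UL << 62)  /* cacheable */

Presetting A and D also matters on cores that trap rather than update accessed/dirty bits in hardware, since at this point in boot no page-fault handler is installed yet.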

File 4 of 4:

@@ -8,13 +8,16 @@
  * 2018/10/01     Bernard      The first version
  * 2018/12/27     Jesven       Add SMP support
  * 2020/6/12      Xim          Port to QEMU and remove SMP support
+ * 2024-06-30     Shell        Support of kernel remapping
  */

 #include <encoding.h>
 #include <cpuport.h>

-boot_hartid: .int
-.global boot_hartid
+.data
+.global boot_hartid /* global varible rt_boot_hartid in .data section */
+boot_hartid:
+    .word 0xdeadbeef

 .global _start
 .section ".start", "ax"
@@ -72,8 +75,6 @@ _start:
     li x31,0

     /* set to disable FPU */
-    li t0, SSTATUS_FS
-    csrc sstatus, t0
     li t0, SSTATUS_SUM
     csrs sstatus, t0
@@ -86,8 +87,45 @@ _start:
     la sp, __stack_start__
     li t0, __STACKSIZE__
     add sp, sp, t0
-    csrw sscratch, sp

+    /**
+     * sscratch is always zero on kernel mode
+     */
+    csrw sscratch, zero

     call init_bss
+#ifdef ARCH_MM_MMU
+    call rt_hw_mem_setup_early
+    call rt_kmem_pvoff
+
+    /* a0 := pvoff */
+    beq a0, zero, 1f
+
+    /* relocate pc */
+    la x1, _after_pc_relocation
+    sub x1, x1, a0
+    ret
+_after_pc_relocation:
+    /* relocate gp */
+    sub gp, gp, a0
+
+    /* relocate context: sp */
+    la sp, __stack_start__
+    li t0, __STACKSIZE__
+    add sp, sp, t0
+
+    /* reset s0-fp */
+    mv s0, zero
+
+    /* relocate stvec */
+    la t0, trap_entry
+    csrw stvec, t0
+1:
+#endif
     call sbi_init
-    j primary_cpu_entry
+    call primary_cpu_entry
+
+_never_return_here:
+    j .
+
+.global _start_link_addr
+_start_link_addr:
+    .dword __text_start
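
To unpack the relocation sequence above: rt_kmem_pvoff returns pvoff = physical - virtual, `la` yields the current PC-relative (still low, identity-mapped) address, and subtracting pvoff from it produces the high-virtual alias that the `ret` through x1 then jumps to; when pvoff is zero the `beq` skips the whole block. The same arithmetic in C, purely as an illustration (the real transition must stay in assembly because it moves pc, gp and sp out from under the running code):

#include <stdint.h>

/* Assumed convention, matching rt_kmem_pvoff(): pv_off = phys - virt. */
static inline void *to_high_alias(void *low_runtime_addr, uintptr_t pv_off)
{
    /* identity-mapped (low) address -> linked high-virtual address */
    return (void *)((uintptr_t)low_runtime_addr - pv_off);
}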