/* rt-thread/components/lwp/arch/risc-v/rv64/lwp_arch.h */
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#ifndef LWP_ARCH_H__
#define LWP_ARCH_H__
#include <rthw.h>
#include <lwp.h>
#include <lwp_arch_comm.h>
#ifdef ARCH_MM_MMU
#ifdef ARCH_MM_MMU_32BIT_LIMIT
/* 32-bit limited layout: user space packed under 4 GiB */
#define USER_HEAP_VADDR 0xF0000000UL
#define USER_HEAP_VEND 0xFE000000UL
#define USER_STACK_VSTART 0xE0000000UL
#define USER_STACK_VEND USER_HEAP_VADDR
#define USER_VADDR_START 0xC0000000UL
#define USER_VADDR_TOP 0xFF000000UL
#define USER_LOAD_VADDR 0xD0000000UL
#define LDSO_LOAD_VADDR USER_LOAD_VADDR
#elif defined(ARCH_REMAP_KERNEL)
/* Kernel remapped to high virtual addresses: user space owns the low
 * canonical range. Note that macros may reference others defined later
 * in this branch (e.g. USER_STACK_VEND -> USER_HEAP_VADDR); that is
 * fine because expansion happens at the point of use. */
#define USER_VADDR_START 0x00001000UL
#define USER_VADDR_TOP 0x003ffffff000UL
#define USER_STACK_VSTART 0x000270000000UL
#define USER_STACK_VEND USER_HEAP_VADDR
#define USER_HEAP_VADDR 0x000300000000UL
#define USER_HEAP_VEND USER_VADDR_TOP
/* UL suffix added for type consistency with the other branches */
#define USER_LOAD_VADDR 0x200000000UL
#define LDSO_LOAD_VADDR 0x200000000UL
#else
/* Default rv64 layout: user space in the 0x2_0000_0000 .. top range */
#define USER_HEAP_VADDR 0x300000000UL
#define USER_HEAP_VEND USER_STACK_VSTART
#define USER_STACK_VSTART 0x370000000UL
#define USER_STACK_VEND 0x400000000UL
#define USER_VADDR_START 0x200000000UL
#define USER_VADDR_TOP 0xfffffffffffff000UL
#define USER_LOAD_VADDR 0x200000000UL
#define LDSO_LOAD_VADDR 0x200000000UL
#endif
/* These mapping-attribute values are CPU specific and should be defined in
 * riscv_mmu.h; fall back to 0 (no attribute bits) when that header does not
 * provide them. */
#ifndef MMU_MAP_U_RWCB
/* user read/write mapping; presumably cacheable+bufferable — see riscv_mmu.h */
#define MMU_MAP_U_RWCB 0
#endif
#ifndef MMU_MAP_U_RW
/* user read/write mapping, without the cache attributes of MMU_MAP_U_RWCB */
#define MMU_MAP_U_RW 0
#endif
#ifdef __cplusplus
extern "C" {
#endif
/**
 * rt_hw_ffz - find the first (least-significant) zero bit in a word
 *
 * @param x word to scan
 * @return zero-based index of the lowest 0 bit of @x; when @x has no zero
 *         bit (all ones), __builtin_ffsl(0) yields 0 and the result wraps
 *         to (unsigned long)-1.
 */
rt_inline unsigned long rt_hw_ffz(unsigned long x)
{
    unsigned long inverted = ~x;

    /* ffsl() is 1-based (0 means "no set bit"); convert to 0-based index */
    return __builtin_ffsl(inverted) - 1;
}
/* Invalidate the entire instruction cache on the current hart; thin wrapper
 * over the CPU-port primitive so lwp code does not include cache headers
 * directly. Used after writing code pages (e.g. loading a user program). */
rt_inline void icache_invalid_all(void)
{
    rt_hw_cpu_icache_invalidate_all();
}
/* Forward declaration only; full layout lives in the rv64 CPU port. */
struct rt_hw_stack_frame;
/* Pop the signal ucontext previously pushed on the user stack at user_sp.
 * Return value is a pointer into the restored context — presumably the
 * saved frame/stack to resume from; confirm against the rv64 implementation. */
void *arch_signal_ucontext_restore(rt_base_t user_sp);
/* Push a ucontext (signo, siginfo, exception frame, saved signal mask) onto
 * the user stack at user_sp before delivering a signal handler.
 * NOTE(review): return value appears to be the adjusted user stack pointer —
 * verify against the caller in the signal-delivery path. */
void *arch_signal_ucontext_save(int signo, siginfo_t *psiginfo,
struct rt_hw_stack_frame *exp_frame, rt_base_t user_sp,
lwp_sigset_t *save_sig_mask);
#ifdef __cplusplus
}
#endif
#endif
#endif /*LWP_ARCH_H__*/