
[libcpu/risc-v] support noncached normal memory (#7051)

* [libcpu/risc-v] support noncached normal memory

* [mm] check before dereference in _fetch_page

* [mm] add comments on ioremap

* [ioremap] report more info on failure
Shell 2023-03-16 10:26:55 +08:00 committed by GitHub
parent 65301b9cdd
commit 2394e75265
5 changed files with 35 additions and 16 deletions


@@ -62,7 +62,7 @@ static void *_ioremap_type(void *paddr, size_t size, enum ioremap_type type)
     if (err)
     {
-        LOG_W("IOREMAP 0x%lx failed", paddr);
+        LOG_W("IOREMAP 0x%lx failed %d\n", paddr, err);
         v_addr = NULL;
     }
     else


@@ -16,6 +16,20 @@
 #ifdef __cplusplus
 extern "C" {
 #endif
+
+/**
+ * IOREMAP family
+ * `rt_ioremap` defaults to mapping physical memory in the MMIO region into
+ * kernel space as DEVICE memory. Three variants are currently supported:
+ *
+ * name               | attribute
+ * ------------------ | -----------
+ * rt_ioremap_nocache | Device (MMU_MAP_K_DEVICE)
+ * rt_ioremap_cached  | Normal memory (MMU_MAP_K_RWCB)
+ * rt_ioremap_wt      | Normal memory, but every write access is guaranteed
+ *                    | to go directly to system memory; currently mapped
+ *                    | as non-cacheable
+ */
 void *rt_ioremap(void *paddr, size_t size);
 void *rt_ioremap_nocache(void *paddr, size_t size);
 void *rt_ioremap_cached (void *paddr, size_t size);
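
To make the table above concrete, here is a minimal usage sketch. DEMO_UART_PHY_BASE, the register layout, and the include paths are assumptions for illustration; only rt_ioremap and its matching rt_iounmap come from the API documented above.

    #include <rtthread.h>
    #include <ioremap.h>

    /* hypothetical MMIO base, for illustration only */
    #define DEMO_UART_PHY_BASE ((void *)0x10000000UL)

    static void demo_mmio_read(void)
    {
        /* default variant: strongly-ordered DEVICE mapping for registers */
        volatile rt_uint32_t *regs = rt_ioremap(DEMO_UART_PHY_BASE, 0x1000);

        if (regs)
        {
            rt_kprintf("reg[0] = 0x%x\n", regs[0]); /* assumed readable register */
            rt_iounmap(regs);                       /* release the kernel mapping */
        }
    }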


@@ -29,23 +29,26 @@
 static int _fetch_page(rt_varea_t varea, struct rt_mm_fault_msg *msg)
 {
     int err = UNRECOVERABLE;
-    varea->mem_obj->on_page_fault(varea, msg);
-    if (msg->response.status == MM_FAULT_STATUS_OK)
+    if (varea->mem_obj && varea->mem_obj->on_page_fault)
     {
-        void *store = msg->response.vaddr;
-        rt_size_t store_sz = msg->response.size;
+        varea->mem_obj->on_page_fault(varea, msg);
+        if (msg->response.status == MM_FAULT_STATUS_OK)
+        {
+            void *store = msg->response.vaddr;
+            rt_size_t store_sz = msg->response.size;
 
-        if (msg->vaddr + store_sz > varea->start + varea->size)
-        {
-            LOG_W("%s more size of buffer is provided than varea", __func__);
-        }
-        else
-        {
-            rt_hw_mmu_map(varea->aspace, msg->vaddr, store + PV_OFFSET,
-                          store_sz, varea->attr);
-            rt_hw_tlb_invalidate_range(varea->aspace, msg->vaddr, store_sz,
-                                       ARCH_PAGE_SIZE);
-            err = RECOVERABLE;
-        }
+            if (msg->vaddr + store_sz > varea->start + varea->size)
+            {
+                LOG_W("%s more size of buffer is provided than varea", __func__);
+            }
+            else
+            {
+                rt_hw_mmu_map(varea->aspace, msg->vaddr, store + PV_OFFSET,
+                              store_sz, varea->attr);
+                rt_hw_tlb_invalidate_range(varea->aspace, msg->vaddr, store_sz,
+                                           ARCH_PAGE_SIZE);
+                err = RECOVERABLE;
+            }
+        }
     }
     return err;
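
The guard added above matters because on_page_fault is a per-object callback that may be absent. As a hedged sketch of the contract _fetch_page relies on, a conforming handler fills in msg->response before returning. The handler below is hypothetical; rt_pages_alloc and ARCH_PAGE_SIZE are assumed from the mm page allocator.

    /* hypothetical page-fault handler for a memory object */
    static void demo_on_page_fault(rt_varea_t varea, struct rt_mm_fault_msg *msg)
    {
        void *page = rt_pages_alloc(0); /* order 0: one backing page */

        if (page)
        {
            msg->response.status = MM_FAULT_STATUS_OK;
            msg->response.vaddr  = page;            /* kernel vaddr of backing store */
            msg->response.size   = ARCH_PAGE_SIZE;  /* must stay inside the varea */
        }
        /* on failure, do not set MM_FAULT_STATUS_OK; _fetch_page then
           returns UNRECOVERABLE */
    }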


@@ -88,6 +88,7 @@
 #define MMU_MAP_K_DEVICE PTE_WRAP(PAGE_ATTR_DEV | PTE_G | PAGE_ATTR_XN | PTE_V)
 #define MMU_MAP_K_RWCB PTE_WRAP(PAGE_ATTR_CB | PTE_G | PAGE_ATTR_RWX | PTE_V)
+#define MMU_MAP_K_RW PTE_WRAP(PTE_G | PAGE_ATTR_RWX | PTE_V)
 #define MMU_MAP_U_RWCB PTE_WRAP(PAGE_ATTR_CB | PTE_U | PAGE_ATTR_RWX | PTE_V)
 #define MMU_MAP_U_RWCB_XN PTE_WRAP(PAGE_ATTR_CB | PTE_U | PAGE_ATTR_XN | PTE_V)
 #define MMU_MAP_U_RW PTE_WRAP(PTE_U | PAGE_ATTR_RWX | PTE_V)


@@ -73,6 +73,7 @@
 #define MMU_MAP_K_DEVICE (PTE_G | PTE_W | PTE_R | PTE_V)
 #define MMU_MAP_K_RWCB (PTE_G | PTE_X | PTE_W | PTE_R | PTE_V)
+#define MMU_MAP_K_RW (PTE_G | PTE_X | PTE_W | PTE_R | PTE_V)
 #define MMU_MAP_U_RWCB (PTE_U | PTE_X | PTE_W | PTE_R | PTE_V)
 #define MMU_MAP_U_RWCB_XN (PTE_U | PTE_W | PTE_R | PTE_V)
 #define MMU_MAP_U_RW (PTE_U | PTE_X | PTE_W | PTE_R | PTE_V)
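
The MMU_MAP_K_RW attribute added above carries no cacheable bits, which is what gives the non-cached normal-memory mapping its behavior; note that in the second set of definitions MMU_MAP_K_RW and MMU_MAP_K_RWCB encode identically, since standard RISC-V PTEs expose no cacheability bits, while the PTE_WRAP/PAGE_ATTR_CB variant can distinguish them through extended page attributes. Below is a hedged sketch of how a mapping routine could select among these attributes; demo_attr_for() and its enum are hypothetical, not the RT-Thread source.

    /* hypothetical helper mapping an ioremap variant to a page attribute */
    enum demo_map_kind { DEMO_MAP_DEVICE, DEMO_MAP_CACHED, DEMO_MAP_WT };

    static size_t demo_attr_for(enum demo_map_kind kind)
    {
        switch (kind)
        {
        case DEMO_MAP_CACHED:
            return MMU_MAP_K_RWCB;   /* normal memory, cacheable (write-back) */
        case DEMO_MAP_WT:
            return MMU_MAP_K_RW;     /* normal memory, currently non-cacheable */
        case DEMO_MAP_DEVICE:
        default:
            return MMU_MAP_K_DEVICE; /* strongly-ordered device memory */
        }
    }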