diff --git a/components/mm/ioremap.c b/components/mm/ioremap.c
index a24672a9f2..a3b8a9e1f2 100644
--- a/components/mm/ioremap.c
+++ b/components/mm/ioremap.c
@@ -62,7 +62,7 @@ static void *_ioremap_type(void *paddr, size_t size, enum ioremap_type type)
     if (err)
     {
-        LOG_W("IOREMAP 0x%lx failed", paddr);
+        LOG_W("IOREMAP 0x%lx failed %d\n", paddr, err);
         v_addr = NULL;
     }
     else
diff --git a/components/mm/ioremap.h b/components/mm/ioremap.h
index 2611b3a520..dc219630a3 100644
--- a/components/mm/ioremap.h
+++ b/components/mm/ioremap.h
@@ -16,6 +16,20 @@
 extern "C" {
 #endif
 
+/**
+ * IOREMAP family
+ *
+ * `rt_ioremap` defaults to mapping physical memory in the MMIO region as
+ * DEVICE memory into kernel space. There are 3 variants currently supported.
+ *
+ * name               | attribute
+ * ------------------ | ---------
+ * rt_ioremap_nocache | Device (MMU_MAP_K_DEVICE)
+ * rt_ioremap_cached  | Normal memory (MMU_MAP_K_RWCB)
+ * rt_ioremap_wt      | Normal memory, but every write access goes directly
+ *                    | to system memory (currently mapped as non-cacheable)
+ */
+
 void *rt_ioremap(void *paddr, size_t size);
 void *rt_ioremap_nocache(void *paddr, size_t size);
 void *rt_ioremap_cached (void *paddr, size_t size);
diff --git a/components/mm/mm_fault.c b/components/mm/mm_fault.c
index 79274e7f8a..e36d39a93f 100644
--- a/components/mm/mm_fault.c
+++ b/components/mm/mm_fault.c
@@ -29,23 +29,26 @@
 static int _fetch_page(rt_varea_t varea, struct rt_mm_fault_msg *msg)
 {
     int err = UNRECOVERABLE;
-    varea->mem_obj->on_page_fault(varea, msg);
-    if (msg->response.status == MM_FAULT_STATUS_OK)
+    if (varea->mem_obj && varea->mem_obj->on_page_fault)
     {
-        void *store = msg->response.vaddr;
-        rt_size_t store_sz = msg->response.size;
+        varea->mem_obj->on_page_fault(varea, msg);
+        if (msg->response.status == MM_FAULT_STATUS_OK)
+        {
+            void *store = msg->response.vaddr;
+            rt_size_t store_sz = msg->response.size;
 
-        if (msg->vaddr + store_sz > varea->start + varea->size)
-        {
-            LOG_W("%s more size of buffer is provided than varea", __func__);
-        }
-        else
-        {
-            rt_hw_mmu_map(varea->aspace, msg->vaddr, store + PV_OFFSET,
-                          store_sz, varea->attr);
-            rt_hw_tlb_invalidate_range(varea->aspace, msg->vaddr, store_sz,
-                                       ARCH_PAGE_SIZE);
-            err = RECOVERABLE;
+            if (msg->vaddr + store_sz > varea->start + varea->size)
+            {
+                LOG_W("%s more size of buffer is provided than varea", __func__);
+            }
+            else
+            {
+                rt_hw_mmu_map(varea->aspace, msg->vaddr, store + PV_OFFSET,
+                              store_sz, varea->attr);
+                rt_hw_tlb_invalidate_range(varea->aspace, msg->vaddr, store_sz,
+                                           ARCH_PAGE_SIZE);
+                err = RECOVERABLE;
+            }
         }
     }
     return err;
diff --git a/libcpu/risc-v/t-head/c906/riscv_mmu.h b/libcpu/risc-v/t-head/c906/riscv_mmu.h
index ddb3d0277d..87db683b05 100644
--- a/libcpu/risc-v/t-head/c906/riscv_mmu.h
+++ b/libcpu/risc-v/t-head/c906/riscv_mmu.h
@@ -88,6 +88,7 @@
 #define MMU_MAP_K_DEVICE PTE_WRAP(PAGE_ATTR_DEV | PTE_G | PAGE_ATTR_XN | PTE_V)
 #define MMU_MAP_K_RWCB PTE_WRAP(PAGE_ATTR_CB | PTE_G | PAGE_ATTR_RWX | PTE_V)
+#define MMU_MAP_K_RW PTE_WRAP(PTE_G | PAGE_ATTR_RWX | PTE_V)
 #define MMU_MAP_U_RWCB PTE_WRAP(PAGE_ATTR_CB | PTE_U | PAGE_ATTR_RWX | PTE_V)
 #define MMU_MAP_U_RWCB_XN PTE_WRAP(PAGE_ATTR_CB | PTE_U | PAGE_ATTR_XN | PTE_V)
 #define MMU_MAP_U_RW PTE_WRAP(PTE_U | PAGE_ATTR_RWX | PTE_V)
diff --git a/libcpu/risc-v/virt64/riscv_mmu.h b/libcpu/risc-v/virt64/riscv_mmu.h
index fc5a143b65..2e255263f5 100644
--- a/libcpu/risc-v/virt64/riscv_mmu.h
+++ b/libcpu/risc-v/virt64/riscv_mmu.h
@@ -73,6 +73,7 @@
 #define MMU_MAP_K_DEVICE (PTE_G | PTE_W | PTE_R | PTE_V)
 #define MMU_MAP_K_RWCB (PTE_G | PTE_X | PTE_W | PTE_R | PTE_V)
+#define MMU_MAP_K_RW (PTE_G | PTE_X | PTE_W | PTE_R | PTE_V)
 #define MMU_MAP_U_RWCB (PTE_U | PTE_X | PTE_W | PTE_R | PTE_V)
 #define MMU_MAP_U_RWCB_XN (PTE_U | PTE_W | PTE_R | PTE_V)
 #define MMU_MAP_U_RW (PTE_U | PTE_X | PTE_W | PTE_R | PTE_V)
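Note: a minimal usage sketch of the rt_ioremap family documented in the ioremap.h hunk above. The device base address, mapping size, and probe function here are hypothetical placeholders, not part of this patch; the include path may differ depending on the build configuration.

/* ioremap_usage_sketch.c -- illustrative only, assumes the mm component is enabled */
#include <rtthread.h>
#include <ioremap.h>

/* Hypothetical MMIO region; substitute the real device's base and size. */
#define DEV_PHY_BASE  ((void *)0x10000000)
#define DEV_MAP_SIZE  (0x1000)

static void dev_mmio_sketch(void)
{
    /* Default variant: maps the region as DEVICE memory (MMU_MAP_K_DEVICE),
     * which is the usual choice for register access. */
    volatile rt_uint32_t *regs = rt_ioremap(DEV_PHY_BASE, DEV_MAP_SIZE);

    if (regs == RT_NULL)
    {
        /* On failure, _ioremap_type() now also logs the error code. */
        return;
    }

    regs[0] = 0x1;                /* write goes straight to the device */

    rt_iounmap((void *)regs);     /* release the kernel-space mapping */
}

rt_ioremap_cached() or rt_ioremap_wt() would be used the same way when normal-memory or write-through semantics are wanted instead of device memory.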