/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-12-06     WangXiaoyao  the first version
 * 2023-08-19     Shell        Support PRIVATE mapping and COW
 */
|
|
|
|
#include <rtthread.h>
|
|
|
|
|
|
|
|
#ifdef RT_USING_SMART
|
2023-01-16 08:24:03 +08:00
|
|
|
#define DBG_TAG "mm.fault"
|
|
|
|
#define DBG_LVL DBG_INFO
|
|
|
|
#include <rtdbg.h>
|
|
|
|
|
2023-01-09 10:08:55 +08:00
|
|
|
#include <lwp.h>
|
|
|
|
#include <lwp_syscall.h>
|
|
|
|
#include "mm_aspace.h"
|
|
|
|
#include "mm_fault.h"
|
|
|
|
#include "mm_flag.h"
|
|
|
|
#include "mm_private.h"
|
|
|
|
#include <mmu.h>
|
|
|
|
#include <tlb.h>
|
|
|
|
|
2023-03-30 08:25:15 +08:00
|
|
|
/**
 * Ask the varea's backing memory object to supply the missing page, then
 * install the mapping described by @msg.
 *
 * @return MM_FAULT_FIXABLE_TRUE when the page was provided and mapped,
 *         MM_FAULT_FIXABLE_FALSE otherwise (no backend, or mapping failed).
 */
static int _fetch_page(rt_varea_t varea, struct rt_aspace_fault_msg *msg)
{
    /* a page can only be supplied when a backend with a pager exists */
    if (!(varea->mem_obj && varea->mem_obj->on_page_fault))
    {
        return MM_FAULT_FIXABLE_FALSE;
    }

    /* let the backend prepare the page content, then install the mapping */
    varea->mem_obj->on_page_fault(varea, msg);
    if (rt_varea_map_with_msg(varea, msg) == RT_EOK)
    {
        return MM_FAULT_FIXABLE_TRUE;
    }
    return MM_FAULT_FIXABLE_FALSE;
}
|
|
|
|
|
2023-03-30 08:25:15 +08:00
|
|
|
/**
 * Handle a fault raised by a read access.
 *
 * Only a plain page fault (page not present) is fixable for reads; any other
 * fault type on a read is left unfixed.
 *
 * @return MM_FAULT_FIXABLE_TRUE / MM_FAULT_FIXABLE_FALSE
 */
static int _read_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
{
    if (msg->fault_type != MM_FAULT_TYPE_PAGE_FAULT)
    {
        /* signal a fault to user? */
        return MM_FAULT_FIXABLE_FALSE;
    }

    /* a page fault implies no translation exists and this is no prefetch */
    RT_ASSERT(pa == ARCH_MAP_FAILED);
    RT_ASSERT(!(varea->flag & MMF_PREFETCH));
    return _fetch_page(varea, msg);
}
|
|
|
|
|
2023-03-30 08:25:15 +08:00
|
|
|
/**
 * Handle a fault raised by a write access.
 *
 * For a private (COW) mapping, a write to a writable varea is resolved by
 * fixing the private page under the aspace write lock. For shared mappings
 * only a missing-page fault is fixable (by fetching the page).
 *
 * @return MM_FAULT_FIXABLE_TRUE / MM_FAULT_FIXABLE_FALSE
 */
static int _write_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
{
    rt_aspace_t aspace = varea->aspace;
    int rc = MM_FAULT_FIXABLE_FALSE;

    if (rt_varea_is_private_locked(varea))
    {
        /* private mapping: the write needs its own copy of the page */
        int fixable = VAREA_IS_WRITABLE(varea) &&
                      (msg->fault_type == MM_FAULT_TYPE_RWX_PERM ||
                       msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT);

        if (fixable)
        {
            RDWR_LOCK(aspace);
            rc = rt_varea_fix_private_locked(varea, pa, msg, RT_FALSE);
            RDWR_UNLOCK(aspace);
            if (rc == MM_FAULT_FIXABLE_FALSE)
            {
                LOG_I("%s: fix private failure", __func__);
            }
        }
        else
        {
            LOG_I("%s: No permission on %s(attr=0x%lx,writable=%s,fault_type=%d)",
                  __func__, VAREA_NAME(varea), varea->attr,
                  VAREA_IS_WRITABLE(varea) ? "True" : "False", msg->fault_type);
        }
    }
    else if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
    {
        /* shared mapping with the page absent: fetch it from the backend */
        RT_ASSERT(pa == ARCH_MAP_FAILED);
        RT_ASSERT(!(varea->flag & MMF_PREFETCH));
        rc = _fetch_page(varea, msg);
        if (rc == MM_FAULT_FIXABLE_FALSE)
        {
            LOG_I("%s: page fault failure", __func__);
        }
    }
    else
    {
        LOG_D("%s: can not fix", __func__);
        /* signal a fault to user? */
    }

    return rc;
}
|
|
|
|
|
2023-03-30 08:25:15 +08:00
|
|
|
/**
 * Handle a fault raised by an instruction fetch.
 *
 * Mirrors the read path: only a missing page is fixable, by fetching it
 * from the varea's backend.
 *
 * @return MM_FAULT_FIXABLE_TRUE / MM_FAULT_FIXABLE_FALSE
 */
static int _exec_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
{
    if (msg->fault_type != MM_FAULT_TYPE_PAGE_FAULT)
    {
        return MM_FAULT_FIXABLE_FALSE;
    }

    /* no translation may exist yet, and prefetch never reaches here */
    RT_ASSERT(pa == ARCH_MAP_FAILED);
    RT_ASSERT(!(varea->flag & MMF_PREFETCH));
    return _fetch_page(varea, msg);
}
|
|
|
|
|
2024-06-07 21:34:03 +08:00
|
|
|
/**
 * Refine a generic MMU fault into a precise fault type.
 *
 * When the architecture reports only MM_FAULT_TYPE_GENERIC_MMU, decide
 * whether the access was a missing-page fault (no physical mapping) or a
 * permission (RWX) fault, based on the varea's attributes and the faulting
 * operation. Faults that the varea's attributes cannot satisfy are left
 * as GENERIC_MMU (unfixable).
 *
 * @param msg   fault message; msg->fault_type may be rewritten in place
 * @param pa    physical address of the faulting page, or ARCH_MAP_FAILED
 * @param varea the varea covering the faulting address
 */
static void _determine_precise_fault_type(struct rt_aspace_fault_msg *msg, rt_ubase_t pa, rt_varea_t varea)
{
    if (msg->fault_type == MM_FAULT_TYPE_GENERIC_MMU)
    {
        rt_base_t requesting_perm;
        switch (msg->fault_op)
        {
        case MM_FAULT_OP_READ:
            requesting_perm = RT_HW_MMU_PROT_READ | RT_HW_MMU_PROT_USER;
            break;
        case MM_FAULT_OP_WRITE:
            requesting_perm = RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER;
            break;
        case MM_FAULT_OP_EXECUTE:
            requesting_perm = RT_HW_MMU_PROT_EXECUTE | RT_HW_MMU_PROT_USER;
            break;
        default:
            /* unknown fault op: leave the fault type untouched rather than
             * test against an indeterminate permission mask (was UB before:
             * requesting_perm could be read uninitialized) */
            return;
        }

        /**
         * always checking the user privileges since dynamic permission is not
         * supported in kernel. So those faults are never fixable. Hence, adding
         * permission check never changes the result of checking. In other
         * words, { 0 && (expr) } is always false.
         */
        if (rt_hw_mmu_attr_test_perm(varea->attr, requesting_perm))
        {
            if (pa == (rt_ubase_t)ARCH_MAP_FAILED)
            {
                /* attributes allow the access but no page is mapped */
                msg->fault_type = MM_FAULT_TYPE_PAGE_FAULT;
            }
            else
            {
                /* page is mapped: the hardware PTE lags the varea attr (COW) */
                msg->fault_type = MM_FAULT_TYPE_RWX_PERM;
            }
        }
    }
}
|
|
|
|
|
2023-08-16 15:38:59 +08:00
|
|
|
/**
 * @brief Try to resolve a memory fault inside an address space.
 *
 * Page-aligns the faulting address, locates the covering varea under the
 * aspace read lock, refines a generic MMU fault into a precise type, and
 * dispatches to the read/write/execute fixer. A fault whose page turns out
 * to be already mapped (e.g. fixed concurrently by another CPU) is reported
 * fixed immediately.
 *
 * @param aspace address space the fault occurred in (may be RT_NULL)
 * @param msg    fault description; fault_vaddr, off and response are updated
 * @return MM_FAULT_FIXABLE_TRUE when the fault was fixed,
 *         MM_FAULT_FIXABLE_FALSE otherwise
 */
int rt_aspace_fault_try_fix(rt_aspace_t aspace, struct rt_aspace_fault_msg *msg)
{
    int err = MM_FAULT_FIXABLE_FALSE;
    uintptr_t va = (uintptr_t)msg->fault_vaddr;

    /* work on the page-aligned address from here on */
    va &= ~ARCH_PAGE_MASK;
    msg->fault_vaddr = (void *)va;
    rt_mm_fault_res_init(&msg->response);

    /* fault fixing may block (page allocation, backend I/O) */
    RT_DEBUG_SCHEDULER_AVAILABLE(1);

    if (aspace)
    {
        rt_varea_t varea;

        RD_LOCK(aspace);
        varea = _aspace_bst_search(aspace, msg->fault_vaddr);
        if (varea)
        {
            void *pa = rt_hw_mmu_v2p(aspace, msg->fault_vaddr);
            _determine_precise_fault_type(msg, (rt_ubase_t)pa, varea);

            if (pa != ARCH_MAP_FAILED && msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
            {
                /* mapping already present: nothing left to do */
                LOG_D("%s(fault=%p) has already fixed", __func__, msg->fault_vaddr);
                err = MM_FAULT_FIXABLE_TRUE;
            }
            else
            {
                LOG_D("%s(varea=%s,fault=%p,fault_op=%d,phy=%p)", __func__, VAREA_NAME(varea), msg->fault_vaddr, msg->fault_op, pa);
                msg->off = varea->offset + ((long)msg->fault_vaddr - (long)varea->start) / ARCH_PAGE_SIZE;

                /* permission checked by fault op */
                switch (msg->fault_op)
                {
                case MM_FAULT_OP_READ:
                    err = _read_fault(varea, pa, msg);
                    break;
                case MM_FAULT_OP_WRITE:
                    err = _write_fault(varea, pa, msg);
                    break;
                case MM_FAULT_OP_EXECUTE:
                    err = _exec_fault(varea, pa, msg);
                    break;
                default:
                    /* unknown operation: not fixable */
                    break;
                }
            }
        }
        else
        {
            /* %p matches the void* argument (was 0x%lx, a format mismatch) */
            LOG_I("%s: varea not found at %p", __func__, msg->fault_vaddr);
        }
        RD_UNLOCK(aspace);
    }

    return err;
}
|
|
|
|
|
|
|
|
#endif /* RT_USING_SMART */
|