/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-11-14     WangXiaoyao  the first version
 * 2023-08-17     Shell        Add unmap_range for MAP_PRIVATE
 */
#ifndef __MM_ASPACE_H__
#define __MM_ASPACE_H__

#include <rthw.h>
#include <rtthread.h>

#include "avl_adpt.h"
#include "mm_fault.h"
#include "mm_flag.h"

#include <stddef.h>
#include <string.h>

#define MM_PAGE_SHIFT    12
#define MM_PA_TO_OFF(pa) ((uintptr_t)(pa) >> MM_PAGE_SHIFT)
#define PV_OFFSET        (rt_kmem_pvoff())
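
/*
 * Usage sketch (illustrative): MM_PA_TO_OFF() yields the page-frame offset
 * expected by the *_phy mapping APIs below, and PV_OFFSET is assumed to be
 * the physical-minus-virtual offset reported by rt_kmem_pvoff() for the
 * kernel's linear mapping (the 0x10000000 device base is a made-up value).
 *
 *   rt_size_t pa_off = MM_PA_TO_OFF(0x10000000);  // physical address >> 12
 *   rt_ubase_t pa = (rt_ubase_t)kva + PV_OFFSET;  // kva: a linearly mapped kernel va
 */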

typedef struct rt_spinlock mm_spinlock_t;

#define MM_PGTBL_LOCK_INIT(aspace) (rt_spin_lock_init(&((aspace)->pgtbl_lock)))
#define MM_PGTBL_LOCK(aspace)      (rt_spin_lock(&((aspace)->pgtbl_lock)))
#define MM_PGTBL_UNLOCK(aspace)    (rt_spin_unlock(&((aspace)->pgtbl_lock)))

struct rt_aspace;
struct rt_varea;
struct rt_mem_obj;

extern struct rt_aspace rt_kernel_space;

typedef struct rt_aspace
{
    void *start;
    rt_size_t size;

    void *page_table;
    mm_spinlock_t pgtbl_lock;

    struct _aspace_tree tree;
    struct rt_mutex bst_lock;

    struct rt_mem_obj *private_object;

#ifdef ARCH_USING_ASID
    rt_uint64_t asid;
#endif /* ARCH_USING_ASID */

} *rt_aspace_t;

typedef struct rt_varea
{
    void *start;
    rt_size_t size;
    rt_size_t offset;

    rt_size_t attr;
    rt_size_t flag;

    struct rt_aspace *aspace;
    struct rt_mem_obj *mem_obj;

    struct _aspace_node node;

    void *data;
} *rt_varea_t;

typedef struct rt_mm_va_hint
{
    void *limit_start;
    rt_size_t limit_range_size;

    void *prefer;
    const rt_size_t map_size;

    mm_flag_t flags;
} *rt_mm_va_hint_t;

typedef struct rt_mem_obj
{
    void (*hint_free)(rt_mm_va_hint_t hint);
    void (*on_page_fault)(struct rt_varea *varea, struct rt_aspace_fault_msg *msg);

    /* do pre-open business like increasing a reference count */
    void (*on_varea_open)(struct rt_varea *varea);
    /* do post-close business like decreasing a reference count */
    void (*on_varea_close)(struct rt_varea *varea);

    /* do preparation for shrinking the address range of varea */
    rt_err_t (*on_varea_shrink)(struct rt_varea *varea, void *new_vaddr, rt_size_t size);
    /* do preparation for expanding the address range of varea */
    rt_err_t (*on_varea_expand)(struct rt_varea *varea, void *new_vaddr, rt_size_t size);
    /**
     * this works like an on_varea_open() on `subset` and an on_varea_shrink() on `existed`,
     * while resources can migrate from `existed` to `subset` at the same time
     */
    rt_err_t (*on_varea_split)(struct rt_varea *existed, void *unmap_start,
                               rt_size_t unmap_len, struct rt_varea *subset);
    /**
     * this works like an on_varea_expand() on `merge_to` and an on_varea_close() on `merge_from`,
     * while resources can migrate from `merge_from` to `merge_to` at the same time
     */
    rt_err_t (*on_varea_merge)(struct rt_varea *merge_to, struct rt_varea *merge_from);

    /* dynamic mem_obj API */
    void (*page_read)(struct rt_varea *varea, struct rt_aspace_io_msg *msg);
    void (*page_write)(struct rt_varea *varea, struct rt_aspace_io_msg *msg);

    const char *(*get_name)(rt_varea_t varea);

    void *(*on_varea_mremap)(struct rt_varea *varea, rt_size_t new_size, int flags, void *new_address);
} *rt_mem_obj_t;
extern struct rt_mem_obj rt_mm_dummy_mapper;
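
/*
 * Sketch of a custom backing object (illustrative only; the function and
 * object names are placeholders and the remaining callbacks are omitted).
 * Page-fault handling is simply delegated to rt_mm_dummy_mapper, which backs
 * the mapping with anonymous pages.
 *
 *   static const char *my_obj_get_name(rt_varea_t varea)
 *   {
 *       return "my-region";
 *   }
 *
 *   static void my_obj_on_page_fault(struct rt_varea *varea,
 *                                    struct rt_aspace_fault_msg *msg)
 *   {
 *       rt_mm_dummy_mapper.on_page_fault(varea, msg);  // reuse anonymous backing
 *   }
 *
 *   static struct rt_mem_obj my_obj = {
 *       .get_name      = my_obj_get_name,
 *       .on_page_fault = my_obj_on_page_fault,
 *   };
 */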
enum rt_mmu_cntl
{
    MMU_CNTL_NONCACHE,
    MMU_CNTL_CACHE,
    MMU_CNTL_READONLY,
    MMU_CNTL_READWRITE,
    MMU_CNTL_OFFLOAD,
    MMU_CNTL_INSTALL,
    MMU_CNTL_DUMMY_END,
};

/**
 * @brief Lock to access the varea tree (bst_lock) of an address space
 */
#define WR_LOCK(aspace)                                                        \
    rt_thread_self() ? rt_mutex_take(&(aspace)->bst_lock, RT_WAITING_FOREVER)  \
                     : 0
#define WR_UNLOCK(aspace)                                                      \
    rt_thread_self() ? rt_mutex_release(&(aspace)->bst_lock) : 0

/* FIXME: fix rd_lock */
#define RD_LOCK(aspace)     WR_LOCK(aspace)
#define RD_UNLOCK(aspace)   WR_UNLOCK(aspace)
#define RDWR_LOCK(aspace)   ((void)aspace)
#define RDWR_UNLOCK(aspace) ((void)aspace)

rt_aspace_t rt_aspace_create(void *start, rt_size_t length, void *pgtbl);

rt_err_t rt_aspace_init(rt_aspace_t aspace, void *start, rt_size_t length, void *pgtbl);

void rt_aspace_delete(rt_aspace_t aspace);

void rt_aspace_detach(rt_aspace_t aspace);
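
/*
 * Sketch (illustrative): building and tearing down an address space. The
 * page-table allocator `arch_create_pgtbl()` and the USER_VADDR_START /
 * USER_VADDR_SIZE bounds are placeholders for whatever the target
 * architecture and platform provide.
 *
 *   void *pgtbl = arch_create_pgtbl();   // arch-specific top-level page table
 *   rt_aspace_t uspace = RT_NULL;
 *   if (pgtbl)
 *       uspace = rt_aspace_create((void *)USER_VADDR_START, USER_VADDR_SIZE, pgtbl);
 *   if (uspace)
 *   {
 *       // ... map regions, use them ...
 *       rt_aspace_delete(uspace);
 *   }
 */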

/**
 * @brief Memory Map on Virtual Address Space to Mappable Object
 * *INFO There is no restriction on using the NULL address (physical/virtual).
 *       The vaddr passed in addr must be page aligned. If vaddr is RT_NULL,
 *       a suitable address will be chosen automatically.
 *
 * @param aspace target virtual address space
 * @param addr virtual address of the mapping
 * @param length length of mapping region
 * @param attr MMU attribution
 * @param flags desired memory protection and behaviour of the mapping
 * @param mem_obj memory map backing store object
 * @param offset offset of mapping in 4KB page for mem_obj
 * @return int E_OK on success, with addr set to vaddr of mapping
 *         E_INVAL on failure
 */
int rt_aspace_map(rt_aspace_t aspace, void **addr, rt_size_t length, rt_size_t attr,
                  mm_flag_t flags, rt_mem_obj_t mem_obj, rt_size_t offset);
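
/*
 * Usage sketch (illustrative): reserving one page of anonymous memory in the
 * kernel address space, backed by rt_mm_dummy_mapper. The MMU attribute is
 * architecture specific and therefore left symbolic; flags 0 is assumed to
 * select the default demand-paging behaviour.
 *
 *   void *vaddr = RT_NULL;   // RT_NULL lets the allocator choose a suitable va
 *   rt_size_t attr = ...;    // arch-specific cached read-write attribute
 *   int err = rt_aspace_map(&rt_kernel_space, &vaddr, 1 << MM_PAGE_SHIFT, attr,
 *                           0, &rt_mm_dummy_mapper, 0);
 *   if (err == RT_EOK)
 *   {
 *       // ... use vaddr, then:
 *       rt_aspace_unmap(&rt_kernel_space, vaddr);
 *   }
 */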

/** no malloc routines are called; the caller provides the varea */
int rt_aspace_map_static(rt_aspace_t aspace, rt_varea_t varea, void **addr,
                         rt_size_t length, rt_size_t attr, mm_flag_t flags,
                         rt_mem_obj_t mem_obj, rt_size_t offset);

/**
 * @brief Memory Map on Virtual Address Space to Physical Memory
 *
 * @param aspace target virtual address space
 * @param hint hint of mapping va
 * @param attr MMU attribution
 * @param pa_off physical address shifted right by 12 bits (see MM_PA_TO_OFF())
 * @param ret_va pointer to the location to store va
 * @return int E_OK on success, with ret_va set to vaddr of mapping
 *         E_INVAL on failure
 */
int rt_aspace_map_phy(rt_aspace_t aspace, rt_mm_va_hint_t hint, rt_size_t attr,
                      rt_size_t pa_off, void **ret_va);
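
/*
 * Usage sketch (illustrative): mapping one page of device registers into the
 * kernel space. The physical base 0x10000000 is a made-up example and the MMU
 * attribute is architecture specific; MM_PA_TO_OFF() produces the pa_off
 * argument.
 *
 *   void *va = RT_NULL;
 *   rt_size_t attr = ...;    // arch-specific device-memory attribute
 *   struct rt_mm_va_hint hint = {
 *       .prefer           = RT_NULL,              // no preferred va
 *       .map_size         = 1 << MM_PAGE_SHIFT,
 *       .limit_start      = rt_kernel_space.start,
 *       .limit_range_size = rt_kernel_space.size,
 *       .flags            = 0,
 *   };
 *   int err = rt_aspace_map_phy(&rt_kernel_space, &hint, attr,
 *                               MM_PA_TO_OFF(0x10000000), &va);
 */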

/** no malloc routines are called; the caller provides the varea */
int rt_aspace_map_phy_static(rt_aspace_t aspace, rt_varea_t varea,
                             rt_mm_va_hint_t hint, rt_size_t attr, rt_size_t pa_off,
                             void **ret_va);

/** map a private memory region to aspace */
int rt_aspace_map_private(rt_aspace_t aspace, void **addr, rt_size_t length,
                          rt_size_t attr, mm_flag_t flags);

/**
 * @brief Remove mappings containing the address specified by addr
 *
 * @param aspace target virtual address space
 * @param addr an address contained in the mapping to be removed
 * @return int rt errno
 */
int rt_aspace_unmap(rt_aspace_t aspace, void *addr);

/**
 * @brief Remove pages of existing mappings in the range [addr, addr+length)
 * Length is automatically rounded up to the next multiple of the page size.
 *
 * @param aspace target virtual address space
 * @param addr the beginning of the range of pages to be unmapped
 * @param length length of the range in bytes
 * @return int rt errno
 */
int rt_aspace_unmap_range(rt_aspace_t aspace, void *addr, size_t length);

void *rt_aspace_mremap_range(rt_aspace_t aspace, void *old_address, size_t old_size,
                             size_t new_size, int flags, void *new_address);

int rt_aspace_control(rt_aspace_t aspace, void *addr, enum rt_mmu_cntl cmd);

int rt_aspace_load_page(rt_aspace_t aspace, void *addr, rt_size_t npage);

int rt_aspace_offload_page(rt_aspace_t aspace, void *addr, rt_size_t npage);
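
/*
 * Usage sketch (illustrative): rt_aspace_control() applies an rt_mmu_cntl
 * command to the mapping that contains `vaddr`, e.g. write-protecting a
 * region and restoring it afterwards (vaddr is assumed to lie inside an
 * existing mapping).
 *
 *   rt_aspace_control(&rt_kernel_space, vaddr, MMU_CNTL_READONLY);
 *   // ... the region containing vaddr is now read-only ...
 *   rt_aspace_control(&rt_kernel_space, vaddr, MMU_CNTL_READWRITE);
 */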

rt_err_t rt_aspace_page_put(rt_aspace_t aspace, void *page_va, void *buffer);

rt_err_t rt_aspace_page_get(rt_aspace_t aspace, void *page_va, void *buffer);

int rt_aspace_traversal(rt_aspace_t aspace,
                        int (*fn)(rt_varea_t varea, void *arg), void *arg);
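
/*
 * Usage sketch (illustrative): dumping every mapping of an address space. The
 * callback is invoked once per varea and `arg` is passed through unchanged.
 *
 *   static int print_varea(rt_varea_t varea, void *arg)
 *   {
 *       rt_kprintf("%p - %p\n", varea->start,
 *                  (char *)varea->start + varea->size);
 *       return 0;
 *   }
 *
 *   rt_aspace_traversal(&rt_kernel_space, print_varea, RT_NULL);
 */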

void rt_aspace_print_all(rt_aspace_t aspace);

rt_base_t rt_aspace_count_vsz(rt_aspace_t aspace);

rt_varea_t rt_aspace_query(rt_aspace_t aspace, void *vaddr);

rt_err_t rt_aspace_duplicate_locked(rt_aspace_t src, rt_aspace_t dst);
rt_err_t rt_aspace_fork(rt_aspace_t *psrc, rt_aspace_t *pdst);
rt_err_t rt_aspace_compare(rt_aspace_t src, rt_aspace_t dst);

/**
 * @brief Map one page to varea
 *
 * @note caller should take the read/write lock
 *
 * @param varea target varea
 * @param vaddr user address
 * @param page the page frame to be mapped
 * @return int
 */
int rt_varea_map_page(rt_varea_t varea, void *vaddr, void *page);

/**
 * @brief Unmap one page in varea
 *
 * @note caller should take the read/write lock
 *
 * @param varea target varea
 * @param vaddr user address of the page to be unmapped
 * @return int
 */
int rt_varea_unmap_page(rt_varea_t varea, void *vaddr);

/**
 * @brief Map a range of physical address to varea
 *
 * @warning Caller should take care of synchronization of its varea among all
 *          the map/unmap operations
 *
 * @param varea target varea
 * @param vaddr user address
 * @param paddr physical address
 * @param length map range
 * @return int
 */
int rt_varea_map_range(rt_varea_t varea, void *vaddr, void *paddr, rt_size_t length);

/**
 * @brief Unmap a range of physical address in varea
 *
 * @warning Caller should take care of synchronization of its varea among all
 *          the map/unmap operations
 *
 * @param varea target varea
 * @param vaddr user address
 * @param length map range
 * @return int
 */
int rt_varea_unmap_range(rt_varea_t varea, void *vaddr, rt_size_t length);

/**
 * @brief Insert page to page manager of varea
 * The page will be freed by varea on uninstall automatically
 *
 * @param varea target varea
 * @param page_addr the page frame to be added
 */
void rt_varea_pgmgr_insert(rt_varea_t varea, void *page_addr);
rt_inline rt_mem_obj_t rt_mem_obj_create(rt_mem_obj_t source)
{
    rt_mem_obj_t target;
    target = rt_malloc(sizeof(*target));
    if (target)
        memcpy(target, source, sizeof(*target));
    return target;
}
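
/*
 * Usage sketch (illustrative): rt_mem_obj_create() returns a heap-allocated
 * copy of an existing backing object, which can then be specialized, e.g. by
 * overriding individual callbacks (my_get_name is a user-provided placeholder).
 *
 *   rt_mem_obj_t obj = rt_mem_obj_create(&rt_mm_dummy_mapper);
 *   if (obj)
 *   {
 *       obj->get_name = my_get_name;
 *   }
 */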
const rt_ubase_t rt_kmem_pvoff(void);

void rt_kmem_pvoff_set(rt_ubase_t pvoff);

int rt_kmem_map_phy(void *va, void *pa, rt_size_t length, rt_size_t attr);

void *rt_kmem_v2p(void *vaddr);

void *rt_kmem_p2v(void *paddr);

void rt_kmem_list(void);
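
/*
 * Usage sketch (illustrative): rt_kmem_v2p()/rt_kmem_p2v() translate between
 * kernel virtual and physical addresses, and rt_kmem_map_phy() installs a
 * fixed kernel mapping at a caller-chosen va. The MMU attribute is
 * architecture specific and left symbolic here.
 *
 *   void *pa   = rt_kmem_v2p(kva);   // kva: a mapped kernel virtual address
 *   void *kva2 = rt_kmem_p2v(pa);    // back to a kernel virtual address
 *   rt_kmem_map_phy(va, pa, 1 << MM_PAGE_SHIFT, attr);  // attr: arch-specific
 */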
#endif /* __MM_ASPACE_H__ */