Sync dfs lwp (#8123)

geniusgogo 2023-10-17 13:07:59 +08:00 committed by GitHub
parent 7d64cdcf58
commit ecd29fda60
79 changed files with 7410 additions and 729 deletions


@ -193,6 +193,40 @@ if RT_USING_DFS_V1
endif
endif
if RT_USING_DFS_V2
config RT_USING_PAGECACHE
bool "Enable page cache"
default n
if RT_USING_PAGECACHE
menu "page cache config"
config RT_PAGECACHE_COUNT
int "page cache max total pages."
default 4096
config RT_PAGECACHE_ASPACE_COUNT
int "aspace max active pages."
default 1024
config RT_PAGECACHE_PRELOAD
int "max pre load pages."
default 4
config RT_PAGECACHE_HASH_NR
int "page cache hash size."
default 1024
config RT_PAGECACHE_GC_WORK_LEVEL
int "page cache gc work trigger min percentage, default 90%."
default 90
config RT_PAGECACHE_GC_STOP_LEVEL
int "page cache gc to min percentage, default 70%."
default 70
endmenu
endif
endif
endif
endmenu
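For orientation, the two GC levels act as high/low water marks: collection is triggered once cached pages reach RT_PAGECACHE_GC_WORK_LEVEL percent of RT_PAGECACHE_COUNT and evicts pages until usage falls back to RT_PAGECACHE_GC_STOP_LEVEL percent. A minimal sketch of that policy, inferred from the option descriptions above (the eviction helper is hypothetical, not part of this commit):

/* hypothetical GC policy sketch based on the option descriptions above */
if (pages_count >= RT_PAGECACHE_COUNT * RT_PAGECACHE_GC_WORK_LEVEL / 100)
{
    while (pages_count > RT_PAGECACHE_COUNT * RT_PAGECACHE_GC_STOP_LEVEL / 100)
    {
        evict_one_inactive_page(); /* hypothetical helper */
    }
}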


@ -286,11 +286,10 @@ int dfs_device_fs_unlink(struct dfs_filesystem *fs, const char *path)
int dfs_device_fs_stat(struct dfs_filesystem *fs, const char *path, struct stat *st)
{
st->st_dev = (dev_t)((size_t)dfs_filesystem_lookup(fs->path));
/* stat root directory */
if ((path[0] == '/') && (path[1] == '\0'))
{
st->st_dev = 0;
st->st_mode = S_IFREG | S_IRUSR | S_IRGRP | S_IROTH |
S_IWUSR | S_IWGRP | S_IWOTH;
st->st_mode &= ~S_IFREG;
@ -308,8 +307,6 @@ int dfs_device_fs_stat(struct dfs_filesystem *fs, const char *path, struct stat
dev_id = rt_device_find(&path[1]);
if (dev_id != RT_NULL)
{
st->st_dev = 0;
st->st_mode = S_IRUSR | S_IRGRP | S_IROTH |
S_IWUSR | S_IWGRP | S_IWOTH;


@ -198,9 +198,11 @@ int dfs_tmpfs_ioctl(struct dfs_file *file, int cmd, void *args)
{
return -RT_ENOMEM;
}
else if (mmap2->lwp == RT_NULL)
return -RT_EINVAL;
LOG_D("tmpfile mmap ptr:%x , size:%d\n", d_file->data, mmap2->length);
mmap2->ret = lwp_map_user_phy(lwp_self(), RT_NULL, d_file->data, mmap2->length, 0);
mmap2->ret = lwp_map_user_phy(mmap2->lwp, mmap2->addr, d_file->data, mmap2->length, 0);
}
return RT_EOK;
break;


@ -64,6 +64,7 @@ struct dfs_file
void *data; /* Specific fd data */
};
#ifdef RT_USING_SMART
struct dfs_mmap2_args
{
void *addr;
@ -72,8 +73,10 @@ struct dfs_mmap2_args
int flags;
off_t pgoffset;
struct rt_lwp *lwp;
void *ret;
};
#endif
void dfs_vnode_mgr_init(void);
int dfs_vnode_init(struct dfs_vnode *vnode, int type, const struct dfs_file_ops *fops);
@ -92,8 +95,9 @@ off_t dfs_file_lseek(struct dfs_file *fd, off_t offset);
int dfs_file_stat(const char *path, struct stat *buf);
int dfs_file_rename(const char *oldpath, const char *newpath);
int dfs_file_ftruncate(struct dfs_file *fd, off_t length);
#ifdef RT_USING_SMART
int dfs_file_mmap2(struct dfs_file *fd, struct dfs_mmap2_args *mmap2);
#endif
/* 0x5254 is just a magic number to make these relatively unique ("RT") */
#define RT_FIOFTRUNCATE 0x52540000U
#define RT_FIOGETADDR 0x52540001U


@ -713,6 +713,7 @@ int dfs_file_ftruncate(struct dfs_file *fd, off_t length)
return result;
}
#ifdef RT_USING_SMART
int dfs_file_mmap2(struct dfs_file *fd, struct dfs_mmap2_args *mmap2)
{
int ret = 0;
@ -736,6 +737,7 @@ int dfs_file_mmap2(struct dfs_file *fd, struct dfs_mmap2_args *mmap2)
return ret;
}
#endif
#ifdef RT_USING_FINSH
#include <finsh.h>


@ -2,20 +2,12 @@ from building import *
import os
# The set of source files associated with this SConscript file.
src = Split('''
src/dfs.c
src/dfs_file.c
src/dfs_fs.c
src/dfs_dentry.c
src/dfs_vnode.c
src/dfs_mnt.c
src/dfs_posix.c
''')
src = Glob('src/*.c') + Glob('src/*.cpp')
cwd = GetCurrentDir()
CPPPATH = [cwd + "/include"]
if GetDepend('RT_USING_POSIX'):
src += ['src/poll.c', 'src/select.c']
if not GetDepend('RT_USING_SMART'):
SrcRemove(src, ['src/dfs_file_mmap.c'])
group = DefineGroup('Filesystem', src, depend = ['RT_USING_DFS', 'RT_USING_DFS_V2'], CPPPATH = CPPPATH)


@ -32,6 +32,24 @@
#include <dfs_file.h>
#include <dfs_mnt.h>
#ifdef RT_USING_PAGECACHE
#include "dfs_pcache.h"
#endif
static int dfs_elm_free_vnode(struct dfs_vnode *vnode);
#ifdef RT_USING_PAGECACHE
static ssize_t dfs_elm_page_read(struct dfs_file *file, struct dfs_page *page);
static ssize_t dfs_elm_page_write(struct dfs_page *page);
static struct dfs_aspace_ops dfs_elm_aspace_ops =
{
.read = dfs_elm_page_read,
.write = dfs_elm_page_write,
};
#endif
#undef SS
#if FF_MAX_SS == FF_MIN_SS
#define SS(fs) ((UINT)FF_MAX_SS) /* Fixed sector size */
@ -361,7 +379,7 @@ int dfs_elm_open(struct dfs_file *file)
extern int elm_get_vol(FATFS * fat);
RT_ASSERT(file->vnode->ref_count > 0);
if (file->vnode->ref_count > 1)
if (file->vnode->data)
{
if (file->vnode->type == FT_DIRECTORY
&& !(file->flags & O_DIRECTORY))
@ -425,6 +443,7 @@ int dfs_elm_open(struct dfs_file *file)
}
file->vnode->data = dir;
rt_mutex_init(&file->vnode->lock, file->dentry->pathname, RT_IPC_FLAG_PRIO);
return RT_EOK;
}
else
@ -465,6 +484,7 @@ int dfs_elm_open(struct dfs_file *file)
file->vnode->size = f_size(fd);
file->vnode->type = FT_REGULAR;
file->vnode->data = fd;
rt_mutex_init(&file->vnode->lock, file->dentry->pathname, RT_IPC_FLAG_PRIO);
if (file->flags & O_APPEND)
{
@ -516,6 +536,9 @@ int dfs_elm_close(struct dfs_file *file)
rt_free(fd);
}
file->vnode->data = RT_NULL;
rt_mutex_detach(&file->vnode->lock);
return elm_result_to_dfs(result);
}
@ -558,7 +581,7 @@ int dfs_elm_ioctl(struct dfs_file *file, int cmd, void *args)
ssize_t dfs_elm_read(struct dfs_file *file, void *buf, size_t len, off_t *pos)
{
FIL *fd;
FRESULT result;
FRESULT result = FR_OK;
UINT byte_read;
if (file->vnode->type == FT_DIRECTORY)
@ -566,14 +589,19 @@ ssize_t dfs_elm_read(struct dfs_file *file, void *buf, size_t len, off_t *pos)
return -EISDIR;
}
if (file->vnode->size > *pos)
{
fd = (FIL *)(file->vnode->data);
RT_ASSERT(fd != RT_NULL);
rt_mutex_take(&file->vnode->lock, RT_WAITING_FOREVER);
f_lseek(fd, *pos);
result = f_read(fd, buf, len, &byte_read);
/* update position */
*pos = fd->fptr;
rt_mutex_release(&file->vnode->lock);
if (result == FR_OK)
return byte_read;
}
return elm_result_to_dfs(result);
}
@ -591,11 +619,13 @@ ssize_t dfs_elm_write(struct dfs_file *file, const void *buf, size_t len, off_t
fd = (FIL *)(file->vnode->data);
RT_ASSERT(fd != RT_NULL);
rt_mutex_take(&file->vnode->lock, RT_WAITING_FOREVER);
f_lseek(fd, *pos);
result = f_write(fd, buf, len, &byte_write);
/* update position and file size */
*pos = fd->fptr;
file->vnode->size = f_size(fd);
rt_mutex_release(&file->vnode->lock);
if (result == FR_OK)
return byte_write;
@ -642,8 +672,9 @@ off_t dfs_elm_lseek(struct dfs_file *file, off_t offset, int whence)
/* regular file type */
fd = (FIL *)(file->vnode->data);
RT_ASSERT(fd != RT_NULL);
rt_mutex_take(&file->vnode->lock, RT_WAITING_FOREVER);
result = f_lseek(fd, offset);
rt_mutex_release(&file->vnode->lock);
if (result == FR_OK)
{
/* return current position */
@ -657,8 +688,9 @@ off_t dfs_elm_lseek(struct dfs_file *file, off_t offset, int whence)
dir = (DIR *)(file->vnode->data);
RT_ASSERT(dir != RT_NULL);
rt_mutex_take(&file->vnode->lock, RT_WAITING_FOREVER);
result = f_seekdir(dir, offset / sizeof(struct dirent));
rt_mutex_release(&file->vnode->lock);
if (result == FR_OK)
{
/* update file position */
@ -827,21 +859,35 @@ int dfs_elm_stat(struct dfs_dentry *dentry, struct stat *st)
st->st_dev = (dev_t)(size_t)(dentry->mnt->dev_id);
st->st_ino = (ino_t)dfs_dentry_full_path_crc32(dentry);
st->st_mode = S_IFREG | S_IRUSR | S_IRGRP | S_IROTH |
S_IWUSR | S_IWGRP | S_IWOTH;
if (file_info.fattrib & AM_DIR)
{
st->st_mode &= ~S_IFREG;
st->st_mode |= S_IFDIR | S_IXUSR | S_IXGRP | S_IXOTH;
st->st_mode = S_IFDIR | (S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
}
else
{
st->st_mode = S_IFREG | (S_IRWXU | S_IRWXG | S_IRWXO);
}
if (file_info.fattrib & AM_RDO)
st->st_mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH);
if (S_IFDIR & st->st_mode)
{
st->st_size = file_info.fsize;
}
else
{
#ifdef RT_USING_PAGECACHE
st->st_size = (dentry->vnode && dentry->vnode->aspace) ? dentry->vnode->size : file_info.fsize;
#else
st->st_size = file_info.fsize;
#endif
}
st->st_blksize = fat->csize * SS(fat);
if (file_info.fattrib & AM_ARC)
{
st->st_blocks = file_info.fsize ? ((file_info.fsize - 1) / SS(fat) / fat->csize + 1) : 0;
st->st_blocks = st->st_size ? ((st->st_size - 1) / SS(fat) / fat->csize + 1) : 0;
st->st_blocks *= (st->st_blksize / 512); // the man page says st_blocks is the number of 512B blocks allocated
}
else
@ -901,20 +947,23 @@ static struct dfs_vnode *dfs_elm_lookup(struct dfs_dentry *dentry)
vnode = dfs_vnode_create();
if (vnode)
{
vnode->mnt = dentry->mnt;
vnode->size = st.st_size;
vnode->data = NULL;
if (S_ISDIR(st.st_mode))
{
vnode->mode = S_IFDIR | (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
vnode->mode = S_IFDIR | (S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
vnode->type = FT_DIRECTORY;
}
else
{
vnode->mode = S_IFREG | S_IXUSR | (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
vnode->mode = S_IFREG | (S_IRWXU | S_IRWXG | S_IRWXO);
vnode->type = FT_REGULAR;
#ifdef RT_USING_PAGECACHE
vnode->aspace = dfs_aspace_create(dentry, vnode, &dfs_elm_aspace_ops);
#endif
}
vnode->mnt = dentry->mnt;
vnode->data = NULL;
vnode->size = 0;
}
return vnode;
@ -934,13 +983,18 @@ static struct dfs_vnode *dfs_elm_create_vnode(struct dfs_dentry *dentry, int typ
{
if (type == FT_DIRECTORY)
{
vnode->mode = S_IFDIR | mode;
/* FAT directories are forced to mode 0555 */
vnode->mode = S_IFDIR | (S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
vnode->type = FT_DIRECTORY;
}
else
{
vnode->mode = S_IFREG | mode;
/* FAT regular files are forced to mode 0777 */
vnode->mode = S_IFREG | (S_IRWXU | S_IRWXG | S_IRWXO);
vnode->type = FT_REGULAR;
#ifdef RT_USING_PAGECACHE
vnode->aspace = dfs_aspace_create(dentry, vnode, &dfs_elm_aspace_ops);
#endif
}
vnode->mnt = dentry->mnt;
@ -962,6 +1016,46 @@ static int dfs_elm_free_vnode(struct dfs_vnode *vnode)
return 0;
}
#ifdef RT_USING_PAGECACHE
static ssize_t dfs_elm_page_read(struct dfs_file *file, struct dfs_page *page)
{
int ret = -EINVAL;
if (page->page)
{
off_t fpos = page->fpos;
ret = dfs_elm_read(file, page->page, page->size, &fpos);
}
return ret;
}
ssize_t dfs_elm_page_write(struct dfs_page *page)
{
FIL *fd;
FRESULT result;
UINT byte_write;
if (page->aspace->vnode->type == FT_DIRECTORY)
{
return -EISDIR;
}
fd = (FIL *)(page->aspace->vnode->data);
RT_ASSERT(fd != RT_NULL);
rt_mutex_take(&page->aspace->vnode->lock, RT_WAITING_FOREVER);
f_lseek(fd, page->fpos);
result = f_write(fd, page->page, page->len, &byte_write);
rt_mutex_release(&page->aspace->vnode->lock);
if (result == FR_OK)
{
return byte_write;
}
return elm_result_to_dfs(result);
}
#endif
static const struct dfs_file_ops dfs_elm_fops =
{
.open = dfs_elm_open,


@ -149,7 +149,7 @@ static struct dfs_vnode *dfs_mqueue_create_vnode(struct dfs_dentry *dentry, int
dfs_mqueue_insert_after(&(mq_file->list));
}
vnode->mode = S_IFREG | mode;
vnode->mode = S_IFREG | (S_IRWXU | S_IRWXG | S_IRWXO);
vnode->type = FT_REGULAR;
rt_mq_t mq = rt_mq_create(dentry->pathname + 1, mq_file->msg_size, mq_file->max_msgs,
RT_IPC_FLAG_FIFO);
@ -191,7 +191,7 @@ struct dfs_vnode *_dfs_mqueue_lookup(struct dfs_dentry *dentry) {
vnode = dfs_vnode_create();
if (mq_file && mq_file->data) {
vnode->mode = S_IFREG | S_IRUSR | S_IWUSR | S_IXUSR;
vnode->mode = S_IFREG | (S_IRWXU | S_IRWXG | S_IRWXO);
vnode->type = FT_REGULAR;
vnode->mnt = dentry->mnt;
vnode->data = mq_file;
@ -202,7 +202,7 @@ struct dfs_vnode *_dfs_mqueue_lookup(struct dfs_dentry *dentry) {
vnode->fops = &_mqueue_fops;
vnode->mnt = dentry->mnt;
vnode->type = FT_DIRECTORY;
vnode->mode = S_IFDIR | S_IRUSR | S_IWUSR | S_IXUSR;
vnode->mode = S_IFDIR | (S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
}
return vnode;
}


@ -191,12 +191,12 @@ static struct dfs_vnode *dfs_romfs_lookup (struct dfs_dentry *dentry)
vnode->size = dirent->size;
if (dirent->type == ROMFS_DIRENT_DIR)
{
vnode->mode = romfs_modemap[ROMFS_DIRENT_DIR] | S_IRUSR;
vnode->mode = romfs_modemap[ROMFS_DIRENT_DIR] | (S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
vnode->type = FT_DIRECTORY;
}
else if (dirent->type == ROMFS_DIRENT_FILE)
{
vnode->mode = romfs_modemap[ROMFS_DIRENT_FILE] | S_IRUSR | S_IXUSR;
vnode->mode = romfs_modemap[ROMFS_DIRENT_FILE] | (S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
vnode->type = FT_REGULAR;
}


@ -28,6 +28,20 @@
#define DBG_TAG "tmpfs"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#ifdef RT_USING_PAGECACHE
#include "dfs_pcache.h"
#endif
#ifdef RT_USING_PAGECACHE
static ssize_t dfs_tmp_page_read(struct dfs_file *file, struct dfs_page *page);
static ssize_t dfs_tmp_page_write(struct dfs_page *page);
static struct dfs_aspace_ops dfs_tmp_aspace_ops =
{
.read = dfs_tmp_page_read,
.write = dfs_tmp_page_write,
};
#endif
static int _path_separate(const char *path, char *parent_path, char *file_name)
{
@ -285,6 +299,8 @@ static ssize_t dfs_tmpfs_read(struct dfs_file *file, void *buf, size_t count, of
d_file = (struct tmpfs_file *)file->vnode->data;
RT_ASSERT(d_file != NULL);
rt_mutex_take(&file->vnode->lock, RT_WAITING_FOREVER);
if (count < file->vnode->size - *pos)
length = count;
else
@ -296,6 +312,8 @@ static ssize_t dfs_tmpfs_read(struct dfs_file *file, void *buf, size_t count, of
/* update file current position */
*pos += length;
rt_mutex_release(&file->vnode->lock);
return length;
}
@ -310,12 +328,15 @@ static ssize_t dfs_tmpfs_write(struct dfs_file *file, const void *buf, size_t co
superblock = d_file->sb;
RT_ASSERT(superblock != NULL);
rt_mutex_take(&file->vnode->lock, RT_WAITING_FOREVER);
if (count + *pos > file->vnode->size)
{
rt_uint8_t *ptr;
ptr = rt_realloc(d_file->data, *pos + count);
if (ptr == NULL)
{
rt_mutex_release(&file->vnode->lock);
rt_set_errno(-ENOMEM);
return 0;
}
@ -335,6 +356,7 @@ static ssize_t dfs_tmpfs_write(struct dfs_file *file, const void *buf, size_t co
/* update file current position */
*pos += count;
rt_mutex_release(&file->vnode->lock);
return count;
}
@ -371,6 +393,7 @@ static int dfs_tmpfs_close(struct dfs_file *file)
struct tmpfs_file *d_file;
RT_ASSERT(file->vnode->ref_count > 0);
if (file->vnode->ref_count != 1)
return 0;
@ -390,6 +413,8 @@ static int dfs_tmpfs_close(struct dfs_file *file)
rt_free(d_file);
}
rt_mutex_detach(&file->vnode->lock);
return RT_EOK;
}
@ -424,6 +449,12 @@ static int dfs_tmpfs_open(struct dfs_file *file)
file->fpos = 0;
}
RT_ASSERT(file->vnode->ref_count > 0);
if(file->vnode->ref_count == 1)
{
rt_mutex_init(&file->vnode->lock, file->dentry->pathname, RT_IPC_FLAG_PRIO);
}
return 0;
}
@ -442,12 +473,13 @@ static int dfs_tmpfs_stat(struct dfs_dentry *dentry, struct stat *st)
st->st_dev = (dev_t)(size_t)(dentry->mnt->dev_id);
st->st_ino = (ino_t)dfs_dentry_full_path_crc32(dentry);
st->st_mode = S_IFREG | S_IRUSR | S_IRGRP | S_IROTH |
S_IWUSR | S_IWGRP | S_IWOTH;
if (d_file->type == TMPFS_TYPE_DIR)
{
st->st_mode &= ~S_IFREG;
st->st_mode |= S_IFDIR | S_IXUSR | S_IXGRP | S_IXOTH;
st->st_mode = S_IFDIR | (S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
}
else
{
st->st_mode = S_IFREG | (S_IRWXU | S_IRWXG | S_IRWXO);
}
st->st_size = d_file->size;
@ -468,13 +500,18 @@ static int dfs_tmpfs_getdents(struct dfs_file *file,
d_file = (struct tmpfs_file *)file->vnode->data;
rt_mutex_take(&file->vnode->lock, RT_WAITING_FOREVER);
superblock = d_file->sb;
RT_ASSERT(superblock != RT_NULL);
/* convert byte count into a whole number of dirent entries */
count = (count / sizeof(struct dirent));
if (count == 0)
{
rt_mutex_release(&file->vnode->lock);
return -EINVAL;
}
end = file->fpos + count;
index = 0;
@ -507,6 +544,7 @@ static int dfs_tmpfs_getdents(struct dfs_file *file,
break;
}
}
rt_mutex_release(&file->vnode->lock);
return count * sizeof(struct dirent);
}
@ -607,13 +645,16 @@ static struct dfs_vnode *_dfs_tmpfs_lookup(struct dfs_dentry *dentry)
{
if (d_file->type == TMPFS_TYPE_DIR)
{
vnode->mode = S_IFDIR | S_IRUSR | S_IWUSR | S_IXUSR;
vnode->mode = S_IFDIR | (S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
vnode->type = FT_DIRECTORY;
}
else
{
vnode->mode = S_IFREG | S_IRUSR | S_IWUSR | S_IXUSR;
vnode->mode = S_IFREG | (S_IRWXU | S_IRWXG | S_IRWXO);
vnode->type = FT_REGULAR;
#ifdef RT_USING_PAGECACHE
vnode->aspace = dfs_aspace_create(dentry, vnode, &dfs_tmp_aspace_ops);
#endif
}
vnode->mnt = dentry->mnt;
@ -681,14 +722,17 @@ static struct dfs_vnode *dfs_tmpfs_create_vnode(struct dfs_dentry *dentry, int t
if (type == FT_DIRECTORY)
{
d_file->type = TMPFS_TYPE_DIR;
vnode->mode = S_IFDIR | mode;
vnode->mode = S_IFDIR | (S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
vnode->type = FT_DIRECTORY;
}
else
{
d_file->type = TMPFS_TYPE_FILE;
vnode->mode = S_IFREG | mode;
vnode->mode = S_IFREG | (S_IRWXU | S_IRWXG | S_IRWXO);
vnode->type = FT_REGULAR;
#ifdef RT_USING_PAGECACHE
vnode->aspace = dfs_aspace_create(dentry, vnode, &dfs_tmp_aspace_ops);
#endif
}
rt_spin_lock(&superblock->lock);
rt_list_insert_after(&(p_file->subdirs), &(d_file->sibling));
@ -713,6 +757,41 @@ static int dfs_tmpfs_free_vnode(struct dfs_vnode *vnode)
return 0;
}
#ifdef RT_USING_PAGECACHE
static ssize_t dfs_tmp_page_read(struct dfs_file *file, struct dfs_page *page)
{
int ret = -EINVAL;
if (page->page)
{
off_t fpos = page->fpos;
ret = dfs_tmpfs_read(file, page->page, page->size, &fpos);
}
return ret;
}
ssize_t dfs_tmp_page_write(struct dfs_page *page)
{
struct tmpfs_file *d_file;
if (page->aspace->vnode->type == FT_DIRECTORY)
{
return -EISDIR;
}
d_file = (struct tmpfs_file *)(page->aspace->vnode->data);
RT_ASSERT(d_file != RT_NULL);
rt_mutex_take(&page->aspace->vnode->lock, RT_WAITING_FOREVER);
if (page->len > 0)
memcpy(d_file->data + page->fpos, page->page, page->len);
rt_mutex_release(&page->aspace->vnode->lock);
return F_OK;
}
#endif
static int dfs_tmpfs_truncate(struct dfs_file *file, off_t offset)
{
struct tmpfs_file *d_file = RT_NULL;


@ -33,6 +33,7 @@ struct rt_pollreq;
struct dirent;
struct lwp_avl_struct;
struct file_lock;
struct dfs_aspace;
struct dfs_file_ops
{
@ -73,6 +74,9 @@ struct dfs_vnode
struct timespec mtime;
struct timespec ctime;
struct dfs_aspace *aspace;
struct rt_mutex lock;
void *data; /* private data of this file system */
};
@ -93,6 +97,8 @@ struct dfs_file
struct dfs_dentry *dentry; /* dentry of this file */
struct dfs_vnode *vnode; /* vnode of this file */
void *mmap_context; /* used by mmap routine */
void *data;
};
@ -122,7 +128,7 @@ struct dfs_vnode *dfs_vnode_ref(struct dfs_vnode *vnode);
void dfs_vnode_unref(struct dfs_vnode *vnode);
/*dfs_file.c*/
#ifdef RT_USING_SMART
struct dfs_mmap2_args
{
void *addr;
@ -131,8 +137,10 @@ struct dfs_mmap2_args
int flags;
off_t pgoffset;
struct rt_lwp *lwp;
void *ret;
};
#endif
void dfs_file_init(struct dfs_file *file);
void dfs_file_deinit(struct dfs_file *file);
@ -166,8 +174,13 @@ int dfs_file_isdir(const char *path);
int dfs_file_access(const char *path, mode_t mode);
int dfs_file_chdir(const char *path);
char *dfs_file_getcwd(char *buf, size_t size);
#ifdef RT_USING_SMART
int dfs_file_mmap2(struct dfs_file *file, struct dfs_mmap2_args *mmap2);
int dfs_file_mmap(struct dfs_file *file, struct dfs_mmap2_args *mmap2);
#endif
/* 0x5254 is just a magic number to make these relatively unique ("RT") */
#define RT_FIOFTRUNCATE 0x52540000U
#define RT_FIOGETADDR 0x52540001U


@ -53,6 +53,7 @@ int dfs_mnt_destroy(struct dfs_mnt* mnt);
int dfs_mnt_list(struct dfs_mnt* mnt);
int dfs_mnt_insert(struct dfs_mnt* mnt, struct dfs_mnt* child);
struct dfs_mnt *dfs_mnt_dev_lookup(rt_device_t dev_id);
struct dfs_mnt *dfs_mnt_lookup(const char *path);
const char *dfs_mnt_get_mounted_path(struct rt_device *device);


@ -0,0 +1,125 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-05-05 RTT Implement page cache in dfs v2.0
*/
#ifndef DFS_PAGE_CACHE_H__
#define DFS_PAGE_CACHE_H__
#include <rtthread.h>
#ifdef RT_USING_PAGECACHE
#include <dfs_file.h>
#include <avl.h>
#ifdef __cplusplus
extern "C"
{
#endif
struct dfs_vnode;
struct dfs_dentry;
struct dfs_aspace;
struct dfs_mmap
{
rt_list_t mmap_node;
struct rt_varea *varea;
};
struct dfs_page
{
rt_list_t space_node;
rt_list_t dirty_node;
struct util_avl_struct avl_node;
rt_list_t mmap_head;
rt_atomic_t ref_count;
void *page;
off_t fpos;
size_t size;
size_t len;
int is_dirty;
rt_tick_t tick_ms;
struct dfs_aspace *aspace;
};
struct dfs_aspace_ops
{
ssize_t (*read)(struct dfs_file *file, struct dfs_page *page);
ssize_t (*write)(struct dfs_page *page);
};
struct dfs_aspace
{
rt_list_t hash_node, cache_node;
char *fullpath, *pathname;
struct dfs_mnt *mnt;
rt_list_t list_active, list_inactive;
rt_list_t list_dirty;
size_t pages_count;
struct util_avl_root avl_root;
struct dfs_page *avl_page;
rt_bool_t is_active;
struct rt_mutex lock;
rt_atomic_t ref_count;
struct dfs_vnode *vnode;
const struct dfs_aspace_ops *ops;
};
#ifndef RT_PAGECACHE_HASH_NR
#define RT_PAGECACHE_HASH_NR 1024
#endif
struct dfs_pcache
{
rt_list_t head[RT_PAGECACHE_HASH_NR];
rt_list_t list_active, list_inactive;
rt_atomic_t pages_count;
struct rt_mutex lock;
struct rt_messagequeue *mqueue;
rt_tick_t last_time_wb;
};
struct dfs_aspace *dfs_aspace_create(struct dfs_dentry *dentry, struct dfs_vnode *vnode, const struct dfs_aspace_ops *ops);
int dfs_aspace_destroy(struct dfs_aspace *aspace);
int dfs_aspace_read(struct dfs_file *file, void *buf, size_t count, off_t *pos);
int dfs_aspace_write(struct dfs_file *file, const void *buf, size_t count, off_t *pos);
int dfs_aspace_flush(struct dfs_aspace *aspace);
int dfs_aspace_clean(struct dfs_aspace *aspace);
void *dfs_aspace_mmap(struct dfs_file *file, struct rt_varea *varea, void *vaddr);
int dfs_aspace_unmap(struct dfs_file *file, struct rt_varea *varea);
int dfs_aspace_page_unmap(struct dfs_file *file, struct rt_varea *varea, void *vaddr);
int dfs_aspace_page_dirty(struct dfs_file *file, struct rt_varea *varea, void *vaddr);
off_t dfs_aspace_fpos(struct rt_varea *varea, void *vaddr);
void *dfs_aspace_vaddr(struct rt_varea *varea, off_t fpos);
int dfs_aspace_mmap_read(struct dfs_file *file, struct rt_varea *varea, void *data);
int dfs_aspace_mmap_write(struct dfs_file *file, struct rt_varea *varea, void *data);
void dfs_pcache_release(size_t count);
void dfs_pcache_unmount(struct dfs_mnt *mnt);
#ifdef __cplusplus
}
#endif
#endif
#endif
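A filesystem driver opts in to this cache the same way the elm and tmpfs changes in this commit do: implement the two page callbacks, publish them through a struct dfs_aspace_ops, and attach an aspace to regular-file vnodes in its lookup/create paths. A condensed sketch of that pattern (the myfs_* names are placeholders):

#ifdef RT_USING_PAGECACHE
static ssize_t myfs_page_read(struct dfs_file *file, struct dfs_page *page);
static ssize_t myfs_page_write(struct dfs_page *page);

static struct dfs_aspace_ops myfs_aspace_ops =
{
    .read = myfs_page_read,
    .write = myfs_page_write,
};
#endif

/* in lookup()/create_vnode(), for FT_REGULAR vnodes only */
#ifdef RT_USING_PAGECACHE
vnode->aspace = dfs_aspace_create(dentry, vnode, &myfs_aspace_ops);
#endif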


@ -0,0 +1,69 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#ifndef __DFS_SEQ_FILE_H__
#define __DFS_SEQ_FILE_H__
#include <dfs.h>
#include <dfs_fs.h>
struct dfs_seq_ops;
struct dfs_seq_file
{
char *buf;
size_t size;
size_t from;
size_t count;
size_t pad_until;
off_t index;
off_t read_pos;
struct rt_mutex lock;
const struct dfs_seq_ops *ops;
const struct dfs_file *file;
void *data;
};
struct dfs_seq_ops
{
void *(*start)(struct dfs_seq_file *seq, off_t *index);
void (*stop)(struct dfs_seq_file *seq, void *data);
void *(*next)(struct dfs_seq_file *seq, void *data, off_t *index);
int (*show)(struct dfs_seq_file *seq, void *data);
};
/**
* check if the buffer is full
*/
static inline rt_bool_t dfs_seq_is_full(struct dfs_seq_file *seq)
{
return seq->count == seq->size;
}
/**
* set padding width size
*/
static inline void dfs_seq_setwidth(struct dfs_seq_file *seq, size_t size)
{
seq->pad_until = seq->count + size;
}
int dfs_seq_open(struct dfs_file *file, const struct dfs_seq_ops *ops);
ssize_t dfs_seq_read(struct dfs_file *file, void *buf, size_t size, off_t *pos);
ssize_t dfs_seq_lseek(struct dfs_file *file, off_t offset, int whence);
int dfs_seq_release(struct dfs_file *file);
int dfs_seq_write(struct dfs_seq_file *seq, const void *data, size_t len);
void dfs_seq_vprintf(struct dfs_seq_file *seq, const char *fmt, va_list args);
void dfs_seq_printf(struct dfs_seq_file *seq, const char *fmt, ...);
void dfs_seq_putc(struct dfs_seq_file *seq, char c);
void dfs_seq_puts(struct dfs_seq_file *seq, const char *s);
void dfs_seq_pad(struct dfs_seq_file *seq, char c);
#endif


@ -197,7 +197,7 @@ struct dfs_dentry *dfs_dentry_lookup(struct dfs_mnt *mnt, const char *path, uint
path = "/";
}
}
dfs_file_lock();
dentry = _dentry_hash_lookup(mnt, path);
if (!dentry)
{
@ -252,7 +252,7 @@ struct dfs_dentry *dfs_dentry_lookup(struct dfs_mnt *mnt, const char *path, uint
{
DLOG(note, "dentry", "found dentry");
}
dfs_file_unlock();
return dentry;
}


@ -19,6 +19,10 @@
#include "dfs_mnt.h"
#include "dfs_private.h"
#ifdef RT_USING_PAGECACHE
#include "dfs_pcache.h"
#endif
#define DBG_TAG "DFS.file"
#define DBG_LVL DBG_WARNING
#include <rtdbg.h>
@ -114,7 +118,7 @@ static void dfs_file_unref(struct dfs_file *file)
{
if (file->vnode->ref_count > 1)
{
file->vnode->ref_count--;
rt_atomic_sub(&(file->vnode->ref_count), 1);
}
else if (file->vnode->ref_count == 1)
{
@ -448,6 +452,7 @@ int dfs_file_open(struct dfs_file *file, const char *path, int oflags, mode_t mo
struct dfs_vnode *vnode = RT_NULL;
DLOG(msg, "dfs_file", "dentry", DLOG_MSG, "dfs_dentry_create(%s)", fullpath);
dfs_file_lock();
dentry = dfs_dentry_create(mnt, fullpath);
if (dentry)
{
@ -472,6 +477,7 @@ int dfs_file_open(struct dfs_file *file, const char *path, int oflags, mode_t mo
dentry = RT_NULL;
}
}
dfs_file_unlock();
}
}
}
@ -518,7 +524,9 @@ int dfs_file_open(struct dfs_file *file, const char *path, int oflags, mode_t mo
if (dfs_is_mounted(file->vnode->mnt) == 0)
{
dfs_file_lock();
ret = file->fops->open(file);
dfs_file_unlock();
}
else
{
@ -572,6 +580,12 @@ int dfs_file_open(struct dfs_file *file, const char *path, int oflags, mode_t mo
if (dfs_is_mounted(file->vnode->mnt) == 0)
{
#ifdef RT_USING_PAGECACHE
if (file->vnode->aspace)
{
dfs_aspace_clean(file->vnode->aspace);
}
#endif
ret = file->fops->truncate(file, 0);
}
else
@ -612,7 +626,12 @@ int dfs_file_close(struct dfs_file *file)
if (ref_count == 1 && file->fops && file->fops->close)
{
DLOG(msg, "dfs_file", file->dentry->mnt->fs_ops->name, DLOG_MSG, "fops->close(file)");
#ifdef RT_USING_PAGECACHE
if (file->vnode->aspace)
{
dfs_aspace_flush(file->vnode->aspace);
}
#endif
ret = file->fops->close(file);
if (ret == 0) /* close file successfully */
@ -665,8 +684,17 @@ ssize_t dfs_file_read(struct dfs_file *file, void *buf, size_t len)
if (dfs_is_mounted(file->vnode->mnt) == 0)
{
#ifdef RT_USING_PAGECACHE
if (file->vnode->aspace)
{
ret = dfs_aspace_read(file, buf, len, &pos);
}
else
#endif
{
ret = file->fops->read(file, buf, len, &pos);
}
}
else
{
ret = -EINVAL;
@ -710,8 +738,17 @@ ssize_t dfs_file_write(struct dfs_file *file, const void *buf, size_t len)
if (dfs_is_mounted(file->vnode->mnt) == 0)
{
#ifdef RT_USING_PAGECACHE
if (file->vnode->aspace)
{
ret = dfs_aspace_write(file, buf, len, &pos);
}
else
#endif
{
ret = file->fops->write(file, buf, len, &pos);
}
}
else
{
ret = -EINVAL;
@ -1047,6 +1084,12 @@ int dfs_file_fsync(struct dfs_file *file)
{
if (dfs_is_mounted(file->vnode->mnt) == 0)
{
#ifdef RT_USING_PAGECACHE
if (file->vnode->aspace)
{
dfs_aspace_flush(file->vnode->aspace);
}
#endif
ret = file->fops->flush(file);
}
else
@ -1089,6 +1132,14 @@ int dfs_file_unlink(const char *path)
rt_bool_t has_child = RT_FALSE;
has_child = dfs_mnt_has_child_mnt(mnt, fullpath);
#ifdef RT_USING_PAGECACHE
if (dentry->vnode->aspace)
{
dfs_aspace_clean(dentry->vnode->aspace);
}
#endif
dfs_file_lock();
if (has_child == RT_FALSE)
{
/* no child mnt point, unlink it */
@ -1106,6 +1157,7 @@ int dfs_file_unlink(const char *path)
{
ret = -EBUSY;
}
dfs_file_unlock();
/* release this dentry */
dfs_dentry_unref(dentry);
@ -1467,6 +1519,12 @@ int dfs_file_rename(const char *old_file, const char *new_file)
{
if (dfs_is_mounted(mnt) == 0)
{
#ifdef RT_USING_PAGECACHE
if (old_dentry->vnode->aspace)
{
dfs_aspace_clean(old_dentry->vnode->aspace);
}
#endif
ret = mnt->fs_ops->rename(old_dentry, new_dentry);
}
}
@ -1499,6 +1557,12 @@ int dfs_file_ftruncate(struct dfs_file *file, off_t length)
{
if (dfs_is_mounted(file->vnode->mnt) == 0)
{
#ifdef RT_USING_PAGECACHE
if (file->vnode->aspace)
{
dfs_aspace_clean(file->vnode->aspace);
}
#endif
ret = file->fops->truncate(file, length);
}
else
@ -1529,6 +1593,12 @@ int dfs_file_flush(struct dfs_file *file)
{
if (dfs_is_mounted(file->vnode->mnt) == 0)
{
#ifdef RT_USING_PAGECACHE
if (file->vnode->aspace)
{
dfs_aspace_flush(file->vnode->aspace);
}
#endif
ret = file->fops->flush(file);
}
else
@ -1669,13 +1739,23 @@ int dfs_file_access(const char *path, mode_t mode)
return ret;
}
#ifdef RT_USING_SMART
int dfs_file_mmap2(struct dfs_file *file, struct dfs_mmap2_args *mmap2)
{
int ret = 0;
int ret = RT_EOK;
if (file && mmap2)
{
if (file->vnode->type != FT_DEVICE || !file->vnode->fops->ioctl)
if (file->vnode->type == FT_REGULAR)
{
ret = dfs_file_mmap(file, mmap2);
if (ret != 0)
{
ret = ret > 0 ? ret : -ret;
rt_set_errno(ret);
}
}
else if (file->vnode->type != FT_DEVICE || !file->vnode->fops->ioctl)
{
rt_set_errno(EINVAL);
}
@ -1700,6 +1780,7 @@ int dfs_file_mmap2(struct dfs_file *file, struct dfs_mmap2_args *mmap2)
return ret;
}
#endif
#ifdef RT_USING_FINSH


@ -0,0 +1,443 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#include "dfs_file.h"
#include "dfs_dentry.h"
#include "dfs_mnt.h"
#define DBG_TAG "dfs.mmap"
#define DBG_LVL DBG_WARNING
#include <rtdbg.h>
#if defined(RT_USING_SMART) && defined(ARCH_MM_MMU) && defined(RT_USING_PAGECACHE)
#include "dfs_pcache.h"
#include <lwp.h>
#include <sys/mman.h>
#include <lwp_user_mm.h>
#include <mm_aspace.h>
#include <mm_fault.h>
#include <mm_flag.h>
#include <mm_page.h>
#include <mmu.h>
#include <page.h>
#include <tlb.h>
static rt_mem_obj_t dfs_get_mem_obj(struct dfs_file *file);
static void *dfs_mem_obj_get_file(rt_mem_obj_t mem_obj);
static rt_varea_t _dfs_map_user_varea_data(struct rt_lwp *lwp, void *map_vaddr, size_t map_size, size_t attr, mm_flag_t flags, off_t pgoffset, void *data)
{
int ret = 0;
rt_varea_t varea;
void *vaddr = map_vaddr;
rt_mem_obj_t mem_obj = dfs_get_mem_obj(data);
ret = rt_aspace_map(lwp->aspace, &vaddr, map_size,
attr, flags, mem_obj, pgoffset);
if (ret != RT_EOK)
{
varea = RT_NULL;
}
else
{
varea = rt_aspace_query(lwp->aspace, vaddr);
}
if (ret != RT_EOK)
{
LOG_E("failed to map %lx with size %lx with errno %d", map_vaddr,
map_size, ret);
}
return varea;
}
static rt_varea_t dfs_map_user_varea_data(struct dfs_mmap2_args *mmap2, void *data)
{
rt_varea_t varea = RT_NULL;
size_t offset = 0;
void *map_vaddr = mmap2->addr;
size_t map_size = mmap2->length;
struct rt_lwp *lwp = mmap2->lwp;
rt_size_t k_attr;
rt_size_t k_flags;
if (map_size)
{
offset = (size_t)map_vaddr & ARCH_PAGE_MASK;
map_size += (offset + ARCH_PAGE_SIZE - 1);
map_size &= ~ARCH_PAGE_MASK;
map_vaddr = (void *)((size_t)map_vaddr & ~ARCH_PAGE_MASK);
k_flags = lwp_user_mm_flag_to_kernel(mmap2->flags);
k_attr = lwp_user_mm_attr_to_kernel(mmap2->prot);
varea = _dfs_map_user_varea_data(lwp, map_vaddr, map_size, k_attr, k_flags, mmap2->pgoffset, data);
}
return varea;
}
static void hint_free(rt_mm_va_hint_t hint)
{
}
static void on_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
{
void *page;
struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);
if (file)
{
LOG_I("%s varea: %p", __func__, varea);
LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
varea->start, varea->size, varea->offset, varea->attr, varea->flag);
LOG_I("fault vaddr: %p", msg->fault_vaddr);
if (file->dentry)
{
LOG_I("file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
}
page = dfs_aspace_mmap(file, varea, msg->fault_vaddr);
if (page)
{
msg->response.status = MM_FAULT_STATUS_OK_MAPPED;
msg->response.size = ARCH_PAGE_SIZE;
msg->response.vaddr = page;
}
else
{
LOG_E("%s varea %p mmap failed at vaddr %p", __func__, varea, msg->fault_vaddr);
}
}
else
{
LOG_E("%s varea %p not a file, vaddr %p", __func__, varea, varea->start);
}
}
/* do pre-open business, e.g. increase the reference count */
static void on_varea_open(struct rt_varea *varea)
{
struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);
varea->data = RT_NULL;
rt_atomic_add(&(file->ref_count), 1);
}
/* do post-close business, e.g. decrease the reference count */
static void on_varea_close(struct rt_varea *varea)
{
struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);
if (file)
{
LOG_I("%s varea: %p", __func__, varea);
LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
varea->start, varea->size, varea->offset, varea->attr, varea->flag);
if (file->dentry)
{
LOG_I("file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
}
dfs_aspace_unmap(file, varea);
dfs_file_lock();
if (rt_atomic_load(&(file->ref_count)) == 1)
{
dfs_file_close(file);
}
else
{
rt_atomic_sub(&(file->ref_count), 1);
}
dfs_file_unlock();
}
else
{
LOG_E("%s varea %p not a file, vaddr %p", __func__, varea, varea->start);
}
}
static const char *get_name(rt_varea_t varea)
{
struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);
return (file && file->dentry) ? file->dentry->pathname : "file-mapper";
}
void page_read(struct rt_varea *varea, struct rt_aspace_io_msg *msg)
{
rt_ubase_t ret;
struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);
if (file)
{
LOG_I("%s varea: %p", __func__, varea);
LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
varea->start, varea->size, varea->offset, varea->attr, varea->flag);
ret = dfs_aspace_mmap_read(file, varea, msg);
if (ret > 0)
{
msg->response.status = MM_FAULT_STATUS_OK;
if (ret < ARCH_PAGE_SIZE)
{
memset((char *)msg->buffer_vaddr + ret, 0, ARCH_PAGE_SIZE - ret);
}
}
}
else
{
LOG_E("%s varea %p not a file, vaddr %p", __func__, varea, varea->start);
}
}
void page_write(struct rt_varea *varea, struct rt_aspace_io_msg *msg)
{
rt_ubase_t ret;
struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);
if (file)
{
LOG_I("%s varea: %p", __func__, varea);
LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
varea->start, varea->size, varea->offset, varea->attr, varea->flag);
ret = dfs_aspace_mmap_write(file, varea, msg);
if (ret > 0)
{
msg->response.status = MM_FAULT_STATUS_OK;
if (ret < ARCH_PAGE_SIZE)
{
memset((char *)msg->buffer_vaddr + ret, 0, ARCH_PAGE_SIZE - ret);
}
}
}
else
{
LOG_E("%s varea %p not a file, vaddr %p", __func__, varea, varea->start);
}
}
static rt_err_t unmap_pages(rt_varea_t varea, void *rm_start, void *rm_end)
{
struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);
if (file)
{
LOG_I("%s varea: %p start: %p end: %p", __func__, varea, rm_start, rm_end);
RT_ASSERT(!((rt_ubase_t)rm_start & ARCH_PAGE_MASK));
RT_ASSERT(!((rt_ubase_t)rm_end & ARCH_PAGE_MASK));
while (rm_start != rm_end)
{
dfs_aspace_page_unmap(file, varea, rm_start);
rm_start += ARCH_PAGE_SIZE;
}
return RT_EOK;
}
else
{
LOG_E("%s varea %p not a file, vaddr %p", __func__, varea, varea->start);
}
return -RT_ERROR;
}
rt_err_t on_varea_shrink(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
{
char *varea_start = varea->start;
void *rm_start;
void *rm_end;
LOG_I("%s varea: %p", __func__, varea);
LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
varea->start, varea->size, varea->offset, varea->attr, varea->flag);
LOG_I("new_vaddr: %p size: %p", new_vaddr, size);
if (varea_start == (char *)new_vaddr)
{
rm_start = varea_start + size;
rm_end = varea_start + varea->size;
}
else if (varea_start < (char *)new_vaddr)
{
rm_start = varea_start;
rm_end = new_vaddr;
}
return unmap_pages(varea, rm_start, rm_end);
}
rt_err_t on_varea_expand(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
{
LOG_I("%s varea: %p", __func__, varea);
LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
varea->start, varea->size, varea->offset, varea->attr, varea->flag);
LOG_I("new_vaddr: %p size: %p", new_vaddr, size);
return RT_EOK;
}
rt_err_t on_varea_split(struct rt_varea *existed, void *unmap_start, rt_size_t unmap_len, struct rt_varea *subset)
{
struct dfs_file *file = dfs_mem_obj_get_file(existed->mem_obj);
if (file)
{
LOG_I("%s varea: %p", __func__, existed);
LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
existed->start, existed->size, existed->offset, existed->attr, existed->flag);
LOG_I("unmap_start: %p unmap_len: %p", unmap_start, unmap_len);
if (file->dentry)
{
LOG_I("file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
}
unmap_pages(existed, unmap_start, (char *)unmap_start + unmap_len);
subset->data = existed->data;
rt_atomic_add(&(file->ref_count), 1);
return RT_EOK;
}
else
{
LOG_E("%s varea %p not a file, vaddr %p", __func__, existed, existed->start);
}
return -RT_ERROR;
}
rt_err_t on_varea_merge(struct rt_varea *merge_to, struct rt_varea *merge_from)
{
struct dfs_file *file = dfs_mem_obj_get_file(merge_from->mem_obj);
if (file)
{
LOG_I("%s varea: %p", __func__, merge_from);
LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
merge_from->start, merge_from->size, merge_from->offset, merge_from->attr, merge_from->flag);
if (file->dentry)
{
LOG_I("file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
}
dfs_aspace_unmap(file, merge_from);
merge_from->data = RT_NULL;
rt_atomic_sub(&(file->ref_count), 1);
return RT_EOK;
}
else
{
LOG_E("%s varea %p not a file, vaddr %p", __func__, merge_from, merge_from->start);
}
return -RT_ERROR;
}
static struct rt_mem_obj _mem_obj =
{
.hint_free = hint_free,
.on_page_fault = on_page_fault,
.on_varea_open = on_varea_open,
.on_varea_close = on_varea_close,
.get_name = get_name,
.page_read = page_read,
.page_write = page_write,
.on_varea_shrink = on_varea_shrink,
.on_varea_expand = on_varea_expand,
.on_varea_split = on_varea_split,
.on_varea_merge = on_varea_merge,
};
struct dfs_mem_obj {
struct rt_mem_obj mem_obj;
void *file;
};
static rt_mem_obj_t dfs_get_mem_obj(struct dfs_file *file)
{
rt_mem_obj_t mobj = file->mmap_context;
if (!mobj)
{
struct dfs_mem_obj *dfs_mobj;
dfs_file_lock();
dfs_mobj = rt_malloc(sizeof(*dfs_mobj));
if (dfs_mobj)
{
dfs_mobj->file = file;
mobj = &dfs_mobj->mem_obj;
memcpy(mobj, &_mem_obj, sizeof(*mobj));
file->mmap_context = mobj;
}
dfs_file_unlock();
}
return mobj;
}
static void *dfs_mem_obj_get_file(rt_mem_obj_t mem_obj)
{
struct dfs_mem_obj *dfs_mobj;
dfs_mobj = rt_container_of(mem_obj, struct dfs_mem_obj, mem_obj);
return dfs_mobj->file;
}
int dfs_file_mmap(struct dfs_file *file, struct dfs_mmap2_args *mmap2)
{
int ret = -EINVAL;
LOG_I("mmap2 args addr: %p length: 0x%x prot: %d flags: 0x%x pgoffset: 0x%x",
mmap2->addr, mmap2->length, mmap2->prot, mmap2->flags, mmap2->pgoffset);
if (file && file->vnode && file->vnode->aspace)
{
/* create a va area in user space (lwp) */
rt_varea_t varea = dfs_map_user_varea_data(mmap2, file);
if (varea)
{
mmap2->ret = varea->start;
LOG_I("%s varea: %p", __func__, varea);
LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
varea->start, varea->size, varea->offset, varea->attr, varea->flag);
LOG_I("file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
ret = RT_EOK;
}
else
{
ret = -ENOMEM;
}
}
else if (file->vnode->aspace == RT_NULL)
{
LOG_E("File mapping is not supported, file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
}
return ret;
}
#else
int dfs_file_mmap(struct dfs_file *file, struct dfs_mmap2_args *mmap2)
{
LOG_E("File mapping support is not enabled, file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
LOG_E("mmap2 args addr: %p length: 0x%x prot: %d flags: 0x%x pgoffset: 0x%x",
mmap2->addr, mmap2->length, mmap2->prot, mmap2->flags, mmap2->pgoffset);
return -EPERM;
}
#endif
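From a user process, the machinery above is driven by an ordinary POSIX mmap() call; a minimal consumer might look like this (the path and size are illustrative):

#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int fd = open("/mnt/data.bin", O_RDONLY);
if (fd >= 0)
{
    /* page faults on p are served from the page cache via on_page_fault() */
    void *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
    if (p != MAP_FAILED)
    {
        munmap(p, 4096);
    }
    close(fd);
}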


@ -19,6 +19,10 @@
#include <dfs_mnt.h>
#include "dfs_private.h"
#ifdef RT_USING_PAGECACHE
#include "dfs_pcache.h"
#endif
#define DBG_TAG "DFS.fs"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
@ -314,6 +318,9 @@ int dfs_umount(const char *specialfile, int flags)
if (!(mnt->flags & MNT_IS_LOCKED) && rt_list_isempty(&mnt->child) && (ref_count == 1 || (flags & MNT_FORCE)))
{
#ifdef RT_USING_PAGECACHE
dfs_pcache_unmount(mnt);
#endif
/* destroy this mount point */
DLOG(msg, "dfs", "mnt", DLOG_MSG, "dfs_mnt_destroy(mnt)");
ret = dfs_mnt_destroy(mnt);
@ -396,6 +403,18 @@ int dfs_mkfs(const char *fs_name, const char *device_name)
if (type->fs_ops->mkfs)
{
ret = type->fs_ops->mkfs(dev_id, type->fs_ops->name);
#ifdef RT_USING_PAGECACHE
if (ret == RT_EOK)
{
struct dfs_mnt *mnt = RT_NULL;
mnt = dfs_mnt_dev_lookup(dev_id);
if (mnt)
{
dfs_pcache_unmount(mnt);
}
}
#endif
}
return ret;


@ -131,6 +131,53 @@ int dfs_mnt_remove(struct dfs_mnt* mnt)
return ret;
}
static struct dfs_mnt *_dfs_mnt_dev_lookup(struct dfs_mnt *mnt, rt_device_t dev_id)
{
struct dfs_mnt *ret = RT_NULL, *iter = RT_NULL;
rt_list_for_each_entry(iter, &mnt->child, sibling)
{
if (iter->dev_id == dev_id)
{
ret = iter;
break;
}
else
{
ret = _dfs_mnt_dev_lookup(iter, dev_id);
if (ret)
{
break;
}
}
}
return ret;
}
struct dfs_mnt *dfs_mnt_dev_lookup(rt_device_t dev_id)
{
struct dfs_mnt *mnt = _root_mnt;
struct dfs_mnt *ret = RT_NULL;
if (mnt)
{
dfs_lock();
if (mnt->dev_id == dev_id)
{
dfs_unlock();
return mnt;
}
ret = _dfs_mnt_dev_lookup(mnt, dev_id);
dfs_unlock();
}
return ret;
}
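The call site added to dfs_mkfs() earlier in this commit shows the intended use of this helper: after re-formatting a device, look up the mount backed by it and drop its stale cached pages.

struct dfs_mnt *mnt = dfs_mnt_dev_lookup(dev_id);
if (mnt)
{
    dfs_pcache_unmount(mnt);
}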
/**
* this function will return the file system mounted on specified path.
*
@ -139,24 +186,26 @@ int dfs_mnt_remove(struct dfs_mnt* mnt)
* @return the found file system or NULL if no file system mounted on
* specified path
*/
struct dfs_mnt* dfs_mnt_lookup(const char* fullpath)
struct dfs_mnt *dfs_mnt_lookup(const char *fullpath)
{
struct dfs_mnt *mnt = _root_mnt;
struct dfs_mnt *iter = RT_NULL;
if (mnt)
{
int mnt_len = rt_strlen(mnt->fullpath);
dfs_lock();
if (strncmp(mnt->fullpath, fullpath, strlen(fullpath))!= 0)
if ((strncmp(mnt->fullpath, fullpath, mnt_len) == 0) &&
(mnt_len == 1 || (fullpath[mnt_len] == '\0') || (fullpath[mnt_len] == '/')))
{
while (!rt_list_isempty(&mnt->child))
{
rt_list_for_each_entry(iter, &mnt->child, sibling)
{
int mnt_len = rt_strlen(iter->fullpath);
mnt_len = rt_strlen(iter->fullpath);
if ((strncmp(iter->fullpath, fullpath, mnt_len) == 0) &&
((fullpath[mnt_len] == '\0') ||
(fullpath[mnt_len] == '/')))
((fullpath[mnt_len] == '\0') || (fullpath[mnt_len] == '/')))
{
mnt = iter;
break;
@ -166,6 +215,10 @@ struct dfs_mnt* dfs_mnt_lookup(const char* fullpath)
if (mnt != iter) break;
}
}
else
{
mnt = RT_NULL;
}
dfs_unlock();
if (mnt)
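The component-boundary check added above is what keeps the prefix match honest: with mounts "/" and "/mnt/sd" (paths illustrative), dfs_mnt_lookup("/mnt/sd/log.txt") resolves to "/mnt/sd" because the character after the matched prefix is '/', while dfs_mnt_lookup("/mnt/sdcard") falls back to "/" because 'c' follows the prefix.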

File diff suppressed because it is too large.


@ -1216,53 +1216,46 @@ FINSH_FUNCTION_EXPORT_ALIAS(chdir, cd, change current working directory);
*/
int access(const char *path, int amode)
{
int fd, ret = -1, flags = 0;
struct stat sb;
struct stat st;
if (path == NULL)
{
rt_set_errno(-EBADF);
rt_set_errno(-EINVAL);
return -1;
}
if (stat(path, &st) < 0)
{
rt_set_errno(-ENOENT);
return -1;
}
if (amode == F_OK)
{
if (stat(path, &sb) < 0)
return -1; /* already sets errno */
else
return 0;
}
/* ignore R_OK,W_OK,X_OK condition */
if (dfs_file_isdir(path) == 0)
if ((amode & R_OK) && !(st.st_mode & S_IRUSR))
{
flags |= O_DIRECTORY;
rt_set_errno(-EACCES);
return -1;
}
if (amode & R_OK)
if ((amode & W_OK) && !(st.st_mode & S_IWUSR))
{
flags |= O_RDONLY;
rt_set_errno(-EACCES);
return -1;
}
if (amode & W_OK)
if ((amode & X_OK) && !(st.st_mode & S_IXUSR))
{
flags |= O_WRONLY;
rt_set_errno(-EACCES);
return -1;
}
if (amode & X_OK)
{
flags |= O_EXEC;
}
fd = open(path, flags, 0);
if (fd >= 0)
{
ret = 0;
close(fd);
}
return ret;
return 0;
}
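A quick check of the rewritten behavior using standard POSIX calls (the path is illustrative); note that only the owner permission bits are consulted:

#include <unistd.h>

if (access("/etc/fstab", F_OK) == 0) /* the file exists */
{
    if (access("/etc/fstab", R_OK | W_OK) == 0) /* S_IRUSR and S_IWUSR are set */
    {
        /* safe to open for read/write */
    }
}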
/**
* this function is a POSIX compliant version, which will set current
* working directory.


@ -0,0 +1,494 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#include <dfs_seq_file.h>
#include <dfs_dentry.h>
#define DBG_TAG "DFS.seq"
#define DBG_LVL DBG_WARNING
#include <rtdbg.h>
#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif
static void dfs_seq_overflow(struct dfs_seq_file *seq)
{
seq->count = seq->size;
}
static void *dfs_seq_alloc(unsigned long size)
{
return rt_calloc(1, size);
}
int dfs_seq_open(struct dfs_file *file, const struct dfs_seq_ops *ops)
{
struct dfs_seq_file *seq;
if (!ops)
{
LOG_E("dfs_seq_open: ops = null, pathname: %s\n", file->dentry->pathname);
return -EINVAL;
}
if (file->data)
{
LOG_W("dfs_seq_open: file->data != null\n");
}
seq = rt_calloc(1, sizeof(struct dfs_seq_file));
if (!seq)
return -ENOMEM;
file->data = seq;
rt_mutex_init(&seq->lock, "dfs_seq", RT_IPC_FLAG_PRIO);
seq->ops = ops;
seq->file = file;
return 0;
}
static int dfs_seq_traverse(struct dfs_seq_file *seq, off_t offset)
{
off_t pos = 0;
int error = 0;
void *p;
seq->index = 0;
seq->count = seq->from = 0;
if (!offset)
return 0;
if (!seq->buf)
{
seq->buf = dfs_seq_alloc(seq->size = PAGE_SIZE);
if (!seq->buf)
return -ENOMEM;
}
p = seq->ops->start(seq, &seq->index);
while (p)
{
error = seq->ops->show(seq, p);
if (error < 0)
break;
if (error)
{
error = 0;
seq->count = 0;
}
if (dfs_seq_is_full(seq))
goto Eoverflow;
p = seq->ops->next(seq, p, &seq->index);
if (pos + seq->count > offset)
{
seq->from = offset - pos;
seq->count -= seq->from;
break;
}
pos += seq->count;
seq->count = 0;
if (pos == offset)
break;
}
seq->ops->stop(seq, p);
return error;
Eoverflow:
seq->ops->stop(seq, p);
rt_free(seq->buf);
seq->count = 0;
seq->buf = dfs_seq_alloc(seq->size <<= 1);
return !seq->buf ? -ENOMEM : -EAGAIN;
}
ssize_t dfs_seq_read(struct dfs_file *file, void *buf, size_t size, off_t *pos)
{
struct dfs_seq_file *seq = file->data;
size_t copied = 0;
size_t n;
void *p;
int err = 0;
if (!size)
return 0;
rt_mutex_take(&seq->lock, RT_WAITING_FOREVER);
/*
* if request is to read from zero offset, reset iterator to first
* record as it might have been already advanced by previous requests
*/
if (*pos == 0)
{
seq->index = 0;
seq->count = 0;
}
/* Don't assume *pos is where we left it */
if (*pos != seq->read_pos)
{
while ((err = dfs_seq_traverse(seq, *pos)) == -EAGAIN)
;
if (err)
{
/* With prejudice... */
seq->read_pos = 0;
seq->index = 0;
seq->count = 0;
goto Done;
}
else
{
seq->read_pos = *pos;
}
}
/* grab buffer if we didn't have one */
if (!seq->buf)
{
seq->buf = dfs_seq_alloc(seq->size = PAGE_SIZE);
if (!seq->buf)
goto Enomem;
}
// something left in the buffer - copy it out first
if (seq->count)
{
n = seq->count > size ? size : seq->count;
rt_memcpy((char *)buf + copied, seq->buf + seq->from, n);
size -= n;
seq->count -= n;
seq->from += n;
copied += n;
if (seq->count) // hadn't managed to copy everything
goto Done;
}
// get a non-empty record in the buffer
seq->from = 0;
p = seq->ops->start(seq, &seq->index);
while (p)
{
err = seq->ops->show(seq, p);
if (err < 0) // hard error
break;
if (err) // ->show() says "skip it"
seq->count = 0;
if (!seq->count)
{ // empty record
p = seq->ops->next(seq, p, &seq->index);
continue;
}
if (!dfs_seq_is_full(seq)) // got it
goto Fill;
// need a bigger buffer
seq->ops->stop(seq, p);
rt_free(seq->buf);
seq->count = 0;
seq->buf = dfs_seq_alloc(seq->size <<= 1);
if (!seq->buf)
goto Enomem;
p = seq->ops->start(seq, &seq->index);
}
// EOF or an error
seq->ops->stop(seq, p);
seq->count = 0;
goto Done;
Fill:
// one non-empty record is in the buffer; if they want more,
// try to fit more in, but in any case we need to advance
// the iterator once for every record shown.
while (1)
{
size_t offs = seq->count;
off_t pos = seq->index;
p = seq->ops->next(seq, p, &seq->index);
if (pos == seq->index)
{
LOG_W(".next function %p did not update position index\n", seq->ops->next);
seq->index++;
}
if (!p) // no next record for us
break;
if (seq->count >= size)
break;
err = seq->ops->show(seq, p);
if (err > 0)
{ // ->show() says "skip it"
seq->count = offs;
}
else if (err || dfs_seq_is_full(seq))
{
seq->count = offs;
break;
}
}
seq->ops->stop(seq, p);
n = seq->count > size ? size : seq->count;
rt_memcpy((char *)buf + copied, seq->buf, n);
size -= n;
copied += n;
seq->count -= n;
seq->from = n;
Done:
if (!copied)
{
copied = seq->count ? -EFAULT : err;
}
else
{
*pos += copied;
seq->read_pos += copied;
}
rt_mutex_release(&seq->lock);
return copied;
Enomem:
err = -ENOMEM;
goto Done;
}
ssize_t dfs_seq_lseek(struct dfs_file *file, off_t offset, int whence)
{
struct dfs_seq_file *seq = file->data;
off_t retval = -EINVAL;
rt_mutex_take(&seq->lock, RT_WAITING_FOREVER);
switch (whence)
{
case SEEK_CUR:
offset += file->fpos;
case SEEK_SET:
if (offset < 0)
break;
retval = offset;
if (offset != seq->read_pos)
{
while ((retval = dfs_seq_traverse(seq, offset)) == -EAGAIN);
if (retval)
{
/* with extreme prejudice... */
retval = 0;
seq->read_pos = 0;
seq->index = 0;
seq->count = 0;
}
else
{
seq->read_pos = offset;
retval = offset;
}
}
}
rt_mutex_release(&seq->lock);
return retval;
}
int dfs_seq_release(struct dfs_file *file)
{
struct dfs_seq_file *seq = file->data;
if (seq)
{
rt_mutex_detach(&seq->lock);
if (seq->buf)
{
rt_free(seq->buf);
}
rt_free(seq);
}
return 0;
}
void dfs_seq_vprintf(struct dfs_seq_file *seq, const char *f, va_list args)
{
int len;
if (seq->count < seq->size)
{
len = vsnprintf(seq->buf + seq->count, seq->size - seq->count, f, args);
if (seq->count + len < seq->size)
{
seq->count += len;
return;
}
}
dfs_seq_overflow(seq);
}
void dfs_seq_printf(struct dfs_seq_file *seq, const char *f, ...)
{
va_list args;
va_start(args, f);
dfs_seq_vprintf(seq, f, args);
va_end(args);
}
/**
* write char to buffer
*/
void dfs_seq_putc(struct dfs_seq_file *seq, char c)
{
if (seq->count < seq->size)
{
seq->buf[seq->count++] = c;
}
}
/**
* write string to buffer
*/
void dfs_seq_puts(struct dfs_seq_file *seq, const char *s)
{
int len = strlen(s);
if (seq->count + len >= seq->size)
{
dfs_seq_overflow(seq);
return;
}
rt_memcpy(seq->buf + seq->count, s, len);
seq->count += len;
}
/**
* write arbitrary data to buffer
*/
int dfs_seq_write(struct dfs_seq_file *seq, const void *data, size_t len)
{
if (seq->count + len < seq->size)
{
rt_memcpy(seq->buf + seq->count, data, len);
seq->count += len;
return 0;
}
dfs_seq_overflow(seq);
return -1;
}
/**
* write padding spaces to buffer
*/
void dfs_seq_pad(struct dfs_seq_file *seq, char c)
{
int size = seq->pad_until - seq->count;
if (size > 0)
{
if (size + seq->count > seq->size)
{
dfs_seq_overflow(seq);
return;
}
rt_memset(seq->buf + seq->count, ' ', size);
seq->count += size;
}
if (c)
{
dfs_seq_putc(seq, c);
}
}
#if 1
/* test demo */
static char *txt[4] = {
"text1",
"text2",
"text3",
"text4",
};
static void *seq_test_start(struct dfs_seq_file *seq, off_t *index)
{
off_t i = *index; // seq->index
if (i >= 0 && i < 4)
{
return txt[i];
}
return RT_NULL;
}
static void seq_test_stop(struct dfs_seq_file *seq, void *data)
{
}
static void *seq_test_next(struct dfs_seq_file *seq, void *data, off_t *index)
{
off_t i = *index + 1; // seq->index
*index = i;
if (i >= 0 && i < 4)
{
return txt[i];
}
return RT_NULL;
}
static int seq_test_show(struct dfs_seq_file *seq, void *data)
{
const char *text = (const char *)data;
dfs_seq_setwidth(seq, 20);
dfs_seq_puts(seq, "puts ");
dfs_seq_putc(seq, 'c');
dfs_seq_write(seq, " write", 6);
dfs_seq_printf(seq, " %s", text);
dfs_seq_pad(seq, 0);
return 0;
}
static const struct dfs_seq_ops _test_ops = {
.start = seq_test_start,
.stop = seq_test_stop,
.next = seq_test_next,
.show = seq_test_show,
};
static int dfs_seq_test(int argc, char **argv)
{
struct dfs_file file = {0};
int ret = dfs_seq_open(&file, &_test_ops);
if (ret == 0)
{
char buf[256] = {0};
off_t pos = (argc > 1) ? atoi(argv[1]) : 0;
ssize_t len = (argc > 2) ? atoi(argv[2]) : 255;
if (len > 255)
{
len = 255;
rt_kprintf("buf len is %d, max read is 255\n", 256, len);
}
len = dfs_seq_read(&file, buf, len, &pos);
buf[len] = '\0';
rt_kprintf("show: \"%s\" len: %d\n", buf, len);
dfs_seq_release(&file);
}
return 0;
}
MSH_CMD_EXPORT_ALIAS(dfs_seq_test, seq_test, seq_test [pos] [read_len]);
#endif


@ -10,6 +10,9 @@
#include <dfs_file.h>
#include <dfs_mnt.h>
#ifdef RT_USING_PAGECACHE
#include "dfs_pcache.h"
#endif
#define DBG_TAG "DFS.vnode"
#define DBG_LVL DBG_WARNING
@ -58,7 +61,12 @@ int dfs_vnode_destroy(struct dfs_vnode* vnode)
if (rt_atomic_load(&(vnode->ref_count)) == 1)
{
LOG_I("free a vnode: %p", vnode);
#ifdef RT_USING_PAGECACHE
if (vnode->aspace)
{
dfs_aspace_destroy(vnode->aspace);
}
#endif
if (vnode->mnt)
{
DLOG(msg, "vnode", vnode->mnt->fs_ops->name, DLOG_MSG, "fs_ops->free_vnode");
@ -106,7 +114,12 @@ void dfs_vnode_unref(struct dfs_vnode *vnode)
{
rt_atomic_sub(&(vnode->ref_count), 1);
DLOG(note, "vnode", "vnode ref_count=%d", rt_atomic_load(&(vnode->ref_count)));
#ifdef RT_USING_PAGECACHE
if (vnode->aspace)
{
dfs_aspace_destroy(vnode->aspace);
}
#endif
if (rt_atomic_load(&(vnode->ref_count)) == 0)
{
LOG_I("free a vnode: %p", vnode);


@ -2008,12 +2008,22 @@ static int job_control(struct tty_struct *tty)
return __tty_check_change(tty, SIGTTIN);
}
static struct rt_wqueue *_wait_queue_current_get(struct tty_struct *tty)
{
struct rt_lwp *lwp;
lwp = lwp_self();
if (!lwp || !lwp->tty)
lwp = RT_NULL;
return wait_queue_get(lwp, tty);
}
static int n_tty_read(struct dfs_file *fd, void *buf, size_t count)
{
int level = 0;
char *b = (char *)buf;
struct tty_struct *tty = RT_NULL;
struct rt_lwp *lwp = RT_NULL;
struct rt_wqueue *wq = RT_NULL;
int wait_ret = 0;
int retval = 0;
@ -2031,8 +2041,7 @@ static int n_tty_read(struct dfs_file *fd, void *buf, size_t count)
struct n_tty_data *ldata = tty->disc_data;
lwp = (struct rt_lwp *)(rt_thread_self()->lwp);
wq = wait_queue_get(lwp, tty);
wq = _wait_queue_current_get(tty);
while(count)
{
@ -2171,15 +2180,13 @@ static int n_tty_poll(struct dfs_file *fd, struct rt_pollreq *req)
int mask = POLLOUT;
struct tty_struct *tty = RT_NULL;
struct rt_wqueue *wq = RT_NULL;
struct rt_lwp *lwp = RT_NULL;
tty = (struct tty_struct *)fd->vnode->data;
RT_ASSERT(tty != RT_NULL);
RT_ASSERT(tty->init_flag == TTY_INIT_FLAG_INITED);
lwp = (struct rt_lwp *)(rt_thread_self()->lwp);
wq = wait_queue_get(lwp, tty);
wq = _wait_queue_current_get(tty);
rt_poll_add(wq, req);
level = rt_hw_interrupt_disable();


@ -15,6 +15,7 @@
#include <rtthread.h>
#include <tty.h>
#include <tty_ldisc.h>
#include <shell.h>
#if defined(RT_USING_POSIX_DEVIO)
#include <termios.h>
@ -272,9 +273,97 @@ static int tiocsctty(struct tty_struct *tty, int arg)
return 0;
}
static int tiocswinsz(struct tty_struct *tty, struct winsize *p_winsize)
{
rt_kprintf("\x1b[8;%d;%dt", p_winsize->ws_col, p_winsize->ws_row);
return 0;
}
static int tiocgwinsz(struct tty_struct *tty, struct winsize *p_winsize)
{
if(rt_thread_self() != rt_thread_find(FINSH_THREAD_NAME))
{
/* can only be used in the tshell thread; otherwise, return the default size */
p_winsize->ws_col = 80;
p_winsize->ws_row = 24;
}
else
{
#define _TIO_BUFLEN 20
char _tio_buf[_TIO_BUFLEN];
unsigned char cnt1, cnt2, cnt3, i;
char row_s[4], col_s[4];
char *p;
rt_memset(_tio_buf, 0, _TIO_BUFLEN);
/* send the command to terminal for getting the window size of the terminal */
rt_kprintf("\033[18t");
/* waiting for the response from the terminal */
i = 0;
while(i < _TIO_BUFLEN)
{
_tio_buf[i] = finsh_getchar();
if(_tio_buf[i] != 't')
{
i ++;
}
else
{
break;
}
}
if(i == _TIO_BUFLEN)
{
/* buffer overflowed; return the default size */
p_winsize->ws_col = 80;
p_winsize->ws_row = 24;
return 0;
}
/* interpret the response, e.g. "\033[8;1;15t", meaning row is 1 and col is 15 (unit: one character cell) */
rt_memset(row_s,0,4);
rt_memset(col_s,0,4);
cnt1 = 0;
while(cnt1 < _TIO_BUFLEN && _tio_buf[cnt1] != ';')
{
cnt1++;
}
cnt2 = ++cnt1;
while(cnt2 < _TIO_BUFLEN && _tio_buf[cnt2] != ';')
{
cnt2++;
}
p = row_s;
while(cnt1 < cnt2)
{
*p++ = _tio_buf[cnt1++];
}
p = col_s;
cnt2++;
cnt3 = rt_strlen(_tio_buf) - 1;
while(cnt2 < cnt3)
{
*p++ = _tio_buf[cnt2++];
}
/* load the window size data */
p_winsize->ws_col = atoi(col_s);
p_winsize->ws_row = atoi(row_s);
#undef _TIO_BUFLEN
}
p_winsize->ws_xpixel = 0;/* unused */
p_winsize->ws_ypixel = 0;/* unused */
return 0;
}
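Callers reach these handlers through the standard termios ioctls, for example:

#include <sys/ioctl.h>
#include <unistd.h>

struct winsize ws;
if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) == 0)
{
    rt_kprintf("terminal is %d cols x %d rows\n", ws.ws_col, ws.ws_row);
}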
static int tty_ioctl(struct dfs_file *fd, int cmd, void *args)
{
int ret = 0;
void *p = (void *)args;
struct tty_struct *tty = RT_NULL;
struct tty_struct *real_tty = RT_NULL;
struct tty_ldisc *ld = RT_NULL;
@ -295,6 +384,10 @@ static int tty_ioctl(struct dfs_file *fd, int cmd, void *args)
{
case TIOCSCTTY:
return tiocsctty(real_tty, 1);
case TIOCGWINSZ:
return tiocgwinsz(real_tty, p);
case TIOCSWINSZ:
return tiocswinsz(real_tty, p);
}
ld = tty->ldisc;


@ -14,6 +14,7 @@
#include <stdint.h>
#include <unistd.h>
#include <dfs_file.h>
#include <dfs.h>
#include "poll.h"
#include "eventfd.h"


@ -68,4 +68,20 @@ if RT_USING_LWP
bool "The unix98 PTY debug output"
default n
endif
menuconfig RT_USING_LDSO
bool "LDSO: dynamic load shared objects"
default n
if RT_USING_LDSO
config ELF_DEBUG_ENABLE
bool "Enable ldso debug"
default n
config ELF_LOAD_RANDOMIZE
bool "Enable random load address"
default n
endif
endif


@ -31,23 +31,21 @@ int arch_user_space_init(struct rt_lwp *lwp)
{
size_t *mmu_table;
mmu_table = (size_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
if (!mmu_table)
mmu_table = rt_hw_mmu_pgtbl_create();
if (mmu_table)
{
return -RT_ENOMEM;
}
lwp->end_heap = USER_HEAP_VADDR;
memset(mmu_table, 0, ARCH_PAGE_SIZE);
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);
lwp->aspace = rt_aspace_create(
(void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
if (!lwp->aspace)
{
return -RT_ERROR;
}
}
else
{
return -RT_ENOMEM;
}
return 0;
}

View File

@ -17,10 +17,11 @@
#ifdef ARCH_MM_MMU
#define USER_VADDR_TOP 0x0001000000000000UL
#define USER_HEAP_VEND 0x0000ffffB0000000UL
#define USER_HEAP_VADDR 0x0000ffff80000000UL
#define USER_HEAP_VADDR (0x0000ffff40000000UL)
#define USER_HEAP_VEND USER_STACK_VSTART
#define USER_STACK_VSTART 0x0000ffff70000000UL
#define USER_STACK_VEND USER_HEAP_VADDR
#define USER_STACK_VEND (USER_STACK_VSTART + 0x10000000)
#define USER_ARG_VADDR USER_STACK_VEND
#define LDSO_LOAD_VADDR 0x60000000UL
#define USER_VADDR_START 0x00200000UL
#define USER_LOAD_VADDR USER_VADDR_START

View File

@ -50,10 +50,12 @@
arch_start_umode:
mov sp, x3
mov x4, #(SPSR_Mode(0) | SPSR_A64)
mov x3, x2 ;/* user stack top */
msr daifset, #3
dsb sy
mrs x30, sp_el0
/* user stack top */
msr sp_el0, x2
mov x3, x2
msr spsr_el1, x4
msr elr_el1, x1
eret

View File

@ -23,13 +23,11 @@
#include <lwp_arch.h>
#include <lwp_user_mm.h>
#define KPTE_START (KERNEL_VADDR_START >> ARCH_SECTION_SHIFT)
int arch_user_space_init(struct rt_lwp *lwp)
{
size_t *mmu_table;
mmu_table = (size_t *)rt_pages_alloc(2);
mmu_table = rt_hw_mmu_pgtbl_create();
if (!mmu_table)
{
return -RT_ENOMEM;
@ -37,9 +35,6 @@ int arch_user_space_init(struct rt_lwp *lwp)
lwp->end_heap = USER_HEAP_VADDR;
rt_memcpy(mmu_table + KPTE_START, (size_t *)rt_kernel_space.page_table + KPTE_START, ARCH_PAGE_SIZE);
rt_memset(mmu_table, 0, 3 * ARCH_PAGE_SIZE);
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, 4 * ARCH_PAGE_SIZE);
lwp->aspace = rt_aspace_create((void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
if (!lwp->aspace)
@ -87,7 +82,7 @@ void arch_user_space_free(struct rt_lwp *lwp)
rt_aspace_delete(lwp->aspace);
/* must be freed after aspace delete, pgtbl is required for unmap */
rt_pages_free(pgtbl, 2);
rt_hw_mmu_pgtbl_delete(pgtbl);
lwp->aspace = RT_NULL;
}
else

View File

@ -21,7 +21,7 @@
#define USER_STACK_VSTART 0x70000000UL
#define USER_STACK_VEND USER_HEAP_VADDR
#define LDSO_LOAD_VADDR 0x60000000UL
#define USER_VADDR_START 0x00100000UL
#define USER_VADDR_START 0x00010000UL
#define USER_LOAD_VADDR USER_VADDR_START
#ifdef __cplusplus

View File

@ -43,7 +43,12 @@ arch_start_umode:
msr spsr, r9
mov sp, r3
mov r3, r2 ;/* user stack top */
/* set user stack top */
cps #Mode_SYS
mov sp, r2
cps #Mode_SVC
mov r3, r2
/* set data address. */
movs pc, r1

View File

@ -93,17 +93,13 @@ int arch_user_space_init(struct rt_lwp *lwp)
{
rt_ubase_t *mmu_table;
mmu_table = (rt_ubase_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
mmu_table = rt_hw_mmu_pgtbl_create();
if (!mmu_table)
{
return -RT_ENOMEM;
}
lwp->end_heap = USER_HEAP_VADDR;
rt_memcpy(mmu_table, rt_kernel_space.page_table, ARCH_PAGE_SIZE);
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);
lwp->aspace = rt_aspace_create(
(void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
if (!lwp->aspace)
@ -129,7 +125,7 @@ void arch_user_space_free(struct rt_lwp *lwp)
rt_aspace_delete(lwp->aspace);
/* must be freed after aspace delete, pgtbl is required for unmap */
rt_pages_free(pgtbl, 0);
rt_hw_mmu_pgtbl_delete(pgtbl);
lwp->aspace = RT_NULL;
}
else

View File

@ -153,6 +153,9 @@ struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char
int len;
size_t *args_k;
struct process_aux *aux;
size_t prot = PROT_READ | PROT_WRITE;
size_t flags = MAP_FIXED | MAP_PRIVATE;
size_t zero = 0;
for (i = 0; i < argc; i++)
{
@ -179,9 +182,8 @@ struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char
return RT_NULL;
}
/* args = (int *)lwp_map_user(lwp, 0, size); */
args = (int *)lwp_map_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE), size, 0);
if (args == RT_NULL)
args = lwp_mmap2(lwp, (void *)(USER_STACK_VEND), size, prot, flags, -1, 0);
if (args == RT_NULL || lwp_data_put(lwp, args, &zero, sizeof(zero)) != sizeof(zero))
{
return RT_NULL;
}
@ -1417,3 +1419,37 @@ void lwp_uthread_ctx_restore(void)
thread = rt_thread_self();
thread->user_ctx.ctx = RT_NULL;
}
void rt_update_process_times(void)
{
struct rt_thread *thread;
#ifdef RT_USING_SMP
struct rt_cpu* pcpu;
pcpu = rt_cpu_self();
#endif
thread = rt_thread_self();
if (IS_USER_MODE(thread))
{
thread->user_time += 1;
#ifdef RT_USING_SMP
pcpu->cpu_stat.user += 1;
#endif
}
else
{
thread->system_time += 1;
#ifdef RT_USING_SMP
if (thread == pcpu->idle_thread)
{
pcpu->cpu_stat.idle += 1;
}
else
{
pcpu->cpu_stat.system += 1;
}
#endif
}
}

View File

@ -240,7 +240,9 @@ struct __pthread {
}
#endif
#define AUX_ARRAY_ITEMS_NR 6
#ifndef AUX_ARRAY_ITEMS_NR
#define AUX_ARRAY_ITEMS_NR 32
#endif
/* aux key */
#define AT_NULL 0

811
components/lwp/lwp_elf.c Normal file
View File

@ -0,0 +1,811 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-08-23 zhangsz first version
*/
#include <rtthread.h>
#ifdef RT_USING_LDSO
#include <dfs_file.h>
#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
#include <lwp_elf.h>
#include "lwp.h"
#include "lwp_arch.h"
#ifdef ARCH_MM_MMU
#include <lwp_user_mm.h>
#endif
#define DBG_TAG "load.elf"
#ifdef ELF_DEBUG_ENABLE
#define DBG_LVL DBG_LOG
#else
#define DBG_LVL DBG_INFO
#endif
#include <rtdbg.h>
#define ELF_INVALID_FD -1
#define ELF_PHDR_NUM_MAX 128
#define FILE_LENGTH_MAX 0xC0000000
#define MEM_SIZE_MAX 0xC0000000
#define ELF_PATH_MAX 256
#define ELF_PATH_MIN 1
#define ELF_PAGESTART(_v) ((_v) & ~(rt_ubase_t)(ARCH_PAGE_SIZE - 1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ARCH_PAGE_SIZE - 1))
#define ELF_PAGEALIGN(_v) (((_v) + ARCH_PAGE_SIZE - 1) & ~(ARCH_PAGE_SIZE - 1))
#define ELF_EXEC_LOAD_ADDR USER_VADDR_START
#define ELF_INTERP_LOAD_ADDR LDSO_LOAD_VADDR
#define ELF_AUX_ENT(aux, id, val) \
do \
{ \
*aux++ = id; \
*aux++ = val; \
} while (0)
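A quick worked example of the three paging macros above, assuming ARCH_PAGE_SIZE is 0x1000; the input value is illustrative only:

/* sanity check (assuming ARCH_PAGE_SIZE == 0x1000):
 *   ELF_PAGESTART(0x12345)  -> 0x12000  (round down to page base)
 *   ELF_PAGEOFFSET(0x12345) -> 0x345    (offset within the page)
 *   ELF_PAGEALIGN(0x12345)  -> 0x13000  (round up to next page)   */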
typedef struct
{
int fd;
char *filename;
rt_size_t file_len;
Elf_Ehdr ehdr;
Elf_Phdr *phdr;
rt_ubase_t map_size;
} elf_info_t;
typedef struct
{
struct rt_lwp *lwp;
struct process_aux *aux;
elf_info_t exec_info;
elf_info_t interp_info;
rt_ubase_t load_addr;
rt_ubase_t e_entry;
rt_ubase_t interp_base;
} elf_load_info_t;
static void elf_user_dump(struct rt_lwp *lwp, void *va, size_t len)
{
#ifdef ELF_DEBUG_DUMP
uint8_t *k_va;
int ret;
if (len < 16)
len = 16;
rt_kprintf("\r\n");
rt_kprintf("%s : user va : %p, len : 0x%x(%d)\n", __func__, va, len, len);
k_va = rt_malloc(len);
if (k_va == RT_NULL)
{
rt_kprintf("%s : malloc failed\n", __func__);
return;
}
rt_memset(k_va, 0, len);
ret = lwp_data_get(lwp, k_va, va, len);
if (ret != len)
{
rt_kprintf("%s : lwp_get_from_user failed, ret = %d\n", __func__, ret);
return;
}
rt_kprintf("%s : k_va : %p\n", __func__, k_va);
for (size_t i = 0; i < len; i += 16)
{
rt_kprintf(" %02x %02x %02x %02x %02x %02x %02x %02x ", k_va[i], k_va[i+1], k_va[i+2], k_va[i+3],
k_va[i+4], k_va[i+5], k_va[i+6], k_va[i+7]);
rt_kprintf(" %02x %02x %02x %02x %02x %02x %02x %02x \n", k_va[i+8], k_va[i+9], k_va[i+10], k_va[i+11],
k_va[i+12], k_va[i+13], k_va[i+14], k_va[i+15]);
}
rt_kprintf("\r\n");
rt_free(k_va);
#endif
}
rt_ubase_t elf_random_offset(void)
{
#ifdef ELF_LOAD_RANDOMIZE
return (rt_tick_get() % 65535) * ARCH_PAGE_SIZE;
#else
return ELF_PAGEALIGN(0);
#endif
}
static void *file_mmap(struct rt_lwp *lwp, int fd, rt_ubase_t load_addr,
rt_ubase_t map_size, size_t prot, size_t flags, rt_ubase_t offset)
{
uint8_t *map_va;
map_va = (uint8_t *)lwp_mmap2(lwp, (void *)load_addr, map_size, prot, flags, fd, offset >> ARCH_PAGE_SHIFT);
if (!map_va || (map_va != (uint8_t *)load_addr))
{
LOG_E("%s : lwp map user failed!", __func__);
return RT_NULL;
}
LOG_D(" %s : map va = %p load_addr : %p size : 0x%x", __func__, map_va, load_addr, map_size);
return map_va;
}
static int elf_file_open(const char *filename)
{
int fd = -1;
fd = open(filename, O_BINARY | O_RDONLY, 0);
if (fd < 0)
{
LOG_E("%s : elf file [%s] open failed!", __func__, filename);
}
return fd;
}
static int elf_file_close(int fd)
{
return close(fd);
}
static int elf_file_length(char *filename, rt_size_t *file_len)
{
int ret;
struct stat s = { 0 };
ret = stat(filename, &s);
if (ret != 0)
{
LOG_E("%s : error", __func__);
return -RT_ERROR;
}
*file_len = (rt_size_t)s.st_size;
return RT_EOK;
}
static int elf_file_read(rt_int32_t fd, rt_uint8_t *buffer, size_t size, off_t offset)
{
ssize_t read_len;
off_t pos;
if (size > 0)
{
pos = lseek(fd, offset, SEEK_SET);
if (pos != offset)
{
LOG_E("%s : seek file offset: 0x%x failed", __func__, offset);
return -RT_ERROR;
}
read_len = read(fd, buffer, size);
if (read_len != size)
{
LOG_E("%s : read from offset: 0x%x error", __func__, offset);
return -RT_ERROR;
}
}
return RT_EOK;
}
static rt_int32_t elf_check_ehdr(const Elf_Ehdr *ehdr, rt_uint32_t file_len)
{
if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
{
LOG_E("%s : e_ident error", __func__);
return -RT_ERROR;
}
if ((ehdr->e_type != ET_EXEC) && (ehdr->e_type != ET_DYN))
{
LOG_E("%s : e_type error", __func__);
return -RT_ERROR;
}
if (ehdr->e_machine == EM_NONE)
{
LOG_E("%s : e_machine is EM_NONE", __func__);
return -RT_ERROR;
}
if (ehdr->e_phnum > ELF_PHDR_NUM_MAX)
{
LOG_E("%s : e_phnum error", __func__);
return -RT_ERROR;
}
if (ehdr->e_phoff > file_len)
{
LOG_E("%s : e_phoff error", __func__);
return -RT_ERROR;
}
LOG_D("%s : e_entry : 0x%x", __func__, ehdr->e_entry);
return RT_EOK;
}
static int elf_check_phdr(const Elf_Phdr *phdr)
{
if (phdr->p_filesz > FILE_LENGTH_MAX)
{
LOG_E("%s : phdr p_filesz 0x%x error", __func__, phdr->p_filesz);
return -RT_ERROR;
}
if (phdr->p_offset > FILE_LENGTH_MAX)
{
LOG_E("%s : phdr p_offset 0x%x error", __func__, phdr->p_offset);
return -RT_ERROR;
}
if (phdr->p_memsz > MEM_SIZE_MAX)
{
LOG_E("%s[%d], phdr p_memsz 0x%x error", __func__, phdr->p_memsz);
return -RT_ERROR;
}
LOG_D("%s : phdr p_vaddr : 0x%x", __func__, phdr->p_vaddr);
return RT_EOK;
}
static int elf_load_ehdr(elf_info_t *elf_info)
{
int ret;
ret = elf_file_open(elf_info->filename);
if (ret < 0)
{
LOG_E("%s : elf_file_open %s failed", __func__, elf_info->filename);
return ret;
}
elf_info->fd = ret;
ret = elf_file_length(elf_info->filename, &elf_info->file_len);
if (ret != RT_EOK)
{
return -RT_ERROR;
}
ret = elf_file_read(elf_info->fd, (rt_uint8_t *)&elf_info->ehdr, sizeof(Elf_Ehdr), 0);
if (ret != RT_EOK)
{
LOG_E("%s : elf_file_read failed, ret : %d", __func__, ret);
return -RT_ERROR;
}
ret = elf_check_ehdr(&elf_info->ehdr, elf_info->file_len);
if (ret != RT_EOK)
{
LOG_E("%s : elf_check_ehdr failed, ret : %d", __func__, ret);
return -RT_ERROR;
}
return RT_EOK;
}
static int elf_load_phdr(elf_info_t *elf_info)
{
Elf_Ehdr *ehdr = &elf_info->ehdr;
uint32_t size;
int ret;
if (ehdr->e_phnum < 1)
{
return -RT_ERROR;
}
if (ehdr->e_phentsize != sizeof(Elf_Phdr))
{
return -RT_ERROR;
}
size = sizeof(Elf_Phdr) * ehdr->e_phnum;
if ((ehdr->e_phoff + size) > elf_info->file_len)
{
return -RT_ERROR;
}
elf_info->phdr = rt_malloc(size);
if (elf_info->phdr == RT_NULL)
{
LOG_E("%s : alloc phdr failed", __func__);
return -RT_ENOMEM;
}
ret = elf_file_read(elf_info->fd, (rt_uint8_t *)elf_info->phdr, size, ehdr->e_phoff);
if (ret != RT_EOK)
{
rt_free(elf_info->phdr);
elf_info->phdr = RT_NULL;
LOG_E("%s : elf_file_read failed, ret = %d", __func__, ret);
return -RT_ERROR;
}
return RT_EOK;
}
static int elf_load_interp(elf_load_info_t *load_info)
{
Elf_Phdr *phdr = load_info->exec_info.phdr;
int ret;
int i;
for (i = 0; i < load_info->exec_info.ehdr.e_phnum; ++i, ++phdr)
{
if (phdr->p_type != PT_INTERP)
{
continue;
}
if (elf_check_phdr(phdr) != RT_EOK)
{
return -RT_ERROR;
}
if ((phdr->p_filesz > ELF_PATH_MAX) || (phdr->p_filesz < ELF_PATH_MIN))
{
LOG_E("%s : phdr p_filesz 0x%x error", __func__, phdr->p_filesz);
return -RT_ERROR;
}
if (phdr->p_offset + phdr->p_filesz > load_info->exec_info.file_len)
{
LOG_E("%s : phdr p_offset error", __func__, phdr->p_offset);
return -RT_ERROR;
}
load_info->interp_info.filename = rt_malloc(phdr->p_filesz);
if (load_info->interp_info.filename == RT_NULL)
{
LOG_E("%s : alloc elf interpreter failed", __func__);
return -RT_ENOMEM;
}
ret = elf_file_read(load_info->exec_info.fd, (rt_uint8_t *)load_info->interp_info.filename,
phdr->p_filesz, phdr->p_offset);
if (ret != RT_EOK)
{
LOG_E("%s : elf_file_read failed, ret = %d", __func__, ret);
ret = -RT_ERROR;
goto error_exit;
}
if (load_info->interp_info.filename[phdr->p_filesz - 1] != '\0')
{
LOG_E("%s : elf interpreter is invalid", __func__);
ret = -RT_ERROR;
goto error_exit;
}
LOG_D("%s : elf interpreter : %s", __func__, load_info->interp_info.filename);
ret = elf_load_ehdr(&load_info->interp_info);
if (ret != RT_EOK)
{
LOG_E("%s : elf_load_ehdr failed, ret = %d", __func__, ret);
goto error_exit;
}
ret = elf_load_phdr(&load_info->interp_info);
if (ret != RT_EOK)
{
LOG_E("%s : elf_load_phdr failed, ret = %d", __func__, ret);
goto error_exit;
}
break;
}
return RT_EOK;
error_exit:
return ret;
}
static int total_mapping_size(elf_info_t *elf_info)
{
int i;
int first_idx = -1;
int last_idx = -1;
for (i = 0; i < elf_info->ehdr.e_phnum; i++)
{
if (elf_info->phdr[i].p_type == PT_LOAD)
{
last_idx = i;
if (first_idx == -1)
first_idx = i;
}
}
if (first_idx == -1)
return -1;
elf_info->map_size = elf_info->phdr[last_idx].p_vaddr + elf_info->phdr[last_idx].p_memsz -
ELF_PAGESTART(elf_info->phdr[first_idx].p_vaddr);
return 0;
}
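To make the arithmetic concrete, a worked example with made-up segment values:

/* worked example (values made up): first PT_LOAD at p_vaddr 0x10123,
 * last PT_LOAD at p_vaddr 0x30000 with p_memsz 0x2500:
 *   map_size = 0x30000 + 0x2500 - ELF_PAGESTART(0x10123)
 *            = 0x32500 - 0x10000
 *            = 0x22500
 * i.e. the span from the page base of the first segment to the end of
 * the last, so one reservation covers every PT_LOAD segment. */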
static rt_ubase_t elf_map(struct rt_lwp *lwp, const Elf_Phdr *elf_phdr, int fd, rt_ubase_t addr, size_t prot, size_t flags, rt_ubase_t map_size)
{
rt_ubase_t map_va = 0;
rt_ubase_t va_offset;
addr = ELF_PAGESTART(addr);
va_offset = elf_phdr->p_offset - ELF_PAGEOFFSET(elf_phdr->p_vaddr);
rt_ubase_t size;
if (map_size != 0)
{
size = map_size;
}
else
{
size = elf_phdr->p_memsz + ELF_PAGEOFFSET(elf_phdr->p_vaddr);
if (size == 0)
{
return addr;
}
}
map_va = (rt_ubase_t)file_mmap(lwp, fd, addr, size, prot, flags, va_offset);
return map_va;
}
static int elf_zero_bss(struct rt_lwp *lwp, int fd, const Elf_Phdr *phdr, rt_ubase_t bss_start,
rt_ubase_t bss_end)
{
lwp_data_set(lwp, (void *)bss_start, 0, bss_end - bss_start);
return RT_EOK;
}
static int elf_file_mmap(elf_load_info_t *load_info, elf_info_t *elf_info, rt_ubase_t *elfload_addr,
rt_uint32_t map_size, rt_ubase_t *load_base)
{
int ret, i;
rt_ubase_t map_va, bss_start, bss_end;
Elf_Ehdr *ehdr = &elf_info->ehdr;
Elf_Phdr *phdr = elf_info->phdr;
const Elf_Phdr *tmp_phdr = phdr;
int fd = elf_info->fd;
rt_ubase_t load_addr;
size_t prot = PROT_READ | PROT_WRITE;
size_t flags = MAP_FIXED | MAP_PRIVATE;
for (i = 0; i < ehdr->e_phnum; ++i, ++tmp_phdr)
{
if (tmp_phdr->p_type != PT_LOAD)
{
continue;
}
if (ehdr->e_type == ET_EXEC)
{
if (elf_check_phdr(tmp_phdr) != RT_EOK)
{
LOG_E("%s : elf_check_phdr failed", __func__);
return -RT_ERROR;
}
}
load_addr = tmp_phdr->p_vaddr + *load_base;
LOG_D("%s : p_vaddr : 0x%x, load_addr : 0x%x", __func__, tmp_phdr->p_vaddr, load_addr);
if ((tmp_phdr->p_vaddr == 0) && (*load_base == 0))
{
flags &= ~MAP_FIXED;
}
map_va = elf_map(load_info->lwp, tmp_phdr, fd, load_addr, prot, flags, map_size);
if (!map_va)
{
LOG_E("%s : elf_map failed", __func__);
return -ENOMEM;
}
map_size = 0;
elf_user_dump(load_info->lwp, (void *)load_addr, 64);
if ((tmp_phdr->p_memsz > tmp_phdr->p_filesz) && (tmp_phdr->p_flags & PF_W))
{
bss_start = load_addr + tmp_phdr->p_filesz;
bss_end = load_addr + tmp_phdr->p_memsz;
ret = elf_zero_bss(load_info->lwp, fd, tmp_phdr, bss_start, bss_end);
if (ret)
{
LOG_E("%s : elf_zero_bss error", __func__);
return ret;
}
}
if (*elfload_addr == 0)
{
*elfload_addr = map_va + ELF_PAGEOFFSET(tmp_phdr->p_vaddr);
LOG_D("%s elf_load_addr : %p, vAddr : %p, load_base : %p, map_va : %p", __func__,
*elfload_addr, tmp_phdr->p_vaddr, *load_base, map_va);
}
if ((*load_base == 0) && (ehdr->e_type == ET_DYN))
{
*load_base = map_va;
}
}
return RT_EOK;
}
static int load_elf_interp(elf_load_info_t *load_info, rt_ubase_t *interp_base)
{
int ret;
rt_ubase_t load_base = ELF_INTERP_LOAD_ADDR + elf_random_offset();
ret = total_mapping_size(&load_info->interp_info);
if (ret)
{
LOG_E("%s : total_mapping_size failed", __func__);
return -RT_ERROR;
}
LOG_D("%s : total_mapping_size 0x%x", __func__, load_info->interp_info.map_size);
return elf_file_mmap(load_info, &load_info->interp_info, interp_base,
load_info->interp_info.map_size, &load_base);
}
static int elf_aux_fill(elf_load_info_t *load_info)
{
uint8_t *random;
struct process_aux *aux = load_info->aux;
elf_addr_t *aux_info;
uint32_t random_value = rt_tick_get();
size_t prot = PROT_READ | PROT_WRITE;
size_t flags = MAP_PRIVATE;
void *va;
if (!aux)
{
LOG_E("%s : aux is null", __func__);
return -1;
}
aux_info = (elf_addr_t *)aux->item;
ELF_AUX_ENT(aux_info, AT_PAGESZ, ARCH_PAGE_SIZE);
va = lwp_mmap2(load_info->lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE * 2), ARCH_PAGE_SIZE, prot, flags, -1, 0);
if (!va)
{
LOG_E("lwp map user failed!");
return -RT_ERROR;
}
random = (uint8_t *)(USER_VADDR_TOP - ARCH_PAGE_SIZE - sizeof(char[16]));
lwp_data_put(load_info->lwp, random, &random_value, sizeof(random_value));
ELF_AUX_ENT(aux_info, AT_RANDOM, (size_t)random);
ELF_AUX_ENT(aux_info, AT_PHDR, (size_t)load_info->load_addr + load_info->exec_info.ehdr.e_phoff);
ELF_AUX_ENT(aux_info, AT_PHNUM, (size_t)load_info->exec_info.ehdr.e_phnum);
ELF_AUX_ENT(aux_info, AT_PHENT, sizeof(Elf_Phdr));
ELF_AUX_ENT(aux_info, AT_BASE, load_info->interp_base);
ELF_AUX_ENT(aux_info, AT_FLAGS, 0);
ELF_AUX_ENT(aux_info, AT_ENTRY, load_info->exec_info.ehdr.e_entry);
ELF_AUX_ENT(aux_info, AT_UID, 0);
ELF_AUX_ENT(aux_info, AT_EUID, 0);
ELF_AUX_ENT(aux_info, AT_GID, 0);
ELF_AUX_ENT(aux_info, AT_EGID, 0);
ELF_AUX_ENT(aux_info, AT_HWCAP, 0);
ELF_AUX_ENT(aux_info, AT_CLKTCK, 0);
ELF_AUX_ENT(aux_info, AT_SECURE, 0);
#ifdef ARCH_MM_MMU
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, aux, sizeof(*aux));
#endif
return 0;
}
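On the consumer side, a libc or ldso would walk the id/value pairs written above. A minimal sketch, assuming the rest of aux->item is zero-initialized so the scan terminates at AT_NULL (aux_get is hypothetical):

/* hedged consumer-side sketch of reading the aux vector */
static size_t aux_get(elf_addr_t *auxv, size_t id)
{
    for (; auxv[0] != AT_NULL; auxv += 2)
    {
        if (auxv[0] == id)
            return auxv[1];   /* value stored by ELF_AUX_ENT() */
    }
    return 0;
}
/* e.g. aux_get(aux_base, AT_PAGESZ) would yield ARCH_PAGE_SIZE */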
static int elf_load_segment(elf_load_info_t *load_info)
{
int ret;
rt_ubase_t app_load_base = 0;
load_info->load_addr = 0;
load_info->interp_base = 0;
load_info->exec_info.map_size = 0;
if (load_info->exec_info.ehdr.e_type == ET_DYN)
{
ret = total_mapping_size(&load_info->exec_info);
if (ret)
{
LOG_E("%s : total_mapping_size failed", __func__);
return -RT_ERROR;
}
LOG_D("%s : map_size : 0x%x", __func__, load_info->exec_info.map_size);
app_load_base = ELF_EXEC_LOAD_ADDR + elf_random_offset();
}
ret = elf_file_mmap(load_info, &load_info->exec_info, &load_info->load_addr,
load_info->exec_info.map_size, &app_load_base);
elf_file_close(load_info->exec_info.fd);
if (ret != RT_EOK)
{
LOG_W("%s : elf_file_close exec failed", __func__);
}
load_info->exec_info.fd = ELF_INVALID_FD;
if (load_info->interp_info.fd != ELF_INVALID_FD)
{
ret = load_elf_interp(load_info, &load_info->interp_base);
if (ret)
{
LOG_E("%s : load_elf_interp failed, ret = %d", __func__, ret);
return ret;
}
ret = elf_file_close(load_info->interp_info.fd);
if (ret != RT_EOK)
{
LOG_W("%s : elf_file_close interp failed, ret = %d", __func__, ret);
}
load_info->interp_info.fd = ELF_INVALID_FD;
load_info->e_entry = load_info->interp_info.ehdr.e_entry + load_info->interp_base;
load_info->exec_info.ehdr.e_entry = load_info->exec_info.ehdr.e_entry + app_load_base;
}
else
{
load_info->e_entry = load_info->exec_info.ehdr.e_entry;
}
load_info->lwp->text_entry = (void *)load_info->e_entry;
LOG_D("%s : lwp->text_entry : %p loadaddr : %p", __func__, load_info->lwp->text_entry, app_load_base);
elf_user_dump(load_info->lwp, load_info->lwp->text_entry, 64);
ret = elf_aux_fill(load_info);
if (ret)
{
LOG_E("%s : elf_aux_fill failed", __func__);
return ret;
}
return RT_EOK;
}
static void elf_load_deinit(elf_load_info_t *load_info)
{
if (load_info->exec_info.fd != ELF_INVALID_FD)
{
elf_file_close(load_info->exec_info.fd);
}
if (load_info->interp_info.fd != ELF_INVALID_FD)
{
elf_file_close(load_info->interp_info.fd);
}
if (load_info->exec_info.phdr != RT_NULL)
{
rt_free(load_info->exec_info.phdr);
}
if (load_info->exec_info.filename != RT_NULL)
{
rt_free(load_info->exec_info.filename);
}
if (load_info->interp_info.phdr != RT_NULL)
{
rt_free(load_info->interp_info.phdr);
}
if (load_info->interp_info.filename != RT_NULL)
{
rt_free(load_info->interp_info.filename);
}
}
static int elf_load_app(elf_info_t *exec_info)
{
int ret;
ret = elf_load_ehdr(exec_info);
if (ret != RT_EOK)
{
return ret;
}
ret = elf_load_phdr(exec_info);
if (ret != RT_EOK)
{
return ret;
}
return ret;
}
static int elf_file_load(elf_load_info_t *load_info)
{
int ret;
ret = elf_load_app(&load_info->exec_info);
if (ret != RT_EOK)
{
goto OUT;
}
ret = elf_load_interp(load_info);
if (ret != RT_EOK)
{
goto OUT;
}
ret = elf_load_segment(load_info);
if (ret != RT_EOK)
{
goto OUT;
}
OUT:
elf_load_deinit(load_info);
return ret;
}
int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_addr, size_t addr_size,
struct process_aux *aux)
{
elf_load_info_t load_info = { 0 };
int len;
int ret;
if (filename == RT_NULL)
{
LOG_E("%s : file is NULL", __func__);
return -RT_ERROR;
}
len = rt_strlen(filename);
if (len < ELF_PATH_MIN || len > ELF_PATH_MAX)
{
LOG_E("%s : file length (%d) invalid", __func__, len);
return -RT_ERROR;
}
load_info.exec_info.filename = rt_malloc(len + 1);
if (!load_info.exec_info.filename)
{
LOG_E("%s : alloc filename failed", __func__, len);
return -RT_ERROR;
}
else
{
rt_memset(load_info.exec_info.filename, 0, len + 1);
rt_strncpy(load_info.exec_info.filename, filename, len);
}
load_info.lwp = lwp;
load_info.aux = aux;
load_info.exec_info.fd = ELF_INVALID_FD;
load_info.interp_info.fd = ELF_INVALID_FD;
load_info.load_addr = (rt_ubase_t)load_addr;
/* copy file name to process name */
rt_strncpy(lwp->cmd, filename, RT_NAME_MAX);
ret = elf_file_load(&load_info);
if (ret != RT_EOK)
{
LOG_E("%s : elf_file_load error, ret : %d", __func__, ret);
return ret;
}
return RT_EOK;
}
#endif

View File

@ -427,7 +427,6 @@ static int _ipc_msg_fd_new(void *file)
#endif
return fd;
}

View File

@ -700,7 +700,7 @@ pid_t waitpid(pid_t pid, int *status, int options)
/* delete from sibling list of its parent */
struct rt_lwp **lwp_node;
*status = lwp->lwp_ret;
lwp_data_put(this_lwp, status, &lwp->lwp_ret, sizeof(*status));
lwp_node = &this_lwp->first_child;
while (*lwp_node != lwp)
{

View File

@ -156,7 +156,6 @@ static int _lwp_shmget(size_t key, size_t size, int create)
p->mem_obj.on_varea_open = on_shm_varea_open;
p->mem_obj.on_varea_close = on_shm_varea_close;
p->mem_obj.hint_free = NULL;
p->mem_obj.on_page_offload = NULL;
/* then insert it into the balancing binary tree */
node_key = (struct lwp_avl_struct *)rt_malloc(sizeof(struct lwp_avl_struct) * 2);

View File

@ -1305,12 +1305,41 @@ rt_base_t sys_brk(void *addr)
void *sys_mmap2(void *addr, size_t length, int prot,
int flags, int fd, size_t pgoffset)
{
return lwp_mmap2(addr, length, prot, flags, fd, pgoffset);
sysret_t rc = 0;
long offset = 0;
/* align the user address down to a page boundary */
if ((rt_base_t)addr & ARCH_PAGE_MASK)
{
if (flags & MAP_FIXED)
rc = -EINVAL;
else
{
offset = (char *)addr - (char *)RT_ALIGN_DOWN((rt_base_t)addr, ARCH_PAGE_SIZE);
length += offset;
addr = (void *)RT_ALIGN_DOWN((rt_base_t)addr, ARCH_PAGE_SIZE);
}
}
if (rc == 0)
{
/* normalize the parameters: fd == -1 and MAP_ANONYMOUS have the same effect */
if (fd == -1 || flags & MAP_ANONYMOUS)
{
fd = -1;
/* MAP_SHARED has no effect here and is simply dropped */
flags &= ~MAP_SHARED;
flags |= MAP_PRIVATE | MAP_ANONYMOUS;
}
rc = (sysret_t)lwp_mmap2(lwp_self(), addr, length, prot, flags, fd, pgoffset);
}
return (char *)rc + offset;
}
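A worked example of the alignment fix-up above, assuming 4 KiB pages:

/* worked example: addr = 0x10000123, length = 0x100, MAP_FIXED not set:
 *   offset = 0x123
 *   addr   -> 0x10000000 (rounded down)
 *   length -> 0x223      (grown by the offset)
 * lwp_mmap2() maps the aligned region and the syscall returns
 * (mapped_base + 0x123), so the caller keeps its sub-page view;
 * with MAP_FIXED the same input fails with -EINVAL instead. */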
sysret_t sys_munmap(void *addr, size_t length)
{
return lwp_munmap(addr);
return lwp_munmap(lwp_self(), addr, length);
}
void *sys_mremap(void *old_address, size_t old_size,
@ -1999,17 +2028,6 @@ rt_weak long sys_clone(void *arg[])
return _sys_clone(arg);
}
int lwp_dup_user(rt_varea_t varea, void *arg);
static int _copy_process(struct rt_lwp *dest_lwp, struct rt_lwp *src_lwp)
{
int err;
dest_lwp->lwp_obj->source = src_lwp->aspace;
err = rt_aspace_traversal(src_lwp->aspace, lwp_dup_user, dest_lwp);
dest_lwp->lwp_obj->source = NULL;
return err;
}
static void lwp_struct_copy(struct rt_lwp *dst, struct rt_lwp *src)
{
#ifdef ARCH_MM_MMU
@ -2106,8 +2124,8 @@ sysret_t _sys_fork(void)
self_lwp = lwp_self();
/* copy process */
if (_copy_process(lwp, self_lwp) != 0)
/* copy address space of process from this proc to forked one */
if (lwp_fork_aspace(lwp, self_lwp) != 0)
{
SET_ERRNO(ENOMEM);
goto fail;
@ -4222,13 +4240,27 @@ sysret_t sys_getaddrinfo(const char *nodename,
SET_ERRNO(EFAULT);
goto exit;
}
#endif
k_nodename = (char *)kmem_get(len + 1);
if (!k_nodename)
{
SET_ERRNO(ENOMEM);
goto exit;
}
if (lwp_get_from_user(k_nodename, (void *)nodename, len + 1) != len + 1)
{
SET_ERRNO(EFAULT);
goto exit;
}
#else
k_nodename = rt_strdup(nodename);
if (!k_nodename)
{
SET_ERRNO(ENOMEM);
goto exit;
}
#endif
}
if (servname)
{
@ -4239,13 +4271,27 @@ sysret_t sys_getaddrinfo(const char *nodename,
SET_ERRNO(EFAULT);
goto exit;
}
#endif
k_servname = (char *)kmem_get(len + 1);
if (!k_servname)
{
SET_ERRNO(ENOMEM);
goto exit;
}
if (lwp_get_from_user(k_servname, (void *)servname, len + 1) < 0)
{
SET_ERRNO(EFAULT);
goto exit;
}
#else
k_servname = rt_strdup(servname);
if (!k_servname)
{
SET_ERRNO(ENOMEM);
goto exit;
}
#endif
}
if (hints)
@ -4300,15 +4346,28 @@ exit:
{
ret = GET_ERRNO();
}
#ifdef ARCH_MM_MMU
if (k_nodename)
{
kmem_put(k_nodename);
}
#else
if (k_nodename)
{
rt_free(k_nodename);
}
#endif
#ifdef ARCH_MM_MMU
if (k_servname)
{
kmem_put(k_servname);
}
#else
if (k_servname)
{
rt_free(k_servname);
}
#endif
if (k_hints)
{
rt_free(k_hints);
@ -4324,7 +4383,7 @@ sysret_t sys_gethostbyname2_r(const char *name, int af, struct hostent *ret,
{
int ret_val = -1;
int sal_ret = -1 , sal_err = -1;
struct hostent sal_he;
struct hostent sal_he, sal_tmp;
struct hostent *sal_result = NULL;
char *sal_buf = NULL;
char *k_name = NULL;
@ -4354,22 +4413,35 @@ sysret_t sys_gethostbyname2_r(const char *name, int af, struct hostent *ret,
SET_ERRNO(EFAULT);
goto __exit;
}
#endif
*result = ret;
sal_buf = (char *)malloc(HOSTENT_BUFSZ);
if (sal_buf == NULL)
k_name = (char *)kmem_get(len + 1);
if (!k_name)
{
SET_ERRNO(ENOMEM);
goto __exit;
}
if (lwp_get_from_user(k_name, (void *)name, len + 1) < 0)
{
SET_ERRNO(EFAULT);
goto __exit;
}
#else
k_name = rt_strdup(name);
if (k_name == NULL)
{
SET_ERRNO(ENOMEM);
goto __exit;
}
#endif
*result = ret;
sal_buf = (char *)malloc(HOSTENT_BUFSZ);
if (sal_buf == NULL)
{
SET_ERRNO(ENOMEM);
goto __exit;
}
/* get host by name in SAL */
sal_ret = sal_gethostbyname_r(k_name, &sal_he, sal_buf, HOSTENT_BUFSZ, &sal_result, &sal_err);
@ -4386,6 +4458,28 @@ sysret_t sys_gethostbyname2_r(const char *name, int af, struct hostent *ret,
}
cnt = index + 1;
#ifdef ARCH_MM_MMU
/* update user space hostent */
lwp_put_to_user(buf, k_name, buflen - (ptr - buf));
lwp_memcpy(&sal_tmp, &sal_he, sizeof(sal_he));
sal_tmp.h_name = ptr;
ptr += rt_strlen(k_name);
sal_tmp.h_addr_list = (char**)ptr;
ptr += cnt * sizeof(char *);
index = 0;
while (sal_he.h_addr_list[index] != NULL)
{
sal_tmp.h_addr_list[index] = ptr;
lwp_memcpy(ptr, sal_he.h_addr_list[index], sal_he.h_length);
ptr += sal_he.h_length;
index++;
}
sal_tmp.h_addr_list[index] = NULL;
lwp_put_to_user(ret, &sal_tmp, sizeof(sal_tmp));
#else
/* update user space hostent */
ret->h_addrtype = sal_he.h_addrtype;
ret->h_length = sal_he.h_length;
@ -4407,9 +4501,9 @@ sysret_t sys_gethostbyname2_r(const char *name, int af, struct hostent *ret,
index++;
}
ret->h_addr_list[index] = NULL;
}
#endif
ret_val = 0;
}
__exit:
if (ret_val < 0)
@ -4422,10 +4516,17 @@ __exit:
{
free(sal_buf);
}
#ifdef ARCH_MM_MMU
if (k_name)
{
kmem_put(k_name);
}
#else
if (k_name)
{
free(k_name);
}
#endif
return ret_val;
}

View File

@ -11,7 +11,10 @@
* 2021-02-19 lizhirui add riscv64 support for lwp_user_accessable and lwp_get_from_user
* 2021-06-07 lizhirui modify user space bound check
* 2022-12-25 wangxiaoyao adapt to new mm
* 2023-08-12 Shell Fix parameter passing of lwp_mmap()/lwp_munmap()
* 2023-08-29 Shell Add API accessible()/data_get()/data_set()/data_put()
* 2023-09-13 Shell Add lwp_memcpy and support run-time choice of memcpy base on memory attr
* 2023-09-19 Shell add lwp_user_memory_remap_to_kernel
*/
#include <rtthread.h>
@ -36,28 +39,104 @@
#include "libc_musl.h"
#endif
#define DBG_TAG "LwP"
#define DBG_LVL DBG_LOG
#define DBG_TAG "LwP.mman"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <stdlib.h>
#define STACK_OBJ _null_object
static void _init_lwp_objs(struct rt_lwp_objs *lwp_objs, rt_aspace_t aspace);
static const char *_null_get_name(rt_varea_t varea)
{
return "null";
}
static void _null_page_fault(struct rt_varea *varea,
struct rt_aspace_fault_msg *msg)
{
static void *null_page;
if (!null_page)
{
null_page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
if (null_page)
memset(null_page, 0, ARCH_PAGE_SIZE);
else
return;
}
msg->response.status = MM_FAULT_STATUS_OK;
msg->response.size = ARCH_PAGE_SIZE;
msg->response.vaddr = null_page;
}
static rt_err_t _null_shrink(rt_varea_t varea, void *new_start, rt_size_t size)
{
return RT_EOK;
}
static rt_err_t _null_split(struct rt_varea *existed, void *unmap_start, rt_size_t unmap_len, struct rt_varea *subset)
{
return RT_EOK;
}
static rt_err_t _null_expand(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
{
return RT_EOK;
}
static void _null_page_read(struct rt_varea *varea, struct rt_aspace_io_msg *msg)
{
void *dest = msg->buffer_vaddr;
memset(dest, 0, ARCH_PAGE_SIZE);
msg->response.status = MM_FAULT_STATUS_OK;
return ;
}
static void _null_page_write(struct rt_varea *varea, struct rt_aspace_io_msg *msg)
{
/* write operation is not allowed */
msg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
return ;
}
static struct rt_mem_obj _null_object = {
.get_name = _null_get_name,
.hint_free = RT_NULL,
.on_page_fault = _null_page_fault,
.page_read = _null_page_read,
.page_write = _null_page_write,
.on_varea_expand = _null_expand,
.on_varea_shrink = _null_shrink,
.on_varea_split = _null_split,
};
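Seen from user space, the null object yields ordinary anonymous-mapping semantics. A hedged POSIX-style sketch (mmap() and assert() are assumed to be available to the process; the write path is handled elsewhere):

#include <sys/mman.h>
#include <assert.h>

static void anon_zero_demo(void)
{
    char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    assert(p != MAP_FAILED);
    assert(p[0] == 0); /* a read fault can be served by the shared null page */
    p[0] = 1;          /* a write must land on a private page; that path is  */
                       /* the job of the anon/COW object, not _null_object   */
}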
int lwp_user_space_init(struct rt_lwp *lwp, rt_bool_t is_fork)
{
void *stk_addr;
int err = -RT_ENOMEM;
const size_t flags = MMF_MAP_PRIVATE;
lwp->lwp_obj = rt_malloc(sizeof(struct rt_lwp_objs));
if (lwp->lwp_obj)
{
_init_lwp_objs(lwp->lwp_obj, lwp->aspace);
err = arch_user_space_init(lwp);
if (!is_fork && err == RT_EOK)
if (err == RT_EOK)
{
void *addr = (void *)USER_STACK_VSTART;
err = rt_aspace_map(lwp->aspace, &addr,
_init_lwp_objs(lwp->lwp_obj, lwp->aspace);
if (!is_fork)
{
stk_addr = (void *)USER_STACK_VSTART;
err = rt_aspace_map(lwp->aspace, &stk_addr,
USER_STACK_VEND - USER_STACK_VSTART,
MMU_MAP_U_RWCB, 0, &lwp->lwp_obj->mem_obj, 0);
MMU_MAP_U_RWCB, flags, &STACK_OBJ, 0);
}
}
}
@ -91,7 +170,7 @@ void lwp_unmap_user_space(struct rt_lwp *lwp)
rt_free(lwp->lwp_obj);
}
static const char *user_get_name(rt_varea_t varea)
static const char *_user_get_name(rt_varea_t varea)
{
char *name;
if (varea->flag & MMF_TEXT)
@ -184,12 +263,9 @@ static void _init_lwp_objs(struct rt_lwp_objs *lwp_objs, rt_aspace_t aspace)
* provide identical memory. This is implemented by lwp_objs->source.
*/
lwp_objs->source = NULL;
lwp_objs->mem_obj.get_name = user_get_name;
lwp_objs->mem_obj.hint_free = NULL;
memcpy(&lwp_objs->mem_obj, &rt_mm_dummy_mapper, sizeof(struct rt_mem_obj));
lwp_objs->mem_obj.get_name = _user_get_name;
lwp_objs->mem_obj.on_page_fault = _user_do_page_fault;
lwp_objs->mem_obj.on_page_offload = rt_mm_dummy_mapper.on_page_offload;
lwp_objs->mem_obj.on_varea_open = rt_mm_dummy_mapper.on_varea_open;
lwp_objs->mem_obj.on_varea_close = rt_mm_dummy_mapper.on_varea_close;
}
}
@ -198,14 +274,14 @@ static void *_lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size,
{
void *va = map_va;
int ret = 0;
size_t flags = MMF_PREFETCH;
rt_size_t flags = MMF_PREFETCH;
if (text)
flags |= MMF_TEXT;
if (va != RT_NULL)
flags |= MMF_MAP_FIXED;
rt_mem_obj_t mem_obj = &lwp->lwp_obj->mem_obj;
ret = rt_aspace_map(lwp->aspace, &va, map_size, MMU_MAP_U_RWCB, flags,
mem_obj, 0);
ret = rt_aspace_map_private(lwp->aspace, &va, map_size, MMU_MAP_U_RWCB, flags);
if (ret != RT_EOK)
{
va = RT_NULL;
@ -223,98 +299,17 @@ int lwp_unmap_user(struct rt_lwp *lwp, void *va)
return err;
}
static void _dup_varea(rt_varea_t varea, struct rt_lwp *src_lwp,
rt_aspace_t dst)
{
char *vaddr = varea->start;
char *vend = vaddr + varea->size;
if (vaddr < (char *)USER_STACK_VSTART || vaddr >= (char *)USER_STACK_VEND)
{
while (vaddr != vend)
{
void *paddr;
paddr = lwp_v2p(src_lwp, vaddr);
if (paddr != ARCH_MAP_FAILED)
{
rt_aspace_load_page(dst, vaddr, 1);
}
vaddr += ARCH_PAGE_SIZE;
}
}
else
{
while (vaddr != vend)
{
vend -= ARCH_PAGE_SIZE;
void *paddr;
paddr = lwp_v2p(src_lwp, vend);
if (paddr != ARCH_MAP_FAILED)
{
rt_aspace_load_page(dst, vend, 1);
}
else
{
break;
}
}
}
}
int lwp_dup_user(rt_varea_t varea, void *arg)
/** fork the src_lwp->aspace in current */
int lwp_fork_aspace(struct rt_lwp *dest_lwp, struct rt_lwp *src_lwp)
{
int err;
struct rt_lwp *self_lwp = lwp_self();
struct rt_lwp *new_lwp = (struct rt_lwp *)arg;
void *pa = RT_NULL;
void *va = RT_NULL;
rt_mem_obj_t mem_obj = varea->mem_obj;
if (!mem_obj)
err = rt_aspace_fork(&src_lwp->aspace, &dest_lwp->aspace);
if (!err)
{
/* duplicate a physical mapping */
pa = lwp_v2p(self_lwp, (void *)varea->start);
RT_ASSERT(pa != ARCH_MAP_FAILED);
struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
.limit_range_size = new_lwp->aspace->size,
.limit_start = new_lwp->aspace->start,
.prefer = varea->start,
.map_size = varea->size};
err = rt_aspace_map_phy(new_lwp->aspace, &hint, varea->attr,
MM_PA_TO_OFF(pa), &va);
if (err != RT_EOK)
{
LOG_W("%s: aspace map failed at %p with size %p", __func__,
varea->start, varea->size);
/* do a explicit aspace switch if the page table is changed */
lwp_aspace_switch(rt_thread_self());
}
}
else
{
/* duplicate a mem_obj backing mapping */
va = varea->start;
err = rt_aspace_map(new_lwp->aspace, &va, varea->size, varea->attr,
varea->flag, &new_lwp->lwp_obj->mem_obj,
varea->offset);
if (err != RT_EOK)
{
LOG_W("%s: aspace map failed at %p with size %p", __func__,
varea->start, varea->size);
}
else
{
/* loading page frames for !MMF_PREFETCH varea */
if (!(varea->flag & MMF_PREFETCH))
{
_dup_varea(varea, self_lwp, new_lwp->aspace);
}
}
}
if (va != (void *)varea->start)
{
return -1;
}
return 0;
return err;
}
int lwp_unmap_user_phy(struct rt_lwp *lwp, void *va)
@ -364,6 +359,10 @@ static inline size_t _flags_to_attr(size_t flags)
static inline mm_flag_t _flags_to_aspace_flag(size_t flags)
{
mm_flag_t mm_flag = 0;
if (flags & LWP_MAP_FLAG_MAP_FIXED)
mm_flag |= MMF_MAP_FIXED;
if (flags & LWP_MAP_FLAG_PREFETCH)
mm_flag |= MMF_PREFETCH;
return mm_flag;
}
@ -372,26 +371,17 @@ static rt_varea_t _lwp_map_user_varea(struct rt_lwp *lwp, void *map_va, size_t m
{
void *va = map_va;
int ret = 0;
rt_mem_obj_t mem_obj = &lwp->lwp_obj->mem_obj;
rt_varea_t varea;
rt_varea_t varea = RT_NULL;
mm_flag_t mm_flags;
size_t attr;
varea = rt_malloc(sizeof(*varea));
if (varea)
{
attr = _flags_to_attr(flags);
mm_flags = _flags_to_aspace_flag(flags);
ret = rt_aspace_map_static(lwp->aspace, varea, &va, map_size,
attr, mm_flags, mem_obj, 0);
/* let aspace handle the free of varea */
varea->flag &= ~MMF_STATIC_ALLOC;
/* don't apply auto fetch on this */
varea->data = (void *)NO_AUTO_FETCH;
}
else
ret = rt_aspace_map_private(lwp->aspace, &va, map_size,
attr, mm_flags);
if (ret == RT_EOK)
{
ret = -RT_ENOMEM;
varea = rt_aspace_query(lwp->aspace, va);
}
if (ret != RT_EOK)
@ -405,7 +395,6 @@ static rt_varea_t _lwp_map_user_varea(struct rt_lwp *lwp, void *map_va, size_t m
static rt_varea_t _map_user_varea_ext(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags)
{
rt_varea_t varea = RT_NULL;
size_t offset = 0;
if (!map_size)
@ -417,9 +406,7 @@ static rt_varea_t _map_user_varea_ext(struct rt_lwp *lwp, void *map_va, size_t m
map_size &= ~ARCH_PAGE_MASK;
map_va = (void *)((size_t)map_va & ~ARCH_PAGE_MASK);
varea = _lwp_map_user_varea(lwp, map_va, map_size, flags);
return varea;
return _lwp_map_user_varea(lwp, map_va, map_size, flags);
}
rt_varea_t lwp_map_user_varea_ext(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags)
@ -456,11 +443,14 @@ void *lwp_map_user_phy(struct rt_lwp *lwp, void *map_va, void *map_pa,
map_size &= ~ARCH_PAGE_MASK;
map_pa = (void *)((size_t)map_pa & ~ARCH_PAGE_MASK);
struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
struct rt_mm_va_hint hint = {.flags = 0,
.limit_range_size = lwp->aspace->size,
.limit_start = lwp->aspace->start,
.prefer = map_va,
.map_size = map_size};
if (map_va != RT_NULL)
hint.flags |= MMF_MAP_FIXED;
rt_size_t attr = cached ? MMU_MAP_U_RWCB : MMU_MAP_U_RW;
err =
@ -481,56 +471,160 @@ void *lwp_map_user_phy(struct rt_lwp *lwp, void *map_va, void *map_pa,
rt_base_t lwp_brk(void *addr)
{
rt_base_t ret = -1;
rt_varea_t varea = RT_NULL;
struct rt_lwp *lwp = RT_NULL;
rt_mm_lock();
lwp = rt_thread_self()->lwp;
if ((size_t)addr <= lwp->end_heap)
{
ret = (rt_base_t)lwp->end_heap;
}
else
{
size_t size = 0;
void *va = RT_NULL;
if ((size_t)addr <= USER_HEAP_VEND)
lwp = lwp_self();
if ((size_t)addr == RT_NULL)
{
size = (((size_t)addr - lwp->end_heap) + ARCH_PAGE_SIZE - 1) &
~ARCH_PAGE_MASK;
va = lwp_map_user(lwp, (void *)lwp->end_heap, size, 0);
addr = (char *)lwp->end_heap + 1;
}
if (va)
if ((size_t)addr <= lwp->end_heap && (size_t)addr > USER_HEAP_VADDR)
{
lwp->end_heap += size;
ret = (size_t)addr;
}
else if ((size_t)addr <= USER_HEAP_VEND)
{
size = RT_ALIGN((size_t)addr - lwp->end_heap, ARCH_PAGE_SIZE);
varea = lwp_map_user_varea_ext(lwp, (void *)lwp->end_heap, size, LWP_MAP_FLAG_PREFETCH);
if (varea)
{
lwp->end_heap = (long)(varea->start + varea->size);
ret = lwp->end_heap;
}
}
rt_mm_unlock();
return ret;
}
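A worked example of the new break-growing path, with a 4 KiB page size and made-up addresses:

/* worked example:
 *   end_heap = 0x80000000, caller requests brk(0x80000123)
 *   size     = RT_ALIGN(0x123, 0x1000) = 0x1000
 *   one prefetched page is mapped at 0x80000000
 *   end_heap -> 0x80001000, which is also the value returned
 * a request at or below the current break (and above USER_HEAP_VADDR)
 * returns the requested address without mapping anything; brk(NULL) is
 * rewritten to end_heap + 1 and therefore takes the growth path. */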
void *lwp_mmap2(void *addr, size_t length, int prot, int flags, int fd,
off_t pgoffset)
rt_inline rt_mem_obj_t _get_mmap_obj(struct rt_lwp *lwp)
{
void *ret = (void *)-1;
return &_null_object;
}
if (fd == -1)
{
rt_inline rt_bool_t _memory_threshold_ok(void)
{
#define GUARDIAN_BITS (10)
size_t total, free;
ret = lwp_map_user(lwp_self(), addr, length, 0);
if (ret)
rt_page_get_info(&total, &free);
if (free * (0x1000) < 0x100000)
{
if ((flags & MAP_ANONYMOUS) != 0)
{
rt_memset(ret, 0, length);
LOG_I("%s: low of system memory", __func__);
return RT_FALSE;
}
return RT_TRUE;
}
rt_inline long _uflag_to_kernel(long flag)
{
flag &= ~MMF_MAP_FIXED;
flag &= ~MMF_MAP_PRIVATE;
flag &= ~MMF_MAP_PRIVATE_DONT_SYNC;
return flag;
}
rt_inline long _uattr_to_kernel(long attr)
{
/* Warning: be careful when the user attribute is not writable */
return attr;
}
static void _prefetch_mmap(rt_aspace_t aspace, void *addr, long size)
{
struct rt_aspace_fault_msg msg;
msg.fault_op = MM_FAULT_OP_WRITE;
msg.fault_type = MM_FAULT_TYPE_PAGE_FAULT;
for (char *base = addr; size > 0; base += ARCH_PAGE_SIZE, size -= ARCH_PAGE_SIZE)
{
msg.fault_vaddr = base;
msg.off = (long)base >> MM_PAGE_SHIFT;
rt_aspace_fault_try_fix(aspace, &msg);
}
return ;
}
void *lwp_user_memory_remap_to_kernel(rt_lwp_t lwp, void *uaddr, size_t length)
{
long kattr;
long kflag;
long offset_in_mobj;
long offset_in_page;
rt_err_t error;
rt_varea_t uarea;
rt_mem_obj_t mobj;
void *kaddr = 0;
uarea = rt_aspace_query(lwp->aspace, uaddr);
if (uarea)
{
/* set up an identical mapping; align the address down and the length up */
kattr = _uattr_to_kernel(uarea->attr);
kflag = _uflag_to_kernel(uarea->flag);
offset_in_mobj = uarea->offset + ((long)uaddr - (long)uarea->start) / ARCH_PAGE_SIZE;
mobj = uarea->mem_obj;
offset_in_page = (long)uaddr & ARCH_PAGE_MASK;
length = RT_ALIGN(length + offset_in_page, ARCH_PAGE_SIZE);
error = rt_aspace_map(&rt_kernel_space, &kaddr, length, kattr, kflag, mobj, offset_in_mobj);
if (error)
{
LOG_I("%s(length=0x%lx,attr=0x%lx,flags=0x%lx): do map failed", __func__, length, kattr, kflag);
kaddr = 0;
}
else
{
ret = (void *)-1;
/* TODO: {make a memory lock?} */
LOG_D("%s(length=0x%lx,attr=0x%lx,flags=0x%lx,offset=0x%lx) => %p", __func__, length, kattr, kflag, offset_in_mobj, kaddr);
_prefetch_mmap(&rt_kernel_space, kaddr, length);
kaddr += offset_in_page;
}
}
return kaddr;
}
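As the header comment added later in this diff warns, the kernel alias must be unmapped after use. A hedged sketch, assuming the rt_aspace_unmap(aspace, addr) API and illustrative lwp/uaddr/len/payload values:

/* touch a user buffer through a temporary kernel alias */
void *kbuf = lwp_user_memory_remap_to_kernel(lwp, uaddr, len);
if (kbuf)
{
    memcpy(kbuf, payload, len); /* access via the kernel alias */
    /* drop the alias after use; align back down because the returned
     * pointer carries the original in-page offset */
    rt_aspace_unmap(&rt_kernel_space,
                    (void *)((size_t)kbuf & ~ARCH_PAGE_MASK));
}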
void *lwp_mmap2(struct rt_lwp *lwp, void *addr, size_t length, int prot,
int flags, int fd, off_t pgoffset)
{
rt_err_t rc;
rt_size_t k_attr;
rt_size_t k_flags;
rt_size_t k_offset;
rt_aspace_t uspace;
rt_mem_obj_t mem_obj;
void *ret = 0;
LOG_D("%s(addr=0x%lx,length=%ld,fd=%d)", __func__, addr, length, fd);
if (fd == -1)
{
/* anonymous mapping: refuse early when system memory is nearly exhausted */
if (!_memory_threshold_ok())
return (void *)-ENOMEM;
k_offset = MM_PA_TO_OFF(addr);
k_flags = lwp_user_mm_flag_to_kernel(flags) | MMF_MAP_PRIVATE;
k_attr = lwp_user_mm_attr_to_kernel(prot);
uspace = lwp->aspace;
length = RT_ALIGN(length, ARCH_PAGE_SIZE);
mem_obj = _get_mmap_obj(lwp);
rc = rt_aspace_map(uspace, &addr, length, k_attr, k_flags, mem_obj, k_offset);
if (rc == RT_EOK)
{
ret = addr;
}
else
{
ret = (void *)lwp_errno_to_posix(rc);
}
}
else
@ -538,7 +632,7 @@ void *lwp_mmap2(void *addr, size_t length, int prot, int flags, int fd,
struct dfs_file *d;
d = fd_get(fd);
if (d && d->vnode->type == FT_DEVICE)
if (d)
{
struct dfs_mmap2_args mmap2;
@ -548,26 +642,32 @@ void *lwp_mmap2(void *addr, size_t length, int prot, int flags, int fd,
mmap2.flags = flags;
mmap2.pgoffset = pgoffset;
mmap2.ret = (void *)-1;
mmap2.lwp = lwp;
if (dfs_file_mmap2(d, &mmap2) == 0)
rc = dfs_file_mmap2(d, &mmap2);
if (rc == RT_EOK)
{
ret = mmap2.ret;
}
else
{
ret = (void *)lwp_errno_to_posix(rc);
}
}
}
if ((long)ret <= 0)
LOG_D("%s() => %ld", __func__, ret);
return ret;
}
int lwp_munmap(void *addr)
int lwp_munmap(struct rt_lwp *lwp, void *addr, size_t length)
{
int ret = 0;
int ret;
rt_mm_lock();
ret = lwp_unmap_user(lwp_self(), addr);
rt_mm_unlock();
return ret;
RT_ASSERT(lwp);
ret = rt_aspace_unmap_range(lwp->aspace, addr, length);
return lwp_errno_to_posix(ret);
}
size_t lwp_get_from_user(void *dst, void *src, size_t size)
@ -725,19 +825,9 @@ int lwp_user_accessible_ext(struct rt_lwp *lwp, void *addr, size_t size)
len = size;
}
tmp_addr = lwp_v2p(lwp, addr_start);
if (tmp_addr == ARCH_MAP_FAILED)
if (tmp_addr == ARCH_MAP_FAILED &&
!rt_aspace_query(lwp->aspace, addr_start))
{
if ((rt_ubase_t)addr_start >= USER_STACK_VSTART && (rt_ubase_t)addr_start < USER_STACK_VEND)
{
struct rt_aspace_fault_msg msg = {
.fault_op = MM_FAULT_OP_WRITE,
.fault_type = MM_FAULT_TYPE_PAGE_FAULT,
.fault_vaddr = addr_start,
};
if (!rt_aspace_fault_try_fix(lwp->aspace, &msg))
return RT_FALSE;
}
else
return RT_FALSE;
}
addr_start = (void *)((char *)addr_start + len);
@ -752,135 +842,234 @@ int lwp_user_accessable(void *addr, size_t size)
return lwp_user_accessible_ext(lwp_self(), addr, size);
}
/* src is in mmu_info space, dst is in current thread space */
#define ALIGNED(addr) (!((rt_size_t)(addr) & ARCH_PAGE_MASK))
/* src is in lwp address space, dst is in current thread space */
size_t lwp_data_get(struct rt_lwp *lwp, void *dst, void *src, size_t size)
{
size_t copy_len = 0;
void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
void *tmp_dst = RT_NULL, *tmp_src = RT_NULL;
char *temp_page = 0;
char *dst_iter, *dst_next_page;
char *src_copy_end, *src_iter, *src_iter_aligned;
if (!size || !dst)
{
return 0;
}
tmp_dst = dst;
addr_start = src;
addr_end = (void *)((char *)src + size);
next_page =
(void *)(((size_t)addr_start + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
dst_iter = dst;
src_iter = src;
src_copy_end = src + size;
dst_next_page =
(char *)(((size_t)src_iter + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
do
{
size_t len = (char *)next_page - (char *)addr_start;
if (size < len)
size_t bytes_to_copy = (char *)dst_next_page - (char *)src_iter;
if (bytes_to_copy > size)
{
len = size;
bytes_to_copy = size;
}
tmp_src = lwp_v2p(lwp, addr_start);
if (tmp_src == ARCH_MAP_FAILED)
if (ALIGNED(src_iter) && bytes_to_copy == ARCH_PAGE_SIZE)
{
/* get page to kernel buffer */
if (rt_aspace_page_get(lwp->aspace, src_iter, dst_iter))
break;
}
tmp_src = (void *)((char *)tmp_src - PV_OFFSET);
rt_memcpy(tmp_dst, tmp_src, len);
tmp_dst = (void *)((char *)tmp_dst + len);
addr_start = (void *)((char *)addr_start + len);
size -= len;
next_page = (void *)((char *)next_page + ARCH_PAGE_SIZE);
copy_len += len;
} while (addr_start < addr_end);
else
{
if (!temp_page)
temp_page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
if (!temp_page)
break;
src_iter_aligned = (char *)((long)src_iter & ~ARCH_PAGE_MASK);
if (rt_aspace_page_get(lwp->aspace, src_iter_aligned, temp_page))
break;
memcpy(dst_iter, temp_page + (src_iter - src_iter_aligned), bytes_to_copy);
}
dst_iter = dst_iter + bytes_to_copy;
src_iter = src_iter + bytes_to_copy;
size -= bytes_to_copy;
dst_next_page = (void *)((char *)dst_next_page + ARCH_PAGE_SIZE);
copy_len += bytes_to_copy;
} while (src_iter < src_copy_end);
if (temp_page)
rt_pages_free(temp_page, 0);
return copy_len;
}
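Typical call pattern for the rewritten copy-in helper; a hedged sketch in which uptr is an address inside the target process and fetch_user_timespec() is hypothetical:

/* copy a fixed-size struct in from the target process */
static int fetch_user_timespec(struct rt_lwp *lwp, void *uptr, struct timespec *kts)
{
    if (lwp_data_get(lwp, kts, uptr, sizeof(*kts)) != sizeof(*kts))
        return -EFAULT;    /* user range not fully accessible */
    return 0;
}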
/* dst is in kernel space, src is in current thread space */
/* dst is in lwp address space, src is in current thread space */
size_t lwp_data_put(struct rt_lwp *lwp, void *dst, void *src, size_t size)
{
size_t copy_len = 0;
void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
void *tmp_dst = RT_NULL, *tmp_src = RT_NULL;
char *temp_page = 0;
char *dst_iter, *dst_iter_aligned, *dst_next_page;
char *dst_put_end, *src_iter;
if (!size || !dst)
{
return 0;
}
tmp_src = src;
addr_start = dst;
addr_end = (void *)((char *)dst + size);
next_page =
(void *)(((size_t)addr_start + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
src_iter = src;
dst_iter = dst;
dst_put_end = dst + size;
dst_next_page =
(char *)(((size_t)dst_iter + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
do
{
size_t len = (char *)next_page - (char *)addr_start;
if (size < len)
size_t bytes_to_put = (char *)dst_next_page - (char *)dst_iter;
if (bytes_to_put > size)
{
len = size;
bytes_to_put = size;
}
tmp_dst = lwp_v2p(lwp, addr_start);
if (tmp_dst == ARCH_MAP_FAILED)
if (ALIGNED(dst_iter) && bytes_to_put == ARCH_PAGE_SIZE)
{
/* write to page in kernel */
if (rt_aspace_page_put(lwp->aspace, dst_iter, src_iter))
break;
}
tmp_dst = (void *)((char *)tmp_dst - PV_OFFSET);
rt_memcpy(tmp_dst, tmp_src, len);
tmp_src = (void *)((char *)tmp_src + len);
addr_start = (void *)((char *)addr_start + len);
size -= len;
next_page = (void *)((char *)next_page + ARCH_PAGE_SIZE);
copy_len += len;
} while (addr_start < addr_end);
else
{
if (!temp_page)
temp_page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
if (!temp_page)
break;
dst_iter_aligned = (void *)((long)dst_iter & ~ARCH_PAGE_MASK);
if (rt_aspace_page_get(lwp->aspace, dst_iter_aligned, temp_page))
break;
memcpy(temp_page + (dst_iter - dst_iter_aligned), src_iter, bytes_to_put);
if (rt_aspace_page_put(lwp->aspace, dst_iter_aligned, temp_page))
break;
}
src_iter = src_iter + bytes_to_put;
dst_iter = dst_iter + bytes_to_put;
size -= bytes_to_put;
dst_next_page = dst_next_page + ARCH_PAGE_SIZE;
copy_len += bytes_to_put;
} while (dst_iter < dst_put_end);
if (temp_page)
rt_pages_free(temp_page, 0);
return copy_len;
}
/* Set N bytes of S to C */
size_t lwp_data_set(struct rt_lwp *lwp, void *dst, int byte, size_t size)
{
size_t copy_len = 0;
char *temp_page = 0;
char *dst_iter, *dst_iter_aligned, *dst_next_page;
char *dst_put_end;
if (!size || !dst)
{
return 0;
}
dst_iter = dst;
dst_put_end = dst + size;
dst_next_page =
(char *)(((size_t)dst_iter + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
temp_page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
if (temp_page)
{
do
{
size_t bytes_to_put = (char *)dst_next_page - (char *)dst_iter;
if (bytes_to_put > size)
{
bytes_to_put = size;
}
dst_iter_aligned = (void *)((long)dst_iter & ~ARCH_PAGE_MASK);
if (!ALIGNED(dst_iter) || bytes_to_put != ARCH_PAGE_SIZE)
if (rt_aspace_page_get(lwp->aspace, dst_iter_aligned, temp_page))
break;
memset(temp_page + (dst_iter - dst_iter_aligned), byte, bytes_to_put);
if (rt_aspace_page_put(lwp->aspace, dst_iter_aligned, temp_page))
break;
dst_iter = dst_iter + bytes_to_put;
size -= bytes_to_put;
dst_next_page = dst_next_page + ARCH_PAGE_SIZE;
copy_len += bytes_to_put;
} while (dst_iter < dst_put_end);
rt_pages_free(temp_page, 0);
}
return copy_len;
}
size_t lwp_user_strlen_ext(struct rt_lwp *lwp, const char *s)
{
int len = 0;
char *new_buf = RT_NULL;
char *temp_buf = RT_NULL;
void *addr_start = RT_NULL;
int size = 0;
int err = 0;
int get_bytes = 0;
int index = 0;
if (s == RT_NULL)
return 0;
addr_start = (void *)s;
new_buf = rt_malloc(ARCH_PAGE_SIZE);
if (lwp == RT_NULL)
{
LOG_W("%s: lwp is NULL", __func__);
return -1;
}
err = lwp_data_get(lwp, new_buf, addr_start, ARCH_PAGE_SIZE);
if (err == 0)
addr_start = (void *)s;
temp_buf = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
if (!temp_buf)
{
rt_free(new_buf);
LOG_W("%s: No memory", __func__);
return -1;
}
while (new_buf[size] != '\0')
get_bytes = lwp_data_get(lwp, temp_buf, addr_start, ARCH_PAGE_SIZE);
if (get_bytes == 0)
{
len ++;
if (size == (ARCH_PAGE_SIZE -1))
{
err = lwp_data_get(lwp, new_buf, addr_start + len, ARCH_PAGE_SIZE);
if (err == 0)
{
rt_free(new_buf);
LOG_I("lwp_data_get(lwp=%p,dst=0x%lx,src=0x%lx,size=0x1000) failed", lwp, temp_buf, addr_start);
rt_pages_free(temp_buf, 0);
return -1;
}
size = 0;
while (temp_buf[index] != '\0')
{
len++;
index++;
if (index == get_bytes)
{
if (get_bytes == ARCH_PAGE_SIZE)
{
get_bytes = lwp_data_get(lwp, temp_buf, addr_start + len, ARCH_PAGE_SIZE);
if (get_bytes == 0)
{
LOG_I("lwp_data_get(lwp=%p,dst=0x%lx,src=0x%lx,size=0x1000): user data unaccessible",
lwp, temp_buf, addr_start);
len = -1;
break;
}
index = 0;
}
else
{
size ++;
LOG_I("lwp_data_get(lwp=%p,dst=0x%lx,src=0x%lx,size=0x1000): user data unaccessible",
lwp, temp_buf, addr_start);
len = -1;
break;
}
}
}
rt_free(new_buf);
rt_pages_free(temp_buf, 0);
return len;
}
@ -894,4 +1083,72 @@ size_t lwp_user_strlen(const char *s)
return lwp_user_strlen_ext(lwp, s);
}
char** lwp_get_command_line_args(struct rt_lwp *lwp)
{
size_t argc = 0;
char** argv = NULL;
int ret;
size_t i;
size_t len;
if (lwp)
{
ret = lwp_data_get(lwp, &argc, lwp->args, sizeof(argc));
if (ret == 0)
{
return RT_NULL;
}
argv = (char**)rt_malloc((argc + 1) * sizeof(char*));
if (argv)
{
for (i = 0; i < argc; i++)
{
char *argvp = NULL;
ret = lwp_data_get(lwp, &argvp, &((char **)lwp->args)[1 + i], sizeof(argvp));
if (ret == 0)
{
lwp_free_command_line_args(argv);
return RT_NULL;
}
len = lwp_user_strlen_ext(lwp, argvp);
if (len > 0)
{
argv[i] = (char*)rt_malloc(len + 1);
ret = lwp_data_get(lwp, argv[i], argvp, len);
if (ret == 0)
{
lwp_free_command_line_args(argv);
return RT_NULL;
}
argv[i][len] = '\0';
}
else
{
argv[i] = NULL;
}
}
argv[argc] = NULL;
}
}
return argv;
}
void lwp_free_command_line_args(char** argv)
{
size_t i;
if (argv)
{
for (i = 0; argv[i]; i++)
{
rt_free(argv[i]);
}
rt_free(argv);
}
}
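Usage sketch for the two helpers above; every returned string and the vector itself are kernel heap copies, hence the mandatory free (dump_args is hypothetical):

static void dump_args(struct rt_lwp *lwp)
{
    char **argv = lwp_get_command_line_args(lwp);

    if (argv)
    {
        for (int i = 0; argv[i]; i++)
            rt_kprintf("argv[%d]: %s\n", i, argv[i]);
        lwp_free_command_line_args(argv); /* frees the strings and the vector */
    }
}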
#endif

View File

@ -7,6 +7,7 @@
* Date Author Notes
* 2019-10-28 Jesven first version
* 2021-02-12 lizhirui add 64-bit support for lwp_brk
* 2023-09-19 Shell add lwp_user_memory_remap_to_kernel
*/
#ifndef __LWP_USER_MM_H__
#define __LWP_USER_MM_H__
@ -27,6 +28,8 @@ extern "C" {
#define LWP_MAP_FLAG_NONE 0x0000
#define LWP_MAP_FLAG_NOCACHE 0x0001
#define LWP_MAP_FLAG_MAP_FIXED 0x00010000ul
#define LWP_MAP_FLAG_PREFETCH 0x00020000ul
/**
* @brief Map files or devices into memory
@ -41,7 +44,7 @@ extern "C" {
* @param pgoffset offset to fd in 4096 bytes unit
* @return void* the address is successful, otherwise return MAP_FAILED
*/
void* lwp_mmap2(void *addr, size_t length, int prot, int flags, int fd, off_t pgoffset);
void* lwp_mmap2(struct rt_lwp *lwp, void *addr, size_t length, int prot, int flags, int fd, off_t pgoffset);
/**
* @brief Unmap memory region in user space
@ -51,7 +54,7 @@ void* lwp_mmap2(void *addr, size_t length, int prot, int flags, int fd, off_t pg
* @param length length in bytes of unmapping
* @return int errno
*/
int lwp_munmap(void *addr);
int lwp_munmap(struct rt_lwp *lwp, void *addr, size_t length);
/**
* @brief Test if address from user is accessible address by user
@ -145,8 +148,8 @@ void lwp_unmap_user_space(struct rt_lwp *lwp);
int lwp_unmap_user(struct rt_lwp *lwp, void *va);
void *lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size, rt_bool_t text);
size_t lwp_user_strlen(const char *s);
size_t lwp_user_strlen_ext(struct rt_lwp *lwp, const char *s);
void lwp_free_command_line_args(char** argv);
char** lwp_get_command_line_args(struct rt_lwp *lwp);
rt_varea_t lwp_map_user_varea(struct rt_lwp *lwp, void *map_va, size_t map_size);
@ -158,6 +161,11 @@ int lwp_unmap_user_phy(struct rt_lwp *lwp, void *va);
rt_base_t lwp_brk(void *addr);
size_t lwp_user_strlen(const char *s);
size_t lwp_user_strlen_ext(struct rt_lwp *lwp, const char *s);
int lwp_fork_aspace(struct rt_lwp *dest_lwp, struct rt_lwp *src_lwp);
void lwp_data_cache_flush(struct rt_lwp *lwp, void *vaddr, size_t size);
static inline void *_lwp_v2p(struct rt_lwp *lwp, void *vaddr)
@ -173,6 +181,49 @@ static inline void *lwp_v2p(struct rt_lwp *lwp, void *vaddr)
return paddr;
}
/**
* @brief Remapping user space memory region to kernel
*
* @warning the remapped region in kernel should be unmapped after usage
*
* @param lwp target process
* @param uaddr user space address where the data writes to
* @param length the bytes to redirect
* @return void * the redirection address in kernel space
*/
void *lwp_user_memory_remap_to_kernel(rt_lwp_t lwp, void *uaddr, size_t length);
rt_inline rt_size_t lwp_user_mm_flag_to_kernel(int flags)
{
rt_size_t k_flags = 0;
if (flags & MAP_FIXED)
k_flags |= MMF_MAP_FIXED;
if (flags & (MAP_PRIVATE | MAP_ANON | MAP_ANONYMOUS))
k_flags |= MMF_MAP_PRIVATE;
if (flags & MAP_SHARED)
k_flags |= MMF_MAP_SHARED;
return k_flags;
}
rt_inline rt_size_t lwp_user_mm_attr_to_kernel(int prot)
{
rt_size_t k_attr = 0;
#ifdef IMPL_MPROTECT
if ((prot & PROT_EXEC) || (prot & PROT_WRITE) ||
((prot & PROT_READ) && (prot & PROT_WRITE)))
k_attr = MMU_MAP_U_RWCB;
else if (prot == PROT_NONE)
k_attr = MMU_MAP_K_RWCB;
else
k_attr = MMU_MAP_U_ROCB;
#else
k_attr = MMU_MAP_U_RWCB;
#endif /* IMPL_MPROTECT */
return k_attr;
}
#ifdef __cplusplus
}
#endif

View File

@ -13,6 +13,9 @@
#include <rtthread.h>
#include <errno.h>
#include <stdlib.h>
typedef long sysret_t;
struct rt_syscall_def
@ -40,4 +43,36 @@ struct rt_syscall_def
#define _SYS_WRAP(func) ({int _ret = func; _ret < 0 ? GET_ERRNO() : _ret;})
rt_inline sysret_t lwp_errno_to_posix(rt_err_t error)
{
sysret_t posix_rc;
switch (labs(error))
{
case RT_EOK:
posix_rc = 0;
break;
case RT_ETIMEOUT:
posix_rc = -ETIMEDOUT;
break;
case RT_EINVAL:
posix_rc = -EINVAL;
break;
case RT_ENOENT:
posix_rc = -ENOENT;
break;
case RT_ENOSPC:
posix_rc = -ENOSPC;
break;
case RT_EPERM:
posix_rc = -EPERM;
break;
default:
posix_rc = -1;
break;
}
return posix_rc;
}
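For example, a syscall wrapper can funnel any rt_err_t through this converter; sys_sem_take below is hypothetical, and note that labs() collapses positive and negative kernel codes onto the same POSIX value:

/* hedged sketch of a wrapper returning POSIX codes to user space */
static sysret_t sys_sem_take(rt_sem_t sem, rt_int32_t timeout)
{
    rt_err_t err = rt_sem_take(sem, timeout);  /* e.g. -RT_ETIMEOUT */
    return lwp_errno_to_posix(err);            /* -> -ETIMEDOUT     */
}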
#endif /* __SYSCALL_DATA_H__ */

View File

@ -2,13 +2,13 @@ import os
from building import *
objs = []
src = []
if GetDepend('ARCH_ARM_CORTEX_A') or GetDepend('ARCH_ARMV8') or GetDepend('ARCH_RISCV64'):
cwd = GetCurrentDir()
src += ['avl_adpt.c', 'ioremap.c', 'mm_aspace.c', 'mm_fault.c', 'mm_kmem.c', 'mm_object.c', 'mm_page.c']
if GetDepend('RT_USING_MEMBLOCK'):
src += ['mm_memblock.c']
src = Glob('*.c') + Glob('*_gcc.S')
if not GetDepend('RT_USING_MEMBLOCK'):
SrcRemove(src, ['mm_memblock.c'])
CPPPATH = [cwd]
group = DefineGroup('mm', src, depend = ['ARCH_MM_MMU'], CPPPATH = CPPPATH)

620
components/mm/mm_anon.c Normal file
View File

@ -0,0 +1,620 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-08-19 Shell Support PRIVATE mapping and COW
*/
#define DBG_TAG "mm.anon"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <string.h>
#include "mm_private.h"
#include <mmu.h>
/**
* Anonymous Object directly represent the mappings without backup files in the
* aspace. Their only backup is in the aspace->pgtbl.
*/
typedef struct rt_private_ctx {
struct rt_mem_obj mem_obj;
rt_aspace_t backup_aspace;
/* both varea and aspace can holds a reference */
rt_atomic_t reference;
/* readonly `private` is shared object */
long readonly;
} *rt_private_ctx_t;
rt_inline rt_aspace_t _anon_obj_get_backup(rt_mem_obj_t mobj)
{
rt_private_ctx_t pctx;
rt_aspace_t backup;
pctx = rt_container_of(mobj, struct rt_private_ctx, mem_obj);
backup = pctx->backup_aspace;
return backup;
}
rt_inline rt_atomic_t *_anon_obj_get_reference(rt_mem_obj_t mobj)
{
rt_private_ctx_t pctx;
pctx = rt_container_of(mobj, struct rt_private_ctx, mem_obj);
return &pctx->reference;
}
rt_inline rt_private_ctx_t _anon_mobj_to_pctx(rt_mem_obj_t mobj)
{
return rt_container_of(mobj, struct rt_private_ctx, mem_obj);
}
static long rt_aspace_anon_ref_inc(rt_mem_obj_t aobj)
{
long rc;
if (aobj)
{
rc = rt_atomic_add(_anon_obj_get_reference(aobj), 1);
LOG_D("%s(aobj=%p) Cur %ld", __func__, aobj, rc + 1);
}
else
rc = -1;
return rc;
}
rt_err_t rt_aspace_anon_ref_dec(rt_mem_obj_t aobj)
{
rt_err_t rc;
rt_aspace_t aspace;
rt_private_ctx_t pctx;
long former_reference;
if (aobj)
{
pctx = _anon_mobj_to_pctx(aobj);
RT_ASSERT(pctx);
former_reference = rt_atomic_add(_anon_obj_get_reference(aobj), -1);
LOG_D("%s(aobj=%p) Cur %ld", __func__, aobj, former_reference - 1);
if (pctx->readonly)
{
if (former_reference - 1 <= pctx->readonly)
{
void *pgtbl;
RT_ASSERT(former_reference - 1 == pctx->readonly);
aspace = _anon_obj_get_backup(aobj);
pctx->readonly = 0;
pgtbl = aspace->page_table;
rt_aspace_delete(aspace);
rt_hw_mmu_pgtbl_delete(pgtbl);
}
}
else if (former_reference < 2)
{
aspace = _anon_obj_get_backup(aobj);
aspace->private_object = RT_NULL;
rt_free(pctx);
}
rc = RT_EOK;
}
else
{
rc = -RT_EINVAL;
}
return rc;
}
static const char *_anon_get_name(rt_varea_t varea)
{
return varea->aspace == _anon_obj_get_backup(varea->mem_obj) ? "anonymous" : "reference";
}
static void _anon_varea_open(struct rt_varea *varea)
{
rt_aspace_anon_ref_inc(varea->mem_obj);
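/* a varea living in the backup aspace indexes its pages by its own start address */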
if (varea->aspace == _anon_obj_get_backup(varea->mem_obj))
varea->offset = MM_PA_TO_OFF(varea->start);
varea->data = NULL;
}
static void _anon_varea_close(struct rt_varea *varea)
{
rt_aspace_anon_ref_dec(varea->mem_obj);
}
static rt_err_t _anon_varea_expand(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
{
return RT_EOK;
}
static rt_err_t _anon_varea_shrink(rt_varea_t varea, void *new_start, rt_size_t size)
{
return rt_mm_dummy_mapper.on_varea_shrink(varea, new_start, size);
}
static rt_err_t _anon_varea_split(struct rt_varea *existed, void *unmap_start, rt_size_t unmap_len, struct rt_varea *subset)
{
_anon_varea_open(subset);
return rt_mm_dummy_mapper.on_varea_split(existed, unmap_start, unmap_len, subset);
}
static rt_err_t _anon_varea_merge(struct rt_varea *merge_to, struct rt_varea *merge_from)
{
_anon_varea_close(merge_from);
return rt_mm_dummy_mapper.on_varea_merge(merge_to, merge_from);
}
rt_inline void _map_page_in_varea(rt_aspace_t aspace, rt_varea_t varea,
struct rt_aspace_fault_msg *msg, char *fault_addr)
{
if (rt_varea_map_page(varea, fault_addr, msg->response.vaddr) == RT_EOK)
{
msg->response.status = MM_FAULT_STATUS_OK_MAPPED;
}
else
{
/* revoke the allocated page */
rt_varea_pgmgr_pop(varea, msg->response.vaddr, ARCH_PAGE_SIZE);
msg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
LOG_W("%s: failed to map page into varea", __func__);
}
}
static void *_get_page_from_backup(rt_aspace_t backup, rt_base_t offset_in_mobj)
{
void *frame_pa;
char *backup_addr;
rt_varea_t backup_varea;
void *rc = RT_NULL;
backup_addr = (char *)(offset_in_mobj << MM_PAGE_SHIFT);
backup_varea = rt_aspace_query(backup, backup_addr);
if (backup_varea)
{
/* synchronize multiple requests via the aspace lock of the backup */
WR_LOCK(backup);
frame_pa = rt_hw_mmu_v2p(backup, backup_addr);
if (frame_pa == ARCH_MAP_FAILED)
{
/* provide the page in backup varea */
struct rt_aspace_fault_msg msg;
msg.fault_op = MM_FAULT_OP_WRITE;
msg.fault_type = MM_FAULT_TYPE_PAGE_FAULT;
msg.fault_vaddr = backup_addr;
msg.off = offset_in_mobj;
rt_mm_fault_res_init(&msg.response);
rt_mm_dummy_mapper.on_page_fault(backup_varea, &msg);
if (msg.response.status != MM_FAULT_STATUS_UNRECOVERABLE)
{
_map_page_in_varea(backup, backup_varea, &msg, backup_addr);
if (msg.response.status == MM_FAULT_STATUS_OK_MAPPED)
{
rc = msg.response.vaddr;
}
}
}
else
{
rc = rt_kmem_p2v(frame_pa);
if (!rc)
RT_ASSERT(0 && "No kernel address of target page frame");
}
WR_UNLOCK(backup);
}
else
{
/* out of range error */
LOG_E("(backup_addr=%p): Page request out of range", backup_addr);
}
return rc;
}
/* get the backup page in kernel for the address in user space */
static void _anon_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
{
void *paddr;
char *frame_ka;
rt_aspace_t from_aspace = varea->aspace;
rt_aspace_t backup = _anon_obj_get_backup(varea->mem_obj);
RDWR_LOCK(from_aspace);
/**
* if the page is already mapped (this can be caused by a data race where another
* thread took the lock and mapped the page first), return okay
*/
paddr = rt_hw_mmu_v2p(from_aspace, msg->fault_vaddr);
if (paddr == ARCH_MAP_FAILED)
{
if (backup == from_aspace)
{
rt_mm_dummy_mapper.on_page_fault(varea, msg);
if (msg->response.status != MM_FAULT_STATUS_UNRECOVERABLE)
{
_map_page_in_varea(backup, varea, msg, msg->fault_vaddr);
}
}
else
{
frame_ka = _get_page_from_backup(backup, msg->off);
if (frame_ka)
{
msg->response.vaddr = frame_ka;
msg->response.size = ARCH_PAGE_SIZE;
_map_page_in_varea(from_aspace, varea, msg, msg->fault_vaddr);
}
}
}
else
{
msg->response.status = MM_FAULT_STATUS_OK_MAPPED;
}
RDWR_UNLOCK(from_aspace);
}
static void read_by_mte(rt_aspace_t aspace, struct rt_aspace_io_msg *iomsg)
{
if (rt_aspace_page_get_phy(aspace, iomsg->fault_vaddr, iomsg->buffer_vaddr) == RT_EOK)
iomsg->response.status = MM_FAULT_STATUS_OK;
}
static void _anon_page_read(struct rt_varea *varea, struct rt_aspace_io_msg *iomsg)
{
rt_aspace_t from_aspace = varea->aspace;
if (rt_hw_mmu_v2p(from_aspace, iomsg->fault_vaddr) == ARCH_MAP_FAILED)
{
struct rt_aspace_fault_msg msg;
msg.fault_op = MM_FAULT_OP_READ;
msg.fault_type = MM_FAULT_TYPE_PAGE_FAULT;
msg.fault_vaddr = iomsg->fault_vaddr;
msg.off = iomsg->off;
rt_mm_fault_res_init(&msg.response);
_anon_page_fault(varea, &msg);
if (msg.response.status != MM_FAULT_STATUS_UNRECOVERABLE)
{
read_by_mte(from_aspace, iomsg);
}
}
else
{
read_by_mte(from_aspace, iomsg);
}
}
static void write_by_mte(rt_aspace_t aspace, struct rt_aspace_io_msg *iomsg)
{
if (rt_aspace_page_put_phy(aspace, iomsg->fault_vaddr, iomsg->buffer_vaddr) == RT_EOK)
iomsg->response.status = MM_FAULT_STATUS_OK;
}
static void _anon_page_write(struct rt_varea *varea, struct rt_aspace_io_msg *iomsg)
{
rt_aspace_t from_aspace = varea->aspace;
if (rt_hw_mmu_v2p(from_aspace, iomsg->fault_vaddr) == ARCH_MAP_FAILED)
{
struct rt_aspace_fault_msg msg;
msg.fault_op = MM_FAULT_OP_WRITE;
msg.fault_type = MM_FAULT_TYPE_PAGE_FAULT;
msg.fault_vaddr = iomsg->fault_vaddr;
msg.off = iomsg->off;
rt_mm_fault_res_init(&msg.response);
_anon_page_fault(varea, &msg);
if (msg.response.status != MM_FAULT_STATUS_UNRECOVERABLE)
{
write_by_mte(from_aspace, iomsg);
}
}
else
{
write_by_mte(from_aspace, iomsg);
}
}
static struct rt_private_ctx _priv_obj = {
.mem_obj.get_name = _anon_get_name,
.mem_obj.on_page_fault = _anon_page_fault,
.mem_obj.hint_free = NULL,
.mem_obj.on_varea_open = _anon_varea_open,
.mem_obj.on_varea_close = _anon_varea_close,
.mem_obj.on_varea_shrink = _anon_varea_shrink,
.mem_obj.on_varea_split = _anon_varea_split,
.mem_obj.on_varea_expand = _anon_varea_expand,
.mem_obj.on_varea_merge = _anon_varea_merge,
.mem_obj.page_read = _anon_page_read,
.mem_obj.page_write = _anon_page_write,
};
rt_inline rt_private_ctx_t rt_private_obj_create_n_bind(rt_aspace_t aspace)
{
rt_private_ctx_t private_object;
private_object = rt_malloc(sizeof(struct rt_private_ctx));
if (private_object)
{
memcpy(&private_object->mem_obj, &_priv_obj, sizeof(_priv_obj));
/* hold an initial reference from the backup aspace */
rt_atomic_store(&private_object->reference, 1);
private_object->readonly = RT_FALSE;
private_object->backup_aspace = aspace;
aspace->private_object = &private_object->mem_obj;
}
return private_object;
}
rt_inline rt_mem_obj_t _get_private_obj(rt_aspace_t aspace)
{
rt_private_ctx_t priv;
rt_mem_obj_t rc;
rc = aspace->private_object;
if (!aspace->private_object)
{
priv = rt_private_obj_create_n_bind(aspace);
if (priv)
{
rc = &priv->mem_obj;
aspace->private_object = rc;
}
}
return rc;
}
static int _override_map(rt_varea_t varea, rt_aspace_t aspace, void *fault_vaddr, struct rt_aspace_fault_msg *msg, void *page)
{
int rc = MM_FAULT_FIXABLE_FALSE;
rt_mem_obj_t private_object;
rt_varea_t map_varea = RT_NULL;
rt_err_t error;
rt_size_t flags;
rt_size_t attr;
LOG_D("%s", __func__);
private_object = _get_private_obj(aspace);
if (private_object)
{
flags = varea->flag | MMF_MAP_FIXED;
/* don't prefetch; do it later */
flags &= ~MMF_PREFETCH;
attr = rt_hw_mmu_attr_add_perm(varea->attr, RT_HW_MMU_PROT_USER | RT_HW_MMU_PROT_WRITE);
/* override existing mapping at fault_vaddr */
error = _mm_aspace_map(
aspace, &map_varea, &fault_vaddr, ARCH_PAGE_SIZE, attr,
flags, private_object, MM_PA_TO_OFF(fault_vaddr));
if (error == RT_EOK)
{
msg->response.status = MM_FAULT_STATUS_OK;
msg->response.vaddr = page;
msg->response.size = ARCH_PAGE_SIZE;
if (rt_varea_map_with_msg(map_varea, msg) != RT_EOK)
{
LOG_E("%s: fault_va=%p,(priv_va=%p,priv_sz=0x%lx) at %s", __func__, msg->fault_vaddr, map_varea->start, map_varea->size, VAREA_NAME(map_varea));
RT_ASSERT(0 && "should never failed");
}
RT_ASSERT(rt_hw_mmu_v2p(aspace, msg->fault_vaddr) == (page + PV_OFFSET));
rc = MM_FAULT_FIXABLE_TRUE;
rt_varea_pgmgr_insert(map_varea, page);
}
else
{
/* the private object will be released on destruction of the aspace */
rt_free(map_varea);
}
}
else
{
LOG_I("%s: out of memory", __func__);
rc = MM_FAULT_FIXABLE_FALSE;
}
return rc;
}
/**
* replace an existing mapping with a private one; this is identical to:
* => aspace_unmap(ex_varea)
* => aspace_map()
*/
int rt_varea_fix_private_locked(rt_varea_t ex_varea, void *pa,
struct rt_aspace_fault_msg *msg,
rt_bool_t dont_copy)
{
/**
* todo: READ -> WRITE lock here
*/
void *page;
void *fault_vaddr;
rt_aspace_t aspace;
rt_mem_obj_t ex_obj;
int rc = MM_FAULT_FIXABLE_FALSE;
ex_obj = ex_varea->mem_obj;
if (ex_obj)
{
fault_vaddr = msg->fault_vaddr;
aspace = ex_varea->aspace;
RT_ASSERT(!!aspace);
/**
* todo: what if multiple pages are required?
*/
if (aspace->private_object == ex_obj)
{
RT_ASSERT(0 && "recursion");
}
else if (ex_obj->page_read)
{
page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
if (page)
{
/** setup message & fetch the data from source object */
if (!dont_copy)
{
struct rt_aspace_io_msg io_msg;
rt_mm_io_msg_init(&io_msg, msg->off, msg->fault_vaddr, page);
ex_obj->page_read(ex_varea, &io_msg);
/**
* Note: if ex_obj has mapped into the varea, it's still okay since
* we will override it later
*/
if (io_msg.response.status != MM_FAULT_STATUS_UNRECOVERABLE)
{
rc = _override_map(ex_varea, aspace, fault_vaddr, msg, page);
}
else
{
rt_pages_free(page, 0);
LOG_I("%s: page read(va=%p) fault from %s(start=%p,size=%p)", __func__,
msg->fault_vaddr, VAREA_NAME(ex_varea), ex_varea->start, ex_varea->size);
}
}
else
{
rc = _override_map(ex_varea, aspace, fault_vaddr, msg, page);
}
}
else
{
LOG_I("%s: pages allocation failed", __func__);
}
}
else
{
LOG_I("%s: no page read method provided from %s", __func__, VAREA_NAME(ex_varea));
}
}
else
{
LOG_I("%s: unavailable memory object", __func__);
}
return rc;
}
int rt_aspace_map_private(rt_aspace_t aspace, void **addr, rt_size_t length,
rt_size_t attr, mm_flag_t flags)
{
int rc;
rt_mem_obj_t priv_obj;
if (flags & MMF_STATIC_ALLOC)
{
rc = -RT_EINVAL;
}
else
{
priv_obj = _get_private_obj(aspace);
if (priv_obj)
{
flags |= MMF_MAP_PRIVATE;
flags &= ~MMF_PREFETCH;
rc = rt_aspace_map(aspace, addr, length, attr, flags, priv_obj, 0);
}
else
{
rc = -RT_ENOMEM;
}
}
return rc;
}
static int _release_shared(rt_varea_t varea, void *arg)
{
rt_aspace_t src = varea->aspace;
rt_mem_obj_t mem_obj = varea->mem_obj;
if (mem_obj != _get_private_obj(src))
{
_varea_uninstall_locked(varea);
if (VAREA_NOT_STATIC(varea))
{
rt_free(varea);
}
}
return 0;
}
static rt_err_t _convert_readonly(rt_aspace_t aspace, long base_reference)
{
rt_mem_obj_t aobj;
rt_private_ctx_t pctx;
aobj = _get_private_obj(aspace);
pctx = _anon_mobj_to_pctx(aobj);
LOG_D("Ref(cur=%d,base=%d)", pctx->reference, base_reference);
rt_aspace_traversal(aspace, _release_shared, 0);
pctx->readonly = base_reference;
return 0;
}
rt_inline void _switch_aspace(rt_aspace_t *pa, rt_aspace_t *pb)
{
rt_aspace_t temp;
temp = *pa;
*pa = *pb;
*pb = temp;
}
rt_err_t rt_aspace_fork(rt_aspace_t *psrc, rt_aspace_t *pdst)
{
rt_err_t rc;
void *pgtbl;
rt_aspace_t backup;
rt_aspace_t src = *psrc;
rt_aspace_t dst = *pdst;
long base_reference;
pgtbl = rt_hw_mmu_pgtbl_create();
if (pgtbl)
{
backup = rt_aspace_create(src->start, src->size, pgtbl);
if (backup)
{
WR_LOCK(src);
base_reference = rt_atomic_load(_anon_obj_get_reference(src->private_object));
rc = rt_aspace_duplicate_locked(src, dst);
WR_UNLOCK(src);
if (!rc)
{
/* WR_LOCK(dst) is not necessary since dst is not available currently */
rc = rt_aspace_duplicate_locked(dst, backup);
if (!rc)
{
_switch_aspace(psrc, &backup);
_convert_readonly(backup, base_reference);
}
}
}
else
{
rc = -RT_ENOMEM;
}
}
else
{
rc = -RT_ENOMEM;
}
return rc;
}
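A condensed sketch of driving the fork above; the helper name and error handling are illustrative only:
/* illustrative: fork `parent` into a fresh `child` aspace */
static rt_err_t fork_aspace_sketch(rt_aspace_t *parent, rt_aspace_t *child)
{
    void *pgtbl = rt_hw_mmu_pgtbl_create();
    if (!pgtbl)
        return -RT_ENOMEM;
    *child = rt_aspace_create((*parent)->start, (*parent)->size, pgtbl);
    if (!*child)
    {
        rt_hw_mmu_pgtbl_delete(pgtbl);
        return -RT_ENOMEM;
    }
    /* on success, *parent is switched to a fresh COW view of the old space */
    return rt_aspace_fork(parent, child);
}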

File diff suppressed because it is too large

View File

@ -1,23 +1,26 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-11-14 WangXiaoyao the first version
* 2023-08-17 Shell Add unmap_range for MAP_PRIVATE
*/
#ifndef __MM_ASPACE_H__
#define __MM_ASPACE_H__
#include <rthw.h>
#include <rtthread.h>
#include <stddef.h>
#include "avl_adpt.h"
#include "mm_fault.h"
#include "mm_flag.h"
#include <stddef.h>
#include <string.h>
#define MM_PAGE_SHIFT 12
#define MM_PA_TO_OFF(pa) ((uintptr_t)(pa) >> MM_PAGE_SHIFT)
#define PV_OFFSET (rt_kmem_pvoff())
@ -55,6 +58,7 @@ typedef struct rt_aspace
struct _aspace_tree tree;
struct rt_mutex bst_lock;
struct rt_mem_obj *private_object;
rt_uint64_t asid;
} *rt_aspace_t;
@ -97,7 +101,25 @@ typedef struct rt_mem_obj
/* do post-close business, e.g. dropping a reference */
void (*on_varea_close)(struct rt_varea *varea);
void (*on_page_offload)(struct rt_varea *varea, void *vaddr, rt_size_t size);
/* do preparation for address space modification of varea */
rt_err_t (*on_varea_shrink)(struct rt_varea *varea, void *new_vaddr, rt_size_t size);
/* do preparation for address space modification of varea */
rt_err_t (*on_varea_expand)(struct rt_varea *varea, void *new_vaddr, rt_size_t size);
/**
* this is like an on_varea_open() to `subset`, and an on_varea_shrink() to `existed`
* while resource can migrate from `existed` to `subset` at the same time
*/
rt_err_t (*on_varea_split)(struct rt_varea *existed, void *unmap_start,
rt_size_t unmap_len, struct rt_varea *subset);
/**
* this is like an on_varea_expand() to `merge_to` and an on_varea_close() to `merge_from`
* while resource can migrate from `merge_from` to `merge_to` at the same time
*/
rt_err_t (*on_varea_merge)(struct rt_varea *merge_to, struct rt_varea *merge_from);
/* dynamic mem_obj API */
void (*page_read)(struct rt_varea *varea, struct rt_aspace_io_msg *msg);
void (*page_write)(struct rt_varea *varea, struct rt_aspace_io_msg *msg);
const char *(*get_name)(rt_varea_t varea);
} *rt_mem_obj_t;
@ -110,6 +132,8 @@ enum rt_mmu_cntl
MMU_CNTL_CACHE,
MMU_CNTL_READONLY,
MMU_CNTL_READWRITE,
MMU_CNTL_OFFLOAD,
MMU_CNTL_INSTALL,
MMU_CNTL_DUMMY_END,
};
@ -122,8 +146,11 @@ enum rt_mmu_cntl
#define WR_UNLOCK(aspace) \
rt_thread_self() ? rt_mutex_release(&(aspace)->bst_lock) : 0
/* FIXME: fix rd_lock */
#define RD_LOCK(aspace) WR_LOCK(aspace)
#define RD_UNLOCK(aspace) WR_UNLOCK(aspace)
#define RDWR_LOCK(aspace) ((void)aspace)
#define RDWR_UNLOCK(aspace) ((void)aspace)
rt_aspace_t rt_aspace_create(void *start, rt_size_t length, void *pgtbl);
@ -176,14 +203,50 @@ int rt_aspace_map_phy_static(rt_aspace_t aspace, rt_varea_t varea,
rt_mm_va_hint_t hint, rt_size_t attr, rt_size_t pa_off,
void **ret_va);
/** map a private memory region to aspace */
int rt_aspace_map_private(rt_aspace_t aspace, void **addr, rt_size_t length,
rt_size_t attr, mm_flag_t flags);
/**
* @brief Remove any mappings overlap the range [addr, addr + bytes)
* @brief Remove the mapping containing the address specified by addr
*
* @param aspace
* @param aspace target virtual address space
* @param addr an address contained by the mapping to be removed
* @return int rt errno
*/
int rt_aspace_unmap(rt_aspace_t aspace, void *addr);
/**
* @brief Remove pages of existing mappings in the range [addr, addr+length)
* Length is automatically rounded up to the next multiple of the page size.
*
* @param aspace target virtual address space
* @param addr the beginning of the range of pages to be unmapped
* @param length length of range in bytes
* @return int rt errno
*/
int rt_aspace_unmap_range(rt_aspace_t aspace, void *addr, size_t length);
int rt_aspace_control(rt_aspace_t aspace, void *addr, enum rt_mmu_cntl cmd);
@ -191,14 +254,28 @@ int rt_aspace_load_page(rt_aspace_t aspace, void *addr, rt_size_t npage);
int rt_aspace_offload_page(rt_aspace_t aspace, void *addr, rt_size_t npage);
rt_err_t rt_aspace_page_put(rt_aspace_t aspace, void *page_va, void *buffer);
rt_err_t rt_aspace_page_get(rt_aspace_t aspace, void *page_va, void *buffer);
int rt_aspace_traversal(rt_aspace_t aspace,
int (*fn)(rt_varea_t varea, void *arg), void *arg);
void rt_aspace_print_all(rt_aspace_t aspace);
rt_base_t rt_aspace_count_vsz(rt_aspace_t aspace);
rt_varea_t rt_aspace_query(rt_aspace_t aspace, void *vaddr);
rt_err_t rt_aspace_duplicate_locked(rt_aspace_t src, rt_aspace_t dst);
rt_err_t rt_aspace_fork(rt_aspace_t *psrc, rt_aspace_t *pdst);
rt_err_t rt_aspace_compare(rt_aspace_t src, rt_aspace_t dst);
/**
* @brief Map one page to varea
*
* @note caller should take the read/write lock
*
* @param varea target varea
* @param addr user address
* @param page the page frame to be mapped
@ -209,6 +286,8 @@ int rt_varea_map_page(rt_varea_t varea, void *vaddr, void *page);
/**
* @brief Unmap one page in varea
*
* @note caller should take the read/write lock
*
* @param varea target varea
* @param addr user address
* @param page the page frame to be mapped
@ -252,7 +331,16 @@ int rt_varea_unmap_range(rt_varea_t varea, void *vaddr, rt_size_t length);
*/
void rt_varea_pgmgr_insert(rt_varea_t varea, void *page_addr);
rt_ubase_t rt_kmem_pvoff(void);
rt_inline rt_mem_obj_t rt_mem_obj_create(rt_mem_obj_t source)
{
rt_mem_obj_t target;
target = rt_malloc(sizeof(*target));
if (target)
memcpy(target, source, sizeof(*target));
return target;
}
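For example, a caller could clone the dummy mapper defined in mm_object.c and override a single hook; the names below are illustrative:
/* illustrative: derive a new mem_obj from rt_mm_dummy_mapper */
static const char *_my_name(rt_varea_t varea) { return "my-object"; }
rt_inline rt_mem_obj_t make_my_obj_sketch(void)
{
    rt_mem_obj_t obj = rt_mem_obj_create(&rt_mm_dummy_mapper);
    if (obj)
        obj->get_name = _my_name; /* every other hook stays inherited */
    return obj;
}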
const rt_ubase_t rt_kmem_pvoff(void);
void rt_kmem_pvoff_set(rt_ubase_t pvoff);
@ -260,6 +348,8 @@ int rt_kmem_map_phy(void *va, void *pa, rt_size_t length, rt_size_t attr);
void *rt_kmem_v2p(void *vaddr);
void *rt_kmem_p2v(void *paddr);
void rt_kmem_list(void);
#endif /* __MM_ASPACE_H__ */

View File

@ -6,6 +6,7 @@
* Change Logs:
* Date Author Notes
* 2022-12-06 WangXiaoyao the first version
* 2023-08-19 Shell Support PRIVATE mapping and COW
*/
#include <rtthread.h>
@ -23,27 +24,21 @@
#include <mmu.h>
#include <tlb.h>
#define UNRECOVERABLE 0
#define RECOVERABLE 1
static int _fetch_page(rt_varea_t varea, struct rt_aspace_fault_msg *msg)
{
int err = UNRECOVERABLE;
msg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
msg->response.vaddr = 0;
msg->response.size = 0;
int err = MM_FAULT_FIXABLE_FALSE;
if (varea->mem_obj && varea->mem_obj->on_page_fault)
{
varea->mem_obj->on_page_fault(varea, msg);
err = _varea_map_with_msg(varea, msg);
err = (err == RT_EOK ? RECOVERABLE : UNRECOVERABLE);
err = rt_varea_map_with_msg(varea, msg);
err = (err == RT_EOK ? MM_FAULT_FIXABLE_TRUE : MM_FAULT_FIXABLE_FALSE);
}
return err;
}
static int _read_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
{
int err = UNRECOVERABLE;
int err = MM_FAULT_FIXABLE_FALSE;
if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
{
RT_ASSERT(pa == ARCH_MAP_FAILED);
@ -59,19 +54,37 @@ static int _read_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *m
static int _write_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
{
int err = UNRECOVERABLE;
if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
rt_aspace_t aspace = varea->aspace;
int err = MM_FAULT_FIXABLE_FALSE;
if (rt_varea_is_private_locked(varea))
{
if (VAREA_IS_WRITABLE(varea) && (
msg->fault_type == MM_FAULT_TYPE_ACCESS_FAULT ||
msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT))
{
RDWR_LOCK(aspace);
err = rt_varea_fix_private_locked(varea, pa, msg, RT_FALSE);
RDWR_UNLOCK(aspace);
if (err == MM_FAULT_FIXABLE_FALSE)
LOG_I("%s: fix private failure", __func__);
}
else
{
LOG_I("%s: No permission on %s(attr=0x%lx)", __func__, VAREA_NAME(varea), varea->attr);
}
}
else if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
{
RT_ASSERT(pa == ARCH_MAP_FAILED);
RT_ASSERT(!(varea->flag & MMF_PREFETCH));
err = _fetch_page(varea, msg);
}
else if (msg->fault_type == MM_FAULT_TYPE_ACCESS_FAULT &&
varea->flag & MMF_COW)
{
if (err == MM_FAULT_FIXABLE_FALSE)
LOG_I("%s: page fault failure", __func__);
}
else
{
LOG_D("%s: can not fix", __func__);
/* signal a fault to user? */
}
return err;
@ -79,7 +92,7 @@ static int _write_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *
static int _exec_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
{
int err = UNRECOVERABLE;
int err = MM_FAULT_FIXABLE_FALSE;
if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
{
RT_ASSERT(pa == ARCH_MAP_FAILED);
@ -91,10 +104,13 @@ static int _exec_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *m
int rt_aspace_fault_try_fix(rt_aspace_t aspace, struct rt_aspace_fault_msg *msg)
{
int err = UNRECOVERABLE;
int err = MM_FAULT_FIXABLE_FALSE;
uintptr_t va = (uintptr_t)msg->fault_vaddr;
va &= ~ARCH_PAGE_MASK;
msg->fault_vaddr = (void *)va;
rt_mm_fault_res_init(&msg->response);
RT_DEBUG_SCHEDULER_AVAILABLE(1);
if (aspace)
{
@ -105,7 +121,15 @@ int rt_aspace_fault_try_fix(rt_aspace_t aspace, struct rt_aspace_fault_msg *msg)
if (varea)
{
void *pa = rt_hw_mmu_v2p(aspace, msg->fault_vaddr);
msg->off = ((char *)msg->fault_vaddr - (char *)varea->start) >> ARCH_PAGE_SHIFT;
if (pa != ARCH_MAP_FAILED && msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
{
LOG_D("%s(fault=%p) has already fixed", __func__, msg->fault_vaddr);
err = MM_FAULT_FIXABLE_TRUE;
}
else
{
LOG_D("%s(varea=%s,fault=%p,fault_op=%d,phy=%p)", __func__, VAREA_NAME(varea), msg->fault_vaddr, msg->fault_op, pa);
msg->off = varea->offset + ((long)msg->fault_vaddr - (long)varea->start) / ARCH_PAGE_SIZE;
/* permission checked by fault op */
switch (msg->fault_op)
@ -121,6 +145,11 @@ int rt_aspace_fault_try_fix(rt_aspace_t aspace, struct rt_aspace_fault_msg *msg)
break;
}
}
}
else
{
LOG_I("%s: varea not found at 0x%lx", __func__, msg->fault_vaddr);
}
RD_UNLOCK(aspace);
}

View File

@ -20,12 +20,8 @@
#define MM_FAULT_STATUS_OK_MAPPED 1
#define MM_FAULT_STATUS_UNRECOVERABLE 4
struct rt_mm_fault_res
{
void *vaddr;
rt_size_t size;
int status;
};
#define MM_FAULT_FIXABLE_FALSE 0
#define MM_FAULT_FIXABLE_TRUE 1
enum rt_mm_fault_op
{
@ -36,10 +32,41 @@ enum rt_mm_fault_op
enum rt_mm_fault_type
{
/**
* Occurs when an instruction attempts to access a memory address that it
* does not have permission to access
*/
MM_FAULT_TYPE_ACCESS_FAULT,
/**
* Occurs when a load or store instruction accesses a virtual memory
* address that is not currently mapped to a physical memory page
*/
MM_FAULT_TYPE_PAGE_FAULT,
/**
* Occurs like a SIGBUS
*/
MM_FAULT_TYPE_BUS_ERROR,
MM_FAULT_TYPE_GENERIC,
__PRIVATE_PAGE_INSERT,
};
enum rt_mm_hint_prefetch
{
MM_FAULT_HINT_PREFETCH_NONE,
MM_FAULT_HINT_PREFETCH_READY,
};
struct rt_mm_fault_res
{
void *vaddr;
rt_size_t size;
int status;
/* hint for prefetch strategy */
enum rt_mm_hint_prefetch hint;
};
struct rt_aspace_fault_msg
@ -52,8 +79,36 @@ struct rt_aspace_fault_msg
struct rt_mm_fault_res response;
};
struct rt_aspace_io_msg
{
/* offset in varea */
rt_size_t off;
/* fault address in target address space */
void *fault_vaddr;
/* read/write buffer in kernel space */
void *buffer_vaddr;
struct rt_mm_fault_res response;
};
rt_inline void rt_mm_fault_res_init(struct rt_mm_fault_res *res)
{
res->vaddr = RT_NULL;
res->size = 0;
res->hint = MM_FAULT_HINT_PREFETCH_NONE;
res->status = MM_FAULT_STATUS_UNRECOVERABLE;
}
rt_inline void rt_mm_io_msg_init(struct rt_aspace_io_msg *io, rt_size_t off, void *fault_vaddr, void *buffer_vaddr)
{
io->off = off;
io->fault_vaddr = fault_vaddr;
io->buffer_vaddr = buffer_vaddr;
rt_mm_fault_res_init(&io->response);
}
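A sketch of how a caller might drive a mem_obj's page_read through this message, mirroring its use in rt_varea_fix_private_locked(); kbuf is assumed to be a page-sized kernel buffer:
/* illustrative: read one page of a varea's backing object into kbuf */
rt_inline int read_one_page_sketch(rt_varea_t varea, rt_size_t off,
                                   void *fault_va, void *kbuf)
{
    struct rt_aspace_io_msg io;
    if (!varea->mem_obj || !varea->mem_obj->page_read)
        return -1;
    rt_mm_io_msg_init(&io, off, fault_va, kbuf);
    varea->mem_obj->page_read(varea, &io);
    return (io.response.status == MM_FAULT_STATUS_OK) ? 0 : -1;
}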
struct rt_aspace;
/* MMU base page fault handler, return 1 is fixable */
/* MMU base page fault handler, MM_FAULT_FIXABLE_TRUE/MM_FAULT_FIXABLE_FALSE will be returned */
int rt_aspace_fault_try_fix(struct rt_aspace *aspace, struct rt_aspace_fault_msg *msg);
#endif /* __MM_FAULT_H__ */

View File

@ -10,6 +10,8 @@
#ifndef __MM_FLAG_H__
#define __MM_FLAG_H__
#include <rtthread.h>
/**
* @brief mm_flag_t
* |max ------- 7|6 ----- 0|
@ -27,10 +29,19 @@ typedef unsigned long mm_flag_t;
enum mm_flag_cntl
{
/**
* @brief Indicate a possible COW mapping
* @brief Modifications to the mapped data shall be visible only to the
* aspace and shall not change the underlying object. It is
* unspecified whether modifications to the underlying object done after
* the MAP_PRIVATE mapping is established are visible through the
* MAP_PRIVATE mapping.
*/
MMF_MAP_PRIVATE = _DEF_FLAG(0),
MMF_COW = _DEF_FLAG(1),
/**
* @brief Same as MMF_MAP_PRIVATE, except the modification after mapping is
* invisible to the varea
*/
MMF_MAP_PRIVATE_DONT_SYNC = _DEF_FLAG(1),
/**
* @brief [POSIX MAP_FIXED] When MAP_FIXED is set in the flags argument, the

View File

@ -27,7 +27,7 @@ void rt_kmem_list(void) __attribute__((alias("list_kmem")));
static rt_ubase_t rt_pv_offset;
rt_ubase_t rt_kmem_pvoff(void)
const rt_ubase_t rt_kmem_pvoff(void)
{
return rt_pv_offset;
}
@ -78,3 +78,29 @@ void *rt_kmem_v2p(void *vaddr)
{
return rt_hw_mmu_v2p(&rt_kernel_space, vaddr);
}
void *rt_kmem_p2v(void *paddr)
{
char *rc;
char *linear_va;
char *linear_pa;
if (paddr != ARCH_MAP_FAILED)
{
linear_va = (char *)paddr - PV_OFFSET;
linear_pa = rt_kmem_v2p(linear_va);
if (linear_pa != paddr)
{
rc = RT_NULL;
}
else
{
rc = linear_va;
}
}
else
{
rc = RT_NULL;
}
return rc;
}
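A round-trip sketch pairing the translator above with rt_kmem_v2p(); the helper name is purely illustrative:
/* illustrative: check whether vaddr lies in the kernel linear mapping */
rt_inline rt_bool_t in_linear_map_sketch(void *vaddr)
{
    void *pa = rt_kmem_v2p(vaddr);
    return (pa != ARCH_MAP_FAILED) && (rt_kmem_p2v(pa) == vaddr);
}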

View File

@ -6,8 +6,13 @@
* Change Logs:
* Date Author Notes
* 2022-11-30 WangXiaoyao the first version
* 2023-08-19 Shell Support varea modification handler
*/
#define DBG_TAG "mm.object"
#define DBG_LVL DBG_INFO
#include "rtdbg.h"
#include <rtthread.h>
#include "mm_aspace.h"
@ -15,15 +20,38 @@
#include "mm_page.h"
#include <mmu.h>
#define DBG_TAG "mm.object"
#define DBG_LVL DBG_INFO
#include "rtdbg.h"
#include <string.h>
#include <stdlib.h>
/** varea-based dummy memory object whose data comes directly from page frames */
static const char *get_name(rt_varea_t varea)
{
return "dummy-mapper";
}
static rt_bool_t _varea_pgmgr_frame_is_member(rt_varea_t varea, rt_page_t frame)
{
rt_page_t iter;
rt_bool_t rc = RT_FALSE;
if (varea->frames)
{
iter = varea->frames;
do
{
if (iter == frame)
{
rc = RT_TRUE;
break;
}
iter = iter->next;
} while (iter);
}
return rc;
}
void rt_varea_pgmgr_insert(rt_varea_t varea, void *page_addr)
{
rt_page_t page = rt_page_addr2page(page_addr);
@ -31,10 +59,12 @@ void rt_varea_pgmgr_insert(rt_varea_t varea, void *page_addr)
if (varea->frames == NULL)
{
varea->frames = page;
page->next = NULL;
page->pre = RT_NULL;
page->next = RT_NULL;
}
else
{
page->pre = RT_NULL;
varea->frames->pre = page;
page->next = varea->frames;
varea->frames = page;
@ -52,6 +82,8 @@ void rt_varea_pgmgr_pop_all(rt_varea_t varea)
rt_pages_free(pg_va, 0);
page = next;
}
varea->frames = RT_NULL;
}
void rt_varea_pgmgr_pop(rt_varea_t varea, void *vaddr, rt_size_t size)
@ -60,9 +92,16 @@ void rt_varea_pgmgr_pop(rt_varea_t varea, void *vaddr, rt_size_t size)
while (vaddr != vend)
{
rt_page_t page = rt_page_addr2page(vaddr);
if (_varea_pgmgr_frame_is_member(varea, page))
{
if (page->pre)
page->pre->next = page->next;
if (page->next)
page->next->pre = page->pre;
if (varea->frames == page)
varea->frames = page->next;
rt_pages_free(vaddr, 0);
}
vaddr = (char *)vaddr + ARCH_PAGE_SIZE;
}
}
@ -94,9 +133,149 @@ static void on_varea_close(struct rt_varea *varea)
{
}
static void on_page_offload(rt_varea_t varea, void *vaddr, rt_size_t size)
static rt_err_t on_varea_expand(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
{
rt_varea_pgmgr_pop(varea, vaddr, size);
return RT_EOK;
}
static void _remove_pages(rt_varea_t varea, void *rm_start, void *rm_end)
{
void *page_va;
RT_ASSERT(!((rt_ubase_t)rm_start & ARCH_PAGE_MASK));
RT_ASSERT(!((rt_ubase_t)rm_end & ARCH_PAGE_MASK));
while (rm_start != rm_end)
{
page_va = rt_hw_mmu_v2p(varea->aspace, rm_start);
if (page_va != ARCH_MAP_FAILED)
{
page_va -= PV_OFFSET;
LOG_D("%s: free page %p", __func__, page_va);
rt_varea_unmap_page(varea, rm_start);
rt_varea_pgmgr_pop(varea, page_va, ARCH_PAGE_SIZE);
}
rm_start += ARCH_PAGE_SIZE;
}
}
static rt_err_t on_varea_shrink(rt_varea_t varea, void *new_start, rt_size_t size)
{
char *varea_start = varea->start;
void *rm_start;
void *rm_end;
if (varea_start == (char *)new_start)
{
rm_start = varea_start + size;
rm_end = varea_start + varea->size;
}
else /* if (varea_start < (char *)new_start) */
{
RT_ASSERT(varea_start < (char *)new_start);
rm_start = varea_start;
rm_end = new_start;
}
_remove_pages(varea, rm_start, rm_end);
return RT_EOK;
}
static rt_err_t on_varea_split(struct rt_varea *existed, void *unmap_start, rt_size_t unmap_len, struct rt_varea *subset)
{
void *sub_start = subset->start;
void *sub_end = sub_start + subset->size;
void *page_va;
_remove_pages(existed, unmap_start, (char *)unmap_start + unmap_len);
RT_ASSERT(!((rt_ubase_t)sub_start & ARCH_PAGE_MASK));
RT_ASSERT(!((rt_ubase_t)sub_end & ARCH_PAGE_MASK));
while (sub_start != sub_end)
{
page_va = rt_hw_mmu_v2p(existed->aspace, sub_start);
if (page_va != ARCH_MAP_FAILED)
{
rt_page_t frame;
page_va = rt_kmem_p2v(page_va);
if (page_va)
{
frame = rt_page_addr2page(page_va);
if (frame && _varea_pgmgr_frame_is_member(existed, frame))
{
LOG_D("%s: free page %p", __func__, page_va);
rt_page_ref_inc(page_va, 0);
rt_varea_pgmgr_pop(existed, page_va, ARCH_PAGE_SIZE);
rt_varea_pgmgr_insert(subset, page_va);
}
}
}
sub_start += ARCH_PAGE_SIZE;
}
return RT_EOK;
}
static rt_err_t on_varea_merge(struct rt_varea *merge_to, struct rt_varea *merge_from)
{
/* transport page */
void *mr_start = merge_from->start;
void *mr_end = mr_start + merge_from->size;
void *page_va;
RT_ASSERT(!((rt_ubase_t)mr_start & ARCH_PAGE_MASK));
RT_ASSERT(!((rt_ubase_t)mr_end & ARCH_PAGE_MASK));
while (mr_start != mr_end)
{
page_va = rt_hw_mmu_v2p(merge_from->aspace, mr_start);
if (page_va != ARCH_MAP_FAILED)
{
rt_page_t frame;
page_va = rt_kmem_p2v(page_va);
if (page_va)
{
frame = rt_page_addr2page(page_va);
if (frame && _varea_pgmgr_frame_is_member(merge_from, frame))
{
LOG_D("%s: free page %p", __func__, page_va);
rt_page_ref_inc(page_va, 0);
rt_varea_pgmgr_pop(merge_from, page_va, ARCH_PAGE_SIZE);
rt_varea_pgmgr_insert(merge_to, page_va);
}
}
}
mr_start += ARCH_PAGE_SIZE;
}
return RT_EOK;
}
static void page_read(struct rt_varea *varea, struct rt_aspace_io_msg *msg)
{
char *dst_k;
rt_aspace_t aspace = varea->aspace;
dst_k = rt_hw_mmu_v2p(aspace, msg->fault_vaddr);
if (dst_k != ARCH_MAP_FAILED)
{
RT_ASSERT(!((long)dst_k & ARCH_PAGE_MASK));
dst_k = (void *)((char *)dst_k - PV_OFFSET);
memcpy(msg->buffer_vaddr, dst_k, ARCH_PAGE_SIZE);
msg->response.status = MM_FAULT_STATUS_OK;
}
}
static void page_write(struct rt_varea *varea, struct rt_aspace_io_msg *msg)
{
void *dst_k;
rt_aspace_t aspace = varea->aspace;
dst_k = rt_hw_mmu_v2p(aspace, msg->fault_vaddr);
if (dst_k != ARCH_MAP_FAILED)
{
RT_ASSERT(!((long)dst_k & ARCH_PAGE_MASK));
dst_k = (void *)((char *)dst_k - PV_OFFSET);
memcpy(dst_k, msg->buffer_vaddr, ARCH_PAGE_SIZE);
msg->response.status = MM_FAULT_STATUS_OK;
}
}
struct rt_mem_obj rt_mm_dummy_mapper = {
@ -105,5 +284,12 @@ struct rt_mem_obj rt_mm_dummy_mapper = {
.hint_free = NULL,
.on_varea_open = on_varea_open,
.on_varea_close = on_varea_close,
.on_page_offload = on_page_offload,
.on_varea_shrink = on_varea_shrink,
.on_varea_split = on_varea_split,
.on_varea_expand = on_varea_expand,
.on_varea_merge = on_varea_merge,
.page_write = page_write,
.page_read = page_read,
};

View File

@ -91,10 +91,13 @@ static rt_page_t _trace_head;
#define TRACE_ALLOC(pg, size) _trace_alloc(pg, __builtin_return_address(0), size)
#define TRACE_FREE(pgaddr, size) _trace_free(pgaddr, __builtin_return_address(0), size)
static long _alloc_cnt;
void rt_page_leak_trace_start()
{
// TODO multicore safety
_trace_head = NULL;
_alloc_cnt = 0;
enable = 1;
}
MSH_CMD_EXPORT(rt_page_leak_trace_start, start page leak tracer);
@ -104,17 +107,19 @@ static void _collect()
rt_page_t page = _trace_head;
if (!page)
{
rt_kputs("ok!\n");
LOG_RAW("ok! ALLOC CNT %ld\n", _alloc_cnt);
}
else
{
while (page)
{
rt_page_t next = page->next;
rt_page_t next = page->tl_next;
void *pg_va = rt_page_page2addr(page);
LOG_W("LEAK: %p, allocator: %p, size bits: %lx", pg_va, page->caller, page->trace_size);
rt_pages_free(pg_va, page->trace_size);
page = next;
}
}
}
void rt_page_leak_trace_stop()
@ -129,11 +134,13 @@ static void _trace_alloc(rt_page_t page, void *caller, size_t size_bits)
{
if (enable)
{
char *page_va = rt_page_page2addr(page);
page->caller = caller;
page->trace_size = size_bits;
page->tl_prev = NULL;
page->tl_next = NULL;
_alloc_cnt++;
if (_trace_head == NULL)
{
_trace_head = page;
@ -147,12 +154,12 @@ static void _trace_alloc(rt_page_t page, void *caller, size_t size_bits)
}
}
void _report(rt_page_t page, size_bits, char *msg)
void _report(rt_page_t page, size_t size_bits, char *msg)
{
void *pg_va = rt_page_page2addr(page);
LOG_W("%s: %p, allocator: %p, size bits: %lx", msg, pg_va, page->caller, page->trace_size);
rt_kputs("backtrace\n");
rt_hw_backtrace(0, 0);
LOG_RAW("backtrace\n");
rt_backtrace();
}
static void _trace_free(rt_page_t page, void *caller, size_t size_bits)
@ -162,25 +169,26 @@ static void _trace_free(rt_page_t page, void *caller, size_t size_bits)
/* free after free */
if (page->trace_size == 0xabadcafe)
{
_report("free after free")
_report(page, size_bits, "free after free");
return ;
}
else if (page->trace_size != size_bits)
{
rt_kprintf("free with size bits %lx\n", size_bits);
_report("incompatible size bits parameter");
LOG_RAW("free with size bits %lx\n", size_bits);
_report(page, size_bits, "incompatible size bits parameter");
return ;
}
if (page->ref_cnt == 1)
if (page->ref_cnt == 0)
{
_alloc_cnt--;
if (page->tl_prev)
page->tl_prev->tl_next = page->tl_next;
if (page->tl_next)
page->tl_next->tl_prev = page->tl_prev;
if (page == _trace_head)
_trace_head = page->next;
_trace_head = page->tl_next;
page->tl_prev = NULL;
page->tl_next = NULL;
@ -531,7 +539,7 @@ static rt_page_t *_flag_to_page_list(size_t flags)
return page_list;
}
static void *_do_pages_alloc(rt_uint32_t size_bits, size_t flags)
rt_inline void *_do_pages_alloc(rt_uint32_t size_bits, size_t flags)
{
void *alloc_buf = RT_NULL;
struct rt_page *p;
@ -613,27 +621,27 @@ void list_page(void)
struct rt_page *lp = page_list_low[i];
struct rt_page *hp = page_list_high[i];
rt_kprintf("level %d ", i);
LOG_RAW("level %d ", i);
while (lp)
{
free += (1UL << i);
rt_kprintf("[0x%08p]", rt_page_page2addr(lp));
LOG_RAW("[0x%08p]", rt_page_page2addr(lp));
lp = lp->next;
}
while (hp)
{
free += (1UL << i);
rt_kprintf("[0x%08p]", rt_page_page2addr(hp));
LOG_RAW("[0x%08p]", rt_page_page2addr(hp));
hp = hp->next;
}
rt_kprintf("\n");
LOG_RAW("\n");
}
rt_hw_interrupt_enable(level);
rt_kprintf("-------------------------------\n");
rt_kprintf("Page Summary:\n => free/installed: 0x%lx/0x%lx (%ld/%ld KB)\n", free, installed, PGNR2SIZE(free), PGNR2SIZE(installed));
rt_kprintf("-------------------------------\n");
LOG_RAW("-------------------------------\n");
LOG_RAW("Page Summary:\n => free/installed: 0x%lx/0x%lx (%ld/%ld KB)\n", free, installed, PGNR2SIZE(free), PGNR2SIZE(installed));
LOG_RAW("-------------------------------\n");
}
MSH_CMD_EXPORT(list_page, show page info);

View File

@ -35,7 +35,7 @@
#ifdef RT_DEBUGING_PAGE_LEAK
#define DEBUG_FIELD { \
#define DEBUG_FIELD struct { \
/* trace list */ \
struct rt_page *tl_next; \
struct rt_page *tl_prev; \
@ -50,7 +50,7 @@ DEF_PAGE_T(
struct rt_page *next; /* same level next */
struct rt_page *pre; /* same level pre */
DEBUG_FIELD
DEBUG_FIELD;
rt_uint32_t size_bits; /* if is ARCH_ADDRESS_WIDTH_BITS, means not free */
rt_uint32_t ref_cnt; /* page group ref count */

View File

@ -11,6 +11,10 @@
#define __MM_PRIVATE_H__
#include "mm_aspace.h"
#include "mm_fault.h"
#include "mm_flag.h"
#include "mm_page.h"
#include <rtdef.h>
#include <stddef.h>
@ -29,6 +33,17 @@
* where both start and end are inclusive.
*/
#define VAREA_NOT_STATIC(varea) (!((varea)->flag & MMF_STATIC_ALLOC))
#define VAREA_NAME(varea) \
((!varea->mem_obj || !varea->mem_obj->get_name) \
? "unknow" \
: varea->mem_obj->get_name(varea))
#define VAREA_IS_WRITABLE(varea) \
(rt_hw_mmu_attr_test_perm(varea->attr, \
RT_HW_MMU_PROT_USER | RT_HW_MMU_PROT_WRITE))
#define VAREA_VA_TO_OFFSET(varea, va) \
((varea)->offset + MM_PA_TO_OFF((long)(va) - (long)(varea)->start))
struct _mm_range
{
void *start;
@ -94,6 +109,29 @@ void rt_varea_pgmgr_pop(rt_varea_t varea, void *vaddr, rt_size_t size);
void rt_varea_pgmgr_pop_all(rt_varea_t varea);
int _varea_map_with_msg(rt_varea_t varea, struct rt_aspace_fault_msg *msg);
int rt_varea_fix_private_locked(rt_varea_t ex_varea, void *pa,
struct rt_aspace_fault_msg *msg,
rt_bool_t dont_copy);
int rt_varea_map_with_msg(rt_varea_t varea, struct rt_aspace_fault_msg *msg);
void _varea_uninstall_locked(rt_varea_t varea);
int _mm_aspace_map(rt_aspace_t aspace, rt_varea_t *pvarea, void **addr,
rt_size_t length, rt_size_t attr, mm_flag_t flags,
rt_mem_obj_t mem_obj, rt_size_t offset);
rt_inline rt_bool_t rt_varea_is_private_locked(rt_varea_t varea)
{
rt_base_t flags = varea->flag;
return !!(
(flags & (MMF_MAP_PRIVATE | MMF_MAP_PRIVATE_DONT_SYNC))
&& (varea->aspace->private_object != varea->mem_obj)
);
}
rt_err_t rt_aspace_anon_ref_dec(rt_mem_obj_t aobj);
rt_err_t rt_aspace_page_get_phy(rt_aspace_t aspace, void *page_va, void *buffer);
rt_err_t rt_aspace_page_put_phy(rt_aspace_t aspace, void *page_va, void *buffer);
#endif /* __MM_PRIVATE_H__ */

View File

@ -726,11 +726,11 @@ enum
#define RT_THREAD_CTRL_INFO 0x03 /**< Get thread information. */
#define RT_THREAD_CTRL_BIND_CPU 0x04 /**< Set thread bind cpu. */
#ifdef RT_USING_SMP
#define RT_CPU_DETACHED RT_CPUS_NR /**< The thread is not running on any cpu. */
#define RT_CPU_MASK ((1 << RT_CPUS_NR) - 1) /**< All CPUs mask bit. */
#ifdef RT_USING_SMP
#ifndef RT_SCHEDULE_IPI
#define RT_SCHEDULE_IPI 0
#endif /* RT_SCHEDULE_IPI */
@ -739,6 +739,17 @@ enum
#define RT_STOP_IPI 1
#endif /* RT_STOP_IPI */
#endif /* RT_USING_SMP */
struct rt_cpu_usage_stats
{
rt_uint64_t user;
rt_uint64_t system;
rt_uint64_t irq;
rt_uint64_t idle;
};
typedef struct rt_cpu_usage_stats *rt_cpu_usage_stats_t;
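For illustration, a sampler might diff two snapshots of these counters to derive a busy percentage; the snapshotting itself is outside this sketch:
/* illustrative: percent of non-idle time between two snapshots a and b */
rt_inline rt_uint64_t cpu_busy_percent_sketch(const struct rt_cpu_usage_stats *a,
                                              const struct rt_cpu_usage_stats *b)
{
    rt_uint64_t busy  = (b->user - a->user) + (b->system - a->system)
                      + (b->irq - a->irq);
    rt_uint64_t total = busy + (b->idle - a->idle);
    return total ? (busy * 100) / total : 0;
}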
/**
* CPUs definitions
*
@ -746,7 +757,7 @@ enum
struct rt_cpu
{
struct rt_thread *current_thread;
struct rt_thread *idle_thread;
rt_uint16_t irq_nest;
rt_uint8_t irq_switch_flag;
@ -760,9 +771,11 @@ struct rt_cpu
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
rt_tick_t tick;
#ifdef RT_USING_SMART
struct rt_cpu_usage_stats cpu_stat;
#endif
};
#endif /* RT_USING_SMP */
typedef struct rt_cpu *rt_cpu_t;
struct rt_thread;
@ -936,6 +949,9 @@ struct rt_thread
int exit_request;
int tid;
rt_uint64_t user_time;
rt_uint64_t system_time;
#ifndef ARCH_MM_MMU
lwp_sighandler_t signal_handler[32];
#else
@ -948,11 +964,14 @@ struct rt_thread
int *clear_child_tid;
#endif /* ARCH_MM_MMU */
#endif /* RT_USING_SMART */
rt_ubase_t user_data; /**< private user data beyond this thread */
};
typedef struct rt_thread *rt_thread_t;
#ifdef RT_USING_SMART
#define IS_USER_MODE(t) ((t)->user_ctx.ctx == RT_NULL)
#endif /* RT_USING_SMART */
/**@}*/
/**

View File

@ -127,6 +127,8 @@ void rt_hw_local_irq_enable(rt_base_t level);
#define rt_hw_interrupt_disable rt_cpus_lock
#define rt_hw_interrupt_enable rt_cpus_unlock
#else
#define rt_hw_local_irq_disable rt_hw_interrupt_disable
#define rt_hw_local_irq_enable rt_hw_interrupt_enable
rt_base_t rt_hw_interrupt_disable(void);
void rt_hw_interrupt_enable(rt_base_t level);
#endif /*RT_USING_SMP*/

View File

@ -624,11 +624,11 @@ void rt_interrupt_leave(void);
rt_base_t rt_cpus_lock(void);
void rt_cpus_unlock(rt_base_t level);
#endif /* RT_USING_SMP */
struct rt_cpu *rt_cpu_self(void);
struct rt_cpu *rt_cpu_index(int index);
#endif /* RT_USING_SMP */
/*
* the number of nested interrupts.
*/

View File

@ -13,10 +13,6 @@
#include <rtthread.h>
#include <stdbool.h>
#ifndef RT_CPUS_NR
#define RT_CPUS_NR 1
#endif /* RT_CPUS_NR */
#ifdef RT_USING_SMP
struct cpu_ops_t
{

View File

@ -54,6 +54,10 @@ static void data_abort(unsigned long far, unsigned long iss)
rt_kprintf("Translation fault, third level\n");
break;
case 0b001000:
rt_kprintf("Access flag fault, zeroth level\n");
break;
case 0b001001:
rt_kprintf("Access flag fault, first level\n");
break;
@ -66,6 +70,10 @@ static void data_abort(unsigned long far, unsigned long iss)
rt_kprintf("Access flag fault, third level\n");
break;
case 0b001100:
rt_kprintf("Permission fault, zeroth level\n");
break;
case 0b001101:
rt_kprintf("Permission fault, first level\n");
break;
@ -148,7 +156,7 @@ static void data_abort(unsigned long far, unsigned long iss)
}
}
void process_exception(unsigned long esr, unsigned long epc)
void print_exception(unsigned long esr, unsigned long epc)
{
rt_uint8_t ec;
rt_uint32_t iss;

View File

@ -204,7 +204,7 @@ static int _kernel_map_2M(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsi
{
return MMU_MAP_ERROR_VANOTALIGN;
}
if (pa & ARCH_SECTION_MASK)
if (pa & ARCH_PAGE_MASK)
{
return MMU_MAP_ERROR_PANOTALIGN;
}
@ -556,7 +556,7 @@ static int _map_single_page_2M(unsigned long *lv0_tbl, unsigned long va,
{
return MMU_MAP_ERROR_VANOTALIGN;
}
if (pa & ARCH_SECTION_MASK)
if (pa & ARCH_PAGE_MASK)
{
return MMU_MAP_ERROR_PANOTALIGN;
}
@ -803,8 +803,8 @@ void rt_hw_mem_setup_early(unsigned long *tbl0, unsigned long *tbl1,
#ifdef RT_USING_SMART
unsigned long va = KERNEL_VADDR_START;
#else
extern unsigned char _start;
unsigned long va = (unsigned long) &_start;
extern unsigned char __start;
unsigned long va = (unsigned long) &__start;
va = RT_ALIGN_DOWN(va, 0x200000);
#endif
@ -826,3 +826,22 @@ void rt_hw_mem_setup_early(unsigned long *tbl0, unsigned long *tbl1,
while (1);
}
}
void *rt_hw_mmu_pgtbl_create(void)
{
size_t *mmu_table;
mmu_table = (size_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
if (!mmu_table)
{
return RT_NULL;
}
memset(mmu_table, 0, ARCH_PAGE_SIZE);
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);
return mmu_table;
}
void rt_hw_mmu_pgtbl_delete(void *pgtbl)
{
rt_pages_free(pgtbl, 0);
}

View File

@ -1,11 +1,12 @@
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-12 RT-Thread the first version
* 2023-08-15 Shell Support more mapping attribution
*/
#ifndef __MMU_H_
#define __MMU_H_
@ -29,27 +30,42 @@ struct mem_desc
struct rt_varea varea;
};
enum rt_hw_mmu_prot_t {
RT_HW_MMU_PROT_READ,
RT_HW_MMU_PROT_WRITE,
RT_HW_MMU_PROT_EXECUTE,
RT_HW_MMU_PROT_KERNEL,
RT_HW_MMU_PROT_USER,
RT_HW_MMU_PROT_CACHE,
};
#define MMU_AF_SHIFT 10
#define MMU_SHARED_SHIFT 8
#define MMU_AP_SHIFT 6
#define MMU_MA_SHIFT 2
#define MMU_AP_MASK (0x3 << MMU_AP_SHIFT)
#define MMU_AP_KAUN 0UL /* kernel r/w, user none */
#define MMU_AP_KAUA 1UL /* kernel r/w, user r/w */
#define MMU_AP_KRUN 2UL /* kernel r, user none */
#define MMU_AP_KRUR 3UL /* kernel r, user r */
#define MMU_ATTR_AF (1ul << MMU_AF_SHIFT) /* the access flag */
#define MMU_ATTR_DBM (1ul << 51) /* the dirty bit modifier */
#define MMU_MAP_CUSTOM(ap, mtype) \
((0x1UL << MMU_AF_SHIFT) | (0x2UL << MMU_SHARED_SHIFT) | \
((ap) << MMU_AP_SHIFT) | ((mtype) << MMU_MA_SHIFT))
#define MMU_MAP_K_RO MMU_MAP_CUSTOM(MMU_AP_KRUN, NORMAL_MEM)
#define MMU_MAP_K_ROCB MMU_MAP_CUSTOM(MMU_AP_KRUN, NORMAL_MEM)
#define MMU_MAP_K_RO MMU_MAP_CUSTOM(MMU_AP_KRUN, NORMAL_NOCACHE_MEM)
#define MMU_MAP_K_RWCB MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_MEM)
#define MMU_MAP_K_RW MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_NOCACHE_MEM)
#define MMU_MAP_K_DEVICE MMU_MAP_CUSTOM(MMU_AP_KAUN, DEVICE_MEM)
#define MMU_MAP_U_ROCB MMU_MAP_CUSTOM(MMU_AP_KRUR, NORMAL_MEM)
#define MMU_MAP_U_RO MMU_MAP_CUSTOM(MMU_AP_KRUR, NORMAL_NOCACHE_MEM)
#define MMU_MAP_U_RWCB MMU_MAP_CUSTOM(MMU_AP_KAUA, NORMAL_MEM)
#define MMU_MAP_U_RW MMU_MAP_CUSTOM(MMU_AP_KAUA, NORMAL_NOCACHE_MEM)
#define MMU_MAP_U_DEVICE MMU_MAP_CUSTOM(MMU_AP_KAUA, DEVICE_MEM)
#define MMU_MAP_TRACE(attr) ((attr) & ~(MMU_ATTR_AF | MMU_ATTR_DBM))
#define ARCH_SECTION_SHIFT 21
#define ARCH_SECTION_SIZE (1 << ARCH_SECTION_SHIFT)
@ -88,7 +104,8 @@ void rt_hw_aspace_switch(struct rt_aspace *aspace);
void *rt_hw_mmu_v2p(struct rt_aspace *aspace, void *vaddr);
void rt_hw_mmu_kernel_map_init(struct rt_aspace *aspace, rt_size_t vaddr_start,
rt_size_t size);
void rt_hw_mmu_ktbl_set(unsigned long tbl);
void *rt_hw_mmu_pgtbl_create(void);
void rt_hw_mmu_pgtbl_delete(void *pgtbl);
static inline void *rt_hw_mmu_tbl_get()
{
@ -101,8 +118,8 @@ static inline void *rt_hw_mmu_kernel_v2p(void *v_addr)
{
rt_ubase_t par;
void *paddr;
asm volatile("at s1e1w, %0"::"r"(v_addr):"memory");
asm volatile("mrs %0, par_el1":"=r"(par)::"memory");
__asm__ volatile("at s1e1w, %0"::"r"(v_addr):"memory");
__asm__ volatile("mrs %0, par_el1":"=r"(par)::"memory");
if (par & 0x1)
{
@ -118,6 +135,73 @@ static inline void *rt_hw_mmu_kernel_v2p(void *v_addr)
return paddr;
}
/**
* @brief Add permission from attribution
*
* @param attr architecture specified mmu attribution
* @param prot protect that will be added
* @return size_t returned attribution
*/
rt_inline size_t rt_hw_mmu_attr_add_perm(size_t attr, enum rt_hw_mmu_prot_t prot)
{
switch (prot)
{
/* add write permission for user */
case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
attr = (attr & ~MMU_AP_MASK) | (MMU_AP_KAUA << MMU_AP_SHIFT);
break;
default:
RT_ASSERT(0);
}
return attr;
}
/**
* @brief Remove permission from attribution
*
* @param attr architecture specified mmu attribution
* @param prot protect that will be removed
* @return size_t returned attribution
*/
rt_inline size_t rt_hw_mmu_attr_rm_perm(size_t attr, enum rt_hw_mmu_prot_t prot)
{
switch (prot)
{
/* remove write permission for user */
case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
if (attr & 0x40)
attr |= 0x80;
break;
default:
RT_ASSERT(0);
}
return attr;
}
/**
* @brief Test permission from attribution
*
* @param attr architecture specified mmu attribution
* @param prot protect that will be tested
* @return rt_bool_t RT_TRUE if the prot is allowed, otherwise RT_FALSE
*/
rt_inline rt_bool_t rt_hw_mmu_attr_test_perm(size_t attr, enum rt_hw_mmu_prot_t prot)
{
rt_bool_t rc;
switch (prot)
{
/* test write permission for user */
case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
if ((attr & MMU_AP_MASK) == (MMU_AP_KAUA << MMU_AP_SHIFT))
rc = RT_TRUE;
else
rc = RT_FALSE;
break;
default:
RT_ASSERT(0);
}
return rc;
}
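This mirrors how mm_anon.c upgrades a mapping attribute before fixing a private (COW) page; a condensed sketch, with the helper name as an assumption:
/* illustrative: grant user write permission only when it is missing */
rt_inline size_t ensure_user_writable_sketch(size_t attr)
{
    if (!rt_hw_mmu_attr_test_perm(attr, RT_HW_MMU_PROT_USER | RT_HW_MMU_PROT_WRITE))
        attr = rt_hw_mmu_attr_add_perm(attr, RT_HW_MMU_PROT_USER | RT_HW_MMU_PROT_WRITE);
    return attr;
}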
int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
enum rt_mmu_cntl cmd);

View File

@ -18,6 +18,10 @@
#include <backtrace.h>
#define DBG_TAG "libcpu.trap"
#define DBG_LVL DBG_LOG
#include <rtdbg.h>
void rt_unwind(struct rt_hw_exp_stack *regs, int pc_adj)
{
}
@ -40,7 +44,7 @@ static void _check_fault(struct rt_hw_exp_stack *regs, uint32_t pc_adj, char *in
if ((mode & 0x1f) == 0x00)
{
rt_kprintf("%s! pc = 0x%08x\n", info, regs->pc - pc_adj);
rt_kprintf("%s! pc = 0x%x\n", info, regs->pc - pc_adj);
/* user stack backtrace */
#ifdef RT_USING_LWP
@ -79,7 +83,7 @@ static void _check_fault(struct rt_hw_exp_stack *regs, uint32_t pc_adj, char *in
}
}
int _get_type(unsigned long esr)
rt_inline int _get_type(unsigned long esr)
{
int ret;
int fsc = esr & 0x3f;
@ -91,19 +95,31 @@ int _get_type(unsigned long esr)
case 0x7:
ret = MM_FAULT_TYPE_PAGE_FAULT;
break;
case 0xc:
case 0xd:
case 0xe:
case 0xf:
ret = MM_FAULT_TYPE_ACCESS_FAULT;
break;
case 0x8:
case 0x9:
case 0xa:
case 0xb:
ret = MM_FAULT_TYPE_ACCESS_FAULT;
break;
/* access flag fault */
default:
ret = MM_FAULT_TYPE_GENERIC;
}
return ret;
}
int check_user_stack(unsigned long esr, struct rt_hw_exp_stack *regs)
rt_inline long _irq_is_disable(long cpsr)
{
return !!(cpsr & 0x80);
}
static int user_fault_fixable(unsigned long esr, struct rt_hw_exp_stack *regs)
{
rt_ubase_t level;
unsigned char ec;
void *dfar;
int ret = 0;
@ -130,20 +146,24 @@ int check_user_stack(unsigned long esr, struct rt_hw_exp_stack *regs)
break;
}
if (fault_op)
/* page-fault fixing is only allowed for exceptions coming from user space */
lwp = lwp_self();
if (lwp && fault_op)
{
asm volatile("mrs %0, far_el1":"=r"(dfar));
__asm__ volatile("mrs %0, far_el1":"=r"(dfar));
struct rt_aspace_fault_msg msg = {
.fault_op = fault_op,
.fault_type = fault_type,
.fault_vaddr = dfar,
};
lwp = lwp_self();
RT_ASSERT(lwp);
lwp_user_setting_save(rt_thread_self());
__asm__ volatile("mrs %0, daif\nmsr daifclr, 0x3\nisb\n":"=r"(level));
if (rt_aspace_fault_try_fix(lwp->aspace, &msg))
{
ret = 1;
}
__asm__ volatile("msr daif, %0\nisb\n"::"r"(level));
}
return ret;
}
@ -269,6 +289,12 @@ void rt_hw_trap_irq(void)
#endif
}
#ifdef RT_USING_SMART
#define DBG_CHECK_EVENT(regs, esr) dbg_check_event(regs, esr)
#else
#define DBG_CHECK_EVENT(regs, esr) (0)
#endif
void rt_hw_trap_fiq(void)
{
void *param;
@ -292,7 +318,7 @@ void rt_hw_trap_fiq(void)
rt_hw_interrupt_ack(ir);
}
void process_exception(unsigned long esr, unsigned long epc);
void print_exception(unsigned long esr, unsigned long epc);
void SVC_Handler(struct rt_hw_exp_stack *regs);
void rt_hw_trap_exception(struct rt_hw_exp_stack *regs)
{
@ -302,27 +328,43 @@ void rt_hw_trap_exception(struct rt_hw_exp_stack *regs)
asm volatile("mrs %0, esr_el1":"=r"(esr));
ec = (unsigned char)((esr >> 26) & 0x3fU);
#ifdef RT_USING_LWP
if (dbg_check_event(regs, esr))
if (DBG_CHECK_EVENT(regs, esr))
{
return;
}
else
#endif
if (ec == 0x15) /* is 64bit syscall ? */
else if (ec == 0x15) /* is 64bit syscall ? */
{
SVC_Handler(regs);
/* never return here */
}
#ifdef RT_USING_LWP
if (check_user_stack(esr, regs))
#ifdef RT_USING_SMART
/**
* Note: user_fault_fixable() takes the aspace lock and can dead-lock
* if the exception comes from the kernel.
*/
if ((regs->cpsr & 0x1f) == 0)
{
if (user_fault_fixable(esr, regs))
return;
}
else
{
if (_irq_is_disable(regs->cpsr))
{
LOG_E("Kernel fault from interrupt/critical section");
}
if (rt_critical_level() != 0)
{
LOG_E("scheduler is not available");
}
else if (user_fault_fixable(esr, regs))
return;
}
#endif
process_exception(esr, regs->pc);
print_exception(esr, regs->pc);
rt_hw_show_register(regs);
rt_kprintf("current: %s\n", rt_thread_self()->parent.name);
LOG_E("current thread: %s\n", rt_thread_self()->parent.name);
#ifdef RT_USING_FINSH
list_thread();

View File

@ -38,8 +38,8 @@ boot_arg1 .req x23
boot_arg2 .req x24
stack_top .req x25
.global _start
_start:
.global __start
__start:
/*
* Boot CPU general-purpose register settings:
* x0 = physical address of device tree blob (dtb) in system RAM.
@ -165,8 +165,8 @@ _start:
dsb sy
#ifdef RT_USING_SMART
ldr x2, =_start
GET_PHY x3, _start
ldr x2, =__start
GET_PHY x3, __start
sub x3, x3, x2
#else
mov x3,0

View File

@ -1,11 +1,12 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2012-01-10 bernard porting to AM1808
* 2023-10-10 Shell Add permission control API
*/
#include <rthw.h>
@ -440,3 +441,25 @@ int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
{
return -RT_ENOSYS;
}
#define KPTE_START (KERNEL_VADDR_START >> ARCH_SECTION_SHIFT)
void *rt_hw_mmu_pgtbl_create(void)
{
size_t *mmu_table;
mmu_table = (size_t *)rt_pages_alloc_ext(2, PAGE_ANY_AVAILABLE);
if (!mmu_table)
{
return RT_NULL;
}
rt_memcpy(mmu_table + KPTE_START, (size_t *)rt_kernel_space.page_table + KPTE_START, ARCH_PAGE_SIZE);
rt_memset(mmu_table, 0, 3 * ARCH_PAGE_SIZE);
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, 4 * ARCH_PAGE_SIZE);
return mmu_table;
}
void rt_hw_mmu_pgtbl_delete(void *pgtbl)
{
rt_pages_free(pgtbl, 2);
}

View File

@ -1,11 +1,12 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-03-25 quanzhao the first version
* 2023-10-10 Shell Add permission control API
*/
#ifndef __MMU_H_
#define __MMU_H_
@ -25,7 +26,7 @@
#define AP_RO ((1<<10) |(1 << 15)) /* supervisor=RW, user=No */
#else
#define AP_RW (3<<10) /* supervisor=RW, user=RW */
#define AP_RO ((2<<10) /* supervisor=RW, user=RO */
#define AP_RO (2<<10) /* supervisor=RW, user=RO */
#endif
#define SHARED (1<<16) /* shareable */
@ -65,14 +66,17 @@ struct mem_desc
#define MMU_MAP_MTBL_SHARE (1<<10)
#define MMU_MAP_MTBL_NG(x) (x<<11)
#define MMU_MAP_K_RO (MMU_MAP_MTBL_NG(0))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(1)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE)
#define MMU_MAP_K_RWCB (MMU_MAP_MTBL_NG(0))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE)
#define MMU_MAP_K_RW (MMU_MAP_MTBL_NG(0))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_SHARE)
#define MMU_MAP_K_DEVICE (MMU_MAP_MTBL_NG(0))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_SHARE)
#define MMU_MAP_U_RO (MMU_MAP_MTBL_NG(1))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(2)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE)
#define MMU_MAP_U_RWCB (MMU_MAP_MTBL_NG(1))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(3)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE)
#define MMU_MAP_U_RW (MMU_MAP_MTBL_NG(1))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(3)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_SHARE)
#define MMU_MAP_U_DEVICE (MMU_MAP_MTBL_NG(1))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(3)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_SHARE)
#define MMU_MAP_K_ROCB ((MMU_MAP_MTBL_NG(0))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(1)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_K_RO ((MMU_MAP_MTBL_NG(0))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(1)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_K_RWCB ((MMU_MAP_MTBL_NG(0))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_K_RW ((MMU_MAP_MTBL_NG(0))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_K_DEVICE ((MMU_MAP_MTBL_NG(0))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_U_ROCB ((MMU_MAP_MTBL_NG(1))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(2)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_U_RO ((MMU_MAP_MTBL_NG(1))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(2)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_U_RWCB ((MMU_MAP_MTBL_NG(1))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(3)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_U_RW ((MMU_MAP_MTBL_NG(1))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(3)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_U_DEVICE ((MMU_MAP_MTBL_NG(1))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(3)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_TRACE(attr) (attr)
#define ARCH_SECTION_SHIFT 20
#define ARCH_SECTION_SIZE (1 << ARCH_SECTION_SHIFT)
@ -97,6 +101,15 @@ struct mem_desc
*/
#define ARCH_MAP_FAILED ((void *)-1)
enum rt_hw_mmu_prot_t {
RT_HW_MMU_PROT_READ,
RT_HW_MMU_PROT_WRITE,
RT_HW_MMU_PROT_EXECUTE,
RT_HW_MMU_PROT_KERNEL,
RT_HW_MMU_PROT_USER,
RT_HW_MMU_PROT_CACHE,
};
int rt_hw_mmu_ioremap_init(struct rt_aspace *aspace, void *v_address, size_t size);
void rt_hw_init_mmu_table(struct mem_desc *mdesc, rt_uint32_t size);
@ -115,4 +128,90 @@ void *rt_hw_mmu_tbl_get();
int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size, enum rt_mmu_cntl cmd);
void *rt_hw_mmu_pgtbl_create(void);
void rt_hw_mmu_pgtbl_delete(void *pgtbl);
#define AP_APX_MASK (MMU_MAP_MTBL_AP2(0x1) | MMU_MAP_MTBL_AP01(0x3))
#define AP_APX_URW_KRW (MMU_MAP_MTBL_AP2(0x0) | MMU_MAP_MTBL_AP01(0x3))
#define AP_APX_URO_KRO (MMU_MAP_MTBL_AP2(0x1) | MMU_MAP_MTBL_AP01(0x2))
/**
 * @brief Remove a permission from an MMU attribute value
 *
 * @param attr architecture-specific MMU attribute
 * @param prot the permission to remove
 * @return size_t the updated attribute
 */
rt_inline size_t rt_hw_mmu_attr_rm_perm(size_t attr, enum rt_hw_mmu_prot_t prot)
{
    switch (prot)
    {
    /* remove write permission for user */
    case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
        if ((attr & AP_APX_MASK) == AP_APX_URW_KRW)
            attr &= ~MMU_MAP_MTBL_AP01(0x1);
        break;
    /* remove write permission for kernel */
    case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_KERNEL:
        switch (attr & AP_APX_MASK)
        {
        case MMU_MAP_MTBL_AP01(0):
            /* no access for anyone; nothing to remove */
            break;
        case MMU_MAP_MTBL_AP01(3):
            /* user RW / kernel RW -> read-only for both; keep the
             * memory-type bits and rewrite only the AP/APX field */
            attr = (attr & ~AP_APX_MASK) | AP_APX_URO_KRO;
            break;
        default:
            attr |= MMU_MAP_MTBL_AP2(0x1);
            break;
        }
        break;
    default:
        RT_ASSERT(0);
    }
    return attr;
}
/**
 * @brief Add a permission to an MMU attribute value
 *
 * @param attr architecture-specific MMU attribute
 * @param prot the permission to add
 * @return size_t the updated attribute
 */
rt_inline size_t rt_hw_mmu_attr_add_perm(size_t attr, enum rt_hw_mmu_prot_t prot)
{
    switch (prot)
    {
    /* add write permission for user or kernel */
    case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
    case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_KERNEL:
        attr |= MMU_MAP_MTBL_AP01(0x3);
        attr &= ~MMU_MAP_MTBL_AP2(0x1);
        break;
    default:
        RT_ASSERT(0);
    }
    return attr;
}
/**
 * @brief Test whether an MMU attribute value grants a permission
 *
 * @param attr architecture-specific MMU attribute
 * @param prot the permission to test
 * @return rt_bool_t RT_TRUE if the permission is granted, otherwise RT_FALSE
 */
rt_inline rt_bool_t rt_hw_mmu_attr_test_perm(size_t attr, enum rt_hw_mmu_prot_t prot)
{
    rt_bool_t rc = RT_FALSE;
    switch (prot)
    {
    /* test write permission for user */
    case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
        rc = (AP_APX_MASK & attr) == AP_APX_URW_KRW;
        break;
    default:
        RT_ASSERT(0);
    }
    return rc;
}
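Taken together, these helpers let the generic mm layer edit and query an attribute word without knowing the ARM AP/APX encoding. A minimal sketch of the intended round trip, assuming a mapping that starts as user read-write (MMU_MAP_U_RWCB from the macros above):

/* Sketch: drop user write access, verify, then grant it back. */
size_t attr = MMU_MAP_U_RWCB;

attr = rt_hw_mmu_attr_rm_perm(attr, RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER);
RT_ASSERT(!rt_hw_mmu_attr_test_perm(attr, RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER));

attr = rt_hw_mmu_attr_add_perm(attr, RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER);
RT_ASSERT(rt_hw_mmu_attr_test_perm(attr, RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER));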
#endif

View File

@ -8,14 +8,16 @@
* 2013-07-20 Bernard first version
*/
#include <backtrace.h>
#include <board.h>
#include <rthw.h>
#include <rtthread.h>

#include "interrupt.h"
#include "mm_fault.h"

#include <rtdbg.h>
#ifdef RT_USING_FINSH
extern long list_thread(void);
#endif
@ -42,14 +44,14 @@ void check_user_fault(struct rt_hw_exp_stack *regs, uint32_t pc_adj, char *info)
}
}
int check_data_abort(struct rt_hw_exp_stack *regs)
{
    struct rt_lwp *lwp;
    void *dfar = RT_NULL;
    rt_base_t dfsr = 0;

    /* read the data fault address/status registers from cp15 */
    __asm__ volatile("mrc p15, 0, %0, c6, c0, 0" : "=r"(dfar));
    __asm__ volatile("mrc p15, 0, %0, c5, c0, 0" : "=r"(dfsr));
if ((dfar >= (void *)USER_STACK_VSTART) && (dfar < (void *)USER_STACK_VEND))
{
struct rt_aspace_fault_msg msg = {
.fault_op = MM_FAULT_OP_WRITE,
.fault_type = MM_FAULT_TYPE_PAGE_FAULT,
@ -61,6 +63,28 @@ int check_user_stack(struct rt_hw_exp_stack *regs)
        /* the data abort reports pc + 8; rewind so the access is retried */
        regs->pc -= 8;
return 1;
}
return 0;
}
int check_prefetch_abort(struct rt_hw_exp_stack *regs)
{
    struct rt_lwp *lwp;
    void *ifar = RT_NULL;
    rt_base_t ifsr = 0;

    /* read the instruction fault address/status registers from cp15 */
    __asm__ volatile("mrc p15, 0, %0, c6, c0, 2" : "=r"(ifar));
    __asm__ volatile("mrc p15, 0, %0, c5, c0, 1" : "=r"(ifsr));

    struct rt_aspace_fault_msg msg = {
        .fault_op = MM_FAULT_OP_READ,
        .fault_type = MM_FAULT_TYPE_PAGE_FAULT,
        .fault_vaddr = ifar,
    };
lwp = lwp_self();
if (lwp && rt_aspace_fault_try_fix(lwp->aspace, &msg))
{
        /* the prefetch abort reports pc + 4; rewind so the fetch is retried */
        regs->pc -= 4;
return 1;
}
return 0;
@ -185,6 +209,10 @@ void rt_hw_trap_pabt(struct rt_hw_exp_stack *regs)
{
return;
}
if (check_prefetch_abort(regs))
{
return;
}
check_user_fault(regs, 4, "User prefetch abort");
#endif
rt_unwind(regs, 4);
@ -211,7 +239,7 @@ void rt_hw_trap_dabt(struct rt_hw_exp_stack *regs)
{
return;
}
if (check_data_abort(regs))
{
return;
}

View File

@ -7,6 +7,7 @@
* Date Author Notes
* 2021-01-30 lizhirui first version
* 2022-12-13 WangXiaoyao Port to new mm
* 2023-10-12 Shell Add permission control API
*/
#include <rtthread.h>
@ -567,3 +568,22 @@ void rt_hw_mmu_kernel_map_init(rt_aspace_t aspace, rt_size_t vaddr_start, rt_siz
rt_hw_tlb_invalidate_all_local();
}
void *rt_hw_mmu_pgtbl_create(void)
{
    rt_ubase_t *mmu_table;
    mmu_table = (rt_ubase_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
    if (!mmu_table)
    {
        return RT_NULL;
    }

    /* start from a copy of the kernel mappings, then flush the cache so
     * the MMU table walker sees a consistent page table */
    rt_memcpy(mmu_table, rt_kernel_space.page_table, ARCH_PAGE_SIZE);
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);

    return mmu_table;
}
void rt_hw_mmu_pgtbl_delete(void *pgtbl)
{
rt_pages_free(pgtbl, 0);
}
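This create/delete pair is what the smart (lwp) layer leans on when it builds a fresh user address space on top of the shared kernel mappings. A minimal sketch of the pairing, assuming the mm-layer rt_aspace_create() API and illustrative USER_VADDR_START/USER_VADDR_TOP bounds (neither is part of this diff):

/* Sketch: give a new user aspace its own page table. */
void *pgtbl = rt_hw_mmu_pgtbl_create();
if (pgtbl)
{
    rt_aspace_t aspace = rt_aspace_create((void *)USER_VADDR_START,
                                          USER_VADDR_TOP - USER_VADDR_START,
                                          pgtbl);
    if (!aspace)
    {
        /* creation failed: release the page table we just allocated */
        rt_hw_mmu_pgtbl_delete(pgtbl);
    }
}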

View File

@ -6,6 +6,7 @@
* Change Logs:
* Date Author Notes
* 2021-01-30 lizhirui first version
* 2023-10-12 Shell Add permission control API
*/
#ifndef __MMU_H__
@ -70,4 +71,7 @@ void *rt_hw_mmu_v2p(rt_aspace_t aspace, void *vaddr);
int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
enum rt_mmu_cntl cmd);
void *rt_hw_mmu_pgtbl_create(void);
void rt_hw_mmu_pgtbl_delete(void *pgtbl);
#endif

View File

@ -7,6 +7,7 @@
* Date Author Notes
* 2021-01-30 lizhirui first version
* 2021-05-03 lizhirui porting to c906
* 2023-10-12 Shell Add permission control API
*/
#ifndef __RISCV_MMU_H__
@ -90,8 +91,10 @@
#define MMU_MAP_K_RWCB PTE_WRAP(PAGE_ATTR_CB | PTE_G | PAGE_ATTR_RWX | PTE_V)
#define MMU_MAP_K_RW PTE_WRAP(PTE_G | PAGE_ATTR_RWX | PTE_V)
#define MMU_MAP_U_RWCB PTE_WRAP(PAGE_ATTR_CB | PTE_U | PAGE_ATTR_RWX | PTE_V)
#define MMU_MAP_U_ROCB PTE_WRAP(PAGE_ATTR_CB | PTE_U | PAGE_ATTR_READONLY | PTE_V)
#define MMU_MAP_U_RWCB_XN PTE_WRAP(PAGE_ATTR_CB | PTE_U | PAGE_ATTR_XN | PTE_V)
#define MMU_MAP_U_RW PTE_WRAP(PTE_U | PAGE_ATTR_RWX | PTE_V)
#define MMU_MAP_TRACE(attr) (attr)
#define PTE_XWR_MASK 0xe
@ -108,4 +111,71 @@ void mmu_set_pagetable(rt_ubase_t addr);
void mmu_enable_user_page_access();
void mmu_disable_user_page_access();
enum rt_hw_mmu_prot_t {
RT_HW_MMU_PROT_READ,
RT_HW_MMU_PROT_WRITE,
RT_HW_MMU_PROT_EXECUTE,
RT_HW_MMU_PROT_KERNEL,
RT_HW_MMU_PROT_USER,
RT_HW_MMU_PROT_CACHE,
};
/**
 * @brief Remove a permission from an MMU attribute value
 *
 * @param attr architecture-specific MMU attribute
 * @param prot the permission to remove
 * @return size_t the updated attribute
 */
rt_inline size_t rt_hw_mmu_attr_rm_perm(size_t attr, enum rt_hw_mmu_prot_t prot)
{
    switch (prot)
    {
    /* remove write permission for user */
    case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
        attr &= ~PTE_W;
        break;
    default:
        RT_ASSERT(0);
    }
    return attr;
}
/**
 * @brief Add a permission to an MMU attribute value
 *
 * @param attr architecture-specific MMU attribute
 * @param prot the permission to add
 * @return size_t the updated attribute
 */
rt_inline size_t rt_hw_mmu_attr_add_perm(size_t attr, enum rt_hw_mmu_prot_t prot)
{
    switch (prot)
    {
    /* add write permission for user */
    case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
        attr |= PTE_W;
        break;
    default:
        RT_ASSERT(0);
    }
    return attr;
}
/**
 * @brief Test whether an MMU attribute value grants a permission
 *
 * @param attr architecture-specific MMU attribute
 * @param prot the permission to test
 * @return rt_bool_t RT_TRUE if the permission is granted, otherwise RT_FALSE
 */
rt_inline rt_bool_t rt_hw_mmu_attr_test_perm(size_t attr, enum rt_hw_mmu_prot_t prot)
{
    rt_bool_t rc = RT_FALSE;
    switch (prot)
    {
    /* test write permission for user */
    case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
        rc = !!(attr & PTE_W);
        break;
    default:
        RT_ASSERT(0);
    }
    return rc;
}
#endif

View File

@ -7,6 +7,7 @@
* Date Author Notes
* 2021-01-30 lizhirui first version
* 2022-12-13 WangXiaoyao Port to new mm
* 2023-10-12 Shell Add permission control API
*/
#include <rtthread.h>
@ -490,3 +491,22 @@ void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
rt_hw_aspace_switch(&rt_kernel_space);
rt_page_cleanup();
}
void *rt_hw_mmu_pgtbl_create(void)
{
    rt_ubase_t *mmu_table;
    mmu_table = (rt_ubase_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
    if (!mmu_table)
    {
        return RT_NULL;
    }

    /* start from a copy of the kernel mappings, then flush the cache so
     * the MMU table walker sees a consistent page table */
    rt_memcpy(mmu_table, rt_kernel_space.page_table, ARCH_PAGE_SIZE);
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);

    return mmu_table;
}
void rt_hw_mmu_pgtbl_delete(void *pgtbl)
{
rt_pages_free(pgtbl, 0);
}

View File

@ -6,6 +6,7 @@
* Change Logs:
* Date Author Notes
* 2021-01-30 lizhirui first version
* 2023-10-12 Shell Add permission control API
*/
#ifndef __MMU_H__
@ -70,4 +71,7 @@ void *rt_hw_mmu_v2p(rt_aspace_t aspace, void *vaddr);
int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
enum rt_mmu_cntl cmd);
void *rt_hw_mmu_pgtbl_create(void);
void rt_hw_mmu_pgtbl_delete(void *pgtbl);
#endif

View File

@ -6,6 +6,7 @@
* Change Logs:
* Date Author Notes
* 2021-01-30 lizhirui first version
* 2023-10-12 Shell Add permission control API
*/
#ifndef __RISCV_MMU_H__
@ -93,4 +94,74 @@ void mmu_set_pagetable(rt_ubase_t addr);
void mmu_enable_user_page_access();
void mmu_disable_user_page_access();
enum rt_hw_mmu_prot_t {
RT_HW_MMU_PROT_READ,
RT_HW_MMU_PROT_WRITE,
RT_HW_MMU_PROT_EXECUTE,
RT_HW_MMU_PROT_KERNEL,
RT_HW_MMU_PROT_USER,
RT_HW_MMU_PROT_CACHE,
};
/**
 * @brief Remove a permission from an MMU attribute value
 *
 * @param attr architecture-specific MMU attribute
 * @param prot the permission to remove
 * @return size_t the updated attribute
 */
rt_inline size_t rt_hw_mmu_attr_rm_perm(size_t attr, enum rt_hw_mmu_prot_t prot)
{
    switch (prot)
    {
    /* remove write permission for user */
    case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
        attr &= ~PTE_W;
        break;
    default:
        RT_ASSERT(0);
    }
    return attr;
}
/**
 * @brief Add a permission to an MMU attribute value
 *
 * @param attr architecture-specific MMU attribute
 * @param prot the permission to add
 * @return size_t the updated attribute
 */
rt_inline size_t rt_hw_mmu_attr_add_perm(size_t attr, enum rt_hw_mmu_prot_t prot)
{
    switch (prot)
    {
    /* add write permission for user */
    case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
        attr |= PTE_W;
        break;
    default:
        RT_ASSERT(0);
    }
    return attr;
}
/**
 * @brief Test whether an MMU attribute value grants a permission
 *
 * @param attr architecture-specific MMU attribute
 * @param prot the permission to test
 * @return rt_bool_t RT_TRUE if the permission is granted, otherwise RT_FALSE
 */
rt_inline rt_bool_t rt_hw_mmu_attr_test_perm(size_t attr, enum rt_hw_mmu_prot_t prot)
{
    rt_bool_t rc = RT_FALSE;
    switch (prot)
    {
    /* test write permission for user */
    case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
        rc = !!(attr & PTE_W);
        break;
    default:
        RT_ASSERT(0);
    }
    return rc;
}
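On RISC-V, write permission is the single PTE_W bit, so the round trip from the ARM sketch earlier reduces to plain bit operations. Illustrative only (MMU_MAP_U_RWCB is defined earlier in this header, outside the hunk shown):

size_t attr = MMU_MAP_U_RWCB;  /* user read-write, cacheable */

attr = rt_hw_mmu_attr_rm_perm(attr, RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER);
RT_ASSERT(!rt_hw_mmu_attr_test_perm(attr, RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER));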
#endif

View File

@ -66,8 +66,7 @@ config RT_USING_SMP
config RT_CPUS_NR
    int "Number of CPUs"
    depends on RT_USING_SMP || RT_USING_AMP
    default 1
    help
        Number of CPUs in the system

View File

@ -25,7 +25,7 @@ if GetDepend('RT_USING_DEVICE') == False:
SrcRemove(src, ['device.c'])
if GetDepend('RT_USING_SMP') == False:
    SrcRemove(src, ['cpu.c', 'scheduler_mp.c'])
if GetDepend('RT_USING_SMP') == True:
SrcRemove(src, ['scheduler_up.c'])

View File

@ -68,7 +68,6 @@ static void _cpu_preempt_enable(void)
/* enable interrupt */
rt_hw_local_irq_enable(level);
}
/**
* @brief Initialize a static spinlock object.
@ -77,9 +76,7 @@ static void _cpu_preempt_enable(void)
*/
void rt_spin_lock_init(struct rt_spinlock *lock)
{
    rt_hw_spin_lock_init(&lock->lock);
}
RTM_EXPORT(rt_spin_lock_init)
@ -93,12 +90,8 @@ RTM_EXPORT(rt_spin_lock_init)
*/
void rt_spin_lock(struct rt_spinlock *lock)
{
    _cpu_preempt_disable();
    rt_hw_spin_lock(&lock->lock);
}
RTM_EXPORT(rt_spin_lock)
@ -109,12 +102,8 @@ RTM_EXPORT(rt_spin_lock)
*/
void rt_spin_unlock(struct rt_spinlock *lock)
{
    rt_hw_spin_unlock(&lock->lock);
    _cpu_preempt_enable();
}
RTM_EXPORT(rt_spin_unlock)
@ -130,7 +119,6 @@ RTM_EXPORT(rt_spin_unlock)
*/
rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
{
    unsigned long level;

    _cpu_preempt_disable();
@ -139,9 +127,6 @@ rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
rt_hw_spin_lock(&lock->lock);
return level;
#else
return rt_hw_interrupt_disable();
#endif
}
RTM_EXPORT(rt_spin_lock_irqsave)
@ -154,14 +139,10 @@ RTM_EXPORT(rt_spin_lock_irqsave)
*/
void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
{
    rt_hw_spin_unlock(&lock->lock);
    rt_hw_local_irq_enable(level);
    _cpu_preempt_enable();
}
RTM_EXPORT(rt_spin_unlock_irqrestore)
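With the uniprocessor fallbacks removed (the implementations here are now guarded at file scope by the #endif added at the end of this hunk), callers keep using the one spinlock API regardless of configuration. A minimal usage sketch with a hypothetical shared counter:

/* Sketch: protect a counter against other cores and local interrupts. */
static struct rt_spinlock demo_lock;
static rt_uint32_t demo_counter;

void demo_init(void)
{
    rt_spin_lock_init(&demo_lock);
}

void demo_bump(void)
{
    rt_base_t level = rt_spin_lock_irqsave(&demo_lock);
    demo_counter++;
    rt_spin_unlock_irqrestore(&demo_lock, level);
}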
@ -261,3 +242,4 @@ void rt_cpus_lock_status_restore(struct rt_thread *thread)
}
}
RTM_EXPORT(rt_cpus_lock_status_restore);
#endif

View File

@ -39,11 +39,7 @@
#endif /* (RT_USING_IDLE_HOOK) || defined(RT_USING_HEAP) */
#endif /* IDLE_THREAD_STACK_SIZE */
#define _CPUS_NR RT_CPUS_NR
static rt_list_t _rt_thread_defunct = RT_LIST_OBJECT_INIT(_rt_thread_defunct);
@ -335,6 +331,8 @@ void rt_thread_idle_init(void)
32);
#ifdef RT_USING_SMP
rt_thread_control(&idle_thread[i], RT_THREAD_CTRL_BIND_CPU, (void*)i);
rt_cpu_index(i)->idle_thread = &idle_thread[i];
#endif /* RT_USING_SMP */
/* startup */
rt_thread_startup(&idle_thread[i]);

View File

@ -269,6 +269,10 @@ static rt_err_t _thread_init(struct rt_thread *thread,
rt_list_init(&thread->signal.sig_queue.siginfo_list);
rt_memset(&thread->user_ctx, 0, sizeof thread->user_ctx);
/* initialize user_time and system_time */
thread->user_time = 0;
thread->system_time = 0;
#endif
#ifdef RT_USING_CPU_USAGE