[dfs] fixup: file_mmap and page cache

Including cache maintenance, a potential null-pointer dereference, and a use-after-free issue in the page cache.

Signed-off-by: Shell <smokewood@qq.com>
This commit is contained in:
parent 50bb71e458
commit b40d106bdc

@@ -272,7 +272,7 @@ rt_err_t on_varea_shrink(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
         rm_start = varea_start + size;
         rm_end = varea_start + varea->size;
     }
-    else if (varea_start < (char *)new_vaddr)
+    else
     {
         rm_start = varea_start;
         rm_end = new_vaddr;
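The branch not shown in the hunk above presumably compares varea_start against new_vaddr; with the old `else if`, a shrink that satisfied neither condition would leave rm_start/rm_end unassigned before they were used. A minimal sketch of the shrink geometry, with illustrative names (shrink_range is not an RT-Thread function):

#include <stddef.h>

/* Either the start of the region is kept (remove the tail) or the start
 * moves forward (remove the head); a plain else covers the second case,
 * so the removal range is always assigned before it is used.
 */
static void shrink_range(char *varea_start, size_t old_size,
                         char *new_vaddr, size_t new_size,
                         char **rm_start, char **rm_end)
{
    if (new_vaddr == varea_start)
    {
        *rm_start = varea_start + new_size;   /* tail of the old region */
        *rm_end   = varea_start + old_size;
    }
    else
    {
        *rm_start = varea_start;              /* head of the old region */
        *rm_end   = new_vaddr;
    }
}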
@@ -293,6 +293,7 @@ rt_err_t on_varea_expand(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
 
 rt_err_t on_varea_split(struct rt_varea *existed, void *unmap_start, rt_size_t unmap_len, struct rt_varea *subset)
 {
+    rt_err_t rc;
     struct dfs_file *file = dfs_mem_obj_get_file(existed->mem_obj);
 
     if (file)
@@ -307,12 +308,15 @@ rt_err_t on_varea_split(struct rt_varea *existed, void *unmap_start, rt_size_t u
             LOG_I("file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
         }
 
-        unmap_pages(existed, unmap_start, (char *)unmap_start + unmap_len);
+        rc = unmap_pages(existed, unmap_start, (char *)unmap_start + unmap_len);
+        if (!rc)
+        {
+            rc = unmap_pages(existed, subset->start, (char *)subset->start + subset->size);
+            if (!rc)
+                on_varea_open(subset);
+        }
 
-        subset->data = existed->data;
-        rt_atomic_add(&(file->ref_count), 1);
-
-        return RT_EOK;
+        return rc;
     }
     else
     {
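The hunk above replaces an unconditional unmap plus a hand-rolled data copy and reference bump with a checked sequence that only attaches the subset once both unmaps have succeeded, and that returns the first failure instead of an unconditional RT_EOK. A minimal sketch of that error-propagation pattern with stand-in names (try_unmap stands for unmap_pages(), attach_subset for on_varea_open(); the real functions live in dfs_file_mmap.c):

/* Stand-ins so the sketch compiles on its own. */
static int  try_unmap(char *start, char *end) { (void)start; (void)end; return 0; }
static void attach_subset(void *subset)       { (void)subset; }

static int split_backing(char *unmap_start, unsigned long unmap_len,
                         char *subset_start, unsigned long subset_size,
                         void *subset)
{
    /* unmap the explicitly requested range first */
    int rc = try_unmap(unmap_start, unmap_start + unmap_len);
    if (!rc)
    {
        /* then the range handed over to the subset varea */
        rc = try_unmap(subset_start, subset_start + subset_size);
        if (!rc)
            attach_subset(subset);   /* only attach once both unmaps succeeded */
    }
    return rc;                       /* first failure propagates to the caller */
}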
@@ -338,8 +342,7 @@ rt_err_t on_varea_merge(struct rt_varea *merge_to, struct rt_varea *merge_from)
         }
 
         dfs_aspace_unmap(file, merge_from);
-        merge_from->data = RT_NULL;
-        rt_atomic_sub(&(file->ref_count), 1);
+        on_varea_close(merge_from);
 
         return RT_EOK;
     }
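Together with the split hunk, this routes reference management through on_varea_open()/on_varea_close() instead of touching file->ref_count and varea->data by hand, which is where the use-after-free mentioned in the commit message could creep in. A hedged sketch of the pairing being relied on, with toy types; the real open/close helpers in dfs_file_mmap.c may do more than this:

#include <stddef.h>

struct file_ref  { long ref_count; };
struct toy_varea { struct file_ref *data; };

static void toy_varea_open(struct toy_varea *v, struct file_ref *f)
{
    v->data = f;                                             /* varea now pins the file */
    __atomic_add_fetch(&f->ref_count, 1, __ATOMIC_SEQ_CST);
}

static void toy_varea_close(struct toy_varea *v)
{
    struct file_ref *f = v->data;
    if (f)
    {
        v->data = NULL;
        __atomic_sub_fetch(&f->ref_count, 1, __ATOMIC_SEQ_CST);   /* balanced drop */
    }
}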
@@ -406,28 +409,31 @@ int dfs_file_mmap(struct dfs_file *file, struct dfs_mmap2_args *mmap2)
 
     LOG_I("mmap2 args addr: %p length: 0x%x prot: %d flags: 0x%x pgoffset: 0x%x",
           mmap2->addr, mmap2->length, mmap2->prot, mmap2->flags, mmap2->pgoffset);
-    if (file && file->vnode && file->vnode->aspace)
+    if (file && file->vnode)
     {
-        /* create a va area in user space (lwp) */
-        rt_varea_t varea = dfs_map_user_varea_data(mmap2, file);
-        if (varea)
+        if (file->vnode->aspace)
         {
-            mmap2->ret = varea->start;
-            LOG_I("%s varea: %p", __func__, varea);
-            LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
-                  varea->start, varea->size, varea->offset, varea->attr, varea->flag);
-            LOG_I("file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
-            ret = RT_EOK;
+            /* create a va area in user space (lwp) */
+            rt_varea_t varea = dfs_map_user_varea_data(mmap2, file);
+            if (varea)
+            {
+                mmap2->ret = varea->start;
+                LOG_I("%s varea: %p", __func__, varea);
+                LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
+                      varea->start, varea->size, varea->offset, varea->attr, varea->flag);
+                LOG_I("file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
+                ret = RT_EOK;
+            }
+            else
+            {
+                ret = -ENOMEM;
+            }
         }
         else
         {
-            ret = -ENOMEM;
+            LOG_E("File mapping is not supported, file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
         }
     }
-    else if (file->vnode->aspace == RT_NULL)
-    {
-        LOG_E("File mapping is not supported, file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
-    }
 
     return ret;
 }
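In the old code the `else if (file->vnode->aspace == RT_NULL)` arm was evaluated even when `file` or `file->vnode` was NULL, so the error path itself could fault; nesting the aspace check inside the validated branch removes that dereference. A minimal sketch of the guard restructuring with toy types (the real function also builds the varea and fills mmap2->ret):

#include <stdio.h>

struct toy_vnode { void *aspace; };
struct toy_file  { struct toy_vnode *vnode; };

static int toy_mmap(struct toy_file *file)
{
    int ret = -1;

    if (file && file->vnode)                 /* validate before any dereference */
    {
        if (file->vnode->aspace)
        {
            ret = 0;                         /* mapping path would run here */
        }
        else
        {
            fprintf(stderr, "file mapping is not supported\n");
        }
    }
    return ret;
}

The remaining hunks are in the page cache implementation itself (the unit that includes dfs_pcache.h and uses DBG_TAG "dfs.pcache").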
@@ -6,6 +6,7 @@
  * Change Logs:
  * Date           Author       Notes
  * 2023-05-05     RTT          Implement mnt in dfs v2.0
+ * 2023-10-23     Shell        fix synchronization of data to icache
  */
 
 #include "dfs_pcache.h"
@@ -15,6 +16,8 @@
 #include <mmu.h>
 #include <tlb.h>
 
+#include <rthw.h>
+
 #ifdef RT_USING_PAGECACHE
 
 #define DBG_TAG "dfs.pcache"
@@ -1270,6 +1273,21 @@ void *dfs_aspace_mmap(struct dfs_file *file, struct rt_varea *varea, void *vaddr)
             int err = rt_varea_map_range(varea, vaddr, pg_paddr, page->size);
             if (err == RT_EOK)
             {
+                /**
+                 * Note: While the page is mapped into user area, the data writing into the page
+                 * is not guaranteed to be visible for machines with the *weak* memory model and
+                 * those Harvard architecture (especially for those ARM64) cores for their
+                 * out-of-order pipelines of data buffer. Besides if the instruction cache in the
+                 * L1 memory system is a VIPT cache, there are chances to have the alias matching
+                 * entry if we reuse the same page frame and map it into the same virtual address
+                 * of the previous one.
+                 *
+                 * That's why we have to do synchronization and cleanup manually to ensure that
+                 * fetching of the next instruction can see the coherent data with the data cache,
+                 * TLB, MMU, main memory, and all the other observers in the computer system.
+                 */
+                rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, vaddr, ARCH_PAGE_SIZE);
+
                 ret = page->page;
                 map->varea = varea;
                 dfs_aspace_lock(aspace);
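The new comment and the rt_hw_cpu_icache_ops() call cover the instruction-fetch side; the freshly added rthw.h include declares the cache operations. A hedged sketch of the usual publish-then-invalidate sequence for a page that may be executed from user space; whether the D-cache clean is needed at this exact point or is already handled by the mapping layer is an assumption, the commit itself only adds the I-cache invalidate:

#include <rthw.h>

/* Make newly written page contents visible to instruction fetch. */
static void publish_exec_page(void *vaddr, int size)
{
    /* push dirty lines out of the D-cache so instruction fetch can see them */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, vaddr, size);
    /* drop stale (possibly aliased) I-cache entries for this virtual address */
    rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, vaddr, size);
}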
@@ -1358,6 +1376,7 @@ int dfs_aspace_page_unmap(struct dfs_file *file, struct rt_varea *varea, void *v
 {
     rt_list_t *node, *tmp;
     struct dfs_mmap *map;
+    rt_varea_unmap_page(varea, vaddr);
 
     node = page->mmap_head.next;
 
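The added rt_varea_unmap_page() tears down the user-space translation before the per-page mmap records are walked and released, which is the use-after-free fix on the unmap path: without it, user code could still reach a page frame the cache has already let go of. A minimal sketch of that ordering with illustrative names (the records are assumed heap-allocated, like the dfs_mmap entries):

#include <stdlib.h>

struct toy_page    { int refcnt; };
struct toy_mapping { void *varea; void *vaddr; };

/* stand-ins for rt_varea_unmap_page() and the pcache bookkeeping */
static void unmap_user_pte(void *varea, void *vaddr)                   { (void)varea; (void)vaddr; }
static void forget_mapping(struct toy_page *pg, struct toy_mapping *m) { (void)pg; free(m); }

static void release_mapped_page(struct toy_mapping *m, struct toy_page *pg)
{
    unmap_user_pte(m->varea, m->vaddr);   /* 1. remove the user mapping first   */
    forget_mapping(pg, m);                /* 2. then drop the bookkeeping       */
    if (--pg->refcnt == 0)                /* 3. only now can the frame go away  */
        free(pg);
}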