[smart] fixup of lwp recycling and mm varea (#8206)

Signed-off-by: shell <wangxiaoyao@rt-thread.com>
Signed-off-by: Shell <smokewood@qq.com>
Co-authored-by: xqyjlj <xqyjlj@126.com>
Shell 2023-11-02 20:23:11 +08:00 committed by GitHub
parent 1b6f0e88a3
commit c2036e769a
17 changed files with 464 additions and 297 deletions
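The heart of the change is a counted-reference discipline for struct rt_lwp: the pid table, each parent/child link, and any waiter hold their own reference, and lwp_free() is reached only after the pid slot has been released. A condensed sketch of the pairings visible in the hunks below (the wrapper names are illustrative; lwp_ref_inc()/lwp_ref_dec() are the real calls):

    /* Sketch only: every long-lived pointer to an lwp is paired with a
     * reference, taken when the pointer is stored and dropped when it is
     * cleared. The wrapper names here are hypothetical. */
    static void pid_slot_bind(struct rt_lwp *lwp)   { lwp_ref_inc(lwp); } /* lwp_pid_set_lwp_locked() */
    static void pid_slot_unbind(struct rt_lwp *lwp) { lwp_ref_dec(lwp); } /* lwp_pid_put() */

    static void children_link(struct rt_lwp *parent, struct rt_lwp *child)
    {
        lwp_ref_inc(parent); /* child->parent keeps the parent alive */
        lwp_ref_inc(child);  /* the parent's sibling list keeps the child alive */
    }

    static void children_unlink(struct rt_lwp *parent, struct rt_lwp *child)
    {
        lwp_ref_dec(child);
        lwp_ref_dec(parent);
    }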


@@ -295,6 +295,10 @@ void fdt_fd_release(struct dfs_fdtable *fdt, int fd)
if (file && file->ref_count == 1)
{
rt_mutex_detach(&file->pos_lock);
if (file->mmap_context)
{
rt_free(file->mmap_context);
}
rt_free(file);
}
else


@@ -35,7 +35,6 @@
#define PMUTEX_DESTROY 3
/* for sys/mman.h */
#define MAP_FAILED ((void *)-1)
#define MAP_SHARED 0x01
#define MAP_PRIVATE 0x02


@@ -1157,6 +1157,10 @@ rt_err_t lwp_children_register(struct rt_lwp *parent, struct rt_lwp *child)
LWP_UNLOCK(parent);
LOG_D("%s(parent=%p, child=%p)", __func__, parent, child);
/* parent holds reference to child */
lwp_ref_inc(parent);
/* child holds reference to parent */
lwp_ref_inc(child);
return 0;
}
@@ -1178,6 +1182,8 @@ rt_err_t lwp_children_unregister(struct rt_lwp *parent, struct rt_lwp *child)
LWP_UNLOCK(parent);
LOG_D("%s(parent=%p, child=%p)", __func__, parent, child);
lwp_ref_dec(child);
lwp_ref_dec(parent);
return 0;
}
@@ -1195,7 +1201,7 @@ pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
if (filename == RT_NULL)
{
return -RT_ERROR;
return -EINVAL;
}
if (access(filename, X_OK) != 0)
@@ -1208,7 +1214,7 @@ pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
if (lwp == RT_NULL)
{
dbg_log(DBG_ERROR, "lwp struct out of memory!\n");
return -RT_ENOMEM;
return -ENOMEM;
}
LOG_D("lwp malloc : %p, size: %d!", lwp, sizeof(struct rt_lwp));


@@ -171,7 +171,6 @@ char *lwp_getcwd(void);
void lwp_request_thread_exit(rt_thread_t thread_to_exit);
int lwp_check_exit_request(void);
void lwp_terminate(struct rt_lwp *lwp);
void lwp_wait_subthread_exit(void);
int lwp_tid_init(void);
int lwp_tid_get(void);


@@ -12,6 +12,8 @@
* 2023-07-27 shell Move the detaching of child processes on parent exit to lwp_terminate.
* Make lwp_from_pid locked by caller to avoid a possible use-after-free
* error
* 2023-10-27 shell Format code of sys_exit(). Fix a data race where the lock was missing.
* Add references on pid/tid so the resources are not freed while still in use.
*/
#include <rthw.h>
@@ -149,6 +151,8 @@ void lwp_pid_put(struct rt_lwp *lwp)
/* reset pid field */
lwp->pid = 0;
/* clear reference */
lwp_ref_dec(lwp);
}
static void lwp_pid_set_lwp_locked(pid_t pid, struct rt_lwp *lwp)
@@ -159,6 +163,7 @@ static void lwp_pid_set_lwp_locked(pid_t pid, struct rt_lwp *lwp)
if (p)
{
p->data = lwp;
lwp_ref_inc(lwp);
}
}
@@ -393,8 +398,9 @@ void lwp_free(struct rt_lwp* lwp)
* - lwp (RW. no other writer/reader competes with lwp_free, since
* all references have been cleared)
*/
LOG_D("lwp free: %p\n", lwp);
LOG_D("lwp free: %p", lwp);
LWP_LOCK(lwp);
if (lwp->args != RT_NULL)
{
@@ -407,18 +413,8 @@ void lwp_free(struct rt_lwp* lwp)
lwp->args = RT_NULL;
}
if (lwp->fdt.fds != RT_NULL)
{
/* auto clean fds */
__exit_files(lwp);
rt_free(lwp->fdt.fds);
lwp->fdt.fds = RT_NULL;
}
lwp_user_object_clear(lwp);
lwp_user_object_lock_destroy(lwp);
RT_ASSERT(lwp->lwp_lock.owner == RT_NULL);
rt_mutex_detach(&lwp->lwp_lock);
/* free data section */
if (lwp->data_entry != RT_NULL)
@@ -453,27 +449,134 @@ void lwp_free(struct rt_lwp* lwp)
lwp_unmap_user_space(lwp);
#endif
timer_list_free(&lwp->timer);
/* for children */
while (lwp->first_child)
{
struct rt_lwp *child;
child = lwp->first_child;
lwp->first_child = child->sibling;
if (child->terminated)
{
lwp_pid_put(child);
rt_free(child);
}
else
{
/** Note: safe since the slist node is released */
child->sibling = RT_NULL;
/* Note: this may cause an orphan lwp */
child->parent = RT_NULL;
}
LWP_UNLOCK(lwp);
RT_ASSERT(lwp->lwp_lock.owner == RT_NULL);
rt_mutex_detach(&lwp->lwp_lock);
/**
* the pid must have been released before entering lwp_free(),
* otherwise this is a data race
*/
RT_ASSERT(lwp->pid == 0);
rt_free(lwp);
}
rt_inline rt_noreturn
void _thread_exit(rt_lwp_t lwp, rt_thread_t thread)
{
/**
* Note: the tid tree always holds a reference to the thread, hence the tid
* must be released before cleanup of the thread
*/
lwp_tid_put(thread->tid);
thread->tid = 0;
LWP_LOCK(lwp);
rt_list_remove(&thread->sibling);
LWP_UNLOCK(lwp);
rt_thread_delete(thread);
rt_schedule();
while (1) ;
}
rt_inline void _clear_child_tid(rt_thread_t thread)
{
if (thread->clear_child_tid)
{
int t = 0;
int *clear_child_tid = thread->clear_child_tid;
thread->clear_child_tid = RT_NULL;
lwp_put_to_user(clear_child_tid, &t, sizeof t);
sys_futex(clear_child_tid, FUTEX_WAKE | FUTEX_PRIVATE, 1, RT_NULL, RT_NULL, 0);
}
}
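_clear_child_tid() above is the kernel half of the CLONE_CHILD_CLEARTID handshake: on thread exit the tid word in user memory is zeroed and any futex waiter on it is woken. A sketch of the user-space half, assuming a futex_wait() wrapper over sys_futex (hypothetical helper):

    #include <stdatomic.h>

    /* Hypothetical user-space joiner: blocks until the kernel zeroes the
     * tid word in _clear_child_tid() and issues FUTEX_WAKE. */
    static void join_thread(atomic_int *child_tid)
    {
        int tid;
        while ((tid = atomic_load(child_tid)) != 0)
            futex_wait(child_tid, tid); /* returns once *child_tid != tid */
    }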
void lwp_exit(rt_lwp_t lwp, rt_base_t status)
{
rt_thread_t thread;
if (!lwp)
{
LOG_W("%s: lwp should not be null", __func__);
return ;
}
thread = rt_thread_self();
RT_ASSERT((struct rt_lwp *)thread->lwp == lwp);
LOG_D("process(lwp.pid=%d) exit", lwp->pid);
#ifdef ARCH_MM_MMU
_clear_child_tid(thread);
LWP_LOCK(lwp);
/**
* Brief: only one thread should call exit_group(),
* but we cannot ensure that at run-time
*/
lwp->lwp_ret = LWP_CREATE_STAT(status);
LWP_UNLOCK(lwp);
lwp_terminate(lwp);
#else
main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
if (main_thread == tid)
{
rt_thread_t sub_thread;
rt_list_t *list;
lwp_terminate(lwp);
/* delete all subthread */
while ((list = tid->sibling.prev) != &lwp->t_grp)
{
sub_thread = rt_list_entry(list, struct rt_thread, sibling);
rt_list_remove(&sub_thread->sibling);
rt_thread_delete(sub_thread);
}
lwp->lwp_ret = value;
}
#endif /* ARCH_MM_MMU */
_thread_exit(lwp, thread);
}
void lwp_thread_exit(rt_thread_t thread, rt_base_t status)
{
rt_thread_t header_thr;
struct rt_lwp *lwp;
LOG_D("%s", __func__);
RT_ASSERT(thread == rt_thread_self());
lwp = (struct rt_lwp *)thread->lwp;
RT_ASSERT(lwp != RT_NULL);
#ifdef ARCH_MM_MMU
_clear_child_tid(thread);
LWP_LOCK(lwp);
header_thr = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
if (header_thr == thread && thread->sibling.prev == &lwp->t_grp)
{
lwp->lwp_ret = LWP_CREATE_STAT(status);
LWP_UNLOCK(lwp);
lwp_terminate(lwp);
}
else
{
LWP_UNLOCK(lwp);
}
#endif /* ARCH_MM_MMU */
_thread_exit(lwp, thread);
}
static void _pop_tty(rt_lwp_t lwp)
{
if (!lwp->background)
{
struct termios *old_stdin_termios = get_old_termios();
@@ -497,36 +600,11 @@ void lwp_free(struct rt_lwp* lwp)
}
rt_mutex_release(&lwp->tty->lock);
LWP_LOCK(lwp);
lwp->tty = RT_NULL;
LWP_UNLOCK(lwp);
}
}
/* for parent */
if (lwp->parent)
{
struct rt_thread *thread;
if (!rt_list_isempty(&lwp->wait_list))
{
thread = rt_list_entry(lwp->wait_list.next, struct rt_thread, tlist);
thread->error = RT_EOK;
thread->msg_ret = (void*)(rt_size_t)lwp->lwp_ret;
rt_thread_resume(thread);
return;
}
else
{
struct rt_lwp **it = &lwp->parent->first_child;
while (*it != lwp)
{
it = &(*it)->sibling;
}
*it = lwp->sibling;
}
}
lwp_pid_put(lwp);
rt_free(lwp);
}
/** @note the reference is not for synchronization but for the release of resources; synchronization is done through the lwp & pid locks */
@@ -689,13 +767,29 @@ static sysret_t _lwp_wait_and_recycle(struct rt_lwp *child, rt_thread_t cur_thr,
rt_list_insert_before(&child->wait_list, &(cur_thr->tlist));
LWP_UNLOCK(child);
rt_set_errno(RT_EINTR);
rt_exit_critical();
rt_schedule();
if (child->terminated)
error = child->pid;
/**
* Since the parent holds a reference to the child, this lock will
* not be freed before the parent drops its reference.
*/
LWP_LOCK(child);
error = rt_get_errno();
if (error == RT_EINTR)
{
error = -EINTR;
}
else if (error != RT_EOK)
{
LOG_W("%s: unexpected error code %ld", __func__, error);
}
else
error = -RT_EINTR;
{
error = child->pid;
}
}
else
rt_exit_critical();
@@ -705,18 +799,18 @@ static sysret_t _lwp_wait_and_recycle(struct rt_lwp *child, rt_thread_t cur_thr,
lwp_stat = child->lwp_ret;
terminated = child->terminated;
if (!terminated)
LWP_UNLOCK(child);
LWP_UNLOCK(child);
if (error > 0)
{
if (terminated)
{
LOG_D("func %s: child detached", __func__);
/** Reap the child process if it has exited */
lwp_children_unregister(self_lwp, child);
child->parent = RT_NULL;
lwp_pid_put(child);
lwp_children_unregister(self_lwp, child);
}
if (status)
lwp_data_put(self_lwp, status, &lwp_stat, sizeof(*status));
}
@@ -880,16 +974,15 @@ long list_process(void)
thread = threads[index];
/** FIXME: take the rt_thread_t lock */
level = rt_hw_interrupt_disable();
level = rt_spin_lock_irqsave(&thread->spinlock);
if ((rt_object_get_type(&thread->parent) & ~RT_Object_Class_Static) != RT_Object_Class_Thread)
{
rt_hw_interrupt_enable(level);
rt_spin_unlock_irqrestore(&thread->spinlock, level);
continue;
}
rt_memcpy(&th, thread, sizeof(struct rt_thread));
rt_hw_interrupt_enable(level);
rt_spin_unlock_irqrestore(&thread->spinlock, level);
if (th.lwp == RT_NULL)
{
@@ -989,8 +1082,7 @@ static int found_thread(struct rt_lwp* lwp, rt_thread_t thread)
rt_base_t level;
rt_list_t *list;
/** FIXME: take the rt_thread_t lock */
level = rt_hw_interrupt_disable();
level = rt_spin_lock_irqsave(&thread->spinlock);
list = lwp->t_grp.next;
while (list != &lwp->t_grp)
{
@@ -1004,7 +1096,7 @@ static int found_thread(struct rt_lwp* lwp, rt_thread_t thread)
}
list = list->next;
}
rt_hw_interrupt_enable(level);
rt_spin_unlock_irqrestore(&thread->spinlock, level);
return found;
}
@@ -1022,8 +1114,7 @@ void lwp_request_thread_exit(rt_thread_t thread_to_exit)
return;
}
/* FIXME: take the rt_thread_t lock */
level = rt_hw_interrupt_disable();
level = rt_spin_lock_irqsave(&thread_to_exit->spinlock);
main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
if (thread_to_exit == main_thread)
@@ -1063,14 +1154,15 @@ void lwp_request_thread_exit(rt_thread_t thread_to_exit)
}
finish:
rt_hw_interrupt_enable(level);
rt_spin_unlock_irqrestore(&thread_to_exit->spinlock, level);
return;
}
static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread);
static void _resr_cleanup(struct rt_lwp *lwp);
void lwp_terminate(struct rt_lwp *lwp)
{
rt_list_t *list;
if (!lwp)
{
/* kernel thread not support */
@@ -1085,46 +1177,50 @@ void lwp_terminate(struct rt_lwp *lwp)
{
/* stop the receiving of signals */
lwp->terminated = RT_TRUE;
LWP_UNLOCK(lwp);
/* broadcast exit request for sibling threads */
for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
_wait_sibling_exit(lwp, rt_thread_self());
_resr_cleanup(lwp);
}
else
{
LWP_UNLOCK(lwp);
}
}
static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread)
{
rt_base_t level;
rt_list_t *list;
rt_thread_t thread;
/* broadcast exit request for sibling threads */
LWP_LOCK(lwp);
for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
{
thread = rt_list_entry(list, struct rt_thread, sibling);
level = rt_spin_lock_irqsave(&thread->spinlock);
if (thread->exit_request == LWP_EXIT_REQUEST_NONE)
{
rt_thread_t thread;
thread->exit_request = LWP_EXIT_REQUEST_TRIGGERED;
}
rt_spin_unlock_irqrestore(&thread->spinlock, level);
thread = rt_list_entry(list, struct rt_thread, sibling);
if (thread->exit_request == LWP_EXIT_REQUEST_NONE)
{
thread->exit_request = LWP_EXIT_REQUEST_TRIGGERED;
}
if ((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
{
thread->error = RT_EINTR;
rt_hw_dsb();
rt_thread_wakeup(thread);
}
level = rt_spin_lock_irqsave(&thread->spinlock);
if ((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
{
thread->error = RT_EINTR;
rt_spin_unlock_irqrestore(&thread->spinlock, level);
rt_hw_dsb();
rt_thread_wakeup(thread);
}
else
{
rt_spin_unlock_irqrestore(&thread->spinlock, level);
}
}
LWP_UNLOCK(lwp);
}
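Note the lock order the rewritten broadcast settles on: the LWP mutex guards the sibling list for the whole walk, while each thread's spinlock guards its exit_request and suspend state. A reduced sketch of that ordering, with placeholder types:

    /* Sketch: container mutex first, then each element's spinlock,
     * never the reverse. Types and fields are placeholders. */
    static void broadcast_exit(struct thread_group *grp)
    {
        rt_mutex_take(&grp->lock, RT_WAITING_FOREVER);   /* LWP_LOCK(lwp) */
        for (struct member *m = grp->head; m; m = m->next)
        {
            rt_base_t level = rt_spin_lock_irqsave(&m->spinlock);
            if (m->exit_request == LWP_EXIT_REQUEST_NONE)
                m->exit_request = LWP_EXIT_REQUEST_TRIGGERED;
            rt_spin_unlock_irqrestore(&m->spinlock, level);
        }
        rt_mutex_release(&grp->lock);                    /* LWP_UNLOCK(lwp) */
    }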
void lwp_wait_subthread_exit(void)
{
struct rt_lwp *lwp;
rt_thread_t thread;
rt_thread_t main_thread;
lwp = lwp_self();
if (!lwp)
{
return;
}
thread = rt_thread_self();
main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
if (thread != main_thread)
{
return;
}
while (1)
{
@@ -1138,7 +1234,7 @@ void lwp_wait_subthread_exit(void)
* - sibling list of lwp (RW. It will clear all siblings finally)
*/
LWP_LOCK(lwp);
subthread_is_terminated = (int)(thread->sibling.prev == &lwp->t_grp);
subthread_is_terminated = (int)(curr_thread->sibling.prev == &lwp->t_grp);
if (!subthread_is_terminated)
{
rt_thread_t sub_thread;
@@ -1146,7 +1242,7 @@ void lwp_wait_subthread_exit(void)
int all_subthread_in_init = 1;
/* check all subthread is in init state */
for (list = thread->sibling.prev; list != &lwp->t_grp; list = list->prev)
for (list = curr_thread->sibling.prev; list != &lwp->t_grp; list = list->prev)
{
sub_thread = rt_list_entry(list, struct rt_thread, sibling);
@@ -1159,7 +1255,7 @@ void lwp_wait_subthread_exit(void)
if (all_subthread_in_init)
{
/* delete all subthread */
while ((list = thread->sibling.prev) != &lwp->t_grp)
while ((list = curr_thread->sibling.prev) != &lwp->t_grp)
{
sub_thread = rt_list_entry(list, struct rt_thread, sibling);
rt_list_remove(&sub_thread->sibling);
@@ -1187,13 +1283,104 @@ void lwp_wait_subthread_exit(void)
}
}
static void _resr_cleanup(struct rt_lwp *lwp)
{
LWP_LOCK(lwp);
lwp_signal_detach(&lwp->signal);
/**
* @brief Detach children from lwp
*
* @note Critical Section
* - the lwp (RW. Release lwp)
* - the pid resource manager (RW. Release the pid)
*/
while (lwp->first_child)
{
struct rt_lwp *child;
child = lwp->first_child;
lwp->first_child = child->sibling;
/** @note safe since the slist node is released */
LWP_UNLOCK(lwp);
LWP_LOCK(child);
child->sibling = RT_NULL;
/* info: this may cause an orphan lwp */
child->parent = RT_NULL;
LWP_UNLOCK(child);
lwp_ref_dec(child);
lwp_ref_dec(lwp);
LWP_LOCK(lwp);
}
LWP_UNLOCK(lwp);
_pop_tty(lwp);
/**
* @brief Wake up the parent if it is waiting for this lwp; otherwise
* a signal will be sent to the parent
*
* @note Critical Section
* - the parent lwp (RW.)
*/
LWP_LOCK(lwp);
if (lwp->parent)
{
struct rt_thread *thread;
LWP_UNLOCK(lwp);
if (!rt_list_isempty(&lwp->wait_list))
{
thread = rt_list_entry(lwp->wait_list.next, struct rt_thread, tlist);
thread->error = RT_EOK;
thread->msg_ret = (void*)(rt_size_t)lwp->lwp_ret;
rt_thread_resume(thread);
}
else
{
/* a child cannot detach itself and must wait for its parent to reap it */
lwp_signal_kill(lwp->parent, SIGCHLD, CLD_EXITED, 0);
}
}
else
{
LWP_UNLOCK(lwp);
/* INFO: an orphan has no parent to reap its pid */
lwp_pid_put(lwp);
}
LWP_LOCK(lwp);
if (lwp->fdt.fds != RT_NULL)
{
struct dfs_file **fds;
/* auto clean fds */
__exit_files(lwp);
fds = lwp->fdt.fds;
lwp->fdt.fds = RT_NULL;
LWP_UNLOCK(lwp);
rt_free(fds);
}
else
{
LWP_UNLOCK(lwp);
}
}
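_resr_cleanup() either resumes a parent already blocked in waitpid() or posts SIGCHLD so the parent reaps the child later; only an orphan releases its own pid. For illustration, the parent's side of that contract in plain POSIX user space:

    #include <signal.h>
    #include <sys/wait.h>

    /* Illustrative parent-side counterpart: reap exited children from a
     * SIGCHLD handler; the kernel releases the pid inside the wait path. */
    static void on_sigchld(int sig)
    {
        int status;
        (void)sig;
        while (waitpid(-1, &status, WNOHANG) > 0)
            ; /* reap every child that has exited */
    }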
static int _lwp_setaffinity(pid_t pid, int cpu)
{
struct rt_lwp *lwp;
int ret = -1;
lwp_pid_lock_take();
lwp = lwp_from_pid_locked(pid);
if(pid == 0)
lwp = lwp_self();
else
lwp = lwp_from_pid_locked(pid);
if (lwp)
{
#ifdef RT_USING_SMP


@@ -85,6 +85,9 @@ rt_inline void lwp_from_pid_release_lock(struct rt_lwp *lwp)
lwp_ref_dec(lwp);
}
void lwp_thread_exit(rt_thread_t thread, rt_base_t status);
void lwp_exit(struct rt_lwp *lwp, rt_base_t status);
#ifdef __cplusplus
}
#endif


@@ -298,12 +298,12 @@ static int _pthread_mutex_lock_timeout(void *umutex, struct timespec *timeout)
lwp_mutex_release_safe(&_pmutex_lock);
return -EDEADLK;
}
lwp_mutex_release_safe(&_pmutex_lock);
lock_ret = rt_mutex_take_interruptible(pmutex->lock.kmutex, time);
if (lock_ret == RT_EOK)
{
umutex_p->_m_lock = rt_thread_self()->tid;
}
lwp_mutex_release_safe(&_pmutex_lock);
break;
default: /* unknown type */
return -EINVAL;
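The reorder above releases the global _pmutex_lock before the blocking rt_mutex_take_interruptible(), so the pmutex table is no longer held while the caller sleeps on a user mutex. A minimal sketch of the rule this restores (not the kernel's code; names are placeholders):

    /* Drop the registry lock before any call that can block. */
    static void lock_user_mutex(struct registry *reg, void *umutex)
    {
        registry_take(reg);                   /* short critical section: lookup only */
        rt_mutex_t kmutex = registry_lookup(reg, umutex);
        registry_release(reg);                /* released BEFORE blocking */

        rt_mutex_take_interruptible(kmutex, RT_WAITING_FOREVER);
        /* may sleep here with no registry lock held */
    }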


@@ -764,7 +764,6 @@ rt_err_t lwp_signal_action(struct rt_lwp *lwp, int signo,
rt_list_t *thread_list;
rt_err_t ret = RT_EOK;
if (lwp)
{
/** acquire READ access to lwp */


@@ -328,104 +328,35 @@ static void _crt_thread_entry(void *parameter)
/* exit group */
sysret_t sys_exit_group(int value)
{
rt_thread_t tid, main_thread;
struct rt_lwp *lwp;
sysret_t rc = 0;
struct rt_lwp *lwp = lwp_self();
tid = rt_thread_self();
lwp = (struct rt_lwp *)tid->lwp;
LOG_D("process(%p) exit.", lwp);
#ifdef ARCH_MM_MMU
if (tid->clear_child_tid)
if (lwp)
lwp_exit(lwp, value);
else
{
int t = 0;
int *clear_child_tid = tid->clear_child_tid;
tid->clear_child_tid = RT_NULL;
lwp_put_to_user(clear_child_tid, &t, sizeof t);
sys_futex(clear_child_tid, FUTEX_WAKE | FUTEX_PRIVATE, 1, RT_NULL, RT_NULL, 0);
LOG_E("Can't find matching process of current thread");
rc = -EINVAL;
}
lwp_terminate(lwp);
main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
if (main_thread == tid)
{
lwp_wait_subthread_exit();
lwp->lwp_ret = LWP_CREATE_STAT(value);
}
#else
main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
if (main_thread == tid)
{
rt_thread_t sub_thread;
rt_list_t *list;
lwp_terminate(lwp);
/* delete all subthread */
while ((list = tid->sibling.prev) != &lwp->t_grp)
{
sub_thread = rt_list_entry(list, struct rt_thread, sibling);
rt_list_remove(&sub_thread->sibling);
rt_thread_delete(sub_thread);
}
lwp->lwp_ret = value;
}
#endif /* ARCH_MM_MMU */
/**
* Note: the tid tree always holds a reference to the thread, hence the tid
* must be released before cleanup of the thread
*/
lwp_tid_put(tid->tid);
tid->tid = 0;
rt_list_remove(&tid->sibling);
rt_thread_delete(tid);
rt_schedule();
/* never reach here */
RT_ASSERT(0);
return 0;
return rc;
}
/* thread exit */
void sys_exit(int status)
sysret_t sys_exit(int status)
{
rt_thread_t tid, main_thread;
struct rt_lwp *lwp;
LOG_D("thread exit");
sysret_t rc = 0;
rt_thread_t tid;
tid = rt_thread_self();
lwp = (struct rt_lwp *)tid->lwp;
#ifdef ARCH_MM_MMU
if (tid->clear_child_tid)
if (tid && tid->lwp)
lwp_thread_exit(tid, status);
{
int t = 0;
int *clear_child_tid = tid->clear_child_tid;
tid->clear_child_tid = RT_NULL;
lwp_put_to_user(clear_child_tid, &t, sizeof t);
sys_futex(clear_child_tid, FUTEX_WAKE, 1, RT_NULL, RT_NULL, 0);
LOG_E("Can't find matching process of current thread");
rc = -EINVAL;
}
main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
if (main_thread == tid && tid->sibling.prev == &lwp->t_grp)
{
lwp_terminate(lwp);
lwp_wait_subthread_exit();
lwp->lwp_ret = LWP_CREATE_STAT(status);
}
#endif /* ARCH_MM_MMU */
lwp_tid_put(tid->tid);
tid->tid = 0;
rt_list_remove(&tid->sibling);
rt_thread_delete(tid);
rt_schedule();
return;
return rc;
}
/* syscall: "read" ret: "ssize_t" args: "int" "void *" "size_t" */
@@ -1174,18 +1105,28 @@ sysret_t sys_getpid(void)
/* syscall: "getpriority" ret: "int" args: "int" "id_t" */
sysret_t sys_getpriority(int which, id_t who)
{
long prio = 0xff;
if (which == PRIO_PROCESS)
{
rt_thread_t tid;
struct rt_lwp *lwp = RT_NULL;
tid = rt_thread_self();
if (who == (id_t)(rt_size_t)tid || who == 0xff)
lwp_pid_lock_take();
if(who == 0)
lwp = lwp_self();
else
lwp = lwp_from_pid_locked(who);
if (lwp)
{
return tid->current_priority;
rt_thread_t thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
prio = thread->current_priority;
}
lwp_pid_lock_release();
}
return 0xff;
return prio;
}
/* syscall: "setpriority" ret: "int" args: "int" "id_t" "int" */
@@ -1193,14 +1134,30 @@ sysret_t sys_setpriority(int which, id_t who, int prio)
{
if (which == PRIO_PROCESS)
{
rt_thread_t tid;
struct rt_lwp *lwp = RT_NULL;
tid = rt_thread_self();
if ((who == (id_t)(rt_size_t)tid || who == 0xff) && (prio >= 0 && prio < RT_THREAD_PRIORITY_MAX))
lwp_pid_lock_take();
if(who == 0)
lwp = lwp_self();
else
lwp = lwp_from_pid_locked(who);
if (lwp && prio >= 0 && prio < RT_THREAD_PRIORITY_MAX)
{
rt_thread_control(tid, RT_THREAD_CTRL_CHANGE_PRIORITY, &prio);
rt_list_t *list;
rt_thread_t thread;
for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
{
thread = rt_list_entry(list, struct rt_thread, sibling);
rt_thread_control(thread, RT_THREAD_CTRL_CHANGE_PRIORITY, &prio);
}
lwp_pid_lock_release();
return 0;
}
else
{
lwp_pid_lock_release();
}
}
return -1;
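With these two hunks, PRIO_PROCESS resolves who as a pid (0 meaning the calling process) rather than a thread handle: getpriority() reports the main thread's priority and setpriority() applies the new value to every thread in the lwp. From user space that looks like (illustrative):

    #include <sys/resource.h>

    /* Illustrative usage of the reworked syscalls; pid 0 selects the caller. */
    static void renice_process(pid_t pid, int prio)
    {
        int old = getpriority(PRIO_PROCESS, pid); /* main thread's priority */
        (void)old;
        setpriority(PRIO_PROCESS, pid, prio);     /* applied to every thread */
    }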
@@ -2790,6 +2747,7 @@ sysret_t sys_execve(const char *path, char *const argv[], char *const envp[])
* Since no other threads can access the lwp field, it's unnecessary to
* take a lock here
*/
RT_ASSERT(rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling) == thread);
strncpy(thread->parent.name, run_name + last_backslash, RT_NAME_MAX);
strncpy(lwp->cmd, new_lwp->cmd, RT_NAME_MAX);
@@ -5560,7 +5518,6 @@ sysret_t sys_sched_getscheduler(int tid, int *policy, void *param)
struct sched_param *sched_param = RT_NULL;
rt_thread_t thread = RT_NULL;
if (!lwp_user_accessable(param, sizeof(struct sched_param)))
{
return -EFAULT;


@@ -48,7 +48,7 @@ typedef uint32_t id_t; /* may contain pid, uid or gid */
const char *lwp_get_syscall_name(rt_uint32_t number);
const void *lwp_get_sys_api(rt_uint32_t number);
void sys_exit(int value);
sysret_t sys_exit(int value);
sysret_t sys_exit_group(int status);
ssize_t sys_read(int fd, void *buf, size_t nbyte);
ssize_t sys_write(int fd, const void *buf, size_t nbyte);


@@ -109,11 +109,67 @@ rt_err_t rt_aspace_anon_ref_dec(rt_mem_obj_t aobj)
return rc;
}
void rt_varea_pgmgr_insert(rt_varea_t varea, void *page_addr)
{
/* each mapping of a page frame in the varea is bound to a reference */
rt_page_ref_inc(page_addr, 0);
}
/**
* Private unmapping of address space
*/
static void _pgmgr_pop_all(rt_varea_t varea)
{
rt_aspace_t aspace = varea->aspace;
char *iter = varea->start;
char *end_addr = iter + varea->size;
RT_ASSERT(iter < end_addr);
RT_ASSERT(!((long)iter & ARCH_PAGE_MASK));
RT_ASSERT(!((long)end_addr & ARCH_PAGE_MASK));
for (; iter != end_addr; iter += ARCH_PAGE_SIZE)
{
void *page_pa = rt_hw_mmu_v2p(aspace, iter);
char *page_va = rt_kmem_p2v(page_pa);
if (page_pa != ARCH_MAP_FAILED && page_va)
{
rt_hw_mmu_unmap(aspace, iter, ARCH_PAGE_SIZE);
rt_pages_free(page_va, 0);
}
}
}
static void _pgmgr_pop_range(rt_varea_t varea, void *rm_start, void *rm_end)
{
void *page_va;
RT_ASSERT(!((rt_ubase_t)rm_start & ARCH_PAGE_MASK));
RT_ASSERT(!((rt_ubase_t)rm_end & ARCH_PAGE_MASK));
while (rm_start != rm_end)
{
page_va = rt_hw_mmu_v2p(varea->aspace, rm_start);
if (page_va != ARCH_MAP_FAILED)
{
page_va -= PV_OFFSET;
LOG_D("%s: free page %p", __func__, page_va);
rt_varea_unmap_page(varea, rm_start);
rt_pages_free(page_va, 0);
}
rm_start += ARCH_PAGE_SIZE;
}
}
static const char *_anon_get_name(rt_varea_t varea)
{
return varea->aspace == _anon_obj_get_backup(varea->mem_obj) ? "anonymous" : "reference";
}
/**
* Migration handler on varea re-construction
*/
static void _anon_varea_open(struct rt_varea *varea)
{
rt_aspace_anon_ref_inc(varea->mem_obj);
@@ -127,7 +183,9 @@ static void _anon_varea_open(struct rt_varea *varea)
static void _anon_varea_close(struct rt_varea *varea)
{
rt_aspace_anon_ref_dec(varea->mem_obj);
rt_mm_dummy_mapper.on_varea_close(varea);
/* unmap and dereference page frames in the varea region */
_pgmgr_pop_all(varea);
}
static rt_err_t _anon_varea_expand(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
@@ -137,21 +195,45 @@ static rt_err_t _anon_varea_expand(struct rt_varea *varea, void *new_vaddr, rt_s
static rt_err_t _anon_varea_shrink(rt_varea_t varea, void *new_start, rt_size_t size)
{
return rt_mm_dummy_mapper.on_varea_shrink(varea, new_start, size);
char *varea_start = varea->start;
void *rm_start;
void *rm_end;
if (varea_start == (char *)new_start)
{
rm_start = varea_start + size;
rm_end = varea_start + varea->size;
}
else /* if (varea_start < (char *)new_start) */
{
RT_ASSERT(varea_start < (char *)new_start);
rm_start = varea_start;
rm_end = new_start;
}
_pgmgr_pop_range(varea, rm_start, rm_end);
return RT_EOK;
}
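Shrinking keeps one end of the varea, so the freed range is either the tail (when new_start equals varea->start) or the head. A worked example with illustrative addresses:

    /* varea = [0x1000, 0x5000), ARCH_PAGE_SIZE = 0x1000
     *
     * keep the head: new_start = 0x1000, size = 0x2000
     *   -> rm_start = 0x3000, rm_end = 0x5000  (tail pages freed)
     *
     * keep the tail: new_start = 0x3000
     *   -> rm_start = 0x1000, rm_end = 0x3000  (head pages freed)
     */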
static rt_err_t _anon_varea_split(struct rt_varea *existed, void *unmap_start, rt_size_t unmap_len, struct rt_varea *subset)
{
/* remove the resource in the unmap region, and do nothing for the subset */
_pgmgr_pop_range(existed, unmap_start, (char *)unmap_start + unmap_len);
_anon_varea_open(subset);
return rt_mm_dummy_mapper.on_varea_split(existed, unmap_start, unmap_len, subset);
return RT_EOK;
}
static rt_err_t _anon_varea_merge(struct rt_varea *merge_to, struct rt_varea *merge_from)
{
_anon_varea_close(merge_from);
return rt_mm_dummy_mapper.on_varea_merge(merge_to, merge_from);
/* do nothing for the varea merge */
return RT_EOK;
}
/**
* Private mapping of address space
*/
rt_inline void _map_page_in_varea(rt_aspace_t asapce, rt_varea_t varea,
struct rt_aspace_fault_msg *msg, char *fault_addr)
{
@@ -168,6 +250,7 @@ rt_inline void _map_page_in_varea(rt_aspace_t asapce,
}
}
/* page frame inquiry or allocation in backup address space */
static void *_get_page_from_backup(rt_aspace_t backup, rt_base_t offset_in_mobj)
{
void *frame_pa;


@@ -12,7 +12,7 @@
#define DBG_TAG "mm.object"
#define DBG_LVL DBG_INFO
#include "rtdbg.h"
#include <rtdbg.h>
#include <rtthread.h>
@@ -31,31 +31,6 @@ static const char *get_name(rt_varea_t varea)
return "dummy-mapper";
}
void rt_varea_pgmgr_insert(rt_varea_t varea, void *page_addr)
{
/* each mapping of a page frame in the varea is bound to a reference */
rt_page_ref_inc(page_addr, 0);
}
/* resource recycling of page frames */
void rt_varea_pgmgr_pop_all(rt_varea_t varea)
{
rt_aspace_t aspace = varea->aspace;
char *end_addr = varea->start + varea->size;
RT_ASSERT(!((long)end_addr & ARCH_PAGE_MASK));
for (char *iter = varea->start; iter != end_addr; iter += ARCH_PAGE_SIZE)
{
void *page_pa = rt_hw_mmu_v2p(aspace, iter);
char *page_va = rt_kmem_p2v(page_pa);
if (page_pa != ARCH_MAP_FAILED && page_va)
{
rt_hw_mmu_unmap(aspace, iter, ARCH_PAGE_SIZE);
rt_pages_free(page_va, 0);
}
}
}
static void on_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
{
void *page;
@@ -79,8 +54,6 @@ static void on_varea_open(struct rt_varea *varea)
static void on_varea_close(struct rt_varea *varea)
{
/* unmap and dereference page frames in the varea region */
rt_varea_pgmgr_pop_all(varea);
}
static rt_err_t on_varea_expand(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
@@ -88,60 +61,18 @@ static rt_err_t on_varea_expand(struct rt_varea *varea, void *new_vaddr, rt_size
return RT_EOK;
}
static void _remove_pages(rt_varea_t varea, void *rm_start, void *rm_end)
{
void *page_va;
RT_ASSERT(!((rt_ubase_t)rm_start & ARCH_PAGE_MASK));
RT_ASSERT(!((rt_ubase_t)rm_end & ARCH_PAGE_MASK));
while (rm_start != rm_end)
{
page_va = rt_hw_mmu_v2p(varea->aspace, rm_start);
if (page_va != ARCH_MAP_FAILED)
{
page_va -= PV_OFFSET;
LOG_D("%s: free page %p", __func__, page_va);
rt_varea_unmap_page(varea, rm_start);
rt_pages_free(page_va, 0);
}
rm_start += ARCH_PAGE_SIZE;
}
}
static rt_err_t on_varea_shrink(rt_varea_t varea, void *new_start, rt_size_t size)
{
char *varea_start = varea->start;
void *rm_start;
void *rm_end;
if (varea_start == (char *)new_start)
{
rm_start = varea_start + size;
rm_end = varea_start + varea->size;
}
else /* if (varea_start < (char *)new_start) */
{
RT_ASSERT(varea_start < (char *)new_start);
rm_start = varea_start;
rm_end = new_start;
}
_remove_pages(varea, rm_start, rm_end);
return RT_EOK;
}
static rt_err_t on_varea_split(struct rt_varea *existed, void *unmap_start, rt_size_t unmap_len, struct rt_varea *subset)
{
/* remove the resource in the unmap region, and do nothing for the subset */
_remove_pages(existed, unmap_start, (char *)unmap_start + unmap_len);
return RT_EOK;
}
static rt_err_t on_varea_merge(struct rt_varea *merge_to, struct rt_varea *merge_from)
{
/* do nothing for the migration */
return RT_EOK;
}


@@ -863,8 +863,7 @@ int rt_page_install(rt_region_t region)
{
int err = -RT_EINVAL;
if (region.end != region.start && !(region.start & ARCH_PAGE_MASK) &&
!(region.end & ARCH_PAGE_MASK) &&
!((region.end - region.start) & shadow_mask))
!(region.end & ARCH_PAGE_MASK))
{
void *head = addr_to_page(page_start, (void *)region.start);
void *tail = addr_to_page(page_start, (void *)region.end);


@@ -105,8 +105,6 @@ void _aspace_bst_insert(struct rt_aspace *aspace, struct rt_varea *varea);
*/
void _aspace_bst_remove(struct rt_aspace *aspace, struct rt_varea *varea);
void rt_varea_pgmgr_pop_all(rt_varea_t varea);
int rt_varea_fix_private_locked(rt_varea_t ex_varea, void *pa,
struct rt_aspace_fault_msg *msg,
rt_bool_t dont_copy);


@@ -67,7 +67,7 @@ static void test_user_map_varea(void)
uassert_true(varea->start != 0);
uassert_true(varea->start >= (void *)USER_VADDR_START && varea->start < (void *)USER_VADDR_TOP);
uassert_true(!lwp_ref_dec(lwp));
uassert_true(!(lwp_ref_dec(lwp) - 1));
}
static void test_user_map_varea_ext(void)
@@ -91,7 +91,7 @@ static void test_user_map_varea_ext(void)
uassert_true(varea->start != 0);
uassert_true(varea->start >= (void *)USER_VADDR_START && varea->start < (void *)USER_VADDR_TOP);
uassert_true(!lwp_ref_dec(lwp));
uassert_true(!(lwp_ref_dec(lwp) - 1));
}
static void user_map_varea_tc(void)


@@ -12,9 +12,9 @@ bsp_path = Dir('#').abspath
if not os.path.exists(bsp_path + "/link.lds"):
Env['LINKFLAGS'] = Env['LINKFLAGS'].replace('link.lds', cwd + "/link.lds")
# fix the linker with crtx.o
Preprocessing("link.lds.S", ".lds", CPPPATH=[bsp_path])
# fix the linker with crtx.o
Env['LINKFLAGS'] += ' -nostartfiles'
# add common code files


@@ -373,6 +373,8 @@ void rt_hw_trap_exception(struct rt_hw_exp_stack *regs)
#endif
#ifdef RT_USING_LWP
/* restore normal execution environment */
__asm__ volatile("msr daifclr, 0x3\ndmb ishst\nisb\n");
_check_fault(regs, 0, "user fault");
#endif
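For reference, daifclr, 0x3 clears the I and F bits of the AArch64 DAIF mask (immediate bits: D=8, A=4, I=2, F=1), re-enabling IRQ and FIQ before the fault is forwarded. A commented rendering of the same sequence:

    /* AArch64; the same instructions as above, annotated: */
    __asm__ volatile(
        "msr daifclr, 0x3\n" /* unmask IRQ (I) and FIQ (F)               */
        "dmb ishst\n"        /* complete earlier stores, inner shareable  */
        "isb\n");            /* resynchronize the pipeline                */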