/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-10-16     zhangjun     first version
 * 2021-02-20     lizhirui     fix warning
 * 2023-06-26     shell        clear ref to parent on waitpid()
 *                             Remove recycling of lwp on waitpid() and leave it to defunct routine
 * 2023-07-27     shell        Move the detach of children process on parent exit to lwp_terminate.
 *                             Make lwp_from_pid locked by caller to avoid possible use-after-free
 *                             error
 * 2023-10-27     shell        Format codes of sys_exit(). Fix the data racing where lock is missed
 *                             Add reference on pid/tid, so the resource is not freed while using.
 *                             Add support for waitpid(options=WNOHANG)
 * 2023-11-16     xqyjlj       Fix the case where pid is 0
 * 2023-11-17     xqyjlj       add process group and session support
 * 2023-11-24     shell        Support of waitpid(options=WNOTRACED|WCONTINUED);
 *                             Reimplement the waitpid with a wait queue method, and fixup problem
 *                             with waitpid(pid=-1)/waitpid(pid=-pgid)/waitpid(pid=0) that only one
 *                             process can be traced while waiter suspend
 * 2024-01-25     shell        porting to new sched API
 */

/* includes scheduler related API */
#define __RT_IPC_SOURCE__

/* for waitpid, we are compatible with the GNU extension */
#define _GNU_SOURCE

#define DBG_TAG "lwp.pid"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include "lwp_internal.h"

#include <rthw.h>
#include <rtthread.h>
#include <dfs_file.h>
#include <unistd.h>
#include <stdio.h> /* rename() */
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/statfs.h> /* statfs() */
#include <stdatomic.h>

#ifdef ARCH_MM_MMU
#include "lwp_user_mm.h"
#endif

#ifdef RT_USING_DFS_PROCFS
#include "proc.h"
#include "procfs.h"
#endif

#define PID_MAX 10000

#define PID_CT_ASSERT(name, x) \
    struct assert_##name {char ary[2 * (x) - 1];}

PID_CT_ASSERT(pid_min_nr, RT_LWP_MAX_NR > 1);
PID_CT_ASSERT(pid_max_nr, RT_LWP_MAX_NR < PID_MAX);
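
/*
 * Note: PID_CT_ASSERT() is the classic negative-array-size compile-time
 * assertion: if the condition x is false, the member becomes
 * char ary[2 * 0 - 1], i.e. char ary[-1], and compilation fails. A
 * hypothetical PID_CT_ASSERT(bad, 0) would therefore be rejected at
 * build time rather than at run time.
 */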

static struct lwp_avl_struct lwp_pid_ary[RT_LWP_MAX_NR];
static struct lwp_avl_struct *lwp_pid_free_head = RT_NULL;
static int lwp_pid_ary_alloced = 0;
static struct lwp_avl_struct *lwp_pid_root = RT_NULL;
static pid_t current_pid = 0;
static struct rt_mutex pid_mtx;

int lwp_pid_init(void)
{
    rt_mutex_init(&pid_mtx, "pidmtx", RT_IPC_FLAG_PRIO);
    return 0;
}

void lwp_pid_lock_take(void)
{
    LWP_DEF_RETURN_CODE(rc);

    rc = lwp_mutex_take_safe(&pid_mtx, RT_WAITING_FOREVER, 0);
    /* should never fail */
    RT_ASSERT(rc == RT_EOK);
    RT_UNUSED(rc);
}

void lwp_pid_lock_release(void)
{
    /* should never fail */
    if (lwp_mutex_release_safe(&pid_mtx) != RT_EOK)
        RT_ASSERT(0);
}

struct lwp_avl_struct *lwp_get_pid_ary(void)
{
    return lwp_pid_ary;
}
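
/*
 * Note: allocation policy below: a tree node comes from the free list
 * first, then from the static lwp_pid_ary until all RT_LWP_MAX_NR slots
 * are handed out. The PID value itself is searched Linux-style, scanning
 * upward from the last allocated PID and wrapping around to 1, so
 * recently released PIDs are not reused immediately. Returns 0 (reserved,
 * never a valid PID) when no slot is available. Caller must hold the pid
 * lock.
 */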

static pid_t lwp_pid_get_locked(void)
{
    struct lwp_avl_struct *p;
    pid_t pid = 0;

    p = lwp_pid_free_head;
    if (p)
    {
        lwp_pid_free_head = (struct lwp_avl_struct *)p->avl_right;
    }
    else if (lwp_pid_ary_alloced < RT_LWP_MAX_NR)
    {
        p = lwp_pid_ary + lwp_pid_ary_alloced;
        lwp_pid_ary_alloced++;
    }
    if (p)
    {
        int found_noused = 0;

        RT_ASSERT(p->data == RT_NULL);
        for (pid = current_pid + 1; pid < PID_MAX; pid++)
        {
            if (!lwp_avl_find(pid, lwp_pid_root))
            {
                found_noused = 1;
                break;
            }
        }
        if (!found_noused)
        {
            for (pid = 1; pid <= current_pid; pid++)
            {
                if (!lwp_avl_find(pid, lwp_pid_root))
                {
                    found_noused = 1;
                    break;
                }
            }
        }
        p->avl_key = pid;
        lwp_avl_insert(p, &lwp_pid_root);
        current_pid = pid;
    }
    return pid;
}

static void lwp_pid_put_locked(pid_t pid)
{
    struct lwp_avl_struct *p;

    if (pid == 0)
    {
        return;
    }

    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        p->data = RT_NULL;
        lwp_avl_remove(p, &lwp_pid_root);
        p->avl_right = lwp_pid_free_head;
        lwp_pid_free_head = p;
    }
}

#ifdef RT_USING_DFS_PROCFS
rt_inline void _free_proc_dentry(rt_lwp_t lwp)
{
    char pid_str[64] = {0};

    rt_snprintf(pid_str, 64, "%d", lwp->pid);
    pid_str[63] = 0;
    proc_remove_dentry(pid_str, 0);
}
#else
#define _free_proc_dentry(lwp)
#endif

void lwp_pid_put(struct rt_lwp *lwp)
{
    _free_proc_dentry(lwp);

    lwp_pid_lock_take();
    lwp_pid_put_locked(lwp->pid);
    lwp_pid_lock_release();

    /* reset pid field */
    lwp->pid = 0;
    /* clear reference */
    lwp_ref_dec(lwp);
}

static void lwp_pid_set_lwp_locked(pid_t pid, struct rt_lwp *lwp)
{
    struct lwp_avl_struct *p;

    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        p->data = lwp;
        lwp_ref_inc(lwp);

#ifdef RT_USING_DFS_PROCFS
        if (pid)
        {
            proc_pid(pid);
        }
#endif
    }
}
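
/*
 * Note: lwp_pid_set_lwp_locked() takes one reference on the lwp on behalf
 * of the PID tree; lwp_pid_put() is its counterpart and drops that
 * reference again, which is why it ends with lwp_ref_dec().
 */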

static void __exit_files(struct rt_lwp *lwp)
{
    int fd = lwp->fdt.maxfd - 1;

    while (fd >= 0)
    {
        struct dfs_file *d;

        d = lwp->fdt.fds[fd];
        if (d)
        {
            dfs_file_close(d);
            fdt_fd_release(&lwp->fdt, fd);
        }
        fd--;
    }
}

void lwp_user_object_lock_init(struct rt_lwp *lwp)
{
    rt_mutex_init(&lwp->object_mutex, "lwp_obj", RT_IPC_FLAG_PRIO);
}

void lwp_user_object_lock_destroy(struct rt_lwp *lwp)
{
    rt_mutex_detach(&lwp->object_mutex);
}

void lwp_user_object_lock(struct rt_lwp *lwp)
{
    if (lwp)
    {
        rt_mutex_take(&lwp->object_mutex, RT_WAITING_FOREVER);
    }
    else
    {
        RT_ASSERT(0);
    }
}

void lwp_user_object_unlock(struct rt_lwp *lwp)
{
    if (lwp)
    {
        rt_mutex_release(&lwp->object_mutex);
    }
    else
    {
        RT_ASSERT(0);
    }
}

int lwp_user_object_add(struct rt_lwp *lwp, rt_object_t object)
{
    int ret = -1;

    if (lwp && object)
    {
        lwp_user_object_lock(lwp);
        if (!lwp_avl_find((avl_key_t)object, lwp->object_root))
        {
            struct lwp_avl_struct *node;

            node = (struct lwp_avl_struct *)rt_malloc(sizeof(struct lwp_avl_struct));
            if (node)
            {
                rt_atomic_add(&object->lwp_ref_count, 1);
                node->avl_key = (avl_key_t)object;
                lwp_avl_insert(node, &lwp->object_root);
                ret = 0;
            }
        }
        lwp_user_object_unlock(lwp);
    }
    return ret;
}
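
/*
 * Note: user-visible kernel objects are tracked per process in an AVL
 * tree keyed by the object's address, while object->lwp_ref_count counts
 * how many processes share the object. The tree gives cheap duplicate
 * detection in lwp_user_object_add() and an ordered walk for the cleanup
 * and fork-time duplication routines below.
 */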

static rt_err_t _object_node_delete(struct rt_lwp *lwp, struct lwp_avl_struct *node)
{
    rt_err_t ret = -1;
    rt_object_t object;

    if (!lwp || !node)
    {
        return ret;
    }
    object = (rt_object_t)node->avl_key;
    object->lwp_ref_count--;
    if (object->lwp_ref_count == 0)
    {
        /* remove from kernel object list */
        switch (object->type)
        {
        case RT_Object_Class_Semaphore:
            ret = rt_sem_delete((rt_sem_t)object);
            break;
        case RT_Object_Class_Mutex:
            ret = rt_mutex_delete((rt_mutex_t)object);
            break;
        case RT_Object_Class_Event:
            ret = rt_event_delete((rt_event_t)object);
            break;
        case RT_Object_Class_MailBox:
            ret = rt_mb_delete((rt_mailbox_t)object);
            break;
        case RT_Object_Class_MessageQueue:
            ret = rt_mq_delete((rt_mq_t)object);
            break;
        case RT_Object_Class_Timer:
            ret = rt_timer_delete((rt_timer_t)object);
            break;
        case RT_Object_Class_Custom:
            ret = rt_custom_object_destroy(object);
            break;
        default:
            LOG_E("input object type(%d) error", object->type);
            break;
        }
    }
    else
    {
        ret = 0;
    }
    lwp_avl_remove(node, &lwp->object_root);
    rt_free(node);
    return ret;
}

rt_err_t lwp_user_object_delete(struct rt_lwp *lwp, rt_object_t object)
{
    rt_err_t ret = -1;

    if (lwp && object)
    {
        struct lwp_avl_struct *node;

        lwp_user_object_lock(lwp);
        node = lwp_avl_find((avl_key_t)object, lwp->object_root);
        ret = _object_node_delete(lwp, node);
        lwp_user_object_unlock(lwp);
    }
    return ret;
}

void lwp_user_object_clear(struct rt_lwp *lwp)
{
    struct lwp_avl_struct *node;

    lwp_user_object_lock(lwp);
    while ((node = lwp_map_find_first(lwp->object_root)) != RT_NULL)
    {
        _object_node_delete(lwp, node);
    }
    lwp_user_object_unlock(lwp);
}

static int _object_dup(struct lwp_avl_struct *node, void *arg)
{
    rt_object_t object;
    struct rt_lwp *dst_lwp = (struct rt_lwp *)arg;

    object = (rt_object_t)node->avl_key;
    lwp_user_object_add(dst_lwp, object);
    return 0;
}

void lwp_user_object_dup(struct rt_lwp *dst_lwp, struct rt_lwp *src_lwp)
{
    lwp_user_object_lock(src_lwp);
    lwp_avl_traversal(src_lwp->object_root, _object_dup, dst_lwp);
    lwp_user_object_unlock(src_lwp);
}

rt_lwp_t lwp_create(rt_base_t flags)
{
    pid_t pid;
    rt_lwp_t new_lwp = rt_calloc(1, sizeof(struct rt_lwp));

    if (new_lwp)
    {
        /* minimal setup of lwp object */
        new_lwp->ref = 1;
#ifdef RT_USING_SMP
        new_lwp->bind_cpu = RT_CPUS_NR;
#endif
        new_lwp->exe_file = RT_NULL;
        rt_list_init(&new_lwp->t_grp);
        rt_list_init(&new_lwp->pgrp_node);
        rt_list_init(&new_lwp->timer);
        lwp_user_object_lock_init(new_lwp);
        rt_wqueue_init(&new_lwp->wait_queue);
        rt_wqueue_init(&new_lwp->waitpid_waiters);
        lwp_signal_init(&new_lwp->signal);
        rt_mutex_init(&new_lwp->lwp_lock, "lwp_lock", RT_IPC_FLAG_PRIO);

        if (flags & LWP_CREATE_FLAG_NOTRACE_EXEC)
            new_lwp->did_exec = RT_TRUE;

        /* lwp with pid */
        if (flags & LWP_CREATE_FLAG_ALLOC_PID)
        {
            lwp_pid_lock_take();
            pid = lwp_pid_get_locked();
            if (pid == 0)
            {
                lwp_user_object_lock_destroy(new_lwp);
                rt_free(new_lwp);
                new_lwp = RT_NULL;
                LOG_E("%s: pid slots are full", __func__);
            }
            else
            {
                new_lwp->pid = pid;
                lwp_pid_set_lwp_locked(pid, new_lwp);
            }
            lwp_pid_lock_release();
        }
        rt_memset(&new_lwp->rt_rusage, 0, sizeof(new_lwp->rt_rusage));

        if (flags & LWP_CREATE_FLAG_INIT_USPACE)
        {
            rt_err_t error = lwp_user_space_init(new_lwp, 0);
            if (error)
            {
                lwp_pid_put(new_lwp);
                lwp_user_object_lock_destroy(new_lwp);
                rt_free(new_lwp);
                new_lwp = RT_NULL;
                LOG_E("%s: failed to initialize user space", __func__);
            }
        }
    }

    LOG_D("%s(pid=%d) => %p", __func__, new_lwp ? new_lwp->pid : -1, new_lwp);
    return new_lwp;
}
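
/*
 * Note: an illustrative sketch of a typical caller (the exact flag choice
 * depends on the caller; both flags are defined in this subsystem):
 *
 *     rt_lwp_t lwp = lwp_create(LWP_CREATE_FLAG_ALLOC_PID |
 *                               LWP_CREATE_FLAG_INIT_USPACE);
 *     if (!lwp)
 *         return -RT_ENOMEM;    // creation already rolled back pid/locks
 *
 * On any failure path lwp_create() releases the PID and the object lock
 * itself, so callers only need to test the return value.
 */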

/** when its reference count reaches 0, an lwp can be released */
void lwp_free(struct rt_lwp* lwp)
{
    rt_processgroup_t group = RT_NULL;

    if (lwp == RT_NULL)
    {
        return;
    }

    /**
     * Brief: Recycle the lwp when its reference count is cleared
     *
     * Note: Critical Section
     * - lwp (RW. no other writer/reader competes with lwp_free, since
     *   all references are already cleared)
     */
    LOG_D("lwp free: %p", lwp);
    rt_free(lwp->exe_file);
    group = lwp_pgrp_find(lwp_pgid_get_byprocess(lwp));
    if (group)
        lwp_pgrp_remove(group, lwp);

    LWP_LOCK(lwp);

    if (lwp->args != RT_NULL)
    {
#ifndef ARCH_MM_MMU
        lwp->args_length = 0;
#ifndef ARCH_MM_MPU
        rt_free(lwp->args);
#endif /* not defined ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->args = RT_NULL;
    }

    lwp_user_object_clear(lwp);
    lwp_user_object_lock_destroy(lwp);

    /* free data section */
    if (lwp->data_entry != RT_NULL)
    {
#ifdef ARCH_MM_MMU
        rt_free_align(lwp->data_entry);
#else
#ifdef ARCH_MM_MPU
        rt_lwp_umap_user(lwp, lwp->text_entry, 0);
        rt_lwp_free_user(lwp, lwp->data_entry, lwp->data_size);
#else
        rt_free_align(lwp->data_entry);
#endif /* ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->data_entry = RT_NULL;
    }

    /* free text section */
    if (lwp->lwp_type == LWP_TYPE_DYN_ADDR)
    {
        if (lwp->text_entry)
        {
            LOG_D("lwp text free: %p", lwp->text_entry);
#ifndef ARCH_MM_MMU
            rt_free((void*)lwp->text_entry);
#endif /* not defined ARCH_MM_MMU */
            lwp->text_entry = RT_NULL;
        }
    }

#ifdef ARCH_MM_MMU
    lwp_unmap_user_space(lwp);
#endif
    timer_list_free(&lwp->timer);

    LWP_UNLOCK(lwp);
    RT_ASSERT(lwp->lwp_lock.owner == RT_NULL);
    rt_mutex_detach(&lwp->lwp_lock);

    /**
     * The pid must have been released before entering lwp_free(),
     * otherwise this is a data race
     */
    RT_ASSERT(lwp->pid == 0);
    rt_free(lwp);
}

rt_inline rt_noreturn
void _thread_exit(rt_lwp_t lwp, rt_thread_t thread)
{
    LWP_LOCK(lwp);
    /* fold this thread's tick counters into the process rusage (ticks -> sec/usec) */
    lwp->rt_rusage.ru_stime.tv_sec += thread->system_time / RT_TICK_PER_SECOND;
    lwp->rt_rusage.ru_stime.tv_usec += thread->system_time % RT_TICK_PER_SECOND * (1000000 / RT_TICK_PER_SECOND);
    lwp->rt_rusage.ru_utime.tv_sec += thread->user_time / RT_TICK_PER_SECOND;
    lwp->rt_rusage.ru_utime.tv_usec += thread->user_time % RT_TICK_PER_SECOND * (1000000 / RT_TICK_PER_SECOND);
    rt_list_remove(&thread->sibling);
    LWP_UNLOCK(lwp);
    lwp_futex_exit_robust_list(thread);

    /**
     * Note: the tid tree always holds a reference to the thread, hence the
     * tid must be released before cleanup of the thread
     */
    lwp_tid_put(thread->tid);
    thread->tid = 0;

    rt_thread_delete(thread);
    rt_schedule();
    while (1) ;
}

rt_inline void _clear_child_tid(rt_thread_t thread)
{
    if (thread->clear_child_tid)
    {
        int t = 0;
        int *clear_child_tid = thread->clear_child_tid;

        thread->clear_child_tid = RT_NULL;
        lwp_put_to_user(clear_child_tid, &t, sizeof t);
        sys_futex(clear_child_tid, FUTEX_WAKE, 1, RT_NULL, RT_NULL, 0);
    }
}
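
/*
 * Note: this follows the Linux CLONE_CHILD_CLEARTID protocol: on thread
 * exit the kernel stores 0 to the user-supplied TID address and performs
 * a FUTEX_WAKE on it, which is the mechanism that lets a user-space
 * pthread_join() on this thread return.
 */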

void lwp_exit(rt_lwp_t lwp, lwp_status_t status)
{
    rt_thread_t thread;

    if (!lwp)
    {
        LOG_W("%s: lwp should not be null", __func__);
        return;
    }

    thread = rt_thread_self();
    RT_ASSERT((struct rt_lwp *)thread->lwp == lwp);
    LOG_D("process(lwp.pid=%d) exit", lwp->pid);

#ifdef ARCH_MM_MMU
    _clear_child_tid(thread);

    LWP_LOCK(lwp);
    /**
     * Brief: only one thread should call exit_group(),
     * but we cannot ensure that at run-time
     */
    lwp->lwp_status = status;
    LWP_UNLOCK(lwp);

    lwp_terminate(lwp);
#else
    main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (main_thread == tid)
    {
        rt_thread_t sub_thread;
        rt_list_t *list;

        lwp_terminate(lwp);

        /* delete all subthreads */
        while ((list = tid->sibling.prev) != &lwp->t_grp)
        {
            sub_thread = rt_list_entry(list, struct rt_thread, sibling);
            rt_list_remove(&sub_thread->sibling);
            rt_thread_delete(sub_thread);
        }
        lwp->lwp_ret = value;
    }
#endif /* ARCH_MM_MMU */

    _thread_exit(lwp, thread);
}

void lwp_thread_exit(rt_thread_t thread, int status)
{
    rt_thread_t header_thr;
    struct rt_lwp *lwp;

    LOG_D("%s", __func__);

    RT_ASSERT(thread == rt_thread_self());
    lwp = (struct rt_lwp *)thread->lwp;
    RT_ASSERT(lwp != RT_NULL);

#ifdef ARCH_MM_MMU
    _clear_child_tid(thread);

    LWP_LOCK(lwp);
    header_thr = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (header_thr == thread && thread->sibling.prev == &lwp->t_grp)
    {
        /**
         * if the last thread exits, treat it as a normal process exit.
         * This is reasonable since trap events exit through lwp_exit()
         */
        lwp->lwp_status = LWP_CREATE_STAT_EXIT(status);
        LWP_UNLOCK(lwp);

        lwp_terminate(lwp);
    }
    else
    {
        LWP_UNLOCK(lwp);
    }
#endif /* ARCH_MM_MMU */

    _thread_exit(lwp, thread);
}

/** @note the reference is not for synchronization but for resource
 * release; synchronization is done through the lwp & pid locks */
int lwp_ref_inc(struct rt_lwp *lwp)
{
    int ref;
    ref = rt_atomic_add(&lwp->ref, 1);
    LOG_D("%s(%p(%s)): before %d", __func__, lwp, lwp->cmd, ref);

    return ref;
}

int lwp_ref_dec(struct rt_lwp *lwp)
{
    int ref;

    ref = rt_atomic_add(&lwp->ref, -1);
    LOG_D("%s(lwp=%p,lwp->cmd=%s): before ref=%d", __func__, lwp, lwp->cmd, ref);

    /* rt_atomic_add() returns the value before the add: ref == 1 means the
     * count has just dropped to zero */
    if (ref == 1)
    {
        struct rt_channel_msg msg;

        if (lwp->debug)
        {
            memset(&msg, 0, sizeof msg);
            rt_raw_channel_send(gdb_server_channel(), &msg);
        }

#ifndef ARCH_MM_MMU
#ifdef RT_LWP_USING_SHM
        lwp_shm_lwp_free(lwp);
#endif /* RT_LWP_USING_SHM */
#endif /* not defined ARCH_MM_MMU */
        lwp_free(lwp);
    }
    else
    {
        /* reference must be a positive integer */
        RT_ASSERT(ref > 1);
    }

    return ref;
}

struct rt_lwp* lwp_from_pid_raw_locked(pid_t pid)
{
    struct lwp_avl_struct *p;
    struct rt_lwp *lwp = RT_NULL;

    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        lwp = (struct rt_lwp *)p->data;
    }

    return lwp;
}

struct rt_lwp* lwp_from_pid_locked(pid_t pid)
{
    struct rt_lwp* lwp;
    lwp = pid ? lwp_from_pid_raw_locked(pid) : lwp_self();
    return lwp;
}
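
/*
 * Note: pid == 0 resolves to the calling process itself (lwp_self()),
 * matching the usual syscall convention where 0 means "me", while the
 * raw variant above treats the argument literally.
 */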

pid_t lwp_to_pid(struct rt_lwp* lwp)
{
    if (!lwp)
    {
        return 0;
    }
    return lwp->pid;
}

char* lwp_pid2name(int32_t pid)
{
    struct rt_lwp *lwp;
    char* process_name = RT_NULL;

    lwp_pid_lock_take();
    lwp = lwp_from_pid_locked(pid);
    if (lwp)
    {
        process_name = strrchr(lwp->cmd, '/');
        process_name = process_name ? process_name + 1 : lwp->cmd;
    }
    lwp_pid_lock_release();

    return process_name;
}

pid_t lwp_name2pid(const char *name)
{
    int idx;
    pid_t pid = 0;
    rt_thread_t main_thread;
    char* process_name = RT_NULL;
    rt_sched_lock_level_t slvl;

    lwp_pid_lock_take();
    for (idx = 0; idx < RT_LWP_MAX_NR; idx++)
    {
        /* 0 is reserved */
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[idx].data;

        if (lwp)
        {
            process_name = strrchr(lwp->cmd, '/');
            process_name = process_name ? process_name + 1 : lwp->cmd;
            if (!rt_strncmp(name, process_name, RT_NAME_MAX))
            {
                main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
                rt_sched_lock(&slvl);
                if (rt_sched_thread_get_stat(main_thread) != RT_THREAD_CLOSE)
                {
                    pid = lwp->pid;
                }
                rt_sched_unlock(slvl);
            }
        }
    }
    lwp_pid_lock_release();
    return pid;
}

int lwp_getpid(void)
{
    rt_lwp_t lwp = lwp_self();
    return lwp ? lwp->pid : 1;
}

rt_inline void _update_ru(struct rt_lwp *child, struct rt_lwp *self_lwp, struct rusage *uru)
{
    struct rusage rt_rusage;
    if (uru != RT_NULL)
    {
        rt_rusage.ru_stime.tv_sec = child->rt_rusage.ru_stime.tv_sec;
        rt_rusage.ru_stime.tv_usec = child->rt_rusage.ru_stime.tv_usec;
        rt_rusage.ru_utime.tv_sec = child->rt_rusage.ru_utime.tv_sec;
        rt_rusage.ru_utime.tv_usec = child->rt_rusage.ru_utime.tv_usec;
        lwp_data_put(self_lwp, uru, &rt_rusage, sizeof(*uru));
    }
}

/* do statistical summary and reap the child if necessary */
static rt_err_t _stats_and_reap_child(rt_lwp_t child, rt_thread_t cur_thr,
                                      struct rt_lwp *self_lwp, int *ustatus,
                                      int options, struct rusage *uru)
{
    int lwp_stat = child->lwp_status;

    /* report statistical data to the process */
    _update_ru(child, self_lwp, uru);

    if (child->terminated && !(options & WNOWAIT))
    {
        /** reap the child process if it has exited */
        LOG_D("func %s: child detached", __func__);
        lwp_pid_put(child);
        lwp_children_unregister(self_lwp, child);
    }

    if (ustatus)
        lwp_data_put(self_lwp, ustatus, &lwp_stat, sizeof(*ustatus));

    return RT_EOK;
}

#define HAS_CHILD_BUT_NO_EVT (-1024)

/* check whether the process has already terminated */
static sysret_t _query_event_from_lwp(rt_lwp_t child, rt_thread_t cur_thr, rt_lwp_t self_lwp,
                                      int options, int *status)
{
    sysret_t rc;

    LWP_LOCK(child);
    if (child->terminated)
    {
        rc = child->pid;
    }
    else if ((options & WSTOPPED) && child->jobctl_stopped && !child->wait_reap_stp)
    {
        child->wait_reap_stp = 1;
        rc = child->pid;
    }
    else
    {
        rc = HAS_CHILD_BUT_NO_EVT;
    }
    LWP_UNLOCK(child);

    LOG_D("%s(child_pid=%d ('%s'), stopped=%d) => %d", __func__, child->pid, child->cmd, child->jobctl_stopped, rc);
    return rc;
}

/* verify that the process is a child, and reap it */
static pid_t _verify_child_and_reap(rt_thread_t cur_thr, rt_lwp_t self_lwp,
                                    pid_t wait_pid, int options, int *ustatus,
                                    struct rusage *uru)
{
    sysret_t rc;
    struct rt_lwp *child;

    /* check that pid references a valid child */
    lwp_pid_lock_take();
    child = lwp_from_pid_locked(wait_pid);
    if (!child)
        rc = -EINVAL;
    else if (child->parent != self_lwp)
        rc = -ESRCH;
    else
        rc = wait_pid;

    lwp_pid_lock_release();

    if (rc > 0)
    {
        rc = _query_event_from_lwp(child, cur_thr, self_lwp, options, ustatus);
        if (rc > 0)
        {
            _stats_and_reap_child(child, cur_thr, self_lwp, ustatus, options, uru);
        }
    }
    return rc;
}

/* try to reap any child */
static pid_t _reap_any_child_pid(rt_thread_t cur_thr, rt_lwp_t self_lwp, pid_t pair_pgid,
                                 int options, int *ustatus, struct rusage *uru)
{
    sysret_t rc = -ECHILD;
    struct rt_lwp *child;

    LWP_LOCK(self_lwp);
    child = self_lwp->first_child;

    /* find an exited child, if any */
    while (child)
    {
        if (pair_pgid && child->pgid != pair_pgid)
        {
            /* advance before continuing, otherwise the loop never ends */
            child = child->sibling;
            continue;
        }

        rc = _query_event_from_lwp(child, cur_thr, self_lwp, options, ustatus);
        if (rc > 0)
            break;

        child = child->sibling;
    }
    LWP_UNLOCK(self_lwp);

    if (rc > 0)
    {
        _stats_and_reap_child(child, cur_thr, self_lwp, ustatus, options, uru);
    }
    return rc;
}

rt_err_t lwp_waitpid_kick(rt_lwp_t parent, rt_lwp_t self_lwp)
{
    /* the waker provides the message mainly through its lwp_status */
    rt_wqueue_wakeup(&parent->waitpid_waiters, self_lwp);
    return RT_EOK;
}

struct waitpid_handle {
    struct rt_wqueue_node wq_node;
    int options;
    rt_lwp_t waker_lwp;
};

/* set up the IPC message and notify the parent */
static int _waitq_filter(struct rt_wqueue_node *wait_node, void *key)
{
    int can_accept_evt = 0;
    rt_thread_t waiter = wait_node->polling_thread;
    pid_t destiny = (pid_t)wait_node->key;
    rt_lwp_t waker_lwp = key;
    struct waitpid_handle *handle;
    rt_ubase_t options;

    handle = rt_container_of(wait_node, struct waitpid_handle, wq_node);

    RT_ASSERT(waiter != RT_NULL);
    options = handle->options;

    /* filter out if the waker is not the target */
    if (destiny > 0)
    {
        /**
         * in the waitpid immediate-return path we have already checked
         * that pid is one of the waiting thread's children
         */
        can_accept_evt = waker_lwp->pid == destiny;
    }
    else if (destiny == -1)
    {
        can_accept_evt = waker_lwp->parent == waiter->lwp;
    }
    else
    {
        /* destiny == 0 || destiny == -pgid */
        pid_t waiter_pgid;
        if (destiny == 0)
        {
            waiter_pgid = lwp_pgid_get_byprocess(waiter->lwp);
        }
        else
        {
            waiter_pgid = -destiny;
        }
        can_accept_evt = waiter_pgid == lwp_pgid_get_byprocess(waker_lwp);
    }

    /* filter out if the event is not desired */
    if (can_accept_evt)
    {
        if ((options & WEXITED) && waker_lwp->terminated)
            can_accept_evt = 1;
        else if ((options & WSTOPPED) && WIFSTOPPED(waker_lwp->lwp_status))
            can_accept_evt = 1;
        else if ((options & WCONTINUED) && WIFCONTINUED(waker_lwp->lwp_status))
            can_accept_evt = 1;
        else
            can_accept_evt = 0;
    }

    /* set up the message for the waiter if accepted */
    if (can_accept_evt)
        handle->waker_lwp = waker_lwp;

    /* 0 if the event is accepted, otherwise discard */
    return !can_accept_evt;
}
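
/*
 * Note: wq_node.key carries the classic waitpid() pid argument: > 0 waits
 * for that exact child, -1 for any child, 0 for the waiter's own process
 * group, and < -1 for process group -key. The filter returns 0 to accept
 * the wakeup; any nonzero return keeps the waiter suspended.
 */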

/* the waiter cleans up the IPC message and waits for the desired event here */
static rt_err_t _wait_for_event(rt_thread_t cur_thr, rt_lwp_t self_lwp,
                                struct waitpid_handle *handle, pid_t destiny)
{
    rt_err_t ret;

    /* current context checking */
    RT_DEBUG_SCHEDULER_AVAILABLE(RT_TRUE);

    handle->wq_node.polling_thread = cur_thr;
    handle->wq_node.key = destiny;
    handle->wq_node.wakeup = _waitq_filter;
    handle->wq_node.wqueue = &self_lwp->waitpid_waiters;
    rt_list_init(&handle->wq_node.list);

    cur_thr->error = RT_EOK;

    LOG_D("%s(self_lwp=%d) wait for event", __func__, self_lwp->pid);

    rt_enter_critical();
    ret = rt_thread_suspend_with_flag(cur_thr, RT_INTERRUPTIBLE);
    if (ret == RT_EOK)
    {
        rt_wqueue_add(handle->wq_node.wqueue, &handle->wq_node);
        rt_exit_critical();

        rt_schedule();

        ret = cur_thr->error;

        /**
         * cur_thr->error is a positive value, but some legacy
         * implementations use a negative one, so normalize to avoid errors
         */
        ret = ret > 0 ? -ret : ret;

        /**
         * we don't actually rely on this, but we clean it up since the
         * wakeup API sets it during operation and a stale flag can cause
         * messy conditions
         */
        handle->wq_node.wqueue->flag = RT_WQ_FLAG_CLEAN;
        rt_wqueue_remove(&handle->wq_node);
    }
    else
    {
        /* failed to suspend, return immediately with failure */
        rt_exit_critical();
    }

    return ret;
}
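
/*
 * Note: the thread is marked suspended and the wait-queue node is added
 * inside one critical section, so a concurrent lwp_waitpid_kick() either
 * observes the node on the queue or runs entirely before/after the
 * section; the rt_schedule() call only takes effect once the critical
 * section has been left.
 */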

/* wait for IPC event and do the cleanup if necessary */
static sysret_t _wait_and_reap(rt_thread_t cur_thr, rt_lwp_t self_lwp, const pid_t pid,
                               int options, int *ustatus, struct rusage *uru)
{
    sysret_t rc;
    struct waitpid_handle handle;
    rt_lwp_t waker;

    /* wait for SIGCHLD or other async events */
    handle.options = options;
    handle.waker_lwp = 0;
    rc = _wait_for_event(cur_thr, self_lwp, &handle, pid);

    waker = handle.waker_lwp;
    if (waker != RT_NULL)
    {
        rc = waker->pid;

        /* check whether any process exited */
        LOG_D("%s: woken up by lwp=%d", __func__, waker->pid);
        _stats_and_reap_child(waker, cur_thr, self_lwp, ustatus, options, uru);
    }
    /**
     * else if (rc != RT_EOK):
     * unable to suspend, or woken up unexpectedly
     * -> a failure is returned
     */

    return rc;
}

pid_t lwp_waitpid(const pid_t pid, int *status, int options, struct rusage *ru)
{
    pid_t rc = -1;
    struct rt_thread *cur_thr;
    struct rt_lwp *self_lwp;

    cur_thr = rt_thread_self();
    self_lwp = lwp_self();

    if (!cur_thr || !self_lwp)
    {
        rc = -EINVAL;
    }
    else
    {
        /* check if the desired child can be reaped immediately */
        if (pid > 0)
        {
            /* if pid is a child, try to reap it */
            rc = _verify_child_and_reap(cur_thr, self_lwp, pid, options, status, ru);
        }
        else if (pid == -1)
        {
            /* any terminated child */
            rc = _reap_any_child_pid(cur_thr, self_lwp, 0, options, status, ru);
        }
        else
        {
            /**
             * (pid < -1 || pid == 0)
             * any terminated child with matching pgid
             */

            pid_t pair_pgid;
            if (pid == 0)
            {
                pair_pgid = lwp_pgid_get_byprocess(self_lwp);
            }
            else
            {
                pair_pgid = -pid;
            }
            rc = _reap_any_child_pid(cur_thr, self_lwp, pair_pgid, options, status, ru);
        }

        if (rc == HAS_CHILD_BUT_NO_EVT)
        {
            if (!(options & WNOHANG))
            {
                /* otherwise, arrange a suspend and wait for an async event */
                options |= WEXITED;
                rc = _wait_and_reap(cur_thr, self_lwp, pid, options, status, ru);
            }
            else
            {
                /**
                 * POSIX.1: If waitpid() was invoked with WNOHANG set in
                 * options, it has at least one child process specified by
                 * pid for which status is not available, and status is not
                 * available for any process specified by pid, 0 is returned
                 */
                rc = 0;
            }
        }
        else
        {
            RT_ASSERT(rc != 0);
        }
    }

    LOG_D("waitpid() => %d, *status=0x%x", rc, status ? *status : 0);
    return rc;
}

pid_t waitpid(pid_t pid, int *status, int options)
{
    return lwp_waitpid(pid, status, options, RT_NULL);
}
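
/*
 * Note: an illustrative user-side sketch (not part of this file) of the
 * paths above, using only the standard POSIX API:
 *
 *     int status;
 *     pid_t who = waitpid(-1, &status, WNOHANG);
 *     if (who == 0)
 *         ;   // children exist but none has an event yet (WNOHANG path)
 *     else if (who > 0 && WIFEXITED(status))
 *         ;   // a child was reaped; exit code in WEXITSTATUS(status)
 */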

#ifdef RT_USING_FINSH
/* copied from components/finsh/cmd.c */
static void object_split(int len)
{
    while (len--)
    {
        rt_kprintf("-");
    }
}

static void print_thread_info(struct rt_thread* thread, int maxlen)
{
    rt_uint8_t *ptr;
    rt_uint8_t stat;

#ifdef RT_USING_SMP
    if (RT_SCHED_CTX(thread).oncpu != RT_CPU_DETACHED)
        rt_kprintf("%-*.*s %3d %3d ", maxlen, RT_NAME_MAX, thread->parent.name, RT_SCHED_CTX(thread).oncpu, RT_SCHED_PRIV(thread).current_priority);
    else
        rt_kprintf("%-*.*s N/A %3d ", maxlen, RT_NAME_MAX, thread->parent.name, RT_SCHED_PRIV(thread).current_priority);
#else
    rt_kprintf("%-*.*s %3d ", maxlen, RT_NAME_MAX, thread->parent.name, RT_SCHED_PRIV(thread).current_priority);
#endif /*RT_USING_SMP*/

    stat = (RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK);
    if (stat == RT_THREAD_READY) rt_kprintf(" ready ");
    else if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK) rt_kprintf(" suspend");
    else if (stat == RT_THREAD_INIT) rt_kprintf(" init ");
    else if (stat == RT_THREAD_CLOSE) rt_kprintf(" close ");
    else if (stat == RT_THREAD_RUNNING) rt_kprintf(" running");

#if defined(ARCH_CPU_STACK_GROWS_UPWARD)
    ptr = (rt_uint8_t *)thread->stack_addr + thread->stack_size;
    while (*ptr == '#') ptr--;

    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
            ((rt_uint32_t)thread->sp - (rt_uint32_t)thread->stack_addr),
            thread->stack_size,
            ((rt_uint32_t)ptr - (rt_uint32_t)thread->stack_addr) * 100 / thread->stack_size,
            RT_SCHED_PRIV(thread).remaining_tick,
            thread->error);
#else
    ptr = (rt_uint8_t *)thread->stack_addr;
    while (*ptr == '#') ptr++;

    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
            (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)thread->sp),
            thread->stack_size,
            (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)ptr) * 100
            / thread->stack_size,
            RT_SCHED_PRIV(thread).remaining_tick,
            thread->error);
#endif
}

long list_process(void)
{
    int index;
    int maxlen;
    rt_ubase_t level;
    struct rt_thread *thread;
    struct rt_list_node *node, *list;
    const char *item_title = "thread";

    int count = 0;
    struct rt_thread **threads;

    maxlen = RT_NAME_MAX;
#ifdef RT_USING_SMP
    rt_kprintf("%-*.s %-*.s %-*.s cpu pri status sp stack size max used left tick error\n", 4, "PID", maxlen, "CMD", maxlen, item_title);
    object_split(4); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" ");
    rt_kprintf("--- --- ------- ---------- ---------- ------ ---------- ---\n");
#else
    rt_kprintf("%-*.s %-*.s %-*.s pri status sp stack size max used left tick error\n", 4, "PID", maxlen, "CMD", maxlen, item_title);
    object_split(4); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" ");
    rt_kprintf("--- ------- ---------- ---------- ------ ---------- ---\n");
#endif /*RT_USING_SMP*/

    count = rt_object_get_length(RT_Object_Class_Thread);
    if (count > 0)
    {
        /* get thread pointers */
        threads = (struct rt_thread **)rt_calloc(count, sizeof(struct rt_thread *));
        if (threads)
        {
            index = rt_object_get_pointers(RT_Object_Class_Thread, (rt_object_t *)threads, count);

            if (index > 0)
            {
                for (index = 0; index < count; index++)
                {
                    struct rt_thread th;

                    thread = threads[index];

                    level = rt_spin_lock_irqsave(&thread->spinlock);
                    if ((rt_object_get_type(&thread->parent) & ~RT_Object_Class_Static) != RT_Object_Class_Thread)
                    {
                        rt_spin_unlock_irqrestore(&thread->spinlock, level);
                        continue;
                    }

                    rt_memcpy(&th, thread, sizeof(struct rt_thread));
                    rt_spin_unlock_irqrestore(&thread->spinlock, level);

                    if (th.lwp == RT_NULL)
                    {
                        rt_kprintf(" %-*.*s ", maxlen, RT_NAME_MAX, "kernel");
                        print_thread_info(&th, maxlen);
                    }
                }
            }
            rt_free(threads);
        }
    }

    for (index = 0; index < RT_LWP_MAX_NR; index++)
    {
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[index].data;

        if (lwp)
        {
            list = &lwp->t_grp;
            for (node = list->next; node != list; node = node->next)
            {
                thread = rt_list_entry(node, struct rt_thread, sibling);
                rt_kprintf("%4d %-*.*s ", lwp_to_pid(lwp), maxlen, RT_NAME_MAX, lwp->cmd);
                print_thread_info(thread, maxlen);
            }
        }
    }
    return 0;
}
MSH_CMD_EXPORT(list_process, list process);

static void cmd_kill(int argc, char** argv)
{
    int pid;
    int sig = SIGKILL;

    if (argc < 2)
    {
        rt_kprintf("kill pid or kill pid -s signal\n");
        return;
    }

    pid = atoi(argv[1]);
    if (argc >= 4)
    {
        if (argv[2][0] == '-' && argv[2][1] == 's')
        {
            sig = atoi(argv[3]);
        }
    }
    lwp_pid_lock_take();
    lwp_signal_kill(lwp_from_pid_raw_locked(pid), sig, SI_USER, 0);
    lwp_pid_lock_release();
}
MSH_CMD_EXPORT_ALIAS(cmd_kill, kill, send a signal to a process);

static void cmd_killall(int argc, char** argv)
{
    int pid;
    if (argc < 2)
    {
        rt_kprintf("killall process_name\n");
        return;
    }

    while ((pid = lwp_name2pid(argv[1])) > 0)
    {
        lwp_pid_lock_take();
        lwp_signal_kill(lwp_from_pid_raw_locked(pid), SIGKILL, SI_USER, 0);
        lwp_pid_lock_release();
        rt_thread_mdelay(100);
    }
}
MSH_CMD_EXPORT_ALIAS(cmd_killall, killall, kill processes by name);

#endif

int lwp_check_exit_request(void)
{
    rt_thread_t thread = rt_thread_self();
    rt_size_t expected = LWP_EXIT_REQUEST_TRIGGERED;

    if (!thread->lwp)
    {
        return 0;
    }

    return atomic_compare_exchange_strong(&thread->exit_request, &expected,
                                          LWP_EXIT_REQUEST_IN_PROCESS);
}
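
/*
 * Note: the exit request is a small state machine driven by
 * compare-and-swap: lwp_terminate() moves a sibling thread from
 * LWP_EXIT_REQUEST_NONE to LWP_EXIT_REQUEST_TRIGGERED, and the CAS above
 * lets exactly one caller claim the TRIGGERED -> IN_PROCESS transition,
 * so the exit path is taken once even if the check runs repeatedly.
 */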

static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread);
static void _resr_cleanup(struct rt_lwp *lwp);

void lwp_terminate(struct rt_lwp *lwp)
{
    if (!lwp)
    {
        /* kernel threads are not supported */
        return;
    }

    LOG_D("%s(lwp=%p \"%s\")", __func__, lwp, lwp->cmd);

    LWP_LOCK(lwp);

    if (!lwp->terminated)
    {
        /* stop the receiving of signals */
        lwp->terminated = RT_TRUE;
        LWP_UNLOCK(lwp);

        _wait_sibling_exit(lwp, rt_thread_self());
        _resr_cleanup(lwp);
    }
    else
    {
        LWP_UNLOCK(lwp);
    }
}

static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread)
{
    rt_sched_lock_level_t slvl;
    rt_list_t *list;
    rt_thread_t thread;
    rt_size_t expected;

    /* broadcast exit request to sibling threads */
    LWP_LOCK(lwp);
    for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
    {
        thread = rt_list_entry(list, struct rt_thread, sibling);

        /* reset each round: a failed CAS overwrites `expected` */
        expected = LWP_EXIT_REQUEST_NONE;
        atomic_compare_exchange_strong(&thread->exit_request, &expected,
                                       LWP_EXIT_REQUEST_TRIGGERED);

        rt_sched_lock(&slvl);
        /* don't release the lock yet, otherwise the thread may have been freed */
        if (rt_sched_thread_is_suspended(thread))
        {
            thread->error = RT_EINTR;
            rt_sched_unlock(slvl);

            rt_thread_wakeup(thread);
        }
        else
        {
            rt_sched_unlock(slvl);
        }
    }
    LWP_UNLOCK(lwp);

    while (1)
    {
        int subthread_is_terminated;
        LOG_D("%s: wait for subthread exiting", __func__);

        /**
         * Brief: wait for all *running* sibling threads to exit
         *
         * Note: Critical Section
         * - sibling list of lwp (RW. It will clear all siblings finally)
         */
        LWP_LOCK(lwp);
        subthread_is_terminated = (int)(curr_thread->sibling.prev == &lwp->t_grp);
        if (!subthread_is_terminated)
        {
            rt_sched_lock_level_t slvl;
            rt_thread_t sub_thread;
            rt_list_t *list;
            int all_subthread_in_init = 1;

            /* check whether all subthreads are still in init state */
            for (list = curr_thread->sibling.prev; list != &lwp->t_grp; list = list->prev)
            {
                rt_sched_lock(&slvl);
                sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                if (rt_sched_thread_get_stat(sub_thread) != RT_THREAD_INIT)
                {
                    rt_sched_unlock(slvl);
                    all_subthread_in_init = 0;
                    break;
                }
                else
                {
                    rt_sched_unlock(slvl);
                }
            }
            if (all_subthread_in_init)
            {
                /* delete all subthreads */
                while ((list = curr_thread->sibling.prev) != &lwp->t_grp)
                {
                    sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                    rt_list_remove(&sub_thread->sibling);

                    /**
                     * Note: Critical Section
                     * - thread control block (RW. Since it will free the thread
                     *   control block, it must ensure no one else can access
                     *   the thread any more)
                     */
                    lwp_tid_put(sub_thread->tid);
                    sub_thread->tid = 0;
                    rt_thread_delete(sub_thread);
                }
                subthread_is_terminated = 1;
            }
        }
        LWP_UNLOCK(lwp);

        if (subthread_is_terminated)
        {
            break;
        }
        rt_thread_mdelay(10);
    }
}

static void _notify_parent(rt_lwp_t lwp)
{
    int si_code;
    int signo_or_exitcode;
    lwp_siginfo_ext_t ext;
    lwp_status_t lwp_status = lwp->lwp_status;
    rt_lwp_t parent = lwp->parent;

    if (WIFSIGNALED(lwp_status))
    {
        si_code = (lwp_status & LWP_COREDUMP_FLAG) ? CLD_DUMPED : CLD_KILLED;
        signo_or_exitcode = WTERMSIG(lwp_status);
    }
    else
    {
        si_code = CLD_EXITED;
        signo_or_exitcode = WEXITSTATUS(lwp->lwp_status);
    }

    lwp_waitpid_kick(parent, lwp);

    ext = rt_malloc(sizeof(struct lwp_siginfo));

    if (ext)
    {
        rt_thread_t cur_thr = rt_thread_self();
        ext->sigchld.status = signo_or_exitcode;
        ext->sigchld.stime = cur_thr->system_time;
        ext->sigchld.utime = cur_thr->user_time;
    }
    lwp_signal_kill(parent, SIGCHLD, si_code, ext);
}
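
/*
 * Note: the parent is informed through two channels: the wait-queue kick
 * wakes any waitpid() caller already blocked, while the SIGCHLD (with a
 * CLD_* code matching how the child ended) covers parents that rely on
 * the signal instead.
 */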

static void _resr_cleanup(struct rt_lwp *lwp)
{
    lwp_jobctrl_on_exit(lwp);

    LWP_LOCK(lwp);
    lwp_signal_detach(&lwp->signal);

    /**
     * @brief Detach children from lwp
     *
     * @note Critical Section
     * - the lwp (RW. Release lwp)
     * - the pid resource manager (RW. Release the pid)
     */
    while (lwp->first_child)
    {
        struct rt_lwp *child;

        child = lwp->first_child;
        lwp->first_child = child->sibling;

        /** @note safe since the slist node is released */
        LWP_UNLOCK(lwp);
        LWP_LOCK(child);
        if (child->terminated)
        {
            lwp_pid_put(child);
        }
        else
        {
            child->sibling = RT_NULL;
            /* info: this may cause an orphan lwp */
            child->parent = RT_NULL;
        }

        LWP_UNLOCK(child);
        lwp_ref_dec(child);
        lwp_ref_dec(lwp);

        LWP_LOCK(lwp);
    }
    LWP_UNLOCK(lwp);

    /**
     * @brief Wake up the parent if it is waiting for this lwp; otherwise a
     * signal will be sent to the parent
     *
     * @note Critical Section
     * - the parent lwp (RW.)
     */
    LWP_LOCK(lwp);
    if (lwp->parent &&
        !lwp_sigismember(&lwp->parent->signal.sig_action_nocldwait, SIGCHLD))
    {
        /* we successfully raced to set lwp->terminated before the parent detached */
        LWP_UNLOCK(lwp);

        /**
         * Note: a child cannot detach itself and must wait for the parent
         * to take care of it
         */
        _notify_parent(lwp);
    }
    else
    {
        LWP_UNLOCK(lwp);

        /**
         * if the process is an orphan, it has no parent to do the
         * recycling. Otherwise, its parent has set a flag to mask out the
         * recycling event
         */
        lwp_pid_put(lwp);
    }

    LWP_LOCK(lwp);
    if (lwp->fdt.fds != RT_NULL)
    {
        struct dfs_file **fds;

        /* auto clean fds */
        __exit_files(lwp);
        fds = lwp->fdt.fds;
        lwp->fdt.fds = RT_NULL;
        LWP_UNLOCK(lwp);

        rt_free(fds);
    }
    else
    {
        LWP_UNLOCK(lwp);
    }
}
[smart] update sched_setaffinity() to use thread(task) ID (#9004)
Correct `sched_setaffinity()` to use the thread IDs (TIDs) instead of
process IDs (PIDs). The previous implementation used PIDs, which
caused issues since affinity settings need to be applied at the
thread level.
As the manual documented, the signature is:
> int sched_setaffinity(pid_t pid, size_t cpusetsize,
> const cpu_set_t *mask);
Yes, it's tricky, the identification passing in is called **'PID'**.
But when we talk about 'pid' from GNU libc, it's the **'task-id'**,
aka, `thread->tid` known in kernel.
Changes were made by updating the function signatures and logic in
`lwp.h`, `lwp_pid.c`, and `lwp_syscall.c` to accept TIDs. Specifically,
the `lwp_setaffinity` function and related internal functions now
operate using thread IDs and adjust thread affinity settings accordingly
Signed-off-by: Shell <smokewood@qq.com>
2024-05-29 06:53:22 +08:00
|
|
|
static int _lwp_setaffinity(int tid, int cpu)
|
2022-12-03 12:07:44 +08:00
|
|
|
{
|
    rt_thread_t thread;
    int ret = -1;

    thread = lwp_tid_get_thread_and_inc_ref(tid);
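    /* the tid reference keeps the thread object alive while it is used here;
     * lwp_tid_dec_ref() below releases it and tolerates a failed lookup */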
    if (thread)
    {
#ifdef RT_USING_SMP
        rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void *)(rt_ubase_t)cpu);
#endif
        ret = 0;
    }
    lwp_tid_dec_ref(thread);
    return ret;
}

int lwp_setaffinity(int tid, int cpu)
{
    int ret;

#ifdef RT_USING_SMP
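    /* requests outside the valid core range are mapped to RT_CPUS_NR, which
     * the scheduler treats as "not bound to any core" */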
    if (cpu < 0 || cpu > RT_CPUS_NR)
    {
        cpu = RT_CPUS_NR;
    }
#endif
    ret = _lwp_setaffinity(tid, cpu);
    return ret;
}
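
/*
 * Usage sketch (illustrative only): a user-space call such as
 *
 *     cpu_set_t set;
 *     CPU_ZERO(&set);
 *     CPU_SET(1, &set);
 *     sched_setaffinity(tid, sizeof(set), &set);
 *
 * where tid is a thread id (see the note above), is expected to reach
 * lwp_setaffinity(tid, 1) through the syscall layer, with the target core
 * extracted from the mask.
 */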

#ifdef RT_USING_SMP
static void cmd_cpu_bind(int argc, char** argv)
{
    int pid;
    int cpu;

    if (argc < 3)
    {
        rt_kprintf("Usage: cpu_bind pid cpu\n");
        return;
    }

    pid = atoi(argv[1]);
    cpu = atoi(argv[2]);
    lwp_setaffinity((pid_t)pid, cpu);
}
MSH_CMD_EXPORT_ALIAS(cmd_cpu_bind, cpu_bind, bind a process to a cpu);
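
/*
 * Example (msh console, illustrative): "cpu_bind 30 1" binds the task whose
 * id is 30 to core 1; the id is forwarded to lwp_setaffinity(), which
 * interprets it as a thread id.
 */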
#endif