rt-thread-official/components/libc/posix/pthreads/pthread.c


/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018-01-26     Bernard      Fix pthread_detach issue for a none-joinable
 *                             thread.
 * 2019-02-07     Bernard      Add _pthread_destroy to release pthread resource.
 * 2022-05-10     xiangxistu   Modify the recycle logic about resource of pthread.
 * 2024-04-15     atwww        Modify the recycle logic of TLS in function _pthread_data_destroy,
 *                             make it safe for C++11's thread_local destructors.
 */
#include <rthw.h>
#include <pthread.h>
#include <sched.h>
#include <sys/time.h>
#include "pthread_internal.h"
RT_DEFINE_HW_SPINLOCK(pth_lock);
_pthread_data_t *pth_table[PTHREAD_NUM_MAX] = {NULL};
static int concurrency_level;
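/* Look up the pthread data block for a pthread handle.
 * Returns NULL when the handle is out of range or the slot does not hold a
 * valid (PTHREAD_MAGIC) entry.
 */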
_pthread_data_t *_pthread_get_data(pthread_t thread)
{
_pthread_data_t *ptd;
if (thread >= PTHREAD_NUM_MAX) return NULL;
rt_hw_spin_lock(&pth_lock);
ptd = pth_table[thread];
rt_hw_spin_unlock(&pth_lock);
if (ptd && ptd->magic == PTHREAD_MAGIC) return ptd;
return NULL;
}
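/* Reverse lookup: return the pth_table index (pthread_t) of a pthread data
 * block, or PTHREAD_NUM_MAX when the block is not registered.
 */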
pthread_t _pthread_data_get_pth(_pthread_data_t *ptd)
{
int index;
rt_hw_spin_lock(&pth_lock);
for (index = 0; index < PTHREAD_NUM_MAX; index ++)
{
if (pth_table[index] == ptd) break;
}
rt_hw_spin_unlock(&pth_lock);
return index;
}
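/* Allocate a pthread data block and register it in pth_table.
 * Returns the new table index, or PTHREAD_NUM_MAX when the allocation fails
 * or the table is full.
 */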
pthread_t _pthread_data_create(void)
{
int index;
_pthread_data_t *ptd = NULL;
ptd = (_pthread_data_t*)rt_malloc(sizeof(_pthread_data_t));
if (!ptd) return PTHREAD_NUM_MAX;
memset(ptd, 0x0, sizeof(_pthread_data_t));
ptd->canceled = 0;
ptd->cancelstate = PTHREAD_CANCEL_DISABLE;
ptd->canceltype = PTHREAD_CANCEL_DEFERRED;
ptd->magic = PTHREAD_MAGIC;
rt_hw_spin_lock(&pth_lock);
for (index = 0; index < PTHREAD_NUM_MAX; index ++)
{
if (pth_table[index] == NULL)
{
pth_table[index] = ptd;
break;
}
}
rt_hw_spin_unlock(&pth_lock);
    /* pthread table is full: clear the magic and release ptd */
if (index == PTHREAD_NUM_MAX)
{
ptd->magic = 0x0;
rt_free(ptd);
}
return index;
}
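/* Invoke the destructor registered for one TLS key on this thread's value,
 * if the key is in use and the stored value is non-NULL.
 */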
static inline void _destroy_item(int index, _pthread_data_t *ptd)
{
extern _pthread_key_data_t _thread_keys[PTHREAD_KEY_MAX];
void *data;
if (_thread_keys[index].is_used)
{
data = ptd->tls[index];
if (data && _thread_keys[index].destructor)
{
_thread_keys[index].destructor(data);
}
}
}
#ifdef RT_USING_CPLUSPLUS11
#define NOT_USE_CXX_TLS -1
#endif
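/* Release everything attached to a pthread: run the TLS destructors, free the
 * TLS area, remove the entry from pth_table, delete the joinable semaphore
 * and free the data block itself.
 */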
void _pthread_data_destroy(_pthread_data_t *ptd)
{
pthread_t pth;
if (ptd)
{
        /* if this thread created thread-local data,
         * run the destructors of its thread-local keys
         */
if (ptd->tls != RT_NULL)
{
int index;
#ifdef RT_USING_CPLUSPLUS11
        /* If C++11 is enabled and emutls is used,
         * destructors of C++ objects must be called safely.
         */
extern pthread_key_t emutls_get_pthread_key(void);
pthread_key_t emutls_pthread_key = emutls_get_pthread_key();
if (emutls_pthread_key != NOT_USE_CXX_TLS)
{
            /* If execution reaches here, C++ 'thread_local' may be in use.
             * Destructors of C++ class objects must be called before the emutls key destructor.
             */
int start = ((emutls_pthread_key - 1 + PTHREAD_KEY_MAX) % PTHREAD_KEY_MAX);
int i = 0;
for (index = start; i < PTHREAD_KEY_MAX; index = (index - 1 + PTHREAD_KEY_MAX) % PTHREAD_KEY_MAX, i ++)
{
_destroy_item(index, ptd);
}
}
else
#endif
{
            /* If only C TLS is used, that is, POSIX TLS or _Thread_local,
             * just iterate over _thread_keys starting from index 0.
             */
for (index = 0; index < PTHREAD_KEY_MAX; index ++)
{
_destroy_item(index, ptd);
}
}
/* release tls area */
rt_free(ptd->tls);
ptd->tls = RT_NULL;
}
pth = _pthread_data_get_pth(ptd);
/* remove from pthread table */
rt_hw_spin_lock(&pth_lock);
pth_table[pth] = NULL;
rt_hw_spin_unlock(&pth_lock);
/* delete joinable semaphore */
if (ptd->joinable_sem != RT_NULL)
{
rt_sem_delete(ptd->joinable_sem);
ptd->joinable_sem = RT_NULL;
}
/* clean magic */
ptd->magic = 0x0;
/* clear the "ptd->tid->pthread_data" */
ptd->tid->pthread_data = RT_NULL;
/* free ptd */
rt_free(ptd);
}
}
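/* Cleanup hook installed on tid->cleanup: frees the heap-allocated stack and
 * the thread control block when the defunct thread is reclaimed.
 */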
static void _pthread_cleanup(rt_thread_t tid)
{
/* clear cleanup function */
tid->cleanup = RT_NULL;
    /* free the thread stack */
    rt_free(tid->stack_addr);
    /* free the thread control block */
    rt_free(tid);
}
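/* Common entry wrapper for every pthread: runs the user entry function, then
 * either publishes the return value to a joiner (joinable threads) or
 * destroys the pthread data immediately (detached threads).
 */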
static void pthread_entry_stub(void *parameter)
{
    void *value;
    _pthread_data_t *ptd;

    ptd = (_pthread_data_t *)parameter;

    /* execute pthread entry */
    value = ptd->thread_entry(ptd->thread_parameter);

    /* recycle resources immediately or defer to the joiner, depending on "detachstate" */
    if (ptd->attr.detachstate == PTHREAD_CREATE_JOINABLE)
    {
        /* set the return value */
        ptd->return_value = value;
        rt_sem_release(ptd->joinable_sem);
    }
    else
    {
        /* release pthread resource */
        _pthread_data_destroy(ptd);
    }
}
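/* Create a POSIX thread on top of an RT-Thread thread.
 * The thread control block and, unless the attribute supplies one, the stack
 * are allocated from the heap; on failure the partially created pthread data
 * is destroyed and an errno-style value is returned.
 */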
int pthread_create(pthread_t *pid,
const pthread_attr_t *attr,
void *(*start)(void *), void *parameter)
{
int ret = 0;
void *stack;
char name[RT_NAME_MAX];
static rt_uint16_t pthread_number = 0;
pthread_t pth_id;
_pthread_data_t *ptd;
/* pid shall be provided */
RT_ASSERT(pid != RT_NULL);
/* allocate posix thread data */
pth_id = _pthread_data_create();
if (pth_id == PTHREAD_NUM_MAX)
{
ret = ENOMEM;
goto __exit;
}
/* get pthread data */
ptd = _pthread_get_data(pth_id);
RT_ASSERT(ptd != RT_NULL);
if (attr != RT_NULL)
{
ptd->attr = *attr;
}
else
{
/* use default attribute */
pthread_attr_init(&ptd->attr);
}
if (ptd->attr.stacksize == 0)
{
ret = EINVAL;
goto __exit;
}
rt_snprintf(name, sizeof(name), "pth%02d", pthread_number ++);
    /* the pthread's tid is initialized as a static thread object on a heap-allocated control block */
ptd->tid = (rt_thread_t) rt_malloc(sizeof(struct rt_thread));
if (ptd->tid == RT_NULL)
{
ret = ENOMEM;
goto __exit;
}
memset(ptd->tid, 0, sizeof(struct rt_thread));
if (ptd->attr.detachstate == PTHREAD_CREATE_JOINABLE)
{
ptd->joinable_sem = rt_sem_create(name, 0, RT_IPC_FLAG_FIFO);
if (ptd->joinable_sem == RT_NULL)
{
ret = ENOMEM;
goto __exit;
}
}
else
{
ptd->joinable_sem = RT_NULL;
}
/* set parameter */
ptd->thread_entry = start;
ptd->thread_parameter = parameter;
/* stack */
if (ptd->attr.stackaddr == 0)
{
stack = (void *)rt_malloc(ptd->attr.stacksize);
}
else
{
stack = (void *)(ptd->attr.stackaddr);
}
if (stack == RT_NULL)
{
ret = ENOMEM;
goto __exit;
}
    /* initialize this pthread in the system */
if (rt_thread_init(ptd->tid, name, pthread_entry_stub, ptd,
stack, ptd->attr.stacksize,
ptd->attr.schedparam.sched_priority, 20) != RT_EOK)
{
ret = EINVAL;
goto __exit;
}
/* set pthread id */
*pid = pth_id;
/* set pthread cleanup function and ptd data */
ptd->tid->cleanup = _pthread_cleanup;
ptd->tid->pthread_data = (void *)ptd;
/* start thread */
if (rt_thread_startup(ptd->tid) == RT_EOK)
return 0;
/* start thread failed */
rt_thread_detach(ptd->tid);
ret = EINVAL;
__exit:
if (pth_id != PTHREAD_NUM_MAX)
{
_pthread_data_destroy(ptd);
}
return ret;
}
RTM_EXPORT(pthread_create);
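/* Put a joinable pthread into the detached state so that its resources are
 * reclaimed automatically when it terminates; returns EINVAL for an invalid
 * or already detached thread.
 */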
int pthread_detach(pthread_t thread)
{
int ret = 0;
_pthread_data_t *ptd = _pthread_get_data(thread);
if (ptd == RT_NULL)
{
/* invalid pthread id */
ret = EINVAL;
goto __exit;
}
if (ptd->attr.detachstate == PTHREAD_CREATE_DETACHED)
{
/* The implementation has detected that the value specified by thread does not refer
* to a joinable thread.
*/
ret = EINVAL;
goto __exit;
}
if ((RT_SCHED_CTX(ptd->tid).stat & RT_THREAD_STAT_MASK) == RT_THREAD_CLOSE)
{
/* destroy this pthread */
_pthread_data_destroy(ptd);
goto __exit;
}
else
{
/* change to detach state */
ptd->attr.detachstate = PTHREAD_CREATE_DETACHED;
        /* delete the joinable semaphore */
if (ptd->joinable_sem)
{
rt_sem_delete(ptd->joinable_sem);
ptd->joinable_sem = RT_NULL;
}
}
__exit:
return ret;
}
RTM_EXPORT(pthread_detach);
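/* Wait for a joinable pthread to terminate, optionally fetch its return
 * value, and release its resources.
 */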
int pthread_join(pthread_t thread, void **value_ptr)
{
    _pthread_data_t *ptd;
    rt_err_t result;

    ptd = _pthread_get_data(thread);
    if (ptd == RT_NULL)
    {
        return EINVAL; /* invalid pthread id */
    }

    if (ptd && ptd->tid == rt_thread_self())
    {
        /* join self */
        return EDEADLK;
    }

    if (ptd->attr.detachstate == PTHREAD_CREATE_DETACHED)
    {
        return EINVAL; /* join on a detached pthread */
    }

    result = rt_sem_take(ptd->joinable_sem, RT_WAITING_FOREVER);
    if (result == RT_EOK)
    {
        /* get return value */
        if (value_ptr != RT_NULL)
            *value_ptr = ptd->return_value;

        /* destroy this pthread */
        _pthread_data_destroy(ptd);
    }
    else
    {
        return ESRCH;
    }

    return 0;
}
RTM_EXPORT(pthread_join);
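/* Return the pthread handle of the calling thread, or PTHREAD_NUM_MAX when
 * the caller is not running in thread context.
 */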
pthread_t pthread_self (void)
{
rt_thread_t tid;
_pthread_data_t *ptd;
tid = rt_thread_self();
if (tid == NULL) return PTHREAD_NUM_MAX;
/* get pthread data from pthread_data of thread */
ptd = (_pthread_data_t *)rt_thread_self()->pthread_data;
RT_ASSERT(ptd != RT_NULL);
return _pthread_data_get_pth(ptd);
}
RTM_EXPORT(pthread_self);
int pthread_getcpuclockid(pthread_t thread, clockid_t *clock_id)
{
if(_pthread_get_data(thread) == NULL)
{
return EINVAL;
}
*clock_id = (clockid_t)rt_tick_get();
return 0;
}
RTM_EXPORT(pthread_getcpuclockid);
int pthread_getconcurrency(void)
{
return concurrency_level;
}
RTM_EXPORT(pthread_getconcurrency);
int pthread_setconcurrency(int new_level)
{
concurrency_level = new_level;
return 0;
}
RTM_EXPORT(pthread_setconcurrency);
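/* Note: the scheduling get/set functions below operate on the scheduling
 * attributes cached in the pthread data; they do not change the priority of
 * the already running RT-Thread thread.
 */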
int pthread_getschedparam(pthread_t thread, int *policy, struct sched_param *param)
{
_pthread_data_t *ptd;
    ptd = _pthread_get_data(thread);
    if (ptd == RT_NULL)
    {
        return EINVAL;
    }
pthread_attr_getschedpolicy(&ptd->attr, policy);
pthread_attr_getschedparam(&ptd->attr, param);
return 0;
}
RTM_EXPORT(pthread_getschedparam);
int pthread_setschedparam(pthread_t thread, int policy, const struct sched_param *param)
{
_pthread_data_t *ptd;
    ptd = _pthread_get_data(thread);
    if (ptd == RT_NULL)
    {
        return EINVAL;
    }
pthread_attr_setschedpolicy(&ptd->attr, policy);
pthread_attr_setschedparam(&ptd->attr, param);
return 0;
}
RTM_EXPORT(pthread_setschedparam);
int pthread_setschedprio(pthread_t thread, int prio)
{
_pthread_data_t *ptd;
struct sched_param param;
    ptd = _pthread_get_data(thread);
    if (ptd == RT_NULL)
    {
        return EINVAL;
    }
param.sched_priority = prio;
pthread_attr_setschedparam(&ptd->attr, &param);
return 0;
}
RTM_EXPORT(pthread_setschedprio);
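/* Terminate the calling thread: run the pushed cleanup handlers, publish the
 * return value (or destroy the pthread data for detached threads) and detach
 * the underlying RT-Thread thread.
 */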
void pthread_exit(void *value)
{
    _pthread_data_t *ptd;
    _pthread_cleanup_t *cleanup;
    rt_thread_t tid;

    if (rt_thread_self() == RT_NULL)
    {
        return;
    }

    /* get pthread data from pthread_data of thread */
    ptd = (_pthread_data_t *)rt_thread_self()->pthread_data;

    rt_enter_critical();
    /* disable cancel */
    ptd->cancelstate = PTHREAD_CANCEL_DISABLE;
    /* set return value */
    ptd->return_value = value;
    rt_exit_critical();

    /*
     * When exiting via pthread_exit,
     * invoke the pushed cleanup handlers.
     */
    while (ptd->cleanup != RT_NULL)
    {
        cleanup = ptd->cleanup;
        ptd->cleanup = cleanup->next;

        cleanup->cleanup_func(cleanup->parameter);
        /* release this cleanup function */
        rt_free(cleanup);
    }

    /* get the "tid" early, before ptd may be destroyed */
    tid = ptd->tid;

    /* recycle resources immediately or defer to the joiner, depending on "detachstate" */
    if (ptd->attr.detachstate == PTHREAD_CREATE_JOINABLE)
    {
        /* signal the joiner */
        rt_sem_release(ptd->joinable_sem);
    }
    else
    {
        /* release pthread resource */
        _pthread_data_destroy(ptd);
    }

    /*
     * second: detach the thread.
     * The thread will be removed from the scheduler list and, because a
     * cleanup function (_pthread_cleanup) is attached to it, it will be moved
     * to the defunct thread list and handled by the idle thread.
     */
    rt_thread_detach(tid);

    /* reschedule */
    rt_schedule();
}
RTM_EXPORT(pthread_exit);
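/* Execute init_routine exactly once among all threads that share the same
 * once_control object.
 */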
int pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
{
    RT_ASSERT(once_control != RT_NULL);
    RT_ASSERT(init_routine != RT_NULL);

    rt_enter_critical();
    if (!(*once_control))
    {
        /* call routine once */
        *once_control = 1;
        rt_exit_critical();

        init_routine();

        /* the critical section was already left above */
        return 0;
    }
    rt_exit_critical();

    return 0;
}
RTM_EXPORT(pthread_once);
int pthread_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void))
{
return EOPNOTSUPP;
}
RTM_EXPORT(pthread_atfork);
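/* Send a signal to the target pthread via rt_thread_kill; only available
 * when RT_USING_SIGNALS is enabled, otherwise ENOSYS is returned.
 */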
int pthread_kill(pthread_t thread, int sig)
{
#ifdef RT_USING_SIGNALS
_pthread_data_t *ptd;
int ret;
ptd = _pthread_get_data(thread);
if (ptd)
{
ret = rt_thread_kill(ptd->tid, sig);
if (ret == -RT_EINVAL)
{
return EINVAL;
}
return ret;
}
return ESRCH;
#else
return ENOSYS;
#endif
}
RTM_EXPORT(pthread_kill);
#ifdef RT_USING_SIGNALS
int pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
return sigprocmask(how, set, oset);
}
#endif
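/* Pop the most recently pushed cleanup handler of the calling thread and run
 * it when "execute" is non-zero.
 */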
void pthread_cleanup_pop(int execute)
{
_pthread_data_t *ptd;
_pthread_cleanup_t *cleanup;
if (rt_thread_self() == NULL) return;
/* get pthread data from pthread_data of thread */
ptd = (_pthread_data_t *)rt_thread_self()->pthread_data;
RT_ASSERT(ptd != RT_NULL);
if (execute)
{
rt_enter_critical();
cleanup = ptd->cleanup;
if (cleanup)
ptd->cleanup = cleanup->next;
rt_exit_critical();
if (cleanup)
{
cleanup->cleanup_func(cleanup->parameter);
rt_free(cleanup);
}
}
}
RTM_EXPORT(pthread_cleanup_pop);
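/* Push a cleanup handler onto the calling thread's cleanup stack; it runs on
 * pthread_exit, on cancellation, or via pthread_cleanup_pop(1).
 */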
void pthread_cleanup_push(void (*routine)(void *), void *arg)
{
_pthread_data_t *ptd;
_pthread_cleanup_t *cleanup;
if (rt_thread_self() == NULL) return;
/* get pthread data from pthread_data of thread */
ptd = (_pthread_data_t *)rt_thread_self()->pthread_data;
RT_ASSERT(ptd != RT_NULL);
cleanup = (_pthread_cleanup_t *)rt_malloc(sizeof(_pthread_cleanup_t));
if (cleanup != RT_NULL)
{
cleanup->cleanup_func = routine;
cleanup->parameter = arg;
rt_enter_critical();
cleanup->next = ptd->cleanup;
ptd->cleanup = cleanup;
rt_exit_critical();
}
}
RTM_EXPORT(pthread_cleanup_push);
/*
 * According to IEEE Std 1003.1, 2004 Edition, the following pthread
 * interfaces support cancellation points:
* mq_receive()
* mq_send()
* mq_timedreceive()
* mq_timedsend()
* msgrcv()
* msgsnd()
* msync()
* pthread_cond_timedwait()
* pthread_cond_wait()
* pthread_join()
* pthread_testcancel()
* sem_timedwait()
* sem_wait()
*
* A cancellation point may also occur when a thread is
* executing the following functions:
* pthread_rwlock_rdlock()
* pthread_rwlock_timedrdlock()
* pthread_rwlock_timedwrlock()
* pthread_rwlock_wrlock()
*
* The pthread_cancel(), pthread_setcancelstate(), and pthread_setcanceltype()
* functions are defined to be async-cancel safe.
*/
int pthread_setcancelstate(int state, int *oldstate)
{
    _pthread_data_t *ptd;

    if (rt_thread_self() == NULL) return EINVAL;

    /* get pthread data from pthread_data of thread */
    ptd = (_pthread_data_t *)rt_thread_self()->pthread_data;
    RT_ASSERT(ptd != RT_NULL);

    if ((state == PTHREAD_CANCEL_ENABLE) || (state == PTHREAD_CANCEL_DISABLE))
    {
        if (oldstate)
            *oldstate = ptd->cancelstate;
        ptd->cancelstate = state;

        return 0;
    }

    return EINVAL;
}
RTM_EXPORT(pthread_setcancelstate);
int pthread_setcanceltype(int type, int *oldtype)
{
    _pthread_data_t *ptd;

    if (rt_thread_self() == NULL) return EINVAL;

    /* get pthread data from pthread_data of thread */
    ptd = (_pthread_data_t *)rt_thread_self()->pthread_data;
    RT_ASSERT(ptd != RT_NULL);

    if ((type != PTHREAD_CANCEL_DEFERRED) && (type != PTHREAD_CANCEL_ASYNCHRONOUS))
        return EINVAL;

    if (oldtype)
        *oldtype = ptd->canceltype;
    ptd->canceltype = type;

    return 0;
}
RTM_EXPORT(pthread_setcanceltype);
void pthread_testcancel(void)
{
int cancel = 0;
_pthread_data_t *ptd;
if (rt_thread_self() == NULL) return;
/* get pthread data from pthread_data of thread */
ptd = (_pthread_data_t *)rt_thread_self()->pthread_data;
RT_ASSERT(ptd != RT_NULL);
if (ptd->cancelstate == PTHREAD_CANCEL_ENABLE)
cancel = ptd->canceled;
if (cancel)
pthread_exit((void *)PTHREAD_CANCELED);
}
RTM_EXPORT(pthread_testcancel);
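/* Request cancellation of another pthread. The request is only honoured when
 * the target has cancellation enabled; with the asynchronous cancel type the
 * target's cleanup handlers run and its resources are released immediately.
 */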
int pthread_cancel(pthread_t thread)
{
_pthread_data_t *ptd;
_pthread_cleanup_t *cleanup;
rt_thread_t tid;
/* get posix thread data */
ptd = _pthread_get_data(thread);
if (ptd == RT_NULL)
{
return EINVAL;
}
tid = ptd->tid;
/* cancel self */
if (ptd->tid == rt_thread_self())
return 0;
/* set canceled */
if (ptd->cancelstate == PTHREAD_CANCEL_ENABLE)
{
ptd->canceled = 1;
if (ptd->canceltype == PTHREAD_CANCEL_ASYNCHRONOUS)
{
            /*
             * When exiting via pthread_cancel,
             * invoke the pushed cleanup handlers.
             */
while (ptd->cleanup != RT_NULL)
{
cleanup = ptd->cleanup;
ptd->cleanup = cleanup->next;
cleanup->cleanup_func(cleanup->parameter);
/* release this cleanup function */
rt_free(cleanup);
}
            /* recycle resources immediately or defer to the joiner, depending on "detachstate" */
if (ptd->attr.detachstate == PTHREAD_CREATE_JOINABLE)
{
                /* signal the joiner */
rt_sem_release(ptd->joinable_sem);
}
else
{
/* release pthread resource */
_pthread_data_destroy(ptd);
}
            /*
             * second: detach the thread.
             * The thread will be removed from the scheduler list and, because a
             * cleanup function (_pthread_cleanup) is attached to it, it will be
             * moved to the defunct thread list and handled by the idle thread.
             */
rt_thread_detach(tid);
}
}
return 0;
}
RTM_EXPORT(pthread_cancel);