🎯 Sync smart & scheduler codes (#8537)

Signed-off-by: Shell <smokewood@qq.com>
Co-authored-by: xqyjlj <xqyjlj@126.com>

parent 6fe69d7431
commit 71560bafb5
@@ -10,11 +10,11 @@
 #include <stdint.h>
 #include <stdio.h>
-#include <stdlib.h>
+#include <rtthread.h>
 
 int main(void)
 {
-    printf("Hello RT-Thread!\n");
+    rt_kprintf("Hello RT-Thread!\n");
 
     return 0;
 }
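The sample now prints through RT-Thread's kernel console instead of the C library. A minimal sketch of the resulting file, assuming nothing else lives in this main.c:

/* hello world on RT-Thread: rt_kprintf() writes to the kernel console,
 * so <stdlib.h> is no longer needed and <rtthread.h> is. */
#include <stdint.h>
#include <stdio.h>
#include <rtthread.h>

int main(void)
{
    rt_kprintf("Hello RT-Thread!\n");

    return 0;
}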
@@ -47,7 +47,7 @@ void rt_hw_secondary_cpu_up(void)
     *plat_boot_reg-- = (void *)(size_t)-1;
     *plat_boot_reg = (void *)entry;
     rt_hw_dsb();
-    rt_hw_ipi_send(0, 1 << 1);
+    rt_hw_ipi_send(0, RT_CPU_MASK ^ (1 << rt_hw_cpu_id()));
 }
 
 /* Interface */
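The old mask woke only core 1; the new expression targets every configured core except the caller. A hedged sketch of the arithmetic (assuming a 4-core build where RT_CPU_MASK is 0x0F):

#include <rtthread.h>
#include <rthw.h>

/* Illustration only: compute the "all cores but me" IPI mask used above.
 * On core 0 of a 4-core system: 0x0F ^ (1 << 0) == 0x0E, i.e. cores 1-3. */
rt_ubase_t other_cores_mask(void)
{
    return RT_CPU_MASK ^ (1 << rt_hw_cpu_id());
}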
@@ -352,7 +352,7 @@ static int dfs_romfs_getdents(struct dfs_file *file, struct dirent *dirp, uint32
 
     d->d_namlen = rt_strlen(name);
     d->d_reclen = (rt_uint16_t)sizeof(struct dirent);
-    rt_strncpy(d->d_name, name, DFS_PATH_MAX);
+    rt_strncpy(d->d_name, name, DIRENT_NAME_MAX);
 
     /* move to next position */
     ++ file->fpos;
@@ -822,7 +822,10 @@ static int dfs_page_insert(struct dfs_page *page)
     rt_list_insert_before(&aspace->list_inactive, &page->space_node);
     aspace->pages_count ++;
 
-    RT_ASSERT(_dfs_page_insert(aspace, page) == 0);
+    if (_dfs_page_insert(aspace, page))
+    {
+        RT_ASSERT(0);
+    }
 
     if (aspace->pages_count > RT_PAGECACHE_ASPACE_COUNT)
     {
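This rewrite matters because RT_ASSERT typically compiles away in release builds, so an expression with a side effect must never sit inside it. The pattern, isolated:

/* Anti-pattern: the insertion silently disappears when asserts compile out. */
RT_ASSERT(_dfs_page_insert(aspace, page) == 0);

/* Fix: always evaluate the call; assert only on its result. */
if (_dfs_page_insert(aspace, page))
{
    RT_ASSERT(0);
}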
@@ -21,9 +21,7 @@ static void _rt_pipe_resume_writer(struct rt_audio_pipe *pipe)
     RT_ASSERT(pipe->flag & RT_PIPE_FLAG_BLOCK_WR);
 
     /* get suspended thread */
-    thread = rt_list_entry(pipe->suspended_write_list.next,
-                           struct rt_thread,
-                           tlist);
+    thread = RT_THREAD_LIST_NODE_ENTRY(pipe->suspended_write_list.next);
 
     /* resume the write thread */
     rt_thread_resume(thread);

@@ -73,7 +71,7 @@ static rt_ssize_t rt_pipe_read(rt_device_t dev,
         rt_thread_suspend(thread);
         /* waiting on suspended read list */
         rt_list_insert_before(&(pipe->suspended_read_list),
-                              &(thread->tlist));
+                              &RT_THREAD_LIST_NODE(thread));
         rt_hw_interrupt_enable(level);
 
         rt_schedule();

@@ -103,9 +101,7 @@ static void _rt_pipe_resume_reader(struct rt_audio_pipe *pipe)
     RT_ASSERT(pipe->flag & RT_PIPE_FLAG_BLOCK_RD);
 
     /* get suspended thread */
-    thread = rt_list_entry(pipe->suspended_read_list.next,
-                           struct rt_thread,
-                           tlist);
+    thread = RT_THREAD_LIST_NODE_ENTRY(pipe->suspended_read_list.next);
 
     /* resume the read thread */
     rt_thread_resume(thread);

@@ -161,7 +157,7 @@ static rt_ssize_t rt_pipe_write(rt_device_t dev,
         rt_thread_suspend(thread);
         /* waiting on suspended read list */
         rt_list_insert_before(&(pipe->suspended_write_list),
-                              &(thread->tlist));
+                              &RT_THREAD_LIST_NODE(thread));
         rt_hw_interrupt_enable(level);
 
         rt_schedule();
@@ -13,18 +13,22 @@
 #include <rtconfig.h>
 
 /**
- * Completion
+ * Completion - A tiny IPC implementation for resource-constrained scenarios
+ *
+ * It's an IPC using one CPU word with the encoding:
+ *
+ * BIT     | MAX-1 ----------------- 1 |       0        |
+ * CONTENT |   suspended_thread & ~1   | completed flag |
  */
 
 struct rt_completion
 {
-    rt_uint32_t flag;
-    /* suspended list */
-    rt_list_t suspended_list;
-    struct rt_spinlock spinlock;
+    /* suspended thread, and completed flag */
+    rt_base_t susp_thread_n_flag;
 };
 
+#define RT_COMPLETION_INIT(comp) {0}
+
 void rt_completion_init(struct rt_completion *completion);
 rt_err_t rt_completion_wait(struct rt_completion *completion,
                             rt_int32_t timeout);
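The packing is sound because thread control blocks are at least word-aligned, so bit 0 of a valid rt_thread pointer is always zero and can carry the completed flag. A sketch of the encoding, mirroring the macros added in completion.c below:

#include <rtthread.h>

/* Pack a suspended-thread pointer and a completed flag into one CPU word.
 * Pointer alignment guarantees bit 0 of the pointer is free for the flag. */
rt_base_t comp_pack(rt_thread_t thread, rt_base_t completed)
{
    return (((rt_base_t)thread) & ~1) | (completed & 1);
}

rt_thread_t comp_thread(rt_base_t word) { return (rt_thread_t)(word & ~1); }
rt_base_t   comp_flag(rt_base_t word)   { return word & 1; }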
@@ -8,6 +8,6 @@ if not GetDepend('RT_USING_HEAP'):
     SrcRemove(src, 'dataqueue.c')
     SrcRemove(src, 'pipe.c')
 
-group = DefineGroup('DeviceDrivers', src, depend = ['RT_USING_DEVICE_IPC'], CPPPATH = CPPPATH)
+group = DefineGroup('DeviceDrivers', src, depend = ['RT_USING_DEVICE_IPC'], CPPPATH = CPPPATH, LOCAL_CPPDEFINES=['__RT_IPC_SOURCE__'])
 
 Return('group')
@@ -8,13 +8,24 @@
  * 2012-09-30     Bernard      first version.
  * 2021-08-18     chenyingchun add comments
  * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
+ * 2024-01-25     Shell        reduce resource usage in completion for better synchronization
+ *                             and smaller footprint.
  */
 
+#define DBG_TAG "drivers.ipc"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
 #include <rthw.h>
 #include <rtdevice.h>
 
 #define RT_COMPLETED    1
 #define RT_UNCOMPLETED  0
+#define RT_COMPLETION_FLAG(comp) ((comp)->susp_thread_n_flag & 1)
+#define RT_COMPLETION_THREAD(comp) ((rt_thread_t)((comp)->susp_thread_n_flag & ~1))
+#define RT_COMPLETION_NEW_STAT(thread, flag) (((flag) & 1) | (((rt_base_t)thread) & ~1))
+
+static struct rt_spinlock _completion_lock = RT_SPINLOCK_INIT;
 
 /**
  * @brief This function will initialize a completion object.

@@ -23,14 +34,9 @@
  */
 void rt_completion_init(struct rt_completion *completion)
 {
-    rt_base_t level;
     RT_ASSERT(completion != RT_NULL);
 
-    rt_spin_lock_init(&(completion->spinlock));
-    level = rt_spin_lock_irqsave(&(completion->spinlock));
-    completion->flag = RT_UNCOMPLETED;
-    rt_list_init(&completion->suspended_list);
-    rt_spin_unlock_irqrestore(&(completion->spinlock), level);
+    completion->susp_thread_n_flag = RT_COMPLETION_NEW_STAT(RT_NULL, RT_UNCOMPLETED);
 }
 RTM_EXPORT(rt_completion_init);

@@ -64,11 +70,11 @@ rt_err_t rt_completion_wait(struct rt_completion *completion,
     result = RT_EOK;
     thread = rt_thread_self();
 
-    level = rt_spin_lock_irqsave(&(completion->spinlock));
-    if (completion->flag != RT_COMPLETED)
+    level = rt_spin_lock_irqsave(&_completion_lock);
+    if (RT_COMPLETION_FLAG(completion) != RT_COMPLETED)
     {
         /* only one thread can suspend on complete */
-        RT_ASSERT(rt_list_isempty(&(completion->suspended_list)));
+        RT_ASSERT(RT_COMPLETION_THREAD(completion) == RT_NULL);
 
         if (timeout == 0)
         {

@@ -81,10 +87,11 @@ rt_err_t rt_completion_wait(struct rt_completion *completion,
             thread->error = RT_EOK;
 
             /* suspend thread */
-            rt_thread_suspend_with_flag(thread, RT_UNINTERRUPTIBLE);
-            /* add to suspended list */
-            rt_list_insert_before(&(completion->suspended_list),
-                                  &(thread->tlist));
+            result = rt_thread_suspend_with_flag(thread, RT_UNINTERRUPTIBLE);
+            if (result == RT_EOK)
+            {
+                /* add to suspended thread */
+                completion->susp_thread_n_flag = RT_COMPLETION_NEW_STAT(thread, RT_UNCOMPLETED);
 
             /* current context checking */
             RT_DEBUG_NOT_IN_INTERRUPT;

@@ -99,7 +106,7 @@ rt_err_t rt_completion_wait(struct rt_completion *completion,
                 rt_timer_start(&(thread->thread_timer));
             }
             /* enable interrupt */
-            rt_spin_unlock_irqrestore(&(completion->spinlock), level);
+            rt_spin_unlock_irqrestore(&_completion_lock, level);
 
             /* do schedule */
             rt_schedule();

@@ -107,14 +114,16 @@ rt_err_t rt_completion_wait(struct rt_completion *completion,
             /* thread is waked up */
             result = thread->error;
 
-            level = rt_spin_lock_irqsave(&(completion->spinlock));
+            level = rt_spin_lock_irqsave(&_completion_lock);
         }
     }
-    /* clean completed flag */
-    completion->flag = RT_UNCOMPLETED;
+    }
+
+    /* clean completed flag & remove susp_thread on the case of waking by timeout */
+    completion->susp_thread_n_flag = RT_COMPLETION_NEW_STAT(RT_NULL, RT_UNCOMPLETED);
 
 __exit:
-    rt_spin_unlock_irqrestore(&(completion->spinlock), level);
+    rt_spin_unlock_irqrestore(&_completion_lock, level);
 
     return result;
 }

@@ -128,35 +137,33 @@ RTM_EXPORT(rt_completion_wait);
 void rt_completion_done(struct rt_completion *completion)
 {
     rt_base_t level;
+    rt_err_t error;
+    rt_thread_t suspend_thread;
     RT_ASSERT(completion != RT_NULL);
 
-    if (completion->flag == RT_COMPLETED)
+    level = rt_spin_lock_irqsave(&_completion_lock);
+    if (RT_COMPLETION_FLAG(completion) == RT_COMPLETED)
+    {
+        rt_spin_unlock_irqrestore(&_completion_lock, level);
         return;
+    }
 
-    level = rt_spin_lock_irqsave(&(completion->spinlock));
-    completion->flag = RT_COMPLETED;
-
-    if (!rt_list_isempty(&(completion->suspended_list)))
+    suspend_thread = RT_COMPLETION_THREAD(completion);
+    if (suspend_thread)
     {
         /* there is one thread in suspended list */
-        struct rt_thread *thread;
-
-        /* get thread entry */
-        thread = rt_list_entry(completion->suspended_list.next,
-                               struct rt_thread,
-                               tlist);
-
         /* resume it */
-        rt_thread_resume(thread);
-        rt_spin_unlock_irqrestore(&(completion->spinlock), level);
-
-        /* perform a schedule */
-        rt_schedule();
-    }
-    else
-    {
-        rt_spin_unlock_irqrestore(&(completion->spinlock), level);
+        error = rt_thread_resume(suspend_thread);
+        if (error)
+        {
+            LOG_D("%s: failed to resume thread", __func__);
+        }
     }
+
+    completion->susp_thread_n_flag = RT_COMPLETION_NEW_STAT(RT_NULL, RT_COMPLETED);
+
+    rt_spin_unlock_irqrestore(&_completion_lock, level);
 }
 RTM_EXPORT(rt_completion_done);
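The public API is unchanged by the rework; only the footprint shrinks from flag + list + per-object spinlock to one word guarded by a global lock. A minimal usage sketch (the ISR-side call is an assumption about the typical driver pattern, not code from this commit):

#include <rtdevice.h>

static struct rt_completion tx_done;

/* e.g. called from a DMA-complete interrupt */
void producer_isr(void)
{
    rt_completion_done(&tx_done);   /* wake the single waiter, set the flag */
}

void consumer_thread(void)
{
    rt_completion_init(&tx_done);
    if (rt_completion_wait(&tx_done, RT_WAITING_FOREVER) == RT_EOK)
    {
        /* the condition signalled by producer_isr() now holds */
    }
}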
@@ -8,6 +8,7 @@
  * 2012-09-30     Bernard      first version.
  * 2016-10-31     armink       fix some resume push and pop thread bugs
  * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
+ * 2024-01-25     Shell        porting to susp_list API
  */
 
 #include <rthw.h>

@@ -121,8 +122,10 @@ rt_err_t rt_data_queue_push(struct rt_data_queue *queue,
         thread->error = RT_EOK;
 
         /* suspend thread on the push list */
-        rt_thread_suspend_with_flag(thread, RT_UNINTERRUPTIBLE);
-        rt_list_insert_before(&(queue->suspended_push_list), &(thread->tlist));
+        result = rt_thread_suspend_to_list(thread, &queue->suspended_push_list,
+                                           RT_IPC_FLAG_FIFO, RT_UNINTERRUPTIBLE);
+        if (result == RT_EOK)
+        {
         /* start timer */
         if (timeout > 0)
         {

@@ -140,8 +143,11 @@ rt_err_t rt_data_queue_push(struct rt_data_queue *queue,
         rt_schedule();
 
         /* thread is waked up */
-        result = thread->error;
         level = rt_spin_lock_irqsave(&(queue->spinlock));
+
+        /* error may be modified by waker, so take the lock before accessing it */
+        result = thread->error;
+        }
         if (result != RT_EOK) goto __exit;
     }

@@ -159,15 +165,10 @@ rt_err_t rt_data_queue_push(struct rt_data_queue *queue,
     }
 
     /* there is at least one thread in suspended list */
-    if (!rt_list_isempty(&(queue->suspended_pop_list)))
+    if (rt_susp_list_dequeue(&queue->suspended_push_list,
+                             RT_THREAD_RESUME_RES_THR_ERR))
     {
-        /* get thread entry */
-        thread = rt_list_entry(queue->suspended_pop_list.next,
-                               struct rt_thread,
-                               tlist);
-
-        /* resume it */
-        rt_thread_resume(thread);
+        /* unlock and perform a schedule */
         rt_spin_unlock_irqrestore(&(queue->spinlock), level);
 
         /* perform a schedule */

@@ -239,8 +240,10 @@ rt_err_t rt_data_queue_pop(struct rt_data_queue *queue,
         thread->error = RT_EOK;
 
         /* suspend thread on the pop list */
-        rt_thread_suspend_with_flag(thread, RT_UNINTERRUPTIBLE);
-        rt_list_insert_before(&(queue->suspended_pop_list), &(thread->tlist));
+        result = rt_thread_suspend_to_list(thread, &queue->suspended_pop_list,
+                                           RT_IPC_FLAG_FIFO, RT_UNINTERRUPTIBLE);
+        if (result == RT_EOK)
+        {
        /* start timer */
         if (timeout > 0)
         {

@@ -258,11 +261,12 @@ rt_err_t rt_data_queue_pop(struct rt_data_queue *queue,
         rt_schedule();
 
         /* thread is waked up */
-        result = thread->error;
         level = rt_spin_lock_irqsave(&(queue->spinlock));
+        result = thread->error;
         if (result != RT_EOK)
             goto __exit;
     }
+    }
 
     *data_ptr = queue->queue[queue->get_index].data_ptr;
     *size = queue->queue[queue->get_index].data_size;

@@ -280,15 +284,10 @@ rt_err_t rt_data_queue_pop(struct rt_data_queue *queue,
     if (rt_data_queue_len(queue) <= queue->lwm)
     {
         /* there is at least one thread in suspended list */
-        if (!rt_list_isempty(&(queue->suspended_push_list)))
+        if (rt_susp_list_dequeue(&queue->suspended_push_list,
+                                 RT_THREAD_RESUME_RES_THR_ERR))
         {
-            /* get thread entry */
-            thread = rt_list_entry(queue->suspended_push_list.next,
-                                   struct rt_thread,
-                                   tlist);
-
-            /* resume it */
-            rt_thread_resume(thread);
+            /* unlock and perform a schedule */
             rt_spin_unlock_irqrestore(&(queue->spinlock), level);
 
             /* perform a schedule */

@@ -364,7 +363,6 @@ RTM_EXPORT(rt_data_queue_peek);
 void rt_data_queue_reset(struct rt_data_queue *queue)
 {
     rt_base_t level;
-    struct rt_thread *thread;
 
     RT_ASSERT(queue != RT_NULL);
     RT_ASSERT(queue->magic == DATAQUEUE_MAGIC);

@@ -382,52 +380,13 @@ void rt_data_queue_reset(struct rt_data_queue *queue)
     /* wakeup all suspend threads */
 
     /* resume on pop list */
-    while (!rt_list_isempty(&(queue->suspended_pop_list)))
-    {
-        /* disable interrupt */
-        level = rt_spin_lock_irqsave(&(queue->spinlock));
-
-        /* get next suspend thread */
-        thread = rt_list_entry(queue->suspended_pop_list.next,
-                               struct rt_thread,
-                               tlist);
-        /* set error code to -RT_ERROR */
-        thread->error = -RT_ERROR;
-
-        /*
-         * resume thread
-         * In rt_thread_resume function, it will remove current thread from
-         * suspend list
-         */
-        rt_thread_resume(thread);
-
-        /* enable interrupt */
-        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
-    }
+    rt_susp_list_resume_all_irq(&queue->suspended_pop_list, RT_ERROR,
+                                &(queue->spinlock));
 
     /* resume on push list */
-    while (!rt_list_isempty(&(queue->suspended_push_list)))
-    {
-        /* disable interrupt */
-        level = rt_spin_lock_irqsave(&(queue->spinlock));
-
-        /* get next suspend thread */
-        thread = rt_list_entry(queue->suspended_push_list.next,
-                               struct rt_thread,
-                               tlist);
-        /* set error code to -RT_ERROR */
-        thread->error = -RT_ERROR;
-
-        /*
-         * resume thread
-         * In rt_thread_resume function, it will remove current thread from
-         * suspend list
-         */
-        rt_thread_resume(thread);
-
-        /* enable interrupt */
-        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
-    }
+    rt_susp_list_resume_all_irq(&queue->suspended_push_list, RT_ERROR,
+                                &(queue->spinlock));
 
     rt_exit_critical();
 
     rt_schedule();
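Across this file the commit replaces the open-coded suspend-then-enqueue and dequeue-then-resume sequences with the new susp_list helpers. Judging from the call sites (the exact semantics live in the scheduler rework this commit syncs), the before/after pattern is roughly:

/* Sketch of the migration; "obj" is a hypothetical IPC object.
 *
 * Before: two racy-by-construction steps every IPC object open-coded.
 *   rt_thread_suspend_with_flag(thread, RT_UNINTERRUPTIBLE);
 *   rt_list_insert_before(&obj->suspend_list, &(thread->tlist));
 *
 * After: one call that suspends and enqueues under the scheduler lock. */
rt_err_t result = rt_thread_suspend_to_list(thread, &obj->suspend_list,
                                            RT_IPC_FLAG_FIFO, RT_UNINTERRUPTIBLE);

/* And on the wake side, dequeue + resume collapse into one helper that
 * returns the woken thread (or RT_NULL when the list was empty). */
if (rt_susp_list_dequeue(&obj->suspend_list, RT_THREAD_RESUME_RES_THR_ERR))
{
    rt_schedule();
}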
@@ -64,7 +64,10 @@ static void _workqueue_thread_entry(void *parameter)
         {
             /* no software timer exist, suspend self. */
             rt_thread_suspend_with_flag(rt_thread_self(), RT_UNINTERRUPTIBLE);
+
+            /* release lock after suspend so we will not lost any wakeups */
             rt_spin_unlock_irqrestore(&(queue->spinlock), level);
+
             rt_schedule();
             continue;
         }

@@ -105,13 +108,11 @@ static rt_err_t _workqueue_submit_work(struct rt_workqueue *queue,
         work->workqueue = queue;
 
         /* whether the workqueue is doing work */
-        if (queue->work_current == RT_NULL &&
-            ((queue->work_thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK))
+        if (queue->work_current == RT_NULL)
         {
-            /* resume work thread */
+            /* resume work thread, and do a re-schedule if succeed */
             rt_thread_resume(queue->work_thread);
             rt_spin_unlock_irqrestore(&(queue->spinlock), level);
-            rt_schedule();
         }
         else
         {

@@ -187,13 +188,11 @@ static void _delayed_work_timeout_handler(void *parameter)
         work->flags |= RT_WORK_STATE_PENDING;
     }
     /* whether the workqueue is doing work */
-    if (queue->work_current == RT_NULL &&
-        ((queue->work_thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK))
+    if (queue->work_current == RT_NULL)
     {
-        /* resume work thread */
+        /* resume work thread, and do a re-schedule if succeed */
         rt_thread_resume(queue->work_thread);
         rt_spin_unlock_irqrestore(&(queue->spinlock), level);
-        rt_schedule();
     }
     else
     {

@@ -346,13 +345,11 @@ rt_err_t rt_workqueue_urgent_work(struct rt_workqueue *queue, struct rt_work *wo
     rt_list_remove(&(work->list));
     rt_list_insert_after(&queue->work_list, &(work->list));
     /* whether the workqueue is doing work */
-    if (queue->work_current == RT_NULL &&
-        ((queue->work_thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK))
+    if (queue->work_current == RT_NULL)
    {
-        /* resume work thread */
+        /* resume work thread, and do a re-schedule if succeed */
         rt_thread_resume(queue->work_thread);
         rt_spin_unlock_irqrestore(&(queue->spinlock), level);
-        rt_schedule();
     }
     else
     {
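All three submit paths drop the peek at the worker thread's suspend state. The apparent rationale (an assumption based on the diff and its comments, since the resume rework itself is in the suppressed scheduler file) is that rt_thread_resume now safely rejects a thread that is not suspended, and handles the re-schedule, so callers stop touching scheduler-private state:

/* Before: caller inspected thread->stat, which the scheduler sync makes
 * private (RT_SCHED_CTX/RT_SCHED_PRIV accessors elsewhere in this commit).
 * After: the caller checks only its own bookkeeping and lets resume decide. */
if (queue->work_current == RT_NULL)
{
    /* resume work thread, and do a re-schedule if succeed */
    rt_thread_resume(queue->work_thread);
}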
@@ -216,15 +216,23 @@ long list_thread(void)
             rt_uint8_t *ptr;
 
 #ifdef RT_USING_SMP
-            if (thread->oncpu != RT_CPU_DETACHED)
-                rt_kprintf("%-*.*s %3d %3d %4d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->oncpu, thread->bind_cpu, thread->current_priority);
+            /* no synchronization applied since it's only for debug */
+            if (RT_SCHED_CTX(thread).oncpu != RT_CPU_DETACHED)
+                rt_kprintf("%-*.*s %3d %3d %4d ", maxlen, RT_NAME_MAX,
+                           thread->parent.name, RT_SCHED_CTX(thread).oncpu,
+                           RT_SCHED_CTX(thread).bind_cpu,
+                           RT_SCHED_PRIV(thread).current_priority);
             else
-                rt_kprintf("%-*.*s N/A %3d %4d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->bind_cpu, thread->current_priority);
+                rt_kprintf("%-*.*s N/A %3d %4d ", maxlen, RT_NAME_MAX,
+                           thread->parent.name,
+                           RT_SCHED_CTX(thread).bind_cpu,
+                           RT_SCHED_PRIV(thread).current_priority);
 
 #else
-            rt_kprintf("%-*.*s %3d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->current_priority);
+            /* no synchronization applied since it's only for debug */
+            rt_kprintf("%-*.*s %3d ", maxlen, RT_NAME_MAX, thread->parent.name, RT_SCHED_PRIV(thread).current_priority);
 #endif /*RT_USING_SMP*/
-            stat = (thread->stat & RT_THREAD_STAT_MASK);
+            stat = (RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK);
             if (stat == RT_THREAD_READY)        rt_kprintf(" ready  ");
             else if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK) rt_kprintf(" suspend");
             else if (stat == RT_THREAD_INIT)    rt_kprintf(" init   ");

@@ -250,7 +258,7 @@ long list_thread(void)
                        thread->stack_size,
                        (thread->stack_size - ((rt_ubase_t) ptr - (rt_ubase_t) thread->stack_addr)) * 100
                        / thread->stack_size,
-                       thread->remaining_tick,
+                       RT_SCHED_PRIV(thread).remaining_tick,
                        rt_strerror(thread->error),
                        thread);
 #endif

@@ -263,21 +271,6 @@ long list_thread(void)
     return 0;
 }
 
-static void show_wait_queue(struct rt_list_node *list)
-{
-    struct rt_thread *thread;
-    struct rt_list_node *node;
-
-    for (node = list->next; node != list; node = node->next)
-    {
-        thread = rt_list_entry(node, struct rt_thread, tlist);
-        rt_kprintf("%.*s", RT_NAME_MAX, thread->parent.name);
-
-        if (node->next != list)
-            rt_kprintf("/");
-    }
-}
-
 #ifdef RT_USING_SEMAPHORE
 long list_sem(void)
 {

@@ -326,7 +319,7 @@ long list_sem(void)
                        sem->parent.parent.name,
                        sem->value,
                        rt_list_len(&sem->parent.suspend_thread));
-            show_wait_queue(&(sem->parent.suspend_thread));
+            rt_susp_list_print(&(sem->parent.suspend_thread));
             rt_kprintf("\n");
         }
         else

@@ -395,7 +388,7 @@ long list_event(void)
                        e->parent.parent.name,
                        e->set,
                        rt_list_len(&e->parent.suspend_thread));
-            show_wait_queue(&(e->parent.suspend_thread));
+            rt_susp_list_print(&(e->parent.suspend_thread));
             rt_kprintf("\n");
         }
         else

@@ -464,7 +457,7 @@ long list_mutex(void)
                        m->hold,
                        m->priority,
                        rt_list_len(&m->parent.suspend_thread));
-            show_wait_queue(&(m->parent.suspend_thread));
+            rt_susp_list_print(&(m->parent.suspend_thread));
             rt_kprintf("\n");
         }
         else

@@ -537,7 +530,7 @@ long list_mailbox(void)
                        m->entry,
                        m->size,
                        rt_list_len(&m->parent.suspend_thread));
-            show_wait_queue(&(m->parent.suspend_thread));
+            rt_susp_list_print(&(m->parent.suspend_thread));
             rt_kprintf("\n");
         }
         else

@@ -607,7 +600,7 @@ long list_msgqueue(void)
                        m->parent.parent.name,
                        m->entry,
                        rt_list_len(&m->parent.suspend_thread));
-            show_wait_queue(&(m->parent.suspend_thread));
+            rt_susp_list_print(&(m->parent.suspend_thread));
             rt_kprintf("\n");
         }
         else

@@ -744,7 +737,7 @@ long list_mempool(void)
                        mp->block_total_count,
                        mp->block_free_count,
                        suspend_thread_count);
-            show_wait_queue(&(mp->suspend_thread));
+            rt_susp_list_print(&(mp->suspend_thread));
             rt_kprintf("\n");
         }
         else
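The kernel-provided rt_susp_list_print() replaces finsh's local show_wait_queue(). Going by the removed helper it supersedes, its output is presumably equivalent to printing the suspended threads as "name1/name2/...":

/* Presumed behavior of rt_susp_list_print(), reconstructed from the removed
 * show_wait_queue() helper: walk the suspend list and print thread names
 * separated by '/'. (Sketch; the real implementation lives in the kernel.) */
static void susp_list_print_sketch(rt_list_t *list)
{
    struct rt_list_node *node;
    struct rt_thread *thread;

    for (node = list->next; node != list; node = node->next)
    {
        thread = RT_THREAD_LIST_NODE_ENTRY(node);
        rt_kprintf("%.*s", RT_NAME_MAX, thread->parent.name);

        if (node->next != list)
            rt_kprintf("/");
    }
}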
@@ -46,7 +46,17 @@ int8_t rt_tz_is_dst(void);
 
 struct itimerspec;
 
-#if defined(_GNU_SOURCE) && (defined(__x86_64__) || defined(__i386__))
+/* 'struct timeval' is defined on __x86_64__ toolchain */
+#if !defined(__x86_64__) && !defined(_TIMEVAL_DEFINED)
+#define _TIMEVAL_DEFINED
+struct timeval
+{
+    time_t tv_sec;       /* seconds */
+    suseconds_t tv_usec; /* and microseconds */
+};
+#endif /* _TIMEVAL_DEFINED */
+
+#if defined(_GNU_SOURCE) && (defined(__x86_64__) || defined(__i386__) || defined(RT_USING_SMART))
 /* linux x86 platform gcc use! */
 #define _TIMEVAL_DEFINED
 /* Values for the first argument to `getitimer' and `setitimer'.  */

@@ -71,16 +81,7 @@ struct itimerval
     /* Time to the next timer expiration.  */
     struct timeval it_value;
 };
-#endif /* defined(_GNU_SOURCE) && (defined(__x86_64__) || defined(__i386__)) */
-
-#ifndef _TIMEVAL_DEFINED
-#define _TIMEVAL_DEFINED
-struct timeval
-{
-    time_t tv_sec;       /* seconds */
-    suseconds_t tv_usec; /* and microseconds */
-};
-#endif /* _TIMEVAL_DEFINED */
+#endif /* defined(_GNU_SOURCE) && (defined(__x86_64__) || defined(__i386__)) || defined(RT_USING_SMART) */
 
 #if defined(__ARMCC_VERSION) || defined(_WIN32) || (defined(__ICCARM__) && (__VER__ < 8010001))
 struct timespec
@@ -203,7 +203,7 @@ void dlmodule_destroy_subthread(struct rt_dlmodule *module, rt_thread_t thread)
     rt_enter_critical();
 
     /* remove thread from thread_list (ready or defunct thread list) */
-    rt_list_remove(&(thread->tlist));
+    rt_list_remove(&RT_THREAD_LIST_NODE(thread));
 
     if ((thread->stat & RT_THREAD_STAT_MASK) != RT_THREAD_CLOSE &&
         (thread->thread_timer.parent.type == (RT_Object_Class_Static | RT_Object_Class_Timer)))
@@ -285,7 +285,7 @@ rt_err_t _pthread_cond_timedwait(pthread_cond_t *cond,
     rt_thread_suspend(thread);
 
     /* Only support FIFO */
-    rt_list_insert_before(&(sem->parent.suspend_thread), &(thread->tlist));
+    rt_list_insert_before(&(sem->parent.suspend_thread), &RT_THREAD_LIST_NODE(thread));
 
     /**
     rt_ipc_list_suspend(&(sem->parent.suspend_thread),
@@ -28,6 +28,22 @@
 
 #define FUTEX_CLOCK_REALTIME 256
 
+#define FUTEX_WAITERS    0x80000000
+#define FUTEX_OWNER_DIED 0x40000000
+#define FUTEX_TID_MASK   0x3fffffff
+
+struct robust_list
+{
+    struct robust_list *next;
+};
+
+struct robust_list_head
+{
+    struct robust_list list;
+    long futex_offset;
+    struct robust_list *list_op_pending;
+};
+
 /* for pmutex op */
 #define PMUTEX_INIT 0
 #define PMUTEX_LOCK 1
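These definitions mirror the Linux robust-futex ABI: a dying owner's held futexes are walked through a user-space list so that blocked waiters can observe FUTEX_OWNER_DIED instead of hanging. A hedged sketch of how the word's bits compose (the TID value is illustrative):

/* A futex word owned by TID 42, with waiters, after the owner died:
 * bits 0-29 carry the owner TID, the top two bits are status flags. */
unsigned int word = 42;        /* owner TID, within FUTEX_TID_MASK */
word |= FUTEX_WAITERS;         /* at least one thread blocks on this futex */
word |= FUTEX_OWNER_DIED;      /* set during the robust-list walk on exit */
/* recover the owner: (word & FUTEX_TID_MASK) == 42 */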
@@ -168,7 +168,6 @@ enum lwp_exit_request_type
 struct termios *get_old_termios(void);
 void lwp_setcwd(char *buf);
 char *lwp_getcwd(void);
-void lwp_request_thread_exit(rt_thread_t thread_to_exit);
 int lwp_check_exit_request(void);
 void lwp_terminate(struct rt_lwp *lwp);

@@ -213,52 +212,10 @@ pid_t exec(char *filename, int debug, int argc, char **argv);
 /* ctime lwp API */
 int timer_list_free(rt_list_t *timer_list);
 
-struct rt_futex;
-rt_err_t lwp_futex(struct rt_lwp *lwp, struct rt_futex *futex, int *uaddr, int op, int val, const struct timespec *timeout);
-
-#ifdef ARCH_MM_MMU
-struct __pthread {
-    /* Part 1 -- these fields may be external or
-     * internal (accessed via asm) ABI. Do not change. */
-    struct pthread *self;
-    uintptr_t *dtv;
-    struct pthread *prev, *next; /* non-ABI */
-    uintptr_t sysinfo;
-    uintptr_t canary, canary2;
-
-    /* Part 2 -- implementation details, non-ABI. */
-    int tid;
-    int errno_val;
-    volatile int detach_state;
-    volatile int cancel;
-    volatile unsigned char canceldisable, cancelasync;
-    unsigned char tsd_used:1;
-    unsigned char dlerror_flag:1;
-    unsigned char *map_base;
-    size_t map_size;
-    void *stack;
-    size_t stack_size;
-    size_t guard_size;
-    void *result;
-    struct __ptcb *cancelbuf;
-    void **tsd;
-    struct {
-        volatile void *volatile head;
-        long off;
-        volatile void *volatile pending;
-    } robust_list;
-    volatile int timer_id;
-    locale_t locale;
-    volatile int killlock[1];
-    char *dlerror_buf;
-    void *stdio_locks;
-
-    /* Part 3 -- the positions of these fields relative to
-     * the end of the structure is external and internal ABI. */
-    uintptr_t canary_at_end;
-    uintptr_t *dtv_copy;
-};
-#endif
+rt_err_t lwp_futex_init(void);
+rt_err_t lwp_futex(struct rt_lwp *lwp, int *uaddr, int op, int val,
+                   const struct timespec *timeout, int *uaddr2, int val3);
 
 #ifdef __cplusplus
 }
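The widened lwp_futex() prototype appears to follow the six-argument shape of Linux futex(2), so one entry point can serve WAIT, WAKE, REQUEUE and friends. An illustrative call under that assumption (op constants come from the futex header above; "expected" is a hypothetical value):

/* Block until *uaddr changes away from "expected" (illustrative only):
 * uaddr2 and val3 are unused by FUTEX_WAIT, mirroring futex(2). */
rt_err_t rc = lwp_futex(lwp, uaddr, FUTEX_WAIT, expected,
                        RT_NULL /* timeout */, RT_NULL /* uaddr2 */, 0 /* val3 */);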
(File diff suppressed because it is too large.)
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-11-01     Shell        Init ver.
+ */
+#ifndef __LWP_FUTEX_INTERNAL_H__
+#define __LWP_FUTEX_INTERNAL_H__
+
+#define DBG_TAG "lwp.futex"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
+#include "rt_uthash.h"
+#include "lwp_internal.h"
+#include "lwp_pid.h"
+
+#include <rtthread.h>
+#include <lwp.h>
+
+#ifdef ARCH_MM_MMU
+#include <lwp_user_mm.h>
+#endif /* ARCH_MM_MMU */
+
+struct shared_futex_key
+{
+    rt_mem_obj_t mobj;
+    rt_base_t offset;
+};
+DEFINE_RT_UTHASH_TYPE(shared_futex_entry, struct shared_futex_key, key);
+
+struct rt_futex
+{
+    union {
+        /* for private futex */
+        struct lwp_avl_struct node;
+        /* for shared futex */
+        struct shared_futex_entry entry;
+    };
+
+    rt_list_t waiting_thread;
+    struct rt_object *custom_obj;
+    rt_mutex_t mutex;
+};
+typedef struct rt_futex *rt_futex_t;
+
+rt_err_t futex_global_table_add(struct shared_futex_key *key, rt_futex_t futex);
+rt_err_t futex_global_table_find(struct shared_futex_key *key, rt_futex_t *futex);
+rt_err_t futex_global_table_delete(struct shared_futex_key *key);
+
+#endif /* __LWP_FUTEX_INTERNAL_H__ */
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-11-01     Shell        Init ver.
+ */
+
+#include "lwp_futex_internal.h"
+
+static struct shared_futex_entry *_futex_hash_head;
+
+rt_err_t futex_global_table_add(struct shared_futex_key *key, rt_futex_t futex)
+{
+    rt_err_t rc = 0;
+    struct shared_futex_entry *entry = &futex->entry;
+    futex->entry.key.mobj = key->mobj;
+    futex->entry.key.offset = key->offset;
+
+    RT_UTHASH_ADD(_futex_hash_head, key, sizeof(struct shared_futex_key), entry);
+    return rc;
+}
+
+rt_err_t futex_global_table_find(struct shared_futex_key *key, rt_futex_t *futex)
+{
+    rt_err_t rc;
+    rt_futex_t found_futex;
+    struct shared_futex_entry *entry;
+
+    RT_UTHASH_FIND(_futex_hash_head, key, sizeof(struct shared_futex_key), entry);
+    if (entry)
+    {
+        rc = RT_EOK;
+        found_futex = rt_container_of(entry, struct rt_futex, entry);
+    }
+    else
+    {
+        rc = -RT_ENOENT;
+        found_futex = RT_NULL;
+    }
+
+    *futex = found_futex;
+    return rc;
+}
+
+rt_err_t futex_global_table_delete(struct shared_futex_key *key)
+{
+    rt_err_t rc;
+    struct shared_futex_entry *entry;
+
+    RT_UTHASH_FIND(_futex_hash_head, key, sizeof(struct shared_futex_key), entry);
+    if (entry)
+    {
+        RT_UTHASH_DELETE(_futex_hash_head, entry);
+        rc = RT_EOK;
+    }
+    else
+    {
+        rc = -RT_ENOENT;
+    }
+
+    return rc;
+}
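Shared futexes are keyed by (memory object, offset) so that two processes mapping the same page resolve to the same futex. A short usage sketch of the table API above (lookup_or_track is a hypothetical caller; locking is assumed to be handled by the caller as in the suppressed futex core):

/* Hypothetical caller: register "futex" under "key" unless already tracked. */
static rt_err_t lookup_or_track(struct shared_futex_key *key, rt_futex_t futex)
{
    rt_futex_t existing;

    if (futex_global_table_find(key, &existing) == -RT_ENOENT)
    {
        /* not tracked yet: index this futex by (mobj, offset) */
        return futex_global_table_add(key, futex);
    }
    return RT_EOK;   /* someone already registered it */
}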
@@ -17,7 +17,7 @@
 
 static rt_err_t _mutex_take_safe(rt_mutex_t mtx, rt_int32_t timeout, rt_bool_t interruptable)
 {
-    DEF_RETURN_CODE(rc);
+    LWP_DEF_RETURN_CODE(rc);
     int retry;
     rt_int32_t effect_timeout;

@@ -92,19 +92,19 @@ static rt_err_t _mutex_take_safe(rt_mutex_t mtx, rt_int32_t timeout, rt_bool_t i
         RT_ASSERT(0);
     }
 
-    RETURN(rc);
+    LWP_RETURN(rc);
 }
 
 rt_err_t lwp_mutex_take_safe(rt_mutex_t mtx, rt_int32_t timeout, rt_bool_t interruptable)
 {
-    DEF_RETURN_CODE(rc);
+    LWP_DEF_RETURN_CODE(rc);
     rc = _mutex_take_safe(mtx, timeout, interruptable);
-    RETURN(rc);
+    LWP_RETURN(rc);
 }
 
 rt_err_t lwp_mutex_release_safe(rt_mutex_t mtx)
 {
-    DEF_RETURN_CODE(rc);
+    LWP_DEF_RETURN_CODE(rc);
 
     rc = rt_mutex_release(mtx);
     if (rc)

@@ -113,7 +113,7 @@ rt_err_t lwp_mutex_release_safe(rt_mutex_t mtx)
         rt_backtrace();
     }
 
-    RETURN(rc);
+    LWP_RETURN(rc);
 }
 
 rt_err_t lwp_critical_enter(struct rt_lwp *lwp)
@@ -86,13 +86,13 @@ rt_err_t lwp_critical_exit(struct rt_lwp *lwp);
  * There tend to be chances where a return value is returned without correctly init
  */
 #ifndef LWP_DEBUG
-#define DEF_RETURN_CODE(name) rt_err_t name
-#define RETURN(name) return name
+#define LWP_DEF_RETURN_CODE(name) rt_err_t name;RT_UNUSED(name)
+#define LWP_RETURN(name) return name
 
 #else
 #define _LWP_UNINITIALIZED_RC 0xbeefcafe
-#define DEF_RETURN_CODE(name) rt_err_t name = _LWP_UNINITIALIZED_RC
-#define RETURN(name) {RT_ASSERT(name != _LWP_UNINITIALIZED_RC);return name;}
+#define LWP_DEF_RETURN_CODE(name) rt_err_t name = _LWP_UNINITIALIZED_RC
+#define LWP_RETURN(name) {RT_ASSERT(name != _LWP_UNINITIALIZED_RC);return name;}
#endif /* LWP_DEBUG */
 
 #endif /* __LWP_INTERNAL_H__ */
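The rename to an LWP_ prefix avoids clashing with generic names, and the debug variant is what catches the bug class the comment describes: a return code returned without ever being assigned. How the pair behaves in a debug build:

/* With LWP_DEBUG defined, a path that forgets to assign rc trips the assert. */
rt_err_t example(void)
{
    LWP_DEF_RETURN_CODE(rc);  /* rc == _LWP_UNINITIALIZED_RC (0xbeefcafe) */

    /* ... suppose every branch forgot to set rc ... */

    LWP_RETURN(rc);           /* RT_ASSERT(rc != _LWP_UNINITIALIZED_RC) fires */
}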
@@ -8,7 +8,9 @@
  * 2019-10-12     Jesven       first version
  * 2023-07-25     Shell        Remove usage of rt_hw_interrupt API in the lwp
  * 2023-09-16     zmq810150896 Increased versatility of some features on dfs v2
+ * 2024-01-25     Shell        porting to susp_list API
  */
+#define __RT_IPC_SOURCE__
 
 #define DBG_TAG "lwp.ipc"
 #define DBG_LVL DBG_WARNING

@@ -124,11 +126,9 @@ rt_inline rt_err_t rt_channel_list_resume(rt_list_t *list)
     struct rt_thread *thread;
 
     /* get the first thread entry waiting for sending */
-    thread = rt_list_entry(list->next, struct rt_thread, tlist);
-
-    rt_thread_resume(thread);
+    thread = rt_susp_list_dequeue(list, RT_THREAD_RESUME_RES_THR_ERR);
 
-    return RT_EOK;
+    return thread ? RT_EOK : -RT_ERROR;
 }

@@ -136,15 +136,8 @@ rt_inline rt_err_t rt_channel_list_resume(rt_list_t *list)
  */
 rt_inline rt_err_t _channel_list_resume_all_locked(rt_list_t *list)
 {
-    struct rt_thread *thread;
-
     /* wakeup all suspended threads for sending */
-    while (!rt_list_isempty(list))
-    {
-        thread = rt_list_entry(list->next, struct rt_thread, tlist);
-        thread->error = -RT_ERROR;
-        rt_thread_resume(thread);
-    }
+    rt_susp_list_resume_all(list, RT_ERROR);
 
     return RT_EOK;
 }

@@ -155,12 +148,7 @@ rt_inline rt_err_t _channel_list_resume_all_locked(rt_list_t *list)
 rt_inline rt_err_t rt_channel_list_suspend(rt_list_t *list, struct rt_thread *thread)
 {
     /* suspend thread */
-    rt_err_t ret = rt_thread_suspend_with_flag(thread, RT_INTERRUPTIBLE);
-
-    if (ret == RT_EOK)
-    {
-        rt_list_insert_before(list, &(thread->tlist)); /* list end */
-    }
+    rt_err_t ret = rt_thread_suspend_to_list(thread, list, RT_IPC_FLAG_FIFO, RT_INTERRUPTIBLE);
 
     return ret;
 }

@@ -372,10 +360,13 @@ static rt_err_t wakeup_sender_wait_reply(void *object, struct rt_thread *thread)
 
 static void sender_timeout(void *parameter)
 {
+    rt_sched_lock_level_t slvl;
     struct rt_thread *thread = (struct rt_thread *)parameter;
     rt_channel_t ch;
 
-    ch = (rt_channel_t)(thread->wakeup.user_data);
+    rt_sched_lock(&slvl);
+
+    ch = (rt_channel_t)(thread->wakeup_handle.user_data);
     if (ch->stat == RT_IPC_STAT_ACTIVE && ch->reply == thread)
     {
         ch->stat = RT_IPC_STAT_IDLE;

@@ -399,14 +390,14 @@ static void sender_timeout(void *parameter)
             l = l->next;
         }
     }
-    thread->error = -RT_ETIMEOUT;
-    thread->wakeup.func = RT_NULL;
 
-    rt_list_remove(&(thread->tlist));
+    thread->wakeup_handle.func = RT_NULL;
+    thread->error = RT_ETIMEOUT;
+
     /* insert to schedule ready list */
-    rt_schedule_insert_thread(thread);
+    rt_sched_insert_thread(thread);
     /* do schedule */
-    rt_schedule();
+    rt_sched_unlock_n_resched(slvl);
 }

@@ -522,7 +513,7 @@ static rt_err_t _send_recv_timeout(rt_channel_t ch, rt_channel_msg_t data, int n
 
 static rt_err_t _do_send_recv_timeout(rt_channel_t ch, rt_channel_msg_t data, int need_reply, rt_channel_msg_t data_ret, rt_int32_t time, rt_ipc_msg_t msg)
 {
-    DEF_RETURN_CODE(rc);
+    LWP_DEF_RETURN_CODE(rc);
     rt_thread_t thread_recv;
     rt_thread_t thread_send = 0;
     void (*old_timeout_func)(void *) = 0;

@@ -627,9 +618,12 @@ static rt_err_t _do_send_recv_timeout(rt_channel_t ch, rt_channel_msg_t data, in
 
         if (!need_reply || rc == RT_EOK)
         {
-            thread_recv = rt_list_entry(ch->parent.suspend_thread.next, struct rt_thread, tlist);
+            rt_sched_lock_level_t slvl;
+            rt_sched_lock(&slvl);
+            thread_recv = RT_THREAD_LIST_NODE_ENTRY(ch->parent.suspend_thread.next);
             thread_recv->msg_ret = msg;   /* to the first suspended receiver */
             thread_recv->error = RT_EOK;
+            rt_sched_unlock(slvl);
             rt_channel_list_resume(&ch->parent.suspend_thread);
         }
         break;

@@ -706,7 +700,7 @@ rt_err_t rt_raw_channel_send_recv_timeout(rt_channel_t ch, rt_channel_msg_t data
  */
 rt_err_t rt_raw_channel_reply(rt_channel_t ch, rt_channel_msg_t data)
 {
-    DEF_RETURN_CODE(rc);
+    LWP_DEF_RETURN_CODE(rc);
     rt_ipc_msg_t msg;
     struct rt_thread *thread;
     rt_base_t level;

@@ -758,7 +752,7 @@ rt_err_t rt_raw_channel_reply(rt_channel_t ch, rt_channel_msg_t data)
         rt_schedule();
     }
 
-    RETURN(rc);
+    LWP_RETURN(rc);
 }
 
 static rt_err_t wakeup_receiver(void *object, struct rt_thread *thread)

@@ -783,24 +777,27 @@ static void receiver_timeout(void *parameter)
 {
     struct rt_thread *thread = (struct rt_thread *)parameter;
     rt_channel_t ch;
-    rt_base_t level;
+    rt_sched_lock_level_t slvl;
 
-    ch = (rt_channel_t)(thread->wakeup.user_data);
+    rt_sched_lock(&slvl);
 
-    level = rt_spin_lock_irqsave(&ch->slock);
-    ch->stat = RT_IPC_STAT_IDLE;
+    ch = (rt_channel_t)(thread->wakeup_handle.user_data);
+
     thread->error = -RT_ETIMEOUT;
-    thread->wakeup.func = RT_NULL;
+    thread->wakeup_handle.func = RT_NULL;
 
-    rt_list_remove(&(thread->tlist));
+    rt_spin_lock(&ch->slock);
+    ch->stat = RT_IPC_STAT_IDLE;
+
+    rt_list_remove(&RT_THREAD_LIST_NODE(thread));
     /* insert to schedule ready list */
-    rt_schedule_insert_thread(thread);
+    rt_sched_insert_thread(thread);
 
     _rt_channel_check_wq_wakup_locked(ch);
-    rt_spin_unlock_irqrestore(&ch->slock, level);
+    rt_spin_unlock(&ch->slock);
 
     /* do schedule */
-    rt_schedule();
+    rt_sched_unlock_n_resched(slvl);
 }

@@ -808,7 +805,7 @@ static void receiver_timeout(void *parameter)
  */
 static rt_err_t _rt_raw_channel_recv_timeout(rt_channel_t ch, rt_channel_msg_t data, rt_int32_t time)
 {
-    DEF_RETURN_CODE(rc);
+    LWP_DEF_RETURN_CODE(rc);
     struct rt_thread *thread;
     rt_ipc_msg_t msg_ret;
     void (*old_timeout_func)(void *) = 0;

@@ -839,10 +836,12 @@ static rt_err_t _rt_raw_channel_recv_timeout(rt_channel_t ch, rt_channel_msg_t d
         rt_list_remove(ch->wait_msg.next);   /* remove the message from the channel */
         if (msg_ret->need_reply)
         {
+            rt_sched_lock_level_t slvl;
+            rt_sched_lock(&slvl);
             RT_ASSERT(ch->wait_thread.next != &ch->wait_thread);
-            thread = rt_list_entry(ch->wait_thread.next, struct rt_thread, tlist);
+            thread = RT_THREAD_LIST_NODE_ENTRY(ch->wait_thread.next);
             rt_list_remove(ch->wait_thread.next);
+            rt_sched_unlock(slvl);
             ch->reply = thread;              /* record the waiting sender */
             ch->stat = RT_IPC_STAT_ACTIVE;   /* no valid suspened receivers */
         }

@@ -912,7 +911,7 @@ static rt_err_t _rt_raw_channel_recv_timeout(rt_channel_t ch, rt_channel_msg_t d
 
     rt_spin_unlock_irqrestore(&ch->slock, level);
 
-    RETURN(rc);
+    LWP_RETURN(rc);
 }
 
 rt_err_t rt_raw_channel_recv(rt_channel_t ch, rt_channel_msg_t data)
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-11-30     Shell        Add itimer support
+ */
+
+#define _GNU_SOURCE
+#include <sys/time.h>
+#undef _GNU_SOURCE
+
+#define DBG_TAG "lwp.signal"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
+#include <rthw.h>
+#include <rtthread.h>
+#include <string.h>
+
+#include "lwp_internal.h"
+#include "sys/signal.h"
+#include "syscall_generic.h"
+
+rt_err_t lwp_signal_setitimer(rt_lwp_t lwp, int which, const struct itimerspec *restrict new, struct itimerspec *restrict old)
+{
+    rt_err_t rc = RT_EOK;
+    timer_t timerid = 0;
+    int flags = 0;
+
+    if (lwp->signal.real_timer == LWP_SIG_INVALID_TIMER)
+    {
+        struct sigevent sevp = {
+            .sigev_signo = SIGALRM,
+            .sigev_notify = SIGEV_SIGNAL,
+        };
+
+        rc = timer_create(CLOCK_REALTIME_ALARM, &sevp, &timerid);
+        if (rc == RT_EOK)
+        {
+            RT_ASSERT(timerid != LWP_SIG_INVALID_TIMER);
+            lwp->signal.real_timer = timerid;
+        }
+        else
+        {
+            /* failed to create timer */
+        }
+    }
+    else
+    {
+        timerid = lwp->signal.real_timer;
+    }
+
+    if (rc == RT_EOK)
+    {
+        switch (which)
+        {
+        case ITIMER_REAL:
+            rc = timer_settime(timerid, flags, new, old);
+            break;
+        default:
+            rc = -ENOSYS;
+            LOG_W("%s() unsupported timer", __func__);
+            break;
+        }
+    }
+
+    return rc;
+}
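This kernel path lazily creates one SIGALRM-backed POSIX timer per process and arms it on demand; only ITIMER_REAL is wired up so far. For reference, the user-side POSIX API it ultimately serves looks like this (standard C, independent of this commit):

#include <stddef.h>
#include <signal.h>
#include <sys/time.h>

static void on_alarm(int sig)
{
    (void)sig;   /* SIGALRM delivered once per second */
}

int arm_real_timer(void)
{
    struct itimerval itv = {
        .it_interval = { .tv_sec = 1, .tv_usec = 0 },  /* period */
        .it_value    = { .tv_sec = 1, .tv_usec = 0 },  /* first expiry */
    };

    signal(SIGALRM, on_alarm);
    return setitimer(ITIMER_REAL, &itv, NULL);
}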
@ -14,8 +14,12 @@
|
||||||
* error
|
* error
|
||||||
* 2023-10-27 shell Format codes of sys_exit(). Fix the data racing where lock is missed
|
* 2023-10-27 shell Format codes of sys_exit(). Fix the data racing where lock is missed
|
||||||
* Add reference on pid/tid, so the resource is not freed while using.
|
* Add reference on pid/tid, so the resource is not freed while using.
|
||||||
|
* 2024-01-25 shell porting to new sched API
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
/* includes scheduler related API */
|
||||||
|
#define __RT_IPC_SOURCE__
|
||||||
|
|
||||||
#include <rthw.h>
|
#include <rthw.h>
|
||||||
#include <rtthread.h>
|
#include <rtthread.h>
|
||||||
|
|
||||||
|
@ -59,7 +63,7 @@ int lwp_pid_init(void)
|
||||||
|
|
||||||
void lwp_pid_lock_take(void)
|
void lwp_pid_lock_take(void)
|
||||||
{
|
{
|
||||||
DEF_RETURN_CODE(rc);
|
LWP_DEF_RETURN_CODE(rc);
|
||||||
|
|
||||||
rc = lwp_mutex_take_safe(&pid_mtx, RT_WAITING_FOREVER, 0);
|
rc = lwp_mutex_take_safe(&pid_mtx, RT_WAITING_FOREVER, 0);
|
||||||
/* should never failed */
|
/* should never failed */
|
||||||
|
@ -382,7 +386,7 @@ rt_lwp_t lwp_create(rt_base_t flags)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
LOG_D("%s(pid=%d) => %p", __func__, new_lwp->pid, new_lwp);
|
LOG_D("%s(pid=%d) => %p", __func__, new_lwp ? new_lwp->pid : -1, new_lwp);
|
||||||
return new_lwp;
|
return new_lwp;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -699,6 +703,7 @@ pid_t lwp_name2pid(const char *name)
     pid_t pid = 0;
     rt_thread_t main_thread;
     char* process_name = RT_NULL;
+    rt_sched_lock_level_t slvl;

     lwp_pid_lock_take();
     for (idx = 0; idx < RT_LWP_MAX_NR; idx++)
@@ -713,10 +718,12 @@ pid_t lwp_name2pid(const char *name)
             if (!rt_strncmp(name, process_name, RT_NAME_MAX))
             {
                 main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
-                if (!(main_thread->stat & RT_THREAD_CLOSE))
+                rt_sched_lock(&slvl);
+                if (!(rt_sched_thread_get_stat(main_thread) == RT_THREAD_CLOSE))
                 {
                     pid = lwp->pid;
                 }
+                rt_sched_unlock(slvl);
             }
         }
     }
@@ -767,7 +774,7 @@ static sysret_t _lwp_wait_and_recycle(struct rt_lwp *child, rt_thread_t cur_thr,
             error = rt_thread_suspend_with_flag(cur_thr, RT_INTERRUPTIBLE);
             if (error == 0)
             {
-                rt_list_insert_before(&child->wait_list, &(cur_thr->tlist));
+                rt_list_insert_before(&child->wait_list, &RT_THREAD_LIST_NODE(cur_thr));
                 LWP_UNLOCK(child);

                 rt_set_errno(RT_EINTR);
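Note: the tlist migration applied throughout this commit follows one before/after pattern; `wait_list` below is just an illustrative list head:

    /* before: reach the thread by naming the tlist member directly */
    thread = rt_list_entry(wait_list.next, struct rt_thread, tlist);
    rt_list_insert_before(&wait_list, &(thread->tlist));

    /* after: the node is reached through the scheduler's accessor
     * macros, so callers no longer depend on where the node lives */
    thread = RT_THREAD_LIST_NODE_ENTRY(wait_list.next);
    rt_list_insert_before(&wait_list, &RT_THREAD_LIST_NODE(thread));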
@@ -898,15 +905,15 @@ static void print_thread_info(struct rt_thread* thread, int maxlen)
     rt_uint8_t stat;

 #ifdef RT_USING_SMP
-    if (thread->oncpu != RT_CPU_DETACHED)
-        rt_kprintf("%-*.*s %3d %3d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->oncpu, thread->current_priority);
+    if (RT_SCHED_CTX(thread).oncpu != RT_CPU_DETACHED)
+        rt_kprintf("%-*.*s %3d %3d ", maxlen, RT_NAME_MAX, thread->parent.name, RT_SCHED_CTX(thread).oncpu, RT_SCHED_PRIV(thread).current_priority);
     else
-        rt_kprintf("%-*.*s N/A %3d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->current_priority);
+        rt_kprintf("%-*.*s N/A %3d ", maxlen, RT_NAME_MAX, thread->parent.name, RT_SCHED_PRIV(thread).current_priority);
 #else
     rt_kprintf("%-*.*s %3d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->current_priority);
 #endif /*RT_USING_SMP*/

-    stat = (thread->stat & RT_THREAD_STAT_MASK);
+    stat = (RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK);
     if (stat == RT_THREAD_READY)        rt_kprintf(" ready  ");
     else if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK) rt_kprintf(" suspend");
     else if (stat == RT_THREAD_INIT)    rt_kprintf(" init   ");
@@ -932,7 +939,7 @@ static void print_thread_info(struct rt_thread* thread, int maxlen)
                    thread->stack_size,
                    (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)ptr) * 100
                         / thread->stack_size,
-                   thread->remaining_tick,
+                   RT_SCHED_PRIV(thread).remaining_tick,
                    thread->error);
 #endif
 }
@@ -1066,99 +1073,15 @@ MSH_CMD_EXPORT_ALIAS(cmd_killall, killall, kill processes by name);
 int lwp_check_exit_request(void)
 {
     rt_thread_t thread = rt_thread_self();
+    rt_base_t expected = LWP_EXIT_REQUEST_TRIGGERED;

     if (!thread->lwp)
     {
         return 0;
     }

-    if (thread->exit_request == LWP_EXIT_REQUEST_TRIGGERED)
-    {
-        thread->exit_request = LWP_EXIT_REQUEST_IN_PROCESS;
-        return 1;
-    }
-    return 0;
-}
-
-static int found_thread(struct rt_lwp* lwp, rt_thread_t thread)
-{
-    int found = 0;
-    rt_base_t level;
-    rt_list_t *list;
-
-    level = rt_spin_lock_irqsave(&thread->spinlock);
-    list = lwp->t_grp.next;
-    while (list != &lwp->t_grp)
-    {
-        rt_thread_t iter_thread;
-
-        iter_thread = rt_list_entry(list, struct rt_thread, sibling);
-        if (thread == iter_thread)
-        {
-            found = 1;
-            break;
-        }
-        list = list->next;
-    }
-    rt_spin_unlock_irqrestore(&thread->spinlock, level);
-    return found;
-}
-
-void lwp_request_thread_exit(rt_thread_t thread_to_exit)
-{
-    rt_thread_t main_thread;
-    rt_base_t level;
-    rt_list_t *list;
-    struct rt_lwp *lwp;
-
-    lwp = lwp_self();
-
-    if ((!thread_to_exit) || (!lwp))
-    {
-        return;
-    }
-
-    level = rt_spin_lock_irqsave(&thread_to_exit->spinlock);
-
-    main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
-    if (thread_to_exit == main_thread)
-    {
-        goto finish;
-    }
-    if ((struct rt_lwp *)thread_to_exit->lwp != lwp)
-    {
-        goto finish;
-    }
-
-    for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
-    {
-        rt_thread_t thread;
-
-        thread = rt_list_entry(list, struct rt_thread, sibling);
-        if (thread != thread_to_exit)
-        {
-            continue;
-        }
-        if (thread->exit_request == LWP_EXIT_REQUEST_NONE)
-        {
-            thread->exit_request = LWP_EXIT_REQUEST_TRIGGERED;
-        }
-        if ((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
-        {
-            thread->error = -RT_EINTR;
-            rt_hw_dsb();
-            rt_thread_wakeup(thread);
-        }
-        break;
-    }
-
-    while (found_thread(lwp, thread_to_exit))
-    {
-        rt_thread_mdelay(10);
-    }
-
-finish:
-    rt_spin_unlock_irqrestore(&thread_to_exit->spinlock, level);
-    return;
-}
+    return rt_atomic_compare_exchange_strong(&thread->exit_request, &expected,
+                                             LWP_EXIT_REQUEST_IN_PROCESS);
 }

 static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread);
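Note: the rewrite folds the old test-then-set into one atomic step; spelled out, the returned value means "this caller claimed the exit request":

    /* atomically: if (exit_request == TRIGGERED) { exit_request =
     * IN_PROCESS; return 1; } else return 0; -- so the check can no
     * longer be split by a concurrent lwp_terminate() broadcast */
    rt_base_t expected = LWP_EXIT_REQUEST_TRIGGERED;
    int claimed = rt_atomic_compare_exchange_strong(&thread->exit_request,
                                                    &expected,
                                                    LWP_EXIT_REQUEST_IN_PROCESS);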
@@ -1193,34 +1116,32 @@ void lwp_terminate(struct rt_lwp *lwp)

 static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread)
 {
-    rt_base_t level;
+    rt_sched_lock_level_t slvl;
     rt_list_t *list;
     rt_thread_t thread;
+    rt_base_t expected = LWP_EXIT_REQUEST_NONE;

     /* broadcast exit request for sibling threads */
     LWP_LOCK(lwp);
     for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
     {
         thread = rt_list_entry(list, struct rt_thread, sibling);
-        level = rt_spin_lock_irqsave(&thread->spinlock);
-        if (thread->exit_request == LWP_EXIT_REQUEST_NONE)
-        {
-            thread->exit_request = LWP_EXIT_REQUEST_TRIGGERED;
-        }
-        rt_spin_unlock_irqrestore(&thread->spinlock, level);
-
-        level = rt_spin_lock_irqsave(&thread->spinlock);
-        if ((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
+        rt_atomic_compare_exchange_strong(&thread->exit_request, &expected,
+                                          LWP_EXIT_REQUEST_TRIGGERED);
+
+        rt_sched_lock(&slvl);
+        /* dont release, otherwise thread may have been freed */
+        if (rt_sched_thread_is_suspended(thread))
         {
             thread->error = RT_EINTR;
-            rt_spin_unlock_irqrestore(&thread->spinlock, level);
-
-            rt_hw_dsb();
+            rt_sched_unlock(slvl);
             rt_thread_wakeup(thread);
         }
         else
         {
-            rt_spin_unlock_irqrestore(&thread->spinlock, level);
+            rt_sched_unlock(slvl);
         }
     }
     LWP_UNLOCK(lwp);
@@ -1240,6 +1161,7 @@ static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread)
         subthread_is_terminated = (int)(curr_thread->sibling.prev == &lwp->t_grp);
         if (!subthread_is_terminated)
         {
+            rt_sched_lock_level_t slvl;
             rt_thread_t sub_thread;
             rt_list_t *list;
             int all_subthread_in_init = 1;
@@ -1247,13 +1169,18 @@ static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread)
             /* check all subthread is in init state */
             for (list = curr_thread->sibling.prev; list != &lwp->t_grp; list = list->prev)
             {
+                rt_sched_lock(&slvl);
                 sub_thread = rt_list_entry(list, struct rt_thread, sibling);
-                if ((sub_thread->stat & RT_THREAD_STAT_MASK) != RT_THREAD_INIT)
+                if (rt_sched_thread_get_stat(sub_thread) != RT_THREAD_INIT)
                 {
+                    rt_sched_unlock(slvl);
                     all_subthread_in_init = 0;
                     break;
                 }
+                else
+                {
+                    rt_sched_unlock(slvl);
+                }
             }
             if (all_subthread_in_init)
             {
@@ -1344,7 +1271,7 @@ static void _resr_cleanup(struct rt_lwp *lwp)
     LWP_UNLOCK(lwp);
     if (!rt_list_isempty(&lwp->wait_list))
     {
-        thread = rt_list_entry(lwp->wait_list.next, struct rt_thread, tlist);
+        thread = RT_THREAD_LIST_NODE_ENTRY(lwp->wait_list.next);
         thread->error = RT_EOK;
         thread->msg_ret = (void*)(rt_size_t)lwp->lwp_ret;
         rt_thread_resume(thread);
@@ -11,7 +11,7 @@
  * remove lwp_signal_backup/restore() to reduce architecture codes
  * update the generation, pending and delivery routines
  */
+#define __RT_IPC_SOURCE__
 #define DBG_TAG "lwp.signal"
 #define DBG_LVL DBG_INFO
 #include <rtdbg.h>
@@ -408,6 +408,8 @@ rt_err_t lwp_signal_init(struct lwp_signal *sig)
 {
     rt_err_t rc = RT_EOK;

+    sig->real_timer = LWP_SIG_INVALID_TIMER;
+
     memset(&sig->sig_dispatch_thr, 0, sizeof(sig->sig_dispatch_thr));

     memset(&sig->sig_action, 0, sizeof(sig->sig_action));
@@ -423,6 +425,7 @@ rt_err_t lwp_signal_detach(struct lwp_signal *signal)
 {
     rt_err_t ret = RT_EOK;

+    timer_delete(signal->real_timer);
     lwp_sigqueue_clear(&signal->sig_queue);

     return ret;
@@ -561,28 +564,42 @@ void lwp_thread_signal_catch(void *exp_frame)
 static int _do_signal_wakeup(rt_thread_t thread, int sig)
 {
     int need_schedule;
+    rt_sched_lock_level_t slvl;
     if (!_sigismember(&thread->signal.sigset_mask, sig))
     {
-        if ((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
+        rt_sched_lock(&slvl);
+        int stat = rt_sched_thread_get_stat(thread);
+        if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
         {
-            if ((thread->stat & RT_SIGNAL_COMMON_WAKEUP_MASK) != RT_SIGNAL_COMMON_WAKEUP_MASK)
+            if ((stat & RT_SIGNAL_COMMON_WAKEUP_MASK) != RT_SIGNAL_COMMON_WAKEUP_MASK)
             {
+                thread->error = RT_EINTR;
+                rt_sched_unlock(slvl);
+
                 rt_thread_wakeup(thread);
                 need_schedule = 1;
             }
-            else if ((sig == SIGKILL) && ((thread->stat & RT_SIGNAL_KILL_WAKEUP_MASK) != RT_SIGNAL_KILL_WAKEUP_MASK))
+            else if ((sig == SIGKILL || sig == SIGSTOP) &&
+                     ((stat & RT_SIGNAL_KILL_WAKEUP_MASK) != RT_SIGNAL_KILL_WAKEUP_MASK))
             {
+                thread->error = RT_EINTR;
+                rt_sched_unlock(slvl);
+
                 rt_thread_wakeup(thread);
                 need_schedule = 1;
             }
             else
             {
+                rt_sched_unlock(slvl);
                 need_schedule = 0;
             }
         }
         else
+        {
+            rt_sched_unlock(slvl);
             need_schedule = 0;
         }
+    }
     else
         need_schedule = 0;
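Note: the waker now publishes the interruption reason before the wakeup. A sketch of the sleeper side, following the _lwp_wait_and_recycle() pattern earlier in this commit (the surrounding wait loop is illustrative, not verbatim kernel code):

    error = rt_thread_suspend_with_flag(cur_thr, RT_INTERRUPTIBLE);
    if (error == 0)
    {
        rt_schedule();                  /* sleeps until a wakeup arrives */
        if (cur_thr->error == RT_EINTR) /* stored by _do_signal_wakeup() */
            rt_set_errno(RT_EINTR);     /* report the interrupted wait */
    }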
@@ -838,7 +855,7 @@ rt_err_t lwp_thread_signal_kill(rt_thread_t thread, long signo, long code, long

     LOG_D("%s(signo=%d)", __func__, signo);

-    if (!thread || signo < 0 || signo >= _LWP_NSIG)
+    if (!thread || signo <= 0 || signo >= _LWP_NSIG)
     {
         ret = -RT_EINVAL;
     }
@@ -17,6 +17,9 @@
 #include <rtthread.h>
 #include <sys/signal.h>

+struct timespec;
+struct itimerspec;
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -28,6 +31,7 @@ extern "C" {
 #define LWP_SIG_USER_SA_FLAGS \
     (SA_NOCLDSTOP | SA_NOCLDWAIT | SA_SIGINFO | SA_ONSTACK | SA_RESTART | \
      SA_NODEFER | SA_RESETHAND | SA_EXPOSE_TAGBITS)
+#define LWP_SIG_INVALID_TIMER ((timer_t)-1)

 typedef enum {
     LWP_SIG_MASK_CMD_BLOCK,
@@ -40,6 +44,7 @@ typedef enum {
  * LwP implementation of POSIX signal
  */
 struct lwp_signal {
+    timer_t real_timer;
     struct lwp_sigqueue sig_queue;
     rt_thread_t sig_dispatch_thr[_LWP_NSIG];
@@ -167,6 +172,10 @@ rt_err_t lwp_thread_signal_timedwait(rt_thread_t thread, lwp_sigset_t *sigset,
  */
 void lwp_thread_signal_pending(rt_thread_t thread, lwp_sigset_t *sigset);

+rt_err_t lwp_signal_setitimer(struct rt_lwp *lwp, int which,
+                              const struct itimerspec *restrict new,
+                              struct itimerspec *restrict old);
+
 #ifdef __cplusplus
 }
 #endif
@@ -14,7 +14,7 @@
  * 2023-07-06     Shell        adapt the signal API, and clone, fork to new implementation of lwp signal
  * 2023-07-27     Shell        Move tid_put() from lwp_free() to sys_exit()
  */
+#define __RT_IPC_SOURCE__
 #define _GNU_SOURCE

 /* RT-Thread System call */
@@ -1120,7 +1120,7 @@ sysret_t sys_getpriority(int which, id_t who)
         if (lwp)
         {
             rt_thread_t thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
-            prio = thread->current_priority;
+            prio = RT_SCHED_PRIV(thread).current_priority;
         }

         lwp_pid_lock_release();
@@ -1808,7 +1808,7 @@ rt_thread_t sys_thread_create(void *arg[])
     }

 #ifdef RT_USING_SMP
-    thread->bind_cpu = lwp->bind_cpu;
+    RT_SCHED_CTX(thread).bind_cpu = lwp->bind_cpu;
 #endif
     thread->cleanup = lwp_cleanup;
     thread->user_entry = (void (*)(void *))arg[1];
@@ -1935,15 +1935,15 @@ long _sys_clone(void *arg[])
                               RT_NULL,
                               RT_NULL,
                               self->stack_size,
-                              self->init_priority,
-                              self->init_tick);
+                              RT_SCHED_PRIV(self).init_priority,
+                              RT_SCHED_PRIV(self).init_tick);
     if (!thread)
     {
         goto fail;
     }

 #ifdef RT_USING_SMP
-    thread->bind_cpu = lwp->bind_cpu;
+    RT_SCHED_CTX(self).bind_cpu = lwp->bind_cpu;
 #endif
     thread->cleanup = lwp_cleanup;
     thread->user_entry = RT_NULL;
@@ -2120,8 +2120,8 @@ sysret_t _sys_fork(void)
                               RT_NULL,
                               RT_NULL,
                               self_thread->stack_size,
-                              self_thread->init_priority,
-                              self_thread->init_tick);
+                              RT_SCHED_PRIV(self_thread).init_priority,
+                              RT_SCHED_PRIV(self_thread).init_tick);
     if (!thread)
     {
         SET_ERRNO(ENOMEM);
@@ -4231,6 +4231,9 @@ sysret_t sys_sigtimedwait(const sigset_t *sigset, siginfo_t *info, const struct
     struct timespec ktimeout;
     struct timespec *ptimeout;

+    /* for RT_ASSERT */
+    RT_UNUSED(ret);
+
     /* Fit sigset size to lwp set */
     if (sizeof(lwpset) < sigsize)
     {
@@ -5505,7 +5508,7 @@ sysret_t sys_sched_setaffinity(pid_t pid, size_t size, void *set)
 sysret_t sys_sched_getaffinity(const pid_t pid, size_t size, void *set)
 {
 #ifdef ARCH_MM_MMU
-    DEF_RETURN_CODE(rc);
+    LWP_DEF_RETURN_CODE(rc);
     void *mask;
     struct rt_lwp *lwp;
     rt_bool_t need_release = RT_FALSE;
@@ -5571,7 +5574,7 @@ sysret_t sys_sched_getaffinity(const pid_t pid, size_t size, void *set)

     kmem_put(mask);

-    RETURN(rc);
+    LWP_RETURN(rc);
 #else
     return -1;
 #endif
@@ -5679,13 +5682,11 @@ sysret_t sys_sched_yield(void)
     return 0;
 }

-sysret_t sys_sched_getparam(const pid_t pid, void *param)
+sysret_t sys_sched_getparam(const pid_t tid, void *param)
 {
     struct sched_param *sched_param = RT_NULL;
-    struct rt_lwp *lwp = NULL;
-    rt_thread_t main_thread;
+    rt_thread_t thread;
     int ret = -1;
-    rt_bool_t need_release = RT_FALSE;

     if (!lwp_user_accessable(param, sizeof(struct sched_param)))
     {
@@ -5698,27 +5699,16 @@ sysret_t sys_sched_getparam(const pid_t tid, void *param)
         return -ENOMEM;
     }

-    if (pid > 0)
-    {
-        need_release = RT_TRUE;
-        lwp_pid_lock_take();
-        lwp = lwp_from_pid_locked(pid);
-    }
-    else if (pid == 0)
-    {
-        lwp = lwp_self();
-    }
-
-    if (lwp)
+    thread = lwp_tid_get_thread_and_inc_ref(tid);
+    if (thread)
     {
-        main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
-        if (need_release)
-            lwp_pid_lock_release();
-
-        sched_param->sched_priority = main_thread->current_priority;
+        sched_param->sched_priority = RT_SCHED_PRIV(thread).current_priority;
         ret = 0;
     }

+    lwp_tid_dec_ref(thread);
+
     lwp_put_to_user((void *)param, sched_param, sizeof(struct sched_param));
     kmem_put(sched_param);
@@ -5800,7 +5790,7 @@ sysret_t sys_sched_getscheduler(int tid, int *policy, void *param)
     }

     thread = lwp_tid_get_thread_and_inc_ref(tid);
-    sched_param->sched_priority = thread->current_priority;
+    sched_param->sched_priority = RT_SCHED_PRIV(thread).current_priority;
     lwp_tid_dec_ref(thread);

     lwp_put_to_user((void *)param, sched_param, sizeof(struct sched_param));
@@ -6814,20 +6804,24 @@ sysret_t sys_memfd_create()
 {
     return 0;
 }

 sysret_t sys_setitimer(int which, const struct itimerspec *restrict new, struct itimerspec *restrict old)
 {
-    int ret = 0;
-    timer_t timerid = 0;
-    struct sigevent sevp_k = {0};
+    sysret_t rc = 0;
+    rt_lwp_t lwp = lwp_self();
+    struct itimerspec new_value_k;
+    struct itimerspec old_value_k;

-    sevp_k.sigev_notify = SIGEV_SIGNAL;
-    sevp_k.sigev_signo = SIGALRM;
-    ret = timer_create(CLOCK_REALTIME_ALARM, &sevp_k, &timerid);
-    if (ret != 0)
+    if (lwp_get_from_user(&new_value_k, (void *)new, sizeof(*new)) != sizeof(*new))
     {
-        return GET_ERRNO();
+        return -EFAULT;
     }
-    return sys_timer_settime(timerid,0,new,old);
+
+    rc = lwp_signal_setitimer(lwp, which, &new_value_k, &old_value_k);
+    if (old && lwp_put_to_user(old, (void *)&old_value_k, sizeof old_value_k) != sizeof old_value_k)
+        return -EFAULT;
+
+    return rc;
 }

 const static struct rt_syscall_def func_table[] =
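Note: for reference, the user-visible behavior this rewrite backs is plain POSIX interval-timer usage; a minimal hosted-C sketch (nothing here is RT-Thread-specific; the libc wrapper is what ends up in sys_setitimer()):

    #include <signal.h>
    #include <sys/time.h>
    #include <unistd.h>

    static void on_alarm(int sig) { (void)sig; /* fires every 500 ms */ }

    int main(void)
    {
        struct itimerval it = {
            .it_value    = { .tv_sec = 0, .tv_usec = 500 * 1000 },
            .it_interval = { .tv_sec = 0, .tv_usec = 500 * 1000 },
        };

        signal(SIGALRM, on_alarm);
        setitimer(ITIMER_REAL, &it, NULL);
        for (;;)
            pause();    /* woken once per SIGALRM */
    }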
@@ -25,22 +25,12 @@
 #define MM_PA_TO_OFF(pa) ((uintptr_t)(pa) >> MM_PAGE_SHIFT)
 #define PV_OFFSET (rt_kmem_pvoff())

-#ifndef RT_USING_SMP
-typedef rt_spinlock_t mm_spinlock;
-
-#define MM_PGTBL_LOCK_INIT(aspace)
-#define MM_PGTBL_LOCK(aspace) (rt_hw_spin_lock(&((aspace)->pgtbl_lock)))
-#define MM_PGTBL_UNLOCK(aspace) (rt_hw_spin_unlock(&((aspace)->pgtbl_lock)))
-
-#else
-typedef struct rt_spinlock mm_spinlock;
+typedef struct rt_spinlock mm_spinlock_t;

 #define MM_PGTBL_LOCK_INIT(aspace) (rt_spin_lock_init(&((aspace)->pgtbl_lock)))
 #define MM_PGTBL_LOCK(aspace) (rt_spin_lock(&((aspace)->pgtbl_lock)))
 #define MM_PGTBL_UNLOCK(aspace) (rt_spin_unlock(&((aspace)->pgtbl_lock)))

-#endif /* RT_USING_SMP */
-
 struct rt_aspace;
 struct rt_varea;
 struct rt_mem_obj;
@@ -53,7 +43,7 @@ typedef struct rt_aspace
     rt_size_t size;

     void *page_table;
-    mm_spinlock pgtbl_lock;
+    mm_spinlock_t pgtbl_lock;

     struct _aspace_tree tree;
     struct rt_mutex bst_lock;
@@ -0,0 +1,9 @@
+from building import *
+
+cwd = GetCurrentDir()
+src = Glob('*.c')
+CPPPATH = [cwd]
+group = []
+
+group = DefineGroup('LIBADT', src, depend = [], CPPPATH = CPPPATH)
+Return('group')
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-11-01     Shell        Init ver.
+ */
+
+#ifndef __LIBADT_DICT_H__
+#define __LIBADT_DICT_H__
+
+#include "rt_uthash.h"
+
+#endif
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-11-01     Shell        Porting to RTT API
+ */
+#ifndef __LIBADT_RT_UTHASH_H__
+#define __LIBADT_RT_UTHASH_H__
+
+#include <rtthread.h>
+
+#define uthash_malloc(sz) rt_malloc(sz)
+#define uthash_free(ptr, sz) rt_free(ptr)
+
+/**
+ * for performance consideration, using libc implementations
+ * as the default case. If you care about the compatibility
+ * problem, define the RT_UTHASH_CONFIG_COMPATIBILITY_FIRST
+ * before including the rt_uthash.h.
+ */
+#ifndef RT_UTHASH_CONFIG_COMPATIBILITY_FIRST
+#define uthash_bzero(a, n) memset(a, '\0', n)
+#define uthash_strlen(s) strlen(s)
+
+#else
+#define uthash_bzero(a, n) rt_memset(a, '\0', n)
+#define uthash_strlen(s) rt_strlen(s)
+
+#endif /* RT_UTHASH_CONFIG_COMPATIBILITY_FIRST */
+
+/* if any fatal happen, throw an exception and return a failure */
+#define uthash_fatal(msg)  \
+    do                     \
+    {                      \
+        LOG_E(msg);        \
+        return -RT_ENOMEM; \
+    } while (0)
+
+#include "uthash.h"
+
+#define DEFINE_RT_UTHASH_TYPE(entry_name, key_type, key_name) \
+    typedef struct entry_name                                 \
+    {                                                         \
+        key_type key_name;                                    \
+        UT_hash_handle hh;                                    \
+    } *entry_name##_t;
+
+#define RT_UTHASH_ADD(head, key_member, keylen_in, value) \
+    HASH_ADD(hh, head, key_member, keylen_in, value)
+#define RT_UTHASH_FIND(head, key_ptr, keylen_in, pval) \
+    HASH_FIND(hh, head, key_ptr, keylen_in, pval)
+#define RT_UTHASH_DELETE(head, pobj) HASH_DELETE(hh, head, pobj)
+
+#endif /* __LIBADT_RT_UTHASH_H__ */
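Note: a quick usage sketch of these wrappers; the entry type and key below are made up for illustration:

    #include "rt_uthash.h"

    /* declares `struct tid_entry { long tid; UT_hash_handle hh; }`
     * plus the pointer typedef `tid_entry_t` */
    DEFINE_RT_UTHASH_TYPE(tid_entry, long, tid);

    static tid_entry_t _tid_table = RT_NULL;

    static rt_err_t tid_record(long tid)
    {
        tid_entry_t e = uthash_malloc(sizeof(*e));
        if (!e)
            return -RT_ENOMEM;
        e->tid = tid;
        /* keyed on the raw bytes of the `tid` member */
        RT_UTHASH_ADD(_tid_table, tid, sizeof(long), e);
        return RT_EOK;
    }

    static tid_entry_t tid_lookup(long tid)
    {
        tid_entry_t e = RT_NULL;
        RT_UTHASH_FIND(_tid_table, &tid, sizeof(long), e);
        return e;    /* RT_NULL when absent */
    }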
File diff suppressed because it is too large
@@ -89,7 +89,7 @@ void rt_prio_queue_detach(struct rt_prio_queue *que)
         rt_base_t level = rt_hw_interrupt_disable();

         /* get next suspend thread */
-        thread = rt_list_entry(que->suspended_pop_list.next, struct rt_thread, tlist);
+        thread = RT_THREAD_LIST_NODE_ENTRY(que->suspended_pop_list.next);
         /* set error code to -RT_ERROR */
         thread->error = -RT_ERROR;
@@ -160,9 +160,7 @@ rt_err_t rt_prio_queue_push(struct rt_prio_queue *que,
     rt_thread_t thread;

     /* get thread entry */
-    thread = rt_list_entry(que->suspended_pop_list.next,
-                           struct rt_thread,
-                           tlist);
+    thread = RT_THREAD_LIST_NODE_ENTRY(que->suspended_pop_list.next);
     /* resume it */
     rt_thread_resume(thread);
     rt_hw_interrupt_enable(level);
@@ -207,7 +205,7 @@ rt_err_t rt_prio_queue_pop(struct rt_prio_queue *que,
     thread->error = RT_EOK;
     rt_thread_suspend(thread);

-    rt_list_insert_before(&(que->suspended_pop_list), &(thread->tlist));
+    rt_list_insert_before(&(que->suspended_pop_list), &RT_THREAD_LIST_NODE(thread));

     if (timeout > 0)
     {
@@ -336,7 +336,7 @@ rt_err_t rt_vbus_post(rt_uint8_t id,
         rt_enter_critical();
         rt_thread_suspend(thread);

-        rt_list_insert_after(&_chn_suspended_threads[id], &thread->tlist);
+        rt_list_insert_after(&_chn_suspended_threads[id], &RT_THREAD_LIST_NODE(thread));
         if (timeout > 0)
         {
             rt_timer_control(&(thread->thread_timer),
@@ -443,9 +443,7 @@ static void rt_vbus_notify_chn(unsigned char chnr, rt_err_t err)
     {
         rt_thread_t thread;

-        thread = rt_list_entry(_chn_suspended_threads[chnr].next,
-                               struct rt_thread,
-                               tlist);
+        thread = RT_THREAD_LIST_NODE_ENTRY(_chn_suspended_threads[chnr].next);
         thread->error = err;
         rt_thread_resume(thread);
     }
@@ -855,9 +853,7 @@ static int _chn0_actor(unsigned char *dp, size_t dsize)
     {
         rt_thread_t thread;

-        thread = rt_list_entry(_chn_suspended_threads[chnr].next,
-                               struct rt_thread,
-                               tlist);
+        thread = RT_THREAD_LIST_NODE_ENTRY(_chn_suspended_threads[chnr].next);
         rt_thread_resume(thread);
     }
     rt_exit_critical();
@@ -43,9 +43,7 @@ void rt_wm_que_dump(struct rt_watermark_queue *wg)
     {
         rt_thread_t thread;

-        thread = rt_list_entry(wg->suspended_threads.next,
-                               struct rt_thread,
-                               tlist);
+        thread = RT_THREAD_LIST_NODE_ENTRY(wg->suspended_threads.next);
         rt_kprintf(" %.*s", RT_NAME_MAX, thread->parent.name);
     }
     rt_kprintf("\n");
@@ -64,7 +64,7 @@ rt_inline rt_err_t rt_wm_que_inc(struct rt_watermark_queue *wg,
         thread = rt_thread_self();
         thread->error = RT_EOK;
         rt_thread_suspend(thread);
-        rt_list_insert_after(&wg->suspended_threads, &thread->tlist);
+        rt_list_insert_after(&wg->suspended_threads, &RT_THREAD_LIST_NODE(thread));
         if (timeout > 0)
         {
             rt_timer_control(&(thread->thread_timer),
@@ -116,9 +116,7 @@ rt_inline void rt_wm_que_dec(struct rt_watermark_queue *wg)
     {
         rt_thread_t thread;

-        thread = rt_list_entry(wg->suspended_threads.next,
-                               struct rt_thread,
-                               tlist);
+        thread = RT_THREAD_LIST_NODE_ENTRY(wg->suspended_threads.next);
         rt_thread_resume(thread);
         need_sched = 1;
     }
@@ -69,4 +69,8 @@ config UTEST_MTSAFE_KPRINT_TC
     bool "mtsafe kprint test"
     default n

+config UTEST_SCHEDULER_TC
+    bool "scheduler test"
+    default n
+
 endmenu
@@ -50,6 +50,13 @@ if GetDepend(['UTEST_HOOKLIST_TC']):
 if GetDepend(['UTEST_MTSAFE_KPRINT_TC']):
     src += ['mtsafe_kprint_tc.c']

+# Stressful testcase for scheduler (MP/UP)
+if GetDepend(['UTEST_SCHEDULER_TC']):
+    src += ['sched_timed_sem_tc.c']
+    src += ['sched_timed_mtx_tc.c']
+    src += ['sched_mtx_tc.c']
+    src += ['sched_sem_tc.c', 'sched_thread_tc.c']
+
 group = DefineGroup('utestcases', src, depend = ['RT_USING_UTESTCASES'], CPPPATH = CPPPATH)

 Return('group')
@@ -8,15 +8,16 @@
  * 2021-09.01     luckyzjq     the first version
  * 2023-09-15     xqyjlj       change stack size in cpu64
  */
+#define __RT_IPC_SOURCE__

 #include <rtthread.h>
 #include <stdlib.h>
 #include "utest.h"

 #ifdef ARCH_CPU_64BIT
-#define THREAD_STACKSIZE 4096
+#define THREAD_STACKSIZE 8192
 #else
-#define THREAD_STACKSIZE 1024
+#define THREAD_STACKSIZE 4096
 #endif

 static struct rt_mutex static_mutex;
@@ -241,7 +242,7 @@ static void static_thread1_entry(void *param)

     /* thread3 hode mutex thread2 take mutex */
     /* check thread2 and thread3 priority */
-    if (tid2->current_priority != tid3->current_priority)
+    if (RT_SCHED_PRIV(tid2).current_priority != RT_SCHED_PRIV(tid3).current_priority)
     {
         uassert_true(RT_FALSE);
     }
@@ -550,7 +551,7 @@ static void dynamic_thread1_entry(void *param)

     /* thread3 hode mutex thread2 take mutex */
     /* check thread2 and thread3 priority */
-    if (tid2->current_priority != tid3->current_priority)
+    if (RT_SCHED_PRIV(tid2).current_priority != RT_SCHED_PRIV(tid3).current_priority)
     {
         uassert_true(RT_FALSE);
     }
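Note: the pattern recurring across these files: translation units that must reach scheduler-private state opt in before including the kernel header. The exact gating is inferred from this commit, not documented here:

    /* define before including <rtthread.h>; without it, code is
     * expected to stay on the public thread API instead of the
     * RT_SCHED_PRIV()/RT_SCHED_CTX() accessors */
    #define __RT_IPC_SOURCE__
    #include <rtthread.h>

    static void assert_equal_prio(rt_thread_t a, rt_thread_t b)
    {
        if (RT_SCHED_PRIV(a).current_priority != RT_SCHED_PRIV(b).current_priority)
            rt_kprintf("priority mismatch\n");
    }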
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2006-2024, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2024-01-17     Shell        the first version
+ */
+#include <rtthread.h>
+#include <stdlib.h>
+#include "utest.h"
+
+/**
+ * Stressful Test for Mutex
+ */
+
+#define TEST_SECONDS 30
+#define TEST_LOOP_TICKS (TEST_SECONDS * RT_TICK_PER_SECOND)
+#define TEST_THREAD_COUNTS (RT_CPUS_NR)
+#define TEST_PROGRESS_COUNTS (36)
+#define TEST_PROGRESS_ON (TEST_LOOP_TICKS/TEST_PROGRESS_COUNTS)
+#define TEST_PRIORITY_HIGHEST (UTEST_THR_PRIORITY+1)
+#define TEST_RANDOM_LATENCY_MAX (1000 * 1000)
+
+static struct rt_semaphore _thr_exit_sem;
+static rt_atomic_t _progress_counter;
+static rt_atomic_t _exit_flag;
+static struct rt_mutex _racing_lock;
+
+static void test_thread_entry(void *param)
+{
+    while (1)
+    {
+        rt_mutex_take(&_racing_lock, RT_WAITING_FOREVER);
+        rt_mutex_release(&_racing_lock);
+
+        if (rt_atomic_load(&_exit_flag))
+        {
+            break;
+        }
+    }
+
+    rt_sem_release(&_thr_exit_sem);
+}
+
+static void mutex_stress_tc(void)
+{
+    rt_err_t error;
+    rt_thread_t tester;
+    const rt_base_t priority_base = TEST_PRIORITY_HIGHEST;
+
+    for (size_t i = 0; i < TEST_THREAD_COUNTS; i++)
+    {
+        tester = rt_thread_create(
+            "tester",
+            test_thread_entry,
+            (void *)0,
+            UTEST_THR_STACK_SIZE,
+            priority_base + (i % (RT_THREAD_PRIORITY_MAX - TEST_PRIORITY_HIGHEST)),
+            1);
+
+        rt_thread_startup(tester);
+    }
+
+    for (size_t i = 0; i < TEST_LOOP_TICKS; i++)
+    {
+        rt_thread_delay(1);
+
+        if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
+            uassert_true(1);
+    }
+
+    /* trigger exit request for all sub-threads */
+    rt_atomic_store(&_exit_flag, 1);
+
+    /* waiting for sub-threads to exit */
+    for (size_t i = 0; i < TEST_THREAD_COUNTS; i++)
+    {
+        error = rt_sem_take(&_thr_exit_sem, RT_WAITING_FOREVER);
+        uassert_int_equal(error, RT_EOK);
+    }
+}
+
+static rt_err_t utest_tc_init(void)
+{
+    int *pseed = rt_malloc(sizeof(int));
+    srand(*(int *)pseed);
+    rt_free(pseed);
+
+    rt_sem_init(&_thr_exit_sem, "test", 0, RT_IPC_FLAG_PRIO);
+    rt_mutex_init(&_racing_lock, "ipc", RT_IPC_FLAG_PRIO);
+    return RT_EOK;
+}
+
+static rt_err_t utest_tc_cleanup(void)
+{
+    rt_sem_detach(&_thr_exit_sem);
+    rt_mutex_detach(&_racing_lock);
+    return RT_EOK;
+}
+
+static void testcase(void)
+{
+    UTEST_UNIT_RUN(mutex_stress_tc);
+}
+UTEST_TC_EXPORT(testcase, "testcases.kernel.scheduler.mutex", utest_tc_init, utest_tc_cleanup, TEST_SECONDS);
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2006-2024, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2024-01-17     Shell        the first version
+ */
+#define __RT_IPC_SOURCE__
+
+#include <rtthread.h>
+#include "rthw.h"
+#include "utest.h"
+
+#define KERN_TEST_CONFIG_LOOP_TIMES 160
+#define KERN_TEST_CONCURRENT_THREADS (RT_CPUS_NR * 2)
+#define KERN_TEST_CONFIG_HIGHEST_PRIO 3
+#define KERN_TEST_CONFIG_LOWEST_PRIO (RT_THREAD_PRIORITY_MAX - 2)
+
+#define TEST_LEVEL_COUNTS (KERN_TEST_CONFIG_LOWEST_PRIO - KERN_TEST_CONFIG_HIGHEST_PRIO + 1)
+#if TEST_LEVEL_COUNTS <= RT_CPUS_NR
+#warning for the best of this test, TEST_LEVEL_COUNTS should greater than RT_CPUS_NR
+#endif
+#if KERN_TEST_CONCURRENT_THREADS < RT_CPUS_NR
+#warning for the best of this test, KERN_TEST_CONCURRENT_THREADS should greater than RT_CPUS_NR
+#endif
+#if KERN_TEST_CONFIG_LOWEST_PRIO >= RT_THREAD_PRIORITY_MAX - 1
+#error the thread priority should at least be greater than idle
+#endif
+
+static rt_atomic_t _star_counter = 1;
+static struct rt_semaphore _thr_exit_sem;
+static struct rt_semaphore _level_waiting[TEST_LEVEL_COUNTS];
+static rt_thread_t _thread_matrix[TEST_LEVEL_COUNTS][KERN_TEST_CONCURRENT_THREADS];
+static rt_atomic_t _load_average[RT_CPUS_NR];
+
+static void _print_char(rt_thread_t thr_self, int character)
+{
+    rt_base_t current_counter;
+
+#ifdef RT_USING_SMP
+    rt_kprintf("%c%d", character, RT_SCHED_CTX(thr_self).oncpu);
+#else
+    rt_kprintf("%c0", character);
+#endif /* RT_USING_SMP */
+
+    current_counter = rt_atomic_add(&_star_counter, 1);
+    if (current_counter % 30 == 0)
+    {
+        rt_kprintf("\n");
+    }
+}
+
+static void _stats_load_avg_inc(void)
+{
+    int cpuid;
+
+    cpuid = rt_hw_cpu_id();
+    rt_atomic_add(&_load_average[cpuid], 1);
+}
+
+static void _stats_load_avg_print(void)
+{
+    rt_base_t counts = 0;
+    const rt_base_t total_test_counts = KERN_TEST_CONFIG_LOOP_TIMES * TEST_LEVEL_COUNTS * KERN_TEST_CONCURRENT_THREADS;
+
+    for (size_t i = 0; i < RT_CPUS_NR; i++)
+    {
+        rt_kprintf("%ld ", _load_average[i]);
+        counts += _load_average[i];
+    }
+
+    rt_kprintf("\n");
+    uassert_int_equal(counts, total_test_counts);
+}
+
+static void _thread_entry(void *param)
+{
+    int level = (rt_ubase_t)param;
+    rt_thread_t thr_self = rt_thread_self();
+
+    if (level == 0)
+    {
+        /* always the first to execute among other working threads */
+        for (size_t i = 0; i < KERN_TEST_CONFIG_LOOP_TIMES; i++)
+        {
+            /* notify our consumer */
+            rt_sem_release(&_level_waiting[level + 1]);
+
+            _stats_load_avg_inc();
+
+            /* waiting for resource of ours */
+            rt_sem_take(&_level_waiting[level], RT_WAITING_FOREVER);
+        }
+    }
+    else if (level == TEST_LEVEL_COUNTS - 1)
+    {
+        for (size_t i = 0; i < KERN_TEST_CONFIG_LOOP_TIMES; i++)
+        {
+            /* waiting for our resource first */
+            rt_sem_take(&_level_waiting[level], RT_WAITING_FOREVER);
+
+            _stats_load_avg_inc();
+
+            _print_char(thr_self, '*');
+
+            rt_thread_delay(1);
+
+            /* produce for level 0 worker */
+            rt_sem_release(&_level_waiting[0]);
+        }
+    }
+    else
+    {
+        for (size_t i = 0; i < KERN_TEST_CONFIG_LOOP_TIMES; i++)
+        {
+            /* waiting for resource of ours */
+            rt_sem_take(&_level_waiting[level], RT_WAITING_FOREVER);
+
+            _stats_load_avg_inc();
+
+            /* notify our consumer */
+            rt_sem_release(&_level_waiting[level + 1]);
+        }
+    }
+
+    uassert_true(1);
+    rt_sem_release(&_thr_exit_sem);
+
+    return;
+}
+
+static void scheduler_tc(void)
+{
+    LOG_I("Test starts...");
+    for (size_t i = 0; i < TEST_LEVEL_COUNTS; i++)
+    {
+        for (size_t j = 0; j < KERN_TEST_CONCURRENT_THREADS; j++)
+        {
+            rt_thread_startup(_thread_matrix[i][j]);
+        }
+    }
+    LOG_I("%d threads startup...", TEST_LEVEL_COUNTS * KERN_TEST_CONCURRENT_THREADS);
+
+    /* waiting for sub-threads to exit */
+    for (size_t i = 0; i < TEST_LEVEL_COUNTS * KERN_TEST_CONCURRENT_THREADS; i++)
+    {
+        rt_sem_take(&_thr_exit_sem, RT_WAITING_FOREVER);
+    }
+
+    /* print load average */
+    _stats_load_avg_print();
+}
+
+static rt_err_t utest_tc_init(void)
+{
+    LOG_I("Setup environment...");
+    rt_sem_init(&_thr_exit_sem, "test", 0, RT_IPC_FLAG_PRIO);
+
+    for (size_t i = 0; i < TEST_LEVEL_COUNTS; i++)
+    {
+        rt_sem_init(&_level_waiting[i], "test", 0, RT_IPC_FLAG_PRIO);
+
+        for (size_t j = 0; j < KERN_TEST_CONCURRENT_THREADS; j++)
+        {
+            _thread_matrix[i][j] =
+                rt_thread_create("test",
+                                 _thread_entry,
+                                 (void *)i,
+                                 UTEST_THR_STACK_SIZE,
+                                 KERN_TEST_CONFIG_HIGHEST_PRIO+i,
+                                 5);
+            if (!_thread_matrix[i][j])
+                uassert_not_null(_thread_matrix[i][j]);
+        }
+    }
+    return RT_EOK;
+}
+
+static rt_err_t utest_tc_cleanup(void)
+{
+    rt_sem_detach(&_thr_exit_sem);
+    for (size_t i = 0; i < TEST_LEVEL_COUNTS; i++)
+    {
+        rt_sem_detach(&_level_waiting[i]);
+    }
+    return RT_EOK;
+}
+
+static void testcase(void)
+{
+    UTEST_UNIT_RUN(scheduler_tc);
+}
+UTEST_TC_EXPORT(testcase, "testcases.kernel.scheduler.sem", utest_tc_init, utest_tc_cleanup, 10);
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2006-2024, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2024-01-25     Shell        init ver.
+ */
+#define __RT_KERNEL_SOURCE__
+#include <rtthread.h>
+#include "utest.h"
+
+#define TEST_LOOP_TIMES (100 * 1000)
+#define TEST_PROGRESS_COUNTS (36)
+#define TEST_THREAD_COUNT (RT_CPUS_NR * 1)
+#define TEST_PROGRESS_ON (TEST_LOOP_TIMES*TEST_THREAD_COUNT/TEST_PROGRESS_COUNTS)
+
+static struct rt_semaphore _thr_exit_sem;
+static rt_atomic_t _progress_counter;
+
+static volatile rt_thread_t threads_group[TEST_THREAD_COUNT][2];
+
+static void _thread_entry1(void *param)
+{
+    rt_base_t critical_level;
+    size_t idx = (size_t)param;
+
+    for (size_t i = 0; i < TEST_LOOP_TIMES; i++)
+    {
+        critical_level = rt_enter_critical();
+
+        rt_thread_suspend(rt_thread_self());
+        rt_thread_resume(threads_group[idx][1]);
+
+        rt_exit_critical_safe(critical_level);
+
+        if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
+            uassert_true(1);
+    }
+
+    rt_sem_release(&_thr_exit_sem);
+    return;
+}
+
+static void _thread_entry2(void *param)
+{
+    rt_base_t critical_level;
+    size_t idx = (size_t)param;
+
+    for (size_t i = 0; i < TEST_LOOP_TIMES; i++)
+    {
+        critical_level = rt_enter_critical();
+
+        rt_thread_suspend(rt_thread_self());
+        rt_thread_resume(threads_group[idx][0]);
+
+        rt_exit_critical_safe(critical_level);
+
+        if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
+            uassert_true(1);
+    }
+
+    rt_sem_release(&_thr_exit_sem);
+    return;
+}
+
+static void scheduler_tc(void)
+{
+    for (size_t i = 0; i < TEST_THREAD_COUNT; i++)
+    {
+        rt_thread_t t1 =
+            rt_thread_create(
+                "t1",
+                _thread_entry1,
+                (void *)i,
+                UTEST_THR_STACK_SIZE,
+                UTEST_THR_PRIORITY + 1,
+                100);
+        rt_thread_t t2 =
+            rt_thread_create(
+                "t2",
+                _thread_entry2,
+                (void *)i,
+                UTEST_THR_STACK_SIZE,
+                UTEST_THR_PRIORITY + 1,
+                100);
+
+        threads_group[i][0] = t1;
+        threads_group[i][1] = t2;
+    }
+
+    for (size_t i = 0; i < TEST_THREAD_COUNT; i++)
+    {
+        rt_thread_startup(threads_group[i][0]);
+        rt_thread_startup(threads_group[i][1]);
+    }
+
+    for (size_t i = 0; i < TEST_THREAD_COUNT; i++)
+    {
+        rt_sem_take(&_thr_exit_sem, RT_WAITING_FOREVER);
+    }
+}
+
+static rt_err_t utest_tc_init(void)
+{
+    rt_sem_init(&_thr_exit_sem, "test", 0, RT_IPC_FLAG_PRIO);
+    return RT_EOK;
+}
+
+static rt_err_t utest_tc_cleanup(void)
+{
+    rt_sem_detach(&_thr_exit_sem);
+    return RT_EOK;
+}
+
+static void testcase(void)
+{
+    UTEST_UNIT_RUN(scheduler_tc);
+}
+UTEST_TC_EXPORT(testcase, "testcases.kernel.scheduler.thread", utest_tc_init, utest_tc_cleanup, 10);
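Note: the ping-pong above leans on one property worth spelling out: suspending yourself inside a critical section does not switch away until the section is exited, so the peer's resume is published first. Condensed from the code above:

    level = rt_enter_critical();
    rt_thread_suspend(rt_thread_self()); /* marked suspended, no switch yet */
    rt_thread_resume(peer);              /* peer made ready while we hold */
    rt_exit_critical_safe(level);        /* scheduler switches away here */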
@ -0,0 +1,232 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2006-2024, RT-Thread Development Team
|
||||||
|
*
|
||||||
|
* SPDX-License-Identifier: Apache-2.0
|
||||||
|
*
|
||||||
|
* Change Logs:
|
||||||
|
* Date Author Notes
|
||||||
|
* 2024-01-25 Shell init ver.
|
||||||
|
*/
|
||||||
|
#define __RT_KERNEL_SOURCE__
|
||||||
|
#include <rtthread.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include "utest.h"
|
||||||
|
|
||||||
|
#define TEST_SECONDS 10
|
||||||
|
#define TEST_LOOP_TICKS (TEST_SECONDS * RT_TICK_PER_SECOND)
|
||||||
|
#define TEST_PROGRESS_COUNTS (36)
|
||||||
|
#define TEST_PROGRESS_ON (TEST_LOOP_TICKS*2/TEST_PROGRESS_COUNTS)
|
||||||
|
|
||||||
|
static struct rt_semaphore _thr_exit_sem;
|
||||||
|
static struct rt_mutex _ipc_primitive;
|
||||||
|
static struct rt_semaphore _cons_can_take_mtx;
|
||||||
|
static struct rt_semaphore _prod_can_take_mtx;
|
||||||
|
static rt_atomic_t _progress_counter;
|
||||||
|
#define CONSUMER_MAGIC 0x11223344
|
||||||
|
#define PRODUCER_MAGIC 0x44332211
|
||||||
|
static rt_atomic_t _last_holder_flag = CONSUMER_MAGIC;
|
||||||
|
static rt_base_t _timedout_failed_times = 0;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test on timedout IPC with racing condition where timedout routine and producer
|
||||||
|
* thread may race to wakeup sleeper.
|
||||||
|
*
|
||||||
|
* This test will fork 2 thread, one producer and one consumer. The producer will
|
||||||
|
* looping and trigger the IPC on the edge of new tick arrives. The consumer will
|
||||||
|
* wait on IPC with a timedout of 1 tick.
|
||||||
|
*/
|
||||||
|
|
||||||
|
static void _wait_until_edge(void)
|
||||||
|
{
|
||||||
|
rt_tick_t entry_level, current;
|
||||||
|
rt_base_t random_latency;
|
||||||
|
|
||||||
|
entry_level = rt_tick_get();
|
||||||
|
do
|
||||||
|
{
|
||||||
|
current = rt_tick_get();
|
||||||
|
}
|
||||||
|
while (current == entry_level);
|
||||||
|
|
||||||
|
/* give a random latency for test */
|
||||||
|
random_latency = rand() % 1000 * 1000;
|
||||||
|
entry_level = current;
|
||||||
|
for (size_t i = 0; i < random_latency; i++)
|
||||||
|
{
|
||||||
|
current = rt_tick_get();
|
||||||
|
if (current != entry_level)
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void _producer_entry(void *param)
|
||||||
|
{
|
||||||
|
rt_err_t error;
|
||||||
|
for (size_t i = 0; i < TEST_LOOP_TICKS; i++)
|
||||||
|
{
|
||||||
|
/**
|
||||||
|
* only try to take mutex after consumer have taken it after last
|
||||||
|
* release from us.
|
||||||
|
*/
|
||||||
|
error = rt_sem_take(&_prod_can_take_mtx, RT_WAITING_FOREVER);
|
||||||
|
if (error)
|
||||||
|
{
|
||||||
|
uassert_true(0);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
error = rt_mutex_take(&_ipc_primitive, RT_WAITING_FOREVER);
|
||||||
|
if (error)
|
||||||
|
{
|
||||||
|
            uassert_true(0);
            break;
        }

        /* ensure that the mutex is held in a round-robin fashion */
        if (rt_atomic_load(&_last_holder_flag) != CONSUMER_MAGIC)
        {
            uassert_true(0);
            break;
        }
        else
        {
            rt_atomic_store(&_last_holder_flag, PRODUCER_MAGIC);
            rt_sem_release(&_cons_can_take_mtx);
        }

        _wait_until_edge();

        rt_mutex_release(&_ipc_primitive);

        if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
            uassert_true(1);
    }

    rt_sem_release(&_thr_exit_sem);
    return;
}

static void _consumer_entry(void *param)
{
    rt_err_t error;
    for (size_t i = 0; i < TEST_LOOP_TICKS; i++)
    {
        /**
         * only try to take the mutex after the producer has taken it
         * following our last release.
         */
        error = rt_sem_take(&_cons_can_take_mtx, RT_WAITING_FOREVER);
        if (error)
        {
            uassert_true(0);
            break;
        }

        while (1)
        {
            error = rt_mutex_take_interruptible(&_ipc_primitive, 1);
            if (error == -RT_ETIMEOUT)
            {
                _timedout_failed_times++;
                if (rt_mutex_get_owner(&_ipc_primitive) == rt_thread_self())
                {
                    uassert_true(0);
                    break;
                }
            }
            else
            {
                break;
            }
        }

        if (error != RT_EOK)
        {
            uassert_true(0);
            break;
        }

        /* ensure that the mutex is held in a round-robin fashion */
        if (rt_atomic_load(&_last_holder_flag) != PRODUCER_MAGIC)
        {
            uassert_true(0);
            break;
        }
        else
        {
            rt_atomic_store(&_last_holder_flag, CONSUMER_MAGIC);
            rt_sem_release(&_prod_can_take_mtx);
        }

        rt_mutex_release(&_ipc_primitive);
        if (rt_mutex_get_owner(&_ipc_primitive) == rt_thread_self())
        {
            uassert_true(0);
            break;
        }

        if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
            uassert_true(1);
    }

    rt_sem_release(&_thr_exit_sem);
    return;
}

static void timed_mtx_tc(void)
{
    rt_thread_t prod = rt_thread_create(
        "prod",
        _producer_entry,
        (void *)0,
        UTEST_THR_STACK_SIZE,
        UTEST_THR_PRIORITY + 1,
        4);

    rt_thread_t cons = rt_thread_create(
        "cons",
        _consumer_entry,
        (void *)0,
        UTEST_THR_STACK_SIZE,
        UTEST_THR_PRIORITY + 1,
        100);

    rt_thread_startup(prod);
    rt_thread_startup(cons);

    for (size_t i = 0; i < 2; i++)
    {
        uassert_int_equal(
            rt_sem_take(&_thr_exit_sem, 2 * TEST_LOOP_TICKS),
            RT_EOK);
    }

    /* Summary */
    LOG_I("Total failed times: %ld(in %d)\n", _timedout_failed_times, TEST_LOOP_TICKS);
}

static rt_err_t utest_tc_init(void)
{
    _timedout_failed_times = 0;

    rt_mutex_init(&_ipc_primitive, "ipc", RT_IPC_FLAG_PRIO);
    rt_sem_init(&_cons_can_take_mtx, "test", 0, RT_IPC_FLAG_PRIO);
    rt_sem_init(&_prod_can_take_mtx, "test", 1, RT_IPC_FLAG_PRIO);
    rt_sem_init(&_thr_exit_sem, "test", 0, RT_IPC_FLAG_PRIO);
    return RT_EOK;
}

static rt_err_t utest_tc_cleanup(void)
{
    rt_mutex_detach(&_ipc_primitive);
    rt_sem_detach(&_cons_can_take_mtx);
    rt_sem_detach(&_prod_can_take_mtx);
    rt_sem_detach(&_thr_exit_sem);
    return RT_EOK;
}

static void testcase(void)
{
    UTEST_UNIT_RUN(timed_mtx_tc);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.scheduler.timed_mtx", utest_tc_init, utest_tc_cleanup, TEST_SECONDS * 2);
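For readers tracing the handshake: the mutex is guaranteed to alternate between producer and consumer because each side only attempts it after taking its own "turn" semaphore, and hands the turn to the peer before looping. A minimal sketch of that two-semaphore turn-taking pattern follows; the names and the placeholder comment are illustrative, not part of the test:

/* Sketch of the turn-taking used by the producer/consumer pair above.
 * _turn_prod starts at 1 so the producer moves first; _turn_cons starts at 0. */
static struct rt_semaphore _turn_prod; /* rt_sem_init(..., 1, RT_IPC_FLAG_PRIO) */
static struct rt_semaphore _turn_cons; /* rt_sem_init(..., 0, RT_IPC_FLAG_PRIO) */

static void _one_side(struct rt_semaphore *my_turn, struct rt_semaphore *peer_turn)
{
    rt_sem_take(my_turn, RT_WAITING_FOREVER);  /* wait until it is our turn */
    /* ... take the shared mutex, do the checked work, release it ... */
    rt_sem_release(peer_turn);                 /* hand the turn to the peer */
}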
@@ -0,0 +1,149 @@
/*
 * Copyright (c) 2006-2024, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024-01-25     Shell        init ver.
 */
#define __RT_KERNEL_SOURCE__
#include <rtthread.h>
#include <stdlib.h>
#include "utest.h"

#define TEST_SECONDS 10
#define TEST_LOOP_TICKS (TEST_SECONDS * RT_TICK_PER_SECOND)
#define TEST_PROGRESS_COUNTS (36)
#define TEST_PROGRESS_ON (TEST_LOOP_TICKS*2/TEST_PROGRESS_COUNTS)

static struct rt_semaphore _thr_exit_sem;
static struct rt_semaphore _ipc_sem;
static rt_atomic_t _progress_counter;
static rt_base_t _timedout_failed_times = 0;

/**
 * Test on timed-out IPC with a racing condition where the timeout routine and
 * the producer thread may race to wake up the sleeper.
 *
 * This test forks 2 threads, one producer and one consumer. The producer loops
 * and triggers the IPC right on the edge of a new tick's arrival. The consumer
 * waits on the IPC with a timeout of 1 tick.
 */

static void _wait_until_edge(void)
{
    rt_tick_t entry_level, current;
    rt_base_t random_latency;

    entry_level = rt_tick_get();
    do
    {
        current = rt_tick_get();
    }
    while (current == entry_level);

    /* give a random latency for test */
    random_latency = rand();
    entry_level = current;
    for (size_t i = 0; i < random_latency; i++)
    {
        current = rt_tick_get();
        if (current != entry_level)
            break;
    }
}

static void _producer_entry(void *param)
{
    for (size_t i = 0; i < TEST_LOOP_TICKS; i++)
    {
        _wait_until_edge();

        rt_sem_release(&_ipc_sem);

        if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
            uassert_true(1);
    }

    rt_sem_release(&_thr_exit_sem);
    return;
}

static void _consumer_entry(void *param)
{
    int error;
    for (size_t i = 0; i < TEST_LOOP_TICKS; i++)
    {
        error = rt_sem_take_interruptible(&_ipc_sem, 1);
        if (error == -RT_ETIMEOUT)
        {
            _timedout_failed_times++;
        }
        else
        {
            if (error != RT_EOK)
                uassert_true(0);
        }

        if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
            uassert_true(1);
    }

    rt_sem_release(&_thr_exit_sem);
    return;
}

static void timed_sem_tc(void)
{
    rt_thread_t prod = rt_thread_create(
        "prod",
        _producer_entry,
        (void *)0,
        UTEST_THR_STACK_SIZE,
        UTEST_THR_PRIORITY + 1,
        4);

    rt_thread_t cons = rt_thread_create(
        "cons",
        _consumer_entry,
        (void *)0,
        UTEST_THR_STACK_SIZE,
        UTEST_THR_PRIORITY + 1,
        100);

    rt_thread_startup(prod);
    rt_thread_startup(cons);

    for (size_t i = 0; i < 2; i++)
    {
        rt_sem_take(&_thr_exit_sem, RT_WAITING_FOREVER);
    }

    /* Summary */
    LOG_I("Total failed times: %ld(in %d)\n", _timedout_failed_times, TEST_LOOP_TICKS);
}

static rt_err_t utest_tc_init(void)
{
    int *pseed = rt_malloc(sizeof(int));
    srand(*(int *)pseed);
    rt_free(pseed);

    rt_sem_init(&_ipc_sem, "ipc", 0, RT_IPC_FLAG_PRIO);
    rt_sem_init(&_thr_exit_sem, "test", 0, RT_IPC_FLAG_PRIO);
    return RT_EOK;
}

static rt_err_t utest_tc_cleanup(void)
{
    rt_sem_detach(&_ipc_sem);
    rt_sem_detach(&_thr_exit_sem);
    return RT_EOK;
}

static void testcase(void)
{
    UTEST_UNIT_RUN(timed_sem_tc);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.scheduler.timed_sem", utest_tc_init, utest_tc_cleanup, TEST_SECONDS * 2);
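The point of the consumer loop above is that a 1-tick timed take may legitimately time out in the same tick the producer releases; the test counts those races instead of failing. In caller code the same split looks like this (a sketch using the plain rt_sem_take; the test itself uses the interruptible variant):

/* Sketch: a 1-tick timed take where timeout is an expected outcome. */
rt_err_t err = rt_sem_take(&_ipc_sem, 1);
if (err == -RT_ETIMEOUT)
{
    /* lost the race against the producer's release; count it, not fatal */
}
else if (err != RT_EOK)
{
    /* any other error is a genuine failure */
}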
@@ -22,7 +22,8 @@
 #include <rtthread.h>
 #include "utest.h"

-int recive_sig = 0;
+static volatile int recive_sig = 0;
+static struct rt_semaphore _received_signal;

 void sig_handle_default(int signo)
 {
@@ -125,12 +126,15 @@ void rt_signal_wait_thread(void *parm)
     (void)sigaddset(&selectset, SIGUSR1);

     /* case 5:rt_signal_wait, two thread, thread1: install and unmask, then wait 1s; thread2: kill, should received. */
-    if (rt_signal_wait(&selectset, &recive_si, RT_TICK_PER_SECOND) != RT_EOK)
+    if (rt_signal_wait((void *)&selectset, &recive_si, RT_TICK_PER_SECOND) != RT_EOK)
     {
         return;
     }

     recive_sig = recive_si.si_signo;
+
+    LOG_I("received signal %d", recive_sig);
+    rt_sem_release(&_received_signal);
 }

 static void rt_signal_wait_test(void)
@@ -147,7 +151,7 @@ static void rt_signal_wait_test(void)
     rt_thread_mdelay(1);
     /* case 5:rt_signal_wait, two thread, thread1: install and unmask, then wait 1s; thread2: kill, should received. */
     uassert_int_equal(rt_thread_kill(t1, SIGUSR1), RT_EOK);
-    rt_thread_mdelay(1);
+    rt_sem_take(&_received_signal, RT_WAITING_FOREVER);
     uassert_int_equal(recive_sig, SIGUSR1);

     return;
@@ -167,7 +171,9 @@ static void rt_signal_wait_test2(void)
     /* case 6:rt_signal_wait, two thread, thread1: install and unmask, then wait 1s; thread2: sleep 2s then kill, should can't received. */
     rt_thread_mdelay(2000);
     uassert_int_equal(rt_thread_kill(t1, SIGUSR1), RT_EOK);
-    rt_thread_mdelay(1);
+    uassert_int_not_equal(
+        rt_sem_take(&_received_signal, 1),
+        RT_EOK);
     uassert_int_not_equal(recive_sig, SIGUSR1);

     return;
@@ -175,11 +181,13 @@ static void rt_signal_wait_test2(void)

 static rt_err_t utest_tc_init(void)
 {
+    rt_sem_init(&_received_signal, "utest", 0, RT_IPC_FLAG_PRIO);
     return RT_EOK;
 }

 static rt_err_t utest_tc_cleanup(void)
 {
+    rt_sem_detach(&_received_signal);
     return RT_EOK;
 }
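The substance of this change is replacing fixed rt_thread_mdelay() sleeps with a semaphore handshake, so the assertions only run once the signal handler has actually published its result. Reduced to its core, the pattern is:

/* waiter: block until the event is confirmed, then assert */
rt_sem_take(&_received_signal, RT_WAITING_FOREVER);
uassert_int_equal(recive_sig, SIGUSR1);

/* signalled thread: publish the result, then wake the waiter */
recive_sig = recive_si.si_signo;
rt_sem_release(&_received_signal);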
@@ -9,6 +9,8 @@
  * 2021-10.11   mazhiyuan    add idle, yield, suspend, control, priority, delay_until
  */
+
+#define __RT_IPC_SOURCE__ /* include internal API for utest */

 #include <rtthread.h>
 #include <stdlib.h>
 #include "utest.h"
@@ -56,7 +58,7 @@ static void test_dynamic_thread(void)
                             thread1_entry,
                             (void *)1,
                             THREAD_STACK_SIZE,
-                            __current_thread->current_priority + 1,
+                            UTEST_THR_PRIORITY + 1,
                             THREAD_TIMESLICE - 5);
     if (tid1 == RT_NULL)
     {
@@ -105,7 +107,7 @@ static void test_static_thread(void)
                          (void *)2,
                          &thread2_stack[0],
                          sizeof(thread2_stack),
-                         __current_thread->current_priority + 1,
+                         UTEST_THR_PRIORITY + 1,
                          THREAD_TIMESLICE);
     if (ret_init != RT_EOK)
     {
@@ -139,10 +141,11 @@ __exit:

 static void thread3_entry(void *parameter)
 {
-    rt_tick_t tick;
+    rt_tick_t tick, latency_tick;
     tick = rt_tick_get();
     rt_thread_delay(15);
-    if (rt_tick_get() - tick > 16)
+    latency_tick = rt_tick_get() - tick;
+    if (latency_tick > 16 || latency_tick < 15)
     {
         tid3_finish_flag = 1;
         tid3_delay_pass_flag = 0;
@@ -160,7 +163,7 @@ static void test_thread_delay(void)
                          thread3_entry,
                          RT_NULL,
                          THREAD_STACK_SIZE,
-                         __current_thread->current_priority - 1,
+                         UTEST_THR_PRIORITY - 1,
                          THREAD_TIMESLICE);
     if (tid3 == RT_NULL)
     {
@@ -210,7 +213,7 @@ static void test_idle_hook(void)
                          thread4_entry,
                          RT_NULL,
                          THREAD_STACK_SIZE,
-                         __current_thread->current_priority - 1,
+                         UTEST_THR_PRIORITY - 1,
                          THREAD_TIMESLICE);
     if (tid4 == RT_NULL)
     {
@@ -264,7 +267,7 @@ static void test_thread_yield(void)
                          thread5_entry,
                          RT_NULL,
                          THREAD_STACK_SIZE,
-                         __current_thread->current_priority - 1,
+                         UTEST_THR_PRIORITY - 1,
                          THREAD_TIMESLICE);
     if (tid5 == RT_NULL)
     {
@@ -283,7 +286,7 @@ static void test_thread_yield(void)
                          thread6_entry,
                          RT_NULL,
                          THREAD_STACK_SIZE,
-                         __current_thread->current_priority - 1,
+                         UTEST_THR_PRIORITY - 1,
                          THREAD_TIMESLICE);
     if (tid6 == RT_NULL)
     {
@@ -319,12 +322,13 @@ static void test_thread_control(void)
 {
     rt_err_t ret_control = -RT_ERROR;
     rt_err_t rst_delete = -RT_ERROR;
+    rt_sched_lock_level_t slvl;

     tid7 = rt_thread_create("thread7",
                             thread7_entry,
                             RT_NULL,
                             THREAD_STACK_SIZE,
-                            __current_thread->current_priority + 1,
+                            UTEST_THR_PRIORITY + 1,
                             THREAD_TIMESLICE);
     if (tid7 == RT_NULL)
     {
@@ -342,12 +346,17 @@ static void test_thread_control(void)
     }
     rt_thread_mdelay(200);
     rt_thread_control(tid7, RT_THREAD_CTRL_CHANGE_PRIORITY, &change_priority);
-    if (tid7->current_priority != change_priority)
+
+    rt_sched_lock(&slvl);
+    if (rt_sched_thread_get_curr_prio(tid7) != change_priority)
     {
         LOG_E("rt_thread_control failed!");
         uassert_false(1);
+        rt_sched_unlock(slvl);
         goto __exit;
     }
+    rt_sched_unlock(slvl);
+
     rst_delete = rt_thread_control(tid7, RT_THREAD_CTRL_CLOSE, RT_NULL);
     if (rst_delete != RT_EOK)
     {
@@ -380,7 +389,7 @@ static void test_thread_priority(void)
                          thread8_entry,
                          RT_NULL,
                          THREAD_STACK_SIZE,
-                         __current_thread->current_priority - 1,
+                         UTEST_THR_PRIORITY - 1,
                          THREAD_TIMESLICE);
     if (tid8 == RT_NULL)
     {
@@ -448,6 +457,10 @@ static void test_delay_until(void)
     rt_kprintf("delta[20] -> %d\n", delta);
     uassert_int_equal(delta, 20);

+    /**
+     * the rt_kprintf calls above can take a few ticks to complete, maybe more than 10
+     */
+    tick = rt_tick_get();
     check_tick = tick;
     rt_thread_delay(2);
     rt_thread_delay_until(&tick, 10);
@@ -495,7 +508,7 @@ void test_timeslice(void)
     timeslice_cntB2 = 0;

     tidA = rt_thread_create("timeslice", test_timeslice_threadA_entry, RT_NULL,
-                            2048, __current_thread->current_priority + 1, 10);
+                            2048, UTEST_THR_PRIORITY + 1, 10);
     if (!tidA)
     {
         LOG_E("rt_thread_create failed!");
@@ -512,7 +525,7 @@ void test_timeslice(void)
     }

     tidB1 = rt_thread_create("timeslice", test_timeslice_threadB1_entry, RT_NULL,
-                             2048, __current_thread->current_priority + 2, 2);
+                             2048, UTEST_THR_PRIORITY + 2, 2);
     if (!tidB1)
     {
         LOG_E("rt_thread_create failed!");
@@ -529,7 +542,7 @@ void test_timeslice(void)
     }

     tidB2 = rt_thread_create("timeslice", test_timeslice_threadB2_entry, RT_NULL,
-                             2048, __current_thread->current_priority + 2, 2);
+                             2048, UTEST_THR_PRIORITY + 2, 2);
     if (!tidB2)
     {
         LOG_E("rt_thread_create failed!");
@@ -655,7 +668,7 @@ void test_thread_yield_nosmp(void)
 //                        thread9_entry,
 //                        RT_NULL,
 //                        THREAD_STACK_SIZE,
-//                        __current_thread->current_priority + 1,
+//                        UTEST_THR_PRIORITY + 1,
 //                        THREAD_TIMESLICE);
 //    if (tid == RT_NULL)
 //    {
@@ -695,7 +708,7 @@ void test_thread_yield_nosmp(void)
 static rt_err_t utest_tc_init(void)
 {
     __current_thread = rt_thread_self();
-    change_priority = __current_thread->current_priority + 5;
+    change_priority = UTEST_THR_PRIORITY + 5;
     tid3_delay_pass_flag = 0;
     tid3_finish_flag = 0;
     tid4_finish_flag = 0;
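A recurring change in this test: with the scheduler rework, another thread's scheduling fields may no longer be read through the struct directly; the read is now bracketed by the scheduler lock. A sketch of the adopted pattern (the getter is internal API, visible to the test because it defines __RT_IPC_SOURCE__):

/* Sketch: reading another thread's current priority under the scheduler lock */
rt_sched_lock_level_t slvl;
rt_uint8_t prio;

rt_sched_lock(&slvl);                       /* pin the scheduler state */
prio = rt_sched_thread_get_curr_prio(tid7);
rt_sched_unlock(slvl);                      /* always pair the unlock */

if (prio != change_priority)
{
    /* handle the mismatch outside the locked region */
}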
@@ -0,0 +1,100 @@
/*
 * Copyright (c) 2006-2024, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024-01-18     Shell        Separate the compiler porting from rtdef.h
 */
#ifndef __RT_COMPILER_H__
#define __RT_COMPILER_H__

#include <rtconfig.h>

#if defined(__ARMCC_VERSION)            /* ARM Compiler */
#define rt_section(x)               __attribute__((section(x)))
#define rt_used                     __attribute__((used))
#define rt_align(n)                 __attribute__((aligned(n)))
#define rt_weak                     __attribute__((weak))
#define rt_typeof                   typeof
#define rt_noreturn
#define rt_inline                   static __inline
#define rt_always_inline            rt_inline
#elif defined (__IAR_SYSTEMS_ICC__)     /* for IAR Compiler */
#define rt_section(x)               @ x
#define rt_used                     __root
#define PRAGMA(x)                   _Pragma(#x)
#define rt_align(n)                 PRAGMA(data_alignment=n)
#define rt_weak                     __weak
#define rt_typeof                   typeof
#define rt_noreturn
#define rt_inline                   static inline
#define rt_always_inline            rt_inline
#elif defined (__GNUC__)                /* GNU GCC Compiler */
#define __RT_STRINGIFY(x...)        #x
#define RT_STRINGIFY(x...)          __RT_STRINGIFY(x)
#define rt_section(x)               __attribute__((section(x)))
#define rt_used                     __attribute__((used))
#define rt_align(n)                 __attribute__((aligned(n)))
#define rt_weak                     __attribute__((weak))
#define rt_typeof                   __typeof__
#define rt_noreturn                 __attribute__ ((noreturn))
#define rt_inline                   static __inline
#define rt_always_inline            static inline __attribute__((always_inline))
#elif defined (__ADSPBLACKFIN__)        /* for VisualDSP++ Compiler */
#define rt_section(x)               __attribute__((section(x)))
#define rt_used                     __attribute__((used))
#define rt_align(n)                 __attribute__((aligned(n)))
#define rt_weak                     __attribute__((weak))
#define rt_typeof                   typeof
#define rt_noreturn
#define rt_inline                   static inline
#define rt_always_inline            rt_inline
#elif defined (_MSC_VER)                /* for Visual Studio Compiler */
#define rt_section(x)
#define rt_used
#define rt_align(n)                 __declspec(align(n))
#define rt_weak
#define rt_typeof                   typeof
#define rt_noreturn
#define rt_inline                   static __inline
#define rt_always_inline            rt_inline
#elif defined (__TI_COMPILER_VERSION__) /* for TI CCS Compiler */
/**
 * The way that the TI compiler sets a section is different from other (at least
 * GCC and MDK) compilers. See ARM Optimizing C/C++ Compiler 5.9.3 for more
 * details.
 */
#define rt_section(x)               __attribute__((section(x)))
#ifdef __TI_EABI__
#define rt_used                     __attribute__((retain)) __attribute__((used))
#else
#define rt_used                     __attribute__((used))
#endif
#define PRAGMA(x)                   _Pragma(#x)
#define rt_align(n)                 __attribute__((aligned(n)))
#ifdef __TI_EABI__
#define rt_weak                     __attribute__((weak))
#else
#define rt_weak
#endif
#define rt_typeof                   typeof
#define rt_noreturn
#define rt_inline                   static inline
#define rt_always_inline            rt_inline
#elif defined (__TASKING__)             /* for TASKING Compiler */
#define rt_section(x)               __attribute__((section(x)))
#define rt_used                     __attribute__((used, protect))
#define PRAGMA(x)                   _Pragma(#x)
#define rt_align(n)                 __attribute__((__align(n)))
#define rt_weak                     __attribute__((weak))
#define rt_typeof                   typeof
#define rt_noreturn
#define rt_inline                   static inline
#define rt_always_inline            rt_inline
#else                                   /* Unknown Compiler */
#error not supported tool chain
#endif /* __ARMCC_VERSION */

#endif /* __RT_COMPILER_H__ */
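These macros give one portable spelling for the common compiler attributes. A brief usage sketch (identifiers here are illustrative, not from the tree):

/* keep the table even when nothing references it directly,
 * and place it in a custom linker section */
rt_used static const int _param_tab[4] rt_section(".param_tab") = {1, 2, 3, 4};

/* a weak default that a BSP can override with its own definition */
rt_weak void _board_extra_init(void)
{
}

/* a helper the compiler is asked to always inline */
rt_always_inline int _square(int x) { return x * x; }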
include/rtdef.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2022, RT-Thread Development Team
+ * Copyright (c) 2006-2024, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -55,6 +55,8 @@
  * 2023-11-21     Meco Man     add RT_USING_NANO macro
  * 2023-12-18     xqyjlj       add rt_always_inline
  * 2023-12-22     Shell        Support hook list
+ * 2024-01-18     Shell        Separate basic types to a rttypes.h
+ *                             Separate the compiler portings to rtcompiler.h
  */

 #ifndef __RT_DEF_H__
@@ -96,71 +98,7 @@ extern "C" {

 /* RT-Thread basic data type definitions */
-typedef int                             rt_bool_t;      /**< boolean type */
-typedef signed long                     rt_base_t;      /**< Nbit CPU related data type */
-typedef unsigned long                   rt_ubase_t;     /**< Nbit unsigned CPU related data type */
-
-#ifndef RT_USING_ARCH_DATA_TYPE
-#ifdef RT_USING_LIBC
-typedef int8_t                          rt_int8_t;      /**< 8bit integer type */
-typedef int16_t                         rt_int16_t;     /**< 16bit integer type */
-typedef int32_t                         rt_int32_t;     /**< 32bit integer type */
-typedef uint8_t                         rt_uint8_t;     /**< 8bit unsigned integer type */
-typedef uint16_t                        rt_uint16_t;    /**< 16bit unsigned integer type */
-typedef uint32_t                        rt_uint32_t;    /**< 32bit unsigned integer type */
-typedef int64_t                         rt_int64_t;     /**< 64bit integer type */
-typedef uint64_t                        rt_uint64_t;    /**< 64bit unsigned integer type */
-#else
-typedef signed   char                   rt_int8_t;      /**< 8bit integer type */
-typedef signed   short                  rt_int16_t;     /**< 16bit integer type */
-typedef signed   int                    rt_int32_t;     /**< 32bit integer type */
-typedef unsigned char                   rt_uint8_t;     /**< 8bit unsigned integer type */
-typedef unsigned short                  rt_uint16_t;    /**< 16bit unsigned integer type */
-typedef unsigned int                    rt_uint32_t;    /**< 32bit unsigned integer type */
-#ifdef ARCH_CPU_64BIT
-typedef signed long                     rt_int64_t;     /**< 64bit integer type */
-typedef unsigned long                   rt_uint64_t;    /**< 64bit unsigned integer type */
-#else
-typedef signed long long                rt_int64_t;     /**< 64bit integer type */
-typedef unsigned long long              rt_uint64_t;    /**< 64bit unsigned integer type */
-#endif /* ARCH_CPU_64BIT */
-#endif /* RT_USING_LIBC */
-#endif /* RT_USING_ARCH_DATA_TYPE */
-
-#if defined(RT_USING_LIBC) && !defined(RT_USING_NANO)
-typedef size_t                          rt_size_t;      /**< Type for size number */
-typedef ssize_t                         rt_ssize_t;     /**< Used for a count of bytes or an error indication */
-#else
-typedef rt_ubase_t                      rt_size_t;      /**< Type for size number */
-typedef rt_base_t                       rt_ssize_t;     /**< Used for a count of bytes or an error indication */
-#endif /* defined(RT_USING_LIBC) && !defined(RT_USING_NANO) */
-
-typedef rt_base_t                       rt_err_t;       /**< Type for error number */
-typedef rt_uint32_t                     rt_time_t;      /**< Type for time stamp */
-typedef rt_uint32_t                     rt_tick_t;      /**< Type for tick count */
-typedef rt_base_t                       rt_flag_t;      /**< Type for flags */
-typedef rt_ubase_t                      rt_dev_t;       /**< Type for device */
-typedef rt_base_t                       rt_off_t;       /**< Type for offset */
-
-#ifdef __cplusplus
-typedef rt_base_t                       rt_atomic_t;
-#else
-#if defined(RT_USING_HW_ATOMIC)
-typedef rt_base_t                       rt_atomic_t;
-#elif defined(RT_USING_STDC_ATOMIC)
-#include <stdatomic.h>
-typedef atomic_size_t                   rt_atomic_t;
-#else
-typedef rt_base_t                       rt_atomic_t;
-#endif /* RT_USING_STDC_ATOMIC */
-#endif /* __cplusplus */
-
-/* boolean type definitions */
-#define RT_TRUE                         1               /**< boolean true  */
-#define RT_FALSE                        0               /**< boolean fails */
-
-/* null pointer definition */
-#define RT_NULL                         0
+#include "rttypes.h"

 /**@}*/
@@ -194,90 +132,7 @@ typedef rt_base_t rt_off_t; /**< Type for offset */
 #define RT_STATIC_ASSERT(name, expn) typedef char _static_assert_##name[(expn)?1:-1]

 /* Compiler Related Definitions */
-#if defined(__ARMCC_VERSION)            /* ARM Compiler */
-#define rt_section(x)               __attribute__((section(x)))
-#define rt_used                     __attribute__((used))
-#define rt_align(n)                 __attribute__((aligned(n)))
-#define rt_weak                     __attribute__((weak))
-#define rt_typeof                   typeof
-#define rt_noreturn
-#define rt_inline                   static __inline
-#define rt_always_inline            rt_inline
-#elif defined (__IAR_SYSTEMS_ICC__)     /* for IAR Compiler */
-#define rt_section(x)               @ x
-#define rt_used                     __root
-#define PRAGMA(x)                   _Pragma(#x)
-#define rt_align(n)                 PRAGMA(data_alignment=n)
-#define rt_weak                     __weak
-#define rt_typeof                   typeof
-#define rt_noreturn
-#define rt_inline                   static inline
-#define rt_always_inline            rt_inline
-#elif defined (__GNUC__)                /* GNU GCC Compiler */
-#define __RT_STRINGIFY(x...)        #x
-#define RT_STRINGIFY(x...)          __RT_STRINGIFY(x)
-#define rt_section(x)               __attribute__((section(x)))
-#define rt_used                     __attribute__((used))
-#define rt_align(n)                 __attribute__((aligned(n)))
-#define rt_weak                     __attribute__((weak))
-#define rt_typeof                   __typeof__
-#define rt_noreturn                 __attribute__ ((noreturn))
-#define rt_inline                   static __inline
-#define rt_always_inline            static inline __attribute__((always_inline))
-#elif defined (__ADSPBLACKFIN__)        /* for VisualDSP++ Compiler */
-#define rt_section(x)               __attribute__((section(x)))
-#define rt_used                     __attribute__((used))
-#define rt_align(n)                 __attribute__((aligned(n)))
-#define rt_weak                     __attribute__((weak))
-#define rt_typeof                   typeof
-#define rt_noreturn
-#define rt_inline                   static inline
-#define rt_always_inline            rt_inline
-#elif defined (_MSC_VER)                /* for Visual Studio Compiler */
-#define rt_section(x)
-#define rt_used
-#define rt_align(n)                 __declspec(align(n))
-#define rt_weak
-#define rt_typeof                   typeof
-#define rt_noreturn
-#define rt_inline                   static __inline
-#define rt_always_inline            rt_inline
-#elif defined (__TI_COMPILER_VERSION__) /* for TI CCS Compiler */
-/**
- * The way that the TI compiler sets a section is different from other (at least
- * GCC and MDK) compilers. See ARM Optimizing C/C++ Compiler 5.9.3 for more
- * details.
- */
-#define rt_section(x)               __attribute__((section(x)))
-#ifdef __TI_EABI__
-#define rt_used                     __attribute__((retain)) __attribute__((used))
-#else
-#define rt_used                     __attribute__((used))
-#endif
-#define PRAGMA(x)                   _Pragma(#x)
-#define rt_align(n)                 __attribute__((aligned(n)))
-#ifdef __TI_EABI__
-#define rt_weak                     __attribute__((weak))
-#else
-#define rt_weak
-#endif
-#define rt_typeof                   typeof
-#define rt_noreturn
-#define rt_inline                   static inline
-#define rt_always_inline            rt_inline
-#elif defined (__TASKING__)             /* for TASKING Compiler */
-#define rt_section(x)               __attribute__((section(x)))
-#define rt_used                     __attribute__((used, protect))
-#define PRAGMA(x)                   _Pragma(#x)
-#define rt_align(n)                 __attribute__((__align(n)))
-#define rt_weak                     __attribute__((weak))
-#define rt_typeof                   typeof
-#define rt_noreturn
-#define rt_inline                   static inline
-#define rt_always_inline            rt_inline
-#else                                   /* Unknown Compiler */
-#error not supported tool chain
-#endif /* __ARMCC_VERSION */
+#include "rtcompiler.h"

 /* initialization export */
 #ifdef RT_USING_COMPONENTS_INIT
@@ -417,6 +272,8 @@ typedef int (*init_fn_t)(void);
 #define RT_EPERM                        EPERM           /**< Operation not permitted */
 #define RT_EFAULT                       EFAULT          /**< Bad address */
 #define RT_ENOBUFS                      ENOBUFS         /**< No buffer space is available */
+#define RT_ESCHEDISR                    253             /**< scheduler failure in isr context */
+#define RT_ESCHEDLOCKED                 252             /**< scheduler failure in critical region */
 #define RT_ETRAP                        254             /**< Trap event */
 #else
 #define RT_EOK                          0               /**< There is no error */
@@ -436,6 +293,8 @@
 #define RT_ETRAP                        14              /**< Trap event */
 #define RT_EFAULT                       15              /**< Bad address */
 #define RT_ENOBUFS                      16              /**< No buffer space is available */
+#define RT_ESCHEDISR                    17              /**< scheduler failure in isr context */
+#define RT_ESCHEDLOCKED                 18              /**< scheduler failure in critical region */
 #endif /* defined(RT_USING_LIBC) && !defined(RT_USING_NANO) */

 /**@}*/
@@ -469,53 +328,6 @@
  */
 #define RT_ALIGN_DOWN(size, align) ((size) & ~((align) - 1))

-/**
- * Double List structure
- */
-struct rt_list_node
-{
-    struct rt_list_node *next;          /**< point to next node. */
-    struct rt_list_node *prev;          /**< point to prev node. */
-};
-typedef struct rt_list_node rt_list_t;  /**< Type for lists. */
-
-/**
- * Single List structure
- */
-struct rt_slist_node
-{
-    struct rt_slist_node *next;         /**< point to next node. */
-};
-typedef struct rt_slist_node rt_slist_t; /**< Type for single list. */
-
-#ifdef RT_USING_SMP
-#include <cpuport.h> /* for spinlock from arch */
-
-struct rt_spinlock
-{
-    rt_hw_spinlock_t lock;
-#if defined(RT_DEBUGING_SPINLOCK)
-    void *owner;
-    void *pc;
-#endif /* RT_DEBUGING_SPINLOCK */
-};
-typedef struct rt_spinlock rt_spinlock_t;
-
-#ifndef RT_SPINLOCK_INIT
-#define RT_SPINLOCK_INIT {{0}} // default
-#endif /* RT_SPINLOCK_INIT */
-
-#else
-typedef rt_ubase_t rt_spinlock_t;
-struct rt_spinlock
-{
-    rt_spinlock_t lock;
-};
-#define RT_SPINLOCK_INIT {0}
-#endif /* RT_USING_SMP */
-
-#define RT_DEFINE_SPINLOCK(x)  struct rt_spinlock x = RT_SPINLOCK_INIT
-
 /**
  * @addtogroup KernelObject
  */
@@ -770,6 +582,8 @@ struct rt_object_information

 #define RT_TIMER_FLAG_HARD_TIMER        0x0             /**< hard timer,the timer's callback function will be called in tick isr. */
 #define RT_TIMER_FLAG_SOFT_TIMER        0x4             /**< soft timer,the timer's callback function will be called in timer thread. */
+#define RT_TIMER_FLAG_THREAD_TIMER \
+    (0x8 | RT_TIMER_FLAG_HARD_TIMER)                    /**< thread timer that cooperates with scheduler directly */

 #define RT_TIMER_CTRL_SET_TIME          0x0             /**< set timer control command */
 #define RT_TIMER_CTRL_GET_TIME          0x1             /**< get timer control command */
@@ -791,6 +605,11 @@
 #define RT_TIMER_SKIP_LIST_MASK         0x3             /**< Timer skips the list mask */
 #endif

+/**
+ * timeout handler of rt_timer
+ */
+typedef void (*rt_timer_func_t)(void *parameter);
+
 /**
  * timer structure
  */
@@ -800,7 +619,7 @@ struct rt_timer
     rt_list_t        row[RT_TIMER_SKIP_LIST_LEVEL];

-    void (*timeout_func)(void *parameter);              /**< timeout function */
+    rt_timer_func_t  timeout_func;                      /**< timeout function */
     void            *parameter;                         /**< timeout function's parameter */

     rt_tick_t        init_tick;                         /**< timer timeout tick */
@@ -901,16 +720,25 @@ struct rt_cpu_usage_stats
 };
 typedef struct rt_cpu_usage_stats *rt_cpu_usage_stats_t;

+#define _SCHEDULER_CONTEXT(fileds) fileds
+
 /**
  * CPUs definitions
  *
  */
 struct rt_cpu
 {
+    /**
+     * protected by:
+     * - other cores: accessing from other cores is undefined behaviour
+     * - local core: rt_enter_critical()/rt_exit_critical()
+     */
+    _SCHEDULER_CONTEXT(
     struct rt_thread *current_thread;
-    struct rt_thread *idle_thread;
-    rt_atomic_t irq_nest;
-    rt_uint8_t irq_switch_flag;
+    rt_uint8_t irq_switch_flag:1;
+    rt_uint8_t critical_switch_flag:1;
+    rt_uint8_t sched_lock_flag:1;

     rt_uint8_t current_priority;
     rt_list_t priority_table[RT_THREAD_PRIORITY_MAX];
@@ -921,10 +749,14 @@ struct rt_cpu
     rt_uint32_t priority_group;
 #endif /* RT_THREAD_PRIORITY_MAX > 32 */

-    rt_atomic_t tick;
+    rt_atomic_t tick;                       /**< Passing ticks on this core */
+    );
+
+    struct rt_thread *idle_thread;
+    rt_atomic_t irq_nest;

-    struct rt_spinlock spinlock;
 #ifdef RT_USING_SMART
+    struct rt_spinlock spinlock;
     struct rt_cpu_usage_stats cpu_stat;
 #endif
 };
@@ -1013,11 +845,12 @@ typedef void (*rt_thread_cleanup_t)(struct rt_thread *tid);
 /**
  * Thread structure
  */
+
+#include "rtsched.h" /* for struct rt_sched_thread_ctx */
+
 struct rt_thread
 {
     struct rt_object parent;
-    rt_list_t   tlist;                      /**< the thread list */
-    rt_list_t   tlist_schedule;             /**< the thread list */

     /* stack point and entry */
     void       *sp;                         /**< stack point */
@@ -1029,24 +862,13 @@ struct rt_thread
     /* error code */
     rt_err_t    error;                      /**< error code */

-    rt_uint8_t  stat;                       /**< thread status */
-
 #ifdef RT_USING_SMP
-    rt_uint8_t  bind_cpu;                   /**< thread is bind to cpu */
-    rt_uint8_t  oncpu;                      /**< process on cpu */
-
     rt_atomic_t cpus_lock_nest;             /**< cpus lock count */
-    rt_atomic_t critical_lock_nest;         /**< critical lock count */
-#endif /*RT_USING_SMP*/
+#endif

-    /* priority */
-    rt_uint8_t  current_priority;           /**< current priority */
-    rt_uint8_t  init_priority;              /**< initialized priority */
-#if RT_THREAD_PRIORITY_MAX > 32
-    rt_uint8_t  number;
-    rt_uint8_t  high_mask;
-#endif /* RT_THREAD_PRIORITY_MAX > 32 */
-    rt_uint32_t number_mask;                /**< priority number mask */
+    RT_SCHED_THREAD_CTX;
+    struct rt_timer thread_timer;           /**< built-in thread timer */
+    rt_thread_cleanup_t cleanup;            /**< cleanup function when thread exit */

 #ifdef RT_USING_MUTEX
     /* object for IPC */
@@ -1071,9 +893,6 @@ struct rt_thread
     void *si_list;                          /**< the signal infor list */
 #endif /* RT_USING_SIGNALS */

-    rt_atomic_t init_tick;                  /**< thread's initialized tick */
-    rt_atomic_t remaining_tick;             /**< remaining tick */
-
 #ifdef RT_USING_CPU_USAGE
     rt_uint64_t duration_tick;              /**< cpu usage tick */
 #endif /* RT_USING_CPU_USAGE */
@@ -1082,10 +901,6 @@ struct rt_thread
     void *pthread_data;                     /**< the handle of pthread data, adapt 32/64bit */
 #endif /* RT_USING_PTHREADS */

-    struct rt_timer thread_timer;           /**< built-in thread timer */
-
-    rt_thread_cleanup_t cleanup;            /**< cleanup function when thread exit */
-
     /* light weight process if present */
 #ifdef RT_USING_SMART
     void *msg_ret;                          /**< the return msg */
@@ -1100,11 +915,12 @@ struct rt_thread
     struct lwp_thread_signal signal;        /**< lwp signal for user-space thread */
     struct rt_user_context user_ctx;        /**< user space context */
-    struct rt_wakeup wakeup;                /**< wakeup data */
-    int exit_request;                       /**< pending exit request of thread */
+    struct rt_wakeup wakeup_handle;         /**< wakeup handle for IPC */
+    rt_atomic_t exit_request;               /**< pending exit request of thread */
     int tid;                                /**< thread ID used by process */
     int tid_ref_count;                      /**< reference of tid */
     void *susp_recycler;                    /**< suspended recycler on this thread */
+    void *robust_list;                      /**< pi lock, very carefully, it's a userspace list!*/

     rt_uint64_t user_time;
     rt_uint64_t system_time;
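One functional addition in this file is the named callback type rt_timer_func_t (alongside the new RT_TIMER_FLAG_THREAD_TIMER flag). A sketch of a callback written against the typedef; the timer name, period, and function names are illustrative:

/* Sketch: a timeout callback matching the new rt_timer_func_t typedef */
static void _demo_timeout(void *parameter)
{
    rt_kprintf("timer fired, arg=%p\n", parameter);
}

static struct rt_timer _demo_timer;

static void _demo_timer_setup(void)
{
    /* periodic soft timer: the callback runs in the timer thread,
     * not in tick ISR context */
    rt_timer_init(&_demo_timer, "demo", _demo_timeout, RT_NULL,
                  RT_TICK_PER_SECOND,
                  RT_TIMER_FLAG_PERIODIC | RT_TIMER_FLAG_SOFT_TIMER);
    rt_timer_start(&_demo_timer);
}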
|
@ -0,0 +1,172 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2023-2024, RT-Thread Development Team
|
||||||
|
*
|
||||||
|
* SPDX-License-Identifier: Apache-2.0
|
||||||
|
*
|
||||||
|
* Change Logs:
|
||||||
|
* Date Author Notes
|
||||||
|
* 2024-01-19 Shell Seperate schduling statements from rt_thread_t
|
||||||
|
* to rt_sched_thread_ctx. Add definitions of scheduler.
|
||||||
|
*/
|
||||||
|
#ifndef __RT_SCHED_H__
|
||||||
|
#define __RT_SCHED_H__
|
||||||
|
|
||||||
|
#include "rttypes.h"
|
||||||
|
#include "rtcompiler.h"
|
||||||
|
|
||||||
|
struct rt_thread;
|
||||||
|
|
||||||
|
typedef rt_uint8_t rt_sched_thread_status_t;
|
||||||
|
|
||||||
|
#ifdef RT_USING_SCHED_THREAD_CTX
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Scheduler private status binding on thread. Caller should never accessing
|
||||||
|
* these members.
|
||||||
|
*/
|
||||||
|
struct rt_sched_thread_priv
|
||||||
|
{
|
||||||
|
rt_tick_t init_tick; /**< thread's initialized tick */
|
||||||
|
rt_tick_t remaining_tick; /**< remaining tick */
|
||||||
|
|
||||||
|
/* priority */
|
||||||
|
rt_uint8_t current_priority; /**< current priority */
|
||||||
|
rt_uint8_t init_priority; /**< initialized priority */
|
||||||
|
#if RT_THREAD_PRIORITY_MAX > 32
|
||||||
|
rt_uint8_t number; /**< priority low number */
|
||||||
|
rt_uint8_t high_mask; /**< priority high mask */
|
||||||
|
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
|
||||||
|
rt_uint32_t number_mask; /**< priority number mask */
|
||||||
|
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Scheduler public status binding on thread. Caller must hold the scheduler
|
||||||
|
* lock before access any one of its member.
|
||||||
|
*/
|
||||||
|
struct rt_sched_thread_ctx
|
||||||
|
{
|
||||||
|
rt_list_t thread_list_node; /**< node in thread list */
|
||||||
|
|
||||||
|
rt_uint8_t stat; /**< thread status */
|
||||||
|
rt_uint8_t sched_flag_locked:1; /**< calling thread have the scheduler locked */
|
||||||
|
rt_uint8_t sched_flag_ttmr_set:1; /**< thread timer is start */
|
||||||
|
|
||||||
|
#ifdef RT_USING_SMP
|
||||||
|
rt_uint8_t bind_cpu; /**< thread is bind to cpu */
|
||||||
|
rt_uint8_t oncpu; /**< process on cpu */
|
||||||
|
|
||||||
|
rt_base_t critical_lock_nest; /**< critical lock count */
|
||||||
|
#endif
|
||||||
|
|
||||||
|
struct rt_sched_thread_priv sched_thread_priv; /**< private context of scheduler */
|
||||||
|
};
|
||||||
|
|
||||||
|
#define RT_SCHED_THREAD_CTX struct rt_sched_thread_ctx sched_thread_ctx
|
||||||
|
|
||||||
|
#define RT_SCHED_PRIV(thread) ((thread)->sched_thread_ctx.sched_thread_priv)
|
||||||
|
#define RT_SCHED_CTX(thread) ((thread)->sched_thread_ctx)
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Convert a list node in container RT_SCHED_CTX(thread)->thread_list_node
|
||||||
|
* to a thread pointer.
|
||||||
|
*/
|
||||||
|
#define RT_THREAD_LIST_NODE_ENTRY(node) \
|
||||||
|
rt_container_of( \
|
||||||
|
rt_list_entry((node), struct rt_sched_thread_ctx, thread_list_node), \
|
||||||
|
struct rt_thread, sched_thread_ctx)
|
||||||
|
#define RT_THREAD_LIST_NODE(thread) (RT_SCHED_CTX(thread).thread_list_node)
|
||||||
|
|
||||||
|
#else /* !defined(RT_USING_SCHED_THREAD_CTX) */
|
||||||
|
|
||||||
|
#if RT_THREAD_PRIORITY_MAX > 32
|
||||||
|
#define _RT_SCHED_THREAD_CTX_PRIO_EXT \
|
||||||
|
rt_uint8_t number; /**< priority low number */ \
|
||||||
|
rt_uint8_t high_mask; /**< priority high mask */
|
||||||
|
|
||||||
|
#else /* ! RT_THREAD_PRIORITY_MAX > 32 */
|
||||||
|
|
||||||
|
#define _RT_SCHED_THREAD_CTX_PRIO_EXT
|
||||||
|
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
|
||||||
|
|
||||||
|
#define RT_SCHED_THREAD_CTX \
|
||||||
|
rt_list_t tlist; /**< node in thread list */ \
|
||||||
|
rt_uint8_t stat; /**< thread status */ \
|
||||||
|
rt_uint8_t sched_flag_locked:1; \
|
||||||
|
/**< calling thread have the scheduler locked */ \
|
||||||
|
rt_uint8_t sched_flag_ttmr_set:1; /**< thread timer is start */ \
|
||||||
|
rt_tick_t init_tick; /**< thread's initialized tick */ \
|
||||||
|
rt_tick_t remaining_tick; /**< remaining tick */ \
|
||||||
|
rt_uint8_t current_priority; /**< current priority */ \
|
||||||
|
rt_uint8_t init_priority; /**< initialized priority */ \
|
||||||
|
_RT_SCHED_THREAD_CTX_PRIO_EXT; \
|
||||||
|
rt_uint32_t number_mask; /**< priority number mask */
|
||||||
|
|
||||||
|
#define RT_SCHED_PRIV(thread) (*thread)
|
||||||
|
#define RT_SCHED_CTX(thread) (*thread)
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Convert a list node in container RT_SCHED_CTX(thread)->thread_list_node
|
||||||
|
* to a thread pointer.
|
||||||
|
*/
|
||||||
|
#define RT_THREAD_LIST_NODE_ENTRY(node) rt_list_entry((node), struct rt_thread, tlist)
|
||||||
|
#define RT_THREAD_LIST_NODE(thread) (RT_SCHED_CTX(thread).tlist)
|
||||||
|
|
||||||
|
#endif /* RT_USING_SCHED_THREAD_CTX */
|
||||||
|
|
||||||
|
/**
|
||||||
|
* System Scheduler Locking
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef rt_ubase_t rt_sched_lock_level_t;
|
||||||
|
|
||||||
|
rt_err_t rt_sched_lock(rt_sched_lock_level_t *plvl);
|
||||||
|
rt_err_t rt_sched_unlock(rt_sched_lock_level_t level);
|
||||||
|
rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level);
|
||||||
|
|
||||||
|
rt_bool_t rt_sched_is_locked(void);
|
||||||
|
|
||||||
|
#ifdef RT_USING_SMP
|
||||||
|
#define RT_SCHED_DEBUG_IS_LOCKED do { RT_ASSERT(rt_sched_is_locked()); } while (0)
|
||||||
|
#define RT_SCHED_DEBUG_IS_UNLOCKED do { RT_ASSERT(!rt_sched_is_locked()); } while (0)
|
||||||
|
|
||||||
|
#else /* !RT_USING_SMP */
|
||||||
|
|
||||||
|
#define RT_SCHED_DEBUG_IS_LOCKED
|
||||||
|
#define RT_SCHED_DEBUG_IS_UNLOCKED
|
||||||
|
#endif /* RT_USING_SMP */
|
||||||
|
|
||||||
|
/**
|
||||||
|
* NOTE: user should NEVER use these APIs directly. See rt_thread_.* or IPC
|
||||||
|
* methods instead.
|
||||||
|
*/
|
||||||
|
#if defined(__RT_KERNEL_SOURCE__) || defined(__RT_IPC_SOURCE__)
|
||||||
|
|
||||||
|
/* thread initialization and startup routine */
|
||||||
|
void rt_sched_thread_init_ctx(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority);
|
||||||
|
void rt_sched_thread_init_priv(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority);
|
||||||
|
void rt_sched_thread_startup(struct rt_thread *thread);
|
||||||
|
|
||||||
|
/* scheduler related routine */
|
||||||
|
void rt_sched_post_ctx_switch(struct rt_thread *thread);
|
||||||
|
rt_err_t rt_sched_tick_increase(void);
|
||||||
|
|
||||||
|
/* thread status operation */
|
||||||
|
rt_uint8_t rt_sched_thread_get_stat(struct rt_thread *thread);
|
||||||
|
rt_uint8_t rt_sched_thread_get_curr_prio(struct rt_thread *thread);
|
||||||
|
rt_uint8_t rt_sched_thread_get_init_prio(struct rt_thread *thread);
|
||||||
|
rt_err_t rt_sched_thread_yield(struct rt_thread *thread);
|
||||||
|
rt_err_t rt_sched_thread_close(struct rt_thread *thread);
|
||||||
|
rt_err_t rt_sched_thread_ready(struct rt_thread *thread);
|
||||||
|
rt_err_t rt_sched_thread_suspend(struct rt_thread *thread, rt_sched_lock_level_t level);
|
||||||
|
rt_err_t rt_sched_thread_change_priority(struct rt_thread *thread, rt_uint8_t priority);
|
||||||
|
rt_err_t rt_sched_thread_bind_cpu(struct rt_thread *thread, int cpu);
|
||||||
|
rt_uint8_t rt_sched_thread_is_suspended(struct rt_thread *thread);
|
||||||
|
rt_err_t rt_sched_thread_timer_stop(struct rt_thread *thread);
|
||||||
|
rt_err_t rt_sched_thread_timer_start(struct rt_thread *thread);
|
||||||
|
void rt_sched_insert_thread(struct rt_thread *thread);
|
||||||
|
void rt_sched_remove_thread(struct rt_thread *thread);
|
||||||
|
|
||||||
|
#endif /* defined(__RT_KERNEL_SOURCE__) || defined(__RT_IPC_SOURCE__) */
|
||||||
|
|
||||||
|
#endif /* __RT_SCHED_H__ */
|
|
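RT_THREAD_LIST_NODE_ENTRY() hides whether the list node lives directly in struct rt_thread (tlist) or inside the embedded sched_thread_ctx. Conceptually, the RT_USING_SCHED_THREAD_CTX variant unrolls to two container-of steps (a sketch; 'node' is assumed to point at a thread_list_node):

/* step 1: recover the scheduling context that embeds the node */
struct rt_sched_thread_ctx *ctx =
    rt_list_entry(node, struct rt_sched_thread_ctx, thread_list_node);
/* step 2: recover the thread that embeds the scheduling context */
struct rt_thread *thread =
    rt_container_of(ctx, struct rt_thread, sched_thread_ctx);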
@@ -21,6 +21,7 @@
  * 2023-06-30     ChuShicheng  move debug check from the rtdebug.h
  * 2023-10-16     Shell        Support a new backtrace framework
  * 2023-12-10     xqyjlj       fix spinlock in up
+ * 2024-01-25     Shell        Add rt_susp_list for IPC primitives
  */

 #ifndef __RT_THREAD_H__
@@ -173,7 +174,6 @@ rt_err_t rt_thread_resume(rt_thread_t thread);
 rt_err_t rt_thread_wakeup(rt_thread_t thread);
 void rt_thread_wakeup_set(struct rt_thread *thread, rt_wakeup_func_t func, void* user_data);
 #endif /* RT_USING_SMART */
-void rt_thread_timeout(void *parameter);
 rt_err_t rt_thread_get_name(rt_thread_t thread, char *name, rt_uint8_t name_size);
 #ifdef RT_USING_SIGNALS
 void rt_thread_alloc_sig(rt_thread_t tid);
@@ -212,11 +212,10 @@ void rt_system_scheduler_start(void);

 void rt_schedule(void);
 void rt_scheduler_do_irq_switch(void *context);
-void rt_schedule_insert_thread(struct rt_thread *thread);
-void rt_schedule_remove_thread(struct rt_thread *thread);

-void rt_enter_critical(void);
+rt_base_t rt_enter_critical(void);
 void rt_exit_critical(void);
+void rt_exit_critical_safe(rt_base_t critical_level);
 rt_uint16_t rt_critical_level(void);

 #ifdef RT_USING_HOOK
@@ -368,6 +367,26 @@ void rt_slab_free(rt_slab_t m, void *ptr);
  * @{
  */

+/**
+ * Suspend list - A basic building block for IPC primitives which interacts with
+ * scheduler directly. Its API is similar to a FIFO list.
+ *
+ * Note: don't use in application codes directly
+ */
+void rt_susp_list_print(rt_list_t *list);
+/* reserve thread error while resuming it */
+#define RT_THREAD_RESUME_RES_THR_ERR (-1)
+struct rt_thread *rt_susp_list_dequeue(rt_list_t *susp_list, rt_err_t thread_error);
+rt_err_t rt_susp_list_resume_all(rt_list_t *susp_list, rt_err_t thread_error);
+rt_err_t rt_susp_list_resume_all_irq(rt_list_t *susp_list,
+                                     rt_err_t thread_error,
+                                     struct rt_spinlock *lock);
+
+/* suspend and enqueue */
+rt_err_t rt_thread_suspend_to_list(rt_thread_t thread, rt_list_t *susp_list, int ipc_flags, int suspend_flag);
+/* only for a suspended thread, and caller must hold the scheduler lock */
+rt_err_t rt_susp_list_enqueue(rt_list_t *susp_list, rt_thread_t thread, int ipc_flags);
+
 #ifdef RT_USING_SEMAPHORE
 /*
  * semaphore interface
@@ -725,11 +744,11 @@ int rt_snprintf(char *buf, rt_size_t size, const char *format, ...);
 #if defined(RT_USING_DEVICE) && defined(RT_USING_CONSOLE)
 rt_device_t rt_console_set_device(const char *name);
 rt_device_t rt_console_get_device(void);
-#ifdef RT_USING_THREDSAFE_PRINTF
+#ifdef RT_USING_THREADSAFE_PRINTF
 rt_thread_t rt_console_current_user(void);
 #else
 rt_inline void *rt_console_current_user(void) { return RT_NULL; }
-#endif /* RT_USING_THREDSAFE_PRINTF */
+#endif /* RT_USING_THREADSAFE_PRINTF */
 #endif /* defined(RT_USING_DEVICE) && defined(RT_USING_CONSOLE) */

 rt_err_t rt_get_errno(void);
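Note the signature change above: rt_enter_critical() now returns the critical level, and the new rt_exit_critical_safe() takes it back, presumably so unbalanced exits can be detected. The intended pairing, as a sketch:

rt_base_t critical_level;

critical_level = rt_enter_critical();  /* returns the nesting level on entry */
/* ... region that must not be preempted by the scheduler ... */
rt_exit_critical_safe(critical_level); /* restores against the recorded level */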
@ -0,0 +1,223 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2006-2024, RT-Thread Development Team
|
||||||
|
*
|
||||||
|
* SPDX-License-Identifier: Apache-2.0
|
||||||
|
*
|
||||||
|
* Change Logs:
|
||||||
|
* Date Author Notes
|
||||||
|
* 2024-01-18 Shell Separate the basic types from rtdef.h
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef __RT_TYPES_H__
|
||||||
|
#define __RT_TYPES_H__
|
||||||
|
|
||||||
|
#include <rtconfig.h>
|
||||||
|
|
||||||
|
#include <stdint.h>
|
||||||
|
#include <stddef.h>
|
||||||
|
#include <stdarg.h>
|
||||||
|
#ifndef RT_USING_NANO
|
||||||
|
#include <sys/types.h>
|
||||||
|
#include <sys/errno.h>
|
||||||
|
#if defined(RT_USING_SIGNALS) || defined(RT_USING_SMART)
|
||||||
|
#include <sys/signal.h>
|
||||||
|
#endif /* defined(RT_USING_SIGNALS) || defined(RT_USING_SMART) */
|
||||||
|
#endif /* RT_USING_NANO */
|
||||||
|
|
||||||
|
/**
|
||||||
|
* RT-Thread basic data types definition
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef int rt_bool_t; /**< boolean type */
|
||||||
|
typedef signed long rt_base_t; /**< Nbit CPU related date type */
|
||||||
|
typedef unsigned long rt_ubase_t; /**< Nbit unsigned CPU related data type */
|
||||||
|
|
||||||
|
#ifndef RT_USING_ARCH_DATA_TYPE
|
||||||
|
#ifdef RT_USING_LIBC
|
||||||
|
typedef int8_t rt_int8_t; /**< 8bit integer type */
|
||||||
|
typedef int16_t rt_int16_t; /**< 16bit integer type */
|
||||||
|
typedef int32_t rt_int32_t; /**< 32bit integer type */
|
||||||
|
typedef uint8_t rt_uint8_t; /**< 8bit unsigned integer type */
|
||||||
|
typedef uint16_t rt_uint16_t; /**< 16bit unsigned integer type */
|
||||||
|
typedef uint32_t rt_uint32_t; /**< 32bit unsigned integer type */
|
||||||
|
typedef int64_t rt_int64_t; /**< 64bit integer type */
|
||||||
|
typedef uint64_t rt_uint64_t; /**< 64bit unsigned integer type */
|
||||||
|
#else
|
||||||
|
typedef signed char rt_int8_t; /**< 8bit integer type */
|
||||||
|
typedef signed short rt_int16_t; /**< 16bit integer type */
|
||||||
|
typedef signed int rt_int32_t; /**< 32bit integer type */
|
||||||
|
typedef unsigned char rt_uint8_t; /**< 8bit unsigned integer type */
|
||||||
|
typedef unsigned short rt_uint16_t; /**< 16bit unsigned integer type */
|
||||||
|
typedef unsigned int rt_uint32_t; /**< 32bit unsigned integer type */
|
||||||
|
#ifdef ARCH_CPU_64BIT
|
||||||
|
typedef signed long rt_int64_t; /**< 64bit integer type */
|
||||||
|
typedef unsigned long rt_uint64_t; /**< 64bit unsigned integer type */
|
||||||
|
#else
|
||||||
|
typedef signed long long rt_int64_t; /**< 64bit integer type */
|
||||||
|
typedef unsigned long long rt_uint64_t; /**< 64bit unsigned integer type */
|
||||||
|
#endif /* ARCH_CPU_64BIT */
|
||||||
|
#endif /* RT_USING_LIBC */
|
||||||
|
#endif /* RT_USING_ARCH_DATA_TYPE */
|
||||||
|
|
||||||
|
#if defined(RT_USING_LIBC) && !defined(RT_USING_NANO)
|
||||||
|
typedef size_t rt_size_t; /**< Type for size number */
|
||||||
|
typedef ssize_t rt_ssize_t; /**< Used for a count of bytes or an error indication */
|
||||||
|
#else
|
||||||
|
typedef rt_ubase_t rt_size_t; /**< Type for size number */
|
||||||
|
typedef rt_base_t rt_ssize_t; /**< Used for a count of bytes or an error indication */
|
||||||
|
#endif /* defined(RT_USING_LIBC) && !defined(RT_USING_NANO) */
|
||||||
|
|
||||||
|
typedef rt_base_t rt_err_t; /**< Type for error number */
|
||||||
|
typedef rt_uint32_t rt_time_t; /**< Type for time stamp */
|
||||||
|
typedef rt_uint32_t rt_tick_t; /**< Type for tick count */
|
||||||
|
typedef rt_base_t rt_flag_t; /**< Type for flags */
|
||||||
|
typedef rt_ubase_t rt_dev_t; /**< Type for device */
|
||||||
|
typedef rt_base_t rt_off_t; /**< Type for offset */
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
typedef rt_base_t rt_atomic_t;
|
||||||
|
#else
|
||||||
|
#if defined(RT_USING_HW_ATOMIC)
|
||||||
|
typedef rt_base_t rt_atomic_t;
|
||||||
|
#elif defined(RT_USING_STDC_ATOMIC)
|
||||||
|
#include <stdatomic.h>
|
||||||
|
typedef atomic_size_t rt_atomic_t;
|
||||||
|
#else
|
||||||
|
typedef rt_base_t rt_atomic_t;
|
||||||
|
#endif /* RT_USING_STDC_ATOMIC */
|
||||||
|
#endif /* __cplusplus */
|
||||||
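
A quick aside on the atomic type: rt_atomic_t resolves to rt_base_t when hardware atomics are available and to atomic_size_t under C11 stdatomic, and either way it is manipulated through the kernel's rt_atomic_* helpers rather than touched directly. A minimal sketch, where the counter name is illustrative and not part of this patch:

#include <rtthread.h>

/* hypothetical event counter; rt_atomic_add()/rt_atomic_load() work the
 * same regardless of which backing type rt_atomic_t resolved to */
static rt_atomic_t _evt_count = 0;

void event_record(void)
{
    rt_atomic_add(&_evt_count, 1);
}

rt_base_t event_total(void)
{
    return (rt_base_t)rt_atomic_load(&_evt_count);
}
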
|
|
||||||
|
/* boolean type definitions */
|
||||||
|
#define RT_TRUE 1 /**< boolean true */
|
||||||
|
#define RT_FALSE                        0               /**< boolean false */
|
||||||
|
|
||||||
|
/* null pointer definition */
|
||||||
|
#define RT_NULL 0
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Double List structure
|
||||||
|
*/
|
||||||
|
struct rt_list_node
|
||||||
|
{
|
||||||
|
struct rt_list_node *next; /**< point to next node. */
|
||||||
|
struct rt_list_node *prev; /**< point to prev node. */
|
||||||
|
};
|
||||||
|
typedef struct rt_list_node rt_list_t; /**< Type for lists. */
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Single List structure
|
||||||
|
*/
|
||||||
|
struct rt_slist_node
|
||||||
|
{
|
||||||
|
struct rt_slist_node *next; /**< point to next node. */
|
||||||
|
};
|
||||||
|
typedef struct rt_slist_node rt_slist_t; /**< Type for single list. */
|
||||||
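
For orientation, both list types are intrusive: the node is embedded in the owning structure and the owner is recovered with rt_list_entry(). A hedged sketch, assuming the usual rtservice.h helpers and with the struct and field names chosen for illustration:

#include <rtthread.h>

struct my_item
{
    int value;
    rt_list_t node;                      /* embedded list node */
};

static rt_list_t _items = RT_LIST_OBJECT_INIT(_items);

void item_add(struct my_item *it)
{
    rt_list_insert_after(&_items, &it->node);
}

struct my_item *item_first(void)
{
    if (rt_list_isempty(&_items))
        return RT_NULL;
    /* recover the container from its embedded node */
    return rt_list_entry(_items.next, struct my_item, node);
}
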
|
|
||||||
|
/**
|
||||||
|
* Spinlock
|
||||||
|
*/
|
||||||
|
#ifdef RT_USING_SMP
|
||||||
|
#include <cpuport.h> /* for spinlock from arch */
|
||||||
|
|
||||||
|
struct rt_spinlock
|
||||||
|
{
|
||||||
|
rt_hw_spinlock_t lock;
|
||||||
|
#ifdef RT_USING_DEBUG
|
||||||
|
rt_uint32_t critical_level;
|
||||||
|
#endif /* RT_USING_DEBUG */
|
||||||
|
#if defined(RT_DEBUGING_SPINLOCK)
|
||||||
|
void *owner;
|
||||||
|
void *pc;
|
||||||
|
#endif /* RT_DEBUGING_SPINLOCK */
|
||||||
|
};
|
||||||
|
|
||||||
|
#ifdef RT_DEBUGING_SPINLOCK
|
||||||
|
|
||||||
|
#define __OWNER_MAGIC ((void *)0xdeadbeaf)
|
||||||
|
|
||||||
|
#if defined(__GNUC__)
|
||||||
|
#define __GET_RETURN_ADDRESS __builtin_return_address(0)
|
||||||
|
#else
|
||||||
|
#define __GET_RETURN_ADDRESS RT_NULL
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define _SPIN_LOCK_DEBUG_OWNER(lock) \
|
||||||
|
do \
|
||||||
|
{ \
|
||||||
|
struct rt_thread *_curthr = rt_thread_self(); \
|
||||||
|
if (_curthr != RT_NULL) \
|
||||||
|
{ \
|
||||||
|
(lock)->owner = _curthr; \
|
||||||
|
(lock)->pc = __GET_RETURN_ADDRESS; \
|
||||||
|
} \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
|
#define _SPIN_UNLOCK_DEBUG_OWNER(lock) \
|
||||||
|
do \
|
||||||
|
{ \
|
||||||
|
(lock)->owner = __OWNER_MAGIC; \
|
||||||
|
(lock)->pc = RT_NULL; \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
|
#else
|
||||||
|
|
||||||
|
#define _SPIN_LOCK_DEBUG_OWNER(lock)
|
||||||
|
#define _SPIN_UNLOCK_DEBUG_OWNER(lock)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef RT_USING_DEBUG
|
||||||
|
|
||||||
|
#define _SPIN_LOCK_DEBUG_CRITICAL(lock) \
|
||||||
|
do \
|
||||||
|
{ \
|
||||||
|
struct rt_thread *_curthr = rt_thread_self(); \
|
||||||
|
if (_curthr != RT_NULL) \
|
||||||
|
{ \
|
||||||
|
(lock)->critical_level = rt_critical_level(); \
|
||||||
|
} \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
|
#define _SPIN_UNLOCK_DEBUG_CRITICAL(lock, critical) \
|
||||||
|
do \
|
||||||
|
{ \
|
||||||
|
(critical) = (lock)->critical_level; \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
|
#else
|
||||||
|
|
||||||
|
#define _SPIN_LOCK_DEBUG_CRITICAL(lock)
|
||||||
|
#define _SPIN_UNLOCK_DEBUG_CRITICAL(lock, critical) (critical = 0)
|
||||||
|
#endif /* RT_USING_DEBUG */
|
||||||
|
|
||||||
|
#define RT_SPIN_LOCK_DEBUG(lock) \
|
||||||
|
do \
|
||||||
|
{ \
|
||||||
|
_SPIN_LOCK_DEBUG_OWNER(lock); \
|
||||||
|
_SPIN_LOCK_DEBUG_CRITICAL(lock); \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
|
#define RT_SPIN_UNLOCK_DEBUG(lock, critical) \
|
||||||
|
do \
|
||||||
|
{ \
|
||||||
|
_SPIN_UNLOCK_DEBUG_OWNER(lock); \
|
||||||
|
_SPIN_UNLOCK_DEBUG_CRITICAL(lock, critical); \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
|
#ifndef RT_SPINLOCK_INIT
|
||||||
|
#define RT_SPINLOCK_INIT {{0}} /* can be overridden by cpuport.h */
|
||||||
|
#endif /* RT_SPINLOCK_INIT */
|
||||||
|
|
||||||
|
#else
|
||||||
|
|
||||||
|
struct rt_spinlock
|
||||||
|
{
|
||||||
|
rt_ubase_t lock;
|
||||||
|
};
|
||||||
|
#define RT_SPINLOCK_INIT {0}
|
||||||
|
#endif /* RT_USING_SMP */
|
||||||
|
|
||||||
|
typedef struct rt_spinlock rt_spinlock_t;
|
||||||
|
|
||||||
|
#define RT_DEFINE_SPINLOCK(x) struct rt_spinlock x = RT_SPINLOCK_INIT
|
||||||
|
|
||||||
|
#endif /* __RT_TYPES_H__ */
|
|
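
Taken together, the new header lets a driver define and use a lock without caring whether the build is SMP or UP. A minimal sketch, with illustrative names for the lock and the protected state:

#include <rtthread.h>

/* statically initialized; the initializer expands per RT_USING_SMP */
static RT_DEFINE_SPINLOCK(_dev_lock);

static rt_uint32_t _dev_state;           /* illustrative shared state */

void dev_set_state(rt_uint32_t s)
{
    rt_base_t level = rt_spin_lock_irqsave(&_dev_lock);
    _dev_state = s;
    rt_spin_unlock_irqrestore(&_dev_lock, level);
}
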
@ -13,6 +13,7 @@
|
||||||
#define CPUPORT_H__
|
#define CPUPORT_H__
|
||||||
|
|
||||||
#include <armv8.h>
|
#include <armv8.h>
|
||||||
|
#include <rtcompiler.h>
|
||||||
#include <rtdef.h>
|
#include <rtdef.h>
|
||||||
|
|
||||||
#ifdef RT_USING_SMP
|
#ifdef RT_USING_SMP
|
||||||
|
|
|
@ -151,7 +151,7 @@ _secondary_cpu_entry:
|
||||||
#else
|
#else
|
||||||
bl rt_hw_cpu_id_set
|
bl rt_hw_cpu_id_set
|
||||||
mrs x0, tpidr_el1
|
mrs x0, tpidr_el1
|
||||||
#endif
|
#endif /* RT_USING_OFW */
|
||||||
|
|
||||||
/* Set current cpu's stack top */
|
/* Set current cpu's stack top */
|
||||||
sub x0, x0, #1
|
sub x0, x0, #1
|
||||||
|
|
|
@ -103,4 +103,11 @@ int __rt_ffs(int value)
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
rt_bool_t rt_hw_interrupt_is_disabled(void)
|
||||||
|
{
|
||||||
|
int rc;
|
||||||
|
__asm__ volatile("mrs %0, cpsr" : "=r" (rc));
|
||||||
|
return !!(rc & 0x80);
|
||||||
|
}
|
||||||
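
The helper above tests bit 7 (the I bit) of the ARM CPSR, so code that must not run with IRQs masked can assert on it. A tiny hedged sketch:

void blocking_io_path(void)
{
    /* this path may sleep, so it must not be entered with IRQs masked */
    RT_ASSERT(rt_hw_interrupt_is_disabled() == RT_FALSE);
    /* ... */
}
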
|
|
||||||
/*@}*/
|
/*@}*/
|
||||||
|
|
|
@ -10,6 +10,8 @@
|
||||||
#ifndef CPUPORT_H__
|
#ifndef CPUPORT_H__
|
||||||
#define CPUPORT_H__
|
#define CPUPORT_H__
|
||||||
|
|
||||||
|
#include <rtcompiler.h>
|
||||||
|
|
||||||
/* the exception stack without VFP registers */
|
/* the exception stack without VFP registers */
|
||||||
struct rt_hw_exp_stack
|
struct rt_hw_exp_stack
|
||||||
{
|
{
|
||||||
|
|
|
@ -56,6 +56,11 @@ void *_rt_hw_stack_init(rt_ubase_t *sp, rt_ubase_t ra, rt_ubase_t sstatus)
|
||||||
return (void *)sp;
|
return (void *)sp;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int rt_hw_cpu_id(void)
|
||||||
|
{
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
 * This function will initialize the thread stack; we assume that
|
 * This function will initialize the thread stack; we assume that
|
||||||
 * when the scheduler restores this new thread, the context will restore
|
 * when the scheduler restores this new thread, the context will restore
|
||||||
|
|
src/Kconfig (12 lines changed)
|
@ -35,6 +35,7 @@ config RT_USING_SMART
|
||||||
select RT_USING_POSIX_FS
|
select RT_USING_POSIX_FS
|
||||||
select RT_USING_POSIX_TERMIOS
|
select RT_USING_POSIX_TERMIOS
|
||||||
select RT_USING_KTIME
|
select RT_USING_KTIME
|
||||||
|
select RT_USING_STDC_ATOMIC
|
||||||
depends on ARCH_ARM_CORTEX_M || ARCH_ARM_ARM9 || ARCH_ARM_CORTEX_A || ARCH_ARMV8 || ARCH_RISCV64
|
depends on ARCH_ARM_CORTEX_M || ARCH_ARM_ARM9 || ARCH_ARM_CORTEX_A || ARCH_ARMV8 || ARCH_RISCV64
|
||||||
depends on !RT_USING_NANO
|
depends on !RT_USING_NANO
|
||||||
help
|
help
|
||||||
|
@ -66,6 +67,7 @@ config RT_USING_AMP
|
||||||
config RT_USING_SMP
|
config RT_USING_SMP
|
||||||
bool "Enable SMP (Symmetric multiprocessing)"
|
bool "Enable SMP (Symmetric multiprocessing)"
|
||||||
default n
|
default n
|
||||||
|
select RT_USING_SCHED_THREAD_CTX
|
||||||
help
|
help
|
||||||
This option should be selected by machines which have an SMP-
|
This option should be selected by machines which have an SMP-
|
||||||
capable CPU.
|
capable CPU.
|
||||||
|
@ -417,10 +419,18 @@ config RT_USING_INTERRUPT_INFO
|
||||||
help
|
help
|
||||||
Add name and counter information for interrupt trace.
|
Add name and counter information for interrupt trace.
|
||||||
|
|
||||||
config RT_USING_THREDSAFE_PRINTF
|
config RT_USING_THREADSAFE_PRINTF
|
||||||
bool "Enable thread safe kernel print service"
|
bool "Enable thread safe kernel print service"
|
||||||
default y if RT_USING_SMP && RT_USING_SMART
|
default y if RT_USING_SMP && RT_USING_SMART
|
||||||
|
|
||||||
|
config RT_USING_SCHED_THREAD_CTX
|
||||||
|
bool "Using the scheduler thread context"
|
||||||
|
help
|
||||||
|
Using the scheduler thread context embedded in the thread object.
|
||||||
|
This option is only for backward-compatible code. It may become a
|
||||||
|
mandatory option in the future.
|
||||||
|
default y if RT_USING_SMP
|
||||||
|
|
||||||
config RT_USING_CONSOLE
|
config RT_USING_CONSOLE
|
||||||
bool "Using console for rt_kprintf"
|
bool "Using console for rt_kprintf"
|
||||||
default y
|
default y
|
||||||
|
|
|
@ -26,8 +26,7 @@ if GetDepend('RT_USING_DEVICE') == False:
|
||||||
|
|
||||||
if GetDepend('RT_USING_SMP') == False:
|
if GetDepend('RT_USING_SMP') == False:
|
||||||
SrcRemove(src, ['cpu.c', 'scheduler_mp.c'])
|
SrcRemove(src, ['cpu.c', 'scheduler_mp.c'])
|
||||||
|
else:
|
||||||
if GetDepend('RT_USING_SMP') == True:
|
|
||||||
SrcRemove(src, ['scheduler_up.c'])
|
SrcRemove(src, ['scheduler_up.c'])
|
||||||
|
|
||||||
LOCAL_CFLAGS = ''
|
LOCAL_CFLAGS = ''
|
||||||
|
@ -43,6 +42,6 @@ if rtconfig.PLATFORM in GetGCCLikePLATFORM():
|
||||||
LOCAL_CFLAGS += ' -Wimplicit-fallthrough' # implicit fallthrough warning
|
LOCAL_CFLAGS += ' -Wimplicit-fallthrough' # implicit fallthrough warning
|
||||||
LOCAL_CFLAGS += ' -Wduplicated-cond -Wduplicated-branches' # duplicated condition warning
|
LOCAL_CFLAGS += ' -Wduplicated-cond -Wduplicated-branches' # duplicated condition warning
|
||||||
|
|
||||||
group = DefineGroup('Kernel', src, depend=[''], CPPPATH=inc, CPPDEFINES=['__RTTHREAD__'], LOCAL_CFLAGS=LOCAL_CFLAGS)
|
group = DefineGroup('Kernel', src, depend=[''], CPPPATH=inc, CPPDEFINES=['__RTTHREAD__'], LOCAL_CFLAGS=LOCAL_CFLAGS, LOCAL_CPPDEFINES=['__RT_KERNEL_SOURCE__'])
|
||||||
|
|
||||||
Return('group')
|
Return('group')
|
||||||
|
|
src/clock.c (19 lines changed)
|
@ -85,34 +85,19 @@ void rt_tick_set(rt_tick_t tick)
|
||||||
*/
|
*/
|
||||||
void rt_tick_increase(void)
|
void rt_tick_increase(void)
|
||||||
{
|
{
|
||||||
struct rt_thread *thread;
|
|
||||||
rt_base_t level;
|
|
||||||
rt_atomic_t oldval = 0;
|
|
||||||
|
|
||||||
RT_ASSERT(rt_interrupt_get_nest() > 0);
|
RT_ASSERT(rt_interrupt_get_nest() > 0);
|
||||||
|
|
||||||
RT_OBJECT_HOOK_CALL(rt_tick_hook, ());
|
RT_OBJECT_HOOK_CALL(rt_tick_hook, ());
|
||||||
/* increase the global tick */
|
/* increase the global tick */
|
||||||
#ifdef RT_USING_SMP
|
#ifdef RT_USING_SMP
|
||||||
|
/* get percpu and increase the tick */
|
||||||
rt_atomic_add(&(rt_cpu_self()->tick), 1);
|
rt_atomic_add(&(rt_cpu_self()->tick), 1);
|
||||||
#else
|
#else
|
||||||
rt_atomic_add(&(rt_tick), 1);
|
rt_atomic_add(&(rt_tick), 1);
|
||||||
#endif /* RT_USING_SMP */
|
#endif /* RT_USING_SMP */
|
||||||
|
|
||||||
/* check time slice */
|
/* check time slice */
|
||||||
thread = rt_thread_self();
|
rt_sched_tick_increase();
|
||||||
level = rt_spin_lock_irqsave(&(thread->spinlock));
|
|
||||||
rt_atomic_sub(&(thread->remaining_tick), 1);
|
|
||||||
if (rt_atomic_compare_exchange_strong(&(thread->remaining_tick), &oldval, thread->init_tick))
|
|
||||||
{
|
|
||||||
thread->stat |= RT_THREAD_STAT_YIELD;
|
|
||||||
rt_spin_unlock_irqrestore(&(thread->spinlock), level);
|
|
||||||
rt_schedule();
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
rt_spin_unlock_irqrestore(&(thread->spinlock), level);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* check timer */
|
/* check timer */
|
||||||
#ifdef RT_USING_SMP
|
#ifdef RT_USING_SMP
|
||||||
|
|
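
With the time-slice bookkeeping moved out of rt_tick_increase() and into rt_sched_tick_increase(), a port's periodic timer ISR keeps the classic shape. A sketch, where the handler name is BSP-specific:

void systick_isr(void)
{
    rt_interrupt_enter();   /* tell the kernel we are in interrupt context */
    rt_tick_increase();     /* advance the tick; on expiry of the current
                             * time slice this now requests a reschedule
                             * through the scheduler layer */
    rt_interrupt_leave();
}
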
src/cpu.c (77 lines changed)
|
@ -8,6 +8,7 @@
|
||||||
* 2018-10-30 Bernard The first version
|
* 2018-10-30 Bernard The first version
|
||||||
* 2023-09-15 xqyjlj perf rt_hw_interrupt_disable/enable
|
* 2023-09-15 xqyjlj perf rt_hw_interrupt_disable/enable
|
||||||
* 2023-12-10 xqyjlj spinlock should lock sched
|
* 2023-12-10 xqyjlj spinlock should lock sched
|
||||||
|
* 2024-01-25 Shell Using rt_exit_critical_safe
|
||||||
*/
|
*/
|
||||||
#include <rthw.h>
|
#include <rthw.h>
|
||||||
#include <rtthread.h>
|
#include <rtthread.h>
|
||||||
|
@ -16,6 +17,10 @@
|
||||||
#include <lwp.h>
|
#include <lwp.h>
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#ifdef RT_USING_DEBUG
|
||||||
|
rt_base_t _cpus_critical_level;
|
||||||
|
#endif /* RT_USING_DEBUG */
|
||||||
|
|
||||||
#ifdef RT_USING_SMP
|
#ifdef RT_USING_SMP
|
||||||
static struct rt_cpu _cpus[RT_CPUS_NR];
|
static struct rt_cpu _cpus[RT_CPUS_NR];
|
||||||
rt_hw_spinlock_t _cpus_lock;
|
rt_hw_spinlock_t _cpus_lock;
|
||||||
|
@ -23,14 +28,6 @@ rt_hw_spinlock_t _cpus_lock;
|
||||||
void *_cpus_lock_owner = 0;
|
void *_cpus_lock_owner = 0;
|
||||||
void *_cpus_lock_pc = 0;
|
void *_cpus_lock_pc = 0;
|
||||||
|
|
||||||
#define __OWNER_MAGIC ((void *)0xdeadbeaf)
|
|
||||||
|
|
||||||
#if defined (__GNUC__)
|
|
||||||
#define __GET_RETURN_ADDRESS __builtin_return_address(0)
|
|
||||||
#else
|
|
||||||
#define __GET_RETURN_ADDRESS RT_NULL
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#endif /* RT_DEBUGING_SPINLOCK */
|
#endif /* RT_DEBUGING_SPINLOCK */
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -56,13 +53,7 @@ void rt_spin_lock(struct rt_spinlock *lock)
|
||||||
{
|
{
|
||||||
rt_enter_critical();
|
rt_enter_critical();
|
||||||
rt_hw_spin_lock(&lock->lock);
|
rt_hw_spin_lock(&lock->lock);
|
||||||
#if defined(RT_DEBUGING_SPINLOCK)
|
RT_SPIN_LOCK_DEBUG(lock);
|
||||||
if (rt_cpu_self() != RT_NULL)
|
|
||||||
{
|
|
||||||
lock->owner = rt_cpu_self()->current_thread;
|
|
||||||
}
|
|
||||||
lock->pc = __GET_RETURN_ADDRESS;
|
|
||||||
#endif /* RT_DEBUGING_SPINLOCK */
|
|
||||||
}
|
}
|
||||||
RTM_EXPORT(rt_spin_lock)
|
RTM_EXPORT(rt_spin_lock)
|
||||||
|
|
||||||
|
@ -73,12 +64,10 @@ RTM_EXPORT(rt_spin_lock)
|
||||||
*/
|
*/
|
||||||
void rt_spin_unlock(struct rt_spinlock *lock)
|
void rt_spin_unlock(struct rt_spinlock *lock)
|
||||||
{
|
{
|
||||||
|
rt_base_t critical_level;
|
||||||
|
RT_SPIN_UNLOCK_DEBUG(lock, critical_level);
|
||||||
rt_hw_spin_unlock(&lock->lock);
|
rt_hw_spin_unlock(&lock->lock);
|
||||||
#if defined(RT_DEBUGING_SPINLOCK)
|
rt_exit_critical_safe(critical_level);
|
||||||
lock->owner = __OWNER_MAGIC;
|
|
||||||
lock->pc = RT_NULL;
|
|
||||||
#endif /* RT_DEBUGING_SPINLOCK */
|
|
||||||
rt_exit_critical();
|
|
||||||
}
|
}
|
||||||
RTM_EXPORT(rt_spin_unlock)
|
RTM_EXPORT(rt_spin_unlock)
|
||||||
|
|
||||||
|
@ -99,13 +88,7 @@ rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
|
||||||
level = rt_hw_local_irq_disable();
|
level = rt_hw_local_irq_disable();
|
||||||
rt_enter_critical();
|
rt_enter_critical();
|
||||||
rt_hw_spin_lock(&lock->lock);
|
rt_hw_spin_lock(&lock->lock);
|
||||||
#if defined(RT_DEBUGING_SPINLOCK)
|
RT_SPIN_LOCK_DEBUG(lock);
|
||||||
if (rt_cpu_self() != RT_NULL)
|
|
||||||
{
|
|
||||||
lock->owner = rt_cpu_self()->current_thread;
|
|
||||||
lock->pc = __GET_RETURN_ADDRESS;
|
|
||||||
}
|
|
||||||
#endif /* RT_DEBUGING_SPINLOCK */
|
|
||||||
return level;
|
return level;
|
||||||
}
|
}
|
||||||
RTM_EXPORT(rt_spin_lock_irqsave)
|
RTM_EXPORT(rt_spin_lock_irqsave)
|
||||||
|
@ -119,13 +102,12 @@ RTM_EXPORT(rt_spin_lock_irqsave)
|
||||||
*/
|
*/
|
||||||
void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
|
void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
|
||||||
{
|
{
|
||||||
#if defined(RT_DEBUGING_SPINLOCK)
|
rt_base_t critical_level;
|
||||||
lock->owner = __OWNER_MAGIC;
|
|
||||||
lock->pc = RT_NULL;
|
RT_SPIN_UNLOCK_DEBUG(lock, critical_level);
|
||||||
#endif /* RT_DEBUGING_SPINLOCK */
|
|
||||||
rt_hw_spin_unlock(&lock->lock);
|
rt_hw_spin_unlock(&lock->lock);
|
||||||
rt_hw_local_irq_enable(level);
|
rt_hw_local_irq_enable(level);
|
||||||
rt_exit_critical();
|
rt_exit_critical_safe(critical_level);
|
||||||
}
|
}
|
||||||
RTM_EXPORT(rt_spin_unlock_irqrestore)
|
RTM_EXPORT(rt_spin_unlock_irqrestore)
|
||||||
|
|
||||||
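
For thread-to-thread protection where IRQs need not be masked, the plain lock/unlock pair is sufficient, and it now records and restores the critical (preemption) level internally via the debug macros above. A hedged sketch with illustrative names:

static struct rt_spinlock _list_lock = RT_SPINLOCK_INIT;

void list_update(void)
{
    rt_spin_lock(&_list_lock);     /* disables preemption, spins on SMP */
    /* ... short, non-blocking critical section ... */
    rt_spin_unlock(&_list_lock);   /* restores the recorded critical level */
}
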
|
@ -162,7 +144,6 @@ rt_base_t rt_cpus_lock(void)
|
||||||
struct rt_cpu* pcpu;
|
struct rt_cpu* pcpu;
|
||||||
|
|
||||||
level = rt_hw_local_irq_disable();
|
level = rt_hw_local_irq_disable();
|
||||||
rt_enter_critical();
|
|
||||||
pcpu = rt_cpu_self();
|
pcpu = rt_cpu_self();
|
||||||
if (pcpu->current_thread != RT_NULL)
|
if (pcpu->current_thread != RT_NULL)
|
||||||
{
|
{
|
||||||
|
@ -171,11 +152,16 @@ rt_base_t rt_cpus_lock(void)
|
||||||
rt_atomic_add(&(pcpu->current_thread->cpus_lock_nest), 1);
|
rt_atomic_add(&(pcpu->current_thread->cpus_lock_nest), 1);
|
||||||
if (lock_nest == 0)
|
if (lock_nest == 0)
|
||||||
{
|
{
|
||||||
|
rt_enter_critical();
|
||||||
rt_hw_spin_lock(&_cpus_lock);
|
rt_hw_spin_lock(&_cpus_lock);
|
||||||
#if defined(RT_DEBUGING_SPINLOCK)
|
#ifdef RT_USING_DEBUG
|
||||||
|
_cpus_critical_level = rt_critical_level();
|
||||||
|
#endif /* RT_USING_DEBUG */
|
||||||
|
|
||||||
|
#ifdef RT_DEBUGING_SPINLOCK
|
||||||
_cpus_lock_owner = pcpu->current_thread;
|
_cpus_lock_owner = pcpu->current_thread;
|
||||||
_cpus_lock_pc = __GET_RETURN_ADDRESS;
|
_cpus_lock_pc = __GET_RETURN_ADDRESS;
|
||||||
#endif
|
#endif /* RT_DEBUGING_SPINLOCK */
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -194,6 +180,7 @@ void rt_cpus_unlock(rt_base_t level)
|
||||||
|
|
||||||
if (pcpu->current_thread != RT_NULL)
|
if (pcpu->current_thread != RT_NULL)
|
||||||
{
|
{
|
||||||
|
rt_base_t critical_level = 0;
|
||||||
RT_ASSERT(rt_atomic_load(&(pcpu->current_thread->cpus_lock_nest)) > 0);
|
RT_ASSERT(rt_atomic_load(&(pcpu->current_thread->cpus_lock_nest)) > 0);
|
||||||
rt_atomic_sub(&(pcpu->current_thread->cpus_lock_nest), 1);
|
rt_atomic_sub(&(pcpu->current_thread->cpus_lock_nest), 1);
|
||||||
|
|
||||||
|
@ -202,12 +189,16 @@ void rt_cpus_unlock(rt_base_t level)
|
||||||
#if defined(RT_DEBUGING_SPINLOCK)
|
#if defined(RT_DEBUGING_SPINLOCK)
|
||||||
_cpus_lock_owner = __OWNER_MAGIC;
|
_cpus_lock_owner = __OWNER_MAGIC;
|
||||||
_cpus_lock_pc = RT_NULL;
|
_cpus_lock_pc = RT_NULL;
|
||||||
#endif
|
#endif /* RT_DEBUGING_SPINLOCK */
|
||||||
|
#ifdef RT_USING_DEBUG
|
||||||
|
critical_level = _cpus_critical_level;
|
||||||
|
_cpus_critical_level = 0;
|
||||||
|
#endif /* RT_USING_DEBUG */
|
||||||
rt_hw_spin_unlock(&_cpus_lock);
|
rt_hw_spin_unlock(&_cpus_lock);
|
||||||
|
rt_exit_critical_safe(critical_level);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
rt_hw_local_irq_enable(level);
|
rt_hw_local_irq_enable(level);
|
||||||
rt_exit_critical();
|
|
||||||
}
|
}
|
||||||
RTM_EXPORT(rt_cpus_unlock);
|
RTM_EXPORT(rt_cpus_unlock);
|
||||||
|
|
||||||
|
@ -220,20 +211,10 @@ RTM_EXPORT(rt_cpus_unlock);
|
||||||
*/
|
*/
|
||||||
void rt_cpus_lock_status_restore(struct rt_thread *thread)
|
void rt_cpus_lock_status_restore(struct rt_thread *thread)
|
||||||
{
|
{
|
||||||
struct rt_cpu* pcpu = rt_cpu_self();
|
|
||||||
|
|
||||||
#if defined(ARCH_MM_MMU) && defined(RT_USING_SMART)
|
#if defined(ARCH_MM_MMU) && defined(RT_USING_SMART)
|
||||||
lwp_aspace_switch(thread);
|
lwp_aspace_switch(thread);
|
||||||
#endif
|
#endif
|
||||||
if (pcpu->current_thread != RT_NULL )
|
rt_sched_post_ctx_switch(thread);
|
||||||
{
|
|
||||||
rt_hw_spin_unlock(&(pcpu->current_thread->spinlock.lock));
|
|
||||||
if ((pcpu->current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
|
|
||||||
{
|
|
||||||
rt_schedule_insert_thread(pcpu->current_thread);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pcpu->current_thread = thread;
|
|
||||||
}
|
}
|
||||||
RTM_EXPORT(rt_cpus_lock_status_restore);
|
RTM_EXPORT(rt_cpus_lock_status_restore);
|
||||||
#endif /* RT_USING_SMP */
|
#endif /* RT_USING_SMP */
|
||||||
|
|
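
rt_cpus_lock() acts as the big kernel lock of the SMP port; callers must keep the returned level for the matching unlock. A minimal sketch (SMP builds only, function name illustrative):

void cross_cpu_update(void)
{
    rt_base_t level = rt_cpus_lock();   /* IRQs off, global _cpus_lock held */
    /* ... touch state shared by every core ... */
    rt_cpus_unlock(level);
}
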
src/idle.c (15 lines changed)
|
@ -146,7 +146,7 @@ void rt_thread_defunct_enqueue(rt_thread_t thread)
|
||||||
{
|
{
|
||||||
rt_base_t level;
|
rt_base_t level;
|
||||||
level = rt_spin_lock_irqsave(&_defunct_spinlock);
|
level = rt_spin_lock_irqsave(&_defunct_spinlock);
|
||||||
rt_list_insert_after(&_rt_thread_defunct, &thread->tlist);
|
rt_list_insert_after(&_rt_thread_defunct, &RT_THREAD_LIST_NODE(thread));
|
||||||
rt_spin_unlock_irqrestore(&_defunct_spinlock, level);
|
rt_spin_unlock_irqrestore(&_defunct_spinlock, level);
|
||||||
#ifdef RT_USING_SMP
|
#ifdef RT_USING_SMP
|
||||||
rt_sem_release(&system_sem);
|
rt_sem_release(&system_sem);
|
||||||
|
@ -166,20 +166,16 @@ rt_thread_t rt_thread_defunct_dequeue(void)
|
||||||
level = rt_spin_lock_irqsave(&_defunct_spinlock);
|
level = rt_spin_lock_irqsave(&_defunct_spinlock);
|
||||||
if (l->next != l)
|
if (l->next != l)
|
||||||
{
|
{
|
||||||
thread = rt_list_entry(l->next,
|
thread = RT_THREAD_LIST_NODE_ENTRY(l->next);
|
||||||
struct rt_thread,
|
rt_list_remove(&RT_THREAD_LIST_NODE(thread));
|
||||||
tlist);
|
|
||||||
rt_list_remove(&(thread->tlist));
|
|
||||||
}
|
}
|
||||||
rt_spin_unlock_irqrestore(&_defunct_spinlock, level);
|
rt_spin_unlock_irqrestore(&_defunct_spinlock, level);
|
||||||
#else
|
#else
|
||||||
if (l->next != l)
|
if (l->next != l)
|
||||||
{
|
{
|
||||||
thread = rt_list_entry(l->next,
|
thread = RT_THREAD_LIST_NODE_ENTRY(l->next);
|
||||||
struct rt_thread,
|
|
||||||
tlist);
|
|
||||||
level = rt_hw_interrupt_disable();
|
level = rt_hw_interrupt_disable();
|
||||||
rt_list_remove(&(thread->tlist));
|
rt_list_remove(&RT_THREAD_LIST_NODE(thread));
|
||||||
rt_hw_interrupt_enable(level);
|
rt_hw_interrupt_enable(level);
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
@ -311,6 +307,7 @@ static void rt_thread_system_entry(void *parameter)
|
||||||
int ret = rt_sem_take(&system_sem, RT_WAITING_FOREVER);
|
int ret = rt_sem_take(&system_sem, RT_WAITING_FOREVER);
|
||||||
if (ret != RT_EOK)
|
if (ret != RT_EOK)
|
||||||
{
|
{
|
||||||
|
rt_kprintf("failed to sem_take() error %d\n", ret);
|
||||||
RT_ASSERT(0);
|
RT_ASSERT(0);
|
||||||
}
|
}
|
||||||
rt_defunct_execute();
|
rt_defunct_execute();
|
||||||
|
|
|
@ -1500,14 +1500,21 @@ rt_weak void rt_hw_console_output(const char *str)
|
||||||
}
|
}
|
||||||
RTM_EXPORT(rt_hw_console_output);
|
RTM_EXPORT(rt_hw_console_output);
|
||||||
|
|
||||||
#ifdef RT_USING_THREDSAFE_PRINTF
|
#ifdef RT_USING_THREADSAFE_PRINTF
|
||||||
|
|
||||||
static struct rt_spinlock _pr_lock = RT_SPINLOCK_INIT;
|
/* system console lock */
|
||||||
static struct rt_spinlock _prf_lock = RT_SPINLOCK_INIT;
|
static struct rt_spinlock _syscon_lock = RT_SPINLOCK_INIT;
|
||||||
|
/* lock of kprintf buffer */
|
||||||
|
static struct rt_spinlock _prbuf_lock = RT_SPINLOCK_INIT;
|
||||||
/* current user of system console */
|
/* current user of system console */
|
||||||
static rt_thread_t _pr_curr_user;
|
static rt_thread_t _pr_curr_user;
|
||||||
|
|
||||||
|
#ifdef RT_USING_DEBUG
|
||||||
|
static rt_base_t _pr_critical_level;
|
||||||
|
#endif /* RT_USING_DEBUG */
|
||||||
|
|
||||||
/* nested level of current user */
|
/* nested level of current user */
|
||||||
static int _pr_curr_user_nested;
|
static volatile int _pr_curr_user_nested;
|
||||||
|
|
||||||
rt_thread_t rt_console_current_user(void)
|
rt_thread_t rt_console_current_user(void)
|
||||||
{
|
{
|
||||||
|
@ -1516,35 +1523,42 @@ rt_thread_t rt_console_current_user(void)
|
||||||
|
|
||||||
static void _console_take(void)
|
static void _console_take(void)
|
||||||
{
|
{
|
||||||
rt_ubase_t level = rt_spin_lock_irqsave(&_pr_lock);
|
rt_ubase_t level = rt_spin_lock_irqsave(&_syscon_lock);
|
||||||
rt_thread_t self_thread = rt_thread_self();
|
rt_thread_t self_thread = rt_thread_self();
|
||||||
|
rt_base_t critical_level;
|
||||||
|
RT_UNUSED(critical_level);
|
||||||
|
|
||||||
while (_pr_curr_user != self_thread)
|
while (_pr_curr_user != self_thread)
|
||||||
{
|
{
|
||||||
if (_pr_curr_user == RT_NULL)
|
if (_pr_curr_user == RT_NULL)
|
||||||
{
|
{
|
||||||
/* no preemption is allowed to avoid dead lock */
|
/* no preemption is allowed to avoid dead lock */
|
||||||
rt_enter_critical();
|
critical_level = rt_enter_critical();
|
||||||
|
#ifdef RT_USING_DEBUG
|
||||||
|
_pr_critical_level = _syscon_lock.critical_level;
|
||||||
|
_syscon_lock.critical_level = critical_level;
|
||||||
|
#endif
|
||||||
_pr_curr_user = self_thread;
|
_pr_curr_user = self_thread;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
rt_spin_unlock_irqrestore(&_pr_lock, level);
|
rt_spin_unlock_irqrestore(&_syscon_lock, level);
|
||||||
rt_thread_yield();
|
rt_thread_yield();
|
||||||
level = rt_spin_lock_irqsave(&_pr_lock);
|
level = rt_spin_lock_irqsave(&_syscon_lock);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
_pr_curr_user_nested++;
|
_pr_curr_user_nested++;
|
||||||
|
|
||||||
rt_spin_unlock_irqrestore(&_pr_lock, level);
|
rt_spin_unlock_irqrestore(&_syscon_lock, level);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void _console_release(void)
|
static void _console_release(void)
|
||||||
{
|
{
|
||||||
rt_ubase_t level = rt_spin_lock_irqsave(&_pr_lock);
|
rt_ubase_t level = rt_spin_lock_irqsave(&_syscon_lock);
|
||||||
rt_thread_t self_thread = rt_thread_self();
|
rt_thread_t self_thread = rt_thread_self();
|
||||||
|
RT_UNUSED(self_thread);
|
||||||
|
|
||||||
RT_ASSERT(_pr_curr_user == self_thread);
|
RT_ASSERT(_pr_curr_user == self_thread);
|
||||||
|
|
||||||
|
@ -1552,22 +1566,28 @@ static void _console_release(void)
|
||||||
if (!_pr_curr_user_nested)
|
if (!_pr_curr_user_nested)
|
||||||
{
|
{
|
||||||
_pr_curr_user = RT_NULL;
|
_pr_curr_user = RT_NULL;
|
||||||
|
|
||||||
|
#ifdef RT_USING_DEBUG
|
||||||
|
rt_exit_critical_safe(_syscon_lock.critical_level);
|
||||||
|
_syscon_lock.critical_level = _pr_critical_level;
|
||||||
|
#else
|
||||||
rt_exit_critical();
|
rt_exit_critical();
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
rt_spin_unlock_irqrestore(&_pr_lock, level);
|
rt_spin_unlock_irqrestore(&_syscon_lock, level);
|
||||||
}
|
}
|
||||||
|
|
||||||
#define CONSOLE_TAKE _console_take()
|
#define CONSOLE_TAKE _console_take()
|
||||||
#define CONSOLE_RELEASE _console_release()
|
#define CONSOLE_RELEASE _console_release()
|
||||||
#define PRINTF_BUFFER_TAKE rt_ubase_t level = rt_spin_lock_irqsave(&_prf_lock)
|
#define PRINTF_BUFFER_TAKE rt_ubase_t level = rt_spin_lock_irqsave(&_prbuf_lock)
|
||||||
#define PRINTF_BUFFER_RELEASE rt_spin_unlock_irqrestore(&_prf_lock, level)
|
#define PRINTF_BUFFER_RELEASE rt_spin_unlock_irqrestore(&_prbuf_lock, level)
|
||||||
#else
|
#else
|
||||||
|
|
||||||
#define CONSOLE_TAKE
|
#define CONSOLE_TAKE
|
||||||
#define CONSOLE_RELEASE
|
#define CONSOLE_RELEASE
|
||||||
#define PRINTF_BUFFER_TAKE
|
#define PRINTF_BUFFER_TAKE
|
||||||
#define PRINTF_BUFFER_RELEASE
|
#define PRINTF_BUFFER_RELEASE
|
||||||
#endif /* RT_USING_THREDSAFE_PRINTF */
|
#endif /* RT_USING_THREADSAFE_PRINTF */
|
||||||
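
With RT_USING_THREADSAFE_PRINTF enabled, rt_kprintf() calls from different threads are serialized: the console is owned by the thread recorded in rt_console_current_user(), and re-entry from the same owner simply nests. A hedged illustration (the worker function is not from this patch):

static void worker(void *arg)
{
    int i;

    for (i = 0; i < 100; i++)
    {
        /* each call completes atomically with respect to other threads */
        rt_kprintf("worker %s: iteration %d\n", (char *)arg, i);
    }
}
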
|
|
||||||
/**
|
/**
|
||||||
* @brief This function will put string to the console.
|
* @brief This function will put string to the console.
|
||||||
|
|
|
@ -141,7 +141,6 @@ RTM_EXPORT(rt_mp_init);
|
||||||
*/
|
*/
|
||||||
rt_err_t rt_mp_detach(struct rt_mempool *mp)
|
rt_err_t rt_mp_detach(struct rt_mempool *mp)
|
||||||
{
|
{
|
||||||
struct rt_thread *thread;
|
|
||||||
rt_base_t level;
|
rt_base_t level;
|
||||||
|
|
||||||
/* parameter check */
|
/* parameter check */
|
||||||
|
@ -151,21 +150,7 @@ rt_err_t rt_mp_detach(struct rt_mempool *mp)
|
||||||
|
|
||||||
level = rt_spin_lock_irqsave(&(mp->spinlock));
|
level = rt_spin_lock_irqsave(&(mp->spinlock));
|
||||||
/* wake up all suspended threads */
|
/* wake up all suspended threads */
|
||||||
while (!rt_list_isempty(&(mp->suspend_thread)))
|
rt_susp_list_resume_all(&mp->suspend_thread, RT_ERROR);
|
||||||
{
|
|
||||||
|
|
||||||
/* get next suspend thread */
|
|
||||||
thread = rt_list_entry(mp->suspend_thread.next, struct rt_thread, tlist);
|
|
||||||
/* set error code to -RT_ERROR */
|
|
||||||
thread->error = -RT_ERROR;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* resume thread
|
|
||||||
* In rt_thread_resume function, it will remove current thread from
|
|
||||||
* suspend list
|
|
||||||
*/
|
|
||||||
rt_thread_resume(thread);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* detach object */
|
/* detach object */
|
||||||
rt_object_detach(&(mp->parent));
|
rt_object_detach(&(mp->parent));
|
||||||
|
@ -257,7 +242,6 @@ RTM_EXPORT(rt_mp_create);
|
||||||
*/
|
*/
|
||||||
rt_err_t rt_mp_delete(rt_mp_t mp)
|
rt_err_t rt_mp_delete(rt_mp_t mp)
|
||||||
{
|
{
|
||||||
struct rt_thread *thread;
|
|
||||||
rt_base_t level;
|
rt_base_t level;
|
||||||
|
|
||||||
RT_DEBUG_NOT_IN_INTERRUPT;
|
RT_DEBUG_NOT_IN_INTERRUPT;
|
||||||
|
@ -269,20 +253,7 @@ rt_err_t rt_mp_delete(rt_mp_t mp)
|
||||||
|
|
||||||
level = rt_spin_lock_irqsave(&(mp->spinlock));
|
level = rt_spin_lock_irqsave(&(mp->spinlock));
|
||||||
/* wake up all suspended threads */
|
/* wake up all suspended threads */
|
||||||
while (!rt_list_isempty(&(mp->suspend_thread)))
|
rt_susp_list_resume_all(&mp->suspend_thread, RT_ERROR);
|
||||||
{
|
|
||||||
/* get next suspend thread */
|
|
||||||
thread = rt_list_entry(mp->suspend_thread.next, struct rt_thread, tlist);
|
|
||||||
/* set error code to -RT_ERROR */
|
|
||||||
thread->error = -RT_ERROR;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* resume thread
|
|
||||||
* In rt_thread_resume function, it will remove current thread from
|
|
||||||
* suspend list
|
|
||||||
*/
|
|
||||||
rt_thread_resume(thread);
|
|
||||||
}
|
|
||||||
|
|
||||||
rt_spin_unlock_irqrestore(&(mp->spinlock), level);
|
rt_spin_unlock_irqrestore(&(mp->spinlock), level);
|
||||||
|
|
||||||
|
@ -339,8 +310,7 @@ void *rt_mp_alloc(rt_mp_t mp, rt_int32_t time)
|
||||||
thread->error = RT_EOK;
|
thread->error = RT_EOK;
|
||||||
|
|
||||||
/* need suspend thread */
|
/* need suspend thread */
|
||||||
rt_thread_suspend(thread);
|
rt_thread_suspend_to_list(thread, &mp->suspend_thread, RT_IPC_FLAG_FIFO, RT_UNINTERRUPTIBLE);
|
||||||
rt_list_insert_after(&(mp->suspend_thread), &(thread->tlist));
|
|
||||||
|
|
||||||
if (time > 0)
|
if (time > 0)
|
||||||
{
|
{
|
||||||
|
@ -403,7 +373,6 @@ void rt_mp_free(void *block)
|
||||||
{
|
{
|
||||||
rt_uint8_t **block_ptr;
|
rt_uint8_t **block_ptr;
|
||||||
struct rt_mempool *mp;
|
struct rt_mempool *mp;
|
||||||
struct rt_thread *thread;
|
|
||||||
rt_base_t level;
|
rt_base_t level;
|
||||||
|
|
||||||
/* parameter check */
|
/* parameter check */
|
||||||
|
@ -424,19 +393,8 @@ void rt_mp_free(void *block)
|
||||||
*block_ptr = mp->block_list;
|
*block_ptr = mp->block_list;
|
||||||
mp->block_list = (rt_uint8_t *)block_ptr;
|
mp->block_list = (rt_uint8_t *)block_ptr;
|
||||||
|
|
||||||
if (!rt_list_isempty(&(mp->suspend_thread)))
|
if (rt_susp_list_dequeue(&mp->suspend_thread, RT_EOK))
|
||||||
{
|
{
|
||||||
/* get the suspended thread */
|
|
||||||
thread = rt_list_entry(mp->suspend_thread.next,
|
|
||||||
struct rt_thread,
|
|
||||||
tlist);
|
|
||||||
|
|
||||||
/* set error */
|
|
||||||
thread->error = RT_EOK;
|
|
||||||
|
|
||||||
/* resume thread */
|
|
||||||
rt_thread_resume(thread);
|
|
||||||
|
|
||||||
rt_spin_unlock_irqrestore(&(mp->spinlock), level);
|
rt_spin_unlock_irqrestore(&(mp->spinlock), level);
|
||||||
|
|
||||||
/* do a schedule */
|
/* do a schedule */
|
||||||
|
|
|
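
The refactor above only changes how blocked threads are queued and woken; the public memory-pool API is untouched. For orientation, a typical round trip, with sizes and names chosen for illustration:

#include <rtthread.h>

#define BLK_SIZE  64
#define BLK_COUNT 8

/* each block also carries a pointer-sized free-list link */
static rt_uint8_t _pool_mem[BLK_COUNT * (BLK_SIZE + sizeof(void *))];
static struct rt_mempool _pool;

void pool_demo(void)
{
    void *blk;

    rt_mp_init(&_pool, "demo", _pool_mem, sizeof(_pool_mem), BLK_SIZE);

    /* block up to 10 ticks for a free block; waiters now queue through
     * rt_thread_suspend_to_list() and wake via rt_susp_list_dequeue() */
    blk = rt_mp_alloc(&_pool, 10);
    if (blk != RT_NULL)
        rt_mp_free(blk);

    rt_mp_detach(&_pool);
}
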
@ -0,0 +1,217 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2006-2024, RT-Thread Development Team
|
||||||
|
*
|
||||||
|
* SPDX-License-Identifier: Apache-2.0
|
||||||
|
*
|
||||||
|
* (scheduler_comm.c) Common API of scheduling routines.
|
||||||
|
*
|
||||||
|
* Change Logs:
|
||||||
|
* Date Author Notes
|
||||||
|
* 2024-01-18 Shell Separate scheduling related codes from thread.c, scheduler_.*
|
||||||
|
*/
|
||||||
|
|
||||||
|
#define DBG_TAG "kernel.sched"
|
||||||
|
#define DBG_LVL DBG_INFO
|
||||||
|
#include <rtdbg.h>
|
||||||
|
|
||||||
|
#include <rtthread.h>
|
||||||
|
|
||||||
|
void rt_sched_thread_init_ctx(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority)
|
||||||
|
{
|
||||||
|
/* setup thread status */
|
||||||
|
RT_SCHED_CTX(thread).stat = RT_THREAD_INIT;
|
||||||
|
|
||||||
|
#ifdef RT_USING_SMP
|
||||||
|
/* not bind on any cpu */
|
||||||
|
RT_SCHED_CTX(thread).bind_cpu = RT_CPUS_NR;
|
||||||
|
RT_SCHED_CTX(thread).oncpu = RT_CPU_DETACHED;
|
||||||
|
#endif /* RT_USING_SMP */
|
||||||
|
|
||||||
|
rt_sched_thread_init_priv(thread, tick, priority);
|
||||||
|
}
|
||||||
|
|
||||||
|
rt_err_t rt_sched_thread_timer_start(struct rt_thread *thread)
|
||||||
|
{
|
||||||
|
RT_SCHED_DEBUG_IS_LOCKED;
|
||||||
|
RT_SCHED_CTX(thread).sched_flag_ttmr_set = 1;
|
||||||
|
return RT_EOK;
|
||||||
|
}
|
||||||
|
|
||||||
|
rt_err_t rt_sched_thread_timer_stop(struct rt_thread *thread)
|
||||||
|
{
|
||||||
|
rt_err_t error;
|
||||||
|
RT_SCHED_DEBUG_IS_LOCKED;
|
||||||
|
|
||||||
|
if (RT_SCHED_CTX(thread).sched_flag_ttmr_set)
|
||||||
|
{
|
||||||
|
error = rt_timer_stop(&thread->thread_timer);
|
||||||
|
|
||||||
|
/* mask out timer flag no matter stop success or not */
|
||||||
|
RT_SCHED_CTX(thread).sched_flag_ttmr_set = 0;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
error = RT_EOK;
|
||||||
|
}
|
||||||
|
return error;
|
||||||
|
}
|
||||||
|
|
||||||
|
rt_uint8_t rt_sched_thread_get_stat(struct rt_thread *thread)
|
||||||
|
{
|
||||||
|
RT_SCHED_DEBUG_IS_LOCKED;
|
||||||
|
return RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK;
|
||||||
|
}
|
||||||
|
|
||||||
|
rt_uint8_t rt_sched_thread_get_curr_prio(struct rt_thread *thread)
|
||||||
|
{
|
||||||
|
RT_SCHED_DEBUG_IS_LOCKED;
|
||||||
|
return RT_SCHED_PRIV(thread).current_priority;
|
||||||
|
}
|
||||||
|
|
||||||
|
rt_uint8_t rt_sched_thread_get_init_prio(struct rt_thread *thread)
|
||||||
|
{
|
||||||
|
/* read-only fields, so the lock is unnecessary */
|
||||||
|
return RT_SCHED_PRIV(thread).init_priority;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @note Caller must hold the scheduler lock
|
||||||
|
*/
|
||||||
|
rt_uint8_t rt_sched_thread_is_suspended(struct rt_thread *thread)
|
||||||
|
{
|
||||||
|
RT_SCHED_DEBUG_IS_LOCKED;
|
||||||
|
return (RT_SCHED_CTX(thread).stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK;
|
||||||
|
}
|
||||||
|
|
||||||
|
rt_err_t rt_sched_thread_close(struct rt_thread *thread)
|
||||||
|
{
|
||||||
|
RT_SCHED_DEBUG_IS_LOCKED;
|
||||||
|
RT_SCHED_CTX(thread).stat = RT_THREAD_CLOSE;
|
||||||
|
return RT_EOK;
|
||||||
|
}
|
||||||
|
|
||||||
|
rt_err_t rt_sched_thread_yield(struct rt_thread *thread)
|
||||||
|
{
|
||||||
|
RT_SCHED_DEBUG_IS_LOCKED;
|
||||||
|
|
||||||
|
RT_SCHED_PRIV(thread).remaining_tick = RT_SCHED_PRIV(thread).init_tick;
|
||||||
|
RT_SCHED_CTX(thread).stat |= RT_THREAD_STAT_YIELD;
|
||||||
|
|
||||||
|
return RT_EOK;
|
||||||
|
}
|
||||||
|
|
||||||
|
rt_err_t rt_sched_thread_ready(struct rt_thread *thread)
|
||||||
|
{
|
||||||
|
rt_err_t error;
|
||||||
|
|
||||||
|
RT_SCHED_DEBUG_IS_LOCKED;
|
||||||
|
|
||||||
|
if (!rt_sched_thread_is_suspended(thread))
|
||||||
|
{
|
||||||
|
/* failed to proceed, possibly due to a race condition */
|
||||||
|
error = -RT_EINVAL;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
if (RT_SCHED_CTX(thread).sched_flag_ttmr_set)
|
||||||
|
{
|
||||||
|
/**
|
||||||
|
* Quiet timeout timer first if set. and don't continue if we
|
||||||
|
* failed, because it probably means that a timeout ISR racing to
|
||||||
|
* resume thread before us.
|
||||||
|
*/
|
||||||
|
error = rt_sched_thread_timer_stop(thread);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
error = RT_EOK;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!error)
|
||||||
|
{
|
||||||
|
/* remove from suspend list */
|
||||||
|
rt_list_remove(&RT_THREAD_LIST_NODE(thread));
|
||||||
|
|
||||||
|
#ifdef RT_USING_SMART
|
||||||
|
thread->wakeup_handle.func = RT_NULL;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* insert into the scheduler's ready list; the suspend-list removal was done above */
|
||||||
|
rt_sched_insert_thread(thread);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return error;
|
||||||
|
}
|
||||||
|
|
||||||
|
rt_err_t rt_sched_tick_increase(void)
|
||||||
|
{
|
||||||
|
struct rt_thread *thread;
|
||||||
|
rt_sched_lock_level_t slvl;
|
||||||
|
|
||||||
|
thread = rt_thread_self();
|
||||||
|
|
||||||
|
rt_sched_lock(&slvl);
|
||||||
|
|
||||||
|
RT_SCHED_PRIV(thread).remaining_tick--;
|
||||||
|
if (RT_SCHED_PRIV(thread).remaining_tick)
|
||||||
|
{
|
||||||
|
rt_sched_unlock(slvl);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
rt_sched_thread_yield(thread);
|
||||||
|
|
||||||
|
/* request a rescheduling even though we are probably in an ISR */
|
||||||
|
rt_sched_unlock_n_resched(slvl);
|
||||||
|
}
|
||||||
|
|
||||||
|
return RT_EOK;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Update priority of the target thread
|
||||||
|
*/
|
||||||
|
rt_err_t rt_sched_thread_change_priority(struct rt_thread *thread, rt_uint8_t priority)
|
||||||
|
{
|
||||||
|
RT_ASSERT(priority < RT_THREAD_PRIORITY_MAX);
|
||||||
|
RT_SCHED_DEBUG_IS_LOCKED;
|
||||||
|
|
||||||
|
/* for ready thread, change queue; otherwise simply update the priority */
|
||||||
|
if ((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_READY)
|
||||||
|
{
|
||||||
|
/* remove thread from schedule queue first */
|
||||||
|
rt_sched_remove_thread(thread);
|
||||||
|
|
||||||
|
/* change thread priority */
|
||||||
|
RT_SCHED_PRIV(thread).current_priority = priority;
|
||||||
|
|
||||||
|
/* recalculate priority attribute */
|
||||||
|
#if RT_THREAD_PRIORITY_MAX > 32
|
||||||
|
RT_SCHED_PRIV(thread).number = RT_SCHED_PRIV(thread).current_priority >> 3; /* 5bit */
|
||||||
|
RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).number;
|
||||||
|
RT_SCHED_PRIV(thread).high_mask = 1 << (RT_SCHED_PRIV(thread).current_priority & 0x07); /* 3bit */
|
||||||
|
#else
|
||||||
|
RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).current_priority;
|
||||||
|
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
|
||||||
|
RT_SCHED_CTX(thread).stat = RT_THREAD_INIT;
|
||||||
|
|
||||||
|
/* insert thread to schedule queue again */
|
||||||
|
rt_sched_insert_thread(thread);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
RT_SCHED_PRIV(thread).current_priority = priority;
|
||||||
|
|
||||||
|
/* recalculate priority attribute */
|
||||||
|
#if RT_THREAD_PRIORITY_MAX > 32
|
||||||
|
RT_SCHED_PRIV(thread).number = RT_SCHED_PRIV(thread).current_priority >> 3; /* 5bit */
|
||||||
|
RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).number;
|
||||||
|
RT_SCHED_PRIV(thread).high_mask = 1 << (RT_SCHED_PRIV(thread).current_priority & 0x07); /* 3bit */
|
||||||
|
#else
|
||||||
|
RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).current_priority;
|
||||||
|
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
|
||||||
|
}
|
||||||
|
|
||||||
|
return RT_EOK;
|
||||||
|
}
|
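
rt_sched_thread_change_priority() is the lock-holding core; application code is expected to keep reaching it through the public wrapper, which takes the scheduler lock itself. A hedged sketch:

void bump_priority(rt_thread_t tid)
{
    rt_uint8_t prio = 10;   /* illustrative value */

    /* the public wrapper acquires the scheduler lock and then performs
     * the priority change on our behalf */
    rt_thread_control(tid, RT_THREAD_CTRL_CHANGE_PRIORITY, &prio);
}
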
src/scheduler_mp.c (1346 lines changed; diff suppressed because it is too large)
|
@ -161,15 +161,44 @@ static struct rt_thread* _scheduler_get_highest_priority_thread(rt_ubase_t *high
|
||||||
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
|
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
|
||||||
|
|
||||||
/* get highest ready priority thread */
|
/* get highest ready priority thread */
|
||||||
highest_priority_thread = rt_list_entry(rt_thread_priority_table[highest_ready_priority].next,
|
highest_priority_thread = RT_THREAD_LIST_NODE_ENTRY(rt_thread_priority_table[highest_ready_priority].next);
|
||||||
struct rt_thread,
|
|
||||||
tlist);
|
|
||||||
|
|
||||||
*highest_prio = highest_ready_priority;
|
*highest_prio = highest_ready_priority;
|
||||||
|
|
||||||
return highest_priority_thread;
|
return highest_priority_thread;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
rt_err_t rt_sched_lock(rt_sched_lock_level_t *plvl)
|
||||||
|
{
|
||||||
|
rt_base_t level;
|
||||||
|
if (!plvl)
|
||||||
|
return -RT_EINVAL;
|
||||||
|
|
||||||
|
level = rt_hw_interrupt_disable();
|
||||||
|
*plvl = level;
|
||||||
|
|
||||||
|
return RT_EOK;
|
||||||
|
}
|
||||||
|
|
||||||
|
rt_err_t rt_sched_unlock(rt_sched_lock_level_t level)
|
||||||
|
{
|
||||||
|
rt_hw_interrupt_enable(level);
|
||||||
|
|
||||||
|
return RT_EOK;
|
||||||
|
}
|
||||||
|
|
||||||
|
rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level)
|
||||||
|
{
|
||||||
|
if (rt_thread_self())
|
||||||
|
{
|
||||||
|
/* if scheduler is available */
|
||||||
|
rt_schedule();
|
||||||
|
}
|
||||||
|
rt_hw_interrupt_enable(level);
|
||||||
|
|
||||||
|
return RT_EOK;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief This function will initialize the system scheduler.
|
* @brief This function will initialize the system scheduler.
|
||||||
*/
|
*/
|
||||||
|
@ -208,8 +237,8 @@ void rt_system_scheduler_start(void)
|
||||||
|
|
||||||
rt_current_thread = to_thread;
|
rt_current_thread = to_thread;
|
||||||
|
|
||||||
rt_schedule_remove_thread(to_thread);
|
rt_sched_remove_thread(to_thread);
|
||||||
to_thread->stat = RT_THREAD_RUNNING;
|
RT_SCHED_CTX(to_thread).stat = RT_THREAD_RUNNING;
|
||||||
|
|
||||||
/* switch to new thread */
|
/* switch to new thread */
|
||||||
|
|
||||||
|
@ -250,13 +279,13 @@ void rt_schedule(void)
|
||||||
|
|
||||||
to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
|
to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
|
||||||
|
|
||||||
if ((rt_current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
|
if ((RT_SCHED_CTX(rt_current_thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
|
||||||
{
|
{
|
||||||
if (rt_current_thread->current_priority < highest_ready_priority)
|
if (RT_SCHED_PRIV(rt_current_thread).current_priority < highest_ready_priority)
|
||||||
{
|
{
|
||||||
to_thread = rt_current_thread;
|
to_thread = rt_current_thread;
|
||||||
}
|
}
|
||||||
else if (rt_current_thread->current_priority == highest_ready_priority && (rt_current_thread->stat & RT_THREAD_STAT_YIELD_MASK) == 0)
|
else if (RT_SCHED_PRIV(rt_current_thread).current_priority == highest_ready_priority && (RT_SCHED_CTX(rt_current_thread).stat & RT_THREAD_STAT_YIELD_MASK) == 0)
|
||||||
{
|
{
|
||||||
to_thread = rt_current_thread;
|
to_thread = rt_current_thread;
|
||||||
}
|
}
|
||||||
|
@ -277,16 +306,16 @@ void rt_schedule(void)
|
||||||
|
|
||||||
if (need_insert_from_thread)
|
if (need_insert_from_thread)
|
||||||
{
|
{
|
||||||
rt_schedule_insert_thread(from_thread);
|
rt_sched_insert_thread(from_thread);
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((from_thread->stat & RT_THREAD_STAT_YIELD_MASK) != 0)
|
if ((RT_SCHED_CTX(from_thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0)
|
||||||
{
|
{
|
||||||
from_thread->stat &= ~RT_THREAD_STAT_YIELD_MASK;
|
RT_SCHED_CTX(from_thread).stat &= ~RT_THREAD_STAT_YIELD_MASK;
|
||||||
}
|
}
|
||||||
|
|
||||||
rt_schedule_remove_thread(to_thread);
|
rt_sched_remove_thread(to_thread);
|
||||||
to_thread->stat = RT_THREAD_RUNNING | (to_thread->stat & ~RT_THREAD_STAT_MASK);
|
RT_SCHED_CTX(to_thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(to_thread).stat & ~RT_THREAD_STAT_MASK);
|
||||||
|
|
||||||
/* switch to new thread */
|
/* switch to new thread */
|
||||||
LOG_D("[%d]switch to priority#%d "
|
LOG_D("[%d]switch to priority#%d "
|
||||||
|
@ -315,11 +344,11 @@ void rt_schedule(void)
|
||||||
#ifdef RT_USING_SIGNALS
|
#ifdef RT_USING_SIGNALS
|
||||||
/* check stat of thread for signal */
|
/* check stat of thread for signal */
|
||||||
level = rt_hw_interrupt_disable();
|
level = rt_hw_interrupt_disable();
|
||||||
if (rt_current_thread->stat & RT_THREAD_STAT_SIGNAL_PENDING)
|
if (RT_SCHED_CTX(rt_current_thread).stat & RT_THREAD_STAT_SIGNAL_PENDING)
|
||||||
{
|
{
|
||||||
extern void rt_thread_handle_sig(rt_bool_t clean_state);
|
extern void rt_thread_handle_sig(rt_bool_t clean_state);
|
||||||
|
|
||||||
rt_current_thread->stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
|
RT_SCHED_CTX(rt_current_thread).stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
|
||||||
|
|
||||||
rt_hw_interrupt_enable(level);
|
rt_hw_interrupt_enable(level);
|
||||||
|
|
||||||
|
@ -343,8 +372,8 @@ void rt_schedule(void)
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
rt_schedule_remove_thread(rt_current_thread);
|
rt_sched_remove_thread(rt_current_thread);
|
||||||
rt_current_thread->stat = RT_THREAD_RUNNING | (rt_current_thread->stat & ~RT_THREAD_STAT_MASK);
|
RT_SCHED_CTX(rt_current_thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(rt_current_thread).stat & ~RT_THREAD_STAT_MASK);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -356,6 +385,42 @@ __exit:
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Normally no one is racing with us here, so this operation is lockless */
|
||||||
|
void rt_sched_thread_startup(struct rt_thread *thread)
|
||||||
|
{
|
||||||
|
#if RT_THREAD_PRIORITY_MAX > 32
|
||||||
|
RT_SCHED_PRIV(thread).number = RT_SCHED_PRIV(thread).current_priority >> 3; /* 5bit */
|
||||||
|
RT_SCHED_PRIV(thread).number_mask = 1L << RT_SCHED_PRIV(thread).number;
|
||||||
|
RT_SCHED_PRIV(thread).high_mask = 1L << (RT_SCHED_PRIV(thread).current_priority & 0x07); /* 3bit */
|
||||||
|
#else
|
||||||
|
RT_SCHED_PRIV(thread).number_mask = 1L << RT_SCHED_PRIV(thread).current_priority;
|
||||||
|
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
|
||||||
|
|
||||||
|
/* change thread stat, so we can resume it */
|
||||||
|
RT_SCHED_CTX(thread).stat = RT_THREAD_SUSPEND;
|
||||||
|
}
|
||||||
|
|
||||||
|
void rt_sched_thread_init_priv(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority)
|
||||||
|
{
|
||||||
|
rt_list_init(&RT_THREAD_LIST_NODE(thread));
|
||||||
|
|
||||||
|
/* priority init */
|
||||||
|
RT_ASSERT(priority < RT_THREAD_PRIORITY_MAX);
|
||||||
|
RT_SCHED_PRIV(thread).init_priority = priority;
|
||||||
|
RT_SCHED_PRIV(thread).current_priority = priority;
|
||||||
|
|
||||||
|
/* don't add to scheduler queue as init thread */
|
||||||
|
RT_SCHED_PRIV(thread).number_mask = 0;
|
||||||
|
#if RT_THREAD_PRIORITY_MAX > 32
|
||||||
|
RT_SCHED_PRIV(thread).number = 0;
|
||||||
|
RT_SCHED_PRIV(thread).high_mask = 0;
|
||||||
|
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
|
||||||
|
|
||||||
|
/* tick init */
|
||||||
|
RT_SCHED_PRIV(thread).init_tick = tick;
|
||||||
|
RT_SCHED_PRIV(thread).remaining_tick = tick;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief This function will insert a thread to the system ready queue. The state of
|
* @brief This function will insert a thread to the system ready queue. The state of
|
||||||
* thread will be set as READY and the thread will be removed from suspend queue.
|
* thread will be set as READY and the thread will be removed from suspend queue.
|
||||||
|
@ -364,7 +429,7 @@ __exit:
|
||||||
*
|
*
|
||||||
* @note Please do not invoke this function in user application.
|
* @note Please do not invoke this function in user application.
|
||||||
*/
|
*/
|
||||||
void rt_schedule_insert_thread(struct rt_thread *thread)
|
void rt_sched_insert_thread(struct rt_thread *thread)
|
||||||
{
|
{
|
||||||
rt_base_t level;
|
rt_base_t level;
|
||||||
|
|
||||||
|
@ -376,33 +441,33 @@ void rt_schedule_insert_thread(struct rt_thread *thread)
|
||||||
/* it's current thread, it should be RUNNING thread */
|
/* it's current thread, it should be RUNNING thread */
|
||||||
if (thread == rt_current_thread)
|
if (thread == rt_current_thread)
|
||||||
{
|
{
|
||||||
thread->stat = RT_THREAD_RUNNING | (thread->stat & ~RT_THREAD_STAT_MASK);
|
RT_SCHED_CTX(thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
|
||||||
goto __exit;
|
goto __exit;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* READY thread, insert to ready queue */
|
/* READY thread, insert to ready queue */
|
||||||
thread->stat = RT_THREAD_READY | (thread->stat & ~RT_THREAD_STAT_MASK);
|
RT_SCHED_CTX(thread).stat = RT_THREAD_READY | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
|
||||||
/* there is no time slices left(YIELD), inserting thread before ready list*/
|
/* there is no time slices left(YIELD), inserting thread before ready list*/
|
||||||
if((thread->stat & RT_THREAD_STAT_YIELD_MASK) != 0)
|
if((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0)
|
||||||
{
|
{
|
||||||
rt_list_insert_before(&(rt_thread_priority_table[thread->current_priority]),
|
rt_list_insert_before(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
|
||||||
&(thread->tlist));
|
&RT_THREAD_LIST_NODE(thread));
|
||||||
}
|
}
|
||||||
/* there are some time slices left, inserting thread after ready list to schedule it firstly at next time*/
|
/* there are some time slices left, inserting thread after ready list to schedule it firstly at next time*/
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
rt_list_insert_after(&(rt_thread_priority_table[thread->current_priority]),
|
rt_list_insert_after(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
|
||||||
&(thread->tlist));
|
&RT_THREAD_LIST_NODE(thread));
|
||||||
}
|
}
|
||||||
|
|
||||||
LOG_D("insert thread[%.*s], the priority: %d",
|
LOG_D("insert thread[%.*s], the priority: %d",
|
||||||
RT_NAME_MAX, thread->parent.name, thread->current_priority);
|
RT_NAME_MAX, thread->parent.name, RT_SCHED_PRIV(rt_current_thread).current_priority);
|
||||||
|
|
||||||
/* set priority mask */
|
/* set priority mask */
|
||||||
#if RT_THREAD_PRIORITY_MAX > 32
|
#if RT_THREAD_PRIORITY_MAX > 32
|
||||||
rt_thread_ready_table[thread->number] |= thread->high_mask;
|
rt_thread_ready_table[RT_SCHED_PRIV(thread).number] |= RT_SCHED_PRIV(thread).high_mask;
|
||||||
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
|
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
|
||||||
rt_thread_ready_priority_group |= thread->number_mask;
|
rt_thread_ready_priority_group |= RT_SCHED_PRIV(thread).number_mask;
|
||||||
|
|
||||||
__exit:
|
__exit:
|
||||||
/* enable interrupt */
|
/* enable interrupt */
|
||||||
|
@ -416,7 +481,7 @@ __exit:
|
||||||
*
|
*
|
||||||
* @note Please do not invoke this function in user application.
|
* @note Please do not invoke this function in user application.
|
||||||
*/
|
*/
|
||||||
void rt_schedule_remove_thread(struct rt_thread *thread)
|
void rt_sched_remove_thread(struct rt_thread *thread)
|
||||||
{
|
{
|
||||||
rt_base_t level;
|
rt_base_t level;
|
||||||
|
|
||||||
|
@ -427,20 +492,20 @@ void rt_schedule_remove_thread(struct rt_thread *thread)
|
||||||
|
|
||||||
LOG_D("remove thread[%.*s], the priority: %d",
|
LOG_D("remove thread[%.*s], the priority: %d",
|
||||||
RT_NAME_MAX, thread->parent.name,
|
RT_NAME_MAX, thread->parent.name,
|
||||||
thread->current_priority);
|
RT_SCHED_PRIV(rt_current_thread).current_priority);
|
||||||
|
|
||||||
/* remove thread from ready list */
|
/* remove thread from ready list */
|
||||||
rt_list_remove(&(thread->tlist));
|
rt_list_remove(&RT_THREAD_LIST_NODE(thread));
|
||||||
if (rt_list_isempty(&(rt_thread_priority_table[thread->current_priority])))
|
if (rt_list_isempty(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority])))
|
||||||
{
|
{
|
||||||
#if RT_THREAD_PRIORITY_MAX > 32
|
#if RT_THREAD_PRIORITY_MAX > 32
|
||||||
rt_thread_ready_table[thread->number] &= ~thread->high_mask;
|
rt_thread_ready_table[RT_SCHED_PRIV(thread).number] &= ~RT_SCHED_PRIV(thread).high_mask;
|
||||||
if (rt_thread_ready_table[thread->number] == 0)
|
if (rt_thread_ready_table[RT_SCHED_PRIV(thread).number] == 0)
|
||||||
{
|
{
|
||||||
rt_thread_ready_priority_group &= ~thread->number_mask;
|
rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
rt_thread_ready_priority_group &= ~thread->number_mask;
|
rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
|
||||||
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
|
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -448,12 +513,54 @@ void rt_schedule_remove_thread(struct rt_thread *thread)
|
||||||
rt_hw_interrupt_enable(level);
|
rt_hw_interrupt_enable(level);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifdef RT_USING_DEBUG
|
||||||
|
|
||||||
|
static volatile int _critical_error_occurred = 0;
|
||||||
|
|
||||||
|
void rt_exit_critical_safe(rt_base_t critical_level)
|
||||||
|
{
|
||||||
|
rt_base_t level;
|
||||||
|
/* disable interrupt */
|
||||||
|
level = rt_hw_interrupt_disable();
|
||||||
|
|
||||||
|
if (!_critical_error_occurred)
|
||||||
|
{
|
||||||
|
if (critical_level != rt_scheduler_lock_nest)
|
||||||
|
{
|
||||||
|
int dummy = 1;
|
||||||
|
_critical_error_occurred = 1;
|
||||||
|
|
||||||
|
rt_kprintf("%s: un-compatible critical level\n" \
|
||||||
|
"\tCurrent %d\n\tCaller %d\n",
|
||||||
|
__func__, rt_scheduler_lock_nest,
|
||||||
|
critical_level);
|
||||||
|
rt_backtrace();
|
||||||
|
|
||||||
|
while (dummy) ;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
rt_hw_interrupt_enable(level);
|
||||||
|
|
||||||
|
rt_exit_critical();
|
||||||
|
}
|
||||||
|
|
||||||
|
#else
|
||||||
|
|
||||||
|
void rt_exit_critical_safe(rt_base_t critical_level)
|
||||||
|
{
|
||||||
|
return rt_exit_critical();
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif
|
||||||
|
RTM_EXPORT(rt_exit_critical_safe);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief This function will lock the thread scheduler.
|
* @brief This function will lock the thread scheduler.
|
||||||
*/
|
*/
|
||||||
void rt_enter_critical(void)
|
rt_base_t rt_enter_critical(void)
|
||||||
{
|
{
|
||||||
rt_base_t level;
|
rt_base_t level;
|
||||||
|
rt_base_t critical_level;
|
||||||
|
|
||||||
/* disable interrupt */
|
/* disable interrupt */
|
||||||
level = rt_hw_interrupt_disable();
|
level = rt_hw_interrupt_disable();
|
||||||
|
@ -463,9 +570,12 @@ void rt_enter_critical(void)
|
||||||
* enough and does not check here
|
* enough and does not check here
|
||||||
*/
|
*/
|
||||||
rt_scheduler_lock_nest ++;
|
rt_scheduler_lock_nest ++;
|
||||||
|
critical_level = rt_scheduler_lock_nest;
|
||||||
|
|
||||||
/* enable interrupt */
|
/* enable interrupt */
|
||||||
rt_hw_interrupt_enable(level);
|
rt_hw_interrupt_enable(level);
|
||||||
|
|
||||||
|
return critical_level;
|
||||||
}
|
}
|
||||||
RTM_EXPORT(rt_enter_critical);
|
RTM_EXPORT(rt_enter_critical);
|
||||||
|
|
||||||
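
rt_enter_critical() now returns the nesting depth it produced, and rt_exit_critical_safe() verifies that depth on release, so unbalanced enter/exit pairs are caught where the damage happens. Usage sketch:

void preemption_free_section(void)
{
    rt_base_t critical_level = rt_enter_critical();

    /* ... work that must not be preempted ... */

    /* under RT_USING_DEBUG this asserts that the nesting still matches */
    rt_exit_critical_safe(critical_level);
}
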
|
@ -511,5 +621,10 @@ rt_uint16_t rt_critical_level(void)
|
||||||
}
|
}
|
||||||
RTM_EXPORT(rt_critical_level);
|
RTM_EXPORT(rt_critical_level);
|
||||||
|
|
||||||
|
rt_err_t rt_sched_thread_bind_cpu(struct rt_thread *thread, int cpu)
|
||||||
|
{
|
||||||
|
return -RT_EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
/**@}*/
|
/**@}*/
|
||||||
/**@endcond*/
|
/**@endcond*/
|
||||||
|
|
src/signal.c (122 lines changed)
|
@ -29,6 +29,8 @@
|
||||||
#define sig_mask(sig_no) (1u << sig_no)
|
#define sig_mask(sig_no) (1u << sig_no)
|
||||||
#define sig_valid(sig_no) (sig_no >= 0 && sig_no < RT_SIG_MAX)
|
#define sig_valid(sig_no) (sig_no >= 0 && sig_no < RT_SIG_MAX)
|
||||||
|
|
||||||
|
static struct rt_spinlock _thread_signal_lock = RT_SPINLOCK_INIT;
|
||||||
|
|
||||||
struct siginfo_node
|
struct siginfo_node
|
||||||
{
|
{
|
||||||
siginfo_t si;
|
siginfo_t si;
|
||||||
|
@ -63,7 +65,7 @@ static void _signal_entry(void *parameter)
|
||||||
#endif /* RT_USING_SMP */
|
#endif /* RT_USING_SMP */
|
||||||
|
|
||||||
LOG_D("switch back to: 0x%08x\n", tid->sp);
|
LOG_D("switch back to: 0x%08x\n", tid->sp);
|
||||||
tid->stat &= ~RT_THREAD_STAT_SIGNAL;
|
RT_SCHED_CTX(tid).stat &= ~RT_THREAD_STAT_SIGNAL;
|
||||||
|
|
||||||
#ifdef RT_USING_SMP
|
#ifdef RT_USING_SMP
|
||||||
rt_hw_context_switch_to((rt_base_t)¶meter, tid);
|
rt_hw_context_switch_to((rt_base_t)¶meter, tid);
|
||||||
|
@ -86,16 +88,16 @@ static void _signal_deliver(rt_thread_t tid)
|
||||||
{
|
{
|
||||||
rt_base_t level;
|
rt_base_t level;
|
||||||
|
|
||||||
level = rt_hw_interrupt_disable();
|
level = rt_spin_lock_irqsave(&_thread_signal_lock);
|
||||||
|
|
||||||
/* thread is not interested in pended signals */
|
/* thread is not interested in pended signals */
|
||||||
if (!(tid->sig_pending & tid->sig_mask))
|
if (!(tid->sig_pending & tid->sig_mask))
|
||||||
{
|
{
|
||||||
rt_hw_interrupt_enable(level);
|
rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((tid->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
|
if ((RT_SCHED_CTX(tid).stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
|
||||||
{
|
{
|
||||||
/* resume thread to handle signal */
|
/* resume thread to handle signal */
|
||||||
#ifdef RT_USING_SMART
|
#ifdef RT_USING_SMART
|
||||||
|
@ -104,9 +106,9 @@ static void _signal_deliver(rt_thread_t tid)
|
||||||
rt_thread_resume(tid);
|
rt_thread_resume(tid);
|
||||||
#endif
|
#endif
|
||||||
/* add signal state */
|
/* add signal state */
|
||||||
tid->stat |= (RT_THREAD_STAT_SIGNAL | RT_THREAD_STAT_SIGNAL_PENDING);
|
RT_SCHED_CTX(tid).stat |= (RT_THREAD_STAT_SIGNAL | RT_THREAD_STAT_SIGNAL_PENDING);
|
||||||
|
|
||||||
rt_hw_interrupt_enable(level);
|
rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
|
||||||
|
|
||||||
/* re-schedule */
|
/* re-schedule */
|
||||||
rt_schedule();
|
rt_schedule();
|
||||||
|
@ -116,9 +118,9 @@ static void _signal_deliver(rt_thread_t tid)
|
||||||
if (tid == rt_thread_self())
|
if (tid == rt_thread_self())
|
||||||
{
|
{
|
||||||
/* add signal state */
|
/* add signal state */
|
||||||
tid->stat |= RT_THREAD_STAT_SIGNAL;
|
RT_SCHED_CTX(tid).stat |= RT_THREAD_STAT_SIGNAL;
|
||||||
|
|
||||||
rt_hw_interrupt_enable(level);
|
rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
|
||||||
|
|
||||||
/* do signal action in self thread context */
|
/* do signal action in self thread context */
|
||||||
if (rt_interrupt_get_nest() == 0)
|
if (rt_interrupt_get_nest() == 0)
|
||||||
|
@ -126,16 +128,16 @@ static void _signal_deliver(rt_thread_t tid)
|
||||||
rt_thread_handle_sig(RT_TRUE);
|
rt_thread_handle_sig(RT_TRUE);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else if (!((tid->stat & RT_THREAD_STAT_SIGNAL_MASK) & RT_THREAD_STAT_SIGNAL))
|
else if (!((RT_SCHED_CTX(tid).stat & RT_THREAD_STAT_SIGNAL_MASK) & RT_THREAD_STAT_SIGNAL))
|
||||||
{
|
{
|
||||||
/* add signal state */
|
/* add signal state */
|
||||||
tid->stat |= (RT_THREAD_STAT_SIGNAL | RT_THREAD_STAT_SIGNAL_PENDING);
|
RT_SCHED_CTX(tid).stat |= (RT_THREAD_STAT_SIGNAL | RT_THREAD_STAT_SIGNAL_PENDING);
|
||||||
|
|
||||||
#ifdef RT_USING_SMP
|
#ifdef RT_USING_SMP
|
||||||
{
|
{
|
||||||
int cpu_id;
|
int cpu_id;
|
||||||
|
|
||||||
cpu_id = tid->oncpu;
|
cpu_id = RT_SCHED_CTX(tid).oncpu;
|
||||||
if ((cpu_id != RT_CPU_DETACHED) && (cpu_id != rt_hw_cpu_id()))
|
if ((cpu_id != RT_CPU_DETACHED) && (cpu_id != rt_hw_cpu_id()))
|
||||||
{
|
{
|
||||||
rt_uint32_t cpu_mask;
|
rt_uint32_t cpu_mask;
|
||||||
|
@ -146,13 +148,13 @@ static void _signal_deliver(rt_thread_t tid)
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
/* point to the signal handle entry */
|
/* point to the signal handle entry */
|
||||||
tid->stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
|
RT_SCHED_CTX(tid).stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
|
||||||
tid->sig_ret = tid->sp;
|
tid->sig_ret = tid->sp;
|
||||||
tid->sp = rt_hw_stack_init((void *)_signal_entry, RT_NULL,
|
tid->sp = rt_hw_stack_init((void *)_signal_entry, RT_NULL,
|
||||||
(void *)((char *)tid->sig_ret - 32), RT_NULL);
|
(void *)((char *)tid->sig_ret - 32), RT_NULL);
|
||||||
#endif /* RT_USING_SMP */
|
#endif /* RT_USING_SMP */
|
||||||
|
|
||||||
rt_hw_interrupt_enable(level);
|
rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
|
||||||
LOG_D("signal stack pointer @ 0x%08x", tid->sp);
|
LOG_D("signal stack pointer @ 0x%08x", tid->sp);
|
||||||
|
|
||||||
/* re-schedule */
|
/* re-schedule */
|
||||||
|
@ -160,7 +162,7 @@ static void _signal_deliver(rt_thread_t tid)
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
rt_hw_interrupt_enable(level);
|
rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -168,37 +170,38 @@ static void _signal_deliver(rt_thread_t tid)
|
||||||
#ifdef RT_USING_SMP
|
#ifdef RT_USING_SMP
|
||||||
void *rt_signal_check(void* context)
|
void *rt_signal_check(void* context)
|
||||||
{
|
{
|
||||||
rt_base_t level;
|
rt_sched_lock_level_t level;
|
||||||
int cpu_id;
|
int cpu_id;
|
||||||
struct rt_cpu* pcpu;
|
struct rt_cpu* pcpu;
|
||||||
struct rt_thread *current_thread;
|
struct rt_thread *current_thread;
|
||||||
|
|
||||||
level = rt_hw_interrupt_disable();
|
level = rt_spin_lock_irqsave(&_thread_signal_lock);
|
||||||
|
|
||||||
cpu_id = rt_hw_cpu_id();
|
cpu_id = rt_hw_cpu_id();
|
||||||
pcpu = rt_cpu_index(cpu_id);
|
pcpu = rt_cpu_index(cpu_id);
|
||||||
current_thread = pcpu->current_thread;
|
current_thread = pcpu->current_thread;
|
||||||
|
|
||||||
if (pcpu->irq_nest)
|
if (pcpu->irq_nest)
|
||||||
{
|
{
|
||||||
rt_hw_interrupt_enable(level);
|
rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
|
||||||
return context;
|
return context;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (current_thread->cpus_lock_nest == 1)
|
if (current_thread->cpus_lock_nest == 1)
|
||||||
{
|
{
|
||||||
if (current_thread->stat & RT_THREAD_STAT_SIGNAL_PENDING)
|
if (RT_SCHED_CTX(current_thread).stat & RT_THREAD_STAT_SIGNAL_PENDING)
|
||||||
{
|
{
|
||||||
void *sig_context;
|
void *sig_context;
|
||||||
|
|
||||||
current_thread->stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
|
RT_SCHED_CTX(current_thread).stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
|
||||||
|
|
||||||
rt_hw_interrupt_enable(level);
|
rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
|
||||||
sig_context = rt_hw_stack_init((void *)_signal_entry, context,
|
sig_context = rt_hw_stack_init((void *)_signal_entry, context,
|
||||||
(void*)((char*)context - 32), RT_NULL);
|
(void*)((char*)context - 32), RT_NULL);
|
||||||
return sig_context;
|
return sig_context;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
rt_hw_interrupt_enable(level);
|
rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
|
||||||
return context;
|
return context;
|
||||||
}
|
}
|
||||||
#endif /* RT_USING_SMP */
|
#endif /* RT_USING_SMP */
|
||||||
|
@ -227,10 +230,14 @@ rt_sighandler_t rt_signal_install(int signo, rt_sighandler_t handler)
|
||||||
|
|
||||||
if (!sig_valid(signo)) return SIG_ERR;
|
if (!sig_valid(signo)) return SIG_ERR;
|
||||||
|
|
||||||
level = rt_hw_interrupt_disable();
|
level = rt_spin_lock_irqsave(&_thread_signal_lock);
|
||||||
if (tid->sig_vectors == RT_NULL)
|
if (tid->sig_vectors == RT_NULL)
|
||||||
{
|
{
|
||||||
|
rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
|
||||||
|
|
||||||
rt_thread_alloc_sig(tid);
|
rt_thread_alloc_sig(tid);
|
||||||
|
|
||||||
|
level = rt_spin_lock_irqsave(&_thread_signal_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (tid->sig_vectors)
|
if (tid->sig_vectors)
|
||||||
|
@ -241,7 +248,7 @@ rt_sighandler_t rt_signal_install(int signo, rt_sighandler_t handler)
|
||||||
else if (handler == SIG_DFL) tid->sig_vectors[signo] = _signal_default_handler;
|
else if (handler == SIG_DFL) tid->sig_vectors[signo] = _signal_default_handler;
|
||||||
else tid->sig_vectors[signo] = handler;
|
else tid->sig_vectors[signo] = handler;
|
||||||
}
|
}
|
||||||
rt_hw_interrupt_enable(level);
|
rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
|
||||||
|
|
||||||
return old;
|
return old;
|
||||||
}
|
}
|
||||||
|
@ -262,11 +269,11 @@ void rt_signal_mask(int signo)
|
||||||
rt_base_t level;
|
rt_base_t level;
|
||||||
rt_thread_t tid = rt_thread_self();
|
rt_thread_t tid = rt_thread_self();
|
||||||
|
|
||||||
level = rt_hw_interrupt_disable();
|
level = rt_spin_lock_irqsave(&_thread_signal_lock);
|
||||||
|
|
||||||
tid->sig_mask &= ~sig_mask(signo);
|
tid->sig_mask &= ~sig_mask(signo);
|
||||||
|
|
||||||
rt_hw_interrupt_enable(level);
|
rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -285,19 +292,19 @@ void rt_signal_unmask(int signo)
|
||||||
rt_base_t level;
|
rt_base_t level;
|
||||||
rt_thread_t tid = rt_thread_self();
|
rt_thread_t tid = rt_thread_self();
|
||||||
|
|
||||||
level = rt_hw_interrupt_disable();
|
level = rt_spin_lock_irqsave(&_thread_signal_lock);
|
||||||
|
|
||||||
tid->sig_mask |= sig_mask(signo);
|
tid->sig_mask |= sig_mask(signo);
|
||||||
|
|
||||||
/* let thread handle pended signals */
|
/* let thread handle pended signals */
|
||||||
if (tid->sig_mask & tid->sig_pending)
|
if (tid->sig_mask & tid->sig_pending)
|
||||||
{
|
{
|
||||||
rt_hw_interrupt_enable(level);
|
rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
|
||||||
_signal_deliver(tid);
|
_signal_deliver(tid);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
rt_hw_interrupt_enable(level);
|
rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -335,7 +342,7 @@ int rt_signal_wait(const rt_sigset_t *set, rt_siginfo_t *si, rt_int32_t timeout)
|
||||||
/* clear siginfo to avoid unknown value */
|
/* clear siginfo to avoid unknown value */
|
||||||
memset(si, 0x0, sizeof(rt_siginfo_t));
|
memset(si, 0x0, sizeof(rt_siginfo_t));
|
||||||
|
|
||||||
level = rt_hw_interrupt_disable();
|
level = rt_spin_lock_irqsave(&_thread_signal_lock);
|
||||||
|
|
||||||
/* already pending */
|
/* already pending */
|
||||||
if (tid->sig_pending & *set) goto __done;
|
if (tid->sig_pending & *set) goto __done;
|
||||||
|
@ -349,7 +356,7 @@ int rt_signal_wait(const rt_sigset_t *set, rt_siginfo_t *si, rt_int32_t timeout)
|
||||||
/* suspend self thread */
|
/* suspend self thread */
|
||||||
rt_thread_suspend_with_flag(tid, RT_UNINTERRUPTIBLE);
|
rt_thread_suspend_with_flag(tid, RT_UNINTERRUPTIBLE);
|
||||||
/* set thread stat as waiting for signal */
|
/* set thread stat as waiting for signal */
|
||||||
tid->stat |= RT_THREAD_STAT_SIGNAL_WAIT;
|
RT_SCHED_CTX(tid).stat |= RT_THREAD_STAT_SIGNAL_WAIT;
|
||||||
|
|
||||||
/* start timeout timer */
|
/* start timeout timer */
|
||||||
if (timeout != RT_WAITING_FOREVER)
|
if (timeout != RT_WAITING_FOREVER)
|
||||||
|
@ -360,21 +367,21 @@ int rt_signal_wait(const rt_sigset_t *set, rt_siginfo_t *si, rt_int32_t timeout)
|
||||||
&timeout);
|
&timeout);
|
||||||
rt_timer_start(&(tid->thread_timer));
|
rt_timer_start(&(tid->thread_timer));
|
||||||
}
|
}
|
||||||
rt_hw_interrupt_enable(level);
|
rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
|
||||||
|
|
||||||
/* do thread scheduling */
|
/* do thread scheduling */
|
||||||
rt_schedule();
|
rt_schedule();
|
||||||
|
|
||||||
level = rt_hw_interrupt_disable();
|
level = rt_spin_lock_irqsave(&_thread_signal_lock);
|
||||||
|
|
||||||
/* remove signal waiting flag */
|
/* remove signal waiting flag */
|
||||||
tid->stat &= ~RT_THREAD_STAT_SIGNAL_WAIT;
|
RT_SCHED_CTX(tid).stat &= ~RT_THREAD_STAT_SIGNAL_WAIT;
|
||||||
|
|
||||||
/* check errno of thread */
|
/* check errno of thread */
|
||||||
if (tid->error == -RT_ETIMEOUT)
|
if (tid->error == -RT_ETIMEOUT)
|
||||||
{
|
{
|
||||||
tid->error = RT_EOK;
|
tid->error = RT_EOK;
|
||||||
rt_hw_interrupt_enable(level);
|
rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
|
||||||
|
|
||||||
/* timer timeout */
|
/* timer timeout */
|
||||||
ret = -RT_ETIMEOUT;
|
ret = -RT_ETIMEOUT;
|
||||||
|
@ -428,7 +435,7 @@ __done:
|
||||||
}
|
}
|
||||||
|
|
||||||
__done_int:
|
__done_int:
|
||||||
rt_hw_interrupt_enable(level);
|
rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
|
||||||
|
|
||||||
__done_return:
|
__done_return:
|
||||||
return ret;
|
return ret;
|
||||||
|
@ -441,11 +448,11 @@ void rt_thread_handle_sig(rt_bool_t clean_state)
|
||||||
rt_thread_t tid = rt_thread_self();
|
rt_thread_t tid = rt_thread_self();
|
||||||
struct siginfo_node *si_node;
|
struct siginfo_node *si_node;
|
||||||
|
|
||||||
level = rt_hw_interrupt_disable();
|
level = rt_spin_lock_irqsave(&_thread_signal_lock);
|
||||||
if (tid->sig_pending & tid->sig_mask)
|
if (tid->sig_pending & tid->sig_mask)
|
||||||
{
|
{
|
||||||
/* if thread is not waiting for signal */
|
/* if thread is not waiting for signal */
|
||||||
if (!(tid->stat & RT_THREAD_STAT_SIGNAL_WAIT))
|
if (!(RT_SCHED_CTX(tid).stat & RT_THREAD_STAT_SIGNAL_WAIT))
|
||||||
{
|
{
|
||||||
while (tid->sig_pending & tid->sig_mask)
|
while (tid->sig_pending & tid->sig_mask)
|
||||||
{
|
{
|
||||||
|
@ -464,12 +471,12 @@ void rt_thread_handle_sig(rt_bool_t clean_state)
|
||||||
signo = si_node->si.si_signo;
|
signo = si_node->si.si_signo;
|
||||||
handler = tid->sig_vectors[signo];
|
handler = tid->sig_vectors[signo];
|
||||||
tid->sig_pending &= ~sig_mask(signo);
|
tid->sig_pending &= ~sig_mask(signo);
|
||||||
rt_hw_interrupt_enable(level);
|
rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
|
||||||
|
|
||||||
LOG_D("handle signal: %d, handler 0x%08x", signo, handler);
|
LOG_D("handle signal: %d, handler 0x%08x", signo, handler);
|
||||||
if (handler) handler(signo);
|
if (handler) handler(signo);
|
||||||
|
|
||||||
level = rt_hw_interrupt_disable();
|
level = rt_spin_lock_irqsave(&_thread_signal_lock);
|
||||||
error = -RT_EINTR;
|
error = -RT_EINTR;
|
||||||
|
|
||||||
rt_mp_free(si_node); /* release this siginfo node */
|
rt_mp_free(si_node); /* release this siginfo node */
|
||||||
|
@ -480,7 +487,7 @@ void rt_thread_handle_sig(rt_bool_t clean_state)
|
||||||
/* whether clean signal status */
|
/* whether clean signal status */
|
||||||
if (clean_state == RT_TRUE)
|
if (clean_state == RT_TRUE)
|
||||||
{
|
{
|
||||||
tid->stat &= ~RT_THREAD_STAT_SIGNAL;
|
RT_SCHED_CTX(tid).stat &= ~RT_THREAD_STAT_SIGNAL;
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
|
@ -488,12 +495,13 @@ void rt_thread_handle_sig(rt_bool_t clean_state)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
rt_hw_interrupt_enable(level);
|
rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
|
||||||
}
|
}
|
||||||
|
|
||||||
void rt_thread_alloc_sig(rt_thread_t tid)
|
void rt_thread_alloc_sig(rt_thread_t tid)
|
||||||
{
|
{
|
||||||
int index;
|
int index;
|
||||||
|
rt_bool_t need_free = RT_FALSE;
|
||||||
rt_base_t level;
|
rt_base_t level;
|
||||||
rt_sighandler_t *vectors;
|
rt_sighandler_t *vectors;
|
||||||
|
|
||||||
|
@ -505,9 +513,23 @@ void rt_thread_alloc_sig(rt_thread_t tid)
|
||||||
vectors[index] = _signal_default_handler;
|
vectors[index] = _signal_default_handler;
|
||||||
}
|
}
|
||||||
|
|
||||||
level = rt_hw_interrupt_disable();
|
level = rt_spin_lock_irqsave(&_thread_signal_lock);
|
||||||
|
|
||||||
|
if (tid->sig_vectors == RT_NULL)
|
||||||
|
{
|
||||||
tid->sig_vectors = vectors;
|
tid->sig_vectors = vectors;
|
||||||
rt_hw_interrupt_enable(level);
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
need_free = RT_TRUE;
|
||||||
|
}
|
||||||
|
|
||||||
|
rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
|
||||||
|
|
||||||
|
if (need_free)
|
||||||
|
{
|
||||||
|
rt_free(vectors);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void rt_thread_free_sig(rt_thread_t tid)
|
void rt_thread_free_sig(rt_thread_t tid)
|
||||||
|
@ -516,13 +538,13 @@ void rt_thread_free_sig(rt_thread_t tid)
|
||||||
struct siginfo_node *si_node;
|
struct siginfo_node *si_node;
|
||||||
rt_sighandler_t *sig_vectors;
|
rt_sighandler_t *sig_vectors;
|
||||||
|
|
||||||
level = rt_hw_interrupt_disable();
|
level = rt_spin_lock_irqsave(&_thread_signal_lock);
|
||||||
si_node = (struct siginfo_node *)tid->si_list;
|
si_node = (struct siginfo_node *)tid->si_list;
|
||||||
tid->si_list = RT_NULL;
|
tid->si_list = RT_NULL;
|
||||||
|
|
||||||
sig_vectors = tid->sig_vectors;
|
sig_vectors = tid->sig_vectors;
|
||||||
tid->sig_vectors = RT_NULL;
|
tid->sig_vectors = RT_NULL;
|
||||||
rt_hw_interrupt_enable(level);
|
rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
|
||||||
|
|
||||||
if (si_node)
|
if (si_node)
|
||||||
{
|
{
|
||||||
|
@ -570,7 +592,7 @@ int rt_thread_kill(rt_thread_t tid, int sig)
|
||||||
si.si_code = SI_USER;
|
si.si_code = SI_USER;
|
||||||
si.si_value.sival_ptr = RT_NULL;
|
si.si_value.sival_ptr = RT_NULL;
|
||||||
|
|
||||||
level = rt_hw_interrupt_disable();
|
level = rt_spin_lock_irqsave(&_thread_signal_lock);
|
||||||
if (tid->sig_pending & sig_mask(sig))
|
if (tid->sig_pending & sig_mask(sig))
|
||||||
{
|
{
|
||||||
/* whether already emits this signal? */
|
/* whether already emits this signal? */
|
||||||
|
@ -590,12 +612,12 @@ int rt_thread_kill(rt_thread_t tid, int sig)
|
||||||
if (entry->si.si_signo == sig)
|
if (entry->si.si_signo == sig)
|
||||||
{
|
{
|
||||||
memcpy(&(entry->si), &si, sizeof(siginfo_t));
|
memcpy(&(entry->si), &si, sizeof(siginfo_t));
|
||||||
rt_hw_interrupt_enable(level);
|
rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
rt_hw_interrupt_enable(level);
|
rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
|
||||||
|
|
||||||
si_node = (struct siginfo_node *) rt_mp_alloc(_siginfo_pool, 0);
|
si_node = (struct siginfo_node *) rt_mp_alloc(_siginfo_pool, 0);
|
||||||
if (si_node)
|
if (si_node)
|
||||||
|
@ -603,7 +625,7 @@ int rt_thread_kill(rt_thread_t tid, int sig)
|
||||||
rt_slist_init(&(si_node->list));
|
rt_slist_init(&(si_node->list));
|
||||||
memcpy(&(si_node->si), &si, sizeof(siginfo_t));
|
memcpy(&(si_node->si), &si, sizeof(siginfo_t));
|
||||||
|
|
||||||
level = rt_hw_interrupt_disable();
|
level = rt_spin_lock_irqsave(&_thread_signal_lock);
|
||||||
|
|
||||||
if (tid->si_list)
|
if (tid->si_list)
|
||||||
{
|
{
|
||||||
|
@ -620,7 +642,7 @@ int rt_thread_kill(rt_thread_t tid, int sig)
|
||||||
/* a new signal */
|
/* a new signal */
|
||||||
tid->sig_pending |= sig_mask(sig);
|
tid->sig_pending |= sig_mask(sig);
|
||||||
|
|
||||||
rt_hw_interrupt_enable(level);
|
rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
|
|
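The signal.c changes replace global interrupt disabling with a dedicated module spinlock, so on SMP only contenders for signal state spin while other cores keep running. A minimal sketch of the recurring conversion pattern, with hypothetical data, assuming only the rt_spin_* API shown in the diff:

#include <rtthread.h>

static struct rt_spinlock _my_lock = RT_SPINLOCK_INIT;
static int _shared_counter;

/* Hypothetical accessor showing the lock/unlock pairing used above. */
void my_counter_add(int delta)
{
    /* disables IRQs locally and takes the lock; other cores only
     * block if they touch the same lock */
    rt_base_t level = rt_spin_lock_irqsave(&_my_lock);
    _shared_counter += delta;
    rt_spin_unlock_irqrestore(&_my_lock, level);
}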
599 src/thread.c

@@ -79,31 +79,30 @@ RT_OBJECT_HOOKLIST_DEFINE(rt_thread_inited);
 static void _thread_exit(void)
 {
     struct rt_thread *thread;
-    rt_base_t level;
+    rt_sched_lock_level_t slvl;
+    rt_base_t critical_level;
 
     /* get current thread */
     thread = rt_thread_self();
 
-    rt_enter_critical();
+    critical_level = rt_enter_critical();
+    rt_sched_lock(&slvl);
 
     /* remove from schedule */
-    rt_schedule_remove_thread(thread);
+    rt_sched_remove_thread(thread);
 
-    level = rt_spin_lock_irqsave(&(thread->spinlock));
 
     /* remove it from timer list */
     rt_timer_detach(&thread->thread_timer);
 
     /* change stat */
-    thread->stat = RT_THREAD_CLOSE;
+    rt_sched_thread_close(thread);
 
-    rt_spin_unlock_irqrestore(&(thread->spinlock), level);
+    rt_sched_unlock(slvl);
 
     /* insert to defunct thread list */
     rt_thread_defunct_enqueue(thread);
 
-    LOG_D("line:%d thread:%s exit\n", __LINE__, rt_thread_self()->parent.name);
-    rt_exit_critical();
+    rt_exit_critical_safe(critical_level);
 
     /* switch to next task */
     rt_schedule();
@@ -118,41 +117,66 @@ static void _thread_exit(void)
 static void _thread_timeout(void *parameter)
 {
     struct rt_thread *thread;
-    rt_base_t level;
+    rt_sched_lock_level_t slvl;
 
     thread = (struct rt_thread *)parameter;
 
     /* parameter check */
     RT_ASSERT(thread != RT_NULL);
-    RT_ASSERT((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK);
     RT_ASSERT(rt_object_get_type((rt_object_t)thread) == RT_Object_Class_Thread);
-    level = rt_spin_lock_irqsave(&(thread->spinlock));
+
+    rt_sched_lock(&slvl);
+
+    /**
+     * resume of the thread and stop of the thread timer should be an atomic
+     * operation. So we don't expect that the thread has resumed here.
+     */
+    RT_ASSERT(rt_sched_thread_is_suspended(thread));
 
     /* set error number */
     thread->error = -RT_ETIMEOUT;
 
     /* remove from suspend list */
-    rt_list_remove(&(thread->tlist));
-    rt_spin_unlock_irqrestore(&(thread->spinlock), level);
+    rt_list_remove(&RT_THREAD_LIST_NODE(thread));
+
     /* insert to schedule ready list */
-    rt_schedule_insert_thread(thread);
-    /* do schedule */
-    rt_schedule();
+    rt_sched_insert_thread(thread);
+    /* do schedule and release the scheduler lock */
+    rt_sched_unlock_n_resched(slvl);
 }
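_thread_timeout now keeps the scheduler lock across "observe suspended, leave suspend list, become ready, reschedule", and rt_sched_unlock_n_resched() drops the lock and performs the pending reschedule in one step. A hedged sketch of that wakeup pattern, mirroring the function above and assuming only the rt_sched_* calls the diff introduces:

#include <rtthread.h>

/* Sketch of a timeout-style wakeup under the scheduler lock. */
static void wake_suspended_thread(rt_thread_t thread)
{
    rt_sched_lock_level_t slvl;

    rt_sched_lock(&slvl);
    if (rt_sched_thread_is_suspended(thread))
    {
        thread->error = -RT_ETIMEOUT;
        rt_list_remove(&RT_THREAD_LIST_NODE(thread)); /* leave suspend list */
        rt_sched_insert_thread(thread);               /* enter ready queue */
        rt_sched_unlock_n_resched(slvl);              /* unlock + reschedule */
    }
    else
    {
        rt_sched_unlock(slvl);                        /* nothing to do */
    }
}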
 
-/* release the mutex held by a thread when thread is reclaimed */
 #ifdef RT_USING_MUTEX
-static void _free_owned_mutex(rt_thread_t thread)
+static void _thread_detach_from_mutex(rt_thread_t thread)
 {
     rt_list_t *node;
     rt_list_t *tmp_list;
     struct rt_mutex *mutex;
+    rt_base_t level;
+
+    level = rt_spin_lock_irqsave(&thread->spinlock);
+
+    /* check if thread is waiting on a mutex */
+    if ((thread->pending_object) &&
+        (rt_object_get_type(thread->pending_object) == RT_Object_Class_Mutex))
+    {
+        /* remove it from its waiting list */
+        struct rt_mutex *mutex = (struct rt_mutex*)thread->pending_object;
+        rt_mutex_drop_thread(mutex, thread);
+        thread->pending_object = RT_NULL;
+    }
+
+    /* free taken mutexes after detaching from the waiting list, so we don't lose a mutex we just took */
     rt_list_for_each_safe(node, tmp_list, &(thread->taken_object_list))
     {
         mutex = rt_list_entry(node, struct rt_mutex, taken_list);
         rt_mutex_release(mutex);
     }
+
+    rt_spin_unlock_irqrestore(&thread->spinlock, level);
 }
+
+#else
+
+static void _thread_detach_from_mutex(rt_thread_t thread) {}
 #endif
 
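The helper first drops the thread from any mutex it is pending on, then releases everything on its taken list. The take side of that bookkeeping is not part of this commit; the sketch below is an assumption about what rt_mutex_take presumably does with the two fields the helper reads, shown only to make the ordering concrete:

/* Sketch (assumption, not kernel source): take-side bookkeeping for
 * the fields _thread_detach_from_mutex() walks. */
static void sketch_mutex_take_bookkeeping(struct rt_mutex *mutex, rt_thread_t thread, int acquired)
{
    if (acquired)
    {
        /* owner records the mutex on its taken_object_list */
        rt_list_insert_after(&(thread->taken_object_list), &(mutex->taken_list));
    }
    else
    {
        /* blocked taker remembers what it is pending on, so a later
         * detach can rt_mutex_drop_thread() exactly this object */
        thread->pending_object = (rt_object_t)mutex;
    }
}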
 static rt_err_t _thread_init(struct rt_thread *thread,
@@ -166,16 +190,14 @@ static rt_err_t _thread_init(struct rt_thread *thread,
 {
     RT_UNUSED(name);
 
-    /* init thread list */
-    rt_list_init(&(thread->tlist));
-    rt_list_init(&(thread->tlist_schedule));
+    rt_sched_thread_init_ctx(thread, tick, priority);
 
 #ifdef RT_USING_MEM_PROTECTION
     thread->mem_regions = RT_NULL;
 #endif
 
 #ifdef RT_USING_SMART
-    thread->wakeup.func = RT_NULL;
+    thread->wakeup_handle.func = RT_NULL;
 #endif
 
     thread->entry = (void *)entry;
@@ -200,13 +222,6 @@ static rt_err_t _thread_init(struct rt_thread *thread,
                           (void *)_thread_exit);
 #endif /* ARCH_CPU_STACK_GROWS_UPWARD */
 
-    /* priority init */
-    RT_ASSERT(priority < RT_THREAD_PRIORITY_MAX);
-    thread->init_priority    = priority;
-    thread->current_priority = priority;
-
-    thread->number_mask = 0;
-
 #ifdef RT_USING_MUTEX
     rt_list_init(&thread->taken_object_list);
     thread->pending_object = RT_NULL;
@@ -217,28 +232,13 @@ static rt_err_t _thread_init(struct rt_thread *thread,
     thread->event_info = 0;
 #endif /* RT_USING_EVENT */
 
-#if RT_THREAD_PRIORITY_MAX > 32
-    thread->number = 0;
-    thread->high_mask = 0;
-#endif /* RT_THREAD_PRIORITY_MAX > 32 */
-
-    /* tick init */
-    rt_atomic_store(&thread->init_tick, tick);
-    rt_atomic_store(&thread->remaining_tick, tick);
-
     /* error and flags */
     thread->error = RT_EOK;
-    thread->stat  = RT_THREAD_INIT;
-
-#ifdef RT_USING_SMP
-    /* not bind on any cpu */
-    thread->bind_cpu = RT_CPUS_NR;
-    thread->oncpu    = RT_CPU_DETACHED;
 
     /* lock init */
+#ifdef RT_USING_SMP
     rt_atomic_store(&thread->cpus_lock_nest, 0);
-    rt_atomic_store(&thread->critical_lock_nest, 0);
-#endif /* RT_USING_SMP */
+#endif
 
     /* initialize cleanup function and user data */
     thread->cleanup = 0;
@@ -250,7 +250,7 @@ static rt_err_t _thread_init(struct rt_thread *thread,
                   _thread_timeout,
                   thread,
                   0,
-                  RT_TIMER_FLAG_ONE_SHOT);
+                  RT_TIMER_FLAG_ONE_SHOT | RT_TIMER_FLAG_THREAD_TIMER);
 
     /* initialize signal */
 #ifdef RT_USING_SIGNALS
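The per-thread timer is now created with RT_TIMER_FLAG_THREAD_TIMER so the timer core can recognize thread timers and coordinate them with the scheduler lock (see the timer.c changes later in this commit). A minimal creation sketch under that assumption; the names and the 10-tick period are hypothetical:

#include <rtthread.h>

static struct rt_timer demo_timer;

static void demo_timeout(void *param)
{
    /* runs once when the one-shot period elapses */
}

/* Hypothetical init: one-shot timer marked as a thread timer. */
void demo_timer_setup(void)
{
    rt_timer_init(&demo_timer, "demo", demo_timeout, RT_NULL, 10,
                  RT_TIMER_FLAG_ONE_SHOT | RT_TIMER_FLAG_THREAD_TIMER);
}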
@@ -268,6 +268,7 @@ static rt_err_t _thread_init(struct rt_thread *thread,
     thread->tid_ref_count = 0;
     thread->lwp = RT_NULL;
     thread->susp_recycler = RT_NULL;
+    thread->robust_list = RT_NULL;
     rt_list_init(&(thread->sibling));
 
     /* lwp thread-signal init */
@@ -392,34 +393,24 @@ rt_err_t rt_thread_startup(rt_thread_t thread)
 {
     /* parameter check */
     RT_ASSERT(thread != RT_NULL);
-    RT_ASSERT((thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_INIT);
+    RT_ASSERT((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_INIT);
     RT_ASSERT(rt_object_get_type((rt_object_t)thread) == RT_Object_Class_Thread);
 
-    /* calculate priority attribute */
-#if RT_THREAD_PRIORITY_MAX > 32
-    thread->number      = thread->current_priority >> 3;            /* 5bit */
-    thread->number_mask = 1L << thread->number;
-    thread->high_mask   = 1L << (thread->current_priority & 0x07);  /* 3bit */
-#else
-    thread->number_mask = 1L << thread->current_priority;
-#endif /* RT_THREAD_PRIORITY_MAX > 32 */
-
     LOG_D("startup a thread:%s with priority:%d",
           thread->parent.name, thread->current_priority);
-    /* change thread stat */
-    thread->stat = RT_THREAD_SUSPEND;
-    /* then resume it */
+    /* calculate priority attribute and reset thread stat to suspend */
+    rt_sched_thread_startup(thread);
+
+    /* resume and do a schedule if scheduler is available */
     rt_thread_resume(thread);
-    if (rt_thread_self() != RT_NULL)
-    {
-        /* do a scheduling */
-        rt_schedule();
-    }
 
     return RT_EOK;
 }
 RTM_EXPORT(rt_thread_startup);
 
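The deleted block shows the two-level priority bitmap RT-Thread uses when RT_THREAD_PRIORITY_MAX > 32: the priority splits into a 5-bit group index and a 3-bit in-group position, so the scheduler finds the highest-priority ready group with a single bit scan. rt_sched_thread_startup() now computes this inside the scheduler; the math itself is unchanged. A worked illustration of the split:

#include <stdint.h>
#include <stdio.h>

/* Worked example of the 5-bit/3-bit split the deleted code performed
 * (priorities 0..255 when RT_THREAD_PRIORITY_MAX > 32). */
int main(void)
{
    uint8_t  current_priority = 21;                       /* example value */
    uint8_t  number      = current_priority >> 3;         /* group: 21/8 = 2 */
    uint32_t number_mask = 1UL << number;                 /* bit 2 of group map */
    uint8_t  high_mask   = 1U << (current_priority & 0x07); /* bit 5 in group */

    printf("group %u, group mask 0x%lx, in-group mask 0x%x\n",
           number, (unsigned long)number_mask, high_mask);
    return 0;
}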
+static rt_err_t _thread_detach(rt_thread_t thread);
+
 /**
  * @brief This function will detach a thread. The thread object will be removed from
  *        thread queue and detached/deleted from the system object management.
@@ -431,52 +422,68 @@ RTM_EXPORT(rt_thread_startup);
  */
 rt_err_t rt_thread_detach(rt_thread_t thread)
 {
-    rt_base_t level;
-
     /* parameter check */
     RT_ASSERT(thread != RT_NULL);
     RT_ASSERT(rt_object_get_type((rt_object_t)thread) == RT_Object_Class_Thread);
     RT_ASSERT(rt_object_is_systemobject((rt_object_t)thread));
 
-    if ((thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_CLOSE)
-        return RT_EOK;
+    return _thread_detach(thread);
+}
+RTM_EXPORT(rt_thread_detach);
 
-    rt_enter_critical();
+static rt_err_t _thread_detach(rt_thread_t thread)
+{
+    rt_err_t error;
+    rt_sched_lock_level_t slvl;
+    rt_uint8_t thread_status;
+    rt_base_t critical_level;
 
-    if ((thread->stat & RT_THREAD_STAT_MASK) != RT_THREAD_INIT)
+    /**
+     * forbid scheduling on current core before returning since current thread
+     * may be detached from scheduler.
+     */
+    critical_level = rt_enter_critical();
+
+    /* before checking status of scheduler */
+    rt_sched_lock(&slvl);
+
+    /* check if thread is already closed */
+    thread_status = rt_sched_thread_get_stat(thread);
+    if (thread_status != RT_THREAD_CLOSE)
+    {
+        if (thread_status != RT_THREAD_INIT)
         {
             /* remove from schedule */
-            rt_schedule_remove_thread(thread);
+            rt_sched_remove_thread(thread);
         }
 
-    /* disable interrupt */
-    level = rt_spin_lock_irqsave(&(thread->spinlock));
-
         /* release thread timer */
         rt_timer_detach(&(thread->thread_timer));
 
         /* change stat */
-        thread->stat = RT_THREAD_CLOSE;
+        rt_sched_thread_close(thread);
 
-#ifdef RT_USING_MUTEX
-    _free_owned_mutex(thread);
-    if ((thread->pending_object) &&
-        (rt_object_get_type(thread->pending_object) == RT_Object_Class_Mutex))
-    {
-        struct rt_mutex *mutex = (struct rt_mutex*)thread->pending_object;
-        rt_mutex_drop_thread(mutex, thread);
-        thread->pending_object = RT_NULL;
-    }
-#endif
-    rt_spin_unlock_irqrestore(&(thread->spinlock), level);
+        /* scheduler works are done */
+        rt_sched_unlock(slvl);
+
+        _thread_detach_from_mutex(thread);
 
         /* insert to defunct thread list */
         rt_thread_defunct_enqueue(thread);
 
-    rt_exit_critical();
-    return RT_EOK;
+        error = RT_EOK;
+    }
+    else
+    {
+        rt_sched_unlock(slvl);
+
+        /* already closed */
+        error = RT_EOK;
+    }
+
+    rt_exit_critical_safe(critical_level);
+    return error;
 }
-RTM_EXPORT(rt_thread_detach);
 
 #ifdef RT_USING_HEAP
 /**
@@ -546,47 +553,12 @@ RTM_EXPORT(rt_thread_create);
  */
 rt_err_t rt_thread_delete(rt_thread_t thread)
 {
-    rt_base_t level;
-
     /* parameter check */
     RT_ASSERT(thread != RT_NULL);
     RT_ASSERT(rt_object_get_type((rt_object_t)thread) == RT_Object_Class_Thread);
     RT_ASSERT(rt_object_is_systemobject((rt_object_t)thread) == RT_FALSE);
 
-    if ((thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_CLOSE)
-        return RT_EOK;
-
-    rt_enter_critical();
-
-    if ((thread->stat & RT_THREAD_STAT_MASK) != RT_THREAD_INIT)
-    {
-        /* remove from schedule */
-        rt_schedule_remove_thread(thread);
-    }
-    level = rt_spin_lock_irqsave(&(thread->spinlock));
-
-    /* release thread timer */
-    rt_timer_detach(&(thread->thread_timer));
-
-    /* change stat */
-    thread->stat = RT_THREAD_CLOSE;
-
-#ifdef RT_USING_MUTEX
-    _free_owned_mutex(thread);
-    if ((thread->pending_object) &&
-        (rt_object_get_type(thread->pending_object) == RT_Object_Class_Mutex))
-    {
-        struct rt_mutex *mutex = (struct rt_mutex*)thread->pending_object;
-        rt_mutex_drop_thread(mutex, thread);
-        thread->pending_object = RT_NULL;
-    }
-#endif
-    rt_spin_unlock_irqrestore(&(thread->spinlock), level);
-    /* insert to defunct thread list */
-    rt_thread_defunct_enqueue(thread);
-
-    rt_exit_critical();
-    return RT_EOK;
+    return _thread_detach(thread);
 }
 RTM_EXPORT(rt_thread_delete);
 #endif /* RT_USING_HEAP */
@@ -601,15 +573,12 @@ RTM_EXPORT(rt_thread_delete);
  */
 rt_err_t rt_thread_yield(void)
 {
-    struct rt_thread *thread;
-    rt_base_t level;
+    rt_sched_lock_level_t slvl;
+    rt_sched_lock(&slvl);
 
-    thread = rt_thread_self();
-    level = rt_spin_lock_irqsave(&(thread->spinlock));
-    rt_atomic_store(&thread->remaining_tick, thread->init_tick);
-    thread->stat |= RT_THREAD_STAT_YIELD;
-    rt_spin_unlock_irqrestore(&(thread->spinlock), level);
-    rt_schedule();
+    rt_sched_thread_yield(rt_thread_self());
+
+    rt_sched_unlock_n_resched(slvl);
 
     return RT_EOK;
 }
@@ -626,8 +595,8 @@ RTM_EXPORT(rt_thread_yield);
  */
 static rt_err_t _thread_sleep(rt_tick_t tick)
 {
-    rt_base_t level;
     struct rt_thread *thread;
+    rt_base_t critical_level;
     int err;
 
     if (tick == 0)
@@ -642,37 +611,37 @@ static rt_err_t _thread_sleep(rt_tick_t tick)
 
     /* current context checking */
     RT_DEBUG_SCHEDULER_AVAILABLE(RT_TRUE);
 
     /* reset thread error */
     thread->error = RT_EOK;
-    level = rt_hw_local_irq_disable();
+
+    /* lock scheduler since current thread may be suspended */
+    critical_level = rt_enter_critical();
 
     /* suspend thread */
-    rt_enter_critical();
     err = rt_thread_suspend_with_flag(thread, RT_INTERRUPTIBLE);
-    rt_spin_lock(&(thread->spinlock));
+
     /* reset the timeout of thread timer and start it */
     if (err == RT_EOK)
     {
         rt_timer_control(&(thread->thread_timer), RT_TIMER_CTRL_SET_TIME, &tick);
         rt_timer_start(&(thread->thread_timer));
 
-        /* enable interrupt */
-        rt_spin_unlock(&(thread->spinlock));
-        rt_hw_local_irq_enable(level);
-        rt_exit_critical();
-
         thread->error = -RT_EINTR;
 
+        /* notify a pending rescheduling */
         rt_schedule();
 
+        /* exit critical and do a rescheduling */
+        rt_exit_critical_safe(critical_level);
+
         /* clear error number of this thread to RT_EOK */
         if (thread->error == -RT_ETIMEOUT)
             thread->error = RT_EOK;
     }
     else
     {
-        rt_spin_unlock(&(thread->spinlock));
-        rt_hw_local_irq_enable(level);
-        rt_exit_critical();
+        rt_exit_critical_safe(critical_level);
     }
 
     return err;
@@ -704,9 +673,9 @@ RTM_EXPORT(rt_thread_delay);
  */
 rt_err_t rt_thread_delay_until(rt_tick_t *tick, rt_tick_t inc_tick)
 {
-    rt_base_t level;
     struct rt_thread *thread;
     rt_tick_t cur_tick;
+    rt_base_t critical_level;
 
     RT_ASSERT(tick != RT_NULL);
 
@@ -719,7 +688,7 @@ rt_err_t rt_thread_delay_until(rt_tick_t *tick, rt_tick_t inc_tick)
     thread->error = RT_EOK;
 
     /* disable interrupt */
-    level = rt_hw_local_irq_disable();
+    critical_level = rt_enter_critical();
 
     cur_tick = rt_tick_get();
     if (cur_tick - *tick < inc_tick)
@@ -729,19 +698,14 @@ rt_err_t rt_thread_delay_until(rt_tick_t *tick, rt_tick_t inc_tick)
         *tick += inc_tick;
         left_tick = *tick - cur_tick;
 
-        rt_enter_critical();
         /* suspend thread */
         rt_thread_suspend_with_flag(thread, RT_UNINTERRUPTIBLE);
 
-        rt_spin_lock(&(thread->spinlock));
-
         /* reset the timeout of thread timer and start it */
         rt_timer_control(&(thread->thread_timer), RT_TIMER_CTRL_SET_TIME, &left_tick);
         rt_timer_start(&(thread->thread_timer));
 
-        rt_spin_unlock(&(thread->spinlock));
-        rt_hw_local_irq_enable(level);
-        rt_exit_critical();
+        rt_exit_critical_safe(critical_level);
 
         rt_schedule();
 
@@ -754,7 +718,7 @@ rt_err_t rt_thread_delay_until(rt_tick_t *tick, rt_tick_t inc_tick)
     else
     {
        *tick = cur_tick;
-        rt_hw_local_irq_enable(level);
+        rt_exit_critical_safe(critical_level);
    }
 
     return thread->error;
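rt_thread_delay_until() keeps an absolute wake-up reference in *tick, so periodic work does not drift even when the loop body takes variable time. A minimal usage sketch with a hypothetical task and a 1000-tick period:

#include <rtthread.h>

/* Hypothetical periodic worker built on rt_thread_delay_until(). */
static void periodic_entry(void *param)
{
    rt_tick_t next = rt_tick_get();   /* absolute reference point */

    while (1)
    {
        /* ... do the periodic work ... */

        /* sleep until reference + 1000 ticks; the callee advances
         * 'next', so the period never accumulates drift */
        rt_thread_delay_until(&next, 1000);
    }
}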
@@ -780,65 +744,6 @@ rt_err_t rt_thread_mdelay(rt_int32_t ms)
 RTM_EXPORT(rt_thread_mdelay);
 
 #ifdef RT_USING_SMP
-static void rt_thread_cpu_bind(rt_thread_t thread, int cpu)
-{
-    rt_base_t level;
-
-    if (cpu >= RT_CPUS_NR)
-    {
-        cpu = RT_CPUS_NR;
-    }
-
-    if ((thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_READY)
-    {
-        /* unbind */
-        /* remove from old ready queue */
-        rt_schedule_remove_thread(thread);
-        /* change thread bind cpu */
-        thread->bind_cpu = cpu;
-        /* add to new ready queue */
-        rt_schedule_insert_thread(thread);
-        if (rt_thread_self() != RT_NULL)
-        {
-            rt_schedule();
-        }
-    }
-    else
-    {
-        level = rt_spin_lock_irqsave(&(thread->spinlock));
-        thread->bind_cpu = cpu;
-        if ((thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
-        {
-            /* thread is running on a cpu */
-            int current_cpu = rt_hw_cpu_id();
-
-            if (cpu != RT_CPUS_NR)
-            {
-                if (thread->oncpu == current_cpu)
-                {
-                    /* current thread on current cpu */
-                    if (cpu != current_cpu)
-                    {
-                        /* bind to other cpu */
-                        rt_hw_ipi_send(RT_SCHEDULE_IPI, 1U << cpu);
-                        /* self cpu need reschedule */
-                        rt_spin_unlock_irqrestore(&(thread->spinlock), level);
-                        rt_schedule();
-                        level = rt_spin_lock_irqsave(&(thread->spinlock));
-                    }
-                    /* else do nothing */
-                }
-                else
-                {
-                    /* no running on self cpu, but dest cpu can be itself */
-                    rt_hw_ipi_send(RT_SCHEDULE_IPI, 1U << thread->oncpu);
-                }
-            }
-            /* else do nothing */
-        }
-        rt_spin_unlock_irqrestore(&(thread->spinlock), level);
-    }
-}
 #endif
 
 /**
@@ -863,8 +768,6 @@ static void rt_thread_cpu_bind(rt_thread_t thread, int cpu)
  */
 rt_err_t rt_thread_control(rt_thread_t thread, int cmd, void *arg)
 {
-    rt_base_t level;
-
     /* parameter check */
     RT_ASSERT(thread != RT_NULL);
     RT_ASSERT(rt_object_get_type((rt_object_t)thread) == RT_Object_Class_Thread);
@@ -873,44 +776,12 @@ rt_err_t rt_thread_control(rt_thread_t thread, int cmd, void *arg)
     {
     case RT_THREAD_CTRL_CHANGE_PRIORITY:
     {
-        /* for ready thread, change queue */
-        if ((thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_READY)
-        {
-            /* remove thread from schedule queue first */
-            rt_schedule_remove_thread(thread);
-            level = rt_spin_lock_irqsave(&(thread->spinlock));
-            /* change thread priority */
-            thread->current_priority = *(rt_uint8_t *)arg;
-
-            /* recalculate priority attribute */
-#if RT_THREAD_PRIORITY_MAX > 32
-            thread->number      = thread->current_priority >> 3;          /* 5bit */
-            thread->number_mask = 1 << thread->number;
-            thread->high_mask   = 1 << (thread->current_priority & 0x07); /* 3bit */
-#else
-            thread->number_mask = 1 << thread->current_priority;
-#endif /* RT_THREAD_PRIORITY_MAX > 32 */
-            thread->stat = RT_THREAD_INIT;
-            rt_spin_unlock_irqrestore(&(thread->spinlock), level);
-            /* insert thread to schedule queue again */
-            rt_schedule_insert_thread(thread);
-        }
-        else
-        {
-            level = rt_spin_lock_irqsave(&(thread->spinlock));
-            thread->current_priority = *(rt_uint8_t *)arg;
-
-            /* recalculate priority attribute */
-#if RT_THREAD_PRIORITY_MAX > 32
-            thread->number      = thread->current_priority >> 3;          /* 5bit */
-            thread->number_mask = 1 << thread->number;
-            thread->high_mask   = 1 << (thread->current_priority & 0x07); /* 3bit */
-#else
-            thread->number_mask = 1 << thread->current_priority;
-#endif /* RT_THREAD_PRIORITY_MAX > 32 */
-            rt_spin_unlock_irqrestore(&(thread->spinlock), level);
-        }
-        break;
+        rt_err_t error;
+        rt_sched_lock_level_t slvl;
+        rt_sched_lock(&slvl);
+        error = rt_sched_thread_change_priority(thread, *(rt_uint8_t *)arg);
+        rt_sched_unlock(slvl);
+        return error;
     }
 
     case RT_THREAD_CTRL_STARTUP:
@@ -936,16 +807,14 @@ rt_err_t rt_thread_control(rt_thread_t thread, int cmd, void *arg)
         return rt_err;
     }
 
-#ifdef RT_USING_SMP
     case RT_THREAD_CTRL_BIND_CPU:
     {
         rt_uint8_t cpu;
 
         cpu = (rt_uint8_t)(size_t)arg;
-        rt_thread_cpu_bind(thread, cpu);
-        break;
+        return rt_sched_thread_bind_cpu(thread, cpu);
     }
-#endif /*RT_USING_SMP*/
 
     default:
         break;
     }
@@ -958,7 +827,7 @@ RTM_EXPORT(rt_thread_control);
 #include <lwp_signal.h>
 #endif
 
-static void rt_thread_set_suspend_state(struct rt_thread *thread, int suspend_flag)
+static void _thread_set_suspend_state(struct rt_thread *thread, int suspend_flag)
 {
     rt_uint8_t stat = RT_THREAD_SUSPEND_UNINTERRUPTIBLE;
 
@@ -978,9 +847,110 @@ static void rt_thread_set_suspend_state(struct rt_thread *thread, int suspend_flag)
         RT_ASSERT(0);
         break;
     }
-    thread->stat = stat | (thread->stat & ~RT_THREAD_STAT_MASK);
+    RT_SCHED_CTX(thread).stat = stat | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
 }
 
+/**
+ * @brief This function will suspend the specified thread and change it to suspend state.
+ *
+ * @note This function ONLY can suspend current thread itself.
+ *           rt_thread_suspend(rt_thread_self());
+ *
+ *       Do not use rt_thread_suspend to suspend other threads. You have no way of knowing what code a
+ *       thread is executing when you suspend it. If you suspend a thread while it is sharing a resource with
+ *       other threads and occupying this resource, starvation can occur very easily.
+ *
+ * @param thread the thread to be suspended.
+ * @param susp_list the list the thread is enqueued to. RT_NULL if no list.
+ * @param ipc_flags is a flag for the thread object to be suspended. It determines how the thread is suspended.
+ *        The flag can be ONE of the following values:
+ *            RT_IPC_FLAG_PRIO          The pending threads will queue in order of priority.
+ *            RT_IPC_FLAG_FIFO          The pending threads will queue in the first-in-first-out method
+ *                                      (also known as first-come-first-served (FCFS) scheduling strategy).
+ *        NOTE: RT_IPC_FLAG_FIFO is a non-real-time scheduling mode. It is strongly recommended to use
+ *        RT_IPC_FLAG_PRIO to ensure the thread is real-time UNLESS your application is concerned about
+ *        the first-in-first-out principle, and you clearly understand that all threads involved in
+ *        this semaphore will become non-real-time threads.
+ * @param suspend_flag status flag of the thread to be suspended.
+ *
+ * @return Return the operation status. If the return value is RT_EOK, the function is successfully executed.
+ *         If the return value is any other values, it means this operation failed.
+ */
+rt_err_t rt_thread_suspend_to_list(rt_thread_t thread, rt_list_t *susp_list, int ipc_flags, int suspend_flag)
+{
+    rt_base_t stat;
+    rt_sched_lock_level_t slvl;
+
+    /* parameter check */
+    RT_ASSERT(thread != RT_NULL);
+    RT_ASSERT(rt_object_get_type((rt_object_t)thread) == RT_Object_Class_Thread);
+    RT_ASSERT(thread == rt_thread_self());
+
+    LOG_D("thread suspend: %s", thread->parent.name);
+
+    rt_sched_lock(&slvl);
+
+    stat = rt_sched_thread_get_stat(thread);
+    if ((stat != RT_THREAD_READY) && (stat != RT_THREAD_RUNNING))
+    {
+        LOG_D("thread suspend: thread disorder, 0x%2x", thread->stat);
+        rt_sched_unlock(slvl);
+        return -RT_ERROR;
+    }
+
+    if (stat == RT_THREAD_RUNNING)
+    {
+        /* not suspend running status thread on other core */
+        RT_ASSERT(thread == rt_thread_self());
+    }
+
+#ifdef RT_USING_SMART
+    rt_sched_unlock(slvl);
+
+    /* check pending signals for thread before suspend */
+    if (lwp_thread_signal_suspend_check(thread, suspend_flag) == 0)
+    {
+        /* not to suspend */
+        return -RT_EINTR;
+    }
+
+    rt_sched_lock(&slvl);
+    if (stat == RT_THREAD_READY)
+    {
+        stat = rt_sched_thread_get_stat(thread);
+
+        if (stat != RT_THREAD_READY)
+        {
+            /* status updated while we check for signal */
+            rt_sched_unlock(slvl);
+            return -RT_ERROR;
+        }
+    }
+#endif
+
+    /* change thread stat */
+    rt_sched_remove_thread(thread);
+    _thread_set_suspend_state(thread, suspend_flag);
+
+    if (susp_list)
+    {
+        /**
+         * enqueue thread on the push list before leaving critical region of
+         * scheduler, so we won't miss notification of async events.
+         */
+        rt_susp_list_enqueue(susp_list, thread, ipc_flags);
+    }
+
+    /* stop thread timer anyway */
+    rt_sched_thread_timer_stop(thread);
+
+    rt_sched_unlock(slvl);
+
+    RT_OBJECT_HOOK_CALL(rt_thread_suspend_hook, (thread));
+    return RT_EOK;
+}
+RTM_EXPORT(rt_thread_suspend_to_list);
+
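rt_thread_suspend_to_list() folds "suspend self" and "enqueue on a wait list" into one scheduler-locked step, so an asynchronous wakeup cannot slip in between the two. A hedged sketch of how an IPC-style wait might use it; the wait list, the helper name, and the error-propagation convention are hypothetical:

#include <rtthread.h>

static rt_list_t demo_waiters = RT_LIST_OBJECT_INIT(demo_waiters);

/* Hypothetical blocking wait: park the caller on demo_waiters in
 * priority order, then let the scheduler switch away. */
static rt_err_t demo_wait(void)
{
    rt_err_t err = rt_thread_suspend_to_list(rt_thread_self(), &demo_waiters,
                                             RT_IPC_FLAG_PRIO, RT_UNINTERRUPTIBLE);
    if (err == RT_EOK)
    {
        rt_schedule();                  /* actually give up the CPU */
        err = rt_thread_self()->error;  /* set by the waker or a timeout */
    }
    return err;
}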
/**
|
/**
|
||||||
* @brief This function will suspend the specified thread and change it to suspend state.
|
* @brief This function will suspend the specified thread and change it to suspend state.
|
||||||
*
|
*
|
||||||
|
@ -999,52 +969,7 @@ static void rt_thread_set_suspend_state(struct rt_thread *thread, int suspend_fl
|
||||||
*/
|
*/
|
||||||
rt_err_t rt_thread_suspend_with_flag(rt_thread_t thread, int suspend_flag)
|
rt_err_t rt_thread_suspend_with_flag(rt_thread_t thread, int suspend_flag)
|
||||||
{
|
{
|
||||||
rt_base_t stat;
|
return rt_thread_suspend_to_list(thread, RT_NULL, 0, suspend_flag);
|
||||||
rt_base_t level;
|
|
||||||
|
|
||||||
/* parameter check */
|
|
||||||
RT_ASSERT(thread != RT_NULL);
|
|
||||||
RT_ASSERT(rt_object_get_type((rt_object_t)thread) == RT_Object_Class_Thread);
|
|
||||||
RT_ASSERT(thread == rt_thread_self());
|
|
||||||
|
|
||||||
LOG_D("thread suspend: %s", thread->parent.name);
|
|
||||||
|
|
||||||
level = rt_spin_lock_irqsave(&(thread->spinlock));
|
|
||||||
|
|
||||||
stat = thread->stat & RT_THREAD_STAT_MASK;
|
|
||||||
if ((stat != RT_THREAD_READY) && (stat != RT_THREAD_RUNNING))
|
|
||||||
{
|
|
||||||
LOG_D("thread suspend: thread disorder, 0x%2x", thread->stat);
|
|
||||||
rt_spin_unlock_irqrestore(&(thread->spinlock), level);
|
|
||||||
return -RT_ERROR;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (stat == RT_THREAD_RUNNING)
|
|
||||||
{
|
|
||||||
/* not suspend running status thread on other core */
|
|
||||||
RT_ASSERT(thread == rt_thread_self());
|
|
||||||
}
|
|
||||||
#ifdef RT_USING_SMART
|
|
||||||
if (lwp_thread_signal_suspend_check(thread, suspend_flag) == 0)
|
|
||||||
{
|
|
||||||
/* not to suspend */
|
|
||||||
rt_spin_unlock_irqrestore(&(thread->spinlock), level);
|
|
||||||
return -RT_EINTR;
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
rt_spin_unlock_irqrestore(&(thread->spinlock), level);
|
|
||||||
rt_schedule_remove_thread(thread);
|
|
||||||
level = rt_spin_lock_irqsave(&(thread->spinlock));
|
|
||||||
|
|
||||||
rt_thread_set_suspend_state(thread, suspend_flag);
|
|
||||||
|
|
||||||
/* stop thread timer anyway */
|
|
||||||
rt_timer_stop(&(thread->thread_timer));
|
|
||||||
rt_spin_unlock_irqrestore(&(thread->spinlock), level);
|
|
||||||
|
|
||||||
RT_OBJECT_HOOK_CALL(rt_thread_suspend_hook, (thread));
|
|
||||||
return RT_EOK;
|
|
||||||
}
|
}
|
||||||
RTM_EXPORT(rt_thread_suspend_with_flag);
|
RTM_EXPORT(rt_thread_suspend_with_flag);
|
||||||
|
|
||||||
|
@ -1064,7 +989,8 @@ RTM_EXPORT(rt_thread_suspend);
|
||||||
*/
|
*/
|
||||||
rt_err_t rt_thread_resume(rt_thread_t thread)
|
rt_err_t rt_thread_resume(rt_thread_t thread)
|
||||||
{
|
{
|
||||||
rt_base_t level;
|
rt_sched_lock_level_t slvl;
|
||||||
|
rt_err_t error;
|
||||||
|
|
||||||
/* parameter check */
|
/* parameter check */
|
||||||
RT_ASSERT(thread != RT_NULL);
|
RT_ASSERT(thread != RT_NULL);
|
||||||
|
@ -1072,33 +998,22 @@ rt_err_t rt_thread_resume(rt_thread_t thread)
|
||||||
|
|
||||||
LOG_D("thread resume: %s", thread->parent.name);
|
LOG_D("thread resume: %s", thread->parent.name);
|
||||||
|
|
||||||
level = rt_spin_lock_irqsave(&(thread->spinlock)); //TODO need lock for cpu
|
rt_sched_lock(&slvl);
|
||||||
|
|
||||||
if ((thread->stat & RT_THREAD_SUSPEND_MASK) != RT_THREAD_SUSPEND_MASK)
|
error = rt_sched_thread_ready(thread);
|
||||||
|
|
||||||
|
if (!error)
|
||||||
{
|
{
|
||||||
rt_spin_unlock_irqrestore(&(thread->spinlock), level);
|
error = rt_sched_unlock_n_resched(slvl);
|
||||||
|
}
|
||||||
LOG_D("thread resume: thread disorder, %d",
|
else
|
||||||
thread->stat);
|
{
|
||||||
|
rt_sched_unlock(slvl);
|
||||||
return -RT_ERROR;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* remove from suspend list */
|
|
||||||
rt_list_remove(&(thread->tlist));
|
|
||||||
|
|
||||||
rt_timer_stop(&thread->thread_timer);
|
|
||||||
|
|
||||||
#ifdef RT_USING_SMART
|
|
||||||
thread->wakeup.func = RT_NULL;
|
|
||||||
#endif
|
|
||||||
|
|
||||||
rt_spin_unlock_irqrestore(&(thread->spinlock), level);
|
|
||||||
/* insert to schedule ready list */
|
|
||||||
rt_schedule_insert_thread(thread);
|
|
||||||
|
|
||||||
RT_OBJECT_HOOK_CALL(rt_thread_resume_hook, (thread));
|
RT_OBJECT_HOOK_CALL(rt_thread_resume_hook, (thread));
|
||||||
return RT_EOK;
|
|
||||||
|
return error;
|
||||||
}
|
}
|
||||||
RTM_EXPORT(rt_thread_resume);
|
RTM_EXPORT(rt_thread_resume);
|
||||||
|
|
@@ -1112,19 +1027,21 @@ RTM_EXPORT(rt_thread_resume);
  */
 rt_err_t rt_thread_wakeup(rt_thread_t thread)
 {
-    register rt_base_t temp;
+    rt_sched_lock_level_t slvl;
     rt_err_t ret;
     rt_wakeup_func_t func = RT_NULL;
 
     RT_ASSERT(thread != RT_NULL);
     RT_ASSERT(rt_object_get_type((rt_object_t)thread) == RT_Object_Class_Thread);
-    temp = rt_spin_lock_irqsave(&(thread->spinlock));
-    func = thread->wakeup.func;
-    thread->wakeup.func = RT_NULL;
-    rt_spin_unlock_irqrestore(&(thread->spinlock), temp);
+
+    rt_sched_lock(&slvl);
+    func = thread->wakeup_handle.func;
+    thread->wakeup_handle.func = RT_NULL;
+    rt_sched_unlock(slvl);
 
     if (func)
     {
-        ret = func(thread->wakeup.user_data, thread);
+        ret = func(thread->wakeup_handle.user_data, thread);
     }
     else
     {
@@ -1136,15 +1053,15 @@ RTM_EXPORT(rt_thread_wakeup);
 
 void rt_thread_wakeup_set(struct rt_thread *thread, rt_wakeup_func_t func, void* user_data)
 {
-    register rt_base_t temp;
+    rt_sched_lock_level_t slvl;
 
     RT_ASSERT(thread != RT_NULL);
     RT_ASSERT(rt_object_get_type((rt_object_t)thread) == RT_Object_Class_Thread);
 
-    temp = rt_spin_lock_irqsave(&(thread->spinlock));
-    thread->wakeup.func = func;
-    thread->wakeup.user_data = user_data;
-    rt_spin_unlock_irqrestore(&(thread->spinlock), temp);
+    rt_sched_lock(&slvl);
+    thread->wakeup_handle.func = func;
+    thread->wakeup_handle.user_data = user_data;
+    rt_sched_unlock(slvl);
 }
 RTM_EXPORT(rt_thread_wakeup_set);
 #endif

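The wakeup callback storage is renamed from thread->wakeup to thread->wakeup_handle and is read and cleared under the scheduler lock, so a wakeup can no longer race a concurrent rt_thread_wakeup_set(). A hedged usage sketch for RT_USING_SMART builds; the callback signature is inferred from the call site in this hunk (func(user_data, thread)) and the gate logic is invented for illustration:

/* Illustrative wakeup gate: only allow the wakeup while the flag is set. */
static rt_err_t wakeup_gate(void *user_data, rt_thread_t thread)
{
    int *allowed = (int *)user_data;
    return (*allowed) ? rt_thread_resume(thread) : -RT_ERROR;
}

static int gate_open = 1;

static void arm_wakeup(rt_thread_t target)
{
    /* the kernel calls wakeup_gate once, then clears the handle */
    rt_thread_wakeup_set(target, wakeup_gate, &gate_open);
}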
src/timer.c | 145
@@ -20,6 +20,7 @@
  * 2022-01-07     Gabriel      Moving __on_rt_xxxxx_hook to timer.c
  * 2022-04-19     Stanley      Correct descriptions
  * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
+ * 2024-01-25     Shell        add RT_TIMER_FLAG_THREAD_TIMER for timer to sync with sched
  */
 
 #include <rtthread.h>
@@ -31,7 +32,7 @@
 
 /* hard timer list */
 static rt_list_t _timer_list[RT_TIMER_SKIP_LIST_LEVEL];
-static struct rt_spinlock _hard_spinlock;
+static struct rt_spinlock _htimer_lock;
 
 #ifdef RT_USING_TIMER_SOFT
 
@@ -50,7 +51,7 @@ static struct rt_spinlock _hard_spinlock;
 static rt_uint8_t _soft_timer_status = RT_SOFT_TIMER_IDLE;
 /* soft timer list */
 static rt_list_t _soft_timer_list[RT_TIMER_SKIP_LIST_LEVEL];
-static struct rt_spinlock _soft_spinlock;
+static struct rt_spinlock _stimer_lock;
 static struct rt_thread _timer_thread;
 static struct rt_semaphore _soft_timer_sem;
 rt_align(RT_ALIGN_SIZE)
@@ -94,6 +95,35 @@ void rt_timer_exit_sethook(void (*hook)(struct rt_timer *timer))
 /**@}*/
 #endif /* RT_USING_HOOK */
 
+rt_inline struct rt_spinlock* _timerlock_idx(struct rt_timer *timer)
+{
+#ifdef RT_USING_TIMER_SOFT
+    if (timer->parent.flag & RT_TIMER_FLAG_SOFT_TIMER)
+    {
+        return &_stimer_lock;
+    }
+    else
+#endif /* RT_USING_TIMER_SOFT */
+    {
+        return &_htimer_lock;
+    }
+}
+
+rt_inline rt_list_t* _timerhead_idx(struct rt_timer *timer)
+{
+#ifdef RT_USING_TIMER_SOFT
+    if (timer->parent.flag & RT_TIMER_FLAG_SOFT_TIMER)
+    {
+        /* insert timer to soft timer list */
+        return _soft_timer_list;
+    }
+    else
+#endif /* RT_USING_TIMER_SOFT */
+    {
+        /* insert timer to system timer list */
+        return _timer_list;
+    }
+}
+
 /**
  * @brief [internal] The init funtion of timer
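These two helpers replace the #ifdef ladder that the hunks below delete at each call site: given a timer, they pick the soft or hard lock and list in one place. The resulting call pattern is kernel-internal; this sketch exists only to make the following hunks easier to read:

static void touch_timer_locked(rt_timer_t timer)
{
    struct rt_spinlock *lock = _timerlock_idx(timer);  /* soft or hard lock */
    rt_base_t level = rt_spin_lock_irqsave(lock);
    /* ... operate on the timer while its list is protected ... */
    rt_spin_unlock_irqrestore(lock, level);
}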
@@ -280,17 +310,7 @@ rt_err_t rt_timer_detach(rt_timer_t timer)
     RT_ASSERT(rt_object_get_type(&timer->parent) == RT_Object_Class_Timer);
     RT_ASSERT(rt_object_is_systemobject(&timer->parent));
 
-#ifdef RT_USING_TIMER_SOFT
-    if (timer->parent.flag & RT_TIMER_FLAG_SOFT_TIMER)
-    {
-        spinlock = &_soft_spinlock;
-    }
-    else
-#endif /* RT_USING_TIMER_SOFT */
-    {
-        spinlock = &_hard_spinlock;
-    }
+    spinlock = _timerlock_idx(timer);
 
     level = rt_spin_lock_irqsave(spinlock);
 
     _timer_remove(timer);
@@ -325,6 +345,7 @@ RTM_EXPORT(rt_timer_detach);
  *
  *             RT_TIMER_FLAG_HARD_TIMER    Hardware timer
  *             RT_TIMER_FLAG_SOFT_TIMER    Software timer
+ *             RT_TIMER_FLAG_THREAD_TIMER  Thread timer
  *
  *             NOTE:
  *             You can use multiple values with "|" logical operator. By default, system will use the RT_TIME_FLAG_HARD_TIMER.
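For context, a typical application-side use of these flags; rt_timer_create() is public API, while RT_TIMER_FLAG_THREAD_TIMER is set internally for a thread's built-in timer and is not normally passed by applications:

static void on_timeout(void *param)
{
    rt_kprintf("timer fired\n");
}

static void start_demo_timer(void)
{
    /* periodic software timer that fires once per second */
    rt_timer_t t = rt_timer_create("demo", on_timeout, RT_NULL,
                                   RT_TICK_PER_SECOND,
                                   RT_TIMER_FLAG_PERIODIC | RT_TIMER_FLAG_SOFT_TIMER);
    if (t != RT_NULL)
        rt_timer_start(t);
}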
@@ -373,16 +394,7 @@ rt_err_t rt_timer_delete(rt_timer_t timer)
     RT_ASSERT(rt_object_get_type(&timer->parent) == RT_Object_Class_Timer);
     RT_ASSERT(rt_object_is_systemobject(&timer->parent) == RT_FALSE);
 
-#ifdef RT_USING_TIMER_SOFT
-    if (timer->parent.flag & RT_TIMER_FLAG_SOFT_TIMER)
-    {
-        spinlock = &_soft_spinlock;
-    }
-    else
-#endif /* RT_USING_TIMER_SOFT */
-    {
-        spinlock = &_hard_spinlock;
-    }
+    spinlock = _timerlock_idx(timer);
 
     level = rt_spin_lock_irqsave(spinlock);
 
@@ -485,6 +497,8 @@ static rt_err_t _timer_start(rt_list_t *timer_list, rt_timer_t timer)
  */
 rt_err_t rt_timer_start(rt_timer_t timer)
 {
+    rt_sched_lock_level_t slvl;
+    int is_thread_timer = 0;
     struct rt_spinlock *spinlock;
     rt_list_t *timer_list;
     rt_base_t level;
@@ -498,13 +512,24 @@ rt_err_t rt_timer_start(rt_timer_t timer)
     if (timer->parent.flag & RT_TIMER_FLAG_SOFT_TIMER)
     {
         timer_list = _soft_timer_list;
-        spinlock = &_soft_spinlock;
+        spinlock = &_stimer_lock;
     }
     else
 #endif /* RT_USING_TIMER_SOFT */
     {
         timer_list = _timer_list;
-        spinlock = &_hard_spinlock;
+        spinlock = &_htimer_lock;
+    }
+
+    if (timer->parent.flag & RT_TIMER_FLAG_THREAD_TIMER)
+    {
+        rt_thread_t thread;
+        is_thread_timer = 1;
+        rt_sched_lock(&slvl);
+
+        thread = rt_container_of(timer, struct rt_thread, thread_timer);
+        RT_ASSERT(rt_object_get_type(&thread->parent) == RT_Object_Class_Thread);
+        rt_sched_thread_timer_start(thread);
     }
 
     level = rt_spin_lock_irqsave(spinlock);
@@ -512,17 +537,19 @@ rt_err_t rt_timer_start(rt_timer_t timer)
     err = _timer_start(timer_list, timer);
 
 #ifdef RT_USING_TIMER_SOFT
-    if (err == RT_EOK)
-    {
-        if (timer->parent.flag & RT_TIMER_FLAG_SOFT_TIMER)
+    if (err == RT_EOK && (timer->parent.flag & RT_TIMER_FLAG_SOFT_TIMER))
     {
         rt_sem_release(&_soft_timer_sem);
     }
-    }
 #endif /* RT_USING_TIMER_SOFT */
 
     rt_spin_unlock_irqrestore(spinlock, level);
 
+    if (is_thread_timer)
+    {
+        rt_sched_unlock(slvl);
+    }
+
     return err;
 }
 RTM_EXPORT(rt_timer_start);
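rt_timer_start() now special-cases thread timers: it recovers the owning thread from the embedded thread_timer member and notifies the scheduler under the sched lock before touching the timer list. The recovery is the standard rt_container_of pattern, sketched here on a hypothetical struct:

struct demo_owner
{
    int             id;
    struct rt_timer timer;   /* embedded member, like thread->thread_timer */
};

static struct demo_owner *owner_of(struct rt_timer *t)
{
    /* map a pointer to the embedded member back to its enclosing struct */
    return rt_container_of(t, struct demo_owner, timer);
}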
|
@ -543,16 +570,8 @@ rt_err_t rt_timer_stop(rt_timer_t timer)
|
||||||
RT_ASSERT(timer != RT_NULL);
|
RT_ASSERT(timer != RT_NULL);
|
||||||
RT_ASSERT(rt_object_get_type(&timer->parent) == RT_Object_Class_Timer);
|
RT_ASSERT(rt_object_get_type(&timer->parent) == RT_Object_Class_Timer);
|
||||||
|
|
||||||
#ifdef RT_USING_TIMER_SOFT
|
spinlock = _timerlock_idx(timer);
|
||||||
if (timer->parent.flag & RT_TIMER_FLAG_SOFT_TIMER)
|
|
||||||
{
|
|
||||||
spinlock = &_soft_spinlock;
|
|
||||||
}
|
|
||||||
else
|
|
||||||
#endif /* RT_USING_TIMER_SOFT */
|
|
||||||
{
|
|
||||||
spinlock = &_hard_spinlock;
|
|
||||||
}
|
|
||||||
level = rt_spin_lock_irqsave(spinlock);
|
level = rt_spin_lock_irqsave(spinlock);
|
||||||
|
|
||||||
if (!(timer->parent.flag & RT_TIMER_FLAG_ACTIVATED))
|
if (!(timer->parent.flag & RT_TIMER_FLAG_ACTIVATED))
|
||||||
|
@ -565,6 +584,7 @@ rt_err_t rt_timer_stop(rt_timer_t timer)
|
||||||
_timer_remove(timer);
|
_timer_remove(timer);
|
||||||
/* change status */
|
/* change status */
|
||||||
timer->parent.flag &= ~RT_TIMER_FLAG_ACTIVATED;
|
timer->parent.flag &= ~RT_TIMER_FLAG_ACTIVATED;
|
||||||
|
|
||||||
rt_spin_unlock_irqrestore(spinlock, level);
|
rt_spin_unlock_irqrestore(spinlock, level);
|
||||||
|
|
||||||
return RT_EOK;
|
return RT_EOK;
|
@@ -582,10 +602,16 @@ RTM_EXPORT(rt_timer_stop);
  */
 rt_err_t rt_timer_control(rt_timer_t timer, int cmd, void *arg)
 {
+    struct rt_spinlock *spinlock;
+    rt_base_t level;
 
     /* parameter check */
     RT_ASSERT(timer != RT_NULL);
     RT_ASSERT(rt_object_get_type(&timer->parent) == RT_Object_Class_Timer);
 
+    spinlock = _timerlock_idx(timer);
+
+    level = rt_spin_lock_irqsave(spinlock);
     switch (cmd)
     {
     case RT_TIMER_CTRL_GET_TIME:
@@ -640,6 +666,7 @@ rt_err_t rt_timer_control(rt_timer_t timer, int cmd, void *arg)
     default:
         break;
     }
+    rt_spin_unlock_irqrestore(spinlock, level);
 
     return RT_EOK;
 }
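rt_timer_control() now holds the owning list's spinlock across the whole switch, so a command can no longer interleave with a concurrent start or stop. A caller-side sketch (t is assumed to come from an earlier rt_timer_create()):

static void retune(rt_timer_t t)
{
    rt_tick_t half_second = RT_TICK_PER_SECOND / 2;

    rt_timer_stop(t);                                        /* quiesce first */
    rt_timer_control(t, RT_TIMER_CTRL_SET_TIME, &half_second);
    rt_timer_start(t);
}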
@@ -660,21 +687,23 @@ void rt_timer_check(void)
 
     RT_ASSERT(rt_interrupt_get_nest() > 0);
 
+    LOG_D("timer check enter");
+
+    level = rt_spin_lock_irqsave(&_htimer_lock);
+
+    current_tick = rt_tick_get();
+
 #ifdef RT_USING_SMP
+    /* Running on core 0 only */
     if (rt_hw_cpu_id() != 0)
     {
+        rt_spin_unlock_irqrestore(&_htimer_lock, level);
         return;
     }
 #endif
 
     rt_list_init(&list);
 
-    LOG_D("timer check enter");
-
-    current_tick = rt_tick_get();
-
-    level = rt_spin_lock_irqsave(&_hard_spinlock);
-
     while (!rt_list_isempty(&_timer_list[RT_TIMER_SKIP_LIST_LEVEL - 1]))
     {
         t = rt_list_entry(_timer_list[RT_TIMER_SKIP_LIST_LEVEL - 1].next,
@@ -696,7 +725,7 @@ void rt_timer_check(void)
         }
         /* add timer to temporary list */
         rt_list_insert_after(&list, &(t->row[RT_TIMER_SKIP_LIST_LEVEL - 1]));
-        rt_spin_unlock_irqrestore(&_hard_spinlock, level);
+        rt_spin_unlock_irqrestore(&_htimer_lock, level);
         /* call timeout function */
         t->timeout_func(t->parameter);
 
@@ -705,7 +734,7 @@ void rt_timer_check(void)
 
         RT_OBJECT_HOOK_CALL(rt_timer_exit_hook, (t));
         LOG_D("current tick: %d", current_tick);
-        level = rt_spin_lock_irqsave(&_hard_spinlock);
+        level = rt_spin_lock_irqsave(&_htimer_lock);
         /* Check whether the timer object is detached or started again */
         if (rt_list_isempty(&list))
         {
@@ -722,7 +751,7 @@ void rt_timer_check(void)
         }
         else break;
     }
-    rt_spin_unlock_irqrestore(&_hard_spinlock, level);
+    rt_spin_unlock_irqrestore(&_htimer_lock, level);
     LOG_D("timer check leave");
 }

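Note the ordering above: _htimer_lock is taken before the core-0 check, so the early return on secondary cores must release it first. The general discipline, as a standalone sketch (demo_lock and the bail condition are illustrative; the lock would need rt_spin_lock_init() at startup):

static struct rt_spinlock demo_lock;

static void checked_work(int should_bail)
{
    rt_base_t level = rt_spin_lock_irqsave(&demo_lock);
    if (should_bail)
    {
        /* every early-return path must drop the lock it took */
        rt_spin_unlock_irqrestore(&demo_lock, level);
        return;
    }
    /* ... do the protected work ... */
    rt_spin_unlock_irqrestore(&demo_lock, level);
}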
@@ -736,9 +765,9 @@ rt_tick_t rt_timer_next_timeout_tick(void)
     rt_base_t level;
     rt_tick_t next_timeout = RT_TICK_MAX;
 
-    level = rt_spin_lock_irqsave(&_hard_spinlock);
+    level = rt_spin_lock_irqsave(&_htimer_lock);
     _timer_list_next_timeout(_timer_list, &next_timeout);
-    rt_spin_unlock_irqrestore(&_hard_spinlock, level);
+    rt_spin_unlock_irqrestore(&_htimer_lock, level);
 
     return next_timeout;
 }
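Only the lock name changes here; rt_timer_next_timeout_tick() remains the usual input for tickless idle. A hypothetical idle-side sketch:

static rt_tick_t plan_idle_sleep(void)
{
    rt_tick_t next = rt_timer_next_timeout_tick();

    if (next == RT_TICK_MAX)
    {
        /* no hard timer pending: the wakeup source may sleep indefinitely */
        return RT_TICK_MAX;
    }
    /* otherwise program a wakeup (next - rt_tick_get()) ticks from now */
    return next - rt_tick_get();
}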
@@ -757,7 +786,7 @@ static void _soft_timer_check(void)
 
     rt_list_init(&list);
     LOG_D("software timer check enter");
-    level = rt_spin_lock_irqsave(&_soft_spinlock);
+    level = rt_spin_lock_irqsave(&_stimer_lock);
 
     while (!rt_list_isempty(&_soft_timer_list[RT_TIMER_SKIP_LIST_LEVEL - 1]))
     {
@@ -785,7 +814,7 @@ static void _soft_timer_check(void)
 
         _soft_timer_status = RT_SOFT_TIMER_BUSY;
 
-        rt_spin_unlock_irqrestore(&_soft_spinlock, level);
+        rt_spin_unlock_irqrestore(&_stimer_lock, level);
 
         /* call timeout function */
         t->timeout_func(t->parameter);
@@ -793,7 +822,7 @@ static void _soft_timer_check(void)
         RT_OBJECT_HOOK_CALL(rt_timer_exit_hook, (t));
         LOG_D("current tick: %d", current_tick);
 
-        level = rt_spin_lock_irqsave(&_soft_spinlock);
+        level = rt_spin_lock_irqsave(&_stimer_lock);
 
         _soft_timer_status = RT_SOFT_TIMER_IDLE;
         /* Check whether the timer object is detached or started again */
@@ -813,7 +842,7 @@ static void _soft_timer_check(void)
         else break; /* not check anymore */
     }
 
-    rt_spin_unlock_irqrestore(&_soft_spinlock, level);
+    rt_spin_unlock_irqrestore(&_stimer_lock, level);
 
     LOG_D("software timer check leave");
 }
@@ -836,9 +865,9 @@ static void _timer_thread_entry(void *parameter)
     while (1)
     {
         /* get the next timeout tick */
-        level = rt_spin_lock_irqsave(&_soft_spinlock);
+        level = rt_spin_lock_irqsave(&_stimer_lock);
         ret = _timer_list_next_timeout(_soft_timer_list, &next_timeout);
-        rt_spin_unlock_irqrestore(&_soft_spinlock, level);
+        rt_spin_unlock_irqrestore(&_stimer_lock, level);
 
         if (ret != RT_EOK)
         {
@@ -878,7 +907,7 @@ void rt_system_timer_init(void)
     {
         rt_list_init(_timer_list + i);
     }
-    rt_spin_lock_init(&_hard_spinlock);
+    rt_spin_lock_init(&_htimer_lock);
 }
 
 /**
@@ -897,7 +926,7 @@ void rt_system_timer_thread_init(void)
     {
         rt_list_init(_soft_timer_list + i);
     }
-    rt_spin_lock_init(&_soft_spinlock);
+    rt_spin_lock_init(&_stimer_lock);
     rt_sem_init(&_soft_timer_sem, "stimer", 0, RT_IPC_FLAG_PRIO);
     /* start software timer thread */
     rt_thread_init(&_timer_thread,