/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2006-03-23     Bernard      the first version
 * 2010-11-10     Bernard      add cleanup callback function in thread exit.
 * 2012-12-29     Bernard      fix compiling warning.
 * 2013-12-21     Grissiom     let rt_thread_idle_excute loop until there is no
 *                             dead thread.
 * 2016-08-09     ArdaFu       add method to get the handler of the idle thread.
 * 2018-02-07     Bernard      lock scheduler to protect tid->cleanup.
 * 2018-07-14     armink       add idle hook list
 * 2018-11-22     Jesven       add per cpu idle task
 *                             combine the code of primary and secondary cpu
 */

#include <rthw.h>
#include <rtthread.h>

#ifdef RT_USING_MODULE
#include <dlmodule.h>
#endif

#if defined (RT_USING_HOOK)
#ifndef RT_USING_IDLE_HOOK
#define RT_USING_IDLE_HOOK
#endif
#endif

#ifndef IDLE_THREAD_STACK_SIZE
#if defined (RT_USING_IDLE_HOOK) || defined(RT_USING_HEAP)
#define IDLE_THREAD_STACK_SIZE  256
#else
#define IDLE_THREAD_STACK_SIZE  128
#endif
#endif
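
/* Because of the #ifndef guard above, IDLE_THREAD_STACK_SIZE is only a
 * fallback default; a board configuration may override it, typically in
 * rtconfig.h (512 below is just an illustrative value):
 *
 *     #define IDLE_THREAD_STACK_SIZE  512
 */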

#ifdef RT_USING_SMP
#define _CPUS_NR                RT_CPUS_NR
#else
#define _CPUS_NR                1
#endif

extern rt_list_t rt_thread_defunct;

static struct rt_thread idle[_CPUS_NR];
ALIGN(RT_ALIGN_SIZE)
static rt_uint8_t rt_thread_stack[_CPUS_NR][IDLE_THREAD_STACK_SIZE];

#ifdef RT_USING_IDLE_HOOK
#ifndef RT_IDEL_HOOK_LIST_SIZE
#define RT_IDEL_HOOK_LIST_SIZE  4
#endif

static void (*idle_hook_list[RT_IDEL_HOOK_LIST_SIZE])(void);

/**
 * @ingroup Hook
 * This function sets a hook function to the idle thread loop. When the system
 * performs the idle loop, this hook function will be invoked.
 *
 * @param hook the specified hook function
 *
 * @return RT_EOK: set OK
 *         -RT_EFULL: hook list is full
 *
 * @note the hook function must be simple and must never block or suspend.
 */
rt_err_t rt_thread_idle_sethook(void (*hook)(void))
{
    rt_size_t i;
    rt_base_t level;
    rt_err_t ret = -RT_EFULL;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    /* put the hook into the first free slot of the hook list */
    for (i = 0; i < RT_IDEL_HOOK_LIST_SIZE; i++)
    {
        if (idle_hook_list[i] == RT_NULL)
        {
            idle_hook_list[i] = hook;
            ret = RT_EOK;
            break;
        }
    }

    /* enable interrupt */
    rt_hw_interrupt_enable(level);

    return ret;
}

/**
 * This function deletes the idle hook from the hook list.
 *
 * @param hook the specified hook function
 *
 * @return RT_EOK: delete OK
 *         -RT_ENOSYS: hook was not found
 */
rt_err_t rt_thread_idle_delhook(void (*hook)(void))
{
    rt_size_t i;
    rt_base_t level;
    rt_err_t ret = -RT_ENOSYS;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    for (i = 0; i < RT_IDEL_HOOK_LIST_SIZE; i++)
    {
        if (idle_hook_list[i] == hook)
        {
            idle_hook_list[i] = RT_NULL;
            ret = RT_EOK;
            break;
        }
    }

    /* enable interrupt */
    rt_hw_interrupt_enable(level);

    return ret;
}
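
/* Usage sketch (illustrative only; idle_wfi is a hypothetical hook that puts
 * an ARM core into a low-power wait state until the next interrupt):
 *
 *     static void idle_wfi(void)
 *     {
 *         asm volatile ("wfi");
 *     }
 *
 *     rt_thread_idle_sethook(idle_wfi);    returns -RT_EFULL when the list is full
 *     ...
 *     rt_thread_idle_delhook(idle_wfi);    returns -RT_ENOSYS when not registered
 */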

#endif

/* Return whether there is a defunct thread to be deleted. */
rt_inline int _has_defunct_thread(void)
{
    /* The rt_list_isempty has prototype of "int rt_list_isempty(const rt_list_t *l)".
     * So the compiler has a good reason to assume that the rt_thread_defunct
     * list does not change within rt_thread_idle_excute, and thus may optimize
     * the "while" loop into an "if".
     *
     * So add the volatile qualifier here. */
    const volatile rt_list_t *l = (const volatile rt_list_t *)&rt_thread_defunct;

    return l->next != l;
}

/**
 * @ingroup Thread
 *
 * This function will perform system background jobs when the system is idle.
 */
void rt_thread_idle_excute(void)
{
    /* Loop until there is no dead thread. So one call to rt_thread_idle_excute
     * will do all the cleanups. */
    while (_has_defunct_thread())
    {
        rt_base_t lock;
        rt_thread_t thread;
#ifdef RT_USING_MODULE
        struct rt_dlmodule *module = RT_NULL;
#endif
        RT_DEBUG_NOT_IN_INTERRUPT;

        /* disable interrupt */
        lock = rt_hw_interrupt_disable();

        /* re-check whether list is empty */
        if (_has_defunct_thread())
        {
            /* get defunct thread */
            thread = rt_list_entry(rt_thread_defunct.next,
                                   struct rt_thread,
                                   tlist);
#ifdef RT_USING_MODULE
            module = (struct rt_dlmodule*)thread->module_id;
            if (module)
            {
                dlmodule_destroy(module);
            }
#endif
            /* remove defunct thread */
            rt_list_remove(&(thread->tlist));

            /* lock scheduler to prevent scheduling in cleanup function. */
            rt_enter_critical();

            /* invoke thread cleanup */
            if (thread->cleanup != RT_NULL)
                thread->cleanup(thread);

#ifdef RT_USING_SIGNALS
            rt_thread_free_sig(thread);
#endif

            /* if it's a system object, do not delete it */
            if (rt_object_is_systemobject((rt_object_t)thread) == RT_TRUE)
            {
                /* detach this object */
                rt_object_detach((rt_object_t)thread);

                /* unlock scheduler */
                rt_exit_critical();

                /* enable interrupt */
                rt_hw_interrupt_enable(lock);

                return;
            }

            /* unlock scheduler */
            rt_exit_critical();
        }
        else
        {
            /* enable interrupt */
            rt_hw_interrupt_enable(lock);

            /* the defunct thread list may have been emptied by others; just return */
            return;
        }

        /* enable interrupt */
        rt_hw_interrupt_enable(lock);

#ifdef RT_USING_HEAP
        /* release thread's stack */
        RT_KERNEL_FREE(thread->stack_addr);
        /* delete thread object */
        rt_object_delete((rt_object_t)thread);
#endif
    }
}
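
/* Note: rt_thread_idle_excute is normally driven by the idle thread below,
 * but since it only requires thread (not interrupt) context, see
 * RT_DEBUG_NOT_IN_INTERRUPT, an application thread may call it directly to
 * reclaim dead threads without waiting for the system to go idle. A sketch,
 * assuming a hypothetical `worker` thread created with rt_thread_create:
 *
 *     rt_thread_delete(worker);    worker becomes defunct, cleanup is deferred
 *     rt_thread_idle_excute();     its stack and object are reclaimed here
 */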

static void rt_thread_idle_entry(void *parameter)
{
#ifdef RT_USING_SMP
    if (rt_hw_cpu_id() != 0)
    {
        while (1)
        {
            rt_hw_secondary_cpu_idle_exec();
        }
    }
#endif

    while (1)
    {
#ifdef RT_USING_IDLE_HOOK
        rt_size_t i;

        for (i = 0; i < RT_IDEL_HOOK_LIST_SIZE; i++)
        {
            if (idle_hook_list[i] != RT_NULL)
            {
                idle_hook_list[i]();
            }
        }
#endif

        rt_thread_idle_excute();
    }
}

/**
 * @ingroup SystemInit
 *
 * This function will initialize the idle thread and then start it.
 *
 * @note this function must be invoked during system initialization.
 */
void rt_thread_idle_init(void)
{
    rt_ubase_t i;
    char tidle_name[RT_NAME_MAX];

    for (i = 0; i < _CPUS_NR; i++)
    {
        rt_sprintf(tidle_name, "tidle%d", i);
        rt_thread_init(&idle[i],
                       tidle_name,
                       rt_thread_idle_entry,
                       RT_NULL,
                       &rt_thread_stack[i][0],
                       sizeof(rt_thread_stack[i]),
                       RT_THREAD_PRIORITY_MAX - 1,
                       32);
#ifdef RT_USING_SMP
        /* bind each idle thread to its own cpu */
        rt_thread_control(&idle[i], RT_THREAD_CTRL_BIND_CPU, (void*)i);
#endif
        /* startup */
        rt_thread_startup(&idle[i]);
    }
}

/**
 * @ingroup Thread
 *
 * This function will get the handle of the idle thread running on the
 * current CPU.
 */
rt_thread_t rt_thread_idle_gethandler(void)
{
#ifdef RT_USING_SMP
    register int id = rt_hw_cpu_id();
#else
    register int id = 0;
#endif

    return (rt_thread_t)(&idle[id]);
}
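
/* Usage sketch (illustrative only): detecting whether code is currently
 * running in the idle thread, e.g. inside a callback shared between threads:
 *
 *     if (rt_thread_self() == rt_thread_idle_gethandler())
 *     {
 *         rt_kprintf("called from the idle thread\n");
 *     }
 */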