/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2006-03-23     Bernard      the first version
 * 2010-11-10     Bernard      add cleanup callback function in thread exit.
 * 2012-12-29     Bernard      fix compiling warning.
 * 2013-12-21     Grissiom     let rt_thread_idle_excute loop until there is no
 *                             dead thread.
 * 2016-08-09     ArdaFu       add method to get the handler of the idle thread.
 * 2018-02-07     Bernard      lock scheduler to protect tid->cleanup.
 * 2018-07-14     armink       add idle hook list
 * 2018-11-22     Jesven       add per cpu idle task
 *                             combine the code of primary and secondary cpu
 * 2021-11-15     THEWON       Remove duplicate work between idle and _thread_exit
 */

#include <rthw.h>
#include <rtthread.h>

#ifdef RT_USING_MODULE
#include <dlmodule.h>
#endif /* RT_USING_MODULE */

#ifdef RT_USING_HOOK
#ifndef RT_USING_IDLE_HOOK
#define RT_USING_IDLE_HOOK
#endif /* RT_USING_IDLE_HOOK */
#endif /* RT_USING_HOOK */

#ifndef IDLE_THREAD_STACK_SIZE
#if defined(RT_USING_IDLE_HOOK) || defined(RT_USING_HEAP)
#define IDLE_THREAD_STACK_SIZE  256
#else
#define IDLE_THREAD_STACK_SIZE  128
#endif /* defined(RT_USING_IDLE_HOOK) || defined(RT_USING_HEAP) */
#endif /* IDLE_THREAD_STACK_SIZE */

#ifdef RT_USING_SMP
#define _CPUS_NR                RT_CPUS_NR
#else
#define _CPUS_NR                1
#endif /* RT_USING_SMP */
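
/*
 * Configuration sketch (illustrative, not part of this file): the stack size
 * above is only a default; a board or application can override it from
 * rtconfig.h before this file is compiled, for example:
 *
 *     #define IDLE_THREAD_STACK_SIZE  512
 */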

static rt_list_t _rt_thread_defunct = RT_LIST_OBJECT_INIT(_rt_thread_defunct);

static struct rt_thread idle[_CPUS_NR];
ALIGN(RT_ALIGN_SIZE)
static rt_uint8_t rt_thread_stack[_CPUS_NR][IDLE_THREAD_STACK_SIZE];

#ifdef RT_USING_SMP
#ifndef SYSTEM_THREAD_STACK_SIZE
#define SYSTEM_THREAD_STACK_SIZE IDLE_THREAD_STACK_SIZE
#endif
static struct rt_thread rt_system_thread;
ALIGN(RT_ALIGN_SIZE)
static rt_uint8_t rt_system_stack[SYSTEM_THREAD_STACK_SIZE];
static struct rt_semaphore system_sem;
#endif

#ifdef RT_USING_IDLE_HOOK
#ifndef RT_IDLE_HOOK_LIST_SIZE
#define RT_IDLE_HOOK_LIST_SIZE  4
#endif /* RT_IDLE_HOOK_LIST_SIZE */

static void (*idle_hook_list[RT_IDLE_HOOK_LIST_SIZE])(void);

/**
 * @brief This function sets a hook function to the idle thread loop. When the system
 *        performs the idle loop, this hook function will be invoked.
 *
 * @param hook the specified hook function.
 *
 * @return RT_EOK: set OK.
 *         -RT_EFULL: hook list is full.
 *
 * @note the hook function must be simple and must never block or suspend.
 */
rt_err_t rt_thread_idle_sethook(void (*hook)(void))
{
    rt_size_t i;
    rt_base_t level;
    rt_err_t ret = -RT_EFULL;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();
    for (i = 0; i < RT_IDLE_HOOK_LIST_SIZE; i++)
    {
        if (idle_hook_list[i] == RT_NULL)
        {
            idle_hook_list[i] = hook;
            ret = RT_EOK;
            break;
        }
    }
    /* enable interrupt */
    rt_hw_interrupt_enable(level);

    return ret;
}

/**
 * @brief delete the idle hook from the hook list.
 *
 * @param hook the specified hook function.
 *
 * @return RT_EOK: delete OK.
 *         -RT_ENOSYS: hook was not found.
 */
rt_err_t rt_thread_idle_delhook(void (*hook)(void))
{
    rt_size_t i;
    rt_base_t level;
    rt_err_t ret = -RT_ENOSYS;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();
    for (i = 0; i < RT_IDLE_HOOK_LIST_SIZE; i++)
    {
        if (idle_hook_list[i] == hook)
        {
            idle_hook_list[i] = RT_NULL;
            ret = RT_EOK;
            break;
        }
    }
    /* enable interrupt */
    rt_hw_interrupt_enable(level);

    return ret;
}
#endif /* RT_USING_IDLE_HOOK */
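
/*
 * Usage sketch (illustrative, not part of this file): registering a simple
 * idle hook from application code. The hook runs in idle-thread context, so
 * it must be short and must never block.
 *
 *     static volatile rt_uint32_t idle_loops;
 *
 *     static void my_idle_hook(void)
 *     {
 *         idle_loops++;   // e.g. count idle iterations for a CPU-load estimate
 *     }
 *
 *     // somewhere in application init:
 *     if (rt_thread_idle_sethook(my_idle_hook) != RT_EOK)
 *     {
 *         // all RT_IDLE_HOOK_LIST_SIZE slots are already in use
 *     }
 *
 *     // and to unregister it later:
 *     rt_thread_idle_delhook(my_idle_hook);
 */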

/**
 * @brief Enqueue a thread to the defunct queue.
 *
 * @note It must be called between rt_hw_interrupt_disable and rt_hw_interrupt_enable.
 */
void rt_thread_defunct_enqueue(rt_thread_t thread)
{
    rt_list_insert_after(&_rt_thread_defunct, &thread->tlist);
#ifdef RT_USING_SMP
    rt_sem_release(&system_sem);
#endif
}
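
/*
 * Calling sketch (illustrative): the caller - normally the kernel's thread
 * exit path - wraps the enqueue in an interrupt-disabled section, e.g.:
 *
 *     level = rt_hw_interrupt_disable();
 *     rt_thread_defunct_enqueue(thread);
 *     rt_hw_interrupt_enable(level);
 */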

/**
 * @brief Dequeue a thread from the defunct queue.
 */
rt_thread_t rt_thread_defunct_dequeue(void)
{
    register rt_base_t lock;
    rt_thread_t thread = RT_NULL;
    rt_list_t *l = &_rt_thread_defunct;

#ifdef RT_USING_SMP
    /* disable interrupt */
    lock = rt_hw_interrupt_disable();
    if (l->next != l)
    {
        thread = rt_list_entry(l->next,
                               struct rt_thread,
                               tlist);
        rt_list_remove(&(thread->tlist));
    }
    rt_hw_interrupt_enable(lock);
#else
    if (l->next != l)
    {
        thread = rt_list_entry(l->next,
                               struct rt_thread,
                               tlist);
        lock = rt_hw_interrupt_disable();
        rt_list_remove(&(thread->tlist));
        rt_hw_interrupt_enable(lock);
    }
#endif
    return thread;
}

/**
 * @brief This function will perform the system background job when the system is idle.
 */
static void rt_defunct_execute(void)
{
    /* Loop until there is no dead thread. So one call to rt_defunct_execute
     * will do all the cleanups. */
    while (1)
    {
        rt_thread_t thread;
        void (*cleanup)(struct rt_thread *tid);

#ifdef RT_USING_MODULE
        struct rt_dlmodule *module = RT_NULL;
#endif
        /* get defunct thread */
        thread = rt_thread_defunct_dequeue();
        if (thread == RT_NULL)
        {
            break;
        }

#ifdef RT_USING_MODULE
        /* destroy the module that the dead thread belongs to, if any */
        module = (struct rt_dlmodule *)thread->module_id;
        if (module)
        {
            dlmodule_destroy(module);
        }
#endif
        /* invoke thread cleanup */
        cleanup = thread->cleanup;
        if (cleanup != RT_NULL)
        {
            cleanup(thread);
        }

#ifdef RT_USING_SIGNALS
        rt_thread_free_sig(thread);
#endif

        /* if it's a system object, do not delete it */
        if (rt_object_is_systemobject((rt_object_t)thread) == RT_TRUE)
        {
            /* detach this object */
            rt_object_detach((rt_object_t)thread);
        }
        else
        {
#ifdef RT_USING_HEAP
            /* release thread's stack */
            RT_KERNEL_FREE(thread->stack_addr);
            /* delete thread object */
            rt_object_delete((rt_object_t)thread);
#endif
        }
    }
}

static void rt_thread_idle_entry(void *parameter)
{
#ifdef RT_USING_SMP
    if (rt_hw_cpu_id() != 0)
    {
        while (1)
        {
            rt_hw_secondary_cpu_idle_exec();
        }
    }
#endif /* RT_USING_SMP */

    while (1)
    {
#ifdef RT_USING_IDLE_HOOK
        rt_size_t i;
        void (*idle_hook)(void);

        for (i = 0; i < RT_IDLE_HOOK_LIST_SIZE; i++)
        {
            idle_hook = idle_hook_list[i];
            if (idle_hook != RT_NULL)
            {
                idle_hook();
            }
        }
#endif /* RT_USING_IDLE_HOOK */

#ifndef RT_USING_SMP
        rt_defunct_execute();
#endif /* RT_USING_SMP */

#ifdef RT_USING_PM
        void rt_system_power_manager(void);
        rt_system_power_manager();
#endif /* RT_USING_PM */
    }
}

#ifdef RT_USING_SMP
static void rt_thread_system_entry(void *parameter)
{
    while (1)
    {
        rt_sem_take(&system_sem, RT_WAITING_FOREVER);
        rt_defunct_execute();
    }
}
#endif

/**
 * @brief This function will initialize the idle thread and then start it.
 *
 * @note this function must be invoked during system initialization.
 */
void rt_thread_idle_init(void)
{
    rt_ubase_t i;
    char tidle_name[RT_NAME_MAX];

    for (i = 0; i < _CPUS_NR; i++)
    {
        rt_sprintf(tidle_name, "tidle%d", i);
        rt_thread_init(&idle[i],
                       tidle_name,
                       rt_thread_idle_entry,
                       RT_NULL,
                       &rt_thread_stack[i][0],
                       sizeof(rt_thread_stack[i]),
                       RT_THREAD_PRIORITY_MAX - 1,
                       32);
#ifdef RT_USING_SMP
        rt_thread_control(&idle[i], RT_THREAD_CTRL_BIND_CPU, (void *)i);
#endif /* RT_USING_SMP */
        /* startup */
        rt_thread_startup(&idle[i]);
    }

#ifdef RT_USING_SMP
    RT_ASSERT(RT_THREAD_PRIORITY_MAX > 2);

    rt_sem_init(&system_sem, "defunct", 1, RT_IPC_FLAG_FIFO);

    /* create the system thread that cleans up defunct threads */
    rt_thread_init(&rt_system_thread,
                   "tsystem",
                   rt_thread_system_entry,
                   RT_NULL,
                   rt_system_stack,
                   sizeof(rt_system_stack),
                   RT_THREAD_PRIORITY_MAX - 2,
                   32);
    /* startup */
    rt_thread_startup(&rt_system_thread);
#endif
}
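
/*
 * Startup-order sketch (illustrative): in the typical RT-Thread boot flow,
 * rtthread_startup() is expected to call rt_thread_idle_init() after
 * rt_system_scheduler_init() and before rt_system_scheduler_start(), so
 * the per-CPU idle threads exist when the scheduler begins switching.
 */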

/**
 * @brief This function will get the handler of the idle thread.
 */
rt_thread_t rt_thread_idle_gethandler(void)
{
#ifdef RT_USING_SMP
    register int id = rt_hw_cpu_id();
#else
    register int id = 0;
#endif /* RT_USING_SMP */

    return (rt_thread_t)(&idle[id]);
}
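
/*
 * Usage sketch (illustrative, not part of this file, assuming RT_USING_HOOK
 * is enabled): the handler can be used to recognise the idle thread, e.g.
 * inside a scheduler hook that estimates CPU load.
 *
 *     static void sched_hook(struct rt_thread *from, struct rt_thread *to)
 *     {
 *         if (to == rt_thread_idle_gethandler())
 *         {
 *             // the CPU is about to go idle: start an "idle time" measurement
 *         }
 *     }
 *
 *     // during init:
 *     rt_scheduler_sethook(sched_hook);
 */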