/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2017-02-27     bernard      fix the re-work issue.
 */

#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>

#ifdef RT_USING_HEAP

static void _delayed_work_timeout_handler(void *parameter);
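
/* Acknowledge completion of the current work item: releases the queue
 * semaphore for a waiter (see rt_workqueue_cancel_work_sync()) while
 * keeping its count at zero. */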

rt_inline rt_err_t _workqueue_work_completion(struct rt_workqueue *queue)
{
    rt_err_t result;

    rt_enter_critical();
    while (1)
    {
        /* try to take the condition semaphore */
        result = rt_sem_trytake(&(queue->sem));
        if (result == -RT_ETIMEOUT)
        {
            /* it timed out; release this semaphore */
            rt_sem_release(&(queue->sem));
        }
        else if (result == RT_EOK)
        {
            /* keep the sem value = 0 */
            result = RT_EOK;
            break;
        }
        else
        {
            result = -RT_ERROR;
            break;
        }
    }
    rt_exit_critical();

    return result;
}

static void _workqueue_thread_entry(void *parameter)
{
    rt_base_t level;
    struct rt_work *work;
    struct rt_workqueue *queue;

    queue = (struct rt_workqueue *)parameter;
    RT_ASSERT(queue != RT_NULL);

    while (1)
    {
        if (rt_list_isempty(&(queue->work_list)))
        {
            /* no work to do, suspend self */
            rt_thread_suspend(rt_thread_self());
            rt_schedule();
        }

        /* we have work to do */
        level = rt_hw_interrupt_disable();
        work = rt_list_entry(queue->work_list.next, struct rt_work, list);
        rt_list_remove(&(work->list));
        queue->work_current = work;
        work->flags &= ~RT_WORK_STATE_PENDING;
        rt_hw_interrupt_enable(level);

        /* do the work */
        work->work_func(work, work->work_data);

        level = rt_hw_interrupt_disable();
        /* clean the current work */
        queue->work_current = RT_NULL;
        rt_hw_interrupt_enable(level);

        /* acknowledge work completion */
        _workqueue_work_completion(queue);
    }
}

static rt_err_t _workqueue_submit_work(struct rt_workqueue *queue, struct rt_work *work)
{
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    if (work->flags & RT_WORK_STATE_PENDING)
    {
        rt_hw_interrupt_enable(level);
        return -RT_EBUSY;
    }

    if (queue->work_current == work)
    {
        rt_hw_interrupt_enable(level);
        return -RT_EBUSY;
    }

    /* NOTE: the work MUST be initialized first */
    rt_list_remove(&(work->list));

    rt_list_insert_after(queue->work_list.prev, &(work->list));
    work->flags |= RT_WORK_STATE_PENDING;

    /* wake up the worker thread if the workqueue is idle */
    if (queue->work_current == RT_NULL)
    {
        rt_hw_interrupt_enable(level);
        /* resume the worker thread */
        rt_thread_resume(queue->work_thread);
        rt_schedule();
    }
    else
    {
        rt_hw_interrupt_enable(level);
    }

    return RT_EOK;
}

static rt_err_t _workqueue_cancel_work(struct rt_workqueue *queue, struct rt_work *work)
{
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    if (queue->work_current == work)
    {
        rt_hw_interrupt_enable(level);
        return -RT_EBUSY;
    }
    rt_list_remove(&(work->list));
    work->flags &= ~RT_WORK_STATE_PENDING;
    rt_hw_interrupt_enable(level);

    return RT_EOK;
}

static rt_err_t _workqueue_cancel_delayed_work(struct rt_delayed_work *work)
{
    rt_base_t level;
    int ret = RT_EOK;

    if (!work->workqueue)
    {
        ret = -RT_EINVAL;
        goto __exit;
    }

    if (work->work.flags & RT_WORK_STATE_PENDING)
    {
        /* remove it from the queue if it has already been submitted */
        ret = rt_workqueue_cancel_work(work->workqueue, &(work->work));
        if (ret)
        {
            goto __exit;
        }
    }
    else
    {
        if (work->work.flags & RT_WORK_STATE_SUBMITTING)
        {
            level = rt_hw_interrupt_disable();
            rt_timer_stop(&(work->timer));
            rt_timer_detach(&(work->timer));
            work->work.flags &= ~RT_WORK_STATE_SUBMITTING;
            rt_hw_interrupt_enable(level);
        }
    }

    level = rt_hw_interrupt_disable();
    /* detach from the workqueue */
    work->workqueue = RT_NULL;
    work->work.flags &= ~(RT_WORK_STATE_PENDING);
    rt_hw_interrupt_enable(level);

__exit:
    return ret;
}

static rt_err_t _workqueue_submit_delayed_work(struct rt_workqueue *queue,
        struct rt_delayed_work *work, rt_tick_t ticks)
{
    rt_base_t level;
    int ret = RT_EOK;

    /* work cannot be active in multiple queues */
    if (work->workqueue && work->workqueue != queue)
    {
        ret = -RT_EINVAL;
        goto __exit;
    }

    /* cancel the work if it has already been submitted */
    if (work->workqueue == queue)
    {
        ret = _workqueue_cancel_delayed_work(work);
        if (ret < 0)
        {
            goto __exit;
        }
    }

    level = rt_hw_interrupt_disable();
    /* attach the workqueue so the timeout callback can submit the work */
    work->workqueue = queue;
    rt_hw_interrupt_enable(level);

    if (!ticks)
    {
        /* submit the work immediately when ticks is 0 */
        _workqueue_submit_work(work->workqueue, &(work->work));
    }
    else
    {
        level = rt_hw_interrupt_disable();
        /* arm the delay timer */
        work->work.flags |= RT_WORK_STATE_SUBMITTING;
        rt_timer_init(&(work->timer), "work", _delayed_work_timeout_handler, work, ticks,
                      RT_TIMER_FLAG_ONE_SHOT | RT_TIMER_FLAG_SOFT_TIMER);
        rt_hw_interrupt_enable(level);
        rt_timer_start(&(work->timer));
    }

__exit:
    return ret;
}

static void _delayed_work_timeout_handler(void *parameter)
{
    struct rt_delayed_work *delayed_work;
    rt_base_t level;

    delayed_work = (struct rt_delayed_work *)parameter;

    level = rt_hw_interrupt_disable();
    rt_timer_stop(&(delayed_work->timer));
    rt_timer_detach(&(delayed_work->timer));
    delayed_work->work.flags &= ~RT_WORK_STATE_SUBMITTING;
    rt_hw_interrupt_enable(level);

    _workqueue_submit_work(delayed_work->workqueue, &(delayed_work->work));
}
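
/* Create a workqueue: allocates the queue, initializes its work list and
 * completion semaphore, and starts a worker thread with the given name,
 * stack size and priority. Returns RT_NULL on allocation or thread
 * creation failure. */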

struct rt_workqueue *rt_workqueue_create(const char *name, rt_uint16_t stack_size, rt_uint8_t priority)
{
    struct rt_workqueue *queue = RT_NULL;

    queue = (struct rt_workqueue *)RT_KERNEL_MALLOC(sizeof(struct rt_workqueue));
    if (queue != RT_NULL)
    {
        /* initialize the work list */
        rt_list_init(&(queue->work_list));
        queue->work_current = RT_NULL;
        rt_sem_init(&(queue->sem), "wqueue", 0, RT_IPC_FLAG_FIFO);

        /* create the worker thread */
        queue->work_thread = rt_thread_create(name, _workqueue_thread_entry, queue, stack_size, priority, 10);
        if (queue->work_thread == RT_NULL)
        {
            RT_KERNEL_FREE(queue);
            return RT_NULL;
        }

        rt_thread_startup(queue->work_thread);
    }

    return queue;
}
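
/* Destroy a workqueue created by rt_workqueue_create(): deletes the worker
 * thread and frees the queue object. */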

rt_err_t rt_workqueue_destroy(struct rt_workqueue *queue)
{
    RT_ASSERT(queue != RT_NULL);

    rt_thread_delete(queue->work_thread);
    RT_KERNEL_FREE(queue);

    return RT_EOK;
}
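
/* Queue a work item for immediate execution on the given workqueue;
 * returns -RT_EBUSY if the work is already pending or currently running. */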

rt_err_t rt_workqueue_dowork(struct rt_workqueue *queue, struct rt_work *work)
{
    RT_ASSERT(queue != RT_NULL);
    RT_ASSERT(work != RT_NULL);

    return _workqueue_submit_work(queue, work);
}
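
/* Submit a work item to the workqueue. For delayed work (initialized with
 * rt_delayed_work_init()) the item is scheduled after 'time' ticks; ordinary
 * work is queued immediately and 'time' is ignored. */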

rt_err_t rt_workqueue_submit_work(struct rt_workqueue *queue, struct rt_work *work, rt_tick_t time)
{
    RT_ASSERT(queue != RT_NULL);
    RT_ASSERT(work != RT_NULL);

    if (work->type & RT_WORK_TYPE_DELAYED)
    {
        return _workqueue_submit_delayed_work(queue, (struct rt_delayed_work *)work, time);
    }
    else
    {
        return _workqueue_submit_work(queue, work);
    }
}
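
/* Queue a work item at the tail of the work list without checking its
 * PENDING flag, and wake the worker thread if the queue is idle; returns
 * -RT_EBUSY if the work is currently being executed. */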

rt_err_t rt_workqueue_critical_work(struct rt_workqueue *queue, struct rt_work *work)
{
    rt_base_t level;

    RT_ASSERT(queue != RT_NULL);
    RT_ASSERT(work != RT_NULL);

    level = rt_hw_interrupt_disable();
    if (queue->work_current == work)
    {
        rt_hw_interrupt_enable(level);
        return -RT_EBUSY;
    }

    /* NOTE: the work MUST be initialized first */
    rt_list_remove(&(work->list));

    rt_list_insert_after(queue->work_list.prev, &(work->list));
    if (queue->work_current == RT_NULL)
    {
        rt_hw_interrupt_enable(level);
        /* resume the worker thread */
        rt_thread_resume(queue->work_thread);
        rt_schedule();
    }
    else
    {
        rt_hw_interrupt_enable(level);
    }

    return RT_EOK;
}
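
/* Cancel a pending work item. Delayed work has its timer stopped and is
 * detached from the workqueue; returns -RT_EBUSY if the work is currently
 * being executed. */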

rt_err_t rt_workqueue_cancel_work(struct rt_workqueue *queue, struct rt_work *work)
{
    RT_ASSERT(queue != RT_NULL);
    RT_ASSERT(work != RT_NULL);

    if (work->type & RT_WORK_TYPE_DELAYED)
    {
        return _workqueue_cancel_delayed_work((struct rt_delayed_work *)work);
    }
    else
    {
        return _workqueue_cancel_work(queue, work);
    }
}
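
/* Cancel a work item and synchronize with the worker thread: if the work is
 * currently running, block on the queue semaphore until the worker
 * acknowledges completion; otherwise remove it from the work list. */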

rt_err_t rt_workqueue_cancel_work_sync(struct rt_workqueue *queue, struct rt_work *work)
{
    rt_base_t level;

    RT_ASSERT(queue != RT_NULL);
    RT_ASSERT(work != RT_NULL);

    level = rt_hw_interrupt_disable();
    if (queue->work_current == work) /* it's the current work in the queue */
    {
        /* wait for work completion */
        rt_sem_take(&(queue->sem), RT_WAITING_FOREVER);
    }
    else
    {
        rt_list_remove(&(work->list));
    }
    work->flags &= ~RT_WORK_STATE_PENDING;
    rt_hw_interrupt_enable(level);

    return RT_EOK;
}
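
/* Remove every item that is still queued on the workqueue; work that is
 * already executing is not affected. */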

rt_err_t rt_workqueue_cancel_all_work(struct rt_workqueue *queue)
{
    struct rt_list_node *node, *next;

    RT_ASSERT(queue != RT_NULL);

    rt_enter_critical();
    for (node = queue->work_list.next; node != &(queue->work_list); node = next)
    {
        next = node->next;
        rt_list_remove(node);
    }
    rt_exit_critical();

    return RT_EOK;
}
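
/* Initialize a delayed work item: sets up the embedded rt_work with the
 * given callback and data, and marks it as RT_WORK_TYPE_DELAYED so that
 * rt_workqueue_submit_work() schedules it through a one-shot timer. */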

void rt_delayed_work_init(struct rt_delayed_work *work, void (*work_func)(struct rt_work *work,
                          void *work_data), void *work_data)
{
    rt_work_init(&(work->work), work_func, work_data);
    work->work.type = RT_WORK_TYPE_DELAYED;
}
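
/* Usage sketch (illustrative only; the function and variable names below are
 * examples, not part of this file). With RT_USING_SYSTEM_WORKQUEUE enabled,
 * a delayed work item can be run on the shared system workqueue:
 *
 *     static struct rt_delayed_work demo_work;
 *
 *     static void demo_work_func(struct rt_work *work, void *data)
 *     {
 *         rt_kprintf("work executed, data = %p\n", data);
 *     }
 *
 *     static void demo_submit(void)
 *     {
 *         rt_delayed_work_init(&demo_work, demo_work_func, RT_NULL);
 *         rt_work_submit(&demo_work.work, RT_TICK_PER_SECOND);  // run after ~1 second
 *     }
 *
 * Without the system workqueue, create a private queue with
 * rt_workqueue_create() and use rt_workqueue_submit_work() instead.
 */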

#ifdef RT_USING_SYSTEM_WORKQUEUE
static struct rt_workqueue *sys_workq;

rt_err_t rt_work_submit(struct rt_work *work, rt_tick_t time)
{
    return rt_workqueue_submit_work(sys_workq, work, time);
}

rt_err_t rt_work_cancel(struct rt_work *work)
{
    return rt_workqueue_cancel_work(sys_workq, work);
}

static int rt_work_sys_workqueue_init(void)
{
    sys_workq = rt_workqueue_create("sys_work", RT_SYSTEM_WORKQUEUE_STACKSIZE,
                                    RT_SYSTEM_WORKQUEUE_PRIORITY);

    return RT_EOK;
}

INIT_DEVICE_EXPORT(rt_work_sys_workqueue_init);
#endif

#endif