/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author        Notes
 * 2016-12-28     Bernard       first version
 * 2018-03-09     Bernard       Add protection for pt->triggered.
 * 2023-12-04     Shell         Fix return code and error verification
 * 2023-12-14     Shell         When poll goes to sleep before the waitqueue has added a
 *                              record and finished enumerating all the fd's, it may be
 *                              incorrectly woken up. This is basically because the poll
 *                              mechanism wakeup algorithm does not correctly distinguish
 *                              the current wait state.
 * 2024-03-29     TroyMitchelle Add all function comments and comments to structure members
 */
|
2022-01-08 23:29:41 +08:00
|
|
|
|
2017-10-15 22:44:53 +08:00
|
|
|
#include <stdint.h>
|
2017-10-17 22:27:06 +08:00
|
|
|
#include <rthw.h>
|
|
|
|
#include <rtthread.h>
|
2017-10-15 22:44:53 +08:00
|
|
|
#include <dfs_file.h>
|
2021-12-08 04:47:00 +08:00
|
|
|
#include "poll.h"
|
2017-10-15 22:44:53 +08:00
|
|
|
|
2024-04-02 11:18:15 +08:00
|
|
|
|
|
|
|
/* Lifecycle states of one poll() invocation. Used by poll_wait_timeout() and
 * __wqueue_pollwake() to distinguish "still enumerating fds" (INIT) from
 * "sleeping and safe to wake" (WAITING), closing the early-wakeup race. */
enum rt_poll_status
{
    RT_POLL_STAT_INIT,   /**< Poll operation initialization status. */
    RT_POLL_STAT_TRIG,   /**< Poll operation triggered status. */
    RT_POLL_STAT_WAITING /**< Poll operation waiting status. */
};
|
2017-10-15 22:44:53 +08:00
|
|
|
|
2024-04-02 11:18:15 +08:00
|
|
|
|
|
|
|
/* Per-call poll context, stack-allocated by poll() and shared (under
 * _spinlock) with wait-queue wakeup callbacks. */
struct rt_poll_table
{
    rt_pollreq_t req;               /**< Poll request; its _proc hook registers wait queues. */
    enum rt_poll_status status;     /**< Status of the poll operation. */
    rt_thread_t polling_thread;     /**< Polling thread associated with the table. */
    struct rt_poll_node *nodes;     /**< Singly linked list of poll nodes, freed in poll_teardown(). */
};
|
|
|
|
|
2024-04-02 11:18:15 +08:00
|
|
|
|
|
|
|
/* One wait-queue registration made on behalf of a poll table; allocated in
 * _poll_add() and released in poll_teardown(). */
struct rt_poll_node
{
    struct rt_wqueue_node wqn;  /**< Wait queue node for the poll node. */
    struct rt_poll_table *pt;   /**< Pointer to the parent poll table. */
    struct rt_poll_node *next;  /**< Pointer to the next poll node. */
};
|
|
|
|
|
2023-10-25 20:31:25 +08:00
|
|
|
static RT_DEFINE_SPINLOCK(_spinlock);
|
|
|
|
|
2024-04-02 11:18:15 +08:00
|
|
|
/**
|
|
|
|
* @brief Wake-up function for the wait queue.
|
|
|
|
*
|
|
|
|
* This function is invoked when a node in the wait queue needs to be woken up.
|
|
|
|
*
|
|
|
|
* @param wait Pointer to the wait queue node.
|
|
|
|
* @param key Key associated with the wake-up operation.
|
|
|
|
* @return Upon successful wake-up, returns 0; otherwise, -1 is returned.
|
|
|
|
*/
|
2017-10-15 22:44:53 +08:00
|
|
|
static int __wqueue_pollwake(struct rt_wqueue_node *wait, void *key)
|
|
|
|
{
|
2024-03-28 23:42:56 +08:00
|
|
|
rt_ubase_t level;
|
2017-10-15 22:44:53 +08:00
|
|
|
struct rt_poll_node *pn;
|
2024-03-28 23:42:56 +08:00
|
|
|
int is_waiting;
|
2017-10-15 22:44:53 +08:00
|
|
|
|
2018-12-13 14:54:26 +08:00
|
|
|
if (key && !((rt_ubase_t)key & wait->key))
|
2017-10-15 22:44:53 +08:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
pn = rt_container_of(wait, struct rt_poll_node, wqn);
|
|
|
|
|
2024-03-28 23:42:56 +08:00
|
|
|
level = rt_spin_lock_irqsave(&_spinlock);
|
|
|
|
is_waiting = (pn->pt->status == RT_POLL_STAT_WAITING);
|
|
|
|
|
|
|
|
pn->pt->status = RT_POLL_STAT_TRIG;
|
|
|
|
rt_spin_unlock_irqrestore(&_spinlock, level);
|
|
|
|
|
|
|
|
if (is_waiting)
|
|
|
|
return __wqueue_default_wake(wait, key);
|
|
|
|
|
|
|
|
return -1;
|
2017-10-15 22:44:53 +08:00
|
|
|
}
|
|
|
|
|
2024-04-02 11:18:15 +08:00
|
|
|
/**
|
|
|
|
* @brief Adds a poll request to the wait queue.
|
|
|
|
*
|
|
|
|
* This function adds a poll request to the wait queue associated with the specified
|
|
|
|
* wait queue and poll request.
|
|
|
|
*
|
|
|
|
* @param wq Pointer to the wait queue.
|
|
|
|
* @param req Pointer to the poll request.
|
|
|
|
*/
|
2017-10-15 22:44:53 +08:00
|
|
|
static void _poll_add(rt_wqueue_t *wq, rt_pollreq_t *req)
|
|
|
|
{
|
|
|
|
struct rt_poll_table *pt;
|
|
|
|
struct rt_poll_node *node;
|
|
|
|
|
2019-06-18 20:09:19 +08:00
|
|
|
node = (struct rt_poll_node *)rt_malloc(sizeof(struct rt_poll_node));
|
2017-10-15 22:44:53 +08:00
|
|
|
if (node == RT_NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
pt = rt_container_of(req, struct rt_poll_table, req);
|
|
|
|
|
|
|
|
node->wqn.key = req->_key;
|
|
|
|
rt_list_init(&(node->wqn.list));
|
|
|
|
node->wqn.polling_thread = pt->polling_thread;
|
|
|
|
node->wqn.wakeup = __wqueue_pollwake;
|
|
|
|
node->next = pt->nodes;
|
|
|
|
node->pt = pt;
|
|
|
|
pt->nodes = node;
|
|
|
|
rt_wqueue_add(wq, &node->wqn);
|
|
|
|
}
|
|
|
|
|
2024-04-02 11:18:15 +08:00
|
|
|
/**
|
|
|
|
* @brief Initializes a poll table.
|
|
|
|
*
|
|
|
|
* This function initializes a poll table with the provided poll request, status,
|
|
|
|
* and polling thread.
|
|
|
|
*
|
|
|
|
* @param pt Pointer to the poll table to be initialized.
|
|
|
|
*/
|
2017-10-15 22:44:53 +08:00
|
|
|
static void poll_table_init(struct rt_poll_table *pt)
|
|
|
|
{
|
|
|
|
pt->req._proc = _poll_add;
|
2024-03-28 23:42:56 +08:00
|
|
|
pt->status = RT_POLL_STAT_INIT;
|
2017-10-15 22:44:53 +08:00
|
|
|
pt->nodes = RT_NULL;
|
|
|
|
pt->polling_thread = rt_thread_self();
|
|
|
|
}
|
|
|
|
|
2024-04-02 11:18:15 +08:00
|
|
|
/**
 * @brief Put the polling thread to sleep until an event or timeout.
 *
 * Suspends the calling (polling) thread with an optional wakeup timer.
 * The WAITING status is published under _spinlock BEFORE the thread yields,
 * so __wqueue_pollwake() can reliably tell a sleeping poller from one that
 * is still enumerating fds (see 2023-12-14 change log entry).
 *
 * @param pt   Pointer to the poll table.
 * @param msec Timeout in milliseconds; 0 means do not sleep, negative means
 *             wait forever.
 * @return RT_EOK when an event triggered the wakeup; -RT_ETIMEOUT when the
 *         timeout expired (or msec was 0 with no pending trigger); -RT_EINTR
 *         when the sleep was interrupted by a signal.
 */
static int poll_wait_timeout(struct rt_poll_table *pt, int msec)
{
    rt_int32_t timeout;
    int ret = 0;
    struct rt_thread *thread;
    rt_base_t level;

    thread = pt->polling_thread;

    timeout = rt_tick_from_millisecond(msec);

    level = rt_spin_lock_irqsave(&_spinlock);

    /* Skip sleeping entirely when msec == 0 or an event already fired. */
    if (timeout != 0 && pt->status != RT_POLL_STAT_TRIG)
    {
        if (rt_thread_suspend_with_flag(thread, RT_INTERRUPTIBLE) == RT_EOK)
        {
            if (timeout > 0)
            {
                rt_timer_control(&(thread->thread_timer),
                                 RT_TIMER_CTRL_SET_TIME,
                                 &timeout);
                rt_timer_start(&(thread->thread_timer));
                /* Preload errno so a timer wakeup reads back as a timeout. */
                rt_set_errno(RT_ETIMEOUT);
            }
            else
            {
                /* Negative msec: infinite wait, clear any stale errno. */
                rt_set_errno(0);
            }
            /* Publish WAITING before yielding; must happen under the lock so
             * __wqueue_pollwake() cannot miss the transition. */
            pt->status = RT_POLL_STAT_WAITING;
            rt_spin_unlock_irqrestore(&_spinlock, level);

            rt_schedule();

            /* Re-acquire and normalize state: if nobody triggered us we were
             * woken by timer/signal, so fall back to INIT. */
            level = rt_spin_lock_irqsave(&_spinlock);
            if (pt->status == RT_POLL_STAT_WAITING)
                pt->status = RT_POLL_STAT_INIT;
        }
    }

    /* Decide the return code: signal beats trigger beats timeout. */
    ret = rt_get_errno();
    if (ret == RT_EINTR)
        ret = -RT_EINTR;
    else if (pt->status == RT_POLL_STAT_TRIG)
        ret = RT_EOK;
    else
        ret = -RT_ETIMEOUT;

    rt_spin_unlock_irqrestore(&_spinlock, level);

    return ret;
}
|
|
|
|
|
2024-04-02 11:18:15 +08:00
|
|
|
/**
|
|
|
|
* @brief Performs poll operation for a single file descriptor.
|
|
|
|
*
|
|
|
|
* This function performs a poll operation for a single file descriptor and updates
|
|
|
|
* the revents field of the pollfd structure accordingly.
|
|
|
|
*
|
|
|
|
* @param pollfd Pointer to the pollfd structure.
|
|
|
|
* @param req Pointer to the poll request.
|
|
|
|
* @return Upon successful completion, returns the bitmask of events that occurred.
|
|
|
|
* If an error occurs, -1 is returned.
|
|
|
|
*/
|
2017-10-15 22:44:53 +08:00
|
|
|
static int do_pollfd(struct pollfd *pollfd, rt_pollreq_t *req)
|
|
|
|
{
|
|
|
|
int mask = 0;
|
|
|
|
int fd;
|
|
|
|
|
|
|
|
fd = pollfd->fd;
|
|
|
|
|
|
|
|
if (fd >= 0)
|
|
|
|
{
|
2023-04-08 22:25:51 +08:00
|
|
|
struct dfs_file *f = fd_get(fd);
|
2017-10-15 22:44:53 +08:00
|
|
|
mask = POLLNVAL;
|
|
|
|
|
|
|
|
if (f)
|
|
|
|
{
|
|
|
|
mask = POLLMASK_DEFAULT;
|
2022-12-03 12:07:44 +08:00
|
|
|
if (f->vnode->fops->poll)
|
2017-10-15 22:44:53 +08:00
|
|
|
{
|
2019-04-03 18:09:52 +08:00
|
|
|
req->_key = pollfd->events | POLLERR | POLLHUP;
|
2017-10-15 22:44:53 +08:00
|
|
|
|
2022-12-03 12:07:44 +08:00
|
|
|
mask = f->vnode->fops->poll(f, req);
|
2020-05-17 15:51:35 +08:00
|
|
|
|
2020-05-21 22:05:38 +08:00
|
|
|
/* dealwith the device return error -1*/
|
2020-05-17 15:51:35 +08:00
|
|
|
if (mask < 0)
|
2021-03-08 18:19:04 +08:00
|
|
|
{
|
2020-05-17 15:51:35 +08:00
|
|
|
pollfd->revents = 0;
|
|
|
|
return mask;
|
|
|
|
}
|
2017-10-15 22:44:53 +08:00
|
|
|
}
|
|
|
|
/* Mask out unneeded events. */
|
|
|
|
mask &= pollfd->events | POLLERR | POLLHUP;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
pollfd->revents = mask;
|
|
|
|
|
|
|
|
return mask;
|
|
|
|
}
|
|
|
|
|
2024-04-02 11:18:15 +08:00
|
|
|
/**
 * @brief Core poll loop: enumerate fds, then sleep until events or timeout.
 *
 * Repeatedly scans the fd array; on the first pass req->_proc is still set,
 * so each driver registers a wait-queue record. Once any fd reports events
 * (or registration is complete) _proc is cleared so later scans do not
 * re-register. If no events were found the thread sleeps via
 * poll_wait_timeout() and rescans on wakeup.
 *
 * @param fds  Pointer to the array of pollfd structures.
 * @param nfds Number of file descriptors in the array.
 * @param pt   Pointer to the poll table.
 * @param msec Timeout in milliseconds; 0 polls once without sleeping,
 *             negative waits indefinitely.
 * @return Number of fds with events; 0 on timeout; -EINTR when interrupted
 *         by a signal; a negative driver error from do_pollfd().
 */
static int poll_do(struct pollfd *fds, nfds_t nfds, struct rt_poll_table *pt, int msec)
{
    int num;
    int istimeout = 0;
    nfds_t n;
    struct pollfd *pf;
    int ret = 0;

    /* Non-blocking poll: never register wait queues, scan exactly once. */
    if (msec == 0)
    {
        pt->req._proc = RT_NULL;
        istimeout = 1;
    }

    while (1)
    {
        pf = fds;
        num = 0;
        /* Reset per scan so a trigger during enumeration is not lost. */
        pt->status = RT_POLL_STAT_INIT;

        for (n = 0; n < nfds; n ++)
        {
            ret = do_pollfd(pf, &pt->req);
            if(ret < 0)
            {
                /* The driver returned an error: stop registering and
                 * propagate it to the caller. */
                pt->req._proc = RT_NULL;
                return ret;
            }
            else if(ret > 0)
            {
                /* Events found: no need to register any further fds. */
                num ++;
                pt->req._proc = RT_NULL;
            }
            pf ++;
        }

        /* Registration (if any) is complete after the first full scan. */
        pt->req._proc = RT_NULL;

        if (num || istimeout)
            break;

        ret = poll_wait_timeout(pt, msec);
        if (ret == -RT_EINTR)
            return -EINTR;
        else if (ret == -RT_ETIMEOUT)
            istimeout = 1;   /* one final scan, then report timeout */
        else
            istimeout = 0;
    }

    return num;
}
|
|
|
|
|
2024-04-02 11:18:15 +08:00
|
|
|
/**
|
|
|
|
* @brief Tears down the poll table.
|
|
|
|
*
|
|
|
|
* This function tears down the poll table by removing all poll nodes associated
|
|
|
|
* with it.
|
|
|
|
*
|
|
|
|
* @param pt Pointer to the poll table.
|
|
|
|
*/
|
2017-10-15 22:44:53 +08:00
|
|
|
static void poll_teardown(struct rt_poll_table *pt)
|
|
|
|
{
|
|
|
|
struct rt_poll_node *node, *next;
|
|
|
|
|
|
|
|
next = pt->nodes;
|
|
|
|
while (next)
|
|
|
|
{
|
|
|
|
node = next;
|
|
|
|
rt_wqueue_remove(&node->wqn);
|
|
|
|
next = node->next;
|
|
|
|
rt_free(node);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-04-02 11:18:15 +08:00
|
|
|
/**
|
|
|
|
* @brief Performs the poll operation on a set of file descriptors.
|
|
|
|
*
|
|
|
|
* This function performs the poll operation on a set of file descriptors and
|
|
|
|
* waits for events with the specified timeout.
|
|
|
|
*
|
|
|
|
* @param fds Pointer to the array of pollfd structures.
|
|
|
|
* @param nfds Number of file descriptors in the array.
|
|
|
|
* @param timeout Timeout value in milliseconds.
|
|
|
|
* @return Upon successful completion, returns the number of file descriptors
|
|
|
|
* for which events were received. If the timeout expires, 0 is returned.
|
|
|
|
* If an error occurs, -1 is returned.
|
|
|
|
*/
|
2017-10-15 22:44:53 +08:00
|
|
|
int poll(struct pollfd *fds, nfds_t nfds, int timeout)
|
|
|
|
{
|
|
|
|
int num;
|
|
|
|
struct rt_poll_table table;
|
|
|
|
|
|
|
|
poll_table_init(&table);
|
|
|
|
|
|
|
|
num = poll_do(fds, nfds, &table, timeout);
|
2017-10-25 18:42:06 +08:00
|
|
|
|
2017-10-15 22:44:53 +08:00
|
|
|
poll_teardown(&table);
|
|
|
|
|
|
|
|
return num;
|
|
|
|
}
|