/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author        Notes
 * 2023-07-29     zmq810150896  first version
 */

#include <rtthread.h>
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>
#include <dfs_file.h>
#include "sys/epoll.h"
#include "poll.h"
#include <lwp_signal.h>

#define EPOLL_MUTEX_NAME "EVENTEPOLL"

#define EFD_SHARED_EPOLL_TYPE (EPOLL_CTL_ADD | EPOLL_CTL_DEL | EPOLL_CTL_MOD)
#define EPOLLINOUT_BITS       (EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM)
#define EPOLLEXCLUSIVE_BITS   (EPOLLINOUT_BITS | EPOLLERR | EPOLLHUP | \
                               EPOLLET | EPOLLEXCLUSIVE)

struct rt_eventpoll;

/* Monitor queue */
struct rt_fd_list
{
    rt_uint32_t revents;            /* Monitored events */
    struct epoll_event epev;
    rt_pollreq_t req;
    struct rt_eventpoll *ep;
    struct rt_wqueue_node wqn;
    int exclusive;                  /* Level-triggered mode: check whether the data has been read;
                                     * if unread data remains, the readable event is reported again
                                     * by the next epoll_wait */
    int fd;
    struct rt_fd_list *next;
    rt_slist_t rdl_node;
};

struct rt_eventpoll
{
    rt_uint32_t triggered;          /* whether the waiting thread has been triggered */
    rt_wqueue_t epoll_read;
    rt_thread_t polling_thread;
    struct rt_mutex lock;
    struct rt_fd_list *fdlist;      /* Monitor list */
    int eventpoll_num;              /* Number of entries on the ready list */
    rt_pollreq_t req;
    struct rt_spinlock spinlock;
    rt_slist_t rdl_head;
};

static int epoll_close(struct dfs_file *file);
static int epoll_poll(struct dfs_file *file, struct rt_pollreq *req);
static int epoll_get_event(struct rt_fd_list *fl, rt_pollreq_t *req);
static int epoll_do_ctl(int epfd, int op, int fd, struct epoll_event *event);

static const struct dfs_file_ops epoll_fops =
{
    .close = epoll_close,
    .poll  = epoll_poll,
};
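
/* Free every node on the monitor list: detach each from its wait queue
 * (if still queued) and release it, then release the list head itself. */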
static int epoll_close_fdlist(struct rt_fd_list *fdlist)
{
    struct rt_fd_list *fre_node, *list;

    if (fdlist != RT_NULL)
    {
        list = fdlist;
        while (list->next != RT_NULL)
        {
            fre_node = list->next;
            if (fre_node->wqn.wqueue)
                rt_wqueue_remove(&fre_node->wqn);

            list->next = fre_node->next;
            rt_free(fre_node);
        }

        rt_free(fdlist);
    }

    return 0;
}
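
/* dfs close hook: once the last reference to the vnode is dropped, tear
 * down the whole eventpoll object together with its monitor list. */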
static int epoll_close(struct dfs_file *file)
{
    struct rt_eventpoll *ep;

    if (file->vnode->ref_count != 1)
        return 0;

    if (file->vnode)
    {
        if (file->vnode->data)
        {
            ep = file->vnode->data;
            if (ep)
            {
                rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
                if (ep->fdlist)
                {
                    epoll_close_fdlist(ep->fdlist);
                }

                rt_mutex_release(&ep->lock);
                rt_mutex_detach(&ep->lock);
                rt_free(ep);
            }
        }
    }

    return 0;
}
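
/* dfs poll hook: allows an epoll fd to be polled itself (e.g. nested inside
 * poll/select); it reports readiness whenever the ready list is non-empty. */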
static int epoll_poll(struct dfs_file *file, struct rt_pollreq *req)
{
    struct rt_eventpoll *ep;
    int events = 0;

    if (file->vnode->data)
    {
        ep = file->vnode->data;
        ep->req._key = req->_key;

        rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
        rt_poll_add(&ep->epoll_read, req);

        rt_spin_lock(&ep->spinlock);

        if (!rt_slist_isempty(&ep->rdl_head))
            events |= POLLIN | EPOLLRDNORM | POLLOUT;

        rt_spin_unlock(&ep->spinlock);
        rt_mutex_release(&ep->lock);
    }

    return events;
}
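
/* Wait-queue wakeup callback: when a monitored fd fires, move its node onto
 * the ready list, mark the eventpoll as triggered and wake the thread
 * blocked in epoll_wait. */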
static int epoll_wqueue_callback(struct rt_wqueue_node *wait, void *key)
{
    struct rt_fd_list *fdlist;
    struct rt_eventpoll *ep;

    if (key && !((rt_ubase_t)key & wait->key))
        return -1;

    fdlist = rt_container_of(wait, struct rt_fd_list, wqn);

    ep = fdlist->ep;
    if (ep)
    {
        rt_spin_lock(&ep->spinlock);
        rt_slist_append(&ep->rdl_head, &fdlist->rdl_node);
        fdlist->exclusive = 0;
        ep->triggered = 1;
        ep->eventpoll_num++;
        rt_wqueue_wakeup(&ep->epoll_read, (void *)POLLIN);
        rt_spin_unlock(&ep->spinlock);
    }

    return __wqueue_default_wake(wait, key);
}
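
/* rt_pollreq _proc callback: attach our wait-queue node to the monitored
 * fd's wait queue so epoll_wqueue_callback runs when that fd reports an event. */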
static void epoll_wqueue_add_callback(rt_wqueue_t *wq, rt_pollreq_t *req)
{
    struct rt_fd_list *fdlist;
    struct rt_eventpoll *ep;

    fdlist = rt_container_of(req, struct rt_fd_list, req);

    ep = fdlist->ep;
    fdlist->wqn.key = req->_key;
    rt_list_init(&(fdlist->wqn.list));

    fdlist->wqn.polling_thread = ep->polling_thread;
    fdlist->wqn.wakeup = epoll_wqueue_callback;
    rt_wqueue_add(wq, &fdlist->wqn);
}
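
/* Poll the fd once at registration/modification time; if a requested event
 * is already pending, put the node on the ready list right away. */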
static void epoll_ctl_install(struct rt_fd_list *fdlist, struct rt_eventpoll *ep)
{
    rt_uint32_t mask = 0;

    fdlist->req._key = fdlist->revents;
    mask = epoll_get_event(fdlist, &fdlist->req);

    if (mask & fdlist->revents)
    {
        if (ep)
        {
            rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
            rt_spin_lock(&ep->spinlock);
            rt_slist_append(&ep->rdl_head, &fdlist->rdl_node);
            fdlist->exclusive = 0;
            ep->triggered = 1;
            ep->eventpoll_num++;
            rt_spin_unlock(&ep->spinlock);
            rt_mutex_release(&ep->lock);
        }
    }
}
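
/* Initialize a fresh eventpoll object: empty monitor and ready lists, the
 * wait queue for epoll_wait, and the protecting mutex and spinlock. */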
static void epoll_member_init(struct rt_eventpoll *ep)
{
    ep->triggered = 0;
    ep->eventpoll_num = 0;
    ep->polling_thread = rt_thread_self();
    ep->fdlist = RT_NULL;
    ep->req._key = 0;
    rt_slist_init(&(ep->rdl_head));
    rt_wqueue_init(&ep->epoll_read);
    rt_mutex_init(&ep->lock, EPOLL_MUTEX_NAME, RT_IPC_FLAG_FIFO);
    rt_spin_lock_init(&ep->spinlock);
}
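
/* Bind a newly allocated eventpoll object (plus its vnode and monitor list
 * head) to the fd that epoll_create just obtained. */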
static int epoll_epf_init(int fd)
{
    struct dfs_file *df;
    struct rt_eventpoll *ep;
    rt_err_t ret = 0;

    df = fd_get(fd);
    if (df)
    {
        ep = (struct rt_eventpoll *)rt_malloc(sizeof(struct rt_eventpoll));
        if (ep)
        {
            epoll_member_init(ep);

#ifdef RT_USING_DFS_V2
            df->fops = &epoll_fops;
#endif

            df->vnode = (struct dfs_vnode *)rt_malloc(sizeof(struct dfs_vnode));
            if (df->vnode)
            {
                ep->fdlist = (struct rt_fd_list *)rt_malloc(sizeof(struct rt_fd_list));
                if (ep->fdlist)
                {
                    ep->fdlist->next = RT_NULL;
                    ep->fdlist->fd = fd;
                    ep->fdlist->ep = ep;
                    ep->fdlist->exclusive = 0;
                    dfs_vnode_init(df->vnode, FT_REGULAR, &epoll_fops);
                    df->vnode->data = ep;
                    rt_slist_init(&ep->fdlist->rdl_node);
                }
                else
                {
                    ret = -ENOMEM;
                    rt_free(df->vnode);
                    rt_free(ep);
                }
            }
            else
            {
                ret = -ENOMEM;
                rt_free(ep);
            }
        }
        else
        {
            ret = -ENOMEM;
        }
    }

    return ret;
}
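
/* Back end of epoll_create(): allocate an fd and initialize its eventpoll
 * state; returns the new fd, or -1 with errno set on failure. */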
static int epoll_do_create(int size)
{
    rt_err_t ret = -1;
    int status;
    int fd;

    if (size < 0)
    {
        rt_set_errno(EINVAL);
    }
    else
    {
        fd = fd_new();
        if (fd >= 0)
        {
            ret = fd;
            status = epoll_epf_init(fd);
            if (status < 0)
            {
                fd_release(fd);
                rt_set_errno(-status);
            }
        }
        else
        {
            rt_set_errno(-fd);
        }
    }

    return ret;
}
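
/* EPOLL_CTL_ADD: append a monitor node for fd (an fd that is already being
 * monitored is silently accepted) and install its poll callback. */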
static int epoll_ctl_add(struct dfs_file *df, int fd, struct epoll_event *event)
{
    struct rt_fd_list *fdlist;
    struct rt_eventpoll *ep;
    rt_err_t ret = -EINVAL;

    if (df->vnode->data)
    {
        ep = df->vnode->data;
        fdlist = ep->fdlist;
        ret = 0;

        rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
        while (fdlist->next != RT_NULL)
        {
            if (fdlist->next->fd == fd)
            {
                rt_mutex_release(&ep->lock);
                return 0;
            }
            fdlist = fdlist->next;
        }
        rt_mutex_release(&ep->lock);

        fdlist = (struct rt_fd_list *)rt_malloc(sizeof(struct rt_fd_list));
        if (fdlist)
        {
            fdlist->fd = fd;
            memcpy(&fdlist->epev.data, &event->data, sizeof(event->data));
            fdlist->epev.events = 0;
            fdlist->ep = ep;
            fdlist->exclusive = 0;
            fdlist->req._proc = epoll_wqueue_add_callback;
            fdlist->revents = event->events;
            rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
            fdlist->next = ep->fdlist->next;
            ep->fdlist->next = fdlist;
            rt_mutex_release(&ep->lock);
            rt_slist_init(&fdlist->rdl_node);

            epoll_ctl_install(fdlist, ep);
        }
        else
        {
            ret = -ENOMEM;
        }
    }

    return ret;
}
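
/* EPOLL_CTL_DEL: remove fd from the ready list and the monitor list,
 * detaching its wait-queue node before freeing it. */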
static int epoll_ctl_del(struct dfs_file *df, int fd)
{
    struct rt_fd_list *fdlist, *fre_fd, *rdlist;
    struct rt_eventpoll *ep = RT_NULL;
    rt_slist_t *node = RT_NULL;
    rt_err_t ret = -EINVAL;

    if (df->vnode->data)
    {
        ep = df->vnode->data;
        if (ep)
        {
            rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
            rt_spin_lock(&ep->spinlock);
            rt_slist_for_each(node, &ep->rdl_head)
            {
                rdlist = rt_slist_entry(node, struct rt_fd_list, rdl_node);
                if (rdlist->fd == fd)
                    rt_slist_remove(&ep->rdl_head, node);
            }
            rt_spin_unlock(&ep->spinlock);

            fdlist = ep->fdlist;
            while (fdlist->next != RT_NULL)
            {
                if (fdlist->next->fd == fd)
                {
                    fre_fd = fdlist->next;
                    fdlist->next = fdlist->next->next;
                    if (fre_fd->wqn.wqueue)
                        rt_wqueue_remove(&fre_fd->wqn);
                    rt_free(fre_fd);
                    break;
                }
                else
                {
                    fdlist = fdlist->next;
                }
            }

            rt_mutex_release(&ep->lock);
        }

        ret = 0;
    }

    return ret;
}
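
/* EPOLL_CTL_MOD: replace the monitored event set and user data for fd,
 * then re-poll it through epoll_ctl_install. */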
static int epoll_ctl_mod(struct dfs_file *df, int fd, struct epoll_event *event)
{
    struct rt_fd_list *fdlist;
    struct rt_eventpoll *ep = RT_NULL;
    rt_err_t ret = -EINVAL;

    if (df->vnode->data)
    {
        ep = df->vnode->data;

        fdlist = ep->fdlist;
        while (fdlist->next != RT_NULL)
        {
            if (fdlist->next->fd == fd)
            {
                rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
                memcpy(&fdlist->next->epev.data, &event->data, sizeof(event->data));
                fdlist->next->revents = event->events;
                if (fdlist->next->wqn.wqueue)
                    rt_wqueue_remove(&fdlist->next->wqn);
                rt_mutex_release(&ep->lock);
                epoll_ctl_install(fdlist->next, ep);
                break;
            }

            fdlist = fdlist->next;
        }

        ret = 0;
    }

    return ret;
}
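
/* Back end of epoll_ctl(): validate op/epfd/fd, force EPOLLERR | EPOLLHUP
 * into the event mask for ADD/MOD (as Linux epoll does), then dispatch to
 * the matching handler. */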
static int epoll_do_ctl(int epfd, int op, int fd, struct epoll_event *event)
{
    struct dfs_file *epdf;
    struct rt_eventpoll *ep;
    rt_err_t ret = 0;

    if (op & ~EFD_SHARED_EPOLL_TYPE)
    {
        rt_set_errno(EINVAL);
        return -1;
    }

    if ((epfd == fd) || (epfd < 0))
    {
        rt_set_errno(EINVAL);
        return -1;
    }

    if (!(op & EPOLL_CTL_DEL))
    {
        if (!(event->events & EPOLLEXCLUSIVE_BITS))
        {
            rt_set_errno(EINVAL);
            return -1;
        }
        event->events |= EPOLLERR | EPOLLHUP;
    }

    if (!fd_get(fd))
    {
        rt_set_errno(EBADF);
        return -1;
    }

    epdf = fd_get(epfd);

    if (epdf->vnode->data)
    {
        ep = epdf->vnode->data;

        switch (op)
        {
        case EPOLL_CTL_ADD:
            ret = epoll_ctl_add(epdf, fd, event);
            break;
        case EPOLL_CTL_DEL:
            ret = epoll_ctl_del(epdf, fd);
            break;
        case EPOLL_CTL_MOD:
            ret = epoll_ctl_mod(epdf, fd, event);
            break;
        default:
            rt_set_errno(EINVAL);
            break;
        }

        if (ret < 0)
        {
            rt_set_errno(-ret);
            ret = -1;
        }
        else
        {
            ep->polling_thread = rt_thread_self();
        }
    }

    return ret;
}
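
/* Suspend the polling thread for at most msec milliseconds (forever if
 * negative) unless an event has already triggered; returns nonzero when the
 * wait ended without any event, i.e. on timeout. */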
static int epoll_wait_timeout(struct rt_eventpoll *ep, int msec)
{
    rt_int32_t timeout;
    struct rt_thread *thread;
    rt_base_t level;
    int ret = 0;

    thread = ep->polling_thread;

    timeout = rt_tick_from_millisecond(msec);

    level = rt_spin_lock_irqsave(&ep->spinlock);

    if (timeout != 0 && !ep->triggered)
    {
        if (rt_thread_suspend_with_flag(thread, RT_KILLABLE) == RT_EOK)
        {
            if (timeout > 0)
            {
                rt_timer_control(&(thread->thread_timer),
                                 RT_TIMER_CTRL_SET_TIME,
                                 &timeout);
                rt_timer_start(&(thread->thread_timer));
            }

            rt_spin_unlock_irqrestore(&ep->spinlock, level);

            rt_schedule();

            level = rt_spin_lock_irqsave(&ep->spinlock);
        }
    }

    ret = !ep->triggered;
    rt_spin_unlock_irqrestore(&ep->spinlock, level);

    return ret;
}
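
/* Re-poll one monitored fd through its fops->poll and mask the result down
 * to the events the caller registered for it. */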
static int epoll_get_event(struct rt_fd_list *fl, rt_pollreq_t *req)
{
    struct dfs_file *df;
    int mask = 0;
    int fd = 0;

    fd = fl->fd;
    if (fd >= 0)
    {
        df = fd_get(fd);
        if (df)
        {
            if (df->vnode->fops->poll)
            {
                req->_key = fl->revents | POLLERR | POLLHUP;
                mask = df->vnode->fops->poll(df, req);
                if (mask < 0)
                    return mask;
            }

            mask &= fl->revents | EPOLLOUT | POLLERR;
        }
    }

    return mask;
}
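
/* Core of epoll_wait(): drain the ready list into events[], re-checking each
 * fd and honouring EPOLLONESHOT/EPOLLET semantics, then sleep in
 * epoll_wait_timeout until an event arrives or the timeout expires. */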
static int epoll_do(struct rt_eventpoll *ep, struct epoll_event *events, int maxevents, int timeout)
{
    struct rt_fd_list *rdlist;
    rt_slist_t *node = RT_NULL;
    int event_num = 0;
    int istimeout = 0;
    int isn_add = 0;
    int isfree = 0;
    int mask = 0;

    while (1)
    {
        rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
        rt_spin_lock(&ep->spinlock);

        if (ep->eventpoll_num > 0)
        {
            rt_slist_for_each(node, &ep->rdl_head)
            {
                rdlist = rt_slist_entry(node, struct rt_fd_list, rdl_node);
                rt_spin_unlock(&ep->spinlock);

                isfree = 0;
                isn_add = 0;

                if (event_num < maxevents)
                {
                    if (rdlist->wqn.wqueue)
                    {
                        rt_wqueue_remove(&rdlist->wqn);
                    }

                    mask = epoll_get_event(rdlist, &rdlist->req);

                    if (mask & rdlist->revents)
                    {
                        rdlist->epev.events = mask & rdlist->revents;
                    }
                    else
                    {
                        isfree = 1;
                        isn_add = 1;
                    }

                    if (rdlist->revents & EPOLLONESHOT)
                    {
                        rdlist->revents = 0;
                        isfree = 1;
                        if (rdlist->wqn.wqueue)
                            rt_wqueue_remove(&rdlist->wqn);
                    }
                    else
                    {
                        if (rdlist->revents & EPOLLET)
                        {
                            isfree = 1;
                        }
                        else
                        {
                            rt_spin_lock(&ep->spinlock);
                            if (rdlist->exclusive != 1)
                            {
                                rdlist->exclusive = 1;
                            }
                            rt_spin_unlock(&ep->spinlock);
                        }
                    }

                    if (!isn_add)
                    {
                        memcpy(&events[event_num], &rdlist->epev, sizeof(rdlist->epev));
                        event_num++;
                    }

                    if (isfree)
                    {
                        rt_spin_lock(&ep->spinlock);
                        ep->eventpoll_num--;
                        rt_slist_remove(&ep->rdl_head, &rdlist->rdl_node);
                        rt_spin_unlock(&ep->spinlock);
                    }
                }
                else
                {
                    rt_spin_lock(&ep->spinlock);
                    break;
                }

                rt_spin_lock(&ep->spinlock);
            }
        }

        rt_spin_unlock(&ep->spinlock);
        rt_mutex_release(&ep->lock);

        if (event_num || istimeout)
        {
            rt_spin_lock(&ep->spinlock);
            ep->triggered = 0;
            rt_spin_unlock(&ep->spinlock);

            if ((timeout >= 0) || (event_num > 0))
                break;
        }

        if (epoll_wait_timeout(ep, timeout))
        {
            istimeout = 1;
        }
    }

    return event_num;
}
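
/* Back end of epoll_wait()/epoll_pwait(): optionally blocks the given signal
 * set around the wait, then delegates to epoll_do(). */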
static int epoll_do_wait(int epfd, struct epoll_event *events, int maxevents, int timeout, const sigset_t *ss)
{
    struct rt_eventpoll *ep;
    struct dfs_file *df;
    lwp_sigset_t old_sig, new_sig;
    rt_err_t ret = 0;

    if (ss)
    {
        memcpy(&new_sig, ss, sizeof(lwp_sigset_t));
        lwp_thread_signal_mask(rt_thread_self(), LWP_SIG_MASK_CMD_BLOCK, &new_sig, &old_sig);
    }

    if ((maxevents > 0) && (epfd >= 0))
    {
        df = fd_get(epfd);
        if (df && df->vnode)
        {
            ep = (struct rt_eventpoll *)df->vnode->data;
            if (ep)
            {
                ret = epoll_do(ep, events, maxevents, timeout);
            }
        }
    }

    if (ss)
    {
        lwp_thread_signal_mask(rt_thread_self(), LWP_SIG_MASK_CMD_SET_MASK, &old_sig, RT_NULL);
    }

    if (ret < 0)
    {
        rt_set_errno(-ret);
        ret = -1;
    }

    return ret;
}
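
/*
 * Public POSIX-style entry points. A minimal usage sketch (illustrative
 * only; "fd" is assumed to be some descriptor whose driver implements
 * fops->poll, e.g. a pipe or socket):
 *
 *     struct epoll_event ev = {0}, ready[4];
 *     int epfd = epoll_create(1);                // size is only checked for >= 0 here
 *
 *     ev.events  = EPOLLIN;
 *     ev.data.fd = fd;
 *     epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
 *
 *     int n = epoll_wait(epfd, ready, 4, 1000);  // block for up to 1000 ms
 *     // each ready[i].data holds the data registered with EPOLL_CTL_ADD
 */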
int epoll_create(int size)
{
    return epoll_do_create(size);
}

int epoll_ctl(int epfd, int op, int fd, struct epoll_event *event)
{
    return epoll_do_ctl(epfd, op, fd, event);
}

int epoll_wait(int epfd, struct epoll_event *events, int maxevents, int timeout)
{
    return epoll_do_wait(epfd, events, maxevents, timeout, RT_NULL);
}

int epoll_pwait(int epfd, struct epoll_event *events, int maxevents, int timeout, const sigset_t *ss)
{
    return epoll_do_wait(epfd, events, maxevents, timeout, ss);
}

int epoll_pwait2(int epfd, struct epoll_event *events, int maxevents, int timeout, const sigset_t *ss)
{
    return epoll_do_wait(epfd, events, maxevents, timeout, ss);
}