update libc (#8335)

Commit 0b6402f3cb by geniusgogo, 2023-12-10 06:49:24 +08:00, committed by GitHub. Parent: 25bb80b657.
3 changed files with 86 additions and 35 deletions

File 1 of 3:

@@ -427,14 +427,14 @@ static int cmd_rm(int argc, char **argv)
if (stat(argv[index], &s) == 0)
#endif
{
if (s.st_mode & S_IFDIR)
if (S_ISDIR(s.st_mode))
{
if (r == 0)
rt_kprintf("cannot remove '%s': Is a directory\n", argv[index]);
else
directory_delete_for_msh(argv[index], f, v);
}
else if (s.st_mode & S_IFREG)
else
{
if (unlink(argv[index]) != 0)
{
@@ -1099,7 +1099,7 @@ static int cmd_chmod(int argc, char **argv)
struct stat s;
if (stat(argv[i], &s) == 0)
{
if (s.st_mode & S_IFDIR)
if (S_ISDIR(s.st_mode))
{
directory_setattr(argv[i], &attr, f, v);
}
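Both hunks in this file replace the raw bitmask test s.st_mode & S_IFDIR with the POSIX S_ISDIR() macro, and cmd_rm's else if (s.st_mode & S_IFREG) becomes a plain else, so anything that is not a directory now goes down the unlink() path. The bitmask test is unreliable because the file-type codes in st_mode are multi-bit values under the S_IFMT mask rather than independent flags: with the traditional octal encodings, a socket (S_IFSOCK = 0140000) or a block device (S_IFBLK = 0060000) also has the S_IFDIR bit (0040000) set and would be misclassified as a directory. A minimal sketch of the correct type check, using only standard <sys/stat.h> definitions (not code from this patch):

    #include <sys/stat.h>
    #include <stdio.h>

    /* S_ISDIR(m) expands to roughly ((m) & S_IFMT) == S_IFDIR: it compares the
     * whole type field, so no other file type can match by accident. */
    static const char *classify(mode_t m)
    {
        if (S_ISDIR(m))
            return "directory";
        if (S_ISREG(m))
            return "regular file";
        return "something else";
    }

    int main(void)
    {
        struct stat s;

        if (stat("/tmp", &s) == 0)
            printf("/tmp is a %s\n", classify(s.st_mode));
        return 0;
    }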

File 2 of 3:

@@ -35,6 +35,7 @@ struct rt_fd_list
struct rt_eventpoll *ep;
struct rt_wqueue_node wqn;
int exclusive;    /* In level-triggered mode, check whether the data has been read; if any data is left to read, the readable event is reported again on the next epoll_wait */
rt_bool_t is_rdl_node;
int fd;
struct rt_fd_list *next;
rt_slist_t rdl_node;
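The new is_rdl_node flag records whether this descriptor's rdl_node is currently linked into the eventpoll ready list (rdl_head). In the hunks below, epoll_wqueue_callback() and epoll_ctl_install() append the node and set the flag only when it is RT_FALSE, and epoll_do() clears it again when it pops the node off the list; that keeps the same slist node from being appended twice, which would corrupt the singly linked ready list. A minimal sketch of the invariant, with an illustrative helper name that does not exist in the patch:

    /* Illustrative only: enqueue fdlist->rdl_node at most once.
     * The caller must hold ep->spinlock (taken with rt_spin_lock_irqsave). */
    static void rdl_enqueue_once(struct rt_eventpoll *ep, struct rt_fd_list *fdlist)
    {
        if (fdlist->is_rdl_node == RT_FALSE)       /* not on rdl_head yet */
        {
            rt_slist_append(&ep->rdl_head, &fdlist->rdl_node);
            fdlist->is_rdl_node = RT_TRUE;         /* linked: skip later appends */
            ep->eventpoll_num++;
        }
        /* else: already queued, a second wakeup must not append it again */
    }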
@@ -121,6 +122,7 @@ static int epoll_poll(struct dfs_file *file, struct rt_pollreq *req)
{
struct rt_eventpoll *ep;
int events = 0;
rt_base_t level;
if (file->vnode->data)
{
@@ -130,12 +132,12 @@ static int epoll_poll(struct dfs_file *file, struct rt_pollreq *req)
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
rt_poll_add(&ep->epoll_read, req);
rt_spin_lock(&ep->spinlock);
level = rt_spin_lock_irqsave(&ep->spinlock);
if (!rt_slist_isempty(&ep->rdl_head))
events |= POLLIN | EPOLLRDNORM | POLLOUT;
rt_spin_unlock(&ep->spinlock);
rt_spin_unlock_irqrestore(&ep->spinlock, level);
rt_mutex_release(&ep->lock);
}
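Throughout this file the plain rt_spin_lock()/rt_spin_unlock() pairs are replaced with rt_spin_lock_irqsave()/rt_spin_unlock_irqrestore(). The ready list is also modified from epoll_wqueue_callback(), which runs on the wakeup path and may execute in a different interrupt context than the waiting thread, so the lock holder now masks local interrupts and restores the previous state afterwards. The pattern used at every converted call site:

    rt_base_t level;

    level = rt_spin_lock_irqsave(&ep->spinlock);      /* mask IRQs, take the lock,
                                                         remember the previous state */
    /* ... touch ep->rdl_head / ep->eventpoll_num ... */
    rt_spin_unlock_irqrestore(&ep->spinlock, level);  /* release and restore the
                                                         saved interrupt state */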
@@ -146,6 +148,7 @@ static int epoll_wqueue_callback(struct rt_wqueue_node *wait, void *key)
{
struct rt_fd_list *fdlist;
struct rt_eventpoll *ep;
rt_base_t level;
if (key && !((rt_ubase_t)key & wait->key))
return -1;
@@ -155,13 +158,17 @@ static int epoll_wqueue_callback(struct rt_wqueue_node *wait, void *key)
ep = fdlist->ep;
if (ep)
{
rt_spin_lock(&ep->spinlock);
rt_slist_append(&ep->rdl_head, &fdlist->rdl_node);
fdlist->exclusive = 0;
ep->tirggered = 1;
ep->eventpoll_num ++;
rt_wqueue_wakeup(&ep->epoll_read, (void *)POLLIN);
rt_spin_unlock(&ep->spinlock);
level = rt_spin_lock_irqsave(&ep->spinlock);
if (fdlist->is_rdl_node == RT_FALSE)
{
rt_slist_append(&ep->rdl_head, &fdlist->rdl_node);
fdlist->exclusive = 0;
fdlist->is_rdl_node = RT_TRUE;
ep->tirggered = 1;
ep->eventpoll_num ++;
rt_wqueue_wakeup(&ep->epoll_read, (void *)POLLIN);
}
rt_spin_unlock_irqrestore(&ep->spinlock, level);
}
return __wqueue_default_wake(wait, key);
@@ -187,6 +194,7 @@ static void epoll_wqueue_add_callback(rt_wqueue_t *wq, rt_pollreq_t *req)
static void epoll_ctl_install(struct rt_fd_list *fdlist, struct rt_eventpoll *ep)
{
rt_uint32_t mask = 0;
rt_base_t level;
fdlist->req._key = fdlist->revents;
@@ -197,12 +205,13 @@ static void epoll_ctl_install(struct rt_fd_list *fdlist, struct rt_eventpoll *ep
if (ep)
{
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
rt_spin_lock(&ep->spinlock);
level = rt_spin_lock_irqsave(&ep->spinlock);
rt_slist_append(&ep->rdl_head, &fdlist->rdl_node);
fdlist->exclusive = 0;
fdlist->is_rdl_node = RT_TRUE;
ep->tirggered = 1;
ep->eventpoll_num ++;
rt_spin_unlock(&ep->spinlock);
rt_spin_unlock_irqrestore(&ep->spinlock, level);
rt_mutex_release(&ep->lock);
}
}
@@ -250,6 +259,7 @@ static int epoll_epf_init(int fd)
ep->fdlist->fd = fd;
ep->fdlist->ep = ep;
ep->fdlist->exclusive = 0;
ep->fdlist->is_rdl_node = RT_FALSE;
dfs_vnode_init(df->vnode, FT_REGULAR, &epoll_fops);
df->vnode->data = ep;
rt_slist_init(&ep->fdlist->rdl_node);
@@ -340,6 +350,7 @@ static int epoll_ctl_add(struct dfs_file *df, int fd, struct epoll_event *event)
fdlist->epev.events = 0;
fdlist->ep = ep;
fdlist->exclusive = 0;
fdlist->is_rdl_node = RT_FALSE;
fdlist->req._proc = epoll_wqueue_add_callback;
fdlist->revents = event->events;
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
@@ -365,6 +376,7 @@ static int epoll_ctl_del(struct dfs_file *df, int fd)
struct rt_eventpoll *ep = RT_NULL;
rt_slist_t *node = RT_NULL;
rt_err_t ret = -EINVAL;
rt_base_t level;
if (df->vnode->data)
{
@@ -373,14 +385,14 @@ static int epoll_ctl_del(struct dfs_file *df, int fd)
if (ep)
{
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
rt_spin_lock(&ep->spinlock);
level = rt_spin_lock_irqsave(&ep->spinlock);
rt_slist_for_each(node, &ep->rdl_head)
{
rdlist = rt_slist_entry(node, struct rt_fd_list, rdl_node);
if (rdlist->fd == fd)
rt_slist_remove(&ep->rdl_head, node);
}
rt_spin_unlock(&ep->spinlock);
rt_spin_unlock_irqrestore(&ep->spinlock, level);
fdlist = ep->fdlist;
while (fdlist->next != RT_NULL)
@@ -591,18 +603,22 @@ static int epoll_do(struct rt_eventpoll *ep, struct epoll_event *events, int max
int isn_add = 0;
int isfree = 0;
int mask = 0;
rt_base_t level;
while (1)
{
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
rt_spin_lock(&ep->spinlock);
level = rt_spin_lock_irqsave(&ep->spinlock);
if (ep->eventpoll_num > 0)
{
rt_slist_for_each(node,&ep->rdl_head)
{
rdlist = rt_slist_entry(node, struct rt_fd_list, rdl_node);
ep->eventpoll_num --;
rt_slist_remove(&ep->rdl_head, &rdlist->rdl_node);
rdlist->is_rdl_node = RT_FALSE;
rt_spin_unlock(&ep->spinlock);
rt_spin_unlock_irqrestore(&ep->spinlock, level);
isfree = 0;
isn_add = 0;
@@ -640,12 +656,12 @@ static int epoll_do(struct rt_eventpoll *ep, struct epoll_event *events, int max
}
else
{
rt_spin_lock(&ep->spinlock);
level = rt_spin_lock_irqsave(&ep->spinlock);
if (rdlist->exclusive != 1)
{
rdlist->exclusive = 1;
}
rt_spin_unlock(&ep->spinlock);
rt_spin_unlock_irqrestore(&ep->spinlock, level);
}
}
@@ -655,32 +671,45 @@ static int epoll_do(struct rt_eventpoll *ep, struct epoll_event *events, int max
event_num ++;
}
if (isfree)
if (!isfree)
{
rt_spin_lock(&ep->spinlock);
ep->eventpoll_num --;
rt_slist_remove(&ep->rdl_head, &rdlist->rdl_node);
rt_spin_unlock(&ep->spinlock);
if (rdlist->is_rdl_node == RT_FALSE)
{
level = rt_spin_lock_irqsave(&ep->spinlock);
ep->eventpoll_num ++;
rt_slist_append(&ep->rdl_head, &rdlist->rdl_node);
rdlist->is_rdl_node = RT_TRUE;
rt_spin_unlock_irqrestore(&ep->spinlock, level);
}
else
{
level = rt_spin_lock_irqsave(&ep->spinlock);
if (!rdlist->wqn.wqueue)
{
epoll_get_event(rdlist, &rdlist->req);
}
rt_spin_unlock_irqrestore(&ep->spinlock, level);
}
}
}
else
{
rt_spin_lock(&ep->spinlock);
level = rt_spin_lock_irqsave(&ep->spinlock);
break;
}
rt_spin_lock(&ep->spinlock);
level = rt_spin_lock_irqsave(&ep->spinlock);
}
}
rt_spin_unlock(&ep->spinlock);
rt_spin_unlock_irqrestore(&ep->spinlock, level);
rt_mutex_release(&ep->lock);
if (event_num || istimeout)
{
rt_spin_lock(&ep->spinlock);
level = rt_spin_lock_irqsave(&ep->spinlock);
ep->tirggered = 0;
rt_spin_unlock(&ep->spinlock);
rt_spin_unlock_irqrestore(&ep->spinlock, level);
if ((timeout >= 0) || (event_num > 0))
break;
}
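Note how the condition flips in this hunk: the old block ran only when the descriptor had been freed (if (isfree)) and did the ready-list removal and eventpoll_num decrement at that point, whereas those now happen unconditionally when the node is popped at the top of the loop (together with clearing is_rdl_node), and the new block handles the opposite case of a descriptor that is still open. Such a descriptor is either put back on the ready list for the next wait, or, if a wakeup already re-queued it but its wait-queue node is no longer registered anywhere, re-armed through epoll_get_event(). A condensed view of the new per-node handling after the pop, with the event-copying code and loop scaffolding omitted:

    if (!isfree)                                /* the fd is still open */
    {
        if (rdlist->is_rdl_node == RT_FALSE)    /* no wakeup re-queued it meanwhile */
        {
            level = rt_spin_lock_irqsave(&ep->spinlock);
            ep->eventpoll_num++;
            rt_slist_append(&ep->rdl_head, &rdlist->rdl_node);  /* keep it ready for
                                                                   the next epoll_wait */
            rdlist->is_rdl_node = RT_TRUE;
            rt_spin_unlock_irqrestore(&ep->spinlock, level);
        }
        else                                    /* already back on the ready list */
        {
            level = rt_spin_lock_irqsave(&ep->spinlock);
            if (!rdlist->wqn.wqueue)            /* wait-queue node got detached */
                epoll_get_event(rdlist, &rdlist->req);   /* re-register the request */
            rt_spin_unlock_irqrestore(&ep->spinlock, level);
        }
    }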

File 3 of 3:

@@ -76,7 +76,10 @@ static int timerfd_close(struct dfs_file *file)
tfd->timer = RT_NULL;
}
rt_wqueue_remove(&tfd->wqn);
if (tfd->wqn.wqueue)
{
rt_wqueue_remove(&tfd->wqn);
}
rt_mutex_detach(&tfd->lock);
rt_free(tfd);
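rt_wqueue_remove() used to be called unconditionally; it is now guarded by the wqn.wqueue pointer, which this patch relies on being set when the node is added to a wait queue and RT_NULL otherwise (and it does start out as RT_NULL now that the structure comes from rt_calloc(), see the timerfd_do_create hunk further down). Removing a node that was never linked would unlink a zeroed list node and corrupt the queue. The same guard is added around the rt_wqueue_remove() call in timerfd_read() below. In commented form:

    /* Detach the wait-queue node only if it is really linked somewhere.
     * For a zero-initialized rt_timerfd, wqn.wqueue stays RT_NULL until the
     * node is added to a queue, so this skips nodes that were never added. */
    if (tfd->wqn.wqueue)
    {
        rt_wqueue_remove(&tfd->wqn);
    }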
@@ -99,7 +102,9 @@ static int timerfd_poll(struct dfs_file *file, struct rt_pollreq *req)
rt_mutex_release(&tfd->lock);
if (rt_atomic_load(&(tfd->ticks)) > 0)
{
events |= POLLIN;
}
return events;
}
@@ -141,7 +146,10 @@ static ssize_t timerfd_read(struct dfs_file *file, void *buf, size_t count, off_
{
tfd->wqn.polling_thread = rt_thread_self();
rt_wqueue_remove(&tfd->wqn);
if (tfd->wqn.wqueue)
{
rt_wqueue_remove(&tfd->wqn);
}
rt_wqueue_add(&tfd->timerfd_queue, &tfd->wqn);
ret = rt_thread_suspend_with_flag(tfd->wqn.polling_thread, RT_INTERRUPTIBLE);
@@ -207,7 +215,7 @@ static int timerfd_do_create(int clockid, int flags)
{
df->flags |= flags;
tfd = (struct rt_timerfd *)rt_malloc(sizeof(struct rt_timerfd));
tfd = (struct rt_timerfd *)rt_calloc(1, sizeof(struct rt_timerfd));
if (tfd)
{
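Allocating with rt_calloc() instead of rt_malloc() zero-fills the whole struct rt_timerfd, so pointer fields such as timer and wqn.wqueue start out as RT_NULL rather than heap garbage; the NULL checks added in timerfd_close() and timerfd_read() above only make sense with that guarantee. The difference in one place:

    /* rt_malloc() returns uninitialized memory: any "is this set yet?" test on
     * a field reads garbage unless every field is assigned by hand. */
    tfd = (struct rt_timerfd *)rt_malloc(sizeof(struct rt_timerfd));

    /* rt_calloc() zero-fills the block: tfd->timer, tfd->wqn.wqueue, etc. are
     * RT_NULL until they are really initialized, so NULL checks are reliable. */
    tfd = (struct rt_timerfd *)rt_calloc(1, sizeof(struct rt_timerfd));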
@@ -317,6 +325,7 @@ static void timerfd_timeout(void *parameter)
tfd->timer = rt_timer_create(TIMERFD_MUTEX_NAME, timerfd_timeout,
tfd, tfd->tick_out,
RT_TIMER_FLAG_PERIODIC | RT_TIMER_FLAG_SOFT_TIMER);
RT_ASSERT(tfd->timer);
rt_timer_start(tfd->timer);
}
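rt_timer_create() allocates the timer object from the heap and returns RT_NULL on failure; the old code handed that result straight to rt_timer_start(). The added RT_ASSERT(tfd->timer) at least traps the failure in debug builds before the NULL pointer is used. A hypothetical defensive variant that degrades gracefully instead of asserting could look like this (not what the patch does):

    tfd->timer = rt_timer_create(TIMERFD_MUTEX_NAME, timerfd_timeout,
                                 tfd, tfd->tick_out,
                                 RT_TIMER_FLAG_PERIODIC | RT_TIMER_FLAG_SOFT_TIMER);
    if (tfd->timer != RT_NULL)
    {
        rt_timer_start(tfd->timer);
    }
    else
    {
        /* out of memory: the periodic timerfd simply stops rearming itself */
        rt_kprintf("timerfd: failed to re-create periodic timer\n");
    }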
@@ -389,6 +398,7 @@ static int timerfd_do_settime(int fd, int flags, const struct itimerspec *new, s
if (new->it_value.tv_nsec == 0 && new->it_value.tv_sec == 0)
{
rt_mutex_release(&tfd->lock);
return 0;
}
@@ -403,7 +413,10 @@ static int timerfd_do_settime(int fd, int flags, const struct itimerspec *new, s
ret = get_current_time(tfd, &current_time);
if (ret < 0)
{
rt_mutex_release(&tfd->lock);
return ret;
}
cur_time = current_time.tv_sec * SEC_TO_MSEC + (current_time.tv_nsec / MSEC_TO_NSEC);
value_msec = value_msec - cur_time;
@@ -419,7 +432,10 @@ static int timerfd_do_settime(int fd, int flags, const struct itimerspec *new, s
{
tfd->tick_out = rt_tick_from_millisecond(interval_msec);
if (tfd->tick_out < 0)
{
rt_mutex_release(&tfd->lock);
return -EINVAL;
}
tfd->isperiodic = OPEN_PERIODIC;
}
@@ -428,16 +444,22 @@ static int timerfd_do_settime(int fd, int flags, const struct itimerspec *new, s
if (value_msec > 0)
{
if (value_msec > TIME_INT32_MAX)
{
rt_mutex_release(&tfd->lock);
return -EINVAL;
}
tick_out = rt_tick_from_millisecond(value_msec);
if (tick_out < 0)
{
rt_mutex_release(&tfd->lock);
return -EINVAL;
}
tfd->timer = rt_timer_create(TIMERFD_MUTEX_NAME, timerfd_timeout,
tfd, tick_out,
RT_TIMER_FLAG_ONE_SHOT | RT_TIMER_FLAG_SOFT_TIMER);
rt_timer_start(tfd->timer);
RT_ASSERT(tfd->timer);
rt_timer_start(tfd->timer);
}
else
{
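The recurring addition in timerfd_do_settime() is rt_mutex_release(&tfd->lock) in front of every early return; before this patch those paths returned with tfd->lock still held, so the next operation that tried to take the mutex on this timerfd would block forever. An alternative way to keep such functions single-exit is a goto-based unlock path; a minimal sketch in that style, with a hypothetical function name (this is not the structure the file uses):

    /* Hypothetical sketch: one unlock point instead of releasing the
     * mutex on every error path. */
    static int settime_single_exit_example(struct rt_timerfd *tfd, rt_int64_t value_msec)
    {
        int ret = 0;

        rt_mutex_take(&tfd->lock, RT_WAITING_FOREVER);

        if (value_msec > TIME_INT32_MAX)
        {
            ret = -EINVAL;
            goto unlock;                 /* single place that drops the mutex */
        }

        /* ... convert to ticks, create and start the timer ... */

    unlock:
        rt_mutex_release(&tfd->lock);
        return ret;
    }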