fix rt_channel lock. (#8244)

Author: geniusgogo
Date:   2023-11-10 21:37:07 +08:00 (committed by GitHub)
Parent: 589641258e
Commit: 9acebb07dc
4 changed files with 253 additions and 189 deletions
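
In outline, the commit retires the single ipc_big_lock spinlock (previously aliased as both ipc_list_lock and ipc_ch_lock) and splits its job three ways: a mutex _chn_obj_lock serializes walks over the channel object list, a spinlock _msg_list_lock protects the static IPC message pool, and each struct rt_channel gains its own spinlock slock for per-channel state. A minimal sketch of how the three locks divide the work (lock and field names are taken from the diff below; the wrapper function is illustrative only):

/* Illustrative sketch only: lock names match the diff below; the wrapper
 * function is hypothetical. <rtthread.h> provides the kernel primitives,
 * and rt_channel_t is available when the lwp/IPC component is enabled. */
#include <rtthread.h>

static struct rt_mutex    _chn_obj_lock;  /* channel object-list walks (may sleep) */
static struct rt_spinlock _msg_list_lock; /* static message free list (short, IRQ-safe) */
/* in addition, every struct rt_channel now embeds its own spinlock 'slock' */

static void lock_usage_sketch(rt_channel_t ch)
{
    rt_base_t level;

    /* lookups in the global channel list go through the mutex,
     * initialized once by rt_channel_component_init() below */
    rt_mutex_take(&_chn_obj_lock, RT_WAITING_FOREVER);
    /* ... walk the RT_Object_Class_Channel list ... */
    rt_mutex_release(&_chn_obj_lock);

    /* per-channel state changes stay inside a short IRQ-safe critical section */
    level = rt_spin_lock_irqsave(&ch->slock);
    ch->ref++;
    rt_spin_unlock_irqrestore(&ch->slock, level);
}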


@ -79,6 +79,10 @@ int lwp_component_init(void)
{
LOG_E("%s: lwp_pid_init() failed", __func__);
}
else if ((rc = rt_channel_component_init()) != RT_EOK)
{
LOG_E("%s: rt_channel_component_init failed", __func__);
}
return rc;
}
INIT_COMPONENT_EXPORT(lwp_component_init);
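
The hunk above chains rt_channel_component_init() into the existing else-if sequence, so the mutex behind the channel list is set up during component init and the first failing step determines the return code. A self-contained sketch of that chaining pattern, with two hypothetical subsystem inits standing in for lwp_pid_init() and rt_channel_component_init():

#include <rtthread.h>

#define DBG_TAG "demo"
#define DBG_LVL DBG_LOG
#include <rtdbg.h>

static int first_init(void)  { return RT_EOK; }   /* hypothetical subsystems */
static int second_init(void) { return RT_EOK; }

static int demo_component_init(void)
{
    int rc;

    if ((rc = first_init()) != RT_EOK)
    {
        LOG_E("%s: first_init() failed", __func__);
    }
    else if ((rc = second_init()) != RT_EOK)
    {
        LOG_E("%s: second_init() failed", __func__);
    }
    return rc;   /* the first error (or RT_EOK) propagates to the init table */
}
INIT_COMPONENT_EXPORT(demo_component_init);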


@ -54,10 +54,8 @@ typedef struct rt_ipc_msg *rt_ipc_msg_t;
static rt_ipc_msg_t _ipc_msg_free_list = (rt_ipc_msg_t)RT_NULL; /* released chain */
static int rt_ipc_msg_used = 0; /* first unallocated entry */
static struct rt_ipc_msg ipc_msg_pool[RT_CH_MSG_MAX_NR]; /* initial message array */
static struct rt_spinlock ipc_big_lock;
#define ipc_list_lock ipc_big_lock
#define ipc_ch_lock ipc_big_lock
static struct rt_mutex _chn_obj_lock;
static struct rt_spinlock _msg_list_lock; /* lock protect of _ipc_msg_free_list */
/**
* Allocate an IPC message from the statically-allocated array.
@ -65,7 +63,9 @@ static struct rt_spinlock ipc_big_lock;
static rt_ipc_msg_t _ipc_msg_alloc(void)
{
rt_ipc_msg_t p = (rt_ipc_msg_t)RT_NULL;
rt_base_t level;
level = rt_spin_lock_irqsave(&_msg_list_lock);
if (_ipc_msg_free_list) /* use the released chain first */
{
p = _ipc_msg_free_list;
@ -76,6 +76,7 @@ static rt_ipc_msg_t _ipc_msg_alloc(void)
p = &ipc_msg_pool[rt_ipc_msg_used];
rt_ipc_msg_used++;
}
rt_spin_unlock_irqrestore(&_msg_list_lock, level);
return p;
}
@ -84,8 +85,12 @@ static rt_ipc_msg_t _ipc_msg_alloc(void)
*/
static void _ipc_msg_free(rt_ipc_msg_t p_msg)
{
p_msg->msg.sender = (void*)_ipc_msg_free_list;
rt_base_t level;
level = rt_spin_lock_irqsave(&_msg_list_lock);
p_msg->msg.sender = (void *)_ipc_msg_free_list;
_ipc_msg_free_list = p_msg;
rt_spin_unlock_irqrestore(&_msg_list_lock, level);
}
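
With the big lock gone, _ipc_msg_alloc() and _ipc_msg_free() above now bracket the free-list manipulation with _msg_list_lock themselves. For reference, the same allocator pattern in stand-alone form, guarded by one IRQ-safe spinlock (the struct, pool size, and function names here are illustrative, not taken from the diff):

#include <rtthread.h>

#define POOL_SIZE 16                          /* illustrative size */

struct pool_msg { struct pool_msg *next; };

static struct pool_msg msg_pool[POOL_SIZE];   /* static backing array */
static struct pool_msg *free_chain = RT_NULL; /* chain of released entries */
static int pool_used = 0;                     /* first never-used slot */
static struct rt_spinlock pool_lock;          /* guards both fields above */

static void pool_lock_setup(void)
{
    rt_spin_lock_init(&pool_lock);            /* call once at startup */
}

static struct pool_msg *pool_alloc(void)
{
    struct pool_msg *p = RT_NULL;
    rt_base_t level = rt_spin_lock_irqsave(&pool_lock);

    if (free_chain)                           /* reuse a released entry first */
    {
        p = free_chain;
        free_chain = p->next;
    }
    else if (pool_used < POOL_SIZE)           /* otherwise take a fresh slot */
    {
        p = &msg_pool[pool_used++];
    }
    rt_spin_unlock_irqrestore(&pool_lock, level);
    return p;                                 /* RT_NULL when the pool is exhausted */
}

static void pool_free(struct pool_msg *p)
{
    rt_base_t level = rt_spin_lock_irqsave(&pool_lock);
    p->next = free_chain;                     /* push back onto the released chain */
    free_chain = p;
    rt_spin_unlock_irqrestore(&pool_lock, level);
}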
/**
@ -97,7 +102,7 @@ static void rt_ipc_msg_init(rt_ipc_msg_t msg, struct rt_channel_msg *data, rt_ui
msg->need_reply = need_reply;
msg->msg = *data;
msg->msg.sender = (void*)rt_thread_self();
msg->msg.sender = (void *)rt_thread_self();
rt_list_init(&msg->mlist);
}
@ -160,7 +165,6 @@ rt_inline rt_err_t rt_channel_list_suspend(rt_list_t *list, struct rt_thread *th
return ret;
}
static void _rt_channel_check_wq_wakup_locked(rt_channel_t ch)
{
if (rt_list_isempty(&ch->wait_msg))
@ -171,6 +175,11 @@ static void _rt_channel_check_wq_wakup_locked(rt_channel_t ch)
rt_wqueue_wakeup(&ch->reader_queue, 0);
}
rt_err_t rt_channel_component_init(void)
{
return rt_mutex_init(&_chn_obj_lock, "rt_chnannel", RT_IPC_FLAG_PRIO);
}
/**
* Create a new or open an existing IPC channel.
*/
@ -178,7 +187,7 @@ rt_channel_t rt_raw_channel_open(const char *name, int flags)
{
rt_err_t err = RT_EOK;
rt_channel_t ch = RT_NULL;
rt_base_t level;
struct rt_object *object;
struct rt_list_node *node;
struct rt_object_information *information;
@ -194,10 +203,15 @@ rt_channel_t rt_raw_channel_open(const char *name, int flags)
* - Channel Object list (RW; this may write to a channel if needed, and
* the RCU operation of the routine should be atomic)
*/
rt_spin_lock(&ipc_list_lock);
information = rt_object_get_information(RT_Object_Class_Channel);
RT_ASSERT(information != RT_NULL);
err = rt_mutex_take_interruptible(&_chn_obj_lock, RT_WAITING_FOREVER);
if (err != RT_EOK)
{
return RT_NULL;
}
for (node = information->object_list.next;
node != &(information->object_list);
node = node->next)
@ -212,7 +226,9 @@ rt_channel_t rt_raw_channel_open(const char *name, int flags)
}
/* find the IPC channel with the specific name */
ch = (rt_channel_t)object;
level = rt_spin_lock_irqsave(&ch->slock);
ch->ref++; /* increase the reference count */
rt_spin_unlock_irqrestore(&ch->slock, level);
break;
}
}
@ -229,6 +245,7 @@ rt_channel_t rt_raw_channel_open(const char *name, int flags)
if (ch)
{
rt_channel_object_init(&ch->parent); /* suspended receivers */
rt_spin_lock_init(&ch->slock);
rt_list_init(&ch->wait_msg); /* unhandled messages */
rt_list_init(&ch->wait_thread); /* suspended senders */
rt_wqueue_init(&ch->reader_queue); /* reader poll queue */
@ -238,7 +255,7 @@ rt_channel_t rt_raw_channel_open(const char *name, int flags)
}
}
rt_spin_unlock(&ipc_list_lock);
rt_mutex_release(&_chn_obj_lock);
return ch;
}
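
From a caller's point of view the open path is unchanged: rt_raw_channel_open() still hands back the channel or RT_NULL, it just may now also return RT_NULL when taking _chn_obj_lock is interrupted. A hedged usage sketch (the channel name is made up, O_CREAT is the customary flag for creating a missing channel, and the header name may differ per build setup):

#include <rtthread.h>
#include <fcntl.h>       /* O_CREAT */
#include "lwp_ipc.h"     /* rt_raw_channel_* declarations (header name assumed) */

static void open_close_sketch(void)
{
    rt_channel_t ch = rt_raw_channel_open("demo_ch", O_CREAT);

    if (ch != RT_NULL)
    {
        /* ... rt_raw_channel_send() / rt_raw_channel_recv() ... */
        rt_raw_channel_close(ch);
    }
}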
@ -248,23 +265,25 @@ rt_channel_t rt_raw_channel_open(const char *name, int flags)
*/
rt_err_t rt_raw_channel_close(rt_channel_t ch)
{
rt_err_t rc = RT_EOK;
rt_err_t rc = -RT_EIO;
rt_base_t level;
RT_DEBUG_NOT_IN_INTERRUPT;
if (ch == RT_NULL)
if (ch != RT_NULL)
{
rc = -RT_EIO;
rc = rt_mutex_take_interruptible(&_chn_obj_lock, RT_WAITING_FOREVER);
if (rc != RT_EOK)
{
return rc;
}
else
{
/**
* Brief: Remove the channel from object list
*
* Note: Critical Section
* - the channel
*/
rt_spin_lock(&ipc_ch_lock);
level = rt_spin_lock_irqsave(&ch->slock);
if (rt_object_get_type(&ch->parent.parent) != RT_Object_Class_Channel)
{
@ -281,6 +300,13 @@ rt_err_t rt_raw_channel_close(rt_channel_t ch)
else
{
ch->ref--;
rc = RT_EOK;
}
rt_spin_unlock_irqrestore(&ch->slock, level);
if (rc == RT_EOK)
{
if (ch->ref == 0)
{
/* wakeup all the suspended receivers and senders */
@ -292,9 +318,9 @@ rt_err_t rt_raw_channel_close(rt_channel_t ch)
rt_object_delete(&ch->parent.parent); /* release the IPC channel structure */
}
rc = RT_EOK;
}
rt_spin_unlock(&ipc_ch_lock);
rt_mutex_release(&_chn_obj_lock);
}
return rc;
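
Worth noting in the close path above: the reference count is dropped inside the channel's own spinlock, while waking suspended threads and deleting the object happen only after that spinlock is released, with the whole sequence still serialized by _chn_obj_lock. A reduced sketch of that ordering (error handling for the object-type check is omitted; the helper name is illustrative):

static rt_err_t close_ordering_sketch(rt_channel_t ch)
{
    rt_err_t rc;
    rt_base_t level;

    rc = rt_mutex_take_interruptible(&_chn_obj_lock, RT_WAITING_FOREVER);
    if (rc != RT_EOK)
        return rc;

    level = rt_spin_lock_irqsave(&ch->slock);
    ch->ref--;                                /* only the counter update is spin-locked */
    rt_spin_unlock_irqrestore(&ch->slock, level);

    if (ch->ref == 0)
    {
        /* wake suspended receivers/senders, then free the object;
         * no spinlock is held across these heavier operations */
        rt_object_delete(&ch->parent.parent);
    }

    rt_mutex_release(&_chn_obj_lock);
    return RT_EOK;
}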
@ -346,7 +372,7 @@ static rt_err_t wakeup_sender_wait_reply(void *object, struct rt_thread *thread)
static void sender_timeout(void *parameter)
{
struct rt_thread *thread = (struct rt_thread*)parameter;
struct rt_thread *thread = (struct rt_thread *)parameter;
rt_channel_t ch;
ch = (rt_channel_t)(thread->wakeup.user_data);
@ -445,10 +471,9 @@ static int _ipc_msg_fd_new(void *file)
rt_atomic_add(&(d->vnode->ref_count), 1);
#else
if (d->vnode)
d->vnode->ref_count ++;
d->vnode->ref_count++;
#endif
return fd;
}
@ -501,6 +526,7 @@ static rt_err_t _do_send_recv_timeout(rt_channel_t ch, rt_channel_msg_t data, in
rt_thread_t thread_recv;
rt_thread_t thread_send = 0;
void (*old_timeout_func)(void *) = 0;
rt_base_t level;
/* IPC message : file descriptor */
if (data->type == RT_CHANNEL_FD)
@ -516,7 +542,9 @@ static rt_err_t _do_send_recv_timeout(rt_channel_t ch, rt_channel_msg_t data, in
thread_send->error = RT_EOK;
}
rt_spin_lock(&ipc_ch_lock);
rc = RT_EOK;
level = rt_spin_lock_irqsave(&ch->slock);
switch (ch->stat)
{
@ -531,7 +559,7 @@ static rt_err_t _do_send_recv_timeout(rt_channel_t ch, rt_channel_msg_t data, in
}
else
{
rt_thread_wakeup_set(thread_send, wakeup_sender_wait_recv, (void*)ch);
rt_thread_wakeup_set(thread_send, wakeup_sender_wait_recv, (void *)ch);
if (time > 0)
{
rt_timer_control(&(thread_send->thread_timer),
@ -575,7 +603,7 @@ static rt_err_t _do_send_recv_timeout(rt_channel_t ch, rt_channel_msg_t data, in
{
ch->reply = thread_send; /* record the current waiting sender */
ch->stat = RT_IPC_STAT_ACTIVE;
rt_thread_wakeup_set(thread_send, wakeup_sender_wait_reply, (void*)ch);
rt_thread_wakeup_set(thread_send, wakeup_sender_wait_reply, (void *)ch);
if (time > 0)
{
rt_timer_control(&(thread_send->thread_timer),
@ -615,12 +643,11 @@ static rt_err_t _do_send_recv_timeout(rt_channel_t ch, rt_channel_msg_t data, in
{
_rt_channel_check_wq_wakup_locked(ch);
}
rt_spin_unlock(&ipc_ch_lock);
rt_spin_unlock_irqrestore(&ch->slock, level);
/* reschedule in order to let the potential receivers run */
rt_schedule();
rt_spin_lock(&ipc_ch_lock);
if (need_reply)
{
if (old_timeout_func)
@ -642,7 +669,10 @@ static rt_err_t _do_send_recv_timeout(rt_channel_t ch, rt_channel_msg_t data, in
}
}
}
rt_spin_unlock(&ipc_ch_lock);
else
{
rt_spin_unlock_irqrestore(&ch->slock, level);
}
return rc;
}
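
The rewritten send path above enforces one rule: ch->slock is never held across rt_schedule(). The lock is released right before the sender yields and re-taken only when the post-wakeup reply bookkeeping needs it; on the error path the lock is simply released. A condensed view of that flow (the wrapper function is illustrative):

#include <rtthread.h>

static void send_wait_sketch(rt_channel_t ch, int need_reply)
{
    rt_base_t level;

    level = rt_spin_lock_irqsave(&ch->slock);
    /* ... enqueue the message and arrange for the sender to suspend ... */
    rt_spin_unlock_irqrestore(&ch->slock, level);

    rt_schedule();                            /* let the receiver run */

    if (need_reply)
    {
        level = rt_spin_lock_irqsave(&ch->slock);
        /* ... return the reply message to the pool while locked ... */
        rt_spin_unlock_irqrestore(&ch->slock, level);
    }
}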
@ -679,6 +709,7 @@ rt_err_t rt_raw_channel_reply(rt_channel_t ch, rt_channel_msg_t data)
DEF_RETURN_CODE(rc);
rt_ipc_msg_t msg;
struct rt_thread *thread;
rt_base_t level;
if (ch == RT_NULL)
{
@ -686,7 +717,7 @@ rt_err_t rt_raw_channel_reply(rt_channel_t ch, rt_channel_msg_t data)
}
else
{
rt_spin_lock(&ipc_ch_lock);
level = rt_spin_lock_irqsave(&ch->slock);
if (rt_object_get_type(&ch->parent.parent) != RT_Object_Class_Channel)
{
@ -722,7 +753,7 @@ rt_err_t rt_raw_channel_reply(rt_channel_t ch, rt_channel_msg_t data)
rc = RT_EOK;
}
}
rt_spin_unlock(&ipc_ch_lock);
rt_spin_unlock_irqrestore(&ch->slock, level);
rt_schedule();
}
@ -734,37 +765,39 @@ static rt_err_t wakeup_receiver(void *object, struct rt_thread *thread)
{
rt_channel_t ch;
rt_err_t ret;
rt_base_t level;
ch = (rt_channel_t)object;
level = rt_spin_lock_irqsave(&ch->slock);
ch->stat = RT_IPC_STAT_IDLE;
thread->error = -RT_EINTR;
ret = rt_channel_list_resume(&ch->parent.suspend_thread);
rt_spin_lock(&ipc_ch_lock);
_rt_channel_check_wq_wakup_locked(ch);
rt_spin_unlock(&ipc_ch_lock);
rt_spin_unlock_irqrestore(&ch->slock, level);
return ret;
}
static void receiver_timeout(void *parameter)
{
struct rt_thread *thread = (struct rt_thread*)parameter;
struct rt_thread *thread = (struct rt_thread *)parameter;
rt_channel_t ch;
rt_base_t level;
ch = (rt_channel_t)(thread->wakeup.user_data);
level = rt_spin_lock_irqsave(&ch->slock);
ch->stat = RT_IPC_STAT_IDLE;
thread->error = -RT_ETIMEOUT;
thread->wakeup.func = RT_NULL;
rt_spin_lock(&ipc_ch_lock);
rt_list_remove(&(thread->tlist));
/* insert to schedule ready list */
rt_schedule_insert_thread(thread);
_rt_channel_check_wq_wakup_locked(ch);
rt_spin_unlock(&ipc_ch_lock);
rt_spin_unlock_irqrestore(&ch->slock, level);
/* do schedule */
rt_schedule();
@ -779,6 +812,7 @@ static rt_err_t _rt_raw_channel_recv_timeout(rt_channel_t ch, rt_channel_msg_t d
struct rt_thread *thread;
rt_ipc_msg_t msg_ret;
void (*old_timeout_func)(void *) = 0;
rt_base_t level;
RT_DEBUG_NOT_IN_INTERRUPT;
@ -787,7 +821,7 @@ static rt_err_t _rt_raw_channel_recv_timeout(rt_channel_t ch, rt_channel_msg_t d
return -RT_EIO;
}
rt_spin_lock(&ipc_ch_lock);
level = rt_spin_lock_irqsave(&ch->slock);
if (rt_object_get_type(&ch->parent.parent) != RT_Object_Class_Channel)
{
@ -832,8 +866,8 @@ static rt_err_t _rt_raw_channel_recv_timeout(rt_channel_t ch, rt_channel_msg_t d
rc = rt_channel_list_suspend(&ch->parent.suspend_thread, thread);
if (rc == RT_EOK)
{
rt_thread_wakeup_set(thread, wakeup_receiver, (void*)ch);
ch->stat = RT_IPC_STAT_WAIT;/* no valid suspended senders */
rt_thread_wakeup_set(thread, wakeup_receiver, (void *)ch);
ch->stat = RT_IPC_STAT_WAIT; /* no valid suspended senders */
thread->error = RT_EOK;
if (time > 0)
{
@ -849,11 +883,10 @@ static rt_err_t _rt_raw_channel_recv_timeout(rt_channel_t ch, rt_channel_msg_t d
&time);
rt_timer_start(&(thread->thread_timer));
}
rt_spin_unlock(&ipc_ch_lock);
rt_spin_unlock_irqrestore(&ch->slock, level);
rt_schedule(); /* let the senders run */
rt_spin_lock(&ipc_ch_lock);
if (old_timeout_func)
{
rt_timer_control(&(thread->thread_timer),
@ -872,11 +905,13 @@ static rt_err_t _rt_raw_channel_recv_timeout(rt_channel_t ch, rt_channel_msg_t d
_ipc_msg_free(thread->msg_ret); /* put back the message to kernel */
thread->msg_ret = RT_NULL;
}
level = rt_spin_lock_irqsave(&ch->slock);
}
}
}
rt_spin_unlock(&ipc_ch_lock);
rt_spin_unlock_irqrestore(&ch->slock, level);
RETURN(rc);
}
@ -976,28 +1011,36 @@ static int channel_fops_poll(struct dfs_file *file, struct rt_pollreq *req)
{
int mask = POLLOUT;
rt_channel_t ch;
rt_base_t level;
ch = (rt_channel_t)file->vnode->data;
level = rt_spin_lock_irqsave(&ch->slock);
rt_poll_add(&(ch->reader_queue), req);
if (ch->stat != RT_IPC_STAT_IDLE)
{
rt_spin_unlock_irqrestore(&ch->slock, level);
return mask;
}
if (!rt_list_isempty(&ch->wait_msg))
{
mask |= POLLIN;
}
rt_spin_unlock_irqrestore(&ch->slock, level);
return mask;
}
static int channel_fops_close(struct dfs_file *file)
{
rt_channel_t ch;
rt_base_t level;
RT_DEBUG_NOT_IN_INTERRUPT;
rt_spin_lock(&ipc_ch_lock);
ch = (rt_channel_t)file->vnode->data;
level = rt_spin_lock_irqsave(&ch->slock);
if (file->vnode->ref_count == 1)
{
ch->ref--;
@ -1010,11 +1053,20 @@ static int channel_fops_close(struct dfs_file *file)
/* all pending IPC messages will be lost */
rt_list_init(&ch->wait_msg);
rt_spin_unlock_irqrestore(&ch->slock, level);
rt_object_delete(&ch->parent.parent); /* release the IPC channel structure */
}
else
{
rt_spin_unlock_irqrestore(&ch->slock, level);
}
}
else
{
rt_spin_unlock_irqrestore(&ch->slock, level);
}
rt_spin_unlock(&ipc_ch_lock);
return 0;
}
@ -1035,6 +1087,7 @@ int lwp_channel_open(int fdt_type, const char *name, int flags)
{
goto quit;
}
d = lwp_fd_get(fdt_type, fd);
d->vnode = (struct dfs_vnode *)rt_malloc(sizeof(struct dfs_vnode));
if (!d->vnode)
@ -1119,6 +1172,7 @@ rt_err_t lwp_channel_close(int fdt_type, int fd)
rt_err_t lwp_channel_send(int fdt_type, int fd, rt_channel_msg_t data)
{
rt_channel_t ch;
ch = fd_2_channel(fdt_type, fd);
if (ch)
{
@ -1130,6 +1184,7 @@ rt_err_t lwp_channel_send(int fdt_type, int fd, rt_channel_msg_t data)
rt_err_t lwp_channel_send_recv_timeout(int fdt_type, int fd, rt_channel_msg_t data, rt_channel_msg_t data_ret, rt_int32_t time)
{
rt_channel_t ch;
ch = fd_2_channel(fdt_type, fd);
if (ch)
{
@ -1141,6 +1196,7 @@ rt_err_t lwp_channel_send_recv_timeout(int fdt_type, int fd, rt_channel_msg_t da
rt_err_t lwp_channel_reply(int fdt_type, int fd, rt_channel_msg_t data)
{
rt_channel_t ch;
ch = fd_2_channel(fdt_type, fd);
if (ch)
{
@ -1152,6 +1208,7 @@ rt_err_t lwp_channel_reply(int fdt_type, int fd, rt_channel_msg_t data)
rt_err_t lwp_channel_recv_timeout(int fdt_type, int fd, rt_channel_msg_t data, rt_int32_t time)
{
rt_channel_t ch;
ch = fd_2_channel(fdt_type, fd);
if (ch)
{
@ -1215,44 +1272,45 @@ static int list_channel(void)
RT_DEBUG_NOT_IN_INTERRUPT;
const char* stat_strs[] = {"idle", "wait", "active"};
const char *stat_strs[] = {"idle", "wait", "active"};
information = rt_object_get_information(RT_Object_Class_Channel);
RT_ASSERT(information != RT_NULL);
count = 0;
rt_spin_lock(&ipc_list_lock);
rt_mutex_take(&_chn_obj_lock, RT_WAITING_FOREVER);
/* get the count of IPC channels */
for (node = information->object_list.next;
node != &(information->object_list);
node = node->next)
{
count ++;
count++;
}
rt_spin_unlock(&ipc_list_lock);
rt_mutex_release(&_chn_obj_lock);
if (count == 0) return 0;
if (count == 0)
return 0;
channels = (rt_channel_t *) rt_calloc(count, sizeof(rt_channel_t));
if (channels == RT_NULL) return 0; /* out of memory */
channels = (rt_channel_t *)rt_calloc(count, sizeof(rt_channel_t));
if (channels == RT_NULL)
return 0; /* out of memory */
index = 0;
rt_spin_lock(&ipc_list_lock);
rt_mutex_take(&_chn_obj_lock, RT_WAITING_FOREVER);
/* retrieve pointer of IPC channels */
for (node = information->object_list.next;
count > 0 && node != &(information->object_list);
count--, node = node->next)
for (index = 0, node = information->object_list.next;
index < count && node != &(information->object_list);
node = node->next)
{
object = rt_list_entry(node, struct rt_object, list);
channels[index] = (rt_channel_t)object;
index ++;
index++;
}
rt_spin_unlock(&ipc_list_lock);
rt_mutex_release(&_chn_obj_lock);
rt_kprintf(" channel state\n");
rt_kprintf("-------- -------\n");
for (index = 0; index < count; index ++)
for (index = 0; index < count; index++)
{
if (channels[index] != RT_NULL)
{
@ -1267,4 +1325,3 @@ static int list_channel(void)
return 0;
}
MSH_CMD_EXPORT(list_channel, list IPC channel information);
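
list_channel() keeps its two-pass shape, only guarded by the object mutex now: count the channels under the lock, allocate the snapshot array with no lock held, copy the pointers under the lock again, and print from the snapshot lock-free. A condensed sketch of that pattern (the helper name and output handling are illustrative):

static int channel_snapshot(struct rt_object_information *info, rt_channel_t **out)
{
    struct rt_list_node *node;
    rt_channel_t *vec;
    int count = 0, index = 0;

    rt_mutex_take(&_chn_obj_lock, RT_WAITING_FOREVER);
    for (node = info->object_list.next; node != &info->object_list; node = node->next)
        count++;                              /* pass 1: size the snapshot */
    rt_mutex_release(&_chn_obj_lock);

    if (count == 0)
        return 0;

    vec = (rt_channel_t *)rt_calloc(count, sizeof(rt_channel_t));
    if (vec == RT_NULL)
        return 0;                             /* out of memory */

    rt_mutex_take(&_chn_obj_lock, RT_WAITING_FOREVER);
    for (node = info->object_list.next;       /* pass 2: copy pointers under the lock */
         index < count && node != &info->object_list;
         node = node->next)
    {
        vec[index++] = (rt_channel_t)rt_list_entry(node, struct rt_object, list);
    }
    rt_mutex_release(&_chn_obj_lock);

    *out = vec;                               /* caller prints, then rt_free()s */
    return index;
}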


@ -63,6 +63,8 @@ rt_err_t rt_raw_channel_recv(rt_channel_t ch, rt_channel_msg_t data);
rt_err_t rt_raw_channel_recv_timeout(rt_channel_t ch, rt_channel_msg_t data, rt_int32_t time);
rt_err_t rt_raw_channel_peek(rt_channel_t ch, rt_channel_msg_t data);
rt_err_t rt_channel_component_init(void);
#ifdef __cplusplus
}
#endif


@ -1449,6 +1449,7 @@ struct rt_channel
{
struct rt_ipc_object parent; /**< inherit from object */
struct rt_thread *reply; /**< the thread will be reply */
struct rt_spinlock slock; /**< spinlock of this channel */
rt_list_t wait_msg; /**< the wait queue of sender msg */
rt_list_t wait_thread; /**< the wait queue of sender thread */
rt_wqueue_t reader_queue; /**< channel poll queue */