/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
 * Date           Author       Notes
 * 2024/9/12      zhujiale     the first version
 */

#include "smp.h"

#define DBG_TAG "SMP"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

static struct rt_smp_call rt_smp_work[RT_CPUS_NR];
static rt_atomic_t rt_smp_wait;

static rt_err_t smp_call_handler(struct rt_smp_event *event)
{
    switch (event->event_id)
    {
    case SMP_CALL_EVENT_FUNC:
        event->func(event->data);
        rt_atomic_add(&rt_smp_wait, 1);
        break;
    default:
        LOG_E("unknown event id: %d", event->event_id);
        return -RT_ERROR;
    }
    return RT_EOK;
}

void rt_smp_call_ipi_handler(int vector, void *param)
{
    int cur_cpu = rt_hw_cpu_id();

    rt_spin_lock(&rt_smp_work[cur_cpu].lock);
    if (rt_smp_work[cur_cpu].event.event_id)
    {
        if (smp_call_handler(&rt_smp_work[cur_cpu].event) != RT_EOK)
        {
            LOG_E("failed to handle SMP call event");
        }
        rt_memset(&rt_smp_work[cur_cpu].event, 0, sizeof(struct rt_smp_event));
    }
    rt_spin_unlock(&rt_smp_work[cur_cpu].lock);
}

/**
 * @brief Call a function on the specified CPUs.
 *
 * @param cpu_mask bitmask of the CPUs that should run the function
 * @param func     the function to run
 * @param data     argument passed through to func
 * @param flag     call flag; if SMP_CALL_WAIT_ALL is set, block until
 *                 every targeted CPU has finished, otherwise return
 *                 right after the IPIs are sent
 * @param cond     optional condition callback; when non-NULL, func is
 *                 queued on a CPU only if cond returns true for it
 */
void rt_smp_call_func_cond(int cpu_mask, rt_smp_call_func_back func, void *data, rt_uint8_t flag, rt_smp_cond cond)
{
    struct rt_smp_event event;
    rt_bool_t need_call = RT_TRUE, need_wait = RT_FALSE;
    int cur_cpu = rt_hw_cpu_id();
    int cpuid = 1 << cur_cpu;
    int tmp_id = 0, cpu_nr = 0;
    int tmp_mask;
    rt_base_t irq_flag;

    /* this API must be used from thread context */
    RT_DEBUG_NOT_IN_INTERRUPT;

    if (flag == SMP_CALL_WAIT_ALL)
    {
        need_wait = RT_TRUE;
        rt_atomic_store(&rt_smp_wait, 0);
    }

    if (cpuid & cpu_mask)
    {
        /* the calling CPU is in the mask: run the function locally first */
        func(data);
        cpu_mask = cpu_mask & (~cpuid);
    }

    if (!cpu_mask)
        need_call = RT_FALSE;

    tmp_mask = cpu_mask;
    if (need_call)
    {
        while (tmp_mask)
        {
            /* queue the event on each masked CPU that passes the optional
             * condition; advance the scan either way so the loop always
             * terminates */
            if ((tmp_mask & 1) && (tmp_id < RT_CPUS_NR)
                && (!cond || cond(tmp_id, data)))
            {
                cpu_nr++;
                event.event_id = SMP_CALL_EVENT_FUNC;
                event.func     = func;
                event.data     = data;
                event.cpu_mask = cpu_mask;
                irq_flag = rt_spin_lock_irqsave(&rt_smp_work[tmp_id].lock);
                rt_smp_work[tmp_id].event = event;
                rt_spin_unlock_irqrestore(&rt_smp_work[tmp_id].lock, irq_flag);
            }
            tmp_id++;
            tmp_mask = tmp_mask >> 1;
        }
        rt_hw_ipi_send(RT_FUNC_IPI, cpu_mask);
    }

    if (need_wait)
    {
        /* spin until every queued CPU has reported completion */
        while (rt_atomic_load(&rt_smp_wait) != cpu_nr);
    }
}
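
/*
 * Usage sketch (illustrative only, not part of this module): run a callback
 * on CPU 1 and CPU 2 and block until both have finished. example_hello()
 * and example_call() are hypothetical names; only rt_smp_call_func_cond(),
 * SMP_CALL_WAIT_ALL and RT_NULL come from this file and RT-Thread.
 */
#if 0
static void example_hello(void *data)
{
    /* runs on each targeted CPU, inside the IPI handler */
    rt_kprintf("CPU %d handled SMP call, arg=%s\n", rt_hw_cpu_id(), (char *)data);
}

static void example_call(void)
{
    /* bits 1 and 2 select CPU 1 and CPU 2; wait for both to finish */
    rt_smp_call_func_cond((1 << 1) | (1 << 2), example_hello, "ping", SMP_CALL_WAIT_ALL, RT_NULL);
}
#endif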

void rt_smp_call_each_cpu(rt_smp_call_func_back func, void *data, rt_uint8_t flag)
{
    rt_smp_call_func_cond(RT_ALL_CPU, func, data, flag, RT_NULL);
}

void rt_smp_call_each_cpu_cond(rt_smp_call_func_back func, void *data, rt_uint8_t flag, rt_smp_cond cond_func)
{
    rt_smp_call_func_cond(RT_ALL_CPU, func, data, flag, cond_func);
}

void rt_smp_call_any_cpu(int cpu_mask, rt_smp_call_func_back func, void *data, rt_uint8_t flag)
{
    rt_smp_call_func_cond(cpu_mask, func, data, flag, RT_NULL);
}

void rt_smp_call_any_cpu_cond(int cpu_mask, rt_smp_call_func_back func, void *data, rt_uint8_t flag, rt_smp_cond cond_func)
{
    rt_smp_call_func_cond(cpu_mask, func, data, flag, cond_func);
}
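
/*
 * Conditional-call sketch (illustrative only): broadcast to all CPUs but
 * let the condition callback keep only the odd-numbered ones. The cond
 * signature mirrors the cond(tmp_id, data) call site above; both functions
 * are hypothetical, and example_hello() is the sketch callback from above.
 */
#if 0
static rt_bool_t example_odd_cpu(int cpu_id, void *data)
{
    /* queue the call only on odd-numbered CPUs */
    return (cpu_id % 2) ? RT_TRUE : RT_FALSE;
}

static void example_cond_call(void)
{
    rt_smp_call_each_cpu_cond(example_hello, "ping", SMP_CALL_WAIT_ALL, example_odd_cpu);
}
#endif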

void rt_smp_init(void)
{
    for (int i = 0; i < RT_CPUS_NR; i++)
    {
        rt_memset(&rt_smp_work[i], 0, sizeof(struct rt_smp_call));
        rt_spin_lock_init(&rt_smp_work[i].lock);
    }
}
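
/*
 * Bring-up sketch (an assumption, not something this file enforces): a BSP
 * is expected to call rt_smp_init() during system init and to route
 * RT_FUNC_IPI to rt_smp_call_ipi_handler(), otherwise the IPIs sent by
 * rt_smp_call_func_cond() are never delivered. rt_hw_ipi_handler_install()
 * and rt_hw_interrupt_umask() are the usual RT-Thread hooks; whether the
 * vector must be unmasked explicitly is port-specific.
 */
#if 0
void example_smp_call_setup(void)
{
    rt_smp_init();                                                    /* clear per-CPU work slots, init locks */
    rt_hw_ipi_handler_install(RT_FUNC_IPI, rt_smp_call_ipi_handler);  /* deliver RT_FUNC_IPI to this module   */
    rt_hw_interrupt_umask(RT_FUNC_IPI);                               /* enable the vector (port-specific)    */
}
#endif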