utest: smp_call: add smoke test cases

Signed-off-by: Shell <smokewood@qq.com>
This commit is contained in:
Shell 2024-10-30 19:45:49 +08:00 committed by Rbb666
parent 2f6d98bfcb
commit 81a35861fd
7 changed files with 564 additions and 79 deletions

View File

@ -1,6 +1,6 @@
menu "SMP Testcase" menu "SMP-Call Testcase"
config UTEST_SMP_CALL_FUNC config UTEST_SMP_CALL_FUNC
bool "Call random cpu to run func" bool "SMP-Call test cases"
default n default n
endmenu endmenu

View File

@ -5,8 +5,8 @@ cwd = GetCurrentDir()
src = [] src = []
CPPPATH = [cwd] CPPPATH = [cwd]
if GetDepend(['RT_USING_SMP','UTEST_SMP_CALL_FUNC']): if GetDepend(['RT_USING_SMP', 'UTEST_SMP_CALL_FUNC']):
src += ['smp.c'] src += Glob('smp*.c')
group = DefineGroup('utestcases', src, depend = ['RT_USING_UTESTCASES'], CPPPATH = CPPPATH) group = DefineGroup('utestcases', src, depend = ['RT_USING_UTESTCASES'], CPPPATH = CPPPATH)

View File

@ -1,75 +0,0 @@
#include <rtdevice.h>
#include "utest.h"
#include "utest_assert.h"
#include "smp.h"
/* Number of iterations in which the callback cleared the whole request mask. */
static int pass_count = 0;
/* Expected number of passing iterations; decremented whenever the random mask is 0. */
static int pass = 1000;
/* Serialises updates of the shared mask/counters across CPU callbacks. */
static struct rt_spinlock lock;
/* SMP-call callback: clear the executing CPU's bit in the shared mask;
 * when the mask reaches zero every requested CPU has run, count a pass. */
static void test_call(void *data)
{
    int *mask_ptr;
    int cpu;

    rt_spin_lock(&lock);
    mask_ptr = (int *)data;
    cpu = rt_hw_cpu_id();
    *mask_ptr &= ~(1 << cpu);
    if (*mask_ptr == 0)
    {
        pass_count++;
    }
    rt_spin_unlock(&lock);
}
/*
 * Issue 1000 SMP calls to random CPU subsets with SMP_CALL_WAIT_ALL and
 * verify the callbacks cleared every non-empty mask (pass_count == pass).
 * Fix: declare as (void) -- an empty parameter list is an unprototyped
 * declaration in C and deprecated.
 */
static void test1(void)
{
    int cpu_mask = 0xf;

    for (int i = 0; i < 1000; i++)
    {
        cpu_mask = rand() % 0xf;
        if (cpu_mask == 0)
        {
            /* no CPU requested: test_call never fires this round */
            pass--;
        }
        rt_smp_call_any_cpu(cpu_mask, test_call, &cpu_mask, SMP_CALL_WAIT_ALL);
        if (i % 20 == 0)
        {
            rt_kprintf("#"); /* progress indicator */
        }
    }
    rt_kprintf("\n");
    uassert_true(pass_count == pass);
}
/* SMP-call callback: burn cycles while holding the lock (to widen the
 * race window for the NO_WAIT test), then bump the shared counter. */
static void test_call2(void *data)
{
    int *counter;
    int spin = 100000;

    rt_spin_lock(&lock);
    while (spin-- > 0)
    {
        /* deliberate busy-wait */
    }
    counter = (int *)data;
    (*counter)++;
    rt_spin_unlock(&lock);
}
/*
 * Contrast WAIT_ALL and NO_WAIT semantics of rt_smp_call_each_cpu().
 */
static void test2(void)
{
/* WAIT_ALL: must not return before every CPU ran the callback */
int data = 0;
rt_smp_call_each_cpu(test_call2, &data, SMP_CALL_WAIT_ALL);
uassert_true(data == RT_CPUS_NR);
rt_thread_mdelay(10);
data = 0;
/* NO_WAIT returns immediately; the busy loop inside test_call2 makes it
 * unlikely that all CPUs have finished by the time we check.
 * NOTE(review): this assertion is timing-dependent and could fail
 * spuriously on a fast target -- confirm the intended guarantee. */
rt_smp_call_each_cpu(test_call2, &data, SMP_CALL_NO_WAIT);
uassert_true(data != RT_CPUS_NR);
}
/* Test-case setup: re-arm the spinlock and zero the bookkeeping counters. */
static rt_err_t utest_tc_init(void)
{
    rt_spin_lock_init(&lock);
    pass_count = 0;
    pass = 1000;
    return RT_EOK;
}
/* Nothing to tear down; state is re-initialised in utest_tc_init(). */
static rt_err_t utest_tc_cleanup(void)
{
    return RT_EOK;
}

/* Register both SMP-call units with the utest framework. */
static void testcase(void)
{
    UTEST_UNIT_RUN(test1);
    UTEST_UNIT_RUN(test2);
}
UTEST_TC_EXPORT(testcase, "testcase.smp.smp", utest_tc_init, utest_tc_cleanup, 10);

View File

@ -0,0 +1,90 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024/9/12 zhujiale the first version
* 2024/10/28 Shell Added more assertions
*/
#include <rtdevice.h>
#include <utest.h>
#include <utest_assert.h>
#include <smp.h>
/* Number of random-mask call rounds performed by _blocking_call(). */
#define TEST_COUNT 10000
/* Rounds in which the callbacks cleared the whole requested mask. */
static int pass_count = 0;
/* Serialises callback updates of the shared mask across CPUs. */
static RT_DEFINE_SPINLOCK(_test_data_lock);
/* SMP-call callback: runs on each requested CPU with interrupts off;
 * clears the executing CPU's bit in the shared mask under the data lock. */
static void _test_smp_cb(void *data)
{
    int *mask;
    int cpu_id;

    if (!rt_hw_interrupt_is_disabled())
    {
        /* SYNC.004 */
        uassert_true(0);
    }

    rt_spin_lock(&_test_data_lock);
    cpu_id = rt_hw_cpu_id();
    mask = (int *)data;
    *mask &= ~(1 << cpu_id);
    rt_spin_unlock(&_test_data_lock);
}
/* Unit: issue TEST_COUNT blocking (WAIT_ALL) SMP calls with random CPU
 * masks; every targeted CPU must have cleared its bit before the call
 * returns, and the random masks must collectively cover all CPUs. */
static void _blocking_call(void)
{
    volatile int cpu_mask; /* cleared bit-by-bit by remote callbacks */
    rt_ubase_t tested_cpus = 0;
    int round;

    for (round = 0; round < TEST_COUNT; round++)
    {
        cpu_mask = rand() % RT_ALL_CPU;
        tested_cpus |= cpu_mask;
        rt_smp_call_cpu_mask(cpu_mask, _test_smp_cb, (void *)&cpu_mask, SMP_CALL_WAIT_ALL);

        if (cpu_mask != 0)
        {
            /* TARG.001, MP.001 */
            uassert_true(0);
            break;
        }
        pass_count++;
    }

    LOG_D("pass_count %d", pass_count);

    /* TARG.001 */
    uassert_true(pass_count == TEST_COUNT);
    /* TOP.001, TOP.002 */
    uassert_true(tested_cpus == RT_ALL_CPU);
}
/* Setup: seed the RNG from the tick counter and reset the pass counter. */
static rt_err_t utest_tc_init(void)
{
    srand(rt_tick_get());
    pass_count = 0;
    return RT_EOK;
}
/* No per-case resources to release. */
static rt_err_t utest_tc_cleanup(void)
{
    return RT_EOK;
}

/* Register the blocking smoke unit. */
static void _testcase(void)
{
    UTEST_UNIT_RUN(_blocking_call);
}
UTEST_TC_EXPORT(_testcase, "testcase.smp.smoke.001", utest_tc_init, utest_tc_cleanup, 10);

View File

@ -0,0 +1,132 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024/10/28 Shell Added smp.smoke
*/
#include <rtdevice.h>
#include <utest.h>
#include <utest_assert.h>
#include <smp.h>
/* Blocking SMP-call rounds performed by each per-CPU worker thread. */
#define PERCPU_TEST_COUNT 10000
/* Progress characters printed per output line. */
#define NEWLINE_ON 80
/* Released once by each worker when it finishes its test loop. */
static struct rt_semaphore _utestd_exited;
static rt_thread_t _utestd[RT_CPUS_NR];
/* Per-requesting-CPU count of callback invocations (updated atomically). */
static rt_atomic_t _entry_counts[RT_CPUS_NR];
/*
 * Print one '#' of progress, breaking the line after every NEWLINE_ON
 * characters. The counter is shared by all CPUs, hence the atomic add.
 * Fix: the original tested `old % NEWLINE_ON == 0` on the pre-increment
 * value, which emits a newline after the very first character (old == 0)
 * rather than after each NEWLINE_ON-th one.
 */
static void _logging_progress(void)
{
    static rt_atomic_t counts;
    rt_ubase_t old;

    rt_kputs("#");
    old = rt_atomic_add(&counts, 1);
    /* old is the count BEFORE this character; +1 = characters printed */
    if ((old + 1) % NEWLINE_ON == 0)
    {
        rt_kputs("\n");
    }
}
/* SMP-call callback: must run with interrupts disabled on the target
 * CPU; logs one progress mark and bumps the requester's entry counter. */
static void _test_smp_cb(void *param)
{
    rt_ubase_t requester = (rt_ubase_t)param;

    if (!rt_hw_interrupt_is_disabled())
    {
        /* SYNC.004 */
        uassert_true(0);
    }

    _logging_progress();
    rt_atomic_add(&_entry_counts[requester], 1);
}
/*
 * Worker thread body, one instance bound to each CPU. Issues
 * PERCPU_TEST_COUNT blocking SMP calls with random target masks and
 * verifies the callback ran exactly once per requested CPU bit.
 */
static void _utestd_entry(void *oncpu_param)
{
rt_ubase_t oncpu = (rt_ubase_t)oncpu_param;
volatile int cpu_mask;
volatile int popcount = 0;
rt_ubase_t tested_cpus = 0;
/* the thread was bound in utest_tc_init(); verify the binding took effect */
if (rt_hw_cpu_id() != oncpu)
{
/* SYNC.004 */
uassert_true(0);
}
for (size_t i = 0; i < PERCPU_TEST_COUNT; i++)
{
cpu_mask = rand() % RT_ALL_CPU;
tested_cpus |= cpu_mask;
/* WAIT_ALL: returns only after every targeted CPU ran the callback,
 * so _entry_counts is stable when compared below */
rt_smp_call_cpu_mask(cpu_mask, _test_smp_cb, oncpu_param, SMP_CALL_WAIT_ALL);
/* each set bit in the mask is one expected callback invocation */
popcount += __builtin_popcount(cpu_mask);
}
LOG_D("popcount %d, _entry_counts[%d] %d", popcount, oncpu, _entry_counts[oncpu]);
/* TARG.001 */
uassert_true(popcount == rt_atomic_load(&_entry_counts[oncpu]));
/* TOP.001, TOP.002 */
uassert_true(tested_cpus == RT_ALL_CPU);
rt_sem_release(&_utestd_exited);
}
static void _blocking_mtsafe_call(void)
{
rt_err_t error;
for (size_t i = 0; i < RT_CPUS_NR; i++)
{
error = rt_thread_startup(_utestd[i]);
/* SYNC.001, SYNC.002, SYNC.003 */
uassert_true(!error);
}
for (size_t i = 0; i < RT_CPUS_NR; i++)
{
rt_sem_take(&_utestd_exited, RT_WAITING_FOREVER);
}
}
static rt_err_t utest_tc_init(void)
{
for (size_t i = 0; i < RT_CPUS_NR; i++)
{
rt_atomic_store(&_entry_counts[i], 0);
_utestd[i] = rt_thread_create("utestd", _utestd_entry, (void *)i,
UTEST_THR_STACK_SIZE, UTEST_THR_PRIORITY,
20);
rt_thread_control(_utestd[i], RT_THREAD_CTRL_BIND_CPU, (void *)i);
/* SYNC.001, SYNC.002, SYNC.003 */
uassert_true(_utestd[i] != RT_NULL);
}
rt_sem_init(&_utestd_exited, "utestd", 0, RT_IPC_FLAG_PRIO);
srand(rt_tick_get());
return RT_EOK;
}
/* Release the exit semaphore initialised in utest_tc_init(). */
static rt_err_t utest_tc_cleanup(void)
{
    rt_sem_detach(&_utestd_exited);
    return RT_EOK;
}

/* Register the multi-threaded blocking smoke unit. */
static void _testcase(void)
{
    UTEST_UNIT_RUN(_blocking_mtsafe_call);
}
UTEST_TC_EXPORT(_testcase, "testcase.smp.smoke.002", utest_tc_init, utest_tc_cleanup, 10);

View File

@ -0,0 +1,128 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024/10/28 Shell Added smp.smoke
*/
#include <rtdevice.h>
#include <utest.h>
#include <utest_assert.h>
#include <smp.h>
/* Async SMP-call rounds performed by each per-CPU worker thread. */
#define PERCPU_TEST_COUNT 10000
/* Progress characters printed per output line. */
#define NEWLINE_ON 80
/* Released once by each worker when it finishes its test loop. */
static struct rt_semaphore _utestd_exited;
static rt_thread_t _utestd[RT_CPUS_NR];
/* Per-requesting-CPU count of callback invocations (updated atomically). */
static rt_atomic_t _entry_counts[RT_CPUS_NR];
/*
 * Print one '#' of progress, breaking the line after every NEWLINE_ON
 * characters. The counter is shared by all CPUs, hence the atomic add.
 * Fix: the original tested the pre-increment value against 0 mod
 * NEWLINE_ON, emitting a newline right after the first character.
 */
static void _logging_progress(void)
{
    static rt_atomic_t counts;
    rt_ubase_t old;

    rt_kputs("#");
    old = rt_atomic_add(&counts, 1);
    /* old is the count BEFORE this character; +1 = characters printed */
    if ((old + 1) % NEWLINE_ON == 0)
    {
        rt_kputs("\n");
    }
}
/* SMP-call callback: asserts it runs with interrupts disabled, then
 * records one invocation against the requesting CPU's counter. */
static void _test_smp_cb(void *param)
{
    rt_ubase_t req_cpu = (rt_ubase_t)param;
    int irq_off = rt_hw_interrupt_is_disabled();

    if (!irq_off)
    {
        /* SYNC.004 */
        uassert_true(0);
    }

    _logging_progress();
    rt_atomic_add(&_entry_counts[req_cpu], 1);
}
/*
 * Worker thread body, bound to one CPU. Issues PERCPU_TEST_COUNT
 * non-blocking (flags == 0) SMP calls with random masks, then migrates
 * across every CPU before comparing expected vs. actual callback counts.
 */
static void _utestd_entry(void *oncpu_param)
{
rt_ubase_t oncpu = (rt_ubase_t)oncpu_param;
volatile int cpu_mask;
volatile int popcount = 0;
rt_thread_t curthr = rt_thread_self();
/* bound in utest_tc_init(); verify the binding took effect */
if (rt_hw_cpu_id() != oncpu)
{
/* SYNC.004 */
uassert_true(0);
}
for (size_t i = 0; i < PERCPU_TEST_COUNT; i++)
{
cpu_mask = rand() % RT_ALL_CPU;
/* flags == 0: asynchronous -- the call may still be in flight here */
rt_smp_call_cpu_mask(cpu_mask, _test_smp_cb, oncpu_param, 0);
popcount += __builtin_popcount(cpu_mask);
}
for (size_t i = 0; i < RT_CPUS_NR; i++)
{
/* NOTE(review): hopping this thread across every CPU appears to act as
 * a barrier that drains the pending async calls before the count check
 * below -- confirm this ordering is actually guaranteed */
rt_thread_control(curthr, RT_THREAD_CTRL_BIND_CPU, (void *)i);
}
LOG_D("popcount %d, _entry_counts[%d] %d", popcount, oncpu, _entry_counts[oncpu]);
/* MP.002 */
uassert_true(popcount == rt_atomic_load(&_entry_counts[oncpu]));
rt_sem_release(&_utestd_exited);
}
static void _async_call(void)
{
for (size_t i = 0; i < RT_CPUS_NR; i++)
{
rt_thread_startup(_utestd[i]);
}
for (size_t i = 0; i < RT_CPUS_NR; i++)
{
rt_sem_take(&_utestd_exited, RT_WAITING_FOREVER);
}
}
static rt_err_t utest_tc_init(void)
{
for (size_t i = 0; i < RT_CPUS_NR; i++)
{
rt_atomic_store(&_entry_counts[i], 0);
_utestd[i] = rt_thread_create("utestd", _utestd_entry, (void *)i,
UTEST_THR_STACK_SIZE, UTEST_THR_PRIORITY,
20);
rt_thread_control(_utestd[i], RT_THREAD_CTRL_BIND_CPU, (void *)i);
/* SYNC.001, SYNC.002, SYNC.003 */
uassert_true(_utestd[i] != RT_NULL);
}
rt_sem_init(&_utestd_exited, "utestd", 0, RT_IPC_FLAG_PRIO);
srand(rt_tick_get());
return RT_EOK;
}
/* Tear down the exit semaphore created in utest_tc_init(). */
static rt_err_t utest_tc_cleanup(void)
{
    rt_sem_detach(&_utestd_exited);
    return RT_EOK;
}

/* Register the asynchronous smoke unit. */
static void _testcase(void)
{
    UTEST_UNIT_RUN(_async_call);
}
UTEST_TC_EXPORT(_testcase, "testcase.smp.smoke.003", utest_tc_init, utest_tc_cleanup, 10);

View File

@ -0,0 +1,210 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024/10/28 Shell Added smp.smoke
*/
#include <rtdevice.h>
#include <utest.h>
#include <utest_assert.h>
#include <smp.h>
/* Request/poll rounds performed by each per-CPU worker thread. */
#define PERCPU_TEST_COUNT 10000
/* Progress characters printed per output line. */
#define NEWLINE_ON 80
/* Upper bound, in ticks, on polling for remote callbacks to land. */
#define MAX_RETRIES (RT_TICK_PER_SECOND)
/* Released once by each worker when it finishes its test loop. */
static struct rt_semaphore _utestd_exited;
static rt_thread_t _utestd[RT_CPUS_NR];
static rt_atomic_t _entry_counts[RT_CPUS_NR];
/* Call-request objects, indexed as [requesting worker][target CPU]. */
static struct rt_smp_call_req _callreq_data[RT_CPUS_NR][RT_CPUS_NR];
/* Per-worker bitmap of CPUs whose callback has executed. */
static rt_ubase_t _masks_data[RT_CPUS_NR];
/* Guards _masks_data against concurrent update from multiple CPUs. */
static RT_DEFINE_SPINLOCK(_test_data_lock);
/*
 * Print one progress character (the worker id digit), breaking the line
 * after every NEWLINE_ON characters; counter shared across CPUs.
 * Fix: the original tested the pre-increment value against 0 mod
 * NEWLINE_ON, emitting a newline right after the first character.
 */
static void _logging_progress(char id)
{
    static rt_atomic_t counts;
    rt_ubase_t old;

    rt_kprintf("%c", id);
    old = rt_atomic_add(&counts, 1);
    /* old is the count BEFORE this character; +1 = characters printed */
    if ((old + 1) % NEWLINE_ON == 0)
    {
        rt_kputs("\n");
    }
}
/*
 * Callback run on each remote CPU for a queued call request: set the
 * executing CPU's bit in the requesting worker's mask under the lock.
 */
static void _reentr_isr_cb(void *param)
{
rt_ubase_t *maskp;
int oncpu;
if (!rt_hw_interrupt_is_disabled())
{
/* SYNC.004 */
uassert_true(0);
}
rt_spin_lock(&_test_data_lock);
oncpu = rt_hw_cpu_id();
maskp = (rt_ubase_t *)param;
*maskp |= (1 << oncpu);
rt_spin_unlock(&_test_data_lock);
/* param points into _masks_data[], so the pointer difference is the
 * requesting worker's index; printed as a single digit */
_logging_progress('0' + (maskp - _masks_data));
}
/*
 * First-stage callback, delivered to the worker's designated CPU: from
 * this interrupt-disabled context, queue a pre-initialised call request
 * (SMP_CALL_NO_LOCAL) to every OTHER CPU, exercising re-entrant
 * request issuing from SMP-call context.
 */
static void _test_smp_call_isr(void *param)
{
rt_err_t error;
rt_ubase_t iter, oncpu = (rt_ubase_t)param;
/* row of request objects owned by this worker (one per target CPU) */
struct rt_smp_call_req *callreqp = _callreq_data[oncpu];
if (rt_hw_cpu_id() != oncpu)
{
/* SYNC.004 */
uassert_true(0);
}
if (!rt_hw_interrupt_is_disabled())
{
/* SYNC.004, PRIV.001 */
uassert_true(0);
}
rt_smp_for_each_remote_cpu(iter, oncpu)
{
error = rt_smp_call_request(iter, SMP_CALL_NO_LOCAL, &callreqp[iter]);
if (error)
{
/* SYNC.002 */
uassert_false(error);
}
}
}
/*
 * Poll *maskp (under the data lock) until it equals exp or MAX_RETRIES
 * ticks elapse; returns the last observed value, which differs from exp
 * on timeout. Before polling, the thread is re-bound across CPUs.
 */
static rt_ubase_t _wait_for_update(rt_ubase_t *maskp, rt_ubase_t exp, int cpuid, rt_thread_t curthr)
{
rt_ubase_t level, current_mask;
for (size_t i = cpuid; i < RT_CPUS_NR; i++)
{
/* NOTE(review): with i in [cpuid, RT_CPUS_NR) the `% RT_CPUS_NR` is a
 * no-op, so only CPUs >= cpuid are visited; the modulo suggests a
 * wrap-around over all CPUs (i < cpuid + RT_CPUS_NR) was intended --
 * confirm the intended binding sequence */
rt_thread_control(curthr, RT_THREAD_CTRL_BIND_CPU, (void *)(i % RT_CPUS_NR));
}
for (size_t i = 0; i < MAX_RETRIES; i++)
{
/* snapshot the mask under the lock so we never read a torn update */
level = rt_spin_lock_irqsave(&_test_data_lock);
current_mask = *maskp;
rt_spin_unlock_irqrestore(&_test_data_lock, level);
if (current_mask == exp)
{
break;
}
rt_thread_delay(1);
}
return current_mask;
}
/*
 * Worker thread body. Each worker targets the NEXT CPU (worker_id =
 * oncpu + 1 mod N): it fires _test_smp_call_isr on that CPU, which in
 * turn queues _reentr_isr_cb to every other CPU; the worker then waits
 * for all those remote callbacks to set their bits, resets the mask and
 * waits for the request objects to be freed before the next round.
 */
static void _utestd_entry(void *oncpu_param)
{
rt_thread_t curthr = rt_thread_self();
rt_ubase_t oncpu = (rt_ubase_t)oncpu_param;
/* shift by one so each worker exercises a CPU other than its own */
rt_ubase_t worker_id = (oncpu + 1) % RT_CPUS_NR;
int cpu_mask = 1ul << worker_id;
/* every CPU except worker_id should report via _reentr_isr_cb */
rt_ubase_t req_cpus_mask = ~cpu_mask & RT_ALL_CPU;
rt_ubase_t *mask_data = &_masks_data[worker_id];
rt_ubase_t current_mask;
rt_ubase_t level;
for (size_t i = 0; i < PERCPU_TEST_COUNT; i++)
{
rt_smp_call_cpu_mask(cpu_mask, _test_smp_call_isr, (void *)worker_id, 0);
current_mask = _wait_for_update(mask_data, req_cpus_mask, worker_id, curthr);
if (current_mask != req_cpus_mask)
{
LOG_I("current mask 0x%x, last fetch 0x%x", *mask_data, current_mask);
/* MP.002, TARG.001 */
uassert_true(0);
break;
}
else
{
rt_ubase_t iter;
/* reset the mask for the next round before re-queuing requests */
level = rt_spin_lock_irqsave(&_test_data_lock);
*mask_data = 0;
rt_spin_unlock_irqrestore(&_test_data_lock, level);
/* the request objects are reused each round; wait until the kernel
 * has released each one before issuing it again */
rt_smp_for_each_remote_cpu(iter, worker_id)
{
rt_smp_request_wait_freed(&_callreq_data[worker_id][iter]);
}
}
}
rt_sem_release(&_utestd_exited);
}
static void _test_reentr_isr_main(void)
{
for (size_t i = 0; i < RT_CPUS_NR; i++)
{
rt_thread_startup(_utestd[i]);
}
for (size_t i = 0; i < RT_CPUS_NR; i++)
{
rt_sem_take(&_utestd_exited, RT_WAITING_FOREVER);
}
}
/*
 * Setup: initialise one call-request object per (worker, target) pair,
 * zero the per-worker masks/counters, create and bind one worker per
 * CPU, init the exit semaphore and seed rand().
 * Fix: check the creation result BEFORE rt_thread_control() -- the
 * original dereferenced _utestd[i] before verifying it is non-NULL.
 */
static rt_err_t utest_tc_init(void)
{
    size_t iter_x, iter_y;

    /* every request of worker iter_x reports into _masks_data[iter_x] */
    rt_smp_for_each_cpu(iter_x)
    {
        rt_smp_for_each_cpu(iter_y)
        {
            rt_smp_call_req_init(&_callreq_data[iter_x][iter_y],
                                 _reentr_isr_cb, &_masks_data[iter_x]);
        }
    }
    for (size_t i = 0; i < RT_CPUS_NR; i++)
    {
        _masks_data[i] = 0;
        rt_atomic_store(&_entry_counts[i], 0);
        _utestd[i] = rt_thread_create("utestd", _utestd_entry, (void *)i,
                                      UTEST_THR_STACK_SIZE, UTEST_THR_PRIORITY + 1,
                                      20);
        uassert_true(_utestd[i] != RT_NULL);
        if (_utestd[i] == RT_NULL)
        {
            return -RT_ENOMEM;
        }
        rt_thread_control(_utestd[i], RT_THREAD_CTRL_BIND_CPU, (void *)i);
    }
    rt_sem_init(&_utestd_exited, "utestd", 0, RT_IPC_FLAG_PRIO);
    srand(rt_tick_get());
    return RT_EOK;
}
/* Tear down the exit semaphore created in utest_tc_init(). */
static rt_err_t utest_tc_cleanup(void)
{
    rt_sem_detach(&_utestd_exited);
    return RT_EOK;
}

/* Register the re-entrant ISR-context smoke unit. */
static void _testcase(void)
{
    UTEST_UNIT_RUN(_test_reentr_isr_main);
}
UTEST_TC_EXPORT(_testcase, "testcase.smp.smoke.004", utest_tc_init, utest_tc_cleanup, 10);