✨ feat: spinlock should lock sched (#8360)
commit e31fa93423, parent a01b66d57b

include/rtthread.h

@@ -20,6 +20,7 @@
  * 2023-05-20     Bernard      add rtatomic.h header file to included files.
  * 2023-06-30     ChuShicheng  move debug check from the rtdebug.h
  * 2023-10-16     Shell        Support a new backtrace framework
+ * 2023-12-10     xqyjlj       fix spinlock in up
  */

 #ifndef __RT_THREAD_H__
@@ -553,11 +554,34 @@ void rt_spin_unlock(struct rt_spinlock *lock);
 rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock);
 void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level);
 #else
-#define rt_spin_lock_init(lock)                do {RT_UNUSED(lock);} while (0)
-#define rt_spin_lock(lock)                     do {RT_UNUSED(lock);} while (0)
-#define rt_spin_unlock(lock)                   do {RT_UNUSED(lock);} while (0)
-rt_inline rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock) {RT_UNUSED(lock);return rt_hw_interrupt_disable();}
-#define rt_spin_unlock_irqrestore(lock, level) do {RT_UNUSED(lock); rt_hw_interrupt_enable(level);} while (0)
+
+rt_inline void rt_spin_lock_init(struct rt_spinlock *lock)
+{
+    RT_UNUSED(lock);
+}
+rt_inline void rt_spin_lock(struct rt_spinlock *lock)
+{
+    RT_UNUSED(lock);
+    rt_enter_critical();
+}
+rt_inline void rt_spin_unlock(struct rt_spinlock *lock)
+{
+    RT_UNUSED(lock);
+    rt_exit_critical();
+}
+rt_inline rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
+{
+    rt_base_t level;
+    RT_UNUSED(lock);
+    level = rt_hw_interrupt_disable();
+    return level;
+}
+rt_inline void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
+{
+    RT_UNUSED(lock);
+    rt_hw_interrupt_enable(level);
+}
+
 #endif /* RT_USING_SMP */

 /**@}*/

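With this change the UP (!RT_USING_SMP) stubs are no longer pure no-ops: rt_spin_lock()/rt_spin_unlock() enter and leave a scheduler critical section, while the irqsave variants keep mapping to plain interrupt masking. A minimal caller-side sketch of the new semantics (demo names are hypothetical, not part of this commit):

    #include <rtthread.h>

    static struct rt_spinlock demo_lock;    /* init once with rt_spin_lock_init(&demo_lock) */
    static int demo_counter;

    static void thread_only_path(void)
    {
        rt_spin_lock(&demo_lock);           /* UP: rt_enter_critical(), preemption off */
        demo_counter++;
        rt_spin_unlock(&demo_lock);         /* UP: rt_exit_critical() */
    }

    static void irq_shared_path(void)
    {
        /* Still masks interrupts on UP, as required when an ISR touches the data. */
        rt_base_t level = rt_spin_lock_irqsave(&demo_lock);
        demo_counter++;
        rt_spin_unlock_irqrestore(&demo_lock, level);
    }
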
src/clock.c

@@ -169,6 +169,10 @@ RTM_EXPORT(rt_tick_from_millisecond);
  */
 rt_weak rt_tick_t rt_tick_get_millisecond(void)
 {
+#if RT_TICK_PER_SECOND == 0 // make cppcheck happy
+#error "RT_TICK_PER_SECOND must be greater than zero"
+#endif
+
 #if 1000 % RT_TICK_PER_SECOND == 0u
     return rt_tick_get() * (1000u / RT_TICK_PER_SECOND);
 #else

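For example, assuming RT_TICK_PER_SECOND is 100 (a common configuration), 1000 % 100 == 0 and the fast path applies: each tick lasts 1000 / 100 = 10 ms, so a tick count of 12345 converts to 123450 ms. The new #error guard rejects a zero divisor at preprocessing time instead of leaving cppcheck to flag a possible division by zero.
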
src/cpu.c

@@ -7,6 +7,7 @@
  * Date           Author       Notes
  * 2018-10-30     Bernard      The first version
  * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
+ * 2023-12-10     xqyjlj       spinlock should lock sched
  */
 #include <rthw.h>
 #include <rtthread.h>

@@ -44,7 +45,7 @@ void rt_spin_lock_init(struct rt_spinlock *lock)
 RTM_EXPORT(rt_spin_lock_init)

 /**
- * @brief This function will lock the spinlock.
+ * @brief This function will lock the spinlock, will lock the thread scheduler.
  *
  * @note If the spinlock is locked, the current CPU will keep polling the spinlock state
  * until the spinlock is unlocked.

@@ -53,6 +54,7 @@ RTM_EXPORT(rt_spin_lock_init)
  */
 void rt_spin_lock(struct rt_spinlock *lock)
 {
+    rt_enter_critical();
     rt_hw_spin_lock(&lock->lock);
 #if defined(RT_DEBUGING_SPINLOCK)
     if (rt_cpu_self() != RT_NULL)

@@ -65,7 +67,7 @@ void rt_spin_lock(struct rt_spinlock *lock)
 RTM_EXPORT(rt_spin_lock)

 /**
- * @brief This function will unlock the spinlock.
+ * @brief This function will unlock the spinlock, will unlock the thread scheduler.
  *
  * @param lock is a pointer to the spinlock.
  */

@@ -76,11 +78,12 @@ void rt_spin_unlock(struct rt_spinlock *lock)
     lock->owner = __OWNER_MAGIC;
     lock->pc = RT_NULL;
 #endif /* RT_DEBUGING_SPINLOCK */
+    rt_exit_critical();
 }
 RTM_EXPORT(rt_spin_unlock)

 /**
- * @brief This function will disable the local interrupt and then lock the spinlock.
+ * @brief This function will disable the local interrupt and then lock the spinlock, will lock the thread scheduler.
  *
  * @note If the spinlock is locked, the current CPU will keep polling the spinlock state
  * until the spinlock is unlocked.

@@ -94,6 +97,7 @@ rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
     unsigned long level;

     level = rt_hw_local_irq_disable();
+    rt_enter_critical();
     rt_hw_spin_lock(&lock->lock);
 #if defined(RT_DEBUGING_SPINLOCK)
     if (rt_cpu_self() != RT_NULL)

@@ -107,7 +111,7 @@ rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
 RTM_EXPORT(rt_spin_lock_irqsave)

 /**
- * @brief This function will unlock the spinlock and then restore current cpu interrupt status.
+ * @brief This function will unlock the spinlock and then restore current cpu interrupt status, will unlock the thread scheduler.
  *
  * @param lock is a pointer to the spinlock.
  *

@@ -121,6 +125,7 @@ void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
 #endif /* RT_DEBUGING_SPINLOCK */
     rt_hw_spin_unlock(&lock->lock);
     rt_hw_local_irq_enable(level);
+    rt_exit_critical();
 }
 RTM_EXPORT(rt_spin_unlock_irqrestore)

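Taken together, the SMP paths now bracket the whole critical section with a scheduler lock: acquisition enters the critical section before spinning on the hardware lock, and release drops the hardware lock (and, for the irqsave variant, re-enables interrupts) before the scheduler is unlocked. A caller-side sketch of the resulting guarantee (demo names are hypothetical):

    #include <rtthread.h>

    static struct rt_spinlock list_lock;   /* hypothetical; init with rt_spin_lock_init() */
    static rt_list_t pending = RT_LIST_OBJECT_INIT(pending);

    void enqueue(rt_list_t *node)
    {
        /* While the lock is held the current thread can neither be
         * preempted nor migrated, because the scheduler is locked too. */
        rt_spin_lock(&list_lock);
        rt_list_insert_before(&pending, node);
        rt_spin_unlock(&list_lock);        /* hw lock dropped first, scheduler last */
    }
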
@@ -221,7 +226,7 @@ void rt_cpus_lock_status_restore(struct rt_thread *thread)
 #endif
     if (pcpu->current_thread != RT_NULL )
     {
-        rt_spin_unlock(&(pcpu->current_thread->spinlock));
+        rt_hw_spin_unlock(&(pcpu->current_thread->spinlock.lock));
         if ((pcpu->current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
         {
             rt_schedule_insert_thread(pcpu->current_thread);

src/scheduler_mp.c

@@ -30,6 +30,7 @@
  * 2022-01-07     Gabriel      Moving __on_rt_xxxxx_hook to scheduler.c
  * 2023-03-27     rose_man     Split into scheduler upc and scheduler_mp.c
  * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
+ * 2023-12-10     xqyjlj       use rt_hw_spinlock
  */

 #include <rtthread.h>

@@ -40,7 +41,7 @@
 #include <rtdbg.h>

 rt_list_t rt_thread_priority_table[RT_THREAD_PRIORITY_MAX];
-static RT_DEFINE_SPINLOCK(_spinlock);
+static rt_hw_spinlock_t _mp_scheduler_spinlock;
 rt_uint32_t rt_thread_ready_priority_group;
 #if RT_THREAD_PRIORITY_MAX > 32
 /* Maximum priority level, 256 */

@@ -122,7 +123,7 @@ static void _scheduler_stack_check(struct rt_thread *thread)
         rt_kprintf("thread:%s stack overflow\n", thread->parent.name);

         level = rt_hw_local_irq_disable();
-        rt_spin_lock(&_spinlock);
+        rt_hw_spin_lock(&_mp_scheduler_spinlock);
         while (level);
     }
 #endif

@@ -200,6 +201,8 @@ void rt_system_scheduler_init(void)
     LOG_D("start scheduler: max priority 0x%02x",
           RT_THREAD_PRIORITY_MAX);

+    rt_hw_spin_lock_init(&_mp_scheduler_spinlock);
+
     for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++)
     {
         rt_list_init(&rt_thread_priority_table[offset]);

@@ -266,14 +269,14 @@ static void _rt_schedule_insert_thread(struct rt_thread *thread, rt_bool_t is_lock)
     /* disable interrupt */
     if(is_lock)
     {
-        rt_spin_lock(&(thread->spinlock));
+        rt_hw_spin_lock(&(thread->spinlock.lock));
     }

     if ((thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_READY)
     {
         if(is_lock)
         {
-            rt_spin_unlock(&(thread->spinlock));
+            rt_hw_spin_unlock(&(thread->spinlock.lock));
         }
         return;
     }

@@ -284,7 +287,7 @@ static void _rt_schedule_insert_thread(struct rt_thread *thread, rt_bool_t is_lock)
         thread->stat = RT_THREAD_RUNNING | (thread->stat & ~RT_THREAD_STAT_MASK);
         if(is_lock)
         {
-            rt_spin_unlock(&(thread->spinlock));
+            rt_hw_spin_unlock(&(thread->spinlock.lock));
         }
         return;
     }

@@ -317,7 +320,7 @@ static void _rt_schedule_insert_thread(struct rt_thread *thread, rt_bool_t is_lock)
     }
     if(is_lock)
     {
-        rt_spin_unlock(&(thread->spinlock));
+        rt_hw_spin_unlock(&(thread->spinlock.lock));
     }

     cpu_mask = RT_CPU_MASK ^ (1 << cpu_id);

@@ -329,7 +332,7 @@ static void _rt_schedule_insert_thread(struct rt_thread *thread, rt_bool_t is_lock)

         if(is_lock)
         {
-            rt_spin_lock(&(pcpu->spinlock));
+            rt_hw_spin_lock(&(pcpu->spinlock.lock));
         }
 #if RT_THREAD_PRIORITY_MAX > 32
         pcpu->ready_table[thread->number] |= thread->high_mask;

@@ -351,8 +354,8 @@ static void _rt_schedule_insert_thread(struct rt_thread *thread, rt_bool_t is_lock)

         if(is_lock)
         {
-            rt_spin_unlock(&(pcpu->spinlock));
-            rt_spin_unlock(&(thread->spinlock));
+            rt_hw_spin_unlock(&(pcpu->spinlock.lock));
+            rt_hw_spin_unlock(&(thread->spinlock.lock));
         }

         if (cpu_id != bind_cpu)

@@ -397,7 +400,7 @@ static void _rt_schedule_remove_thread(struct rt_thread *thread, rt_bool_t is_lock)
         struct rt_cpu *pcpu = rt_cpu_index(thread->bind_cpu);
         if(is_lock)
         {
-            rt_spin_lock(&(pcpu->spinlock));
+            rt_hw_spin_lock(&(pcpu->spinlock.lock));
         }
         if (rt_list_isempty(&(pcpu->priority_table[thread->current_priority])))
         {

@@ -413,7 +416,7 @@ static void _rt_schedule_remove_thread(struct rt_thread *thread, rt_bool_t is_lock)
         }
         if(is_lock)
         {
-            rt_spin_unlock(&(pcpu->spinlock));
+            rt_hw_spin_unlock(&(pcpu->spinlock.lock));
         }
     }
 }

@@ -428,17 +431,17 @@ void rt_system_scheduler_start(void)
     rt_ubase_t highest_ready_priority;

     rt_hw_local_irq_disable();
-    rt_spin_lock(&_spinlock);
+    rt_hw_spin_lock(&_mp_scheduler_spinlock);

     to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
-    rt_spin_lock(&to_thread->spinlock);
+    rt_hw_spin_lock(&(to_thread->spinlock.lock));
     to_thread->oncpu = rt_hw_cpu_id();

     _rt_schedule_remove_thread(to_thread, RT_TRUE);
     to_thread->stat = RT_THREAD_RUNNING;

-    rt_spin_unlock(&to_thread->spinlock);
-    rt_spin_unlock(&_spinlock);
+    rt_hw_spin_unlock(&(to_thread->spinlock.lock));
+    rt_hw_spin_unlock(&_mp_scheduler_spinlock);

     rt_hw_spin_unlock(&_cpus_lock);

@@ -465,19 +468,19 @@ void rt_schedule(void)
     /* disable interrupt */
     level = rt_hw_local_irq_disable();

-    rt_spin_lock(&_spinlock);
+    rt_hw_spin_lock(&_mp_scheduler_spinlock);

     cpu_id = rt_hw_cpu_id();
     pcpu   = rt_cpu_index(cpu_id);
-    rt_spin_lock(&pcpu->spinlock);
+    rt_hw_spin_lock(&(pcpu->spinlock.lock));
     current_thread = pcpu->current_thread;

     /* whether do switch in interrupt */
     if (rt_atomic_load(&(pcpu->irq_nest)))
     {
         pcpu->irq_switch_flag = 1;
-        rt_spin_unlock(&pcpu->spinlock);
-        rt_spin_unlock(&_spinlock);
+        rt_hw_spin_unlock(&(pcpu->spinlock.lock));
+        rt_hw_spin_unlock(&_mp_scheduler_spinlock);
         rt_hw_local_irq_enable(level);
         goto __exit;
     }

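Note the interrupt path here: when rt_schedule() runs while pcpu->irq_nest is nonzero it only sets irq_switch_flag and backs out; the actual switch is deferred to rt_scheduler_do_irq_switch() at interrupt exit. That is why both functions take the same locks in the same order: the scheduler lock first, then the per-CPU lock, then the per-thread locks.
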
@@ -498,7 +501,7 @@ void rt_schedule(void)
     }
 #endif /* RT_USING_SIGNALS */

-    rt_spin_lock(&(current_thread->spinlock));
+    rt_hw_spin_lock(&(current_thread->spinlock.lock));
     if (rt_atomic_load(&(current_thread->critical_lock_nest)) == 0) /* whether lock scheduler */
     {
         rt_ubase_t highest_ready_priority;

@@ -533,7 +536,7 @@ void rt_schedule(void)

         if (to_thread != current_thread)
         {
-            rt_spin_lock(&(to_thread->spinlock));
+            rt_hw_spin_lock(&(to_thread->spinlock.lock));
         }
         to_thread->oncpu = cpu_id;
         if (to_thread != current_thread)

@@ -560,9 +563,9 @@ void rt_schedule(void)

             RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook, (current_thread));

-            rt_spin_unlock(&(to_thread->spinlock));
-            rt_spin_unlock(&pcpu->spinlock);
-            rt_spin_unlock(&_spinlock);
+            rt_hw_spin_unlock(&(to_thread->spinlock.lock));
+            rt_hw_spin_unlock(&(pcpu->spinlock.lock));
+            rt_hw_spin_unlock(&_mp_scheduler_spinlock);

             need_unlock = RT_FALSE;
             rt_hw_context_switch((rt_ubase_t)&current_thread->sp,

@@ -573,29 +576,29 @@ void rt_schedule(void)

     if(need_unlock)
     {
-        rt_spin_unlock(&(current_thread->spinlock));
-        rt_spin_unlock(&pcpu->spinlock);
-        rt_spin_unlock(&_spinlock);
+        rt_hw_spin_unlock(&(current_thread->spinlock.lock));
+        rt_hw_spin_unlock(&(pcpu->spinlock.lock));
+        rt_hw_spin_unlock(&_mp_scheduler_spinlock);
     }
     rt_hw_local_irq_enable(level);

 #ifdef RT_USING_SIGNALS
     /* check stat of thread for signal */
-    rt_spin_lock(&(current_thread->spinlock));
+    rt_hw_spin_lock(&(current_thread->spinlock));
     if (current_thread->stat & RT_THREAD_STAT_SIGNAL_PENDING)
     {
         extern void rt_thread_handle_sig(rt_bool_t clean_state);

         current_thread->stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;

-        rt_spin_unlock(&(current_thread->spinlock));
+        rt_hw_spin_unlock(&(current_thread->spinlock));

         /* check signal status */
         rt_thread_handle_sig(RT_TRUE);
     }
     else
     {
-        rt_spin_unlock(&(current_thread->spinlock));
+        rt_hw_spin_unlock(&(current_thread->spinlock));
     }
 #endif /* RT_USING_SIGNALS */

@@ -618,10 +621,10 @@ void rt_scheduler_do_irq_switch(void *context)
     rt_bool_t need_unlock = RT_TRUE;

     level = rt_hw_local_irq_disable();
-    rt_spin_lock(&_spinlock);
+    rt_hw_spin_lock(&_mp_scheduler_spinlock);
     cpu_id = rt_hw_cpu_id();
     pcpu   = rt_cpu_index(cpu_id);
-    rt_spin_lock(&pcpu->spinlock);
+    rt_hw_spin_lock(&(pcpu->spinlock.lock));
     current_thread = pcpu->current_thread;

 #ifdef RT_USING_SIGNALS

@@ -642,12 +645,12 @@ void rt_scheduler_do_irq_switch(void *context)

     if (pcpu->irq_switch_flag == 0)
     {
-        rt_spin_unlock(&pcpu->spinlock);
-        rt_spin_unlock(&_spinlock);
+        rt_hw_spin_unlock(&(pcpu->spinlock.lock));
+        rt_hw_spin_unlock(&_mp_scheduler_spinlock);
         rt_hw_local_irq_enable(level);
         return;
     }
-    rt_spin_lock(&(current_thread->spinlock));
+    rt_hw_spin_lock(&(current_thread->spinlock.lock));
     if (rt_atomic_load(&(current_thread->critical_lock_nest)) == 0 &&
         rt_atomic_load(&(pcpu->irq_nest)) == 0)
     {

@@ -686,7 +689,7 @@ void rt_scheduler_do_irq_switch(void *context)

         if (to_thread != current_thread)
         {
-            rt_spin_lock(&(to_thread->spinlock));
+            rt_hw_spin_lock(&(to_thread->spinlock.lock));
         }
         to_thread->oncpu = cpu_id;
         if (to_thread != current_thread)

@@ -707,9 +710,9 @@ void rt_scheduler_do_irq_switch(void *context)

             RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook, (current_thread));

-            rt_spin_unlock(&(to_thread->spinlock));
-            rt_spin_unlock(&pcpu->spinlock);
-            rt_spin_unlock(&_spinlock);
+            rt_hw_spin_unlock(&(to_thread->spinlock.lock));
+            rt_hw_spin_unlock(&(pcpu->spinlock.lock));
+            rt_hw_spin_unlock(&_mp_scheduler_spinlock);

             need_unlock = RT_FALSE;
             rt_hw_context_switch_interrupt(context, (rt_ubase_t)&current_thread->sp,

@@ -720,9 +723,9 @@ void rt_scheduler_do_irq_switch(void *context)

     if(need_unlock)
     {
-        rt_spin_unlock(&(current_thread->spinlock));
-        rt_spin_unlock(&pcpu->spinlock);
-        rt_spin_unlock(&_spinlock);
+        rt_hw_spin_unlock(&(current_thread->spinlock.lock));
+        rt_hw_spin_unlock(&(pcpu->spinlock.lock));
+        rt_hw_spin_unlock(&_mp_scheduler_spinlock);
     }

     rt_hw_local_irq_enable(level);

@@ -739,9 +742,11 @@ void rt_scheduler_do_irq_switch(void *context)
 void rt_schedule_insert_thread(struct rt_thread *thread)
 {
     rt_base_t level;
-    level = rt_spin_lock_irqsave(&_spinlock);
+    level = rt_hw_local_irq_disable();
+    rt_hw_spin_lock(&_mp_scheduler_spinlock);
     _rt_schedule_insert_thread(thread, RT_TRUE);
-    rt_spin_unlock_irqrestore(&_spinlock, level);
+    rt_hw_spin_unlock(&_mp_scheduler_spinlock);
+    rt_hw_local_irq_enable(level);
 }

 /**

@@ -754,11 +759,13 @@ void rt_schedule_insert_thread(struct rt_thread *thread)
 void rt_schedule_remove_thread(struct rt_thread *thread)
 {
     rt_base_t level;
-    level = rt_spin_lock_irqsave(&_spinlock);
-    rt_spin_lock(&thread->spinlock);
+    level = rt_hw_local_irq_disable();
+    rt_hw_spin_lock(&_mp_scheduler_spinlock);
+    rt_hw_spin_lock(&(thread->spinlock.lock));
     _rt_schedule_remove_thread(thread, RT_TRUE);
-    rt_spin_unlock(&thread->spinlock);
-    rt_spin_unlock_irqrestore(&_spinlock, level);
+    rt_hw_spin_unlock(&(thread->spinlock.lock));
+    rt_hw_spin_unlock(&_mp_scheduler_spinlock);
+    rt_hw_local_irq_enable(level);
 }

 /**

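The scheduler's own code moves the opposite way, from rt_spin_lock to raw rt_hw_spin_lock on _mp_scheduler_spinlock and the embedded .lock fields, and that is the necessary counterpart of the API change: rt_spin_lock() now calls rt_enter_critical(), which raises the current thread's critical_lock_nest, while rt_schedule() only switches when that counter is zero. An illustrative sketch of the self-defeating pattern the rewrite avoids (not kernel code; it assumes only the rt_cpu/rt_thread fields visible in the hunks above):

    #include <rtthread.h>

    /* Illustrative only: a scheduler that took a scheduler-locking
     * spinlock internally could never pass its own "may I switch?" test. */
    void broken_schedule(struct rt_thread *current, struct rt_cpu *pcpu)
    {
        rt_spin_lock(&pcpu->spinlock);   /* raises current->critical_lock_nest */

        if (rt_atomic_load(&current->critical_lock_nest) == 0)
        {
            /* unreachable: the lock above already made the count nonzero */
        }

        rt_spin_unlock(&pcpu->spinlock);
    }
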
@@ -17,13 +17,13 @@ import format_ignore
 class CPPCheck:
     def __init__(self, file_list):
         self.file_list = file_list

     def check(self):
         file_list_filtered = [file for file in self.file_list if file.endswith(('.c', '.cpp', '.cc', '.cxx'))]
         logging.info("Start to static code analysis.")
         check_result = True
         for file in file_list_filtered:
-            result = subprocess.run(['cppcheck', '--enable=warning', 'performance', 'portability', '--inline-suppr', '--error-exitcode=1', '--force', file], stdout = subprocess.PIPE, stderr = subprocess.PIPE)
+            result = subprocess.run(['cppcheck', '-DRTM_EXPORT', '--enable=warning', 'performance', 'portability', '--inline-suppr', '--error-exitcode=1', '--force', file], stdout = subprocess.PIPE, stderr = subprocess.PIPE)
             logging.info(result.stdout.decode())
             logging.info(result.stderr.decode())
             if result.stderr:

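The CI tweak predefines RTM_EXPORT for cppcheck, which otherwise reports an unknown macro where the export marker appears at file scope with no trailing semicolon (for example RTM_EXPORT(rt_spin_lock) in src/cpu.c above). A simplified sketch of the marker as the checker needs to understand it; this is an assumption, see rtm.h for the real definition, which emits module-symbol records when RT_USING_MODULE is enabled:

    /* Simplified sketch: with modules disabled the marker expands to nothing. */
    #ifndef RTM_EXPORT
    #define RTM_EXPORT(symbol)
    #endif

    int demo_func(void) { return 0; }   /* hypothetical symbol */
    RTM_EXPORT(demo_func)
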