/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018-10-30     Bernard      The first version
 */

#include <rthw.h>
#include <rtthread.h>

#ifdef RT_USING_SMP
static struct rt_cpu rt_cpus[RT_CPUS_NR];
rt_hw_spinlock_t _cpus_lock;

/*
 * disable scheduler
 */
static void rt_preempt_disable(void)
{
    register rt_base_t level;
    struct rt_thread *current_thread;

    /* disable interrupt */
    level = rt_hw_local_irq_disable();

    current_thread = rt_thread_self();
    if (!current_thread)
    {
        rt_hw_local_irq_enable(level);
        return;
    }

    /* lock scheduler for local cpu */
    current_thread->scheduler_lock_nest ++;

    /* enable interrupt */
    rt_hw_local_irq_enable(level);
}

/*
 * enable scheduler
 */
static void rt_preempt_enable(void)
{
    register rt_base_t level;
    struct rt_thread *current_thread;

    /* disable interrupt */
    level = rt_hw_local_irq_disable();

    current_thread = rt_thread_self();
    if (!current_thread)
    {
        rt_hw_local_irq_enable(level);
        return;
    }

    /* unlock scheduler for local cpu */
    current_thread->scheduler_lock_nest --;

    /* pick up any reschedule that was pended while preemption was locked */
    rt_schedule();
    /* enable interrupt */
    rt_hw_local_irq_enable(level);
}

/*
 * initialize the spinlock to the unlocked state; must be called before
 * the lock is first used
 */
void rt_spin_lock_init(struct rt_spinlock *lock)
{
    rt_hw_spin_lock_init(&lock->lock);
}
RTM_EXPORT(rt_spin_lock_init)

/*
 * take the spinlock; preemption on the local cpu is disabled first so the
 * holder cannot be scheduled out while other cpus spin on the lock
 */
void rt_spin_lock(struct rt_spinlock *lock)
{
    rt_preempt_disable();
    rt_hw_spin_lock(&lock->lock);
}
RTM_EXPORT(rt_spin_lock)

/*
 * release the spinlock and re-enable preemption on the local cpu
 */
void rt_spin_unlock(struct rt_spinlock *lock)
{
    rt_hw_spin_unlock(&lock->lock);
    rt_preempt_enable();
}
RTM_EXPORT(rt_spin_unlock)
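
/*
 * Usage sketch (illustrative only, not part of this file's API): a spinlock
 * protecting a counter shared across cpus. The names demo_lock, demo_counter
 * and demo_increase are hypothetical.
 *
 *     static struct rt_spinlock demo_lock;   // init once with rt_spin_lock_init()
 *     static rt_uint32_t demo_counter;
 *
 *     void demo_increase(void)
 *     {
 *         rt_spin_lock(&demo_lock);          // disables preemption, then spins
 *         demo_counter++;                    // critical section
 *         rt_spin_unlock(&demo_lock);        // releases lock, re-enables preemption
 *     }
 */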

/*
 * take the spinlock with local irqs disabled; the previous interrupt
 * level is returned and must be passed to rt_spin_unlock_irqrestore()
 */
rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
{
    rt_base_t level;

    rt_preempt_disable();

    level = rt_hw_local_irq_disable();
    rt_hw_spin_lock(&lock->lock);

    return level;
}
RTM_EXPORT(rt_spin_lock_irqsave)

/*
 * release the spinlock and restore the interrupt level saved by
 * rt_spin_lock_irqsave()
 */
void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
{
    rt_hw_spin_unlock(&lock->lock);
    rt_hw_local_irq_enable(level);

    rt_preempt_enable();
}
RTM_EXPORT(rt_spin_unlock_irqrestore)
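
/*
 * Usage sketch (illustrative only): the irqsave variant suits data that is
 * also touched from interrupt context; the saved level must be handed back
 * to the matching restore call. demo_lock, demo_flags and demo_set_flag
 * are hypothetical names.
 *
 *     static struct rt_spinlock demo_lock;
 *     static volatile rt_uint32_t demo_flags;
 *
 *     void demo_set_flag(rt_uint32_t mask)
 *     {
 *         rt_base_t level = rt_spin_lock_irqsave(&demo_lock);
 *         demo_flags |= mask;
 *         rt_spin_unlock_irqrestore(&demo_lock, level);
 *     }
 */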

/**
 * This function will return the current cpu object.
 */
struct rt_cpu *rt_cpu_self(void)
{
    return &rt_cpus[rt_hw_cpu_id()];
}

/**
 * This function will return the cpu object of the given index.
 * The index is not range-checked; it must be in [0, RT_CPUS_NR).
 */
struct rt_cpu *rt_cpu_index(int index)
{
    return &rt_cpus[index];
}
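
/*
 * Usage sketch (illustrative only): walk every per-cpu object and report
 * the thread it is running. It relies only on RT_CPUS_NR and the
 * current_thread field already used in this file; demo_dump_cpus is a
 * hypothetical name.
 *
 *     void demo_dump_cpus(void)
 *     {
 *         int i;
 *
 *         for (i = 0; i < RT_CPUS_NR; i++)
 *         {
 *             struct rt_cpu *pcpu = rt_cpu_index(i);
 *
 *             rt_kprintf("cpu %d: %s\n", i,
 *                        pcpu->current_thread ?
 *                        pcpu->current_thread->name : "(none)");
 *         }
 *     }
 */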

/**
 * This function will lock the schedulers of all cpus and disable the local irq.
 */
rt_base_t rt_cpus_lock(void)
{
    rt_base_t level;
    struct rt_cpu* pcpu;

    level = rt_hw_local_irq_disable();

    pcpu = rt_cpu_self();
    if (pcpu->current_thread != RT_NULL)
    {
        register rt_ubase_t lock_nest = pcpu->current_thread->cpus_lock_nest;

        /* the lock nests: only the outermost call takes _cpus_lock */
        pcpu->current_thread->cpus_lock_nest++;
        if (lock_nest == 0)
        {
            pcpu->current_thread->scheduler_lock_nest++;
            rt_hw_spin_lock(&_cpus_lock);
        }
    }

    return level;
}
RTM_EXPORT(rt_cpus_lock);

/**
 * This function will unlock the schedulers of all cpus and restore the local irq.
 */
void rt_cpus_unlock(rt_base_t level)
{
    struct rt_cpu* pcpu = rt_cpu_self();

    if (pcpu->current_thread != RT_NULL)
    {
        pcpu->current_thread->cpus_lock_nest--;

        /* only the outermost unlock releases _cpus_lock */
        if (pcpu->current_thread->cpus_lock_nest == 0)
        {
            pcpu->current_thread->scheduler_lock_nest--;
            rt_hw_spin_unlock(&_cpus_lock);
        }
    }
    rt_hw_local_irq_enable(level);
}
RTM_EXPORT(rt_cpus_unlock);
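
/*
 * Usage sketch (illustrative only): the lock/unlock pair must be balanced
 * and the returned level handed back, exactly like the irqsave spinlock
 * pair. Because the lock nests per thread, it is safe even if the caller
 * already holds it.
 *
 *     rt_base_t level;
 *
 *     level = rt_cpus_lock();
 *     ... update state shared by the schedulers of all cpus ...
 *     rt_cpus_unlock(level);
 */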

/**
 * This function is invoked by the scheduler.
 * It will restore the lock state to whatever the incoming thread's counter
 * expects: if the target thread has not locked the cpus, the cpus lock is
 * released.
 */
void rt_cpus_lock_status_restore(struct rt_thread *thread)
{
    struct rt_cpu* pcpu = rt_cpu_self();

    pcpu->current_thread = thread;
    if (!thread->cpus_lock_nest)
    {
        rt_hw_spin_unlock(&_cpus_lock);
    }
}
RTM_EXPORT(rt_cpus_lock_status_restore);

#endif /* RT_USING_SMP */