Merge pull request #3111 from BernardXiong/spin_lock

Add spin lock API in Kernel

commit c07ebbc9d2
@@ -577,6 +577,7 @@ struct rt_thread
 
     rt_uint16_t scheduler_lock_nest;            /**< scheduler lock count */
     rt_uint16_t cpus_lock_nest;                 /**< cpus lock count */
+    rt_uint16_t critical_lock_nest;             /**< critical lock count */
 #endif /*RT_USING_SMP*/
 
     /* priority */
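The new critical_lock_nest field gives each thread a dedicated nesting counter for rt_enter_critical()/rt_exit_critical(), separate from scheduler_lock_nest (which the new spin lock API below also increments) and cpus_lock_nest; the scheduler.c hunks near the end of this diff depend on that separation.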
@@ -143,6 +143,12 @@ typedef union {
     } tickets;
 } rt_hw_spinlock_t;
 
+struct rt_spinlock
+{
+    rt_hw_spinlock_t lock;
+};
+
+void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock);
 void rt_hw_spin_lock(rt_hw_spinlock_t *lock);
 void rt_hw_spin_unlock(rt_hw_spinlock_t *lock);
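For context, a sketch of what the rt_hw_spinlock_t union above plausibly looks like in full, inferred from the fragments visible in this diff (the slock word is what the Cortex-A port clears in rt_hw_spin_lock_init() below; the exact field names and widths are an assumption and may differ per port):

    /* sketch only: layout inferred from this diff, not authoritative */
    typedef union {
        unsigned long slock;          /* whole lock word, written as one unit */
        struct __arch_tickets {
            unsigned short owner;     /* ticket currently being served */
            unsigned short next;      /* next ticket to hand out */
        } tickets;
    } rt_hw_spinlock_t;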
@@ -391,6 +391,27 @@ rt_err_t rt_mq_recv(rt_mq_t mq,
 rt_err_t rt_mq_control(rt_mq_t mq, int cmd, void *arg);
 #endif
 
+/*
+ * spinlock
+ */
+#ifdef RT_USING_SMP
+struct rt_spinlock;
+
+void rt_spin_lock_init(struct rt_spinlock *lock);
+void rt_spin_lock(struct rt_spinlock *lock);
+void rt_spin_unlock(struct rt_spinlock *lock);
+rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock);
+void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level);
+
+#else
+#define rt_spin_lock_init(lock)                /* nothing */
+#define rt_spin_lock(lock)                     rt_enter_critical()
+#define rt_spin_unlock(lock)                   rt_exit_critical()
+#define rt_spin_lock_irqsave(lock)             rt_hw_interrupt_disable()
+#define rt_spin_unlock_irqrestore(lock, level) rt_hw_interrupt_enable(level)
+
+#endif
+
 /**@}*/
 
 #ifdef RT_USING_DEVICE
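The declarations above are the whole portable locking surface. A minimal usage sketch, not part of the patch (demo_lock, shared_count, and the demo_* functions are hypothetical); note that on single-core builds the #else macros make the same code compile down to critical sections and interrupt masking:

    #include <rtthread.h>

    static struct rt_spinlock demo_lock;
    static rt_uint32_t shared_count;

    void demo_init(void)
    {
        rt_spin_lock_init(&demo_lock);
    }

    /* thread-vs-thread protection: preemption is disabled while held */
    void demo_increment(void)
    {
        rt_spin_lock(&demo_lock);
        shared_count++;
        rt_spin_unlock(&demo_lock);
    }

    /* thread-vs-interrupt protection: also masks local interrupts */
    void demo_increment_irq_safe(void)
    {
        rt_base_t level = rt_spin_lock_irqsave(&demo_lock);
        shared_count++;
        rt_spin_unlock_irqrestore(&demo_lock, level);
    }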
@@ -14,6 +14,7 @@
 #include <board.h>
 
 #ifdef RT_USING_SMP
+
 int rt_hw_cpu_id(void)
 {
     int cpu_id;
@@ -25,6 +26,11 @@ int rt_hw_cpu_id(void)
     return cpu_id;
 };
 
+void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock)
+{
+    lock->slock = 0;
+}
+
 void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
 {
     unsigned long tmp;
@@ -25,6 +25,11 @@ int rt_hw_cpu_id(void)
     return read_csr(mhartid);
 }
 
+void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock)
+{
+    ((spinlock_t *)lock)->lock = 0;
+}
+
 void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
 {
     spinlock_lock((spinlock_t *)lock);
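On the K210 port the kernel's lock word is simply reinterpreted as the SDK's spinlock_t and the work is delegated to spinlock_lock(), so the new rt_hw_spin_lock_init() only has to zero the SDK structure's lock field, mirroring the slock = 0 initialization in the Cortex-A hunk above.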
@@ -26,6 +26,9 @@ if GetDepend('RT_USING_MEMHEAP') == False:
 if GetDepend('RT_USING_DEVICE') == False:
     SrcRemove(src, ['device.c'])
 
+if GetDepend('RT_USING_SMP') == False:
+    SrcRemove(src, ['cpu.c'])
+
 group = DefineGroup('Kernel', src, depend = [''], CPPPATH = CPPPATH)
 
 Return('group')
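With cpu.c now the home of the rt_spin_* implementations, the build script drops it from non-SMP builds; the #else macros added to the header above cover that configuration, so no stub file is needed.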
src/cpu.c (99 lines changed)
@@ -7,15 +7,106 @@
  * Date           Author       Notes
  * 2018-10-30     Bernard      The first version
  */
 
-#include <rtthread.h>
 #include <rthw.h>
+#include <rtthread.h>
 
 #ifdef RT_USING_SMP
 
 static struct rt_cpu rt_cpus[RT_CPUS_NR];
 rt_hw_spinlock_t _cpus_lock;
 
+/*
+ * disable scheduler
+ */
+static void rt_preempt_disable(void)
+{
+    register rt_base_t level;
+    struct rt_thread *current_thread;
+
+    /* disable interrupt */
+    level = rt_hw_local_irq_disable();
+
+    current_thread = rt_thread_self();
+    if (!current_thread)
+    {
+        rt_hw_local_irq_enable(level);
+        return;
+    }
+
+    /* lock scheduler for local cpu */
+    current_thread->scheduler_lock_nest ++;
+
+    /* enable interrupt */
+    rt_hw_local_irq_enable(level);
+}
+
+/*
+ * enable scheduler
+ */
+static void rt_preempt_enable(void)
+{
+    register rt_base_t level;
+    struct rt_thread *current_thread;
+
+    /* disable interrupt */
+    level = rt_hw_local_irq_disable();
+
+    current_thread = rt_thread_self();
+    if (!current_thread)
+    {
+        rt_hw_local_irq_enable(level);
+        return;
+    }
+
+    /* unlock scheduler for local cpu */
+    current_thread->scheduler_lock_nest --;
+
+    rt_schedule();
+    /* enable interrupt */
+    rt_hw_local_irq_enable(level);
+}
+
+void rt_spin_lock_init(struct rt_spinlock *lock)
+{
+    rt_hw_spin_lock_init(&lock->lock);
+}
+RTM_EXPORT(rt_spin_lock_init)
+
+void rt_spin_lock(struct rt_spinlock *lock)
+{
+    rt_preempt_disable();
+    rt_hw_spin_lock(&lock->lock);
+}
+RTM_EXPORT(rt_spin_lock)
+
+void rt_spin_unlock(struct rt_spinlock *lock)
+{
+    rt_hw_spin_unlock(&lock->lock);
+    rt_preempt_enable();
+}
+RTM_EXPORT(rt_spin_unlock)
+
+rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
+{
+    unsigned long level;
+
+    rt_preempt_disable();
+
+    level = rt_hw_local_irq_disable();
+    rt_hw_spin_lock(&lock->lock);
+
+    return level;
+}
+RTM_EXPORT(rt_spin_lock_irqsave)
+
+void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
+{
+    rt_hw_spin_unlock(&lock->lock);
+    rt_hw_local_irq_enable(level);
+
+    rt_preempt_enable();
+}
+RTM_EXPORT(rt_spin_unlock_irqrestore)
+
 /**
  * This function will return current cpu.
  */
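The ordering here is deliberate: rt_spin_lock() disables preemption before spinning, so the holder cannot be scheduled away while other CPUs busy-wait on the lock, and rt_spin_lock_irqsave() additionally masks local interrupts before touching it; unlocking reverses each step. Because rt_preempt_disable() works on a per-thread counter, the locks nest. A sketch with hypothetical locks a and b, assuming rt_schedule() returns without switching while scheduler_lock_nest is still nonzero:

    rt_spin_lock(&a);      /* scheduler_lock_nest 0 -> 1, then take a */
    rt_spin_lock(&b);      /* scheduler_lock_nest 1 -> 2, then take b */
    rt_spin_unlock(&b);    /* release b; nest 2 -> 1, no thread switch yet */
    rt_spin_unlock(&a);    /* release a; nest 1 -> 0, rt_schedule() may switch */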
@@ -42,7 +133,7 @@ rt_base_t rt_cpus_lock(void)
     pcpu = rt_cpu_self();
     if (pcpu->current_thread != RT_NULL)
     {
-        register rt_uint16_t lock_nest = pcpu->current_thread->cpus_lock_nest;
+        register rt_ubase_t lock_nest = pcpu->current_thread->cpus_lock_nest;
 
         pcpu->current_thread->cpus_lock_nest++;
         if (lock_nest == 0)
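The snapshot variable is widened from rt_uint16_t to rt_ubase_t, the kernel's native register-width type, which better suits a register-qualified local; the cpus_lock_nest field itself stays rt_uint16_t.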
@@ -840,11 +840,14 @@ void rt_enter_critical(void)
     */
 
     /* lock scheduler for all cpus */
-    if (current_thread->scheduler_lock_nest == !!current_thread->cpus_lock_nest)
+    if (current_thread->critical_lock_nest == 0)
     {
         rt_hw_spin_lock(&_rt_critical_lock);
     }
 
+    /* critical for local cpu */
+    current_thread->critical_lock_nest ++;
+
     /* lock scheduler for local cpu */
     current_thread->scheduler_lock_nest ++;
@@ -892,7 +895,9 @@ void rt_exit_critical(void)
 
     current_thread->scheduler_lock_nest --;
 
-    if (current_thread->scheduler_lock_nest == !!current_thread->cpus_lock_nest)
+    current_thread->critical_lock_nest --;
+
+    if (current_thread->critical_lock_nest == 0)
     {
         rt_hw_spin_unlock(&_rt_critical_lock);
     }
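These two hunks are the reason the new counter exists. The old code inferred "first/last critical entry" from the identity scheduler_lock_nest == !!cpus_lock_nest, but rt_spin_lock() now bumps scheduler_lock_nest too, so that identity no longer holds. An illustrative trace of the broken case, with a hypothetical lock l:

    rt_spin_lock(&l);      /* scheduler_lock_nest = 1, cpus_lock_nest = 0 */
    rt_enter_critical();   /* old test: 1 == !!0 is false, so _rt_critical_lock
                            * is skipped on this first entry; the new counter
                            * (critical_lock_nest 0 -> 1) takes it correctly */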
@@ -951,7 +956,7 @@ rt_uint16_t rt_critical_level(void)
 #ifdef RT_USING_SMP
     struct rt_thread *current_thread = rt_cpu_self()->current_thread;
 
-    return current_thread->scheduler_lock_nest;
+    return current_thread->critical_lock_nest;
 #else
     return rt_scheduler_lock_nest;
 #endif /*RT_USING_SMP*/
@@ -172,6 +172,7 @@ static rt_err_t _rt_thread_init(struct rt_thread *thread,
     /* lock init */
     thread->scheduler_lock_nest = 0;
     thread->cpus_lock_nest = 0;
+    thread->critical_lock_nest = 0;
 #endif /*RT_USING_SMP*/
 
     /* initialize cleanup function and user data */