Merge pull request #3111 from BernardXiong/spin_lock

Add spin lock API in Kernel
Bernard Xiong 2019-09-29 09:28:01 +08:00 committed by GitHub
commit c07ebbc9d2
10 changed files with 152 additions and 13 deletions


@@ -577,6 +577,7 @@ struct rt_thread
rt_uint16_t scheduler_lock_nest; /**< scheduler lock count */
rt_uint16_t cpus_lock_nest; /**< cpus lock count */
rt_uint16_t critical_lock_nest; /**< critical lock count */
#endif /*RT_USING_SMP*/
/* priority */


@@ -143,6 +143,12 @@ typedef union {
} tickets;
} rt_hw_spinlock_t;
struct rt_spinlock
{
rt_hw_spinlock_t lock;
};
void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock);
void rt_hw_spin_lock(rt_hw_spinlock_t *lock);
void rt_hw_spin_unlock(rt_hw_spinlock_t *lock);
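
The `tickets` union above suggests the ARM port implements a ticket lock: each acquirer takes the next ticket and spins until the owner field reaches it, which makes the lock FIFO-fair across CPUs. A minimal C11 sketch of the same protocol (illustrative only — the real rt_hw_spin_lock() is architecture assembly, and the field names here are assumptions):

#include <stdatomic.h>

/* hypothetical C11 model of a ticket spinlock */
typedef struct
{
    atomic_ushort next;   /* next ticket to hand out */
    atomic_ushort owner;  /* ticket currently being served */
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *lock)
{
    /* take a ticket, then spin until it is our turn */
    unsigned short ticket = atomic_fetch_add(&lock->next, 1);
    while (atomic_load_explicit(&lock->owner, memory_order_acquire) != ticket)
        ;  /* busy-wait; a real port would use WFE/SEV here */
}

static void ticket_unlock(ticket_lock_t *lock)
{
    /* publish the critical section's writes, then hand the
     * lock to the next ticket holder */
    atomic_fetch_add_explicit(&lock->owner, 1, memory_order_release);
}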


@@ -391,6 +391,27 @@ rt_err_t rt_mq_recv(rt_mq_t mq,
rt_err_t rt_mq_control(rt_mq_t mq, int cmd, void *arg);
#endif
/*
* spinlock
*/
#ifdef RT_USING_SMP
struct rt_spinlock;
void rt_spin_lock_init(struct rt_spinlock *lock);
void rt_spin_lock(struct rt_spinlock *lock);
void rt_spin_unlock(struct rt_spinlock *lock);
rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock);
void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level);
#else
#define rt_spin_lock_init(lock) /* nothing */
#define rt_spin_lock(lock) rt_enter_critical()
#define rt_spin_unlock(lock) rt_exit_critical()
#define rt_spin_lock_irqsave(lock) rt_hw_interrupt_disable()
#define rt_spin_unlock_irqrestore(lock, level) rt_hw_interrupt_enable(level)
#endif
/**@}*/
#ifdef RT_USING_DEVICE
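
With both halves in place, kernel code can use one spinlock API on SMP and UP builds alike; on UP the macros above fall back to critical sections or interrupt masking, so nothing extra is paid. A short usage sketch (the counter and function names are hypothetical):

#include <rtthread.h>

/* hypothetical shared state, touched by threads on several CPUs */
static struct rt_spinlock counter_lock;
static rt_uint32_t counter;

void counter_init(void)
{
    rt_spin_lock_init(&counter_lock);
}

void counter_add(rt_uint32_t n)
{
    /* disables preemption on this CPU, then spins for the lock */
    rt_spin_lock(&counter_lock);
    counter += n;
    rt_spin_unlock(&counter_lock);
}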


@@ -14,6 +14,7 @@
#include <board.h>
#ifdef RT_USING_SMP
int rt_hw_cpu_id(void)
{
int cpu_id;
@@ -25,6 +26,11 @@ int rt_hw_cpu_id(void)
return cpu_id;
};
void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock)
{
lock->slock = 0;
}
void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
{
unsigned long tmp;


@@ -25,6 +25,11 @@ int rt_hw_cpu_id(void)
return read_csr(mhartid);
}
void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock)
{
((spinlock_t *)lock)->lock = 0;
}
void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
{
spinlock_lock((spinlock_t *)lock);


@@ -20,7 +20,7 @@ config RT_USING_ARCH_DATA_TYPE
config RT_USING_SMP
bool "Enable SMP(Symmetric multiprocessing)"
default n
help
This option should be selected by machines which have an SMP-
capable CPU.
The only effect of this option is to make the SMP-related
@@ -28,10 +28,10 @@ config RT_USING_SMP
config RT_CPUS_NR
int "Number of CPUs"
default 2
depends on RT_USING_SMP
help
Number of CPUs in the system
config RT_ALIGN_SIZE
int "Alignment size for CPU architecture data access"


@@ -26,6 +26,9 @@ if GetDepend('RT_USING_MEMHEAP') == False:
if GetDepend('RT_USING_DEVICE') == False:
    SrcRemove(src, ['device.c'])

if GetDepend('RT_USING_SMP') == False:
    SrcRemove(src, ['cpu.c'])

group = DefineGroup('Kernel', src, depend = [''], CPPPATH = CPPPATH)
Return('group')


@@ -7,15 +7,106 @@
* Date Author Notes
* 2018-10-30 Bernard The first version
*/
#include <rthw.h>
#include <rtthread.h>
#ifdef RT_USING_SMP
static struct rt_cpu rt_cpus[RT_CPUS_NR];
rt_hw_spinlock_t _cpus_lock;
/*
* disable scheduler
*/
static void rt_preempt_disable(void)
{
register rt_base_t level;
struct rt_thread *current_thread;
/* disable interrupt */
level = rt_hw_local_irq_disable();
current_thread = rt_thread_self();
if (!current_thread)
{
rt_hw_local_irq_enable(level);
return;
}
/* lock scheduler for local cpu */
current_thread->scheduler_lock_nest ++;
/* enable interrupt */
rt_hw_local_irq_enable(level);
}
/*
* enable scheduler
*/
static void rt_preempt_enable(void)
{
register rt_base_t level;
struct rt_thread *current_thread;
/* disable interrupt */
level = rt_hw_local_irq_disable();
current_thread = rt_thread_self();
if (!current_thread)
{
rt_hw_local_irq_enable(level);
return;
}
/* unlock scheduler for local cpu */
current_thread->scheduler_lock_nest --;
rt_schedule();
/* enable interrupt */
rt_hw_local_irq_enable(level);
}
void rt_spin_lock_init(struct rt_spinlock *lock)
{
rt_hw_spin_lock_init(&lock->lock);
}
RTM_EXPORT(rt_spin_lock_init)
void rt_spin_lock(struct rt_spinlock *lock)
{
rt_preempt_disable();
rt_hw_spin_lock(&lock->lock);
}
RTM_EXPORT(rt_spin_lock)
void rt_spin_unlock(struct rt_spinlock *lock)
{
rt_hw_spin_unlock(&lock->lock);
rt_preempt_enable();
}
RTM_EXPORT(rt_spin_unlock)
rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
{
unsigned long level;
rt_preempt_disable();
level = rt_hw_local_irq_disable();
rt_hw_spin_lock(&lock->lock);
return level;
}
RTM_EXPORT(rt_spin_lock_irqsave)
void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
{
rt_hw_spin_unlock(&lock->lock);
rt_hw_local_irq_enable(level);
rt_preempt_enable();
}
RTM_EXPORT(rt_spin_unlock_irqrestore)
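
Note the strict reverse ordering on release: hardware lock first, then interrupts, then preemption. One practical rule follows from these primitives: a lock that is ever taken in interrupt context must use the irqsave variant everywhere, otherwise an ISR can interrupt a lock holder on its own CPU and spin forever. A hedged sketch (the UART names are hypothetical):

#include <rtthread.h>

/* hypothetical receive counter shared between an ISR and threads */
static struct rt_spinlock rx_lock;
static rt_size_t rx_count;

/* interrupt context */
void uart_rx_isr(void)
{
    rt_base_t level = rt_spin_lock_irqsave(&rx_lock);
    rx_count++;
    rt_spin_unlock_irqrestore(&rx_lock, level);
}

/* thread context: must also use irqsave, or the ISR above could
 * fire between lock and unlock and deadlock this CPU */
rt_size_t uart_rx_pending(void)
{
    rt_base_t level = rt_spin_lock_irqsave(&rx_lock);
    rt_size_t n = rx_count;
    rt_spin_unlock_irqrestore(&rx_lock, level);
    return n;
}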
/**
 * This function will return the current cpu.
 */
@@ -42,7 +133,7 @@ rt_base_t rt_cpus_lock(void)
pcpu = rt_cpu_self();
if (pcpu->current_thread != RT_NULL)
{
-register rt_uint16_t lock_nest = pcpu->current_thread->cpus_lock_nest;
+register rt_ubase_t lock_nest = pcpu->current_thread->cpus_lock_nest;
pcpu->current_thread->cpus_lock_nest++;
if (lock_nest == 0)


@@ -83,7 +83,7 @@ static void _rt_scheduler_stack_check(struct rt_thread *thread)
RT_ASSERT(thread != RT_NULL);
#if defined(ARCH_CPU_STACK_GROWS_UPWARD)
if (*((rt_uint8_t *)((rt_ubase_t)thread->stack_addr + thread->stack_size - 1)) != '#' ||
#else
if (*((rt_uint8_t *)thread->stack_addr) != '#' ||
#endif
@@ -840,11 +840,14 @@ void rt_enter_critical(void)
*/
/* lock scheduler for all cpus */
-if (current_thread->scheduler_lock_nest == !!current_thread->cpus_lock_nest)
+if (current_thread->critical_lock_nest == 0)
{
rt_hw_spin_lock(&_rt_critical_lock);
}
/* critical for local cpu */
current_thread->critical_lock_nest ++;
/* lock scheduler for local cpu */
current_thread->scheduler_lock_nest ++;
@@ -892,7 +895,9 @@ void rt_exit_critical(void)
current_thread->scheduler_lock_nest --;
-if (current_thread->scheduler_lock_nest == !!current_thread->cpus_lock_nest)
+current_thread->critical_lock_nest --;
+if (current_thread->critical_lock_nest == 0)
{
rt_hw_spin_unlock(&_rt_critical_lock);
}
@@ -951,9 +956,9 @@ rt_uint16_t rt_critical_level(void)
#ifdef RT_USING_SMP
struct rt_thread *current_thread = rt_cpu_self()->current_thread;
-return current_thread->scheduler_lock_nest;
+return current_thread->critical_lock_nest;
#else
return rt_scheduler_lock_nest;
#endif /*RT_USING_SMP*/
}
RTM_EXPORT(rt_critical_level);
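
The motivation for the new counter: scheduler_lock_nest is now bumped both by explicit critical sections and by every spinlock (via rt_preempt_disable()), so the old release test scheduler_lock_nest == !!cpus_lock_nest could fire at the wrong depth once the two were mixed. critical_lock_nest counts only rt_enter_critical()/rt_exit_critical() pairs, which is also what rt_critical_level() now reports. A toy model of the bookkeeping (an illustration, not kernel code):

#include <assert.h>

/* counter names mirror struct rt_thread; everything else is assumed */
static int scheduler_lock_nest;  /* spinlocks AND critical sections */
static int critical_lock_nest;   /* critical sections only */

static void spin_lock(void)      { scheduler_lock_nest++; }  /* rt_preempt_disable() */
static void spin_unlock(void)    { scheduler_lock_nest--; }  /* rt_preempt_enable() */
static void enter_critical(void) { critical_lock_nest++; scheduler_lock_nest++; }
static void exit_critical(void)  { scheduler_lock_nest--; critical_lock_nest--; }

int main(void)
{
    enter_critical();
    spin_lock();     /* spinlock nested inside a critical section */
    assert(critical_lock_nest == 1);  /* rt_critical_level() stays at 1 */
    spin_unlock();
    exit_critical(); /* _rt_critical_lock is released exactly when this hits 0 */
    assert(critical_lock_nest == 0);
    return 0;
}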


@@ -172,6 +172,7 @@ static rt_err_t _rt_thread_init(struct rt_thread *thread,
/* lock init */
thread->scheduler_lock_nest = 0;
thread->cpus_lock_nest = 0;
thread->critical_lock_nest = 0;
#endif /*RT_USING_SMP*/
/* initialize cleanup function and user data */