From cb07e5fb24afebdf233847da551d3ede80727345 Mon Sep 17 00:00:00 2001
From: shaojinchun
Date: Thu, 26 Sep 2019 18:13:07 +0800
Subject: [PATCH 1/3] Expose the spinlock-related functions
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 include/rtdef.h                  |   1 +
 include/rthw.h                   |  14 ++++
 libcpu/arm/cortex-a/cpu.c        |   6 ++
 libcpu/risc-v/k210/cpuport_smp.c |   5 ++
 src/cpu.c                        | 113 +++++++++++++++++++++++++++++++
 src/scheduler.c                  |  11 ++-
 src/thread.c                     |   1 +
 7 files changed, 148 insertions(+), 3 deletions(-)

diff --git a/include/rtdef.h b/include/rtdef.h
index b25a6a87d5..3dc8a2a2a9 100644
--- a/include/rtdef.h
+++ b/include/rtdef.h
@@ -577,6 +577,7 @@ struct rt_thread
 
     rt_uint16_t scheduler_lock_nest;                /**< scheduler lock count */
     rt_uint16_t cpus_lock_nest;                     /**< cpus lock count */
+    rt_uint16_t critical_lock_nest;                 /**< critical lock count */
 #endif /*RT_USING_SMP*/
 
     /* priority */
diff --git a/include/rthw.h b/include/rthw.h
index ef3dc087ad..a77f2c1e46 100644
--- a/include/rthw.h
+++ b/include/rthw.h
@@ -143,6 +143,12 @@ typedef union {
     } tickets;
 } rt_hw_spinlock_t;
 
+typedef struct
+{
+    rt_hw_spinlock_t lock;
+} rt_spinlock_t;
+
+void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock);
 void rt_hw_spin_lock(rt_hw_spinlock_t *lock);
 void rt_hw_spin_unlock(rt_hw_spinlock_t *lock);
 
@@ -181,8 +187,16 @@ void rt_hw_secondary_cpu_idle_exec(void);
 #define rt_hw_spin_lock(lock)     *(lock) = rt_hw_interrupt_disable()
 #define rt_hw_spin_unlock(lock)   rt_hw_interrupt_enable(*(lock))
 
+typedef int rt_spinlock_t;
+
 #endif
 
+void rt_spin_lock_init(rt_spinlock_t *lock);
+void rt_spin_lock(rt_spinlock_t *lock);
+void rt_spin_unlock(rt_spinlock_t *lock);
+rt_base_t rt_spin_lock_irqsave(rt_spinlock_t *lock);
+void rt_spin_unlock_irqrestore(rt_spinlock_t *lock, rt_base_t level);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/libcpu/arm/cortex-a/cpu.c b/libcpu/arm/cortex-a/cpu.c
index 6b129f89ee..7c6bf58a4b 100644
--- a/libcpu/arm/cortex-a/cpu.c
+++ b/libcpu/arm/cortex-a/cpu.c
@@ -14,6 +14,7 @@
 #include <rthw.h>
 
 #ifdef RT_USING_SMP
+
 int rt_hw_cpu_id(void)
 {
     int cpu_id;
@@ -25,6 +26,11 @@ int rt_hw_cpu_id(void)
     return cpu_id;
 };
 
+void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock)
+{
+    lock->slock = 0;
+}
+
 void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
 {
     unsigned long tmp;
diff --git a/libcpu/risc-v/k210/cpuport_smp.c b/libcpu/risc-v/k210/cpuport_smp.c
index a31314a13f..8c57dfeafc 100644
--- a/libcpu/risc-v/k210/cpuport_smp.c
+++ b/libcpu/risc-v/k210/cpuport_smp.c
@@ -25,6 +25,11 @@ int rt_hw_cpu_id(void)
     return read_csr(mhartid);
 }
 
+void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock)
+{
+    ((spinlock_t *)lock)->lock = 0;
+}
+
 void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
 {
     spinlock_lock((spinlock_t *)lock);
diff --git a/src/cpu.c b/src/cpu.c
index fe3ba21bb0..a4075108e1 100644
--- a/src/cpu.c
+++ b/src/cpu.c
@@ -11,6 +11,119 @@
 #include <rtthread.h>
 #include <rthw.h>
+#ifdef RT_USING_SMP
+/***********************************
+ * disable scheduler
+ ***********************************/
+static void rt_preempt_disable(void)
+{
+	register rt_base_t level;
+	struct rt_thread *current_thread;
+
+	/* disable interrupt */
+	level = rt_hw_local_irq_disable();
+
+	current_thread = rt_cpu_self()->current_thread;
+	if (!current_thread)
+	{
+		rt_hw_local_irq_enable(level);
+		return;
+	}
+
+	/* lock scheduler for local cpu */
+	current_thread->scheduler_lock_nest ++;
+
+	/* enable interrupt */
+	rt_hw_local_irq_enable(level);
+}
+
+/***********************************
+ * restore scheduler
+ ***********************************/
+static void rt_preempt_enable(void)
+{
+	register rt_base_t level;
+	struct rt_thread *current_thread;
+
+	/* disable interrupt */
+	level = rt_hw_local_irq_disable();
+
+	current_thread = rt_cpu_self()->current_thread;
+	if (!current_thread)
+	{
+		rt_hw_local_irq_enable(level);
+		return;
+	}
+
+	/* unlock scheduler for local cpu */
+	current_thread->scheduler_lock_nest --;
+
+	rt_schedule();
+	/* enable interrupt */
+	rt_hw_local_irq_enable(level);
+}
+#endif
+
+void rt_spin_lock_init(rt_spinlock_t *lock)
+{
+#ifdef RT_USING_SMP
+	rt_hw_spin_lock_init(&lock->lock);
+#endif
+}
+RTM_EXPORT(rt_spin_lock_init)
+
+void rt_spin_lock(rt_spinlock_t *lock)
+{
+#ifdef RT_USING_SMP
+	rt_preempt_disable();
+	rt_hw_spin_lock(&lock->lock);
+#else
+	rt_enter_critical();
+#endif
+}
+RTM_EXPORT(rt_spin_lock)
+
+void rt_spin_unlock(rt_spinlock_t *lock)
+{
+#ifdef RT_USING_SMP
+	rt_hw_spin_unlock(&lock->lock);
+	rt_preempt_enable();
+#else
+	rt_exit_critical();
+#endif
+}
+RTM_EXPORT(rt_spin_unlock)
+
+rt_base_t rt_spin_lock_irqsave(rt_spinlock_t *lock)
+{
+	unsigned long level;
+
+#ifdef RT_USING_SMP
+	rt_preempt_disable();
+
+	level = rt_hw_local_irq_disable();
+	rt_hw_spin_lock(&lock->lock);
+
+	return level;
+#else
+	return rt_hw_interrupt_disable();
+#endif
+}
+RTM_EXPORT(rt_spin_lock_irqsave)
+
+void rt_spin_unlock_irqrestore(rt_spinlock_t *lock, rt_base_t level)
+{
+#ifdef RT_USING_SMP
+	rt_hw_spin_unlock(&lock->lock);
+	rt_hw_local_irq_enable(level);
+
+	rt_preempt_enable();
+#else
+	rt_hw_interrupt_enable(level);
+#endif
+}
+RTM_EXPORT(rt_spin_unlock_irqrestore)
+
 #ifdef RT_USING_SMP
 
 static struct rt_cpu rt_cpus[RT_CPUS_NR];
 rt_hw_spinlock_t _cpus_lock;
diff --git a/src/scheduler.c b/src/scheduler.c
index b487bd4b60..0faafaa2fc 100644
--- a/src/scheduler.c
+++ b/src/scheduler.c
@@ -840,11 +840,14 @@ void rt_enter_critical(void)
      */
 
     /* lock scheduler for all cpus */
-    if (current_thread->scheduler_lock_nest == !!current_thread->cpus_lock_nest)
+    if (current_thread->critical_lock_nest == 0)
     {
         rt_hw_spin_lock(&_rt_critical_lock);
     }
 
+    /* critical for local cpu */
+    current_thread->critical_lock_nest ++;
+
     /* lock scheduler for local cpu */
     current_thread->scheduler_lock_nest ++;
 
@@ -892,7 +895,9 @@ void rt_exit_critical(void)
     current_thread->scheduler_lock_nest --;
 
-    if (current_thread->scheduler_lock_nest == !!current_thread->cpus_lock_nest)
+    current_thread->critical_lock_nest --;
+
+    if (current_thread->critical_lock_nest == 0)
     {
         rt_hw_spin_unlock(&_rt_critical_lock);
     }
@@ -951,7 +956,7 @@ rt_uint16_t rt_critical_level(void)
 #ifdef RT_USING_SMP
     struct rt_thread *current_thread = rt_cpu_self()->current_thread;
 
-    return current_thread->scheduler_lock_nest;
+    return current_thread->critical_lock_nest;
 #else
     return rt_scheduler_lock_nest;
 #endif /*RT_USING_SMP*/
diff --git a/src/thread.c b/src/thread.c
index ab203b1d49..a88729e52c 100644
--- a/src/thread.c
+++ b/src/thread.c
@@ -172,6 +172,7 @@ static rt_err_t _rt_thread_init(struct rt_thread *thread,
     /* lock init */
     thread->scheduler_lock_nest = 0;
     thread->cpus_lock_nest = 0;
+    thread->critical_lock_nest = 0;
 #endif /*RT_USING_SMP*/
 
     /* initialize cleanup function and user data */
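Note (usage sketch, not part of the patch series): a minimal, hypothetical example of the API exposed by PATCH 1. The names demo_lock, demo_counter, demo_start and demo_increment are invented for illustration; rt_spinlock_t is the type as PATCH 1 declares it, which PATCH 2 below renames to struct rt_spinlock.

    #include <rtthread.h>
    #include <rthw.h>

    static rt_spinlock_t demo_lock;          /* hypothetical shared lock  */
    static rt_uint32_t   demo_counter = 0;   /* hypothetical shared state */

    void demo_start(void)
    {
        /* a spinlock must be initialized once before first use */
        rt_spin_lock_init(&demo_lock);
    }

    void demo_increment(void)
    {
        /* thread context: rt_spin_lock() first disables preemption on the
         * local cpu (rt_preempt_disable), then takes the hardware spinlock,
         * so the holder cannot be scheduled away while another core spins */
        rt_spin_lock(&demo_lock);
        demo_counter++;
        rt_spin_unlock(&demo_lock);
    }

On release, rt_spin_unlock() drops the hardware lock and re-enables preemption; rt_preempt_enable() ends with rt_schedule(), so any reschedule that became pending while the lock was held runs immediately.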
From 5cd65092967a1dbaa35bb7fdd80cf3a0e2f9cc9f Mon Sep 17 00:00:00 2001
From: Bernard Xiong
Date: Sat, 28 Sep 2019 11:56:03 +0800
Subject: [PATCH 2/3] [Kernel] Clean up the spinlock code in the SMP kernel

---
 include/rthw.h     | 12 ++---------
 include/rtthread.h | 21 ++++++++++++++++++
 src/SConscript     |  3 +++
 src/cpu.c          | 53 ++++++++++++++--------------------------
 4 files changed, 42 insertions(+), 47 deletions(-)

diff --git a/include/rthw.h b/include/rthw.h
index a77f2c1e46..2a602dcfe7 100644
--- a/include/rthw.h
+++ b/include/rthw.h
@@ -143,10 +143,10 @@ typedef union {
     } tickets;
 } rt_hw_spinlock_t;
 
-typedef struct
+struct rt_spinlock
 {
     rt_hw_spinlock_t lock;
-} rt_spinlock_t;
+};
 
 void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock);
 void rt_hw_spin_lock(rt_hw_spinlock_t *lock);
@@ -187,16 +187,8 @@ void rt_hw_secondary_cpu_idle_exec(void);
 #define rt_hw_spin_lock(lock)     *(lock) = rt_hw_interrupt_disable()
 #define rt_hw_spin_unlock(lock)   rt_hw_interrupt_enable(*(lock))
 
-typedef int rt_spinlock_t;
-
 #endif
 
-void rt_spin_lock_init(rt_spinlock_t *lock);
-void rt_spin_lock(rt_spinlock_t *lock);
-void rt_spin_unlock(rt_spinlock_t *lock);
-rt_base_t rt_spin_lock_irqsave(rt_spinlock_t *lock);
-void rt_spin_unlock_irqrestore(rt_spinlock_t *lock, rt_base_t level);
-
 #ifdef __cplusplus
 }
 #endif
diff --git a/include/rtthread.h b/include/rtthread.h
index d703f36268..13a1a4424b 100644
--- a/include/rtthread.h
+++ b/include/rtthread.h
@@ -391,6 +391,27 @@ rt_err_t rt_mq_recv(rt_mq_t mq,
 rt_err_t rt_mq_control(rt_mq_t mq, int cmd, void *arg);
 #endif
 
+/*
+ * spinlock
+ */
+#ifdef RT_USING_SMP
+struct rt_spinlock;
+
+void rt_spin_lock_init(struct rt_spinlock *lock);
+void rt_spin_lock(struct rt_spinlock *lock);
+void rt_spin_unlock(struct rt_spinlock *lock);
+rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock);
+void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level);
+
+#else
+#define rt_spin_lock_init(lock)                 /* nothing */
+#define rt_spin_lock(lock)                      rt_enter_critical()
+#define rt_spin_unlock(lock)                    rt_exit_critical()
+#define rt_spin_lock_irqsave(lock)              rt_hw_interrupt_disable()
+#define rt_spin_unlock_irqrestore(lock, level)  rt_hw_interrupt_enable(level)
+
+#endif
+
 /**@}*/
 
 #ifdef RT_USING_DEVICE
diff --git a/src/SConscript b/src/SConscript
index ea6e19318f..82c3b8aaf1 100644
--- a/src/SConscript
+++ b/src/SConscript
@@ -26,6 +26,9 @@ if GetDepend('RT_USING_MEMHEAP') == False:
 if GetDepend('RT_USING_DEVICE') == False:
     SrcRemove(src, ['device.c'])
 
+if GetDepend('RT_USING_SMP') == False:
+    SrcRemove(src, ['cpu.c'])
+
 group = DefineGroup('Kernel', src, depend = [''], CPPPATH = CPPPATH)
 
 Return('group')
diff --git a/src/cpu.c b/src/cpu.c
index a4075108e1..f6f210eda8 100644
--- a/src/cpu.c
+++ b/src/cpu.c
@@ -12,9 +12,12 @@
 #include <rthw.h>
 
 #ifdef RT_USING_SMP
-/***********************************
+static struct rt_cpu rt_cpus[RT_CPUS_NR];
+rt_hw_spinlock_t _cpus_lock;
+
+/*
  * disable scheduler
- ***********************************/
+ */
 static void rt_preempt_disable(void)
 {
 	register rt_base_t level;
@@ -23,7 +26,7 @@ static void rt_preempt_disable(void)
 	/* disable interrupt */
 	level = rt_hw_local_irq_disable();
 
-	current_thread = rt_cpu_self()->current_thread;
+	current_thread = rt_thread_self();
 	if (!current_thread)
 	{
 		rt_hw_local_irq_enable(level);
@@ -37,9 +40,9 @@ static void rt_preempt_disable(void)
 	rt_hw_local_irq_enable(level);
 }
 
-/***********************************
- * restore scheduler
- ***********************************/
+/*
+ * enable scheduler
+ */
 static void rt_preempt_enable(void)
 {
 	register rt_base_t level;
@@ -48,7 +51,7 @@ static void rt_preempt_enable(void)
 	/* disable interrupt */
 	level = rt_hw_local_irq_disable();
 
-	current_thread = rt_cpu_self()->current_thread;
+	current_thread = rt_thread_self();
 	if (!current_thread)
 	{
 		rt_hw_local_irq_enable(level);
@@ -62,73 +65,49 @@ static void rt_preempt_enable(void)
 	/* enable interrupt */
 	rt_hw_local_irq_enable(level);
 }
-#endif
 
-void rt_spin_lock_init(rt_spinlock_t *lock)
+void rt_spin_lock_init(struct rt_spinlock *lock)
 {
-#ifdef RT_USING_SMP
 	rt_hw_spin_lock_init(&lock->lock);
-#endif
 }
 RTM_EXPORT(rt_spin_lock_init)
 
-void rt_spin_lock(rt_spinlock_t *lock)
+void rt_spin_lock(struct rt_spinlock *lock)
 {
-#ifdef RT_USING_SMP
 	rt_preempt_disable();
 	rt_hw_spin_lock(&lock->lock);
-#else
-	rt_enter_critical();
-#endif
 }
 RTM_EXPORT(rt_spin_lock)
 
-void rt_spin_unlock(rt_spinlock_t *lock)
+void rt_spin_unlock(struct rt_spinlock *lock)
 {
-#ifdef RT_USING_SMP
 	rt_hw_spin_unlock(&lock->lock);
 	rt_preempt_enable();
-#else
-	rt_exit_critical();
-#endif
 }
 RTM_EXPORT(rt_spin_unlock)
 
-rt_base_t rt_spin_lock_irqsave(rt_spinlock_t *lock)
+rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
 {
 	unsigned long level;
 
-#ifdef RT_USING_SMP
 	rt_preempt_disable();
 
 	level = rt_hw_local_irq_disable();
 	rt_hw_spin_lock(&lock->lock);
 
 	return level;
-#else
-	return rt_hw_interrupt_disable();
-#endif
 }
 RTM_EXPORT(rt_spin_lock_irqsave)
 
-void rt_spin_unlock_irqrestore(rt_spinlock_t *lock, rt_base_t level)
+void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
 {
-#ifdef RT_USING_SMP
 	rt_hw_spin_unlock(&lock->lock);
 	rt_hw_local_irq_enable(level);
 
 	rt_preempt_enable();
-#else
-	rt_hw_interrupt_enable(level);
-#endif
 }
 RTM_EXPORT(rt_spin_unlock_irqrestore)
 
-#ifdef RT_USING_SMP
-
-static struct rt_cpu rt_cpus[RT_CPUS_NR];
-rt_hw_spinlock_t _cpus_lock;
-
 /**
  * This fucntion will return current cpu.
  */
@@ -155,7 +134,7 @@ rt_base_t rt_cpus_lock(void)
     pcpu = rt_cpu_self();
     if (pcpu->current_thread != RT_NULL)
     {
-        register rt_uint16_t lock_nest = pcpu->current_thread->cpus_lock_nest;
+        register rt_ubase_t lock_nest = pcpu->current_thread->cpus_lock_nest;
         pcpu->current_thread->cpus_lock_nest++;
 
         if (lock_nest == 0)
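Note (usage sketch, not part of the patch series): the irqsave variants are the pair to use when a lock is shared with an interrupt handler, since a plain rt_spin_lock() holder could be interrupted on its own core by an ISR that then spins on the same lock forever. The names rx_lock, rx_count, demo_uart_isr and demo_read_count are hypothetical, and the lock is assumed to have been initialized with rt_spin_lock_init() at startup.

    static struct rt_spinlock rx_lock;   /* hypothetical; init'd once at startup */
    static rt_uint32_t rx_count;

    /* interrupt context (hypothetical UART receive handler) */
    void demo_uart_isr(void)
    {
        rt_base_t level = rt_spin_lock_irqsave(&rx_lock);
        rx_count++;
        rt_spin_unlock_irqrestore(&rx_lock, level);
    }

    /* thread context: local interrupts are masked before spinning, so the
     * ISR above can never preempt this core while rx_lock is held */
    rt_uint32_t demo_read_count(void)
    {
        rt_base_t level;
        rt_uint32_t n;

        level = rt_spin_lock_irqsave(&rx_lock);
        n = rx_count;
        rt_spin_unlock_irqrestore(&rx_lock, level);

        return n;
    }

The non-SMP fallbacks added to rtthread.h above let the same code build on single-core configurations, where the lock degenerates to rt_enter_critical()/rt_hw_interrupt_disable().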
From 71311b73ae3e6a91ca1e9b2b82ca11af2c5cb868 Mon Sep 17 00:00:00 2001
From: Bernard Xiong
Date: Sat, 28 Sep 2019 12:26:48 +0800
Subject: [PATCH 3/3] [Kernel] Fix the wrong indentation

---
 src/Kconfig     |  8 ++---
 src/cpu.c       | 87 ++++++++++++++++++++++++-------------------------
 src/scheduler.c |  4 +--
 3 files changed, 49 insertions(+), 50 deletions(-)

diff --git a/src/Kconfig b/src/Kconfig
index 70805264c5..22942a017e 100644
--- a/src/Kconfig
+++ b/src/Kconfig
@@ -20,7 +20,7 @@ config RT_USING_ARCH_DATA_TYPE
 config RT_USING_SMP
 	bool "Enable SMP(Symmetric multiprocessing)"
 	default n
-    help
+	help
 	    This option should be selected by machines which have an SMP-
 	    capable CPU.
 	    The only effect of this option is to make the SMP-related
@@ -28,10 +28,10 @@
 
 config RT_CPUS_NR
 	int "Number of CPUs"
-    default 2
-    depends on RT_USING_SMP
+	default 2
+	depends on RT_USING_SMP
 	help
-        Number of CPUs in the system
+	    Number of CPUs in the system
 
 config RT_ALIGN_SIZE
 	int "Alignment size for CPU architecture data access"
diff --git a/src/cpu.c b/src/cpu.c
index f6f210eda8..96e585713b 100644
--- a/src/cpu.c
+++ b/src/cpu.c
@@ -7,9 +7,8 @@
  * Date           Author       Notes
  * 2018-10-30     Bernard      The first version
  */
-
-#include <rtthread.h>
 #include <rthw.h>
+#include <rtthread.h>
 
 #ifdef RT_USING_SMP
 static struct rt_cpu rt_cpus[RT_CPUS_NR];
@@ -20,24 +19,24 @@ rt_hw_spinlock_t _cpus_lock;
  */
 static void rt_preempt_disable(void)
 {
-	register rt_base_t level;
-	struct rt_thread *current_thread;
+    register rt_base_t level;
+    struct rt_thread *current_thread;
 
-	/* disable interrupt */
-	level = rt_hw_local_irq_disable();
+    /* disable interrupt */
+    level = rt_hw_local_irq_disable();
 
-	current_thread = rt_thread_self();
-	if (!current_thread)
-	{
-		rt_hw_local_irq_enable(level);
-		return;
-	}
+    current_thread = rt_thread_self();
+    if (!current_thread)
+    {
+        rt_hw_local_irq_enable(level);
+        return;
+    }
 
-	/* lock scheduler for local cpu */
-	current_thread->scheduler_lock_nest ++;
+    /* lock scheduler for local cpu */
+    current_thread->scheduler_lock_nest ++;
 
-	/* enable interrupt */
-	rt_hw_local_irq_enable(level);
+    /* enable interrupt */
+    rt_hw_local_irq_enable(level);
 }
 
 /*
@@ -45,66 +44,66 @@ static void rt_preempt_disable(void)
  */
 static void rt_preempt_enable(void)
 {
-	register rt_base_t level;
-	struct rt_thread *current_thread;
+    register rt_base_t level;
+    struct rt_thread *current_thread;
 
-	/* disable interrupt */
-	level = rt_hw_local_irq_disable();
+    /* disable interrupt */
+    level = rt_hw_local_irq_disable();
 
-	current_thread = rt_thread_self();
-	if (!current_thread)
-	{
-		rt_hw_local_irq_enable(level);
-		return;
-	}
+    current_thread = rt_thread_self();
+    if (!current_thread)
+    {
+        rt_hw_local_irq_enable(level);
+        return;
+    }
 
-	/* unlock scheduler for local cpu */
-	current_thread->scheduler_lock_nest --;
+    /* unlock scheduler for local cpu */
+    current_thread->scheduler_lock_nest --;
 
-	rt_schedule();
-	/* enable interrupt */
-	rt_hw_local_irq_enable(level);
+    rt_schedule();
+    /* enable interrupt */
+    rt_hw_local_irq_enable(level);
 }
 
 void rt_spin_lock_init(struct rt_spinlock *lock)
 {
-	rt_hw_spin_lock_init(&lock->lock);
+    rt_hw_spin_lock_init(&lock->lock);
 }
 RTM_EXPORT(rt_spin_lock_init)
 
 void rt_spin_lock(struct rt_spinlock *lock)
 {
-	rt_preempt_disable();
-	rt_hw_spin_lock(&lock->lock);
+    rt_preempt_disable();
+    rt_hw_spin_lock(&lock->lock);
 }
 RTM_EXPORT(rt_spin_lock)
 
 void rt_spin_unlock(struct rt_spinlock *lock)
 {
-	rt_hw_spin_unlock(&lock->lock);
-	rt_preempt_enable();
+    rt_hw_spin_unlock(&lock->lock);
+    rt_preempt_enable();
 }
 RTM_EXPORT(rt_spin_unlock)
 
 rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
 {
-	unsigned long level;
+    unsigned long level;
 
-	rt_preempt_disable();
+    rt_preempt_disable();
 
-	level = rt_hw_local_irq_disable();
-	rt_hw_spin_lock(&lock->lock);
+    level = rt_hw_local_irq_disable();
+    rt_hw_spin_lock(&lock->lock);
 
-	return level;
+    return level;
 }
 RTM_EXPORT(rt_spin_lock_irqsave)
 
 void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
 {
-	rt_hw_spin_unlock(&lock->lock);
-	rt_hw_local_irq_enable(level);
+    rt_hw_spin_unlock(&lock->lock);
+    rt_hw_local_irq_enable(level);
 
-	rt_preempt_enable();
+    rt_preempt_enable();
 }
 RTM_EXPORT(rt_spin_unlock_irqrestore)
 
diff --git a/src/scheduler.c b/src/scheduler.c
index 0faafaa2fc..f671a49623 100644
--- a/src/scheduler.c
+++ b/src/scheduler.c
@@ -83,7 +83,7 @@ static void _rt_scheduler_stack_check(struct rt_thread *thread)
     RT_ASSERT(thread != RT_NULL);
 
 #if defined(ARCH_CPU_STACK_GROWS_UPWARD)
-	if (*((rt_uint8_t *)((rt_ubase_t)thread->stack_addr + thread->stack_size - 1)) != '#' ||
+    if (*((rt_uint8_t *)((rt_ubase_t)thread->stack_addr + thread->stack_size - 1)) != '#' ||
 #else
     if (*((rt_uint8_t *)thread->stack_addr) != '#' ||
 #endif
@@ -958,7 +958,7 @@ rt_uint16_t rt_critical_level(void)
 
     return current_thread->critical_lock_nest;
 #else
-	return rt_scheduler_lock_nest;
+    return rt_scheduler_lock_nest;
 #endif /*RT_USING_SMP*/
 }
 RTM_EXPORT(rt_critical_level);
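Note (usage sketch, not part of the patch series): what the critical_lock_nest counter from PATCH 1 buys. Previously rt_enter_critical() took the global _rt_critical_lock only when scheduler_lock_nest == !!cpus_lock_nest, entangling critical-section nesting with the other paths that bump scheduler_lock_nest — rt_preempt_disable(), and apparently the cpus lock as well, hence the !!cpus_lock_nest baseline. The dedicated counter tracks pure rt_enter_critical() nesting, and rt_critical_level() now reports it directly. demo_nested_critical() is an invented name:

    void demo_nested_critical(void)
    {
        rt_enter_critical();    /* critical_lock_nest 0 -> 1, global lock taken */
        rt_enter_critical();    /* 1 -> 2: already held, only the count grows   */

        /* reports critical_lock_nest (2), no longer scheduler_lock_nest */
        RT_ASSERT(rt_critical_level() == 2);

        rt_exit_critical();     /* 2 -> 1 */
        rt_exit_critical();     /* 1 -> 0: global lock released, preemption may resume */
    }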