[Kernel] Fix the wrong indentation
commit 71311b73ae
parent 5cd6509296
src/Kconfig

@@ -20,7 +20,7 @@ config RT_USING_ARCH_DATA_TYPE
 config RT_USING_SMP
     bool "Enable SMP(Symmetric multiprocessing)"
     default n
-	help
+    help
         This option should be selected by machines which have an SMP-
         capable CPU.
         The only effect of this option is to make the SMP-related
@@ -28,10 +28,10 @@ config RT_USING_SMP
 
 config RT_CPUS_NR
     int "Number of CPUs"
-	default 2
-	depends on RT_USING_SMP
+    default 2
+    depends on RT_USING_SMP
     help
-	Number of CPUs in the system
+        Number of CPUs in the system
 
 config RT_ALIGN_SIZE
     int "Alignment size for CPU architecture data access"
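For reference, enabling these options through menuconfig ends up as plain defines in the generated rtconfig.h; a minimal sketch, assuming SMP is switched on and the default count of 2 CPUs is kept (hypothetical board config, not part of this commit):

    /* excerpt of a generated rtconfig.h */
    #define RT_USING_SMP
    #define RT_CPUS_NR 2
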
src/cpu.c (87 changes)
@@ -7,9 +7,8 @@
  * Date           Author       Notes
  * 2018-10-30     Bernard      The first version
  */
-
-#include <rtthread.h>
 #include <rthw.h>
+#include <rtthread.h>
 
 #ifdef RT_USING_SMP
 static struct rt_cpu rt_cpus[RT_CPUS_NR];
@@ -20,24 +19,24 @@ rt_hw_spinlock_t _cpus_lock;
  */
 static void rt_preempt_disable(void)
 {
-	register rt_base_t level;
-	struct rt_thread *current_thread;
+    register rt_base_t level;
+    struct rt_thread *current_thread;
 
-	/* disable interrupt */
-	level = rt_hw_local_irq_disable();
+    /* disable interrupt */
+    level = rt_hw_local_irq_disable();
 
-	current_thread = rt_thread_self();
-	if (!current_thread)
-	{
-		rt_hw_local_irq_enable(level);
-		return;
-	}
+    current_thread = rt_thread_self();
+    if (!current_thread)
+    {
+        rt_hw_local_irq_enable(level);
+        return;
+    }
 
-	/* lock scheduler for local cpu */
-	current_thread->scheduler_lock_nest ++;
+    /* lock scheduler for local cpu */
+    current_thread->scheduler_lock_nest ++;
 
-	/* enable interrupt */
-	rt_hw_local_irq_enable(level);
+    /* enable interrupt */
+    rt_hw_local_irq_enable(level);
 }
 
 /*
@@ -45,66 +44,66 @@ static void rt_preempt_disable(void)
  */
 static void rt_preempt_enable(void)
 {
-	register rt_base_t level;
-	struct rt_thread *current_thread;
+    register rt_base_t level;
+    struct rt_thread *current_thread;
 
-	/* disable interrupt */
-	level = rt_hw_local_irq_disable();
+    /* disable interrupt */
+    level = rt_hw_local_irq_disable();
 
-	current_thread = rt_thread_self();
-	if (!current_thread)
-	{
-		rt_hw_local_irq_enable(level);
-		return;
-	}
+    current_thread = rt_thread_self();
+    if (!current_thread)
+    {
+        rt_hw_local_irq_enable(level);
+        return;
+    }
 
-	/* unlock scheduler for local cpu */
-	current_thread->scheduler_lock_nest --;
+    /* unlock scheduler for local cpu */
+    current_thread->scheduler_lock_nest --;
 
-	rt_schedule();
-	/* enable interrupt */
-	rt_hw_local_irq_enable(level);
+    rt_schedule();
+    /* enable interrupt */
+    rt_hw_local_irq_enable(level);
 }
 
 void rt_spin_lock_init(struct rt_spinlock *lock)
 {
-	rt_hw_spin_lock_init(&lock->lock);
+    rt_hw_spin_lock_init(&lock->lock);
 }
 RTM_EXPORT(rt_spin_lock_init)
 
 void rt_spin_lock(struct rt_spinlock *lock)
 {
-	rt_preempt_disable();
-	rt_hw_spin_lock(&lock->lock);
+    rt_preempt_disable();
+    rt_hw_spin_lock(&lock->lock);
 }
 RTM_EXPORT(rt_spin_lock)
 
 void rt_spin_unlock(struct rt_spinlock *lock)
 {
-	rt_hw_spin_unlock(&lock->lock);
-	rt_preempt_enable();
+    rt_hw_spin_unlock(&lock->lock);
+    rt_preempt_enable();
 }
 RTM_EXPORT(rt_spin_unlock)
 
 rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
 {
-	unsigned long level;
+    unsigned long level;
 
-	rt_preempt_disable();
+    rt_preempt_disable();
 
-	level = rt_hw_local_irq_disable();
-	rt_hw_spin_lock(&lock->lock);
+    level = rt_hw_local_irq_disable();
+    rt_hw_spin_lock(&lock->lock);
 
-	return level;
+    return level;
 }
 RTM_EXPORT(rt_spin_lock_irqsave)
 
 void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
 {
-	rt_hw_spin_unlock(&lock->lock);
-	rt_hw_local_irq_enable(level);
+    rt_hw_spin_unlock(&lock->lock);
+    rt_hw_local_irq_enable(level);
 
-	rt_preempt_enable();
+    rt_preempt_enable();
 }
 RTM_EXPORT(rt_spin_unlock_irqrestore)
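The functions above make up the SMP spinlock API that pairs preemption control with the hardware lock. A minimal usage sketch; the counter, its lock, and the helper functions are hypothetical, not part of this commit:

    #include <rtthread.h>

    /* hypothetical shared state guarded by one spinlock */
    static struct rt_spinlock counter_lock;
    static rt_uint32_t shared_counter;

    void counter_init(void)
    {
        rt_spin_lock_init(&counter_lock);
    }

    /* thread context: rt_spin_lock() disables preemption, then spins */
    void counter_add(rt_uint32_t n)
    {
        rt_spin_lock(&counter_lock);
        shared_counter += n;
        rt_spin_unlock(&counter_lock);
    }

    /* also safe against local IRQ handlers: masks interrupts as well */
    rt_uint32_t counter_get(void)
    {
        rt_base_t level = rt_spin_lock_irqsave(&counter_lock);
        rt_uint32_t value = shared_counter;

        rt_spin_unlock_irqrestore(&counter_lock, level);
        return value;
    }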

src/scheduler.c
@@ -83,7 +83,7 @@ static void _rt_scheduler_stack_check(struct rt_thread *thread)
     RT_ASSERT(thread != RT_NULL);
 
 #if defined(ARCH_CPU_STACK_GROWS_UPWARD)
-	if (*((rt_uint8_t *)((rt_ubase_t)thread->stack_addr + thread->stack_size - 1)) != '#' ||
+    if (*((rt_uint8_t *)((rt_ubase_t)thread->stack_addr + thread->stack_size - 1)) != '#' ||
 #else
     if (*((rt_uint8_t *)thread->stack_addr) != '#' ||
 #endif
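For context, this check works because a thread's stack is filled with the '#' character when the thread is initialized, so the byte at the far end of the stack keeps that value until an overflow clobbers it. A simplified model of the invariant, not the kernel's exact code:

    /* assuming a downward-growing stack: the lowest stack byte stays '#' */
    static int stack_overflowed(struct rt_thread *thread)
    {
        return *((rt_uint8_t *)thread->stack_addr) != '#';
    }
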
@@ -958,7 +958,7 @@ rt_uint16_t rt_critical_level(void)
 
     return current_thread->critical_lock_nest;
 #else
-	return rt_scheduler_lock_nest;
+    return rt_scheduler_lock_nest;
 #endif /*RT_USING_SMP*/
 }
 RTM_EXPORT(rt_critical_level);
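rt_critical_level() reports the current scheduler-lock nesting depth: the per-thread critical_lock_nest on SMP builds, the global rt_scheduler_lock_nest otherwise. A small sketch of how it pairs with the existing rt_enter_critical()/rt_exit_critical() calls; the helper itself is hypothetical:

    #include <rtthread.h>

    /* hypothetical helper: run fn with the scheduler locked */
    static void run_locked(void (*fn)(void))
    {
        rt_enter_critical();               /* nestable scheduler lock */
        RT_ASSERT(rt_critical_level() > 0);
        fn();
        rt_exit_critical();                /* must balance the enter */
    }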