Expose spinlock-related functions
commit cb07e5fb24
parent 676279421f
@@ -577,6 +577,7 @@ struct rt_thread
     rt_uint16_t scheduler_lock_nest;                /**< scheduler lock count */
     rt_uint16_t cpus_lock_nest;                     /**< cpus lock count */
+    rt_uint16_t critical_lock_nest;                 /**< critical lock count */
 #endif /*RT_USING_SMP*/

     /* priority */
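(The new per-thread critical_lock_nest counts rt_enter_critical()/rt_exit_critical() nesting on its own; scheduler_lock_nest stops being a reliable proxy for it once the spinlock functions added in src/cpu.c below also raise it through rt_preempt_disable(). See the rt_enter_critical()/rt_exit_critical() hunks further down.)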
@@ -143,6 +143,12 @@ typedef union {
     } tickets;
 } rt_hw_spinlock_t;

+typedef struct
+{
+    rt_hw_spinlock_t lock;
+} rt_spinlock_t;
+
+void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock);
 void rt_hw_spin_lock(rt_hw_spinlock_t *lock);
 void rt_hw_spin_unlock(rt_hw_spinlock_t *lock);
@@ -181,8 +187,16 @@ void rt_hw_secondary_cpu_idle_exec(void);
 #define rt_hw_spin_lock(lock) *(lock) = rt_hw_interrupt_disable()
 #define rt_hw_spin_unlock(lock) rt_hw_interrupt_enable(*(lock))

+typedef int rt_spinlock_t;
+
 #endif

+void rt_spin_lock_init(rt_spinlock_t *lock);
+void rt_spin_lock(rt_spinlock_t *lock);
+void rt_spin_unlock(rt_spinlock_t *lock);
+rt_base_t rt_spin_lock_irqsave(rt_spinlock_t *lock);
+void rt_spin_unlock_irqrestore(rt_spinlock_t *lock, rt_base_t level);
+
 #ifdef __cplusplus
 }
 #endif
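Taken together, the header hunks above publish a portable spinlock type (rt_spinlock_t) and five functions that work in both SMP and single-core builds. A minimal usage sketch (hypothetical caller code, not part of this commit; counter_lock and shared_counter are illustrative names):

    #include <rtthread.h>
    #include <rthw.h>

    static rt_spinlock_t counter_lock;      /* protects shared_counter */
    static rt_uint32_t   shared_counter;

    void counter_init(void)
    {
        rt_spin_lock_init(&counter_lock);   /* no-op on single-core builds */
    }

    void counter_add(rt_uint32_t n)
    {
        rt_spin_lock(&counter_lock);        /* SMP: preemption off + HW spinlock */
        shared_counter += n;
        rt_spin_unlock(&counter_lock);      /* single-core: plain critical section */
    }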
@@ -14,6 +14,7 @@
 #include <board.h>

 #ifdef RT_USING_SMP

 int rt_hw_cpu_id(void)
 {
     int cpu_id;
@@ -25,6 +26,11 @@ int rt_hw_cpu_id(void)
     return cpu_id;
 };

+void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock)
+{
+    lock->slock = 0;
+}
+
 void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
 {
     unsigned long tmp;
@@ -25,6 +25,11 @@ int rt_hw_cpu_id(void)
     return read_csr(mhartid);
 }

+void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock)
+{
+    ((spinlock_t *)lock)->lock = 0;
+}
+
 void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
 {
     spinlock_lock((spinlock_t *)lock);
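Both ports above gain the same new hook, rt_hw_spin_lock_init(), which every SMP BSP must now provide next to its existing rt_hw_spin_lock()/rt_hw_spin_unlock(). As a rough illustration of what a port supplies, a sketch using GCC's atomic builtins (illustrative only, not from this commit; the real layout of rt_hw_spinlock_t is architecture-specific):

    /* hypothetical port sketch: a simple test-and-set spinlock */
    typedef struct { volatile int lock; } rt_hw_spinlock_t;

    void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock)
    {
        lock->lock = 0;                          /* start unlocked */
    }

    void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
    {
        while (__sync_lock_test_and_set(&lock->lock, 1))
            ;                                    /* busy-wait until acquired */
    }

    void rt_hw_spin_unlock(rt_hw_spinlock_t *lock)
    {
        __sync_lock_release(&lock->lock);        /* release with barrier */
    }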
src/cpu.c (+113)
@@ -11,6 +11,119 @@
 #include <rtthread.h>
 #include <rthw.h>

+#ifdef RT_USING_SMP
+/***********************************
+ * disable scheduler
+ ***********************************/
+static void rt_preempt_disable(void)
+{
+    register rt_base_t level;
+    struct rt_thread *current_thread;
+
+    /* disable interrupt */
+    level = rt_hw_local_irq_disable();
+
+    current_thread = rt_cpu_self()->current_thread;
+    if (!current_thread)
+    {
+        rt_hw_local_irq_enable(level);
+        return;
+    }
+
+    /* lock scheduler for local cpu */
+    current_thread->scheduler_lock_nest ++;
+
+    /* enable interrupt */
+    rt_hw_local_irq_enable(level);
+}
+
+/***********************************
+ * restore scheduler
+ ***********************************/
+static void rt_preempt_enable(void)
+{
+    register rt_base_t level;
+    struct rt_thread *current_thread;
+
+    /* disable interrupt */
+    level = rt_hw_local_irq_disable();
+
+    current_thread = rt_cpu_self()->current_thread;
+    if (!current_thread)
+    {
+        rt_hw_local_irq_enable(level);
+        return;
+    }
+
+    /* unlock scheduler for local cpu */
+    current_thread->scheduler_lock_nest --;
+
+    rt_schedule();
+    /* enable interrupt */
+    rt_hw_local_irq_enable(level);
+}
+#endif
+
+void rt_spin_lock_init(rt_spinlock_t *lock)
+{
+#ifdef RT_USING_SMP
+    rt_hw_spin_lock_init(&lock->lock);
+#endif
+}
+RTM_EXPORT(rt_spin_lock_init)
+
+void rt_spin_lock(rt_spinlock_t *lock)
+{
+#ifdef RT_USING_SMP
+    rt_preempt_disable();
+    rt_hw_spin_lock(&lock->lock);
+#else
+    rt_enter_critical();
+#endif
+}
+RTM_EXPORT(rt_spin_lock)
+
+void rt_spin_unlock(rt_spinlock_t *lock)
+{
+#ifdef RT_USING_SMP
+    rt_hw_spin_unlock(&lock->lock);
+    rt_preempt_enable();
+#else
+    rt_exit_critical();
+#endif
+}
+RTM_EXPORT(rt_spin_unlock)
+
+rt_base_t rt_spin_lock_irqsave(rt_spinlock_t *lock)
+{
+    unsigned long level;
+
+#ifdef RT_USING_SMP
+    rt_preempt_disable();
+
+    level = rt_hw_local_irq_disable();
+    rt_hw_spin_lock(&lock->lock);
+
+    return level;
+#else
+    return rt_hw_interrupt_disable();
+#endif
+}
+RTM_EXPORT(rt_spin_lock_irqsave)
+
+void rt_spin_unlock_irqrestore(rt_spinlock_t *lock, rt_base_t level)
+{
+#ifdef RT_USING_SMP
+    rt_hw_spin_unlock(&lock->lock);
+    rt_hw_local_irq_enable(level);
+
+    rt_preempt_enable();
+#else
+    rt_hw_interrupt_enable(level);
+#endif
+}
+RTM_EXPORT(rt_spin_unlock_irqrestore)
+
 #ifdef RT_USING_SMP

 static struct rt_cpu rt_cpus[RT_CPUS_NR];
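Note how the new functions layer preemption control under the hardware lock: rt_spin_lock() disables preemption before spinning, and rt_spin_lock_irqsave() additionally masks local interrupts, so the *_irqsave variants are the ones to use when the protected data is also touched from interrupt context. A sketch of that pattern (hypothetical driver code, not part of this commit; rx_lock/rx_buf/rx_len are illustrative names):

    static rt_spinlock_t rx_lock;        /* guards rx_buf/rx_len, shared with an ISR */
    static rt_uint8_t    rx_buf[64];
    static rt_size_t     rx_len;

    /* thread context: drain the buffer the ISR fills */
    rt_size_t rx_read(rt_uint8_t *dst, rt_size_t max)
    {
        rt_base_t level;
        rt_size_t n;

        level = rt_spin_lock_irqsave(&rx_lock);  /* IRQs masked + lock held */
        n = (rx_len > max) ? max : rx_len;
        rt_memcpy(dst, rx_buf, n);
        rx_len = 0;
        rt_spin_unlock_irqrestore(&rx_lock, level);

        return n;
    }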
@@ -840,11 +840,14 @@ void rt_enter_critical(void)
      */

     /* lock scheduler for all cpus */
-    if (current_thread->scheduler_lock_nest == !!current_thread->cpus_lock_nest)
+    if (current_thread->critical_lock_nest == 0)
     {
         rt_hw_spin_lock(&_rt_critical_lock);
     }

+    /* critical for local cpu */
+    current_thread->critical_lock_nest ++;
+
     /* lock scheduler for local cpu */
     current_thread->scheduler_lock_nest ++;
@@ -892,7 +895,9 @@ void rt_exit_critical(void)
     current_thread->scheduler_lock_nest --;

-    if (current_thread->scheduler_lock_nest == !!current_thread->cpus_lock_nest)
+    current_thread->critical_lock_nest --;
+
+    if (current_thread->critical_lock_nest == 0)
     {
         rt_hw_spin_unlock(&_rt_critical_lock);
     }
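Why the new counter: rt_spin_lock() now raises scheduler_lock_nest via rt_preempt_disable(), so the old test scheduler_lock_nest == !!cpus_lock_nest can no longer identify the outermost rt_enter_critical() call. The dedicated critical_lock_nest counter makes the nesting explicit:

    /* only the outermost pair touches _rt_critical_lock */
    rt_enter_critical();    /* critical_lock_nest 0 -> 1: takes _rt_critical_lock */
    rt_enter_critical();    /* 1 -> 2: counter only, no hardware lock operation   */
    rt_exit_critical();     /* 2 -> 1                                             */
    rt_exit_critical();     /* 1 -> 0: releases _rt_critical_lock                 */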
@@ -951,7 +956,7 @@ rt_uint16_t rt_critical_level(void)
 #ifdef RT_USING_SMP
     struct rt_thread *current_thread = rt_cpu_self()->current_thread;

-    return current_thread->scheduler_lock_nest;
+    return current_thread->critical_lock_nest;
 #else
     return rt_scheduler_lock_nest;
 #endif /*RT_USING_SMP*/
@@ -172,6 +172,7 @@ static rt_err_t _rt_thread_init(struct rt_thread *thread,
     /* lock init */
     thread->scheduler_lock_nest = 0;
     thread->cpus_lock_nest = 0;
+    thread->critical_lock_nest = 0;
 #endif /*RT_USING_SMP*/

     /* initialize cleanup function and user data */