add cpu up

Shell 2024-04-19 10:45:09 +08:00 committed by Meco Man
parent 2aacba2c86
commit 451ac03965
5 changed files with 140 additions and 98 deletions

include/rtthread.h

@ -604,45 +604,12 @@ rt_thread_t rt_thread_defunct_dequeue(void);
/**
 * spinlock
 */
struct rt_spinlock;

void rt_spin_lock_init(struct rt_spinlock *lock);
void rt_spin_lock(struct rt_spinlock *lock);
void rt_spin_unlock(struct rt_spinlock *lock);
rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock);
void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level);
/**@}*/
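With the UP inline fallbacks moved out of the header (they reappear in src/cpu_up.c below), the five declarations above are the whole public spinlock API for both builds. A minimal usage sketch, not part of the commit; demo_lock, shared_counter and demo_increment are hypothetical names:

#include <rtthread.h>

/* illustrative names, not kernel symbols */
static struct rt_spinlock demo_lock = RT_SPINLOCK_INIT;
static rt_uint32_t shared_counter = 0;

void demo_increment(void)
{
    rt_base_t level;

    /* IRQ-safe critical section: on SMP this spins on the hardware lock,
     * on UP it reduces to interrupt-disable plus rt_enter_critical() */
    level = rt_spin_lock_irqsave(&demo_lock);
    shared_counter++;
    rt_spin_unlock_irqrestore(&demo_lock, level);
}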

include/rtdef.h

@ -135,62 +135,76 @@ struct rt_spinlock
#endif /* RT_DEBUGING_SPINLOCK */
};
#ifndef RT_SPINLOCK_INIT
#define RT_SPINLOCK_INIT {{0}} /* can be overridden by cpuport.h */
#endif /* RT_SPINLOCK_INIT */

#else /* !RT_USING_SMP */

struct rt_spinlock
{
#ifdef RT_USING_DEBUG
    rt_uint32_t critical_level;
#endif /* RT_USING_DEBUG */
    rt_ubase_t lock;
};
#define RT_SPINLOCK_INIT {0}
#endif /* RT_USING_SMP */

#if defined(RT_DEBUGING_SPINLOCK) && defined(RT_USING_SMP)

#define __OWNER_MAGIC ((void *)0xdeadbeaf)

#if defined(__GNUC__)
#define __GET_RETURN_ADDRESS __builtin_return_address(0)
#else /* !__GNUC__ */
#define __GET_RETURN_ADDRESS RT_NULL
#endif /* __GNUC__ */

#define _SPIN_LOCK_DEBUG_OWNER(lock)                  \
    do                                                \
    {                                                 \
        struct rt_thread *_curthr = rt_thread_self(); \
        if (_curthr != RT_NULL)                       \
        {                                             \
            (lock)->owner = _curthr;                  \
            (lock)->pc = __GET_RETURN_ADDRESS;        \
        }                                             \
    } while (0)

#define _SPIN_UNLOCK_DEBUG_OWNER(lock) \
    do                                 \
    {                                  \
        (lock)->owner = __OWNER_MAGIC; \
        (lock)->pc = RT_NULL;          \
    } while (0)

#else /* !RT_DEBUGING_SPINLOCK */

#define _SPIN_LOCK_DEBUG_OWNER(lock)
#define _SPIN_UNLOCK_DEBUG_OWNER(lock)
#endif /* RT_DEBUGING_SPINLOCK */

#ifdef RT_USING_DEBUG

#define _SPIN_LOCK_DEBUG_CRITICAL(lock)                   \
    do                                                    \
    {                                                     \
        struct rt_thread *_curthr = rt_thread_self();     \
        if (_curthr != RT_NULL)                           \
        {                                                 \
            (lock)->critical_level = rt_critical_level(); \
        }                                                 \
    } while (0)

#define _SPIN_UNLOCK_DEBUG_CRITICAL(lock, critical) \
    do                                              \
    {                                               \
        (critical) = (lock)->critical_level;        \
    } while (0)

#else /* !RT_USING_DEBUG */

#define _SPIN_LOCK_DEBUG_CRITICAL(lock)
#define _SPIN_UNLOCK_DEBUG_CRITICAL(lock, critical) (critical = 0)
#endif /* RT_USING_DEBUG */
#define RT_SPIN_LOCK_DEBUG(lock) \
@ -207,22 +221,6 @@ struct rt_spinlock
_SPIN_UNLOCK_DEBUG_CRITICAL(lock, critical); \
} while (0)
typedef struct rt_spinlock rt_spinlock_t;
#define RT_DEFINE_SPINLOCK(x) struct rt_spinlock x = RT_SPINLOCK_INIT
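A hedged illustration of the owner tracking above, assuming (as in the SMP implementation) that rt_spin_lock()/rt_spin_unlock() run the RT_SPIN_LOCK_DEBUG()/RT_SPIN_UNLOCK_DEBUG() paths when RT_DEBUGING_SPINLOCK and RT_USING_SMP are both enabled; dbg_lock and dbg_take are made-up names:

#include <rtthread.h>

RT_DEFINE_SPINLOCK(dbg_lock); /* struct rt_spinlock dbg_lock = RT_SPINLOCK_INIT */

void dbg_take(void)
{
    rt_spin_lock(&dbg_lock);
    /* _SPIN_LOCK_DEBUG_OWNER() has stored rt_thread_self() in
     * dbg_lock.owner and the caller's return address in dbg_lock.pc,
     * so a deadlock dump can show who took the lock and from where */
    rt_spin_unlock(&dbg_lock);
    /* _SPIN_UNLOCK_DEBUG_OWNER() resets owner to __OWNER_MAGIC
     * (0xdeadbeaf) and pc to RT_NULL, marking the lock as released */
}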

src/SConscript

@ -25,9 +25,9 @@ if GetDepend('RT_USING_DEVICE') == False:
    SrcRemove(src, ['device.c'])

if GetDepend('RT_USING_SMP') == False:
    SrcRemove(src, ['cpu_mp.c', 'scheduler_mp.c'])
else:
    SrcRemove(src, ['cpu_up.c', 'scheduler_up.c'])
LOCAL_CFLAGS = ''
LINKFLAGS = ''

src/cpu_mp.c (renamed from src/cpu.c)

@ -21,7 +21,6 @@
rt_base_t _cpus_critical_level;
#endif /* RT_USING_DEBUG */
static struct rt_cpu _cpus[RT_CPUS_NR];
rt_hw_spinlock_t _cpus_lock;
#if defined(RT_DEBUGING_SPINLOCK)
@ -217,4 +216,3 @@ void rt_cpus_lock_status_restore(struct rt_thread *thread)
rt_sched_post_ctx_switch(thread);
}
RTM_EXPORT(rt_cpus_lock_status_restore);
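For context, _cpus_lock above is the lock behind the existing rt_cpus_lock()/rt_cpus_unlock() pair; a short sketch of the usual pattern (demo_touch_shared_state is a hypothetical name):

#include <rtthread.h>

void demo_touch_shared_state(void)
{
    rt_base_t level;

    level = rt_cpus_lock();  /* takes the global _cpus_lock; other CPUs spin on it */
    /* ... modify state shared across all CPUs ... */
    rt_cpus_unlock(level);
}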

src/cpu_up.c (new file, 79 lines)

@ -0,0 +1,79 @@
/*
* Copyright (c) 2006-2024, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
 * Date           Author       Notes
 * 2024-04-19     Shell        Fixup UP irq spinlock
 */

#include <rthw.h>
#include <rtthread.h>

/**
 * @brief Initialize a static spinlock object.
 *
 * @param lock is a pointer to the spinlock to initialize.
 */
void rt_spin_lock_init(struct rt_spinlock *lock)
{
    RT_UNUSED(lock);
}

/**
 * @brief This function will lock the spinlock and lock the thread scheduler.
 *
 * @note On UP there is no other CPU to contend with: taking the lock is
 *       implemented by entering a critical section, which disables preemption.
 *
 * @param lock is a pointer to the spinlock.
 */
void rt_spin_lock(struct rt_spinlock *lock)
{
    RT_UNUSED(lock);
    rt_enter_critical();
}

/**
 * @brief This function will unlock the spinlock and unlock the thread scheduler.
 *
 * @param lock is a pointer to the spinlock.
 */
void rt_spin_unlock(struct rt_spinlock *lock)
{
    RT_UNUSED(lock);
    rt_exit_critical();
}

/**
 * @brief This function will disable the local interrupt and then lock the
 *        spinlock and the thread scheduler.
 *
 * @note On UP the lock can never be contended; interrupts are disabled and
 *       a critical section is entered instead of polling a lock word.
 *
 * @param lock is a pointer to the spinlock.
 *
 * @return Return the current CPU interrupt status.
 */
rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
{
    rt_base_t level;

    RT_UNUSED(lock);
    level = rt_hw_interrupt_disable();
    rt_enter_critical();
    return level;
}

/**
 * @brief This function will unlock the spinlock and the thread scheduler,
 *        then restore the CPU interrupt status.
 *
 * @param lock is a pointer to the spinlock.
 *
 * @param level is the interrupt status returned by rt_spin_lock_irqsave().
 */
void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
{
    RT_UNUSED(lock);
    rt_exit_critical();
    rt_hw_interrupt_enable(level);
}
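A short sketch of the property the UP implementation above relies on: rt_spin_lock_irqsave() pairs nest safely as long as the saved levels are restored in reverse order (a_lock, b_lock and demo_nested are hypothetical names):

#include <rtthread.h>

static struct rt_spinlock a_lock = RT_SPINLOCK_INIT;
static struct rt_spinlock b_lock = RT_SPINLOCK_INIT;

void demo_nested(void)
{
    rt_base_t la, lb;

    la = rt_spin_lock_irqsave(&a_lock); /* IRQs off, scheduler locked */
    lb = rt_spin_lock_irqsave(&b_lock); /* nesting is fine: each level is saved */
    /* ... */
    rt_spin_unlock_irqrestore(&b_lock, lb); /* restore the inner level first */
    rt_spin_unlock_irqrestore(&a_lock, la); /* IRQs back to the original state */
}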