/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2011-09-15     Bernard      first version
 * 2019-07-28     zdzn         add smp support
 * 2021-12-21     GuEe-GUI     set tpidr_el1 as multiprocessor id instead of mpidr_el1
 * 2021-12-28     GuEe-GUI     add spinlock for aarch64
 */
|
|
|
|
|
|
|
|
#include <rthw.h>
#include <rtthread.h>

#include <cpuport.h>
|
|
|
|
|
|
|
|
#ifdef RT_USING_SMP
|
|
|
|
/* The more common mpidr_el1 table, redefine it in BSP if it is in other cases */
|
2022-12-12 02:12:03 +08:00
|
|
|
rt_weak rt_uint64_t rt_cpu_mpidr_early[] =
|
2022-01-07 13:49:06 +08:00
|
|
|
{
|
|
|
|
[0] = 0x80000000,
|
|
|
|
[1] = 0x80000001,
|
|
|
|
[2] = 0x80000002,
|
|
|
|
[3] = 0x80000003,
|
|
|
|
[4] = 0x80000004,
|
|
|
|
[5] = 0x80000005,
|
|
|
|
[6] = 0x80000006,
|
|
|
|
[7] = 0x80000007,
|
|
|
|
[RT_CPUS_NR] = 0
|
|
|
|
};
|
|
|
|
#endif
|
2020-01-10 10:38:21 +08:00
|
|
|
|
|
|
|
int rt_hw_cpu_id(void)
|
|
|
|
{
|
|
|
|
rt_base_t value;
|
|
|
|
|
2022-01-07 13:49:06 +08:00
|
|
|
__asm__ volatile ("mrs %0, tpidr_el1":"=r"(value));
|
|
|
|
|
|
|
|
return value;
|
|
|
|
}
|
2020-01-10 10:38:21 +08:00
|
|
|
|
|
|
|
#ifdef RT_USING_SMP
|
|
|
|
/**
 * Put a spinlock into its initial, released state.
 *
 * Writing the whole lock word clears both halves of the ticket lock
 * (owner and next ticket) in a single store.
 *
 * @param lock the spinlock to initialize
 */
void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock)
{
    lock->slock = 0;
}
|
|
|
|
|
|
|
|
/**
 * Acquire a ticket spinlock, spinning until this core's ticket number
 * matches the owner field.
 *
 * The lock word holds two halfwords: owner (low 16 bits) and next ticket
 * (high 16 bits, incremented via the "I" operand 1 << 16).  A core takes
 * a ticket with an ldaxr/stxr loop, then waits with wfe until the owner
 * halfword equals its ticket.
 *
 * @param lock the spinlock to acquire
 */
void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
{
    rt_hw_spinlock_t lock_val, new_lockval;
    unsigned int tmp;

    __asm__ volatile (
    /* Increment the next ticket. */
    "   prfm pstl1strm, %3\n"
    "1: ldaxr %w0, %3\n"
    "   add %w1, %w0, %w5\n"
    "   stxr %w2, %w1, %3\n"
    "   cbnz %w2, 1b\n"
    /* Check whether we got the lock: our ticket == owner halfword. */
    "   eor %w1, %w0, %w0, ror #16\n"
    "   cbz %w1, 3f\n"
    /*
     * Didn't get lock and spin on the owner.
     * Should send a local event to avoid missing an
     * unlock before the exclusive load.
     */
    "   sevl\n"
    "2: wfe\n"
    "   ldaxrh %w2, %4\n"
    "   eor %w1, %w2, %w0, lsr #16\n"
    "   cbnz %w1, 2b\n"
    /* got the lock. */
    "3:"
    : "=&r" (lock_val), "=&r" (new_lockval), "=&r" (tmp), "+Q" (*lock)
    : "Q" (lock->tickets.owner), "I" (1 << 16)
    : "memory");
    /* Full barrier so memory accesses in the critical section cannot be
     * reordered before the lock acquisition. */
    __DMB();
}
|
|
|
|
|
|
|
|
/**
 * Release a ticket spinlock by bumping the owner halfword, handing the
 * lock to the next waiting ticket holder.
 *
 * The stlrh store-release also generates the event that wakes cores
 * parked in wfe inside rt_hw_spin_lock().
 *
 * @param lock the spinlock to release
 */
void rt_hw_spin_unlock(rt_hw_spinlock_t *lock)
{
    /* Barrier: critical-section accesses must complete before the
     * owner field is advanced. */
    __DMB();
    __asm__ volatile (
    "stlrh %w1, %0\n"
    : "=Q" (lock->tickets.owner)
    : "r" (lock->tickets.owner + 1)
    : "memory");
}
|
|
|
|
#endif /*RT_USING_SMP*/
|
|
|
|
|
|
|
|
/**
 * @addtogroup ARM CPU
 */
/*@{*/
|
|
|
|
|
|
|
|
/** shutdown CPU */
|
2022-12-12 02:12:03 +08:00
|
|
|
rt_weak void rt_hw_cpu_shutdown()
|
2020-01-10 10:38:21 +08:00
|
|
|
{
|
2022-04-20 10:56:11 +08:00
|
|
|
register rt_int32_t level;
|
2020-01-10 10:38:21 +08:00
|
|
|
rt_kprintf("shutdown...\n");
|
|
|
|
|
|
|
|
level = rt_hw_interrupt_disable();
|
|
|
|
while (level)
|
|
|
|
{
|
|
|
|
RT_ASSERT(0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*@}*/
|