AArch64: support hardware atomics

Support the AArch64 rt_hw_atomic_* API.
Add atomic helpers implemented on top of the rt_atomic API (a usage sketch follows below):
    rt_atomic_dec_and_test
    rt_atomic_fetch_add_unless
    rt_atomic_add_unless
    rt_atomic_inc_not_zero

Signed-off-by: GuEe-GUI <GuEe-GUI@github.com>
Author:  wusongjie  2023-07-05 13:38:29 +08:00
Parent:  179157f4e1
Commit:  3b7e46de7e
2 changed files with 139 additions and 0 deletions

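A minimal usage sketch of the new helpers (the counter, limit, and function names below are hypothetical, not part of this commit):

    #include <rtthread.h>
    #include <rtatomic.h>

    #define MY_USER_LIMIT 100

    static rt_atomic_t users = 0;

    /* Register one more user; fails once the limit is reached. */
    rt_bool_t user_register(void)
    {
        /* Adds 1 only while the current value is not MY_USER_LIMIT. */
        return rt_atomic_add_unless(&users, 1, MY_USER_LIMIT);
    }

    /* Take a reference only while the object is still live (count != 0). */
    rt_bool_t obj_ref_get(rt_atomic_t *refcnt)
    {
        return rt_atomic_inc_not_zero(refcnt);
    }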

@@ -11,6 +11,8 @@
#ifndef __RT_ATOMIC_H__
#define __RT_ATOMIC_H__

#include <rthw.h>

#if !defined(__cplusplus)
rt_atomic_t rt_hw_atomic_load(volatile rt_atomic_t *ptr);

@@ -206,6 +208,35 @@ rt_inline rt_atomic_t rt_soft_atomic_compare_exchange_strong(volatile rt_atomic_
}
#endif /* RT_USING_STDC_ATOMIC */

rt_inline rt_bool_t rt_atomic_dec_and_test(volatile rt_atomic_t *ptr)
{
    return rt_atomic_sub(ptr, 1) == 0;
}
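
/* Add a to *ptr unless the current value equals u; the value observed before the add is returned. */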
rt_inline rt_atomic_t rt_atomic_fetch_add_unless(volatile rt_atomic_t *ptr, rt_atomic_t a, rt_atomic_t u)
{
    rt_atomic_t c = rt_atomic_load(ptr);

    do
    {
        if (c == u)
        {
            break;
        }
    } while (!rt_atomic_compare_exchange_strong(ptr, &c, c + a));

    return c;
}
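
/* Returns RT_TRUE when the add was performed, RT_FALSE when *ptr already equaled u. */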
rt_inline rt_bool_t rt_atomic_add_unless(volatile rt_atomic_t *ptr, rt_atomic_t a, rt_atomic_t u)
{
    return rt_atomic_fetch_add_unless(ptr, a, u) != u;
}
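
/* Increment *ptr only when it is non-zero; returns RT_TRUE when the increment happened. */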
rt_inline rt_bool_t rt_atomic_inc_not_zero(volatile rt_atomic_t *ptr)
{
    return rt_atomic_add_unless(ptr, 1, 0);
}

#endif /* __cplusplus */

#endif /* __RT_ATOMIC_H__ */


@@ -0,0 +1,108 @@
/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-05-18     GuEe-GUI     first version
 */
#include <rtatomic.h>
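
/* Plain load followed by a full inner-shareable barrier so that later accesses are ordered after it. */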
rt_atomic_t rt_hw_atomic_load(volatile rt_atomic_t *ptr)
{
    rt_atomic_t ret;

    __asm__ volatile (
        " ldr %w0, %1\n"
        " dmb ish"
        : "=r" (ret)
        : "Q" (*ptr)
        : "memory");

    return ret;
}
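
/* Store-release (stlr) followed by a full barrier. */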
void rt_hw_atomic_store(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    __asm__ volatile (
        " stlr %w1, %0\n"
        " dmb ish"
        : "=Q" (*ptr)
        : "r" (val)
        : "memory");
}
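
/*
 * Fetch-and-<op> template: an ldxr/stlxr exclusive loop that retries until the
 * exclusive store succeeds, ends with a full barrier, and returns the value
 * read before the operation was applied.
 */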
#define AARCH64_ATOMIC_OP_RETURN(op, ins, constraint)                           \
rt_atomic_t rt_hw_atomic_##op(volatile rt_atomic_t *ptr, rt_atomic_t in_val)    \
{                                                                               \
    rt_atomic_t tmp, val, result;                                               \
                                                                                \
    __asm__ volatile (                                                          \
        " prfm pstl1strm, %3\n"                                                 \
        "1: ldxr %w0, %3\n"                                                     \
        " "#ins " %w1, %w0, %w4\n"                                              \
        " stlxr %w2, %w1, %3\n"                                                 \
        " cbnz %w2, 1b\n"                                                       \
        " dmb ish"                                                              \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (*ptr)                 \
        : __RT_STRINGIFY(constraint) "r" (in_val)                               \
        : "memory");                                                            \
                                                                                \
    return result;                                                              \
}
AARCH64_ATOMIC_OP_RETURN(add, add, I)
AARCH64_ATOMIC_OP_RETURN(sub, sub, J)
AARCH64_ATOMIC_OP_RETURN(and, and, K)
AARCH64_ATOMIC_OP_RETURN(or, orr, K)
AARCH64_ATOMIC_OP_RETURN(xor, eor, K)
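
/* Swap *ptr with val using an exclusive load/store loop; returns the previous value. */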
rt_atomic_t rt_hw_atomic_exchange(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t ret, tmp;

    __asm__ volatile (
        " prfm pstl1strm, %2\n"
        "1: ldxr %w0, %2\n"
        " stlxr %w1, %w3, %2\n"
        " cbnz %w1, 1b\n"
        " dmb ish"
        : "=&r" (ret), "=&r" (tmp), "+Q" (*ptr)
        : "r" (val)
        : "memory");

    return ret;
}
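
/* The flag helpers reuse the bitwise primitives: clear ANDs with 0, test-and-set ORs with 1. */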
void rt_hw_atomic_flag_clear(volatile rt_atomic_t *ptr)
{
    rt_hw_atomic_and(ptr, 0);
}

rt_atomic_t rt_hw_atomic_flag_test_and_set(volatile rt_atomic_t *ptr)
{
    return rt_hw_atomic_or(ptr, 1);
}
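
/* Store new only when the current value equals *old; the value read from *ptr is returned. */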
rt_atomic_t rt_hw_atomic_compare_exchange_strong(volatile rt_atomic_t *ptr, rt_atomic_t *old, rt_atomic_t new)
{
    rt_atomic_t tmp, oldval;

    __asm__ volatile (
        " prfm pstl1strm, %2\n"
        "1: ldxr %w0, %2\n"
        " eor %w1, %w0, %w3\n"
        " cbnz %w1, 2f\n"
        " stlxr %w1, %w4, %2\n"
        " cbnz %w1, 1b\n"
        " dmb ish\n"
        "2:"
        : "=&r" (oldval), "=&r" (tmp), "+Q" (*ptr)
        : "Kr" (*old), "r" (new)
        : "memory");

    return oldval;
}