Currently each ordering variant has several potential definitions,
with a mixture of preprocessor and C definitions, including several
copies of its C prototype, e.g.
| #if defined(arch_atomic_fetch_andnot_acquire)
| #define raw_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
| #elif defined(arch_atomic_fetch_andnot_relaxed)
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
| 	__atomic_acquire_fence();
| 	return ret;
| }
| #elif defined(arch_atomic_fetch_andnot)
| #define raw_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
| #else
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	return raw_atomic_fetch_and_acquire(~i, v);
| }
| #endif
Make this a bit simpler by defining the C prototype once, and writing
the various potential definitions as plain C code guarded by ifdeffery.
For example, the above becomes:
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| #if defined(arch_atomic_fetch_andnot_acquire)
| 	return arch_atomic_fetch_andnot_acquire(i, v);
| #elif defined(arch_atomic_fetch_andnot_relaxed)
| 	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
| 	__atomic_acquire_fence();
| 	return ret;
| #elif defined(arch_atomic_fetch_andnot)
| 	return arch_atomic_fetch_andnot(i, v);
| #else
| 	return raw_atomic_fetch_and_acquire(~i, v);
| #endif
| }
This is far easier to read. As we now always have a single copy of the
C prototype wrapping all the potential definitions, there is an obvious
single location for kerneldoc comments.
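As an illustrative sketch (the kerneldoc wording below is an assumption
for illustration only, not something added by this patch), such a
comment would sit directly above the one prototype:
| /**
|  * raw_atomic_fetch_andnot_acquire() - atomic fetch-and-andnot with acquire ordering
|  * @i: value whose complement is ANDed into @v
|  * @v: pointer to atomic_t
|  *
|  * Atomically updates @v to (@v & ~@i) with acquire ordering, returning
|  * the original value of @v.
|  */
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	/* single ifdeffed body, as in the example above */
| }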
At the same time, the fallbacks for raw_atomic*_xchg() are made to use
'new' rather than 'i' as the name of the new value. This is what the
existing fallback template used, and is more consistent with the
raw_atomic{_try,}_cmpxchg() fallbacks.
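For reference, the reworked raw_atomic_xchg() fallback (exactly as
generated later in this header) now reads:
| static __always_inline int
| raw_atomic_xchg(atomic_t *v, int new)
| {
| #if defined(arch_atomic_xchg)
| 	return arch_atomic_xchg(v, new);
| #elif defined(arch_atomic_xchg_relaxed)
| 	int ret;
| 	__atomic_pre_full_fence();
| 	ret = arch_atomic_xchg_relaxed(v, new);
| 	__atomic_post_full_fence();
| 	return ret;
| #else
| 	return raw_xchg(&v->counter, new);
| #endif
| }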
There should be no functional change as a result of this patch.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-24-mark.rutland@arm.com
2815 lines
71 KiB
C
// SPDX-License-Identifier: GPL-2.0

// Generated by scripts/atomic/gen-atomic-fallback.sh
// DO NOT MODIFY THIS FILE DIRECTLY

#ifndef _LINUX_ATOMIC_FALLBACK_H
#define _LINUX_ATOMIC_FALLBACK_H

#include <linux/compiler.h>

#if defined(arch_xchg)
#define raw_xchg arch_xchg
#elif defined(arch_xchg_relaxed)
#define raw_xchg(...) \
	__atomic_op_fence(arch_xchg, __VA_ARGS__)
#else
extern void raw_xchg_not_implemented(void);
#define raw_xchg(...) raw_xchg_not_implemented()
#endif

#if defined(arch_xchg_acquire)
#define raw_xchg_acquire arch_xchg_acquire
#elif defined(arch_xchg_relaxed)
#define raw_xchg_acquire(...) \
	__atomic_op_acquire(arch_xchg, __VA_ARGS__)
#elif defined(arch_xchg)
#define raw_xchg_acquire arch_xchg
#else
extern void raw_xchg_acquire_not_implemented(void);
#define raw_xchg_acquire(...) raw_xchg_acquire_not_implemented()
#endif

#if defined(arch_xchg_release)
#define raw_xchg_release arch_xchg_release
#elif defined(arch_xchg_relaxed)
#define raw_xchg_release(...) \
	__atomic_op_release(arch_xchg, __VA_ARGS__)
#elif defined(arch_xchg)
#define raw_xchg_release arch_xchg
#else
extern void raw_xchg_release_not_implemented(void);
#define raw_xchg_release(...) raw_xchg_release_not_implemented()
#endif

#if defined(arch_xchg_relaxed)
#define raw_xchg_relaxed arch_xchg_relaxed
#elif defined(arch_xchg)
#define raw_xchg_relaxed arch_xchg
#else
extern void raw_xchg_relaxed_not_implemented(void);
#define raw_xchg_relaxed(...) raw_xchg_relaxed_not_implemented()
#endif

#if defined(arch_cmpxchg)
#define raw_cmpxchg arch_cmpxchg
#elif defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg(...) \
	__atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
#else
extern void raw_cmpxchg_not_implemented(void);
#define raw_cmpxchg(...) raw_cmpxchg_not_implemented()
#endif

#if defined(arch_cmpxchg_acquire)
#define raw_cmpxchg_acquire arch_cmpxchg_acquire
#elif defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg_acquire(...) \
	__atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
#elif defined(arch_cmpxchg)
#define raw_cmpxchg_acquire arch_cmpxchg
#else
extern void raw_cmpxchg_acquire_not_implemented(void);
#define raw_cmpxchg_acquire(...) raw_cmpxchg_acquire_not_implemented()
#endif

#if defined(arch_cmpxchg_release)
#define raw_cmpxchg_release arch_cmpxchg_release
#elif defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg_release(...) \
	__atomic_op_release(arch_cmpxchg, __VA_ARGS__)
#elif defined(arch_cmpxchg)
#define raw_cmpxchg_release arch_cmpxchg
#else
extern void raw_cmpxchg_release_not_implemented(void);
#define raw_cmpxchg_release(...) raw_cmpxchg_release_not_implemented()
#endif

#if defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg_relaxed arch_cmpxchg_relaxed
#elif defined(arch_cmpxchg)
#define raw_cmpxchg_relaxed arch_cmpxchg
#else
extern void raw_cmpxchg_relaxed_not_implemented(void);
#define raw_cmpxchg_relaxed(...) raw_cmpxchg_relaxed_not_implemented()
#endif

#if defined(arch_cmpxchg64)
#define raw_cmpxchg64 arch_cmpxchg64
#elif defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64(...) \
	__atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
#else
extern void raw_cmpxchg64_not_implemented(void);
#define raw_cmpxchg64(...) raw_cmpxchg64_not_implemented()
#endif

#if defined(arch_cmpxchg64_acquire)
#define raw_cmpxchg64_acquire arch_cmpxchg64_acquire
#elif defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64_acquire(...) \
	__atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
#elif defined(arch_cmpxchg64)
#define raw_cmpxchg64_acquire arch_cmpxchg64
#else
extern void raw_cmpxchg64_acquire_not_implemented(void);
#define raw_cmpxchg64_acquire(...) raw_cmpxchg64_acquire_not_implemented()
#endif

#if defined(arch_cmpxchg64_release)
#define raw_cmpxchg64_release arch_cmpxchg64_release
#elif defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64_release(...) \
	__atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
#elif defined(arch_cmpxchg64)
#define raw_cmpxchg64_release arch_cmpxchg64
#else
extern void raw_cmpxchg64_release_not_implemented(void);
#define raw_cmpxchg64_release(...) raw_cmpxchg64_release_not_implemented()
#endif

#if defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64_relaxed arch_cmpxchg64_relaxed
#elif defined(arch_cmpxchg64)
#define raw_cmpxchg64_relaxed arch_cmpxchg64
#else
extern void raw_cmpxchg64_relaxed_not_implemented(void);
#define raw_cmpxchg64_relaxed(...) raw_cmpxchg64_relaxed_not_implemented()
#endif

#if defined(arch_cmpxchg128)
#define raw_cmpxchg128 arch_cmpxchg128
#elif defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128(...) \
	__atomic_op_fence(arch_cmpxchg128, __VA_ARGS__)
#else
extern void raw_cmpxchg128_not_implemented(void);
#define raw_cmpxchg128(...) raw_cmpxchg128_not_implemented()
#endif

#if defined(arch_cmpxchg128_acquire)
#define raw_cmpxchg128_acquire arch_cmpxchg128_acquire
#elif defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128_acquire(...) \
	__atomic_op_acquire(arch_cmpxchg128, __VA_ARGS__)
#elif defined(arch_cmpxchg128)
#define raw_cmpxchg128_acquire arch_cmpxchg128
#else
extern void raw_cmpxchg128_acquire_not_implemented(void);
#define raw_cmpxchg128_acquire(...) raw_cmpxchg128_acquire_not_implemented()
#endif

#if defined(arch_cmpxchg128_release)
#define raw_cmpxchg128_release arch_cmpxchg128_release
#elif defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128_release(...) \
	__atomic_op_release(arch_cmpxchg128, __VA_ARGS__)
#elif defined(arch_cmpxchg128)
#define raw_cmpxchg128_release arch_cmpxchg128
#else
extern void raw_cmpxchg128_release_not_implemented(void);
#define raw_cmpxchg128_release(...) raw_cmpxchg128_release_not_implemented()
#endif

#if defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128_relaxed arch_cmpxchg128_relaxed
#elif defined(arch_cmpxchg128)
#define raw_cmpxchg128_relaxed arch_cmpxchg128
#else
extern void raw_cmpxchg128_relaxed_not_implemented(void);
#define raw_cmpxchg128_relaxed(...) raw_cmpxchg128_relaxed_not_implemented()
#endif

#if defined(arch_try_cmpxchg)
#define raw_try_cmpxchg arch_try_cmpxchg
#elif defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg(...) \
	__atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
#else
#define raw_try_cmpxchg(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg_acquire)
#define raw_try_cmpxchg_acquire arch_try_cmpxchg_acquire
#elif defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg_acquire(...) \
	__atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
#elif defined(arch_try_cmpxchg)
#define raw_try_cmpxchg_acquire arch_try_cmpxchg
#else
#define raw_try_cmpxchg_acquire(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_acquire((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg_release)
#define raw_try_cmpxchg_release arch_try_cmpxchg_release
#elif defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg_release(...) \
	__atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
#elif defined(arch_try_cmpxchg)
#define raw_try_cmpxchg_release arch_try_cmpxchg
#else
#define raw_try_cmpxchg_release(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_release((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg_relaxed arch_try_cmpxchg_relaxed
#elif defined(arch_try_cmpxchg)
#define raw_try_cmpxchg_relaxed arch_try_cmpxchg
#else
#define raw_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_relaxed((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64 arch_try_cmpxchg64
#elif defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64(...) \
	__atomic_op_fence(arch_try_cmpxchg64, __VA_ARGS__)
#else
#define raw_try_cmpxchg64(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64_acquire)
#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64_acquire
#elif defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64_acquire(...) \
	__atomic_op_acquire(arch_try_cmpxchg64, __VA_ARGS__)
#elif defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64
#else
#define raw_try_cmpxchg64_acquire(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_acquire((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64_release)
#define raw_try_cmpxchg64_release arch_try_cmpxchg64_release
#elif defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64_release(...) \
	__atomic_op_release(arch_try_cmpxchg64, __VA_ARGS__)
#elif defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64_release arch_try_cmpxchg64
#else
#define raw_try_cmpxchg64_release(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_release((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64_relaxed
#elif defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64
#else
#define raw_try_cmpxchg64_relaxed(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_relaxed((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128 arch_try_cmpxchg128
#elif defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128(...) \
	__atomic_op_fence(arch_try_cmpxchg128, __VA_ARGS__)
#else
#define raw_try_cmpxchg128(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128_acquire)
#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128_acquire
#elif defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128_acquire(...) \
	__atomic_op_acquire(arch_try_cmpxchg128, __VA_ARGS__)
#elif defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128
#else
#define raw_try_cmpxchg128_acquire(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_acquire((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128_release)
#define raw_try_cmpxchg128_release arch_try_cmpxchg128_release
#elif defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128_release(...) \
	__atomic_op_release(arch_try_cmpxchg128, __VA_ARGS__)
#elif defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128_release arch_try_cmpxchg128
#else
#define raw_try_cmpxchg128_release(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_release((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128_relaxed
#elif defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128
#else
#define raw_try_cmpxchg128_relaxed(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_relaxed((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#define raw_cmpxchg_local arch_cmpxchg_local

#ifdef arch_try_cmpxchg_local
#define raw_try_cmpxchg_local arch_try_cmpxchg_local
#else
#define raw_try_cmpxchg_local(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_local((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#define raw_cmpxchg64_local arch_cmpxchg64_local

#ifdef arch_try_cmpxchg64_local
#define raw_try_cmpxchg64_local arch_try_cmpxchg64_local
#else
#define raw_try_cmpxchg64_local(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_local((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#define raw_cmpxchg128_local arch_cmpxchg128_local

#ifdef arch_try_cmpxchg128_local
#define raw_try_cmpxchg128_local arch_try_cmpxchg128_local
#else
#define raw_try_cmpxchg128_local(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_local((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#define raw_sync_cmpxchg arch_sync_cmpxchg

static __always_inline int
raw_atomic_read(const atomic_t *v)
{
	return arch_atomic_read(v);
}

static __always_inline int
raw_atomic_read_acquire(const atomic_t *v)
{
#if defined(arch_atomic_read_acquire)
	return arch_atomic_read_acquire(v);
#elif defined(arch_atomic_read)
	return arch_atomic_read(v);
#else
	int ret;

	if (__native_word(atomic_t)) {
		ret = smp_load_acquire(&(v)->counter);
	} else {
		ret = raw_atomic_read(v);
		__atomic_acquire_fence();
	}

	return ret;
#endif
}

static __always_inline void
raw_atomic_set(atomic_t *v, int i)
{
	arch_atomic_set(v, i);
}

static __always_inline void
raw_atomic_set_release(atomic_t *v, int i)
{
#if defined(arch_atomic_set_release)
	arch_atomic_set_release(v, i);
#elif defined(arch_atomic_set)
	arch_atomic_set(v, i);
#else
	if (__native_word(atomic_t)) {
		smp_store_release(&(v)->counter, i);
	} else {
		__atomic_release_fence();
		raw_atomic_set(v, i);
	}
#endif
}

static __always_inline void
raw_atomic_add(int i, atomic_t *v)
{
	arch_atomic_add(i, v);
}

static __always_inline int
raw_atomic_add_return(int i, atomic_t *v)
{
#if defined(arch_atomic_add_return)
	return arch_atomic_add_return(i, v);
#elif defined(arch_atomic_add_return_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_add_return_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_add_return"
#endif
}

static __always_inline int
raw_atomic_add_return_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_add_return_acquire)
	return arch_atomic_add_return_acquire(i, v);
#elif defined(arch_atomic_add_return_relaxed)
	int ret = arch_atomic_add_return_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_add_return)
	return arch_atomic_add_return(i, v);
#else
#error "Unable to define raw_atomic_add_return_acquire"
#endif
}

static __always_inline int
raw_atomic_add_return_release(int i, atomic_t *v)
{
#if defined(arch_atomic_add_return_release)
	return arch_atomic_add_return_release(i, v);
#elif defined(arch_atomic_add_return_relaxed)
	__atomic_release_fence();
	return arch_atomic_add_return_relaxed(i, v);
#elif defined(arch_atomic_add_return)
	return arch_atomic_add_return(i, v);
#else
#error "Unable to define raw_atomic_add_return_release"
#endif
}

static __always_inline int
raw_atomic_add_return_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_add_return_relaxed)
	return arch_atomic_add_return_relaxed(i, v);
#elif defined(arch_atomic_add_return)
	return arch_atomic_add_return(i, v);
#else
#error "Unable to define raw_atomic_add_return_relaxed"
#endif
}

static __always_inline int
raw_atomic_fetch_add(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add)
	return arch_atomic_fetch_add(i, v);
#elif defined(arch_atomic_fetch_add_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_add_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_fetch_add"
#endif
}

static __always_inline int
raw_atomic_fetch_add_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add_acquire)
	return arch_atomic_fetch_add_acquire(i, v);
#elif defined(arch_atomic_fetch_add_relaxed)
	int ret = arch_atomic_fetch_add_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_add)
	return arch_atomic_fetch_add(i, v);
#else
#error "Unable to define raw_atomic_fetch_add_acquire"
#endif
}

static __always_inline int
raw_atomic_fetch_add_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add_release)
	return arch_atomic_fetch_add_release(i, v);
#elif defined(arch_atomic_fetch_add_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_add_relaxed(i, v);
#elif defined(arch_atomic_fetch_add)
	return arch_atomic_fetch_add(i, v);
#else
#error "Unable to define raw_atomic_fetch_add_release"
#endif
}

static __always_inline int
raw_atomic_fetch_add_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add_relaxed)
	return arch_atomic_fetch_add_relaxed(i, v);
#elif defined(arch_atomic_fetch_add)
	return arch_atomic_fetch_add(i, v);
#else
#error "Unable to define raw_atomic_fetch_add_relaxed"
#endif
}

static __always_inline void
raw_atomic_sub(int i, atomic_t *v)
{
	arch_atomic_sub(i, v);
}

static __always_inline int
|
|
raw_atomic_sub_return(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_sub_return)
|
|
return arch_atomic_sub_return(i, v);
|
|
#elif defined(arch_atomic_sub_return_relaxed)
|
|
int ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_sub_return_relaxed(i, v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
#error "Unable to define raw_atomic_sub_return"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_sub_return_acquire(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_sub_return_acquire)
|
|
return arch_atomic_sub_return_acquire(i, v);
|
|
#elif defined(arch_atomic_sub_return_relaxed)
|
|
int ret = arch_atomic_sub_return_relaxed(i, v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_sub_return)
|
|
return arch_atomic_sub_return(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_sub_return_acquire"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_sub_return_release(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_sub_return_release)
|
|
return arch_atomic_sub_return_release(i, v);
|
|
#elif defined(arch_atomic_sub_return_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_sub_return_relaxed(i, v);
|
|
#elif defined(arch_atomic_sub_return)
|
|
return arch_atomic_sub_return(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_sub_return_release"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_sub_return_relaxed(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_sub_return_relaxed)
|
|
return arch_atomic_sub_return_relaxed(i, v);
|
|
#elif defined(arch_atomic_sub_return)
|
|
return arch_atomic_sub_return(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_sub_return_relaxed"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_sub(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_sub)
|
|
return arch_atomic_fetch_sub(i, v);
|
|
#elif defined(arch_atomic_fetch_sub_relaxed)
|
|
int ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_fetch_sub_relaxed(i, v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_sub"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_sub_acquire(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_sub_acquire)
|
|
return arch_atomic_fetch_sub_acquire(i, v);
|
|
#elif defined(arch_atomic_fetch_sub_relaxed)
|
|
int ret = arch_atomic_fetch_sub_relaxed(i, v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_fetch_sub)
|
|
return arch_atomic_fetch_sub(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_sub_acquire"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_sub_release(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_sub_release)
|
|
return arch_atomic_fetch_sub_release(i, v);
|
|
#elif defined(arch_atomic_fetch_sub_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_fetch_sub_relaxed(i, v);
|
|
#elif defined(arch_atomic_fetch_sub)
|
|
return arch_atomic_fetch_sub(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_sub_release"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_sub_relaxed(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_sub_relaxed)
|
|
return arch_atomic_fetch_sub_relaxed(i, v);
|
|
#elif defined(arch_atomic_fetch_sub)
|
|
return arch_atomic_fetch_sub(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_sub_relaxed"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline void
|
|
raw_atomic_inc(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_inc)
|
|
arch_atomic_inc(v);
|
|
#else
|
|
raw_atomic_add(1, v);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_inc_return(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_inc_return)
|
|
return arch_atomic_inc_return(v);
|
|
#elif defined(arch_atomic_inc_return_relaxed)
|
|
int ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_inc_return_relaxed(v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
return raw_atomic_add_return(1, v);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_inc_return_acquire(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_inc_return_acquire)
|
|
return arch_atomic_inc_return_acquire(v);
|
|
#elif defined(arch_atomic_inc_return_relaxed)
|
|
int ret = arch_atomic_inc_return_relaxed(v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_inc_return)
|
|
return arch_atomic_inc_return(v);
|
|
#else
|
|
return raw_atomic_add_return_acquire(1, v);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_inc_return_release(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_inc_return_release)
|
|
return arch_atomic_inc_return_release(v);
|
|
#elif defined(arch_atomic_inc_return_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_inc_return_relaxed(v);
|
|
#elif defined(arch_atomic_inc_return)
|
|
return arch_atomic_inc_return(v);
|
|
#else
|
|
return raw_atomic_add_return_release(1, v);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_inc_return_relaxed(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_inc_return_relaxed)
|
|
return arch_atomic_inc_return_relaxed(v);
|
|
#elif defined(arch_atomic_inc_return)
|
|
return arch_atomic_inc_return(v);
|
|
#else
|
|
return raw_atomic_add_return_relaxed(1, v);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_inc(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_inc)
|
|
return arch_atomic_fetch_inc(v);
|
|
#elif defined(arch_atomic_fetch_inc_relaxed)
|
|
int ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_fetch_inc_relaxed(v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
return raw_atomic_fetch_add(1, v);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_inc_acquire(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_inc_acquire)
|
|
return arch_atomic_fetch_inc_acquire(v);
|
|
#elif defined(arch_atomic_fetch_inc_relaxed)
|
|
int ret = arch_atomic_fetch_inc_relaxed(v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_fetch_inc)
|
|
return arch_atomic_fetch_inc(v);
|
|
#else
|
|
return raw_atomic_fetch_add_acquire(1, v);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_inc_release(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_inc_release)
|
|
return arch_atomic_fetch_inc_release(v);
|
|
#elif defined(arch_atomic_fetch_inc_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_fetch_inc_relaxed(v);
|
|
#elif defined(arch_atomic_fetch_inc)
|
|
return arch_atomic_fetch_inc(v);
|
|
#else
|
|
return raw_atomic_fetch_add_release(1, v);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_inc_relaxed(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_inc_relaxed)
|
|
return arch_atomic_fetch_inc_relaxed(v);
|
|
#elif defined(arch_atomic_fetch_inc)
|
|
return arch_atomic_fetch_inc(v);
|
|
#else
|
|
return raw_atomic_fetch_add_relaxed(1, v);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline void
|
|
raw_atomic_dec(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_dec)
|
|
arch_atomic_dec(v);
|
|
#else
|
|
raw_atomic_sub(1, v);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_dec_return(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_dec_return)
|
|
return arch_atomic_dec_return(v);
|
|
#elif defined(arch_atomic_dec_return_relaxed)
|
|
int ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_dec_return_relaxed(v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
return raw_atomic_sub_return(1, v);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_dec_return_acquire(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_dec_return_acquire)
|
|
return arch_atomic_dec_return_acquire(v);
|
|
#elif defined(arch_atomic_dec_return_relaxed)
|
|
int ret = arch_atomic_dec_return_relaxed(v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_dec_return)
|
|
return arch_atomic_dec_return(v);
|
|
#else
|
|
return raw_atomic_sub_return_acquire(1, v);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_dec_return_release(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_dec_return_release)
|
|
return arch_atomic_dec_return_release(v);
|
|
#elif defined(arch_atomic_dec_return_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_dec_return_relaxed(v);
|
|
#elif defined(arch_atomic_dec_return)
|
|
return arch_atomic_dec_return(v);
|
|
#else
|
|
return raw_atomic_sub_return_release(1, v);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_dec_return_relaxed(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_dec_return_relaxed)
|
|
return arch_atomic_dec_return_relaxed(v);
|
|
#elif defined(arch_atomic_dec_return)
|
|
return arch_atomic_dec_return(v);
|
|
#else
|
|
return raw_atomic_sub_return_relaxed(1, v);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_dec(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_dec)
|
|
return arch_atomic_fetch_dec(v);
|
|
#elif defined(arch_atomic_fetch_dec_relaxed)
|
|
int ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_fetch_dec_relaxed(v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
return raw_atomic_fetch_sub(1, v);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_dec_acquire(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_dec_acquire)
|
|
return arch_atomic_fetch_dec_acquire(v);
|
|
#elif defined(arch_atomic_fetch_dec_relaxed)
|
|
int ret = arch_atomic_fetch_dec_relaxed(v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_fetch_dec)
|
|
return arch_atomic_fetch_dec(v);
|
|
#else
|
|
return raw_atomic_fetch_sub_acquire(1, v);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_dec_release(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_dec_release)
|
|
return arch_atomic_fetch_dec_release(v);
|
|
#elif defined(arch_atomic_fetch_dec_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_fetch_dec_relaxed(v);
|
|
#elif defined(arch_atomic_fetch_dec)
|
|
return arch_atomic_fetch_dec(v);
|
|
#else
|
|
return raw_atomic_fetch_sub_release(1, v);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_dec_relaxed(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_dec_relaxed)
|
|
return arch_atomic_fetch_dec_relaxed(v);
|
|
#elif defined(arch_atomic_fetch_dec)
|
|
return arch_atomic_fetch_dec(v);
|
|
#else
|
|
return raw_atomic_fetch_sub_relaxed(1, v);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline void
|
|
raw_atomic_and(int i, atomic_t *v)
|
|
{
|
|
arch_atomic_and(i, v);
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_and(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_and)
|
|
return arch_atomic_fetch_and(i, v);
|
|
#elif defined(arch_atomic_fetch_and_relaxed)
|
|
int ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_fetch_and_relaxed(i, v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_and"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_and_acquire(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_and_acquire)
|
|
return arch_atomic_fetch_and_acquire(i, v);
|
|
#elif defined(arch_atomic_fetch_and_relaxed)
|
|
int ret = arch_atomic_fetch_and_relaxed(i, v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_fetch_and)
|
|
return arch_atomic_fetch_and(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_and_acquire"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_and_release(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_and_release)
|
|
return arch_atomic_fetch_and_release(i, v);
|
|
#elif defined(arch_atomic_fetch_and_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_fetch_and_relaxed(i, v);
|
|
#elif defined(arch_atomic_fetch_and)
|
|
return arch_atomic_fetch_and(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_and_release"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_and_relaxed(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_and_relaxed)
|
|
return arch_atomic_fetch_and_relaxed(i, v);
|
|
#elif defined(arch_atomic_fetch_and)
|
|
return arch_atomic_fetch_and(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_and_relaxed"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline void
|
|
raw_atomic_andnot(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_andnot)
|
|
arch_atomic_andnot(i, v);
|
|
#else
|
|
raw_atomic_and(~i, v);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_andnot(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_andnot)
|
|
return arch_atomic_fetch_andnot(i, v);
|
|
#elif defined(arch_atomic_fetch_andnot_relaxed)
|
|
int ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_fetch_andnot_relaxed(i, v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
return raw_atomic_fetch_and(~i, v);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_andnot_acquire)
|
|
return arch_atomic_fetch_andnot_acquire(i, v);
|
|
#elif defined(arch_atomic_fetch_andnot_relaxed)
|
|
int ret = arch_atomic_fetch_andnot_relaxed(i, v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_fetch_andnot)
|
|
return arch_atomic_fetch_andnot(i, v);
|
|
#else
|
|
return raw_atomic_fetch_and_acquire(~i, v);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_andnot_release(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_andnot_release)
|
|
return arch_atomic_fetch_andnot_release(i, v);
|
|
#elif defined(arch_atomic_fetch_andnot_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_fetch_andnot_relaxed(i, v);
|
|
#elif defined(arch_atomic_fetch_andnot)
|
|
return arch_atomic_fetch_andnot(i, v);
|
|
#else
|
|
return raw_atomic_fetch_and_release(~i, v);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_andnot_relaxed)
|
|
return arch_atomic_fetch_andnot_relaxed(i, v);
|
|
#elif defined(arch_atomic_fetch_andnot)
|
|
return arch_atomic_fetch_andnot(i, v);
|
|
#else
|
|
return raw_atomic_fetch_and_relaxed(~i, v);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline void
|
|
raw_atomic_or(int i, atomic_t *v)
|
|
{
|
|
arch_atomic_or(i, v);
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_or(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_or)
|
|
return arch_atomic_fetch_or(i, v);
|
|
#elif defined(arch_atomic_fetch_or_relaxed)
|
|
int ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_fetch_or_relaxed(i, v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_or"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_or_acquire(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_or_acquire)
|
|
return arch_atomic_fetch_or_acquire(i, v);
|
|
#elif defined(arch_atomic_fetch_or_relaxed)
|
|
int ret = arch_atomic_fetch_or_relaxed(i, v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_fetch_or)
|
|
return arch_atomic_fetch_or(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_or_acquire"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_or_release(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_or_release)
|
|
return arch_atomic_fetch_or_release(i, v);
|
|
#elif defined(arch_atomic_fetch_or_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_fetch_or_relaxed(i, v);
|
|
#elif defined(arch_atomic_fetch_or)
|
|
return arch_atomic_fetch_or(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_or_release"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_or_relaxed(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_or_relaxed)
|
|
return arch_atomic_fetch_or_relaxed(i, v);
|
|
#elif defined(arch_atomic_fetch_or)
|
|
return arch_atomic_fetch_or(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_or_relaxed"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline void
|
|
raw_atomic_xor(int i, atomic_t *v)
|
|
{
|
|
arch_atomic_xor(i, v);
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_xor(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_xor)
|
|
return arch_atomic_fetch_xor(i, v);
|
|
#elif defined(arch_atomic_fetch_xor_relaxed)
|
|
int ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_fetch_xor_relaxed(i, v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_xor"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_xor_acquire(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_xor_acquire)
|
|
return arch_atomic_fetch_xor_acquire(i, v);
|
|
#elif defined(arch_atomic_fetch_xor_relaxed)
|
|
int ret = arch_atomic_fetch_xor_relaxed(i, v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_fetch_xor)
|
|
return arch_atomic_fetch_xor(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_xor_acquire"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_xor_release(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_xor_release)
|
|
return arch_atomic_fetch_xor_release(i, v);
|
|
#elif defined(arch_atomic_fetch_xor_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_fetch_xor_relaxed(i, v);
|
|
#elif defined(arch_atomic_fetch_xor)
|
|
return arch_atomic_fetch_xor(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_xor_release"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_xor_relaxed(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_xor_relaxed)
|
|
return arch_atomic_fetch_xor_relaxed(i, v);
|
|
#elif defined(arch_atomic_fetch_xor)
|
|
return arch_atomic_fetch_xor(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_xor_relaxed"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_xchg(atomic_t *v, int new)
|
|
{
|
|
#if defined(arch_atomic_xchg)
|
|
return arch_atomic_xchg(v, new);
|
|
#elif defined(arch_atomic_xchg_relaxed)
|
|
int ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_xchg_relaxed(v, new);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
return raw_xchg(&v->counter, new);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_xchg_acquire(atomic_t *v, int new)
|
|
{
|
|
#if defined(arch_atomic_xchg_acquire)
|
|
return arch_atomic_xchg_acquire(v, new);
|
|
#elif defined(arch_atomic_xchg_relaxed)
|
|
int ret = arch_atomic_xchg_relaxed(v, new);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_xchg)
|
|
return arch_atomic_xchg(v, new);
|
|
#else
|
|
return raw_xchg_acquire(&v->counter, new);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_xchg_release(atomic_t *v, int new)
|
|
{
|
|
#if defined(arch_atomic_xchg_release)
|
|
return arch_atomic_xchg_release(v, new);
|
|
#elif defined(arch_atomic_xchg_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_xchg_relaxed(v, new);
|
|
#elif defined(arch_atomic_xchg)
|
|
return arch_atomic_xchg(v, new);
|
|
#else
|
|
return raw_xchg_release(&v->counter, new);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_xchg_relaxed(atomic_t *v, int new)
|
|
{
|
|
#if defined(arch_atomic_xchg_relaxed)
|
|
return arch_atomic_xchg_relaxed(v, new);
|
|
#elif defined(arch_atomic_xchg)
|
|
return arch_atomic_xchg(v, new);
|
|
#else
|
|
return raw_xchg_relaxed(&v->counter, new);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_cmpxchg(atomic_t *v, int old, int new)
|
|
{
|
|
#if defined(arch_atomic_cmpxchg)
|
|
return arch_atomic_cmpxchg(v, old, new);
|
|
#elif defined(arch_atomic_cmpxchg_relaxed)
|
|
int ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_cmpxchg_relaxed(v, old, new);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
return raw_cmpxchg(&v->counter, old, new);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
|
|
{
|
|
#if defined(arch_atomic_cmpxchg_acquire)
|
|
return arch_atomic_cmpxchg_acquire(v, old, new);
|
|
#elif defined(arch_atomic_cmpxchg_relaxed)
|
|
int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_cmpxchg)
|
|
return arch_atomic_cmpxchg(v, old, new);
|
|
#else
|
|
return raw_cmpxchg_acquire(&v->counter, old, new);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_cmpxchg_release(atomic_t *v, int old, int new)
|
|
{
|
|
#if defined(arch_atomic_cmpxchg_release)
|
|
return arch_atomic_cmpxchg_release(v, old, new);
|
|
#elif defined(arch_atomic_cmpxchg_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_cmpxchg_relaxed(v, old, new);
|
|
#elif defined(arch_atomic_cmpxchg)
|
|
return arch_atomic_cmpxchg(v, old, new);
|
|
#else
|
|
return raw_cmpxchg_release(&v->counter, old, new);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
|
|
{
|
|
#if defined(arch_atomic_cmpxchg_relaxed)
|
|
return arch_atomic_cmpxchg_relaxed(v, old, new);
|
|
#elif defined(arch_atomic_cmpxchg)
|
|
return arch_atomic_cmpxchg(v, old, new);
|
|
#else
|
|
return raw_cmpxchg_relaxed(&v->counter, old, new);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline bool
|
|
raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
|
|
{
|
|
#if defined(arch_atomic_try_cmpxchg)
|
|
return arch_atomic_try_cmpxchg(v, old, new);
|
|
#elif defined(arch_atomic_try_cmpxchg_relaxed)
|
|
bool ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
int r, o = *old;
|
|
r = raw_atomic_cmpxchg(v, o, new);
|
|
if (unlikely(r != o))
|
|
*old = r;
|
|
return likely(r == o);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline bool
|
|
raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
|
|
{
|
|
#if defined(arch_atomic_try_cmpxchg_acquire)
|
|
return arch_atomic_try_cmpxchg_acquire(v, old, new);
|
|
#elif defined(arch_atomic_try_cmpxchg_relaxed)
|
|
bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_try_cmpxchg)
|
|
return arch_atomic_try_cmpxchg(v, old, new);
|
|
#else
|
|
int r, o = *old;
|
|
r = raw_atomic_cmpxchg_acquire(v, o, new);
|
|
if (unlikely(r != o))
|
|
*old = r;
|
|
return likely(r == o);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline bool
|
|
raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
|
|
{
|
|
#if defined(arch_atomic_try_cmpxchg_release)
|
|
return arch_atomic_try_cmpxchg_release(v, old, new);
|
|
#elif defined(arch_atomic_try_cmpxchg_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_try_cmpxchg_relaxed(v, old, new);
|
|
#elif defined(arch_atomic_try_cmpxchg)
|
|
return arch_atomic_try_cmpxchg(v, old, new);
|
|
#else
|
|
int r, o = *old;
|
|
r = raw_atomic_cmpxchg_release(v, o, new);
|
|
if (unlikely(r != o))
|
|
*old = r;
|
|
return likely(r == o);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline bool
|
|
raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
|
|
{
|
|
#if defined(arch_atomic_try_cmpxchg_relaxed)
|
|
return arch_atomic_try_cmpxchg_relaxed(v, old, new);
|
|
#elif defined(arch_atomic_try_cmpxchg)
|
|
return arch_atomic_try_cmpxchg(v, old, new);
|
|
#else
|
|
int r, o = *old;
|
|
r = raw_atomic_cmpxchg_relaxed(v, o, new);
|
|
if (unlikely(r != o))
|
|
*old = r;
|
|
return likely(r == o);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline bool
|
|
raw_atomic_sub_and_test(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_sub_and_test)
|
|
return arch_atomic_sub_and_test(i, v);
|
|
#else
|
|
return raw_atomic_sub_return(i, v) == 0;
|
|
#endif
|
|
}
|
|
|
|
static __always_inline bool
|
|
raw_atomic_dec_and_test(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_dec_and_test)
|
|
return arch_atomic_dec_and_test(v);
|
|
#else
|
|
return raw_atomic_dec_return(v) == 0;
|
|
#endif
|
|
}
|
|
|
|
static __always_inline bool
|
|
raw_atomic_inc_and_test(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_inc_and_test)
|
|
return arch_atomic_inc_and_test(v);
|
|
#else
|
|
return raw_atomic_inc_return(v) == 0;
|
|
#endif
|
|
}
|
|
|
|
static __always_inline bool
|
|
raw_atomic_add_negative(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_add_negative)
|
|
return arch_atomic_add_negative(i, v);
|
|
#elif defined(arch_atomic_add_negative_relaxed)
|
|
bool ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_add_negative_relaxed(i, v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
return raw_atomic_add_return(i, v) < 0;
|
|
#endif
|
|
}
|
|
|
|
static __always_inline bool
|
|
raw_atomic_add_negative_acquire(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_add_negative_acquire)
|
|
return arch_atomic_add_negative_acquire(i, v);
|
|
#elif defined(arch_atomic_add_negative_relaxed)
|
|
bool ret = arch_atomic_add_negative_relaxed(i, v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_add_negative)
|
|
return arch_atomic_add_negative(i, v);
|
|
#else
|
|
return raw_atomic_add_return_acquire(i, v) < 0;
|
|
#endif
|
|
}
|
|
|
|
static __always_inline bool
|
|
raw_atomic_add_negative_release(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_add_negative_release)
|
|
return arch_atomic_add_negative_release(i, v);
|
|
#elif defined(arch_atomic_add_negative_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_add_negative_relaxed(i, v);
|
|
#elif defined(arch_atomic_add_negative)
|
|
return arch_atomic_add_negative(i, v);
|
|
#else
|
|
return raw_atomic_add_return_release(i, v) < 0;
|
|
#endif
|
|
}
|
|
|
|
static __always_inline bool
|
|
raw_atomic_add_negative_relaxed(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_add_negative_relaxed)
|
|
return arch_atomic_add_negative_relaxed(i, v);
|
|
#elif defined(arch_atomic_add_negative)
|
|
return arch_atomic_add_negative(i, v);
|
|
#else
|
|
return raw_atomic_add_return_relaxed(i, v) < 0;
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_fetch_add_unless(atomic_t *v, int a, int u)
|
|
{
|
|
#if defined(arch_atomic_fetch_add_unless)
|
|
return arch_atomic_fetch_add_unless(v, a, u);
|
|
#else
|
|
int c = raw_atomic_read(v);
|
|
|
|
do {
|
|
if (unlikely(c == u))
|
|
break;
|
|
} while (!raw_atomic_try_cmpxchg(v, &c, c + a));
|
|
|
|
return c;
|
|
#endif
|
|
}
|
|
|
|
static __always_inline bool
|
|
raw_atomic_add_unless(atomic_t *v, int a, int u)
|
|
{
|
|
#if defined(arch_atomic_add_unless)
|
|
return arch_atomic_add_unless(v, a, u);
|
|
#else
|
|
return raw_atomic_fetch_add_unless(v, a, u) != u;
|
|
#endif
|
|
}
|
|
|
|
static __always_inline bool
|
|
raw_atomic_inc_not_zero(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_inc_not_zero)
|
|
return arch_atomic_inc_not_zero(v);
|
|
#else
|
|
return raw_atomic_add_unless(v, 1, 0);
|
|
#endif
|
|
}
|
|
|
|
static __always_inline bool
|
|
raw_atomic_inc_unless_negative(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_inc_unless_negative)
|
|
return arch_atomic_inc_unless_negative(v);
|
|
#else
|
|
int c = raw_atomic_read(v);
|
|
|
|
do {
|
|
if (unlikely(c < 0))
|
|
return false;
|
|
} while (!raw_atomic_try_cmpxchg(v, &c, c + 1));
|
|
|
|
return true;
|
|
#endif
|
|
}
|
|
|
|
static __always_inline bool
|
|
raw_atomic_dec_unless_positive(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_dec_unless_positive)
|
|
return arch_atomic_dec_unless_positive(v);
|
|
#else
|
|
int c = raw_atomic_read(v);
|
|
|
|
do {
|
|
if (unlikely(c > 0))
|
|
return false;
|
|
} while (!raw_atomic_try_cmpxchg(v, &c, c - 1));
|
|
|
|
return true;
|
|
#endif
|
|
}
|
|
|
|
static __always_inline int
|
|
raw_atomic_dec_if_positive(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_dec_if_positive)
|
|
return arch_atomic_dec_if_positive(v);
|
|
#else
|
|
int dec, c = raw_atomic_read(v);
|
|
|
|
do {
|
|
dec = c - 1;
|
|
if (unlikely(dec < 0))
|
|
break;
|
|
} while (!raw_atomic_try_cmpxchg(v, &c, dec));
|
|
|
|
return dec;
|
|
#endif
|
|
}
|
|
|
|
#ifdef CONFIG_GENERIC_ATOMIC64
|
|
#include <asm-generic/atomic64.h>
|
|
#endif
|
|
|
|
static __always_inline s64
|
|
raw_atomic64_read(const atomic64_t *v)
|
|
{
|
|
return arch_atomic64_read(v);
|
|
}
|
|
|
|
static __always_inline s64
|
|
raw_atomic64_read_acquire(const atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_read_acquire)
|
|
return arch_atomic64_read_acquire(v);
|
|
#elif defined(arch_atomic64_read)
|
|
return arch_atomic64_read(v);
|
|
#else
|
|
s64 ret;
|
|
|
|
if (__native_word(atomic64_t)) {
|
|
ret = smp_load_acquire(&(v)->counter);
|
|
} else {
|
|
ret = raw_atomic64_read(v);
|
|
__atomic_acquire_fence();
|
|
}
|
|
|
|
return ret;
|
|
#endif
|
|
}
|
|
|
|
static __always_inline void
|
|
raw_atomic64_set(atomic64_t *v, s64 i)
|
|
{
|
|
arch_atomic64_set(v, i);
|
|
}
|
|
|
|
static __always_inline void
|
|
raw_atomic64_set_release(atomic64_t *v, s64 i)
|
|
{
|
|
#if defined(arch_atomic64_set_release)
|
|
arch_atomic64_set_release(v, i);
|
|
#elif defined(arch_atomic64_set)
|
|
arch_atomic64_set(v, i);
|
|
#else
|
|
if (__native_word(atomic64_t)) {
|
|
smp_store_release(&(v)->counter, i);
|
|
} else {
|
|
__atomic_release_fence();
|
|
raw_atomic64_set(v, i);
|
|
}
|
|
#endif
|
|
}
|
|
|
|
static __always_inline void
|
|
raw_atomic64_add(s64 i, atomic64_t *v)
|
|
{
|
|
arch_atomic64_add(i, v);
|
|
}
|
|
|
|
static __always_inline s64
|
|
raw_atomic64_add_return(s64 i, atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_add_return)
|
|
return arch_atomic64_add_return(i, v);
|
|
#elif defined(arch_atomic64_add_return_relaxed)
|
|
s64 ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic64_add_return_relaxed(i, v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
#error "Unable to define raw_atomic64_add_return"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline s64
|
|
raw_atomic64_add_return_acquire(s64 i, atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_add_return_acquire)
|
|
return arch_atomic64_add_return_acquire(i, v);
|
|
#elif defined(arch_atomic64_add_return_relaxed)
|
|
s64 ret = arch_atomic64_add_return_relaxed(i, v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic64_add_return)
|
|
return arch_atomic64_add_return(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic64_add_return_acquire"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline s64
|
|
raw_atomic64_add_return_release(s64 i, atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_add_return_release)
|
|
return arch_atomic64_add_return_release(i, v);
|
|
#elif defined(arch_atomic64_add_return_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic64_add_return_relaxed(i, v);
|
|
#elif defined(arch_atomic64_add_return)
|
|
return arch_atomic64_add_return(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic64_add_return_release"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline s64
|
|
raw_atomic64_add_return_relaxed(s64 i, atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_add_return_relaxed)
|
|
return arch_atomic64_add_return_relaxed(i, v);
|
|
#elif defined(arch_atomic64_add_return)
|
|
return arch_atomic64_add_return(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic64_add_return_relaxed"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline s64
|
|
raw_atomic64_fetch_add(s64 i, atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_fetch_add)
|
|
return arch_atomic64_fetch_add(i, v);
|
|
#elif defined(arch_atomic64_fetch_add_relaxed)
|
|
s64 ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic64_fetch_add_relaxed(i, v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
#error "Unable to define raw_atomic64_fetch_add"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline s64
|
|
raw_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_fetch_add_acquire)
|
|
return arch_atomic64_fetch_add_acquire(i, v);
|
|
#elif defined(arch_atomic64_fetch_add_relaxed)
|
|
s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic64_fetch_add)
|
|
return arch_atomic64_fetch_add(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic64_fetch_add_acquire"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline s64
|
|
raw_atomic64_fetch_add_release(s64 i, atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_fetch_add_release)
|
|
return arch_atomic64_fetch_add_release(i, v);
|
|
#elif defined(arch_atomic64_fetch_add_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic64_fetch_add_relaxed(i, v);
|
|
#elif defined(arch_atomic64_fetch_add)
|
|
return arch_atomic64_fetch_add(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic64_fetch_add_release"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline s64
|
|
raw_atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_fetch_add_relaxed)
|
|
return arch_atomic64_fetch_add_relaxed(i, v);
|
|
#elif defined(arch_atomic64_fetch_add)
|
|
return arch_atomic64_fetch_add(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic64_fetch_add_relaxed"
|
|
#endif
|
|
}
|
|
|
|
static __always_inline void
raw_atomic64_sub(s64 i, atomic64_t *v)
{
	arch_atomic64_sub(i, v);
}

static __always_inline s64
raw_atomic64_sub_return(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_sub_return)
	return arch_atomic64_sub_return(i, v);
#elif defined(arch_atomic64_sub_return_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_sub_return_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic64_sub_return"
#endif
}

static __always_inline s64
raw_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_sub_return_acquire)
	return arch_atomic64_sub_return_acquire(i, v);
#elif defined(arch_atomic64_sub_return_relaxed)
	s64 ret = arch_atomic64_sub_return_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_sub_return)
	return arch_atomic64_sub_return(i, v);
#else
#error "Unable to define raw_atomic64_sub_return_acquire"
#endif
}

static __always_inline s64
raw_atomic64_sub_return_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_sub_return_release)
	return arch_atomic64_sub_return_release(i, v);
#elif defined(arch_atomic64_sub_return_relaxed)
	__atomic_release_fence();
	return arch_atomic64_sub_return_relaxed(i, v);
#elif defined(arch_atomic64_sub_return)
	return arch_atomic64_sub_return(i, v);
#else
#error "Unable to define raw_atomic64_sub_return_release"
#endif
}

static __always_inline s64
raw_atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_sub_return_relaxed)
	return arch_atomic64_sub_return_relaxed(i, v);
#elif defined(arch_atomic64_sub_return)
	return arch_atomic64_sub_return(i, v);
#else
#error "Unable to define raw_atomic64_sub_return_relaxed"
#endif
}

static __always_inline s64
raw_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_sub)
	return arch_atomic64_fetch_sub(i, v);
#elif defined(arch_atomic64_fetch_sub_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_sub_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic64_fetch_sub"
#endif
}

static __always_inline s64
raw_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_sub_acquire)
	return arch_atomic64_fetch_sub_acquire(i, v);
#elif defined(arch_atomic64_fetch_sub_relaxed)
	s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_sub)
	return arch_atomic64_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic64_fetch_sub_acquire"
#endif
}

static __always_inline s64
raw_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_sub_release)
	return arch_atomic64_fetch_sub_release(i, v);
#elif defined(arch_atomic64_fetch_sub_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_sub_relaxed(i, v);
#elif defined(arch_atomic64_fetch_sub)
	return arch_atomic64_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic64_fetch_sub_release"
#endif
}

static __always_inline s64
raw_atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_sub_relaxed)
	return arch_atomic64_fetch_sub_relaxed(i, v);
#elif defined(arch_atomic64_fetch_sub)
	return arch_atomic64_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic64_fetch_sub_relaxed"
#endif
}

static __always_inline void
raw_atomic64_inc(atomic64_t *v)
{
#if defined(arch_atomic64_inc)
	arch_atomic64_inc(v);
#else
	raw_atomic64_add(1, v);
#endif
}

static __always_inline s64
raw_atomic64_inc_return(atomic64_t *v)
{
#if defined(arch_atomic64_inc_return)
	return arch_atomic64_inc_return(v);
#elif defined(arch_atomic64_inc_return_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_inc_return_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic64_add_return(1, v);
#endif
}

static __always_inline s64
raw_atomic64_inc_return_acquire(atomic64_t *v)
{
#if defined(arch_atomic64_inc_return_acquire)
	return arch_atomic64_inc_return_acquire(v);
#elif defined(arch_atomic64_inc_return_relaxed)
	s64 ret = arch_atomic64_inc_return_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_inc_return)
	return arch_atomic64_inc_return(v);
#else
	return raw_atomic64_add_return_acquire(1, v);
#endif
}

static __always_inline s64
raw_atomic64_inc_return_release(atomic64_t *v)
{
#if defined(arch_atomic64_inc_return_release)
	return arch_atomic64_inc_return_release(v);
#elif defined(arch_atomic64_inc_return_relaxed)
	__atomic_release_fence();
	return arch_atomic64_inc_return_relaxed(v);
#elif defined(arch_atomic64_inc_return)
	return arch_atomic64_inc_return(v);
#else
	return raw_atomic64_add_return_release(1, v);
#endif
}

static __always_inline s64
raw_atomic64_inc_return_relaxed(atomic64_t *v)
{
#if defined(arch_atomic64_inc_return_relaxed)
	return arch_atomic64_inc_return_relaxed(v);
#elif defined(arch_atomic64_inc_return)
	return arch_atomic64_inc_return(v);
#else
	return raw_atomic64_add_return_relaxed(1, v);
#endif
}

static __always_inline s64
raw_atomic64_fetch_inc(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_inc)
	return arch_atomic64_fetch_inc(v);
#elif defined(arch_atomic64_fetch_inc_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_inc_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic64_fetch_add(1, v);
#endif
}

static __always_inline s64
raw_atomic64_fetch_inc_acquire(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_inc_acquire)
	return arch_atomic64_fetch_inc_acquire(v);
#elif defined(arch_atomic64_fetch_inc_relaxed)
	s64 ret = arch_atomic64_fetch_inc_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_inc)
	return arch_atomic64_fetch_inc(v);
#else
	return raw_atomic64_fetch_add_acquire(1, v);
#endif
}

static __always_inline s64
raw_atomic64_fetch_inc_release(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_inc_release)
	return arch_atomic64_fetch_inc_release(v);
#elif defined(arch_atomic64_fetch_inc_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_inc_relaxed(v);
#elif defined(arch_atomic64_fetch_inc)
	return arch_atomic64_fetch_inc(v);
#else
	return raw_atomic64_fetch_add_release(1, v);
#endif
}

static __always_inline s64
raw_atomic64_fetch_inc_relaxed(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_inc_relaxed)
	return arch_atomic64_fetch_inc_relaxed(v);
#elif defined(arch_atomic64_fetch_inc)
	return arch_atomic64_fetch_inc(v);
#else
	return raw_atomic64_fetch_add_relaxed(1, v);
#endif
}

static __always_inline void
raw_atomic64_dec(atomic64_t *v)
{
#if defined(arch_atomic64_dec)
	arch_atomic64_dec(v);
#else
	raw_atomic64_sub(1, v);
#endif
}

static __always_inline s64
raw_atomic64_dec_return(atomic64_t *v)
{
#if defined(arch_atomic64_dec_return)
	return arch_atomic64_dec_return(v);
#elif defined(arch_atomic64_dec_return_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_dec_return_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic64_sub_return(1, v);
#endif
}

static __always_inline s64
raw_atomic64_dec_return_acquire(atomic64_t *v)
{
#if defined(arch_atomic64_dec_return_acquire)
	return arch_atomic64_dec_return_acquire(v);
#elif defined(arch_atomic64_dec_return_relaxed)
	s64 ret = arch_atomic64_dec_return_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_dec_return)
	return arch_atomic64_dec_return(v);
#else
	return raw_atomic64_sub_return_acquire(1, v);
#endif
}

static __always_inline s64
raw_atomic64_dec_return_release(atomic64_t *v)
{
#if defined(arch_atomic64_dec_return_release)
	return arch_atomic64_dec_return_release(v);
#elif defined(arch_atomic64_dec_return_relaxed)
	__atomic_release_fence();
	return arch_atomic64_dec_return_relaxed(v);
#elif defined(arch_atomic64_dec_return)
	return arch_atomic64_dec_return(v);
#else
	return raw_atomic64_sub_return_release(1, v);
#endif
}

static __always_inline s64
raw_atomic64_dec_return_relaxed(atomic64_t *v)
{
#if defined(arch_atomic64_dec_return_relaxed)
	return arch_atomic64_dec_return_relaxed(v);
#elif defined(arch_atomic64_dec_return)
	return arch_atomic64_dec_return(v);
#else
	return raw_atomic64_sub_return_relaxed(1, v);
#endif
}

static __always_inline s64
raw_atomic64_fetch_dec(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_dec)
	return arch_atomic64_fetch_dec(v);
#elif defined(arch_atomic64_fetch_dec_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_dec_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic64_fetch_sub(1, v);
#endif
}

static __always_inline s64
raw_atomic64_fetch_dec_acquire(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_dec_acquire)
	return arch_atomic64_fetch_dec_acquire(v);
#elif defined(arch_atomic64_fetch_dec_relaxed)
	s64 ret = arch_atomic64_fetch_dec_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_dec)
	return arch_atomic64_fetch_dec(v);
#else
	return raw_atomic64_fetch_sub_acquire(1, v);
#endif
}

static __always_inline s64
raw_atomic64_fetch_dec_release(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_dec_release)
	return arch_atomic64_fetch_dec_release(v);
#elif defined(arch_atomic64_fetch_dec_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_dec_relaxed(v);
#elif defined(arch_atomic64_fetch_dec)
	return arch_atomic64_fetch_dec(v);
#else
	return raw_atomic64_fetch_sub_release(1, v);
#endif
}

static __always_inline s64
raw_atomic64_fetch_dec_relaxed(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_dec_relaxed)
	return arch_atomic64_fetch_dec_relaxed(v);
#elif defined(arch_atomic64_fetch_dec)
	return arch_atomic64_fetch_dec(v);
#else
	return raw_atomic64_fetch_sub_relaxed(1, v);
#endif
}

static __always_inline void
raw_atomic64_and(s64 i, atomic64_t *v)
{
	arch_atomic64_and(i, v);
}

static __always_inline s64
raw_atomic64_fetch_and(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_and)
	return arch_atomic64_fetch_and(i, v);
#elif defined(arch_atomic64_fetch_and_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_and_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic64_fetch_and"
#endif
}

static __always_inline s64
raw_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_and_acquire)
	return arch_atomic64_fetch_and_acquire(i, v);
#elif defined(arch_atomic64_fetch_and_relaxed)
	s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_and)
	return arch_atomic64_fetch_and(i, v);
#else
#error "Unable to define raw_atomic64_fetch_and_acquire"
#endif
}

static __always_inline s64
raw_atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_and_release)
	return arch_atomic64_fetch_and_release(i, v);
#elif defined(arch_atomic64_fetch_and_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_and_relaxed(i, v);
#elif defined(arch_atomic64_fetch_and)
	return arch_atomic64_fetch_and(i, v);
#else
#error "Unable to define raw_atomic64_fetch_and_release"
#endif
}

static __always_inline s64
raw_atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_and_relaxed)
	return arch_atomic64_fetch_and_relaxed(i, v);
#elif defined(arch_atomic64_fetch_and)
	return arch_atomic64_fetch_and(i, v);
#else
#error "Unable to define raw_atomic64_fetch_and_relaxed"
#endif
}

static __always_inline void
raw_atomic64_andnot(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_andnot)
	arch_atomic64_andnot(i, v);
#else
	raw_atomic64_and(~i, v);
#endif
}

static __always_inline s64
raw_atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_andnot)
	return arch_atomic64_fetch_andnot(i, v);
#elif defined(arch_atomic64_fetch_andnot_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_andnot_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic64_fetch_and(~i, v);
#endif
}

static __always_inline s64
raw_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_andnot_acquire)
	return arch_atomic64_fetch_andnot_acquire(i, v);
#elif defined(arch_atomic64_fetch_andnot_relaxed)
	s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_andnot)
	return arch_atomic64_fetch_andnot(i, v);
#else
	return raw_atomic64_fetch_and_acquire(~i, v);
#endif
}

static __always_inline s64
raw_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_andnot_release)
	return arch_atomic64_fetch_andnot_release(i, v);
#elif defined(arch_atomic64_fetch_andnot_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_andnot_relaxed(i, v);
#elif defined(arch_atomic64_fetch_andnot)
	return arch_atomic64_fetch_andnot(i, v);
#else
	return raw_atomic64_fetch_and_release(~i, v);
#endif
}

static __always_inline s64
raw_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_andnot_relaxed)
	return arch_atomic64_fetch_andnot_relaxed(i, v);
#elif defined(arch_atomic64_fetch_andnot)
	return arch_atomic64_fetch_andnot(i, v);
#else
	return raw_atomic64_fetch_and_relaxed(~i, v);
#endif
}

static __always_inline void
raw_atomic64_or(s64 i, atomic64_t *v)
{
	arch_atomic64_or(i, v);
}

static __always_inline s64
raw_atomic64_fetch_or(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_or)
	return arch_atomic64_fetch_or(i, v);
#elif defined(arch_atomic64_fetch_or_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_or_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic64_fetch_or"
#endif
}

static __always_inline s64
raw_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_or_acquire)
	return arch_atomic64_fetch_or_acquire(i, v);
#elif defined(arch_atomic64_fetch_or_relaxed)
	s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_or)
	return arch_atomic64_fetch_or(i, v);
#else
#error "Unable to define raw_atomic64_fetch_or_acquire"
#endif
}

static __always_inline s64
raw_atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_or_release)
	return arch_atomic64_fetch_or_release(i, v);
#elif defined(arch_atomic64_fetch_or_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_or_relaxed(i, v);
#elif defined(arch_atomic64_fetch_or)
	return arch_atomic64_fetch_or(i, v);
#else
#error "Unable to define raw_atomic64_fetch_or_release"
#endif
}

static __always_inline s64
raw_atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_or_relaxed)
	return arch_atomic64_fetch_or_relaxed(i, v);
#elif defined(arch_atomic64_fetch_or)
	return arch_atomic64_fetch_or(i, v);
#else
#error "Unable to define raw_atomic64_fetch_or_relaxed"
#endif
}

static __always_inline void
raw_atomic64_xor(s64 i, atomic64_t *v)
{
	arch_atomic64_xor(i, v);
}

static __always_inline s64
raw_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor)
	return arch_atomic64_fetch_xor(i, v);
#elif defined(arch_atomic64_fetch_xor_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_xor_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic64_fetch_xor"
#endif
}

static __always_inline s64
raw_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor_acquire)
	return arch_atomic64_fetch_xor_acquire(i, v);
#elif defined(arch_atomic64_fetch_xor_relaxed)
	s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_xor)
	return arch_atomic64_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic64_fetch_xor_acquire"
#endif
}

static __always_inline s64
raw_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor_release)
	return arch_atomic64_fetch_xor_release(i, v);
#elif defined(arch_atomic64_fetch_xor_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_xor_relaxed(i, v);
#elif defined(arch_atomic64_fetch_xor)
	return arch_atomic64_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic64_fetch_xor_release"
#endif
}

static __always_inline s64
raw_atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor_relaxed)
	return arch_atomic64_fetch_xor_relaxed(i, v);
#elif defined(arch_atomic64_fetch_xor)
	return arch_atomic64_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic64_fetch_xor_relaxed"
#endif
}

static __always_inline s64
raw_atomic64_xchg(atomic64_t *v, s64 new)
{
#if defined(arch_atomic64_xchg)
	return arch_atomic64_xchg(v, new);
#elif defined(arch_atomic64_xchg_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_xchg_relaxed(v, new);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_xchg(&v->counter, new);
#endif
}

static __always_inline s64
raw_atomic64_xchg_acquire(atomic64_t *v, s64 new)
{
#if defined(arch_atomic64_xchg_acquire)
	return arch_atomic64_xchg_acquire(v, new);
#elif defined(arch_atomic64_xchg_relaxed)
	s64 ret = arch_atomic64_xchg_relaxed(v, new);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_xchg)
	return arch_atomic64_xchg(v, new);
#else
	return raw_xchg_acquire(&v->counter, new);
#endif
}

static __always_inline s64
raw_atomic64_xchg_release(atomic64_t *v, s64 new)
{
#if defined(arch_atomic64_xchg_release)
	return arch_atomic64_xchg_release(v, new);
#elif defined(arch_atomic64_xchg_relaxed)
	__atomic_release_fence();
	return arch_atomic64_xchg_relaxed(v, new);
#elif defined(arch_atomic64_xchg)
	return arch_atomic64_xchg(v, new);
#else
	return raw_xchg_release(&v->counter, new);
#endif
}

static __always_inline s64
raw_atomic64_xchg_relaxed(atomic64_t *v, s64 new)
{
#if defined(arch_atomic64_xchg_relaxed)
	return arch_atomic64_xchg_relaxed(v, new);
#elif defined(arch_atomic64_xchg)
	return arch_atomic64_xchg(v, new);
#else
	return raw_xchg_relaxed(&v->counter, new);
#endif
}

static __always_inline s64
raw_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg)
	return arch_atomic64_cmpxchg(v, old, new);
#elif defined(arch_atomic64_cmpxchg_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_cmpxchg(&v->counter, old, new);
#endif
}

static __always_inline s64
raw_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg_acquire)
	return arch_atomic64_cmpxchg_acquire(v, old, new);
#elif defined(arch_atomic64_cmpxchg_relaxed)
	s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_cmpxchg)
	return arch_atomic64_cmpxchg(v, old, new);
#else
	return raw_cmpxchg_acquire(&v->counter, old, new);
#endif
}

static __always_inline s64
raw_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg_release)
	return arch_atomic64_cmpxchg_release(v, old, new);
#elif defined(arch_atomic64_cmpxchg_relaxed)
	__atomic_release_fence();
	return arch_atomic64_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_cmpxchg)
	return arch_atomic64_cmpxchg(v, old, new);
#else
	return raw_cmpxchg_release(&v->counter, old, new);
#endif
}

static __always_inline s64
raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg_relaxed)
	return arch_atomic64_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_cmpxchg)
	return arch_atomic64_cmpxchg(v, old, new);
#else
	return raw_cmpxchg_relaxed(&v->counter, old, new);
#endif
}

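/*
 * The raw_atomic64_try_cmpxchg*() fallbacks below emulate try_cmpxchg()
 * with cmpxchg(): on failure the value actually observed in @v is written
 * back to *old and false is returned, so callers can retry without
 * re-reading @v.
 */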
static __always_inline bool
raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg)
	return arch_atomic64_try_cmpxchg(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg_relaxed)
	bool ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
	__atomic_post_full_fence();
	return ret;
#else
	s64 r, o = *old;
	r = raw_atomic64_cmpxchg(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

static __always_inline bool
raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg_acquire)
	return arch_atomic64_try_cmpxchg_acquire(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg_relaxed)
	bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_try_cmpxchg)
	return arch_atomic64_try_cmpxchg(v, old, new);
#else
	s64 r, o = *old;
	r = raw_atomic64_cmpxchg_acquire(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

static __always_inline bool
raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg_release)
	return arch_atomic64_try_cmpxchg_release(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg_relaxed)
	__atomic_release_fence();
	return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg)
	return arch_atomic64_try_cmpxchg(v, old, new);
#else
	s64 r, o = *old;
	r = raw_atomic64_cmpxchg_release(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

static __always_inline bool
raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg_relaxed)
	return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg)
	return arch_atomic64_try_cmpxchg(v, old, new);
#else
	s64 r, o = *old;
	r = raw_atomic64_cmpxchg_relaxed(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

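/*
 * The *_and_test() helpers below return true iff the new value of @v is
 * zero after the operation; their fallbacks simply test the corresponding
 * *_return() result against zero.
 */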
static __always_inline bool
raw_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_sub_and_test)
	return arch_atomic64_sub_and_test(i, v);
#else
	return raw_atomic64_sub_return(i, v) == 0;
#endif
}

static __always_inline bool
raw_atomic64_dec_and_test(atomic64_t *v)
{
#if defined(arch_atomic64_dec_and_test)
	return arch_atomic64_dec_and_test(v);
#else
	return raw_atomic64_dec_return(v) == 0;
#endif
}

static __always_inline bool
raw_atomic64_inc_and_test(atomic64_t *v)
{
#if defined(arch_atomic64_inc_and_test)
	return arch_atomic64_inc_and_test(v);
#else
	return raw_atomic64_inc_return(v) == 0;
#endif
}

static __always_inline bool
raw_atomic64_add_negative(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative)
	return arch_atomic64_add_negative(i, v);
#elif defined(arch_atomic64_add_negative_relaxed)
	bool ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_add_negative_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic64_add_return(i, v) < 0;
#endif
}

static __always_inline bool
raw_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative_acquire)
	return arch_atomic64_add_negative_acquire(i, v);
#elif defined(arch_atomic64_add_negative_relaxed)
	bool ret = arch_atomic64_add_negative_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_add_negative)
	return arch_atomic64_add_negative(i, v);
#else
	return raw_atomic64_add_return_acquire(i, v) < 0;
#endif
}

static __always_inline bool
raw_atomic64_add_negative_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative_release)
	return arch_atomic64_add_negative_release(i, v);
#elif defined(arch_atomic64_add_negative_relaxed)
	__atomic_release_fence();
	return arch_atomic64_add_negative_relaxed(i, v);
#elif defined(arch_atomic64_add_negative)
	return arch_atomic64_add_negative(i, v);
#else
	return raw_atomic64_add_return_release(i, v) < 0;
#endif
}

static __always_inline bool
raw_atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative_relaxed)
	return arch_atomic64_add_negative_relaxed(i, v);
#elif defined(arch_atomic64_add_negative)
	return arch_atomic64_add_negative(i, v);
#else
	return raw_atomic64_add_return_relaxed(i, v) < 0;
#endif
}

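/*
 * The conditional operations below fall back to a raw_atomic64_read()
 * followed by a raw_atomic64_try_cmpxchg() retry loop, either directly or
 * via raw_atomic64_fetch_add_unless(), when the architecture does not
 * provide them.
 */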
static __always_inline s64
raw_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
#if defined(arch_atomic64_fetch_add_unless)
	return arch_atomic64_fetch_add_unless(v, a, u);
#else
	s64 c = raw_atomic64_read(v);

	do {
		if (unlikely(c == u))
			break;
	} while (!raw_atomic64_try_cmpxchg(v, &c, c + a));

	return c;
#endif
}

static __always_inline bool
raw_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
#if defined(arch_atomic64_add_unless)
	return arch_atomic64_add_unless(v, a, u);
#else
	return raw_atomic64_fetch_add_unless(v, a, u) != u;
#endif
}

static __always_inline bool
raw_atomic64_inc_not_zero(atomic64_t *v)
{
#if defined(arch_atomic64_inc_not_zero)
	return arch_atomic64_inc_not_zero(v);
#else
	return raw_atomic64_add_unless(v, 1, 0);
#endif
}

static __always_inline bool
raw_atomic64_inc_unless_negative(atomic64_t *v)
{
#if defined(arch_atomic64_inc_unless_negative)
	return arch_atomic64_inc_unless_negative(v);
#else
	s64 c = raw_atomic64_read(v);

	do {
		if (unlikely(c < 0))
			return false;
	} while (!raw_atomic64_try_cmpxchg(v, &c, c + 1));

	return true;
#endif
}

static __always_inline bool
raw_atomic64_dec_unless_positive(atomic64_t *v)
{
#if defined(arch_atomic64_dec_unless_positive)
	return arch_atomic64_dec_unless_positive(v);
#else
	s64 c = raw_atomic64_read(v);

	do {
		if (unlikely(c > 0))
			return false;
	} while (!raw_atomic64_try_cmpxchg(v, &c, c - 1));

	return true;
#endif
}

static __always_inline s64
raw_atomic64_dec_if_positive(atomic64_t *v)
{
#if defined(arch_atomic64_dec_if_positive)
	return arch_atomic64_dec_if_positive(v);
#else
	s64 dec, c = raw_atomic64_read(v);

	do {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
	} while (!raw_atomic64_try_cmpxchg(v, &c, dec));

	return dec;
#endif
}

#endif /* _LINUX_ATOMIC_FALLBACK_H */

// 205e090382132f1fc85e48b46e722865f9c81309