We'd like all architectures to convert to ARCH_ATOMIC, as once all
architectures are converted it will be possible to make significant
cleanups to the atomics headers, and this will make it much easier to
generically enable atomic functionality (e.g. debug logic in the
instrumented wrappers).

As a step towards that, this patch migrates alpha to ARCH_ATOMIC. The
arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common
code wraps these with optional instrumentation to provide the regular
functions.

Note: xchg_local() is NOT currently part of the generic (arch_atomic)
API, and is not instrumented.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-14-mark.rutland@arm.com
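For reference, with ARCH_ATOMIC selected, the regular (unprefixed)
operations come from the generated include/linux/atomic-instrumented.h,
which wraps each arch_*() call with instrumentation hooks. A simplified
sketch of that wrapping for cmpxchg() (not the exact generated code):

	#define cmpxchg(ptr, ...)					\
	({								\
		typeof(ptr) __ai_ptr = (ptr);				\
		instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
		arch_cmpxchg(__ai_ptr, __VA_ARGS__);			\
	})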
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_CMPXCHG_H
#define _ALPHA_CMPXCHG_H

/*
 * Atomic exchange routines.
 */

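/*
 * asm/xchg.h is included twice below.  It builds operation names via
 * ____xchg()/____cmpxchg(), so the first inclusion (with the "_local"
 * definitions here) generates the unordered __{xchg,cmpxchg}*_local()
 * variants, and the second inclusion (after the #undefs further down)
 * generates the plain __{xchg,cmpxchg}*() variants used by the
 * fully-ordered operations.
 */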
#define ____xchg(type, args...)		__xchg ## type ## _local(args)
#define ____cmpxchg(type, args...)	__cmpxchg ## type ## _local(args)
#include <asm/xchg.h>

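/*
 * xchg_local() is not part of the generic arch_atomic API, so it keeps
 * its unprefixed name and gets no instrumented wrapper.
 */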
#define xchg_local(ptr, x)						\
({									\
	__typeof__(*(ptr)) _x_ = (x);					\
	(__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_,	\
					  sizeof(*(ptr)));		\
})

#define arch_cmpxchg_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,\
					     (unsigned long)_n_,	\
					     sizeof(*(ptr)));		\
})

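/*
 * Only the size check lives here; the operation defers to
 * cmpxchg_local(), which the generic layer defines on top of
 * arch_cmpxchg_local() by the time this macro is expanded at its
 * use site.
 */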
#define arch_cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
})

#undef ____xchg
#undef ____cmpxchg
#define ____xchg(type, args...)		__xchg ##type(args)
#define ____cmpxchg(type, args...)	__cmpxchg ##type(args)
#include <asm/xchg.h>

/*
 * The leading and the trailing memory barriers guarantee that these
 * operations are fully ordered.
 */
#define arch_xchg(ptr, x)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) _x_ = (x);					\
	smp_mb();							\
	__ret = (__typeof__(*(ptr)))					\
		__xchg((ptr), (unsigned long)_x_, sizeof(*(ptr)));	\
	smp_mb();							\
	__ret;								\
})

#define arch_cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	smp_mb();							\
	__ret = (__typeof__(*(ptr))) __cmpxchg((ptr),			\
		(unsigned long)_o_, (unsigned long)_n_, sizeof(*(ptr)));\
	smp_mb();							\
	__ret;								\
})

#define arch_cmpxchg64(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg((ptr), (o), (n));					\
})

#undef ____cmpxchg

#endif /* _ALPHA_CMPXCHG_H */
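Callers are unchanged by this conversion: generic code keeps using the
unprefixed forms from <linux/atomic.h>, which now reach the arch_*()
macros above via the common wrappers. A hypothetical usage sketch
(claim_slot() is illustrative, not part of this patch):

	#include <linux/types.h>
	#include <linux/atomic.h>

	/* Atomically claim a slot: succeeds only if *slot was 0. */
	static bool claim_slot(unsigned long *slot, unsigned long id)
	{
		return cmpxchg(slot, 0UL, id) == 0UL;
	}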