locking/atomics: Instrument xchg()
While we instrument all of the (non-relaxed) atomic_*() functions and
cmpxchg(), we missed xchg(). Let's add instrumentation for xchg(),
fixing up x86 to implement arch_xchg().

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: andy.shevchenko@gmail.com
Cc: arnd@arndb.de
Cc: aryabinin@virtuozzo.com
Cc: catalin.marinas@arm.com
Cc: glider@google.com
Cc: linux-arm-kernel@lists.infradead.org
Cc: parri.andrea@gmail.com
Cc: peter@hurleysoftware.com
Link: http://lkml.kernel.org/r/20180716113017.3909-5-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit f9881cc43b
parent df79ed2c06
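Before the diff, it may help to see the shape of the layering this series builds: the architecture exports a raw arch_xchg(), and a generic macro checks the access with KASAN before deferring to it. Below is a minimal user-space sketch of that pattern; check_write() is a stand-in for the kernel's kasan_check_write(), and none of these names are kernel API — they only mirror the shape of the diff that follows.

/*
 * Minimal user-space sketch of the instrumentation layering.
 * check_write() stands in for kasan_check_write(); arch_xchg() here
 * is mapped onto C11 atomic_exchange() purely for illustration.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

static void check_write(void *addr, size_t size)
{
	/* KASAN would validate the shadow memory for this access here. */
	printf("checked write of %zu bytes at %p\n", size, addr);
}

#define arch_xchg(ptr, v)	atomic_exchange((ptr), (v))

/* Instrumented wrapper: check the access, then do the real exchange. */
#define xchg(ptr, v)						\
({								\
	__typeof__(ptr) __ai_ptr = (ptr);			\
	check_write(__ai_ptr, sizeof(*__ai_ptr));		\
	arch_xchg(__ai_ptr, (v));				\
})

int main(void)
{
	_Atomic int x = 1;
	int old = xchg(&x, 2);	/* prints the check, then exchanges */
	printf("old=%d, now=%d\n", old, (int)x);
	return 0;
}

Compiled with gcc or clang (statement expressions are a GNU extension, as in the kernel), the check visibly runs before the exchange — the same ordering the real macro guarantees.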
arch/x86/include/asm/atomic.h
@@ -202,7 +202,7 @@ static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
 
 static inline int arch_atomic_xchg(atomic_t *v, int new)
 {
-	return xchg(&v->counter, new);
+	return arch_xchg(&v->counter, new);
 }
 
 static inline void arch_atomic_and(int i, atomic_t *v)
arch/x86/include/asm/atomic64_64.h
@@ -188,7 +188,7 @@ static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, long new)
 
 static inline long arch_atomic64_xchg(atomic64_t *v, long new)
 {
-	return xchg(&v->counter, new);
+	return arch_xchg(&v->counter, new);
 }
 
 static inline void arch_atomic64_and(long i, atomic64_t *v)
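Why the two x86 hunks above switch to arch_xchg() rather than keep calling xchg(): once the instrumented xchg() macro below exists, arch-internal code must stick to arch_*() primitives. arch_atomic_xchg() is itself only reached through the instrumented atomic_xchg() wrapper, so the access has already been KASAN-checked once; going through xchg() again would instrument it twice. As best I recall, that pre-existing wrapper in include/asm-generic/atomic-instrumented.h looks like this (shown for context only, not part of this diff):

static __always_inline int atomic_xchg(atomic_t *v, int i)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_xchg(v, i);
}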
arch/x86/include/asm/cmpxchg.h
@@ -75,7 +75,7 @@ extern void __add_wrong_size(void)
  * use "asm volatile" and "memory" clobbers to prevent gcc from moving
  * information around.
  */
-#define xchg(ptr, v)		__xchg_op((ptr), (v), xchg, "")
+#define arch_xchg(ptr, v)	__xchg_op((ptr), (v), xchg, "")
 
 /*
  * Atomic compare and exchange. Compare OLD with MEM, if identical,
include/asm-generic/atomic-instrumented.h
@@ -408,6 +408,13 @@ static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v)
 }
 #endif
 
+#define xchg(ptr, new)						\
+({								\
+	typeof(ptr) __ai_ptr = (ptr);				\
+	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	arch_xchg(__ai_ptr, (new));				\
+})
+
 #define cmpxchg(ptr, old, new)					\
 ({								\
 	typeof(ptr) __ai_ptr = (ptr);				\
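With the new macro in place, a call such as xchg(&flag, 1) from generic code (flag being any hypothetical variable of a suitable type) expands roughly to:

({
	typeof(&flag) __ai_ptr = (&flag);
	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));
	arch_xchg(__ai_ptr, (1));
})

so every xchg() site now gets a KASAN write check immediately before the architecture's real exchange runs, matching what the atomic_*() functions and cmpxchg() already do.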