powerpc/spinlock: Define smp_mb__after_spinlock only once
Instead of both queued and simple spinlocks doing it. Move it into
the arch's spinlock.h.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210309015950.27688-2-dave@stgolabs.net
parent 93c043e393
commit 2bf3604c41
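For context (not part of the patch): smp_mb__after_spinlock() is what generic code uses when it needs a spin_lock(), which on powerpc is only an ACQUIRE, to also act as a full memory barrier; the "See include/linux/spinlock.h" comment in the diff points at that contract. Below is a minimal userspace sketch of the ordering involved, using C11 atomics and a POSIX spinlock instead of kernel primitives; every thread and variable name is invented for illustration only.

/*
 * Userspace sketch of the store-buffering pattern that
 * smp_mb__after_spinlock() exists to forbid.  This is NOT kernel code:
 * C11 atomics and a POSIX spinlock stand in for the kernel primitives.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_spinlock_t lock;
static atomic_int x, y;

static void *thread0(void *unused)
{
	(void)unused;
	atomic_store_explicit(&x, 1, memory_order_relaxed);
	pthread_spin_lock(&lock);                  /* acquire semantics only     */
	atomic_thread_fence(memory_order_seq_cst); /* ~ smp_mb__after_spinlock() */
	int r0 = atomic_load_explicit(&y, memory_order_relaxed);
	pthread_spin_unlock(&lock);
	printf("thread0 read y=%d\n", r0);
	return NULL;
}

static void *thread1(void *unused)
{
	(void)unused;
	atomic_store_explicit(&y, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst); /* full barrier on this side too */
	int r1 = atomic_load_explicit(&x, memory_order_relaxed);
	printf("thread1 read x=%d\n", r1);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	pthread_create(&a, NULL, thread0, NULL);
	pthread_create(&b, NULL, thread1, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/*
	 * With both full fences in place, "y=0" and "x=0" cannot both be
	 * printed.  Drop the fence after pthread_spin_lock() and that
	 * outcome becomes possible, because acquiring the lock does not
	 * order the earlier store to x against the load of y done inside
	 * the critical section.
	 */
	pthread_spin_destroy(&lock);
	return 0;
}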
arch/powerpc/include/asm/qspinlock.h
@@ -44,8 +44,6 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
 }
 #define queued_spin_lock queued_spin_lock
 
-#define smp_mb__after_spinlock() smp_mb()
-
 static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
 {
 	/*
arch/powerpc/include/asm/simple_spinlock.h
@@ -282,7 +282,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 #define arch_read_relax(lock) rw_yield(lock)
 #define arch_write_relax(lock) rw_yield(lock)
 
-/* See include/linux/spinlock.h */
-#define smp_mb__after_spinlock() smp_mb()
-
 #endif /* _ASM_POWERPC_SIMPLE_SPINLOCK_H */
arch/powerpc/include/asm/spinlock.h
@@ -10,6 +10,9 @@
 #include <asm/simple_spinlock.h>
 #endif
 
+/* See include/linux/spinlock.h */
+#define smp_mb__after_spinlock() smp_mb()
+
 #ifndef CONFIG_PARAVIRT_SPINLOCKS
 static inline void pv_spinlocks_init(void) { }
 #endif