6262db7c08
There is an ordering issue with spin_unlock_wait() on powerpc, because
the spin_lock primitive is an ACQUIRE and an ACQUIRE only orders the
load part of the operation with memory operations following it.
Therefore the following event sequence can happen:
CPU 1                     CPU 2                   CPU 3
==================        ====================    ==============
                                                  spin_unlock(&lock);
                          spin_lock(&lock):
                            r1 = *lock; // r1 == 0;
o = object;               o = READ_ONCE(object); // reordered here
object = NULL;
smp_mb();
spin_unlock_wait(&lock);
                            *lock = 1;
smp_mb();
o->dead = true;           < o = READ_ONCE(object); > // reordered upwards
                          if (o) // true
                                  BUG_ON(o->dead); // true!!
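Concretely, the two locking CPUs correspond to caller code along the lines of the sketch below. The names (struct foo, object, obj_lock, cpu1_teardown, cpu2_reader) are invented purely for illustration and are not part of this patch; the point is only to show where the smp_mb() + spin_unlock_wait() pattern sits relative to the lock holder:

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/compiler.h>
#include <linux/bug.h>
#include <asm/barrier.h>

struct foo {
        bool dead;
};

static struct foo *object;              /* published object */
static DEFINE_SPINLOCK(obj_lock);       /* taken by readers of 'object' */

/* CPU 1: unpublish the object, wait out any current holder, then poison it. */
void cpu1_teardown(struct foo *o)
{
        object = NULL;                  /* unpublish */
        smp_mb();                       /* order the store before the lock check */
        spin_unlock_wait(&obj_lock);    /* wait for any concurrent critical section */
        smp_mb();
        o->dead = true;                 /* assumed invisible to lock holders */
}

/* CPU 2: look the object up under the lock. */
void cpu2_reader(void)
{
        struct foo *o;

        spin_lock(&obj_lock);
        o = READ_ONCE(object);          /* the load that can be satisfied early */
        if (o)
                BUG_ON(o->dead);        /* fires in the broken scenario above */
        spin_unlock(&obj_lock);
}

The bug is that CPU 2's READ_ONCE(object) can be satisfied before its store to the lock word becomes visible to CPU 1, so CPU 1's spin_unlock_wait() sees the lock as free, proceeds, and sets o->dead while CPU 2 still holds a non-NULL o.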
To fix this, we add a "nop" ll/sc loop in arch_spin_unlock_wait() on
ppc. The "nop" ll/sc loop reads the lock value and writes it back
atomically; in this way it synchronizes the view of the lock on CPU 1
with that on CPU 2. Therefore, in the scenario above, either CPU 2 will
fail to take the lock at first or CPU 1 will see the lock held by
CPU 2; both cases eliminate this bug. This is a similar idea to what
Will Deacon did for ARM64 in:
d86b8da04d
("arm64: spinlock: serialise spin_unlock_wait against concurrent lockers")
Furthermore, if the "nop" ll/sc finds the lock held, we don't need to
repeat the "nop" ll/sc trick; a normal load+check loop waiting for the
lock to be released is sufficient, because in that case
spin_unlock_wait() was called while someone was holding the lock, and
the store part of the "nop" ll/sc happens before the lock release by
the current lock holder:
"nop" ll/sc -> spin_unlock()
and the lock release happens before the next lock acquisition:
spin_unlock() -> spin_lock() <next holder>
which means the "nop" ll/sc happens before the next lock acquisition:
"nop" ll/sc -> spin_unlock() -> spin_lock() <next holder>
With an smp_mb() preceding spin_unlock_wait(), the store to object is
guaranteed to be observed by the next lock holder:
STORE -> smp_mb() -> "nop" ll/sc
-> spin_unlock() -> spin_lock() <next holder>
This patch therefore fixes the issue and also cleans up
arch_spin_unlock_wait() a little by removing superfluous memory
barriers in loops and consolidating the PPC32 and PPC64 implementations
into one.
Suggested-by: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Reviewed-by: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
[mpe: Inline the "nop" ll/sc loop and set EH=0, munge change log]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
/*
 * Spin and read/write lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *   Rework to support virtual processors
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/stringify.h>
#include <linux/smp.h>

/* waiting for a spinlock... */
#if defined(CONFIG_PPC_SPLPAR)
#include <asm/hvcall.h>
#include <asm/smp.h>

void __spin_yield(arch_spinlock_t *lock)
{
        unsigned int lock_value, holder_cpu, yield_count;

        lock_value = lock->slock;
        if (lock_value == 0)
                return;
        holder_cpu = lock_value & 0xffff;
        BUG_ON(holder_cpu >= NR_CPUS);
        yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
        if ((yield_count & 1) == 0)
                return;         /* virtual cpu is currently running */
        rmb();
        if (lock->slock != lock_value)
                return;         /* something has changed */
        plpar_hcall_norets(H_CONFER,
                get_hard_smp_processor_id(holder_cpu), yield_count);
}
EXPORT_SYMBOL_GPL(__spin_yield);

/*
 * Waiting for a read lock or a write lock on a rwlock...
 * This turns out to be the same for read and write locks, since
 * we only know the holder if it is write-locked.
 */
void __rw_yield(arch_rwlock_t *rw)
{
        int lock_value;
        unsigned int holder_cpu, yield_count;

        lock_value = rw->lock;
        if (lock_value >= 0)
                return;         /* no write lock at present */
        holder_cpu = lock_value & 0xffff;
        BUG_ON(holder_cpu >= NR_CPUS);
        yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
        if ((yield_count & 1) == 0)
                return;         /* virtual cpu is currently running */
        rmb();
        if (rw->lock != lock_value)
                return;         /* something has changed */
        plpar_hcall_norets(H_CONFER,
                get_hard_smp_processor_id(holder_cpu), yield_count);
}
#endif
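The listing above stops at the #endif; the new arch_spin_unlock_wait() that the change log describes sits after this point in the file, which the page does not show. A minimal sketch of the described approach follows, assuming the lock word layout used by __spin_yield() above (slock == 0 means unlocked). The merged code presumably emits the lwarx via the PPC_LWARX() macro with EH=0 per the [mpe] note (a plain lwarx also has EH=0) and additionally yields to the lock holder under SPLPAR; this sketch leaves those details out:

void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
        u32 val;

        smp_mb();

        /*
         * The "nop" ll/sc loop: atomically load the lock word and store
         * it back unchanged.  A successful stwcx. orders this CPU's view
         * of the lock against any concurrent locker's stwcx.
         */
        __asm__ __volatile__(
"1:     lwarx   %0,0,%2\n"
"       stwcx.  %0,0,%2\n"
"       bne-    1b\n"
        : "=&r" (val), "+m" (*lock)
        : "r" (lock)
        : "cr0", "xer");

        /*
         * If the ll/sc observed the lock held, a normal load+check loop
         * is enough from here on (see the ordering chain in the change
         * log): the store half of the ll/sc happens before the current
         * holder's spin_unlock(), which happens before the next
         * spin_lock().
         */
        while (val) {
                cpu_relax();
                val = READ_ONCE(lock->slock);
        }

        smp_mb();
}
EXPORT_SYMBOL(arch_spin_unlock_wait);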