Mirror of https://github.com/torvalds/linux.git (synced 2024-12-31 23:31:29 +00:00)
locking/qspinlock: Re-order code
Flip the branch condition after atomic_fetch_or_acquire(_Q_PENDING_VAL) such that we lose the indent. This also results in a more natural code flow IMO.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: andrea.parri@amarulasolutions.com
Cc: longman@redhat.com
Link: https://lkml.kernel.org/r/20181003130257.156322446@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent ec57e2f0ac
commit 53bf57fab7
@@ -330,39 +330,37 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
         * 0,0,1 -> 0,1,1 ; pending
         */
        val = atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
-       if (!(val & ~_Q_LOCKED_MASK)) {
-               /*
-                * We're pending, wait for the owner to go away.
-                *
-                * *,1,1 -> *,1,0
-                *
-                * this wait loop must be a load-acquire such that we match the
-                * store-release that clears the locked bit and create lock
-                * sequentiality; this is because not all
-                * clear_pending_set_locked() implementations imply full
-                * barriers.
-                */
-               if (val & _Q_LOCKED_MASK) {
-                       atomic_cond_read_acquire(&lock->val,
-                                                !(VAL & _Q_LOCKED_MASK));
-               }
-
-               /*
-                * take ownership and clear the pending bit.
-                *
-                * *,1,0 -> *,0,1
-                */
-               clear_pending_set_locked(lock);
-               qstat_inc(qstat_lock_pending, true);
-               return;
+       /*
+        * If we observe any contention; undo and queue.
+        */
+       if (unlikely(val & ~_Q_LOCKED_MASK)) {
+               if (!(val & _Q_PENDING_MASK))
+                       clear_pending(lock);
+               goto queue;
        }
 
        /*
-        * If pending was clear but there are waiters in the queue, then
-        * we need to undo our setting of pending before we queue ourselves.
+        * We're pending, wait for the owner to go away.
+        *
+        * 0,1,1 -> 0,1,0
+        *
+        * this wait loop must be a load-acquire such that we match the
+        * store-release that clears the locked bit and create lock
+        * sequentiality; this is because not all
+        * clear_pending_set_locked() implementations imply full
+        * barriers.
        */
-       if (!(val & _Q_PENDING_MASK))
-               clear_pending(lock);
+       if (val & _Q_LOCKED_MASK)
+               atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_MASK));
+
+       /*
+        * take ownership and clear the pending bit.
+        *
+        * 0,1,0 -> 0,0,1
+        */
+       clear_pending_set_locked(lock);
+       qstat_inc(qstat_lock_pending, true);
+       return;
 
        /*
        * End of pending bit optimistic spinning and beginning of MCS
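To show the shape of this re-ordering in isolation, here is a minimal, self-contained C sketch of the same "flip the condition and bail out early" transformation. It is not the kernel code: the function names pending_path_old()/pending_path_new(), the toy masks, and the return values are made up for illustration, and the kernel's unlikely() annotation is omitted.

#include <stdbool.h>
#include <stdio.h>

#define LOCKED_MASK   0x01u
#define PENDING_MASK  0x02u

/* Old shape: the common (uncontended) path is nested inside the branch. */
static bool pending_path_old(unsigned int val)
{
        if (!(val & ~LOCKED_MASK)) {
                /* ... wait for the owner, then take ownership ... */
                return true;
        }

        /* contention observed: undo pending and queue */
        return false;
}

/* New shape: flip the condition, handle contention first, lose the indent. */
static bool pending_path_new(unsigned int val)
{
        if (val & ~LOCKED_MASK) {
                /* contention observed: undo pending and queue */
                return false;
        }

        /* ... wait for the owner, then take ownership ... */
        return true;
}

int main(void)
{
        unsigned int samples[] = { 0x0u, LOCKED_MASK, PENDING_MASK,
                                   LOCKED_MASK | PENDING_MASK };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                /* Both shapes must return the same answer for every input. */
                printf("val=0x%x old=%d new=%d\n", samples[i],
                       pending_path_old(samples[i]), pending_path_new(samples[i]));
        }
        return 0;
}

Because the flipped test is the exact complement of the original, behaviour is unchanged; only the indentation moves, which is what "a more natural code flow" in the commit message refers to.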