rwsem: skip initial trylock in rwsem_down_write_failed
We can skip the initial trylock in rwsem_down_write_failed() if there are
known active lockers already, thus saving one likely-to-fail cmpxchg.

Signed-off-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Peter Hurley <peter@hurleysoftware.com>
Acked-by: Davidlohr Bueso <davidlohr.bueso@hp.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
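For context, the optimization relies on the layout of sem->count: the low bits hold the number of active lockers and the upper part carries the waiting bias, so a non-zero (count & RWSEM_ACTIVE_MASK) means the lock is still actively held and the cmpxchg below cannot succeed. The sketch that follows approximates the per-architecture constants of that era (x86-64 values shown; other architectures use narrower masks), purely to make the bit layout concrete:

	/*
	 * Sketch of the rwsem count layout assumed by this patch.
	 * Values are per-architecture; x86-64-era definitions shown,
	 * 32-bit architectures use a 16-bit active mask instead.
	 */
	#define RWSEM_UNLOCKED_VALUE	0x00000000L
	#define RWSEM_ACTIVE_BIAS	0x00000001L
	#define RWSEM_ACTIVE_MASK	0xffffffffL		/* low word: # of active lockers */
	#define RWSEM_WAITING_BIAS	(-RWSEM_ACTIVE_MASK-1)	/* upper word: waiters queued */
	#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
	#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)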
parent a7d2c573ae
commit 9b0fc9c09f

 lib/rwsem.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -216,14 +216,15 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 	/* wait until we successfully acquire the lock */
 	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 	while (true) {
-
-		/* Try acquiring the write lock. */
-		count = RWSEM_ACTIVE_WRITE_BIAS;
-		if (!list_is_singular(&sem->wait_list))
-			count += RWSEM_WAITING_BIAS;
-		if (cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
+		if (!(count & RWSEM_ACTIVE_MASK)) {
+			/* Try acquiring the write lock. */
+			count = RWSEM_ACTIVE_WRITE_BIAS;
+			if (!list_is_singular(&sem->wait_list))
+				count += RWSEM_WAITING_BIAS;
+			if (cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
 							RWSEM_WAITING_BIAS)
-			break;
+				break;
+		}
 
 		raw_spin_unlock_irq(&sem->wait_lock);
 
@@ -231,7 +232,7 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 		do {
 			schedule();
 			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-		} while (sem->count & RWSEM_ACTIVE_MASK);
+		} while ((count = sem->count) & RWSEM_ACTIVE_MASK);
 
 		raw_spin_lock_irq(&sem->wait_lock);
 	}
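For readability, this is how the wait loop reads with the two hunks above applied (reconstructed from the diff; indentation approximated, and the single unchanged line between the hunks elided). The value of count sampled in the sleep loop's exit condition is reused on the next iteration of the outer loop, so a waiter that still sees active lockers skips the doomed cmpxchg entirely:

	/* wait until we successfully acquire the lock */
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	while (true) {
		if (!(count & RWSEM_ACTIVE_MASK)) {
			/* Try acquiring the write lock. */
			count = RWSEM_ACTIVE_WRITE_BIAS;
			if (!list_is_singular(&sem->wait_list))
				count += RWSEM_WAITING_BIAS;
			if (cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
							RWSEM_WAITING_BIAS)
				break;
		}

		raw_spin_unlock_irq(&sem->wait_lock);

		/* ... one unchanged line between the two hunks ... */
		do {
			schedule();
			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		} while ((count = sem->count) & RWSEM_ACTIVE_MASK);

		raw_spin_lock_irq(&sem->wait_lock);
	}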