locking/rwsem: Simplify the is-owner-spinnable checks
Add the trivial owner_on_cpu() helper for rwsem_can_spin_on_owner() and rwsem_spin_on_owner(); it also allows us to make rwsem_can_spin_on_owner() a bit clearer. Signed-off-by: Oleg Nesterov <oleg@redhat.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Acked-by: Waiman Long <longman@redhat.com> Cc: Amir Goldstein <amir73il@gmail.com> Cc: Davidlohr Bueso <dave@stgolabs.net> Cc: Jan Kara <jack@suse.cz> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Matthew Wilcox <willy@infradead.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Theodore Y. Ts'o <tytso@mit.edu> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/20180518165534.GA22348@redhat.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
675c00c332
commit
1b22fc609c
@ -347,6 +347,15 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline bool owner_on_cpu(struct task_struct *owner)
|
||||||
|
{
|
||||||
|
/*
|
||||||
|
* As lock holder preemption issue, we both skip spinning if
|
||||||
|
* task is not on cpu or its cpu is preempted
|
||||||
|
*/
|
||||||
|
return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
|
||||||
|
}
|
||||||
|
|
||||||
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
|
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
|
||||||
{
|
{
|
||||||
struct task_struct *owner;
|
struct task_struct *owner;
|
||||||
@ -359,17 +368,10 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
|
|||||||
|
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
owner = READ_ONCE(sem->owner);
|
owner = READ_ONCE(sem->owner);
|
||||||
if (!owner || !is_rwsem_owner_spinnable(owner)) {
|
if (owner) {
|
||||||
ret = !owner; /* !owner is spinnable */
|
ret = is_rwsem_owner_spinnable(owner) &&
|
||||||
goto done;
|
owner_on_cpu(owner);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* As lock holder preemption issue, we both skip spinning if task is not
|
|
||||||
* on cpu or its cpu is preempted
|
|
||||||
*/
|
|
||||||
ret = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
|
|
||||||
done:
|
|
||||||
rcu_read_unlock();
|
rcu_read_unlock();
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
@ -398,8 +400,7 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
|
|||||||
* abort spinning when need_resched or owner is not running or
|
* abort spinning when need_resched or owner is not running or
|
||||||
* owner's cpu is preempted.
|
* owner's cpu is preempted.
|
||||||
*/
|
*/
|
||||||
if (!owner->on_cpu || need_resched() ||
|
if (need_resched() || !owner_on_cpu(owner)) {
|
||||||
vcpu_is_preempted(task_cpu(owner))) {
|
|
||||||
rcu_read_unlock();
|
rcu_read_unlock();
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
Loading…
Reference in New Issue
Block a user