rtmutex: Simplify and document try_to_take_rtmutex()
The current implementation of try_to_take_rtmutex() is correct, but requires more than a single brain twist to understand the cleverly encoded conditionals. Untangle it and document the cases properly. Looks less efficient at first glance, but actually reduces the binary code size on x86_64 by 80 bytes.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
parent 88f2b4c15e
commit 358c331f39
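For context: the new comment block in the diff below refers to the RT_MUTEX_HAS_WAITERS bit that rtmutex folds into the low bit of lock->owner. The following is a minimal, self-contained sketch of that encoding and of the cmpxchg loop the comments describe — it mirrors the kernel's names but uses C11 atomics instead of the kernel's cmpxchg(), and is a simplified illustration, not the verbatim rtmutex.c code:

#include <stdatomic.h>
#include <stdint.h>

#define RT_MUTEX_HAS_WAITERS 1UL   /* lowest bit of lock->owner */

struct task_struct;                /* opaque in this sketch */

struct rt_mutex {
        /* Owner task pointer with RT_MUTEX_HAS_WAITERS folded into bit 0. */
        _Atomic uintptr_t owner;
};

/* Owner task with the waiters bit masked off; NULL means unowned. */
static struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
{
        return (struct task_struct *)
                (atomic_load(&lock->owner) & ~RT_MUTEX_HAS_WAITERS);
}

/*
 * Set the waiters bit with a cmpxchg loop. Once the bit is set, the
 * lockless acquire/release fast paths fail and every task is forced
 * into the slow path, where lock->wait_lock serializes everything.
 */
static void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        uintptr_t owner = atomic_load(&lock->owner);

        while (!atomic_compare_exchange_weak(&lock->owner, &owner,
                                             owner | RT_MUTEX_HAS_WAITERS))
                ;       /* 'owner' was reloaded by the failed CAS; retry */
}

Note the transitional state the patch documents: mark_rt_mutex_waiters() may leave lock->owner == RT_MUTEX_HAS_WAITERS (bit set, no owner) even though the lock ends up uncontended, and somebody must later clear that bit.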
@@ -533,76 +533,119 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
  *
  * Must be called with lock->wait_lock held.
  *
- * @lock:   the lock to be acquired.
- * @task:   the task which wants to acquire the lock
- * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
+ * @lock:   The lock to be acquired.
+ * @task:   The task which wants to acquire the lock
+ * @waiter: The waiter that is queued to the lock's wait list if the
+ *          callsite called task_blocked_on_lock(), otherwise NULL
  */
 static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
-                struct rt_mutex_waiter *waiter)
+                                struct rt_mutex_waiter *waiter)
 {
+        unsigned long flags;
+
         /*
-         * We have to be careful here if the atomic speedups are
-         * enabled, such that, when
-         *  - no other waiter is on the lock
-         *  - the lock has been released since we did the cmpxchg
-         * the lock can be released or taken while we are doing the
-         * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
+         * Before testing whether we can acquire @lock, we set the
+         * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
+         * other tasks which try to modify @lock into the slow path
+         * and they serialize on @lock->wait_lock.
          *
-         * The atomic acquire/release aware variant of
-         * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
-         * the WAITERS bit, the atomic release / acquire can not
-         * happen anymore and lock->wait_lock protects us from the
-         * non-atomic case.
+         * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
+         * as explained at the top of this file if and only if:
          *
-         * Note, that this might set lock->owner =
-         * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
-         * any more. This is fixed up when we take the ownership.
-         * This is the transitional state explained at the top of this file.
+         * - There is a lock owner. The caller must fixup the
+         *   transient state if it does a trylock or leaves the lock
+         *   function due to a signal or timeout.
+         *
+         * - @task acquires the lock and there are no other
+         *   waiters. This is undone in rt_mutex_set_owner(@task) at
+         *   the end of this function.
          */
         mark_rt_mutex_waiters(lock);

+        /*
+         * If @lock has an owner, give up.
+         */
         if (rt_mutex_owner(lock))
                 return 0;

         /*
-         * It will get the lock because of one of these conditions:
-         * 1) there is no waiter
-         * 2) higher priority than waiters
-         * 3) it is top waiter
+         * If @waiter != NULL, @task has already enqueued the waiter
+         * into @lock waiter list. If @waiter == NULL then this is a
+         * trylock attempt.
          */
-        if (rt_mutex_has_waiters(lock)) {
-                if (task->prio >= rt_mutex_top_waiter(lock)->prio) {
-                        if (!waiter || waiter != rt_mutex_top_waiter(lock))
-                                return 0;
-                }
-        }
-
-        if (waiter || rt_mutex_has_waiters(lock)) {
-                unsigned long flags;
-                struct rt_mutex_waiter *top;
-
-                raw_spin_lock_irqsave(&task->pi_lock, flags);
-
-                /* remove the queued waiter. */
-                if (waiter) {
-                        rt_mutex_dequeue(lock, waiter);
-                        task->pi_blocked_on = NULL;
-                }
+        if (waiter) {
+                /*
+                 * If waiter is not the highest priority waiter of
+                 * @lock, give up.
+                 */
+                if (waiter != rt_mutex_top_waiter(lock))
+                        return 0;

                 /*
-                 * We have to enqueue the top waiter(if it exists) into
-                 * task->pi_waiters list.
+                 * We can acquire the lock. Remove the waiter from the
+                 * lock waiters list.
                  */
-                if (rt_mutex_has_waiters(lock)) {
-                        top = rt_mutex_top_waiter(lock);
-                        rt_mutex_enqueue_pi(task, top);
+                rt_mutex_dequeue(lock, waiter);
+
+        } else {
+                /*
+                 * If the lock has waiters already we check whether @task is
+                 * eligible to take over the lock.
+                 *
+                 * If there are no other waiters, @task can acquire
+                 * the lock. @task->pi_blocked_on is NULL, so it does
+                 * not need to be dequeued.
+                 */
+                if (rt_mutex_has_waiters(lock)) {
+                        /*
+                         * If @task->prio is greater than or equal to
+                         * the top waiter priority (kernel view),
+                         * @task lost.
+                         */
+                        if (task->prio >= rt_mutex_top_waiter(lock)->prio)
+                                return 0;
+
+                        /*
+                         * The current top waiter stays enqueued. We
+                         * don't have to change anything in the lock
+                         * waiters order.
+                         */
+                } else {
+                        /*
+                         * No waiters. Take the lock without the
+                         * pi_lock dance. @task->pi_blocked_on is NULL
+                         * and we have no waiters to enqueue in @task
+                         * pi waiters list.
+                         */
+                        goto takeit;
                 }
-                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
         }

+        /*
+         * Clear @task->pi_blocked_on. Requires protection by
+         * @task->pi_lock. Redundant operation for the @waiter == NULL
+         * case, but conditionals are more expensive than a redundant
+         * store.
+         */
+        raw_spin_lock_irqsave(&task->pi_lock, flags);
+        task->pi_blocked_on = NULL;
+        /*
+         * Finish the lock acquisition. @task is the new owner. If
+         * other waiters exist we have to insert the highest priority
+         * waiter into @task->pi_waiters list.
+         */
+        if (rt_mutex_has_waiters(lock))
+                rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
+        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
+takeit:
         /* We got the lock. */
         debug_rt_mutex_lock(lock);

+        /*
+         * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
+         * are still waiters or clears it.
+         */
         rt_mutex_set_owner(lock, task);

         rt_mutex_deadlock_account_lock(lock, task);
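To see the @waiter == NULL case the new comments distinguish, here is roughly how a trylock slow path of that era drives this function and then cleans up the transient waiters bit. This is a simplified sketch modeled on rt_mutex_slowtrylock() and fixup_rt_mutex_waiters() from the same kernel, not the verbatim source:

/*
 * Trylock slow path sketch: calls try_to_take_rt_mutex() with
 * @waiter == NULL under lock->wait_lock.
 */
static int rt_mutex_slowtrylock(struct rt_mutex *lock)
{
        int ret = 0;

        raw_spin_lock(&lock->wait_lock);

        if (likely(rt_mutex_owner(lock) != current)) {

                ret = try_to_take_rt_mutex(lock, current, NULL);

                /*
                 * try_to_take_rt_mutex() sets the waiters bit
                 * unconditionally. If we lost because the lock has
                 * an owner, this clears the transient state again --
                 * the "lock owner" fixup case the comment in the
                 * patch describes.
                 */
                fixup_rt_mutex_waiters(lock);
        }

        raw_spin_unlock(&lock->wait_lock);

        return ret;
}

The blocking slow path is the other caller: it enqueues a waiter via task_blocks_on_rt_mutex() and then retries with @waiter != NULL, which is why the acquisition rule there reduces to "am I the top waiter".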