locking/rtmutex: Provide rt_mutex_slowlock_locked()
Split the inner workings of rt_mutex_slowlock() out into a separate function, which can be reused by the upcoming RT lock substitutions, e.g. for rw_semaphores.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211302.841971086@linutronix.de
commit ebbdc41e90
parent 830e6acc8a
committed by Ingo Molnar
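As a quick orientation before the diff, below is a minimal userspace C sketch of the structure this patch creates: the blocking slowpath is split into an inner helper that runs with the wait_lock already held, and an outer wrapper whose only job is to take and drop that lock, so that other lock types (such as the rw_semaphore substitutions mentioned above) can reuse the inner helper under their own wait_lock handling. This is an illustrative model only, not kernel code; every name in it (demo_lock, demo_slowlock_locked(), demo_rwsem_down_write(), ...) is invented for the sketch, with a pthread mutex standing in for the raw wait_lock and a condition variable standing in for the wait/wake loop.

/*
 * Toy userspace model of the split -- NOT kernel code.  The pthread mutex
 * models lock->wait_lock, the condition variable models the wait/wake loop.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_lock {
	pthread_mutex_t wait_lock;	/* models lock->wait_lock */
	pthread_cond_t  waiters;	/* models the wait/wake mechanism */
	bool		owned;		/* models the owner field */
};

/* Inner slowpath: caller already holds ->wait_lock. */
static int demo_slowlock_locked(struct demo_lock *l)
{
	while (l->owned)		/* wait-wake-try-to-take loop */
		pthread_cond_wait(&l->waiters, &l->wait_lock);
	l->owned = true;
	return 0;
}

/* Outer slowpath: take ->wait_lock, delegate, drop it again. */
static int demo_slowlock(struct demo_lock *l)
{
	int ret;

	pthread_mutex_lock(&l->wait_lock);
	ret = demo_slowlock_locked(l);
	pthread_mutex_unlock(&l->wait_lock);
	return ret;
}

/*
 * A different lock type (think: an rw_semaphore substitution) can reuse the
 * inner slowpath while doing its own bookkeeping under the same wait_lock.
 */
static int demo_rwsem_down_write(struct demo_lock *l)
{
	int ret;

	pthread_mutex_lock(&l->wait_lock);
	/* ... rwsem-specific bookkeeping would go here ... */
	ret = demo_slowlock_locked(l);
	pthread_mutex_unlock(&l->wait_lock);
	return ret;
}

static void demo_unlock(struct demo_lock *l)
{
	pthread_mutex_lock(&l->wait_lock);
	l->owned = false;
	pthread_cond_signal(&l->waiters);
	pthread_mutex_unlock(&l->wait_lock);
}

int main(void)
{
	struct demo_lock l = {
		.wait_lock = PTHREAD_MUTEX_INITIALIZER,
		.waiters   = PTHREAD_COND_INITIALIZER,
	};

	demo_slowlock(&l);		/* plain mutex-style acquisition */
	demo_unlock(&l);
	demo_rwsem_down_write(&l);	/* reuses the locked slowpath */
	demo_unlock(&l);
	printf("both paths shared demo_slowlock_locked()\n");
	return 0;
}

The design point mirrors the patch: the wrapper contains nothing but the lock/unlock of wait_lock, so any context that already holds wait_lock, or that manages it differently, can call the *_locked() variant directly.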
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1106,7 +1106,7 @@ static void __sched remove_waiter(struct rt_mutex_base *lock,
 }
 
 /**
- * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
+ * rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop
  * @lock:		 the rt_mutex to take
  * @state:		 the state the task should block in (TASK_INTERRUPTIBLE
  *			 or TASK_UNINTERRUPTIBLE)
@@ -1115,7 +1115,7 @@ static void __sched remove_waiter(struct rt_mutex_base *lock,
  *
  * Must be called with lock->wait_lock held and interrupts disabled
  */
-static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
-				       unsigned int state,
-				       struct hrtimer_sleeper *timeout,
-				       struct rt_mutex_waiter *waiter)
+static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
+					   unsigned int state,
+					   struct hrtimer_sleeper *timeout,
+					   struct rt_mutex_waiter *waiter)
@@ -1168,20 +1168,72 @@ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
 	}
 }
 
-/*
- * Slow path lock function:
+/**
+ * __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held
+ * @lock:	The rtmutex to block lock
+ * @state:	The task state for sleeping
+ * @chwalk:	Indicator whether full or partial chainwalk is requested
+ * @waiter:	Initializer waiter for blocking
  */
-static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
-				     unsigned int state,
-				     struct hrtimer_sleeper *timeout,
-				     enum rtmutex_chainwalk chwalk)
+static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
+				       unsigned int state,
+				       enum rtmutex_chainwalk chwalk,
+				       struct rt_mutex_waiter *waiter)
+{
+	int ret;
+
+	lockdep_assert_held(&lock->wait_lock);
+
+	/* Try to acquire the lock again: */
+	if (try_to_take_rt_mutex(lock, current, NULL))
+		return 0;
+
+	set_current_state(state);
+
+	ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk);
+
+	if (likely(!ret))
+		ret = rt_mutex_slowlock_block(lock, state, NULL, waiter);
+
+	if (unlikely(ret)) {
+		__set_current_state(TASK_RUNNING);
+		remove_waiter(lock, waiter);
+		rt_mutex_handle_deadlock(ret, chwalk, waiter);
+	}
+
+	/*
+	 * try_to_take_rt_mutex() sets the waiter bit
+	 * unconditionally. We might have to fix that up.
+	 */
+	fixup_rt_mutex_waiters(lock);
+	return ret;
+}
+
+static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
+					     unsigned int state)
 {
 	struct rt_mutex_waiter waiter;
-	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	rt_mutex_init_waiter(&waiter);
+
+	ret = __rt_mutex_slowlock(lock, state, RT_MUTEX_MIN_CHAINWALK, &waiter);
+
+	debug_rt_mutex_free_waiter(&waiter);
+	return ret;
+}
+
+/*
+ * rt_mutex_slowlock - Locking slowpath invoked when fast path fails
+ * @lock:	The rtmutex to block lock
+ * @state:	The task state for sleeping
+ */
+static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
+				     unsigned int state)
+{
+	unsigned long flags;
+	int ret;
 
 	/*
 	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
 	 * be called in early boot if the cmpxchg() fast path is disabled
@@ -1191,44 +1243,8 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
 	 * irqsave/restore variants.
 	 */
 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
-
-	/* Try to acquire the lock again: */
-	if (try_to_take_rt_mutex(lock, current, NULL)) {
-		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-		return 0;
-	}
-
-	set_current_state(state);
-
-	/* Setup the timer, when timeout != NULL */
-	if (unlikely(timeout))
-		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
-
-	ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
-
-	if (likely(!ret))
-		/* sleep on the mutex */
-		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
-
-	if (unlikely(ret)) {
-		__set_current_state(TASK_RUNNING);
-		remove_waiter(lock, &waiter);
-		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
-	}
-
-	/*
-	 * try_to_take_rt_mutex() sets the waiter bit
-	 * unconditionally. We might have to fix that up.
-	 */
-	fixup_rt_mutex_waiters(lock);
-
+	ret = __rt_mutex_slowlock_locked(lock, state);
 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-
-	/* Remove pending timer: */
-	if (unlikely(timeout))
-		hrtimer_cancel(&timeout->timer);
-
-	debug_rt_mutex_free_waiter(&waiter);
 
 	return ret;
 }
@@ -1239,7 +1255,7 @@ static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
 	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
 		return 0;
 
-	return rt_mutex_slowlock(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
+	return rt_mutex_slowlock(lock, state);
 }
 
 static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
--- a/kernel/locking/rtmutex_api.c
+++ b/kernel/locking/rtmutex_api.c
@@ -342,7 +342,7 @@ int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
 	raw_spin_lock_irq(&lock->wait_lock);
 	/* sleep on the mutex */
 	set_current_state(TASK_INTERRUPTIBLE);
-	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
+	ret = rt_mutex_slowlock_block(lock, TASK_INTERRUPTIBLE, to, waiter);
 	/*
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 	 * have to fix that up.