mirror of
https://github.com/torvalds/linux.git
synced 2024-12-25 12:21:37 +00:00
hrtimer: raise softirq unlocked to avoid circular lock dependency
The scheduler hrtimer bits in 2.6.25 introduced a circular lock dependency in a rare code path: ======================================================= [ INFO: possible circular locking dependency detected ] 2.6.25-sched-devel.git-x86-latest.git #19 ------------------------------------------------------- X/2980 is trying to acquire lock: (&rq->rq_lock_key#2){++..}, at: [<ffffffff80230146>] task_rq_lock+0x56/0xa0 but task is already holding lock: (&cpu_base->lock){++..}, at: [<ffffffff80257ae1>] lock_hrtimer_base+0x31/0x60 which lock already depends on the new lock. The scenario which leads to this is: posix-timer signal is delivered -> posix-timer is rearmed timer is already expired in hrtimer_enqueue() -> softirq is raised To prevent this we need to move the raise of the softirq out of the base->lock protected code path. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: stable@kernel.org Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
This commit is contained in:
parent
e31a94ed37
commit
0c96c5979a
@ -590,7 +590,6 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
|
|||||||
list_add_tail(&timer->cb_entry,
|
list_add_tail(&timer->cb_entry,
|
||||||
&base->cpu_base->cb_pending);
|
&base->cpu_base->cb_pending);
|
||||||
timer->state = HRTIMER_STATE_PENDING;
|
timer->state = HRTIMER_STATE_PENDING;
|
||||||
raise_softirq(HRTIMER_SOFTIRQ);
|
|
||||||
return 1;
|
return 1;
|
||||||
default:
|
default:
|
||||||
BUG();
|
BUG();
|
||||||
@ -633,6 +632,11 @@ static int hrtimer_switch_to_hres(void)
|
|||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
 * Raise HRTIMER_SOFTIRQ with the timer base unlocked.
 *
 * Must be called only after unlock_hrtimer_base(): raising the softirq
 * while holding cpu_base->lock can create a circular lock dependency
 * against the runqueue lock (see the lockdep report in this commit's
 * message — posix-timer rearm path with an already-expired timer).
 */
static inline void hrtimer_raise_softirq(void)
{
	raise_softirq(HRTIMER_SOFTIRQ);
}
|
||||||
|
|
||||||
#else
|
#else
|
||||||
|
|
||||||
static inline int hrtimer_hres_active(void) { return 0; }
|
static inline int hrtimer_hres_active(void) { return 0; }
|
||||||
@ -651,6 +655,7 @@ static inline int hrtimer_reprogram(struct hrtimer *timer,
|
|||||||
{
|
{
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
/* !CONFIG_HIGH_RES_TIMERS stub: no hrtimer softirq exists, nothing to raise. */
static inline void hrtimer_raise_softirq(void) { }
|
||||||
|
|
||||||
#endif /* CONFIG_HIGH_RES_TIMERS */
|
#endif /* CONFIG_HIGH_RES_TIMERS */
|
||||||
|
|
||||||
@ -850,7 +855,7 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
|
|||||||
{
|
{
|
||||||
struct hrtimer_clock_base *base, *new_base;
|
struct hrtimer_clock_base *base, *new_base;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
int ret;
|
int ret, raise;
|
||||||
|
|
||||||
base = lock_hrtimer_base(timer, &flags);
|
base = lock_hrtimer_base(timer, &flags);
|
||||||
|
|
||||||
@ -884,8 +889,18 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
|
|||||||
enqueue_hrtimer(timer, new_base,
|
enqueue_hrtimer(timer, new_base,
|
||||||
new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
|
new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The timer may be expired and moved to the cb_pending
|
||||||
|
* list. We can not raise the softirq with base lock held due
|
||||||
|
* to a possible deadlock with runqueue lock.
|
||||||
|
*/
|
||||||
|
raise = timer->state == HRTIMER_STATE_PENDING;
|
||||||
|
|
||||||
unlock_hrtimer_base(timer, &flags);
|
unlock_hrtimer_base(timer, &flags);
|
||||||
|
|
||||||
|
if (raise)
|
||||||
|
hrtimer_raise_softirq();
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(hrtimer_start);
|
EXPORT_SYMBOL_GPL(hrtimer_start);
|
||||||
|
Loading…
Reference in New Issue
Block a user