[PATCH] sched: add debugging
These conditions should now be impossible, and we need to fix them if they happen.

Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit db935dbd43
parent 3950745131
@@ -1942,15 +1942,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
 		goto out_balanced;
 	}
 
-	/*
-	 * This should be "impossible", but since load
-	 * balancing is inherently racy and statistical,
-	 * it could happen in theory.
-	 */
-	if (unlikely(busiest == this_rq)) {
-		WARN_ON(1);
-		goto out_balanced;
-	}
+	BUG_ON(busiest == this_rq);
 
 	schedstat_add(sd, lb_imbalance[idle], imbalance);
 
@@ -2052,11 +2044,13 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
 	}
 
 	busiest = find_busiest_queue(group);
-	if (!busiest || busiest == this_rq) {
+	if (!busiest) {
 		schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
 		goto out_balanced;
 	}
 
+	BUG_ON(busiest == this_rq);
+
 	/* Attempt to move tasks */
 	double_lock_balance(this_rq, busiest);
 
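For context on the pattern this patch changes: a defensive recover-and-warn path becomes a hard assertion. Once find_busiest_queue() is guaranteed never to hand back this_rq, quietly tolerating that case would only hide a real bug, so the check becomes BUG_ON(). Below is a minimal userspace C analogue of the two styles, using assert() in place of the kernel's BUG_ON(); the struct and function names are hypothetical and not part of the patch.

#include <assert.h>
#include <stdio.h>

struct runqueue { int cpu; };

/* Old style: tolerate the "impossible" case, warn, and bail out. */
static int balance_defensive(struct runqueue *busiest, struct runqueue *this_rq)
{
	if (busiest == this_rq) {
		fprintf(stderr, "warning: busiest == this_rq, skipping balance\n");
		return -1;		/* silently give up */
	}
	return 0;			/* proceed with balancing */
}

/* New style: the caller guarantees busiest != this_rq, so assert it. */
static int balance_assertive(struct runqueue *busiest, struct runqueue *this_rq)
{
	assert(busiest != this_rq);	/* userspace analogue of BUG_ON() */
	return 0;
}

int main(void)
{
	struct runqueue a = { .cpu = 0 }, b = { .cpu = 1 };

	printf("defensive: %d\n", balance_defensive(&a, &b));
	printf("assertive: %d\n", balance_assertive(&a, &b));
	return 0;
}

The defensive variant quietly skips balancing when the invariant is violated; the assertive variant fails loudly, which matches the commit message: these conditions should now be impossible, and if they happen the caller needs fixing rather than papering over.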