mirror of
https://github.com/torvalds/linux.git
synced 2024-11-12 07:01:57 +00:00
sched: Move up affinity check to mitigate useless redoing overhead
Currently, LBF_ALL_PINNED is cleared after the affinity check is passed. So, if task migration is skipped due to a small load value or a small imbalance value in move_tasks(), we don't clear LBF_ALL_PINNED, and in the end we trigger 'redo' in load_balance(). The imbalance value is often so small that no tasks can be moved to other cpus, and of course this situation may persist even after we change the target cpu. So this patch moves the affinity check up and clears LBF_ALL_PINNED before evaluating the load value, in order to mitigate the useless redo overhead. In addition, re-order some comments correctly. Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Tested-by: Jason Low <jason.low2@hp.com> Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com> Cc: Davidlohr Bueso <davidlohr.bueso@hp.com> Cc: Peter Zijlstra <peterz@infradead.org> Link: http://lkml.kernel.org/r/1366705662-3587-5-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
cfc0311804
commit
d31980846f
@ -3896,10 +3896,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
|
|||||||
int tsk_cache_hot = 0;
|
int tsk_cache_hot = 0;
|
||||||
/*
|
/*
|
||||||
* We do not migrate tasks that are:
|
* We do not migrate tasks that are:
|
||||||
* 1) running (obviously), or
|
* 1) throttled_lb_pair, or
|
||||||
* 2) cannot be migrated to this CPU due to cpus_allowed, or
|
* 2) cannot be migrated to this CPU due to cpus_allowed, or
|
||||||
* 3) are cache-hot on their current CPU.
|
* 3) running (obviously), or
|
||||||
|
* 4) are cache-hot on their current CPU.
|
||||||
*/
|
*/
|
||||||
|
if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
|
||||||
|
return 0;
|
||||||
|
|
||||||
if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
|
if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
|
||||||
int new_dst_cpu;
|
int new_dst_cpu;
|
||||||
|
|
||||||
@ -3967,9 +3971,6 @@ static int move_one_task(struct lb_env *env)
|
|||||||
struct task_struct *p, *n;
|
struct task_struct *p, *n;
|
||||||
|
|
||||||
list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
|
list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
|
||||||
if (throttled_lb_pair(task_group(p), env->src_rq->cpu, env->dst_cpu))
|
|
||||||
continue;
|
|
||||||
|
|
||||||
if (!can_migrate_task(p, env))
|
if (!can_migrate_task(p, env))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
@ -4021,7 +4022,7 @@ static int move_tasks(struct lb_env *env)
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
|
if (!can_migrate_task(p, env))
|
||||||
goto next;
|
goto next;
|
||||||
|
|
||||||
load = task_h_load(p);
|
load = task_h_load(p);
|
||||||
@ -4032,9 +4033,6 @@ static int move_tasks(struct lb_env *env)
|
|||||||
if ((load / 2) > env->imbalance)
|
if ((load / 2) > env->imbalance)
|
||||||
goto next;
|
goto next;
|
||||||
|
|
||||||
if (!can_migrate_task(p, env))
|
|
||||||
goto next;
|
|
||||||
|
|
||||||
move_task(p, env);
|
move_task(p, env);
|
||||||
pulled++;
|
pulled++;
|
||||||
env->imbalance -= load;
|
env->imbalance -= load;
|
||||||
|
Loading…
Reference in New Issue
Block a user