sched/fair: Remove double_lock_balance() from active_load_balance_cpu_stop()

Avoid double_rq_lock() and use the TASK_ON_RQ_MIGRATING state in
active_load_balance_cpu_stop(). The advantage is (obviously) not
holding two rq->locks at the same time, thereby increasing
parallelism.
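
In outline, the patch replaces holding both runqueue locks across the
migration with a detach/attach sequence that holds only one rq->lock
at a time (a simplified sketch of the flow in the diff below, not
literal kernel code):

	/* Old: both rq->locks held for the whole migration. */
	double_lock_balance(busiest_rq, target_rq);
	move_one_task(&env);
	double_unlock_balance(busiest_rq, target_rq);

	/*
	 * New: one rq->lock at a time; the task is marked
	 * TASK_ON_RQ_MIGRATING while it is queued on neither rq.
	 */
	raw_spin_lock_irq(&busiest_rq->lock);
	p = detach_one_task(&env);	/* dequeue, set state, set_task_cpu() */
	raw_spin_unlock(&busiest_rq->lock);
	if (p)
		attach_one_task(target_rq, p);	/* take dst lock, enqueue */
	local_irq_enable();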

Further, note that if there was no task to migrate, we will not have
acquired the second rq->lock at all.

The important point to note is that, because we acquire dst->lock
immediately after releasing src->lock, the potential wait time of
task_rq_lock() callers on TASK_ON_RQ_MIGRATING is no longer than it
would have been in the double rq lock scenario.
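
For reference, the reason this matters is that task_rq_lock()-style
callers spin while a task is in the TASK_ON_RQ_MIGRATING state, along
the lines of the pattern introduced earlier in this series (a
simplified sketch; the real code also takes p->pi_lock):

	for (;;) {
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
			return rq;	/* p is stable on this rq */
		raw_spin_unlock(&rq->lock);

		/* p sits between dequeue from src and enqueue on dst */
		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}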

Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul Turner <pjt@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Kirill Tkhai <tkhai@yandex.ru>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1408528081.23412.92.camel@tkhai
Signed-off-by: Ingo Molnar <mingo@kernel.org>

@@ -5138,6 +5138,8 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
 {
 	s64 delta;
 
+	lockdep_assert_held(&env->src_rq->lock);
+
 	if (p->sched_class != &fair_sched_class)
 		return 0;
 
@@ -5257,6 +5259,9 @@ static
 int can_migrate_task(struct task_struct *p, struct lb_env *env)
 {
 	int tsk_cache_hot = 0;
+
+	lockdep_assert_held(&env->src_rq->lock);
+
 	/*
 	 * We do not migrate tasks that are:
 	 * 1) throttled_lb_pair, or
@@ -5341,30 +5346,49 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 }
 
 /*
- * move_one_task tries to move exactly one task from busiest to this_rq, as
+ * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
  * part of active balancing operations within "domain".
- * Returns 1 if successful and 0 otherwise.
  *
- * Called with both runqueues locked.
+ * Returns a task if successful and NULL otherwise.
  */
-static int move_one_task(struct lb_env *env)
+static struct task_struct *detach_one_task(struct lb_env *env)
 {
 	struct task_struct *p, *n;
 
+	lockdep_assert_held(&env->src_rq->lock);
+
 	list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
 		if (!can_migrate_task(p, env))
 			continue;
 
-		move_task(p, env);
+		deactivate_task(env->src_rq, p, 0);
+		p->on_rq = TASK_ON_RQ_MIGRATING;
+		set_task_cpu(p, env->dst_cpu);
+
 		/*
-		 * Right now, this is only the second place move_task()
-		 * is called, so we can safely collect move_task()
-		 * stats here rather than inside move_task().
+		 * Right now, this is only the second place where
+		 * lb_gained[env->idle] is updated (other is move_tasks)
+		 * so we can safely collect stats here rather than
+		 * inside move_tasks().
 		 */
 		schedstat_inc(env->sd, lb_gained[env->idle]);
-		return 1;
+		return p;
 	}
-	return 0;
+	return NULL;
 }
 
+/*
+ * attach_one_task() -- attaches the task returned from detach_one_task() to
+ * its new rq.
+ */
+static void attach_one_task(struct rq *rq, struct task_struct *p)
+{
+	raw_spin_lock(&rq->lock);
+	BUG_ON(task_rq(p) != rq);
+	p->on_rq = TASK_ON_RQ_QUEUED;
+	activate_task(rq, p, 0);
+	check_preempt_curr(rq, p, 0);
+	raw_spin_unlock(&rq->lock);
+}
+
 static const unsigned int sched_nr_migrate_break = 32;
@@ -6943,6 +6967,7 @@ static int active_load_balance_cpu_stop(void *data)
 	int target_cpu = busiest_rq->push_cpu;
 	struct rq *target_rq = cpu_rq(target_cpu);
 	struct sched_domain *sd;
+	struct task_struct *p = NULL;
 
 	raw_spin_lock_irq(&busiest_rq->lock);
@@ -6962,9 +6987,6 @@ static int active_load_balance_cpu_stop(void *data)
 	 */
 	BUG_ON(busiest_rq == target_rq);
 
-	/* move a task from busiest_rq to target_rq */
-	double_lock_balance(busiest_rq, target_rq);
-
 	/* Search for an sd spanning us and the target CPU. */
 	rcu_read_lock();
 	for_each_domain(target_cpu, sd) {
@@ -6985,16 +7007,22 @@ static int active_load_balance_cpu_stop(void *data)
 
 		schedstat_inc(sd, alb_count);
 
-		if (move_one_task(&env))
+		p = detach_one_task(&env);
+		if (p)
 			schedstat_inc(sd, alb_pushed);
 		else
 			schedstat_inc(sd, alb_failed);
 	}
 	rcu_read_unlock();
-	double_unlock_balance(busiest_rq, target_rq);
 out_unlock:
 	busiest_rq->active_balance = 0;
-	raw_spin_unlock_irq(&busiest_rq->lock);
+	raw_spin_unlock(&busiest_rq->lock);
+
+	if (p)
+		attach_one_task(target_rq, p);
+
+	local_irq_enable();
+
 	return 0;
 }