cpu/hotplug: Split out the state walk into functions

We need this for running the state callbacks both on the AP (the hotplugged
CPU itself) and on the BP (the CPU controlling the hotplug operation).
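
The walk and rollback semantics the new helpers encapsulate can be
illustrated with a small standalone model: step upwards through the states
invoking the startup callbacks, and on a failure walk back down invoking the
teardown callbacks of the states that were already brought up. The sketch
below is illustrative userspace code only (the demo_* names are invented for
the example and the skip_onerr handling is omitted); it is not part of this
patch.

#include <stdio.h>

enum demo_state { D_OFFLINE, D_PREPARE, D_BRINGUP, D_ONLINE, D_NR };

struct demo_step {
	const char *name;
	int (*startup)(void);
	int (*teardown)(void);
};

static int ok(void)   { return 0; }
static int fail(void) { return -1; }

/* D_BRINGUP is wired to fail so the rollback path is exercised */
static struct demo_step steps[D_NR] = {
	[D_PREPARE] = { "prepare", ok,   ok },
	[D_BRINGUP] = { "bringup", fail, ok },
	[D_ONLINE]  = { "online",  ok,   ok },
};

/* Models cpuhp_up_callbacks(): walk up, undo the completed states on error */
static int demo_up(int *state, int target)
{
	int prev = *state, ret = 0;

	while (*state < target) {
		(*state)++;
		printf("startup  %s\n", steps[*state].name);
		ret = steps[*state].startup();
		if (ret) {
			/* roll back the states already brought up */
			for ((*state)--; *state > prev; (*state)--) {
				printf("teardown %s\n", steps[*state].name);
				steps[*state].teardown();
			}
			break;
		}
	}
	return ret;
}

int main(void)
{
	int state = D_OFFLINE;

	if (demo_up(&state, D_ONLINE))
		printf("bringup failed, back at state %d\n", state);
	return 0;
}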

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: Rik van Riel <riel@redhat.com>
Cc: Rafael Wysocki <rafael.j.wysocki@intel.com>
Cc: "Srivatsa S. Bhat" <srivatsa@mit.edu>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul Turner <pjt@google.com>
Link: http://lkml.kernel.org/r/20160226182341.374946234@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 2e1a3483ce
parent 931ef16330
Author: Thomas Gleixner <tglx@linutronix.de>
Date:   2016-02-26 18:43:37 +0000

diff --git a/kernel/cpu.c b/kernel/cpu.c
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -329,10 +329,74 @@ static int bringup_cpu(unsigned int cpu)
 	return 0;
 }
 
+/*
+ * Hotplug state machine related functions
+ */
+static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st,
+			  struct cpuhp_step *steps)
+{
+	for (st->state++; st->state < st->target; st->state++) {
+		struct cpuhp_step *step = steps + st->state;
+
+		if (!step->skip_onerr)
+			cpuhp_invoke_callback(cpu, st->state, step->startup);
+	}
+}
+
+static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
+				struct cpuhp_step *steps, enum cpuhp_state target)
+{
+	enum cpuhp_state prev_state = st->state;
+	int ret = 0;
+
+	for (; st->state > target; st->state--) {
+		struct cpuhp_step *step = steps + st->state;
+
+		ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
+		if (ret) {
+			st->target = prev_state;
+			undo_cpu_down(cpu, st, steps);
+			break;
+		}
+	}
+	return ret;
+}
+
+static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st,
+			struct cpuhp_step *steps)
+{
+	for (st->state--; st->state > st->target; st->state--) {
+		struct cpuhp_step *step = steps + st->state;
+
+		if (!step->skip_onerr)
+			cpuhp_invoke_callback(cpu, st->state, step->teardown);
+	}
+}
+
+static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
+			      struct cpuhp_step *steps, enum cpuhp_state target)
+{
+	enum cpuhp_state prev_state = st->state;
+	int ret = 0;
+
+	while (st->state < target) {
+		struct cpuhp_step *step;
+
+		st->state++;
+		step = steps + st->state;
+		ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
+		if (ret) {
+			st->target = prev_state;
+			undo_cpu_up(cpu, st, steps);
+			break;
+		}
+	}
+	return ret;
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 EXPORT_SYMBOL(register_cpu_notifier);
 EXPORT_SYMBOL(__register_cpu_notifier);
 
 void unregister_cpu_notifier(struct notifier_block *nb)
 {
 	cpu_maps_update_begin();
@@ -537,15 +601,6 @@ static int notify_dead(unsigned int cpu)
 #endif
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
-{
-	for (st->state++; st->state < st->target; st->state++) {
-		struct cpuhp_step *step = cpuhp_bp_states + st->state;
-
-		if (!step->skip_onerr)
-			cpuhp_invoke_callback(cpu, st->state, step->startup);
-	}
-}
 
 /* Requires cpu_add_remove_lock to be held */
 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
@@ -567,16 +622,8 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
 	prev_state = st->state;
 	st->target = target;
-	for (; st->state > st->target; st->state--) {
-		struct cpuhp_step *step = cpuhp_bp_states + st->state;
-
-		ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
-		if (ret) {
-			st->target = prev_state;
-			undo_cpu_down(cpu, st);
-			break;
-		}
-	}
+	ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);
 
 	hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
 
 	cpu_hotplug_done();
@@ -645,22 +692,12 @@ static int cpuhp_set_cpu_active(unsigned int cpu)
 	return 0;
 }
 
-static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
-{
-	for (st->state--; st->state > st->target; st->state--) {
-		struct cpuhp_step *step = cpuhp_bp_states + st->state;
-
-		if (!step->skip_onerr)
-			cpuhp_invoke_callback(cpu, st->state, step->teardown);
-	}
-}
-
 /* Requires cpu_add_remove_lock to be held */
 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
 {
 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 	struct task_struct *idle;
-	int prev_state, ret = 0;
+	int ret = 0;
 
 	cpu_hotplug_begin();
@@ -687,20 +724,8 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
 	cpuhp_tasks_frozen = tasks_frozen;
 
-	prev_state = st->state;
 	st->target = target;
-	while (st->state < st->target) {
-		struct cpuhp_step *step;
-
-		st->state++;
-		step = cpuhp_bp_states + st->state;
-		ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
-		if (ret) {
-			st->target = prev_state;
-			undo_cpu_up(cpu, st);
-			break;
-		}
-	}
+	ret = cpuhp_up_callbacks(cpu, st, cpuhp_bp_states, target);
 
 out:
 	cpu_hotplug_done();
 	return ret;