arm64: fix an IRQ migration bug on CPU hotplug
When a CPU is disabled, all of its IRQs are migrated to another CPU. In some cases the newly chosen affinity differs from the old one, so the old affinity mask needs to be updated; but if irq_set_affinity() returns IRQ_SET_MASK_OK_DONE, the old mask is never updated. Fix this by using irq_do_set_affinity(), which records the new mask for that return value as well. And since migrating interrupts is a core-code matter, use the generic helper irq_migrate_all_off_this_cpu() from kernel/irq/cpuhotplug.c (selected via GENERIC_IRQ_MIGRATION) instead of the arm64-private migrate_irqs().

Cc: Jiang Liu <jiang.liu@linux.intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Russell King - ARM Linux <linux@arm.linux.org.uk>
Cc: Hanjun Guo <hanjun.guo@linaro.org>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
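For reference, the heart of the fix is how the core helper records a successful affinity change. The sketch below is modelled on irq_do_set_affinity() in kernel/irq/manage.c around this kernel release; it is an illustrative approximation, not a verbatim copy. Unlike the removed arm64 code in the diff below, which copied the mask only on IRQ_SET_MASK_OK, it treats IRQ_SET_MASK_OK_DONE as success too:

/* Sketch of the core helper, approximating kernel/irq/manage.c. */
int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		/*
		 * Both codes mean success: record the new affinity mask.
		 * The arm64 copy handled only IRQ_SET_MASK_OK, leaving a
		 * stale mask behind when the irqchip returned
		 * IRQ_SET_MASK_OK_DONE.
		 */
		cpumask_copy(irq_data_get_affinity_mask(data), mask);
		/* fall through: update threaded-handler affinity too */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
		ret = 0;
	}
	return ret;
}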
commit 217d453d47 (parent a78afccbba)
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -427,6 +427,7 @@ config NR_CPUS
 
 config HOTPLUG_CPU
 	bool "Support for hot-pluggable CPUs"
+	select GENERIC_IRQ_MIGRATION
 	help
 	  Say Y here to experiment with turning CPUs off and on. CPUs
 	  can be controlled through /sys/devices/system/cpu.
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -7,7 +7,6 @@
 
 struct pt_regs;
 
-extern void migrate_irqs(void);
 extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
 
 static inline void acpi_irq_init(void)
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -27,7 +27,6 @@
 #include <linux/init.h>
 #include <linux/irqchip.h>
 #include <linux/seq_file.h>
-#include <linux/ratelimit.h>
 
 unsigned long irq_err_count;
 
@@ -54,64 +53,3 @@ void __init init_IRQ(void)
 	if (!handle_arch_irq)
 		panic("No interrupt controller found.");
 }
-
-#ifdef CONFIG_HOTPLUG_CPU
-static bool migrate_one_irq(struct irq_desc *desc)
-{
-	struct irq_data *d = irq_desc_get_irq_data(desc);
-	const struct cpumask *affinity = irq_data_get_affinity_mask(d);
-	struct irq_chip *c;
-	bool ret = false;
-
-	/*
-	 * If this is a per-CPU interrupt, or the affinity does not
-	 * include this CPU, then we have nothing to do.
-	 */
-	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
-		return false;
-
-	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
-		affinity = cpu_online_mask;
-		ret = true;
-	}
-
-	c = irq_data_get_irq_chip(d);
-	if (!c->irq_set_affinity)
-		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
-		cpumask_copy(irq_data_get_affinity_mask(d), affinity);
-
-	return ret;
-}
-
-/*
- * The current CPU has been marked offline.  Migrate IRQs off this CPU.
- * If the affinity settings do not allow other CPUs, force them onto any
- * available CPU.
- *
- * Note: we must iterate over all IRQs, whether they have an attached
- * action structure or not, as we need to get chained interrupts too.
- */
-void migrate_irqs(void)
-{
-	unsigned int i;
-	struct irq_desc *desc;
-	unsigned long flags;
-
-	local_irq_save(flags);
-
-	for_each_irq_desc(i, desc) {
-		bool affinity_broken;
-
-		raw_spin_lock(&desc->lock);
-		affinity_broken = migrate_one_irq(desc);
-		raw_spin_unlock(&desc->lock);
-
-		if (affinity_broken)
-			pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
-					    i, smp_processor_id());
-	}
-
-	local_irq_restore(flags);
-}
-#endif /* CONFIG_HOTPLUG_CPU */
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -231,7 +231,8 @@ int __cpu_disable(void)
 	/*
 	 * OK - migrate IRQs away from this CPU
 	 */
-	migrate_irqs();
+	irq_migrate_all_off_this_cpu();
+
 	return 0;
 }
 
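For completeness, the generic replacement looks roughly like the sketch below, modelled on kernel/irq/cpuhotplug.c as introduced alongside GENERIC_IRQ_MIGRATION; an approximation for illustration, not the verbatim source. Note that migrate_one_irq() funnels through irq_do_set_affinity() rather than calling c->irq_set_affinity() and copying the mask by hand, which is what closes the IRQ_SET_MASK_OK_DONE hole:

/* Sketch of the generic migration path (kernel/irq/cpuhotplug.c). */
static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = irq_data_get_affinity_mask(d);
	struct irq_chip *c;
	bool ret = false;

	/* Per-CPU IRQs, or IRQs not affine to this CPU: nothing to do. */
	if (irqd_is_per_cpu(d) ||
	    !cpumask_test_cpu(smp_processor_id(), affinity))
		return false;

	/* No online CPU left in the mask: force onto any online CPU. */
	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		affinity = cpu_online_mask;
		ret = true;
	}

	c = irq_data_get_irq_chip(d);
	if (!c->irq_set_affinity) {
		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
	} else {
		/* The core helper updates the stored mask on success. */
		int r = irq_do_set_affinity(d, affinity, false);
		if (r)
			pr_warn_ratelimited("IRQ%u: set affinity failed(%d)\n",
					    d->irq, r);
	}

	return ret;
}

void irq_migrate_all_off_this_cpu(void)
{
	unsigned int irq;
	struct irq_desc *desc;
	unsigned long flags;

	local_irq_save(flags);

	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken)
			pr_warn_ratelimited("IRQ%u: no longer affine to CPU%u\n",
					    irq, smp_processor_id());
	}

	local_irq_restore(flags);
}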