Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq fixes from Thomas Gleixner:
 "This lot provides:

   - plug a hotplug race in the new affinity infrastructure

   - a fix for the trigger type of chained interrupts

   - plug a potential memory leak in the core code

   - a few fixes for ARM and MIPS GICs"

* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  irqchip/mips-gic: Implement activate op for device domain
  irqchip/mips-gic: Cleanup chip and handler setup
  genirq/affinity: Use get/put_online_cpus around cpumask operations
  genirq: Fix potential memleak when failing to get irq pm
  irqchip/gicv3-its: Disable the ITS before initializing it
  irqchip/gicv3: Remove disabling redistributor and group1 non-secure interrupts
  irqchip/gic: Allow self-SGIs for SMP on UP configurations
  genirq: Correctly configure the trigger on chained interrupts
commit 4340393e5a
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1545,7 +1545,12 @@ static int its_force_quiescent(void __iomem *base)
 	u32 val;
 
 	val = readl_relaxed(base + GITS_CTLR);
-	if (val & GITS_CTLR_QUIESCENT)
+	/*
+	 * GIC architecture specification requires the ITS to be both
+	 * disabled and quiescent for writes to GITS_BASER<n> or
+	 * GITS_CBASER to not have UNPREDICTABLE results.
+	 */
+	if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
 		return 0;
 
 	/* Disable the generation of all interrupts to this ITS */
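The hunk above only tightens the early-return test: the ITS must be both quiescent and disabled before GITS_BASER<n>/GITS_CBASER are reprogrammed, otherwise an ITS that is quiescent but still enabled (for instance one handed over enabled by a previous boot stage) would wrongly be treated as safe. For orientation, here is a minimal sketch of what the rest of such a quiescing sequence can look like; it is written against the register and bit names used in the hunk, it is not a copy of the driver's actual code, and the timeout bound is arbitrary:

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irqchip/arm-gic-v3.h>

/* Sketch only: drive an ITS into the disabled + quiescent state. */
static int its_force_quiescent_sketch(void __iomem *base)
{
	unsigned int timeout = 1000;		/* illustrative bound */
	u32 val = readl_relaxed(base + GITS_CTLR);

	/* Already disabled and quiescent: safe to reprogram the tables. */
	if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
		return 0;

	/* Otherwise clear the enable bit and wait for the ITS to drain. */
	writel_relaxed(val & ~GITS_CTLR_ENABLE, base + GITS_CTLR);

	while (timeout--) {
		if (readl_relaxed(base + GITS_CTLR) & GITS_CTLR_QUIESCENT)
			return 0;
		cpu_relax();
		udelay(1);
	}

	return -EBUSY;
}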
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -667,13 +667,20 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 #endif
 
 #ifdef CONFIG_CPU_PM
+/* Check whether it's single security state view */
+static bool gic_dist_security_disabled(void)
+{
+	return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
+}
+
 static int gic_cpu_pm_notifier(struct notifier_block *self,
 			       unsigned long cmd, void *v)
 {
 	if (cmd == CPU_PM_EXIT) {
-		gic_enable_redist(true);
+		if (gic_dist_security_disabled())
+			gic_enable_redist(true);
 		gic_cpu_sys_reg_init();
-	} else if (cmd == CPU_PM_ENTER) {
+	} else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
 		gic_write_grpen1(0);
 		gic_enable_redist(false);
 	}
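With this change the CPU PM callback only touches the redistributor and the group-1 enable when GICD_CTLR.DS reports a single security state; otherwise the non-secure side leaves those controls alone across CPU_PM_ENTER/CPU_PM_EXIT. The callback itself only fires once it is registered with the CPU PM framework; a minimal sketch of that registration, assuming it happens in the driver's init path (the _sketch names are illustrative, not the driver's):

#include <linux/cpu_pm.h>
#include <linux/notifier.h>

/* Sketch only: hook the notifier above into the CPU PM framework. */
static struct notifier_block gic_cpu_pm_nb_sketch = {
	.notifier_call = gic_cpu_pm_notifier,
};

static void gic_cpu_pm_init_sketch(void)
{
	/* CPU_PM_ENTER/CPU_PM_EXIT events will now reach gic_cpu_pm_notifier(). */
	cpu_pm_register_notifier(&gic_cpu_pm_nb_sketch);
}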
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -769,6 +769,13 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 	int cpu;
 	unsigned long flags, map = 0;
 
+	if (unlikely(nr_cpu_ids == 1)) {
+		/* Only one CPU? let's do a self-IPI... */
+		writel_relaxed(2 << 24 | irq,
+			       gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+		return;
+	}
+
 	raw_spin_lock_irqsave(&irq_controller_lock, flags);
 
 	/* Convert our logical CPU mask into a physical one. */
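The new fast path issues a self-IPI when only one CPU exists, so an SMP kernel running on uniprocessor hardware can still deliver SGIs without converting CPU masks or taking irq_controller_lock. The literal 2 << 24 | irq is the GICv2 GICD_SGIR encoding: bits [25:24] are the target list filter, where value 2 means "forward only to the CPU that made the request", and bits [3:0] carry the SGI number. A small sketch spelling out that encoding with illustrative macro names (the driver simply writes the raw value):

#include <linux/types.h>

/* Sketch only: the GICD_SGIR fields used for the self-IPI above. */
#define SGIR_TARGET_LIST_FILTER_SHIFT	24
#define SGIR_TARGET_SELF		2	/* requesting CPU only */
#define SGIR_SGI_ID_MASK		0xf	/* SGIs are hwirqs 0..15 */

static inline u32 gic_sgir_self_sketch(unsigned int irq)
{
	return (SGIR_TARGET_SELF << SGIR_TARGET_LIST_FILTER_SHIFT) |
	       (irq & SGIR_SGI_ID_MASK);
}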
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -713,9 +713,6 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
 	unsigned long flags;
 	int i;
 
-	irq_set_chip_and_handler(virq, &gic_level_irq_controller,
-				 handle_level_irq);
-
 	spin_lock_irqsave(&gic_lock, flags);
 	gic_map_to_pin(intr, gic_cpu_pin);
 	gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
@@ -732,6 +729,10 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
 {
 	if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
 		return gic_local_irq_domain_map(d, virq, hw);
+
+	irq_set_chip_and_handler(virq, &gic_level_irq_controller,
+				 handle_level_irq);
+
 	return gic_shared_irq_domain_map(d, virq, hw, 0);
 }
 
@@ -771,11 +772,13 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
 		hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);
 
 		ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
-						    &gic_edge_irq_controller,
+						    &gic_level_irq_controller,
 						    NULL);
 		if (ret)
 			goto error;
 
+		irq_set_handler(virq + i, handle_level_irq);
+
 		ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
 		if (ret)
 			goto error;
@@ -890,10 +893,17 @@ void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
 	return;
 }
 
+static void gic_dev_domain_activate(struct irq_domain *domain,
+				    struct irq_data *d)
+{
+	gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0);
+}
+
 static struct irq_domain_ops gic_dev_domain_ops = {
 	.xlate = gic_dev_domain_xlate,
 	.alloc = gic_dev_domain_alloc,
 	.free = gic_dev_domain_free,
+	.activate = gic_dev_domain_activate,
 };
 
 static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -39,6 +39,7 @@ struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs)
 		return NULL;
 	}
 
+	get_online_cpus();
 	if (max_vecs >= num_online_cpus()) {
 		cpumask_copy(affinity_mask, cpu_online_mask);
 		*nr_vecs = num_online_cpus();
@@ -56,6 +57,7 @@ struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs)
 		}
 		*nr_vecs = vecs;
 	}
+	put_online_cpus();
 
 	return affinity_mask;
 }
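Both affinity.c hunks implement the same fix: the inspection of cpu_online_mask that builds the affinity mask is now bracketed by get_online_cpus()/put_online_cpus(), so a concurrent CPU hotplug operation cannot change the online mask halfway through and leave *nr_vecs inconsistent with the mask that was computed. A minimal sketch of the bracketing pattern in isolation (the helper name is illustrative):

#include <linux/cpu.h>
#include <linux/cpumask.h>

/* Sketch only: keep cpu_online_mask stable while it is being inspected. */
static unsigned int count_online_cpus_stable_sketch(void)
{
	unsigned int n;

	get_online_cpus();		/* block CPU hotplug */
	n = num_online_cpus();		/* cpu_online_mask cannot change here */
	put_online_cpus();		/* allow hotplug again */

	return n;
}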
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -820,6 +820,17 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
 	desc->name = name;
 
 	if (handle != handle_bad_irq && is_chained) {
+		/*
+		 * We're about to start this interrupt immediately,
+		 * hence the need to set the trigger configuration.
+		 * But the .set_type callback may have overridden the
+		 * flow handler, ignoring that we're dealing with a
+		 * chained interrupt. Reset it immediately because we
+		 * do know better.
+		 */
+		__irq_set_trigger(desc, irqd_get_trigger_type(&desc->irq_data));
+		desc->handle_irq = handle;
+
 		irq_settings_set_noprobe(desc);
 		irq_settings_set_norequest(desc);
 		irq_settings_set_nothread(desc);
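This hunk concerns chained interrupts, i.e. parent interrupts whose flow handler demultiplexes into child interrupts and which are started the moment the handler is installed; that is why the previously configured trigger type has to be programmed into the hardware right here, and why the flow handler is restored in case the chip's .set_type callback replaced it. A hedged sketch of the driver-side pattern that reaches __irq_do_set_handler() with is_chained set, using the kernel's chained-handler helpers; all _sketch names are illustrative:

#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>

/* Sketch only: a typical demultiplexing flow handler for a chained IRQ. */
static void demux_handler_sketch(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);
	/* ...read the mux status and generic_handle_irq() each child... */
	chained_irq_exit(chip, desc);
}

static void demux_probe_sketch(unsigned int parent_irq, void *mux_priv)
{
	/* Installs the handler and starts the parent IRQ immediately. */
	irq_set_chained_handler_and_data(parent_irq, demux_handler_sketch, mux_priv);
}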
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1681,8 +1681,10 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	action->dev_id = dev_id;
 
 	retval = irq_chip_pm_get(&desc->irq_data);
-	if (retval < 0)
+	if (retval < 0) {
+		kfree(action);
 		return retval;
+	}
 
 	chip_bus_lock(desc);
 	retval = __setup_irq(irq, desc, action);
@@ -1985,8 +1987,10 @@ int request_percpu_irq(unsigned int irq, irq_handler_t handler,
 	action->percpu_dev_id = dev_id;
 
 	retval = irq_chip_pm_get(&desc->irq_data);
-	if (retval < 0)
+	if (retval < 0) {
+		kfree(action);
 		return retval;
+	}
 
 	chip_bus_lock(desc);
 	retval = __setup_irq(irq, desc, action);
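The two manage.c hunks close the same leak: request_threaded_irq() and request_percpu_irq() allocate the struct irqaction before calling irq_chip_pm_get(), so a failure there must free the action on the way out or the allocation is lost. The rule in isolation, as a minimal sketch (the power helper stands in for irq_chip_pm_get() and is illustrative):

#include <linux/interrupt.h>
#include <linux/slab.h>

/* Stand-in for irq_chip_pm_get(); illustrative only. */
static int acquire_chip_power_sketch(unsigned int irq);

/* Sketch only: every early return after the allocation must free it. */
static int request_irq_like_sketch(unsigned int irq)
{
	struct irqaction *action;
	int retval;

	action = kzalloc(sizeof(*action), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	retval = acquire_chip_power_sketch(irq);
	if (retval < 0) {
		kfree(action);		/* without this, the action would leak */
		return retval;
	}

	/* ...continue with __setup_irq(); later failure paths also free it... */
	return 0;
}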