mirror of https://github.com/torvalds/linux.git
Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq fixes from Thomas Gleixner:
 "A set of fixes mostly for the ARM/GIC world:

   - Fix the MSI affinity handling in the ls-scfg irq chip driver so it
     updates and uses the effective affinity mask correctly

   - Prevent binding LPIs to offline CPUs and respect the Cavium erratum
     which requires that LPIs which belong to an offline NUMA node are
     not bound to a CPU on a different NUMA node

   - Free only the number of allocated interrupts in the GIC-V2M driver
     instead of trying to free log2(nrirqs)

   - Prevent emitting SYNC and VSYNC commands targeting non-existent
     interrupt collections in the GIC-V3 ITS driver

   - Ensure that the GIC-V3 interrupt redistributor is correctly
     reprogrammed on CPU hotplug

   - Remove a stale unused helper function"

* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  irqdesc: Delete irq_desc_get_msi_desc()
  irqchip/gic-v3-its: Fix reprogramming of redistributors on CPU hotplug
  irqchip/gic-v3-its: Only emit VSYNC if targeting a valid collection
  irqchip/gic-v3-its: Only emit SYNC if targeting a valid collection
  irqchip/gic-v3-its: Don't bind LPI to unavailable NUMA node
  irqchip/gic-v2m: Fix SPI release on error path
  irqchip/ls-scfg-msi: Fix MSI affinity handling
  genirq/debugfs: Add missing IRQCHIP_SUPPORTS_LEVEL_MSI debug
commit 78fea6334f
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -199,7 +199,7 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 
 fail:
 	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
-	gicv2m_unalloc_msi(v2m, hwirq, get_count_order(nr_irqs));
+	gicv2m_unalloc_msi(v2m, hwirq, nr_irqs);
 	return err;
 }
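The bug fixed here is a unit mismatch: get_count_order(n) returns an allocation order, roughly ceil(log2(n)), while the reworked gicv2m_unalloc_msi() expects a plain interrupt count, so the error path released only log2(nr_irqs) SPIs. A minimal userspace sketch of the divergence (the real helper lives in the kernel's <linux/bitops.h>; this reimplementation with GCC builtins is for illustration only):

#include <stdio.h>

/* ceil(log2(count)), like the kernel helper in <linux/bitops.h> */
static int get_count_order(unsigned int count)
{
	int order = 31 - __builtin_clz(count);	/* fls(count) - 1 */

	if (count & (count - 1))		/* not a power of two */
		order++;
	return order;
}

int main(void)
{
	unsigned int n;

	for (n = 1; n <= 32; n *= 2)
		printf("nr_irqs=%2u  order=%d -> passing the order frees only %d of %u IRQs\n",
		       n, get_count_order(n), get_count_order(n), n);
	return 0;
}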
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -182,6 +182,22 @@ static struct its_collection *dev_event_to_col(struct its_device *its_dev,
 	return its->collections + its_dev->event_map.col_map[event];
 }
 
+static struct its_collection *valid_col(struct its_collection *col)
+{
+	if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
+		return NULL;
+
+	return col;
+}
+
+static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
+{
+	if (valid_col(its->collections + vpe->col_idx))
+		return vpe;
+
+	return NULL;
+}
+
 /*
  * ITS command descriptors - parameters to be encoded in a command
  * block.
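valid_col() relies on an ITS invariant: a mapped collection's target is either a 64KB-aligned redistributor address or a processor number shifted left by 16, so bits [15:0] of a valid target_address are always zero. Note that GENMASK_ULL(h, l) takes the high bit first. A quick userspace sketch of the mask (the kernel macro lives in <linux/bits.h>; this copy is for illustration only):

#include <stdio.h>

/* high bit first, as in the kernel: GENMASK_ULL(15, 0) == 0xffff */
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	unsigned long long mask = GENMASK_ULL(15, 0);

	printf("mask = 0x%llx\n", mask);				/* 0xffff */
	printf("64K-aligned target: 0x%llx\n", 0x20000ULL & mask);	/* 0, passes */
	printf("unmapped sentinel : 0x%llx\n", ~0ULL & mask);		/* 0xffff, caught */
	return 0;
}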
@@ -439,7 +455,7 @@ static struct its_collection *its_build_mapti_cmd(struct its_node *its,
 
 	its_fixup_cmd(cmd);
 
-	return col;
+	return valid_col(col);
 }
 
 static struct its_collection *its_build_movi_cmd(struct its_node *its,
@@ -458,7 +474,7 @@ static struct its_collection *its_build_movi_cmd(struct its_node *its,
 
 	its_fixup_cmd(cmd);
 
-	return col;
+	return valid_col(col);
 }
 
 static struct its_collection *its_build_discard_cmd(struct its_node *its,
@@ -476,7 +492,7 @@ static struct its_collection *its_build_discard_cmd(struct its_node *its,
 
 	its_fixup_cmd(cmd);
 
-	return col;
+	return valid_col(col);
 }
 
 static struct its_collection *its_build_inv_cmd(struct its_node *its,
@@ -494,7 +510,7 @@ static struct its_collection *its_build_inv_cmd(struct its_node *its,
 
 	its_fixup_cmd(cmd);
 
-	return col;
+	return valid_col(col);
 }
 
 static struct its_collection *its_build_int_cmd(struct its_node *its,
@@ -512,7 +528,7 @@ static struct its_collection *its_build_int_cmd(struct its_node *its,
 
 	its_fixup_cmd(cmd);
 
-	return col;
+	return valid_col(col);
 }
 
 static struct its_collection *its_build_clear_cmd(struct its_node *its,
@@ -530,7 +546,7 @@ static struct its_collection *its_build_clear_cmd(struct its_node *its,
 
 	its_fixup_cmd(cmd);
 
-	return col;
+	return valid_col(col);
 }
 
 static struct its_collection *its_build_invall_cmd(struct its_node *its,
@@ -554,7 +570,7 @@ static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
 
 	its_fixup_cmd(cmd);
 
-	return desc->its_vinvall_cmd.vpe;
+	return valid_vpe(its, desc->its_vinvall_cmd.vpe);
 }
 
 static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
@@ -576,7 +592,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
 
 	its_fixup_cmd(cmd);
 
-	return desc->its_vmapp_cmd.vpe;
+	return valid_vpe(its, desc->its_vmapp_cmd.vpe);
 }
 
 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
@@ -599,7 +615,7 @@ static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
 
 	its_fixup_cmd(cmd);
 
-	return desc->its_vmapti_cmd.vpe;
+	return valid_vpe(its, desc->its_vmapti_cmd.vpe);
 }
 
 static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
@@ -622,7 +638,7 @@ static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
 
 	its_fixup_cmd(cmd);
 
-	return desc->its_vmovi_cmd.vpe;
+	return valid_vpe(its, desc->its_vmovi_cmd.vpe);
 }
 
 static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
@@ -640,7 +656,7 @@ static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
 
 	its_fixup_cmd(cmd);
 
-	return desc->its_vmovp_cmd.vpe;
+	return valid_vpe(its, desc->its_vmovp_cmd.vpe);
 }
 
 static u64 its_cmd_ptr_to_offset(struct its_node *its,
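Every builder above feeds the same dispatch path: the object it returns is what the driver subsequently targets with a SYNC (or VSYNC for the vPE variants), so returning NULL from valid_col()/valid_vpe() suppresses the follow-up synchronization command entirely. A self-contained model of that pattern (the real driver generates its dispatch wrappers with a macro; the names here are illustrative):

#include <stdio.h>

struct col { unsigned long long target_address; };

static struct col *valid_col(struct col *c)
{
	return (c->target_address & 0xffffULL) ? NULL : c;
}

/* stands in for its_build_movi_cmd() and friends */
static struct col *build_cmd(struct col *c)
{
	/* ... encode the command itself ... */
	return valid_col(c);
}

int main(void)
{
	struct col mapped = { .target_address = 0x20000 };
	struct col unmapped = { .target_address = ~0ULL };

	if (build_cmd(&mapped))
		puts("SYNC emitted: collection is mapped");
	if (!build_cmd(&unmapped))
		puts("SYNC suppressed: collection was never mapped");
	return 0;
}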
@@ -1824,11 +1840,16 @@ static int its_alloc_tables(struct its_node *its)
 
 static int its_alloc_collections(struct its_node *its)
 {
+	int i;
+
 	its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
 				   GFP_KERNEL);
 	if (!its->collections)
 		return -ENOMEM;
 
+	for (i = 0; i < nr_cpu_ids; i++)
+		its->collections[i].target_address = ~0ULL;
+
 	return 0;
 }
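This pairs with valid_col() above: its->collections is kcalloc()'d, and an all-zero target_address would look like a perfectly aligned RDbase and pass the check. Pre-setting every entry to ~0ULL guarantees that a collection which has never been mapped by a MAPC command fails valid_col(), so no SYNC can be aimed at it.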
@@ -2310,7 +2331,14 @@ static int its_irq_domain_activate(struct irq_domain *domain,
 		cpu_mask = cpumask_of_node(its_dev->its->numa_node);
 
 	/* Bind the LPI to the first possible CPU */
-	cpu = cpumask_first(cpu_mask);
+	cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
+	if (cpu >= nr_cpu_ids) {
+		if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
+			return -EINVAL;
+
+		cpu = cpumask_first(cpu_online_mask);
+	}
+
 	its_dev->event_map.col_map[event] = cpu;
 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
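The selection now intersects the NUMA node's mask with the online mask; cpumask_first_and() returns nr_cpu_ids or more when the intersection is empty, in which case the driver falls back to any online CPU unless Cavium erratum 23144 forbids binding the LPI outside its node. A plain-bitmask model of the decision, for illustration only (the kernel uses struct cpumask and the cpumask_* helpers):

#include <stdio.h>

#define NR_CPUS 8

/* lowest set bit of (a & b), or NR_CPUS if the intersection is empty */
static int first_and(unsigned int a, unsigned int b)
{
	unsigned int both = a & b;

	return both ? __builtin_ctz(both) : NR_CPUS;
}

int main(void)
{
	unsigned int node_mask = 0x0f;	/* CPUs 0-3 sit on this NUMA node */
	unsigned int online    = 0xf0;	/* ...but only CPUs 4-7 are online */
	int cavium_23144 = 0;
	int cpu = first_and(node_mask, online);

	if (cpu >= NR_CPUS) {		/* whole node offline */
		if (cavium_23144) {
			puts("erratum 23144: no cross-node LPI, -EINVAL");
			return 1;
		}
		cpu = __builtin_ctz(online);	/* fall back to any online CPU */
	}
	printf("bind LPI to CPU%d\n", cpu);
	return 0;
}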
@@ -3399,6 +3427,16 @@ static int redist_disable_lpis(void)
 	u64 timeout = USEC_PER_SEC;
 	u64 val;
 
+	/*
+	 * If coming via a CPU hotplug event, we don't need to disable
+	 * LPIs before trying to re-enable them. They are already
+	 * configured and all is well in the world. Detect this case
+	 * by checking the allocation of the pending table for the
+	 * current CPU.
+	 */
+	if (gic_data_rdist()->pend_page)
+		return 0;
+
 	if (!gic_rdists_supports_plpis()) {
 		pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
 		return -ENXIO;
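The pend_page test doubles as an "already initialized" marker: the LPI pending table is allocated once per CPU the first time LPIs are configured and kept across hotplug, so finding it present means this call is a hotplug re-entry and the redistributor can keep its existing configuration instead of being torn down and rebuilt.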
--- a/drivers/irqchip/irq-ls-scfg-msi.c
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -93,8 +93,12 @@ static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
 	msg->address_lo = lower_32_bits(msi_data->msiir_addr);
 	msg->data = data->hwirq;
 
-	if (msi_affinity_flag)
-		msg->data |= cpumask_first(data->common->affinity);
+	if (msi_affinity_flag) {
+		const struct cpumask *mask;
+
+		mask = irq_data_get_effective_affinity_mask(data);
+		msg->data |= cpumask_first(mask);
+	}
 
 	iommu_dma_map_msi_msg(data->irq, msg);
 }
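This driver encodes the destination in the MSI payload itself: the target CPU index is OR'd into the low bits of msg->data. The old code read the requested affinity mask; the fix reads back the effective affinity, i.e. the single CPU that ls_scfg_msi_set_affinity() below actually programmed, so the payload and the kernel's bookkeeping cannot diverge. A toy model of the round trip, for illustration only (the masks and helpers are stand-ins, not the driver's API):

#include <stdio.h>

static unsigned int effective_mask;	/* models the irq_data effective affinity */

static void set_affinity(unsigned int requested)
{
	unsigned int cpu = __builtin_ctz(requested);	/* cpumask_first() */

	effective_mask = 1u << cpu;	/* irq_data_update_effective_affinity() */
}

static unsigned int compose_msg(unsigned int hwirq)
{
	/* read back the mask set_affinity() wrote, not the requested one */
	return hwirq | __builtin_ctz(effective_mask);
}

int main(void)
{
	set_affinity(0x6);		/* CPUs 1-2 requested; CPU1 chosen */
	printf("msg.data = %u (hwirq 8 | CPU1)\n", compose_msg(8));
	return 0;
}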
@@ -121,7 +125,7 @@ static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
 		return -EINVAL;
 	}
 
-	cpumask_copy(irq_data->common->affinity, mask);
+	irq_data_update_effective_affinity(irq_data, cpumask_of(cpu));
 
 	return IRQ_SET_MASK_OK;
 }
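This is the counterpart of the compose_msg() change: instead of manually copying the requested mask into ->common->affinity (which the genirq core already does when the callback returns IRQ_SET_MASK_OK), set_affinity() now records the effective affinity, the cpumask_of(cpu) singleton it actually chose, which is exactly what the rewritten ls_scfg_msi_compose_msg() reads.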
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -503,6 +503,7 @@ struct irq_chip {
  * IRQCHIP_SKIP_SET_WAKE:	Skip chip.irq_set_wake(), for this irq chip
  * IRQCHIP_ONESHOT_SAFE:	One shot does not require mask/unmask
  * IRQCHIP_EOI_THREADED:	Chip requires eoi() on unmask in threaded mode
+ * IRQCHIP_SUPPORTS_LEVEL_MSI:	Chip can provide two doorbells for Level MSIs
  */
 enum {
 	IRQCHIP_SET_TYPE_MASKED		= (1 << 0),
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -145,11 +145,6 @@ static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
 	return desc->irq_common_data.handler_data;
 }
 
-static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
-{
-	return desc->irq_common_data.msi_desc;
-}
-
 /*
  * Architectures call this to let the generic IRQ layer
  * handle an interrupt.
--- a/kernel/irq/debugfs.c
+++ b/kernel/irq/debugfs.c
@@ -55,6 +55,7 @@ static const struct irq_bit_descr irqchip_flags[] = {
 	BIT_MASK_DESCR(IRQCHIP_SKIP_SET_WAKE),
 	BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE),
 	BIT_MASK_DESCR(IRQCHIP_EOI_THREADED),
+	BIT_MASK_DESCR(IRQCHIP_SUPPORTS_LEVEL_MSI),
 };
 
 static void
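The debugfs table is a list of {mask, name} descriptors that genirq prints whenever the corresponding chip flag is set; the missing IRQCHIP_SUPPORTS_LEVEL_MSI entry simply meant the new flag never showed up in the output. A self-contained sketch of the pattern (the flag values match include/linux/irq.h as of this series; the scaffolding around them is illustrative):

#include <stdio.h>

struct irq_bit_descr {
	unsigned int	mask;
	const char	*name;
};

#define BIT_MASK_DESCR(m) { .mask = m, .name = #m }

enum {
	IRQCHIP_EOI_THREADED		= (1 << 6),
	IRQCHIP_SUPPORTS_LEVEL_MSI	= (1 << 7),
};

static const struct irq_bit_descr irqchip_flags[] = {
	BIT_MASK_DESCR(IRQCHIP_EOI_THREADED),
	BIT_MASK_DESCR(IRQCHIP_SUPPORTS_LEVEL_MSI),	/* the missing entry */
};

int main(void)
{
	unsigned int chip_flags = IRQCHIP_SUPPORTS_LEVEL_MSI;
	size_t i;

	for (i = 0; i < sizeof(irqchip_flags) / sizeof(irqchip_flags[0]); i++)
		if (chip_flags & irqchip_flags[i].mask)
			printf("flags: %s\n", irqchip_flags[i].name);
	return 0;
}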