Mirror of https://github.com/torvalds/linux.git
Merge branch irq/plic-masking into irq/irqchip-next
* irq/plic-masking:
  SiFive PLIC optimisations from Samuel Holland:

  "This series removes the spinlocks and cpumask operations from the PLIC
  driver's hot path. As far as I know, using the priority to mask
  interrupts is an intended usage and will work on all existing
  implementations. [...]"

  irqchip/sifive-plic: Separate the enable and mask operations
  irqchip/sifive-plic: Make better use of the effective affinity mask
  PCI: hv: Take a const cpumask in hv_compose_msi_req_get_cpu()
  genirq: Provide an IRQ affinity mask in non-SMP configs
  genirq: Return a const cpumask from irq_data_get_affinity_mask
  genirq: Add and use an irq_data_update_affinity helper
  genirq: Refactor accessors to use irq_data_get_affinity_mask
  genirq: Drop redundant irq_init_effective_affinity
  genirq: GENERIC_IRQ_EFFECTIVE_AFF_MASK depends on SMP
  genirq: GENERIC_IRQ_IPI depends on SMP
  irqchip/mips-gic: Only register IPI domain when SMP is enabled

Signed-off-by: Marc Zyngier <maz@kernel.org>
commit d4a930a08c
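The idea the series banks on, visible in the irq-sifive-plic.c hunks below, is that masking can be done through the per-interrupt priority register alone: priority 0 never exceeds any hart's threshold, so a single MMIO write masks or unmasks the source without touching per-CPU enable bits, spinlocks, or cpumasks. A minimal standalone sketch of that scheme, reusing the PRIORITY_BASE/PRIORITY_PER_ID offsets from the driver; plic_write(), plic_mask_hwirq() and plic_unmask_hwirq() are illustrative names, not kernel API:

#include <stdint.h>

#define PRIORITY_BASE	0	/* register offsets as used by the driver below */
#define PRIORITY_PER_ID	4

/* stand-in for the kernel's writel(); "regs" would be the mapped PLIC */
static inline void plic_write(volatile uint32_t *regs, uint32_t off, uint32_t val)
{
	regs[off / 4] = val;
}

/* mask: priority 0 is below every hart's threshold, no locks needed */
static void plic_mask_hwirq(volatile uint32_t *regs, uint32_t hwirq)
{
	plic_write(regs, PRIORITY_BASE + hwirq * PRIORITY_PER_ID, 0);
}

/* unmask: restore the lowest usable priority */
static void plic_unmask_hwirq(volatile uint32_t *regs, uint32_t hwirq)
{
	plic_write(regs, PRIORITY_BASE + hwirq * PRIORITY_PER_ID, 1);
}

In the driver hunks below, plic_irq_mask()/plic_irq_unmask() become exactly these two writes, while enable/disable walk only the effective affinity mask.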
@@ -60,7 +60,7 @@ int irq_select_affinity(unsigned int irq)
 	cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
 	last_cpu = cpu;
 
-	cpumask_copy(irq_data_get_affinity_mask(data), cpumask_of(cpu));
+	irq_data_update_affinity(data, cpumask_of(cpu));
 	chip->irq_set_affinity(data, cpumask_of(cpu), false);
 	return 0;
 }
@@ -40,7 +40,7 @@ config ARCH_HIP04
 	select HAVE_ARM_ARCH_TIMER
 	select MCPM if SMP
 	select MCPM_QUAD_CLUSTER if SMP
-	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
 	help
 	  Support for Hisilicon HiP04 SoC family
 
@@ -834,7 +834,7 @@ iosapic_unregister_intr (unsigned int gsi)
 	if (iosapic_intr_info[irq].count == 0) {
 #ifdef CONFIG_SMP
 		/* Clear affinity */
-		cpumask_setall(irq_get_affinity_mask(irq));
+		irq_data_update_affinity(irq_get_irq_data(irq), cpu_all_mask);
 #endif
 		/* Clear the interrupt information */
 		iosapic_intr_info[irq].dest = 0;
@@ -57,8 +57,8 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
 void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 {
 	if (irq < NR_IRQS) {
-		cpumask_copy(irq_get_affinity_mask(irq),
-			     cpumask_of(cpu_logical_id(hwid)));
+		irq_data_update_affinity(irq_get_irq_data(irq),
+					 cpumask_of(cpu_logical_id(hwid)));
 		irq_redir[irq] = (char) (redir & 0xff);
 	}
 }
@@ -37,7 +37,7 @@ static int ia64_set_msi_irq_affinity(struct irq_data *idata,
 	msg.data = data;
 
 	pci_write_msi_msg(irq, &msg);
-	cpumask_copy(irq_data_get_affinity_mask(idata), cpumask_of(cpu));
+	irq_data_update_affinity(idata, cpumask_of(cpu));
 
 	return 0;
 }
@@ -132,7 +132,7 @@ static int dmar_msi_set_affinity(struct irq_data *data,
 	msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
 
 	dmar_msi_write(irq, &msg);
-	cpumask_copy(irq_data_get_affinity_mask(data), mask);
+	irq_data_update_affinity(data, mask);
 
 	return 0;
 }
@@ -263,7 +263,7 @@ static int next_cpu_for_irq(struct irq_data *data)
 
 #ifdef CONFIG_SMP
 	int cpu;
-	struct cpumask *mask = irq_data_get_affinity_mask(data);
+	const struct cpumask *mask = irq_data_get_affinity_mask(data);
 	int weight = cpumask_weight(mask);
 	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
 
@@ -758,7 +758,7 @@ static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
 {
 	int cpu = smp_processor_id();
 	cpumask_t new_affinity;
-	struct cpumask *mask = irq_data_get_affinity_mask(data);
+	const struct cpumask *mask = irq_data_get_affinity_mask(data);
 
 	if (!cpumask_test_cpu(cpu, mask))
 		return;
@@ -315,7 +315,7 @@ unsigned long txn_affinity_addr(unsigned int irq, int cpu)
 {
 #ifdef CONFIG_SMP
 	struct irq_data *d = irq_get_irq_data(irq);
-	cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(cpu));
+	irq_data_update_affinity(d, cpumask_of(cpu));
 #endif
 
 	return per_cpu(cpu_data, cpu).txn_addr;
@@ -230,16 +230,17 @@ void migrate_irqs(void)
 		struct irq_data *data = irq_get_irq_data(irq);
 
 		if (irq_data_get_node(data) == cpu) {
-			struct cpumask *mask = irq_data_get_affinity_mask(data);
+			const struct cpumask *mask = irq_data_get_affinity_mask(data);
 			unsigned int newcpu = cpumask_any_and(mask,
 							      cpu_online_mask);
 			if (newcpu >= nr_cpu_ids) {
 				pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
 						    irq, cpu);
 
-				cpumask_setall(mask);
+				irq_set_affinity(irq, cpu_all_mask);
+			} else {
+				irq_set_affinity(irq, mask);
 			}
-			irq_set_affinity(irq, mask);
 		}
 	}
 }
@@ -192,7 +192,7 @@ static void hv_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 	struct pci_dev *dev;
 	struct hv_interrupt_entry out_entry, *stored_entry;
 	struct irq_cfg *cfg = irqd_cfg(data);
-	cpumask_t *affinity;
+	const cpumask_t *affinity;
 	int cpu;
 	u64 status;
 
@@ -169,7 +169,7 @@ void migrate_irqs(void)
 
 	for_each_active_irq(i) {
 		struct irq_data *data = irq_get_irq_data(i);
-		struct cpumask *mask;
+		const struct cpumask *mask;
 		unsigned int newcpu;
 
 		if (irqd_is_per_cpu(data))
@@ -185,9 +185,10 @@ void migrate_irqs(void)
 			pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
 					    i, cpu);
 
-			cpumask_setall(mask);
+			irq_set_affinity(i, cpu_all_mask);
+		} else {
+			irq_set_affinity(i, mask);
 		}
-		irq_set_affinity(i, mask);
 	}
 }
 #endif /* CONFIG_HOTPLUG_CPU */
@@ -194,7 +194,7 @@ hyperv_root_ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
 	u32 vector;
 	struct irq_cfg *cfg;
 	int ioapic_id;
-	struct cpumask *affinity;
+	const struct cpumask *affinity;
 	int cpu;
 	struct hv_interrupt_entry entry;
 	struct hyperv_root_ir_data *data = irq_data->chip_data;
@@ -8,7 +8,7 @@ config IRQCHIP
 config ARM_GIC
 	bool
 	select IRQ_DOMAIN_HIERARCHY
-	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
 
 config ARM_GIC_PM
 	bool
@@ -34,7 +34,7 @@ config ARM_GIC_V3
 	bool
 	select IRQ_DOMAIN_HIERARCHY
 	select PARTITION_PERCPU
-	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
 
 config ARM_GIC_V3_ITS
 	bool
@@ -76,7 +76,7 @@ config ARMADA_370_XP_IRQ
 	bool
 	select GENERIC_IRQ_CHIP
 	select PCI_MSI if PCI
-	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
 
 config ALPINE_MSI
 	bool
@@ -112,7 +112,7 @@ config BCM6345_L1_IRQ
 	bool
 	select GENERIC_IRQ_CHIP
 	select IRQ_DOMAIN
-	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
 
 config BCM7038_L1_IRQ
 	tristate "Broadcom STB 7038-style L1/L2 interrupt controller driver"
@@ -120,7 +120,7 @@ config BCM7038_L1_IRQ
 	default ARCH_BRCMSTB || BMIPS_GENERIC
 	select GENERIC_IRQ_CHIP
 	select IRQ_DOMAIN
-	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
 
 config BCM7120_L2_IRQ
 	tristate "Broadcom STB 7120-style L2 interrupt controller driver"
@@ -177,9 +177,9 @@ config MADERA_IRQ
 config IRQ_MIPS_CPU
 	bool
 	select GENERIC_IRQ_CHIP
-	select GENERIC_IRQ_IPI if SYS_SUPPORTS_MULTITHREADING
+	select GENERIC_IRQ_IPI if SMP && SYS_SUPPORTS_MULTITHREADING
 	select IRQ_DOMAIN
-	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
 
 config CLPS711X_IRQCHIP
 	bool
@@ -294,7 +294,7 @@ config VERSATILE_FPGA_IRQ_NR
 config XTENSA_MX
 	bool
 	select IRQ_DOMAIN
-	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
 
 config XILINX_INTC
 	bool "Xilinx Interrupt Controller IP"
@@ -322,7 +322,8 @@ config KEYSTONE_IRQ
 
 config MIPS_GIC
 	bool
-	select GENERIC_IRQ_IPI
+	select GENERIC_IRQ_IPI if SMP
+	select IRQ_DOMAIN_HIERARCHY
 	select MIPS_CM
 
 config INGENIC_IRQ
@@ -530,6 +531,7 @@ config SIFIVE_PLIC
 	bool "SiFive Platform-Level Interrupt Controller"
 	depends on RISCV
 	select IRQ_DOMAIN_HIERARCHY
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
 	help
 	   This enables support for the PLIC chip found in SiFive (and
 	   potentially other) RISC-V systems. The PLIC controls devices
@@ -216,11 +216,11 @@ static int bcm6345_l1_set_affinity(struct irq_data *d,
 		enabled = intc->cpus[old_cpu]->enable_cache[word] & mask;
 		if (enabled)
 			__bcm6345_l1_mask(d);
-		cpumask_copy(irq_data_get_affinity_mask(d), dest);
+		irq_data_update_affinity(d, dest);
 		if (enabled)
 			__bcm6345_l1_unmask(d);
 	} else {
-		cpumask_copy(irq_data_get_affinity_mask(d), dest);
+		irq_data_update_affinity(d, dest);
 	}
 	raw_spin_unlock_irqrestore(&intc->lock, flags);
 
@@ -52,13 +52,15 @@ static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);
 
 static DEFINE_SPINLOCK(gic_lock);
 static struct irq_domain *gic_irq_domain;
-static struct irq_domain *gic_ipi_domain;
 static int gic_shared_intrs;
 static unsigned int gic_cpu_pin;
 static unsigned int timer_cpu_pin;
 static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
+
+#ifdef CONFIG_GENERIC_IRQ_IPI
 static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
 static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);
+#endif /* CONFIG_GENERIC_IRQ_IPI */
 
 static struct gic_all_vpes_chip_data {
 	u32 map;
@@ -472,9 +474,11 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
 	u32 map;
 
 	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
+#ifdef CONFIG_GENERIC_IRQ_IPI
 		/* verify that shared irqs don't conflict with an IPI irq */
 		if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
 			return -EBUSY;
+#endif /* CONFIG_GENERIC_IRQ_IPI */
 
 		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
 						    &gic_level_irq_controller,
@@ -567,6 +571,8 @@ static const struct irq_domain_ops gic_irq_domain_ops = {
 	.map = gic_irq_domain_map,
 };
 
+#ifdef CONFIG_GENERIC_IRQ_IPI
+
 static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
 				const u32 *intspec, unsigned int intsize,
 				irq_hw_number_t *out_hwirq,
@@ -670,6 +676,48 @@ static const struct irq_domain_ops gic_ipi_domain_ops = {
 	.match = gic_ipi_domain_match,
 };
 
+static int gic_register_ipi_domain(struct device_node *node)
+{
+	struct irq_domain *gic_ipi_domain;
+	unsigned int v[2], num_ipis;
+
+	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
+						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
+						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
+						  node, &gic_ipi_domain_ops, NULL);
+	if (!gic_ipi_domain) {
+		pr_err("Failed to add IPI domain");
+		return -ENXIO;
+	}
+
+	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);
+
+	if (node &&
+	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
+		bitmap_set(ipi_resrv, v[0], v[1]);
+	} else {
+		/*
+		 * Reserve 2 interrupts per possible CPU/VP for use as IPIs,
+		 * meeting the requirements of arch/mips SMP.
+		 */
+		num_ipis = 2 * num_possible_cpus();
+		bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
+	}
+
+	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
+
+	return 0;
+}
+
+#else /* !CONFIG_GENERIC_IRQ_IPI */
+
+static inline int gic_register_ipi_domain(struct device_node *node)
+{
+	return 0;
+}
+
+#endif /* !CONFIG_GENERIC_IRQ_IPI */
+
 static int gic_cpu_startup(unsigned int cpu)
 {
 	/* Enable or disable EIC */
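The gic_register_ipi_domain() hunk above also shows the kernel's usual compile-out idiom: the real function exists only under CONFIG_GENERIC_IRQ_IPI, and a static inline stub returning 0 stands in otherwise, so the caller needs no conditionals. A toy sketch of the same idiom, assuming a hypothetical CONFIG_FEATURE switch and feature_register() name:

#include <stdio.h>

/* build with -DCONFIG_FEATURE to compile the real version */
#ifdef CONFIG_FEATURE
static int feature_register(void)
{
	puts("feature registered");	/* real setup work would go here */
	return 0;
}
#else
static inline int feature_register(void)
{
	return 0;	/* compiled out: succeed without doing anything */
}
#endif

int main(void)
{
	return feature_register() ? 1 : 0;	/* caller is identical either way */
}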
@@ -688,11 +736,12 @@ static int gic_cpu_startup(unsigned int cpu)
 static int __init gic_of_init(struct device_node *node,
 			      struct device_node *parent)
 {
-	unsigned int cpu_vec, i, gicconfig, v[2], num_ipis;
+	unsigned int cpu_vec, i, gicconfig;
 	unsigned long reserved;
 	phys_addr_t gic_base;
 	struct resource res;
 	size_t gic_len;
+	int ret;
 
 	/* Find the first available CPU vector. */
 	i = 0;
@@ -780,30 +829,9 @@ static int __init gic_of_init(struct device_node *node,
 		return -ENXIO;
 	}
 
-	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
-						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
-						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
-						  node, &gic_ipi_domain_ops, NULL);
-	if (!gic_ipi_domain) {
-		pr_err("Failed to add IPI domain");
-		return -ENXIO;
-	}
-
-	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);
-
-	if (node &&
-	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
-		bitmap_set(ipi_resrv, v[0], v[1]);
-	} else {
-		/*
-		 * Reserve 2 interrupts per possible CPU/VP for use as IPIs,
-		 * meeting the requirements of arch/mips SMP.
-		 */
-		num_ipis = 2 * num_possible_cpus();
-		bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
-	}
-
-	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
+	ret = gic_register_ipi_domain(node);
+	if (ret)
+		return ret;
 
 	board_bind_eic_interrupt = &gic_bind_eic_interrupt;
 
@@ -108,37 +108,43 @@ static inline void plic_irq_toggle(const struct cpumask *mask,
 				   struct irq_data *d, int enable)
 {
 	int cpu;
-	struct plic_priv *priv = irq_data_get_irq_chip_data(d);
 
-	writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
 	for_each_cpu(cpu, mask) {
 		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
 
-		if (handler->present &&
-		    cpumask_test_cpu(cpu, &handler->priv->lmask))
-			plic_toggle(handler, d->hwirq, enable);
+		plic_toggle(handler, d->hwirq, enable);
 	}
 }
 
+static void plic_irq_enable(struct irq_data *d)
+{
+	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
+}
+
+static void plic_irq_disable(struct irq_data *d)
+{
+	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
+}
+
 static void plic_irq_unmask(struct irq_data *d)
 {
-	struct cpumask amask;
-	unsigned int cpu;
 	struct plic_priv *priv = irq_data_get_irq_chip_data(d);
 
-	cpumask_and(&amask, &priv->lmask, cpu_online_mask);
-	cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
-			      &amask);
-	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
-		return;
-	plic_irq_toggle(cpumask_of(cpu), d, 1);
+	writel(1, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
 }
 
 static void plic_irq_mask(struct irq_data *d)
 {
 	struct plic_priv *priv = irq_data_get_irq_chip_data(d);
 
-	plic_irq_toggle(&priv->lmask, d, 0);
+	writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
 }
 
+static void plic_irq_eoi(struct irq_data *d)
+{
+	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+	writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+}
+
 #ifdef CONFIG_SMP
@@ -159,30 +165,21 @@ static int plic_set_affinity(struct irq_data *d,
 	if (cpu >= nr_cpu_ids)
 		return -EINVAL;
 
-	plic_irq_toggle(&priv->lmask, d, 0);
-	plic_irq_toggle(cpumask_of(cpu), d, !irqd_irq_masked(d));
+	plic_irq_disable(d);
 
 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
 
+	if (!irqd_irq_disabled(d))
+		plic_irq_enable(d);
+
 	return IRQ_SET_MASK_OK_DONE;
 }
 #endif
 
-static void plic_irq_eoi(struct irq_data *d)
-{
-	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
-
-	if (irqd_irq_masked(d)) {
-		plic_irq_unmask(d);
-		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
-		plic_irq_mask(d);
-	} else {
-		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
-	}
-}
-
 static struct irq_chip plic_edge_chip = {
 	.name = "SiFive PLIC",
+	.irq_enable = plic_irq_enable,
+	.irq_disable = plic_irq_disable,
 	.irq_ack = plic_irq_eoi,
 	.irq_mask = plic_irq_mask,
 	.irq_unmask = plic_irq_unmask,
@@ -190,10 +187,13 @@ static struct irq_chip plic_edge_chip = {
 	.irq_set_affinity = plic_set_affinity,
 #endif
 	.irq_set_type = plic_irq_set_type,
+	.flags = IRQCHIP_AFFINITY_PRE_STARTUP,
 };
 
 static struct irq_chip plic_chip = {
 	.name = "SiFive PLIC",
+	.irq_enable = plic_irq_enable,
+	.irq_disable = plic_irq_disable,
 	.irq_mask = plic_irq_mask,
 	.irq_unmask = plic_irq_unmask,
 	.irq_eoi = plic_irq_eoi,
@@ -201,6 +201,7 @@ static struct irq_chip plic_chip = {
 	.irq_set_affinity = plic_set_affinity,
 #endif
 	.irq_set_type = plic_irq_set_type,
+	.flags = IRQCHIP_AFFINITY_PRE_STARTUP,
 };
 
 static int plic_irq_set_type(struct irq_data *d, unsigned int type)
@@ -438,8 +439,11 @@ static int __init __plic_init(struct device_node *node,
 			 i * CONTEXT_ENABLE_SIZE;
 		handler->priv = priv;
 done:
-		for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
+		for (hwirq = 1; hwirq <= nr_irqs; hwirq++) {
 			plic_toggle(handler, hwirq, 0);
+			writel(1, priv->regs + PRIORITY_BASE +
+				  hwirq * PRIORITY_PER_ID);
+		}
 		nr_handlers++;
 	}
 
@@ -677,7 +677,7 @@ static int iosapic_set_affinity_irq(struct irq_data *d,
 	if (dest_cpu < 0)
 		return -1;
 
-	cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(dest_cpu));
+	irq_data_update_affinity(d, cpumask_of(dest_cpu));
 	vi->txn_addr = txn_affinity_addr(d->irq, dest_cpu);
 
 	spin_lock_irqsave(&iosapic_lock, flags);
@@ -642,7 +642,7 @@ static void hv_arch_irq_unmask(struct irq_data *data)
 	struct hv_retarget_device_interrupt *params;
 	struct tran_int_desc *int_desc;
 	struct hv_pcibus_device *hbus;
-	struct cpumask *dest;
+	const struct cpumask *dest;
 	cpumask_var_t tmp;
 	struct pci_bus *pbus;
 	struct pci_dev *pdev;
@@ -1613,7 +1613,7 @@ out:
 }
 
 static u32 hv_compose_msi_req_v1(
-	struct pci_create_interrupt *int_pkt, struct cpumask *affinity,
+	struct pci_create_interrupt *int_pkt, const struct cpumask *affinity,
 	u32 slot, u8 vector, u8 vector_count)
 {
 	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
@@ -1635,13 +1635,13 @@ static u32 hv_compose_msi_req_v1(
  * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
  * by subsequent retarget in hv_irq_unmask().
  */
-static int hv_compose_msi_req_get_cpu(struct cpumask *affinity)
+static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity)
 {
 	return cpumask_first_and(affinity, cpu_online_mask);
 }
 
 static u32 hv_compose_msi_req_v2(
-	struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity,
+	struct pci_create_interrupt2 *int_pkt, const struct cpumask *affinity,
 	u32 slot, u8 vector, u8 vector_count)
 {
 	int cpu;
@@ -1660,7 +1660,7 @@
 }
 
 static u32 hv_compose_msi_req_v3(
-	struct pci_create_interrupt3 *int_pkt, struct cpumask *affinity,
+	struct pci_create_interrupt3 *int_pkt, const struct cpumask *affinity,
 	u32 slot, u32 vector, u8 vector_count)
 {
 	int cpu;
@@ -1697,7 +1697,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 	struct hv_pci_dev *hpdev;
 	struct pci_bus *pbus;
 	struct pci_dev *pdev;
-	struct cpumask *dest;
+	const struct cpumask *dest;
 	struct compose_comp_ctxt comp;
 	struct tran_int_desc *int_desc;
 	struct msi_desc *msi_desc;
@@ -72,7 +72,7 @@ static int intc_set_affinity(struct irq_data *data,
 	if (!cpumask_intersects(cpumask, cpu_online_mask))
 		return -1;
 
-	cpumask_copy(irq_data_get_affinity_mask(data), cpumask);
+	irq_data_update_affinity(data, cpumask);
 
 	return IRQ_SET_MASK_OK_NOCOPY;
 }
@@ -528,9 +528,10 @@ static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
 	BUG_ON(irq == -1);
 
 	if (IS_ENABLED(CONFIG_SMP) && force_affinity) {
-		cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
-		cpumask_copy(irq_get_effective_affinity_mask(irq),
-			     cpumask_of(cpu));
+		struct irq_data *data = irq_get_irq_data(irq);
+
+		irq_data_update_affinity(data, cpumask_of(cpu));
+		irq_data_update_effective_affinity(data, cpumask_of(cpu));
 	}
 
 	xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu);
@@ -151,7 +151,9 @@ struct irq_common_data {
 #endif
 	void *handler_data;
 	struct msi_desc *msi_desc;
+#ifdef CONFIG_SMP
 	cpumask_var_t affinity;
+#endif
 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
 	cpumask_var_t effective_affinity;
 #endif
@@ -879,21 +881,34 @@ static inline int irq_data_get_node(struct irq_data *d)
 	return irq_common_data_get_node(d->common);
 }
 
-static inline struct cpumask *irq_get_affinity_mask(int irq)
+static inline
+const struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
+{
+#ifdef CONFIG_SMP
+	return d->common->affinity;
+#else
+	return cpumask_of(0);
+#endif
+}
+
+static inline void irq_data_update_affinity(struct irq_data *d,
+					    const struct cpumask *m)
+{
+#ifdef CONFIG_SMP
+	cpumask_copy(d->common->affinity, m);
+#endif
+}
+
+static inline const struct cpumask *irq_get_affinity_mask(int irq)
 {
 	struct irq_data *d = irq_get_irq_data(irq);
 
-	return d ? d->common->affinity : NULL;
-}
-
-static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
-{
-	return d->common->affinity;
+	return d ? irq_data_get_affinity_mask(d) : NULL;
 }
 
 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
 static inline
-struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
+const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
 {
 	return d->common->effective_affinity;
 }
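Because irq_data_get_affinity_mask() now returns a const pointer (and cpumask_of(0) on !SMP builds), drivers can no longer write through the getter; updates have to go through the new irq_data_update_affinity() helper, which compiles to nothing on !SMP. A hedged sketch of the conversion in a driver's .irq_set_affinity callback; my_chip_set_affinity() is a made-up example, only the two helpers are the ones introduced above:

#include <linux/irq.h>
#include <linux/cpumask.h>

/* illustrative driver callback, kernel context assumed */
static int my_chip_set_affinity(struct irq_data *d,
				const struct cpumask *dest, bool force)
{
	/* old pattern, no longer compiles against the const getter:
	 *	cpumask_copy(irq_data_get_affinity_mask(d), dest);
	 */
	irq_data_update_affinity(d, dest);	/* no-op on !SMP builds */

	return IRQ_SET_MASK_OK_NOCOPY;
}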
@@ -908,13 +923,14 @@ static inline void irq_data_update_effective_affinity(struct irq_data *d,
 {
 }
 static inline
-struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
+const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
 {
-	return d->common->affinity;
+	return irq_data_get_affinity_mask(d);
 }
 #endif
 
-static inline struct cpumask *irq_get_effective_affinity_mask(unsigned int irq)
+static inline
+const struct cpumask *irq_get_effective_affinity_mask(unsigned int irq)
 {
 	struct irq_data *d = irq_get_irq_data(irq);
 
@@ -24,6 +24,7 @@ config GENERIC_IRQ_SHOW_LEVEL
 
 # Supports effective affinity mask
 config GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	depends on SMP
 	bool
 
 # Support for delayed migration from interrupt context
@@ -82,6 +83,7 @@ config IRQ_FASTEOI_HIERARCHY_HANDLERS
 # Generic IRQ IPI support
 config GENERIC_IRQ_IPI
 	bool
+	depends on SMP
 	select IRQ_DOMAIN_HIERARCHY
 
 # Generic MSI interrupt support
@@ -188,7 +188,8 @@ enum {
 
 #ifdef CONFIG_SMP
 static int
-__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
+__irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
+		      bool force)
 {
 	struct irq_data *d = irq_desc_get_irq_data(desc);
 
@@ -224,7 +225,8 @@ __irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
 }
 #else
 static __always_inline int
-__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
+__irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
+		      bool force)
 {
 	return IRQ_STARTUP_NORMAL;
 }
@@ -252,7 +254,7 @@ static int __irq_startup(struct irq_desc *desc)
 int irq_startup(struct irq_desc *desc, bool resend, bool force)
 {
 	struct irq_data *d = irq_desc_get_irq_data(desc);
-	struct cpumask *aff = irq_data_get_affinity_mask(d);
+	const struct cpumask *aff = irq_data_get_affinity_mask(d);
 	int ret = 0;
 
 	desc->depth = 0;
@@ -30,7 +30,7 @@ static void irq_debug_show_bits(struct seq_file *m, int ind, unsigned int state,
 static void irq_debug_show_masks(struct seq_file *m, struct irq_desc *desc)
 {
 	struct irq_data *data = irq_desc_get_irq_data(desc);
-	struct cpumask *msk;
+	const struct cpumask *msk;
 
 	msk = irq_data_get_affinity_mask(data);
 	seq_printf(m, "affinity: %*pbl\n", cpumask_pr_args(msk));
@@ -115,11 +115,11 @@ free_descs:
 int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
 {
 	struct irq_data *data = irq_get_irq_data(irq);
-	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
+	const struct cpumask *ipimask;
 	struct irq_domain *domain;
 	unsigned int nr_irqs;
 
-	if (!irq || !data || !ipimask)
+	if (!irq || !data)
 		return -EINVAL;
 
 	domain = data->domain;
@@ -131,7 +131,8 @@ int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
 		return -EINVAL;
 	}
 
-	if (WARN_ON(!cpumask_subset(dest, ipimask)))
+	ipimask = irq_data_get_affinity_mask(data);
+	if (!ipimask || WARN_ON(!cpumask_subset(dest, ipimask)))
 		/*
 		 * Must be destroying a subset of CPUs to which this IPI
 		 * was set up to target
@@ -162,12 +163,13 @@ int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
 irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
 {
 	struct irq_data *data = irq_get_irq_data(irq);
-	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
+	const struct cpumask *ipimask;
 
-	if (!data || !ipimask || cpu >= nr_cpu_ids)
+	if (!data || cpu >= nr_cpu_ids)
 		return INVALID_HWIRQ;
 
-	if (!cpumask_test_cpu(cpu, ipimask))
+	ipimask = irq_data_get_affinity_mask(data);
+	if (!ipimask || !cpumask_test_cpu(cpu, ipimask))
 		return INVALID_HWIRQ;
 
 	/*
@@ -186,7 +188,7 @@ EXPORT_SYMBOL_GPL(ipi_get_hwirq);
 static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
 			   const struct cpumask *dest, unsigned int cpu)
 {
-	struct cpumask *ipimask = irq_data_get_affinity_mask(data);
+	const struct cpumask *ipimask = irq_data_get_affinity_mask(data);
 
 	if (!chip || !ipimask)
 		return -EINVAL;
@@ -205,16 +205,8 @@ static void irq_validate_effective_affinity(struct irq_data *data)
 	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
 		     chip->name, data->irq);
 }
-
-static inline void irq_init_effective_affinity(struct irq_data *data,
-					       const struct cpumask *mask)
-{
-	cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
-}
 #else
 static inline void irq_validate_effective_affinity(struct irq_data *data) { }
-static inline void irq_init_effective_affinity(struct irq_data *data,
-					       const struct cpumask *mask) { }
 #endif
 
 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
@@ -347,7 +339,7 @@ static bool irq_set_affinity_deactivated(struct irq_data *data,
 		return false;
 
 	cpumask_copy(desc->irq_common_data.affinity, mask);
-	irq_init_effective_affinity(data, mask);
+	irq_data_update_effective_affinity(data, mask);
 	irqd_set(data, IRQD_AFFINITY_SET);
 	return true;
 }