Merge tag 'irq-msi-2024-07-22' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull MSI interrupt updates from Thomas Gleixner:
 "Switch ARM/ARM64 over to the modern per device MSI domains.

  This simplifies the handling of platform MSI and wire-to-MSI
  controllers and removes about 500 lines of legacy code.

  Aside from that, it paves the way for ARM/ARM64 to utilize the
  dynamic allocation of PCI/MSI interrupts and to support the upcoming
  non-standard IMS (Interrupt Message Store) mechanism on PCIe devices"

* tag 'irq-msi-2024-07-22' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (25 commits)
  irqchip/gic-v3-its: Correctly fish out the DID for platform MSI
  irqchip/gic-v3-its: Correctly honor the RID remapping
  genirq/msi: Move msi_device_data to core
  genirq/msi: Remove platform MSI leftovers
  irqchip/irq-mvebu-icu: Remove platform MSI leftovers
  irqchip/irq-mvebu-sei: Switch to MSI parent
  irqchip/mvebu-odmi: Switch to parent MSI
  irqchip/mvebu-gicp: Switch to MSI parent
  irqchip/irq-mvebu-icu: Prepare for real per device MSI
  irqchip/imx-mu-msi: Switch to MSI parent
  irqchip/gic-v2m: Switch to device MSI
  irqchip/gic_v3_mbi: Switch over to parent domain
  genirq/msi: Remove platform_msi_create_device_domain()
  irqchip/mbigen: Remove platform_msi_create_device_domain() fallback
  irqchip/gic-v3-its: Switch platform MSI to MSI parent
  irqchip/irq-msi-lib: Prepare for DOMAIN_BUS_WIRED_TO_MSI
  irqchip/mbigen: Prepare for real per device MSI
  irqchip/irq-msi-lib: Prepare for DEVICE MSI to replace platform MSI
  irqchip/gic-v3-its: Provide MSI parent for PCI/MSI[-X]
  irqchip/irq-msi-lib: Prepare for PCI MSI/MSIX
  ...
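The conversions in this series follow a common pattern; the following is a condensed, illustrative sketch assembled from the diffs below (not a verbatim excerpt, and the "example_" names are placeholders). Instead of creating global PCI-MSI and platform-MSI domains up front, each irqchip marks its nexus domain as an MSI parent and delegates per device domain setup to the shared MSI library:

	static const struct msi_parent_ops example_msi_parent_ops = {
		.supported_flags   = MSI_GENERIC_FLAGS_MASK | MSI_FLAG_PCI_MSIX,
		.required_flags    = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
		.bus_select_token  = DOMAIN_BUS_NEXUS,
		.bus_select_mask   = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
		.prefix            = "EXAMPLE-",
		.init_dev_msi_info = msi_lib_init_dev_msi_info,
	};

	/* At domain creation time, replacing the former pci_msi_create_irq_domain()
	 * and platform_msi_create_irq_domain() calls: */
	inner_domain->flags         |= IRQ_DOMAIN_FLAG_MSI_PARENT;
	inner_domain->msi_parent_ops = &example_msi_parent_ops;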
Linus Torvalds 2024-07-22 14:02:19 -07:00
commit 66ebbdfdeb
22 changed files with 756 additions and 1313 deletions


@ -4,346 +4,12 @@
*
* Copyright (C) 2015 ARM Limited, All Rights Reserved.
* Author: Marc Zyngier <marc.zyngier@arm.com>
* Copyright (C) 2022 Linutronix GmbH
*/
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/slab.h>
/* Begin of removal area. Once everything is converted over. Cleanup the includes too! */
#define DEV_ID_SHIFT 21
#define MAX_DEV_MSIS (1 << (32 - DEV_ID_SHIFT))
/*
* Internal data structure containing a (made up, but unique) devid
* and the callback to write the MSI message.
*/
struct platform_msi_priv_data {
struct device *dev;
void *host_data;
msi_alloc_info_t arg;
irq_write_msi_msg_t write_msg;
int devid;
};
/* The devid allocator */
static DEFINE_IDA(platform_msi_devid_ida);
#ifdef GENERIC_MSI_DOMAIN_OPS
/*
* Convert an msi_desc to a globally unique identifier (per-device
* devid + msi_desc position in the msi_list).
*/
static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc)
{
u32 devid = desc->dev->msi.data->platform_data->devid;
return (devid << (32 - DEV_ID_SHIFT)) | desc->msi_index;
}
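A worked illustration of the encoding above (annotation, not part of the diff): with DEV_ID_SHIFT = 21, the made-up devid occupies the upper 21 bits of the 32-bit hwirq and the msi_index the lower 11 bits, which is where the 1 << 11 = 2048 vectors-per-device limit (MAX_DEV_MSIS) comes from.

	/* Illustration: devid = 3, msi_index = 5
	 *   hwirq = (3 << (32 - 21)) | 5 = (3 << 11) | 5 = 0x1805
	 */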
static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
arg->desc = desc;
arg->hwirq = platform_msi_calc_hwirq(desc);
}
static int platform_msi_init(struct irq_domain *domain,
struct msi_domain_info *info,
unsigned int virq, irq_hw_number_t hwirq,
msi_alloc_info_t *arg)
{
return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
info->chip, info->chip_data);
}
static void platform_msi_set_proxy_dev(msi_alloc_info_t *arg)
{
arg->flags |= MSI_ALLOC_FLAGS_PROXY_DEVICE;
}
#else
#define platform_msi_set_desc NULL
#define platform_msi_init NULL
#define platform_msi_set_proxy_dev(x) do {} while(0)
#endif
static void platform_msi_update_dom_ops(struct msi_domain_info *info)
{
struct msi_domain_ops *ops = info->ops;
BUG_ON(!ops);
if (ops->msi_init == NULL)
ops->msi_init = platform_msi_init;
if (ops->set_desc == NULL)
ops->set_desc = platform_msi_set_desc;
}
static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
{
struct msi_desc *desc = irq_data_get_msi_desc(data);
desc->dev->msi.data->platform_data->write_msg(desc, msg);
}
static void platform_msi_update_chip_ops(struct msi_domain_info *info)
{
struct irq_chip *chip = info->chip;
BUG_ON(!chip);
if (!chip->irq_mask)
chip->irq_mask = irq_chip_mask_parent;
if (!chip->irq_unmask)
chip->irq_unmask = irq_chip_unmask_parent;
if (!chip->irq_eoi)
chip->irq_eoi = irq_chip_eoi_parent;
if (!chip->irq_set_affinity)
chip->irq_set_affinity = msi_domain_set_affinity;
if (!chip->irq_write_msi_msg)
chip->irq_write_msi_msg = platform_msi_write_msg;
if (WARN_ON((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
!(chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)))
info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;
}
/**
* platform_msi_create_irq_domain - Create a platform MSI interrupt domain
* @fwnode: Optional fwnode of the interrupt controller
* @info: MSI domain info
* @parent: Parent irq domain
*
* Updates the domain and chip ops and creates a platform MSI
* interrupt domain.
*
* Returns:
* A domain pointer or NULL in case of failure.
*/
struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent)
{
struct irq_domain *domain;
if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
platform_msi_update_dom_ops(info);
if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
platform_msi_update_chip_ops(info);
info->flags |= MSI_FLAG_DEV_SYSFS | MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS |
MSI_FLAG_FREE_MSI_DESCS;
domain = msi_create_irq_domain(fwnode, info, parent);
if (domain)
irq_domain_update_bus_token(domain, DOMAIN_BUS_PLATFORM_MSI);
return domain;
}
EXPORT_SYMBOL_GPL(platform_msi_create_irq_domain);
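For context, this is roughly how irqchip drivers consumed the legacy interface removed later in this series (a condensed sketch based on the gicv2m and MBI hunks further down, not a verbatim excerpt; the example_ names are placeholders):

	static struct msi_domain_info example_pmsi_domain_info = {
		.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
		.ops   = &example_pmsi_ops,
		.chip  = &example_pmsi_irq_chip,
	};

	plat_domain = platform_msi_create_irq_domain(fwnode, &example_pmsi_domain_info,
						     inner_domain);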
static int platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
irq_write_msi_msg_t write_msi_msg)
{
struct platform_msi_priv_data *datap;
int err;
/*
* Limit the number of interrupts to 2048 per device. Should we
* need to bump this up, DEV_ID_SHIFT should be adjusted
* accordingly (which would impact the max number of MSI
* capable devices).
*/
if (!dev->msi.domain || !write_msi_msg || !nvec || nvec > MAX_DEV_MSIS)
return -EINVAL;
if (dev->msi.domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) {
dev_err(dev, "Incompatible msi_domain, giving up\n");
return -EINVAL;
}
err = msi_setup_device_data(dev);
if (err)
return err;
/* Already initialized? */
if (dev->msi.data->platform_data)
return -EBUSY;
datap = kzalloc(sizeof(*datap), GFP_KERNEL);
if (!datap)
return -ENOMEM;
datap->devid = ida_alloc_max(&platform_msi_devid_ida,
(1 << DEV_ID_SHIFT) - 1, GFP_KERNEL);
if (datap->devid < 0) {
err = datap->devid;
kfree(datap);
return err;
}
datap->write_msg = write_msi_msg;
datap->dev = dev;
dev->msi.data->platform_data = datap;
return 0;
}
static void platform_msi_free_priv_data(struct device *dev)
{
struct platform_msi_priv_data *data = dev->msi.data->platform_data;
dev->msi.data->platform_data = NULL;
ida_free(&platform_msi_devid_ida, data->devid);
kfree(data);
}
/**
* platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev
* @dev: The device for which to allocate interrupts
* @nvec: The number of interrupts to allocate
* @write_msi_msg: Callback to write an interrupt message for @dev
*
* Returns:
* Zero for success, or an error code in case of failure
*/
static int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
irq_write_msi_msg_t write_msi_msg)
{
int err;
err = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
if (err)
return err;
err = msi_domain_alloc_irqs_range(dev, MSI_DEFAULT_DOMAIN, 0, nvec - 1);
if (err)
platform_msi_free_priv_data(dev);
return err;
}
/**
* platform_msi_get_host_data - Query the private data associated with
* a platform-msi domain
* @domain: The platform-msi domain
*
* Return: The private data provided when calling
* platform_msi_create_device_domain().
*/
void *platform_msi_get_host_data(struct irq_domain *domain)
{
struct platform_msi_priv_data *data = domain->host_data;
return data->host_data;
}
static struct lock_class_key platform_device_msi_lock_class;
/**
* __platform_msi_create_device_domain - Create a platform-msi device domain
*
* @dev: The device generating the MSIs
* @nvec: The number of MSIs that need to be allocated
* @is_tree: flag to indicate tree hierarchy
* @write_msi_msg: Callback to write an interrupt message for @dev
* @ops: The hierarchy domain operations to use
* @host_data: Private data associated to this domain
*
* Return: An irqdomain for @nvec interrupts on success, NULL in case of error.
*
* This is for interrupt domains which stack on a platform-msi domain
* created by platform_msi_create_irq_domain(). @dev->msi.domain points to
* that platform-msi domain which is the parent for the new domain.
*/
struct irq_domain *
__platform_msi_create_device_domain(struct device *dev,
unsigned int nvec,
bool is_tree,
irq_write_msi_msg_t write_msi_msg,
const struct irq_domain_ops *ops,
void *host_data)
{
struct platform_msi_priv_data *data;
struct irq_domain *domain;
int err;
err = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
if (err)
return NULL;
/*
* Use a separate lock class for the MSI descriptor mutex on
* platform MSI device domains because the descriptor mutex nests
* into the domain mutex. See alloc/free below.
*/
lockdep_set_class(&dev->msi.data->mutex, &platform_device_msi_lock_class);
data = dev->msi.data->platform_data;
data->host_data = host_data;
domain = irq_domain_create_hierarchy(dev->msi.domain, 0,
is_tree ? 0 : nvec,
dev->fwnode, ops, data);
if (!domain)
goto free_priv;
platform_msi_set_proxy_dev(&data->arg);
err = msi_domain_prepare_irqs(domain->parent, dev, nvec, &data->arg);
if (err)
goto free_domain;
return domain;
free_domain:
irq_domain_remove(domain);
free_priv:
platform_msi_free_priv_data(dev);
return NULL;
}
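The wire-to-MSI users being migrated in this series (mbigen, mvebu-icu) reached this function through the platform_msi_create_device_domain() wrapper; roughly, as in the mbigen hunks further down (sketch, not part of the diff):

	domain = platform_msi_create_device_domain(&child->dev, num_pins,
						   mbigen_write_msg,
						   &mbigen_domain_ops, mgn_chip);
	if (!domain)
		return -ENOMEM;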
/**
* platform_msi_device_domain_free - Free interrupts associated with a platform-msi
* device domain
*
* @domain: The platform-msi device domain
* @virq: The base irq from which to perform the free operation
* @nr_irqs: How many interrupts to free from @virq
*/
void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs)
{
struct platform_msi_priv_data *data = domain->host_data;
msi_lock_descs(data->dev);
msi_domain_depopulate_descs(data->dev, virq, nr_irqs);
irq_domain_free_irqs_common(domain, virq, nr_irqs);
msi_free_msi_descs_range(data->dev, virq, virq + nr_irqs - 1);
msi_unlock_descs(data->dev);
}
/**
* platform_msi_device_domain_alloc - Allocate interrupts associated with
* a platform-msi device domain
*
* @domain: The platform-msi device domain
* @virq: The base irq from which to perform the allocate operation
* @nr_irqs: How many interrupts to allocate from @virq
*
* Return 0 on success, or an error code on failure. Must be called
* with irq_domain_mutex held (which can only be done as part of a
* top-level interrupt allocation).
*/
int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs)
{
struct platform_msi_priv_data *data = domain->host_data;
struct device *dev = data->dev;
return msi_domain_populate_irqs(domain->parent, dev, virq, nr_irqs, &data->arg);
}
/* End of removal area */
/* Real per device domain interfaces */
/*
* This indirection can go when platform_device_msi_init_and_alloc_irqs()
@ -357,7 +23,7 @@ static void platform_msi_write_msi_msg(struct irq_data *d, struct msi_msg *msg)
cb(irq_data_get_msi_desc(d), msg);
}
static void platform_msi_set_desc_byindex(msi_alloc_info_t *arg, struct msi_desc *desc)
static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
arg->desc = desc;
arg->hwirq = desc->msi_index;
@ -373,7 +39,7 @@ static const struct msi_domain_template platform_msi_template = {
},
.ops = {
.set_desc = platform_msi_set_desc_byindex,
.set_desc = platform_msi_set_desc,
},
.info = {
@ -408,10 +74,6 @@ int platform_device_msi_init_and_alloc_irqs(struct device *dev, unsigned int nve
if (!domain || !write_msi_msg)
return -EINVAL;
/* Migration support. Will go away once everything is converted */
if (!irq_domain_is_msi_parent(domain))
return platform_msi_domain_alloc_irqs(dev, nvec, write_msi_msg);
/*
* @write_msi_msg is stored in the resulting msi_domain_info::data.
* The underlying domain creation mechanism will assign that
@ -432,12 +94,6 @@ EXPORT_SYMBOL_GPL(platform_device_msi_init_and_alloc_irqs);
*/
void platform_device_msi_free_irqs_all(struct device *dev)
{
struct irq_domain *domain = dev->msi.domain;
msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN);
/* Migration support. Will go away once everything is converted */
if (!irq_domain_is_msi_parent(domain))
platform_msi_free_priv_data(dev);
}
EXPORT_SYMBOL_GPL(platform_device_msi_free_irqs_all);
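A minimal consumer-side sketch of the modern per device interface (annotation, not part of the diff; my_write_msg() and my_probe() are hypothetical):

	static void my_write_msg(struct msi_desc *desc, struct msi_msg *msg)
	{
		/* program the device's doorbell from msg->address_hi/lo and msg->data */
	}

	static int my_probe(struct platform_device *pdev)
	{
		int ret, virq;

		ret = platform_device_msi_init_and_alloc_irqs(&pdev->dev, 4, my_write_msg);
		if (ret)
			return ret;

		virq = msi_get_virq(&pdev->dev, 0);	/* Linux irq number of MSI index 0 */
		/* request_irq(virq, ...) as usual; on teardown call
		 * platform_device_msi_free_irqs_all(&pdev->dev). */
		return 0;
	}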


@ -26,6 +26,7 @@ config ARM_GIC_V2M
bool
depends on PCI
select ARM_GIC
select IRQ_MSI_LIB
select PCI_MSI
config GIC_NON_BANKED
@ -41,6 +42,7 @@ config ARM_GIC_V3
config ARM_GIC_V3_ITS
bool
select GENERIC_MSI_IRQ
select IRQ_MSI_LIB
default ARM_GIC_V3
config ARM_GIC_V3_ITS_PCI
@ -74,6 +76,9 @@ config ARM_VIC_NR
The maximum number of VICs available in the system, for
power management.
config IRQ_MSI_LIB
bool
config ARMADA_370_XP_IRQ
bool
select GENERIC_IRQ_CHIP
@ -378,6 +383,7 @@ config MSCC_OCELOT_IRQ
select GENERIC_IRQ_CHIP
config MVEBU_GICP
select IRQ_MSI_LIB
bool
config MVEBU_ICU
@ -385,6 +391,7 @@ config MVEBU_ICU
config MVEBU_ODMI
bool
select IRQ_MSI_LIB
select GENERIC_MSI_IRQ
config MVEBU_PIC
@ -508,6 +515,7 @@ config IMX_MU_MSI
select IRQ_DOMAIN
select IRQ_DOMAIN_HIERARCHY
select GENERIC_MSI_IRQ
select IRQ_MSI_LIB
help
Provide a driver for the i.MX Messaging Unit block used as a
CPU-to-CPU MSI controller. This requires a specially crafted DT


@ -29,10 +29,10 @@ obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o
obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o
obj-$(CONFIG_ARCH_REALVIEW) += irq-gic-realview.o
obj-$(CONFIG_IRQ_MSI_LIB) += irq-msi-lib.o
obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-v3-mbi.o irq-gic-common.o
obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
obj-$(CONFIG_ARM_GIC_V3_ITS_PCI) += irq-gic-v3-its-pci-msi.o
obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v4.o irq-gic-v3-its-msi-parent.o
obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC) += irq-gic-v3-its-fsl-mc-msi.o
obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o
obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o


@ -8,6 +8,7 @@
#include <linux/of.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/irqchip/arm-gic-common.h>
struct gic_quirk {
@ -28,6 +29,8 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
void gic_enable_of_quirks(const struct device_node *np,
const struct gic_quirk *quirks, void *data);
extern const struct msi_parent_ops gic_v3_its_msi_parent_ops;
#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1)
#define RDIST_FLAGS_FORCE_NON_SHAREABLE (1 << 2)


@ -26,6 +26,8 @@
#include <linux/irqchip/arm-gic.h>
#include <linux/irqchip/arm-gic-common.h>
#include "irq-msi-lib.h"
/*
* MSI_TYPER:
* [31:26] Reserved
@ -72,31 +74,6 @@ struct v2m_data {
u32 flags; /* v2m flags for specific implementation */
};
static void gicv2m_mask_msi_irq(struct irq_data *d)
{
pci_msi_mask_irq(d);
irq_chip_mask_parent(d);
}
static void gicv2m_unmask_msi_irq(struct irq_data *d)
{
pci_msi_unmask_irq(d);
irq_chip_unmask_parent(d);
}
static struct irq_chip gicv2m_msi_irq_chip = {
.name = "MSI",
.irq_mask = gicv2m_mask_msi_irq,
.irq_unmask = gicv2m_unmask_msi_irq,
.irq_eoi = irq_chip_eoi_parent,
};
static struct msi_domain_info gicv2m_msi_domain_info = {
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
.chip = &gicv2m_msi_irq_chip,
};
static phys_addr_t gicv2m_get_msi_addr(struct v2m_data *v2m, int hwirq)
{
if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)
@ -230,6 +207,7 @@ static void gicv2m_irq_domain_free(struct irq_domain *domain,
}
static const struct irq_domain_ops gicv2m_domain_ops = {
.select = msi_lib_irq_domain_select,
.alloc = gicv2m_irq_domain_alloc,
.free = gicv2m_irq_domain_free,
};
@ -250,19 +228,6 @@ static bool is_msi_spi_valid(u32 base, u32 num)
return true;
}
static struct irq_chip gicv2m_pmsi_irq_chip = {
.name = "pMSI",
};
static struct msi_domain_ops gicv2m_pmsi_ops = {
};
static struct msi_domain_info gicv2m_pmsi_domain_info = {
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
.ops = &gicv2m_pmsi_ops,
.chip = &gicv2m_pmsi_irq_chip,
};
static void __init gicv2m_teardown(void)
{
struct v2m_data *v2m, *tmp;
@ -278,9 +243,27 @@ static void __init gicv2m_teardown(void)
}
}
#define GICV2M_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
MSI_FLAG_USE_DEF_CHIP_OPS | \
MSI_FLAG_PCI_MSI_MASK_PARENT)
#define GICV2M_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
MSI_FLAG_PCI_MSIX | \
MSI_FLAG_MULTI_PCI_MSI)
static struct msi_parent_ops gicv2m_msi_parent_ops = {
.supported_flags = GICV2M_MSI_FLAGS_SUPPORTED,
.required_flags = GICV2M_MSI_FLAGS_REQUIRED,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
.prefix = "GICv2m-",
.init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static __init int gicv2m_allocate_domains(struct irq_domain *parent)
{
struct irq_domain *inner_domain, *pci_domain, *plat_domain;
struct irq_domain *inner_domain;
struct v2m_data *v2m;
v2m = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
@ -295,22 +278,8 @@ static __init int gicv2m_allocate_domains(struct irq_domain *parent)
}
irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
pci_domain = pci_msi_create_irq_domain(v2m->fwnode,
&gicv2m_msi_domain_info,
inner_domain);
plat_domain = platform_msi_create_irq_domain(v2m->fwnode,
&gicv2m_pmsi_domain_info,
inner_domain);
if (!pci_domain || !plat_domain) {
pr_err("Failed to create MSI domains\n");
if (plat_domain)
irq_domain_remove(plat_domain);
if (pci_domain)
irq_domain_remove(pci_domain);
irq_domain_remove(inner_domain);
return -ENOMEM;
}
inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
inner_domain->msi_parent_ops = &gicv2m_msi_parent_ops;
return 0;
}
@ -511,7 +480,7 @@ acpi_parse_madt_msi(union acpi_subtable_headers *header,
pr_info("applying Amazon Graviton quirk\n");
res.end = res.start + SZ_8K - 1;
flags |= GICV2M_GRAVITON_ADDRESS_ONLY;
gicv2m_msi_domain_info.flags &= ~MSI_FLAG_MULTI_PCI_MSI;
gicv2m_msi_parent_ops.supported_flags &= ~MSI_FLAG_MULTI_PCI_MSI;
}
if (m->flags & ACPI_MADT_OVERRIDE_SPI_VALUES) {


@ -0,0 +1,210 @@
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2013-2015 ARM Limited, All Rights Reserved.
// Author: Marc Zyngier <marc.zyngier@arm.com>
// Copyright (C) 2022 Linutronix GmbH
// Copyright (C) 2022 Intel
#include <linux/acpi_iort.h>
#include <linux/pci.h>
#include "irq-gic-common.h"
#include "irq-msi-lib.h"
#define ITS_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
MSI_FLAG_USE_DEF_CHIP_OPS | \
MSI_FLAG_PCI_MSI_MASK_PARENT)
#define ITS_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
MSI_FLAG_PCI_MSIX | \
MSI_FLAG_MULTI_PCI_MSI)
#ifdef CONFIG_PCI_MSI
static int its_pci_msi_vec_count(struct pci_dev *pdev, void *data)
{
int msi, msix, *count = data;
msi = max(pci_msi_vec_count(pdev), 0);
msix = max(pci_msix_vec_count(pdev), 0);
*count += max(msi, msix);
return 0;
}
static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
{
struct pci_dev **alias_dev = data;
*alias_dev = pdev;
return 0;
}
static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
int nvec, msi_alloc_info_t *info)
{
struct pci_dev *pdev, *alias_dev;
struct msi_domain_info *msi_info;
int alias_count = 0, minnvec = 1;
if (!dev_is_pci(dev))
return -EINVAL;
pdev = to_pci_dev(dev);
/*
* If pdev is downstream of any aliasing bridges, take an upper
* bound of how many other vectors could map to the same DevID.
* Also tell the ITS that the signalling will come from a proxy
* device, and that special allocation rules apply.
*/
pci_for_each_dma_alias(pdev, its_get_pci_alias, &alias_dev);
if (alias_dev != pdev) {
if (alias_dev->subordinate)
pci_walk_bus(alias_dev->subordinate,
its_pci_msi_vec_count, &alias_count);
info->flags |= MSI_ALLOC_FLAGS_PROXY_DEVICE;
}
/* ITS specific DeviceID, as the core ITS ignores dev. */
info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain->parent, pdev);
/*
* @domain->msi_domain_info->hwsize contains the size of the
* MSI[-X] domain, but vector allocation happens one by one. This
* needs some thought when MSI comes into play as the size of MSI
* might be unknown at domain creation time and therefore set to
* MSI_MAX_INDEX.
*/
msi_info = msi_get_domain_info(domain);
if (msi_info->hwsize > nvec)
nvec = msi_info->hwsize;
/*
* Always allocate a power of 2, and special case device 0 for
* broken systems where the DevID is not wired (and all devices
* appear as DevID 0). For that reason, we generously allocate a
* minimum of 32 MSIs for DevID 0. If you want more because all
* your devices are aliasing to DevID 0, consider fixing your HW.
*/
nvec = max(nvec, alias_count);
if (!info->scratchpad[0].ul)
minnvec = 32;
nvec = max_t(int, minnvec, roundup_pow_of_two(nvec));
msi_info = msi_get_domain_info(domain->parent);
return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
}
#else /* CONFIG_PCI_MSI */
#define its_pci_msi_prepare NULL
#endif /* !CONFIG_PCI_MSI */
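A worked example of the sizing rules above (annotation, not part of the diff; ignoring the hwsize clamp):

	/*
	 *  nvec = 5, alias_count = 0, DevID != 0:  roundup_pow_of_two(5)          ->  8 vectors
	 *  nvec = 5, alias_count = 9, DevID != 0:  roundup_pow_of_two(9)          -> 16 vectors
	 *  nvec = 5, DevID == 0:                   max(32, roundup_pow_of_two(5)) -> 32 vectors
	 */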
static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev,
u32 *dev_id)
{
int ret, index = 0;
/* Suck the DeviceID out of the msi-parent property */
do {
struct of_phandle_args args;
ret = of_parse_phandle_with_args(dev->of_node,
"msi-parent", "#msi-cells",
index, &args);
if (args.np == irq_domain_get_of_node(domain)) {
if (WARN_ON(args.args_count != 1))
return -EINVAL;
*dev_id = args.args[0];
break;
}
index++;
} while (!ret);
return ret;
}
int __weak iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
{
return -1;
}
static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
int nvec, msi_alloc_info_t *info)
{
struct msi_domain_info *msi_info;
u32 dev_id;
int ret;
if (dev->of_node)
ret = of_pmsi_get_dev_id(domain->parent, dev, &dev_id);
else
ret = iort_pmsi_get_dev_id(dev, &dev_id);
if (ret)
return ret;
/* ITS specific DeviceID, as the core ITS ignores dev. */
info->scratchpad[0].ul = dev_id;
/*
* @domain->msi_domain_info->hwsize contains the size of the device
* domain, but vector allocation happens one by one.
*/
msi_info = msi_get_domain_info(domain);
if (msi_info->hwsize > nvec)
nvec = msi_info->hwsize;
/* Allocate at least 32 MSIs, and always as a power of 2 */
nvec = max_t(int, 32, roundup_pow_of_two(nvec));
msi_info = msi_get_domain_info(domain->parent);
return msi_info->ops->msi_prepare(domain->parent,
dev, nvec, info);
}
static bool its_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
struct irq_domain *real_parent, struct msi_domain_info *info)
{
if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
return false;
switch(info->bus_token) {
case DOMAIN_BUS_PCI_DEVICE_MSI:
case DOMAIN_BUS_PCI_DEVICE_MSIX:
/*
* FIXME: This probably should be done after a (not yet
* existing) post domain creation callback once to make
* support for dynamic post-enable MSI-X allocations
* work without having to reevaluate the domain size
* over and over. It is known already at allocation
* time via info->hwsize.
*
* That should work perfectly fine for MSI/MSI-X but needs
* some thoughts for purely software managed MSI domains
* where the index space is only limited artificially via
* %MSI_MAX_INDEX.
*/
info->ops->msi_prepare = its_pci_msi_prepare;
break;
case DOMAIN_BUS_DEVICE_MSI:
case DOMAIN_BUS_WIRED_TO_MSI:
/*
* FIXME: See the above PCI prepare comment. The domain
* size is also known at domain creation time.
*/
info->ops->msi_prepare = its_pmsi_prepare;
break;
default:
/* Confused. How did the lib return true? */
WARN_ON_ONCE(1);
return false;
}
return true;
}
const struct msi_parent_ops gic_v3_its_msi_parent_ops = {
.supported_flags = ITS_MSI_FLAGS_SUPPORTED,
.required_flags = ITS_MSI_FLAGS_REQUIRED,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
.prefix = "ITS-",
.init_dev_msi_info = its_init_dev_msi_info,
};


@ -1,202 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013-2015 ARM Limited, All Rights Reserved.
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
#include <linux/acpi_iort.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
static void its_mask_msi_irq(struct irq_data *d)
{
pci_msi_mask_irq(d);
irq_chip_mask_parent(d);
}
static void its_unmask_msi_irq(struct irq_data *d)
{
pci_msi_unmask_irq(d);
irq_chip_unmask_parent(d);
}
static struct irq_chip its_msi_irq_chip = {
.name = "ITS-MSI",
.irq_unmask = its_unmask_msi_irq,
.irq_mask = its_mask_msi_irq,
.irq_eoi = irq_chip_eoi_parent,
};
static int its_pci_msi_vec_count(struct pci_dev *pdev, void *data)
{
int msi, msix, *count = data;
msi = max(pci_msi_vec_count(pdev), 0);
msix = max(pci_msix_vec_count(pdev), 0);
*count += max(msi, msix);
return 0;
}
static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
{
struct pci_dev **alias_dev = data;
*alias_dev = pdev;
return 0;
}
static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
int nvec, msi_alloc_info_t *info)
{
struct pci_dev *pdev, *alias_dev;
struct msi_domain_info *msi_info;
int alias_count = 0, minnvec = 1;
if (!dev_is_pci(dev))
return -EINVAL;
msi_info = msi_get_domain_info(domain->parent);
pdev = to_pci_dev(dev);
/*
* If pdev is downstream of any aliasing bridges, take an upper
* bound of how many other vectors could map to the same DevID.
* Also tell the ITS that the signalling will come from a proxy
* device, and that special allocation rules apply.
*/
pci_for_each_dma_alias(pdev, its_get_pci_alias, &alias_dev);
if (alias_dev != pdev) {
if (alias_dev->subordinate)
pci_walk_bus(alias_dev->subordinate,
its_pci_msi_vec_count, &alias_count);
info->flags |= MSI_ALLOC_FLAGS_PROXY_DEVICE;
}
/* ITS specific DeviceID, as the core ITS ignores dev. */
info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain, pdev);
/*
* Always allocate a power of 2, and special case device 0 for
* broken systems where the DevID is not wired (and all devices
* appear as DevID 0). For that reason, we generously allocate a
* minimum of 32 MSIs for DevID 0. If you want more because all
* your devices are aliasing to DevID 0, consider fixing your HW.
*/
nvec = max(nvec, alias_count);
if (!info->scratchpad[0].ul)
minnvec = 32;
nvec = max_t(int, minnvec, roundup_pow_of_two(nvec));
return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
}
static struct msi_domain_ops its_pci_msi_ops = {
.msi_prepare = its_pci_msi_prepare,
};
static struct msi_domain_info its_pci_msi_domain_info = {
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
.ops = &its_pci_msi_ops,
.chip = &its_msi_irq_chip,
};
static struct of_device_id its_device_id[] = {
{ .compatible = "arm,gic-v3-its", },
{},
};
static int __init its_pci_msi_init_one(struct fwnode_handle *handle,
const char *name)
{
struct irq_domain *parent;
parent = irq_find_matching_fwnode(handle, DOMAIN_BUS_NEXUS);
if (!parent || !msi_get_domain_info(parent)) {
pr_err("%s: Unable to locate ITS domain\n", name);
return -ENXIO;
}
if (!pci_msi_create_irq_domain(handle, &its_pci_msi_domain_info,
parent)) {
pr_err("%s: Unable to create PCI domain\n", name);
return -ENOMEM;
}
return 0;
}
static int __init its_pci_of_msi_init(void)
{
struct device_node *np;
for (np = of_find_matching_node(NULL, its_device_id); np;
np = of_find_matching_node(np, its_device_id)) {
if (!of_device_is_available(np))
continue;
if (!of_property_read_bool(np, "msi-controller"))
continue;
if (its_pci_msi_init_one(of_node_to_fwnode(np), np->full_name))
continue;
pr_info("PCI/MSI: %pOF domain created\n", np);
}
return 0;
}
#ifdef CONFIG_ACPI
static int __init
its_pci_msi_parse_madt(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_madt_generic_translator *its_entry;
struct fwnode_handle *dom_handle;
const char *node_name;
int err = -ENXIO;
its_entry = (struct acpi_madt_generic_translator *)header;
node_name = kasprintf(GFP_KERNEL, "ITS@0x%lx",
(long)its_entry->base_address);
dom_handle = iort_find_domain_token(its_entry->translation_id);
if (!dom_handle) {
pr_err("%s: Unable to locate ITS domain handle\n", node_name);
goto out;
}
err = its_pci_msi_init_one(dom_handle, node_name);
if (!err)
pr_info("PCI/MSI: %s domain created\n", node_name);
out:
kfree(node_name);
return err;
}
static int __init its_pci_acpi_msi_init(void)
{
acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
its_pci_msi_parse_madt, 0);
return 0;
}
#else
static int __init its_pci_acpi_msi_init(void)
{
return 0;
}
#endif
static int __init its_pci_msi_init(void)
{
its_pci_of_msi_init();
its_pci_acpi_msi_init();
return 0;
}
early_initcall(its_pci_msi_init);


@ -1,163 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013-2015 ARM Limited, All Rights Reserved.
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_irq.h>
static struct irq_chip its_pmsi_irq_chip = {
.name = "ITS-pMSI",
};
static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev,
u32 *dev_id)
{
int ret, index = 0;
/* Suck the DeviceID out of the msi-parent property */
do {
struct of_phandle_args args;
ret = of_parse_phandle_with_args(dev->of_node,
"msi-parent", "#msi-cells",
index, &args);
if (args.np == irq_domain_get_of_node(domain)) {
if (WARN_ON(args.args_count != 1))
return -EINVAL;
*dev_id = args.args[0];
break;
}
index++;
} while (!ret);
return ret;
}
int __weak iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
{
return -1;
}
static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
int nvec, msi_alloc_info_t *info)
{
struct msi_domain_info *msi_info;
u32 dev_id;
int ret;
msi_info = msi_get_domain_info(domain->parent);
if (dev->of_node)
ret = of_pmsi_get_dev_id(domain, dev, &dev_id);
else
ret = iort_pmsi_get_dev_id(dev, &dev_id);
if (ret)
return ret;
/* ITS specific DeviceID, as the core ITS ignores dev. */
info->scratchpad[0].ul = dev_id;
/* Allocate at least 32 MSIs, and always as a power of 2 */
nvec = max_t(int, 32, roundup_pow_of_two(nvec));
return msi_info->ops->msi_prepare(domain->parent,
dev, nvec, info);
}
static struct msi_domain_ops its_pmsi_ops = {
.msi_prepare = its_pmsi_prepare,
};
static struct msi_domain_info its_pmsi_domain_info = {
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
.ops = &its_pmsi_ops,
.chip = &its_pmsi_irq_chip,
};
static const struct of_device_id its_device_id[] = {
{ .compatible = "arm,gic-v3-its", },
{},
};
static int __init its_pmsi_init_one(struct fwnode_handle *fwnode,
const char *name)
{
struct irq_domain *parent;
parent = irq_find_matching_fwnode(fwnode, DOMAIN_BUS_NEXUS);
if (!parent || !msi_get_domain_info(parent)) {
pr_err("%s: unable to locate ITS domain\n", name);
return -ENXIO;
}
if (!platform_msi_create_irq_domain(fwnode, &its_pmsi_domain_info,
parent)) {
pr_err("%s: unable to create platform domain\n", name);
return -ENXIO;
}
pr_info("Platform MSI: %s domain created\n", name);
return 0;
}
#ifdef CONFIG_ACPI
static int __init
its_pmsi_parse_madt(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_madt_generic_translator *its_entry;
struct fwnode_handle *domain_handle;
const char *node_name;
int err = -ENXIO;
its_entry = (struct acpi_madt_generic_translator *)header;
node_name = kasprintf(GFP_KERNEL, "ITS@0x%lx",
(long)its_entry->base_address);
domain_handle = iort_find_domain_token(its_entry->translation_id);
if (!domain_handle) {
pr_err("%s: Unable to locate ITS domain handle\n", node_name);
goto out;
}
err = its_pmsi_init_one(domain_handle, node_name);
out:
kfree(node_name);
return err;
}
static void __init its_pmsi_acpi_init(void)
{
acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
its_pmsi_parse_madt, 0);
}
#else
static inline void its_pmsi_acpi_init(void) { }
#endif
static void __init its_pmsi_of_init(void)
{
struct device_node *np;
for (np = of_find_matching_node(NULL, its_device_id); np;
np = of_find_matching_node(np, its_device_id)) {
if (!of_device_is_available(np))
continue;
if (!of_property_read_bool(np, "msi-controller"))
continue;
its_pmsi_init_one(of_node_to_fwnode(np), np->full_name);
}
}
static int __init its_pmsi_init(void)
{
its_pmsi_of_init();
its_pmsi_acpi_init();
return 0;
}
early_initcall(its_pmsi_init);


@ -38,6 +38,7 @@
#include <asm/exception.h>
#include "irq-gic-common.h"
#include "irq-msi-lib.h"
#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
@ -3682,6 +3683,7 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
}
static const struct irq_domain_ops its_domain_ops = {
.select = msi_lib_irq_domain_select,
.alloc = its_irq_domain_alloc,
.free = its_irq_domain_free,
.activate = its_irq_domain_activate,
@ -4989,6 +4991,9 @@ static int its_init_domain(struct its_node *its)
irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
inner_domain->msi_parent_ops = &gic_v3_its_msi_parent_ops;
inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
return 0;
}


@ -18,6 +18,8 @@
#include <linux/irqchip/arm-gic-v3.h>
#include "irq-msi-lib.h"
struct mbi_range {
u32 spi_start;
u32 nr_spis;
@ -138,6 +140,7 @@ static void mbi_irq_domain_free(struct irq_domain *domain,
}
static const struct irq_domain_ops mbi_domain_ops = {
.select = msi_lib_irq_domain_select,
.alloc = mbi_irq_domain_alloc,
.free = mbi_irq_domain_free,
};
@ -151,54 +154,6 @@ static void mbi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
}
#ifdef CONFIG_PCI_MSI
/* PCI-specific irqchip */
static void mbi_mask_msi_irq(struct irq_data *d)
{
pci_msi_mask_irq(d);
irq_chip_mask_parent(d);
}
static void mbi_unmask_msi_irq(struct irq_data *d)
{
pci_msi_unmask_irq(d);
irq_chip_unmask_parent(d);
}
static struct irq_chip mbi_msi_irq_chip = {
.name = "MSI",
.irq_mask = mbi_mask_msi_irq,
.irq_unmask = mbi_unmask_msi_irq,
.irq_eoi = irq_chip_eoi_parent,
.irq_compose_msi_msg = mbi_compose_msi_msg,
};
static struct msi_domain_info mbi_msi_domain_info = {
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
.chip = &mbi_msi_irq_chip,
};
static int mbi_allocate_pci_domain(struct irq_domain *nexus_domain,
struct irq_domain **pci_domain)
{
*pci_domain = pci_msi_create_irq_domain(nexus_domain->parent->fwnode,
&mbi_msi_domain_info,
nexus_domain);
if (!*pci_domain)
return -ENOMEM;
return 0;
}
#else
static int mbi_allocate_pci_domain(struct irq_domain *nexus_domain,
struct irq_domain **pci_domain)
{
*pci_domain = NULL;
return 0;
}
#endif
static void mbi_compose_mbi_msg(struct irq_data *data, struct msi_msg *msg)
{
mbi_compose_msi_msg(data, msg);
@ -210,28 +165,51 @@ static void mbi_compose_mbi_msg(struct irq_data *data, struct msi_msg *msg)
iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), &msg[1]);
}
/* Platform-MSI specific irqchip */
static struct irq_chip mbi_pmsi_irq_chip = {
.name = "pMSI",
.irq_set_type = irq_chip_set_type_parent,
.irq_compose_msi_msg = mbi_compose_mbi_msg,
.flags = IRQCHIP_SUPPORTS_LEVEL_MSI,
};
static struct msi_domain_ops mbi_pmsi_ops = {
};
static struct msi_domain_info mbi_pmsi_domain_info = {
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
MSI_FLAG_LEVEL_CAPABLE),
.ops = &mbi_pmsi_ops,
.chip = &mbi_pmsi_irq_chip,
};
static int mbi_allocate_domains(struct irq_domain *parent)
static bool mbi_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
struct irq_domain *real_parent, struct msi_domain_info *info)
{
struct irq_domain *nexus_domain, *pci_domain, *plat_domain;
int err;
if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
return false;
switch (info->bus_token) {
case DOMAIN_BUS_PCI_DEVICE_MSI:
case DOMAIN_BUS_PCI_DEVICE_MSIX:
info->chip->irq_compose_msi_msg = mbi_compose_msi_msg;
return true;
case DOMAIN_BUS_DEVICE_MSI:
info->chip->irq_compose_msi_msg = mbi_compose_mbi_msg;
info->chip->irq_set_type = irq_chip_set_type_parent;
info->chip->flags |= IRQCHIP_SUPPORTS_LEVEL_MSI;
info->flags |= MSI_FLAG_LEVEL_CAPABLE;
return true;
default:
WARN_ON_ONCE(1);
return false;
}
}
#define MBI_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
MSI_FLAG_USE_DEF_CHIP_OPS | \
MSI_FLAG_PCI_MSI_MASK_PARENT)
#define MBI_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
MSI_FLAG_PCI_MSIX | \
MSI_FLAG_MULTI_PCI_MSI)
static const struct msi_parent_ops gic_v3_mbi_msi_parent_ops = {
.supported_flags = MBI_MSI_FLAGS_SUPPORTED,
.required_flags = MBI_MSI_FLAGS_REQUIRED,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
.prefix = "MBI-",
.init_dev_msi_info = mbi_init_dev_msi_info,
};
static int mbi_allocate_domain(struct irq_domain *parent)
{
struct irq_domain *nexus_domain;
nexus_domain = irq_domain_create_hierarchy(parent, 0, 0, parent->fwnode,
&mbi_domain_ops, NULL);
@ -239,22 +217,8 @@ static int mbi_allocate_domains(struct irq_domain *parent)
return -ENOMEM;
irq_domain_update_bus_token(nexus_domain, DOMAIN_BUS_NEXUS);
err = mbi_allocate_pci_domain(nexus_domain, &pci_domain);
plat_domain = platform_msi_create_irq_domain(parent->fwnode,
&mbi_pmsi_domain_info,
nexus_domain);
if (err || !plat_domain) {
if (plat_domain)
irq_domain_remove(plat_domain);
if (pci_domain)
irq_domain_remove(pci_domain);
irq_domain_remove(nexus_domain);
return -ENOMEM;
}
nexus_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
nexus_domain->msi_parent_ops = &gic_v3_mbi_msi_parent_ops;
return 0;
}
@ -317,7 +281,7 @@ int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent)
pr_info("Using MBI frame %pa\n", &mbi_phys_base);
ret = mbi_allocate_domains(parent);
ret = mbi_allocate_domain(parent);
if (ret)
goto err_free_mbi;


@ -24,6 +24,8 @@
#include <linux/pm_domain.h>
#include <linux/spinlock.h>
#include "irq-msi-lib.h"
#define IMX_MU_CHANS 4
enum imx_mu_xcr {
@ -114,20 +116,6 @@ static void imx_mu_msi_parent_ack_irq(struct irq_data *data)
imx_mu_read(msi_data, msi_data->cfg->xRR + data->hwirq * 4);
}
static struct irq_chip imx_mu_msi_irq_chip = {
.name = "MU-MSI",
.irq_ack = irq_chip_ack_parent,
};
static struct msi_domain_ops imx_mu_msi_irq_ops = {
};
static struct msi_domain_info imx_mu_msi_domain_info = {
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
.ops = &imx_mu_msi_irq_ops,
.chip = &imx_mu_msi_irq_chip,
};
static void imx_mu_msi_parent_compose_msg(struct irq_data *data,
struct msi_msg *msg)
{
@ -195,6 +183,7 @@ static void imx_mu_msi_domain_irq_free(struct irq_domain *domain,
}
static const struct irq_domain_ops imx_mu_msi_domain_ops = {
.select = msi_lib_irq_domain_select,
.alloc = imx_mu_msi_domain_irq_alloc,
.free = imx_mu_msi_domain_irq_free,
};
@ -216,35 +205,38 @@ static void imx_mu_msi_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
#define IMX_MU_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
MSI_FLAG_USE_DEF_CHIP_OPS | \
MSI_FLAG_PARENT_PM_DEV)
#define IMX_MU_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK)
static const struct msi_parent_ops imx_mu_msi_parent_ops = {
.supported_flags = IMX_MU_MSI_FLAGS_SUPPORTED,
.required_flags = IMX_MU_MSI_FLAGS_REQUIRED,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PLATFORM_MSI,
.prefix = "MU-MSI-",
.init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int imx_mu_msi_domains_init(struct imx_mu_msi *msi_data, struct device *dev)
{
struct fwnode_handle *fwnodes = dev_fwnode(dev);
struct irq_domain *parent;
/* Initialize MSI domain parent */
parent = irq_domain_create_linear(fwnodes,
IMX_MU_CHANS,
&imx_mu_msi_domain_ops,
msi_data);
parent = irq_domain_create_linear(fwnodes, IMX_MU_CHANS,
&imx_mu_msi_domain_ops, msi_data);
if (!parent) {
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
msi_data->msi_domain = platform_msi_create_irq_domain(fwnodes,
&imx_mu_msi_domain_info,
parent);
if (!msi_data->msi_domain) {
dev_err(dev, "failed to create MSI domain\n");
irq_domain_remove(parent);
return -ENOMEM;
}
irq_domain_set_pm_device(msi_data->msi_domain, dev);
parent->dev = parent->pm_dev = dev;
parent->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
parent->msi_parent_ops = &imx_mu_msi_parent_ops;
return 0;
}


@ -135,24 +135,14 @@ static int mbigen_set_type(struct irq_data *data, unsigned int type)
return 0;
}
static struct irq_chip mbigen_irq_chip = {
.name = "mbigen-v2",
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
.irq_eoi = mbigen_eoi_irq,
.irq_set_type = mbigen_set_type,
.irq_set_affinity = irq_chip_set_affinity_parent,
};
static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg)
static void mbigen_write_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
struct irq_data *d = irq_get_irq_data(desc->irq);
void __iomem *base = d->chip_data;
u32 val;
if (!msg->address_lo && !msg->address_hi)
return;
base += get_mbigen_vec_reg(d->hwirq);
val = readl_relaxed(base);
@ -165,10 +155,8 @@ static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg)
writel_relaxed(val, base);
}
static int mbigen_domain_translate(struct irq_domain *d,
struct irq_fwspec *fwspec,
unsigned long *hwirq,
unsigned int *type)
static int mbigen_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
unsigned long *hwirq, unsigned int *type)
{
if (is_of_node(fwspec->fwnode) || is_acpi_device_node(fwspec->fwnode)) {
if (fwspec->param_count != 2)
@ -192,51 +180,48 @@ static int mbigen_domain_translate(struct irq_domain *d,
return -EINVAL;
}
static int mbigen_irq_domain_alloc(struct irq_domain *domain,
unsigned int virq,
unsigned int nr_irqs,
void *args)
static void mbigen_domain_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
struct irq_fwspec *fwspec = args;
irq_hw_number_t hwirq;
unsigned int type;
struct mbigen_device *mgn_chip;
int i, err;
err = mbigen_domain_translate(domain, fwspec, &hwirq, &type);
if (err)
return err;
err = platform_msi_device_domain_alloc(domain, virq, nr_irqs);
if (err)
return err;
mgn_chip = platform_msi_get_host_data(domain);
for (i = 0; i < nr_irqs; i++)
irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
&mbigen_irq_chip, mgn_chip->base);
return 0;
arg->desc = desc;
arg->hwirq = (u32)desc->data.icookie.value;
}
static void mbigen_irq_domain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs)
{
platform_msi_device_domain_free(domain, virq, nr_irqs);
}
static const struct msi_domain_template mbigen_msi_template = {
.chip = {
.name = "mbigen-v2",
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
.irq_eoi = mbigen_eoi_irq,
.irq_set_type = mbigen_set_type,
.irq_write_msi_msg = mbigen_write_msi_msg,
},
static const struct irq_domain_ops mbigen_domain_ops = {
.translate = mbigen_domain_translate,
.alloc = mbigen_irq_domain_alloc,
.free = mbigen_irq_domain_free,
.ops = {
.set_desc = mbigen_domain_set_desc,
.msi_translate = mbigen_domain_translate,
},
.info = {
.bus_token = DOMAIN_BUS_WIRED_TO_MSI,
.flags = MSI_FLAG_USE_DEV_FWNODE,
},
};
static bool mbigen_create_device_domain(struct device *dev, unsigned int size,
struct mbigen_device *mgn_chip)
{
if (WARN_ON_ONCE(!dev->msi.domain))
return false;
return msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN,
&mbigen_msi_template, size,
NULL, mgn_chip->base);
}
static int mbigen_of_create_domain(struct platform_device *pdev,
struct mbigen_device *mgn_chip)
{
struct platform_device *child;
struct irq_domain *domain;
struct device_node *np;
u32 num_pins;
int ret = 0;
@ -258,11 +243,7 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
break;
}
domain = platform_msi_create_device_domain(&child->dev, num_pins,
mbigen_write_msg,
&mbigen_domain_ops,
mgn_chip);
if (!domain) {
if (!mbigen_create_device_domain(&child->dev, num_pins, mgn_chip)) {
ret = -ENOMEM;
break;
}
@ -284,7 +265,6 @@ MODULE_DEVICE_TABLE(acpi, mbigen_acpi_match);
static int mbigen_acpi_create_domain(struct platform_device *pdev,
struct mbigen_device *mgn_chip)
{
struct irq_domain *domain;
u32 num_pins = 0;
int ret;
@ -315,11 +295,7 @@ static int mbigen_acpi_create_domain(struct platform_device *pdev,
if (ret || num_pins == 0)
return -EINVAL;
domain = platform_msi_create_device_domain(&pdev->dev, num_pins,
mbigen_write_msg,
&mbigen_domain_ops,
mgn_chip);
if (!domain)
if (!mbigen_create_device_domain(&pdev->dev, num_pins, mgn_chip))
return -ENOMEM;
return 0;


@ -0,0 +1,140 @@
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2022 Linutronix GmbH
// Copyright (C) 2022 Intel
#include <linux/export.h>
#include "irq-msi-lib.h"
/**
* msi_lib_init_dev_msi_info - Domain info setup for MSI domains
* @dev: The device for which the domain is created for
* @domain: The domain providing this callback
* @real_parent: The real parent domain of the domain to be initialized
* which might be a domain built on top of @domain or
* @domain itself
* @info: The domain info for the domain to be initialized
*
* This function is to be used for all types of MSI domains above the root
* parent domain and any intermediates. The topmost parent domain specific
* functionality is determined via @real_parent.
*
* All intermediate domains between the root and the device domain must
* have either msi_parent_ops.init_dev_msi_info = msi_parent_init_dev_msi_info
* or invoke it down the line.
*/
bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
struct irq_domain *real_parent,
struct msi_domain_info *info)
{
const struct msi_parent_ops *pops = real_parent->msi_parent_ops;
u32 required_flags;
/* Parent ops available? */
if (WARN_ON_ONCE(!pops))
return false;
/*
* MSI parent domain specific settings. For now there is only the
* root parent domain, e.g. NEXUS, acting as a MSI parent, but it is
* possible to stack MSI parents. See x86 vector -> irq remapping
*/
if (domain->bus_token == pops->bus_select_token) {
if (WARN_ON_ONCE(domain != real_parent))
return false;
} else {
WARN_ON_ONCE(1);
return false;
}
required_flags = pops->required_flags;
/* Is the target domain bus token supported? */
switch(info->bus_token) {
case DOMAIN_BUS_PCI_DEVICE_MSI:
case DOMAIN_BUS_PCI_DEVICE_MSIX:
if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PCI_MSI)))
return false;
break;
case DOMAIN_BUS_DEVICE_MSI:
/*
* Per device MSI should never have any MSI feature bits
* set. Its sole purpose is to create a dumb interrupt
* chip which has a device specific irq_write_msi_msg()
* callback.
*/
if (WARN_ON_ONCE(info->flags))
return false;
/* Core managed MSI descriptors */
info->flags = MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS | MSI_FLAG_FREE_MSI_DESCS;
fallthrough;
case DOMAIN_BUS_WIRED_TO_MSI:
/* Remove PCI specific flags */
required_flags &= ~MSI_FLAG_PCI_MSI_MASK_PARENT;
break;
default:
/*
* This should never be reached. See
* msi_lib_irq_domain_select()
*/
WARN_ON_ONCE(1);
return false;
}
/*
* Mask out the domain specific MSI feature flags which are not
* supported by the real parent.
*/
info->flags &= pops->supported_flags;
/* Enforce the required flags */
info->flags |= required_flags;
/* Chip updates for all child bus types */
if (!info->chip->irq_eoi)
info->chip->irq_eoi = irq_chip_eoi_parent;
if (!info->chip->irq_ack)
info->chip->irq_ack = irq_chip_ack_parent;
/*
* The device MSI domain can never have a set affinity callback. It
* always has to rely on the parent domain to handle affinity
* settings. The device MSI domain just has to write the resulting
* MSI message into the hardware which is the whole purpose of the
* device MSI domain aside of mask/unmask which is provided e.g. by
* PCI/MSI device domains.
*/
info->chip->irq_set_affinity = msi_domain_set_affinity;
return true;
}
EXPORT_SYMBOL_GPL(msi_lib_init_dev_msi_info);
/**
* msi_lib_irq_domain_select - Shared select function for NEXUS domains
* @d: Pointer to the irq domain on which select is invoked
* @fwspec: Firmware spec describing what is searched
* @bus_token: The bus token for which a matching irq domain is looked up
*
* Returns: %0 if @d is not what is being looked for
*
* %1 if @d is either the domain which is directly searched for or
* if @d is providing the parent MSI domain for the functionality
* requested with @bus_token.
*/
int msi_lib_irq_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec,
enum irq_domain_bus_token bus_token)
{
const struct msi_parent_ops *ops = d->msi_parent_ops;
u32 busmask = BIT(bus_token);
if (fwspec->fwnode != d->fwnode || fwspec->param_count != 0)
return 0;
/* Handle pure domain searches */
if (bus_token == ops->bus_select_token)
return 1;
return ops && !!(ops->bus_select_mask & busmask);
}
EXPORT_SYMBOL_GPL(msi_lib_irq_domain_select);
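A worked example of the select logic (annotation, not part of the diff): for a nexus domain with bus_select_token = DOMAIN_BUS_NEXUS and bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI, and assuming fwspec->fwnode matches the domain, fwspec->param_count == 0 and CONFIG_PCI_MSI is enabled:

	/*
	 *  bus_token == DOMAIN_BUS_NEXUS        -> 1  (direct token match)
	 *  bus_token == DOMAIN_BUS_PCI_MSI      -> 1  (covered by the select mask)
	 *  bus_token == DOMAIN_BUS_GENERIC_MSI  -> 0  (not in the mask)
	 */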


@ -0,0 +1,27 @@
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2022 Linutronix GmbH
// Copyright (C) 2022 Intel
#ifndef _DRIVERS_IRQCHIP_IRQ_MSI_LIB_H
#define _DRIVERS_IRQCHIP_IRQ_MSI_LIB_H
#include <linux/bits.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#ifdef CONFIG_PCI_MSI
#define MATCH_PCI_MSI BIT(DOMAIN_BUS_PCI_MSI)
#else
#define MATCH_PCI_MSI (0)
#endif
#define MATCH_PLATFORM_MSI BIT(DOMAIN_BUS_PLATFORM_MSI)
int msi_lib_irq_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec,
enum irq_domain_bus_token bus_token);
bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
struct irq_domain *real_parent,
struct msi_domain_info *info);
#endif /* _DRIVERS_IRQCHIP_IRQ_MSI_LIB_H */


@ -17,6 +17,8 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include "irq-msi-lib.h"
#include <dt-bindings/interrupt-controller/arm-gic.h>
#define GICP_SETSPI_NSR_OFFSET 0x0
@ -145,32 +147,32 @@ static void gicp_irq_domain_free(struct irq_domain *domain,
}
static const struct irq_domain_ops gicp_domain_ops = {
.select = msi_lib_irq_domain_select,
.alloc = gicp_irq_domain_alloc,
.free = gicp_irq_domain_free,
};
static struct irq_chip gicp_msi_irq_chip = {
.name = "GICP",
.irq_set_type = irq_chip_set_type_parent,
.flags = IRQCHIP_SUPPORTS_LEVEL_MSI,
};
#define GICP_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
MSI_FLAG_USE_DEF_CHIP_OPS)
static struct msi_domain_ops gicp_msi_ops = {
};
#define GICP_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
MSI_FLAG_LEVEL_CAPABLE)
static struct msi_domain_info gicp_msi_domain_info = {
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
MSI_FLAG_LEVEL_CAPABLE),
.ops = &gicp_msi_ops,
.chip = &gicp_msi_irq_chip,
static const struct msi_parent_ops gicp_msi_parent_ops = {
.supported_flags = GICP_MSI_FLAGS_SUPPORTED,
.required_flags = GICP_MSI_FLAGS_REQUIRED,
.bus_select_token = DOMAIN_BUS_GENERIC_MSI,
.bus_select_mask = MATCH_PLATFORM_MSI,
.prefix = "GICP-",
.init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int mvebu_gicp_probe(struct platform_device *pdev)
{
struct mvebu_gicp *gicp;
struct irq_domain *inner_domain, *plat_domain, *parent_domain;
struct irq_domain *inner_domain, *parent_domain;
struct device_node *node = pdev->dev.of_node;
struct device_node *irq_parent_dn;
struct mvebu_gicp *gicp;
int ret, i;
gicp = devm_kzalloc(&pdev->dev, sizeof(*gicp), GFP_KERNEL);
@ -234,17 +236,9 @@ static int mvebu_gicp_probe(struct platform_device *pdev)
if (!inner_domain)
return -ENOMEM;
plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
&gicp_msi_domain_info,
inner_domain);
if (!plat_domain) {
irq_domain_remove(inner_domain);
return -ENOMEM;
}
platform_set_drvdata(pdev, gicp);
irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_GENERIC_MSI);
inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
inner_domain->msi_parent_ops = &gicp_msi_parent_ops;
return 0;
}


@ -20,6 +20,8 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include "irq-msi-lib.h"
#include <dt-bindings/interrupt-controller/mvebu-icu.h>
/* ICU registers */
@ -60,99 +62,13 @@ struct mvebu_icu_msi_data {
const struct mvebu_icu_subset_data *subset_data;
};
struct mvebu_icu_irq_data {
struct mvebu_icu *icu;
unsigned int icu_group;
unsigned int type;
};
static DEFINE_STATIC_KEY_FALSE(legacy_bindings);
static void mvebu_icu_init(struct mvebu_icu *icu,
struct mvebu_icu_msi_data *msi_data,
struct msi_msg *msg)
{
const struct mvebu_icu_subset_data *subset = msi_data->subset_data;
if (atomic_cmpxchg(&msi_data->initialized, false, true))
return;
/* Set 'SET' ICU SPI message address in AP */
writel_relaxed(msg[0].address_hi, icu->base + subset->offset_set_ah);
writel_relaxed(msg[0].address_lo, icu->base + subset->offset_set_al);
if (subset->icu_group != ICU_GRP_NSR)
return;
/* Set 'CLEAR' ICU SPI message address in AP (level-MSI only) */
writel_relaxed(msg[1].address_hi, icu->base + subset->offset_clr_ah);
writel_relaxed(msg[1].address_lo, icu->base + subset->offset_clr_al);
}
static void mvebu_icu_write_msg(struct msi_desc *desc, struct msi_msg *msg)
{
struct irq_data *d = irq_get_irq_data(desc->irq);
struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(d->domain);
struct mvebu_icu_irq_data *icu_irqd = d->chip_data;
struct mvebu_icu *icu = icu_irqd->icu;
unsigned int icu_int;
if (msg->address_lo || msg->address_hi) {
/* One off initialization per domain */
mvebu_icu_init(icu, msi_data, msg);
/* Configure the ICU with irq number & type */
icu_int = msg->data | ICU_INT_ENABLE;
if (icu_irqd->type & IRQ_TYPE_EDGE_RISING)
icu_int |= ICU_IS_EDGE;
icu_int |= icu_irqd->icu_group << ICU_GROUP_SHIFT;
} else {
/* De-configure the ICU */
icu_int = 0;
}
writel_relaxed(icu_int, icu->base + ICU_INT_CFG(d->hwirq));
/*
* The SATA unit has 2 ports, and a dedicated ICU entry per
* port. The ahci sata driver supports only one irq interrupt
* per SATA unit. To solve this conflict, we configure the 2
* SATA wired interrupts in the south bridge into 1 GIC
* interrupt in the north bridge. Even if only a single port
* is enabled, if sata node is enabled, both interrupts are
* configured (regardless of which port is actually in use).
*/
if (d->hwirq == ICU_SATA0_ICU_ID || d->hwirq == ICU_SATA1_ICU_ID) {
writel_relaxed(icu_int,
icu->base + ICU_INT_CFG(ICU_SATA0_ICU_ID));
writel_relaxed(icu_int,
icu->base + ICU_INT_CFG(ICU_SATA1_ICU_ID));
}
}
static struct irq_chip mvebu_icu_nsr_chip = {
.name = "ICU-NSR",
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
.irq_eoi = irq_chip_eoi_parent,
.irq_set_type = irq_chip_set_type_parent,
.irq_set_affinity = irq_chip_set_affinity_parent,
};
static struct irq_chip mvebu_icu_sei_chip = {
.name = "ICU-SEI",
.irq_ack = irq_chip_ack_parent,
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
.irq_set_type = irq_chip_set_type_parent,
.irq_set_affinity = irq_chip_set_affinity_parent,
};
static int
mvebu_icu_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
static int mvebu_icu_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
unsigned long *hwirq, unsigned int *type)
{
unsigned int param_count = static_branch_unlikely(&legacy_bindings) ? 3 : 2;
struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(d);
struct mvebu_icu_msi_data *msi_data = d->host_data;
struct mvebu_icu *icu = msi_data->icu;
/* Check the count of the parameters in dt */
@ -192,81 +108,126 @@ mvebu_icu_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
return 0;
}
static int
mvebu_icu_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *args)
static void mvebu_icu_init(struct mvebu_icu *icu,
struct mvebu_icu_msi_data *msi_data,
struct msi_msg *msg)
{
int err;
unsigned long hwirq;
struct irq_fwspec *fwspec = args;
struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(domain);
const struct mvebu_icu_subset_data *subset = msi_data->subset_data;
if (atomic_cmpxchg(&msi_data->initialized, false, true))
return;
/* Set 'SET' ICU SPI message address in AP */
writel_relaxed(msg[0].address_hi, icu->base + subset->offset_set_ah);
writel_relaxed(msg[0].address_lo, icu->base + subset->offset_set_al);
if (subset->icu_group != ICU_GRP_NSR)
return;
/* Set 'CLEAR' ICU SPI message address in AP (level-MSI only) */
writel_relaxed(msg[1].address_hi, icu->base + subset->offset_clr_ah);
writel_relaxed(msg[1].address_lo, icu->base + subset->offset_clr_al);
}
static int mvebu_icu_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
unsigned int virq, irq_hw_number_t hwirq, msi_alloc_info_t *arg)
{
irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip, info->chip_data);
return irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, false);
}
static void mvebu_icu_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
arg->desc = desc;
arg->hwirq = (u32)desc->data.icookie.value;
}
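
On the wired-to-MSI path the translated hardware interrupt number and trigger type travel in the per-instance cookie of the MSI descriptor: the low 32 bits carry the hwirq and the high 32 bits the IRQ type, as the unpacking in mvebu_icu_write_msi_msg() below shows. An illustrative encoding, assuming the packing done by the wired-to-MSI core (the example values are made up):

	/*
	 * Pack hwirq 42 with a rising-edge trigger the way the ICU driver
	 * expects to find them in desc->data.icookie.
	 */
	union msi_instance_cookie example_icookie = {
		.value = ((u64)IRQ_TYPE_EDGE_RISING << 32) | 42,
	};
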
static void mvebu_icu_write_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
struct mvebu_icu_msi_data *msi_data = d->chip_data;
unsigned int icu_group = msi_data->subset_data->icu_group;
struct msi_desc *desc = irq_data_get_msi_desc(d);
struct mvebu_icu *icu = msi_data->icu;
struct mvebu_icu_irq_data *icu_irqd;
struct irq_chip *chip = &mvebu_icu_nsr_chip;
unsigned int type;
u32 icu_int;
icu_irqd = kmalloc(sizeof(*icu_irqd), GFP_KERNEL);
if (!icu_irqd)
return -ENOMEM;
err = mvebu_icu_irq_domain_translate(domain, fwspec, &hwirq,
&icu_irqd->type);
if (err) {
dev_err(icu->dev, "failed to translate ICU parameters\n");
goto free_irqd;
if (msg->address_lo || msg->address_hi) {
/* One off initialization per domain */
mvebu_icu_init(icu, msi_data, msg);
/* Configure the ICU with irq number & type */
icu_int = msg->data | ICU_INT_ENABLE;
type = (unsigned int)(desc->data.icookie.value >> 32);
if (type & IRQ_TYPE_EDGE_RISING)
icu_int |= ICU_IS_EDGE;
icu_int |= icu_group << ICU_GROUP_SHIFT;
} else {
/* De-configure the ICU */
icu_int = 0;
}
if (static_branch_unlikely(&legacy_bindings))
icu_irqd->icu_group = fwspec->param[0];
else
icu_irqd->icu_group = msi_data->subset_data->icu_group;
icu_irqd->icu = icu;
writel_relaxed(icu_int, icu->base + ICU_INT_CFG(d->hwirq));
err = platform_msi_device_domain_alloc(domain, virq, nr_irqs);
if (err) {
dev_err(icu->dev, "failed to allocate ICU interrupt in parent domain\n");
goto free_irqd;
/*
* The SATA unit has 2 ports, and a dedicated ICU entry per
* port. The ahci sata driver supports only one irq interrupt
* per SATA unit. To solve this conflict, we configure the 2
* SATA wired interrupts in the south bridge into 1 GIC
* interrupt in the north bridge. Even if only a single port
* is enabled, if sata node is enabled, both interrupts are
* configured (regardless of which port is actually in use).
*/
if (d->hwirq == ICU_SATA0_ICU_ID || d->hwirq == ICU_SATA1_ICU_ID) {
writel_relaxed(icu_int, icu->base + ICU_INT_CFG(ICU_SATA0_ICU_ID));
writel_relaxed(icu_int, icu->base + ICU_INT_CFG(ICU_SATA1_ICU_ID));
}
/* Make sure there is no interrupt left pending by the firmware */
err = irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, false);
if (err)
goto free_msi;
if (icu_irqd->icu_group == ICU_GRP_SEI)
chip = &mvebu_icu_sei_chip;
err = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
chip, icu_irqd);
if (err) {
dev_err(icu->dev, "failed to set the data to IRQ domain\n");
goto free_msi;
}
return 0;
free_msi:
platform_msi_device_domain_free(domain, virq, nr_irqs);
free_irqd:
kfree(icu_irqd);
return err;
}
static void
mvebu_icu_irq_domain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs)
{
struct irq_data *d = irq_get_irq_data(virq);
struct mvebu_icu_irq_data *icu_irqd = d->chip_data;
static const struct msi_domain_template mvebu_icu_nsr_msi_template = {
.chip = {
.name = "ICU-NSR",
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
.irq_eoi = irq_chip_eoi_parent,
.irq_set_type = irq_chip_set_type_parent,
.irq_write_msi_msg = mvebu_icu_write_msi_msg,
.flags = IRQCHIP_SUPPORTS_LEVEL_MSI,
},
kfree(icu_irqd);
.ops = {
.msi_translate = mvebu_icu_translate,
.msi_init = mvebu_icu_msi_init,
.set_desc = mvebu_icu_set_desc,
},
platform_msi_device_domain_free(domain, virq, nr_irqs);
}
.info = {
.bus_token = DOMAIN_BUS_WIRED_TO_MSI,
.flags = MSI_FLAG_LEVEL_CAPABLE |
MSI_FLAG_USE_DEV_FWNODE,
},
};
static const struct irq_domain_ops mvebu_icu_domain_ops = {
.translate = mvebu_icu_irq_domain_translate,
.alloc = mvebu_icu_irq_domain_alloc,
.free = mvebu_icu_irq_domain_free,
static const struct msi_domain_template mvebu_icu_sei_msi_template = {
.chip = {
.name = "ICU-SEI",
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
.irq_ack = irq_chip_ack_parent,
.irq_set_type = irq_chip_set_type_parent,
.irq_write_msi_msg = mvebu_icu_write_msi_msg,
.flags = IRQCHIP_SUPPORTS_LEVEL_MSI,
},
.ops = {
.msi_translate = mvebu_icu_translate,
.msi_init = mvebu_icu_msi_init,
.set_desc = mvebu_icu_set_desc,
},
.info = {
.bus_token = DOMAIN_BUS_WIRED_TO_MSI,
.flags = MSI_FLAG_LEVEL_CAPABLE |
MSI_FLAG_USE_DEV_FWNODE,
},
};
static const struct mvebu_icu_subset_data mvebu_icu_nsr_subset_data = {
@ -297,10 +258,10 @@ static const struct of_device_id mvebu_icu_subset_of_match[] = {
static int mvebu_icu_subset_probe(struct platform_device *pdev)
{
const struct msi_domain_template *tmpl;
struct mvebu_icu_msi_data *msi_data;
struct device_node *msi_parent_dn;
struct device *dev = &pdev->dev;
struct irq_domain *irq_domain;
bool sei;
msi_data = devm_kzalloc(dev, sizeof(*msi_data), GFP_KERNEL);
if (!msi_data)
@ -314,20 +275,18 @@ static int mvebu_icu_subset_probe(struct platform_device *pdev)
msi_data->subset_data = of_device_get_match_data(dev);
}
dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
DOMAIN_BUS_PLATFORM_MSI);
dev->msi.domain = of_msi_get_domain(dev, dev->of_node, DOMAIN_BUS_PLATFORM_MSI);
if (!dev->msi.domain)
return -EPROBE_DEFER;
msi_parent_dn = irq_domain_get_of_node(dev->msi.domain);
if (!msi_parent_dn)
if (!irq_domain_get_of_node(dev->msi.domain))
return -ENODEV;
irq_domain = platform_msi_create_device_tree_domain(dev, ICU_MAX_IRQS,
mvebu_icu_write_msg,
&mvebu_icu_domain_ops,
msi_data);
if (!irq_domain) {
sei = msi_data->subset_data->icu_group == ICU_GRP_SEI;
tmpl = sei ? &mvebu_icu_sei_msi_template : &mvebu_icu_nsr_msi_template;
if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, tmpl,
ICU_MAX_IRQS, NULL, msi_data)) {
dev_err(dev, "Failed to create ICU MSI domain\n");
return -ENOMEM;
}
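
Consumers of the ICU are not affected by this rework: a wired interrupt that the ICU converts into an MSI underneath is still described and requested the usual way. A hedged sketch of a hypothetical downstream driver (example_* names are illustrative, not part of this commit):

static irqreturn_t example_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_consumer_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;
	/* The ICU/GICP translation to an MSI happens transparently underneath */
	return devm_request_irq(&pdev->dev, irq, example_handler, 0,
				dev_name(&pdev->dev), NULL);
}
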


@ -17,6 +17,9 @@
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include "irq-msi-lib.h"
#include <dt-bindings/interrupt-controller/arm-gic.h>
#define GICP_ODMIN_SET 0x40
@ -141,27 +144,29 @@ static void odmi_irq_domain_free(struct irq_domain *domain,
}
static const struct irq_domain_ops odmi_domain_ops = {
.select = msi_lib_irq_domain_select,
.alloc = odmi_irq_domain_alloc,
.free = odmi_irq_domain_free,
};
static struct irq_chip odmi_msi_irq_chip = {
.name = "ODMI",
};
#define ODMI_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
MSI_FLAG_USE_DEF_CHIP_OPS)
static struct msi_domain_ops odmi_msi_ops = {
};
#define ODMI_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK)
static struct msi_domain_info odmi_msi_domain_info = {
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
.ops = &odmi_msi_ops,
.chip = &odmi_msi_irq_chip,
static const struct msi_parent_ops odmi_msi_parent_ops = {
.supported_flags = ODMI_MSI_FLAGS_SUPPORTED,
.required_flags = ODMI_MSI_FLAGS_REQUIRED,
.bus_select_token = DOMAIN_BUS_GENERIC_MSI,
.bus_select_mask = MATCH_PLATFORM_MSI,
.prefix = "ODMI-",
.init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int __init mvebu_odmi_init(struct device_node *node,
struct device_node *parent)
{
struct irq_domain *parent_domain, *inner_domain, *plat_domain;
struct irq_domain *parent_domain, *inner_domain;
int ret, i;
if (of_property_read_u32(node, "marvell,odmi-frames", &odmis_count))
@ -208,18 +213,12 @@ static int __init mvebu_odmi_init(struct device_node *node,
goto err_unmap;
}
plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
&odmi_msi_domain_info,
inner_domain);
if (!plat_domain) {
ret = -ENOMEM;
goto err_remove_inner;
}
irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_GENERIC_MSI);
inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
inner_domain->msi_parent_ops = &odmi_msi_parent_ops;
return 0;
err_remove_inner:
irq_domain_remove(inner_domain);
err_unmap:
for (i = 0; i < odmis_count; i++) {
struct odmi_data *odmi = &odmis[i];


@ -14,6 +14,8 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include "irq-msi-lib.h"
/* Cause register */
#define GICP_SECR(idx) (0x0 + ((idx) * 0x4))
/* Mask register */
@ -190,6 +192,7 @@ static void mvebu_sei_domain_free(struct irq_domain *domain, unsigned int virq,
}
static const struct irq_domain_ops mvebu_sei_domain_ops = {
.select = msi_lib_irq_domain_select,
.alloc = mvebu_sei_domain_alloc,
.free = mvebu_sei_domain_free,
};
@ -307,21 +310,6 @@ static const struct irq_domain_ops mvebu_sei_cp_domain_ops = {
.free = mvebu_sei_cp_domain_free,
};
static struct irq_chip mvebu_sei_msi_irq_chip = {
.name = "SEI pMSI",
.irq_ack = irq_chip_ack_parent,
.irq_set_type = irq_chip_set_type_parent,
};
static struct msi_domain_ops mvebu_sei_msi_ops = {
};
static struct msi_domain_info mvebu_sei_msi_domain_info = {
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
.ops = &mvebu_sei_msi_ops,
.chip = &mvebu_sei_msi_irq_chip,
};
static void mvebu_sei_handle_cascade_irq(struct irq_desc *desc)
{
struct mvebu_sei *sei = irq_desc_get_handler_data(desc);
@ -360,10 +348,23 @@ static void mvebu_sei_reset(struct mvebu_sei *sei)
}
}
#define SEI_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
MSI_FLAG_USE_DEF_CHIP_OPS)
#define SEI_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK)
static const struct msi_parent_ops sei_msi_parent_ops = {
.supported_flags = SEI_MSI_FLAGS_SUPPORTED,
.required_flags = SEI_MSI_FLAGS_REQUIRED,
.bus_select_mask = MATCH_PLATFORM_MSI,
.bus_select_token = DOMAIN_BUS_GENERIC_MSI,
.prefix = "SEI-",
.init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int mvebu_sei_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct irq_domain *plat_domain;
struct mvebu_sei *sei;
u32 parent_irq;
int ret;
@ -440,33 +441,20 @@ static int mvebu_sei_probe(struct platform_device *pdev)
}
irq_domain_update_bus_token(sei->cp_domain, DOMAIN_BUS_GENERIC_MSI);
plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
&mvebu_sei_msi_domain_info,
sei->cp_domain);
if (!plat_domain) {
pr_err("Failed to create CPs MSI domain\n");
ret = -ENOMEM;
goto remove_cp_domain;
}
sei->cp_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
sei->cp_domain->msi_parent_ops = &sei_msi_parent_ops;
mvebu_sei_reset(sei);
irq_set_chained_handler_and_data(parent_irq,
mvebu_sei_handle_cascade_irq,
sei);
irq_set_chained_handler_and_data(parent_irq, mvebu_sei_handle_cascade_irq, sei);
return 0;
remove_cp_domain:
irq_domain_remove(sei->cp_domain);
remove_ap_domain:
irq_domain_remove(sei->ap_domain);
remove_sei_domain:
irq_domain_remove(sei->sei_domain);
dispose_irq:
irq_dispose_mapping(parent_irq);
return ret;
}


@ -148,17 +148,35 @@ static void pci_device_domain_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
arg->hwirq = desc->msi_index;
}
static __always_inline void cond_mask_parent(struct irq_data *data)
{
struct msi_domain_info *info = data->domain->host_data;
if (unlikely(info->flags & MSI_FLAG_PCI_MSI_MASK_PARENT))
irq_chip_mask_parent(data);
}
static __always_inline void cond_unmask_parent(struct irq_data *data)
{
struct msi_domain_info *info = data->domain->host_data;
if (unlikely(info->flags & MSI_FLAG_PCI_MSI_MASK_PARENT))
irq_chip_unmask_parent(data);
}
static void pci_irq_mask_msi(struct irq_data *data)
{
struct msi_desc *desc = irq_data_get_msi_desc(data);
pci_msi_mask(desc, BIT(data->irq - desc->irq));
cond_mask_parent(data);
}
static void pci_irq_unmask_msi(struct irq_data *data)
{
struct msi_desc *desc = irq_data_get_msi_desc(data);
cond_unmask_parent(data);
pci_msi_unmask(desc, BIT(data->irq - desc->irq));
}
@ -195,10 +213,12 @@ static const struct msi_domain_template pci_msi_template = {
static void pci_irq_mask_msix(struct irq_data *data)
{
pci_msix_mask(irq_data_get_msi_desc(data));
cond_mask_parent(data);
}
static void pci_irq_unmask_msix(struct irq_data *data)
{
cond_unmask_parent(data);
pci_msix_unmask(irq_data_get_msi_desc(data));
}
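
The new cond_mask_parent()/cond_unmask_parent() helpers only act when the per device PCI/MSI domain carries the new MSI_FLAG_PCI_MSI_MASK_PARENT flag, so mask/unmask is mirrored into the parent domain only on request. A hedged sketch of how a parent interrupt controller might opt in; the assumption here is that the flag is advertised through the parent's required_flags, and all example_* names are illustrative:

static const struct msi_parent_ops example_pci_msi_parent_ops = {
	.supported_flags	= MSI_GENERIC_FLAGS_MASK | MSI_FLAG_PCI_MSIX,
	/* Force parent mask/unmask propagation onto the per device domains */
	.required_flags		= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
				  MSI_FLAG_PCI_MSI_MASK_PARENT,
	.bus_select_token	= DOMAIN_BUS_NEXUS,
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};
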


@ -21,11 +21,7 @@
#include <linux/irqdomain_defs.h>
#include <linux/cpumask_types.h>
#include <linux/msi_api.h>
#include <linux/xarray.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/bits.h>
#include <asm/msi.h>
@ -81,7 +77,6 @@ extern int pci_msi_ignore_mask;
/* Helper functions */
struct msi_desc;
struct pci_dev;
struct platform_msi_priv_data;
struct device_attribute;
struct irq_domain;
struct irq_affinity_desc;
@ -228,22 +223,6 @@ struct msi_dev_domain {
struct irq_domain *domain;
};
/**
* msi_device_data - MSI per device data
* @properties: MSI properties which are interesting to drivers
* @platform_data: Platform-MSI specific data
* @mutex: Mutex protecting the MSI descriptor store
* @__domains: Internal data for per device MSI domains
* @__iter_idx: Index to search the next entry for iterators
*/
struct msi_device_data {
unsigned long properties;
struct platform_msi_priv_data *platform_data;
struct mutex mutex;
struct msi_dev_domain __domains[MSI_MAX_DEVICE_IRQDOMAINS];
unsigned long __iter_idx;
};
int msi_setup_device_data(struct device *dev);
void msi_lock_descs(struct device *dev);
@ -556,6 +535,8 @@ enum {
MSI_FLAG_USE_DEV_FWNODE = (1 << 7),
/* Set parent->dev into domain->pm_dev on device domain creation */
MSI_FLAG_PARENT_PM_DEV = (1 << 8),
/* Support for parent mask/unmask */
MSI_FLAG_PCI_MSI_MASK_PARENT = (1 << 9),
/* Mask for the generic functionality */
MSI_GENERIC_FLAGS_MASK = GENMASK(15, 0),
@ -639,35 +620,6 @@ void msi_domain_free_irqs_all(struct device *dev, unsigned int domid);
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent);
/* When an MSI domain is used as an intermediate domain */
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
int nvec, msi_alloc_info_t *args);
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
int virq, int nvec, msi_alloc_info_t *args);
void msi_domain_depopulate_descs(struct device *dev, int virq, int nvec);
struct irq_domain *
__platform_msi_create_device_domain(struct device *dev,
unsigned int nvec,
bool is_tree,
irq_write_msi_msg_t write_msi_msg,
const struct irq_domain_ops *ops,
void *host_data);
#define platform_msi_create_device_domain(dev, nvec, write, ops, data) \
__platform_msi_create_device_domain(dev, nvec, false, write, ops, data)
#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \
__platform_msi_create_device_domain(dev, nvec, true, write, ops, data)
int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs);
void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nvec);
void *platform_msi_get_host_data(struct irq_domain *domain);
/* Per device platform MSI */
int platform_device_msi_init_and_alloc_irqs(struct device *dev, unsigned int nvec,
irq_write_msi_msg_t write_msi_msg);
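
With the legacy platform_msi_create_*_domain() interfaces removed, the remaining driver-facing entry point is the per device variant whose prototype closes the hunk above. A minimal usage sketch, assuming a device that exposes a programmable doorbell (example_* names are hypothetical):

static void example_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	/* Program the device doorbell from msg->address_hi/lo and msg->data */
}

static int example_probe(struct device *dev)
{
	/* Creates the per device MSI domain and allocates four interrupts */
	return platform_device_msi_init_and_alloc_irqs(dev, 4, example_write_msi_msg);
}
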


@ -503,7 +503,8 @@ struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
struct fwnode_handle *fwnode = fwspec->fwnode;
int rc;
/* We might want to match the legacy controller last since
/*
* We might want to match the legacy controller last since
* it might potentially be set to match all interrupts in
* the absence of a device node. This isn't a problem so far
* yet though...


@ -8,17 +8,33 @@
* This file contains common code to support Message Signaled Interrupts for
* PCI compatible and non PCI compatible devices.
*/
#include <linux/types.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/pci.h>
#include <linux/types.h>
#include <linux/xarray.h>
#include "internals.h"
/**
* struct msi_device_data - MSI per device data
* @properties: MSI properties which are interesting to drivers
* @mutex: Mutex protecting the MSI descriptor store
* @__domains: Internal data for per device MSI domains
* @__iter_idx: Index to search the next entry for iterators
*/
struct msi_device_data {
unsigned long properties;
struct mutex mutex;
struct msi_dev_domain __domains[MSI_MAX_DEVICE_IRQDOMAINS];
unsigned long __iter_idx;
};
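
With struct msi_device_data now private to the MSI core, code outside kernel/irq/ reaches descriptors through the accessor API instead of dereferencing dev->msi.data. A hedged sketch of walking a device's descriptors with the existing iterator helpers (example_* name is illustrative):

static void example_dump_msi_descs(struct device *dev)
{
	struct msi_desc *desc;

	msi_lock_descs(dev);
	msi_for_each_desc(desc, dev, MSI_DESC_ALL)
		dev_info(dev, "MSI index %u -> Linux irq %u\n", desc->msi_index, desc->irq);
	msi_unlock_descs(dev);
}
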
/**
* struct msi_ctrl - MSI internal management control structure
* @domid: ID of the domain on which management operations should be done
@ -1088,8 +1104,8 @@ bool msi_match_device_irq_domain(struct device *dev, unsigned int domid,
return ret;
}
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
int nvec, msi_alloc_info_t *arg)
static int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
int nvec, msi_alloc_info_t *arg)
{
struct msi_domain_info *info = domain->host_data;
struct msi_domain_ops *ops = info->ops;
@ -1097,77 +1113,6 @@ int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
return ops->msi_prepare(domain, dev, nvec, arg);
}
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
int virq_base, int nvec, msi_alloc_info_t *arg)
{
struct msi_domain_info *info = domain->host_data;
struct msi_domain_ops *ops = info->ops;
struct msi_ctrl ctrl = {
.domid = MSI_DEFAULT_DOMAIN,
.first = virq_base,
.last = virq_base + nvec - 1,
};
struct msi_desc *desc;
struct xarray *xa;
int ret, virq;
msi_lock_descs(dev);
if (!msi_ctrl_valid(dev, &ctrl)) {
ret = -EINVAL;
goto unlock;
}
ret = msi_domain_add_simple_msi_descs(dev, &ctrl);
if (ret)
goto unlock;
xa = &dev->msi.data->__domains[ctrl.domid].store;
for (virq = virq_base; virq < virq_base + nvec; virq++) {
desc = xa_load(xa, virq);
desc->irq = virq;
ops->set_desc(arg, desc);
ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
if (ret)
goto fail;
irq_set_msi_desc(virq, desc);
}
msi_unlock_descs(dev);
return 0;
fail:
for (--virq; virq >= virq_base; virq--) {
msi_domain_depopulate_descs(dev, virq, 1);
irq_domain_free_irqs_common(domain, virq, 1);
}
msi_domain_free_descs(dev, &ctrl);
unlock:
msi_unlock_descs(dev);
return ret;
}
void msi_domain_depopulate_descs(struct device *dev, int virq_base, int nvec)
{
struct msi_ctrl ctrl = {
.domid = MSI_DEFAULT_DOMAIN,
.first = virq_base,
.last = virq_base + nvec - 1,
};
struct msi_desc *desc;
struct xarray *xa;
unsigned long idx;
if (!msi_ctrl_valid(dev, &ctrl))
return;
xa = &dev->msi.data->__domains[ctrl.domid].store;
xa_for_each_range(xa, idx, desc, ctrl.first, ctrl.last)
desc->irq = 0;
}
/*
* Carefully check whether the device can use reservation mode. If
* reservation mode is enabled then the early activation will assign a