PCI: designware: Add support for v3.65 hardware

The Keystone PCI controller is based on v3.65 DesignWare hardware.  This
version differs from newer versions of the hardware in the functional
areas described below, which makes it necessary to change
dw_pcie_host_init() to support v3.65-based PCI controllers.

    1. No support for the ATU port.  Any ATU-specific resource handling
       code is to be bypassed for v3.65 hardware.

    2. The MSI controller uses application space to implement MSI, and
       32 MSI interrupts are multiplexed over 8 IRQs to the host, so the
       code that processes MSI IRQs needs to be different.  This patch
       allows the platform driver to provide its own irq_domain_ops
       pointer to irq_domain_add_linear() through an API callback from
       the DesignWare core driver.

    3. MSI interrupt generation requires the EP to write to the RC's
       application register, so enhance the driver to allow setting up
       inbound access to the MSI IRQ register through a post-scan-bus
       API callback (a usage sketch of both new callbacks follows this
       list).

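Illustration only, not part of the patch: the sketch below shows how a
v3.65-based platform driver might populate the two new pcie_host_ops
callbacks.  The ks_* names and the hook bodies are hypothetical
placeholders; only the callback signatures, MAX_MSI_IRQS, and the call
sites in dw_pcie_host_init()/dw_pcie_scan_bus() come from the diff below.

	/*
	 * Illustration only: hypothetical ks_* hooks for a v3.65 platform
	 * driver.  Only the pcie_host_ops fields and their call sites come
	 * from this patch.
	 */
	#include <linux/irq.h>
	#include <linux/irqdomain.h>
	#include <linux/msi.h>
	#include <linux/of.h>

	#include "pcie-designware.h"

	static int ks_msi_map(struct irq_domain *domain, unsigned int virq,
			      irq_hw_number_t hwirq)
	{
		/* Hypothetical: tie the virq to the MSI chip passed as host_data */
		irq_set_chip_data(virq, domain->host_data);
		return 0;
	}

	static const struct irq_domain_ops ks_msi_domain_ops = {
		.map = ks_msi_map,
	};

	/* Runs instead of the generic MSI domain setup in dw_pcie_host_init() */
	static int ks_msi_host_init(struct pcie_port *pp, struct msi_chip *chip)
	{
		pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
						       MAX_MSI_IRQS,
						       &ks_msi_domain_ops, chip);
		return pp->irq_domain ? 0 : -ENXIO;
	}

	/* Called from dw_pcie_scan_bus() once the root bus has been created */
	static void ks_scan_bus(struct pcie_port *pp)
	{
		/* platform-specific: set up inbound access to the MSI IRQ register */
	}

	static struct pcie_host_ops ks_pcie_host_ops = {
		.msi_host_init	= ks_msi_host_init,
		.scan_bus	= ks_scan_bus,
	};

With such an ops structure, dw_pcie_host_init() skips the default MSI
domain setup and calls msi_host_init() instead, and dw_pcie_scan_bus()
invokes scan_bus() once the root bus exists.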
Signed-off-by: Murali Karicheri <m-karicheri2@ti.com>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Reviewed-by: Pratyush Anand <pratyush.anand@st.com>
Acked-by: Mohit KUMAR <mohit.kumar@st.com>
Acked-by: Jingoo Han <jg1.han@samsung.com>
CC: Santosh Shilimkar <santosh.shilimkar@ti.com>
CC: Russell King <linux@arm.linux.org.uk>
CC: Grant Likely <grant.likely@linaro.org>
CC: Rob Herring <robh+dt@kernel.org>
CC: Jingoo Han <jg1.han@samsung.com>
CC: Richard Zhu <r65037@freescale.com>
CC: Kishon Vijay Abraham I <kishon@ti.com>
CC: Marek Vasut <marex@denx.de>
CC: Arnd Bergmann <arnd@arndb.de>
CC: Pawel Moll <pawel.moll@arm.com>
CC: Mark Rutland <mark.rutland@arm.com>
CC: Ian Campbell <ijc+devicetree@hellion.org.uk>
CC: Kumar Gala <galak@codeaurora.org>
CC: Randy Dunlap <rdunlap@infradead.org>
---
 drivers/pci/host/pcie-designware.c
 drivers/pci/host/pcie-designware.h
 2 files changed, 36 insertions(+), 18 deletions(-)

--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -425,7 +425,7 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
 	struct resource *cfg_res;
 	u32 val, na, ns;
 	const __be32 *addrp;
-	int i, index;
+	int i, index, ret;
 
 	/* Find the address cell size and the number of cells in order to get
 	 * the untranslated address.
@@ -511,18 +511,25 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
 	pp->mem_base = pp->mem.start;
 
-	pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
-					pp->config.cfg0_size);
-	if (!pp->va_cfg0_base) {
-		dev_err(pp->dev, "error with ioremap in function\n");
-		return -ENOMEM;
+	if (!pp->va_cfg0_base) {
+		pp->cfg0_base = pp->cfg.start;
+		pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
+						pp->config.cfg0_size);
+		if (!pp->va_cfg0_base) {
+			dev_err(pp->dev, "error with ioremap in function\n");
+			return -ENOMEM;
+		}
 	}
 
-	pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
-					pp->config.cfg1_size);
-	if (!pp->va_cfg1_base) {
-		dev_err(pp->dev, "error with ioremap\n");
-		return -ENOMEM;
+	if (!pp->va_cfg1_base) {
+		pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size;
+		pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
+						pp->config.cfg1_size);
+		if (!pp->va_cfg1_base) {
+			dev_err(pp->dev, "error with ioremap\n");
+			return -ENOMEM;
+		}
 	}
 
 	if (of_property_read_u32(np, "num-lanes", &pp->lanes)) {
 		dev_err(pp->dev, "Failed to parse the number of lanes\n");
@@ -530,6 +537,7 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
 	}
 
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
-		pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
-					MAX_MSI_IRQS, &msi_domain_ops,
-					&dw_pcie_msi_chip);
+		if (!pp->ops->msi_host_init) {
+			pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
+						MAX_MSI_IRQS, &msi_domain_ops,
+						&dw_pcie_msi_chip);
@@ -540,6 +548,11 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
 
-		for (i = 0; i < MAX_MSI_IRQS; i++)
-			irq_create_mapping(pp->irq_domain, i);
+			for (i = 0; i < MAX_MSI_IRQS; i++)
+				irq_create_mapping(pp->irq_domain, i);
+		} else {
+			ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
+			if (ret < 0)
+				return ret;
+		}
 	}
 
 	if (pp->ops->host_init)
@@ -799,6 +812,9 @@ static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
 		BUG();
 	}
 
+	if (bus && pp->ops->scan_bus)
+		pp->ops->scan_bus(pp);
+
 	return bus;
 }

--- a/drivers/pci/host/pcie-designware.h
+++ b/drivers/pci/host/pcie-designware.h
@@ -74,6 +74,8 @@ struct pcie_host_ops {
 	void (*msi_set_irq)(struct pcie_port *pp, int irq);
 	void (*msi_clear_irq)(struct pcie_port *pp, int irq);
 	u32 (*get_msi_data)(struct pcie_port *pp);
+	void (*scan_bus)(struct pcie_port *pp);
+	int (*msi_host_init)(struct pcie_port *pp, struct msi_chip *chip);
 };
 
 int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val);
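
For completeness, a second hedged sketch: a platform driver could pre-map
its own configuration window before calling dw_pcie_host_init(), so the new
!pp->va_cfg0_base / !pp->va_cfg1_base checks skip the core's devm_ioremap().
The resource index, the shared cfg0/cfg1 window, and the ks_* names are
assumptions for illustration, not something this patch defines.

	/*
	 * Illustration only: pre-map the configuration window so the new
	 * !pp->va_cfg0_base / !pp->va_cfg1_base checks skip the core ioremap.
	 * The resource index and the shared cfg0/cfg1 window are assumptions.
	 */
	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>

	#include "pcie-designware.h"

	static int __init ks_add_pcie_port(struct pcie_port *pp,
					   struct platform_device *pdev)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		pp->va_cfg0_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(pp->va_cfg0_base))
			return PTR_ERR(pp->va_cfg0_base);

		/* assumed: one window serves both cfg0 and cfg1 accesses */
		pp->va_cfg1_base = pp->va_cfg0_base;

		pp->dev = &pdev->dev;
		pp->ops = &ks_pcie_host_ops;	/* hooks from the sketch above */

		return dw_pcie_host_init(pp);
	}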