linux/drivers/pci/setup-res.c

/*
 * drivers/pci/setup-res.c
 *
 * Extruded from code written by
 *	Dave Rusling (david.rusling@reo.mts.dec.com)
 *	David Mosberger (davidm@cs.arizona.edu)
 *	David Miller (davem@redhat.com)
 *
 * Support routines for initializing a PCI subsystem.
 */

/* fixed for multiple pci buses, 1999 Andrea Arcangeli <andrea@suse.de> */

/*
 * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
 *	     Resource sorting
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include "pci.h"
void pci_update_resource(struct pci_dev *dev, int resno)
{
	struct pci_bus_region region;
	bool disable;
	u16 cmd;
	u32 new, check, mask;
	int reg;
	enum pci_bar_type type;
	struct resource *res = dev->resource + resno;

	/*
	 * Ignore resources for unimplemented BARs and unused resource slots
	 * for 64 bit BARs.
	 */
	if (!res->flags)
		return;

	if (res->flags & IORESOURCE_UNSET)
		return;

	/*
	 * Ignore non-moveable resources.  This might be legacy resources for
	 * which no functional BAR register exists or another important
	 * system resource we shouldn't move around.
	 */
	if (res->flags & IORESOURCE_PCI_FIXED)
		return;

	pcibios_resource_to_bus(dev->bus, &region, res);

	new = region.start | (res->flags & PCI_REGION_FLAG_MASK);
	if (res->flags & IORESOURCE_IO)
		mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
	else
		mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;

	reg = pci_resource_bar(dev, resno, &type);
	if (!reg)
		return;
	if (type != pci_bar_unknown) {
		if (!(res->flags & IORESOURCE_ROM_ENABLE))
			return;
		new |= PCI_ROM_ADDRESS_ENABLE;
	}

	/*
	 * We can't update a 64-bit BAR atomically, so when possible,
	 * disable decoding so that a half-updated BAR won't conflict
	 * with another device.
	 */
	disable = (res->flags & IORESOURCE_MEM_64) && !dev->mmio_always_on;
	if (disable) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		pci_write_config_word(dev, PCI_COMMAND,
				      cmd & ~PCI_COMMAND_MEMORY);
	}

	pci_write_config_dword(dev, reg, new);
	pci_read_config_dword(dev, reg, &check);

	if ((new ^ check) & mask) {
		dev_err(&dev->dev, "BAR %d: error updating (%#08x != %#08x)\n",
			resno, new, check);
	}

	if (res->flags & IORESOURCE_MEM_64) {
		new = region.start >> 16 >> 16;
		pci_write_config_dword(dev, reg + 4, new);
		pci_read_config_dword(dev, reg + 4, &check);
		if (check != new) {
			dev_err(&dev->dev, "BAR %d: error updating (high %#08x != %#08x)\n",
				resno, new, check);
		}
	}

	if (disable)
		pci_write_config_word(dev, PCI_COMMAND, cmd);
}
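
/*
 * pci_claim_resource() inserts an already-assigned BAR into the resource
 * tree by requesting it from the matching parent bridge window; it fails if
 * the address is unset, no compatible window exists, or the range conflicts
 * with an existing resource.
 */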
int pci_claim_resource(struct pci_dev *dev, int resource)
{
	struct resource *res = &dev->resource[resource];
	struct resource *root, *conflict;

	if (res->flags & IORESOURCE_UNSET) {
		dev_info(&dev->dev, "can't claim BAR %d %pR: no address assigned\n",
			 resource, res);
		return -EINVAL;
	}

	root = pci_find_parent_resource(dev, res);
	if (!root) {
		dev_info(&dev->dev, "can't claim BAR %d %pR: no compatible bridge window\n",
			 resource, res);
		return -EINVAL;
	}

	conflict = request_resource_conflict(root, res);
	if (conflict) {
		dev_info(&dev->dev, "can't claim BAR %d %pR: address conflict with %s %pR\n",
			 resource, res, conflict->name, conflict);
		return -EBUSY;
	}

	return 0;
}
EXPORT_SYMBOL(pci_claim_resource);

void pci_disable_bridge_window(struct pci_dev *dev)
{
	dev_info(&dev->dev, "disabling bridge mem windows\n");

	/* MMIO Base/Limit */
	pci_write_config_dword(dev, PCI_MEMORY_BASE, 0x0000fff0);

	/* Prefetchable MMIO Base/Limit */
	pci_write_config_dword(dev, PCI_PREF_LIMIT_UPPER32, 0);
	pci_write_config_dword(dev, PCI_PREF_MEMORY_BASE, 0x0000fff0);
	pci_write_config_dword(dev, PCI_PREF_BASE_UPPER32, 0xffffffff);
}

/*
 * Generic function that returns a value indicating that the device's
 * original BIOS BAR address was not saved and so is not available for
 * reinstatement.
 *
 * Can be over-ridden by architecture specific code that implements
 * reinstatement functionality rather than leaving it disabled when
 * normal allocation attempts fail.
 */
resource_size_t __weak pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx)
{
	return 0;
}
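
/*
 * pci_revert_fw_address() is the fallback used when the kernel could not
 * allocate space itself: it retries the allocation at the address the
 * firmware originally programmed (if the architecture saved it), and
 * restores the previous start/end if that range conflicts.
 */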
static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
		int resno, resource_size_t size)
{
	struct resource *root, *conflict;
	resource_size_t fw_addr, start, end;

	fw_addr = pcibios_retrieve_fw_addr(dev, resno);
	if (!fw_addr)
		return -ENOMEM;

	start = res->start;
	end = res->end;
	res->start = fw_addr;
	res->end = res->start + size - 1;

	root = pci_find_parent_resource(dev, res);
	if (!root) {
		if (res->flags & IORESOURCE_IO)
			root = &ioport_resource;
		else
			root = &iomem_resource;
	}

	dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n",
		 resno, res);
	conflict = request_resource_conflict(root, res);
	if (conflict) {
		dev_info(&dev->dev, "BAR %d: %pR conflicts with %s %pR\n",
			 resno, res, conflict->name, conflict);
		res->start = start;
		res->end = end;
		return -EBUSY;
	}

	return 0;
}
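
/*
 * __pci_assign_resource() tries bus windows in decreasing order of
 * preference: an exact prefetchable/64-bit match first, then a 32-bit
 * prefetchable window for 64-bit prefetchable resources, and finally the
 * non-prefetchable window for any remaining memory resource.
 */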
static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
		int resno, resource_size_t size, resource_size_t align)
{
	struct resource *res = dev->resource + resno;
	resource_size_t min;
	int ret;

	min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;

	/*
	 * First, try exact prefetching match.  Even if a 64-bit
	 * prefetchable bridge window is below 4GB, we can't put a 32-bit
	 * prefetchable resource in it because pbus_size_mem() assumes a
	 * 64-bit window will contain no 32-bit resources.  If we assign
	 * things differently than they were sized, not everything will fit.
	 */
	ret = pci_bus_alloc_resource(bus, res, size, align, min,
				     IORESOURCE_PREFETCH | IORESOURCE_MEM_64,
				     pcibios_align_resource, dev);
	if (ret == 0)
		return 0;

	/*
	 * If the prefetchable window is only 32 bits wide, we can put
	 * 64-bit prefetchable resources in it.
	 */
	if ((res->flags & (IORESOURCE_PREFETCH | IORESOURCE_MEM_64)) ==
	     (IORESOURCE_PREFETCH | IORESOURCE_MEM_64)) {
		ret = pci_bus_alloc_resource(bus, res, size, align, min,
					     IORESOURCE_PREFETCH,
					     pcibios_align_resource, dev);
		if (ret == 0)
			return 0;
	}

	/*
	 * If we didn't find a better match, we can put any memory resource
	 * in a non-prefetchable window.  If this resource is 32 bits and
	 * non-prefetchable, the first call already tried the only possibility
	 * so we don't need to try again.
	 */
	if (res->flags & (IORESOURCE_PREFETCH | IORESOURCE_MEM_64))
		ret = pci_bus_alloc_resource(bus, res, size, align, min, 0,
					     pcibios_align_resource, dev);

	return ret;
}
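
/*
 * _pci_assign_resource() retries the allocation on each parent bus as long
 * as the upstream bridge is transparent, so a resource that does not fit in
 * the immediate bus's windows can still be placed higher up the hierarchy.
 */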
static int _pci_assign_resource(struct pci_dev *dev, int resno,
				resource_size_t size, resource_size_t min_align)
{
	struct pci_bus *bus;
	int ret;

	bus = dev->bus;
	while ((ret = __pci_assign_resource(bus, dev, resno, size, min_align))) {
		if (!bus->parent || !bus->self->transparent)
			break;
		bus = bus->parent;
	}

	return ret;
}
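
/*
 * pci_assign_resource() allocates address space for BAR "resno", falling
 * back to the firmware-assigned address if the normal allocation fails,
 * and, for device BARs (resno < PCI_BRIDGE_RESOURCES), writes the result
 * back to the hardware on success.
 */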
int pci_assign_resource(struct pci_dev *dev, int resno)
{
	struct resource *res = dev->resource + resno;
	resource_size_t align, size;
	int ret;

	res->flags |= IORESOURCE_UNSET;
	align = pci_resource_alignment(dev, res);
	if (!align) {
		dev_info(&dev->dev, "BAR %d: can't assign %pR (bogus alignment)\n",
			 resno, res);
		return -EINVAL;
	}

	size = resource_size(res);
	ret = _pci_assign_resource(dev, resno, size, align);

	/*
	 * If we failed to assign anything, let's try the address
	 * where firmware left it.  That at least has a chance of
	 * working, which is better than just leaving it disabled.
	 */
	if (ret < 0) {
		dev_info(&dev->dev, "BAR %d: no space for %pR\n", resno, res);
		ret = pci_revert_fw_address(res, dev, resno, size);
	}

	if (ret < 0) {
		dev_info(&dev->dev, "BAR %d: failed to assign %pR\n", resno,
			 res);
		return ret;
	}

	res->flags &= ~IORESOURCE_UNSET;
	res->flags &= ~IORESOURCE_STARTALIGN;
	dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
	if (resno < PCI_BRIDGE_RESOURCES)
		pci_update_resource(dev, resno);

	return 0;
}
EXPORT_SYMBOL(pci_assign_resource);
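
/*
 * pci_reassign_resource() grows an already-assigned resource by "addsize"
 * bytes using the caller-supplied minimum alignment; on failure the original
 * flags are restored and the resource is left where it was.
 */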
int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsize,
			  resource_size_t min_align)
{
	struct resource *res = dev->resource + resno;
	unsigned long flags;
	resource_size_t new_size;
	int ret;

	flags = res->flags;
	res->flags |= IORESOURCE_UNSET;
	if (!res->parent) {
		dev_info(&dev->dev, "BAR %d: can't reassign an unassigned resource %pR\n",
			 resno, res);
		return -EINVAL;
	}

	/* already aligned with min_align */
	new_size = resource_size(res) + addsize;
	ret = _pci_assign_resource(dev, resno, new_size, min_align);
	if (ret) {
		res->flags = flags;
		dev_info(&dev->dev, "BAR %d: %pR (failed to expand by %#llx)\n",
			 resno, res, (unsigned long long) addsize);
		return ret;
	}

	res->flags &= ~IORESOURCE_UNSET;
	res->flags &= ~IORESOURCE_STARTALIGN;
	dev_info(&dev->dev, "BAR %d: reassigned %pR (expanded by %#llx)\n",
		 resno, res, (unsigned long long) addsize);
	if (resno < PCI_BRIDGE_RESOURCES)
		pci_update_resource(dev, resno);

	return 0;
}
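
/*
 * pci_enable_resources() verifies that every resource selected by "mask"
 * has been assigned and claimed, then turns on I/O and/or memory decoding
 * in the command register as needed.
 */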
int pci_enable_resources(struct pci_dev *dev, int mask)
{
	u16 cmd, old_cmd;
	int i;
	struct resource *r;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	old_cmd = cmd;

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (!(mask & (1 << i)))
			continue;

		r = &dev->resource[i];

		if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
			continue;
		if ((i == PCI_ROM_RESOURCE) &&
				(!(r->flags & IORESOURCE_ROM_ENABLE)))
			continue;

		if (r->flags & IORESOURCE_UNSET) {
			dev_err(&dev->dev, "can't enable device: BAR %d %pR not assigned\n",
				i, r);
			return -EINVAL;
		}

		if (!r->parent) {
			dev_err(&dev->dev, "can't enable device: BAR %d %pR not claimed\n",
				i, r);
			return -EINVAL;
		}

		if (r->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (r->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}

	if (cmd != old_cmd) {
		dev_info(&dev->dev, "enabling device (%04x -> %04x)\n",
			 old_cmd, cmd);
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	return 0;
}