linux/drivers/pci/devres.c
Philipp Stanner 815a3909ea PCI: Move devres code from pci.c to devres.c
The file pci.c is very large and contains a number of devres functions.
These functions should now reside in devres.c.

Move as much devres-specific code from pci.c to devres.c as possible.

There are a few callers left in pci.c that do devres operations. These
should be ported in the future. Add corresponding TODOs.

The reason they are not moved right now in this commit is that PCI's devres
currently implements a sort of "hybrid mode": pci_request_region(), for
instance, does not yet have a corresponding pcim_ equivalent. Instead, the
function can be made managed by calling pcim_enable_device() beforehand
(instead of pci_enable_device()). This makes it unreasonable to move
pci_request_region() to devres.c. Moving such functions would require
changes to PCI's API and is therefore left for future work.
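
For illustration (hypothetical driver code, not part of this patch), the
hybrid mode looks roughly like this:

    rc = pcim_enable_device(pdev);           /* managed enable */
    if (rc)
            return rc;
    rc = pci_request_region(pdev, 0, "foo"); /* now managed, too */

Because pcim_enable_device() marks the device as managed, the region
requested by the seemingly unmanaged pci_request_region() is released
automatically on driver detach.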

In summary, this commit is a preparatory step for a follow-up patch
series that will cleanly separate PCI's managed and unmanaged APIs.

Link: https://lore.kernel.org/r/20240131090023.12331-5-pstanner@redhat.com
Suggested-by: Danilo Krummrich <dakr@redhat.com>
Signed-off-by: Philipp Stanner <pstanner@redhat.com>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
2024-02-12 10:36:17 -06:00

// SPDX-License-Identifier: GPL-2.0
#include <linux/device.h>
#include <linux/pci.h>
#include "pci.h"

/*
 * PCI iomap devres
 */
#define PCIM_IOMAP_MAX	PCI_STD_NUM_BARS

struct pcim_iomap_devres {
	void __iomem *table[PCIM_IOMAP_MAX];
};

static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
{
	struct resource **res = ptr;

	pci_unmap_iospace(*res);
}

/**
 * devm_pci_remap_iospace - Managed pci_remap_iospace()
 * @dev: Generic device to remap IO address for
 * @res: Resource describing the I/O space
 * @phys_addr: physical address of range to be mapped
 *
 * Managed pci_remap_iospace(). Map is automatically unmapped on driver
 * detach.
 */
int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
			   phys_addr_t phys_addr)
{
	const struct resource **ptr;
	int error;

	ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	error = pci_remap_iospace(res, phys_addr);
	if (error) {
		devres_free(ptr);
	} else {
		*ptr = res;
		devres_add(dev, ptr);
	}

	return error;
}
EXPORT_SYMBOL(devm_pci_remap_iospace);
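
/*
 * Usage sketch (hypothetical host-bridge driver code, not part of this
 * file): map an IORESOURCE_IO entry from the bridge's resource list.
 * pci_pio_to_address() translates the I/O port range into the CPU
 * physical address that backs it.
 *
 *	err = devm_pci_remap_iospace(dev, res,
 *				     pci_pio_to_address(res->start));
 *	if (err)
 *		return err;
 */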

/**
 * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem *devm_pci_remap_cfgspace(struct device *dev,
				      resource_size_t offset,
				      resource_size_t size)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = pci_remap_cfgspace(offset, size);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_pci_remap_cfgspace);
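
/*
 * Usage sketch (hypothetical, not part of this file): map a config space
 * window whose physical base and size are already known; "base" and
 * "size" are illustrative.
 *
 *	void __iomem *regs;
 *
 *	regs = devm_pci_remap_cfgspace(dev, base, size);
 *	if (!regs)
 *		return -ENOMEM;
 *
 * If the window comes as a struct resource, devm_pci_remap_cfg_resource()
 * below additionally requests the region.
 */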

/**
 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
 * @dev: generic device to handle the resource for
 * @res: configuration space resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it with the pci_remap_cfgspace() API, which ensures
 * the proper PCI configuration space memory attributes.
 *
 * All operations are managed and will be undone on driver detach.
 *
 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure. Usage example::
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
					  struct resource *res)
{
	resource_size_t size;
	const char *name;
	void __iomem *dest_ptr;

	BUG_ON(!dev);

	if (!res || resource_type(res) != IORESOURCE_MEM) {
		dev_err(dev, "invalid resource\n");
		return IOMEM_ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	if (res->name)
		name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
				      res->name);
	else
		name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
	if (!name)
		return IOMEM_ERR_PTR(-ENOMEM);

	if (!devm_request_mem_region(dev, res->start, size, name)) {
		dev_err(dev, "can't request region for resource %pR\n", res);
		return IOMEM_ERR_PTR(-EBUSY);
	}

	dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
	if (!dest_ptr) {
		dev_err(dev, "ioremap failed for resource %pR\n", res);
		devm_release_mem_region(dev, res->start, size);
		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
	}

	return dest_ptr;
}
EXPORT_SYMBOL(devm_pci_remap_cfg_resource);

/**
 * pcim_set_mwi - a device-managed pci_set_mwi()
 * @dev: the PCI device for which MWI is enabled
 *
 * Managed pci_set_mwi(). @dev must have been enabled with
 * pcim_enable_device() beforehand.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pcim_set_mwi(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (!dr)
		return -ENOMEM;

	dr->mwi = 1;
	return pci_set_mwi(dev);
}
EXPORT_SYMBOL(pcim_set_mwi);
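
/*
 * Usage sketch (hypothetical, not part of this file): enable
 * Memory-Write-Invalidate on a device that was enabled with
 * pcim_enable_device(); many drivers treat failure as non-fatal.
 *
 *	rc = pcim_set_mwi(pdev);
 *	if (rc)
 *		dev_warn(&pdev->dev, "MWI not available\n");
 */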

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pci_devres *this = res;
	int i;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->mwi)
		pci_clear_mwi(dev);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

/*
 * TODO: After the last four callers in pci.c are ported, find_pci_dr()
 * needs to be made static again.
 */
struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device(). The device is automatically disabled on
 * driver detach, unless it has been pinned with pcim_pin_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
EXPORT_SYMBOL(pcim_enable_device);
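
/*
 * Usage sketch (hypothetical driver probe, not part of this file): with
 * the managed enable there is no pci_disable_device() in any error path
 * or in remove(); devres disables the device on detach.
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int rc;
 *
 *		rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		return 0;
 *	}
 */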

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev. Pinned device won't be disabled on
 * driver detach. @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}
EXPORT_SYMBOL(pcim_pin_device);
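
/*
 * Usage sketch (hypothetical, not part of this file): keep the device
 * enabled even after driver detach, e.g. because console or firmware
 * still depends on it.
 *
 *	rc = pcim_enable_device(pdev);
 *	if (rc)
 *		return rc;
 *	pcim_pin_device(pdev);
 */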

static void pcim_iomap_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pcim_iomap_devres *this = res;
	int i;

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (this->table[i])
			pci_iounmap(dev, this->table[i]);
}

/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access the iomap allocation table for @pdev. If the iomap table doesn't
 * exist and @pdev is managed, it will be allocated. All iomaps
 * recorded in the iomap table are automatically unmapped on driver
 * detach.
 *
 * This function might sleep when the table is first allocated, but once
 * the table exists it may be called from any context and is guaranteed
 * to succeed.
 */
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
	if (dr)
		return dr->table;

	new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL,
				   dev_to_node(&pdev->dev));
	if (!new_dr)
		return NULL;
	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
	return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);
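
/*
 * Usage sketch (hypothetical, not part of this file): look up a BAR
 * mapping created earlier with pcim_iomap() or pcim_iomap_regions().
 *
 *	void __iomem * const *iomap;
 *	void __iomem *regs;
 *
 *	iomap = pcim_iomap_table(pdev);
 *	if (!iomap)
 *		return -ENOMEM;
 *	regs = iomap[0];
 */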

/**
 * pcim_iomap - Managed pci_iomap()
 * @pdev: PCI device to iomap for
 * @bar: BAR to iomap
 * @maxlen: Maximum length of iomap (0 to map the complete BAR)
 *
 * Managed pci_iomap(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	void __iomem **tbl;

	BUG_ON(bar >= PCIM_IOMAP_MAX);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
		return NULL;

	tbl[bar] = pci_iomap(pdev, bar, maxlen);
	return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);
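
/*
 * Usage sketch (hypothetical, not part of this file): map all of BAR 0.
 *
 *	void __iomem *regs;
 *
 *	regs = pcim_iomap(pdev, 0, 0);
 *	if (!regs)
 *		return -ENOMEM;
 *
 * Note that pcim_iomap() only maps; to request the region as well, use
 * pcim_iomap_regions() below.
 */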

/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
 */
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	void __iomem **tbl;
	int i;

	pci_iounmap(pdev, addr);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	BUG_ON(!tbl);

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (tbl[i] == addr) {
			tbl[i] = NULL;
			return;
		}
	WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);
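
/*
 * Usage sketch (hypothetical, not part of this file): release a mapping
 * early instead of waiting for driver detach.
 *
 *	pcim_iounmap(pdev, regs);
 */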

/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.
 */
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
	void __iomem * const *iomap;
	int i, rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		rc = -EINVAL;
		len = pci_resource_len(pdev, i);
		if (!len)
			goto err_inval;

		rc = pci_request_region(pdev, i, name);
		if (rc)
			goto err_inval;

		rc = -ENOMEM;
		if (!pcim_iomap(pdev, i, 0))
			goto err_region;
	}

	return 0;

 err_region:
	pci_release_region(pdev, i);
 err_inval:
	while (--i >= 0) {
		if (!(mask & (1 << i)))
			continue;

		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}

	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);
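
/*
 * Usage sketch (hypothetical driver probe, not part of this file):
 * request and map BARs 0 and 2 in one call; DRV_NAME, regs and mmio are
 * illustrative.
 *
 *	rc = pcim_iomap_regions(pdev, BIT(0) | BIT(2), DRV_NAME);
 *	if (rc)
 *		return rc;
 *	regs = pcim_iomap_table(pdev)[0];
 *	mmio = pcim_iomap_table(pdev)[2];
 */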

/**
 * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to iomap
 * @name: Name used when requesting regions
 *
 * Request all PCI BARs and iomap regions specified by @mask.
 */
int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
				   const char *name)
{
	/*
	 * Request the BARs *not* in @mask: ((1 << 6) - 1) covers the six
	 * standard BARs; the BARs in @mask are requested (and mapped) by
	 * pcim_iomap_regions() below.
	 */
	int request_mask = ((1 << 6) - 1) & ~mask;
	int rc;

	rc = pci_request_selected_regions(pdev, request_mask, name);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, mask, name);
	if (rc)
		pci_release_selected_regions(pdev, request_mask);
	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions_request_all);
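
/*
 * Usage sketch (hypothetical, not part of this file): reserve all six
 * standard BARs so nothing else can claim them, but map only BAR 0.
 *
 *	rc = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME);
 *	if (rc)
 *		return rc;
 *	regs = pcim_iomap_table(pdev)[0];
 */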

/**
 * pcim_iounmap_regions - Unmap and release PCI BARs
 * @pdev: PCI device to unmap IO resources for
 * @mask: Mask of BARs to unmap and release
 *
 * Unmap and release regions specified by @mask.
 */
void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
{
	void __iomem * const *iomap;
	int i;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return;

	for (i = 0; i < PCIM_IOMAP_MAX; i++) {
		if (!(mask & (1 << i)))
			continue;

		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}
}
EXPORT_SYMBOL(pcim_iounmap_regions);
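
/*
 * Usage sketch (hypothetical, not part of this file): undo the mapping
 * from the pcim_iomap_regions() sketch above.
 *
 *	pcim_iounmap_regions(pdev, BIT(0) | BIT(2));
 */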