linux/drivers/dma/idxd/init.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/aer.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/intel-svm.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"
#include "perfmon.h"
MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_IMPORT_NS(IDXD);
static bool sva = true;
module_param(sva, bool, 0644);
MODULE_PARM_DESC(sva, "Toggle SVA support on/off");
bool tc_override;
module_param(tc_override, bool, 0644);
MODULE_PARM_DESC(tc_override, "Override traffic class defaults");
#define DRV_NAME "idxd"
bool support_enqcmd;
DEFINE_IDA(idxd_ida);
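/*
 * Per device-type attributes: sysfs name prefix, completion record size
 * and alignment, and the conf_dev device type registered on the dsa bus.
 */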
static struct idxd_driver_data idxd_driver_data[] = {
[IDXD_TYPE_DSA] = {
.name_prefix = "dsa",
.type = IDXD_TYPE_DSA,
.compl_size = sizeof(struct dsa_completion_record),
.align = 32,
.dev_type = &dsa_device_type,
},
[IDXD_TYPE_IAX] = {
.name_prefix = "iax",
.type = IDXD_TYPE_IAX,
.compl_size = sizeof(struct iax_completion_record),
.align = 64,
.dev_type = &iax_device_type,
},
};
static struct pci_device_id idxd_pci_tbl[] = {
/* DSA ver 1.0 platforms */
{ PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },
/* IAX ver 1.0 platforms */
{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
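/*
 * Allocate all MSI-X vectors up front. Vector 0 is the "misc" interrupt
 * for device-level events; the remaining entries back the per-wq
 * completion interrupts and start out with invalid handles and PASIDs.
 */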
static int idxd_setup_interrupts(struct idxd_device *idxd)
{
struct pci_dev *pdev = idxd->pdev;
struct device *dev = &pdev->dev;
struct idxd_irq_entry *ie;
int i, msixcnt;
int rc = 0;
msixcnt = pci_msix_vec_count(pdev);
if (msixcnt < 0) {
dev_err(dev, "Not MSI-X interrupt capable.\n");
return -ENOSPC;
}
idxd->irq_cnt = msixcnt;
rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
if (rc != msixcnt) {
dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
return -ENOSPC;
}
dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
ie = idxd_get_ie(idxd, 0);
ie->vector = pci_irq_vector(pdev, 0);
rc = request_threaded_irq(ie->vector, NULL, idxd_misc_thread, 0, "idxd-misc", ie);
if (rc < 0) {
dev_err(dev, "Failed to allocate misc interrupt.\n");
goto err_misc_irq;
}
dev_dbg(dev, "Requested idxd-misc handler on msix vector %d\n", ie->vector);
for (i = 0; i < idxd->max_wqs; i++) {
int msix_idx = i + 1;
ie = idxd_get_ie(idxd, msix_idx);
ie->id = msix_idx;
ie->int_handle = INVALID_INT_HANDLE;
ie->pasid = INVALID_IOASID;
spin_lock_init(&ie->list_lock);
init_llist_head(&ie->pending_llist);
INIT_LIST_HEAD(&ie->work_list);
}
idxd_unmask_error_interrupts(idxd);
return 0;
err_misc_irq:
idxd_mask_error_interrupts(idxd);
pci_free_irq_vectors(pdev);
dev_err(dev, "No usable interrupts\n");
return rc;
}
static void idxd_cleanup_interrupts(struct idxd_device *idxd)
{
struct pci_dev *pdev = idxd->pdev;
struct idxd_irq_entry *ie;
int msixcnt;
msixcnt = pci_msix_vec_count(pdev);
if (msixcnt <= 0)
return;
ie = idxd_get_ie(idxd, 0);
idxd_mask_error_interrupts(idxd);
free_irq(ie->vector, ie);
pci_free_irq_vectors(pdev);
}
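/*
 * Allocate one idxd_wq per hardware workqueue and set up its conf_dev
 * on the dsa bus. Once device_initialize() has run, the conf_dev
 * refcount owns the allocation, so error paths unwind with put_device()
 * rather than kfree().
 */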
static int idxd_setup_wqs(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
struct idxd_wq *wq;
struct device *conf_dev;
int i, rc;
idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
GFP_KERNEL, dev_to_node(dev));
if (!idxd->wqs)
return -ENOMEM;
for (i = 0; i < idxd->max_wqs; i++) {
wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
if (!wq) {
rc = -ENOMEM;
goto err;
}
idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
conf_dev = wq_confdev(wq);
wq->id = i;
wq->idxd = idxd;
device_initialize(wq_confdev(wq));
conf_dev->parent = idxd_confdev(idxd);
conf_dev->bus = &dsa_bus_type;
conf_dev->type = &idxd_wq_device_type;
rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
if (rc < 0) {
put_device(conf_dev);
goto err;
}
mutex_init(&wq->wq_lock);
init_waitqueue_head(&wq->err_queue);
init_completion(&wq->wq_dead);
/*
 * wq_resurrect completes once revoked interrupt handles have been
 * replaced (e.g. after guest VM migration, when the device signals that
 * interrupt handles must be refreshed); submitters that fail to take
 * the wq percpu_ref wait on it before resubmitting.
 */
init_completion(&wq->wq_resurrect);
wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
if (!wq->wqcfg) {
put_device(conf_dev);
rc = -ENOMEM;
goto err;
}
idxd->wqs[i] = wq;
}
return 0;
err:
while (--i >= 0) {
wq = idxd->wqs[i];
conf_dev = wq_confdev(wq);
put_device(conf_dev);
}
return rc;
}
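/* Engines and groups follow the same conf_dev pattern as the wqs above. */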
static int idxd_setup_engines(struct idxd_device *idxd)
{
struct idxd_engine *engine;
struct device *dev = &idxd->pdev->dev;
struct device *conf_dev;
int i, rc;
idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
GFP_KERNEL, dev_to_node(dev));
if (!idxd->engines)
return -ENOMEM;
for (i = 0; i < idxd->max_engines; i++) {
engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
if (!engine) {
rc = -ENOMEM;
goto err;
}
idxd_dev_set_type(&engine->idxd_dev, IDXD_DEV_ENGINE);
conf_dev = engine_confdev(engine);
engine->id = i;
engine->idxd = idxd;
device_initialize(conf_dev);
conf_dev->parent = idxd_confdev(idxd);
conf_dev->bus = &dsa_bus_type;
conf_dev->type = &idxd_engine_device_type;
rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
if (rc < 0) {
put_device(conf_dev);
goto err;
}
idxd->engines[i] = engine;
}
return 0;
err:
while (--i >= 0) {
engine = idxd->engines[i];
conf_dev = engine_confdev(engine);
put_device(conf_dev);
}
return rc;
}
static int idxd_setup_groups(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
struct device *conf_dev;
struct idxd_group *group;
int i, rc;
idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
GFP_KERNEL, dev_to_node(dev));
if (!idxd->groups)
return -ENOMEM;
for (i = 0; i < idxd->max_groups; i++) {
group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
if (!group) {
rc = -ENOMEM;
goto err;
}
idxd_dev_set_type(&group->idxd_dev, IDXD_DEV_GROUP);
conf_dev = group_confdev(group);
group->id = i;
group->idxd = idxd;
device_initialize(conf_dev);
conf_dev->parent = idxd_confdev(idxd);
conf_dev->bus = &dsa_bus_type;
conf_dev->type = &idxd_group_device_type;
rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
if (rc < 0) {
put_device(conf_dev);
goto err;
}
idxd->groups[i] = group;
if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
group->tc_a = 1;
group->tc_b = 1;
} else {
group->tc_a = -1;
group->tc_b = -1;
}
}
return 0;
err:
while (--i >= 0) {
group = idxd->groups[i];
put_device(group_confdev(group));
}
return rc;
}
static void idxd_cleanup_internals(struct idxd_device *idxd)
{
int i;
for (i = 0; i < idxd->max_groups; i++)
put_device(group_confdev(idxd->groups[i]));
for (i = 0; i < idxd->max_engines; i++)
put_device(engine_confdev(idxd->engines[i]));
for (i = 0; i < idxd->max_wqs; i++)
put_device(wq_confdev(idxd->wqs[i]));
destroy_workqueue(idxd->wq);
}
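/*
 * Setup order is wqs, engines, groups, then the command workqueue; the
 * error labels below unwind the same steps in reverse.
 */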
static int idxd_setup_internals(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
int rc, i;
init_waitqueue_head(&idxd->cmd_waitq);
rc = idxd_setup_wqs(idxd);
if (rc < 0)
goto err_wqs;
rc = idxd_setup_engines(idxd);
if (rc < 0)
goto err_engine;
rc = idxd_setup_groups(idxd);
if (rc < 0)
goto err_group;
idxd->wq = create_workqueue(dev_name(dev));
if (!idxd->wq) {
rc = -ENOMEM;
goto err_wkq_create;
}
return 0;
err_wkq_create:
for (i = 0; i < idxd->max_groups; i++)
put_device(group_confdev(idxd->groups[i]));
err_group:
for (i = 0; i < idxd->max_engines; i++)
put_device(engine_confdev(idxd->engines[i]));
err_engine:
for (i = 0; i < idxd->max_wqs; i++)
put_device(wq_confdev(idxd->wqs[i]));
err_wqs:
return rc;
}
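/*
 * The offsets register is read as two 64-bit halves; each table offset
 * is stored in IDXD_TABLE_MULT units and scaled to a byte offset here.
 */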
static void idxd_read_table_offsets(struct idxd_device *idxd)
{
union offsets_reg offsets;
struct device *dev = &idxd->pdev->dev;
offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}
static void idxd_read_caps(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
int i;
/* reading generic capabilities */
idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);
if (idxd->hw.gen_cap.cmd_cap) {
idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET);
dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
}
/* check whether the device supports requesting interrupt handles */
if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE))
idxd->request_int_handles = true;
idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
if (idxd->hw.gen_cap.config_en)
set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);
/* reading group capabilities */
idxd->hw.group_cap.bits =
ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
idxd->max_groups = idxd->hw.group_cap.num_groups;
dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
idxd->max_rdbufs = idxd->hw.group_cap.total_rdbufs;
dev_dbg(dev, "max read buffers: %u\n", idxd->max_rdbufs);
idxd->nr_rdbufs = idxd->max_rdbufs;
/* read engine capabilities */
idxd->hw.engine_cap.bits =
ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
idxd->max_engines = idxd->hw.engine_cap.num_engines;
dev_dbg(dev, "max engines: %u\n", idxd->max_engines);
/* read workqueue capabilities */
idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);
/* reading operation capabilities */
for (i = 0; i < 4; i++) {
idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
IDXD_OPCAP_OFFSET + i * sizeof(u64));
dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
}
}
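/*
 * Allocate the idxd context on the device's NUMA node. After
 * device_initialize(), the conf_dev release callback owns the memory,
 * so later failures drop it with put_device().
 */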
static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
struct device *dev = &pdev->dev;
struct device *conf_dev;
struct idxd_device *idxd;
int rc;
idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
if (!idxd)
return NULL;
conf_dev = idxd_confdev(idxd);
idxd->pdev = pdev;
idxd->data = data;
idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
if (idxd->id < 0) {
/* conf_dev is not device_initialize()d yet, so plain kfree is safe */
kfree(idxd);
return NULL;
}
device_initialize(conf_dev);
conf_dev->parent = dev;
conf_dev->bus = &dsa_bus_type;
conf_dev->type = idxd->data->dev_type;
rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
if (rc < 0) {
put_device(conf_dev);
return NULL;
}
spin_lock_init(&idxd->dev_lock);
spin_lock_init(&idxd->cmd_lock);
return idxd;
}
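/*
 * Bind the device to a supervisor-mode PASID so kernel-owned wqs can
 * submit descriptors with a valid PASID when SVA is enabled.
 */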
static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
int flags;
unsigned int pasid;
struct iommu_sva *sva;
flags = SVM_FLAG_SUPERVISOR_MODE;
sva = iommu_sva_bind_device(&idxd->pdev->dev, NULL, &flags);
if (IS_ERR(sva)) {
dev_warn(&idxd->pdev->dev,
"iommu sva bind failed: %ld\n", PTR_ERR(sva));
return PTR_ERR(sva);
}
pasid = iommu_sva_get_pasid(sva);
if (pasid == IOMMU_PASID_INVALID) {
iommu_sva_unbind_device(sva);
return -ENODEV;
}
idxd->sva = sva;
idxd->pasid = pasid;
dev_dbg(&idxd->pdev->dev, "system pasid: %u\n", pasid);
return 0;
}
static void idxd_disable_system_pasid(struct idxd_device *idxd)
{
iommu_sva_unbind_device(idxd->sva);
idxd->sva = NULL;
}
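/*
 * Device-side probe: reset the device, optionally enable SVA and a
 * system PASID, read capabilities and table offsets, allocate internal
 * structures, load the device config if it is read-only, and set up
 * interrupts before initializing perfmon.
 */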
static int idxd_probe(struct idxd_device *idxd)
{
struct pci_dev *pdev = idxd->pdev;
struct device *dev = &pdev->dev;
int rc;
dev_dbg(dev, "%s entered and resetting device\n", __func__);
rc = idxd_device_init_reset(idxd);
if (rc < 0)
return rc;
dev_dbg(dev, "IDXD reset complete\n");
if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
rc = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
if (rc == 0) {
rc = idxd_enable_system_pasid(idxd);
if (rc < 0) {
iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
} else {
set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
}
} else {
dev_warn(dev, "Unable to turn on SVA feature.\n");
}
} else if (!sva) {
dev_warn(dev, "User forced SVA off via module param.\n");
}
idxd_read_caps(idxd);
idxd_read_table_offsets(idxd);
rc = idxd_setup_internals(idxd);
if (rc)
goto err;
/* If the configs are readonly, then load them from device */
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
dev_dbg(dev, "Loading RO device config\n");
rc = idxd_device_load_config(idxd);
if (rc < 0)
goto err_config;
}
rc = idxd_setup_interrupts(idxd);
if (rc)
goto err_config;
idxd->major = idxd_cdev_get_major(idxd);
rc = perfmon_pmu_init(idxd);
if (rc < 0)
dev_warn(dev, "Failed to initialize perfmon. No PMU support: %d\n", rc);
dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
return 0;
err_config:
idxd_cleanup_internals(idxd);
err:
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
return rc;
}
static void idxd_cleanup(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
perfmon_pmu_remove(idxd);
idxd_cleanup_interrupts(idxd);
idxd_cleanup_internals(idxd);
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
}
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
struct idxd_device *idxd;
struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
int rc;
rc = pci_enable_device(pdev);
if (rc)
return rc;
dev_dbg(dev, "Alloc IDXD context\n");
idxd = idxd_alloc(pdev, data);
if (!idxd) {
rc = -ENOMEM;
goto err_idxd_alloc;
}
dev_dbg(dev, "Mapping BARs\n");
idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
if (!idxd->reg_base) {
rc = -ENOMEM;
goto err_iomap;
}
dev_dbg(dev, "Set DMA masks\n");
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc)
goto err;
dev_dbg(dev, "Set PCI master\n");
pci_set_master(pdev);
pci_set_drvdata(pdev, idxd);
idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
rc = idxd_probe(idxd);
if (rc) {
dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
goto err;
}
rc = idxd_register_devices(idxd);
if (rc) {
dev_err(dev, "IDXD sysfs setup failed\n");
goto err_dev_register;
}
dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
idxd->hw.version);
return 0;
err_dev_register:
idxd_cleanup(idxd);
err:
pci_iounmap(pdev, idxd->reg_base);
err_iomap:
put_device(idxd_confdev(idxd));
err_idxd_alloc:
pci_disable_device(pdev);
return rc;
}
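/* Quiesce all enabled kernel-type wqs so in-flight kernel submissions drain. */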
void idxd_wqs_quiesce(struct idxd_device *idxd)
{
struct idxd_wq *wq;
int i;
for (i = 0; i < idxd->max_wqs; i++) {
wq = idxd->wqs[i];
if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL)
idxd_wq_quiesce(wq);
}
}
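/*
 * Disable the device, wait out the misc interrupt handler, mask error
 * interrupts, and flush any work items still pending on idxd->wq.
 */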
static void idxd_shutdown(struct pci_dev *pdev)
{
struct idxd_device *idxd = pci_get_drvdata(pdev);
struct idxd_irq_entry *irq_entry;
int rc;
rc = idxd_device_disable(idxd);
if (rc)
dev_err(&pdev->dev, "Disabling device failed\n");
irq_entry = &idxd->ie;
synchronize_irq(irq_entry->vector);
idxd_mask_error_interrupts(idxd);
flush_workqueue(idxd->wq);
}
static void idxd_remove(struct pci_dev *pdev)
{
struct idxd_device *idxd = pci_get_drvdata(pdev);
struct idxd_irq_entry *irq_entry;
idxd_unregister_devices(idxd);
/*
* When ->release() is called for the idxd->conf_dev, it frees all the memory related
* to the idxd context. The driver still needs those bits in order to do the rest of
* the cleanup. However, the idxd sub-drivers do need to be unbound. So take a ref
* on the device here to hold off the freeing while allowing the idxd sub-driver
* to unbind.
*/
get_device(idxd_confdev(idxd));
device_unregister(idxd_confdev(idxd));
idxd_shutdown(pdev);
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
irq_entry = idxd_get_ie(idxd, 0);
free_irq(irq_entry->vector, irq_entry);
pci_free_irq_vectors(pdev);
pci_iounmap(pdev, idxd->reg_base);
iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
pci_disable_device(pdev);
destroy_workqueue(idxd->wq);
perfmon_pmu_remove(idxd);
put_device(idxd_confdev(idxd));
}
static struct pci_driver idxd_pci_driver = {
.name = DRV_NAME,
.id_table = idxd_pci_tbl,
.probe = idxd_pci_probe,
.remove = idxd_remove,
.shutdown = idxd_shutdown,
};
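/*
 * The dsa bus sub-drivers are registered before the PCI driver; on
 * failure the error labels unwind the registrations in reverse order.
 */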
static int __init idxd_init_module(void)
{
int err;
/*
* If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
* enumerating the device. We cannot utilize it.
*/
if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
pr_warn("idxd driver failed to load without MOVDIR64B.\n");
return -ENODEV;
}
if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
pr_warn("Platform does not have ENQCMD(S) support.\n");
else
support_enqcmd = true;
perfmon_init();
err = idxd_driver_register(&idxd_drv);
if (err < 0)
goto err_idxd_driver_register;
/*
 * Each device type on the dsa bus (device, dmaengine wq, user wq) has
 * its own visible bus driver rather than one implicit omnibus driver.
 */
err = idxd_driver_register(&idxd_dmaengine_drv);
if (err < 0)
goto err_idxd_dmaengine_driver_register;
err = idxd_driver_register(&idxd_user_drv);
if (err < 0)
goto err_idxd_user_driver_register;
err = idxd_cdev_register();
if (err)
goto err_cdev_register;
err = pci_register_driver(&idxd_pci_driver);
if (err)
goto err_pci_register;
return 0;
err_pci_register:
idxd_cdev_remove();
err_cdev_register:
idxd_driver_unregister(&idxd_user_drv);
err_idxd_user_driver_register:
idxd_driver_unregister(&idxd_dmaengine_drv);
err_idxd_dmaengine_driver_register:
idxd_driver_unregister(&idxd_drv);
err_idxd_driver_register:
return err;
}
module_init(idxd_init_module);
static void __exit idxd_exit_module(void)
{
idxd_driver_unregister(&idxd_user_drv);
idxd_driver_unregister(&idxd_dmaengine_drv);
idxd_driver_unregister(&idxd_drv);
pci_unregister_driver(&idxd_pci_driver);
idxd_cdev_remove();
perfmon_exit();
}
module_exit(idxd_exit_module);