/*
 * Copyright © 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse
 */

#include <linux/intel-iommu.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/intel-svm.h>
#include <linux/rculist.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/dmar.h>
#include <linux/interrupt.h>

static irqreturn_t prq_event_thread(int irq, void *d);

struct pasid_entry {
	u64 val;
};

struct pasid_state_entry {
	u64 val;
};

int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
{
	struct page *pages;
	int order;

	order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT;
	if (order < 0)
		order = 0;

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!pages) {
		pr_warn("IOMMU: %s: Failed to allocate PASID table\n",
			iommu->name);
		return -ENOMEM;
	}
	iommu->pasid_table = page_address(pages);
	pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order);

	if (ecap_dis(iommu->ecap)) {
		pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
		if (pages)
			iommu->pasid_state_table = page_address(pages);
		else
			pr_warn("IOMMU: %s: Failed to allocate PASID state table\n",
				iommu->name);
	}

	idr_init(&iommu->pasid_idr);

	return 0;
}

int intel_svm_free_pasid_tables(struct intel_iommu *iommu)
{
	int order;

	order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT;
	if (order < 0)
		order = 0;

	if (iommu->pasid_table) {
		free_pages((unsigned long)iommu->pasid_table, order);
		iommu->pasid_table = NULL;
	}
	if (iommu->pasid_state_table) {
		free_pages((unsigned long)iommu->pasid_state_table, order);
		iommu->pasid_state_table = NULL;
	}
	idr_destroy(&iommu->pasid_idr);
	return 0;
}

#define PRQ_ORDER 0

int intel_svm_enable_prq(struct intel_iommu *iommu)
{
	struct page *pages;
	int irq, ret;

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
	if (!pages) {
		pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
			iommu->name);
		return -ENOMEM;
	}
	iommu->prq = page_address(pages);

	irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id, iommu->node, iommu);
	if (irq <= 0) {
		pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
		       iommu->name);
		ret = -EINVAL;
	err:
		free_pages((unsigned long)iommu->prq, PRQ_ORDER);
		iommu->prq = NULL;
		return ret;
	}
	iommu->pr_irq = irq;

	snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);

	ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
				   iommu->prq_name, iommu);
	if (ret) {
		pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
		       iommu->name);
		dmar_free_hwirq(irq);
		goto err;
	}
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);

	return 0;
}

int intel_svm_finish_prq(struct intel_iommu *iommu)
{
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);

	free_irq(iommu->pr_irq, iommu);
	dmar_free_hwirq(iommu->pr_irq);
	iommu->pr_irq = 0;

	free_pages((unsigned long)iommu->prq, PRQ_ORDER);
	iommu->prq = NULL;

	return 0;
}
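
/*
 * Illustrative pairing of the setup/teardown helpers above (a sketch, not
 * code from this file): the DMA remapping initialisation code is expected
 * to call them roughly as below for each IOMMU that supports PASIDs and
 * page requests.  The exact call sites live in intel-iommu.c, and the
 * capability predicates used here ("supports_pasid()"/"supports_prq()")
 * are placeholder names, not real functions.
 *
 *	if (supports_pasid(iommu))
 *		intel_svm_alloc_pasid_tables(iommu);
 *	if (supports_pasid(iommu) && supports_prq(iommu))
 *		intel_svm_enable_prq(iommu);
 *	...
 *	if (iommu->prq)
 *		intel_svm_finish_prq(iommu);
 *	if (iommu->pasid_table)
 *		intel_svm_free_pasid_tables(iommu);
 */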

static void intel_flush_svm_range_dev(struct intel_svm *svm, struct intel_svm_dev *sdev,
				      unsigned long address, int pages, int ih)
{
	struct qi_desc desc;
	int mask = ilog2(__roundup_pow_of_two(pages));

	if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap) ||
	    mask > cap_max_amask_val(svm->iommu->cap)) {
		desc.low = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) |
			QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | QI_EIOTLB_TYPE;
		desc.high = 0;
	} else {
		desc.low = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) |
			QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) | QI_EIOTLB_TYPE;
		desc.high = QI_EIOTLB_ADDR(address) | QI_EIOTLB_GL(1) |
			QI_EIOTLB_IH(ih) | QI_EIOTLB_AM(mask);
	}

	qi_submit_sync(&desc, svm->iommu);

	if (sdev->dev_iotlb) {
		desc.low = QI_DEV_EIOTLB_PASID(svm->pasid) | QI_DEV_EIOTLB_SID(sdev->sid) |
			QI_DEV_EIOTLB_QDEP(sdev->qdep) | QI_DEIOTLB_TYPE;
		if (mask) {
			unsigned long adr, delta;

			/* Least significant zero bits in the address indicate the
			 * range of the request. So mask them out according to the
			 * size. */
			adr = address & ((1 << (VTD_PAGE_SHIFT + mask)) - 1);

			/* Now ensure that we round down further if the original
			 * request was not aligned w.r.t. its size */
			delta = address - adr;
			if (delta + (pages << VTD_PAGE_SHIFT) >= (1 << (VTD_PAGE_SHIFT + mask)))
				adr &= ~(1 << (VTD_PAGE_SHIFT + mask));

			desc.high = QI_DEV_EIOTLB_ADDR(adr) | QI_DEV_EIOTLB_SIZE;
		} else {
			desc.high = QI_DEV_EIOTLB_ADDR(address);
		}
		qi_submit_sync(&desc, svm->iommu);
	}
}
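
/*
 * Worked example for the mask computation above (illustrative only): a
 * request for pages = 5 gives __roundup_pow_of_two(5) = 8 and thus
 * mask = 3, so the page-selective invalidation covers 2^3 = 8 pages
 * (32KiB with 4KiB VT-d pages).  If that mask exceeds what the hardware
 * advertises via cap_max_amask_val(), or page-selective invalidation is
 * not supported at all, the code above falls back to the non-global
 * flush of the whole PASID.
 */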

static void intel_flush_svm_range(struct intel_svm *svm,
				  unsigned long address, int pages, int ih)
{
	struct intel_svm_dev *sdev;

	/* Try deferred invalidate if available */
	if (svm->iommu->pasid_state_table &&
	    !cmpxchg64(&svm->iommu->pasid_state_table[svm->pasid].val, 0, 1ULL << 63))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(sdev, &svm->devs, list)
		intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
	rcu_read_unlock();
}

static void intel_change_pte(struct mmu_notifier *mn, struct mm_struct *mm,
			     unsigned long address, pte_t pte)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);

	intel_flush_svm_range(svm, address, 1, 1);
}

static void intel_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm,
				  unsigned long address)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);

	intel_flush_svm_range(svm, address, 1, 1);
}

/* Pages have been freed at this point */
static void intel_invalidate_range(struct mmu_notifier *mn,
				   struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);

	intel_flush_svm_range(svm, start,
			      (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
}

static void intel_flush_pasid_dev(struct intel_svm *svm, struct intel_svm_dev *sdev)
{
	struct qi_desc desc;

	desc.high = 0;
	desc.low = QI_PC_TYPE | QI_PC_DID(sdev->did) | QI_PC_PASID_SEL | QI_PC_PASID(svm->pasid);

	qi_submit_sync(&desc, svm->iommu);
}

static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);

	svm->iommu->pasid_table[svm->pasid].val = 0;

	/* There's no need to do any flush because we can't get here if there
	 * are any devices left anyway. */
	WARN_ON(!list_empty(&svm->devs));
}

static const struct mmu_notifier_ops intel_mmuops = {
	.release = intel_mm_release,
	.change_pte = intel_change_pte,
	.invalidate_page = intel_invalidate_page,
	.invalidate_range = intel_invalidate_range,
};

static DEFINE_MUTEX(pasid_mutex);

int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
{
	struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
	struct intel_svm_dev *sdev;
	struct intel_svm *svm = NULL;
	int pasid_max;
	int ret;

	BUG_ON(pasid && !current->mm);

	if (WARN_ON(!iommu))
		return -EINVAL;

	if (dev_is_pci(dev)) {
		pasid_max = pci_max_pasids(to_pci_dev(dev));
		if (pasid_max < 0)
			return -EINVAL;
	} else
		pasid_max = 1 << 20;

	mutex_lock(&pasid_mutex);
	if (pasid) {
		int i;

		idr_for_each_entry(&iommu->pasid_idr, svm, i) {
			if (svm->mm != current->mm)
				continue;

			if (svm->pasid >= pasid_max) {
				dev_warn(dev,
					 "Limited PASID width. Cannot use existing PASID %d\n",
					 svm->pasid);
				ret = -ENOSPC;
				goto out;
			}

			list_for_each_entry(sdev, &svm->devs, list) {
				if (dev == sdev->dev) {
					if (sdev->ops != ops) {
						ret = -EBUSY;
						goto out;
					}
					sdev->users++;
					goto success;
				}
			}

			break;
		}
	}

	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
	if (!sdev) {
		ret = -ENOMEM;
		goto out;
	}
	sdev->dev = dev;

	ret = intel_iommu_enable_pasid(iommu, sdev);
	if (ret || !pasid) {
		/* If they don't actually want to assign a PASID, this is
		 * just an enabling check/preparation. */
		kfree(sdev);
		goto out;
	}
	/* Finish the setup now we know we're keeping it */
	sdev->users = 1;
	sdev->ops = ops;
	init_rcu_head(&sdev->rcu);

	if (!svm) {
		svm = kzalloc(sizeof(*svm), GFP_KERNEL);
		if (!svm) {
			ret = -ENOMEM;
			kfree(sdev);
			goto out;
		}
		svm->iommu = iommu;

		if (pasid_max > 2 << ecap_pss(iommu->ecap))
			pasid_max = 2 << ecap_pss(iommu->ecap);

		ret = idr_alloc(&iommu->pasid_idr, svm, 0, pasid_max - 1, GFP_KERNEL);
		if (ret < 0) {
			kfree(svm);
			goto out;
		}
		svm->pasid = ret;
		svm->notifier.ops = &intel_mmuops;
		svm->mm = get_task_mm(current);
		INIT_LIST_HEAD_RCU(&svm->devs);
		ret = -ENOMEM;
		if (!svm->mm || (ret = mmu_notifier_register(&svm->notifier, svm->mm))) {
			idr_remove(&svm->iommu->pasid_idr, svm->pasid);
			kfree(svm);
			kfree(sdev);
			goto out;
		}
		iommu->pasid_table[svm->pasid].val = (u64)__pa(svm->mm->pgd) | 1;
		wmb();
	}
	list_add_rcu(&sdev->list, &svm->devs);

 success:
	*pasid = svm->pasid;
	ret = 0;
 out:
	mutex_unlock(&pasid_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(intel_svm_bind_mm);
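
/*
 * Usage sketch for the exported API (an illustration under assumptions,
 * not code from this driver): a device driver that wants to share the
 * current process's address space with its device might do something
 * like the following.  my_fault_cb(), my_svm_ops and the way the PASID
 * is programmed into the device are hypothetical; the callback argument
 * order matches the call made from prq_event_thread() below.
 *
 *	static void my_fault_cb(struct device *dev, int pasid, u64 addr,
 *				u32 private, int rwxp, int result)
 *	{
 *		dev_dbg(dev, "page request at %llx completed: %d\n", addr, result);
 *	}
 *
 *	static struct svm_dev_ops my_svm_ops = { .fault_cb = my_fault_cb };
 *
 *	int pasid;
 *	if (!intel_svm_bind_mm(dev, &pasid, 0, &my_svm_ops)) {
 *		// program 'pasid' into the device and start PASID-tagged DMA
 *		...
 *		intel_svm_unbind_mm(dev, pasid);
 *	}
 */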

int intel_svm_unbind_mm(struct device *dev, int pasid)
{
	struct intel_svm_dev *sdev;
	struct intel_iommu *iommu;
	struct intel_svm *svm;
	int ret = -EINVAL;

	mutex_lock(&pasid_mutex);
	iommu = intel_svm_device_to_iommu(dev);
	if (!iommu || !iommu->pasid_table)
		goto out;

	svm = idr_find(&iommu->pasid_idr, pasid);
	if (!svm)
		goto out;

	list_for_each_entry(sdev, &svm->devs, list) {
		if (dev == sdev->dev) {
			ret = 0;
			sdev->users--;
			if (!sdev->users) {
				list_del_rcu(&sdev->list);
				/* Flush the PASID cache and IOTLB for this device.
				 * Note that we do depend on the hardware *not* using
				 * the PASID any more. Just as we depend on other
				 * devices never using PASIDs that they have no right
				 * to use. We have a *shared* PASID table, because it's
				 * large and has to be physically contiguous. So it's
				 * hard to be as defensive as we might like. */
				intel_flush_pasid_dev(svm, sdev);
				intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
				kfree_rcu(sdev, rcu);

				if (list_empty(&svm->devs)) {
					mmu_notifier_unregister(&svm->notifier, svm->mm);

					idr_remove(&svm->iommu->pasid_idr, svm->pasid);
					mmput(svm->mm);
					/* We mandate that no page faults may be outstanding
					 * for the PASID when intel_svm_unbind_mm() is called.
					 * If that is not obeyed, subtle errors will happen.
					 * Let's make them less subtle... */
					memset(svm, 0x6b, sizeof(*svm));
					kfree(svm);
				}
			}
			break;
		}
	}
 out:
	mutex_unlock(&pasid_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(intel_svm_unbind_mm);
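
/*
 * The remainder of this file handles the page request queue.  Sizing note,
 * derived from the definitions below: each descriptor is two u64s (16
 * bytes), so with PRQ_ORDER 0 the ring occupies a single 4KiB page and
 * holds 256 requests.  PRQ_RING_MASK converts the byte offsets read from
 * DMAR_PQH_REG/DMAR_PQT_REG into positions within that ring.
 */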

/* Page request queue descriptor */
struct page_req_dsc {
	u64 srr:1;
	u64 bof:1;
	u64 pasid_present:1;
	u64 lpig:1;
	u64 pasid:20;
	u64 bus:8;
	u64 private:23;
	u64 prg_index:9;
	u64 rd_req:1;
	u64 wr_req:1;
	u64 exe_req:1;
	u64 priv_req:1;
	u64 devfn:8;
	u64 addr:52;
};

#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10)

static irqreturn_t prq_event_thread(int irq, void *d)
{
	struct intel_iommu *iommu = d;
	struct intel_svm *svm = NULL;
	int head, tail, handled = 0;

	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
	while (head != tail) {
		struct intel_svm_dev *sdev;
		struct vm_area_struct *vma;
		struct page_req_dsc *req;
		struct qi_desc resp;
		int ret, result;
		u64 address;

		handled = 1;

		req = &iommu->prq[head / sizeof(*req)];

		result = QI_RESP_FAILURE;
		address = req->addr << PAGE_SHIFT;
		if (!req->pasid_present) {
			pr_err("%s: Page request without PASID: %08llx %08llx\n",
			       iommu->name, ((unsigned long long *)req)[0],
			       ((unsigned long long *)req)[1]);
			goto bad_req;
		}

		if (!svm || svm->pasid != req->pasid) {
			rcu_read_lock();
			svm = idr_find(&iommu->pasid_idr, req->pasid);
			/* It *can't* go away, because the driver is not permitted
			 * to unbind the mm while any page faults are outstanding.
			 * So we only need RCU to protect the internal idr code. */
			rcu_read_unlock();

			if (!svm) {
				pr_err("%s: Page request for invalid PASID %d: %08llx %08llx\n",
				       iommu->name, req->pasid,
				       ((unsigned long long *)req)[0],
				       ((unsigned long long *)req)[1]);
				goto bad_req;
			}
		}

		result = QI_RESP_INVALID;
		down_read(&svm->mm->mmap_sem);
		vma = find_extend_vma(svm->mm, address);
		if (!vma || address < vma->vm_start)
			goto invalid;

		ret = handle_mm_fault(svm->mm, vma, address,
				      req->wr_req ? FAULT_FLAG_WRITE : 0);
		if (ret & VM_FAULT_ERROR)
			goto invalid;

		result = QI_RESP_SUCCESS;
	invalid:
		up_read(&svm->mm->mmap_sem);
	bad_req:
		/* Accounting for major/minor faults? */
		rcu_read_lock();
		list_for_each_entry_rcu(sdev, &svm->devs, list) {
			if (sdev->sid == PCI_DEVID(req->bus, req->devfn))
				break;
		}
		/* Other devices can go away, but the drivers are not permitted
		 * to unbind while any page faults might be in flight. So it's
		 * OK to drop the 'lock' here now we have it. */
		rcu_read_unlock();

		if (WARN_ON(&sdev->list == &svm->devs))
			sdev = NULL;

		if (sdev && sdev->ops && sdev->ops->fault_cb) {
			int rwxp = (req->rd_req << 3) | (req->wr_req << 2) |
				(req->exe_req << 1) | (req->priv_req);
			sdev->ops->fault_cb(sdev->dev, req->pasid, req->addr,
					    req->private, rwxp, result);
		}

		if (req->lpig) {
			/* Page Group Response */
			resp.low = QI_PGRP_PASID(req->pasid) |
				QI_PGRP_DID((req->bus << 8) | req->devfn) |
				QI_PGRP_PASID_P(req->pasid_present) |
				QI_PGRP_RESP_TYPE;
			resp.high = QI_PGRP_IDX(req->prg_index) |
				QI_PGRP_PRIV(req->private) | QI_PGRP_RESP_CODE(result);
			qi_submit_sync(&resp, svm->iommu);
		} else if (req->srr) {
			/* Page Stream Response */
			resp.low = QI_PSTRM_IDX(req->prg_index) |
				QI_PSTRM_PRIV(req->private) | QI_PSTRM_BUS(req->bus) |
				QI_PSTRM_PASID(req->pasid) | QI_PSTRM_RESP_TYPE;
			resp.high = QI_PSTRM_ADDR(address) | QI_PSTRM_DEVFN(req->devfn) |
				QI_PSTRM_RESP_CODE(result);
			qi_submit_sync(&resp, svm->iommu);
		}

		head = (head + sizeof(*req)) & PRQ_RING_MASK;
	}

	dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);

	return IRQ_RETVAL(handled);
}
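
/*
 * Summary of the fault path above: a PASID-capable device posts a page
 * request descriptor into the ring programmed by intel_svm_enable_prq();
 * prq_event_thread() looks the PASID up, resolves the faulting address
 * against the bound mm with handle_mm_fault(), optionally notifies the
 * owning driver through svm_dev_ops->fault_cb() (rwxp packs the
 * rd/wr/exe/priv request bits), and reports QI_RESP_SUCCESS,
 * QI_RESP_INVALID or QI_RESP_FAILURE back to the device via the
 * invalidation queue.
 */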