Intel IOMMU Suspend/Resume Support - Queued Invalidation
This patch supports queued invalidation suspend/resume.

Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
parent f59c7b69bc
commit eb4a52bc66
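The diff below factors the queue-enable register programming out of dmar_enable_qi() into a new helper, __dmar_enable_qi(), and adds dmar_reenable_qi() as the entry point a resume path can use without re-allocating the invalidation queue. As a rough illustration of the intended use (not part of this commit), a resume handler might walk the DRHD units and re-enable QI on each one; the helper name, header choices and error handling in this sketch are assumptions, not code from the patch:

/* Sketch only -- assumes the Intel IOMMU driver's usual headers. */
#include <linux/dmar.h>
#include <linux/intel-iommu.h>

/*
 * Hypothetical resume-side helper: after the platform wakes up, walk
 * every DRHD unit and reprogram its queued-invalidation interface.
 */
static int resume_queued_invalidation(void)
{
        struct dmar_drhd_unit *drhd;
        int ret;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                /* Skip units where QI was never set up. */
                if (!iommu || !iommu->qi)
                        continue;

                /*
                 * dmar_reenable_qi() (added by this patch) disables QI,
                 * resets the software queue indices and rewrites
                 * DMAR_IQA/DMAR_IQT before setting DMA_GCMD_QIE again.
                 */
                ret = dmar_reenable_qi(iommu);
                if (ret)
                        return ret;
        }

        return 0;
}

Because qi->desc stays allocated across suspend, resume only has to redo the register programming, which is exactly what __dmar_enable_qi() encapsulates.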
@@ -789,6 +789,35 @@ end:
 	spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
+/*
+ * Enable queued invalidation.
+ */
+static void __dmar_enable_qi(struct intel_iommu *iommu)
+{
+	u32 cmd, sts;
+	unsigned long flags;
+	struct q_inval *qi = iommu->qi;
+
+	qi->free_head = qi->free_tail = 0;
+	qi->free_cnt = QI_LENGTH;
+
+	spin_lock_irqsave(&iommu->register_lock, flags);
+
+	/* write zero to the tail reg */
+	writel(0, iommu->reg + DMAR_IQT_REG);
+
+	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
+
+	cmd = iommu->gcmd | DMA_GCMD_QIE;
+	iommu->gcmd |= DMA_GCMD_QIE;
+	writel(cmd, iommu->reg + DMAR_GCMD_REG);
+
+	/* Make sure hardware complete it */
+	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
+
+	spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
 /*
  * Enable Queued Invalidation interface. This is a must to support
  * interrupt-remapping. Also used by DMA-remapping, which replaces
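IOMMU_WAIT_OP() itself is not shown in this diff: in the driver it is a busy-wait macro that keeps reading a DMAR status register until a condition becomes true, bounded by a cycle-counter timeout. The stand-alone helper below sketches the same pattern; the function name and the fixed retry bound are illustrative assumptions, not the kernel macro:

#include <linux/types.h>
#include <linux/io.h>           /* readl() */
#include <linux/errno.h>

/*
 * Illustrative only: poll a 32-bit MMIO status register until the
 * requested bits are set, or give up after a fixed number of reads.
 */
static int qi_wait_for_bits(void __iomem *reg, u32 mask)
{
        unsigned int spins = 1000000;   /* arbitrary bound for the sketch */
        u32 sts;

        do {
                sts = readl(reg);
                if (sts & mask)
                        return 0;       /* e.g. DMA_GSTS_QIES became set */
        } while (--spins);

        return -ETIMEDOUT;
}

In __dmar_enable_qi() above, the corresponding wait is for DMA_GSTS_QIES in DMAR_GSTS_REG after DMA_GCMD_QIE has been written, confirming that the hardware has actually switched to queued invalidation.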
@@ -796,8 +825,6 @@ end:
  */
 int dmar_enable_qi(struct intel_iommu *iommu)
 {
-	u32 cmd, sts;
-	unsigned long flags;
 	struct q_inval *qi;
 
 	if (!ecap_qis(iommu->ecap))
@@ -835,19 +862,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 
 	spin_lock_init(&qi->q_lock);
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
-	/* write zero to the tail reg */
-	writel(0, iommu->reg + DMAR_IQT_REG);
-
-	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
-
-	cmd = iommu->gcmd | DMA_GCMD_QIE;
-	iommu->gcmd |= DMA_GCMD_QIE;
-	writel(cmd, iommu->reg + DMAR_GCMD_REG);
-
-	/* Make sure hardware complete it */
-	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	__dmar_enable_qi(iommu);
 
 	return 0;
 }
@@ -1102,3 +1117,28 @@ int __init enable_drhd_fault_handling(void)
 
 	return 0;
 }
+
+/*
+ * Re-enable Queued Invalidation interface.
+ */
+int dmar_reenable_qi(struct intel_iommu *iommu)
+{
+	if (!ecap_qis(iommu->ecap))
+		return -ENOENT;
+
+	if (!iommu->qi)
+		return -ENOENT;
+
+	/*
+	 * First disable queued invalidation.
+	 */
+	dmar_disable_qi(iommu);
+	/*
+	 * Then enable queued invalidation again. Since there is no pending
+	 * invalidation requests now, it's safe to re-enable queued
+	 * invalidation.
+	 */
+	__dmar_enable_qi(iommu);
+
+	return 0;
+}