dmar: Use queued invalidation interface for IOTLB and context invalidation

If the queued invalidation interface is available and enabled, it is used
instead of the register-based interface. According to the VT-d2 specification,
when queued invalidation is enabled, invalidation commands may be submitted
only through the invalidation queue and not through the command register
interface.

Signed-off-by: Youquan Song <youquan.song@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
commit a77b67d402 (parent 3481f21097)
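The heart of the change is a small dispatch table: struct iommu_flush holds flush_context and flush_iotlb function pointers that init_dmars() fills in once per IOMMU, choosing either the register-based __iommu_flush_context/__iommu_flush_iotlb or the queued qi_flush_context/qi_flush_iotlb, so every call site goes through iommu->flush.* without caring which backend is active. The standalone C sketch below illustrates that pattern only; it is not the kernel code, and the fake_iommu type, iommu_init_flush() helper, and trimmed argument lists are invented for the example.

/*
 * Minimal standalone sketch (not the kernel code) of the per-IOMMU
 * flush-callback dispatch this commit introduces.  All names here are
 * simplified stand-ins for the real driver symbols.
 */
#include <stdio.h>
#include <stdint.h>

struct fake_iommu;

struct iommu_flush_ops {
	int (*flush_context)(struct fake_iommu *iommu, uint16_t did,
			     uint16_t sid, uint8_t fm, uint64_t type);
	int (*flush_iotlb)(struct fake_iommu *iommu, uint16_t did,
			   uint64_t addr, unsigned int size_order,
			   uint64_t type);
};

struct fake_iommu {
	int qi_enabled;			/* queued invalidation available? */
	struct iommu_flush_ops flush;	/* selected once at init time */
};

/* Register-based (legacy) implementations */
static int reg_flush_context(struct fake_iommu *iommu, uint16_t did,
			     uint16_t sid, uint8_t fm, uint64_t type)
{
	printf("register-based context flush, did=%u type=%llu\n",
	       did, (unsigned long long)type);
	return 0;
}

static int reg_flush_iotlb(struct fake_iommu *iommu, uint16_t did,
			   uint64_t addr, unsigned int size_order,
			   uint64_t type)
{
	printf("register-based IOTLB flush, did=%u type=%llu\n",
	       did, (unsigned long long)type);
	return 0;
}

/* Queued-invalidation implementations */
static int qi_flush_context_sketch(struct fake_iommu *iommu, uint16_t did,
				   uint16_t sid, uint8_t fm, uint64_t type)
{
	printf("queued context invalidation, did=%u type=%llu\n",
	       did, (unsigned long long)type);
	return 0;
}

static int qi_flush_iotlb_sketch(struct fake_iommu *iommu, uint16_t did,
				 uint64_t addr, unsigned int size_order,
				 uint64_t type)
{
	printf("queued IOTLB invalidation, did=%u type=%llu\n",
	       did, (unsigned long long)type);
	return 0;
}

/* Mirrors the init_dmars() hunk: pick the backend once, per IOMMU. */
static void iommu_init_flush(struct fake_iommu *iommu)
{
	if (!iommu->qi_enabled) {
		iommu->flush.flush_context = reg_flush_context;
		iommu->flush.flush_iotlb   = reg_flush_iotlb;
	} else {
		iommu->flush.flush_context = qi_flush_context_sketch;
		iommu->flush.flush_iotlb   = qi_flush_iotlb_sketch;
	}
}

int main(void)
{
	struct fake_iommu iommu = { .qi_enabled = 1 };

	iommu_init_flush(&iommu);

	/* Call sites no longer care which backend is in use. */
	iommu.flush.flush_context(&iommu, 0, 0, 0, /* global invl */ 1);
	iommu.flush.flush_iotlb(&iommu, 0, 0, 0, /* global flush */ 1);
	return 0;
}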
@@ -567,27 +567,6 @@ static int __iommu_flush_context(struct intel_iommu *iommu,
 	return 0;
 }
 
-static int inline iommu_flush_context_global(struct intel_iommu *iommu,
-	int non_present_entry_flush)
-{
-	return __iommu_flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
-		non_present_entry_flush);
-}
-
-static int inline iommu_flush_context_domain(struct intel_iommu *iommu, u16 did,
-	int non_present_entry_flush)
-{
-	return __iommu_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL,
-		non_present_entry_flush);
-}
-
-static int inline iommu_flush_context_device(struct intel_iommu *iommu,
-	u16 did, u16 source_id, u8 function_mask, int non_present_entry_flush)
-{
-	return __iommu_flush_context(iommu, did, source_id, function_mask,
-		DMA_CCMD_DEVICE_INVL, non_present_entry_flush);
-}
-
 /* return value determine if we need a write buffer flush */
 static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 	u64 addr, unsigned int size_order, u64 type,
@@ -660,20 +639,6 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 	return 0;
 }
 
-static int inline iommu_flush_iotlb_global(struct intel_iommu *iommu,
-	int non_present_entry_flush)
-{
-	return __iommu_flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
-		non_present_entry_flush);
-}
-
-static int inline iommu_flush_iotlb_dsi(struct intel_iommu *iommu, u16 did,
-	int non_present_entry_flush)
-{
-	return __iommu_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
-		non_present_entry_flush);
-}
-
 static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
 	u64 addr, unsigned int pages, int non_present_entry_flush)
 {
@@ -684,8 +649,9 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
 
 	/* Fallback to domain selective flush if no PSI support */
 	if (!cap_pgsel_inv(iommu->cap))
-		return iommu_flush_iotlb_dsi(iommu, did,
-			non_present_entry_flush);
+		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
+						DMA_TLB_DSI_FLUSH,
+						non_present_entry_flush);
 
 	/*
 	 * PSI requires page size to be 2 ^ x, and the base address is naturally
@@ -694,11 +660,12 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
 	mask = ilog2(__roundup_pow_of_two(pages));
 	/* Fallback to domain selective flush if size is too big */
 	if (mask > cap_max_amask_val(iommu->cap))
-		return iommu_flush_iotlb_dsi(iommu, did,
-			non_present_entry_flush);
+		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
+			DMA_TLB_DSI_FLUSH, non_present_entry_flush);
 
-	return __iommu_flush_iotlb(iommu, did, addr, mask,
-		DMA_TLB_PSI_FLUSH, non_present_entry_flush);
+	return iommu->flush.flush_iotlb(iommu, did, addr, mask,
+					DMA_TLB_PSI_FLUSH,
+					non_present_entry_flush);
 }
 
 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -1204,11 +1171,13 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	__iommu_flush_cache(iommu, context, sizeof(*context));
 
 	/* it's a non-present to present mapping */
-	if (iommu_flush_context_device(iommu, domain->id,
-		(((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, 1))
+	if (iommu->flush.flush_context(iommu, domain->id,
+		(((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
+		DMA_CCMD_DEVICE_INVL, 1))
 		iommu_flush_write_buffer(iommu);
 	else
-		iommu_flush_iotlb_dsi(iommu, 0, 0);
+		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 	return 0;
 }
@@ -1310,8 +1279,10 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
 static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
 {
 	clear_context_table(domain->iommu, bus, devfn);
-	iommu_flush_context_global(domain->iommu, 0);
-	iommu_flush_iotlb_global(domain->iommu, 0);
+	domain->iommu->flush.flush_context(domain->iommu, 0, 0, 0,
+		DMA_CCMD_GLOBAL_INVL, 0);
+	domain->iommu->flush.flush_iotlb(domain->iommu, 0, 0, 0,
+		DMA_TLB_GLOBAL_FLUSH, 0);
 }
 
 static void domain_remove_dev_info(struct dmar_domain *domain)
@@ -1662,6 +1633,28 @@ int __init init_dmars(void)
 		}
 	}
 
+	for_each_drhd_unit(drhd) {
+		if (drhd->ignored)
+			continue;
+
+		iommu = drhd->iommu;
+		if (dmar_enable_qi(iommu)) {
+			/*
+			 * Queued Invalidate not enabled, use Register Based
+			 * Invalidate
+			 */
+			iommu->flush.flush_context = __iommu_flush_context;
+			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
+			printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
+			       "invalidation\n", drhd->reg_base_addr);
+		} else {
+			iommu->flush.flush_context = qi_flush_context;
+			iommu->flush.flush_iotlb = qi_flush_iotlb;
+			printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
+			       "invalidation\n", drhd->reg_base_addr);
+		}
+	}
+
 	/*
 	 * For each rmrr
 	 *   for each dev attached to rmrr
@@ -1714,9 +1707,10 @@ int __init init_dmars(void)
 
 		iommu_set_root_entry(iommu);
 
-		iommu_flush_context_global(iommu, 0);
-		iommu_flush_iotlb_global(iommu, 0);
+		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
+					   0);
+		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
+					 0);
 
 		iommu_disable_protect_mem_regions(iommu);
 
 		ret = iommu_enable_translation(iommu);
@@ -1891,7 +1885,8 @@ static void flush_unmaps(void)
 			struct intel_iommu *iommu =
 				deferred_flush[i].domain[0]->iommu;
 
-			iommu_flush_iotlb_global(iommu, 0);
+			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+						 DMA_TLB_GLOBAL_FLUSH, 0);
 			for (j = 0; j < deferred_flush[i].next; j++) {
 				__free_iova(&deferred_flush[i].domain[j]->iovad,
 					deferred_flush[i].iova[j]);
@@ -278,6 +278,13 @@ struct ir_table {
 };
 #endif
 
+struct iommu_flush {
+	int (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
+		u64 type, int non_present_entry_flush);
+	int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
+		unsigned int size_order, u64 type, int non_present_entry_flush);
+};
+
 struct intel_iommu {
 	void __iomem	*reg; /* Pointer to hardware regs, virtual addr */
 	u64		cap;
@@ -297,6 +304,7 @@ struct intel_iommu {
 	unsigned char name[7];    /* Device Name */
 	struct msi_msg saved_msg;
 	struct sys_device sysdev;
+	struct iommu_flush flush;
 #endif
 	struct q_inval *qi;		/* Queued invalidation info */
 #ifdef CONFIG_INTR_REMAP