vfio/spapr_tce: Invalidate multiple TCEs at once
Invalidating a TCE cache entry for each updated TCE is quite expensive.
This makes use of the new iommu_table_ops::xchg_no_kill()/tce_kill()
callbacks to bring down the time spent in mapping a huge guest DMA window.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190829085252.72370-4-aik@ozlabs.ru
parent 01b7d128b5
commit 650ab1e370
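The pattern behind the patch, in brief: previously every updated TCE triggered its own TCE cache invalidation; with the split callbacks, all entries in a range are exchanged first (xchg_no_kill) and a single invalidation (tce_kill) covers the whole range afterwards. Below is a minimal standalone C sketch of that pattern only; tce_set() and tce_invalidate() are hypothetical stand-ins for illustration, not the kernel API:

#include <stdio.h>

/* Hypothetical stand-ins for the real TCE table update and the
 * expensive TCE cache invalidation; for illustration only. */
static void tce_set(unsigned long entry)
{
        (void)entry;            /* would write one translation entry */
}

static void tce_invalidate(unsigned long entry, unsigned long pages)
{
        printf("invalidate [%lu, %lu)\n", entry, entry + pages);
}

/* Old pattern: one invalidation per updated TCE. */
static void map_window_per_entry(unsigned long entry, unsigned long pages)
{
        for (unsigned long i = 0; i < pages; i++) {
                tce_set(entry + i);
                tce_invalidate(entry + i, 1);   /* pages expensive flushes */
        }
}

/* New pattern: update every entry first (xchg_no_kill), then issue a
 * single invalidation for the whole range (tce_kill). */
static void map_window_batched(unsigned long entry, unsigned long pages)
{
        for (unsigned long i = 0; i < pages; i++)
                tce_set(entry + i);
        tce_invalidate(entry, pages);           /* one flush */
}

int main(void)
{
        map_window_per_entry(0, 4);     /* prints four invalidations */
        map_window_batched(0, 4);       /* prints one invalidation */
        return 0;
}

For a huge guest DMA window this turns O(pages) expensive flushes into one.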
@@ -435,7 +435,7 @@ static int tce_iommu_clear(struct tce_container *container,
         unsigned long oldhpa;
         long ret;
         enum dma_data_direction direction;
-        unsigned long lastentry = entry + pages;
+        unsigned long lastentry = entry + pages, firstentry = entry;
 
         for ( ; entry < lastentry; ++entry) {
                 if (tbl->it_indirect_levels && tbl->it_userspace) {
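(The loop advances entry itself, so firstentry preserves the start of the range; it becomes the base of the single iommu_tce_kill() call added at the end of tce_iommu_clear() in the third hunk.)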
@@ -460,7 +460,7 @@ static int tce_iommu_clear(struct tce_container *container,
 
                 direction = DMA_NONE;
                 oldhpa = 0;
-                ret = iommu_tce_xchg(container->mm, tbl, entry, &oldhpa,
+                ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry, &oldhpa,
                                 &direction);
                 if (ret)
                         continue;
@@ -476,6 +476,8 @@ static int tce_iommu_clear(struct tce_container *container,
                 tce_iommu_unuse_page(container, oldhpa);
         }
 
+        iommu_tce_kill(tbl, firstentry, pages);
+
         return 0;
 }
 
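Note the invalidation is issued once, unconditionally, after the loop: per-entry iommu_tce_xchg_no_kill() failures are still skipped with continue, so one range-wide flush of [firstentry, firstentry + pages) replaces up to pages individual flushes.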
@@ -518,8 +520,8 @@ static long tce_iommu_build(struct tce_container *container,
 
                 hpa |= offset;
                 dirtmp = direction;
-                ret = iommu_tce_xchg(container->mm, tbl, entry + i, &hpa,
-                                &dirtmp);
+                ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
+                                &hpa, &dirtmp);
                 if (ret) {
                         tce_iommu_unuse_page(container, hpa);
                         pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
@@ -536,6 +538,8 @@ static long tce_iommu_build(struct tce_container *container,
 
         if (ret)
                 tce_iommu_clear(container, tbl, entry, i);
+        else
+                iommu_tce_kill(tbl, entry, pages);
 
         return ret;
 }
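In tce_iommu_build() the invalidation moves to the success path only: on failure the function unwinds through tce_iommu_clear(), which now issues its own iommu_tce_kill() for the entries it clears.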
@@ -572,8 +576,8 @@ static long tce_iommu_build_v2(struct tce_container *container,
                 if (mm_iommu_mapped_inc(mem))
                         break;
 
-                ret = iommu_tce_xchg(container->mm, tbl, entry + i, &hpa,
-                                &dirtmp);
+                ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
+                                &hpa, &dirtmp);
                 if (ret) {
                         /* dirtmp cannot be DMA_NONE here */
                         tce_iommu_unuse_page_v2(container, tbl, entry + i);
@@ -593,6 +597,8 @@ static long tce_iommu_build_v2(struct tce_container *container,
 
         if (ret)
                 tce_iommu_clear(container, tbl, entry, i);
+        else
+                iommu_tce_kill(tbl, entry, pages);
 
         return ret;
 }
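tce_iommu_build_v2() receives the identical treatment, so every map or unmap request through this driver now ends in a single range invalidation rather than one per TCE.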