iommu/dma: Resurrect the "forcedac" option

In converting intel-iommu over to the common IOMMU DMA ops, the driver quietly
lost the functionality of its "forcedac" option. Since this is a handy
thing both for testing and for performance optimisation on certain
platforms, reimplement it under the common IOMMU parameter namespace.

For the sake of fixing the inadvertent breakage of the Intel-specific
parameter, remove the dmar_forcedac remnants and hook it up as an alias
while documenting the transition to the new common parameter.

Fixes: c588072bba ("iommu/vt-d: Convert intel iommu driver to the iommu ops")
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: John Garry <john.garry@huawei.com>
Link: https://lore.kernel.org/r/7eece8e0ea7bfbe2cd0e30789e0d46df573af9b0.1614961776.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Robin Murphy 2021-03-05 16:32:34 +00:00 committed by Joerg Roedel
parent b9abb19fa5
commit 3542dcb15c
4 changed files with 24 additions and 11 deletions


@@ -1869,13 +1869,6 @@
 			bypassed by not enabling DMAR with this option. In
 			this case, gfx device will use physical address for
 			DMA.
-		forcedac [X86-64]
-			With this option iommu will not optimize to look
-			for io virtual address below 32-bit forcing dual
-			address cycle on pci bus for cards supporting greater
-			than 32-bit addressing. The default is to look
-			for translation below 32-bit and if not available
-			then look in the higher range.
 		strict [Default Off]
 			With this option on every unmap_single operation will
 			result in a hardware IOTLB flush operation as opposed
@@ -1964,6 +1957,14 @@
 		nobypass	[PPC/POWERNV]
 			Disable IOMMU bypass, using IOMMU for PCI devices.
 
+	iommu.forcedac=	[ARM64, X86] Control IOVA allocation for PCI devices.
+			Format: { "0" | "1" }
+			0 - Try to allocate a 32-bit DMA address first, before
+			  falling back to the full range if needed.
+			1 - Allocate directly from the full usable range,
+			  forcing Dual Address Cycle for PCI cards supporting
+			  greater than 32-bit addressing.
+
 	iommu.strict=	[ARM64] Configure TLB invalidation behaviour
 			Format: { "0" | "1" }
 			0 - Lazy mode.

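As the new documentation text above spells out, the option is a plain boolean: booting with iommu.forcedac=1 makes the DMA layer allocate IOVAs directly from the full usable range rather than trying a 32-bit (SAC) address first, while iommu.forcedac=0 (the default) keeps the SAC-first behaviour. The legacy intel_iommu=forcedac spelling remains accepted as a deprecated alias, as the intel-iommu hunk further down shows.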

@@ -52,6 +52,17 @@ struct iommu_dma_cookie {
 };
 
 static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
+bool iommu_dma_forcedac __read_mostly;
+
+static int __init iommu_dma_forcedac_setup(char *str)
+{
+	int ret = kstrtobool(str, &iommu_dma_forcedac);
+
+	if (!ret && iommu_dma_forcedac)
+		pr_info("Forcing DAC for PCI devices\n");
+	return ret;
+}
+early_param("iommu.forcedac", iommu_dma_forcedac_setup);
 
 void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
 		struct iommu_domain *domain)
@@ -444,7 +455,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
 		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
 
 	/* Try to get PCI devices a SAC address */
-	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
+	if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
 		iova = alloc_iova_fast(iovad, iova_len,
 				       DMA_BIT_MASK(32) >> shift, false);
 

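To make the one-line change to iommu_dma_alloc_iova() above easier to follow, here is a minimal userspace sketch (not kernel code; the helper first_try_limit() and the local forcedac flag are purely illustrative) of the allocation policy: without forcedac, a PCI device whose DMA limit exceeds 32 bits is first offered an IOVA below 4 GiB, and with forcedac that first, SAC-sized attempt is skipped.

/*
 * Illustrative userspace model of the IOVA allocation policy touched by
 * this patch: without forcedac, a PCI device whose DMA limit exceeds
 * 32 bits is first offered an address below 4 GiB (SAC); with forcedac,
 * allocation goes straight to the full usable range (DAC).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static bool forcedac;	/* models iommu_dma_forcedac */

/* Returns the upper bound used for the first allocation attempt. */
static uint64_t first_try_limit(uint64_t dma_limit, bool is_pci)
{
	/* Try to get PCI devices a SAC address, unless forcedac is set */
	if (dma_limit > DMA_BIT_MASK(32) && !forcedac && is_pci)
		return DMA_BIT_MASK(32);
	return dma_limit;
}

int main(void)
{
	printf("default : first try below %#llx\n",
	       (unsigned long long)first_try_limit(DMA_BIT_MASK(64), true));
	forcedac = true;
	printf("forcedac: first try below %#llx\n",
	       (unsigned long long)first_try_limit(DMA_BIT_MASK(64), true));
	return 0;
}

In the real allocator a failed 32-bit attempt still falls back to the full range; the sketch only models which limit the first attempt uses.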

@@ -360,7 +360,6 @@ int intel_iommu_enabled = 0;
 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
 
 static int dmar_map_gfx = 1;
-static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
 static int iommu_identity_mapping;
@@ -451,8 +450,8 @@ static int __init intel_iommu_setup(char *str)
 			dmar_map_gfx = 0;
 			pr_info("Disable GFX device mapping\n");
 		} else if (!strncmp(str, "forcedac", 8)) {
-			pr_info("Forcing DAC for PCI devices\n");
-			dmar_forcedac = 1;
+			pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
+			iommu_dma_forcedac = true;
 		} else if (!strncmp(str, "strict", 6)) {
 			pr_info("Disable batched IOTLB flush\n");
 			intel_iommu_strict = 1;


@@ -40,6 +40,8 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
 void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
 		struct iommu_domain *domain);
 
+extern bool iommu_dma_forcedac;
+
 #else /* CONFIG_IOMMU_DMA */
 
 struct iommu_domain;