Merge branch 'for-joerg/arm-smmu/updates' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into arm/smmu

Merged by Joerg Roedel on 2018-10-01 17:28:13 +02:00
commit 6f20a97e09
9 changed files with 243 additions and 69 deletions

Documentation/admin-guide/kernel-parameters.txt

@@ -1749,6 +1749,18 @@
nobypass [PPC/POWERNV]
Disable IOMMU bypass, using IOMMU for PCI devices.
iommu.strict= [ARM64] Configure TLB invalidation behaviour
Format: { "0" | "1" }
0 - Lazy mode.
Request that DMA unmap operations use deferred
invalidation of hardware TLBs, for increased
throughput at the cost of reduced device isolation.
Will fall back to strict mode if not supported by
the relevant IOMMU driver.
1 - Strict mode (default).
DMA unmap operations invalidate IOMMU hardware TLBs
synchronously.
iommu.passthrough=
[ARM64] Configure DMA to bypass the IOMMU by default.
Format: { "0" | "1" }
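
As an illustration (not part of the patch): on a system whose IOMMU driver supports the new attribute, lazy invalidation would be requested by appending the option to the kernel boot command line, e.g.

    console=ttyAMA0 root=/dev/vda2 iommu.passthrough=0 iommu.strict=0

The console/root arguments above are placeholders; only iommu.strict=0 matters here, and drivers without flush-queue support simply stay in strict mode, as documented.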

drivers/iommu/arm-smmu-v3.c

@@ -567,7 +567,8 @@ struct arm_smmu_device {
int gerr_irq;
int combined_irq;
atomic_t sync_nr;
u32 sync_nr;
u8 prev_cmd_opcode;
unsigned long ias; /* IPA */
unsigned long oas; /* PA */
@@ -611,6 +612,7 @@ struct arm_smmu_domain {
struct mutex init_mutex; /* Protects smmu pointer */
struct io_pgtable_ops *pgtbl_ops;
bool non_strict;
enum arm_smmu_domain_stage stage;
union {
@@ -708,7 +710,7 @@ static void queue_inc_prod(struct arm_smmu_queue *q)
}
/*
* Wait for the SMMU to consume items. If drain is true, wait until the queue
* Wait for the SMMU to consume items. If sync is true, wait until the queue
* is empty. Otherwise, wait until there is at least one free slot.
*/
static int queue_poll_cons(struct arm_smmu_queue *q, bool sync, bool wfe)
@@ -901,6 +903,8 @@ static void arm_smmu_cmdq_insert_cmd(struct arm_smmu_device *smmu, u64 *cmd)
struct arm_smmu_queue *q = &smmu->cmdq.q;
bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
smmu->prev_cmd_opcode = FIELD_GET(CMDQ_0_OP, cmd[0]);
while (queue_insert_raw(q, cmd) == -ENOSPC) {
if (queue_poll_cons(q, false, wfe))
dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
@@ -948,15 +952,21 @@ static int __arm_smmu_cmdq_issue_sync_msi(struct arm_smmu_device *smmu)
struct arm_smmu_cmdq_ent ent = {
.opcode = CMDQ_OP_CMD_SYNC,
.sync = {
.msidata = atomic_inc_return_relaxed(&smmu->sync_nr),
.msiaddr = virt_to_phys(&smmu->sync_count),
},
};
arm_smmu_cmdq_build_cmd(cmd, &ent);
spin_lock_irqsave(&smmu->cmdq.lock, flags);
arm_smmu_cmdq_insert_cmd(smmu, cmd);
/* Piggy-back on the previous command if it's a SYNC */
if (smmu->prev_cmd_opcode == CMDQ_OP_CMD_SYNC) {
ent.sync.msidata = smmu->sync_nr;
} else {
ent.sync.msidata = ++smmu->sync_nr;
arm_smmu_cmdq_build_cmd(cmd, &ent);
arm_smmu_cmdq_insert_cmd(smmu, cmd);
}
spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
return __arm_smmu_sync_poll_msi(smmu, ent.sync.msidata);
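
The piggy-back is safe because sync_nr only advances under cmdq.lock and every MSI-based CMD_SYNC writes its msidata to the same sync_count word: if the last command inserted was itself a CMD_SYNC, everything this caller cares about was queued before it, so waiting for that SYNC's value is sufficient. A minimal sketch of the completion test the poller relies on (helper name invented here, not the driver's):

        #include <linux/types.h>

        /*
         * Sketch only: has sync_count reached msidata?  The signed distance
         * tolerates wraparound of the 32-bit sequence number.
         */
        static bool cmd_sync_seen(u32 sync_count, u32 msidata)
        {
                return (s32)(sync_count - msidata) >= 0;
        }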
@@ -1398,6 +1408,12 @@ static void arm_smmu_tlb_inv_context(void *cookie)
cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
}
/*
* NOTE: when io-pgtable is in non-strict mode, we may get here with
* PTEs previously cleared by unmaps on the current CPU not yet visible
* to the SMMU. We are relying on the DSB implicit in queue_inc_prod()
* to guarantee those are observed before the TLBI. Do be careful, 007.
*/
arm_smmu_cmdq_issue_cmd(smmu, &cmd);
__arm_smmu_tlb_sync(smmu);
}
@@ -1624,6 +1640,9 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
if (smmu->features & ARM_SMMU_FEAT_COHERENCY)
pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
if (smmu_domain->non_strict)
pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
if (!pgtbl_ops)
return -ENOMEM;
@@ -1772,6 +1791,14 @@ arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
return ops->unmap(ops, iova, size);
}
static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
if (smmu_domain->smmu)
arm_smmu_tlb_inv_context(smmu_domain);
}
static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
{
struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
@@ -1917,15 +1944,27 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
if (domain->type != IOMMU_DOMAIN_UNMANAGED)
return -EINVAL;
switch (attr) {
case DOMAIN_ATTR_NESTING:
*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
return 0;
switch (domain->type) {
case IOMMU_DOMAIN_UNMANAGED:
switch (attr) {
case DOMAIN_ATTR_NESTING:
*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
return 0;
default:
return -ENODEV;
}
break;
case IOMMU_DOMAIN_DMA:
switch (attr) {
case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
*(int *)data = smmu_domain->non_strict;
return 0;
default:
return -ENODEV;
}
break;
default:
return -ENODEV;
return -EINVAL;
}
}
@@ -1935,26 +1974,37 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
int ret = 0;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
if (domain->type != IOMMU_DOMAIN_UNMANAGED)
return -EINVAL;
mutex_lock(&smmu_domain->init_mutex);
switch (attr) {
case DOMAIN_ATTR_NESTING:
if (smmu_domain->smmu) {
ret = -EPERM;
goto out_unlock;
switch (domain->type) {
case IOMMU_DOMAIN_UNMANAGED:
switch (attr) {
case DOMAIN_ATTR_NESTING:
if (smmu_domain->smmu) {
ret = -EPERM;
goto out_unlock;
}
if (*(int *)data)
smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
else
smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
break;
default:
ret = -ENODEV;
}
break;
case IOMMU_DOMAIN_DMA:
switch(attr) {
case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
smmu_domain->non_strict = *(int *)data;
break;
default:
ret = -ENODEV;
}
if (*(int *)data)
smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
else
smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
break;
default:
ret = -ENODEV;
ret = -EINVAL;
}
out_unlock:
@@ -1999,7 +2049,7 @@ static struct iommu_ops arm_smmu_ops = {
.attach_dev = arm_smmu_attach_dev,
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
.flush_iotlb_all = arm_smmu_iotlb_sync,
.flush_iotlb_all = arm_smmu_flush_iotlb_all,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
.add_device = arm_smmu_add_device,
@@ -2180,7 +2230,6 @@ static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
int ret;
atomic_set(&smmu->sync_nr, 0);
ret = arm_smmu_init_queues(smmu);
if (ret)
return ret;
@@ -2353,8 +2402,8 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
irq = smmu->combined_irq;
if (irq) {
/*
* Cavium ThunderX2 implementation doesn't not support unique
* irq lines. Use single irq line for all the SMMUv3 interrupts.
* Cavium ThunderX2 implementation doesn't support unique irq
* lines. Use a single irq line for all the SMMUv3 interrupts.
*/
ret = devm_request_threaded_irq(smmu->dev, irq,
arm_smmu_combined_irq_handler,

drivers/iommu/arm-smmu.c

@@ -246,6 +246,7 @@ struct arm_smmu_domain {
const struct iommu_gather_ops *tlb_ops;
struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
bool non_strict;
struct mutex init_mutex; /* Protects smmu pointer */
spinlock_t cb_lock; /* Serialises ATS1* ops and TLB syncs */
struct iommu_domain domain;
@@ -447,7 +448,11 @@ static void arm_smmu_tlb_inv_context_s1(void *cookie)
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
/*
* NOTE: this is not a relaxed write; it needs to guarantee that PTEs
* cleared by the current CPU are visible to the SMMU before the TLBI.
*/
writel(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
arm_smmu_tlb_sync_context(cookie);
}
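
For context on the writel_relaxed() to writel() change: the non-relaxed arm64 MMIO write accessor places a write barrier ahead of the store, which is what orders this CPU's page-table zeroing against the TLBI register write. Paraphrased (not copied verbatim) from the arm64 io accessors of this era:

        /* sketch of the arm64 definition; exact macro shape varies by kernel version */
        #define writel(v, c)    ({ __iowmb(); writel_relaxed((v), (c)); })

where __iowmb() is a write barrier strong enough to make prior normal-memory stores (the cleared PTEs) observable before the device write.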
@@ -457,7 +462,8 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie)
struct arm_smmu_device *smmu = smmu_domain->smmu;
void __iomem *base = ARM_SMMU_GR0(smmu);
writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
/* NOTE: see above */
writel(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
arm_smmu_tlb_sync_global(smmu);
}
@@ -469,6 +475,9 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
wmb();
if (stage1) {
reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
@@ -510,6 +519,9 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
struct arm_smmu_domain *smmu_domain = cookie;
void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);
if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
wmb();
writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
}
@@ -863,6 +875,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
if (smmu_domain->non_strict)
pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
smmu_domain->smmu = smmu;
pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
if (!pgtbl_ops) {
@@ -1252,6 +1267,14 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
return ops->unmap(ops, iova, size);
}
static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
if (smmu_domain->tlb_ops)
smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
}
static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -1470,15 +1493,27 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
if (domain->type != IOMMU_DOMAIN_UNMANAGED)
return -EINVAL;
switch (attr) {
case DOMAIN_ATTR_NESTING:
*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
return 0;
switch(domain->type) {
case IOMMU_DOMAIN_UNMANAGED:
switch (attr) {
case DOMAIN_ATTR_NESTING:
*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
return 0;
default:
return -ENODEV;
}
break;
case IOMMU_DOMAIN_DMA:
switch (attr) {
case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
*(int *)data = smmu_domain->non_strict;
return 0;
default:
return -ENODEV;
}
break;
default:
return -ENODEV;
return -EINVAL;
}
}
@@ -1488,28 +1523,38 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
int ret = 0;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
if (domain->type != IOMMU_DOMAIN_UNMANAGED)
return -EINVAL;
mutex_lock(&smmu_domain->init_mutex);
switch (attr) {
case DOMAIN_ATTR_NESTING:
if (smmu_domain->smmu) {
ret = -EPERM;
goto out_unlock;
switch(domain->type) {
case IOMMU_DOMAIN_UNMANAGED:
switch (attr) {
case DOMAIN_ATTR_NESTING:
if (smmu_domain->smmu) {
ret = -EPERM;
goto out_unlock;
}
if (*(int *)data)
smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
else
smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
break;
default:
ret = -ENODEV;
}
break;
case IOMMU_DOMAIN_DMA:
switch (attr) {
case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
smmu_domain->non_strict = *(int *)data;
break;
default:
ret = -ENODEV;
}
if (*(int *)data)
smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
else
smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
break;
default:
ret = -ENODEV;
ret = -EINVAL;
}
out_unlock:
mutex_unlock(&smmu_domain->init_mutex);
return ret;
@@ -1562,7 +1607,7 @@ static struct iommu_ops arm_smmu_ops = {
.attach_dev = arm_smmu_attach_dev,
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
.flush_iotlb_all = arm_smmu_iotlb_sync,
.flush_iotlb_all = arm_smmu_flush_iotlb_all,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
.add_device = arm_smmu_add_device,

drivers/iommu/dma-iommu.c

@@ -55,6 +55,9 @@ struct iommu_dma_cookie {
};
struct list_head msi_page_list;
spinlock_t msi_lock;
/* Domain for flush queue callback; NULL if flush queue not in use */
struct iommu_domain *fq_domain;
};
static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
@@ -257,6 +260,20 @@ static int iova_reserve_iommu_regions(struct device *dev,
return ret;
}
static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
struct iommu_dma_cookie *cookie;
struct iommu_domain *domain;
cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
domain = cookie->fq_domain;
/*
* The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
* implies that ops->flush_iotlb_all must be non-NULL.
*/
domain->ops->flush_iotlb_all(domain);
}
/**
* iommu_dma_init_domain - Initialise a DMA mapping domain
* @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
@@ -275,6 +292,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
unsigned long order, base_pfn, end_pfn;
int attr;
if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
return -EINVAL;
@@ -308,6 +326,13 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
}
init_iova_domain(iovad, 1UL << order, base_pfn);
if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
cookie->fq_domain = domain;
init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
}
if (!dev)
return 0;
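
The flush-queue callback registered here is invoked when the per-CPU rings are drained (periodically, or when a ring fills up), after which the queued IOVAs are handed back to the allocator, so a full invalidation via ->flush_iotlb_all is exactly what must happen at that point. For reference, the interface being used (prototypes paraphrased from include/linux/iova.h of this era; check the header for the authoritative form):

        typedef void (*iova_flush_cb)(struct iova_domain *domain);
        typedef void (*iova_entry_dtor)(unsigned long data);

        int init_iova_flush_queue(struct iova_domain *iovad,
                                  iova_flush_cb flush_cb,
                                  iova_entry_dtor entry_dtor);

Passing NULL for entry_dtor is fine because nothing extra is attached to the queued entries in this path.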
@@ -393,6 +418,9 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
/* The MSI case is only ever cleaning up its most recent allocation */
if (cookie->type == IOMMU_DMA_MSI_COOKIE)
cookie->msi_iova -= size;
else if (cookie->fq_domain) /* non-strict mode */
queue_iova(iovad, iova_pfn(iovad, iova),
size >> iova_shift(iovad), 0);
else
free_iova_fast(iovad, iova_pfn(iovad, iova),
size >> iova_shift(iovad));
@@ -408,7 +436,9 @@ static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
dma_addr -= iova_off;
size = iova_align(iovad, size + iova_off);
WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
if (!cookie->fq_domain)
iommu_tlb_sync(domain);
iommu_dma_free_iova(cookie, dma_addr, size);
}

drivers/iommu/io-pgtable-arm-v7s.c

@@ -587,6 +587,7 @@ static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
}
io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
io_pgtable_tlb_sync(&data->iop);
return size;
}
@@ -642,6 +643,13 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
io_pgtable_tlb_sync(iop);
ptep = iopte_deref(pte[i], lvl);
__arm_v7s_free_table(ptep, lvl + 1, data);
} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
/*
* Order the PTE update against queueing the IOVA, to
* guarantee that a flush callback from a different CPU
* has observed it before the TLBIALL can be issued.
*/
smp_wmb();
} else {
io_pgtable_tlb_add_flush(iop, iova, blk_size,
blk_size, true);
@@ -712,7 +720,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
IO_PGTABLE_QUIRK_NO_PERMS |
IO_PGTABLE_QUIRK_TLBI_ON_MAP |
IO_PGTABLE_QUIRK_ARM_MTK_4GB |
IO_PGTABLE_QUIRK_NO_DMA))
IO_PGTABLE_QUIRK_NO_DMA |
IO_PGTABLE_QUIRK_NON_STRICT))
return NULL;
/* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */

drivers/iommu/io-pgtable-arm.c

@@ -574,13 +574,13 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
return 0;
tablep = iopte_deref(pte, data);
} else if (unmap_idx >= 0) {
io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
io_pgtable_tlb_sync(&data->iop);
return size;
}
if (unmap_idx < 0)
return __arm_lpae_unmap(data, iova, size, lvl, tablep);
io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
return size;
return __arm_lpae_unmap(data, iova, size, lvl, tablep);
}
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
@@ -610,6 +610,13 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
io_pgtable_tlb_sync(iop);
ptep = iopte_deref(pte, data);
__arm_lpae_free_pgtable(data, lvl + 1, ptep);
} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
/*
* Order the PTE update against queueing the IOVA, to
* guarantee that a flush callback from a different CPU
* has observed it before the TLBIALL can be issued.
*/
smp_wmb();
} else {
io_pgtable_tlb_add_flush(iop, iova, size, size, true);
}
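
The barrier pairing here, as a sketch (types and helpers below are invented, this is not the kernel's code): the unmapping CPU must publish the IOVA into the flush queue only after its PTE clear is visible, so that the flushing CPU, having observed the queued entry, also observes the cleared PTE before it issues TLBIALL.

        static void unmap_side(u64 *ptep, struct flushq *fq, unsigned long iova)
        {
                WRITE_ONCE(*ptep, 0);   /* clear the PTE                   */
                smp_wmb();              /* order the clear ...             */
                flushq_add(fq, iova);   /* ... before publishing the IOVA  */
        }

        static void flush_side(struct flushq *fq)
        {
                spin_lock(&fq->lock);                   /* sees published entries  */
                domain_flush_iotlb_all(fq->domain);     /* TLBIALL covers the PTEs */
                flushq_release_iovas(fq);               /* recycle the IOVAs       */
                spin_unlock(&fq->lock);
        }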
@@ -772,7 +779,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
u64 reg;
struct arm_lpae_io_pgtable *data;
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA))
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA |
IO_PGTABLE_QUIRK_NON_STRICT))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
@@ -864,7 +872,8 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
struct arm_lpae_io_pgtable *data;
/* The NS quirk doesn't apply at stage 2 */
if (cfg->quirks & ~IO_PGTABLE_QUIRK_NO_DMA)
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NO_DMA |
IO_PGTABLE_QUIRK_NON_STRICT))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);

drivers/iommu/io-pgtable.h

@@ -71,12 +71,17 @@ struct io_pgtable_cfg {
* be accessed by a fully cache-coherent IOMMU or CPU (e.g. for a
* software-emulated IOMMU), such that pagetable updates need not
* be treated as explicit DMA data.
*
* IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
* on unmap, for DMA domains using the flush queue mechanism for
* delayed invalidation.
*/
#define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
#define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
#define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2)
#define IO_PGTABLE_QUIRK_ARM_MTK_4GB BIT(3)
#define IO_PGTABLE_QUIRK_NO_DMA BIT(4)
#define IO_PGTABLE_QUIRK_NON_STRICT BIT(5)
unsigned long quirks;
unsigned long pgsize_bitmap;
unsigned int ias;

drivers/iommu/iommu.c

@@ -41,6 +41,7 @@ static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
#else
static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
#endif
static bool iommu_dma_strict __read_mostly = true;
struct iommu_callback_data {
const struct iommu_ops *ops;
@@ -131,6 +132,12 @@ static int __init iommu_set_def_domain_type(char *str)
}
early_param("iommu.passthrough", iommu_set_def_domain_type);
static int __init iommu_dma_setup(char *str)
{
return kstrtobool(str, &iommu_dma_strict);
}
early_param("iommu.strict", iommu_dma_setup);
static ssize_t iommu_group_attr_show(struct kobject *kobj,
struct attribute *__attr, char *buf)
{
@@ -1072,6 +1079,13 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
group->default_domain = dom;
if (!group->domain)
group->domain = dom;
if (dom && !iommu_dma_strict) {
int attr = 1;
iommu_domain_set_attr(dom,
DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
&attr);
}
}
ret = iommu_group_add_device(group, dev);
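
Note that the return value of iommu_domain_set_attr() is deliberately not checked: a driver that does not recognise DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE fails the call, so with iommu.strict=0 such a domain simply remains strict, which is the fallback promised in the kernel-parameters text. An equivalent, more explicit form (a sketch, not what the patch does):

        if (dom && !iommu_dma_strict) {
                int attr = 1;

                /* best effort: unsupported drivers just stay strict */
                if (iommu_domain_set_attr(dom, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
                                          &attr))
                        pr_debug("flush queue unsupported, staying strict\n");
        }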

include/linux/iommu.h

@@ -124,6 +124,7 @@ enum iommu_attr {
DOMAIN_ATTR_FSL_PAMU_ENABLE,
DOMAIN_ATTR_FSL_PAMUV1,
DOMAIN_ATTR_NESTING, /* two stages of translation */
DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
DOMAIN_ATTR_MAX,
};