iommu/io-pgtable: Remove unused ->tlb_sync() callback
The ->tlb_sync() callback is no longer used, so it can be removed.

Signed-off-by: Will Deacon <will@kernel.org>
parent abfd6fe0cd
commit e953f7f2fa
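For readers without the tree handy: struct iommu_flush_ops is the table of TLB maintenance callbacks that an io-pgtable user supplies, and ->tlb_sync() was the hook for draining any queued invalidations. Reconstructed from the include/linux/io-pgtable.h hunks at the end of this diff (field layout approximate), the struct looked like this beforehand:

	struct iommu_flush_ops {
		void (*tlb_flush_all)(void *cookie);
		void (*tlb_flush_walk)(unsigned long iova, size_t size,
				       size_t granule, void *cookie);
		void (*tlb_flush_leaf)(unsigned long iova, size_t size,
				       size_t granule, void *cookie);
		void (*tlb_add_page)(unsigned long iova, size_t granule, void *cookie);
		void (*tlb_sync)(void *cookie);	/* removed by this commit */
	};

Earlier patches in this series folded the sync step into the ->tlb_flush_walk()/->tlb_flush_leaf() paths and into the IOMMU core's ->iotlb_sync() callback, so once io_pgtable_tlb_sync() is deleted in the final hunk below, nothing calls ->tlb_sync() any more.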
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -269,7 +269,6 @@ static const struct iommu_flush_ops mmu_tlb_ops = {
 	.tlb_flush_all = mmu_tlb_inv_context_s1,
 	.tlb_flush_walk = mmu_tlb_flush_walk,
 	.tlb_flush_leaf = mmu_tlb_flush_leaf,
-	.tlb_sync = mmu_tlb_sync_context,
 };
 
 static const char *access_type_name(struct panfrost_device *pfdev,
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
@@ -1545,13 +1545,6 @@ static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
 }
 
 /* IO_PGTABLE API */
-static void arm_smmu_tlb_sync(void *cookie)
-{
-	struct arm_smmu_domain *smmu_domain = cookie;
-
-	arm_smmu_cmdq_issue_sync(smmu_domain->smmu);
-}
-
 static void arm_smmu_tlb_inv_context(void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
@@ -1634,7 +1627,6 @@ static const struct iommu_flush_ops arm_smmu_flush_ops = {
 	.tlb_flush_walk = arm_smmu_tlb_inv_walk,
 	.tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
 	.tlb_add_page = arm_smmu_tlb_inv_page_nosync,
-	.tlb_sync = arm_smmu_tlb_sync,
 };
 
 /* IOMMU API */
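The arm_smmu_tlb_sync() wrapper deleted above did nothing but issue a CMD_SYNC via arm_smmu_cmdq_issue_sync(). That still happens, just no longer through io-pgtable: the driver's own iotlb_sync callback in the same file reaches it directly, roughly like this (paraphrased from arm-smmu-v3.c of this era; a sketch, not part of the diff):

	static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
					struct iommu_iotlb_gather *gather)
	{
		struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

		/* Drain any TLBI commands queued by the unmap path. */
		if (smmu_domain->smmu)
			arm_smmu_cmdq_issue_sync(smmu_domain->smmu);
	}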
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
@@ -251,7 +251,8 @@ enum arm_smmu_domain_stage {
 struct arm_smmu_flush_ops {
 	struct iommu_flush_ops tlb;
 	void (*tlb_inv_range)(unsigned long iova, size_t size, size_t granule,
-			      bool leaf, void *cookie)
+			      bool leaf, void *cookie);
+	void (*tlb_sync)(void *cookie);
 };
 
 struct arm_smmu_domain {
@@ -539,7 +540,7 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
  * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
  * almost negligible, but the benefit of getting the first one in as far ahead
  * of the sync as possible is significant, hence we don't just make this a
- * no-op and set .tlb_sync to arm_smmu_inv_context_s2() as you might think.
+ * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
  */
 static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
 					 size_t granule, bool leaf, void *cookie)
@@ -560,7 +561,7 @@ static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
 	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
 
 	ops->tlb_inv_range(iova, size, granule, false, cookie);
-	ops->tlb.tlb_sync(cookie);
+	ops->tlb_sync(cookie);
 }
 
 static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
@@ -570,7 +571,7 @@ static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
 	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
 
 	ops->tlb_inv_range(iova, size, granule, true, cookie);
-	ops->tlb.tlb_sync(cookie);
+	ops->tlb_sync(cookie);
 }
 
 static void arm_smmu_tlb_add_page(unsigned long iova, size_t granule,
@@ -588,9 +589,9 @@ static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = {
 		.tlb_flush_walk = arm_smmu_tlb_inv_walk,
 		.tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
 		.tlb_add_page = arm_smmu_tlb_add_page,
-		.tlb_sync = arm_smmu_tlb_sync_context,
 	},
 	.tlb_inv_range = arm_smmu_tlb_inv_range_nosync,
+	.tlb_sync = arm_smmu_tlb_sync_context,
 };
 
 static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
@@ -599,9 +600,9 @@ static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
 		.tlb_flush_walk = arm_smmu_tlb_inv_walk,
 		.tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
 		.tlb_add_page = arm_smmu_tlb_add_page,
-		.tlb_sync = arm_smmu_tlb_sync_context,
 	},
 	.tlb_inv_range = arm_smmu_tlb_inv_range_nosync,
+	.tlb_sync = arm_smmu_tlb_sync_context,
 };
 
 static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
@@ -610,9 +611,9 @@ static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
 		.tlb_flush_walk = arm_smmu_tlb_inv_walk,
 		.tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
 		.tlb_add_page = arm_smmu_tlb_add_page,
-		.tlb_sync = arm_smmu_tlb_sync_vmid,
 	},
 	.tlb_inv_range = arm_smmu_tlb_inv_vmid_nosync,
+	.tlb_sync = arm_smmu_tlb_sync_vmid,
 };
 
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
@@ -1387,7 +1388,7 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
 
 	if (smmu_domain->flush_ops) {
 		arm_smmu_rpm_get(smmu);
-		smmu_domain->flush_ops->tlb.tlb_sync(smmu_domain);
+		smmu_domain->flush_ops->tlb_sync(smmu_domain);
 		arm_smmu_rpm_put(smmu);
 	}
 }
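Note what happens in arm-smmu.c: unlike the other drivers, it still wants a two-step invalidate-then-sync internally, so ->tlb_sync() does not disappear here; it moves out of the io-pgtable-visible struct iommu_flush_ops into the driver-private wrapper. (The @@ -251 hunk also adds a previously missing semicolon after the tlb_inv_range prototype, and the @@ -539 hunk fixes a comment typo, arm_smmu_inv_context_s2 -> arm_smmu_tlb_inv_context_s2.) The resulting layout, taken straight from the hunk above:

	struct arm_smmu_flush_ops {
		struct iommu_flush_ops tlb;	/* callbacks io-pgtable sees */
		void (*tlb_inv_range)(unsigned long iova, size_t size, size_t granule,
				      bool leaf, void *cookie);
		void (*tlb_sync)(void *cookie);	/* driver-internal only */
	};

arm_smmu_tlb_inv_walk(), arm_smmu_tlb_inv_leaf() and arm_smmu_iotlb_sync() now reach the sync through ops->tlb_sync instead of ops->tlb.tlb_sync, so the split survives without the io-pgtable core knowing about it.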
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -813,17 +813,11 @@ static void dummy_tlb_add_page(unsigned long iova, size_t granule, void *cookie)
 	dummy_tlb_flush(iova, granule, granule, cookie);
 }
 
-static void dummy_tlb_sync(void *cookie)
-{
-	WARN_ON(cookie != cfg_cookie);
-}
-
 static const struct iommu_flush_ops dummy_tlb_ops = {
 	.tlb_flush_all = dummy_tlb_flush_all,
 	.tlb_flush_walk = dummy_tlb_flush,
 	.tlb_flush_leaf = dummy_tlb_flush,
 	.tlb_add_page = dummy_tlb_add_page,
-	.tlb_sync = dummy_tlb_sync,
 };
 
 #define __FAIL(ops) ({ \
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
@@ -1080,17 +1080,11 @@ static void dummy_tlb_add_page(unsigned long iova, size_t granule, void *cookie)
 	dummy_tlb_flush(iova, granule, granule, cookie);
 }
 
-static void dummy_tlb_sync(void *cookie)
-{
-	WARN_ON(cookie != cfg_cookie);
-}
-
 static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
 	.tlb_flush_all = dummy_tlb_flush_all,
 	.tlb_flush_walk = dummy_tlb_flush,
 	.tlb_flush_leaf = dummy_tlb_flush,
 	.tlb_add_page = dummy_tlb_add_page,
-	.tlb_sync = dummy_tlb_sync,
 };
 
 static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
@@ -371,7 +371,6 @@ static const struct iommu_flush_ops ipmmu_flush_ops = {
 	.tlb_flush_all = ipmmu_tlb_flush_all,
 	.tlb_flush_walk = ipmmu_tlb_flush,
 	.tlb_flush_leaf = ipmmu_tlb_flush,
-	.tlb_sync = ipmmu_tlb_flush_all,
 };
 
 /* -----------------------------------------------------------------------------
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
@@ -168,28 +168,16 @@ fail:
 	return;
 }
 
-static void __flush_iotlb_sync(void *cookie)
-{
-	/*
-	 * Nothing is needed here, the barrier to guarantee
-	 * completion of the tlb sync operation is implicitly
-	 * taken care when the iommu client does a writel before
-	 * kick starting the other master.
-	 */
-}
-
 static void __flush_iotlb_walk(unsigned long iova, size_t size,
 			       size_t granule, void *cookie)
 {
 	__flush_iotlb_range(iova, size, granule, false, cookie);
-	__flush_iotlb_sync(cookie);
 }
 
 static void __flush_iotlb_leaf(unsigned long iova, size_t size,
 			       size_t granule, void *cookie)
 {
 	__flush_iotlb_range(iova, size, granule, true, cookie);
-	__flush_iotlb_sync(cookie);
 }
 
 static void __flush_iotlb_page(unsigned long iova, size_t granule, void *cookie)
@@ -202,7 +190,6 @@ static const struct iommu_flush_ops msm_iommu_flush_ops = {
 	.tlb_flush_walk = __flush_iotlb_walk,
 	.tlb_flush_leaf = __flush_iotlb_leaf,
 	.tlb_add_page = __flush_iotlb_page,
-	.tlb_sync = __flush_iotlb_sync,
 };
 
 static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
@@ -712,6 +699,13 @@ static struct iommu_ops msm_iommu_ops = {
 	.detach_dev = msm_iommu_detach_dev,
 	.map = msm_iommu_map,
 	.unmap = msm_iommu_unmap,
+	/*
+	 * Nothing is needed here, the barrier to guarantee
+	 * completion of the tlb sync operation is implicitly
+	 * taken care when the iommu client does a writel before
+	 * kick starting the other master.
+	 */
+	.iotlb_sync = NULL,
 	.iova_to_phys = msm_iommu_iova_to_phys,
 	.add_device = msm_iommu_add_device,
 	.remove_device = msm_iommu_remove_device,
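The msm_iommu.c change preserves the explanatory comment by moving it to the iommu_ops table, where .iotlb_sync = NULL makes the no-op explicit instead of hiding it in an empty function. This works because the IOMMU core only invokes the callback when it is set; roughly like this (paraphrased from include/linux/iommu.h of this era, shown here as an assumption, not part of the diff):

	static inline void iommu_tlb_sync(struct iommu_domain *domain,
					  struct iommu_iotlb_gather *gather)
	{
		/* A NULL ->iotlb_sync simply skips the hardware sync. */
		if (domain->ops->iotlb_sync)
			domain->ops->iotlb_sync(domain, gather);

		iommu_iotlb_gather_init(gather);
	}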
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
@@ -213,7 +213,6 @@ static const struct iommu_flush_ops mtk_iommu_flush_ops = {
 	.tlb_flush_walk = mtk_iommu_tlb_flush_walk,
 	.tlb_flush_leaf = mtk_iommu_tlb_flush_leaf,
 	.tlb_add_page = mtk_iommu_tlb_flush_page_nosync,
-	.tlb_sync = mtk_iommu_tlb_sync,
 };
 
 static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
@@ -189,7 +189,6 @@ static const struct iommu_flush_ops qcom_flush_ops = {
 	.tlb_flush_walk = qcom_iommu_tlb_flush_walk,
 	.tlb_flush_leaf = qcom_iommu_tlb_flush_leaf,
 	.tlb_add_page = qcom_iommu_tlb_add_page,
-	.tlb_sync = qcom_iommu_tlb_sync,
 };
 
 static irqreturn_t qcom_iommu_fault(int irq, void *dev)
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
@@ -30,9 +30,6 @@ enum io_pgtable_fmt {
  *                 for IOMMUs that cannot batch TLB invalidation operations
  *                 efficiently and are therefore better suited to issuing them
  *                 early rather than deferring them until iommu_tlb_sync().
- * @tlb_sync:      Ensure any queued TLB invalidation has taken effect, and
- *                 any corresponding page table updates are visible to the
- *                 IOMMU.
  *
  * Note that these can all be called in atomic context and must therefore
  * not block.
@@ -44,7 +41,6 @@ struct iommu_flush_ops {
 	void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule,
 			       void *cookie);
 	void (*tlb_add_page)(unsigned long iova, size_t granule, void *cookie);
-	void (*tlb_sync)(void *cookie);
 };
 
 /**
@@ -218,11 +214,6 @@ io_pgtable_tlb_add_page(struct io_pgtable *iop, unsigned long iova,
 	iop->cfg.tlb->tlb_add_page(iova, granule, iop->cookie);
 }
 
-static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
-{
-	iop->cfg.tlb->tlb_sync(iop->cookie);
-}
-
 /**
  * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
  *				particular format.
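With io_pgtable_tlb_sync() gone, the io-pgtable core is left with one helper per remaining callback. For reference, the two that now carry the sync responsibility look roughly like this (a sketch based on the helpers adjacent to the deleted one; not shown in the diff):

	static inline void
	io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova,
				  size_t size, size_t granule)
	{
		/* Implementations must both invalidate and sync before returning. */
		iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);
	}

	static inline void
	io_pgtable_tlb_flush_leaf(struct io_pgtable *iop, unsigned long iova,
				  size_t size, size_t granule)
	{
		iop->cfg.tlb->tlb_flush_leaf(iova, size, granule, iop->cookie);
	}

Formats that defer invalidation instead use io_pgtable_tlb_add_page(), with the final sync driven by the IOMMU core through iommu_ops->iotlb_sync(), as the arm-smmu and msm hunks above illustrate.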