iommu: Allow selecting page sizes per domain
Many IOMMUs support multiple page table formats, meaning that any given
domain may only support a subset of the hardware page sizes presented in
iommu_ops->pgsize_bitmap. There are also certain use-cases where the
creator of a domain may want to control which page sizes are used, for
example to force the use of hugepage mappings to reduce pagetable walk
depth. To this end, add a per-domain pgsize_bitmap to represent the
subset of page sizes actually in use, to make it possible for domains
with different requirements to coexist.

Signed-off-by: Will Deacon <will.deacon@arm.com>
[rm: hijacked and rebased original patch with new commit message]
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in:
parent
53c92d7933
commit
d16e0faab9
@ -94,7 +94,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size
|
|||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
|
|
||||||
/* Use the smallest supported page size for IOVA granularity */
|
/* Use the smallest supported page size for IOVA granularity */
|
||||||
order = __ffs(domain->ops->pgsize_bitmap);
|
order = __ffs(domain->pgsize_bitmap);
|
||||||
base_pfn = max_t(unsigned long, 1, base >> order);
|
base_pfn = max_t(unsigned long, 1, base >> order);
|
||||||
end_pfn = (base + size - 1) >> order;
|
end_pfn = (base + size - 1) >> order;
|
||||||
|
|
||||||
|
@ -337,9 +337,9 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
|
|||||||
if (!domain || domain->type != IOMMU_DOMAIN_DMA)
|
if (!domain || domain->type != IOMMU_DOMAIN_DMA)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
BUG_ON(!domain->ops->pgsize_bitmap);
|
BUG_ON(!domain->pgsize_bitmap);
|
||||||
|
|
||||||
pg_size = 1UL << __ffs(domain->ops->pgsize_bitmap);
|
pg_size = 1UL << __ffs(domain->pgsize_bitmap);
|
||||||
INIT_LIST_HEAD(&mappings);
|
INIT_LIST_HEAD(&mappings);
|
||||||
|
|
||||||
iommu_get_dm_regions(dev, &mappings);
|
iommu_get_dm_regions(dev, &mappings);
|
||||||
@ -1073,6 +1073,8 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
|
|||||||
|
|
||||||
domain->ops = bus->iommu_ops;
|
domain->ops = bus->iommu_ops;
|
||||||
domain->type = type;
|
domain->type = type;
|
||||||
|
/* Assume all sizes by default; the driver may override this later */
|
||||||
|
domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
|
||||||
|
|
||||||
return domain;
|
return domain;
|
||||||
}
|
}
|
||||||
@ -1297,7 +1299,7 @@ static size_t iommu_pgsize(struct iommu_domain *domain,
|
|||||||
pgsize = (1UL << (pgsize_idx + 1)) - 1;
|
pgsize = (1UL << (pgsize_idx + 1)) - 1;
|
||||||
|
|
||||||
/* throw away page sizes not supported by the hardware */
|
/* throw away page sizes not supported by the hardware */
|
||||||
pgsize &= domain->ops->pgsize_bitmap;
|
pgsize &= domain->pgsize_bitmap;
|
||||||
|
|
||||||
/* make sure we're still sane */
|
/* make sure we're still sane */
|
||||||
BUG_ON(!pgsize);
|
BUG_ON(!pgsize);
|
||||||
@ -1319,14 +1321,14 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
|
|||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
if (unlikely(domain->ops->map == NULL ||
|
if (unlikely(domain->ops->map == NULL ||
|
||||||
domain->ops->pgsize_bitmap == 0UL))
|
domain->pgsize_bitmap == 0UL))
|
||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
|
|
||||||
if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
|
if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
/* find out the minimum page size supported */
|
/* find out the minimum page size supported */
|
||||||
min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
|
min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* both the virtual address and the physical one, as well as
|
* both the virtual address and the physical one, as well as
|
||||||
@ -1373,14 +1375,14 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
|
|||||||
unsigned long orig_iova = iova;
|
unsigned long orig_iova = iova;
|
||||||
|
|
||||||
if (unlikely(domain->ops->unmap == NULL ||
|
if (unlikely(domain->ops->unmap == NULL ||
|
||||||
domain->ops->pgsize_bitmap == 0UL))
|
domain->pgsize_bitmap == 0UL))
|
||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
|
|
||||||
if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
|
if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
/* find out the minimum page size supported */
|
/* find out the minimum page size supported */
|
||||||
min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
|
min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The virtual address, as well as the size of the mapping, must be
|
* The virtual address, as well as the size of the mapping, must be
|
||||||
@ -1426,10 +1428,10 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
|
|||||||
unsigned int i, min_pagesz;
|
unsigned int i, min_pagesz;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (unlikely(domain->ops->pgsize_bitmap == 0UL))
|
if (unlikely(domain->pgsize_bitmap == 0UL))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
|
min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
|
||||||
|
|
||||||
for_each_sg(sg, s, nents, i) {
|
for_each_sg(sg, s, nents, i) {
|
||||||
phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
|
phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
|
||||||
@ -1510,7 +1512,7 @@ int iommu_domain_get_attr(struct iommu_domain *domain,
|
|||||||
break;
|
break;
|
||||||
case DOMAIN_ATTR_PAGING:
|
case DOMAIN_ATTR_PAGING:
|
||||||
paging = data;
|
paging = data;
|
||||||
*paging = (domain->ops->pgsize_bitmap != 0UL);
|
*paging = (domain->pgsize_bitmap != 0UL);
|
||||||
break;
|
break;
|
||||||
case DOMAIN_ATTR_WINDOWS:
|
case DOMAIN_ATTR_WINDOWS:
|
||||||
count = data;
|
count = data;
|
||||||
|
@ -264,7 +264,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* Update our support page sizes bitmap */
|
/* Update our support page sizes bitmap */
|
||||||
mtk_iommu_ops.pgsize_bitmap = dom->cfg.pgsize_bitmap;
|
dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
|
||||||
|
|
||||||
writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
|
writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
|
||||||
data->base + REG_MMU_PT_BASE_ADDR);
|
data->base + REG_MMU_PT_BASE_ADDR);
|
||||||
|
@ -407,7 +407,7 @@ static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
|
|||||||
|
|
||||||
mutex_lock(&iommu->lock);
|
mutex_lock(&iommu->lock);
|
||||||
list_for_each_entry(domain, &iommu->domain_list, next)
|
list_for_each_entry(domain, &iommu->domain_list, next)
|
||||||
bitmap &= domain->domain->ops->pgsize_bitmap;
|
bitmap &= domain->domain->pgsize_bitmap;
|
||||||
mutex_unlock(&iommu->lock);
|
mutex_unlock(&iommu->lock);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -78,6 +78,7 @@ struct iommu_domain_geometry {
|
|||||||
struct iommu_domain {
|
struct iommu_domain {
|
||||||
unsigned type;
|
unsigned type;
|
||||||
const struct iommu_ops *ops;
|
const struct iommu_ops *ops;
|
||||||
|
unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
|
||||||
iommu_fault_handler_t handler;
|
iommu_fault_handler_t handler;
|
||||||
void *handler_token;
|
void *handler_token;
|
||||||
struct iommu_domain_geometry geometry;
|
struct iommu_domain_geometry geometry;
|
||||||
@ -155,7 +156,7 @@ struct iommu_dm_region {
|
|||||||
* @domain_set_windows: Set the number of windows for a domain
|
* @domain_set_windows: Set the number of windows for a domain
|
||||||
* @domain_get_windows: Return the number of windows for a domain
|
* @domain_get_windows: Return the number of windows for a domain
|
||||||
* @of_xlate: add OF master IDs to iommu grouping
|
* @of_xlate: add OF master IDs to iommu grouping
|
||||||
* @pgsize_bitmap: bitmap of supported page sizes
|
* @pgsize_bitmap: bitmap of all possible supported page sizes
|
||||||
*/
|
*/
|
||||||
struct iommu_ops {
|
struct iommu_ops {
|
||||||
bool (*capable)(enum iommu_cap);
|
bool (*capable)(enum iommu_cap);
|
||||||
|
Loading…
Reference in New Issue
Block a user