iommufd: Keep track of each device's reserved regions instead of groups

The driver facing API in the iommu core makes the reserved regions
per-device. An algorithm in the core code consolidates the regions of all
the devices in a group to return the group view.
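
For reference, the core interfaces involved look roughly like this (a
sketch of the declarations in include/linux/iommu.h; field order in the
struct is illustrative rather than authoritative):

  /*
   * Reserved regions are returned as a list of these, allocated by the
   * IOMMU driver; the optional free callback overrides plain kfree().
   */
  struct iommu_resv_region {
          struct list_head        list;
          phys_addr_t             start;
          size_t                  length;
          int                     prot;
          enum iommu_resv_type    type;  /* e.g. IOMMU_RESV_DIRECT_RELAXABLE */
          void (*free)(struct device *dev, struct iommu_resv_region *region);
  };

  /* Per-device: the driver-facing interface. */
  void iommu_get_resv_regions(struct device *dev, struct list_head *list);
  void iommu_put_resv_regions(struct device *dev, struct list_head *list);

  /* Per-group: the core consolidates every member device's regions. */
  int iommu_get_group_resv_regions(struct iommu_group *group,
                                   struct list_head *head);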

To allow for devices to be hotplugged into the group, iommufd would
reload the entire group's reserved regions each time a device was
attached, just in case they had changed.

Further, iommufd already has to deal with duplicated/overlapping
reserved regions, as it must union all the groups together.
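
A minimal, hypothetical illustration of why the overlap is tolerable
(this is not iommufd's actual data structure; it only assumes
<linux/interval_tree.h>): an interval tree does not require its nodes
to be disjoint, so every region can be inserted verbatim and the union
simply falls out of lookups.

  #include <linux/interval_tree.h>
  #include <linux/slab.h>

  static struct rb_root_cached reserved_itree = RB_ROOT_CACHED;

  /* Record a reserved span; overlapping an existing node is fine. */
  static int reserve_span(unsigned long start, unsigned long last)
  {
          struct interval_tree_node *node;

          node = kzalloc(sizeof(*node), GFP_KERNEL);
          if (!node)
                  return -ENOMEM;
          node->start = start;    /* inclusive */
          node->last = last;      /* inclusive */
          interval_tree_insert(node, &reserved_itree);
          return 0;
  }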

Thus simplify all of this to just use the device reserved regions
interface directly from the iommu driver.
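
In outline, the simplified flow is the usual get/iterate/put pattern (a
sketch with the MSI bookkeeping and error handling elided; the diff
below has the real version):

  LIST_HEAD(resv_regions);
  struct iommu_resv_region *resv;

  iommu_get_resv_regions(dev, &resv_regions);     /* this device only */
  list_for_each_entry(resv, &resv_regions, list) {
          /* Relaxable direct mappings do not need a permanent hole. */
          if (resv->type == IOMMU_RESV_DIRECT_RELAXABLE)
                  continue;
          /* reserve resv->start .. resv->start + resv->length - 1 */
  }
  iommu_put_resv_regions(dev, &resv_regions);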

Link: https://lore.kernel.org/r/5-v8-6659224517ea+532-iommufd_alloc_jgg@nvidia.com
Suggested-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

@@ -356,9 +356,8 @@ int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
                 }
         }
 
-        rc = iopt_table_enforce_group_resv_regions(&hwpt->ioas->iopt, idev->dev,
-                                                   idev->igroup->group,
-                                                   &sw_msi_start);
+        rc = iopt_table_enforce_dev_resv_regions(
+                &hwpt->ioas->iopt, idev->dev, &sw_msi_start);
         if (rc)
                 goto err_unlock;
 

@@ -1169,25 +1169,22 @@ void iopt_remove_access(struct io_pagetable *iopt,
         up_write(&iopt->domains_rwsem);
 }
 
-/* Narrow the valid_iova_itree to include reserved ranges from a group. */
-int iopt_table_enforce_group_resv_regions(struct io_pagetable *iopt,
-                                          struct device *device,
-                                          struct iommu_group *group,
-                                          phys_addr_t *sw_msi_start)
+/* Narrow the valid_iova_itree to include reserved ranges from a device. */
+int iopt_table_enforce_dev_resv_regions(struct io_pagetable *iopt,
+                                        struct device *dev,
+                                        phys_addr_t *sw_msi_start)
 {
         struct iommu_resv_region *resv;
-        struct iommu_resv_region *tmp;
-        LIST_HEAD(group_resv_regions);
+        LIST_HEAD(resv_regions);
         unsigned int num_hw_msi = 0;
         unsigned int num_sw_msi = 0;
         int rc;
 
         down_write(&iopt->iova_rwsem);
-        rc = iommu_get_group_resv_regions(group, &group_resv_regions);
-        if (rc)
-                goto out_unlock;
+        /* FIXME: drivers allocate memory but there is no failure propagated */
+        iommu_get_resv_regions(dev, &resv_regions);
 
-        list_for_each_entry(resv, &group_resv_regions, list) {
+        list_for_each_entry(resv, &resv_regions, list) {
                 if (resv->type == IOMMU_RESV_DIRECT_RELAXABLE)
                         continue;
 
@@ -1199,7 +1196,7 @@ int iopt_table_enforce_group_resv_regions(struct io_pagetable *iopt,
                 }
 
                 rc = iopt_reserve_iova(iopt, resv->start,
-                                       resv->length - 1 + resv->start, device);
+                                       resv->length - 1 + resv->start, dev);
                 if (rc)
                         goto out_reserved;
         }
@@ -1214,11 +1211,9 @@ int iopt_table_enforce_group_resv_regions(struct io_pagetable *iopt,
         goto out_free_resv;
 
 out_reserved:
-        __iopt_remove_reserved_iova(iopt, device);
+        __iopt_remove_reserved_iova(iopt, dev);
 out_free_resv:
-        list_for_each_entry_safe(resv, tmp, &group_resv_regions, list)
-                kfree(resv);
-out_unlock:
+        iommu_put_resv_regions(dev, &resv_regions);
         up_write(&iopt->iova_rwsem);
         return rc;
 }
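
Note the cleanup change above: instead of open-coding kfree() on each
list entry, the regions are handed back through iommu_put_resv_regions().
Simplified, that helper (in drivers/iommu/iommu.c) runs a driver-supplied
free callback when one exists:

  void iommu_put_resv_regions(struct device *dev, struct list_head *list)
  {
          struct iommu_resv_region *entry, *next;

          list_for_each_entry_safe(entry, next, list, list) {
                  if (entry->free)
                          entry->free(dev, entry);
                  else
                          kfree(entry);
          }
  }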

@@ -76,10 +76,9 @@ int iopt_table_add_domain(struct io_pagetable *iopt,
                           struct iommu_domain *domain);
 void iopt_table_remove_domain(struct io_pagetable *iopt,
                               struct iommu_domain *domain);
-int iopt_table_enforce_group_resv_regions(struct io_pagetable *iopt,
-                                          struct device *device,
-                                          struct iommu_group *group,
-                                          phys_addr_t *sw_msi_start);
+int iopt_table_enforce_dev_resv_regions(struct io_pagetable *iopt,
+                                        struct device *dev,
+                                        phys_addr_t *sw_msi_start);
 int iopt_set_allow_iova(struct io_pagetable *iopt,
                         struct rb_root_cached *allowed_iova);
 int iopt_reserve_iova(struct io_pagetable *iopt, unsigned long start,