iommu: Use the right way to retrieve iommu_ops

The common iommu_ops is hooked to both the device and the domain. When a
helper has both device and domain pointers, the way it retrieves the
iommu_ops looks messy in the iommu core. Sort out the way to get the
iommu_ops: device-related helpers go through the device pointer, while
domain-related ones go through the domain pointer.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20220216025249.3459465-8-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Lu Baolu, 2022-02-16 10:52:47 +08:00 (committed by Joerg Roedel)
parent 7eef7f6700
commit 3f6634d997
2 changed files with 36 additions and 25 deletions
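To illustrate the convention the patch settles on, here is a rough sketch. Both helper functions below are hypothetical, not part of the patch; they only contrast the device path (dev_iommu_ops()) against the domain path (domain->ops):

#include <linux/iommu.h>

/*
 * Hypothetical helpers sketching the convention: device-related code
 * resolves the ops through the device pointer, domain-related code
 * through the domain pointer. Neither function exists in the kernel.
 */
static void example_device_helper(struct device *dev, struct list_head *list)
{
        const struct iommu_ops *ops = dev_iommu_ops(dev);

        /* Device-related callback: reached via the device. */
        if (ops->get_resv_regions)
                ops->get_resv_regions(dev, list);
}

static phys_addr_t example_domain_helper(struct iommu_domain *domain,
                                         dma_addr_t iova)
{
        /* Domain-related callback: reached via the domain. */
        return domain->ops->iova_to_phys(domain, iova);
}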

diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -323,13 +323,14 @@ err_out:
 
 void iommu_release_device(struct device *dev)
 {
-        const struct iommu_ops *ops = dev->bus->iommu_ops;
+        const struct iommu_ops *ops;
 
         if (!dev->iommu)
                 return;
 
         iommu_device_unlink(dev->iommu->iommu_dev, dev);
 
+        ops = dev_iommu_ops(dev);
         ops->release_device(dev);
 
         iommu_group_remove_device(dev);
@@ -833,8 +834,10 @@ out:
 static bool iommu_is_attach_deferred(struct iommu_domain *domain,
                                      struct device *dev)
 {
-        if (domain->ops->is_attach_deferred)
-                return domain->ops->is_attach_deferred(domain, dev);
+        const struct iommu_ops *ops = dev_iommu_ops(dev);
+
+        if (ops->is_attach_deferred)
+                return ops->is_attach_deferred(domain, dev);
 
         return false;
 }
@@ -1252,10 +1255,10 @@ int iommu_page_response(struct device *dev,
         struct iommu_fault_event *evt;
         struct iommu_fault_page_request *prm;
         struct dev_iommu *param = dev->iommu;
+        const struct iommu_ops *ops = dev_iommu_ops(dev);
         bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
-        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 
-        if (!domain || !domain->ops->page_response)
+        if (!ops->page_response)
                 return -ENODEV;
 
         if (!param || !param->fault_param)
@@ -1296,7 +1299,7 @@ int iommu_page_response(struct device *dev,
                         msg->pasid = 0;
                 }
 
-                ret = domain->ops->page_response(dev, evt, msg);
+                ret = ops->page_response(dev, evt, msg);
                 list_del(&evt->list);
                 kfree(evt);
                 break;
@@ -1521,7 +1524,7 @@ EXPORT_SYMBOL_GPL(fsl_mc_device_group);
 
 static int iommu_get_def_domain_type(struct device *dev)
 {
-        const struct iommu_ops *ops = dev->bus->iommu_ops;
+        const struct iommu_ops *ops = dev_iommu_ops(dev);
 
         if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
                 return IOMMU_DOMAIN_DMA;
@@ -1580,7 +1583,7 @@ static int iommu_alloc_default_domain(struct iommu_group *group,
  */
 static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
 {
-        const struct iommu_ops *ops = dev->bus->iommu_ops;
+        const struct iommu_ops *ops = dev_iommu_ops(dev);
         struct iommu_group *group;
         int ret;
 
@@ -1588,9 +1591,6 @@ static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
         if (group)
                 return group;
 
-        if (!ops)
-                return ERR_PTR(-EINVAL);
-
         group = ops->device_group(dev);
         if (WARN_ON_ONCE(group == NULL))
                 return ERR_PTR(-EINVAL);
@@ -1759,10 +1759,10 @@ static int __iommu_group_dma_attach(struct iommu_group *group)
 
 static int iommu_group_do_probe_finalize(struct device *dev, void *data)
 {
-        struct iommu_domain *domain = data;
+        const struct iommu_ops *ops = dev_iommu_ops(dev);
 
-        if (domain->ops->probe_finalize)
-                domain->ops->probe_finalize(dev);
+        if (ops->probe_finalize)
+                ops->probe_finalize(dev);
 
         return 0;
 }
@@ -2020,7 +2020,7 @@ EXPORT_SYMBOL_GPL(iommu_attach_device);
 
 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
 {
-        const struct iommu_ops *ops = domain->ops;
+        const struct iommu_ops *ops = dev_iommu_ops(dev);
 
         if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev))
                 return __iommu_attach_device(domain, dev);
@@ -2579,17 +2579,17 @@ EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);
 
 void iommu_get_resv_regions(struct device *dev, struct list_head *list)
 {
-        const struct iommu_ops *ops = dev->bus->iommu_ops;
+        const struct iommu_ops *ops = dev_iommu_ops(dev);
 
-        if (ops && ops->get_resv_regions)
+        if (ops->get_resv_regions)
                 ops->get_resv_regions(dev, list);
 }
 
 void iommu_put_resv_regions(struct device *dev, struct list_head *list)
 {
-        const struct iommu_ops *ops = dev->bus->iommu_ops;
+        const struct iommu_ops *ops = dev_iommu_ops(dev);
 
-        if (ops && ops->put_resv_regions)
+        if (ops->put_resv_regions)
                 ops->put_resv_regions(dev, list);
 }
 
@@ -2794,9 +2794,9 @@ iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
 {
         struct iommu_group *group;
         struct iommu_sva *handle = ERR_PTR(-EINVAL);
-        const struct iommu_ops *ops = dev->bus->iommu_ops;
+        const struct iommu_ops *ops = dev_iommu_ops(dev);
 
-        if (!ops || !ops->sva_bind)
+        if (!ops->sva_bind)
                 return ERR_PTR(-ENODEV);
 
         group = iommu_group_get(dev);
@@ -2837,9 +2837,9 @@ void iommu_sva_unbind_device(struct iommu_sva *handle)
 {
         struct iommu_group *group;
         struct device *dev = handle->dev;
-        const struct iommu_ops *ops = dev->bus->iommu_ops;
+        const struct iommu_ops *ops = dev_iommu_ops(dev);
 
-        if (!ops || !ops->sva_unbind)
+        if (!ops->sva_unbind)
                 return;
 
         group = iommu_group_get(dev);
@@ -2856,9 +2856,9 @@ EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
 
 u32 iommu_sva_get_pasid(struct iommu_sva *handle)
 {
-        const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
+        const struct iommu_ops *ops = dev_iommu_ops(handle->dev);
 
-        if (!ops || !ops->sva_get_pasid)
+        if (!ops->sva_get_pasid)
                 return IOMMU_PASID_INVALID;
 
         return ops->sva_get_pasid(handle);

diff --git a/include/linux/iommu.h b/include/linux/iommu.h
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -381,6 +381,17 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
         };
 }
 
+static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
+{
+        /*
+         * Assume that valid ops must be installed if iommu_probe_device()
+         * has succeeded. The device ops are essentially for internal use
+         * within the IOMMU subsystem itself, so we should be able to trust
+         * ourselves not to misuse the helper.
+         */
+        return dev->iommu->iommu_dev->ops;
+}
+
 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE        1 /* Device added */
 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE        2 /* Pre Device removed */
 #define IOMMU_GROUP_NOTIFY_BIND_DRIVER       3 /* Pre Driver bind */
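For context, note what the new helper assumes: dev_iommu_ops() dereferences dev->iommu->iommu_dev without checking, so it is only safe once iommu_probe_device() has succeeded. A minimal hypothetical caller (mirroring the iommu_release_device() hunk above, where the guard lives) might look like this:

#include <linux/iommu.h>

/*
 * Hypothetical caller, not kernel code. The NULL check on dev->iommu
 * mirrors iommu_release_device() above; without it, dev_iommu_ops()
 * would dereference a NULL pointer for a device that was never probed.
 */
static void example_release_path(struct device *dev)
{
        const struct iommu_ops *ops;

        if (!dev->iommu)        /* never probed: ops are not installed */
                return;

        ops = dev_iommu_ops(dev);       /* safe now: probe succeeded */
        if (ops->release_device)
                ops->release_device(dev);
}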