// SPDX-License-Identifier: GPL-2.0
/*
 * Helpers for IOMMU drivers implementing SVA
 */
#include <linux/mmu_context.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/iommu.h>

#include "iommu-priv.h"

static DEFINE_MUTEX(iommu_sva_lock);
static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
						   struct mm_struct *mm);

/* Allocate a PASID for the mm within range (inclusive) */
static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct device *dev)
{
	struct iommu_mm_data *iommu_mm;
	ioasid_t pasid;

	lockdep_assert_held(&iommu_sva_lock);

	if (!arch_pgtable_dma_compat(mm))
		return ERR_PTR(-EBUSY);

	iommu_mm = mm->iommu_mm;
	/* Is a PASID already associated with this mm? */
	if (iommu_mm) {
		if (iommu_mm->pasid >= dev->iommu->max_pasids)
			return ERR_PTR(-EOVERFLOW);
		return iommu_mm;
	}

	iommu_mm = kzalloc(sizeof(struct iommu_mm_data), GFP_KERNEL);
	if (!iommu_mm)
		return ERR_PTR(-ENOMEM);

	pasid = iommu_alloc_global_pasid(dev);
	if (pasid == IOMMU_PASID_INVALID) {
		kfree(iommu_mm);
		return ERR_PTR(-ENOSPC);
	}
	iommu_mm->pasid = pasid;
	INIT_LIST_HEAD(&iommu_mm->sva_domains);
	/*
	 * Make sure the write to mm->iommu_mm is not reordered in front of
	 * initialization to iommu_mm fields. If it does, readers may see a
	 * valid iommu_mm with uninitialized values.
	 */
	smp_store_release(&mm->iommu_mm, iommu_mm);
	return iommu_mm;
}
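
/*
 * Illustrative sketch, not part of this file: the smp_store_release() above
 * pairs with an acquire load on the reader side, so a lockless reader that
 * observes a non-NULL mm->iommu_mm also observes the pasid and sva_domains
 * fields initialized before publication. The helper name below is made up.
 */
static inline struct iommu_mm_data *example_iommu_mm_peek(struct mm_struct *mm)
{
	/* Pairs with smp_store_release() in iommu_alloc_mm_data(). */
	return smp_load_acquire(&mm->iommu_mm);
}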

/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to mm_users
 *
 * Create a bond between device and address space, allowing the device to
 * access the mm using the PASID returned by iommu_sva_get_pasid(). If a
 * bond already exists between @dev and @mm, an additional internal
 * reference is taken. Caller must call iommu_sva_unbind_device()
 * to release each reference.
 *
 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
 * initialize the required SVA features.
 *
 * On error, returns an ERR_PTR value.
 */
struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	struct iommu_group *group = dev->iommu_group;
	struct iommu_attach_handle *attach_handle;
	struct iommu_mm_data *iommu_mm;
	struct iommu_domain *domain;
	struct iommu_sva *handle;
	int ret;

	if (!group)
		return ERR_PTR(-ENODEV);

	mutex_lock(&iommu_sva_lock);

	/* Allocate mm->pasid if necessary. */
	iommu_mm = iommu_alloc_mm_data(mm, dev);
	if (IS_ERR(iommu_mm)) {
		ret = PTR_ERR(iommu_mm);
		goto out_unlock;
	}

	/* A bond already exists, just take a reference. */
	attach_handle = iommu_attach_handle_get(group, iommu_mm->pasid, IOMMU_DOMAIN_SVA);
	if (!IS_ERR(attach_handle)) {
		handle = container_of(attach_handle, struct iommu_sva, handle);
		if (attach_handle->domain->mm != mm) {
			ret = -EBUSY;
			goto out_unlock;
		}
		refcount_inc(&handle->users);
		mutex_unlock(&iommu_sva_lock);
		return handle;
	}

	if (PTR_ERR(attach_handle) != -ENOENT) {
		ret = PTR_ERR(attach_handle);
		goto out_unlock;
	}

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* Search for an existing domain. */
	list_for_each_entry(domain, &mm->iommu_mm->sva_domains, next) {
		ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid,
						&handle->handle);
		if (!ret) {
			domain->users++;
			goto out;
		}
	}

	/* Allocate a new domain and set it on device pasid. */
	domain = iommu_sva_domain_alloc(dev, mm);
	if (IS_ERR(domain)) {
		ret = PTR_ERR(domain);
		goto out_free_handle;
	}

	ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid,
					&handle->handle);
	if (ret)
		goto out_free_domain;
	domain->users = 1;
	list_add(&domain->next, &mm->iommu_mm->sva_domains);

out:
	refcount_set(&handle->users, 1);
	mutex_unlock(&iommu_sva_lock);
	handle->dev = dev;
	return handle;

out_free_domain:
	iommu_domain_free(domain);
out_free_handle:
	kfree(handle);
out_unlock:
	mutex_unlock(&iommu_sva_lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);

/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put reference to a bond between device and address space. The device should
 * not be issuing any more transactions for this PASID. All outstanding page
 * requests for this PASID must have been flushed to the IOMMU.
 */
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->handle.domain;
	struct iommu_mm_data *iommu_mm = domain->mm->iommu_mm;
	struct device *dev = handle->dev;

	mutex_lock(&iommu_sva_lock);
	if (!refcount_dec_and_test(&handle->users)) {
		mutex_unlock(&iommu_sva_lock);
		return;
	}

	iommu_detach_device_pasid(domain, dev, iommu_mm->pasid);
	if (--domain->users == 0) {
		list_del(&domain->next);
		iommu_domain_free(domain);
	}
	mutex_unlock(&iommu_sva_lock);
	kfree(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);

u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->handle.domain;

	return mm_get_enqcmd_pasid(domain->mm);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
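
/*
 * Illustrative usage sketch, not part of this file: a hypothetical driver
 * consuming the bind/get_pasid/unbind API above. example_sva_bind() is a
 * made-up name; a real caller must also have enabled IOMMU_DEV_FEAT_SVA on
 * @dev and hold a reference on @mm's mm_users across the bind.
 */
static int example_sva_bind(struct device *dev, struct mm_struct *mm)
{
	struct iommu_sva *handle;
	u32 pasid;

	handle = iommu_sva_bind_device(dev, mm);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	/* Program the PASID into the device's contexts and run DMA... */
	dev_dbg(dev, "bound PASID %u\n", pasid);

	iommu_sva_unbind_device(handle);
	return 0;
}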

void mm_pasid_drop(struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm = mm->iommu_mm;

	if (!iommu_mm)
		return;

	iommu_free_global_pasid(iommu_mm->pasid);
	kfree(iommu_mm);
}

/*
 * I/O page fault handler for SVA
 */
static enum iommu_page_response_code
iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm)
{
	vm_fault_t ret;
	struct vm_area_struct *vma;
	unsigned int access_flags = 0;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;
	struct iommu_fault_page_request *prm = &fault->prm;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;

	if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
		return status;

	if (!mmget_not_zero(mm))
		return status;

	mmap_read_lock(mm);

	vma = vma_lookup(mm, prm->addr);
	if (!vma)
		/* Unmapped area */
		goto out_put_mm;

	if (prm->perm & IOMMU_FAULT_PERM_READ)
		access_flags |= VM_READ;

	if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
		access_flags |= VM_WRITE;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
		access_flags |= VM_EXEC;
		fault_flags |= FAULT_FLAG_INSTRUCTION;
	}

	if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
		fault_flags |= FAULT_FLAG_USER;

	if (access_flags & ~vma->vm_flags)
		/* Access fault */
		goto out_put_mm;

	ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
	status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
		IOMMU_PAGE_RESP_SUCCESS;

out_put_mm:
	mmap_read_unlock(mm);
	mmput(mm);

	return status;
}

static void iommu_sva_handle_iopf(struct work_struct *work)
{
	struct iopf_fault *iopf;
	struct iopf_group *group;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;

	group = container_of(work, struct iopf_group, work);
	list_for_each_entry(iopf, &group->faults, list) {
		/*
		 * For the moment, errors are sticky: don't handle subsequent
		 * faults in the group if there is an error.
		 */
		if (status != IOMMU_PAGE_RESP_SUCCESS)
			break;

		status = iommu_sva_handle_mm(&iopf->fault,
					     group->attach_handle->domain->mm);
	}

	iopf_group_response(group, status);
	iopf_free_group(group);
}

static int iommu_sva_iopf_handler(struct iopf_group *group)
{
	struct iommu_fault_param *fault_param = group->fault_param;

	INIT_WORK(&group->work, iommu_sva_handle_iopf);
	if (!queue_work(fault_param->queue->wq, &group->work))
		return -EBUSY;

	return 0;
}

static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
						   struct mm_struct *mm)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_domain *domain;

	if (ops->domain_alloc_sva) {
		domain = ops->domain_alloc_sva(dev, mm);
		if (IS_ERR(domain))
			return domain;
	} else {
		domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
		if (!domain)
			return ERR_PTR(-ENOMEM);
	}

	domain->type = IOMMU_DOMAIN_SVA;
	mmgrab(mm);
	domain->mm = mm;
	domain->owner = ops;
	domain->iopf_handler = iommu_sva_iopf_handler;

	return domain;
}