vfio: Change vfio_external_check_extension() to vfio_file_enforced_coherent()

Instead of a general extension check change the function into a limited
test if the iommu_domain has enforced coherency, which is the only thing
kvm needs to query.

Make the new op self contained by properly refcounting the container
before touching it.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/5-v3-f7729924a7ea+25e33-vfio_kvm_no_group_jgg@nvidia.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
This commit is contained in:
Jason Gunthorpe 2022-05-04 16:14:43 -03:00 committed by Alex Williamson
parent c38ff5b0c3
commit a905ad043f
3 changed files with 36 additions and 13 deletions

View File

@@ -1701,11 +1701,35 @@ struct iommu_group *vfio_file_iommu_group(struct file *file)
}
EXPORT_SYMBOL_GPL(vfio_file_iommu_group);
/*
 * NOTE(review): this span is a web-rendered unified diff with the +/- column
 * stripped, so removed (pre-patch) and added (post-patch) lines are
 * interleaved. Comments below mark which lines the commit removed; the
 * surviving post-patch function is vfio_file_enforced_coherent().
 */
/* removed by this commit: old name took a vfio_group and a raw extension arg */
long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
/**
 * vfio_file_enforced_coherent - True if the DMA associated with the VFIO file
 * is always CPU cache coherent
 * @file: VFIO group file
 *
 * Enforced coherency means that the IOMMU ignores things like the PCIe no-snoop
 * bit in DMA transactions. A return of false indicates that the user has
 * rights to access additional instructions such as wbinvd on x86.
 */
bool vfio_file_enforced_coherent(struct file *file)
{
/* removed by this commit: old one-line body forwarding the caller's arg */
return vfio_ioctl_check_extension(group->container, arg);
struct vfio_group *group = file->private_data;
bool ret;
/* Reject files that are not VFIO group files before touching private_data. */
if (file->f_op != &vfio_group_fops)
return true;
/*
 * Since the coherency state is determined only once a container is
 * attached the user must do so before they can prove they have
 * permission.
 */
if (vfio_group_add_container_user(group))
return true;
/* Refcount held across the container query, then released again. */
ret = vfio_ioctl_check_extension(group->container, VFIO_DMA_CC_IOMMU);
vfio_group_try_dissolve_container(group);
return ret;
}
/* removed by this commit: export under the old symbol name */
EXPORT_SYMBOL_GPL(vfio_external_check_extension);
EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent);
/*
* Sub-module support

View File

@@ -139,8 +139,7 @@ int vfio_mig_get_next_state(struct vfio_device *device,
/*
 * NOTE(review): rendered diff without +/- markers. The two-line extern
 * prototype for vfio_external_check_extension() below was removed by this
 * commit and replaced with the one-line vfio_file_enforced_coherent() one.
 */
extern struct vfio_group *vfio_group_get_external_user(struct file *filep);
extern void vfio_group_put_external_user(struct vfio_group *group);
extern struct iommu_group *vfio_file_iommu_group(struct file *file);
/* removed by this commit: */
extern long vfio_external_check_extension(struct vfio_group *group,
unsigned long arg);
extern bool vfio_file_enforced_coherent(struct file *file);
#define VFIO_PIN_PAGES_MAX_ENTRIES (PAGE_SIZE/sizeof(unsigned long))

View File

@@ -75,20 +75,20 @@ static void kvm_vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
symbol_put(vfio_group_set_kvm);
}
/*
 * NOTE(review): rendered diff without +/- markers; the old
 * kvm_vfio_group_is_coherent() lines are interleaved with the new
 * kvm_vfio_file_enforced_coherent() lines below. The wrapper looks up the
 * VFIO export at runtime via symbol_get() so KVM has no hard module
 * dependency on vfio.ko.
 */
/* removed by this commit: old wrapper name with vfio_group-based signature */
static bool kvm_vfio_group_is_coherent(struct vfio_group *vfio_group)
static bool kvm_vfio_file_enforced_coherent(struct file *file)
{
/* removed by this commit: old long-returning function-pointer locals */
long (*fn)(struct vfio_group *, unsigned long);
long ret;
bool (*fn)(struct file *file);
bool ret;
/* removed by this commit: lookup of the old exported symbol */
fn = symbol_get(vfio_external_check_extension);
fn = symbol_get(vfio_file_enforced_coherent);
if (!fn)
return false;
/* removed by this commit: old call passed VFIO_DMA_CC_IOMMU explicitly */
ret = fn(vfio_group, VFIO_DMA_CC_IOMMU);
ret = fn(file);
/* removed by this commit: put of the old symbol */
symbol_put(vfio_external_check_extension);
symbol_put(vfio_file_enforced_coherent);
/* removed by this commit: old ">0" extension-ioctl return convention */
return ret > 0;
return ret;
}
#ifdef CONFIG_SPAPR_TCE_IOMMU
@@ -136,7 +136,7 @@ static void kvm_vfio_update_coherency(struct kvm_device *dev)
mutex_lock(&kv->lock);
list_for_each_entry(kvg, &kv->group_list, node) {
if (!kvm_vfio_group_is_coherent(kvg->vfio_group)) {
if (!kvm_vfio_file_enforced_coherent(kvg->file)) {
noncoherent = true;
break;
}