VFIO updates for v5.8-rc1
Merge tag 'vfio-v5.8-rc1' of git://github.com/awilliam/linux-vfio

Pull VFIO updates from Alex Williamson:

 - Block accesses to disabled MMIO space (Alex Williamson)
 - VFIO device migration API (Kirti Wankhede)
 - type1 IOMMU dirty bitmap API and implementation (Kirti Wankhede)
 - PCI NULL capability masking (Alex Williamson)
 - Memory leak fixes (Qian Cai)
 - Reference leak fix (Qiushi Wu)

* tag 'vfio-v5.8-rc1' of git://github.com/awilliam/linux-vfio:
  vfio iommu: typecast corrections
  vfio iommu: Use shift operation for 64-bit integer division
  vfio/mdev: Fix reference count leak in add_mdev_supported_type
  vfio: Selective dirty page tracking if IOMMU backed device pins pages
  vfio iommu: Add migration capability to report supported features
  vfio iommu: Update UNMAP_DMA ioctl to get dirty bitmap before unmap
  vfio iommu: Implementation of ioctl for dirty pages tracking
  vfio iommu: Add ioctl definition for dirty pages tracking
  vfio iommu: Cache pgsize_bitmap in struct vfio_iommu
  vfio iommu: Remove atomicity of ref_count of pinned pages
  vfio: UAPI for migration interface for device state
  vfio/pci: fix memory leaks of eventfd ctx
  vfio/pci: fix memory leaks in alloc_perm_bits()
  vfio-pci: Mask cap zero
  vfio-pci: Invalidate mmaps and block MMIO access on disabled memory
  vfio-pci: Fault mmaps to enable vma tracking
  vfio/type1: Support faulting PFNMAP vmas
commit 5a36f0f3f5
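The centerpiece of this pull is the dirty-page-tracking UAPI used for live migration. A minimal userspace sketch of the intended flow follows; it is illustrative only, and the structure layouts are assumed from the ioctl and field names that appear in the diff below (VFIO_IOMMU_DIRTY_PAGES, vfio_iommu_type1_dirty_bitmap, vfio_iommu_type1_dirty_bitmap_get), not checked against the final header.

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Start tracking, fetch the bitmap for one IOVA range, then stop.
 * 'pgsize' must be the smallest IOMMU page size reported by the
 * migration capability; one bit covers one such page. */
static int dirty_bitmap_roundtrip(int container, uint64_t iova,
				  uint64_t size, uint64_t pgsize)
{
	struct vfio_iommu_type1_dirty_bitmap start = {
		.argsz = sizeof(start),
		.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START,
	};
	struct vfio_iommu_type1_dirty_bitmap stop = {
		.argsz = sizeof(stop),
		.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP,
	};
	struct {
		struct vfio_iommu_type1_dirty_bitmap hdr;
		struct vfio_iommu_type1_dirty_bitmap_get range;
	} get = {
		.hdr.argsz = sizeof(get),
		.hdr.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP,
	};
	uint64_t nwords = (size / pgsize + 63) / 64;
	uint64_t *data = calloc(nwords, sizeof(*data));

	if (!data || ioctl(container, VFIO_IOMMU_DIRTY_PAGES, &start))
		return -1;

	get.range.iova = iova;
	get.range.size = size;
	get.range.bitmap.pgsize = pgsize;
	get.range.bitmap.size = nwords * sizeof(*data);
	get.range.bitmap.data = data;
	if (ioctl(container, VFIO_IOMMU_DIRTY_PAGES, &get))
		return -1;

	/* ... set bits in 'data' are pages dirtied since START ... */
	free(data);
	return ioctl(container, VFIO_IOMMU_DIRTY_PAGES, &stop);
}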
@@ -110,7 +110,7 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
 			   "%s-%s", dev_driver_string(parent->dev),
 			   group->name);
 	if (ret) {
-		kfree(type);
+		kobject_put(&type->kobj);
 		return ERR_PTR(ret);
 	}

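This is the "Fix reference count leak in add_mdev_supported_type" change: once kobject_init_and_add() has run, the kobject core owns a reference, so a failure path must drop it with kobject_put() and let the ktype's release callback free the object; a direct kfree() bypasses release() and leaks the reference and the allocated kobject name. A generic sketch of that pattern, with hypothetical type names:

#include <linux/kobject.h>
#include <linux/slab.h>

struct mytype {				/* hypothetical container object */
	struct kobject kobj;
	int payload;
};

static void mytype_release(struct kobject *kobj)
{
	/* Runs when the last reference drops; the only place the
	 * containing structure may be freed. */
	kfree(container_of(kobj, struct mytype, kobj));
}

static struct kobj_type mytype_ktype = {
	.release = mytype_release,
};

static struct mytype *mytype_create(struct kobject *parent, const char *name)
{
	struct mytype *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	int ret;

	if (!obj)
		return NULL;

	ret = kobject_init_and_add(&obj->kobj, &mytype_ktype, parent, "%s", name);
	if (ret) {
		/* Wrong: kfree(obj) would skip release() and leak the ref. */
		kobject_put(&obj->kobj);	/* drop ref; release() frees */
		return NULL;
	}
	return obj;
}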
@@ -26,6 +26,7 @@
 #include <linux/vfio.h>
 #include <linux/vgaarb.h>
 #include <linux/nospec.h>
+#include <linux/sched/mm.h>

 #include "vfio_pci_private.h"

@@ -184,6 +185,7 @@ no_mmap:

 static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
 static void vfio_pci_disable(struct vfio_pci_device *vdev);
+static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data);

 /*
  * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
@@ -519,6 +521,10 @@ static void vfio_pci_release(void *device_data)
 		vfio_pci_vf_token_user_add(vdev, -1);
 		vfio_spapr_pci_eeh_release(vdev->pdev);
 		vfio_pci_disable(vdev);
+		if (vdev->err_trigger)
+			eventfd_ctx_put(vdev->err_trigger);
+		if (vdev->req_trigger)
+			eventfd_ctx_put(vdev->req_trigger);
 	}

 	mutex_unlock(&vdev->reflck->lock);
@@ -736,6 +742,12 @@ int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
 	return 0;
 }

+struct vfio_devices {
+	struct vfio_device **devices;
+	int cur_index;
+	int max_index;
+};
+
 static long vfio_pci_ioctl(void *device_data,
			   unsigned int cmd, unsigned long arg)
 {
@@ -809,7 +821,7 @@ static long vfio_pci_ioctl(void *device_data,
 		{
 			void __iomem *io;
 			size_t size;
-			u16 orig_cmd;
+			u16 cmd;

 			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
 			info.flags = 0;
@@ -829,10 +841,7 @@ static long vfio_pci_ioctl(void *device_data,
 			 * Is it really there? Enable memory decode for
 			 * implicit access in pci_map_rom().
 			 */
-			pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd);
-			pci_write_config_word(pdev, PCI_COMMAND,
-					      orig_cmd | PCI_COMMAND_MEMORY);
-
+			cmd = vfio_pci_memory_lock_and_enable(vdev);
 			io = pci_map_rom(pdev, &size);
 			if (io) {
 				info.flags = VFIO_REGION_INFO_FLAG_READ;
@@ -840,8 +849,8 @@ static long vfio_pci_ioctl(void *device_data,
 			} else {
 				info.size = 0;
 			}
+			vfio_pci_memory_unlock_and_restore(vdev, cmd);

-			pci_write_config_word(pdev, PCI_COMMAND, orig_cmd);
 			break;
 		}
 		case VFIO_PCI_VGA_REGION_INDEX:
@@ -984,8 +993,16 @@ static long vfio_pci_ioctl(void *device_data,
 		return ret;

 	} else if (cmd == VFIO_DEVICE_RESET) {
-		return vdev->reset_works ?
-			pci_try_reset_function(vdev->pdev) : -EINVAL;
+		int ret;
+
+		if (!vdev->reset_works)
+			return -EINVAL;
+
+		vfio_pci_zap_and_down_write_memory_lock(vdev);
+		ret = pci_try_reset_function(vdev->pdev);
+		up_write(&vdev->memory_lock);
+
+		return ret;

 	} else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
 		struct vfio_pci_hot_reset_info hdr;
@@ -1065,8 +1082,9 @@ reset_info_exit:
 		int32_t *group_fds;
 		struct vfio_pci_group_entry *groups;
 		struct vfio_pci_group_info info;
+		struct vfio_devices devs = { .cur_index = 0 };
 		bool slot = false;
-		int i, count = 0, ret = 0;
+		int i, group_idx, mem_idx = 0, count = 0, ret = 0;

 		minsz = offsetofend(struct vfio_pci_hot_reset, count);

@@ -1118,9 +1136,9 @@ reset_info_exit:
 		 * user interface and store the group and iommu ID.  This
 		 * ensures the group is held across the reset.
 		 */
-		for (i = 0; i < hdr.count; i++) {
+		for (group_idx = 0; group_idx < hdr.count; group_idx++) {
 			struct vfio_group *group;
-			struct fd f = fdget(group_fds[i]);
+			struct fd f = fdget(group_fds[group_idx]);
 			if (!f.file) {
 				ret = -EBADF;
 				break;
@@ -1133,8 +1151,9 @@ reset_info_exit:
 				break;
 			}

-			groups[i].group = group;
-			groups[i].id = vfio_external_user_iommu_id(group);
+			groups[group_idx].group = group;
+			groups[group_idx].id =
+					vfio_external_user_iommu_id(group);
 		}

 		kfree(group_fds);
@@ -1153,13 +1172,63 @@ reset_info_exit:
 		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
 						    vfio_pci_validate_devs,
 						    &info, slot);
-		if (!ret)
-			/* User has access, do the reset */
-			ret = pci_reset_bus(vdev->pdev);
+		if (ret)
+			goto hot_reset_release;
+
+		devs.max_index = count;
+		devs.devices = kcalloc(count, sizeof(struct vfio_device *),
+				       GFP_KERNEL);
+		if (!devs.devices) {
+			ret = -ENOMEM;
+			goto hot_reset_release;
+		}
+
+		/*
+		 * We need to get memory_lock for each device, but devices
+		 * can share mmap_sem, therefore we need to zap and hold
+		 * the vma_lock for each device, and only then get each
+		 * memory_lock.
+		 */
+		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
+					    vfio_pci_try_zap_and_vma_lock_cb,
+					    &devs, slot);
+		if (ret)
+			goto hot_reset_release;
+
+		for (; mem_idx < devs.cur_index; mem_idx++) {
+			struct vfio_pci_device *tmp;
+
+			tmp = vfio_device_data(devs.devices[mem_idx]);
+
+			ret = down_write_trylock(&tmp->memory_lock);
+			if (!ret) {
+				ret = -EBUSY;
+				goto hot_reset_release;
+			}
+			mutex_unlock(&tmp->vma_lock);
+		}
+
+		/* User has access, do the reset */
+		ret = pci_reset_bus(vdev->pdev);

 hot_reset_release:
-		for (i--; i >= 0; i--)
-			vfio_group_put_external_user(groups[i].group);
+		for (i = 0; i < devs.cur_index; i++) {
+			struct vfio_device *device;
+			struct vfio_pci_device *tmp;
+
+			device = devs.devices[i];
+			tmp = vfio_device_data(device);
+
+			if (i < mem_idx)
+				up_write(&tmp->memory_lock);
+			else
+				mutex_unlock(&tmp->vma_lock);
+			vfio_device_put(device);
+		}
+		kfree(devs.devices);
+
+		for (group_idx--; group_idx >= 0; group_idx--)
+			vfio_group_put_external_user(groups[group_idx].group);

 		kfree(groups);
 		return ret;
@@ -1299,6 +1368,202 @@ static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
 	return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
 }

+/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */
+static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
+{
+	struct vfio_pci_mmap_vma *mmap_vma, *tmp;
+
+	/*
+	 * Lock ordering:
+	 * vma_lock is nested under mmap_sem for vm_ops callback paths.
+	 * The memory_lock semaphore is used by both code paths calling
+	 * into this function to zap vmas and the vm_ops.fault callback
+	 * to protect the memory enable state of the device.
+	 *
+	 * When zapping vmas we need to maintain the mmap_sem => vma_lock
+	 * ordering, which requires using vma_lock to walk vma_list to
+	 * acquire an mm, then dropping vma_lock to get the mmap_sem and
+	 * reacquiring vma_lock.  This logic is derived from similar
+	 * requirements in uverbs_user_mmap_disassociate().
+	 *
+	 * mmap_sem must always be the top-level lock when it is taken.
+	 * Therefore we can only hold the memory_lock write lock when
+	 * vma_list is empty, as we'd need to take mmap_sem to clear
+	 * entries.  vma_list can only be guaranteed empty when holding
+	 * vma_lock, thus memory_lock is nested under vma_lock.
+	 *
+	 * This enables the vm_ops.fault callback to acquire vma_lock,
+	 * followed by memory_lock read lock, while already holding
+	 * mmap_sem without risk of deadlock.
+	 */
+	while (1) {
+		struct mm_struct *mm = NULL;
+
+		if (try) {
+			if (!mutex_trylock(&vdev->vma_lock))
+				return 0;
+		} else {
+			mutex_lock(&vdev->vma_lock);
+		}
+		while (!list_empty(&vdev->vma_list)) {
+			mmap_vma = list_first_entry(&vdev->vma_list,
+						    struct vfio_pci_mmap_vma,
+						    vma_next);
+			mm = mmap_vma->vma->vm_mm;
+			if (mmget_not_zero(mm))
+				break;
+
+			list_del(&mmap_vma->vma_next);
+			kfree(mmap_vma);
+			mm = NULL;
+		}
+		if (!mm)
+			return 1;
+		mutex_unlock(&vdev->vma_lock);
+
+		if (try) {
+			if (!down_read_trylock(&mm->mmap_sem)) {
+				mmput(mm);
+				return 0;
+			}
+		} else {
+			down_read(&mm->mmap_sem);
+		}
+		if (mmget_still_valid(mm)) {
+			if (try) {
+				if (!mutex_trylock(&vdev->vma_lock)) {
+					up_read(&mm->mmap_sem);
+					mmput(mm);
+					return 0;
+				}
+			} else {
+				mutex_lock(&vdev->vma_lock);
+			}
+			list_for_each_entry_safe(mmap_vma, tmp,
+						 &vdev->vma_list, vma_next) {
+				struct vm_area_struct *vma = mmap_vma->vma;
+
+				if (vma->vm_mm != mm)
+					continue;
+
+				list_del(&mmap_vma->vma_next);
+				kfree(mmap_vma);
+
+				zap_vma_ptes(vma, vma->vm_start,
+					     vma->vm_end - vma->vm_start);
+			}
+			mutex_unlock(&vdev->vma_lock);
+		}
+		up_read(&mm->mmap_sem);
+		mmput(mm);
+	}
+}
+
+void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device *vdev)
+{
+	vfio_pci_zap_and_vma_lock(vdev, false);
+	down_write(&vdev->memory_lock);
+	mutex_unlock(&vdev->vma_lock);
+}
+
+u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev)
+{
+	u16 cmd;
+
+	down_write(&vdev->memory_lock);
+	pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd);
+	if (!(cmd & PCI_COMMAND_MEMORY))
+		pci_write_config_word(vdev->pdev, PCI_COMMAND,
+				      cmd | PCI_COMMAND_MEMORY);
+
+	return cmd;
+}
+
+void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, u16 cmd)
+{
+	pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd);
+	up_write(&vdev->memory_lock);
+}
+
+/* Caller holds vma_lock */
+static int __vfio_pci_add_vma(struct vfio_pci_device *vdev,
+			      struct vm_area_struct *vma)
+{
+	struct vfio_pci_mmap_vma *mmap_vma;
+
+	mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL);
+	if (!mmap_vma)
+		return -ENOMEM;
+
+	mmap_vma->vma = vma;
+	list_add(&mmap_vma->vma_next, &vdev->vma_list);
+
+	return 0;
+}
+
+/*
+ * Zap mmaps on open so that we can fault them in on access and therefore
+ * our vma_list only tracks mappings accessed since last zap.
+ */
+static void vfio_pci_mmap_open(struct vm_area_struct *vma)
+{
+	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+}
+
+static void vfio_pci_mmap_close(struct vm_area_struct *vma)
+{
+	struct vfio_pci_device *vdev = vma->vm_private_data;
+	struct vfio_pci_mmap_vma *mmap_vma;
+
+	mutex_lock(&vdev->vma_lock);
+	list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
+		if (mmap_vma->vma == vma) {
+			list_del(&mmap_vma->vma_next);
+			kfree(mmap_vma);
+			break;
+		}
+	}
+	mutex_unlock(&vdev->vma_lock);
+}
+
+static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct vfio_pci_device *vdev = vma->vm_private_data;
+	vm_fault_t ret = VM_FAULT_NOPAGE;
+
+	mutex_lock(&vdev->vma_lock);
+	down_read(&vdev->memory_lock);
+
+	if (!__vfio_pci_memory_enabled(vdev)) {
+		ret = VM_FAULT_SIGBUS;
+		mutex_unlock(&vdev->vma_lock);
+		goto up_out;
+	}
+
+	if (__vfio_pci_add_vma(vdev, vma)) {
+		ret = VM_FAULT_OOM;
+		mutex_unlock(&vdev->vma_lock);
+		goto up_out;
+	}
+
+	mutex_unlock(&vdev->vma_lock);
+
+	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+			    vma->vm_end - vma->vm_start, vma->vm_page_prot))
+		ret = VM_FAULT_SIGBUS;
+
+up_out:
+	up_read(&vdev->memory_lock);
+	return ret;
+}
+
+static const struct vm_operations_struct vfio_pci_mmap_ops = {
+	.open = vfio_pci_mmap_open,
+	.close = vfio_pci_mmap_close,
+	.fault = vfio_pci_mmap_fault,
+};
+
 static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
 {
 	struct vfio_pci_device *vdev = device_data;
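The vfio_pci_memory_lock_and_enable()/vfio_pci_memory_unlock_and_restore() pair introduced above brackets any device access that needs PCI memory decode enabled while the user may concurrently clear PCI_COMMAND_MEMORY through config space. A sketch of the calling pattern, using a hypothetical caller that is not part of this patch:

/*
 * Hypothetical example, not from this series: read a BAR register that
 * requires memory decode, racing safely against the user disabling it.
 */
static u32 example_read_reg(struct vfio_pci_device *vdev,
			    void __iomem *bar, u32 off)
{
	u16 cmd;
	u32 val;

	/* Takes memory_lock for write and forces PCI_COMMAND_MEMORY on,
	 * returning the previous command word. */
	cmd = vfio_pci_memory_lock_and_enable(vdev);

	val = ioread32(bar + off);

	/* Restores the saved command word and drops memory_lock. */
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	return val;
}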
@@ -1357,8 +1622,14 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;

-	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-			       req_len, vma->vm_page_prot);
+	/*
+	 * See remap_pfn_range(), called from vfio_pci_fault() but we can't
+	 * change vm_flags within the fault handler.  Set them now.
+	 */
+	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_ops = &vfio_pci_mmap_ops;
+
+	return 0;
 }

 static void vfio_pci_request(void *device_data, unsigned int count)
@@ -1608,6 +1879,9 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	spin_lock_init(&vdev->irqlock);
 	mutex_init(&vdev->ioeventfds_lock);
 	INIT_LIST_HEAD(&vdev->ioeventfds_list);
+	mutex_init(&vdev->vma_lock);
+	INIT_LIST_HEAD(&vdev->vma_list);
+	init_rwsem(&vdev->memory_lock);

 	ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
 	if (ret)
@@ -1861,12 +2135,6 @@ static void vfio_pci_reflck_put(struct vfio_pci_reflck *reflck)
 	kref_put_mutex(&reflck->kref, vfio_pci_reflck_release, &reflck_lock);
 }

-struct vfio_devices {
-	struct vfio_device **devices;
-	int cur_index;
-	int max_index;
-};
-
 static int vfio_pci_get_unused_devs(struct pci_dev *pdev, void *data)
 {
 	struct vfio_devices *devs = data;
@@ -1897,6 +2165,39 @@ static int vfio_pci_get_unused_devs(struct pci_dev *pdev, void *data)
 	return 0;
 }

+static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data)
+{
+	struct vfio_devices *devs = data;
+	struct vfio_device *device;
+	struct vfio_pci_device *vdev;
+
+	if (devs->cur_index == devs->max_index)
+		return -ENOSPC;
+
+	device = vfio_device_get_from_dev(&pdev->dev);
+	if (!device)
+		return -EINVAL;
+
+	if (pci_dev_driver(pdev) != &vfio_pci_driver) {
+		vfio_device_put(device);
+		return -EBUSY;
+	}
+
+	vdev = vfio_device_data(device);
+
+	/*
+	 * Locking multiple devices is prone to deadlock, runaway and
+	 * unwind if we hit contention.
+	 */
+	if (!vfio_pci_zap_and_vma_lock(vdev, true)) {
+		vfio_device_put(device);
+		return -EBUSY;
+	}
+
+	devs->devices[devs->cur_index++] = device;
+	return 0;
+}
+
 /*
  * If a bus or slot reset is available for the provided device and:
  *  - All of the devices affected by that bus or slot reset are unused
@@ -395,6 +395,14 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write)
 	*(__le32 *)(&p->write[off]) = cpu_to_le32(write);
 }

+/* Caller should hold memory_lock semaphore */
+bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
+{
+	u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
+
+	return cmd & PCI_COMMAND_MEMORY;
+}
+
 /*
  * Restore the *real* BARs after we detect a FLR or backdoor reset.
  * (backdoor = some device specific technique that we didn't catch)
@@ -556,13 +564,18 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,

 	new_cmd = le32_to_cpu(val);

-	phys_io = !!(phys_cmd & PCI_COMMAND_IO);
-	virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
-	new_io = !!(new_cmd & PCI_COMMAND_IO);
-
 	phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY);
 	virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY);
 	new_mem = !!(new_cmd & PCI_COMMAND_MEMORY);

+	phys_io = !!(phys_cmd & PCI_COMMAND_IO);
+	virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
+	new_io = !!(new_cmd & PCI_COMMAND_IO);
+
+	if (!new_mem)
+		vfio_pci_zap_and_down_write_memory_lock(vdev);
+	else
+		down_write(&vdev->memory_lock);
+
 	/*
 	 * If the user is writing mem/io enable (new_mem/io) and we
@@ -579,8 +592,11 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
 	}

 	count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
-	if (count < 0)
+	if (count < 0) {
+		if (offset == PCI_COMMAND)
+			up_write(&vdev->memory_lock);
 		return count;
+	}

 	/*
 	 * Save current memory/io enable bits in vconfig to allow for
@@ -591,6 +607,8 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,

 		*virt_cmd &= cpu_to_le16(~mask);
 		*virt_cmd |= cpu_to_le16(new_cmd & mask);
+
+		up_write(&vdev->memory_lock);
 	}

 	/* Emulate INTx disable */
@@ -828,8 +846,11 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
 						 pos - offset + PCI_EXP_DEVCAP,
 						 &cap);

-		if (!ret && (cap & PCI_EXP_DEVCAP_FLR))
+		if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) {
+			vfio_pci_zap_and_down_write_memory_lock(vdev);
 			pci_try_reset_function(vdev->pdev);
+			up_write(&vdev->memory_lock);
+		}
 	}

 	/*
@@ -907,8 +928,11 @@ static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos,
 						pos - offset + PCI_AF_CAP,
 						&cap);

-		if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP))
+		if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) {
+			vfio_pci_zap_and_down_write_memory_lock(vdev);
 			pci_try_reset_function(vdev->pdev);
+			up_write(&vdev->memory_lock);
+		}
 	}

 	return count;
@@ -1462,7 +1486,12 @@ static int vfio_cap_init(struct vfio_pci_device *vdev)
 		if (ret)
 			return ret;

-		if (cap <= PCI_CAP_ID_MAX) {
+		/*
+		 * ID 0 is a NULL capability, conflicting with our fake
+		 * PCI_CAP_ID_BASIC.  As it has no content, consider it
+		 * hidden for now.
+		 */
+		if (cap && cap <= PCI_CAP_ID_MAX) {
 			len = pci_cap_length[cap];
 			if (len == 0xFF) { /* Variable length */
 				len = vfio_cap_len(vdev, cap, pos);
@@ -1728,8 +1757,11 @@ void vfio_config_free(struct vfio_pci_device *vdev)
 	vdev->vconfig = NULL;
 	kfree(vdev->pci_config_map);
 	vdev->pci_config_map = NULL;
-	kfree(vdev->msi_perm);
-	vdev->msi_perm = NULL;
+	if (vdev->msi_perm) {
+		free_perm_bits(vdev->msi_perm);
+		kfree(vdev->msi_perm);
+		vdev->msi_perm = NULL;
+	}
 }

 /*
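For context on the vfio_config_free() change just above: alloc_perm_bits() allocates virt/write shadow buffers inside a perm_bits structure, and free_perm_bits() is what releases them, so freeing the msi_perm pointer alone leaked those buffers. A simplified sketch of the ownership being fixed, with hypothetical names (the real struct perm_bits also carries read/write handlers):

struct perm_bits_sketch {
	u8 *virt;	/* allocated by alloc_perm_bits() */
	u8 *write;	/* allocated by alloc_perm_bits() */
};

static void sketch_free_msi_perm(struct perm_bits_sketch *perm)
{
	kfree(perm->virt);	/* what free_perm_bits() does ...       */
	kfree(perm->write);
	kfree(perm);		/* ... before the container itself goes */
}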
@@ -249,6 +249,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
 	struct pci_dev *pdev = vdev->pdev;
 	unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
 	int ret;
+	u16 cmd;

 	if (!is_irq_none(vdev))
 		return -EINVAL;
@@ -258,13 +259,16 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
 		return -ENOMEM;

 	/* return the number of supported vectors if we can't get all: */
+	cmd = vfio_pci_memory_lock_and_enable(vdev);
 	ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
 	if (ret < nvec) {
 		if (ret > 0)
 			pci_free_irq_vectors(pdev);
+		vfio_pci_memory_unlock_and_restore(vdev, cmd);
 		kfree(vdev->ctx);
 		return ret;
 	}
+	vfio_pci_memory_unlock_and_restore(vdev, cmd);

 	vdev->num_ctx = nvec;
 	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
@@ -287,6 +291,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
 	struct pci_dev *pdev = vdev->pdev;
 	struct eventfd_ctx *trigger;
 	int irq, ret;
+	u16 cmd;

 	if (vector < 0 || vector >= vdev->num_ctx)
 		return -EINVAL;
@@ -295,7 +300,11 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,

 	if (vdev->ctx[vector].trigger) {
 		irq_bypass_unregister_producer(&vdev->ctx[vector].producer);
+
+		cmd = vfio_pci_memory_lock_and_enable(vdev);
 		free_irq(irq, vdev->ctx[vector].trigger);
+		vfio_pci_memory_unlock_and_restore(vdev, cmd);
+
 		kfree(vdev->ctx[vector].name);
 		eventfd_ctx_put(vdev->ctx[vector].trigger);
 		vdev->ctx[vector].trigger = NULL;
@@ -323,6 +332,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
 	 * such a reset it would be unsuccessful.  To avoid this, restore the
 	 * cached value of the message prior to enabling.
 	 */
+	cmd = vfio_pci_memory_lock_and_enable(vdev);
 	if (msix) {
 		struct msi_msg msg;

@@ -332,6 +342,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,

 	ret = request_irq(irq, vfio_msihandler, 0,
 			  vdev->ctx[vector].name, trigger);
+	vfio_pci_memory_unlock_and_restore(vdev, cmd);
 	if (ret) {
 		kfree(vdev->ctx[vector].name);
 		eventfd_ctx_put(trigger);
@@ -376,6 +387,7 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
 {
 	struct pci_dev *pdev = vdev->pdev;
 	int i;
+	u16 cmd;

 	for (i = 0; i < vdev->num_ctx; i++) {
 		vfio_virqfd_disable(&vdev->ctx[i].unmask);
@@ -384,7 +396,9 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)

 	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);

+	cmd = vfio_pci_memory_lock_and_enable(vdev);
 	pci_free_irq_vectors(pdev);
+	vfio_pci_memory_unlock_and_restore(vdev, cmd);

 	/*
 	 * Both disable paths above use pci_intx_for_msi() to clear DisINTx
@@ -92,6 +92,11 @@ struct vfio_pci_vf_token {
 	int			users;
 };

+struct vfio_pci_mmap_vma {
+	struct vm_area_struct	*vma;
+	struct list_head	vma_next;
+};
+
 struct vfio_pci_device {
 	struct pci_dev		*pdev;
 	void __iomem		*barmap[PCI_STD_NUM_BARS];
@@ -132,6 +137,9 @@ struct vfio_pci_device {
 	struct list_head	ioeventfds_list;
 	struct vfio_pci_vf_token	*vf_token;
 	struct notifier_block	nb;
+	struct mutex		vma_lock;
+	struct list_head	vma_list;
+	struct rw_semaphore	memory_lock;
 };

 #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
@@ -174,6 +182,13 @@ extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
 extern int vfio_pci_set_power_state(struct vfio_pci_device *vdev,
 				    pci_power_t state);

+extern bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev);
+extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device
+						     *vdev);
+extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev);
+extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev,
+					       u16 cmd);
+
 #ifdef CONFIG_VFIO_PCI_IGD
 extern int vfio_pci_igd_init(struct vfio_pci_device *vdev);
 #else
@@ -162,6 +162,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
 	size_t x_start = 0, x_end = 0;
 	resource_size_t end;
 	void __iomem *io;
+	struct resource *res = &vdev->pdev->resource[bar];
 	ssize_t done;

 	if (pci_resource_start(pdev, bar))
@@ -177,6 +178,14 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,

 	count = min(count, (size_t)(end - pos));

+	if (res->flags & IORESOURCE_MEM) {
+		down_read(&vdev->memory_lock);
+		if (!__vfio_pci_memory_enabled(vdev)) {
+			up_read(&vdev->memory_lock);
+			return -EIO;
+		}
+	}
+
 	if (bar == PCI_ROM_RESOURCE) {
 		/*
 		 * The ROM can fill less space than the BAR, so we start the
@@ -184,13 +193,17 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
 		 * filling large ROM BARs much faster.
 		 */
 		io = pci_map_rom(pdev, &x_start);
-		if (!io)
-			return -ENOMEM;
+		if (!io) {
+			done = -ENOMEM;
+			goto out;
+		}
 		x_end = end;
 	} else {
 		int ret = vfio_pci_setup_barmap(vdev, bar);
-		if (ret)
-			return ret;
+		if (ret) {
+			done = ret;
+			goto out;
+		}

 		io = vdev->barmap[bar];
 	}
@@ -207,6 +220,9 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,

 	if (bar == PCI_ROM_RESOURCE)
 		pci_unmap_rom(pdev, io);
+out:
+	if (res->flags & IORESOURCE_MEM)
+		up_read(&vdev->memory_lock);

 	return done;
 }
@@ -85,6 +85,7 @@ struct vfio_group {
 	atomic_t			opened;
 	wait_queue_head_t		container_q;
 	bool				noiommu;
+	unsigned int			dev_counter;
 	struct kvm			*kvm;
 	struct blocking_notifier_head	notifier;
 };
@@ -555,6 +556,7 @@ struct vfio_device *vfio_group_create_device(struct vfio_group *group,

 	mutex_lock(&group->device_lock);
 	list_add(&device->group_next, &group->device_list);
+	group->dev_counter++;
 	mutex_unlock(&group->device_lock);

 	return device;
@@ -567,6 +569,7 @@ static void vfio_device_release(struct kref *kref)
 	struct vfio_group *group = device->group;

 	list_del(&device->group_next);
+	group->dev_counter--;
 	mutex_unlock(&group->device_lock);

 	dev_set_drvdata(device->dev, NULL);
@@ -1945,6 +1948,9 @@ int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
 	if (!group)
 		return -ENODEV;

+	if (group->dev_counter > 1)
+		return -EINVAL;
+
 	ret = vfio_group_add_container_user(group);
 	if (ret)
 		goto err_pin_pages;
@@ -1952,7 +1958,8 @@ int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
 	container = group->container;
 	driver = container->iommu_driver;
 	if (likely(driver && driver->ops->pin_pages))
-		ret = driver->ops->pin_pages(container->iommu_data, user_pfn,
+		ret = driver->ops->pin_pages(container->iommu_data,
+					     group->iommu_group, user_pfn,
 					     npage, prot, phys_pfn);
 	else
 		ret = -ENOTTY;
@@ -2050,8 +2057,8 @@ int vfio_group_pin_pages(struct vfio_group *group,
 	driver = container->iommu_driver;
 	if (likely(driver && driver->ops->pin_pages))
 		ret = driver->ops->pin_pages(container->iommu_data,
-					     user_iova_pfn, npage,
-					     prot, phys_pfn);
+					     group->iommu_group, user_iova_pfn,
+					     npage, prot, phys_pfn);
 	else
 		ret = -ENOTTY;

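The vfio_pin_pages() changes above are what make "selective dirty page tracking" work: a group only counts as dirty-tracking capable once its driver pins pages (or otherwise reports dirtying), and pinned pages are what get marked in the bitmap. A rough sketch of how a vendor (mdev) driver participates, using the helper's existing prototype; the wrapper name is hypothetical and error handling is trimmed:

#include <linux/vfio.h>

/* Hypothetical mdev driver path: pin one guest page for device DMA.
 * Pinning through vfio_pin_pages() both yields a host PFN and marks
 * the page dirty in the container's tracking bitmap. */
static int example_pin_one(struct device *mdev_dev, unsigned long gfn,
			   unsigned long *host_pfn)
{
	unsigned long user_pfn = gfn;
	int ret;

	ret = vfio_pin_pages(mdev_dev, &user_pfn, 1,
			     IOMMU_READ | IOMMU_WRITE, host_pfn);

	return ret == 1 ? 0 : -EFAULT;	/* returns number of pages pinned */
}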
@@ -69,8 +69,11 @@ struct vfio_iommu {
 	struct rb_root		dma_list;
 	struct blocking_notifier_head notifier;
 	unsigned int		dma_avail;
+	uint64_t		pgsize_bitmap;
 	bool			v2;
 	bool			nesting;
+	bool			dirty_page_tracking;
+	bool			pinned_page_dirty_scope;
 };

 struct vfio_domain {
@@ -91,12 +94,14 @@ struct vfio_dma {
 	bool			lock_cap;	/* capable(CAP_IPC_LOCK) */
 	struct task_struct	*task;
 	struct rb_root		pfn_list;	/* Ex-user pinned pfn list */
+	unsigned long		*bitmap;
 };

 struct vfio_group {
 	struct iommu_group	*iommu_group;
 	struct list_head	next;
 	bool			mdev_group;	/* An mdev group */
+	bool			pinned_page_dirty_scope;
 };

 struct vfio_iova {
@@ -112,7 +117,7 @@ struct vfio_pfn {
 	struct rb_node		node;
 	dma_addr_t		iova;		/* Device address */
 	unsigned long		pfn;		/* Host pfn */
-	atomic_t		ref_count;
+	unsigned int		ref_count;
 };

 struct vfio_regions {
@@ -125,8 +130,25 @@ struct vfio_regions {
 #define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)	\
 					(!list_empty(&iommu->domain_list))

+#define DIRTY_BITMAP_BYTES(n)	(ALIGN(n, BITS_PER_TYPE(u64)) / BITS_PER_BYTE)
+
+/*
+ * Input argument of number of bits to bitmap_set() is unsigned integer, which
+ * further casts to signed integer for unaligned multi-bit operation,
+ * __bitmap_set().
+ * Then maximum bitmap size supported is 2^31 bits divided by 2^3 bits/byte,
+ * that is 2^28 (256 MB) which maps to 2^31 * 2^12 = 2^43 (8TB) on 4K page
+ * system.
+ */
+#define DIRTY_BITMAP_PAGES_MAX	((u64)INT_MAX)
+#define DIRTY_BITMAP_SIZE_MAX	DIRTY_BITMAP_BYTES(DIRTY_BITMAP_PAGES_MAX)
+
 static int put_pfn(unsigned long pfn, int prot);

+static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
+					       struct iommu_group *iommu_group);
+
+static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu);
 /*
  * This code handles mapping and unmapping of user data buffers
  * into DMA'ble space using the IOMMU
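Working out the limits that the DIRTY_BITMAP_* comment above describes, assuming a 4K minimum IOMMU page size (illustration only; these macros are not part of the patch):

#define EXAMPLE_MAX_BITS	((u64)INT_MAX)		 /* bitmap_set() takes an int: ~2^31 bits */
#define EXAMPLE_MAX_BYTES	(EXAMPLE_MAX_BITS / 8)	 /* ~2^28 bytes = 256 MB of bitmap        */
#define EXAMPLE_MAX_SPAN	(EXAMPLE_MAX_BITS << 12) /* ~2^31 pages * 4 KB = 8 TB of IOVA     */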
@ -175,6 +197,81 @@ static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
|
||||
rb_erase(&old->node, &iommu->dma_list);
|
||||
}
|
||||
|
||||
|
||||
static int vfio_dma_bitmap_alloc(struct vfio_dma *dma, size_t pgsize)
|
||||
{
|
||||
uint64_t npages = dma->size / pgsize;
|
||||
|
||||
if (npages > DIRTY_BITMAP_PAGES_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* Allocate extra 64 bits that are used to calculate shift required for
|
||||
* bitmap_shift_left() to manipulate and club unaligned number of pages
|
||||
* in adjacent vfio_dma ranges.
|
||||
*/
|
||||
dma->bitmap = kvzalloc(DIRTY_BITMAP_BYTES(npages) + sizeof(u64),
|
||||
GFP_KERNEL);
|
||||
if (!dma->bitmap)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void vfio_dma_bitmap_free(struct vfio_dma *dma)
|
||||
{
|
||||
kfree(dma->bitmap);
|
||||
dma->bitmap = NULL;
|
||||
}
|
||||
|
||||
static void vfio_dma_populate_bitmap(struct vfio_dma *dma, size_t pgsize)
|
||||
{
|
||||
struct rb_node *p;
|
||||
unsigned long pgshift = __ffs(pgsize);
|
||||
|
||||
for (p = rb_first(&dma->pfn_list); p; p = rb_next(p)) {
|
||||
struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn, node);
|
||||
|
||||
bitmap_set(dma->bitmap, (vpfn->iova - dma->iova) >> pgshift, 1);
|
||||
}
|
||||
}
|
||||
|
||||
static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu, size_t pgsize)
|
||||
{
|
||||
struct rb_node *n;
|
||||
|
||||
for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
|
||||
struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
|
||||
int ret;
|
||||
|
||||
ret = vfio_dma_bitmap_alloc(dma, pgsize);
|
||||
if (ret) {
|
||||
struct rb_node *p;
|
||||
|
||||
for (p = rb_prev(n); p; p = rb_prev(p)) {
|
||||
struct vfio_dma *dma = rb_entry(n,
|
||||
struct vfio_dma, node);
|
||||
|
||||
vfio_dma_bitmap_free(dma);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
vfio_dma_populate_bitmap(dma, pgsize);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void vfio_dma_bitmap_free_all(struct vfio_iommu *iommu)
|
||||
{
|
||||
struct rb_node *n;
|
||||
|
||||
for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
|
||||
struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
|
||||
|
||||
vfio_dma_bitmap_free(dma);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Helper Functions for host iova-pfn list
|
||||
*/
|
||||
@ -233,7 +330,7 @@ static int vfio_add_to_pfn_list(struct vfio_dma *dma, dma_addr_t iova,
|
||||
|
||||
vpfn->iova = iova;
|
||||
vpfn->pfn = pfn;
|
||||
atomic_set(&vpfn->ref_count, 1);
|
||||
vpfn->ref_count = 1;
|
||||
vfio_link_pfn(dma, vpfn);
|
||||
return 0;
|
||||
}
|
||||
@ -251,7 +348,7 @@ static struct vfio_pfn *vfio_iova_get_vfio_pfn(struct vfio_dma *dma,
|
||||
struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);
|
||||
|
||||
if (vpfn)
|
||||
atomic_inc(&vpfn->ref_count);
|
||||
vpfn->ref_count++;
|
||||
return vpfn;
|
||||
}
|
||||
|
||||
@ -259,7 +356,8 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (atomic_dec_and_test(&vpfn->ref_count)) {
|
||||
vpfn->ref_count--;
|
||||
if (!vpfn->ref_count) {
|
||||
ret = put_pfn(vpfn->pfn, dma->prot);
|
||||
vfio_remove_from_pfn_list(dma, vpfn);
|
||||
}
|
||||
@ -317,6 +415,32 @@ static int put_pfn(unsigned long pfn, int prot)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
|
||||
unsigned long vaddr, unsigned long *pfn,
|
||||
bool write_fault)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = follow_pfn(vma, vaddr, pfn);
|
||||
if (ret) {
|
||||
bool unlocked = false;
|
||||
|
||||
ret = fixup_user_fault(NULL, mm, vaddr,
|
||||
FAULT_FLAG_REMOTE |
|
||||
(write_fault ? FAULT_FLAG_WRITE : 0),
|
||||
&unlocked);
|
||||
if (unlocked)
|
||||
return -EAGAIN;
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = follow_pfn(vma, vaddr, pfn);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
|
||||
int prot, unsigned long *pfn)
|
||||
{
|
||||
@ -339,12 +463,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
|
||||
|
||||
vaddr = untagged_addr(vaddr);
|
||||
|
||||
retry:
|
||||
vma = find_vma_intersection(mm, vaddr, vaddr + 1);
|
||||
|
||||
if (vma && vma->vm_flags & VM_PFNMAP) {
|
||||
if (!follow_pfn(vma, vaddr, pfn) &&
|
||||
is_invalid_reserved_pfn(*pfn))
|
||||
ret = 0;
|
||||
ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE);
|
||||
if (ret == -EAGAIN)
|
||||
goto retry;
|
||||
|
||||
if (!ret && !is_invalid_reserved_pfn(*pfn))
|
||||
ret = -EFAULT;
|
||||
}
|
||||
done:
|
||||
up_read(&mm->mmap_sem);
|
||||
@ -501,11 +629,13 @@ static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
|
||||
}
|
||||
|
||||
static int vfio_iommu_type1_pin_pages(void *iommu_data,
|
||||
struct iommu_group *iommu_group,
|
||||
unsigned long *user_pfn,
|
||||
int npage, int prot,
|
||||
unsigned long *phys_pfn)
|
||||
{
|
||||
struct vfio_iommu *iommu = iommu_data;
|
||||
struct vfio_group *group;
|
||||
int i, j, ret;
|
||||
unsigned long remote_vaddr;
|
||||
struct vfio_dma *dma;
|
||||
@ -566,9 +696,26 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
|
||||
vfio_unpin_page_external(dma, iova, do_accounting);
|
||||
goto pin_unwind;
|
||||
}
|
||||
|
||||
if (iommu->dirty_page_tracking) {
|
||||
unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
|
||||
|
||||
/*
|
||||
* Bitmap populated with the smallest supported page
|
||||
* size
|
||||
*/
|
||||
bitmap_set(dma->bitmap,
|
||||
(iova - dma->iova) >> pgshift, 1);
|
||||
}
|
||||
}
|
||||
ret = i;
|
||||
|
||||
group = vfio_iommu_find_iommu_group(iommu, iommu_group);
|
||||
if (!group->pinned_page_dirty_scope) {
|
||||
group->pinned_page_dirty_scope = true;
|
||||
update_pinned_page_dirty_scope(iommu);
|
||||
}
|
||||
|
||||
ret = i;
|
||||
goto pin_done;
|
||||
|
||||
pin_unwind:
|
||||
@ -800,19 +947,19 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
|
||||
vfio_unmap_unpin(iommu, dma, true);
|
||||
vfio_unlink_dma(iommu, dma);
|
||||
put_task_struct(dma->task);
|
||||
vfio_dma_bitmap_free(dma);
|
||||
kfree(dma);
|
||||
iommu->dma_avail++;
|
||||
}
|
||||
|
||||
static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
|
||||
static void vfio_update_pgsize_bitmap(struct vfio_iommu *iommu)
|
||||
{
|
||||
struct vfio_domain *domain;
|
||||
unsigned long bitmap = ULONG_MAX;
|
||||
|
||||
mutex_lock(&iommu->lock);
|
||||
iommu->pgsize_bitmap = ULONG_MAX;
|
||||
|
||||
list_for_each_entry(domain, &iommu->domain_list, next)
|
||||
bitmap &= domain->domain->pgsize_bitmap;
|
||||
mutex_unlock(&iommu->lock);
|
||||
iommu->pgsize_bitmap &= domain->domain->pgsize_bitmap;
|
||||
|
||||
/*
|
||||
* In case the IOMMU supports page sizes smaller than PAGE_SIZE
|
||||
@ -822,36 +969,143 @@ static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
|
||||
* granularity while iommu driver can use the sub-PAGE_SIZE size
|
||||
* to map the buffer.
|
||||
*/
|
||||
if (bitmap & ~PAGE_MASK) {
|
||||
bitmap &= PAGE_MASK;
|
||||
bitmap |= PAGE_SIZE;
|
||||
if (iommu->pgsize_bitmap & ~PAGE_MASK) {
|
||||
iommu->pgsize_bitmap &= PAGE_MASK;
|
||||
iommu->pgsize_bitmap |= PAGE_SIZE;
|
||||
}
|
||||
}
|
||||
|
||||
static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
|
||||
struct vfio_dma *dma, dma_addr_t base_iova,
|
||||
size_t pgsize)
|
||||
{
|
||||
unsigned long pgshift = __ffs(pgsize);
|
||||
unsigned long nbits = dma->size >> pgshift;
|
||||
unsigned long bit_offset = (dma->iova - base_iova) >> pgshift;
|
||||
unsigned long copy_offset = bit_offset / BITS_PER_LONG;
|
||||
unsigned long shift = bit_offset % BITS_PER_LONG;
|
||||
unsigned long leftover;
|
||||
|
||||
/*
|
||||
* mark all pages dirty if any IOMMU capable device is not able
|
||||
* to report dirty pages and all pages are pinned and mapped.
|
||||
*/
|
||||
if (!iommu->pinned_page_dirty_scope && dma->iommu_mapped)
|
||||
bitmap_set(dma->bitmap, 0, nbits);
|
||||
|
||||
if (shift) {
|
||||
bitmap_shift_left(dma->bitmap, dma->bitmap, shift,
|
||||
nbits + shift);
|
||||
|
||||
if (copy_from_user(&leftover,
|
||||
(void __user *)(bitmap + copy_offset),
|
||||
sizeof(leftover)))
|
||||
return -EFAULT;
|
||||
|
||||
bitmap_or(dma->bitmap, dma->bitmap, &leftover, shift);
|
||||
}
|
||||
|
||||
return bitmap;
|
||||
if (copy_to_user((void __user *)(bitmap + copy_offset), dma->bitmap,
|
||||
DIRTY_BITMAP_BYTES(nbits + shift)))
|
||||
return -EFAULT;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
|
||||
dma_addr_t iova, size_t size, size_t pgsize)
|
||||
{
|
||||
struct vfio_dma *dma;
|
||||
struct rb_node *n;
|
||||
unsigned long pgshift = __ffs(pgsize);
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* GET_BITMAP request must fully cover vfio_dma mappings. Multiple
|
||||
* vfio_dma mappings may be clubbed by specifying large ranges, but
|
||||
* there must not be any previous mappings bisected by the range.
|
||||
* An error will be returned if these conditions are not met.
|
||||
*/
|
||||
dma = vfio_find_dma(iommu, iova, 1);
|
||||
if (dma && dma->iova != iova)
|
||||
return -EINVAL;
|
||||
|
||||
dma = vfio_find_dma(iommu, iova + size - 1, 0);
|
||||
if (dma && dma->iova + dma->size != iova + size)
|
||||
return -EINVAL;
|
||||
|
||||
for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
|
||||
struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
|
||||
|
||||
if (dma->iova < iova)
|
||||
continue;
|
||||
|
||||
if (dma->iova > iova + size - 1)
|
||||
break;
|
||||
|
||||
ret = update_user_bitmap(bitmap, iommu, dma, iova, pgsize);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* Re-populate bitmap to include all pinned pages which are
|
||||
* considered as dirty but exclude pages which are unpinned and
|
||||
* pages which are marked dirty by vfio_dma_rw()
|
||||
*/
|
||||
bitmap_clear(dma->bitmap, 0, dma->size >> pgshift);
|
||||
vfio_dma_populate_bitmap(dma, pgsize);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int verify_bitmap_size(uint64_t npages, uint64_t bitmap_size)
|
||||
{
|
||||
if (!npages || !bitmap_size || (bitmap_size > DIRTY_BITMAP_SIZE_MAX) ||
|
||||
(bitmap_size < DIRTY_BITMAP_BYTES(npages)))
|
||||
return -EINVAL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
|
||||
struct vfio_iommu_type1_dma_unmap *unmap)
|
||||
struct vfio_iommu_type1_dma_unmap *unmap,
|
||||
struct vfio_bitmap *bitmap)
|
||||
{
|
||||
uint64_t mask;
|
||||
struct vfio_dma *dma, *dma_last = NULL;
|
||||
size_t unmapped = 0;
|
||||
size_t unmapped = 0, pgsize;
|
||||
int ret = 0, retries = 0;
|
||||
unsigned long pgshift;
|
||||
|
||||
mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
|
||||
|
||||
if (unmap->iova & mask)
|
||||
return -EINVAL;
|
||||
if (!unmap->size || unmap->size & mask)
|
||||
return -EINVAL;
|
||||
if (unmap->iova + unmap->size - 1 < unmap->iova ||
|
||||
unmap->size > SIZE_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
WARN_ON(mask & PAGE_MASK);
|
||||
again:
|
||||
mutex_lock(&iommu->lock);
|
||||
|
||||
pgshift = __ffs(iommu->pgsize_bitmap);
|
||||
pgsize = (size_t)1 << pgshift;
|
||||
|
||||
if (unmap->iova & (pgsize - 1)) {
|
||||
ret = -EINVAL;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
if (!unmap->size || unmap->size & (pgsize - 1)) {
|
||||
ret = -EINVAL;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
if (unmap->iova + unmap->size - 1 < unmap->iova ||
|
||||
unmap->size > SIZE_MAX) {
|
||||
ret = -EINVAL;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
/* When dirty tracking is enabled, allow only min supported pgsize */
|
||||
if ((unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) &&
|
||||
(!iommu->dirty_page_tracking || (bitmap->pgsize != pgsize))) {
|
||||
ret = -EINVAL;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
WARN_ON((pgsize - 1) & PAGE_MASK);
|
||||
again:
|
||||
/*
|
||||
* vfio-iommu-type1 (v1) - User mappings were coalesced together to
|
||||
* avoid tracking individual mappings. This means that the granularity
|
||||
@ -929,8 +1183,17 @@ again:
|
||||
blocking_notifier_call_chain(&iommu->notifier,
|
||||
VFIO_IOMMU_NOTIFY_DMA_UNMAP,
|
||||
&nb_unmap);
|
||||
mutex_lock(&iommu->lock);
|
||||
goto again;
|
||||
}
|
||||
|
||||
if (unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
|
||||
ret = update_user_bitmap(bitmap->data, iommu, dma,
|
||||
unmap->iova, pgsize);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
|
||||
unmapped += dma->size;
|
||||
vfio_remove_dma(iommu, dma);
|
||||
}
|
||||
@ -1037,31 +1300,35 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
|
||||
unsigned long vaddr = map->vaddr;
|
||||
size_t size = map->size;
|
||||
int ret = 0, prot = 0;
|
||||
uint64_t mask;
|
||||
size_t pgsize;
|
||||
struct vfio_dma *dma;
|
||||
|
||||
/* Verify that none of our __u64 fields overflow */
|
||||
if (map->size != size || map->vaddr != vaddr || map->iova != iova)
|
||||
return -EINVAL;
|
||||
|
||||
mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
|
||||
|
||||
WARN_ON(mask & PAGE_MASK);
|
||||
|
||||
/* READ/WRITE from device perspective */
|
||||
if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
|
||||
prot |= IOMMU_WRITE;
|
||||
if (map->flags & VFIO_DMA_MAP_FLAG_READ)
|
||||
prot |= IOMMU_READ;
|
||||
|
||||
if (!prot || !size || (size | iova | vaddr) & mask)
|
||||
return -EINVAL;
|
||||
mutex_lock(&iommu->lock);
|
||||
|
||||
pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);
|
||||
|
||||
WARN_ON((pgsize - 1) & PAGE_MASK);
|
||||
|
||||
if (!prot || !size || (size | iova | vaddr) & (pgsize - 1)) {
|
||||
ret = -EINVAL;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
/* Don't allow IOVA or virtual address wrap */
|
||||
if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&iommu->lock);
|
||||
if (iova + size - 1 < iova || vaddr + size - 1 < vaddr) {
|
||||
ret = -EINVAL;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
if (vfio_find_dma(iommu, iova, size)) {
|
||||
ret = -EEXIST;
|
||||
@ -1129,6 +1396,12 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
|
||||
else
|
||||
ret = vfio_pin_map_dma(iommu, dma, size);
|
||||
|
||||
if (!ret && iommu->dirty_page_tracking) {
|
||||
ret = vfio_dma_bitmap_alloc(dma, pgsize);
|
||||
if (ret)
|
||||
vfio_remove_dma(iommu, dma);
|
||||
}
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&iommu->lock);
|
||||
return ret;
|
||||
@ -1267,6 +1540,51 @@ static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
|
||||
struct iommu_group *iommu_group)
|
||||
{
|
||||
struct vfio_domain *domain;
|
||||
struct vfio_group *group = NULL;
|
||||
|
||||
list_for_each_entry(domain, &iommu->domain_list, next) {
|
||||
group = find_iommu_group(domain, iommu_group);
|
||||
if (group)
|
||||
return group;
|
||||
}
|
||||
|
||||
if (iommu->external_domain)
|
||||
group = find_iommu_group(iommu->external_domain, iommu_group);
|
||||
|
||||
return group;
|
||||
}
|
||||
|
||||
static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu)
|
||||
{
|
||||
struct vfio_domain *domain;
|
||||
struct vfio_group *group;
|
||||
|
||||
list_for_each_entry(domain, &iommu->domain_list, next) {
|
||||
list_for_each_entry(group, &domain->group_list, next) {
|
||||
if (!group->pinned_page_dirty_scope) {
|
||||
iommu->pinned_page_dirty_scope = false;
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (iommu->external_domain) {
|
||||
domain = iommu->external_domain;
|
||||
list_for_each_entry(group, &domain->group_list, next) {
|
||||
if (!group->pinned_page_dirty_scope) {
|
||||
iommu->pinned_page_dirty_scope = false;
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
iommu->pinned_page_dirty_scope = true;
|
||||
}
|
||||
|
||||
static bool vfio_iommu_has_sw_msi(struct list_head *group_resv_regions,
|
||||
phys_addr_t *base)
|
||||
{
|
||||
@ -1667,12 +1985,23 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
|
||||
if (!iommu->external_domain) {
|
||||
INIT_LIST_HEAD(&domain->group_list);
|
||||
iommu->external_domain = domain;
|
||||
vfio_update_pgsize_bitmap(iommu);
|
||||
} else {
|
||||
kfree(domain);
|
||||
}
|
||||
|
||||
list_add(&group->next,
|
||||
&iommu->external_domain->group_list);
|
||||
/*
|
||||
* Non-iommu backed group cannot dirty memory directly,
|
||||
* it can only use interfaces that provide dirty
|
||||
* tracking.
|
||||
* The iommu scope can only be promoted with the
|
||||
* addition of a dirty tracking group.
|
||||
*/
|
||||
group->pinned_page_dirty_scope = true;
|
||||
if (!iommu->pinned_page_dirty_scope)
|
||||
update_pinned_page_dirty_scope(iommu);
|
||||
mutex_unlock(&iommu->lock);
|
||||
|
||||
return 0;
|
||||
@ -1792,9 +2121,17 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
|
||||
}
|
||||
|
||||
list_add(&domain->next, &iommu->domain_list);
|
||||
vfio_update_pgsize_bitmap(iommu);
|
||||
done:
|
||||
/* Delete the old one and insert new iova list */
|
||||
vfio_iommu_iova_insert_copy(iommu, &iova_copy);
|
||||
|
||||
/*
|
||||
* An iommu backed group can dirty memory directly and therefore
|
||||
* demotes the iommu scope until it declares itself dirty tracking
|
||||
* capable via the page pinning interface.
|
||||
*/
|
||||
iommu->pinned_page_dirty_scope = false;
|
||||
mutex_unlock(&iommu->lock);
|
||||
vfio_iommu_resv_free(&group_resv_regions);
|
||||
|
||||
@ -1947,6 +2284,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
|
||||
struct vfio_iommu *iommu = iommu_data;
|
||||
struct vfio_domain *domain;
|
||||
struct vfio_group *group;
|
||||
bool update_dirty_scope = false;
|
||||
LIST_HEAD(iova_copy);
|
||||
|
||||
mutex_lock(&iommu->lock);
|
||||
@ -1954,6 +2292,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
|
||||
if (iommu->external_domain) {
|
||||
group = find_iommu_group(iommu->external_domain, iommu_group);
|
||||
if (group) {
|
||||
update_dirty_scope = !group->pinned_page_dirty_scope;
|
||||
list_del(&group->next);
|
||||
kfree(group);
|
||||
|
||||
@ -1983,6 +2322,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
|
||||
continue;
|
||||
|
||||
vfio_iommu_detach_group(domain, group);
|
||||
update_dirty_scope = !group->pinned_page_dirty_scope;
|
||||
list_del(&group->next);
|
||||
kfree(group);
|
||||
/*
|
||||
@ -2003,6 +2343,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
|
||||
list_del(&domain->next);
|
||||
kfree(domain);
|
||||
vfio_iommu_aper_expand(iommu, &iova_copy);
|
||||
vfio_update_pgsize_bitmap(iommu);
|
||||
}
|
||||
break;
|
||||
}
|
||||
@ -2013,6 +2354,12 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
|
||||
vfio_iommu_iova_free(&iova_copy);
|
||||
|
||||
detach_group_done:
|
||||
/*
|
||||
* Removal of a group without dirty tracking may allow the iommu scope
|
||||
* to be promoted.
|
||||
*/
|
||||
if (update_dirty_scope)
|
||||
update_pinned_page_dirty_scope(iommu);
|
||||
mutex_unlock(&iommu->lock);
|
||||
}
|
||||
|
||||
@ -2135,8 +2482,6 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
|
||||
size_t size;
|
||||
int iovas = 0, i = 0, ret;
|
||||
|
||||
mutex_lock(&iommu->lock);
|
||||
|
||||
list_for_each_entry(iova, &iommu->iova_list, list)
|
||||
iovas++;
|
||||
|
||||
@ -2145,17 +2490,14 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
|
||||
* Return 0 as a container with a single mdev device
|
||||
* will have an empty list
|
||||
*/
|
||||
ret = 0;
|
||||
goto out_unlock;
|
||||
return 0;
|
||||
}
|
||||
|
||||
size = sizeof(*cap_iovas) + (iovas * sizeof(*cap_iovas->iova_ranges));
|
||||
|
||||
cap_iovas = kzalloc(size, GFP_KERNEL);
|
||||
if (!cap_iovas) {
|
||||
ret = -ENOMEM;
|
||||
goto out_unlock;
|
||||
}
|
||||
if (!cap_iovas)
|
||||
return -ENOMEM;
|
||||
|
||||
cap_iovas->nr_iovas = iovas;
|
||||
|
||||
@ -2168,11 +2510,25 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
|
||||
ret = vfio_iommu_iova_add_cap(caps, cap_iovas, size);
|
||||
|
||||
kfree(cap_iovas);
|
||||
out_unlock:
|
||||
mutex_unlock(&iommu->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu,
|
||||
struct vfio_info_cap *caps)
|
||||
{
|
||||
struct vfio_iommu_type1_info_cap_migration cap_mig;
|
||||
|
||||
cap_mig.header.id = VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION;
|
||||
cap_mig.header.version = 1;
|
||||
|
||||
cap_mig.flags = 0;
|
||||
/* support minimum pgsize */
|
||||
cap_mig.pgsize_bitmap = (size_t)1 << __ffs(iommu->pgsize_bitmap);
|
||||
cap_mig.max_dirty_bitmap_size = DIRTY_BITMAP_SIZE_MAX;
|
||||
|
||||
return vfio_info_add_capability(caps, &cap_mig.header, sizeof(cap_mig));
|
||||
}
|
||||
|
||||
static long vfio_iommu_type1_ioctl(void *iommu_data,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
@ -2214,11 +2570,18 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
|
||||
info.cap_offset = 0; /* output, no-recopy necessary */
|
||||
}
|
||||
|
||||
mutex_lock(&iommu->lock);
|
||||
info.flags = VFIO_IOMMU_INFO_PGSIZES;
|
||||
|
||||
info.iova_pgsizes = vfio_pgsize_bitmap(iommu);
|
||||
info.iova_pgsizes = iommu->pgsize_bitmap;
|
||||
|
||||
ret = vfio_iommu_migration_build_caps(iommu, &caps);
|
||||
|
||||
if (!ret)
|
||||
ret = vfio_iommu_iova_build_caps(iommu, &caps);
|
||||
|
||||
mutex_unlock(&iommu->lock);
|
||||
|
||||
ret = vfio_iommu_iova_build_caps(iommu, &caps);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -2261,22 +2624,143 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,

} else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
struct vfio_iommu_type1_dma_unmap unmap;
long ret;
struct vfio_bitmap bitmap = { 0 };
int ret;

minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);

if (copy_from_user(&unmap, (void __user *)arg, minsz))
return -EFAULT;

if (unmap.argsz < minsz || unmap.flags)
if (unmap.argsz < minsz ||
unmap.flags & ~VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP)
return -EINVAL;

ret = vfio_dma_do_unmap(iommu, &unmap);
if (unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
unsigned long pgshift;

if (unmap.argsz < (minsz + sizeof(bitmap)))
return -EINVAL;

if (copy_from_user(&bitmap,
(void __user *)(arg + minsz),
sizeof(bitmap)))
return -EFAULT;

if (!access_ok((void __user *)bitmap.data, bitmap.size))
return -EINVAL;

pgshift = __ffs(bitmap.pgsize);
ret = verify_bitmap_size(unmap.size >> pgshift,
bitmap.size);
if (ret)
return ret;
}

ret = vfio_dma_do_unmap(iommu, &unmap, &bitmap);
if (ret)
return ret;

return copy_to_user((void __user *)arg, &unmap, minsz) ?
-EFAULT : 0;
} else if (cmd == VFIO_IOMMU_DIRTY_PAGES) {
struct vfio_iommu_type1_dirty_bitmap dirty;
uint32_t mask = VFIO_IOMMU_DIRTY_PAGES_FLAG_START |
VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP |
VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
int ret = 0;

if (!iommu->v2)
return -EACCES;

minsz = offsetofend(struct vfio_iommu_type1_dirty_bitmap,
flags);

if (copy_from_user(&dirty, (void __user *)arg, minsz))
return -EFAULT;

if (dirty.argsz < minsz || dirty.flags & ~mask)
return -EINVAL;

/* only one flag should be set at a time */
if (__ffs(dirty.flags) != __fls(dirty.flags))
return -EINVAL;

if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_START) {
size_t pgsize;

mutex_lock(&iommu->lock);
pgsize = 1 << __ffs(iommu->pgsize_bitmap);
if (!iommu->dirty_page_tracking) {
ret = vfio_dma_bitmap_alloc_all(iommu, pgsize);
if (!ret)
iommu->dirty_page_tracking = true;
}
mutex_unlock(&iommu->lock);
return ret;
} else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP) {
mutex_lock(&iommu->lock);
if (iommu->dirty_page_tracking) {
iommu->dirty_page_tracking = false;
vfio_dma_bitmap_free_all(iommu);
}
mutex_unlock(&iommu->lock);
return 0;
} else if (dirty.flags &
VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP) {
struct vfio_iommu_type1_dirty_bitmap_get range;
unsigned long pgshift;
size_t data_size = dirty.argsz - minsz;
size_t iommu_pgsize;

if (!data_size || data_size < sizeof(range))
return -EINVAL;

if (copy_from_user(&range, (void __user *)(arg + minsz),
sizeof(range)))
return -EFAULT;

if (range.iova + range.size < range.iova)
return -EINVAL;
if (!access_ok((void __user *)range.bitmap.data,
range.bitmap.size))
return -EINVAL;

pgshift = __ffs(range.bitmap.pgsize);
ret = verify_bitmap_size(range.size >> pgshift,
range.bitmap.size);
if (ret)
return ret;

mutex_lock(&iommu->lock);

iommu_pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);

/* allow only smallest supported pgsize */
if (range.bitmap.pgsize != iommu_pgsize) {
ret = -EINVAL;
goto out_unlock;
}
if (range.iova & (iommu_pgsize - 1)) {
ret = -EINVAL;
goto out_unlock;
}
if (!range.size || range.size & (iommu_pgsize - 1)) {
ret = -EINVAL;
goto out_unlock;
}

if (iommu->dirty_page_tracking)
ret = vfio_iova_dirty_bitmap(range.bitmap.data,
iommu, range.iova, range.size,
range.bitmap.pgsize);
else
ret = -EINVAL;
out_unlock:
mutex_unlock(&iommu->lock);

return ret;
}
}

return -ENOTTY;
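The verify_bitmap_size() checks above tie the caller-supplied bitmap to the range being reported: one bit per page of the user-chosen pgsize, with the allocation sized in whole 64-bit words. A small userspace sketch of that arithmetic (the helper name and the u64-padding rule are assumptions inferred from the checks in this hunk, not a kernel API):

#include <stdint.h>
#include <stdio.h>

/*
 * Rough userspace sketch: how many bitmap bytes a caller would supply for an
 * IOVA range, assuming one bit per page and a bitmap allocated in whole
 * 64-bit words.
 */
static uint64_t dirty_bitmap_bytes(uint64_t range_size, uint64_t pgsize)
{
	uint64_t npages = range_size / pgsize;	/* one bit per page */
	uint64_t nwords = (npages + 63) / 64;	/* round up to whole u64s */

	return nwords * sizeof(uint64_t);
}

int main(void)
{
	/* example: a 1 GiB range tracked at 4 KiB granularity */
	uint64_t bytes = dirty_bitmap_bytes(1ULL << 30, 4096);

	/* 262144 pages -> 4096 words -> 32768 bytes */
	printf("bitmap bytes needed: %llu\n", (unsigned long long)bytes);
	return 0;
}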
@ -2344,10 +2828,19 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,

vaddr = dma->vaddr + offset;

if (write)
if (write) {
*copied = copy_to_user((void __user *)vaddr, data,
count) ? 0 : count;
else
if (*copied && iommu->dirty_page_tracking) {
unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
/*
* Bitmap populated with the smallest supported page
* size
*/
bitmap_set(dma->bitmap, offset >> pgshift,
*copied >> pgshift);
}
} else
*copied = copy_from_user(data, (void __user *)vaddr,
count) ? 0 : count;
if (kthread)
@ -76,7 +76,9 @@ struct vfio_iommu_driver_ops {
struct iommu_group *group);
void (*detach_group)(void *iommu_data,
struct iommu_group *group);
int (*pin_pages)(void *iommu_data, unsigned long *user_pfn,
int (*pin_pages)(void *iommu_data,
struct iommu_group *group,
unsigned long *user_pfn,
int npage, int prot,
unsigned long *phys_pfn);
int (*unpin_pages)(void *iommu_data,
@ -305,6 +305,7 @@ struct vfio_region_info_cap_type {
#define VFIO_REGION_TYPE_PCI_VENDOR_MASK (0xffff)
#define VFIO_REGION_TYPE_GFX (1)
#define VFIO_REGION_TYPE_CCW (2)
#define VFIO_REGION_TYPE_MIGRATION (3)

/* sub-types for VFIO_REGION_TYPE_PCI_* */

@ -379,6 +380,233 @@ struct vfio_region_gfx_edid {
/* sub-types for VFIO_REGION_TYPE_CCW */
#define VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD (1)

/* sub-types for VFIO_REGION_TYPE_MIGRATION */
#define VFIO_REGION_SUBTYPE_MIGRATION (1)

/*
* The structure vfio_device_migration_info is placed at the 0th offset of
* the VFIO_REGION_SUBTYPE_MIGRATION region to get and set VFIO device-related
* migration information. Field accesses from this structure are only supported
* at their native width and alignment. Otherwise, the result is undefined and
* vendor drivers should return an error.
*
* device_state: (read/write)
* - The user application writes to this field to inform the vendor driver
* about the device state to be transitioned to.
* - The vendor driver should take the necessary actions to change the
* device state. After successful transition to a given state, the
* vendor driver should return success on write(device_state, state)
* system call. If the device state transition fails, the vendor driver
* should return an appropriate -errno for the fault condition.
* - On the user application side, if the device state transition fails,
* that is, if write(device_state, state) returns an error, read
* device_state again to determine the current state of the device from
* the vendor driver.
* - The vendor driver should return the previous state of the device unless
* the vendor driver has encountered an internal error, in which case
* the vendor driver may report the device_state VFIO_DEVICE_STATE_ERROR.
* - The user application must use the device reset ioctl to recover the
* device from VFIO_DEVICE_STATE_ERROR state. If the device is
* indicated to be in a valid device state by reading device_state, the
* user application may attempt to transition the device to any valid
* state reachable from the current state or terminate itself.
*
* device_state consists of 3 bits:
* - If bit 0 is set, it indicates the _RUNNING state. If bit 0 is clear,
* it indicates the _STOP state. When the device state is changed to
* _STOP, the driver should stop the device before write() returns.
* - If bit 1 is set, it indicates the _SAVING state, which means that the
* driver should start gathering device state information that will be
* provided to the VFIO user application to save the device's state.
* - If bit 2 is set, it indicates the _RESUMING state, which means that
* the driver should prepare to resume the device. Data provided through
* the migration region should be used to resume the device.
* Bits 3 - 31 are reserved for future use. To preserve them, the user
* application should perform a read-modify-write operation on this
* field when modifying the specified bits.
*
* +------- _RESUMING
* |+------ _SAVING
* ||+----- _RUNNING
* |||
* 000b => Device stopped, not saving or resuming
* 001b => Device running, which is the default state
* 010b => Stop the device & save the device state, stop-and-copy state
* 011b => Device running and save the device state, pre-copy state
* 100b => Device stopped and the device state is resuming
* 101b => Invalid state
* 110b => Error state
* 111b => Invalid state
*
* State transitions:
*
*              _RESUMING  _RUNNING    Pre-copy    Stop-and-copy   _STOP
*                (100b)     (001b)     (011b)        (010b)       (000b)
* 0. Running or default state
*                             |
*
* 1. Normal Shutdown (optional)
*                             |------------------------------------->|
*
* 2. Save the state or suspend
*                             |------------------------->|---------->|
*
* 3. Save the state during live migration
*                             |----------->|------------>|---------->|
*
* 4. Resuming
*                  |<---------|
*
* 5. Resumed
*                  |--------->|
*
* 0. Default state of VFIO device is _RUNNING when the user application starts.
* 1. During normal shutdown of the user application, the user application may
* optionally change the VFIO device state from _RUNNING to _STOP. This
* transition is optional. The vendor driver must support this transition but
* must not require it.
* 2. When the user application saves state or suspends the application, the
* device state transitions from _RUNNING to stop-and-copy and then to _STOP.
* On state transition from _RUNNING to stop-and-copy, the driver must stop the
* device, save the device state and send it to the application through the
* migration region. The sequence to be followed for such a transition is given
* below.
* 3. In live migration of the user application, the state transitions from
* _RUNNING to pre-copy, to stop-and-copy, and to _STOP.
* On state transition from _RUNNING to pre-copy, the driver should start
* gathering the device state while the application is still running and send
* the device state data to the application through the migration region.
* On state transition from pre-copy to stop-and-copy, the driver must stop
* the device, save the device state and send it to the user application
* through the migration region.
* Vendor drivers must support the pre-copy state even for implementations
* where no data is provided to the user before the stop-and-copy state. The
* user must not be required to consume all migration data before the device
* transitions to a new state, including the stop-and-copy state.
* The sequence to be followed for the above two transitions is given below.
* 4. To start the resuming phase, the device state should be transitioned from
* the _RUNNING state to the _RESUMING state.
* In the _RESUMING state, the driver should use the device state data
* received through the migration region to resume the device.
* 5. After providing saved device data to the driver, the application should
* change the state from _RESUMING to _RUNNING.
*
* reserved:
* Reads on this field return zero and writes are ignored.
*
* pending_bytes: (read only)
* The number of pending bytes still to be migrated from the vendor driver.
*
* data_offset: (read only)
* The user application should read the data_offset field from the migration
* region. The user application should read the device data from this
* offset within the migration region during the _SAVING state or write
* the device data during the _RESUMING state. See below for details of
* the sequence to be followed.
*
* data_size: (read/write)
* The user application should read data_size to get the size in bytes of
* the data copied in the migration region during the _SAVING state and
* write the size in bytes of the data copied in the migration region
* during the _RESUMING state.
*
* The format of the migration region is as follows:
*  ------------------------------------------------------------------
* |vfio_device_migration_info|    data section                      |
* |                          |     ///////////////////////////////  |
*  ------------------------------------------------------------------
*   ^                              ^
*  offset 0-trapped part        data_offset
*
* The structure vfio_device_migration_info is always followed by the data
* section in the region, so data_offset will always be nonzero. The offset
* from where the data is copied is decided by the kernel driver. The data
* section can be trapped, mmapped, or partitioned, depending on how the kernel
* driver defines the data section. The data section partition can be defined
* as mapped by the sparse mmap capability. If mmapped, data_offset must be
* page aligned, whereas the initial section, which contains the
* vfio_device_migration_info structure, might not end at a page-aligned
* offset. The user is not required to access through mmap regardless
* of the capabilities of the region mmap.
* The vendor driver should determine whether and how to partition the data
* section. The vendor driver should return data_offset accordingly.
*
* The sequence to be followed while in the pre-copy state and the
* stop-and-copy state is as follows:
* a. Read pending_bytes, indicating the start of a new iteration to get device
* data. Repeated reads of pending_bytes at this stage should have no side
* effects.
* If pending_bytes == 0, the user application should not iterate to get data
* for that device.
* If pending_bytes > 0, perform the following steps.
* b. Read data_offset, indicating that the vendor driver should make data
* available through the data section. The vendor driver should return this
* read operation only after data is available from (region + data_offset)
* to (region + data_offset + data_size).
* c. Read data_size, which is the amount of data in bytes available through
* the migration region.
* Reads of data_offset and data_size should return the offset and size of
* the current buffer if the user application reads data_offset and
* data_size more than once here.
* d. Read data_size bytes of data from (region + data_offset) from the
* migration region.
* e. Process the data.
* f. Read pending_bytes, which indicates that the data from the previous
* iteration has been read. If pending_bytes > 0, go to step b.
*
* The user application can transition from the _SAVING|_RUNNING
* (pre-copy state) to the _SAVING (stop-and-copy) state regardless of the
* number of pending bytes. The user application should iterate in _SAVING
* (stop-and-copy) until pending_bytes is 0.
*
* The sequence to be followed while in the _RESUMING device state is as
* follows:
* While data for this device is available, repeat the following steps:
* a. Read data_offset, from where the user application should write data.
* b. Write migration data starting at the migration region + data_offset for
* the length determined by data_size from the migration source.
* c. Write data_size, which indicates to the vendor driver that data is
* written in the migration region. The vendor driver must return this write
* operation only after consuming the data. The vendor driver should apply the
* user-provided migration region data to the device resume state.
*
* If an error occurs during the above sequences, the vendor driver can return
* an error code for the next read() or write() operation, which will terminate
* the loop. The user application should then take the next necessary action,
* for example, failing migration or terminating the user application.
*
* For the user application, data is opaque. The user application should write
* data in the same order as the data is received and the data should be of
* the same transaction size as at the source.
*/
struct vfio_device_migration_info {
__u32 device_state; /* VFIO device state */
#define VFIO_DEVICE_STATE_STOP (0)
#define VFIO_DEVICE_STATE_RUNNING (1 << 0)
#define VFIO_DEVICE_STATE_SAVING (1 << 1)
#define VFIO_DEVICE_STATE_RESUMING (1 << 2)
#define VFIO_DEVICE_STATE_MASK (VFIO_DEVICE_STATE_RUNNING | \
VFIO_DEVICE_STATE_SAVING | \
VFIO_DEVICE_STATE_RESUMING)

#define VFIO_DEVICE_STATE_VALID(state) \
(state & VFIO_DEVICE_STATE_RESUMING ? \
(state & VFIO_DEVICE_STATE_MASK) == VFIO_DEVICE_STATE_RESUMING : 1)

#define VFIO_DEVICE_STATE_IS_ERROR(state) \
((state & VFIO_DEVICE_STATE_MASK) == (VFIO_DEVICE_STATE_SAVING | \
VFIO_DEVICE_STATE_RESUMING))

#define VFIO_DEVICE_STATE_SET_ERROR(state) \
((state & ~VFIO_DEVICE_STATE_MASK) | VFIO_DEVICE_STATE_SAVING | \
VFIO_DEVICE_STATE_RESUMING)

__u32 reserved;
__u64 pending_bytes;
__u64 data_offset;
__u64 data_size;
};
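To make the _SAVING flow above concrete, here is a minimal userspace sketch of one iteration of steps a-f, assuming the migration region's file offset (region_off) has already been discovered via VFIO_DEVICE_GET_REGION_INFO and that the info structure is accessed with pread() at field offsets; the function name and the reduced error handling are illustrative, not part of the UAPI.

#include <stdint.h>
#include <stdlib.h>
#include <stddef.h>
#include <unistd.h>
#include <linux/vfio.h>

/* One _SAVING iteration: returns 1 if data was consumed, 0 if none pending,
 * -1 on error. The caller keeps looping while this returns 1. */
static int save_one_iteration(int device_fd, off_t region_off)
{
	uint64_t pending, data_offset, data_size;
	void *buf;

	/* a. read pending_bytes to start an iteration */
	if (pread(device_fd, &pending, sizeof(pending), region_off +
		  offsetof(struct vfio_device_migration_info, pending_bytes)) < 0)
		return -1;
	if (!pending)
		return 0;	/* nothing to do this round */

	/* b. read data_offset: the driver now stages data in the data section */
	if (pread(device_fd, &data_offset, sizeof(data_offset), region_off +
		  offsetof(struct vfio_device_migration_info, data_offset)) < 0)
		return -1;

	/* c. read data_size: how much data is available this iteration */
	if (pread(device_fd, &data_size, sizeof(data_size), region_off +
		  offsetof(struct vfio_device_migration_info, data_size)) < 0)
		return -1;

	/* d. read data_size bytes from (region + data_offset) ... */
	buf = malloc(data_size);
	if (!buf)
		return -1;
	if (pread(device_fd, buf, data_size, region_off + data_offset) < 0) {
		free(buf);
		return -1;
	}

	/* e. ... and hand the opaque buffer to the migration stream here */
	free(buf);
	return 1;	/* f. caller re-reads pending_bytes and loops */
}

The _RESUMING direction is the mirror image: read data_offset, pwrite() the incoming buffer at region_off + data_offset, then pwrite() its length to data_size.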

/*
* The MSIX mappable capability informs that MSIX data of a BAR can be mmapped
* which allows direct access to non-MSIX registers which happened to be within
@ -785,6 +1013,29 @@ struct vfio_iommu_type1_info_cap_iova_range {
struct vfio_iova_range iova_ranges[];
};

/*
* The migration capability allows reporting of supported migration features.
*
* The structures below define version 1 of this capability.
*
* The existence of this capability indicates that the IOMMU kernel driver
* supports dirty page logging.
*
* pgsize_bitmap: Kernel driver returns a bitmap of supported page sizes for
* dirty page logging.
* max_dirty_bitmap_size: Kernel driver returns the maximum supported dirty
* bitmap size in bytes that can be used by user applications when getting the
* dirty bitmap.
*/
#define VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION 1

struct vfio_iommu_type1_info_cap_migration {
struct vfio_info_cap_header header;
__u32 flags;
__u64 pgsize_bitmap;
__u64 max_dirty_bitmap_size; /* in bytes */
};
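For orientation, a minimal userspace sketch of consuming this capability: query VFIO_IOMMU_GET_INFO once to learn the required argsz, reallocate, then walk the capability chain. The function name and the trimmed error handling are illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static void print_migration_cap(int container_fd)
{
	struct vfio_iommu_type1_info hdr = { .argsz = sizeof(hdr) };
	struct vfio_iommu_type1_info *info;
	struct vfio_info_cap_header *cap;

	/* first call only reports the argsz needed for the capability chain */
	if (ioctl(container_fd, VFIO_IOMMU_GET_INFO, &hdr))
		return;

	info = calloc(1, hdr.argsz);
	if (!info)
		return;
	info->argsz = hdr.argsz;

	if (ioctl(container_fd, VFIO_IOMMU_GET_INFO, info) ||
	    !(info->flags & VFIO_IOMMU_INFO_CAPS) || !info->cap_offset)
		goto out;

	/* cap_offset and each cap->next are offsets from the start of info */
	cap = (struct vfio_info_cap_header *)((char *)info + info->cap_offset);
	while (1) {
		if (cap->id == VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION) {
			struct vfio_iommu_type1_info_cap_migration *mig =
				(struct vfio_iommu_type1_info_cap_migration *)cap;

			printf("dirty pgsize bitmap 0x%llx, max bitmap %llu bytes\n",
			       (unsigned long long)mig->pgsize_bitmap,
			       (unsigned long long)mig->max_dirty_bitmap_size);
			break;
		}
		if (!cap->next)
			break;
		cap = (struct vfio_info_cap_header *)((char *)info + cap->next);
	}
out:
	free(info);
}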

#define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)

/**
@ -805,6 +1056,12 @@ struct vfio_iommu_type1_dma_map {

#define VFIO_IOMMU_MAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 13)

struct vfio_bitmap {
__u64 pgsize; /* page size for bitmap in bytes */
__u64 size; /* in bytes */
__u64 __user *data; /* one bit per page */
};

/**
* VFIO_IOMMU_UNMAP_DMA - _IOWR(VFIO_TYPE, VFIO_BASE + 14,
* struct vfio_dma_unmap)
@ -814,12 +1071,23 @@ struct vfio_iommu_type1_dma_map {
* field. No guarantee is made to the user that arbitrary unmaps of iova
* or size different from those used in the original mapping call will
* succeed.
* VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP should be set to get the dirty bitmap
* before unmapping IO virtual addresses. When this flag is set, the user must
* provide a struct vfio_bitmap in data[]. The user must provide zeroed memory
* via vfio_bitmap.data and its size in the vfio_bitmap.size field.
* A bit in the bitmap represents one page, of the user-provided page size in
* the vfio_bitmap.pgsize field, consecutively starting from the iova offset.
* A set bit indicates that the page at that offset from iova is dirty. A
* bitmap of the pages in the unmapped range is returned in the user-provided
* vfio_bitmap.data.
*/
struct vfio_iommu_type1_dma_unmap {
__u32 argsz;
__u32 flags;
#define VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP (1 << 0)
__u64 iova; /* IO virtual address */
__u64 size; /* Size of mapping (bytes) */
__u8 data[];
};

#define VFIO_IOMMU_UNMAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 14)
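A minimal userspace sketch of the flag described above: the vfio_bitmap travels in the flexible data[] member and the bitmap buffer itself is a separate, zeroed allocation. The helper name, the one-bit-per-page/u64-padded sizing rule and the trimmed error handling are assumptions of this example, not part of the UAPI.

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Unmap an IOVA range and collect its dirty bitmap in one call; the caller
 * owns *bitmap_out afterwards. Returns the ioctl() result, -1 on alloc
 * failure. */
static int unmap_and_get_dirty(int container_fd, uint64_t iova, uint64_t size,
			       uint64_t pgsize, uint64_t **bitmap_out)
{
	struct vfio_iommu_type1_dma_unmap *unmap;
	struct vfio_bitmap *bitmap;
	uint64_t npages = size / pgsize;
	uint64_t bitmap_bytes = ((npages + 63) / 64) * sizeof(uint64_t);
	int ret;

	/* flexible data[] carries a struct vfio_bitmap when the flag is set */
	unmap = calloc(1, sizeof(*unmap) + sizeof(*bitmap));
	*bitmap_out = calloc(1, bitmap_bytes);	/* must be zeroed */
	if (!unmap || !*bitmap_out) {
		free(unmap);
		free(*bitmap_out);
		return -1;
	}

	unmap->argsz = sizeof(*unmap) + sizeof(*bitmap);
	unmap->flags = VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
	unmap->iova = iova;
	unmap->size = size;

	bitmap = (struct vfio_bitmap *)unmap->data;
	bitmap->pgsize = pgsize;
	bitmap->size = bitmap_bytes;
	bitmap->data = (void *)*bitmap_out;

	ret = ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, unmap);
	free(unmap);
	return ret;
}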
@ -831,6 +1099,57 @@ struct vfio_iommu_type1_dma_unmap {
#define VFIO_IOMMU_ENABLE _IO(VFIO_TYPE, VFIO_BASE + 15)
#define VFIO_IOMMU_DISABLE _IO(VFIO_TYPE, VFIO_BASE + 16)

/**
* VFIO_IOMMU_DIRTY_PAGES - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
* struct vfio_iommu_type1_dirty_bitmap)
* This IOCTL is used for dirty page logging.
* The caller should set the flag for the operation to perform, as detailed
* below:
*
* Calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_START flag set
* instructs the IOMMU driver to log pages that are dirtied or potentially
* dirtied by the device; it is designed to be used when a migration is in
* progress. Dirty pages are logged until logging is disabled by the user
* application by calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP
* flag.
*
* Calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP flag set
* instructs the IOMMU driver to stop logging dirtied pages.
*
* Calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP flag set
* returns the dirty pages bitmap for the IOMMU container for a given IOVA
* range. The user must specify the IOVA range and the pgsize through the
* structure vfio_iommu_type1_dirty_bitmap_get in the data[] portion. This
* interface supports getting a bitmap of the smallest supported pgsize only
* and can be modified in future to get a bitmap of any specified supported
* pgsize. The user must provide a zeroed memory area for the bitmap memory and
* specify its size in bitmap.size. One bit is used to represent one page
* consecutively starting from the iova offset. The user should provide the
* page size in the bitmap.pgsize field. A bit set in the bitmap indicates that
* the page at that offset from iova is dirty. The caller must set argsz to a
* value including the size of structure vfio_iommu_type1_dirty_bitmap_get, but
* excluding the size of the actual bitmap. If dirty pages logging is not
* enabled, an error will be returned.
*
* Only one of the flags _START, _STOP and _GET may be specified at a time.
*
*/
struct vfio_iommu_type1_dirty_bitmap {
__u32 argsz;
__u32 flags;
#define VFIO_IOMMU_DIRTY_PAGES_FLAG_START (1 << 0)
#define VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP (1 << 1)
#define VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP (1 << 2)
__u8 data[];
};

struct vfio_iommu_type1_dirty_bitmap_get {
__u64 iova; /* IO virtual address */
__u64 size; /* Size of iova range */
struct vfio_bitmap bitmap;
};

#define VFIO_IOMMU_DIRTY_PAGES _IO(VFIO_TYPE, VFIO_BASE + 17)
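A minimal userspace sketch of driving this interface: one helper issues the single-flag _START/_STOP operations, the other fetches the bitmap for one IOVA range. The helper names, the bitmap sizing convention and the reduced error handling are illustrative assumptions, not part of the UAPI.

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* START or STOP dirty page tracking on the container */
static int dirty_pages_op(int container_fd, uint32_t flag)
{
	struct vfio_iommu_type1_dirty_bitmap db = {
		.argsz = sizeof(db),
		.flags = flag,	/* exactly one of _START/_STOP */
	};

	return ioctl(container_fd, VFIO_IOMMU_DIRTY_PAGES, &db);
}

/* Fetch the dirty bitmap for one IOVA range; bitmap_data is a zeroed buffer
 * of bitmap_bytes bytes supplied by the caller, pgsize must be the smallest
 * size advertised in the migration capability's pgsize_bitmap. */
static int dirty_pages_get(int container_fd, uint64_t iova, uint64_t size,
			   uint64_t pgsize, uint64_t *bitmap_data,
			   uint64_t bitmap_bytes)
{
	size_t len = sizeof(struct vfio_iommu_type1_dirty_bitmap) +
		     sizeof(struct vfio_iommu_type1_dirty_bitmap_get);
	struct vfio_iommu_type1_dirty_bitmap *db = calloc(1, len);
	struct vfio_iommu_type1_dirty_bitmap_get *range;
	int ret;

	if (!db)
		return -1;

	db->argsz = len;	/* includes the _get struct, not the bitmap */
	db->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;

	range = (struct vfio_iommu_type1_dirty_bitmap_get *)db->data;
	range->iova = iova;
	range->size = size;
	range->bitmap.pgsize = pgsize;
	range->bitmap.size = bitmap_bytes;
	range->bitmap.data = (void *)bitmap_data;

	ret = ioctl(container_fd, VFIO_IOMMU_DIRTY_PAGES, db);
	free(db);
	return ret;
}

A typical sequence would be dirty_pages_op(fd, VFIO_IOMMU_DIRTY_PAGES_FLAG_START) when migration begins, repeated dirty_pages_get() calls while iterating, and dirty_pages_op(fd, VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP) once migration completes or is aborted.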

/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */

/*