xen/gntdev: add ioctl for grant copy

Add IOCTL_GNTDEV_GRANT_COPY to allow applications to copy between user
space buffers and grant references.

This interface is similar to the GNTTABOP_copy hypercall ABI, except
that the local buffers are provided as virtual addresses (instead of a
GFN and offset).  So that userspace does not have to page-align its
buffers, the driver will split a segment into two or more ops if
required.

If the ioctl returns 0, the application must check each segment's
status field.  If the ioctl returns a negative error code (-EINVAL or
-EFAULT), the status of individual segments is undefined.
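
As a rough usage sketch (illustrative only, not part of this change), a
userspace caller might drive the new ioctl as below; the header install
paths, the GNTCOPY_source_gref fallback value, and the helper name
copy_from_gref are assumptions:

/*
 * Hedged usage sketch: copy the first 'len' bytes of a foreign grant
 * reference into a local buffer.  Header locations and error handling
 * are assumptions, not part of the commit.
 */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <xen/gntdev.h>			/* assumed install path of this uapi header */

#ifndef GNTCOPY_source_gref		/* mirrors Xen's public grant_table.h */
#define GNTCOPY_source_gref	(1 << 0)
#endif

static int copy_from_gref(uint16_t domid, uint32_t ref, void *buf, uint16_t len)
{
	struct gntdev_grant_copy_segment seg;
	struct ioctl_gntdev_grant_copy copy;
	int fd, ret;

	fd = open("/dev/xen/gntdev", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&seg, 0, sizeof(seg));
	seg.flags = GNTCOPY_source_gref;	/* foreign source, local destination */
	seg.source.foreign.ref = ref;
	seg.source.foreign.domid = domid;
	seg.source.foreign.offset = 0;
	seg.dest.virt = buf;			/* need not be page aligned */
	seg.len = len;

	copy.count = 1;
	copy.segments = &seg;

	ret = ioctl(fd, IOCTL_GNTDEV_GRANT_COPY, &copy);
	close(fd);

	/* A zero return only means the segments were processed; each
	 * segment reports its own GNTST_* status (0 == GNTST_okay). */
	if (ret == 0 && seg.status != 0)
		ret = -1;

	return ret;
}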

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Author: David Vrabel, 2014-12-02 16:13:26 +0000
Commit: a4cdb556ca (parent 6a1f513776)
2 changed files with 253 additions and 0 deletions

drivers/xen/gntdev.c

@@ -748,6 +748,206 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
	return rc;
}

#define GNTDEV_COPY_BATCH 24

struct gntdev_copy_batch {
	struct gnttab_copy ops[GNTDEV_COPY_BATCH];
	struct page *pages[GNTDEV_COPY_BATCH];
	s16 __user *status[GNTDEV_COPY_BATCH];
	unsigned int nr_ops;
	unsigned int nr_pages;
};

static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
			   bool writeable, unsigned long *gfn)
{
	unsigned long addr = (unsigned long)virt;
	struct page *page;
	unsigned long xen_pfn;
	int ret;

	ret = get_user_pages_fast(addr, 1, writeable, &page);
	if (ret < 0)
		return ret;

	batch->pages[batch->nr_pages++] = page;

	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(addr & ~PAGE_MASK);
	*gfn = pfn_to_gfn(xen_pfn);

	return 0;
}

static void gntdev_put_pages(struct gntdev_copy_batch *batch)
{
	unsigned int i;

	for (i = 0; i < batch->nr_pages; i++)
		put_page(batch->pages[i]);
	batch->nr_pages = 0;
}

static int gntdev_copy(struct gntdev_copy_batch *batch)
{
	unsigned int i;

	gnttab_batch_copy(batch->ops, batch->nr_ops);
	gntdev_put_pages(batch);

	/*
	 * For each completed op, update the status if the op failed
	 * and all previous ops for the segment were successful.
	 */
	for (i = 0; i < batch->nr_ops; i++) {
		s16 status = batch->ops[i].status;
		s16 old_status;

		if (status == GNTST_okay)
			continue;

		if (__get_user(old_status, batch->status[i]))
			return -EFAULT;

		if (old_status != GNTST_okay)
			continue;

		if (__put_user(status, batch->status[i]))
			return -EFAULT;
	}

	batch->nr_ops = 0;
	return 0;
}

static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
				 struct gntdev_grant_copy_segment *seg,
				 s16 __user *status)
{
	uint16_t copied = 0;

	/*
	 * Disallow local -> local copies since there is only space in
	 * batch->pages for one page per-op and this would be a very
	 * expensive memcpy().
	 */
	if (!(seg->flags & (GNTCOPY_source_gref | GNTCOPY_dest_gref)))
		return -EINVAL;

	/* Can't cross page if source/dest is a grant ref. */
	if (seg->flags & GNTCOPY_source_gref) {
		if (seg->source.foreign.offset + seg->len > XEN_PAGE_SIZE)
			return -EINVAL;
	}
	if (seg->flags & GNTCOPY_dest_gref) {
		if (seg->dest.foreign.offset + seg->len > XEN_PAGE_SIZE)
			return -EINVAL;
	}

	if (put_user(GNTST_okay, status))
		return -EFAULT;

	while (copied < seg->len) {
		struct gnttab_copy *op;
		void __user *virt;
		size_t len, off;
		unsigned long gfn;
		int ret;

		if (batch->nr_ops >= GNTDEV_COPY_BATCH) {
			ret = gntdev_copy(batch);
			if (ret < 0)
				return ret;
		}

		len = seg->len - copied;

		op = &batch->ops[batch->nr_ops];
		op->flags = 0;

		if (seg->flags & GNTCOPY_source_gref) {
			op->source.u.ref = seg->source.foreign.ref;
			op->source.domid = seg->source.foreign.domid;
			op->source.offset = seg->source.foreign.offset + copied;
			op->flags |= GNTCOPY_source_gref;
		} else {
			virt = seg->source.virt + copied;
			off = (unsigned long)virt & ~XEN_PAGE_MASK;
			len = min(len, (size_t)XEN_PAGE_SIZE - off);

			ret = gntdev_get_page(batch, virt, false, &gfn);
			if (ret < 0)
				return ret;

			op->source.u.gmfn = gfn;
			op->source.domid = DOMID_SELF;
			op->source.offset = off;
		}

		if (seg->flags & GNTCOPY_dest_gref) {
			op->dest.u.ref = seg->dest.foreign.ref;
			op->dest.domid = seg->dest.foreign.domid;
			op->dest.offset = seg->dest.foreign.offset + copied;
			op->flags |= GNTCOPY_dest_gref;
		} else {
			virt = seg->dest.virt + copied;
			off = (unsigned long)virt & ~XEN_PAGE_MASK;
			len = min(len, (size_t)XEN_PAGE_SIZE - off);

			ret = gntdev_get_page(batch, virt, true, &gfn);
			if (ret < 0)
				return ret;

			op->dest.u.gmfn = gfn;
			op->dest.domid = DOMID_SELF;
			op->dest.offset = off;
		}

		op->len = len;
		copied += len;

		batch->status[batch->nr_ops] = status;
		batch->nr_ops++;
	}

	return 0;
}

static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user *u)
{
	struct ioctl_gntdev_grant_copy copy;
	struct gntdev_copy_batch batch;
	unsigned int i;
	int ret = 0;

	if (copy_from_user(&copy, u, sizeof(copy)))
		return -EFAULT;

	batch.nr_ops = 0;
	batch.nr_pages = 0;

	for (i = 0; i < copy.count; i++) {
		struct gntdev_grant_copy_segment seg;

		if (copy_from_user(&seg, &copy.segments[i], sizeof(seg))) {
			ret = -EFAULT;
			goto out;
		}

		ret = gntdev_grant_copy_seg(&batch, &seg, &copy.segments[i].status);
		if (ret < 0)
			goto out;

		cond_resched();
	}
	if (batch.nr_ops)
		ret = gntdev_copy(&batch);
	return ret;

 out:
	gntdev_put_pages(&batch);
	return ret;
}

static long gntdev_ioctl(struct file *flip,
			 unsigned int cmd, unsigned long arg)
{
@@ -767,6 +967,9 @@ static long gntdev_ioctl(struct file *flip,
	case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
		return gntdev_ioctl_notify(priv, ptr);

	case IOCTL_GNTDEV_GRANT_COPY:
		return gntdev_ioctl_grant_copy(priv, ptr);

	default:
		pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
		return -ENOIOCTLCMD;

include/uapi/xen/gntdev.h

@@ -144,6 +144,56 @@ struct ioctl_gntdev_unmap_notify {
	__u32 event_channel_port;
};

struct gntdev_grant_copy_segment {
	union {
		void __user *virt;
		struct {
			grant_ref_t ref;
			__u16 offset;
			domid_t domid;
		} foreign;
	} source, dest;
	__u16 len;

	__u16 flags;  /* GNTCOPY_* */
	__s16 status; /* GNTST_* */
};

/*
 * Copy between grant references and local buffers.
 *
 * The copy is split into @count @segments, each of which can copy
 * to/from one grant reference.
 *
 * Each segment is similar to struct gnttab_copy in the hypervisor ABI
 * except the local buffer is specified using a virtual address
 * (instead of a GFN and offset).
 *
 * The local buffer may cross a Xen page boundary -- the driver will
 * split segments into multiple ops if required.
 *
 * Returns 0 if all segments have been processed and @status in each
 * segment is valid.  Note that one or more segments may have failed
 * (status != GNTST_okay).
 *
 * If the driver had to split a segment into two or more ops, @status
 * includes the status of the first failed op for that segment (or
 * GNTST_okay if all ops were successful).
 *
 * If -1 is returned, the status of all segments is undefined.
 *
 * EINVAL: A segment has local buffers for both source and
 *         destination.
 * EINVAL: A segment crosses the boundary of a foreign page.
 * EFAULT: A segment's local buffer is not accessible.
 */
#define IOCTL_GNTDEV_GRANT_COPY \
	_IOC(_IOC_NONE, 'G', 8, sizeof(struct ioctl_gntdev_grant_copy))
struct ioctl_gntdev_grant_copy {
	unsigned int count;
	struct gntdev_grant_copy_segment __user *segments;
};

/* Clear (set to zero) the byte specified by index */
#define UNMAP_NOTIFY_CLEAR_BYTE 0x1
/* Send an interrupt on the indicated event channel */
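
As a hedged illustration of the status semantics documented in the
comment above (not part of the commit; the do_grant_copies helper, its
arguments, and the caller-filled segment array are assumptions):

/*
 * Illustration only: distinguishing an ioctl-level error from
 * per-segment status.  Assumes the includes from the sketch in the
 * commit message; 'segs' describes nsegs segments already filled in
 * by the caller.
 */
static int do_grant_copies(int fd, struct gntdev_grant_copy_segment *segs,
			   unsigned int nsegs)
{
	struct ioctl_gntdev_grant_copy copy = {
		.count = nsegs,
		.segments = segs,
	};
	unsigned int i;

	if (ioctl(fd, IOCTL_GNTDEV_GRANT_COPY, &copy))
		return -1;	/* -EINVAL/-EFAULT: all segment status undefined */

	for (i = 0; i < nsegs; i++) {
		/* 0 (GNTST_okay) means every op for this segment succeeded;
		 * otherwise this is the status of the first failed op. */
		if (segs[i].status != 0)
			return -1;
	}
	return 0;
}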