xen: branch for v5.5-rc1

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYIAB0WIQRTLbB6QfY48x44uB6AXGG7T9hjvgUCXeqGZgAKCRCAXGG7T9hj
 vqALAP91rVmsKE1DwjAvu/mzd7eHskIuAhB/1bxNP6doeJQ6jQD/Vbv06V44fMXG
 JANwgnQIpzJA+n+ek4Up4aGktY8TWAs=
 =NYv9
 -----END PGP SIGNATURE-----

Merge tag 'for-linus-5.5b-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull more xen updates from Juergen Gross:

 - a patch to fix a build warning

 - a cleanup of no longer needed code in the Xen event handling

 - a small series for the Xen grant driver avoiding high order
   allocations and replacing an insane global limit by a per-call one

 - a small series fixing Xen frontend/backend module referencing

* tag 'for-linus-5.5b-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen-blkback: allow module to be cleanly unloaded
  xen/xenbus: reference count registered modules
  xen/gntdev: switch from kcalloc() to kvcalloc()
  xen/gntdev: replace global limit of mapped pages by limit per call
  xen/gntdev: remove redundant non-zero check on ret
  xen/events: remove event handling recursion detection
commit f74fd13f45 (Linus Torvalds, 2019-12-07 14:49:20 -08:00)
8 changed files with 66 additions and 62 deletions

drivers/block/xen-blkback/blkback.c

@@ -1506,5 +1506,13 @@ static int __init xen_blkif_init(void)
 
 module_init(xen_blkif_init);
 
+static void __exit xen_blkif_fini(void)
+{
+	xen_blkif_xenbus_fini();
+	xen_blkif_interface_fini();
+}
+
+module_exit(xen_blkif_fini);
+
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_ALIAS("xen-backend:vbd");

drivers/block/xen-blkback/common.h

@@ -375,9 +375,12 @@ struct phys_req {
 	struct block_device	*bdev;
 	blkif_sector_t		sector_number;
 };
 
 int xen_blkif_interface_init(void);
+void xen_blkif_interface_fini(void);
+
 int xen_blkif_xenbus_init(void);
+void xen_blkif_xenbus_fini(void);
 
 irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
 int xen_blkif_schedule(void *arg);

drivers/block/xen-blkback/xenbus.c

@@ -333,6 +333,12 @@ int __init xen_blkif_interface_init(void)
 	return 0;
 }
 
+void xen_blkif_interface_fini(void)
+{
+	kmem_cache_destroy(xen_blkif_cachep);
+	xen_blkif_cachep = NULL;
+}
+
 /*
  * sysfs interface for VBD I/O requests
  */
@@ -1122,3 +1128,8 @@ int xen_blkif_xenbus_init(void)
 {
 	return xenbus_register_backend(&xen_blkbk_driver);
 }
+
+void xen_blkif_xenbus_fini(void)
+{
+	xenbus_unregister_driver(&xen_blkbk_driver);
+}

drivers/xen/events/events_base.c

@@ -1213,31 +1213,21 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
 	notify_remote_via_irq(irq);
 }
 
-static DEFINE_PER_CPU(unsigned, xed_nesting_count);
-
 static void __xen_evtchn_do_upcall(void)
 {
 	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
-	int cpu = get_cpu();
-	unsigned count;
+	int cpu = smp_processor_id();
 
 	do {
 		vcpu_info->evtchn_upcall_pending = 0;
 
-		if (__this_cpu_inc_return(xed_nesting_count) - 1)
-			goto out;
-
 		xen_evtchn_handle_events(cpu);
 
 		BUG_ON(!irqs_disabled());
 
-		count = __this_cpu_read(xed_nesting_count);
-		__this_cpu_write(xed_nesting_count, 0);
-	} while (count != 1 || vcpu_info->evtchn_upcall_pending);
+		virt_rmb(); /* Hypervisor can set upcall pending. */
 
-out:
-	put_cpu();
-}
+	} while (vcpu_info->evtchn_upcall_pending);
+}
 
 void xen_evtchn_do_upcall(struct pt_regs *regs)

drivers/xen/gntdev-common.h

@@ -81,7 +81,7 @@ void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add);
 
 void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map);
 
-bool gntdev_account_mapped_pages(int count);
+bool gntdev_test_page_count(unsigned int count);
 
 int gntdev_map_grant_pages(struct gntdev_grant_map *map);

drivers/xen/gntdev-dmabuf.c

@@ -446,7 +446,7 @@ dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
 {
 	struct gntdev_grant_map *map;
 
-	if (unlikely(count <= 0))
+	if (unlikely(gntdev_test_page_count(count)))
 		return ERR_PTR(-EINVAL);
 
 	if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
@@ -459,11 +459,6 @@ dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
 	if (!map)
 		return ERR_PTR(-ENOMEM);
 
-	if (unlikely(gntdev_account_mapped_pages(count))) {
-		pr_debug("can't map %d pages: over limit\n", count);
-		gntdev_put_map(NULL, map);
-		return ERR_PTR(-ENOMEM);
-	}
-
 	return map;
 }
@@ -771,7 +766,7 @@ long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
 	if (copy_from_user(&op, u, sizeof(op)) != 0)
 		return -EFAULT;
 
-	if (unlikely(op.count <= 0))
+	if (unlikely(gntdev_test_page_count(op.count)))
 		return -EINVAL;
 
 	refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
@@ -818,7 +813,7 @@ long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
 	if (copy_from_user(&op, u, sizeof(op)) != 0)
 		return -EFAULT;
 
-	if (unlikely(op.count <= 0))
+	if (unlikely(gntdev_test_page_count(op.count)))
 		return -EINVAL;
 
 	gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,

drivers/xen/gntdev.c

@@ -55,12 +55,10 @@ MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
 	      "Gerd Hoffmann <kraxel@redhat.com>");
 MODULE_DESCRIPTION("User-space granted page access driver");
 
-static int limit = 1024*1024;
-module_param(limit, int, 0644);
-MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
-		"the gntdev device");
-
-static atomic_t pages_mapped = ATOMIC_INIT(0);
+static unsigned int limit = 64*1024;
+module_param(limit, uint, 0644);
+MODULE_PARM_DESC(limit,
+	"Maximum number of grants that may be mapped by one mapping request");
 
 static int use_ptemod;
@@ -71,9 +69,9 @@ static struct miscdevice gntdev_miscdev;
 
 /* ------------------------------------------------------------------ */
 
-bool gntdev_account_mapped_pages(int count)
+bool gntdev_test_page_count(unsigned int count)
 {
-	return atomic_add_return(count, &pages_mapped) > limit;
+	return !count || count > limit;
 }
 
 static void gntdev_print_maps(struct gntdev_priv *priv,
@@ -114,14 +112,14 @@ static void gntdev_free_map(struct gntdev_grant_map *map)
 		gnttab_free_pages(map->count, map->pages);
 
 #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
-	kfree(map->frames);
+	kvfree(map->frames);
 #endif
-	kfree(map->pages);
-	kfree(map->grants);
-	kfree(map->map_ops);
-	kfree(map->unmap_ops);
-	kfree(map->kmap_ops);
-	kfree(map->kunmap_ops);
+	kvfree(map->pages);
+	kvfree(map->grants);
+	kvfree(map->map_ops);
+	kvfree(map->unmap_ops);
+	kvfree(map->kmap_ops);
+	kvfree(map->kunmap_ops);
 	kfree(map);
 }
@@ -135,12 +133,13 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
 	if (NULL == add)
 		return NULL;
 
-	add->grants = kcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
-	add->map_ops = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
-	add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
-	add->kmap_ops = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
-	add->kunmap_ops = kcalloc(count, sizeof(add->kunmap_ops[0]), GFP_KERNEL);
-	add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
+	add->grants = kvcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
+	add->map_ops = kvcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
+	add->unmap_ops = kvcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
+	add->kmap_ops = kvcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
+	add->kunmap_ops = kvcalloc(count,
+				   sizeof(add->kunmap_ops[0]), GFP_KERNEL);
+	add->pages = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
 	if (NULL == add->grants ||
 	    NULL == add->map_ops ||
 	    NULL == add->unmap_ops ||
@@ -159,8 +158,8 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
 	if (dma_flags & (GNTDEV_DMA_FLAG_WC | GNTDEV_DMA_FLAG_COHERENT)) {
 		struct gnttab_dma_alloc_args args;
 
-		add->frames = kcalloc(count, sizeof(add->frames[0]),
-				      GFP_KERNEL);
+		add->frames = kvcalloc(count, sizeof(add->frames[0]),
+				       GFP_KERNEL);
 		if (!add->frames)
 			goto err;
@@ -241,8 +240,6 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
 	if (!refcount_dec_and_test(&map->users))
 		return;
 
-	atomic_sub(map->count, &pages_mapped);
-
 	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
 		notify_remote_via_evtchn(map->notify.event);
 		evtchn_put(map->notify.event);
@@ -506,7 +503,6 @@ static const struct mmu_interval_notifier_ops gntdev_mmu_ops = {
 static int gntdev_open(struct inode *inode, struct file *flip)
 {
 	struct gntdev_priv *priv;
-	int ret = 0;
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
@@ -518,17 +514,13 @@ static int gntdev_open(struct inode *inode, struct file *flip)
 #ifdef CONFIG_XEN_GNTDEV_DMABUF
 	priv->dmabuf_priv = gntdev_dmabuf_init(flip);
 	if (IS_ERR(priv->dmabuf_priv)) {
-		ret = PTR_ERR(priv->dmabuf_priv);
+		int ret = PTR_ERR(priv->dmabuf_priv);
+
 		kfree(priv);
 		return ret;
 	}
 #endif
 
-	if (ret) {
-		kfree(priv);
-		return ret;
-	}
-
 	flip->private_data = priv;
 #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
 	priv->dma_dev = gntdev_miscdev.this_device;
@@ -573,7 +565,7 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
 	if (copy_from_user(&op, u, sizeof(op)) != 0)
 		return -EFAULT;
 	pr_debug("priv %p, add %d\n", priv, op.count);
-	if (unlikely(op.count <= 0))
+	if (unlikely(gntdev_test_page_count(op.count)))
 		return -EINVAL;
 
 	err = -ENOMEM;
err = -ENOMEM;
@ -581,12 +573,6 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
if (!map)
return err;
if (unlikely(gntdev_account_mapped_pages(op.count))) {
pr_debug("can't map: over limit\n");
gntdev_put_map(NULL, map);
return err;
}
if (copy_from_user(map->grants, &u->refs,
sizeof(map->grants[0]) * op.count) != 0) {
gntdev_put_map(NULL, map);
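
For illustration of the new per-call limit: a single map request is now rejected up front when its grant count is zero or larger than the "limit" module parameter, instead of being charged against a global pages_mapped pool. Below is a minimal user-space sketch, not part of this series; it assumes the standard /dev/xen/gntdev node, the uapi header <xen/gntdev.h>, and a hypothetical helper map_grants().

/*
 * Hypothetical illustration only (user space): map "count" grants owned by
 * domain "domid" through /dev/xen/gntdev.  With the per-call limit, the
 * ioctl fails with EINVAL when count == 0 or count > the "limit" module
 * parameter, no matter how few grants are already mapped.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <xen/gntdev.h>

static int map_grants(uint32_t domid, const uint32_t *refs, uint32_t count)
{
	struct ioctl_gntdev_map_grant_ref *op;
	/* refs[1] is already part of the struct; over-allocating by one
	 * element keeps the size computation simple */
	size_t sz = sizeof(*op) + (size_t)count * sizeof(op->refs[0]);
	int fd, ret;
	uint32_t i;

	fd = open("/dev/xen/gntdev", O_RDWR);
	if (fd < 0)
		return -1;

	op = calloc(1, sz);
	if (!op) {
		close(fd);
		return -1;
	}

	op->count = count;
	for (i = 0; i < count; i++) {
		op->refs[i].domid = domid;
		op->refs[i].ref = refs[i];
	}

	ret = ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, op);
	if (ret) {
		perror("IOCTL_GNTDEV_MAP_GRANT_REF"); /* EINVAL if over the per-call limit */
	} else {
		/* a real user keeps fd open and mmap()s the grants at op->index */
		printf("mapped %u grants at offset %llu\n", count,
		       (unsigned long long)op->index);
	}

	free(op);
	close(fd);
	return ret;
}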

drivers/xen/xenbus/xenbus_probe.c

@@ -232,9 +232,16 @@ int xenbus_dev_probe(struct device *_dev)
 		return err;
 	}
 
+	if (!try_module_get(drv->driver.owner)) {
+		dev_warn(&dev->dev, "failed to acquire module reference on '%s'\n",
+			 drv->driver.name);
+		err = -ESRCH;
+		goto fail;
+	}
+
 	err = drv->probe(dev, id);
 	if (err)
-		goto fail;
+		goto fail_put;
 
 	err = watch_otherend(dev);
 	if (err) {
@@ -244,6 +251,8 @@ int xenbus_dev_probe(struct device *_dev)
 	}
 
 	return 0;
+fail_put:
+	module_put(drv->driver.owner);
 fail:
 	xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
 	xenbus_switch_state(dev, XenbusStateClosed);
@@ -263,6 +272,8 @@ int xenbus_dev_remove(struct device *_dev)
 	if (drv->remove)
 		drv->remove(dev);
 
+	module_put(drv->driver.owner);
+
 	free_otherend_details(dev);
 	xenbus_switch_state(dev, XenbusStateClosed);
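
The xenbus_probe.c change follows the usual driver-core ownership rule: take a reference on the xenbus driver's module before calling its probe() and drop it again when probe fails or the device is removed, so a frontend/backend module can no longer be unloaded while a device is still bound to it. A minimal generic sketch of that pattern follows; it is an illustration only, not code from this series, and bind_device_to_driver() is a hypothetical helper.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/module.h>

/*
 * Illustration of the ownership pattern applied above: pin the driver's
 * module for as long as one of its devices is bound.
 */
static int bind_device_to_driver(struct device *dev, struct device_driver *drv,
				 int (*probe_cb)(struct device *dev))
{
	int err;

	if (!try_module_get(drv->owner))
		return -ESRCH;	/* module is already being unloaded */

	err = probe_cb(dev);
	if (err)
		module_put(drv->owner);	/* failed bind: drop the reference */

	/* on success the matching module_put() is done in the remove path */
	return err;
}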