virtio: fixes, cleanups

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio fixes from Michael Tsirkin:
 "Fixes and cleanups:

   - A couple of mlx5 fixes related to cvq

   - A couple of reverts dropping useless code (code that used it got
     reverted earlier)"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  vdpa: mlx5: synchronize driver status with CVQ
  vdpa: mlx5: prevent cvq work from hogging CPU
  Revert "virtio_config: introduce a new .enable_cbs method"
  Revert "virtio: use virtio_device_ready() in virtio_device_restore()"
Merge commit 3e732ebf73 by Linus Torvalds, 2022-04-05 10:40:52 -07:00
3 changed files with 43 additions and 30 deletions

drivers/vdpa/mlx5/net/mlx5_vnet.c

@@ -163,6 +163,7 @@ struct mlx5_vdpa_net {
         u32 cur_num_vqs;
         struct notifier_block nb;
         struct vdpa_callback config_cb;
+        struct mlx5_vdpa_wq_ent cvq_ent;
 };
 
 static void free_resources(struct mlx5_vdpa_net *ndev);
@@ -1658,6 +1659,12 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
         mvdev = wqent->mvdev;
         ndev = to_mlx5_vdpa_ndev(mvdev);
         cvq = &mvdev->cvq;
+
+        mutex_lock(&ndev->reslock);
+
+        if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
+                goto out;
+
         if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
                 goto out;
@@ -1696,9 +1703,13 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
                 if (vringh_need_notify_iotlb(&cvq->vring))
                         vringh_notify(&cvq->vring);
 
+                queue_work(mvdev->wq, &wqent->work);
+                break;
         }
 
 out:
-        kfree(wqent);
+        mutex_unlock(&ndev->reslock);
 }
 
 static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
@@ -1706,7 +1717,6 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
         struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
         struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
         struct mlx5_vdpa_virtqueue *mvq;
-        struct mlx5_vdpa_wq_ent *wqent;
 
         if (!is_index_valid(mvdev, idx))
                 return;
@@ -1715,13 +1725,7 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
                 if (!mvdev->wq || !mvdev->cvq.ready)
                         return;
 
-                wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
-                if (!wqent)
-                        return;
-
-                wqent->mvdev = mvdev;
-                INIT_WORK(&wqent->work, mlx5_cvq_kick_handler);
-                queue_work(mvdev->wq, &wqent->work);
+                queue_work(mvdev->wq, &ndev->cvq_ent.work);
                 return;
         }
 
@@ -2180,7 +2184,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
                 goto err_mr;
 
         if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
-                return 0;
+                goto err_mr;
 
         restore_channels_info(ndev);
         err = setup_driver(mvdev);
@@ -2195,12 +2199,14 @@ err_mr:
         return err;
 }
 
+/* reslock must be held for this function */
 static int setup_driver(struct mlx5_vdpa_dev *mvdev)
 {
         struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
         int err;
 
-        mutex_lock(&ndev->reslock);
+        WARN_ON(!mutex_is_locked(&ndev->reslock));
+
         if (ndev->setup) {
                 mlx5_vdpa_warn(mvdev, "setup driver called for already setup driver\n");
                 err = 0;
@@ -2230,7 +2236,6 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
                 goto err_fwd;
         }
 
         ndev->setup = true;
-        mutex_unlock(&ndev->reslock);
         return 0;
@@ -2241,23 +2246,23 @@ err_tir:
 err_rqt:
         teardown_virtqueues(ndev);
 out:
-        mutex_unlock(&ndev->reslock);
         return err;
 }
 
+/* reslock must be held for this function */
 static void teardown_driver(struct mlx5_vdpa_net *ndev)
 {
-        mutex_lock(&ndev->reslock);
+        WARN_ON(!mutex_is_locked(&ndev->reslock));
+
         if (!ndev->setup)
-                goto out;
+                return;
 
         remove_fwd_to_tir(ndev);
         destroy_tir(ndev);
         destroy_rqt(ndev);
         teardown_virtqueues(ndev);
         ndev->setup = false;
-out:
-        mutex_unlock(&ndev->reslock);
 }
 
 static void clear_vqs_ready(struct mlx5_vdpa_net *ndev)
@@ -2278,6 +2283,8 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
         print_status(mvdev, status, true);
 
+        mutex_lock(&ndev->reslock);
+
         if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
                 if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
                         err = setup_driver(mvdev);
@@ -2287,16 +2294,19 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
                         }
                 } else {
                         mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n");
-                        return;
+                        goto err_clear;
                 }
         }
 
         ndev->mvdev.status = status;
+        mutex_unlock(&ndev->reslock);
         return;
 
 err_setup:
         mlx5_vdpa_destroy_mr(&ndev->mvdev);
         ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
+err_clear:
+        mutex_unlock(&ndev->reslock);
 }
 
 static int mlx5_vdpa_reset(struct vdpa_device *vdev)
@@ -2306,6 +2316,8 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
         print_status(mvdev, 0, true);
         mlx5_vdpa_info(mvdev, "performing device reset\n");
 
+        mutex_lock(&ndev->reslock);
+
         teardown_driver(ndev);
         clear_vqs_ready(ndev);
         mlx5_vdpa_destroy_mr(&ndev->mvdev);
@@ -2318,6 +2330,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
                 if (mlx5_vdpa_create_mr(mvdev, NULL))
                         mlx5_vdpa_warn(mvdev, "create MR failed\n");
         }
+        mutex_unlock(&ndev->reslock);
 
         return 0;
 }
@@ -2353,19 +2366,24 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
 static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb)
 {
         struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
         bool change_map;
         int err;
 
+        mutex_lock(&ndev->reslock);
+
         err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
         if (err) {
                 mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
-                return err;
+                goto err;
         }
 
         if (change_map)
-                return mlx5_vdpa_change_map(mvdev, iotlb);
+                err = mlx5_vdpa_change_map(mvdev, iotlb);
 
-        return 0;
+err:
+        mutex_unlock(&ndev->reslock);
+        return err;
 }
 
 static void mlx5_vdpa_free(struct vdpa_device *vdev)
@@ -2740,6 +2758,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
         if (err)
                 goto err_mr;
 
+        ndev->cvq_ent.mvdev = mvdev;
+        INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler);
         mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq");
         if (!mvdev->wq) {
                 err = -ENOMEM;
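Two patterns in the mlx5 hunks above are worth calling out: the kick path now queues a work item that is embedded in the device structure and initialized once at device-add time, instead of allocating one per kick, and the handler takes the reslock, bails out unless DRIVER_OK is set, and handles a single command before re-queueing itself, so a stream of guest commands cannot hog the CPU. Below is a minimal sketch of that embedded-work/re-queue idiom using only the generic workqueue API; the my_* names and the one-command helper are hypothetical stand-ins, not the driver's actual code.

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct my_dev {
        struct workqueue_struct *wq;
        struct work_struct cmd_work;    /* embedded: no allocation on the kick path */
        struct mutex lock;
        bool running;
};

/* Hypothetical stub: handle at most one queued command, return true if more remain. */
static bool my_handle_one_cmd(struct my_dev *dev)
{
        return false;
}

static void my_cmd_work_handler(struct work_struct *work)
{
        struct my_dev *dev = container_of(work, struct my_dev, cmd_work);

        mutex_lock(&dev->lock);
        if (!dev->running)              /* device stopped or reset: nothing to do */
                goto out;

        if (my_handle_one_cmd(dev))     /* more pending? yield the CPU, run again later */
                queue_work(dev->wq, &dev->cmd_work);
out:
        mutex_unlock(&dev->lock);
}

/* Set-up side: create the workqueue and initialize the embedded work item once. */
static int my_dev_init_work(struct my_dev *dev)
{
        dev->wq = create_singlethread_workqueue("my_dev_wq");
        if (!dev->wq)
                return -ENOMEM;
        mutex_init(&dev->lock);
        INIT_WORK(&dev->cmd_work, my_cmd_work_handler);
        dev->running = true;
        return 0;
}

/* Kick path: safe from atomic context, never fails, never allocates. */
static void my_dev_kick(struct my_dev *dev)
{
        queue_work(dev->wq, &dev->cmd_work);
}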

drivers/virtio/virtio.c

@@ -526,9 +526,8 @@ int virtio_device_restore(struct virtio_device *dev)
                         goto err;
         }
 
-        /* If restore didn't do it, mark device DRIVER_OK ourselves. */
-        if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
-                virtio_device_ready(dev);
+        /* Finally, tell the device we're all set */
+        virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
 
         virtio_config_enable(dev);

include/linux/virtio_config.h

@@ -23,8 +23,6 @@ struct virtio_shm_region {
  * any of @get/@set, @get_status/@set_status, or @get_features/
  * @finalize_features are NOT safe to be called from an atomic
  * context.
- * @enable_cbs: enable the callbacks
- *      vdev: the virtio_device
  * @get: read the value of a configuration field
  *      vdev: the virtio_device
  *      offset: the offset of the configuration field
@@ -78,7 +76,6 @@ struct virtio_shm_region {
  */
 typedef void vq_callback_t(struct virtqueue *);
 struct virtio_config_ops {
-        void (*enable_cbs)(struct virtio_device *vdev);
         void (*get)(struct virtio_device *vdev, unsigned offset,
                     void *buf, unsigned len);
         void (*set)(struct virtio_device *vdev, unsigned offset,
@@ -233,9 +230,6 @@ void virtio_device_ready(struct virtio_device *dev)
 {
         unsigned status = dev->config->get_status(dev);
 
-        if (dev->config->enable_cbs)
-                dev->config->enable_cbs(dev);
-
         BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
         dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
 }