mirror of
https://github.com/torvalds/linux.git
synced 2024-11-10 22:21:40 +00:00
vdpa/mlx5: Allow creation/deletion of any given mr struct
This patch adapts the mr creation/deletion code to be able to work with any given mr struct pointer. All the APIs are adapted to take an extra parameter for the mr. mlx5_vdpa_create/delete_mr doesn't need an ASID parameter anymore. The check is done in the caller instead (mlx5_set_map). This change is needed for a followup patch which will introduce an additional mr for the vq descriptor data. Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com> Acked-by: Eugenio Pérez <eperezma@redhat.com> Acked-by: Jason Wang <jasowang@redhat.com> Message-Id: <20231018171456.1624030-12-dtatulea@nvidia.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Reviewed-by: Si-Wei Liu <si-wei.liu@oracle.com> Tested-by: Si-Wei Liu <si-wei.liu@oracle.com> Tested-by: Lei Yang <leiyang@redhat.com>
This commit is contained in:
parent
07a2da4024
commit
1b3ce9576f
@ -116,10 +116,12 @@ int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey, u32 *in,
|
||||
int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey);
|
||||
int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
|
||||
bool *change_map, unsigned int asid);
|
||||
int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
|
||||
unsigned int asid);
|
||||
int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
|
||||
struct mlx5_vdpa_mr *mr,
|
||||
struct vhost_iotlb *iotlb);
|
||||
void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev);
|
||||
void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid);
|
||||
void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev,
|
||||
struct mlx5_vdpa_mr *mr);
|
||||
int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
|
||||
struct vhost_iotlb *iotlb,
|
||||
unsigned int asid);
|
||||
|
@ -301,10 +301,13 @@ static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct
|
||||
sg_free_table(&mr->sg_head);
|
||||
}
|
||||
|
||||
static int add_direct_chain(struct mlx5_vdpa_dev *mvdev, u64 start, u64 size, u8 perm,
|
||||
static int add_direct_chain(struct mlx5_vdpa_dev *mvdev,
|
||||
struct mlx5_vdpa_mr *mr,
|
||||
u64 start,
|
||||
u64 size,
|
||||
u8 perm,
|
||||
struct vhost_iotlb *iotlb)
|
||||
{
|
||||
struct mlx5_vdpa_mr *mr = &mvdev->mr;
|
||||
struct mlx5_vdpa_direct_mr *dmr;
|
||||
struct mlx5_vdpa_direct_mr *n;
|
||||
LIST_HEAD(tmp);
|
||||
@ -354,9 +357,10 @@ err_alloc:
|
||||
* indirect memory key that provides access to the enitre address space given
|
||||
* by iotlb.
|
||||
*/
|
||||
static int create_user_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
|
||||
static int create_user_mr(struct mlx5_vdpa_dev *mvdev,
|
||||
struct mlx5_vdpa_mr *mr,
|
||||
struct vhost_iotlb *iotlb)
|
||||
{
|
||||
struct mlx5_vdpa_mr *mr = &mvdev->mr;
|
||||
struct mlx5_vdpa_direct_mr *dmr;
|
||||
struct mlx5_vdpa_direct_mr *n;
|
||||
struct vhost_iotlb_map *map;
|
||||
@ -384,7 +388,7 @@ static int create_user_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb
|
||||
LOG_MAX_KLM_SIZE);
|
||||
mr->num_klms += nnuls;
|
||||
}
|
||||
err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb);
|
||||
err = add_direct_chain(mvdev, mr, ps, pe - ps, pperm, iotlb);
|
||||
if (err)
|
||||
goto err_chain;
|
||||
}
|
||||
@ -393,7 +397,7 @@ static int create_user_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb
|
||||
pperm = map->perm;
|
||||
}
|
||||
}
|
||||
err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb);
|
||||
err = add_direct_chain(mvdev, mr, ps, pe - ps, pperm, iotlb);
|
||||
if (err)
|
||||
goto err_chain;
|
||||
|
||||
@ -489,13 +493,8 @@ static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr
|
||||
}
|
||||
}
|
||||
|
||||
static void _mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
|
||||
static void _mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
|
||||
{
|
||||
struct mlx5_vdpa_mr *mr = &mvdev->mr;
|
||||
|
||||
if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid)
|
||||
return;
|
||||
|
||||
if (!mr->initialized)
|
||||
return;
|
||||
|
||||
@ -507,38 +506,33 @@ static void _mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid
|
||||
mr->initialized = false;
|
||||
}
|
||||
|
||||
void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
|
||||
void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev,
|
||||
struct mlx5_vdpa_mr *mr)
|
||||
{
|
||||
struct mlx5_vdpa_mr *mr = &mvdev->mr;
|
||||
|
||||
mutex_lock(&mr->mkey_mtx);
|
||||
|
||||
_mlx5_vdpa_destroy_mr(mvdev, asid);
|
||||
_mlx5_vdpa_destroy_mr(mvdev, mr);
|
||||
|
||||
mutex_unlock(&mr->mkey_mtx);
|
||||
}
|
||||
|
||||
void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev)
|
||||
{
|
||||
mlx5_vdpa_destroy_mr(mvdev, mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]);
|
||||
mlx5_vdpa_destroy_mr(mvdev, &mvdev->mr);
|
||||
prune_iotlb(mvdev);
|
||||
}
|
||||
|
||||
static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
|
||||
struct vhost_iotlb *iotlb,
|
||||
unsigned int asid)
|
||||
struct mlx5_vdpa_mr *mr,
|
||||
struct vhost_iotlb *iotlb)
|
||||
{
|
||||
struct mlx5_vdpa_mr *mr = &mvdev->mr;
|
||||
int err;
|
||||
|
||||
if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid)
|
||||
return 0;
|
||||
|
||||
if (mr->initialized)
|
||||
return 0;
|
||||
|
||||
if (iotlb)
|
||||
err = create_user_mr(mvdev, iotlb);
|
||||
err = create_user_mr(mvdev, mr, iotlb);
|
||||
else
|
||||
err = create_dma_mr(mvdev, mr);
|
||||
|
||||
@ -550,13 +544,14 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
|
||||
unsigned int asid)
|
||||
int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
|
||||
struct mlx5_vdpa_mr *mr,
|
||||
struct vhost_iotlb *iotlb)
|
||||
{
|
||||
int err;
|
||||
|
||||
mutex_lock(&mvdev->mr.mkey_mtx);
|
||||
err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid);
|
||||
err = _mlx5_vdpa_create_mr(mvdev, mr, iotlb);
|
||||
mutex_unlock(&mvdev->mr.mkey_mtx);
|
||||
return err;
|
||||
}
|
||||
@ -574,7 +569,7 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io
|
||||
*change_map = true;
|
||||
}
|
||||
if (!*change_map)
|
||||
err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid);
|
||||
err = _mlx5_vdpa_create_mr(mvdev, mr, iotlb);
|
||||
mutex_unlock(&mr->mkey_mtx);
|
||||
|
||||
return err;
|
||||
@ -603,7 +598,7 @@ int mlx5_vdpa_create_dma_mr(struct mlx5_vdpa_dev *mvdev)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = mlx5_vdpa_create_mr(mvdev, NULL, 0);
|
||||
err = mlx5_vdpa_create_mr(mvdev, &mvdev->mr, NULL);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
|
@ -2684,8 +2684,8 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
|
||||
goto err_mr;
|
||||
|
||||
teardown_driver(ndev);
|
||||
mlx5_vdpa_destroy_mr(mvdev, asid);
|
||||
err = mlx5_vdpa_create_mr(mvdev, iotlb, asid);
|
||||
mlx5_vdpa_destroy_mr(mvdev, &mvdev->mr);
|
||||
err = mlx5_vdpa_create_mr(mvdev, &mvdev->mr, iotlb);
|
||||
if (err)
|
||||
goto err_mr;
|
||||
|
||||
@ -2700,7 +2700,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
|
||||
return 0;
|
||||
|
||||
err_setup:
|
||||
mlx5_vdpa_destroy_mr(mvdev, asid);
|
||||
mlx5_vdpa_destroy_mr(mvdev, &mvdev->mr);
|
||||
err_mr:
|
||||
return err;
|
||||
}
|
||||
@ -2922,6 +2922,9 @@ static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
|
||||
bool change_map;
|
||||
int err;
|
||||
|
||||
if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid)
|
||||
goto end;
|
||||
|
||||
err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map, asid);
|
||||
if (err) {
|
||||
mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
|
||||
@ -2934,6 +2937,7 @@ static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
|
||||
return err;
|
||||
}
|
||||
|
||||
end:
|
||||
return mlx5_vdpa_update_cvq_iotlb(mvdev, iotlb, asid);
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user