Second RDMA 5.6 pull request

- Fix busted syzkaller fix in 'get_new_pps' - this turned out to crash
  on certain HW configurations

- Bug fixes for various missed things in error unwinds

- Add a missing rcu_read_lock annotation in hfi/qib

- Fix two ODP related regressions from the recent mmu notifier changes

- Several more syzkaller bugs in siw, RDMA netlink, verbs and iwcm

- Revert an old patch in CMA as it is now shown to not be allocating
  port numbers properly

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEfB7FMLh+8QxL+6i3OG33FX4gmxoFAl5iWSQACgkQOG33FX4g
mxoadw//ZkIcG25OMhgc4iqOXT+brCCYosdi1MB8ptcW/lx+t2jH8VD9cd8kOW4M
VfFIpiuqVc6U06BpoRJkSV3Ix5Hiw0nQVD9q1mNiqSs0fyAuJG0NGtVeqWWXSFFC
ptHzn1z5Aw9GV2necS+nJcZ3NceMW/rP255LHioqVfj7xSFJiymXfncH7YwQZOop
S88Dr3m+DibW+ueVwvtLPvSPaWL40NGZo4sNuITrfiJuHYvstWedUMtYkGCGjrmT
bUI7lpYgsakVTlM2LTtlAFrAoL/adkfrNbiCVLqGLpoy3DIdXVscQzt9CRnCP1iF
t1l0jY+2YNAMMfjktLDnhUU7wfAwgw/XTNoqzlRCAAiTp7D8+eo560Txj9xyjGw+
spxGOWuDEVWlBOFHHltRbQ13QZ06vA7yg0YqoIuEg86c+X38NoVEA3sRf59v05qM
XqPcdIBusjRfd8kZsk07uYbp5VQsNHSfL2ZtxAFwiWFr4stjBcwqrx3sFw5610uZ
Pt6uWN6JlGRb7A35I0ZuRwWhN1HTFkd7rIKK3d5hTWcqefH6JAkZldMsG0qt/YW2
nRnoZhUNwtP2YI6eOTpskQCyK41tqP5tC84k1GMBuAxMYw40FFqN9/M7v0h9NWq7
Eq8BMjbLB6DDR8cBJk7uoYfpYM6slnGLlDGfrLRR9j1oWv6iuCY=
=SFSu
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Nothing particularly exciting, some small ODP regressions from the
  mmu notifier rework, another bunch of syzkaller fixes, and a bug fix
  for a botched syzkaller fix in the first rc pull request.

   - Fix busted syzkaller fix in 'get_new_pps' - this turned out to
     crash on certain HW configurations

   - Bug fixes for various missed things in error unwinds

   - Add a missing rcu_read_lock annotation in hfi/qib

   - Fix two ODP related regressions from the recent mmu notifier
     changes

   - Several more syzkaller bugs in siw, RDMA netlink, verbs and iwcm

   - Revert an old patch in CMA as it is now shown to not be allocating
     port numbers properly"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/iwcm: Fix iwcm work deallocation
  RDMA/siw: Fix failure handling during device creation
  RDMA/nldev: Fix crash when set a QP to a new counter but QPN is missing
  RDMA/odp: Ensure the mm is still alive before creating an implicit child
  RDMA/core: Fix protection fault in ib_mr_pool_destroy
  IB/mlx5: Fix implicit ODP race
  IB/hfi1, qib: Ensure RCU is locked when accessing list
  RDMA/core: Fix pkey and port assignment in get_new_pps
  RMDA/cm: Fix missing ib_cm_destroy_id() in ib_cm_insert_listen()
  RDMA/rw: Fix error flow during RDMA context initialization
  RDMA/core: Fix use of logical OR in get_new_pps
  Revert "RDMA/cma: Simplify rdma_resolve_addr() error flow"
commit 61a09258f2
@@ -1191,6 +1191,7 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
             /* Sharing an ib_cm_id with different handlers is not
              * supported */
             spin_unlock_irqrestore(&cm.lock, flags);
+            ib_destroy_cm_id(cm_id);
             return ERR_PTR(-EINVAL);
         }
         refcount_inc(&cm_id_priv->refcount);
@@ -3212,19 +3212,26 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
     int ret;

     id_priv = container_of(id, struct rdma_id_private, id);
+    memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
     if (id_priv->state == RDMA_CM_IDLE) {
         ret = cma_bind_addr(id, src_addr, dst_addr);
-        if (ret)
+        if (ret) {
+            memset(cma_dst_addr(id_priv), 0,
+                   rdma_addr_size(dst_addr));
             return ret;
+        }
     }

-    if (cma_family(id_priv) != dst_addr->sa_family)
+    if (cma_family(id_priv) != dst_addr->sa_family) {
+        memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
         return -EINVAL;
+    }

-    if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
+    if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
+        memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
         return -EINVAL;
+    }

-    memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
     if (cma_any_addr(dst_addr)) {
         ret = cma_resolve_loopback(id_priv);
     } else {
@@ -338,6 +338,20 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
     qp->pd = pd;
     qp->uobject = uobj;
     qp->real_qp = qp;
+
+    qp->qp_type = attr->qp_type;
+    qp->rwq_ind_tbl = attr->rwq_ind_tbl;
+    qp->send_cq = attr->send_cq;
+    qp->recv_cq = attr->recv_cq;
+    qp->srq = attr->srq;
+    qp->rwq_ind_tbl = attr->rwq_ind_tbl;
+    qp->event_handler = attr->event_handler;
+
+    atomic_set(&qp->usecnt, 0);
+    spin_lock_init(&qp->mr_lock);
+    INIT_LIST_HEAD(&qp->rdma_mrs);
+    INIT_LIST_HEAD(&qp->sig_mrs);
+
     /*
      * We don't track XRC QPs for now, because they don't have PD
      * and more importantly they are created internaly by driver,
@@ -159,8 +159,10 @@ static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
 {
     struct list_head *e, *tmp;

-    list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
+    list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) {
+        list_del(e);
         kfree(list_entry(e, struct iwcm_work, free_list));
+    }
 }

 static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
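The fix above unlinks each work entry before it is freed. A minimal userspace sketch of the same idea, using stand-in list helpers rather than the kernel's <linux/list.h> (the names echo the kernel API, but everything here is hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

/* Minimal stand-ins for the kernel's list helpers, only for this sketch. */
struct list_head { struct list_head *next, *prev; };

static void init_list(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *e, struct list_head *head)
{
    e->next = head->next;
    e->prev = head;
    head->next->prev = e;
    head->next = e;
}

static void list_del(struct list_head *e)
{
    e->prev->next = e->next;
    e->next->prev = e->prev;
}

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct iwcm_work_toy { int id; struct list_head free_list; };

int main(void)
{
    struct list_head free_head;
    struct list_head *e, *tmp;
    int i;

    init_list(&free_head);
    for (i = 0; i < 3; i++) {
        struct iwcm_work_toy *w = malloc(sizeof(*w));
        w->id = i;
        list_add(&w->free_list, &free_head);
    }

    /*
     * Equivalent of list_for_each_safe(): unlink each entry before freeing
     * it, so after the loop the head is empty instead of pointing at freed
     * memory, and a second cleanup pass is a harmless no-op.
     */
    for (e = free_head.next, tmp = e->next; e != &free_head;
         e = tmp, tmp = e->next) {
        list_del(e);
        free(container_of(e, struct iwcm_work_toy, free_list));
    }

    printf("free list empty: %d\n", free_head.next == &free_head);
    return 0;
}

Without the list_del(), the head keeps pointing at freed entries after the loop, so any later walk of the free list touches freed memory.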
@@ -1757,6 +1757,8 @@ static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
         if (ret)
             goto err_msg;
     } else {
+        if (!tb[RDMA_NLDEV_ATTR_RES_LQPN])
+            goto err_msg;
         qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
         if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) {
             cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
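The two added lines are the whole fix: an optional netlink attribute has to be tested for presence before it is read. A standalone sketch of the same guard with toy types (this is not the kernel's nla_* API):

#include <stdio.h>
#include <stdint.h>

/* Toy stand-in for a parsed attribute table entry: NULL means "not sent". */
struct nl_attr_toy { uint32_t value; };

static int stat_set(const struct nl_attr_toy *lqpn,
                    const struct nl_attr_toy *counter_id)
{
    /* Bail out before reading the QP number if it was never supplied. */
    if (!lqpn)
        return -22; /* -EINVAL */

    printf("bind QP %u to %s counter\n", lqpn->value,
           counter_id ? "an existing" : "a new");
    return 0;
}

int main(void)
{
    struct nl_attr_toy qpn = { .value = 42 };

    printf("missing QPN -> %d\n", stat_set(NULL, NULL));
    printf("QPN present -> %d\n", stat_set(&qpn, NULL));
    return 0;
}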
@@ -273,6 +273,23 @@ static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
     return 1;
 }

+static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
+                             u32 sg_cnt, enum dma_data_direction dir)
+{
+    if (is_pci_p2pdma_page(sg_page(sg)))
+        pci_p2pdma_unmap_sg(dev->dma_device, sg, sg_cnt, dir);
+    else
+        ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
+}
+
+static int rdma_rw_map_sg(struct ib_device *dev, struct scatterlist *sg,
+                          u32 sg_cnt, enum dma_data_direction dir)
+{
+    if (is_pci_p2pdma_page(sg_page(sg)))
+        return pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
+    return ib_dma_map_sg(dev, sg, sg_cnt, dir);
+}
+
 /**
  * rdma_rw_ctx_init - initialize a RDMA READ/WRITE context
  * @ctx: context to initialize
@@ -295,11 +312,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
     struct ib_device *dev = qp->pd->device;
     int ret;

-    if (is_pci_p2pdma_page(sg_page(sg)))
-        ret = pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
-    else
-        ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
-
+    ret = rdma_rw_map_sg(dev, sg, sg_cnt, dir);
     if (!ret)
         return -ENOMEM;
     sg_cnt = ret;
@@ -338,7 +351,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
     return ret;

 out_unmap_sg:
-    ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
+    rdma_rw_unmap_sg(dev, sg, sg_cnt, dir);
     return ret;
 }
 EXPORT_SYMBOL(rdma_rw_ctx_init);
@@ -588,11 +601,7 @@ void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
         break;
     }

-    if (is_pci_p2pdma_page(sg_page(sg)))
-        pci_p2pdma_unmap_sg(qp->pd->device->dma_device, sg,
-                            sg_cnt, dir);
-    else
-        ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+    rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
 }
 EXPORT_SYMBOL(rdma_rw_ctx_destroy);
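The new rdma_rw_map_sg()/rdma_rw_unmap_sg() pair keeps the error unwind symmetric with the mapping path, so a P2PDMA mapping is no longer torn down with the plain ib_dma_unmap_sg() call. A small userspace sketch of that pairing, with invented helper names standing in for the two mapping flavours:

#include <stdio.h>
#include <stdbool.h>

/* Two mapping flavours standing in for P2PDMA vs. regular DMA mappings. */
static bool is_p2p(int buf) { return buf % 2 != 0; }

static int map_buf(int buf)
{
    printf("%s map   %d\n", is_p2p(buf) ? "p2p" : "dma", buf);
    return 1; /* number of mapped segments; 0 would mean failure */
}

static void unmap_buf(int buf)
{
    printf("%s unmap %d\n", is_p2p(buf) ? "p2p" : "dma", buf);
}

static int ctx_init(int buf, bool later_step_fails)
{
    if (!map_buf(buf))
        return -12; /* -ENOMEM */

    if (later_step_fails)
        goto out_unmap;

    /* Success: the mapping stays live and is torn down by the destroy path. */
    return 0;

out_unmap:
    /*
     * Unwind through the same helper that created the mapping, so a p2p
     * mapping is never released via the plain dma path; this mirrors the
     * rdma_rw_map_sg()/rdma_rw_unmap_sg() pairing in the hunks above.
     */
    unmap_buf(buf);
    return -5;
}

int main(void)
{
    ctx_init(1, true);  /* "p2p" buffer, later failure -> p2p unmap */
    ctx_init(2, false); /* "dma" buffer, success -> no unwind here */
    return 0;
}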
@@ -340,15 +340,19 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
         return NULL;

     if (qp_attr_mask & IB_QP_PORT)
-        new_pps->main.port_num =
-            (qp_pps) ? qp_pps->main.port_num : qp_attr->port_num;
+        new_pps->main.port_num = qp_attr->port_num;
+    else if (qp_pps)
+        new_pps->main.port_num = qp_pps->main.port_num;
+
     if (qp_attr_mask & IB_QP_PKEY_INDEX)
-        new_pps->main.pkey_index = (qp_pps) ? qp_pps->main.pkey_index :
-                                              qp_attr->pkey_index;
+        new_pps->main.pkey_index = qp_attr->pkey_index;
+    else if (qp_pps)
+        new_pps->main.pkey_index = qp_pps->main.pkey_index;
+
     if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT))
         new_pps->main.state = IB_PORT_PKEY_VALID;

-    if (!(qp_attr_mask & (IB_QP_PKEY_INDEX || IB_QP_PORT)) && qp_pps) {
+    if (!(qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) && qp_pps) {
         new_pps->main.port_num = qp_pps->main.port_num;
         new_pps->main.pkey_index = qp_pps->main.pkey_index;
         if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
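The one-character change from || to | is the "Fix use of logical OR in get_new_pps" patch called out in the pull request. A standalone sketch, with made-up mask values rather than the real IB_QP_* constants, of why the logical operator silently breaks the test:

#include <stdio.h>

/* Made-up mask bits standing in for IB_QP_PKEY_INDEX / IB_QP_PORT. */
#define QP_PKEY_INDEX (1 << 0)
#define QP_PORT       (1 << 6)

int main(void)
{
    int qp_attr_mask = QP_PORT;

    /*
     * Logical OR: (QP_PKEY_INDEX || QP_PORT) collapses to the constant 1,
     * so only bit 0 is tested and the check wrongly says "neither set".
     */
    printf("logical OR says neither flag set: %d\n",
           !(qp_attr_mask & (QP_PKEY_INDEX || QP_PORT)));

    /* Bitwise OR builds the real two-bit mask and sees QP_PORT. */
    printf("bitwise OR says neither flag set: %d\n",
           !(qp_attr_mask & (QP_PKEY_INDEX | QP_PORT)));
    return 0;
}

With ||, the parenthesised expression is always 1, so the "neither pkey nor port supplied" branch fires whenever bit 0 of qp_attr_mask happens to be clear.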
@@ -181,14 +181,28 @@ ib_umem_odp_alloc_child(struct ib_umem_odp *root, unsigned long addr,
     odp_data->page_shift = PAGE_SHIFT;
     odp_data->notifier.ops = ops;

+    /*
+     * A mmget must be held when registering a notifier, the owming_mm only
+     * has a mm_grab at this point.
+     */
+    if (!mmget_not_zero(umem->owning_mm)) {
+        ret = -EFAULT;
+        goto out_free;
+    }
+
     odp_data->tgid = get_pid(root->tgid);
     ret = ib_init_umem_odp(odp_data, ops);
-    if (ret) {
-        put_pid(odp_data->tgid);
-        kfree(odp_data);
-        return ERR_PTR(ret);
-    }
+    if (ret)
+        goto out_tgid;
+    mmput(umem->owning_mm);
     return odp_data;
+
+out_tgid:
+    put_pid(odp_data->tgid);
+    mmput(umem->owning_mm);
+out_free:
+    kfree(odp_data);
+    return ERR_PTR(ret);
 }
 EXPORT_SYMBOL(ib_umem_odp_alloc_child);
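The new mmget_not_zero()/mmput() bracket reflects the two reference counts an mm_struct carries: a grab only keeps the structure allocated, while registering a notifier needs the address space itself to still be alive. A toy userspace model of that distinction (the names mirror the kernel API, but nothing here is kernel code):

#include <stdio.h>
#include <stdbool.h>

/*
 * Toy model of the two mm_struct reference counts: mm_count (mmgrab/mmdrop)
 * keeps the structure allocated, mm_users (mmget/mmput) keeps the address
 * space alive.
 */
struct mm_toy {
    int mm_count; /* pins the struct        */
    int mm_users; /* pins the address space */
};

static bool mmget_not_zero(struct mm_toy *mm)
{
    if (mm->mm_users == 0)
        return false; /* the owner already exited; no page tables left */
    mm->mm_users++;
    return true;
}

static void mmput(struct mm_toy *mm) { mm->mm_users--; }

static int alloc_child(struct mm_toy *owning_mm)
{
    /* Registering a notifier needs live page tables, not just the struct. */
    if (!mmget_not_zero(owning_mm))
        return -14; /* -EFAULT, as in the hunk above */

    printf("notifier registered (mm_users now %d)\n", owning_mm->mm_users);
    mmput(owning_mm);
    return 0;
}

int main(void)
{
    struct mm_toy alive = { .mm_count = 1, .mm_users = 1 };
    struct mm_toy dead  = { .mm_count = 1, .mm_users = 0 }; /* only a grab held */

    printf("alive mm -> %d\n", alloc_child(&alive));
    printf("dead mm  -> %d\n", alloc_child(&dead));
    return 0;
}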
@@ -1445,16 +1445,7 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
     if (ret)
         goto err_cb;

-    qp->pd = pd;
-    qp->send_cq = attr.send_cq;
-    qp->recv_cq = attr.recv_cq;
-    qp->srq = attr.srq;
-    qp->rwq_ind_tbl = ind_tbl;
-    qp->event_handler = attr.event_handler;
-    qp->qp_type = attr.qp_type;
-    atomic_set(&qp->usecnt, 0);
     atomic_inc(&pd->usecnt);
-    qp->port = 0;
     if (attr.send_cq)
         atomic_inc(&attr.send_cq->usecnt);
     if (attr.recv_cq)
@@ -1185,16 +1185,6 @@ struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
     if (ret)
         goto err;

-    qp->qp_type = qp_init_attr->qp_type;
-    qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;
-
-    atomic_set(&qp->usecnt, 0);
-    qp->mrs_used = 0;
-    spin_lock_init(&qp->mr_lock);
-    INIT_LIST_HEAD(&qp->rdma_mrs);
-    INIT_LIST_HEAD(&qp->sig_mrs);
-    qp->port = 0;
-
     if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
         struct ib_qp *xrc_qp =
             create_xrc_qp_user(qp, qp_init_attr, udata);
@@ -515,10 +515,11 @@ static inline void hfi1_handle_packet(struct hfi1_packet *packet,
                                opa_get_lid(packet->dlid, 9B));
         if (!mcast)
             goto drop;
+        rcu_read_lock();
         list_for_each_entry_rcu(p, &mcast->qp_list, list) {
             packet->qp = p->qp;
             if (hfi1_do_pkey_check(packet))
-                goto drop;
+                goto unlock_drop;
             spin_lock_irqsave(&packet->qp->r_lock, flags);
             packet_handler = qp_ok(packet);
             if (likely(packet_handler))
@@ -527,6 +528,7 @@ static inline void hfi1_handle_packet(struct hfi1_packet *packet,
                 ibp->rvp.n_pkt_drops++;
             spin_unlock_irqrestore(&packet->qp->r_lock, flags);
         }
+        rcu_read_unlock();
         /*
          * Notify rvt_multicast_detach() if it is waiting for us
          * to finish.
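Both the hfi1 and qib hunks bracket the multicast QP list walk with rcu_read_lock()/rcu_read_unlock(), and every early exit inside the walk now leaves through a label that drops the lock first. A userspace sketch of that bracketing discipline, modelled with a pthread rwlock instead of real RCU (build with -pthread; the function and label names are invented):

#include <stdio.h>
#include <pthread.h>

static pthread_rwlock_t qp_list_lock = PTHREAD_RWLOCK_INITIALIZER;

/*
 * Shape of the hfi1/qib fix: the list walk is bracketed by lock/unlock, and
 * an error found mid-walk exits through a label that drops the lock instead
 * of jumping straight out.
 */
static int handle_packet(int bad_pkey)
{
    int ret = 0;

    pthread_rwlock_rdlock(&qp_list_lock);   /* rcu_read_lock() in the hunks */

    /* ... walk the multicast QP list here ... */
    if (bad_pkey) {
        ret = -1;
        goto unlock_drop;                   /* was a bare "goto drop" */
    }

unlock_drop:
    pthread_rwlock_unlock(&qp_list_lock);   /* rcu_read_unlock() */
    return ret;
}

int main(void)
{
    printf("good packet -> %d\n", handle_packet(0));
    printf("bad pkey    -> %d\n", handle_packet(1));
    return 0;
}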
@@ -636,6 +636,7 @@ struct mlx5_ib_mr {

     /* For ODP and implicit */
     atomic_t num_deferred_work;
+    wait_queue_head_t q_deferred_work;
     struct xarray implicit_children;
     union {
         struct rcu_head rcu;
@@ -235,7 +235,8 @@ static void free_implicit_child_mr(struct mlx5_ib_mr *mr, bool need_imr_xlt)
     mr->parent = NULL;
     mlx5_mr_cache_free(mr->dev, mr);
     ib_umem_odp_release(odp);
-    atomic_dec(&imr->num_deferred_work);
+    if (atomic_dec_and_test(&imr->num_deferred_work))
+        wake_up(&imr->q_deferred_work);
 }

 static void free_implicit_child_mr_work(struct work_struct *work)
@@ -554,6 +555,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
     imr->umem = &umem_odp->umem;
     imr->is_odp_implicit = true;
     atomic_set(&imr->num_deferred_work, 0);
+    init_waitqueue_head(&imr->q_deferred_work);
     xa_init(&imr->implicit_children);

     err = mlx5_ib_update_xlt(imr, 0,
@@ -611,10 +613,7 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
      * under xa_lock while the child is in the xarray. Thus at this point
      * it is only decreasing, and all work holding it is now on the wq.
      */
-    if (atomic_read(&imr->num_deferred_work)) {
-        flush_workqueue(system_unbound_wq);
-        WARN_ON(atomic_read(&imr->num_deferred_work));
-    }
+    wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));

     /*
      * Fence the imr before we destroy the children. This allows us to
@@ -645,10 +644,7 @@ void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr)
     /* Wait for all running page-fault handlers to finish. */
     synchronize_srcu(&mr->dev->odp_srcu);

-    if (atomic_read(&mr->num_deferred_work)) {
-        flush_workqueue(system_unbound_wq);
-        WARN_ON(atomic_read(&mr->num_deferred_work));
-    }
+    wait_event(mr->q_deferred_work, !atomic_read(&mr->num_deferred_work));

     dma_fence_odp_mr(mr);
 }
@@ -1720,7 +1716,8 @@ static void destroy_prefetch_work(struct prefetch_mr_work *work)
     u32 i;

     for (i = 0; i < work->num_sge; ++i)
-        atomic_dec(&work->frags[i].mr->num_deferred_work);
+        if (atomic_dec_and_test(&work->frags[i].mr->num_deferred_work))
+            wake_up(&work->frags[i].mr->q_deferred_work);
     kvfree(work);
 }
@@ -329,8 +329,10 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
         if (mcast == NULL)
             goto drop;
         this_cpu_inc(ibp->pmastats->n_multicast_rcv);
+        rcu_read_lock();
         list_for_each_entry_rcu(p, &mcast->qp_list, list)
             qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
+        rcu_read_unlock();
         /*
          * Notify rvt_multicast_detach() if it is waiting for us
          * to finish.
@@ -388,6 +388,9 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
         { .max_segment_size = SZ_2G };
     base_dev->num_comp_vectors = num_possible_cpus();

+    xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1);
+    xa_init_flags(&sdev->mem_xa, XA_FLAGS_ALLOC1);
+
     ib_set_device_ops(base_dev, &siw_device_ops);
     rv = ib_device_set_netdev(base_dev, netdev, 1);
     if (rv)
@@ -415,9 +418,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
     sdev->attrs.max_srq_wr = SIW_MAX_SRQ_WR;
     sdev->attrs.max_srq_sge = SIW_MAX_SGE;

-    xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1);
-    xa_init_flags(&sdev->mem_xa, XA_FLAGS_ALLOC1);
-
     INIT_LIST_HEAD(&sdev->cep_list);
     INIT_LIST_HEAD(&sdev->qp_list);