RDMA v5.14 second rc Pull Request

Several small recent regressions:
 - Typo causing incorrect operation of the mlx5 mkey cache expiration
 - Revert a CM patch that is breaking some ULPs
 - Typo breaking SRQ in rxe
 - Revert a rxe patch breaking icrc calculation
 - Static checker warning about unbalanced locking in hns
 - Subtle cxgb4 regression from a recent atomic to refcount conversion

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEfB7FMLh+8QxL+6i3OG33FX4gmxoFAmEMk6cACgkQOG33FX4g
mxpkXA//caQKk5ggU1x70E/Hzfm4aIRFZCPTP/TSUv5mMFW9qyor9TE3Q+CoEyXq
QN15wPx5VSW6pNqwsHG4kQLbBnw/3rlUL5rL3rhU0lwR3+DaL31eTB1WjADJnO3d
SxzN3FH42aKGbAVDXs75qBPv2NgvWyOjwn7p/gxjBeOVd9JpKNSTNzKAFkN6F+Ct
7bpTMvbJRmYF8bOJEk3jFhQqZwTIYIlOgSPJ8r7zwNqyGRAjRi7HiLNWLWsKK5Nt
qJlmLa/qMpxBT0JUzcN2Il4DRlSP9BxZEBKzaJHkUpYz4p+lnuQSjtwYXnggJBiT
RGW/MXsWU/itwkGRic1uAxTeFFbsOiQ7eGk/gYJ6ByoTuGnHihmZERI4SkgDPdhG
VWpN6+CBr4u765l/z+LaX/kt03U47E1S7rYUezcTIDWktRHINfKOVazDsN0TjtSR
HR8REA0IVfnnEqKj04QZqjZ42OuehtMIXc1N/c74z7KLbvoFx+kHte+iP4fffYYn
wqG1tI4oD9+kZIHQQVTXgc8Edt5zRD9UCiaHHdRhwcVSzbMzRMTUTB7qCsICqtsh
9W/zVm+0A81UKy2Qeffh5QvFpMS/mpmmvGT6/50IoUoC6fOqj4gj2OtNOV8XArRf
xTIOw1dMmeX8WSnSNpCBql7jkDySID7Ecx9DgbKRoukHtAxf2rM=
=dwUD
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Several small recent regressions - rather more than usual, but
  nothing too scary. Good to know people are testing.

  - Typo causing incorrect operation of the mlx5 mkey cache expiration

  - Revert a CM patch that is breaking some ULPs

  - Typo breaking SRQ in rxe

  - Revert a rxe patch breaking icrc calculation

  - Static checker warning about unbalanced locking in hns

  - Subtle cxgb4 regression from a recent atomic to refcount
    conversion"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/iw_cxgb4: Fix refcount underflow while destroying cqs.
  RDMA/hns: Fix the double unlock problem of poll_sem
  RDMA/rxe: Restore setting tot_len in the IPv4 header
  RDMA/rxe: Use the correct size of wqe when processing SRQ
  RDMA/cma: Revert INIT-INIT patch
  RDMA/mlx5: Delay emptying a cache entry when a new MR is added to it recently
commit b4b927fcb0
drivers/infiniband/core/cma.c

@@ -926,12 +926,25 @@ static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
         return ret;
 }
 
+static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
+{
+        struct ib_qp_attr qp_attr;
+        int qp_attr_mask, ret;
+
+        qp_attr.qp_state = IB_QPS_INIT;
+        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
+        if (ret)
+                return ret;
+
+        return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
+}
+
 int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
                    struct ib_qp_init_attr *qp_init_attr)
 {
         struct rdma_id_private *id_priv;
         struct ib_qp *qp;
-        int ret = 0;
+        int ret;
 
         id_priv = container_of(id, struct rdma_id_private, id);
         if (id->device != pd->device) {
@@ -948,6 +961,8 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
 
         if (id->qp_type == IB_QPT_UD)
                 ret = cma_init_ud_qp(id_priv, qp);
+        else
+                ret = cma_init_conn_qp(id_priv, qp);
         if (ret)
                 goto out_destroy;
 
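The revert matters because a connected QP that never passes through INIT cannot later be moved to RTR/RTS, which is what broke the ULPs mentioned above. As a hedged illustration, here is the user-space libibverbs analogue of the restored transition; move_rc_qp_to_init() and its parameters are invented for this sketch and are not part of the kernel change:

/*
 * Sketch only: the RESET -> INIT step that cma_init_conn_qp() restores
 * for connected QPs, expressed with the libibverbs API.
 */
#include <stdint.h>
#include <infiniband/verbs.h>

static int move_rc_qp_to_init(struct ibv_qp *qp, uint8_t port_num)
{
        struct ibv_qp_attr attr = {
                .qp_state        = IBV_QPS_INIT,
                .pkey_index      = 0,
                .port_num        = port_num,
                .qp_access_flags = 0,
        };

        /* The mask must name every attribute consumed by the INIT step. */
        return ibv_modify_qp(qp, &attr,
                             IBV_QP_STATE | IBV_QP_PKEY_INDEX |
                             IBV_QP_PORT | IBV_QP_ACCESS_FLAGS);
}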
drivers/infiniband/hw/cxgb4/cq.c

@@ -967,6 +967,12 @@ int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
         return !err || err == -ENODATA ? npolled : err;
 }
 
+void c4iw_cq_rem_ref(struct c4iw_cq *chp)
+{
+        if (refcount_dec_and_test(&chp->refcnt))
+                complete(&chp->cq_rel_comp);
+}
+
 int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 {
         struct c4iw_cq *chp;
@@ -976,8 +982,8 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
         chp = to_c4iw_cq(ib_cq);
 
         xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
-        refcount_dec(&chp->refcnt);
-        wait_event(chp->wait, !refcount_read(&chp->refcnt));
+        c4iw_cq_rem_ref(chp);
+        wait_for_completion(&chp->cq_rel_comp);
 
         ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
                                              ibucontext);
@@ -1081,7 +1087,7 @@ int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
         spin_lock_init(&chp->lock);
         spin_lock_init(&chp->comp_handler_lock);
         refcount_set(&chp->refcnt, 1);
-        init_waitqueue_head(&chp->wait);
+        init_completion(&chp->cq_rel_comp);
         ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
         if (ret)
                 goto err_destroy_cq;
drivers/infiniband/hw/cxgb4/ev.c

@@ -213,8 +213,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
                 break;
         }
 done:
-        if (refcount_dec_and_test(&chp->refcnt))
-                wake_up(&chp->wait);
+        c4iw_cq_rem_ref(chp);
         c4iw_qp_rem_ref(&qhp->ibqp);
 out:
         return;
@@ -234,8 +233,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
                 spin_lock_irqsave(&chp->comp_handler_lock, flag);
                 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
                 spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
-                if (refcount_dec_and_test(&chp->refcnt))
-                        wake_up(&chp->wait);
+                c4iw_cq_rem_ref(chp);
         } else {
                 pr_debug("unknown cqid 0x%x\n", qid);
                 xa_unlock_irqrestore(&dev->cqs, flag);
drivers/infiniband/hw/cxgb4/iw_cxgb4.h

@@ -428,7 +428,7 @@ struct c4iw_cq {
         spinlock_t lock;
         spinlock_t comp_handler_lock;
         refcount_t refcnt;
-        wait_queue_head_t wait;
+        struct completion cq_rel_comp;
         struct c4iw_wr_wait *wr_waitp;
 };
 
@@ -979,6 +979,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
 struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
 int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
 int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+void c4iw_cq_rem_ref(struct c4iw_cq *chp);
 int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                    struct ib_udata *udata);
 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
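Taken together, the three cxgb4 hunks replace a racy wait on a bare refcount with a completion signalled by the final reference drop; refcount_dec() on the last reference is exactly the underflow the refcount API warns about, and wait_event() on refcount_read() had no wakeup tied to the zero transition. A minimal kernel-style sketch of the resulting pattern, with invented names (struct obj, obj_put, obj_destroy) standing in for the driver's c4iw_cq and c4iw_cq_rem_ref():

/*
 * Sketch of the refcount + completion teardown idiom the fix adopts.
 * All identifiers here are illustrative, not from the driver.
 */
#include <linux/refcount.h>
#include <linux/completion.h>

struct obj {
        refcount_t refcnt;
        struct completion rel_comp;
};

static void obj_init(struct obj *o)
{
        refcount_set(&o->refcnt, 1);    /* creator's reference */
        init_completion(&o->rel_comp);
}

static void obj_put(struct obj *o)
{
        /* The last put signals whoever is blocked in obj_destroy(). */
        if (refcount_dec_and_test(&o->refcnt))
                complete(&o->rel_comp);
}

static void obj_destroy(struct obj *o)
{
        obj_put(o);                     /* drop the creator's reference */
        wait_for_completion(&o->rel_comp);
        /* no other user can hold a reference past this point */
}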
drivers/infiniband/hw/hns/hns_roce_cmd.c

@@ -213,8 +213,10 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
 
         hr_cmd->context =
                 kcalloc(hr_cmd->max_cmds, sizeof(*hr_cmd->context), GFP_KERNEL);
-        if (!hr_cmd->context)
+        if (!hr_cmd->context) {
+                hr_dev->cmd_mod = 0;
                 return -ENOMEM;
+        }
 
         for (i = 0; i < hr_cmd->max_cmds; ++i) {
                 hr_cmd->context[i].token = i;
@@ -228,7 +230,6 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
         spin_lock_init(&hr_cmd->context_lock);
 
         hr_cmd->use_events = 1;
-        down(&hr_cmd->poll_sem);
 
         return 0;
 }
@@ -239,8 +240,6 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev)
 
         kfree(hr_cmd->context);
         hr_cmd->use_events = 0;
-
-        up(&hr_cmd->poll_sem);
 }
 
 struct hns_roce_cmd_mailbox *
drivers/infiniband/hw/hns/hns_roce_main.c

@@ -873,11 +873,9 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
 
         if (hr_dev->cmd_mod) {
                 ret = hns_roce_cmd_use_events(hr_dev);
-                if (ret) {
+                if (ret)
                         dev_warn(dev,
                                  "Cmd event mode failed, set back to poll!\n");
-                        hns_roce_cmd_use_polling(hr_dev);
-                }
         }
 
         ret = hns_roce_init_hem(hr_dev);
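The hns problem was lock balance across functions: hns_roce_cmd_use_events() could fail before taking poll_sem, yet the old error path still ran hns_roce_cmd_use_polling(), which unconditionally released it. A kernel-style sketch of the convention the fix adopts, with all names invented: the failing helper undoes only its own state, and neither helper touches the semaphore, so no caller can ever up() something that was never downed:

/*
 * Illustrative only; struct and field names are not from the driver.
 */
#include <linux/types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>

struct dev_cmd {
        struct semaphore poll_sem;
        bool event_mode;
        void *context;
};

static int cmd_use_events(struct dev_cmd *cmd, size_t n)
{
        cmd->context = kcalloc(n, sizeof(u64), GFP_KERNEL);
        if (!cmd->context) {
                cmd->event_mode = false;  /* undo our own state... */
                return -ENOMEM;           /* ...so callers need no undo */
        }
        return 0;
}

static void cmd_use_polling(struct dev_cmd *cmd)
{
        kfree(cmd->context);
        cmd->context = NULL;
        /* no up(&cmd->poll_sem) here: this function never downed it */
}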
drivers/infiniband/hw/mlx5/mr.c

@@ -531,8 +531,8 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
                  */
                 spin_unlock_irq(&ent->lock);
                 need_delay = need_resched() || someone_adding(cache) ||
-                             time_after(jiffies,
-                                        READ_ONCE(cache->last_add) + 300 * HZ);
+                             !time_after(jiffies,
+                                         READ_ONCE(cache->last_add) + 300 * HZ);
                 spin_lock_irq(&ent->lock);
                 if (ent->disabled)
                         goto out;
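The missing '!' made the worker treat a recently refilled cache entry as stale instead of fresh: time_after(a, b) is true once a has passed b, while the intent is to delay shrinking while the 300 second window since the last MR was added has not yet expired. A compilable user-space rendering of the corrected predicate, with plain seconds standing in for the kernel's wrap-safe jiffies/HZ arithmetic and the helper name invented:

/*
 * Sketch: mirrors !time_after(jiffies, last_add + 300 * HZ), ignoring
 * the counter-wraparound handling the real time_after() provides.
 */
#include <stdbool.h>
#include <time.h>

#define CACHE_HOLD_SECONDS 300

static bool added_recently(time_t now, time_t last_add)
{
        return !(now > last_add + CACHE_HOLD_SECONDS);
}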
drivers/infiniband/sw/rxe/rxe_net.c

@@ -259,6 +259,7 @@ static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
 
         iph->version = IPVERSION;
         iph->ihl = sizeof(struct iphdr) >> 2;
+        iph->tot_len = htons(skb->len);
         iph->frag_off = df;
         iph->protocol = proto;
         iph->tos = tos;
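The restored line fills in the IPv4 total-length field when rxe builds its own header; without it the field stayed zero in the packets rxe emitted. A hedged user-space sketch of populating the fixed part of struct iphdr, with the helper name and argument list invented:

/*
 * Sketch: every IPv4 header must carry the total datagram length in
 * network byte order. Not the kernel path above.
 */
#include <stdint.h>
#include <string.h>
#include <netinet/ip.h>
#include <arpa/inet.h>

static void fill_ipv4_hdr(struct iphdr *iph, uint16_t total_len,
                          uint8_t proto, uint8_t tos)
{
        memset(iph, 0, sizeof(*iph));
        iph->version = IPVERSION;
        iph->ihl = sizeof(struct iphdr) >> 2;   /* header length in words */
        iph->tot_len = htons(total_len);        /* the restored field */
        iph->protocol = proto;
        iph->tos = tos;
}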
drivers/infiniband/sw/rxe/rxe_resp.c

@@ -318,7 +318,7 @@ static enum resp_states get_srq_wqe(struct rxe_qp *qp)
                 pr_warn("%s: invalid num_sge in SRQ entry\n", __func__);
                 return RESPST_ERR_MALFORMED_WQE;
         }
-        size = sizeof(wqe) + wqe->dma.num_sge*sizeof(struct rxe_sge);
+        size = sizeof(*wqe) + wqe->dma.num_sge*sizeof(struct rxe_sge);
         memcpy(&qp->resp.srq_wqe, wqe, size);
 
         qp->resp.wqe = &qp->resp.srq_wqe.wqe;
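The SRQ fix is the classic sizeof-on-a-pointer mistake: sizeof(wqe) is the size of the pointer itself, eight bytes on a 64-bit kernel, while sizeof(*wqe) is the size of the WQE being copied, so the memcpy() truncated every SRQ entry. A standalone demonstration with an invented struct layout:

/* Compile and run: sizeof(wqe) != sizeof(*wqe). */
#include <stdio.h>

struct wqe {
        long hdr[4];
        int num_sge;
};

int main(void)
{
        struct wqe w = { 0 };
        struct wqe *wqe = &w;

        printf("sizeof(wqe)  = %zu\n", sizeof(wqe));   /* pointer size */
        printf("sizeof(*wqe) = %zu\n", sizeof(*wqe));  /* struct size */
        return 0;
}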