mirror of
https://github.com/torvalds/linux.git
synced 2024-11-18 10:01:43 +00:00
RDMA/rxe: Fix missing acks from responder
All responder errors from request packets that do not consume a receive WQE fail to generate acks for RC QPs. This patch corrects this behavior by making the flow follow the same path as request packets that do consume a WQE after the completion. Link: https://lore.kernel.org/r/20210402001016.3210-1-rpearson@hpe.com Link: https://lore.kernel.org/linux-rdma/1a7286ac-bcea-40fb-2267-480134dd301b@gmail.com/ Signed-off-by: Bob Pearson <rpearson@hpe.com> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
This commit is contained in:
parent
7d8f346504
commit
ea49225189
@@ -676,7 +676,6 @@ int rxe_completer(void *arg)
 			/* there is nothing to retry in this case */
 			if (!wqe || (wqe->state == wqe_state_posted)) {
 				pr_warn("Retry attempted without a valid wqe\n");
 				ret = -EAGAIN;
 				goto done;
 			}
[note: this hunk removes one line (-7/+6); the deleted line was lost in the page capture — see the upstream commit for the full hunk]
@@ -816,8 +816,8 @@ static enum resp_states do_complete(struct rxe_qp *qp,
 	struct rxe_recv_wqe *wqe = qp->resp.wqe;
 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

-	if (unlikely(!wqe))
-		return RESPST_CLEANUP;
+	if (!wqe)
+		goto finish;

 	memset(&cqe, 0, sizeof(cqe));
@@ -917,12 +917,12 @@ static enum resp_states do_complete(struct rxe_qp *qp,
 	if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
 		return RESPST_ERR_CQ_OVERFLOW;

-	if (qp->resp.state == QP_STATE_ERROR)
+finish:
+	if (unlikely(qp->resp.state == QP_STATE_ERROR))
 		return RESPST_CHK_RESOURCE;
-	if (!pkt)
+	if (unlikely(!pkt))
 		return RESPST_DONE;
-	else if (qp_type(qp) == IB_QPT_RC)
+	if (qp_type(qp) == IB_QPT_RC)
 		return RESPST_ACKNOWLEDGE;
 	else
 		return RESPST_CLEANUP;
@@ -1056,10 +1056,8 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
 	if (pkt->mask & RXE_SEND_MASK ||
 	    pkt->mask & RXE_WRITE_MASK) {
 		/* SEND. Ack again and cleanup. C9-105. */
-		if (bth_ack(pkt))
-			send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
-		rc = RESPST_CLEANUP;
-		goto out;
+		send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
+		return RESPST_CLEANUP;
 	} else if (pkt->mask & RXE_READ_MASK) {
 		struct resp_res *res;
Loading…
Reference in New Issue
Block a user