RDMA/rxe: Remove qp->resp.state

The rxe driver has four different QP state variables,
    qp->attr.qp_state,
    qp->req.state,
    qp->comp.state, and
    qp->resp.state.
All of these basically carry the same information.

This patch replaces uses of qp->resp.state with qp->attr.qp_state. It is
the first of three patches that will remove all but the qp->attr.qp_state
variable, bringing the driver closer to the IBA description.
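
The qp_state() calls in the diff below go through a small inline helper
rather than reading qp->attr.qp_state directly. As a reference, here is a
minimal sketch of what such a helper presumably looks like (the exact
definition lives in rxe_verbs.h and is assumed here, since this patch does
not show it):

	/* assumed helper: return the single QP state kept in the qp attributes */
	static inline enum ib_qp_state qp_state(struct rxe_qp *qp)
	{
		return qp->attr.qp_state;
	}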

Link: https://lore.kernel.org/r/20230405042611.6467-1-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 6 files changed, 8 insertions(+), 14 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c

@@ -414,7 +414,7 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
 
 	if ((is_request && (qp->req.state != QP_STATE_READY)) ||
-	    (!is_request && (qp->resp.state != QP_STATE_READY))) {
+	    (!is_request && (qp_state(qp) <= IB_QPS_RTR))) {
 		rxe_dbg_qp(qp, "Packet dropped. QP is not in ready state\n");
 		goto drop;
 	}
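
Note: the relational check qp_state(qp) <= IB_QPS_RTR (and the similar test
in rxe_recv.c below) relies on the numeric ordering of enum ib_qp_state in
include/rdma/ib_verbs.h, which follows the IBA state progression:

	/* declaration order gives RESET < INIT < RTR < RTS < SQD < SQE < ERR,
	 * which is what makes comparisons such as "<= IB_QPS_RTR" meaningful
	 */
	enum ib_qp_state {
		IB_QPS_RESET,
		IB_QPS_INIT,
		IB_QPS_RTR,
		IB_QPS_RTS,
		IB_QPS_SQD,
		IB_QPS_SQE,
		IB_QPS_ERR
	};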

diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c

@@ -287,7 +287,6 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
 	qp->resp.opcode = OPCODE_NONE;
 	qp->resp.msn = 0;
-	qp->resp.state = QP_STATE_RESET;
 
 	return 0;
 }
@@ -479,7 +478,6 @@ static void rxe_qp_reset(struct rxe_qp *qp)
 	/* move qp to the reset state */
 	qp->req.state = QP_STATE_RESET;
 	qp->comp.state = QP_STATE_RESET;
-	qp->resp.state = QP_STATE_RESET;
 
 	/* drain work and packet queuesc */
 	rxe_requester(qp);
@@ -532,7 +530,6 @@ static void rxe_qp_drain(struct rxe_qp *qp)
 void rxe_qp_error(struct rxe_qp *qp)
 {
 	qp->req.state = QP_STATE_ERROR;
-	qp->resp.state = QP_STATE_ERROR;
 	qp->comp.state = QP_STATE_ERROR;
 	qp->attr.qp_state = IB_QPS_ERR;
@@ -663,13 +660,11 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
 	case IB_QPS_INIT:
 		rxe_dbg_qp(qp, "state -> INIT\n");
 		qp->req.state = QP_STATE_INIT;
-		qp->resp.state = QP_STATE_INIT;
 		qp->comp.state = QP_STATE_INIT;
 		break;
 
 	case IB_QPS_RTR:
 		rxe_dbg_qp(qp, "state -> RTR\n");
-		qp->resp.state = QP_STATE_READY;
 		break;
 
 	case IB_QPS_RTS:

diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c

@@ -39,7 +39,7 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
 	}
 
 	if (pkt->mask & RXE_REQ_MASK) {
-		if (unlikely(qp->resp.state != QP_STATE_READY))
+		if (unlikely(qp_state(qp) <= IB_QPS_RTR))
 			return -EINVAL;
 	} else if (unlikely(qp->req.state < QP_STATE_READY ||
 			    qp->req.state > QP_STATE_DRAINED))

diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c

@@ -1137,7 +1137,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
 		return RESPST_ERR_CQ_OVERFLOW;
 
 finish:
-	if (unlikely(qp->resp.state == QP_STATE_ERROR))
+	if (unlikely(qp_state(qp) == IB_QPS_ERR))
 		return RESPST_CHK_RESOURCE;
 	if (unlikely(!pkt))
 		return RESPST_DONE;
@@ -1464,10 +1464,10 @@ int rxe_responder(struct rxe_qp *qp)
 	struct rxe_pkt_info *pkt = NULL;
 	int ret;
 
-	if (!qp->valid || qp->resp.state == QP_STATE_ERROR ||
-	    qp->resp.state == QP_STATE_RESET) {
-		bool notify = qp->valid &&
-			      (qp->resp.state == QP_STATE_ERROR);
+	if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
+	    qp_state(qp) == IB_QPS_RESET) {
+		bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR);
 
 		drain_req_pkts(qp);
 		flush_recv_queue(qp, notify);
 		goto exit;

diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c

@@ -1012,7 +1012,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 	spin_unlock_irqrestore(&rq->producer_lock, flags);
 
-	if (qp->resp.state == QP_STATE_ERROR)
+	if (qp_state(qp) == IB_QPS_ERR)
 		rxe_sched_task(&qp->resp.task);
 
 err_out:

diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h

@@ -173,7 +173,6 @@ struct resp_res {
 };
 
 struct rxe_resp_info {
-	enum rxe_qp_state	state;
 	u32			msn;
 	u32			psn;
 	u32			ack_psn;