mirror of https://github.com/torvalds/linux.git
IB/rdmavt: Rename check_send_wqe as setup_wqe
The driver-provided function check_send_wqe allows the hardware driver to check and set up the incoming send wqe before it is inserted into the swqe ring. This patch renames it to setup_wqe to better reflect its usage. In addition, the function is now called only after all setup of the wqe is complete in rdmavt.

Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Kaike Wan <kaike.wan@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 8c31c9188b
commit d205a06a14
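
To make the renamed hook concrete, here is a minimal driver-side sketch in the style of the hunks below. It is illustrative only, not code from the tree: the foo_* names, the foo_devdata layout, and the checks inside the callback are invented for the example; only the setup_wqe signature and the driver_f.setup_wqe registration pattern come from this commit.

/* Hypothetical driver callback; assumes the usual rdmavt types. */
static int foo_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
			 bool *call_send)
{
	/*
	 * rdmavt calls this after the SWQE has been fully set up
	 * (PSN values included) but before it is inserted into the ring.
	 */
	if (wqe->length > foo_max_payload(qp))	/* invented driver limit */
		return -EINVAL;			/* abort the post */

	/*
	 * Clearing *call_send asks rdmavt to schedule the send rather
	 * than issuing it directly from the posting context.
	 */
	if (foo_prefer_deferred_send(qp))	/* invented policy hook */
		*call_send = false;

	return 0;
}

/* Registration, mirroring the hfi1/qib hunks below (foo_devdata is made up). */
static void foo_register_verbs_callbacks(struct foo_devdata *dd)
{
	dd->verbs_dev.rdi.driver_f.setup_wqe = foo_setup_wqe;
}

A negative return value makes rvt_post_one_wr() unwind the work request through the new bail_inval_free_ref label shown further down.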
@@ -282,16 +282,21 @@ void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
 }
 
 /**
- * hfi1_check_send_wqe - validate wqe
+ * hfi1_setup_wqe - set up the wqe
  * @qp - The qp
  * @wqe - The built wqe
  * @call_send - Determine if the send should be posted or scheduled.
  *
+ * Perform setup of the wqe. This is called
+ * prior to inserting the wqe into the ring but after
+ * the wqe has been setup by RDMAVT. This function
+ * allows the driver the opportunity to perform
+ * validation and additional setup of the wqe.
+ *
  * Returns 0 on success, -EINVAL on failure
  *
  */
-int hfi1_check_send_wqe(struct rvt_qp *qp,
-			struct rvt_swqe *wqe, bool *call_send)
+int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send)
 {
 	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
 	struct rvt_ah *ah;

@@ -1937,7 +1937,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
 	dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp;
 	dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp;
 	dd->verbs_dev.rdi.driver_f.notify_restart_rc = hfi1_restart_rc;
-	dd->verbs_dev.rdi.driver_f.check_send_wqe = hfi1_check_send_wqe;
+	dd->verbs_dev.rdi.driver_f.setup_wqe = hfi1_setup_wqe;
 	dd->verbs_dev.rdi.driver_f.comp_vect_cpu_lookup =
 					hfi1_comp_vect_mappings_lookup;

@@ -343,8 +343,8 @@ int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
 void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
 		    int attr_mask, struct ib_udata *udata);
 void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
-int hfi1_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
-			bool *call_send);
+int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
+		   bool *call_send);
 
 extern const u32 rc_only_opcode;
 extern const u32 uc_only_opcode;

@@ -1588,7 +1588,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
 	dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files;
 	dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
 	dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
-	dd->verbs_dev.rdi.driver_f.check_send_wqe = qib_check_send_wqe;
+	dd->verbs_dev.rdi.driver_f.setup_wqe = qib_check_send_wqe;
 	dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah;
 	dd->verbs_dev.rdi.driver_f.alloc_qpn = qib_alloc_qpn;
 	dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qib_qp_priv_alloc;

@@ -1823,13 +1823,11 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
 		wqe->wr.num_sge = j;
 	}
 
-	/* general part of wqe valid - allow for driver checks */
-	if (rdi->driver_f.check_send_wqe) {
-		ret = rdi->driver_f.check_send_wqe(qp, wqe, call_send);
-		if (ret < 0)
-			goto bail_inval_free;
-	}
-
+	/*
+	 * Calculate and set SWQE PSN values prior to handing it off
+	 * to the driver's check routine. This give the driver the
+	 * opportunity to adjust PSN values based on internal checks.
+	 */
 	log_pmtu = qp->log_pmtu;
 	if (qp->ibqp.qp_type != IB_QPT_UC &&
 	    qp->ibqp.qp_type != IB_QPT_RC) {

@@ -1854,8 +1852,18 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
 				(wqe->length ?
 					((wqe->length - 1) >> log_pmtu) :
 					0);
-		qp->s_next_psn = wqe->lpsn + 1;
 	}
+
+	/* general part of wqe valid - allow for driver checks */
+	if (rdi->driver_f.setup_wqe) {
+		ret = rdi->driver_f.setup_wqe(qp, wqe, call_send);
+		if (ret < 0)
+			goto bail_inval_free_ref;
+	}
+
+	if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL))
+		qp->s_next_psn = wqe->lpsn + 1;
+
 	if (unlikely(reserved_op)) {
 		wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
 		rvt_qp_wqe_reserve(qp, wqe);

@@ -1869,6 +1877,10 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
 
 	return 0;
 
+bail_inval_free_ref:
+	if (qp->ibqp.qp_type != IB_QPT_UC &&
+	    qp->ibqp.qp_type != IB_QPT_RC)
+		atomic_dec(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
 bail_inval_free:
 	/* release mr holds */
 	while (j) {

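Taken together, the rvt_post_one_wr() hunks above change when the driver hook runs: rdmavt first finishes building the SWQE, including PSN assignment, the optional setup_wqe callback then sees the completed SWQE, and only afterwards is qp->s_next_psn advanced for non-local operations. A condensed paraphrase of the resulting flow (not the verbatim kernel source) is:

	/* 1. SWQE fully built by rdmavt, wqe->psn/wqe->lpsn already assigned. */

	/* 2. Optional driver hook runs on the completed SWQE. */
	if (rdi->driver_f.setup_wqe) {
		ret = rdi->driver_f.setup_wqe(qp, wqe, call_send);
		if (ret < 0)
			/* drops the UD AH reference (if taken) and the MR holds */
			goto bail_inval_free_ref;
	}

	/* 3. Non-local operations advance the QP's PSN counter. */
	if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL))
		qp->s_next_psn = wqe->lpsn + 1;
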
@@ -215,13 +215,14 @@ struct rvt_driver_provided {
 	void (*schedule_send_no_lock)(struct rvt_qp *qp);
 
 	/*
-	 * Validate the wqe. This needs to be done prior to inserting the
-	 * wqe into the ring, but after the wqe has been set up. Allow for
-	 * driver specific work request checking by providing a callback.
-	 * call_send indicates if the wqe should be posted or scheduled.
+	 * Driver specific work request setup and checking.
+	 * This function is allowed to perform any setup, checks, or
+	 * adjustments required to the SWQE in order to be usable by
+	 * underlying protocols. This includes private data structure
+	 * allocations.
 	 */
-	int (*check_send_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe,
-			      bool *call_send);
+	int (*setup_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe,
+			 bool *call_send);
 
 	/*
 	 * Sometimes rdmavt needs to kick the driver's send progress. That is