cxgb4: Use more common logging style
Convert printks to pr_<level>

Miscellanea:
o Coalesce formats
o Realign arguments

Signed-off-by: Joe Perches <joe@perches.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
commit 700456bd25 (parent b7b37ee0e1)
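Everything below applies one mechanical pattern, so a minimal sketch of it up front may help. This is illustrative only: the example function is hypothetical, and the pr_fmt() definition is the one the patch itself adds to the driver's shared header (see the iw_cxgb4.h hunk below).

#include <linux/kernel.h>	/* printk(), pr_err() and friends */
#include <linux/errno.h>

/* Define pr_fmt() before any pr_<level>() call site; string-literal
 * concatenation then prefixes every message with the module name.
 */
#ifdef pr_fmt
#undef pr_fmt
#endif
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/* Hypothetical call site showing the before/after of the conversion. */
static int example_alloc_thing(void)
{
	/* Old style, hand-rolled prefix and explicit level macro:
	 *	printk(KERN_ERR MOD "%s - cannot alloc thing.\n", __func__);
	 * New style, prefix supplied by pr_fmt(), trailing '.' dropped:
	 */
	pr_err("%s - cannot alloc thing\n", __func__);
	return -ENOMEM;
}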
@@ -467,7 +467,7 @@ static struct net_device *get_real_dev(struct net_device *egress_dev)
 static void arp_failure_discard(void *handle, struct sk_buff *skb)
 {
-	pr_err(MOD "ARP failure\n");
+	pr_err("ARP failure\n");
 	kfree_skb(skb);
 }
 
@@ -528,7 +528,7 @@ static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb)
 {
 	struct c4iw_ep *ep = handle;
 
-	pr_err(MOD "ARP failure during accept - tid %u -dropping connection\n",
+	pr_err("ARP failure during accept - tid %u - dropping connection\n",
 	       ep->hwtid);
 
 	__state_set(&ep->com, DEAD);
@@ -542,7 +542,7 @@ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
 {
 	struct c4iw_ep *ep = handle;
 
-	printk(KERN_ERR MOD "ARP failure during connect\n");
+	pr_err("ARP failure during connect\n");
 	connect_reply_upcall(ep, -EHOSTUNREACH);
 	__state_set(&ep->com, DEAD);
 	if (ep->com.remote_addr.ss_family == AF_INET6) {
@@ -724,8 +724,7 @@ static int send_connect(struct c4iw_ep *ep)
 
 	skb = get_skb(NULL, wrlen, GFP_KERNEL);
 	if (!skb) {
-		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
-		       __func__);
+		pr_err("%s - failed to alloc skb\n", __func__);
 		return -ENOMEM;
 	}
 	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
@@ -1023,7 +1022,7 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
 
 	skb = get_skb(NULL, wrlen, GFP_KERNEL);
 	if (!skb) {
-		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
+		pr_err("%s - cannot alloc skb!\n", __func__);
 		return -ENOMEM;
 	}
 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
@@ -1103,7 +1102,7 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
 
 	skb = get_skb(NULL, wrlen, GFP_KERNEL);
 	if (!skb) {
-		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
+		pr_err("%s - cannot alloc skb!\n", __func__);
 		return -ENOMEM;
 	}
 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
@@ -1379,7 +1378,7 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
 	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
 	skb = get_skb(NULL, wrlen, GFP_KERNEL);
 	if (!skb) {
-		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
+		pr_err("update_rx_credits - cannot alloc skb!\n");
 		return 0;
 	}
 
@@ -1454,8 +1453,8 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 
 	/* Validate MPA header. */
 	if (mpa->revision > mpa_rev) {
-		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
-		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
+		pr_err("%s MPA version mismatch. Local = %d, Received = %d\n",
+		       __func__, mpa_rev, mpa->revision);
 		err = -EPROTO;
 		goto err_stop_timer;
 	}
@@ -1610,7 +1609,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * supports, generate TERM message
 	 */
	if (rtr_mismatch) {
-		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
+		pr_err("%s: RTR mismatch, sending TERM\n", __func__);
 		attrs.layer_etype = LAYER_MPA | DDP_LLP;
 		attrs.ecode = MPA_NOMATCH_RTR;
 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
@@ -1629,8 +1628,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * initiator ORD.
 	 */
 	if (insuff_ird) {
-		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
-		       __func__);
+		pr_err("%s: Insufficient IRD, sending TERM\n", __func__);
 		attrs.layer_etype = LAYER_MPA | DDP_LLP;
 		attrs.ecode = MPA_INSUFF_IRD;
 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
@@ -1701,8 +1699,8 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * Validate MPA Header.
 	 */
 	if (mpa->revision > mpa_rev) {
-		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
-		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
+		pr_err("%s MPA version mismatch. Local = %d, Received = %d\n",
+		       __func__, mpa_rev, mpa->revision);
 		goto err_stop_timer;
 	}
 
@@ -1866,7 +1864,7 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 
 	ep = get_ep_from_tid(dev, tid);
 	if (!ep) {
-		printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
+		pr_warn("Abort rpl to freed endpoint\n");
 		return 0;
 	}
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
@@ -1878,8 +1876,7 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 		release = 1;
 		break;
 	default:
-		printk(KERN_ERR "%s ep %p state %d\n",
-		       __func__, ep, ep->com.state);
+		pr_err("%s ep %p state %d\n", __func__, ep, ep->com.state);
 		break;
 	}
 	mutex_unlock(&ep->com.mutex);
@@ -2124,7 +2121,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
 	 */
 	ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
 	if (ep->atid == -1) {
-		pr_err("%s - cannot alloc atid.\n", __func__);
+		pr_err("%s - cannot alloc atid\n", __func__);
 		err = -ENOMEM;
 		goto fail2;
 	}
@@ -2151,7 +2148,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
 		ra = (__u8 *)&raddr6->sin6_addr;
 	}
 	if (!ep->dst) {
-		pr_err("%s - cannot find route.\n", __func__);
+		pr_err("%s - cannot find route\n", __func__);
 		err = -EHOSTUNREACH;
 		goto fail3;
 	}
@@ -2159,7 +2156,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
			ep->com.dev->rdev.lldi.adapter_type,
			ep->com.cm_id->tos);
 	if (err) {
-		pr_err("%s - cannot alloc l2e.\n", __func__);
+		pr_err("%s - cannot alloc l2e\n", __func__);
 		goto fail4;
 	}
 
@@ -2493,15 +2490,13 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
				     &parent_ep->com.local_addr)->sin6_scope_id);
 	}
 	if (!dst) {
-		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
-		       __func__);
+		pr_err("%s - failed to find dst entry!\n", __func__);
 		goto reject;
 	}
 
 	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
 	if (!child_ep) {
-		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
-		       __func__);
+		pr_err("%s - failed to allocate ep entry!\n", __func__);
 		dst_release(dst);
 		goto reject;
 	}
@@ -2509,8 +2504,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	err = import_ep(child_ep, iptype, peer_ip, dst, dev, false,
			parent_ep->com.dev->rdev.lldi.adapter_type, tos);
 	if (err) {
-		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
-		       __func__);
+		pr_err("%s - failed to allocate l2t entry!\n", __func__);
 		dst_release(dst);
 		kfree(child_ep);
 		goto reject;
@@ -2797,9 +2791,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
					     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
					     &attrs, 1);
			if (ret)
-				printk(KERN_ERR MOD
-				       "%s - qp <- error failed!\n",
-				       __func__);
+				pr_err("%s - qp <- error failed!\n", __func__);
		}
		peer_abort_upcall(ep);
		break;
@@ -2918,13 +2910,13 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
 	BUG_ON(!ep);
 
 	if (ep && ep->com.qp) {
-		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
-		       ep->com.qp->wq.sq.qid);
+		pr_warn("TERM received tid %u qpid %u\n",
+			tid, ep->com.qp->wq.sq.qid);
 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
 		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 	} else
-		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
+		pr_warn("TERM received tid %u no ep/qp\n", tid);
 	c4iw_put_ep(&ep->com);
 
 	return 0;
@@ -3188,7 +3180,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	}
 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
 	if (!ep) {
-		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
+		pr_err("%s - cannot alloc ep\n", __func__);
 		err = -ENOMEM;
 		goto out;
 	}
@@ -3228,7 +3220,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	 */
 	ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
 	if (ep->atid == -1) {
-		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
+		pr_err("%s - cannot alloc atid\n", __func__);
 		err = -ENOMEM;
 		goto fail2;
 	}
@@ -3292,7 +3284,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
				   raddr6->sin6_scope_id);
 	}
 	if (!ep->dst) {
-		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
+		pr_err("%s - cannot find route\n", __func__);
 		err = -EHOSTUNREACH;
 		goto fail3;
 	}
@@ -3300,7 +3292,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
			ep->com.dev->rdev.lldi.adapter_type, cm_id->tos);
 	if (err) {
-		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
+		pr_err("%s - cannot alloc l2e\n", __func__);
 		goto fail4;
 	}
 
@@ -3414,7 +3406,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 
 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
 	if (!ep) {
-		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
+		pr_err("%s - cannot alloc ep\n", __func__);
 		err = -ENOMEM;
 		goto fail1;
 	}
@@ -3439,7 +3431,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
			cm_id->m_local_addr.ss_family, ep);
 
 	if (ep->stid == -1) {
-		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
+		pr_err("%s - cannot alloc stid\n", __func__);
 		err = -ENOMEM;
 		goto fail2;
 	}
@@ -3600,8 +3592,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
					     C4IW_QP_ATTR_NEXT_STATE,
					     &attrs, 1);
			if (ret)
-				pr_err(MOD
-				       "%s - qp <- error failed!\n",
+				pr_err("%s - qp <- error failed!\n",
				       __func__);
		}
		fatal = 1;
@@ -4157,8 +4148,8 @@ static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
 
 	if (rpl->status != CPL_ERR_NONE) {
-		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
-		       "for tid %u\n", rpl->status, GET_TID(rpl));
+		pr_err("Unexpected SET_TCB_RPL status %u for tid %u\n",
+		       rpl->status, GET_TID(rpl));
 	}
 	kfree_skb(skb);
 	return 0;
@@ -4186,8 +4177,8 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
		sched(dev, skb);
		break;
	default:
-		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
-		       rpl->type);
+		pr_err("%s unexpected fw6 msg type %u\n",
+		       __func__, rpl->type);
		kfree_skb(skb);
		break;
	}
@@ -4203,8 +4194,7 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
 	ep = get_ep_from_tid(dev, tid);
 	/* This EP will be dereferenced in peer_abort() */
 	if (!ep) {
-		printk(KERN_WARNING MOD
-		       "Abort on non-existent endpoint, tid %d\n", tid);
+		pr_warn("Abort on non-existent endpoint, tid %d\n", tid);
 		kfree_skb(skb);
 		return 0;
 	}
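A note on the coalesced formats in the hunks above: a user-visible string split across source lines (as in the old MPA version mismatch message) cannot be found by grepping the tree for the text seen in dmesg, which is why checkpatch warns about split string constants. As a fragment mirroring the diff:

	/* Old: split literal, not greppable as one message. */
	printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
	       " Received = %d\n", __func__, mpa_rev, mpa->revision);

	/* New: one literal, kept intact even past 80 columns; the
	 * "iw_cxgb4: " prefix now comes from pr_fmt().
	 */
	pr_err("%s MPA version mismatch. Local = %d, Received = %d\n",
	       __func__, mpa_rev, mpa->revision);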
@@ -159,7 +159,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
			  &cq->bar2_qid,
			  user ? &cq->bar2_pa : NULL);
 	if (user && !cq->bar2_pa) {
-		pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
+		pr_warn("%s: cqid %u not in BAR2 range\n",
			pci_name(rdev->lldi.pdev), cq->cqid);
 		ret = -EINVAL;
 		goto err4;
@@ -766,8 +766,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
			wc->opcode = IB_WC_SEND;
			break;
		default:
-			printk(KERN_ERR MOD "Unexpected opcode %d "
-			       "in the CQE received for QPID=0x%0x\n",
+			pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
			goto out;
@@ -822,8 +821,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
-			printk(KERN_ERR MOD
-			       "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
+			pr_err("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
			       CQE_STATUS(&cqe), CQE_QPID(&cqe));
			wc->status = IB_WC_FATAL_ERR;
		}
@@ -334,7 +334,7 @@ static int qp_release(struct inode *inode, struct file *file)
 {
 	struct c4iw_debugfs_data *qpd = file->private_data;
 	if (!qpd) {
-		printk(KERN_INFO "%s null qpd?\n", __func__);
+		pr_info("%s null qpd?\n", __func__);
 		return 0;
 	}
 	vfree(qpd->buf);
@@ -422,7 +422,7 @@ static int stag_release(struct inode *inode, struct file *file)
 {
 	struct c4iw_debugfs_data *stagd = file->private_data;
 	if (!stagd) {
-		printk(KERN_INFO "%s null stagd?\n", __func__);
+		pr_info("%s null stagd?\n", __func__);
 		return 0;
 	}
 	vfree(stagd->buf);
@@ -796,15 +796,14 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 	 * cqid and qpid range must match for now.
 	 */
 	if (rdev->lldi.udb_density != rdev->lldi.ucq_density) {
-		pr_err(MOD "%s: unsupported udb/ucq densities %u/%u\n",
+		pr_err("%s: unsupported udb/ucq densities %u/%u\n",
		       pci_name(rdev->lldi.pdev), rdev->lldi.udb_density,
		       rdev->lldi.ucq_density);
 		return -EINVAL;
 	}
 	if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
	    rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
-		pr_err(MOD "%s: unsupported qp and cq id ranges "
-		       "qp start %u size %u cq start %u size %u\n",
+		pr_err("%s: unsupported qp and cq id ranges qp start %u size %u cq start %u size %u\n",
		       pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start,
		       rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.size,
		       rdev->lldi.vr->cq.size);
@@ -843,22 +842,22 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 
 	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
 	if (err) {
-		printk(KERN_ERR MOD "error %d initializing resources\n", err);
+		pr_err("error %d initializing resources\n", err);
 		return err;
 	}
 	err = c4iw_pblpool_create(rdev);
 	if (err) {
-		printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
+		pr_err("error %d initializing pbl pool\n", err);
 		goto destroy_resource;
 	}
 	err = c4iw_rqtpool_create(rdev);
 	if (err) {
-		printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
+		pr_err("error %d initializing rqt pool\n", err);
 		goto destroy_pblpool;
 	}
 	err = c4iw_ocqp_pool_create(rdev);
 	if (err) {
-		printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
+		pr_err("error %d initializing ocqp pool\n", err);
 		goto destroy_rqtpool;
 	}
 	rdev->status_page = (struct t4_dev_status_page *)
@@ -954,17 +953,17 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	int ret;
 
 	if (!rdma_supported(infop)) {
-		printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n",
-		       pci_name(infop->pdev));
+		pr_info("%s: RDMA not supported on this device\n",
			pci_name(infop->pdev));
 		return ERR_PTR(-ENOSYS);
 	}
 	if (!ocqp_supported(infop))
-		pr_info("%s: On-Chip Queues not supported on this device.\n",
+		pr_info("%s: On-Chip Queues not supported on this device\n",
			pci_name(infop->pdev));
 
 	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
 	if (!devp) {
-		printk(KERN_ERR MOD "Cannot allocate ib device\n");
+		pr_err("Cannot allocate ib device\n");
 		return ERR_PTR(-ENOMEM);
 	}
 	devp->rdev.lldi = *infop;
@@ -1000,7 +999,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa,
					 pci_resource_len(devp->rdev.lldi.pdev, 2));
 	if (!devp->rdev.bar2_kva) {
-		pr_err(MOD "Unable to ioremap BAR2\n");
+		pr_err("Unable to ioremap BAR2\n");
 		ib_dealloc_device(&devp->ibdev);
 		return ERR_PTR(-EINVAL);
 	}
@@ -1012,7 +1011,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
					  devp->rdev.lldi.vr->ocq.size);
 	if (!devp->rdev.oc_mw_kva) {
-		pr_err(MOD "Unable to ioremap onchip mem\n");
+		pr_err("Unable to ioremap onchip mem\n");
 		ib_dealloc_device(&devp->ibdev);
 		return ERR_PTR(-EINVAL);
 	}
@@ -1025,7 +1024,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 
 	ret = c4iw_rdev_open(&devp->rdev);
 	if (ret) {
-		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
+		pr_err("Unable to open CXIO rdev err %d\n", ret);
 		ib_dealloc_device(&devp->ibdev);
 		return ERR_PTR(ret);
 	}
@@ -1138,8 +1137,7 @@ static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
		goto out;
 
 	if (c4iw_handlers[opcode] == NULL) {
-		pr_info("%s no handler opcode 0x%x...\n", __func__,
-			opcode);
+		pr_info("%s no handler opcode 0x%x...\n", __func__, opcode);
 		kfree_skb(skb);
 		goto out;
 	}
@@ -1176,13 +1174,11 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
		if (recv_rx_pkt(dev, gl, rsp))
			return 0;
 
-		pr_info("%s: unexpected FL contents at %p, " \
-			"RSS %#llx, FL %#llx, len %u\n",
-			pci_name(ctx->lldi.pdev), gl->va,
-			(unsigned long long)be64_to_cpu(*rsp),
-			(unsigned long long)be64_to_cpu(
-			*(__force __be64 *)gl->va),
-			gl->tot_len);
+		pr_info("%s: unexpected FL contents at %p, RSS %#llx, FL %#llx, len %u\n",
			pci_name(ctx->lldi.pdev), gl->va,
			be64_to_cpu(*rsp),
			be64_to_cpu(*(__force __be64 *)gl->va),
			gl->tot_len);
 
		return 0;
	} else {
@@ -1195,8 +1191,7 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
		if (c4iw_handlers[opcode]) {
			c4iw_handlers[opcode](dev, skb);
		} else {
-			pr_info("%s no handler opcode 0x%x...\n", __func__,
-				opcode);
+			pr_info("%s no handler opcode 0x%x...\n", __func__, opcode);
			kfree_skb(skb);
		}
 
@@ -1212,14 +1207,13 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
 	PDBG("%s new_state %u\n", __func__, new_state);
 	switch (new_state) {
	case CXGB4_STATE_UP:
-		printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
+		pr_info("%s: Up\n", pci_name(ctx->lldi.pdev));
		if (!ctx->dev) {
			int ret;
 
			ctx->dev = c4iw_alloc(&ctx->lldi);
			if (IS_ERR(ctx->dev)) {
-				printk(KERN_ERR MOD
-				       "%s: initialization failed: %ld\n",
+				pr_err("%s: initialization failed: %ld\n",
				       pci_name(ctx->lldi.pdev),
				       PTR_ERR(ctx->dev));
				ctx->dev = NULL;
@@ -1227,22 +1221,19 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
			}
			ret = c4iw_register_device(ctx->dev);
			if (ret) {
-				printk(KERN_ERR MOD
-				       "%s: RDMA registration failed: %d\n",
+				pr_err("%s: RDMA registration failed: %d\n",
				       pci_name(ctx->lldi.pdev), ret);
				c4iw_dealloc(ctx);
			}
		}
		break;
	case CXGB4_STATE_DOWN:
-		printk(KERN_INFO MOD "%s: Down\n",
-		       pci_name(ctx->lldi.pdev));
+		pr_info("%s: Down\n", pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	case CXGB4_STATE_START_RECOVERY:
-		printk(KERN_INFO MOD "%s: Fatal Error\n",
-		       pci_name(ctx->lldi.pdev));
+		pr_info("%s: Fatal Error\n", pci_name(ctx->lldi.pdev));
		if (ctx->dev) {
			struct ib_event event;
 
@@ -1255,8 +1246,7 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
		}
		break;
	case CXGB4_STATE_DETACH:
-		printk(KERN_INFO MOD "%s: Detach\n",
-		       pci_name(ctx->lldi.pdev));
+		pr_info("%s: Detach\n", pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
@@ -1406,9 +1396,7 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
					  t4_sq_host_wq_pidx(&qp->wq),
					  t4_sq_wq_size(&qp->wq));
		if (ret) {
-			pr_err(MOD "%s: Fatal error - "
-			       "DB overflow recovery failed - "
-			       "error syncing SQ qid %u\n",
+			pr_err("%s: Fatal error - DB overflow recovery failed - error syncing SQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
@@ -1422,9 +1410,7 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
					  t4_rq_wq_size(&qp->wq));
 
		if (ret) {
-			pr_err(MOD "%s: Fatal error - "
-			       "DB overflow recovery failed - "
-			       "error syncing RQ qid %u\n",
+			pr_err("%s: Fatal error - DB overflow recovery failed - error syncing RQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
@@ -1455,7 +1441,7 @@ static void recover_queues(struct uld_ctx *ctx)
 	/* flush the SGE contexts */
 	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
 	if (ret) {
-		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
+		pr_err("%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
 		return;
 	}
@@ -1513,8 +1499,8 @@ static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	default:
-		printk(KERN_WARNING MOD "%s: unknown control cmd %u\n",
-		       pci_name(ctx->lldi.pdev), control);
+		pr_warn("%s: unknown control cmd %u\n",
			pci_name(ctx->lldi.pdev), control);
		break;
	}
	return 0;
@@ -1543,8 +1529,7 @@ static int __init c4iw_init_module(void)
 
 	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
 	if (!c4iw_debugfs_root)
-		printk(KERN_WARNING MOD
-		       "could not create debugfs entry, continuing\n");
+		pr_warn("could not create debugfs entry, continuing\n");
 
 	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
 
@@ -124,8 +124,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
 	spin_lock_irq(&dev->lock);
 	qhp = get_qhp(dev, CQE_QPID(err_cqe));
 	if (!qhp) {
-		printk(KERN_ERR MOD "BAD AE qpid 0x%x opcode %d "
-		       "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
+		pr_err("BAD AE qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
		       CQE_QPID(err_cqe),
		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
@@ -140,8 +139,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
 	cqid = qhp->attr.rcq;
 	chp = get_chp(dev, cqid);
 	if (!chp) {
-		printk(KERN_ERR MOD "BAD AE cqid 0x%x qpid 0x%x opcode %d "
-		       "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
+		pr_err("BAD AE cqid 0x%x qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
		       cqid, CQE_QPID(err_cqe),
		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
@@ -165,7 +163,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
 
 	/* Completion Events */
	case T4_ERR_SUCCESS:
-		printk(KERN_ERR MOD "AE with status 0!\n");
+		pr_err("AE with status 0!\n");
		break;
 
	case T4_ERR_STAG:
@@ -207,7 +205,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
		break;
 
	default:
-		printk(KERN_ERR MOD "Unknown T4 status 0x%x QPID 0x%x\n",
+		pr_err("Unknown T4 status 0x%x QPID 0x%x\n",
		       CQE_STATUS(err_cqe), qhp->wq.sq.qid);
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
		break;
@@ -64,6 +64,12 @@
 #define DRV_NAME "iw_cxgb4"
 #define MOD DRV_NAME ":"
 
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 extern int c4iw_debug;
 #define PDBG(fmt, args...) \
 do { \
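This header hunk is what makes the rest of the patch work. In include/linux/printk.h, pr_err() is (approximately) defined as shown below, so overriding pr_fmt() prepends the module name to every format string at compile time; the #undef guards against the default definition. A sketch of the expansion, assuming the module builds as iw_cxgb4:

/* Approximate shape of the printk.h machinery: */
#define pr_fmt(fmt) fmt				/* default, when not overridden */
#define pr_err(fmt, ...) \
	printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)

/* With the override above,
 *	pr_err("ARP failure\n");
 * expands to
 *	printk(KERN_ERR "iw_cxgb4" ": " "ARP failure\n");
 * i.e. "iw_cxgb4: ARP failure\n" after string-literal concatenation,
 * which is why the hand-rolled MOD prefix can be dropped from every
 * call site converted in this patch.
 */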
@@ -234,10 +234,8 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
 	if (is_t5(rdev->lldi.adapter_type) && use_dsgl) {
		if (len > inline_threshold) {
			if (_c4iw_write_mem_dma(rdev, addr, len, data, skb)) {
-				printk_ratelimited(KERN_WARNING
-						   "%s: dma map"
-						   " failure (non fatal)\n",
-						   pci_name(rdev->lldi.pdev));
+				pr_warn_ratelimited("%s: dma map failure (non fatal)\n",
+						    pci_name(rdev->lldi.pdev));
				return _c4iw_write_mem_inline(rdev, addr, len,
							      data, skb);
			} else {
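One subtlety in the mem.c hunk above: pr_warn_ratelimited() is not just shorter. printk_ratelimited() takes a raw format, while pr_warn_ratelimited() routes it through pr_fmt() (approximately as below), so the rate-limited warning gains the same module prefix as every other converted message. Each call site also keeps its own static ratelimit state, so unrelated warnings are throttled independently.

/* Approximate definition from include/linux/printk.h: */
#define pr_warn_ratelimited(fmt, ...) \
	printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)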
@@ -123,7 +123,6 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
 {
 	struct c4iw_ucontext *context;
 	struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
-	static int warned;
 	struct c4iw_alloc_ucontext_resp uresp;
 	int ret = 0;
 	struct c4iw_mm_entry *mm = NULL;
@@ -141,8 +140,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
 	kref_init(&context->kref);
 
 	if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
-		if (!warned++)
-			pr_err(MOD "Warning - downlevel libcxgb4 (non-fatal), device status page disabled.");
+		pr_err_once("Warning - downlevel libcxgb4 (non-fatal), device status page disabled\n");
 		rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
 	} else {
 		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
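The provider.c hunk above folds an open-coded once-only warning into pr_err_once(), which is what lets the static warned local be deleted as well (and restores the missing trailing newline). A sketch of the equivalence, with illustrative wrapper functions that are not part of the driver:

/* Old: once-only logic done by hand. */
static int warned;

static void warn_downlevel_old(void)
{
	if (!warned++)
		pr_err(MOD "Warning - downlevel libcxgb4 (non-fatal), device status page disabled.");
}

/* New: printk.h keeps a static flag per call site behind the macro,
 * and the pr_fmt() prefix comes for free.
 */
static void warn_downlevel_new(void)
{
	pr_err_once("Warning - downlevel libcxgb4 (non-fatal), device status page disabled\n");
}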
@@ -275,7 +275,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 	 * User mode must have bar2 access.
 	 */
 	if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
-		pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n",
+		pr_warn("%s: sqid %u or rqid %u not in BAR2 range\n",
			pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
 		goto free_dma;
 	}
@@ -1671,8 +1671,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
			goto err;
		break;
	default:
-		printk(KERN_ERR "%s in a bad state %d\n",
-		       __func__, qhp->attr.state);
+		pr_err("%s in a bad state %d\n", __func__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
		break;
@@ -293,10 +293,8 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
		PDBG("%s failed to add PBL chunk (%x/%x)\n",
		     __func__, pbl_start, pbl_chunk);
		if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
-			printk(KERN_WARNING MOD
-			       "Failed to add all PBL chunks (%x/%x)\n",
-			       pbl_start,
-			       pbl_top - pbl_start);
+			pr_warn("Failed to add all PBL chunks (%x/%x)\n",
+				pbl_start, pbl_top - pbl_start);
			return 0;
		}
		pbl_chunk >>= 1;
@@ -326,7 +324,7 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
 	unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
 	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
 	if (!addr)
-		pr_warn_ratelimited(MOD "%s: Out of RQT memory\n",
+		pr_warn_ratelimited("%s: Out of RQT memory\n",
				    pci_name(rdev->lldi.pdev));
 	mutex_lock(&rdev->stats.lock);
 	if (addr) {
@@ -366,9 +364,8 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
		PDBG("%s failed to add RQT chunk (%x/%x)\n",
		     __func__, rqt_start, rqt_chunk);
		if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
-			printk(KERN_WARNING MOD
-			       "Failed to add all RQT chunks (%x/%x)\n",
-			       rqt_start, rqt_top - rqt_start);
+			pr_warn("Failed to add all RQT chunks (%x/%x)\n",
+				rqt_start, rqt_top - rqt_start);
			return 0;
		}
		rqt_chunk >>= 1;
@@ -432,9 +429,8 @@ int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
		PDBG("%s failed to add OCQP chunk (%x/%x)\n",
		     __func__, start, chunk);
		if (chunk <= 1024 << MIN_OCQP_SHIFT) {
-			printk(KERN_WARNING MOD
-			       "Failed to add all OCQP chunks (%x/%x)\n",
-			       start, top - start);
+			pr_warn("Failed to add all OCQP chunks (%x/%x)\n",
+				start, top - start);
			return 0;
		}
		chunk >>= 1;
@@ -656,7 +656,7 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 	if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) {
 		ret = -EOVERFLOW;
 		cq->error = 1;
-		printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
+		pr_err("cq overflow cqid %u\n", cq->cqid);
 		BUG_ON(1);
 	} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {