Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/fmr_pool: Stop ib_fmr threads from contributing to load average
  IB/ipath: Fix incorrect use of sizeof on msg buffer (function argument)
  IB/ipath: Limit length checksummed in eeprom
  IB/ipath: Fix a race where s_last is updated without lock held
  IB/mlx4: Lock SQ lock in mlx4_ib_post_send()
  IPoIB/cm: Fix receive QP cleanup
commit 53173920da
@@ -291,10 +291,10 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
         atomic_set(&pool->flush_ser, 0);
         init_waitqueue_head(&pool->force_wait);
 
-        pool->thread = kthread_create(ib_fmr_cleanup_thread,
-                                      pool,
-                                      "ib_fmr(%s)",
-                                      device->name);
+        pool->thread = kthread_run(ib_fmr_cleanup_thread,
+                                   pool,
+                                   "ib_fmr(%s)",
+                                   device->name);
         if (IS_ERR(pool->thread)) {
                 printk(KERN_WARNING PFX "couldn't start cleanup thread\n");
                 ret = PTR_ERR(pool->thread);
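For context on the fmr_pool hunk above: kthread_run() is kthread_create() plus an immediate wake_up_process(), so the cleanup thread starts running right away instead of sitting in the uninterruptible sleep that a created-but-never-woken kthread remains in, and uninterruptible sleepers count toward the load average. A minimal sketch of the two start patterns, using a hypothetical demo_cleanup_thread() rather than the driver's real worker:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

/* Hypothetical worker, standing in for ib_fmr_cleanup_thread(). */
static int demo_cleanup_thread(void *data)
{
        while (!kthread_should_stop())
                schedule_timeout_interruptible(HZ);     /* sleeps interruptibly (S state) */
        return 0;
}

static struct task_struct *demo_start(void *pool)
{
        struct task_struct *t;

        /*
         * kthread_create() alone leaves the new task parked until someone
         * calls wake_up_process(t); kthread_run() does both steps at once.
         */
        t = kthread_run(demo_cleanup_thread, pool, "demo_cleanup/%d", 0);
        if (IS_ERR(t))
                return NULL;
        return t;
}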
@@ -538,7 +538,15 @@ static u8 flash_csum(struct ipath_flash *ifp, int adjust)
         u8 *ip = (u8 *) ifp;
         u8 csum = 0, len;
 
-        for (len = 0; len < ifp->if_length; len++)
+        /*
+         * Limit length checksummed to max length of actual data.
+         * Checksum of erased eeprom will still be bad, but we avoid
+         * reading past the end of the buffer we were passed.
+         */
+        len = ifp->if_length;
+        if (len > sizeof(struct ipath_flash))
+                len = sizeof(struct ipath_flash);
+        while (len--)
                 csum += *ip++;
         csum -= ifp->if_csum;
         csum = ~csum;
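The eeprom hunk bounds the checksum walk by the size of the structure actually passed in, rather than trusting the length field read from the device. A standalone sketch of the same clamping pattern, with a hypothetical flash_hdr in place of struct ipath_flash:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-in for struct ipath_flash; not the real layout. */
struct flash_hdr {
        uint8_t if_length;      /* length byte stored in the EEPROM image   */
        uint8_t if_csum;        /* checksum byte stored in the EEPROM image */
        uint8_t data[62];
};

static uint8_t flash_csum_bounded(const struct flash_hdr *hdr)
{
        const uint8_t *ip = (const uint8_t *)hdr;
        uint8_t csum = 0;
        size_t len = hdr->if_length;

        /* An erased EEPROM reads back 0xff, so the stored length may be
         * larger than the buffer; clamp before walking it. */
        if (len > sizeof(*hdr))
                len = sizeof(*hdr);
        while (len--)
                csum += *ip++;

        /* Same convention as the driver: back out the stored checksum
         * byte, then invert. */
        csum -= hdr->if_csum;
        return (uint8_t)~csum;
}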
@@ -453,7 +453,7 @@ skip_ibchange:
 }
 
 static void handle_supp_msgs(struct ipath_devdata *dd,
-                             unsigned supp_msgs, char msg[512])
+                             unsigned supp_msgs, char *msg, int msgsz)
 {
         /*
          * Print the message unless it's ibc status change only, which
@@ -461,9 +461,9 @@ static void handle_supp_msgs(struct ipath_devdata *dd,
          */
         if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) {
                 int iserr;
-                iserr = ipath_decode_err(msg, sizeof msg,
+                iserr = ipath_decode_err(msg, msgsz,
                                          dd->ipath_lasterror &
                                          ~INFINIPATH_E_IBSTATUSCHANGED);
                 if (dd->ipath_lasterror &
                         ~(INFINIPATH_E_RRCVEGRFULL |
                         INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
@@ -492,8 +492,8 @@ static void handle_supp_msgs(struct ipath_devdata *dd,
 }
 
 static unsigned handle_frequent_errors(struct ipath_devdata *dd,
-                                       ipath_err_t errs, char msg[512],
-                                       int *noprint)
+                                       ipath_err_t errs, char *msg,
+                                       int msgsz, int *noprint)
 {
         unsigned long nc;
         static unsigned long nextmsg_time;
@@ -512,7 +512,7 @@ static unsigned handle_frequent_errors(struct ipath_devdata *dd,
                 nextmsg_time = nc + HZ * 3;
         }
         else if (supp_msgs) {
-                handle_supp_msgs(dd, supp_msgs, msg);
+                handle_supp_msgs(dd, supp_msgs, msg, msgsz);
                 supp_msgs = 0;
                 nmsgs = 0;
         }
@@ -525,14 +525,14 @@ static unsigned handle_frequent_errors(struct ipath_devdata *dd,
 
 static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 {
-        char msg[512];
+        char msg[128];
         u64 ignore_this_time = 0;
         int i, iserr = 0;
         int chkerrpkts = 0, noprint = 0;
         unsigned supp_msgs;
         int log_idx;
 
-        supp_msgs = handle_frequent_errors(dd, errs, msg, &noprint);
+        supp_msgs = handle_frequent_errors(dd, errs, msg, sizeof msg, &noprint);
 
         /* don't report errors that are masked */
         errs &= ~dd->ipath_maskederrs;
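The msg-buffer changes above fix a plain C pitfall rather than anything driver-specific: an array used as a function parameter decays to a pointer, so sizeof inside the callee reports the pointer size (typically 8 on a 64-bit build), not the 512 bytes the caller allocated; hence the switch to passing an explicit msgsz. A small standalone illustration:

#include <stdio.h>

static void takes_buffer(char msg[512])
{
        /* Here msg is really 'char *msg', so this prints sizeof(char *). */
        printf("inside callee: sizeof msg = %zu\n", sizeof msg);
}

int main(void)
{
        char msg[512];

        printf("at call site:  sizeof msg = %zu\n", sizeof msg);       /* 512 */
        takes_buffer(msg);
        return 0;
}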
@@ -630,11 +630,8 @@ bail:;
 void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
                          enum ib_wc_status status)
 {
-        u32 last = qp->s_last;
-
-        if (++last == qp->s_size)
-                last = 0;
-        qp->s_last = last;
+        unsigned long flags;
+        u32 last;
 
         /* See ch. 11.2.4.1 and 10.7.3.1 */
         if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
@@ -658,4 +655,11 @@ void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
                 wc.port_num = 0;
                 ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
         }
+
+        spin_lock_irqsave(&qp->s_lock, flags);
+        last = qp->s_last;
+        if (++last >= qp->s_size)
+                last = 0;
+        qp->s_last = last;
+        spin_unlock_irqrestore(&qp->s_lock, flags);
 }
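The s_last hunks move the read-modify-write of the ring index under qp->s_lock, and to after the completion has been queued, so a concurrent update on another CPU cannot be lost. A minimal sketch of that locking pattern against a hypothetical ring structure, not the driver's real types:

#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical ring, standing in for the ipath send-queue fields. */
struct demo_ring {
        spinlock_t lock;
        u32 last;       /* index of the oldest outstanding entry */
        u32 size;       /* number of slots in the ring */
};

static void demo_ring_retire(struct demo_ring *r)
{
        unsigned long flags;
        u32 last;

        spin_lock_irqsave(&r->lock, flags);
        last = r->last;
        if (++last >= r->size)          /* wrap around the circular buffer */
                last = 0;
        r->last = last;                 /* store only while the lock is held */
        spin_unlock_irqrestore(&r->lock, flags);
}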
@@ -1282,7 +1282,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
         int size;
         int i;
 
-        spin_lock_irqsave(&qp->rq.lock, flags);
+        spin_lock_irqsave(&qp->sq.lock, flags);
 
         ind = qp->sq.head;
 
@@ -1448,7 +1448,7 @@ out:
                           (qp->sq.wqe_cnt - 1));
         }
 
-        spin_unlock_irqrestore(&qp->rq.lock, flags);
+        spin_unlock_irqrestore(&qp->sq.lock, flags);
 
         return err;
 }
@@ -60,7 +60,7 @@ static struct ib_qp_attr ipoib_cm_err_attr = {
         .qp_state = IB_QPS_ERR
 };
 
-#define IPOIB_CM_RX_DRAIN_WRID 0x7fffffff
+#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff
 
 static struct ib_send_wr ipoib_cm_rx_drain_wr = {
         .wr_id = IPOIB_CM_RX_DRAIN_WRID,