mirror of https://github.com/torvalds/linux.git
v6.1 first rc pull request
Fix a few more of the usual sorts of bugs:

 - Another regression with source route validation in CMA, introduced
   this merge window

 - Crash in hfi1 due to faulty list operations

 - PCI ID updates for EFA

 - Disable LOCAL_INV in hns because it causes a HW hang

 - Crash in hns due to missing initialization

 - Memory leak in rxe

 - Missing error unwind during ib_core module loading

 - Missing error handling in qedr around work queue creation during
   startup

-----BEGIN PGP SIGNATURE-----

iHUEABYIAB0WIQRRRCHOFoQz/8F5bUaFwuHvBreFYQUCY2JgFQAKCRCFwuHvBreF
Yf1bAQChHuVuXM6BlX2wB8CoEZDqefHsOyuDbLCLlbqn3brCwAD/RmzK0jZfkFV2
xiE6vZJNSqmfvIyUJtzQY2TfgH0TpgE=
=T7AL
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Fix a few more of the usual sorts of bugs:

  - Another regression with source route validation in CMA, introduced
    this merge window

  - Crash in hfi1 due to faulty list operations

  - PCI ID updates for EFA

  - Disable LOCAL_INV in hns because it causes a HW hang

  - Crash in hns due to missing initialization

  - Memory leak in rxe

  - Missing error unwind during ib_core module loading

  - Missing error handling in qedr around work queue creation during
    startup"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/qedr: clean up work queue on failure in qedr_alloc_resources()
  RDMA/core: Fix null-ptr-deref in ib_core_cleanup()
  RDMA/rxe: Fix mr leak in RESPST_ERR_RNR
  RDMA/hns: Fix NULL pointer problem in free_mr_init()
  RDMA/hns: Disable local invalidate operation
  RDMA/efa: Add EFA 0xefa2 PCI ID
  IB/hfi1: Correctly move list in sc_disable()
  RDMA/cma: Use output interface for net_dev check
commit ae13366b17
drivers/infiniband/core/cma.c
@@ -1556,7 +1556,7 @@ static bool validate_ipv4_net_dev(struct net_device *net_dev,
 		return false;
 
 	memset(&fl4, 0, sizeof(fl4));
-	fl4.flowi4_iif = net_dev->ifindex;
+	fl4.flowi4_oif = net_dev->ifindex;
 	fl4.daddr = daddr;
 	fl4.saddr = saddr;
 
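For context, the check as a whole resolves a route for the (saddr, daddr) pair and then verifies it egresses through net_dev; with flowi4_iif set, the FIB treated this as an input-path lookup, which is what broke source-routed connections this merge window. A minimal sketch of the surrounding logic, assuming the usual fib_lookup() pattern (the lookup details below are not part of this diff):

	struct flowi4 fl4;
	struct fib_result res;
	int err;
	bool ret;

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_oif = net_dev->ifindex;	/* output interface, not input */
	fl4.daddr = daddr;
	fl4.saddr = saddr;

	rcu_read_lock();
	err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
	ret = err == 0 && FIB_RES_DEV(res) == net_dev;	/* route must use net_dev */
	rcu_read_unlock();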
drivers/infiniband/core/device.c
@@ -2815,10 +2815,18 @@ static int __init ib_core_init(void)
 
 	nldev_init();
 	rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
-	roce_gid_mgmt_init();
+	ret = roce_gid_mgmt_init();
+	if (ret) {
+		pr_warn("Couldn't init RoCE GID management\n");
+		goto err_parent;
+	}
 
 	return 0;
 
+err_parent:
+	rdma_nl_unregister(RDMA_NL_LS);
+	nldev_exit();
+	unregister_pernet_device(&rdma_dev_net_ops);
 err_compat:
 	unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
 err_sa:
drivers/infiniband/core/nldev.c
@@ -2537,7 +2537,7 @@ void __init nldev_init(void)
 	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
 }
 
-void __exit nldev_exit(void)
+void nldev_exit(void)
 {
 	rdma_nl_unregister(RDMA_NL_NLDEV);
 }
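Two related changes here: ib_core_init() gains a real unwind ladder, and nldev_exit() drops its __exit annotation because the new err_parent path calls it from __init code. Functions in .exit.text are discarded when the code is built in, so calling one from an init error path would jump into a nonexistent section. A generic sketch of the rule, with hypothetical names:

/* foo_cleanup() is reachable from foo_init()'s error path, so it must
 * not be __exit: that section is discarded when the code is built in. */
static void foo_cleanup(void)
{
	unregister_a();
}

static int __init foo_init(void)
{
	int ret;

	ret = register_a();
	if (ret)
		return ret;

	ret = register_b();
	if (ret) {
		foo_cleanup();	/* safe: not in a discarded section */
		return ret;
	}

	return 0;
}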
drivers/infiniband/hw/efa/efa_main.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
 /*
- * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
 */
 
 #include <linux/module.h>
@@ -14,10 +14,12 @@
 
 #define PCI_DEV_ID_EFA0_VF 0xefa0
 #define PCI_DEV_ID_EFA1_VF 0xefa1
+#define PCI_DEV_ID_EFA2_VF 0xefa2
 
 static const struct pci_device_id efa_pci_tbl[] = {
 	{ PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA0_VF) },
 	{ PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA1_VF) },
+	{ PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA2_VF) },
 	{ }
 };
 
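For reference, PCI_VDEVICE(AMAZON, id) matches on PCI_VENDOR_ID_AMAZON plus the given device ID, with subvendor and subdevice wildcarded, so the single table entry is all that is needed for the PCI core to bind efa to the new function. Roughly what the new entry expands to:

	{ .vendor = PCI_VENDOR_ID_AMAZON, .device = 0xefa2,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID },

Module autoloading then works through the driver's existing MODULE_DEVICE_TABLE(pci, efa_pci_tbl) export (not shown in this hunk; assumed from standard practice).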
drivers/infiniband/hw/hfi1/pio.c
@@ -913,8 +913,7 @@ void sc_disable(struct send_context *sc)
 	spin_unlock(&sc->release_lock);
 
 	write_seqlock(&sc->waitlock);
-	if (!list_empty(&sc->piowait))
-		list_move(&sc->piowait, &wake_list);
+	list_splice_init(&sc->piowait, &wake_list);
 	write_sequnlock(&sc->waitlock);
 	while (!list_empty(&wake_list)) {
 		struct iowait *wait;
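The bug: sc->piowait is a list head, but list_move() treats its first argument as a list entry, so the old code linked the head node itself into wake_list and stranded the queued waiters, corrupting both lists once anything was queued. list_splice_init() is the head-to-head primitive. A standalone sketch:

#include <linux/list.h>

static LIST_HEAD(waiters);	/* head of a wait queue with N entries */
static LIST_HEAD(wake_list);

static void drain_waiters(void)
{
	/* list_move(&waiters, &wake_list) would be wrong here: it treats
	 * 'waiters' (a head) as an entry and strands its real entries. */
	list_splice_init(&waiters, &wake_list);	/* move all entries and
						 * reinit 'waiters' to empty */
}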
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -118,7 +118,6 @@ static const u32 hns_roce_op_code[] = {
 	HR_OPC_MAP(ATOMIC_CMP_AND_SWP, ATOM_CMP_AND_SWAP),
 	HR_OPC_MAP(ATOMIC_FETCH_AND_ADD, ATOM_FETCH_AND_ADD),
 	HR_OPC_MAP(SEND_WITH_INV, SEND_WITH_INV),
-	HR_OPC_MAP(LOCAL_INV, LOCAL_INV),
 	HR_OPC_MAP(MASKED_ATOMIC_CMP_AND_SWP, ATOM_MSK_CMP_AND_SWAP),
 	HR_OPC_MAP(MASKED_ATOMIC_FETCH_AND_ADD, ATOM_MSK_FETCH_AND_ADD),
 	HR_OPC_MAP(REG_MR, FAST_REG_PMR),
@@ -559,9 +558,6 @@ static int set_rc_opcode(struct hns_roce_dev *hr_dev,
 		else
 			ret = -EOPNOTSUPP;
 		break;
-	case IB_WR_LOCAL_INV:
-		hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_SO);
-		fallthrough;
 	case IB_WR_SEND_WITH_INV:
 		rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
 		break;
@@ -2805,8 +2801,12 @@ static int free_mr_modify_qp(struct hns_roce_dev *hr_dev)
 
 static int free_mr_init(struct hns_roce_dev *hr_dev)
 {
+	struct hns_roce_v2_priv *priv = hr_dev->priv;
+	struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
 	int ret;
 
+	mutex_init(&free_mr->mutex);
+
 	ret = free_mr_alloc_res(hr_dev);
 	if (ret)
 		return ret;
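Judging by the diff, the NULL pointer came from taking free_mr->mutex before it was ever initialized; hoisting mutex_init() to the top of free_mr_init() guarantees it runs before any path that can grab the lock. The general rule, as a tiny sketch:

#include <linux/mutex.h>

static struct mutex lock;	/* zeroed memory is not a valid mutex */

static void use_lock(void)
{
	mutex_init(&lock);	/* must run before the first mutex_lock() */
	mutex_lock(&lock);
	/* ... touch shared state ... */
	mutex_unlock(&lock);
}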
@@ -3222,7 +3222,6 @@ static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
 
 	hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID);
 	hr_reg_write(mpt_entry, MPT_PD, mr->pd);
-	hr_reg_enable(mpt_entry, MPT_L_INV_EN);
 
 	hr_reg_write_bool(mpt_entry, MPT_BIND_EN,
 			  mr->access & IB_ACCESS_MW_BIND);
@@ -3313,7 +3312,6 @@ static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
 
 	hr_reg_enable(mpt_entry, MPT_RA_EN);
 	hr_reg_enable(mpt_entry, MPT_R_INV_EN);
-	hr_reg_enable(mpt_entry, MPT_L_INV_EN);
 
 	hr_reg_enable(mpt_entry, MPT_FRE);
 	hr_reg_clear(mpt_entry, MPT_MR_MW);
@@ -3345,7 +3343,6 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
 	hr_reg_write(mpt_entry, MPT_PD, mw->pdn);
 
 	hr_reg_enable(mpt_entry, MPT_R_INV_EN);
-	hr_reg_enable(mpt_entry, MPT_L_INV_EN);
 	hr_reg_enable(mpt_entry, MPT_LW_EN);
 
 	hr_reg_enable(mpt_entry, MPT_MR_MW);
@@ -3794,7 +3791,6 @@ static const u32 wc_send_op_map[] = {
 	HR_WC_OP_MAP(RDMA_READ, RDMA_READ),
 	HR_WC_OP_MAP(RDMA_WRITE, RDMA_WRITE),
 	HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE),
-	HR_WC_OP_MAP(LOCAL_INV, LOCAL_INV),
 	HR_WC_OP_MAP(ATOM_CMP_AND_SWAP, COMP_SWAP),
 	HR_WC_OP_MAP(ATOM_FETCH_AND_ADD, FETCH_ADD),
 	HR_WC_OP_MAP(ATOM_MSK_CMP_AND_SWAP, MASKED_COMP_SWAP),
@@ -3844,9 +3840,6 @@ static void fill_send_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
 	case HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM:
 		wc->wc_flags |= IB_WC_WITH_IMM;
 		break;
-	case HNS_ROCE_V2_WQE_OP_LOCAL_INV:
-		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
-		break;
 	case HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP:
 	case HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD:
 	case HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP:
drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -179,7 +179,6 @@ enum {
 	HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP = 0x8,
 	HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD = 0x9,
 	HNS_ROCE_V2_WQE_OP_FAST_REG_PMR = 0xa,
-	HNS_ROCE_V2_WQE_OP_LOCAL_INV = 0xb,
 	HNS_ROCE_V2_WQE_OP_BIND_MW = 0xc,
 	HNS_ROCE_V2_WQE_OP_MASK = 0x1f,
 };
@@ -915,7 +914,6 @@ struct hns_roce_v2_rc_send_wqe {
 #define RC_SEND_WQE_OWNER RC_SEND_WQE_FIELD_LOC(7, 7)
 #define RC_SEND_WQE_CQE RC_SEND_WQE_FIELD_LOC(8, 8)
 #define RC_SEND_WQE_FENCE RC_SEND_WQE_FIELD_LOC(9, 9)
-#define RC_SEND_WQE_SO RC_SEND_WQE_FIELD_LOC(10, 10)
 #define RC_SEND_WQE_SE RC_SEND_WQE_FIELD_LOC(11, 11)
 #define RC_SEND_WQE_INLINE RC_SEND_WQE_FIELD_LOC(12, 12)
 #define RC_SEND_WQE_WQE_INDEX RC_SEND_WQE_FIELD_LOC(30, 15)
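Taken together, the hns hunks remove LOCAL_INV end to end: the opcode map entry, the WQE builder case, the MPT L_INV_EN bits, the completion mapping, the 0xb WQE opcode, and the SO field used only by that path. A posted IB_WR_LOCAL_INV should now land in set_rc_opcode()'s unsupported-opcode branch rather than hanging the hardware; a sketch of the assumed resulting flow (the default branch is an assumption, matching the -EOPNOTSUPP pattern visible earlier in that hunk):

	switch (wr->opcode) {
	case IB_WR_SEND_WITH_INV:
		rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
		break;
	/* IB_WR_LOCAL_INV no longer has a case of its own ... */
	default:
		ret = -EOPNOTSUPP;	/* ... so it is rejected here */
		break;
	}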
drivers/infiniband/hw/qedr/main.c
@@ -344,6 +344,10 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
 	if (IS_IWARP(dev)) {
 		xa_init(&dev->qps);
 		dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
+		if (!dev->iwarp_wq) {
+			rc = -ENOMEM;
+			goto err1;
+		}
 	}
 
 	/* Allocate Status blocks for CNQ */
@@ -351,7 +355,7 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
 					GFP_KERNEL);
 	if (!dev->sb_array) {
 		rc = -ENOMEM;
-		goto err1;
+		goto err_destroy_wq;
 	}
 
 	dev->cnq_array = kcalloc(dev->num_cnq,
@@ -402,6 +406,9 @@ err3:
 	kfree(dev->cnq_array);
 err2:
 	kfree(dev->sb_array);
+err_destroy_wq:
+	if (IS_IWARP(dev))
+		destroy_workqueue(dev->iwarp_wq);
 err1:
 	kfree(dev->sgid_tbl);
 	return rc;
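The qedr change is the standard goto-ladder unwind: every acquired resource gets a label that releases it and everything acquired after it, and each failure jumps to the label matching exactly what has been acquired so far. A generic sketch with hypothetical names:

#include <linux/slab.h>
#include <linux/workqueue.h>

static struct workqueue_struct *wq;
static u32 *buf;

static int setup(size_t n)
{
	int rc;

	wq = create_singlethread_workqueue("example_wq");
	if (!wq)
		return -ENOMEM;		/* nothing to undo yet */

	buf = kcalloc(n, sizeof(*buf), GFP_KERNEL);
	if (!buf) {
		rc = -ENOMEM;
		goto err_destroy_wq;	/* undo exactly what succeeded */
	}

	return 0;

err_destroy_wq:
	destroy_workqueue(wq);
	return rc;
}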
drivers/infiniband/sw/rxe/rxe_resp.c
@@ -806,8 +806,10 @@ static enum resp_states read_reply(struct rxe_qp *qp,
 
 	skb = prepare_ack_packet(qp, &ack_pkt, opcode, payload,
 				 res->cur_psn, AETH_ACK_UNLIMITED);
-	if (!skb)
+	if (!skb) {
+		rxe_put(mr);
 		return RESPST_ERR_RNR;
+	}
 
 	rxe_mr_copy(mr, res->read.va, payload_addr(&ack_pkt),
 		    payload, RXE_FROM_MR_OBJ);
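The rxe leak is the classic missed reference drop: as the added rxe_put(mr) shows, read_reply() holds a reference on the MR before allocating the ack packet, so the allocation-failure exit must put it back. Generic shape of the rule, names hypothetical:

	obj = lookup_obj(key);		/* acquires a reference */
	if (!obj)
		return -ENOENT;

	buf = alloc_buf();
	if (!buf) {
		put_obj(obj);		/* every early return drops the ref */
		return -ENOMEM;
	}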