RDMA fourth 5.8 rc pull request
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Two more merge window regressions, a corruption bug in hfi1 and a few
  other small fixes.

  - Missing user input validation regression in ucma
  - Disallowing a previously allowed user combination regression in mlx5
  - ODP prefetch memory leak triggerable by userspace
  - Memory corruption in hfi1 due to faulty ring buffer logic
  - Missed mutex initialization crash in mlx5
  - Two small defects with RDMA DIM"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/core: Free DIM memory in error unwind
  RDMA/core: Stop DIM before destroying CQ
  RDMA/mlx5: Initialize QP mutex for the debug kernels
  IB/rdmavt: Fix RQ counting issues causing use of an invalid RWQE
  RDMA/mlx5: Allow providing extra scatter CQE QP flag
  RDMA/mlx5: Fix prefetch memory leak if get_prefetchable_mr fails
  RDMA/cm: Add min length checks to user structure copies
commit ae2911de2e
@@ -72,6 +72,15 @@ static void rdma_dim_init(struct ib_cq *cq)
         INIT_WORK(&dim->work, ib_cq_rdma_dim_work);
 }
 
+static void rdma_dim_destroy(struct ib_cq *cq)
+{
+        if (!cq->dim)
+                return;
+
+        cancel_work_sync(&cq->dim->work);
+        kfree(cq->dim);
+}
+
 static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
 {
         int rc;
@@ -266,6 +275,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
         return cq;
 
 out_destroy_cq:
+        rdma_dim_destroy(cq);
         rdma_restrack_del(&cq->res);
         cq->device->ops.destroy_cq(cq, udata);
 out_free_wc:
@@ -331,12 +341,10 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
                 WARN_ON_ONCE(1);
         }
 
+        rdma_dim_destroy(cq);
         trace_cq_free(cq);
         rdma_restrack_del(&cq->res);
         cq->device->ops.destroy_cq(cq, udata);
-        if (cq->dim)
-                cancel_work_sync(&cq->dim->work);
-        kfree(cq->dim);
         kfree(cq->wc);
         kfree(cq);
 }
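
Both DIM fixes above enforce the same ordering rule: the dim work item dereferences the CQ, so it has to be cancelled and the dim context freed before the CQ is destroyed, on the error-unwind path as well as on normal teardown. The sketch below is a userspace analogue of that rule, using POSIX threads in place of the kernel workqueue; it is not the kernel implementation.

/*
 * Userspace analogue (assumed names, not kernel code): deferred work that
 * dereferences an object must be stopped and waited for before the object
 * is freed.  Build with: cc -pthread dim_teardown.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct dim_ctx {
        pthread_t worker;
        atomic_bool stop;
        unsigned long samples;        /* state the worker keeps touching */
};

static void *dim_worker(void *arg)
{
        struct dim_ctx *dim = arg;

        while (!atomic_load(&dim->stop)) {
                dim->samples++;       /* would be a use-after-free if dim were freed */
                usleep(1000);
        }
        return NULL;
}

static struct dim_ctx *dim_create(void)
{
        struct dim_ctx *dim = calloc(1, sizeof(*dim));

        if (!dim)
                return NULL;
        atomic_init(&dim->stop, false);
        if (pthread_create(&dim->worker, NULL, dim_worker, dim) != 0) {
                free(dim);            /* error unwind: nothing running yet */
                return NULL;
        }
        return dim;
}

static void dim_destroy(struct dim_ctx *dim)
{
        if (!dim)
                return;
        atomic_store(&dim->stop, true);  /* ask the worker to stop ...        */
        pthread_join(dim->worker, NULL); /* ... and wait until it has stopped */
        free(dim);                       /* only now is freeing safe          */
}

int main(void)
{
        struct dim_ctx *dim = dim_create();

        usleep(10 * 1000);
        dim_destroy(dim);
        puts("clean teardown");
        return 0;
}
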
@@ -1084,6 +1084,8 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
         size_t in_size;
         int ret;
 
+        if (in_len < offsetofend(typeof(cmd), reserved))
+                return -EINVAL;
         in_size = min_t(size_t, in_len, sizeof(cmd));
         if (copy_from_user(&cmd, inbuf, in_size))
                 return -EFAULT;
@@ -1141,6 +1143,8 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
         size_t in_size;
         int ret;
 
+        if (in_len < offsetofend(typeof(cmd), reserved))
+                return -EINVAL;
         in_size = min_t(size_t, in_len, sizeof(cmd));
         if (copy_from_user(&cmd, inbuf, in_size))
                 return -EFAULT;
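
Both ucma hunks above apply the same validation pattern: refuse a command that is too short to contain every mandatory field (everything up to and including 'reserved') before copying it, then copy only as many bytes as the caller actually supplied. Below is a standalone sketch of that pattern with a hypothetical command layout and a local stand-in for the kernel's offsetofend() macro.

/* Standalone sketch of the minimum-length check pattern; the struct layout
 * is hypothetical and memcpy() stands in for copy_from_user(). */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define offsetofend(TYPE, MEMBER) \
        (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct cmd {                          /* hypothetical command layout */
        uint32_t id;
        uint64_t response;
        uint32_t flags;
        uint32_t reserved;
        uint8_t  optional_data[32];   /* newer, optional trailing fields */
};

static int handle_write(const void *inbuf, size_t in_len)
{
        struct cmd cmd = {0};
        size_t in_size;

        /* Too short to hold the mandatory fields: reject up front. */
        if (in_len < offsetofend(struct cmd, reserved))
                return -1;            /* -EINVAL in the kernel */

        /* Copy only what the caller provided, capped at sizeof(cmd). */
        in_size = in_len < sizeof(cmd) ? in_len : sizeof(cmd);
        memcpy(&cmd, inbuf, in_size);
        return 0;
}

int main(void)
{
        unsigned char buf[64] = {0};

        printf("4-byte write:  %d\n", handle_write(buf, 4));           /* rejected */
        printf("full write:    %d\n", handle_write(buf, sizeof(buf))); /* accepted */
        return 0;
}
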
@@ -1797,9 +1797,7 @@ static bool init_prefetch_work(struct ib_pd *pd,
                 work->frags[i].mr =
                         get_prefetchable_mr(pd, advice, sg_list[i].lkey);
                 if (!work->frags[i].mr) {
-                        work->num_sge = i - 1;
-                        if (i)
-                                destroy_prefetch_work(work);
+                        work->num_sge = i;
                         return false;
                 }
 
@@ -1865,6 +1863,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
         srcu_key = srcu_read_lock(&dev->odp_srcu);
         if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
                 srcu_read_unlock(&dev->odp_srcu, srcu_key);
+                destroy_prefetch_work(work);
                 return -EINVAL;
         }
         queue_work(system_unbound_wq, &work->work);
@@ -1766,15 +1766,14 @@ err:
 }
 
 static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
+                                         struct mlx5_ib_qp *qp,
                                          struct ib_qp_init_attr *init_attr,
-                                         struct mlx5_ib_create_qp *ucmd,
                                          void *qpc)
 {
         int scqe_sz;
         bool allow_scat_cqe = false;
 
-        if (ucmd)
-                allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
+        allow_scat_cqe = qp->flags_en & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
 
         if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
                 return;
@@ -1853,8 +1852,6 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
         u32 *in;
         int err;
 
-        mutex_init(&qp->mutex);
-
         if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
                 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
 
@@ -1938,7 +1935,6 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
         u32 *in;
         int err;
 
-        mutex_init(&qp->mutex);
         spin_lock_init(&qp->sq.lock);
         spin_lock_init(&qp->rq.lock);
 
@@ -2012,7 +2008,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
         }
         if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
             (qp->type == MLX5_IB_QPT_DCI || qp->type == IB_QPT_RC))
-                configure_requester_scat_cqe(dev, init_attr, ucmd, qpc);
+                configure_requester_scat_cqe(dev, qp, init_attr, qpc);
 
         if (qp->rq.wqe_cnt) {
                 MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
@@ -2129,7 +2125,6 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
         u32 *in;
         int err;
 
-        mutex_init(&qp->mutex);
         spin_lock_init(&qp->sq.lock);
         spin_lock_init(&qp->rq.lock);
 
@@ -2543,13 +2538,18 @@ static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
                 return;
         }
 
-        if (flag == MLX5_QP_FLAG_SCATTER_CQE) {
+        switch (flag) {
+        case MLX5_QP_FLAG_SCATTER_CQE:
+        case MLX5_QP_FLAG_ALLOW_SCATTER_CQE:
                 /*
-                 * We don't return error if this flag was provided,
+                 * We don't return error if these flags were provided,
                  * and mlx5 doesn't have right capability.
                  */
-                *flags &= ~MLX5_QP_FLAG_SCATTER_CQE;
+                *flags &= ~(MLX5_QP_FLAG_SCATTER_CQE |
+                            MLX5_QP_FLAG_ALLOW_SCATTER_CQE);
                 return;
+        default:
+                break;
         }
         mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag);
 }
@@ -2589,6 +2589,8 @@ static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
         process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp);
         process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE,
                             MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
+        process_vendor_flag(dev, &flags, MLX5_QP_FLAG_ALLOW_SCATTER_CQE,
+                            MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
 
         if (qp->type == IB_QPT_RAW_PACKET) {
                 cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) ||
@@ -2963,6 +2965,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
                 goto free_ucmd;
         }
 
+        mutex_init(&qp->mutex);
         qp->type = type;
         if (udata) {
                 err = process_vendor_flags(dev, qp, params.ucmd, attr);
@@ -901,8 +901,6 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
         qp->s_tail_ack_queue = 0;
         qp->s_acked_ack_queue = 0;
         qp->s_num_rd_atomic = 0;
-        if (qp->r_rq.kwq)
-                qp->r_rq.kwq->count = qp->r_rq.size;
         qp->r_sge.num_sge = 0;
         atomic_set(&qp->s_reserved_used, 0);
 }
@@ -2366,31 +2364,6 @@ bad_lkey:
         return 0;
 }
 
-/**
- * get_count - count numbers of request work queue entries
- * in circular buffer
- * @rq: data structure for request queue entry
- * @tail: tail indices of the circular buffer
- * @head: head indices of the circular buffer
- *
- * Return - total number of entries in the circular buffer
- */
-static u32 get_count(struct rvt_rq *rq, u32 tail, u32 head)
-{
-        u32 count;
-
-        count = head;
-
-        if (count >= rq->size)
-                count = 0;
-        if (count < tail)
-                count += rq->size - tail;
-        else
-                count -= tail;
-
-        return count;
-}
-
 /**
  * get_rvt_head - get head indices of the circular buffer
  * @rq: data structure for request queue entry
@@ -2465,7 +2438,7 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
 
         if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
                 head = get_rvt_head(rq, ip);
-                kwq->count = get_count(rq, tail, head);
+                kwq->count = rvt_get_rq_count(rq, head, tail);
         }
         if (unlikely(kwq->count == 0)) {
                 ret = 0;
@@ -2500,7 +2473,9 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
                  * the number of remaining WQEs.
                  */
                 if (kwq->count < srq->limit) {
-                        kwq->count = get_count(rq, tail, get_rvt_head(rq, ip));
+                        kwq->count =
+                                rvt_get_rq_count(rq,
+                                                 get_rvt_head(rq, ip), tail);
                         if (kwq->count < srq->limit) {
                                 struct ib_event ev;
 
@@ -127,9 +127,7 @@ __be32 rvt_compute_aeth(struct rvt_qp *qp)
                  * not atomic, which is OK, since the fuzziness is
                  * resolved as further ACKs go out.
                  */
-                credits = head - tail;
-                if ((int)credits < 0)
-                        credits += qp->r_rq.size;
+                credits = rvt_get_rq_count(&qp->r_rq, head, tail);
         }
         /*
          * Binary search the credit table to find the code to
@@ -305,6 +305,25 @@ struct rvt_rq {
         spinlock_t lock ____cacheline_aligned_in_smp;
 };
 
+/**
+ * rvt_get_rq_count - count numbers of request work queue entries
+ * in circular buffer
+ * @rq: data structure for request queue entry
+ * @head: head indices of the circular buffer
+ * @tail: tail indices of the circular buffer
+ *
+ * Return - total number of entries in the Receive Queue
+ */
+
+static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail)
+{
+        u32 count = head - tail;
+
+        if ((s32)count < 0)
+                count += rq->size;
+        return count;
+}
+
 /*
  * This structure holds the information that the send tasklet needs
  * to send a RDMA read response or atomic operation.
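
The rvt_get_rq_count() helper added above replaces the old get_count() and the open-coded credit arithmetic with a single head-minus-tail computation that corrects for ring wraparound. The following standalone sketch (not the kernel code) shows the same arithmetic with a few example head/tail positions.

/* Standalone illustration of head/tail ring occupancy with wraparound
 * correction; indices are always kept in [0, size). */
#include <stdint.h>
#include <stdio.h>

static uint32_t ring_count(uint32_t size, uint32_t head, uint32_t tail)
{
        uint32_t count = head - tail;   /* unsigned subtraction may wrap */

        if ((int32_t)count < 0)         /* head has wrapped behind tail  */
                count += size;
        return count;
}

int main(void)
{
        printf("%u\n", ring_count(8, 5, 2)); /* 3 entries queued   */
        printf("%u\n", ring_count(8, 1, 6)); /* wrapped: 3 entries */
        printf("%u\n", ring_count(8, 4, 4)); /* empty: 0 entries   */
        return 0;
}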