Mirror of https://github.com/torvalds/linux.git
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "A set of fixes, one for NVMe from Keith, and a set for nvme-{rdma,t,f}
  from the usual suspects, fixing actual problems that would be a shame
  to release 4.9 with"

* 'for-linus' of git://git.kernel.dk/linux-block:
  nvme/pci: Don't free queues on error
  nvmet-rdma: drain the queue-pair just before freeing it
  nvme-rdma: stop and free io queues on connect failure
  nvmet-rdma: don't forget to delete a queue from the list of connection failed
  nvmet: Don't queue fatal error work if csts.cfs is set
  nvme-rdma: reject non-connect commands before the queue is live
  nvmet-rdma: Fix possible NULL deref when handling rdma cm events
commit 623898671c
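
The common thread in the nvme-rdma fixes below is a new NVME_RDMA_Q_LIVE queue flag: a queue may be connected at the RDMA transport level yet must not accept ordinary commands until the NVMe-over-Fabrics Connect command on it has completed. A minimal sketch of that gating pattern, assuming the kernel's atomic bitops; the types and helper names are hypothetical stand-ins, not taken from the diff:

    #include <linux/bitops.h>

    #define SKETCH_Q_LIVE	0	/* hypothetical bit index */

    struct sketch_queue {
    	unsigned long flags;	/* bitmap for atomic set/test/clear */
    };

    /* Called once the fabrics Connect command on this queue succeeds. */
    static void sketch_queue_went_live(struct sketch_queue *q)
    {
    	set_bit(SKETCH_Q_LIVE, &q->flags);
    }

    /* Submission-path gate: before the queue is live, only the Connect
     * command itself may pass; everything else must be rejected/retried. */
    static bool sketch_queue_accepts(struct sketch_queue *q, bool is_connect)
    {
    	if (!test_bit(SKETCH_Q_LIVE, &q->flags))
    		return is_connect;
    	return true;
    }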
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1242,20 +1242,16 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	result = nvme_enable_ctrl(&dev->ctrl, cap);
 	if (result)
-		goto free_nvmeq;
+		return result;
 
 	nvmeq->cq_vector = 0;
 	result = queue_request_irq(nvmeq);
 	if (result) {
 		nvmeq->cq_vector = -1;
-		goto free_nvmeq;
+		return result;
 	}
 
 	return result;
-
- free_nvmeq:
-	nvme_free_queues(dev, 0);
-	return result;
 }
 
 static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
@@ -1317,10 +1313,8 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
 	max = min(dev->max_qid, dev->queue_count - 1);
 	for (i = dev->online_queues; i <= max; i++) {
 		ret = nvme_create_queue(dev->queues[i], i);
-		if (ret) {
-			nvme_free_queues(dev, i);
+		if (ret)
 			break;
-		}
 	}
 
 	/*
@@ -1460,13 +1454,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	result = queue_request_irq(adminq);
 	if (result) {
 		adminq->cq_vector = -1;
-		goto free_queues;
+		return result;
 	}
 	return nvme_create_io_queues(dev);
-
- free_queues:
-	nvme_free_queues(dev, 1);
-	return result;
 }
 
 static void nvme_del_queue_end(struct request *req, int error)
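
All three pci.c hunks above make the same change: error paths in queue setup now return the error instead of freeing queues through a local goto label, so a single teardown path owns the queue freeing and nothing frees a queue that another context may still reference. A hedged sketch of the before/after shape, with hypothetical names standing in for the driver's setup steps:

    struct sketch_dev;						/* stand-in type */
    static int sketch_enable_step(struct sketch_dev *dev);	/* hypothetical */
    static void sketch_free_queues(struct sketch_dev *dev);	/* hypothetical */

    /* Before: each setup function freed the queues on its own error path,
     * so two paths could end up tearing down the same queues. */
    static int sketch_setup_before(struct sketch_dev *dev)
    {
    	int result;

    	result = sketch_enable_step(dev);
    	if (result)
    		goto free_queues;
    	return 0;
     free_queues:
    	sketch_free_queues(dev);
    	return result;
    }

    /* After: setup only reports the error; the reset/removal path
     * performs the one and only teardown. */
    static int sketch_setup_after(struct sketch_dev *dev)
    {
    	return sketch_enable_step(dev);
    }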
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -83,6 +83,7 @@ enum nvme_rdma_queue_flags {
 	NVME_RDMA_Q_CONNECTED = (1 << 0),
 	NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1),
 	NVME_RDMA_Q_DELETING = (1 << 2),
+	NVME_RDMA_Q_LIVE = (1 << 3),
 };
 
 struct nvme_rdma_queue {
@@ -624,10 +625,18 @@ static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl)
 
 	for (i = 1; i < ctrl->queue_count; i++) {
 		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
-		if (ret)
-			break;
+		if (ret) {
+			dev_info(ctrl->ctrl.device,
+				"failed to connect i/o queue: %d\n", ret);
+			goto out_free_queues;
+		}
+		set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);
 	}
 
 	return 0;
+
+out_free_queues:
+	nvme_rdma_free_io_queues(ctrl);
+	return ret;
 }
 
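The rewritten connect loop above replaces a bare break with the standard kernel goto-unwind idiom: log the failure, free every I/O queue rather than leaving some half set up, and mark each queue that did connect as live. The same shape in isolation, a sketch with hypothetical helpers:

    struct sketch_ctrl { int queue_count; };			/* stand-in type */
    static int sketch_connect_one(struct sketch_ctrl *c, int i);	/* hypothetical */
    static void sketch_mark_live(struct sketch_ctrl *c, int i);	/* hypothetical */
    static void sketch_free_io_queues(struct sketch_ctrl *c);	/* hypothetical */

    static int sketch_connect_io_queues(struct sketch_ctrl *ctrl)
    {
    	int i, ret;

    	for (i = 1; i < ctrl->queue_count; i++) {
    		ret = sketch_connect_one(ctrl, i);
    		if (ret)
    			goto out_free_queues;	/* unwind everything */
    		sketch_mark_live(ctrl, i);
    	}
    	return 0;

    out_free_queues:
    	sketch_free_io_queues(ctrl);
    	return ret;
    }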
@@ -712,6 +721,8 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
 	if (ret)
 		goto stop_admin_q;
 
+	set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
+
 	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
 	if (ret)
 		goto stop_admin_q;
@@ -761,8 +772,10 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 
 	nvme_stop_keep_alive(&ctrl->ctrl);
 
-	for (i = 0; i < ctrl->queue_count; i++)
+	for (i = 0; i < ctrl->queue_count; i++) {
 		clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags);
+		clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);
+	}
 
 	if (ctrl->queue_count > 1)
 		nvme_stop_queues(&ctrl->ctrl);
@@ -1378,6 +1391,24 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
 	return BLK_EH_HANDLED;
 }
 
+/*
+ * We cannot accept any other command until the Connect command has completed.
+ */
+static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
+		struct request *rq)
+{
+	if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
+		struct nvme_command *cmd = (struct nvme_command *)rq->cmd;
+
+		if (rq->cmd_type != REQ_TYPE_DRV_PRIV ||
+		    cmd->common.opcode != nvme_fabrics_command ||
+		    cmd->fabrics.fctype != nvme_fabrics_type_connect)
+			return false;
+	}
+
+	return true;
+}
+
 static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 		const struct blk_mq_queue_data *bd)
 {
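nvme_rdma_queue_is_ready() is the enforcement point for the new flag; its caller in the next hunk returns BLK_MQ_RQ_QUEUE_BUSY for anything other than a fabrics Connect on a not-yet-live queue, which makes blk-mq requeue and retry the request instead of failing it. Gathering the flag's movements from the surrounding hunks into one place (not a real function, just the diff's calls side by side):

    static void sketch_q_live_lifecycle(struct nvme_rdma_ctrl *ctrl, int i)
    {
    	/* set once the admin queue's Connect succeeds (hunks at 721/1578) */
    	set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);

    	/* set per I/O queue as each Connect succeeds (hunk at 625) */
    	set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);

    	/* cleared for every queue when error recovery starts (hunk at 772) */
    	clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);
    }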
@@ -1394,6 +1425,9 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	WARN_ON_ONCE(rq->tag < 0);
 
+	if (!nvme_rdma_queue_is_ready(queue, rq))
+		return BLK_MQ_RQ_QUEUE_BUSY;
+
 	dev = queue->device->dev;
 	ib_dma_sync_single_for_cpu(dev, sqe->dma,
 			sizeof(struct nvme_command), DMA_TO_DEVICE);
@@ -1544,6 +1578,8 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
 	if (error)
 		goto out_cleanup_queue;
 
+	set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
+
 	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
 	if (error) {
 		dev_err(ctrl->ctrl.device,
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -838,9 +838,13 @@ static void nvmet_fatal_error_handler(struct work_struct *work)
 
 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
 {
-	ctrl->csts |= NVME_CSTS_CFS;
-	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
-	schedule_work(&ctrl->fatal_err_work);
+	mutex_lock(&ctrl->lock);
+	if (!(ctrl->csts & NVME_CSTS_CFS)) {
+		ctrl->csts |= NVME_CSTS_CFS;
+		INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
+		schedule_work(&ctrl->fatal_err_work);
+	}
+	mutex_unlock(&ctrl->lock);
 }
 EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
 
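The target fix above turns an unconditional INIT_WORK + schedule_work into a test-and-set under the controller lock: CSTS.CFS doubles as the "fatal error already being handled" marker, so a second fatal error can no longer re-initialize a work item that may still be pending or running. The idiom in standalone form, a sketch with stand-in names (NVME_CSTS_CFS is the real bit definition; the rest is hypothetical):

    #include <linux/mutex.h>
    #include <linux/nvme.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    struct sketch_ctrl {
    	struct mutex lock;
    	u32 csts;
    	struct work_struct fatal_err_work;
    };

    static void sketch_fatal_error_handler(struct work_struct *work)
    {
    	/* tear the controller down, outside the reporter's context */
    }

    static void sketch_fatal_error(struct sketch_ctrl *ctrl)
    {
    	mutex_lock(&ctrl->lock);
    	if (!(ctrl->csts & NVME_CSTS_CFS)) {	/* first fatal error only */
    		ctrl->csts |= NVME_CSTS_CFS;
    		INIT_WORK(&ctrl->fatal_err_work, sketch_fatal_error_handler);
    		schedule_work(&ctrl->fatal_err_work);
    	}
    	mutex_unlock(&ctrl->lock);
    }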
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -951,6 +951,7 @@ err_destroy_cq:
 
 static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
 {
+	ib_drain_qp(queue->cm_id->qp);
 	rdma_destroy_qp(queue->cm_id);
 	ib_free_cq(queue->cq);
 }
@@ -1066,6 +1067,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
 	spin_lock_init(&queue->rsp_wr_wait_lock);
 	INIT_LIST_HEAD(&queue->free_rsps);
 	spin_lock_init(&queue->rsps_lock);
+	INIT_LIST_HEAD(&queue->queue_list);
 
 	queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
 	if (queue->idx < 0) {
@@ -1244,7 +1246,6 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
 
 	if (disconnect) {
 		rdma_disconnect(queue->cm_id);
-		ib_drain_qp(queue->cm_id->qp);
 		schedule_work(&queue->release_work);
 	}
 }
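Read together, this hunk and the one at 951 move ib_drain_qp() out of the disconnect path and into nvmet_rdma_destroy_queue_ib(): the drain now runs exactly once, immediately before the QP is destroyed, whichever path releases the queue. The resulting teardown sequence, sketched with the real verbs/CM calls:

    #include <rdma/ib_verbs.h>
    #include <rdma/rdma_cm.h>

    /* Sketch of the ordering after this change: the drain flushes all
     * posted work requests, so no completions can race with the QP/CQ
     * teardown that follows. */
    static void sketch_destroy_queue_ib(struct rdma_cm_id *cm_id, struct ib_cq *cq)
    {
    	ib_drain_qp(cm_id->qp);		/* just before freeing, per the fix */
    	rdma_destroy_qp(cm_id);
    	ib_free_cq(cq);
    }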
@@ -1269,7 +1270,12 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
 {
 	WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);
 
-	pr_err("failed to connect queue\n");
+	mutex_lock(&nvmet_rdma_queue_mutex);
+	if (!list_empty(&queue->queue_list))
+		list_del_init(&queue->queue_list);
+	mutex_unlock(&nvmet_rdma_queue_mutex);
+
+	pr_err("failed to connect queue %d\n", queue->idx);
 	schedule_work(&queue->release_work);
 }
 
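The INIT_LIST_HEAD() added in nvmet_rdma_alloc_queue() pairs with the list_empty()/list_del_init() sequence in the connect-failure path above: a node initialized with INIT_LIST_HEAD (or removed with list_del_init) points at itself, so list_empty() on the node is a safe "am I on a list?" test, whereas an uninitialized node would make that test read garbage. The idiom in isolation, with stand-in types:

    #include <linux/list.h>
    #include <linux/mutex.h>

    struct sketch_queue {
    	struct list_head queue_list;
    };

    static void sketch_init(struct sketch_queue *q)
    {
    	INIT_LIST_HEAD(&q->queue_list);		/* self-linked: "on no list" */
    }

    static void sketch_connect_fail(struct sketch_queue *q, struct mutex *lock)
    {
    	mutex_lock(lock);
    	if (!list_empty(&q->queue_list))	/* only if ever enqueued */
    		list_del_init(&q->queue_list);	/* safe to test/repeat later */
    	mutex_unlock(lock);
    }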
@@ -1352,7 +1358,13 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
 	case RDMA_CM_EVENT_ADDR_CHANGE:
 	case RDMA_CM_EVENT_DISCONNECTED:
 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
-		nvmet_rdma_queue_disconnect(queue);
+		/*
+		 * We might end up here when we already freed the qp
+		 * which means queue release sequence is in progress,
+		 * so don't get in the way...
+		 */
+		if (queue)
+			nvmet_rdma_queue_disconnect(queue);
 		break;
 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
 		ret = nvmet_rdma_device_removal(cm_id, queue);