nvme-pci: remove nvmeq->tags
There is no real need to have a pointer to the tagset in struct nvme_queue, as we only need it in a single place, and that place can derive the used tagset from the device and qid trivially. This fixes a problem with stale pointer exposure when tagsets are reset, and also shrinks the nvme_queue structure. It also matches what most other transports have done since day 1.

Reported-by: Edmund Nadolski <edmund.nadolski@intel.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
This commit is contained in:
parent
b716e6889c
commit
cfa27356f8
@@ -167,7 +167,6 @@ struct nvme_queue {
|
||||
/* only used for poll queues: */
|
||||
spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
|
||||
volatile struct nvme_completion *cqes;
|
||||
struct blk_mq_tags **tags;
|
||||
dma_addr_t sq_dma_addr;
|
||||
dma_addr_t cq_dma_addr;
|
||||
u32 __iomem *q_db;
|
||||
@@ -376,29 +375,17 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
|
||||
|
||||
WARN_ON(hctx_idx != 0);
|
||||
WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
|
||||
WARN_ON(nvmeq->tags);
|
||||
|
||||
hctx->driver_data = nvmeq;
|
||||
nvmeq->tags = &dev->admin_tagset.tags[0];
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
|
||||
{
|
||||
struct nvme_queue *nvmeq = hctx->driver_data;
|
||||
|
||||
nvmeq->tags = NULL;
|
||||
}
|
||||
|
||||
static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
|
||||
unsigned int hctx_idx)
|
||||
{
|
||||
struct nvme_dev *dev = data;
|
||||
struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
|
||||
|
||||
if (!nvmeq->tags)
|
||||
nvmeq->tags = &dev->tagset.tags[hctx_idx];
|
||||
|
||||
WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
|
||||
hctx->driver_data = nvmeq;
|
||||
return 0;
|
||||
@@ -948,6 +935,13 @@ static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
|
||||
writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
|
||||
}
|
||||
|
||||
static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
|
||||
{
|
||||
if (!nvmeq->qid)
|
||||
return nvmeq->dev->admin_tagset.tags[0];
|
||||
return nvmeq->dev->tagset.tags[nvmeq->qid - 1];
|
||||
}
|
||||
|
||||
static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
|
||||
{
|
||||
volatile struct nvme_completion *cqe = &nvmeq->cqes[idx];
|
||||
@@ -972,7 +966,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
|
||||
return;
|
||||
}
|
||||
|
||||
req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
|
||||
req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
|
||||
trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
|
||||
nvme_end_request(req, cqe->status, cqe->result);
|
||||
}
|
||||
@@ -1572,7 +1566,6 @@ static const struct blk_mq_ops nvme_mq_admin_ops = {
|
||||
.queue_rq = nvme_queue_rq,
|
||||
.complete = nvme_pci_complete_rq,
|
||||
.init_hctx = nvme_admin_init_hctx,
|
||||
.exit_hctx = nvme_admin_exit_hctx,
|
||||
.init_request = nvme_init_request,
|
||||
.timeout = nvme_timeout,
|
||||
};
|
||||
|
Loading…
Reference in New Issue
Block a user