Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull more block layer fixes from Jens Axboe:
 "I wasn't going to send off a new pull before next week, but the blk
  flush fix from Jan from the other day introduced a regression. It's
  rare enough not to have hit during testing, since it requires both a
  device that rejects the first flush, and bad timing while it does
  that. But since someone did hit it, let's get the revert into 4.4-rc3
  so we don't have a released rc with that known issue.

  Apart from that revert, three other fixes:

   - From Christoph, a fix for a missing unmap in NVMe request
     preparation.

   - An NVMe fix from Nishanth that fixes data corruption on powerpc.

   - Also from Christoph, fix a list_del() attempt on blk-mq that
     didn't have a matching list_add() at timer start"

* 'for-linus' of git://git.kernel.dk/linux-block:
  Revert "blk-flush: Queue through IO scheduler when flush not required"
  block: fix blk_abort_request for blk-mq drivers
  nvme: add missing unmaps in nvme_queue_rq
  NVMe: default to 4k device page size
commit 9b81d512a4
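The diff below is the combined view of the four commits in this merge. The per-file headers did not survive extraction, but the function names in the hunk headers identify each change, in the order listed above: blk_insert_flush (the flush revert), blk_abort_request (the blk-mq abort fix), nvme_queue_rq (the missing unmaps), and nvme_configure_admin_queue (the 4k device page size).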
@@ -422,7 +422,7 @@ void blk_insert_flush(struct request *rq)
                 if (q->mq_ops) {
                         blk_mq_insert_request(rq, false, false, true);
                 } else
-                        q->elevator->type->ops.elevator_add_req_fn(q, rq);
+                        list_add_tail(&rq->queuelist, &q->queue_head);
                 return;
         }
 
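This first hunk is the revert itself: with the flush-machinery change backed out, a request that carries data but needs no flush is again added directly to the legacy queue's dispatch list rather than being fed through the IO scheduler, sidestepping the rejected-first-flush timing regression described in the pull message.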
@@ -158,11 +158,13 @@ void blk_abort_request(struct request *req)
 {
         if (blk_mark_rq_complete(req))
                 return;
-        blk_delete_timer(req);
-        if (req->q->mq_ops)
+
+        if (req->q->mq_ops) {
                 blk_mq_rq_timed_out(req, false);
-        else
+        } else {
+                blk_delete_timer(req);
                 blk_rq_timed_out(req);
+        }
 }
 EXPORT_SYMBOL_GPL(blk_abort_request);
 
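This hunk is the list_del() fix from the pull message: blk_abort_request() used to call blk_delete_timer() unconditionally, but a blk-mq request is never list_add()ed to the legacy timeout list, so the list_del() inside that helper operated on a never-linked entry. The fix sends blk-mq requests through blk_mq_rq_timed_out() and confines blk_delete_timer() to the legacy branch. As a minimal userspace sketch of why that matters (toy code, not from the patch; the kernel's real list_del() additionally poisons the pointers afterwards):

#include <stdio.h>

/* Toy mirror of the kernel's struct list_head. */
struct list_head { struct list_head *prev, *next; };

/* The same unlinking the kernel's __list_del() performs. */
static void toy_list_del(struct list_head *entry)
{
        entry->next->prev = entry->prev;  /* garbage deref if never added */
        entry->prev->next = entry->next;
}

int main(void)
{
        struct list_head never_added;     /* prev/next uninitialized, like a
                                             blk-mq request whose timer path
                                             never did the list_add() */
        toy_list_del(&never_added);       /* undefined behavior: the bug
                                             class this commit removes */
        puts("not reached reliably");
        return 0;
}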
@@ -896,19 +896,28 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
                         goto retry_cmd;
                 }
                 if (blk_integrity_rq(req)) {
-                        if (blk_rq_count_integrity_sg(req->q, req->bio) != 1)
+                        if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) {
+                                dma_unmap_sg(dev->dev, iod->sg, iod->nents,
                                                 dma_dir);
                                 goto error_cmd;
+                        }
 
                         sg_init_table(iod->meta_sg, 1);
                         if (blk_rq_map_integrity_sg(
-                                        req->q, req->bio, iod->meta_sg) != 1)
+                                        req->q, req->bio, iod->meta_sg) != 1) {
+                                dma_unmap_sg(dev->dev, iod->sg, iod->nents,
+                                                dma_dir);
                                 goto error_cmd;
+                        }
 
                         if (rq_data_dir(req))
                                 nvme_dif_remap(req, nvme_dif_prep);
 
-                        if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir))
+                        if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) {
+                                dma_unmap_sg(dev->dev, iod->sg, iod->nents,
+                                                dma_dir);
                                 goto error_cmd;
+                        }
                 }
         }
 
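This hunk adds the missing unmaps: nvme_queue_rq() has already DMA-mapped the data scatterlist (iod->sg) by the time it validates and maps the integrity metadata, yet all three failure exits jumped to error_cmd without undoing that mapping. Each site now calls dma_unmap_sg() first. The shape of the fix is the standard acquire/unwind pattern; here is a hedged standalone sketch with made-up helper names (setup_mapping, setup_metadata, teardown_mapping are stand-ins, not driver functions):

#include <errno.h>

/* Hypothetical stand-ins for dma_map_sg()/dma_unmap_sg() and the
 * integrity-metadata setup. Sketch only, not the driver's code. */
static int  setup_mapping(void)    { return 0; }
static void teardown_mapping(void) { }
static int  setup_metadata(void)   { return -EIO; }

static int submit(void)
{
        int err = setup_mapping();
        if (err)
                return err;        /* nothing acquired yet, plain return */

        err = setup_metadata();
        if (err)
                goto unmap;        /* the release the old code skipped */

        return 0;

unmap:
        teardown_mapping();        /* pair every map with an unmap */
        return err;
}

int main(void)
{
        return submit() ? 1 : 0;   /* exits 1: metadata setup "fails" here */
}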
@@ -1728,9 +1737,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
         u32 aqa;
         u64 cap = lo_hi_readq(&dev->bar->cap);
         struct nvme_queue *nvmeq;
-        unsigned page_shift = PAGE_SHIFT;
+        /*
+         * default to a 4K page size, with the intention to update this
+         * path in the future to accommodate architectures with differing
+         * kernel and IO page sizes.
+         */
+        unsigned page_shift = 12;
         unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
-        unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;
 
         if (page_shift < dev_page_min) {
                 dev_err(dev->dev,
@@ -1739,13 +1752,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
                         1 << page_shift);
                 return -ENODEV;
         }
-        if (page_shift > dev_page_max) {
-                dev_info(dev->dev,
-                        "Device maximum page size (%u) smaller than "
-                        "host (%u); enabling work-around\n",
-                        1 << dev_page_max, 1 << page_shift);
-                page_shift = dev_page_max;
-        }
 
         dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ?
                                         NVME_CAP_NSSRC(cap) : 0;
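The final two hunks are the powerpc data-corruption fix: the admin-queue setup previously programmed the host's PAGE_SHIFT as the device page size, which on 64k-page kernels (PAGE_SHIFT of 16, as on powerpc) can exceed what the controller supports, per the removed "Device maximum page size smaller than host" work-around. The driver now always uses a 4k device page size (shift 12). Because the CAP fields encode page sizes as a shift relative to 12, MPSMAX can never fall below the new fixed page_shift, so the work-around became dead code and is removed, while the MPSMIN check stays for controllers whose minimum is larger than 4k. A small sketch of that "+ 12" encoding visible in the diff (standalone toy, not driver code):

#include <stdio.h>

/* Mirrors the "+ 12" encoding in the diff:
 * supported page size = 1 << (12 + CAP field). */
static unsigned page_size_from_field(unsigned field)
{
        return 1u << (12 + field);
}

int main(void)
{
        /* MPSMIN field 0 -> 4096 bytes, matching the fixed page_shift. */
        printf("MPSMIN=0 -> %u bytes\n", page_size_from_field(0));
        /* A 64k host page (shift 16) would need MPSMAX >= 4. */
        printf("MPSMAX=4 -> %u bytes\n", page_size_from_field(4));
        return 0;
}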