blk-mq: streamline blk_mq_make_request
Turn the different ways of merging or issuing I/O into a series of
if/else statements instead of the current maze of gotos.  Note that this
means we pin the CPU a little longer for some cases as the CTX put is
moved to common code at the end of the function.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit a4d907b6a3
parent 2299722c4b
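The shape of the refactoring, reduced to a toy sketch before the diff
itself: a goto maze with per-branch cleanup becomes a single if/else
chain with one common exit. All names here (dispatch_gotos,
dispatch_chain, put_ctx, the cookie values) are made up for
illustration; this is not the kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for blk_mq_put_ctx(): the cleanup that moves to common code. */
static void put_ctx(void)
{
	printf("put ctx\n");
}

/* Before: early exits via gotos, cleanup duplicated per branch. */
static int dispatch_gotos(bool is_flush, bool is_sync, bool has_elevator)
{
	int cookie = 0;

	if (is_flush) {
		if (has_elevator)
			goto elv_insert;
		cookie = 1;		/* insert flush */
		goto run_queue;
	}
	if (is_sync) {
		put_ctx();
		cookie = 2;		/* issue directly */
		goto done;
	}
	if (has_elevator) {
elv_insert:
		put_ctx();
		cookie = 3;		/* scheduler insert */
		goto done;
	}
run_queue:
	cookie += 10;			/* run the queue */
	put_ctx();
done:
	return cookie;
}

/* After: one if/else chain, one cleanup point at the end. */
static int dispatch_chain(bool is_flush, bool is_sync, bool has_elevator)
{
	int cookie;

	if (is_flush)
		cookie = has_elevator ? 3 : 11;
	else if (is_sync)
		cookie = 2;
	else if (has_elevator)
		cookie = 3;
	else
		cookie = 10;

	put_ctx();			/* held a little longer, put exactly once */
	return cookie;
}

int main(void)
{
	/* Both variants agree for every combination of flags. */
	for (int i = 0; i < 8; i++) {
		bool f = i & 1, s = i & 2, e = i & 4;
		printf("%d %d\n", dispatch_gotos(f, s, e),
				  dispatch_chain(f, s, e));
	}
	return 0;
}

The trade-off is the one the commit message names: dispatch_chain holds
the context across branches that used to release it early, in exchange
for exactly one cleanup site.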
block/blk-mq.c

@@ -1531,16 +1531,17 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 	cookie = request_to_qc_t(data.hctx, rq);
 
-	if (unlikely(is_flush_fua)) {
-		if (q->elevator)
-			goto elv_insert;
-		blk_mq_bio_to_request(rq, bio);
-		blk_insert_flush(rq);
-		goto run_queue;
-	}
-
 	plug = current->plug;
-	if (plug && q->nr_hw_queues == 1) {
+	if (unlikely(is_flush_fua)) {
+		blk_mq_bio_to_request(rq, bio);
+		if (q->elevator) {
+			blk_mq_sched_insert_request(rq, false, true, true,
+					true);
+		} else {
+			blk_insert_flush(rq);
+			blk_mq_run_hw_queue(data.hctx, true);
+		}
+	} else if (plug && q->nr_hw_queues == 1) {
 		struct request *last = NULL;
 
 		blk_mq_bio_to_request(rq, bio);
@@ -1559,8 +1560,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		else
 			last = list_entry_rq(plug->mq_list.prev);
 
-		blk_mq_put_ctx(data.ctx);
-
 		if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
 		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
 			blk_flush_plug_list(plug, false);
@@ -1568,7 +1567,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		}
 
 		list_add_tail(&rq->queuelist, &plug->mq_list);
-		goto done;
 	} else if (plug && !blk_queue_nomerges(q)) {
 		blk_mq_bio_to_request(rq, bio);
 
@@ -1585,39 +1583,20 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		list_del_init(&same_queue_rq->queuelist);
 		list_add_tail(&rq->queuelist, &plug->mq_list);
 
-		blk_mq_put_ctx(data.ctx);
 		if (same_queue_rq)
 			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
 					&cookie);
-		goto done;
-	} else if (is_sync) {
+	} else if (q->nr_hw_queues > 1 && is_sync) {
 		blk_mq_bio_to_request(rq, bio);
-
-		blk_mq_put_ctx(data.ctx);
 		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
-		goto done;
+	} else if (q->elevator) {
+		blk_mq_bio_to_request(rq, bio);
+		blk_mq_sched_insert_request(rq, false, true, true, true);
+	} else if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
+		blk_mq_run_hw_queue(data.hctx, true);
 	}
 
-	if (q->elevator) {
-elv_insert:
-		blk_mq_put_ctx(data.ctx);
-		blk_mq_bio_to_request(rq, bio);
-		blk_mq_sched_insert_request(rq, false, true,
-						!is_sync || is_flush_fua, true);
-		goto done;
-	}
-	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
-		/*
-		 * For a SYNC request, send it to the hardware immediately. For
-		 * an ASYNC request, just ensure that we run it later on. The
-		 * latter allows for merging opportunities and more efficient
-		 * dispatching.
-		 */
-run_queue:
-		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
-	}
 	blk_mq_put_ctx(data.ctx);
-done:
 	return cookie;
 }
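For readability, this is the tail of blk_mq_make_request as it looks
after the commit, reassembled from the new side of the hunks above. The
two plug branches are elided to comments, so this is a sketch of the
diff's result rather than a compilable excerpt:

	plug = current->plug;
	if (unlikely(is_flush_fua)) {
		blk_mq_bio_to_request(rq, bio);
		if (q->elevator) {
			blk_mq_sched_insert_request(rq, false, true, true,
					true);
		} else {
			blk_insert_flush(rq);
			blk_mq_run_hw_queue(data.hctx, true);
		}
	} else if (plug && q->nr_hw_queues == 1) {
		/* ... single-queue plugging path, unchanged apart from
		 * the dropped blk_mq_put_ctx() and goto ... */
	} else if (plug && !blk_queue_nomerges(q)) {
		/* ... limited-plugging path, likewise ... */
	} else if (q->nr_hw_queues > 1 && is_sync) {
		blk_mq_bio_to_request(rq, bio);
		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
	} else if (q->elevator) {
		blk_mq_bio_to_request(rq, bio);
		blk_mq_sched_insert_request(rq, false, true, true, true);
	} else if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
		blk_mq_run_hw_queue(data.hctx, true);
	}

	blk_mq_put_ctx(data.ctx);	/* the CTX put, now in common code */
	return cookie;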