blk-mq: merge blk_mq_insert_request and blk_mq_run_request
It's almost identical to blk_mq_insert_request, so fold the two into one slightly more generic function by making the flush special case a bit smarter.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 081241e592
commit eeabc850b7
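
The resulting API change can be read off the hunks below. As a quick orientation (a sketch derived from the diff, not text from the commit itself), the three converted call sites map as follows: the explicit queue argument is dropped in favour of rq->q, and the async flag from the old blk_mq_run_request becomes the new fourth parameter.

    /* before: two entry points */
    blk_mq_insert_request(q, rq, at_head, true);       /* blk_execute_rq_nowait() */
    blk_mq_run_request(rq, true, false);               /* mq_flush_run() */
    blk_mq_run_request(rq, false, true);               /* blk_insert_flush() */

    /* after: one entry point, blk_mq_insert_request(rq, at_head, run_queue, async) */
    blk_mq_insert_request(rq, at_head, true, false);   /* blk_execute_rq_nowait() */
    blk_mq_insert_request(rq, false, true, false);     /* mq_flush_run() */
    blk_mq_insert_request(rq, false, false, true);     /* blk_insert_flush() */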
block/blk-exec.c
@@ -65,7 +65,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	 * be resued after dying flag is set
 	 */
 	if (q->mq_ops) {
-		blk_mq_insert_request(q, rq, at_head, true);
+		blk_mq_insert_request(rq, at_head, true, false);
 		return;
 	}
block/blk-flush.c
@@ -137,7 +137,7 @@ static void mq_flush_run(struct work_struct *work)
 	rq = container_of(work, struct request, mq_flush_work);
 
 	memset(&rq->csd, 0, sizeof(rq->csd));
-	blk_mq_run_request(rq, true, false);
+	blk_mq_insert_request(rq, false, true, false);
 }
 
 static bool blk_flush_queue_rq(struct request *rq)
@@ -411,7 +411,7 @@ void blk_insert_flush(struct request *rq)
 	if ((policy & REQ_FSEQ_DATA) &&
 	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
 		if (q->mq_ops) {
-			blk_mq_run_request(rq, false, true);
+			blk_mq_insert_request(rq, false, false, true);
 		} else
 			list_add_tail(&rq->queuelist, &q->queue_head);
 		return;
block/blk-mq.c
@@ -724,60 +724,27 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 	blk_mq_add_timer(rq);
 }
 
-void blk_mq_insert_request(struct request_queue *q, struct request *rq,
-		bool at_head, bool run_queue)
-{
-	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx, *current_ctx;
-
-	ctx = rq->mq_ctx;
-	hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
-	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
-		blk_insert_flush(rq);
-	} else {
-		current_ctx = blk_mq_get_ctx(q);
-
-		if (!cpu_online(ctx->cpu)) {
-			ctx = current_ctx;
-			hctx = q->mq_ops->map_queue(q, ctx->cpu);
-			rq->mq_ctx = ctx;
-		}
-		spin_lock(&ctx->lock);
-		__blk_mq_insert_request(hctx, rq, at_head);
-		spin_unlock(&ctx->lock);
-
-		blk_mq_put_ctx(current_ctx);
-	}
-
-	if (run_queue)
-		__blk_mq_run_hw_queue(hctx);
-}
-EXPORT_SYMBOL(blk_mq_insert_request);
-
-/*
- * This is a special version of blk_mq_insert_request to bypass FLUSH request
- * check. Should only be used internally.
- */
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async)
+void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
+		bool async)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx, *current_ctx;
+	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
 
 	current_ctx = blk_mq_get_ctx(q);
+	if (!cpu_online(ctx->cpu))
+		rq->mq_ctx = ctx = current_ctx;
 
-	ctx = rq->mq_ctx;
-	if (!cpu_online(ctx->cpu)) {
-		ctx = current_ctx;
-		rq->mq_ctx = ctx;
-	}
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-	/* ctx->cpu might be offline */
-	spin_lock(&ctx->lock);
-	__blk_mq_insert_request(hctx, rq, false);
-	spin_unlock(&ctx->lock);
+	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
+	    !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
+		blk_insert_flush(rq);
+	} else {
+		spin_lock(&ctx->lock);
+		__blk_mq_insert_request(hctx, rq, at_head);
+		spin_unlock(&ctx->lock);
+	}
 
 	blk_mq_put_ctx(current_ctx);
 
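For readability, here is how the merged blk_mq_insert_request reads once this hunk is applied, reassembled from the context and added lines above; the closing run_queue check falls outside the hunk and is shown as it would read after the patch. The comment on the flush test is editorial, tying the code to the "smarter flush special case" mentioned in the commit message.

    void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
    		bool async)
    {
    	struct request_queue *q = rq->q;
    	struct blk_mq_hw_ctx *hctx;
    	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
    
    	current_ctx = blk_mq_get_ctx(q);
    	if (!cpu_online(ctx->cpu))
    		rq->mq_ctx = ctx = current_ctx;
    
    	hctx = q->mq_ops->map_queue(q, ctx->cpu);
    
    	/*
    	 * The smarter flush special case: FLUSH/FUA requests are routed
    	 * through blk_insert_flush(), but requests that are already part of
    	 * a flush sequence (REQ_FLUSH_SEQ) are inserted directly - the case
    	 * the separate blk_mq_run_request() used to exist for.
    	 */
    	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
    	    !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
    		blk_insert_flush(rq);
    	} else {
    		spin_lock(&ctx->lock);
    		__blk_mq_insert_request(hctx, rq, at_head);
    		spin_unlock(&ctx->lock);
    	}
    
    	blk_mq_put_ctx(current_ctx);
    
    	if (run_queue)
    		blk_mq_run_hw_queue(hctx, async);
    }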
block/blk-mq.h
@@ -23,7 +23,6 @@ struct blk_mq_ctx {
 };
 
 void __blk_mq_complete_request(struct request *rq);
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_init_flush(struct request_queue *q);
 void blk_mq_drain_queue(struct request_queue *q);
include/linux/blk-mq.h
@@ -122,8 +122,7 @@ void blk_mq_free_commands(struct request_queue *, void (*free)(void *data, struc
 
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
 
-void blk_mq_insert_request(struct request_queue *, struct request *,
-		bool, bool);
+void blk_mq_insert_request(struct request *, bool, bool, bool);
 void blk_mq_run_queues(struct request_queue *q, bool async);
 void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);