From 5a9d041ba2f6da468c891ca0fe263758e2c12091 Mon Sep 17 00:00:00 2001
From: Jens Axboe <axboe@kernel.dk>
Date: Sat, 13 Nov 2021 11:18:32 -0700
Subject: [PATCH] block: move io_context creation into where it's needed

The only user of the io_context for IO is BFQ, yet we put the checking
and logic of it into the normal IO path.

Put the creation into blk_mq_sched_assign_ioc(), and have BFQ use that
helper.

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/bfq-iosched.c  | 2 ++
 block/blk-core.c     | 9 ---------
 block/blk-mq-sched.c | 5 +++++
 block/blk-mq.c       | 3 ---
 4 files changed, 7 insertions(+), 12 deletions(-)

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index fec18118dc30..1ce1a99a7160 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -6573,6 +6573,8 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
  */
 static void bfq_prepare_request(struct request *rq)
 {
+	blk_mq_sched_assign_ioc(rq);
+
 	/*
 	 * Regardless of whether we have an icq attached, we have to
 	 * clear the scheduler pointers, as they might point to
diff --git a/block/blk-core.c b/block/blk-core.c
index 35a04d8c180a..2053d1b0e90e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -750,15 +750,6 @@ noinline_for_stack bool submit_bio_checks(struct bio *bio)
 		break;
 	}
 
-	/*
-	 * Various block parts want %current->io_context, so allocate it up
-	 * front rather than dealing with lots of pain to allocate it only
-	 * where needed. This may fail and the block layer knows how to live
-	 * with it.
-	 */
-	if (unlikely(!current->io_context))
-		create_task_io_context(current, GFP_ATOMIC, q->node);
-
 	if (blk_throtl_bio(bio))
 		return false;
 
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index ba21449439cc..b942b38000e5 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -24,6 +24,10 @@ void blk_mq_sched_assign_ioc(struct request *rq)
 	struct io_context *ioc;
 	struct io_cq *icq;
 
+	/* create task io_context, if we don't have one already */
+	if (unlikely(!current->io_context))
+		create_task_io_context(current, GFP_ATOMIC, q->node);
+
 	/*
 	 * May not have an IO context if it's a passthrough request
 	 */
@@ -43,6 +47,7 @@ void blk_mq_sched_assign_ioc(struct request *rq)
 	get_io_context(icq->ioc);
 	rq->elv.icq = icq;
 }
+EXPORT_SYMBOL_GPL(blk_mq_sched_assign_ioc);
 
 /*
  * Mark a hardware queue as needing a restart. For shared queues, maintain
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7cd408408a37..d6e7634e5e1f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -406,9 +406,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 
 		if (!op_is_flush(data->cmd_flags) &&
 		    e->type->ops.prepare_request) {
-			if (e->type->icq_cache)
-				blk_mq_sched_assign_ioc(rq);
-
 			e->type->ops.prepare_request(rq);
 			rq->rq_flags |= RQF_ELVPRIV;
 		}
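
For reference, here is how blk_mq_sched_assign_ioc() reads in
block/blk-mq-sched.c once this patch is applied. The hunks above only
show the immediate context of the change; the middle of the function
(the queue_lock-protected ioc_lookup_icq() and the ioc_create_icq()
fallback) is reconstructed from the surrounding file at this point in
history, so take it as a sketch to check against the tree rather than
an authoritative copy:

void blk_mq_sched_assign_ioc(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc;
	struct io_cq *icq;

	/* create task io_context, if we don't have one already */
	if (unlikely(!current->io_context))
		create_task_io_context(current, GFP_ATOMIC, q->node);

	/*
	 * May not have an IO context if it's a passthrough request
	 */
	ioc = current->io_context;
	if (!ioc)
		return;

	/* look up the icq for this (ioc, q) pair under the queue lock */
	spin_lock_irq(&q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(&q->queue_lock);

	/* no icq cached yet, try to create one */
	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}
	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}
EXPORT_SYMBOL_GPL(blk_mq_sched_assign_ioc);

Note that both create_task_io_context() and ioc_create_icq() are
allowed to fail here: the helper just returns and the request proceeds
without an icq, which BFQ knows how to live with (as the comment
removed from blk-core.c said). That is also why GFP_ATOMIC is used;
this path cannot block waiting for the allocation. The new
EXPORT_SYMBOL_GPL() is what lets BFQ, which can be built as a module,
call the helper directly from its prepare_request hook.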