blk-mq-sched: refactor scheduler initialization
Preparation cleanup for the next couple of fixes: push blk_mq_sched_setup()
and e->ops.mq.init_sched() into a helper.

Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit 6917ff0b5b
parent 81380ca107
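In short, blk_mq_init_sched() now owns the whole sequence: allocate per-hctx
scheduler tags, call e->ops.mq.init_sched(), and tear everything down again if
either step fails. Below is a minimal, self-contained userspace sketch of that
error-handling shape; the types and helpers here (struct queue,
sched_alloc_tags(), sched_teardown(), init_sched()) are hypothetical stand-ins
for illustration, not the kernel APIs.

/*
 * Userspace model of the refactor (stand-in types, not kernel code):
 * a single init helper owns allocation, scheduler setup, and cleanup
 * on failure, so every failure funnels through one "err:" label.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_HW_CTX 4

struct hw_ctx {
	int *sched_tags;	/* stands in for hctx->sched_tags */
};

struct queue {
	struct hw_ctx ctx[NR_HW_CTX];
	void *elevator;
};

/* Stand-in for blk_mq_sched_alloc_tags(): allocate for one hw context. */
static int sched_alloc_tags(struct queue *q, int i)
{
	q->ctx[i].sched_tags = calloc(256, sizeof(int));
	return q->ctx[i].sched_tags ? 0 : -1;
}

/* Stand-in for blk_mq_sched_teardown(): also frees partial allocations. */
static void sched_teardown(struct queue *q)
{
	for (int i = 0; i < NR_HW_CTX; i++) {
		free(q->ctx[i].sched_tags);
		q->ctx[i].sched_tags = NULL;
	}
}

/* Stand-in for blk_mq_init_sched(): one entry point, one error path. */
static int init_sched(struct queue *q, void *e)
{
	int ret = 0;

	if (!e) {
		q->elevator = NULL;	/* "none" scheduler is a no-op */
		return 0;
	}

	for (int i = 0; i < NR_HW_CTX; i++) {
		ret = sched_alloc_tags(q, i);
		if (ret)
			goto err;
	}

	q->elevator = e;	/* stands in for e->ops.mq.init_sched(q, e) */
	return 0;

err:
	sched_teardown(q);	/* caller never sees a half-initialized queue */
	return ret;
}

int main(void)
{
	static int iosched;	/* dummy elevator object */
	struct queue q = {0};

	if (init_sched(&q, &iosched) == 0)
		puts("scheduler initialized");
	sched_teardown(&q);
	return 0;
}

Callers benefit the same way elevator_init() and elevator_switch() do in the
diff below: a single call, with no conditional teardown left in their error
paths.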
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -432,51 +432,23 @@ static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
 	}
 }
 
-int blk_mq_sched_setup(struct request_queue *q)
+static int blk_mq_sched_alloc_tags(struct request_queue *q,
+				   struct blk_mq_hw_ctx *hctx,
+				   unsigned int hctx_idx)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
-	struct blk_mq_hw_ctx *hctx;
-	int ret, i;
+	int ret;
 
-	/*
-	 * Default to 256, since we don't split into sync/async like the
-	 * old code did. Additionally, this is a per-hw queue depth.
-	 */
-	q->nr_requests = 2 * BLKDEV_MAX_RQ;
+	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
+					       set->reserved_tags);
+	if (!hctx->sched_tags)
+		return -ENOMEM;
 
-	/*
-	 * We're switching to using an IO scheduler, so setup the hctx
-	 * scheduler tags and switch the request map from the regular
-	 * tags to scheduler tags. First allocate what we need, so we
-	 * can safely fail and fallback, if needed.
-	 */
-	ret = 0;
-	queue_for_each_hw_ctx(q, hctx, i) {
-		hctx->sched_tags = blk_mq_alloc_rq_map(set, i,
-				q->nr_requests, set->reserved_tags);
-		if (!hctx->sched_tags) {
-			ret = -ENOMEM;
-			break;
-		}
-		ret = blk_mq_alloc_rqs(set, hctx->sched_tags, i, q->nr_requests);
-		if (ret)
-			break;
-	}
+	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
+	if (ret)
+		blk_mq_sched_free_tags(set, hctx, hctx_idx);
 
-	/*
-	 * If we failed, free what we did allocate
-	 */
-	if (ret) {
-		queue_for_each_hw_ctx(q, hctx, i) {
-			if (!hctx->sched_tags)
-				continue;
-			blk_mq_sched_free_tags(set, hctx, i);
-		}
-
-		return ret;
-	}
-
-	return 0;
+	return ret;
 }
 
 void blk_mq_sched_teardown(struct request_queue *q)
@@ -489,6 +461,40 @@ void blk_mq_sched_teardown(struct request_queue *q)
 		blk_mq_sched_free_tags(set, hctx, i);
 }
 
+int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned int i;
+	int ret;
+
+	if (!e) {
+		q->elevator = NULL;
+		return 0;
+	}
+
+	/*
+	 * Default to 256, since we don't split into sync/async like the
+	 * old code did. Additionally, this is a per-hw queue depth.
+	 */
+	q->nr_requests = 2 * BLKDEV_MAX_RQ;
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		ret = blk_mq_sched_alloc_tags(q, hctx, i);
+		if (ret)
+			goto err;
+	}
+
+	ret = e->ops.mq.init_sched(q, e);
+	if (ret)
+		goto err;
+
+	return 0;
+
+err:
+	blk_mq_sched_teardown(q);
+	return ret;
+}
+
 int blk_mq_sched_init(struct request_queue *q)
 {
 	int ret;
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -32,7 +32,7 @@ void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
 			struct list_head *rq_list,
 			struct request *(*get_rq)(struct blk_mq_hw_ctx *));
 
-int blk_mq_sched_setup(struct request_queue *q);
+int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
 void blk_mq_sched_teardown(struct request_queue *q);
 
 int blk_mq_sched_init(struct request_queue *q);
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -242,17 +242,12 @@ int elevator_init(struct request_queue *q, char *name)
 		}
 	}
 
-	if (e->uses_mq) {
-		err = blk_mq_sched_setup(q);
-		if (!err)
-			err = e->ops.mq.init_sched(q, e);
-	} else
+	if (e->uses_mq)
+		err = blk_mq_init_sched(q, e);
+	else
 		err = e->ops.sq.elevator_init_fn(q, e);
-	if (err) {
-		if (e->uses_mq)
-			blk_mq_sched_teardown(q);
+	if (err)
 		elevator_put(e);
-	}
 	return err;
 }
 EXPORT_SYMBOL(elevator_init);
@@ -987,21 +982,18 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	}
 
 	/* allocate, init and register new elevator */
-	if (new_e) {
-		if (new_e->uses_mq) {
-			err = blk_mq_sched_setup(q);
-			if (!err)
-				err = new_e->ops.mq.init_sched(q, new_e);
-		} else
-			err = new_e->ops.sq.elevator_init_fn(q, new_e);
-		if (err)
-			goto fail_init;
-
+	if (q->mq_ops)
+		err = blk_mq_init_sched(q, new_e);
+	else
+		err = new_e->ops.sq.elevator_init_fn(q, new_e);
+	if (err)
+		goto fail_init;
+	if (new_e) {
 		err = elv_register_queue(q);
 		if (err)
 			goto fail_register;
 	} else
 		q->elevator = NULL;
 
 	/* done, kill the old one and finish */
 	if (old) {