null_blk: refactor init and init error code paths

Simplify the initialization logic across the driver's three block-layer queue modes.

- The queue initialization is split into two parts, so the per-queue
  setup code is shared by the sq-, bio- and mq-based modes (a condensed
  model follows this list).
- Set the submit_queues default value to 0 and always resolve it at
  module init time (a small model of that resolution follows the diff).
- Simplify the init error code paths: helpers now return negative
  errnos that callers propagate, instead of 1.
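
The shape of the split, as a minimal standalone C model: the names mirror
the driver's setup_queues()/null_init_queue()/init_driver_queues(), but the
types and signatures are simplified userspace stand-ins, not the driver
code. Part one only allocates; part two fills per-queue state through one
shared helper, which the mq mode instead reaches from its per-hctx init
callback.

#include <errno.h>
#include <stdlib.h>

struct queue { int depth; };

struct dev {
	struct queue *queues;
	int nr_queues;
	int queue_depth;
};

/* Part one: allocation only, identical for the mq, sq and bio modes. */
static int setup_queues(struct dev *d, int nr, int depth)
{
	d->queues = calloc(nr, sizeof(*d->queues));
	if (!d->queues)
		return -ENOMEM;	/* negative errno, no longer "return 1" */
	d->nr_queues = 0;
	d->queue_depth = depth;
	return 0;
}

/* Part two: per-queue state lives in one helper (null_init_queue() in
 * the driver); mq mode calls it from its init_hctx callback. */
static void init_queue(struct dev *d, struct queue *q)
{
	q->depth = d->queue_depth;
}

/* The sq/bio flavor of part two (init_driver_queues() in the driver). */
static int init_driver_queues(struct dev *d, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		init_queue(d, &d->queues[i]);
		d->nr_queues++;
	}
	return 0;
}

int main(void)
{
	struct dev d = { 0 };

	if (setup_queues(&d, 4, 64))	/* part one */
		return 1;
	init_driver_queues(&d, 4);	/* part two, sq/bio style */
	free(d.queues);
	return 0;
}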

Signed-off-by: Matias Bjorling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Matias Bjorling, 2013-12-18 13:41:43 +01:00 (committed by Jens Axboe)
parent 12f8f4fc03
commit 2d263a7856


@@ -65,7 +65,7 @@ enum {
 	NULL_Q_MQ	= 2,
 };
 
-static int submit_queues = 1;
+static int submit_queues;
 module_param(submit_queues, int, S_IRUGO);
 MODULE_PARM_DESC(submit_queues, "Number of submission queues");
@@ -355,16 +355,24 @@ static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
 	kfree(hctx);
 }
 
+static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
+{
+	BUG_ON(!nullb);
+	BUG_ON(!nq);
+
+	init_waitqueue_head(&nq->wait);
+	nq->queue_depth = nullb->queue_depth;
+}
+
 static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 			  unsigned int index)
 {
 	struct nullb *nullb = data;
 	struct nullb_queue *nq = &nullb->queues[index];
 
-	init_waitqueue_head(&nq->wait);
-	nq->queue_depth = nullb->queue_depth;
-	nullb->nr_queues++;
 	hctx->driver_data = nq;
+	null_init_queue(nullb, nq);
+	nullb->nr_queues++;
 
 	return 0;
 }
@@ -417,13 +425,13 @@ static int setup_commands(struct nullb_queue *nq)
 
 	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
 	if (!nq->cmds)
-		return 1;
+		return -ENOMEM;
 
 	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
 	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
 	if (!nq->tag_map) {
 		kfree(nq->cmds);
-		return 1;
+		return -ENOMEM;
 	}
 
 	for (i = 0; i < nq->queue_depth; i++) {
@@ -454,33 +462,37 @@ static void cleanup_queues(struct nullb *nullb)
 
 static int setup_queues(struct nullb *nullb)
 {
-	struct nullb_queue *nq;
-	int i;
-
-	nullb->queues = kzalloc(submit_queues * sizeof(*nq), GFP_KERNEL);
+	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
+								GFP_KERNEL);
 	if (!nullb->queues)
-		return 1;
+		return -ENOMEM;
 
 	nullb->nr_queues = 0;
 	nullb->queue_depth = hw_queue_depth;
 
-	if (queue_mode == NULL_Q_MQ)
-		return 0;
+	return 0;
+}
+
+static int init_driver_queues(struct nullb *nullb)
+{
+	struct nullb_queue *nq;
+	int i, ret = 0;
 
 	for (i = 0; i < submit_queues; i++) {
 		nq = &nullb->queues[i];
-		init_waitqueue_head(&nq->wait);
-		nq->queue_depth = hw_queue_depth;
-		if (setup_commands(nq))
-			break;
+
+		null_init_queue(nullb, nq);
+		ret = setup_commands(nq);
+		if (ret)
+			goto err_queue;
 		nullb->nr_queues++;
 	}
 
-	if (i == submit_queues)
-		return 0;
+	return 0;
 
+err_queue:
 	cleanup_queues(nullb);
-	return 1;
+	return ret;
 }
 
 static int null_add_dev(void)
@@ -495,9 +507,6 @@ static int null_add_dev(void)
 
 	spin_lock_init(&nullb->lock);
 
-	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
-		submit_queues = nr_online_nodes;
-
 	if (setup_queues(nullb))
 		goto err;
 
@@ -518,11 +527,13 @@ static int null_add_dev(void)
 	} else if (queue_mode == NULL_Q_BIO) {
 		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
 		blk_queue_make_request(nullb->q, null_queue_bio);
+		init_driver_queues(nullb);
 	} else {
 		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
 		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
 		if (nullb->q)
 			blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
+		init_driver_queues(nullb);
 	}
 
 	if (!nullb->q)
@@ -579,7 +590,9 @@ static int __init null_init(void)
 	}
 #endif
 
-	if (submit_queues > nr_cpu_ids)
+	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
+		submit_queues = nr_online_nodes;
+	else if (submit_queues > nr_cpu_ids)
 		submit_queues = nr_cpu_ids;
 	else if (!submit_queues)
 		submit_queues = 1;
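
For reference, the decision the null_init() hunk above encodes, as a small
standalone model: the flag collapses the driver's queue_mode == NULL_Q_MQ
&& use_per_node_hctx test, and the two counts stand in for nr_online_nodes
and nr_cpu_ids. This is a hypothetical sketch of the decision, not driver
code.

/* Model of how null_init() now picks submit_queues at module load. */
static int resolve_submit_queues(int requested, int per_node_hctx,
				 int nodes, int cpus)
{
	if (per_node_hctx)
		return nodes;		/* one submission queue per NUMA node */
	if (requested > cpus)
		return cpus;		/* clamp an oversized module parameter */
	if (!requested)
		return 1;		/* the parameter now defaults to 0 */
	return requested;
}

Moving this decision from null_add_dev() into null_init() fixes the value
once at module load instead of rewriting submit_queues every time a device
is added.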