blk-mq: make the sysfs mq/ layout reflect current mappings
Currently blk-mq registers all the hardware queues in sysfs, regardless of whether it uses them (e.g. they have CPU mappings) or not. The unused hardware queues lack the cpux/ directories, and the other sysfs entries (like active, pending, etc) are all zeroes. Change this so that sysfs correctly reflects the current mappings of the hardware queues.

Signed-off-by: Jens Axboe <axboe@fb.com>
commit 67aec14ce8
parent 2230237500
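For context, the effect of the change can be checked from user space by listing the hardware-queue directories under a disk's mq/ sysfs directory: after this patch, only hardware queues that actually have CPU mappings show up there, each with its cpuN/ subdirectories. Below is a minimal, hypothetical sketch of such a check; it is not part of the patch, and the device name nvme0n1 is only an example.

/*
 * Hypothetical user-space sketch: list the entries under
 * /sys/block/<dev>/mq/.  Each numeric directory is one registered
 * hardware queue; the cpuN/ directories below it are its mapped
 * software queues.  The device name is an assumption.
 */
#include <dirent.h>
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/block/nvme0n1/mq";
	struct dirent *de;
	DIR *dir = opendir(path);

	if (!dir) {
		perror(path);
		return 1;
	}

	while ((de = readdir(dir)) != NULL) {
		if (de->d_name[0] == '.')
			continue;
		printf("%s/%s\n", path, de->d_name);
	}

	closedir(dir);
	return 0;
}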
@@ -327,6 +327,42 @@ static struct kobj_type blk_mq_hw_ktype = {
 	.release	= blk_mq_sysfs_release,
 };
 
+void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
+{
+	struct blk_mq_ctx *ctx;
+	int i;
+
+	if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
+		return;
+
+	hctx_for_each_ctx(hctx, ctx, i)
+		kobject_del(&ctx->kobj);
+
+	kobject_del(&hctx->kobj);
+}
+
+int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
+{
+	struct request_queue *q = hctx->queue;
+	struct blk_mq_ctx *ctx;
+	int i, ret;
+
+	if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
+		return 0;
+
+	ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
+	if (ret)
+		return ret;
+
+	hctx_for_each_ctx(hctx, ctx, i) {
+		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
 void blk_mq_unregister_disk(struct gendisk *disk)
 {
 	struct request_queue *q = disk->queue;
@@ -335,11 +371,11 @@ void blk_mq_unregister_disk(struct gendisk *disk)
 	int i, j;
 
 	queue_for_each_hw_ctx(q, hctx, i) {
-		hctx_for_each_ctx(hctx, ctx, j) {
-			kobject_del(&ctx->kobj);
+		blk_mq_unregister_hctx(hctx);
+
+		hctx_for_each_ctx(hctx, ctx, j)
 			kobject_put(&ctx->kobj);
-		}
-		kobject_del(&hctx->kobj);
+
 		kobject_put(&hctx->kobj);
 	}
 
@@ -350,15 +386,30 @@ void blk_mq_unregister_disk(struct gendisk *disk)
 	kobject_put(&disk_to_dev(disk)->kobj);
 }
 
+static void blk_mq_sysfs_init(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+	struct blk_mq_ctx *ctx;
+	int i, j;
+
+	kobject_init(&q->mq_kobj, &blk_mq_ktype);
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
+
+		hctx_for_each_ctx(hctx, ctx, j)
+			kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
+	}
+}
+
 int blk_mq_register_disk(struct gendisk *disk)
 {
 	struct device *dev = disk_to_dev(disk);
 	struct request_queue *q = disk->queue;
 	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx;
-	int ret, i, j;
+	int ret, i;
 
-	kobject_init(&q->mq_kobj, &blk_mq_ktype);
+	blk_mq_sysfs_init(q);
 
 	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
 	if (ret < 0)
@@ -367,20 +418,10 @@ int blk_mq_register_disk(struct gendisk *disk)
 	kobject_uevent(&q->mq_kobj, KOBJ_ADD);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
-		kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
-		ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", i);
+		hctx->flags |= BLK_MQ_F_SYSFS_UP;
+		ret = blk_mq_register_hctx(hctx);
 		if (ret)
 			break;
-
-		if (!hctx->nr_ctx)
-			continue;
-
-		hctx_for_each_ctx(hctx, ctx, j) {
-			kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
-			ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
-			if (ret)
-				break;
-		}
 	}
 
 	if (ret) {
@@ -390,3 +431,26 @@ int blk_mq_register_disk(struct gendisk *disk)
 
 	return 0;
 }
+
+void blk_mq_sysfs_unregister(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+	int i;
+
+	queue_for_each_hw_ctx(q, hctx, i)
+		blk_mq_unregister_hctx(hctx);
+}
+
+int blk_mq_sysfs_register(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+	int i, ret = 0;
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		ret = blk_mq_register_hctx(hctx);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
@@ -1924,6 +1924,8 @@ static void blk_mq_queue_reinit(struct request_queue *q)
 {
 	blk_mq_freeze_queue(q);
 
+	blk_mq_sysfs_unregister(q);
+
 	blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
 
 	/*
@@ -1934,6 +1936,8 @@ static void blk_mq_queue_reinit(struct request_queue *q)
 
 	blk_mq_map_swqueue(q);
 
+	blk_mq_sysfs_register(q);
+
 	blk_mq_unfreeze_queue(q);
 }
 
@@ -54,6 +54,12 @@ extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
 extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues);
 extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
 
+/*
+ * sysfs helpers
+ */
+extern int blk_mq_sysfs_register(struct request_queue *q);
+extern void blk_mq_sysfs_unregister(struct request_queue *q);
+
 /*
  * Basic implementation of sparser bitmap, allowing the user to spread
  * the bits over more cachelines.
@@ -130,6 +130,7 @@ enum {
 	BLK_MQ_F_SHOULD_SORT	= 1 << 1,
 	BLK_MQ_F_TAG_SHARED	= 1 << 2,
 	BLK_MQ_F_SG_MERGE	= 1 << 3,
+	BLK_MQ_F_SYSFS_UP	= 1 << 4,
 
 	BLK_MQ_S_STOPPED	= 0,
 	BLK_MQ_S_TAG_ACTIVE	= 1,