dm: add 'use_blk_mq' module param and expose in per-device ro sysfs attr
Request-based DM's blk-mq support defaults to off, but a user can easily change the default using the dm_mod.use_blk_mq module/boot option.

You can also check what mode a given request-based DM device is using with:

    cat /sys/block/dm-X/dm/use_blk_mq

This change enabled further cleanup and reduced work (e.g. the md->io_pool and md->rq_pool aren't created if using blk-mq).

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Parent: 022333427a
Commit: 17e149b8f7
--- a/Documentation/ABI/testing/sysfs-block-dm
+++ b/Documentation/ABI/testing/sysfs-block-dm
@@ -37,3 +37,11 @@ Description:	Allow control over how long a request that is a
 		accounting. This attribute is not applicable to
 		bio-based DM devices so it will only ever report 0 for
 		them.
+
+What:		/sys/block/dm-<num>/dm/use_blk_mq
+Date:		March 2015
+KernelVersion:	4.1
+Contact:	dm-devel@redhat.com
+Description:	Request-based Device-mapper blk-mq I/O path mode.
+		Contains the value 1 if the device is using blk-mq.
+		Otherwise it contains 0. Read-only attribute.
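As an aside, the new attribute is trivially consumable from userspace. The following minimal C sketch reads it the same way the commit message's cat command does; the device name dm-0 is a placeholder and error handling is abbreviated:

#include <stdio.h>

int main(void)
{
        char mode[4];
        FILE *f = fopen("/sys/block/dm-0/dm/use_blk_mq", "r");

        if (!f) {
                perror("use_blk_mq");
                return 1;
        }
        if (fgets(mode, sizeof(mode), f))
                printf("blk-mq mode: %s", mode);  /* prints "1\n" or "0\n" */
        fclose(f);
        return 0;
}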
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -196,6 +196,17 @@ config BLK_DEV_DM
 
 	  If unsure, say N.
 
+config DM_MQ_DEFAULT
+	bool "request-based DM: use blk-mq I/O path by default"
+	depends on BLK_DEV_DM
+	---help---
+	  This option enables the blk-mq based I/O path for request-based
+	  DM devices by default.  With the option the dm_mod.use_blk_mq
+	  module/boot option defaults to Y, without it to N, but it can
+	  still be overriden either way.
+
+	  If unsure say N.
+
 config DM_DEBUG
 	bool "Device mapper debugging support"
 	depends on BLK_DEV_DM
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -89,15 +89,24 @@ static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf)
 	return strlen(buf);
 }
 
+static ssize_t dm_attr_use_blk_mq_show(struct mapped_device *md, char *buf)
+{
+	sprintf(buf, "%d\n", dm_use_blk_mq(md));
+
+	return strlen(buf);
+}
+
 static DM_ATTR_RO(name);
 static DM_ATTR_RO(uuid);
 static DM_ATTR_RO(suspended);
+static DM_ATTR_RO(use_blk_mq);
 static DM_ATTR_RW(rq_based_seq_io_merge_deadline);
 
 static struct attribute *dm_attrs[] = {
 	&dm_attr_name.attr,
 	&dm_attr_uuid.attr,
 	&dm_attr_suspended.attr,
+	&dm_attr_use_blk_mq.attr,
 	&dm_attr_rq_based_seq_io_merge_deadline.attr,
 	NULL,
 };
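For context, DM_ATTR_RO is a pre-existing helper in dm-sysfs.c; it expands approximately as below (a from-memory sketch, not quoted from this commit). This is how dm_attr_use_blk_mq_show() gets bound to the read-only sysfs file:

/* Approximate expansion: binds a name to a show-only sysfs attribute, so
 * DM_ATTR_RO(use_blk_mq) wires dm_attr_use_blk_mq_show() to
 * /sys/block/dm-X/dm/use_blk_mq. */
#define DM_ATTR_RO(_name) \
struct dm_sysfs_attr dm_attr_##_name = \
	__ATTR(_name, S_IRUGO, dm_attr_##_name##_show, NULL)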
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -940,7 +940,7 @@ bool dm_table_mq_request_based(struct dm_table *t)
 	return dm_table_get_type(t) == DM_TYPE_MQ_REQUEST_BASED;
 }
 
-static int dm_table_alloc_md_mempools(struct dm_table *t)
+static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
 {
 	unsigned type = dm_table_get_type(t);
 	unsigned per_bio_data_size = 0;
@@ -958,7 +958,7 @@ static int dm_table_alloc_md_mempools(struct dm_table *t)
 		per_bio_data_size = max(per_bio_data_size, tgt->per_bio_data_size);
 	}
 
-	t->mempools = dm_alloc_md_mempools(type, t->integrity_supported, per_bio_data_size);
+	t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_bio_data_size);
 	if (!t->mempools)
 		return -ENOMEM;
 
@@ -1128,7 +1128,7 @@ int dm_table_complete(struct dm_table *t)
 		return r;
 	}
 
-	r = dm_table_alloc_md_mempools(t);
+	r = dm_table_alloc_md_mempools(t, t->md);
 	if (r)
 		DMERR("unable to allocate mempools");
 
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -228,8 +228,20 @@ struct mapped_device {
 
 	/* for blk-mq request-based DM support */
 	struct blk_mq_tag_set tag_set;
+	bool use_blk_mq;
 };
 
+#ifdef CONFIG_DM_MQ_DEFAULT
+static bool use_blk_mq = true;
+#else
+static bool use_blk_mq = false;
+#endif
+
+bool dm_use_blk_mq(struct mapped_device *md)
+{
+	return md->use_blk_mq;
+}
+
 /*
  * For mempools pre-allocation at the table loading time.
  */
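A note on the #ifdef pair above: the kernel's IS_ENABLED() helper could express the same default in one line. This alternative spelling is purely illustrative, not what the patch does:

/* Hypothetical equivalent of the CONFIG_DM_MQ_DEFAULT default above: */
static bool use_blk_mq = IS_ENABLED(CONFIG_DM_MQ_DEFAULT);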
@@ -2034,7 +2046,7 @@ ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
 {
 	unsigned deadline;
 
-	if (!dm_request_based(md))
+	if (!dm_request_based(md) || md->use_blk_mq)
 		return count;
 
 	if (kstrtouint(buf, 10, &deadline))
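The effect of the new guard is that the merge deadline remains tunable only on the old request path; on a blk-mq device a write is accepted but ignored. A hypothetical userspace probe (dm-0 is a placeholder device) would observe this:

#include <stdio.h>

int main(void)
{
        const char *path = "/sys/block/dm-0/dm/rq_based_seq_io_merge_deadline";
        char val[16] = "";
        FILE *f = fopen(path, "w");

        if (f) {
                fputs("100", f);  /* silently ignored when use_blk_mq is set */
                fclose(f);
        }
        f = fopen(path, "r");
        if (f) {
                if (fgets(val, sizeof(val), f))
                        printf("deadline now: %s", val);  /* stays "0" on a blk-mq device */
                fclose(f);
        }
        return 0;
}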
@@ -2222,6 +2234,7 @@ static void dm_init_md_queue(struct mapped_device *md)
 
 static void dm_init_old_md_queue(struct mapped_device *md)
 {
+	md->use_blk_mq = false;
 	dm_init_md_queue(md);
 
 	/*
@@ -2263,6 +2276,7 @@ static struct mapped_device *alloc_dev(int minor)
 	if (r < 0)
 		goto bad_io_barrier;
 
+	md->use_blk_mq = use_blk_mq;
 	md->type = DM_TYPE_NONE;
 	mutex_init(&md->suspend_lock);
 	mutex_init(&md->type_lock);
@@ -2349,7 +2363,6 @@ static void unlock_fs(struct mapped_device *md);
 static void free_dev(struct mapped_device *md)
 {
 	int minor = MINOR(disk_devt(md->disk));
-	bool using_blk_mq = !!md->queue->mq_ops;
 
 	unlock_fs(md);
 	destroy_workqueue(md->wq);
@@ -2375,7 +2388,7 @@ static void free_dev(struct mapped_device *md)
 	del_gendisk(md->disk);
 	put_disk(md->disk);
 	blk_cleanup_queue(md->queue);
-	if (using_blk_mq)
+	if (md->use_blk_mq)
 		blk_mq_free_tag_set(&md->tag_set);
 	bdput(md->bdev);
 	free_minor(minor);
@@ -2388,7 +2401,7 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 {
 	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
 
-	if (md->io_pool && md->bs) {
+	if (md->bs) {
 		/* The md already has necessary mempools. */
 		if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
 			/*
@@ -2798,13 +2811,21 @@ out_tag_set:
 	return err;
 }
 
+static unsigned filter_md_type(unsigned type, struct mapped_device *md)
+{
+	if (type == DM_TYPE_BIO_BASED)
+		return type;
+
+	return !md->use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED;
+}
+
 /*
  * Setup the DM device's queue based on md's type
  */
 int dm_setup_md_queue(struct mapped_device *md)
 {
 	int r;
-	unsigned md_type = dm_get_md_type(md);
+	unsigned md_type = filter_md_type(dm_get_md_type(md), md);
 
 	switch (md_type) {
 	case DM_TYPE_REQUEST_BASED:
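filter_md_type() leaves bio-based tables untouched and collapses both request-based types to whichever request path the device's use_blk_mq flag selects. A stand-alone restatement of that mapping (illustrative enum values, not the kernel's definitions) makes the behavior easy to verify:

#include <assert.h>
#include <stdbool.h>

/* Illustrative stand-ins for the kernel's DM_TYPE_* constants. */
enum { DM_TYPE_NONE, DM_TYPE_BIO_BASED,
       DM_TYPE_REQUEST_BASED, DM_TYPE_MQ_REQUEST_BASED };

/* Userspace restatement of filter_md_type(); md->use_blk_mq becomes a bool. */
static unsigned filter_md_type(unsigned type, bool use_blk_mq)
{
        if (type == DM_TYPE_BIO_BASED)
                return type;  /* bio-based is never rewritten */
        return use_blk_mq ? DM_TYPE_MQ_REQUEST_BASED : DM_TYPE_REQUEST_BASED;
}

int main(void)
{
        /* bio-based keeps its type regardless of the flag */
        assert(filter_md_type(DM_TYPE_BIO_BASED, true) == DM_TYPE_BIO_BASED);
        /* both request-based types collapse to the mode the flag selects */
        assert(filter_md_type(DM_TYPE_REQUEST_BASED, true) == DM_TYPE_MQ_REQUEST_BASED);
        assert(filter_md_type(DM_TYPE_MQ_REQUEST_BASED, false) == DM_TYPE_REQUEST_BASED);
        return 0;
}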
@@ -3509,16 +3530,19 @@ int dm_noflush_suspending(struct dm_target *ti)
 }
 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
 
-struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
+struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
+					    unsigned integrity, unsigned per_bio_data_size)
 {
 	struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
-	struct kmem_cache *cachep;
+	struct kmem_cache *cachep = NULL;
 	unsigned int pool_size = 0;
 	unsigned int front_pad;
 
 	if (!pools)
 		return NULL;
 
+	type = filter_md_type(type, md);
+
 	switch (type) {
 	case DM_TYPE_BIO_BASED:
 		cachep = _io_cache;
@@ -3526,13 +3550,13 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u
 		front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
 		break;
 	case DM_TYPE_REQUEST_BASED:
+		cachep = _rq_tio_cache;
 		pool_size = dm_get_reserved_rq_based_ios();
 		pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
 		if (!pools->rq_pool)
 			goto out;
 		/* fall through to setup remaining rq-based pools */
 	case DM_TYPE_MQ_REQUEST_BASED:
-		cachep = _rq_tio_cache;
 		if (!pool_size)
 			pool_size = dm_get_reserved_rq_based_ios();
 		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
@@ -3540,12 +3564,14 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u
 		WARN_ON(per_bio_data_size != 0);
 		break;
 	default:
-		goto out;
+		BUG();
 	}
 
-	pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
-	if (!pools->io_pool)
-		goto out;
+	if (cachep) {
+		pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
+		if (!pools->io_pool)
+			goto out;
+	}
 
 	pools->bs = bioset_create_nobvec(pool_size, front_pad);
 	if (!pools->bs)
@@ -3602,6 +3628,9 @@ MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
 module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
 
+module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");
+
 MODULE_DESCRIPTION(DM_NAME " driver");
 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
 MODULE_LICENSE("GPL");
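Because the parameter is registered with S_IWUSR, the default can also be flipped at runtime through /sys/module/dm_mod/parameters/use_blk_mq. Note that alloc_dev() samples the flag at device-creation time, so only devices created afterwards are affected. A minimal sketch (assumes dm_mod is loaded and the caller has write permission):

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/module/dm_mod/parameters/use_blk_mq", "w");

        if (!f) {
                perror("use_blk_mq");
                return 1;
        }
        fputs("Y", f);  /* kernel bool params accept Y/N/1/0 */
        fclose(f);
        return 0;
}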
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -211,6 +211,8 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
 void dm_internal_suspend(struct mapped_device *md);
 void dm_internal_resume(struct mapped_device *md);
 
+bool dm_use_blk_mq(struct mapped_device *md);
+
 int dm_io_init(void);
 void dm_io_exit(void);
 
@@ -220,7 +222,8 @@ void dm_kcopyd_exit(void);
 /*
  * Mempool operations
  */
-struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size);
+struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
+					    unsigned integrity, unsigned per_bio_data_size);
 void dm_free_md_mempools(struct dm_md_mempools *pools);
 
 /*