block, cfq: move icq cache management to block core
Let elevators set ->icq_size and ->icq_align in elevator_type, and have elv_register() and elv_unregister() create and destroy the icq kmem_cache, respectively.

* elv_register() can now return failure. All callers updated.

* icq caches are automatically named "ELVNAME_io_cq".

* cfq_slab_setup/kill() are collapsed into cfq_init/exit().

* While at it, minor indentation change for iosched_cfq.elevator_name for consistency.

This will help move icq management to the block core. This doesn't introduce any functional change.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 3d3c2379fe, parent 47fdd4ca96
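For orientation, here is a minimal sketch (not part of the commit) of how an elevator would use the new interface: it embeds struct io_cq at the start of its per-io-context structure, advertises ->icq_size/->icq_align, and propagates elv_register()'s return value from its module init. The "foo" elevator name and foo_io_cq structure are hypothetical; the elevator_type fields and the elv_register()/elv_unregister() behaviour are the ones introduced by this patch.

/*
 * Illustrative sketch only, not part of the commit: a hypothetical
 * "foo" elevator registering under the new interface.
 */
#include <linux/module.h>
#include <linux/elevator.h>
#include <linux/iocontext.h>

struct foo_io_cq {
	struct io_cq	icq;		/* embedded io_cq; cfq keeps it as the first member */
	int		private_state;	/* elevator-private per-(task, queue) data */
};

static struct elevator_type iosched_foo = {
	.ops = {
		/* .elevator_*_fn callbacks omitted in this sketch */
	},
	/* block core sizes and aligns the icq cache from these */
	.icq_size	= sizeof(struct foo_io_cq),
	.icq_align	= __alignof__(struct foo_io_cq),
	.elevator_name	= "foo",
	.elevator_owner	= THIS_MODULE,
};

static int __init foo_init(void)
{
	/*
	 * elv_register() can now fail (duplicate name, bad icq sizes,
	 * or cache allocation failure), so pass its result up.  On
	 * success the cache it created is reachable via
	 * iosched_foo.icq_cache, named "foo_io_cq".
	 */
	return elv_register(&iosched_foo);
}

static void __exit foo_exit(void)
{
	/* elv_unregister() waits for RCU and destroys ->icq_cache */
	elv_unregister(&iosched_foo);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");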
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3914,34 +3914,6 @@ static void *cfq_init_queue(struct request_queue *q)
 	return cfqd;
 }
 
-static void cfq_slab_kill(void)
-{
-	/*
-	 * Caller already ensured that pending RCU callbacks are completed,
-	 * so we should have no busy allocations at this point.
-	 */
-	if (cfq_pool)
-		kmem_cache_destroy(cfq_pool);
-	if (cfq_icq_pool)
-		kmem_cache_destroy(cfq_icq_pool);
-}
-
-static int __init cfq_slab_setup(void)
-{
-	cfq_pool = KMEM_CACHE(cfq_queue, 0);
-	if (!cfq_pool)
-		goto fail;
-
-	cfq_icq_pool = KMEM_CACHE(cfq_io_cq, 0);
-	if (!cfq_icq_pool)
-		goto fail;
-
-	return 0;
-fail:
-	cfq_slab_kill();
-	return -ENOMEM;
-}
-
 /*
  * sysfs parts below -->
  */
@@ -4053,8 +4025,10 @@ static struct elevator_type iosched_cfq = {
 		.elevator_init_fn =	cfq_init_queue,
 		.elevator_exit_fn =	cfq_exit_queue,
 	},
+	.icq_size	=	sizeof(struct cfq_io_cq),
+	.icq_align	=	__alignof__(struct cfq_io_cq),
 	.elevator_attrs =	cfq_attrs,
-	.elevator_name =	"cfq",
+	.elevator_name	=	"cfq",
 	.elevator_owner =	THIS_MODULE,
 };
 
@@ -4072,6 +4046,8 @@ static struct blkio_policy_type blkio_policy_cfq;
 
 static int __init cfq_init(void)
 {
+	int ret;
+
 	/*
 	 * could be 0 on HZ < 1000 setups
 	 */
@@ -4086,10 +4062,17 @@ static int __init cfq_init(void)
 #else
 	cfq_group_idle = 0;
 #endif
-	if (cfq_slab_setup())
+	cfq_pool = KMEM_CACHE(cfq_queue, 0);
+	if (!cfq_pool)
 		return -ENOMEM;
 
-	elv_register(&iosched_cfq);
+	ret = elv_register(&iosched_cfq);
+	if (ret) {
+		kmem_cache_destroy(cfq_pool);
+		return ret;
+	}
+	cfq_icq_pool = iosched_cfq.icq_cache;
+
 	blkio_policy_register(&blkio_policy_cfq);
 
 	return 0;
@@ -4099,8 +4082,7 @@ static void __exit cfq_exit(void)
 {
 	blkio_policy_unregister(&blkio_policy_cfq);
 	elv_unregister(&iosched_cfq);
-	rcu_barrier();	/* make sure all cic RCU frees are complete */
-	cfq_slab_kill();
+	kmem_cache_destroy(cfq_pool);
 }
 
 module_init(cfq_init);
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -448,9 +448,7 @@ static struct elevator_type iosched_deadline = {
 
 static int __init deadline_init(void)
 {
-	elv_register(&iosched_deadline);
-
-	return 0;
+	return elv_register(&iosched_deadline);
 }
 
 static void __exit deadline_exit(void)
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -886,15 +886,36 @@ void elv_unregister_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(elv_unregister_queue);
 
-void elv_register(struct elevator_type *e)
+int elv_register(struct elevator_type *e)
 {
 	char *def = "";
 
+	/* create icq_cache if requested */
+	if (e->icq_size) {
+		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
+		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
+			return -EINVAL;
+
+		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
+			 "%s_io_cq", e->elevator_name);
+		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
+						 e->icq_align, 0, NULL);
+		if (!e->icq_cache)
+			return -ENOMEM;
+	}
+
+	/* register, don't allow duplicate names */
 	spin_lock(&elv_list_lock);
-	BUG_ON(elevator_find(e->elevator_name));
+	if (elevator_find(e->elevator_name)) {
+		spin_unlock(&elv_list_lock);
+		if (e->icq_cache)
+			kmem_cache_destroy(e->icq_cache);
+		return -EBUSY;
+	}
 	list_add_tail(&e->list, &elv_list);
 	spin_unlock(&elv_list_lock);
 
+	/* print pretty message */
 	if (!strcmp(e->elevator_name, chosen_elevator) ||
 	    (!*chosen_elevator &&
 	     !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
@@ -902,14 +923,26 @@ void elv_register(struct elevator_type *e)
 
 	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
 								def);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(elv_register);
 
 void elv_unregister(struct elevator_type *e)
 {
+	/* unregister */
 	spin_lock(&elv_list_lock);
 	list_del_init(&e->list);
 	spin_unlock(&elv_list_lock);
+
+	/*
+	 * Destroy icq_cache if it exists. icq's are RCU managed. Make
+	 * sure all RCU operations are complete before proceeding.
+	 */
+	if (e->icq_cache) {
+		rcu_barrier();
+		kmem_cache_destroy(e->icq_cache);
+		e->icq_cache = NULL;
+	}
 }
 EXPORT_SYMBOL_GPL(elv_unregister);
 
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -94,9 +94,7 @@ static struct elevator_type elevator_noop = {
 
 static int __init noop_init(void)
 {
-	elv_register(&elevator_noop);
-
-	return 0;
+	return elv_register(&elevator_noop);
 }
 
 static void __exit noop_exit(void)
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -78,10 +78,19 @@ struct elv_fs_entry {
  */
 struct elevator_type
 {
+	/* managed by elevator core */
+	struct kmem_cache *icq_cache;
+
+	/* fields provided by elevator implementation */
 	struct elevator_ops ops;
+	size_t icq_size;
+	size_t icq_align;
 	struct elv_fs_entry *elevator_attrs;
 	char elevator_name[ELV_NAME_MAX];
 	struct module *elevator_owner;
+
+	/* managed by elevator core */
+	char icq_cache_name[ELV_NAME_MAX + 5];	/* elvname + "_io_cq" */
 	struct list_head list;
 };
 
@@ -127,7 +136,7 @@ extern void elv_drain_elevator(struct request_queue *);
 /*
  * io scheduler registration
  */
-extern void elv_register(struct elevator_type *);
+extern int elv_register(struct elevator_type *);
 extern void elv_unregister(struct elevator_type *);
 
 /*