block: make ioc get/put interface more conventional and fix race on allocation
Ignoring copy_io() during fork, io_context can be allocated from two places - current_io_context() and set_task_ioprio(). The former is always called from the local task while the latter can be called from a different task. The synchronization between them is peculiar and dubious.

* current_io_context() doesn't grab task_lock() and assumes that if it saw %NULL ->io_context, it would stay that way until allocation and assignment is complete. It has smp_wmb() between alloc/init and assignment.

* set_task_ioprio() grabs task_lock() for assignment and does smp_read_barrier_depends() between "ioc = task->io_context" and "if (ioc)". Unfortunately, this doesn't achieve anything - the latter is not a dependent load of the former. I.e., if ioc itself were being dereferenced as "ioc->xxx", it would mean something (not sure what, though), but as the code currently stands, the dependent read barrier is a noop.

As only one of the two test-assignment sequences is task_lock() protected, the task_lock() can't do much about the race between the two. Nothing prevents current_io_context() and set_task_ioprio() from each allocating its own ioc for the same task and overwriting the other's. Also, set_task_ioprio() can race with an exiting task and create a new ioc after exit_io_context() is finished.

ioc get/put doesn't have any reason to be complex. The only hot path is accessing the existing ioc of %current, which is simple to achieve given that ->io_context is never destroyed as long as the task is alive. All other paths can happily go through task_lock() like all other task sub-structures without impacting anything.

This patch updates ioc get/put so that it becomes more conventional.

* alloc_io_context() is replaced with get_task_io_context(). This is the only interface which can acquire access to the ioc of another task. On return, the caller has an explicit reference to the object which should be put using put_io_context() afterwards.

* The functionality of current_io_context() remains the same but, when creating a new ioc, it shares the code path with get_task_io_context() and always goes through task_lock().

* get_io_context() now means incrementing the ref on an ioc which the caller already has access to (be that an explicit refcnt or the implicit %current one).

* PF_EXITING inhibits creation of a new io_context, and once exit_io_context() is finished, it's guaranteed that both ioc acquisition functions return %NULL.

* All users are updated. Most are trivial, but the smp_read_barrier_depends() removal from cfq_get_io_context() needs a bit of explanation. I suppose the original intention was to ensure ioc->ioprio is visible when set_task_ioprio() allocates a new io_context and installs it; however, this wouldn't have worked because set_task_ioprio() doesn't have a wmb between init and install. There are other problems with this which will be fixed in another patch.

* While at it, use NUMA_NO_NODE instead of -1 for wildcard node specification.

-v2: Vivek spotted contamination from a debug patch. Removed.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 6e736be7f2
parent 42ec57a8f6
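
Before the diff itself, a minimal sketch of the caller pattern the new interface yields. The helper example_mark_ioprio_changed() and its body are hypothetical, modeled on the patched set_task_ioprio() and blkiocg_attach_task() below:

    #include <linux/iocontext.h>
    #include <linux/sched.h>

    /* Hypothetical illustration, not part of the patch.
     * get_task_io_context() is the only way to reach another task's
     * ioc; it returns with the refcount incremented (or NULL if the
     * task is exiting or allocation failed), so every successful get
     * must be paired with put_io_context(). */
    static void example_mark_ioprio_changed(struct task_struct *task)
    {
            struct io_context *ioc;

            ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
            if (!ioc)
                    return;

            ioc->ioprio_changed = 1;        /* use the ioc... */
            put_io_context(ioc);            /* ...then drop the ref */
    }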
block/blk-cgroup.c
@@ -1645,11 +1645,12 @@ static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
         struct io_context *ioc;
 
-        task_lock(tsk);
-        ioc = tsk->io_context;
-        if (ioc)
+        /* we don't lose anything even if ioc allocation fails */
+        ioc = get_task_io_context(tsk, GFP_ATOMIC, NUMA_NO_NODE);
+        if (ioc) {
                 ioc->cgroup_changed = 1;
-        task_unlock(tsk);
+                put_io_context(ioc);
+        }
 }
 
 void blkio_policy_register(struct blkio_policy_type *blkiop)
block/blk-ioc.c
@@ -16,6 +16,19 @@
  */
 static struct kmem_cache *iocontext_cachep;
 
+/**
+ * get_io_context - increment reference count to io_context
+ * @ioc: io_context to get
+ *
+ * Increment reference count to @ioc.
+ */
+void get_io_context(struct io_context *ioc)
+{
+        BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
+        atomic_long_inc(&ioc->refcount);
+}
+EXPORT_SYMBOL(get_io_context);
+
 static void cfq_dtor(struct io_context *ioc)
 {
         if (!hlist_empty(&ioc->cic_list)) {
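
The BUG_ON() encodes the interface change from the commit message: get_io_context() is now only for incrementing the ref on an ioc the caller already has access to, so a zero refcount at that point would indicate a use-after-free.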
@@ -71,6 +84,9 @@ void exit_io_context(struct task_struct *task)
 {
         struct io_context *ioc;
 
+        /* PF_EXITING prevents new io_context from being attached to @task */
+        WARN_ON_ONCE(!(current->flags & PF_EXITING));
+
         task_lock(task);
         ioc = task->io_context;
         task->io_context = NULL;
@@ -82,7 +98,9 @@ void exit_io_context(struct task_struct *task)
         put_io_context(ioc);
 }
 
-struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
+static struct io_context *create_task_io_context(struct task_struct *task,
+                                                 gfp_t gfp_flags, int node,
+                                                 bool take_ref)
 {
         struct io_context *ioc;
 
@@ -98,6 +116,20 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
         INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
         INIT_HLIST_HEAD(&ioc->cic_list);
 
+        /* try to install, somebody might already have beaten us to it */
+        task_lock(task);
+
+        if (!task->io_context && !(task->flags & PF_EXITING)) {
+                task->io_context = ioc;
+        } else {
+                kmem_cache_free(iocontext_cachep, ioc);
+                ioc = task->io_context;
+        }
+
+        if (ioc && take_ref)
+                get_io_context(ioc);
+
+        task_unlock(task);
         return ioc;
 }
 
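
To make the install-or-discard logic above concrete, an interleaving sketch (illustration only, not code from the patch) of the double-allocation race it resolves:

    task A (current_io_context)      task B (set_task_ioprio)
    ---------------------------      ------------------------
    ioc_a = kmem_cache_alloc()
                                     ioc_b = kmem_cache_alloc()
    task_lock(task)
    ->io_context is NULL: install ioc_a
    task_unlock(task)
                                     task_lock(task)
                                     ->io_context already set:
                                       free ioc_b, take ioc_a
                                     task_unlock(task)

Both callers end up with the same ioc; nothing is leaked or overwritten, unlike the old unlocked test-and-set sequences.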
@@ -114,46 +146,47 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
  */
 struct io_context *current_io_context(gfp_t gfp_flags, int node)
 {
-        struct task_struct *tsk = current;
-        struct io_context *ret;
-
-        ret = tsk->io_context;
-        if (likely(ret))
-                return ret;
+        might_sleep_if(gfp_flags & __GFP_WAIT);
 
-        ret = alloc_io_context(gfp_flags, node);
-        if (ret) {
-                /* make sure set_task_ioprio() sees the settings above */
-                smp_wmb();
-                tsk->io_context = ret;
-        }
+        if (current->io_context)
+                return current->io_context;
 
-        return ret;
+        return create_task_io_context(current, gfp_flags, node, false);
 }
+EXPORT_SYMBOL(current_io_context);
 
-/*
- * If the current task has no IO context then create one and initialise it.
- * If it does have a context, take a ref on it.
+/**
+ * get_task_io_context - get io_context of a task
+ * @task: task of interest
+ * @gfp_flags: allocation flags, used if allocation is necessary
+ * @node: allocation node, used if allocation is necessary
  *
- * This is always called in the context of the task which submitted the I/O.
+ * Return io_context of @task.  If it doesn't exist, it is created with
+ * @gfp_flags and @node.  The returned io_context has its reference count
+ * incremented.
+ *
+ * This function always goes through task_lock() and it's better to use
+ * current_io_context() + get_io_context() for %current.
  */
-struct io_context *get_io_context(gfp_t gfp_flags, int node)
+struct io_context *get_task_io_context(struct task_struct *task,
+                                       gfp_t gfp_flags, int node)
 {
-        struct io_context *ioc = NULL;
+        struct io_context *ioc;
 
-        /*
-         * Check for unlikely race with exiting task. ioc ref count is
-         * zero when ioc is being detached.
-         */
-        do {
-                ioc = current_io_context(gfp_flags, node);
-                if (unlikely(!ioc))
-                        break;
-        } while (!atomic_long_inc_not_zero(&ioc->refcount));
+        might_sleep_if(gfp_flags & __GFP_WAIT);
 
-        return ioc;
+        task_lock(task);
+        ioc = task->io_context;
+        if (likely(ioc)) {
+                get_io_context(ioc);
+                task_unlock(task);
+                return ioc;
+        }
+        task_unlock(task);
+
+        return create_task_io_context(task, gfp_flags, node, true);
 }
-EXPORT_SYMBOL(get_io_context);
+EXPORT_SYMBOL(get_task_io_context);
 
 static int __init blk_ioc_init(void)
 {
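
Per the kerneldoc above, callers that only deal with %current can skip task_lock(); a hypothetical sketch of that fast path (example_pin_current_ioc() is not in the patch; the patched cfq_get_io_context() below does exactly this):

    /* Hypothetical illustration: pin %current's ioc without task_lock().
     * current_io_context() returns the ioc without taking a reference -
     * it stays alive as long as the task does - so get_io_context()
     * pins it for use beyond the immediate call. */
    static struct io_context *example_pin_current_ioc(gfp_t gfp_mask, int node)
    {
            struct io_context *ioc;

            ioc = current_io_context(gfp_mask, node);
            if (!ioc)
                    return NULL;    /* exiting task or allocation failure */

            get_io_context(ioc);    /* drop later with put_io_context() */
            return ioc;
    }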
block/blk.h
@@ -122,6 +122,7 @@ static inline int blk_should_fake_timeout(struct request_queue *q)
 }
 #endif
 
+void get_io_context(struct io_context *ioc);
 struct io_context *current_io_context(gfp_t gfp_flags, int node);
 
 int ll_back_merge_fn(struct request_queue *q, struct request *req,
block/cfq-iosched.c
@@ -14,6 +14,7 @@
 #include <linux/rbtree.h>
 #include <linux/ioprio.h>
 #include <linux/blktrace_api.h>
+#include "blk.h"
 #include "cfq.h"
 
 /*
@@ -3194,13 +3195,13 @@ static struct cfq_io_context *
 cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
         struct io_context *ioc = NULL;
-        struct cfq_io_context *cic;
+        struct cfq_io_context *cic = NULL;
 
         might_sleep_if(gfp_mask & __GFP_WAIT);
 
-        ioc = get_io_context(gfp_mask, cfqd->queue->node);
+        ioc = current_io_context(gfp_mask, cfqd->queue->node);
         if (!ioc)
-                return NULL;
+                goto err;
 
         cic = cfq_cic_lookup(cfqd, ioc);
         if (cic)
@@ -3211,10 +3212,10 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
                 goto err;
 
         if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
-                goto err_free;
+                goto err;
 
 out:
-        smp_read_barrier_depends();
+        get_io_context(ioc);
 
         if (unlikely(ioc->ioprio_changed))
                 cfq_ioc_set_ioprio(ioc);
@@ -3223,10 +3224,9 @@ out:
         cfq_ioc_set_cgroup(ioc);
 #endif
         return cic;
-err_free:
-        cfq_cic_free(cic);
 err:
-        put_io_context(ioc);
+        if (cic)
+                cfq_cic_free(cic);
         return NULL;
 }
 
fs/ioprio.c
@@ -48,28 +48,13 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
         if (err)
                 return err;
 
-        task_lock(task);
-        do {
-                ioc = task->io_context;
-                /* see wmb() in current_io_context() */
-                smp_read_barrier_depends();
-                if (ioc)
-                        break;
-
-                ioc = alloc_io_context(GFP_ATOMIC, -1);
-                if (!ioc) {
-                        err = -ENOMEM;
-                        break;
-                }
-                task->io_context = ioc;
-        } while (1);
-
-        if (!err) {
+        ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
+        if (ioc) {
                 ioc->ioprio = ioprio;
                 ioc->ioprio_changed = 1;
+                put_io_context(ioc);
         }
 
-        task_unlock(task);
         return err;
 }
 EXPORT_SYMBOL_GPL(set_task_ioprio);
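
With get_task_io_context() handling the locking and the exit race, set_task_ioprio() loses its task_lock() section, the ineffective read barrier, and the open-coded allocation loop; it also can no longer install a fresh ioc on a task that has already passed through exit_io_context().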
include/linux/iocontext.h
@@ -78,8 +78,8 @@ struct task_struct;
 #ifdef CONFIG_BLOCK
 void put_io_context(struct io_context *ioc);
 void exit_io_context(struct task_struct *task);
-struct io_context *get_io_context(gfp_t gfp_flags, int node);
-struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
+struct io_context *get_task_io_context(struct task_struct *task,
+                                       gfp_t gfp_flags, int node);
 #else
 struct io_context;
 static inline void put_io_context(struct io_context *ioc) { }
kernel/fork.c
@@ -870,6 +870,7 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
 {
 #ifdef CONFIG_BLOCK
         struct io_context *ioc = current->io_context;
+        struct io_context *new_ioc;
 
         if (!ioc)
                 return 0;
@@ -881,11 +882,12 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
                 if (unlikely(!tsk->io_context))
                         return -ENOMEM;
         } else if (ioprio_valid(ioc->ioprio)) {
-                tsk->io_context = alloc_io_context(GFP_KERNEL, -1);
-                if (unlikely(!tsk->io_context))
+                new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
+                if (unlikely(!new_ioc))
                         return -ENOMEM;
 
-                tsk->io_context->ioprio = ioc->ioprio;
+                new_ioc->ioprio = ioc->ioprio;
+                put_io_context(new_ioc);
         }
 #endif
         return 0;