sched: Use rcu in sched_get/set_affinity()
tasklist_lock is held read locked to protect the find_task_by_vpid()
call and to prevent the task going away.

sched_setaffinity() acquires a task struct ref and drops the tasklist
lock right away. The access to the cpus_allowed mask is protected by
rq->lock. rcu_read_lock() provides the same protection here.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20091209100706.789059966@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 23f5d14251
parent 5fe85be081
@@ -6516,22 +6516,18 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	int retval;
 
 	get_online_cpus();
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 
 	p = find_process_by_pid(pid);
 	if (!p) {
-		read_unlock(&tasklist_lock);
+		rcu_read_unlock();
 		put_online_cpus();
 		return -ESRCH;
 	}
 
-	/*
-	 * It is not safe to call set_cpus_allowed with the
-	 * tasklist_lock held. We will bump the task_struct's
-	 * usage count and then drop tasklist_lock.
-	 */
+	/* Prevent p going away */
 	get_task_struct(p);
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 
 	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
 		retval = -ENOMEM;
@@ -6617,7 +6613,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
 	int retval;
 
 	get_online_cpus();
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 
 	retval = -ESRCH;
 	p = find_process_by_pid(pid);
@@ -6633,7 +6629,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
 	task_rq_unlock(rq, &flags);
 
 out_unlock:
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 	put_online_cpus();
 
 	return retval;
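
For context, a minimal sketch of the lookup-and-pin pattern the patch
switches to, written as kernel-style C. pin_task_by_pid() is a
hypothetical helper for illustration, not a function added by this
commit; it assumes a kernel build environment for the includes.

	#include <linux/rcupdate.h>
	#include <linux/sched.h>

	/*
	 * Hypothetical helper: look up a task by pid under
	 * rcu_read_lock() and take a reference so the task_struct
	 * stays valid after the RCU read-side critical section ends.
	 */
	static struct task_struct *pin_task_by_pid(pid_t pid)
	{
		struct task_struct *p;

		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (p)
			get_task_struct(p);	/* prevent p going away */
		rcu_read_unlock();

		return p;	/* caller drops the ref with put_task_struct() */
	}

The ordering matters: the reference is taken before rcu_read_unlock(),
because once the read-side section ends the task may be released and
the pointer becomes invalid. sched_setaffinity() follows this shape in
the hunk above, while sched_getaffinity() never touches the task after
the critical section and so needs no reference at all.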