cgroup: don't use subsys->can_attach_task() or ->attach_task()
Now that subsys->can_attach() and attach() take @tset instead of @task,
they can handle per-task operations.  Convert ->can_attach_task() and
->attach_task() users to use ->can_attach() and attach() instead.

Most of the conversions are straightforward.  Noteworthy changes are:

* In cgroup_freezer, remove unnecessary NULL assignments to unused
  methods.  It's useless and very prone to get out of sync, which
  already happened.

* In cpuset, the PF_THREAD_BOUND test is checked for each task.  This
  doesn't make any practical difference but is conceptually cleaner.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Li Zefan <lizf@cn.fujitsu.com>
Cc: Paul Menage <paul@paulmenage.org>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: James Morris <jmorris@namei.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <peterz@infradead.org>
commit bb9d97b6df (parent 2f7ee5691e)
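Every subsystem converted below follows the same pattern: the body of the old
per-task callback moves inside a cgroup_taskset_for_each() loop in the
taskset-based callback, and the ->can_attach_task()/->attach_task() entries in
struct cgroup_subsys are replaced by ->can_attach()/->attach().  A minimal
sketch of that pattern, using a hypothetical "foo" subsystem and a made-up
per-task predicate foo_task_allowed() (neither exists in the kernel; only the
taskset API shown is what this patch converts the subsystems to):

/* Old style: the cgroup core invoked this once per task being moved. */
static int foo_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
        return foo_task_allowed(tsk) ? 0 : -EINVAL;   /* hypothetical check */
}

/* New style: invoked once per migration; the subsystem walks the taskset. */
static int foo_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
                          struct cgroup_taskset *tset)
{
        struct task_struct *task;

        cgroup_taskset_for_each(task, cgrp, tset) {
                if (!foo_task_allowed(task))          /* hypothetical check */
                        return -EINVAL;
        }
        return 0;
}

The loop keeps the per-task visibility the old callbacks had, while letting
each subsystem veto or act on the migration as a whole.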
@@ -30,8 +30,10 @@ EXPORT_SYMBOL_GPL(blkio_root_cgroup);
 
 static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
                                                   struct cgroup *);
-static int blkiocg_can_attach_task(struct cgroup *, struct task_struct *);
-static void blkiocg_attach_task(struct cgroup *, struct task_struct *);
+static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
+                              struct cgroup_taskset *);
+static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
+                           struct cgroup_taskset *);
 static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
 static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
 
@@ -44,8 +46,8 @@ static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
 struct cgroup_subsys blkio_subsys = {
         .name = "blkio",
         .create = blkiocg_create,
-        .can_attach_task = blkiocg_can_attach_task,
-        .attach_task = blkiocg_attach_task,
+        .can_attach = blkiocg_can_attach,
+        .attach = blkiocg_attach,
         .destroy = blkiocg_destroy,
         .populate = blkiocg_populate,
 #ifdef CONFIG_BLK_CGROUP
@@ -1626,30 +1628,39 @@ done:
  * of the main cic data structures.  For now we allow a task to change
  * its cgroup only if it's the only owner of its ioc.
  */
-static int blkiocg_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+                              struct cgroup_taskset *tset)
 {
+        struct task_struct *task;
         struct io_context *ioc;
         int ret = 0;
 
         /* task_lock() is needed to avoid races with exit_io_context() */
-        task_lock(tsk);
-        ioc = tsk->io_context;
-        if (ioc && atomic_read(&ioc->nr_tasks) > 1)
-                ret = -EINVAL;
-        task_unlock(tsk);
-
+        cgroup_taskset_for_each(task, cgrp, tset) {
+                task_lock(task);
+                ioc = task->io_context;
+                if (ioc && atomic_read(&ioc->nr_tasks) > 1)
+                        ret = -EINVAL;
+                task_unlock(task);
+                if (ret)
+                        break;
+        }
         return ret;
 }
 
-static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+                           struct cgroup_taskset *tset)
 {
+        struct task_struct *task;
         struct io_context *ioc;
 
-        task_lock(tsk);
-        ioc = tsk->io_context;
-        if (ioc)
-                ioc->cgroup_changed = 1;
-        task_unlock(tsk);
+        cgroup_taskset_for_each(task, cgrp, tset) {
+                task_lock(task);
+                ioc = task->io_context;
+                if (ioc)
+                        ioc->cgroup_changed = 1;
+                task_unlock(task);
+        }
 }
 
 void blkio_policy_register(struct blkio_policy_type *blkiop)
@@ -162,10 +162,14 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
                               struct cgroup_taskset *tset)
 {
         struct freezer *freezer;
+        struct task_struct *task;
 
         /*
          * Anything frozen can't move or be moved to/from.
          */
+        cgroup_taskset_for_each(task, new_cgroup, tset)
+                if (cgroup_freezing(task))
+                        return -EBUSY;
 
         freezer = cgroup_freezer(new_cgroup);
         if (freezer->state != CGROUP_THAWED)
@@ -174,11 +178,6 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
         return 0;
 }
 
-static int freezer_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
-{
-        return cgroup_freezing(tsk) ? -EBUSY : 0;
-}
-
 static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
 {
         struct freezer *freezer;
@@ -374,10 +373,5 @@ struct cgroup_subsys freezer_subsys = {
         .populate = freezer_populate,
         .subsys_id = freezer_subsys_id,
         .can_attach = freezer_can_attach,
-        .can_attach_task = freezer_can_attach_task,
-        .pre_attach = NULL,
-        .attach_task = NULL,
-        .attach = NULL,
         .fork = freezer_fork,
-        .exit = NULL,
 };
@@ -1375,33 +1375,34 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
                              struct cgroup_taskset *tset)
 {
         struct cpuset *cs = cgroup_cs(cgrp);
+        struct task_struct *task;
+        int ret;
 
         if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
                 return -ENOSPC;
 
-        /*
-         * Kthreads bound to specific cpus cannot be moved to a new cpuset; we
-         * cannot change their cpu affinity and isolating such threads by their
-         * set of allowed nodes is unnecessary.  Thus, cpusets are not
-         * applicable for such threads.  This prevents checking for success of
-         * set_cpus_allowed_ptr() on all attached tasks before cpus_allowed may
-         * be changed.
-         */
-        if (cgroup_taskset_first(tset)->flags & PF_THREAD_BOUND)
-                return -EINVAL;
-
+        cgroup_taskset_for_each(task, cgrp, tset) {
+                /*
+                 * Kthreads bound to specific cpus cannot be moved to a new
+                 * cpuset; we cannot change their cpu affinity and
+                 * isolating such threads by their set of allowed nodes is
+                 * unnecessary.  Thus, cpusets are not applicable for such
+                 * threads.  This prevents checking for success of
+                 * set_cpus_allowed_ptr() on all attached tasks before
+                 * cpus_allowed may be changed.
+                 */
+                if (task->flags & PF_THREAD_BOUND)
+                        return -EINVAL;
+                if ((ret = security_task_setscheduler(task)))
+                        return ret;
+        }
         return 0;
 }
 
-static int cpuset_can_attach_task(struct cgroup *cgrp, struct task_struct *task)
-{
-        return security_task_setscheduler(task);
-}
-
 /*
  * Protected by cgroup_lock. The nodemasks must be stored globally because
  * dynamically allocating them is not allowed in pre_attach, and they must
- * persist among pre_attach, attach_task, and attach.
+ * persist among pre_attach, and attach.
  */
 static cpumask_var_t cpus_attach;
 static nodemask_t cpuset_attach_nodemask_from;
@@ -1420,39 +1421,34 @@ static void cpuset_pre_attach(struct cgroup *cont)
         guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
 }
 
-/* Per-thread attachment work. */
-static void cpuset_attach_task(struct cgroup *cont, struct task_struct *tsk)
-{
-        int err;
-        struct cpuset *cs = cgroup_cs(cont);
-
-        /*
-         * can_attach beforehand should guarantee that this doesn't fail.
-         * TODO: have a better way to handle failure here
-         */
-        err = set_cpus_allowed_ptr(tsk, cpus_attach);
-        WARN_ON_ONCE(err);
-
-        cpuset_change_task_nodemask(tsk, &cpuset_attach_nodemask_to);
-        cpuset_update_task_spread_flag(cs, tsk);
-}
-
 static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
                           struct cgroup_taskset *tset)
 {
         struct mm_struct *mm;
-        struct task_struct *tsk = cgroup_taskset_first(tset);
+        struct task_struct *task;
+        struct task_struct *leader = cgroup_taskset_first(tset);
         struct cgroup *oldcgrp = cgroup_taskset_cur_cgroup(tset);
         struct cpuset *cs = cgroup_cs(cgrp);
         struct cpuset *oldcs = cgroup_cs(oldcgrp);
 
+        cgroup_taskset_for_each(task, cgrp, tset) {
+                /*
+                 * can_attach beforehand should guarantee that this doesn't
+                 * fail.  TODO: have a better way to handle failure here
+                 */
+                WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
+
+                cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
+                cpuset_update_task_spread_flag(cs, task);
+        }
+
         /*
          * Change mm, possibly for multiple threads in a threadgroup. This is
          * expensive and may sleep.
          */
         cpuset_attach_nodemask_from = oldcs->mems_allowed;
         cpuset_attach_nodemask_to = cs->mems_allowed;
-        mm = get_task_mm(tsk);
+        mm = get_task_mm(leader);
         if (mm) {
                 mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
                 if (is_memory_migrate(cs))
@@ -1908,9 +1904,7 @@ struct cgroup_subsys cpuset_subsys = {
         .create = cpuset_create,
         .destroy = cpuset_destroy,
         .can_attach = cpuset_can_attach,
-        .can_attach_task = cpuset_can_attach_task,
         .pre_attach = cpuset_pre_attach,
-        .attach_task = cpuset_attach_task,
         .attach = cpuset_attach,
         .populate = cpuset_populate,
         .post_clone = cpuset_post_clone,
@@ -7044,10 +7044,13 @@ static int __perf_cgroup_move(void *info)
         return 0;
 }
 
-static void
-perf_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *task)
+static void perf_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+                               struct cgroup_taskset *tset)
 {
-        task_function_call(task, __perf_cgroup_move, task);
+        struct task_struct *task;
+
+        cgroup_taskset_for_each(task, cgrp, tset)
+                task_function_call(task, __perf_cgroup_move, task);
 }
 
 static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
@@ -7061,7 +7064,7 @@ static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
         if (!(task->flags & PF_EXITING))
                 return;
 
-        perf_cgroup_attach_task(cgrp, task);
+        task_function_call(task, __perf_cgroup_move, task);
 }
 
 struct cgroup_subsys perf_subsys = {
@@ -7070,6 +7073,6 @@ struct cgroup_subsys perf_subsys = {
         .create = perf_cgroup_create,
         .destroy = perf_cgroup_destroy,
         .exit = perf_cgroup_exit,
-        .attach_task = perf_cgroup_attach_task,
+        .attach = perf_cgroup_attach,
 };
 #endif /* CONFIG_CGROUP_PERF */
@@ -9127,24 +9127,31 @@ cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
         sched_destroy_group(tg);
 }
 
-static int
-cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+static int cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+                                 struct cgroup_taskset *tset)
 {
+        struct task_struct *task;
+
+        cgroup_taskset_for_each(task, cgrp, tset) {
 #ifdef CONFIG_RT_GROUP_SCHED
-        if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
-                return -EINVAL;
+                if (!sched_rt_can_attach(cgroup_tg(cgrp), task))
+                        return -EINVAL;
 #else
-        /* We don't support RT-tasks being in separate groups */
-        if (tsk->sched_class != &fair_sched_class)
-                return -EINVAL;
+                /* We don't support RT-tasks being in separate groups */
+                if (task->sched_class != &fair_sched_class)
+                        return -EINVAL;
 #endif
+        }
         return 0;
 }
 
-static void
-cpu_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+static void cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+                              struct cgroup_taskset *tset)
 {
-        sched_move_task(tsk);
+        struct task_struct *task;
+
+        cgroup_taskset_for_each(task, cgrp, tset)
+                sched_move_task(task);
 }
 
 static void
@@ -9480,8 +9487,8 @@ struct cgroup_subsys cpu_cgroup_subsys = {
         .name = "cpu",
         .create = cpu_cgroup_create,
         .destroy = cpu_cgroup_destroy,
-        .can_attach_task = cpu_cgroup_can_attach_task,
-        .attach_task = cpu_cgroup_attach_task,
+        .can_attach = cpu_cgroup_can_attach,
+        .attach = cpu_cgroup_attach,
         .exit = cpu_cgroup_exit,
         .populate = cpu_cgroup_populate,
         .subsys_id = cpu_cgroup_subsys_id,