cgroup, cpuset: remove cgroup_subsys->post_clone()
Currently CGRP_CPUSET_CLONE_CHILDREN triggers ->post_clone(). Now that
clone_children is cpuset specific, there's no reason to have this rather
odd option activation mechanism in cgroup core. cpuset can check the
flag from its ->css_allocate() and take the necessary action.

Move cpuset_post_clone() logic to the end of cpuset_css_alloc() and
remove cgroup_subsys->post_clone().

Loosely based on Glauber's "generalize post_clone into post_create" patch.

Signed-off-by: Tejun Heo <tj@kernel.org>
Original-patch-by: Glauber Costa <glommer@parallels.com>
Original-patch: <1351686554-22592-2-git-send-email-glommer@parallels.com>
Acked-by: Serge E. Hallyn <serge.hallyn@ubuntu.com>
Acked-by: Li Zefan <lizefan@huawei.com>
Cc: Glauber Costa <glommer@parallels.com>
commit 033fa1c5f5 (parent 2260e7fc1f)
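To make the control-flow change easier to see outside the diff: before this commit, cgroup core invoked ss->post_clone() whenever the parent cgroup had CGRP_CPUSET_CLONE_CHILDREN set; after it, cpuset's own allocation callback checks the flag and copies the parent's configuration itself. The snippet below is a minimal userspace sketch of that pattern, not kernel code; every identifier in it (toy_cgroup, toy_css_alloc, the integer masks standing in for cpumask/nodemask) is made up for illustration.

/*
 * Toy userspace model (NOT kernel code) of the pattern this commit moves
 * to: instead of core code calling a separate ->post_clone() hook, the
 * subsystem's own allocation callback checks the parent's clone_children
 * flag and clones the configuration itself.  All names are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_cgroup {
        bool clone_children;            /* models CGRP_CPUSET_CLONE_CHILDREN */
        unsigned int cpus_allowed;      /* stand-in for the real cpumask */
        unsigned int mems_allowed;      /* stand-in for the real nodemask */
        struct toy_cgroup *parent;
};

/* After the commit: the flag check lives inside the alloc callback. */
static struct toy_cgroup *toy_css_alloc(struct toy_cgroup *parent)
{
        struct toy_cgroup *cs = calloc(1, sizeof(*cs));

        if (!cs)
                return NULL;
        cs->parent = parent;

        /* Clone the parent's configuration only when the flag asks for it. */
        if (parent && parent->clone_children) {
                cs->cpus_allowed = parent->cpus_allowed;
                cs->mems_allowed = parent->mems_allowed;
        }
        return cs;
}

int main(void)
{
        struct toy_cgroup root = {
                .clone_children = true,
                .cpus_allowed = 0xf,
                .mems_allowed = 0x3,
        };
        struct toy_cgroup *child = toy_css_alloc(&root);

        if (child)
                printf("child cpus=%#x mems=%#x\n",
                       child->cpus_allowed, child->mems_allowed);
        free(child);
        return 0;
}

In the real patch the same check-and-copy sits at the end of cpuset_css_alloc(), guarded by callback_mutex and skipped when any sibling cpuset is cpu- or mem-exclusive, as the last hunk below shows.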
Documentation/cgroups/cgroups.txt
@@ -642,14 +642,6 @@ void exit(struct task_struct *task)
 
 Called during task exit.
 
-void post_clone(struct cgroup *cgrp)
-(cgroup_mutex held by caller)
-
-Called during cgroup_create() to do any parameter
-initialization which might be required before a task could attach. For
-example, in cpusets, no task may attach before 'cpus' and 'mems' are set
-up.
-
 void bind(struct cgroup *root)
 (cgroup_mutex held by caller)
 
include/linux/cgroup.h
@@ -452,7 +452,6 @@ struct cgroup_subsys {
         void (*fork)(struct task_struct *task);
         void (*exit)(struct cgroup *cgrp, struct cgroup *old_cgrp,
                      struct task_struct *task);
-        void (*post_clone)(struct cgroup *cgrp);
         void (*bind)(struct cgroup *root);
 
         int subsys_id;
kernel/cgroup.c
@@ -4142,10 +4142,6 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
                         if (err)
                                 goto err_free_all;
                 }
-                /* At error, ->css_free() callback has to free assigned ID. */
-                if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags) &&
-                    ss->post_clone)
-                        ss->post_clone(cgrp);
 
                 if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
                     parent->parent) {
kernel/cpuset.c
@@ -1783,43 +1783,6 @@ static struct cftype files[] = {
         { }     /* terminate */
 };
 
-/*
- * post_clone() is called during cgroup_create() when the
- * clone_children mount argument was specified.  The cgroup
- * can not yet have any tasks.
- *
- * Currently we refuse to set up the cgroup - thereby
- * refusing the task to be entered, and as a result refusing
- * the sys_unshare() or clone() which initiated it - if any
- * sibling cpusets have exclusive cpus or mem.
- *
- * If this becomes a problem for some users who wish to
- * allow that scenario, then cpuset_post_clone() could be
- * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
- * (and likewise for mems) to the new cgroup. Called with cgroup_mutex
- * held.
- */
-static void cpuset_post_clone(struct cgroup *cgroup)
-{
-        struct cgroup *parent, *child;
-        struct cpuset *cs, *parent_cs;
-
-        parent = cgroup->parent;
-        list_for_each_entry(child, &parent->children, sibling) {
-                cs = cgroup_cs(child);
-                if (is_mem_exclusive(cs) || is_cpu_exclusive(cs))
-                        return;
-        }
-        cs = cgroup_cs(cgroup);
-        parent_cs = cgroup_cs(parent);
-
-        mutex_lock(&callback_mutex);
-        cs->mems_allowed = parent_cs->mems_allowed;
-        cpumask_copy(cs->cpus_allowed, parent_cs->cpus_allowed);
-        mutex_unlock(&callback_mutex);
-        return;
-}
-
 /*
  * cpuset_css_alloc - allocate a cpuset css
  * cont: control group that the new cpuset will be part of
@@ -1827,13 +1790,14 @@ static void cpuset_post_clone(struct cgroup *cgroup)
 
 static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cont)
 {
-        struct cpuset *cs;
-        struct cpuset *parent;
+        struct cgroup *parent_cg = cont->parent;
+        struct cgroup *tmp_cg;
+        struct cpuset *parent, *cs;
 
-        if (!cont->parent) {
+        if (!parent_cg)
                 return &top_cpuset.css;
-        }
-        parent = cgroup_cs(cont->parent);
+        parent = cgroup_cs(parent_cg);
 
         cs = kmalloc(sizeof(*cs), GFP_KERNEL);
         if (!cs)
                 return ERR_PTR(-ENOMEM);
@@ -1855,7 +1819,36 @@ static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cont)
 
         cs->parent = parent;
         number_of_cpusets++;
-        return &cs->css ;
+
+        if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &cont->flags))
+                goto skip_clone;
+
+        /*
+         * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
+         * set.  This flag handling is implemented in cgroup core for
+         * histrical reasons - the flag may be specified during mount.
+         *
+         * Currently, if any sibling cpusets have exclusive cpus or mem, we
+         * refuse to clone the configuration - thereby refusing the task to
+         * be entered, and as a result refusing the sys_unshare() or
+         * clone() which initiated it.  If this becomes a problem for some
+         * users who wish to allow that scenario, then this could be
+         * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
+         * (and likewise for mems) to the new cgroup.
+         */
+        list_for_each_entry(tmp_cg, &parent_cg->children, sibling) {
+                struct cpuset *tmp_cs = cgroup_cs(tmp_cg);
+
+                if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs))
+                        goto skip_clone;
+        }
+
+        mutex_lock(&callback_mutex);
+        cs->mems_allowed = parent->mems_allowed;
+        cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
+        mutex_unlock(&callback_mutex);
+
+skip_clone:
+        return &cs->css;
 }
 
 /*
@@ -1882,7 +1875,6 @@ struct cgroup_subsys cpuset_subsys = {
         .css_free = cpuset_css_free,
         .can_attach = cpuset_can_attach,
         .attach = cpuset_attach,
-        .post_clone = cpuset_post_clone,
         .subsys_id = cpuset_subsys_id,
         .base_cftypes = files,
         .early_init = 1,