break;
if (ss->cancel_attach_task)
- ss->cancel_attach_task(cgrp, tsk);
+ ss->cancel_attach_task(cgrp, oldcgrp, tsk);
if (ss->cancel_attach)
ss->cancel_attach(ss, cgrp, tsk);
}
return 0;
}
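+/*
+ * Pairs a task with the cgroup it belonged to (in the destination
+ * hierarchy) before the attach started, recorded up front so that the
+ * later attach/cancel steps don't need to look it up again.
+ */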
+struct task_cgroup {
+ struct task_struct *tsk;
+ struct cgroup *oldcgrp;
+};
+
/**
* cgroup_attach_proc - attach all threads in a threadgroup to a cgroup
* @cgrp: the cgroup to attach to
/* threadgroup list cursor and array */
struct task_struct *tsk;
struct flex_array *group;
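+	/* cursor over the task/old-cgroup pairs stored in the group array */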
+ struct task_cgroup *tc;
/*
* we need to make sure we have css_sets for all the tasks we're
* going to move -before- we actually start moving them, so that in
*/
group_size = get_nr_threads(leader);
/* flex_array supports very large thread-groups better than kmalloc. */
- group = flex_array_alloc(sizeof(struct task_struct *), group_size,
+ group = flex_array_alloc(sizeof(struct task_cgroup), group_size,
GFP_KERNEL);
if (!group)
return -ENOMEM;
tsk = leader;
i = 0;
do {
+ struct task_cgroup tsk_cgrp;
+
/* as per above, nr_threads may decrease, but not increase. */
BUG_ON(i >= group_size);
get_task_struct(tsk);
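+		/* remember the task's current cgroup in this hierarchy */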
+ tsk_cgrp.tsk = tsk;
+ tsk_cgrp.oldcgrp = task_cgroup_from_root(tsk, root);
/*
* saying GFP_ATOMIC has no effect here because we did prealloc
* earlier, but it's good form to communicate our expectations.
*/
- retval = flex_array_put_ptr(group, i, tsk, GFP_ATOMIC);
+ retval = flex_array_put(group, i, &tsk_cgrp, GFP_ATOMIC);
BUG_ON(retval != 0);
i++;
} while_each_thread(leader, tsk);
if (ss->can_attach_task) {
/* run on each task in the threadgroup. */
for (i = 0; i < group_size; i++) {
- tsk = flex_array_get_ptr(group, i);
- oldcgrp = task_cgroup_from_root(tsk, root);
-
+ tc = flex_array_get(group, i);
retval = ss->can_attach_task(cgrp,
- oldcgrp, tsk);
+ tc->oldcgrp,
+ tc->tsk);
if (retval) {
failed_ss = ss;
- failed_task = tsk;
+ failed_task = tc->tsk;
goto out_cancel_attach;
}
}
*/
INIT_LIST_HEAD(&newcg_list);
for (i = 0; i < group_size; i++) {
- tsk = flex_array_get_ptr(group, i);
+ tc = flex_array_get(group, i);
+ tsk = tc->tsk;
/* nothing to do if this task is already in the cgroup */
- oldcgrp = task_cgroup_from_root(tsk, root);
- if (cgrp == oldcgrp)
+ if (cgrp == tc->oldcgrp)
continue;
/* get old css_set pointer */
task_lock(tsk);
ss->pre_attach(cgrp);
}
for (i = 0; i < group_size; i++) {
- tsk = flex_array_get_ptr(group, i);
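+		/* reuse the cgroup recorded when the array was built */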
+ tc = flex_array_get(group, i);
+ tsk = tc->tsk;
+ oldcgrp = tc->oldcgrp;
/* leave current thread as it is if it's already there */
- oldcgrp = task_cgroup_from_root(tsk, root);
if (cgrp == oldcgrp)
continue;
/* if the thread is PF_EXITING, it can just get skipped. */
}
} else if (retval == -ESRCH) {
if (ss->cancel_attach_task)
- ss->cancel_attach_task(cgrp, tsk);
+ ss->cancel_attach_task(cgrp, oldcgrp, tsk);
} else {
BUG_ON(1);
}
if (ss->cancel_attach_task && (ss != failed_ss ||
failed_task)) {
for (i = 0; i < group_size; i++) {
- tsk = flex_array_get_ptr(group, i);
- if (tsk == failed_task)
+ tc = flex_array_get(group, i);
+ if (tc->tsk == failed_task)
break;
- ss->cancel_attach_task(cgrp, tsk);
+ ss->cancel_attach_task(cgrp, tc->oldcgrp, tc->tsk);
}
}
}
/* clean up the array of referenced threads in the group. */
for (i = 0; i < group_size; i++) {
- tsk = flex_array_get_ptr(group, i);
- put_task_struct(tsk);
+ tc = flex_array_get(group, i);
+ put_task_struct(tc->tsk);
}
out_free_group_list:
flex_array_free(group);
res_counter_uncharge(cgroup_task_res_counter(old_cgrp), 1);
}
-/*
- * Protected amongst can_attach_task/attach_task/cancel_attach_task by
- * cgroup mutex
- */
-static struct res_counter *common_ancestor;
-
/*
* This does more than just probing the ability to attach to the dest cgroup.
* We can not just _check_ if we can attach to the destination and do the real
struct cgroup *old_cgrp,
struct task_struct *tsk)
{
+ int err;
+ struct res_counter *common_ancestor;
struct res_counter *res = cgroup_task_res_counter(cgrp);
struct res_counter *old_res = cgroup_task_res_counter(old_cgrp);
- int err;
/*
* When moving a task from a cgroup to another, we don't want
/* Uncharge the dest cgroup that we charged in task_counter_can_attach_task() */
static void task_counter_cancel_attach_task(struct cgroup *cgrp,
+ struct cgroup *old_cgrp,
struct task_struct *tsk)
{
- res_counter_uncharge_until(cgroup_task_res_counter(cgrp),
- common_ancestor, 1);
+ struct res_counter *common_ancestor;
+ struct res_counter *res = cgroup_task_res_counter(cgrp);
+ struct res_counter *old_res = cgroup_task_res_counter(old_cgrp);
+
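+	/*
+	 * Recompute the common ancestor from the two cgroups instead of
+	 * relying on a value cached across callbacks: it only depends on
+	 * cgrp and old_cgrp, so deriving it again here is safe.
+	 */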
+ common_ancestor = res_counter_common_ancestor(res, old_res);
+ res_counter_uncharge_until(res, common_ancestor, 1);
}
/*
struct cgroup *old_cgrp,
struct task_struct *tsk)
{
- res_counter_uncharge_until(cgroup_task_res_counter(old_cgrp),
- common_ancestor, 1);
+ struct res_counter *common_ancestor;
+ struct res_counter *res = cgroup_task_res_counter(cgrp);
+ struct res_counter *old_res = cgroup_task_res_counter(old_cgrp);
+
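+	/*
+	 * As in task_counter_cancel_attach_task(), derive the common
+	 * ancestor from the two cgroups rather than from shared state.
+	 */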
+ common_ancestor = res_counter_common_ancestor(res, old_res);
+ res_counter_uncharge_until(old_res, common_ancestor, 1);
}
static u64 task_counter_read_u64(struct cgroup *cgrp, struct cftype *cft)