This will be called only for subsystems whose can_attach() operation has
succeeded.
+void cancel_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+(cgroup_mutex held by caller)
+
+As cancel_attach, but for operations that must be cancelled once per
+task that wanted to be attached. This typically reverts the effect of
+can_attach_task().
+
void pre_attach(struct cgroup *cgrp);
(cgroup_mutex held by caller)
struct task_struct *tsk);
void (*cancel_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct task_struct *tsk);
+ void (*cancel_attach_task)(struct cgroup *cgrp,
+ struct task_struct *tsk);
void (*pre_attach)(struct cgroup *cgrp);
void (*attach_task)(struct cgroup *cgrp, struct cgroup *old_cgrp,
struct task_struct *tsk);
* remaining subsystems.
*/
break;
+
+ if (ss->cancel_attach_task)
+ ss->cancel_attach_task(cgrp, tsk);
if (ss->cancel_attach)
ss->cancel_attach(ss, cgrp, tsk);
}
{
int retval, i, group_size;
struct cgroup_subsys *ss, *failed_ss = NULL;
- bool cancel_failed_ss = false;
+ struct task_struct *failed_task = NULL;
/* guaranteed to be initialized later, but the compiler needs this */
struct cgroup *oldcgrp = NULL;
struct css_set *oldcg;
oldcgrp, tsk);
if (retval) {
failed_ss = ss;
- cancel_failed_ss = true;
+ failed_task = tsk;
goto out_cancel_attach;
}
}
if (ss->attach_task)
ss->attach_task(cgrp, oldcgrp, tsk);
}
+ } else if (retval == -ESRCH) {
+ if (ss->cancel_attach_task)
+ ss->cancel_attach_task(cgrp, tsk);
} else {
- BUG_ON(retval != -ESRCH);
+ BUG_ON(1);
}
}
/* nothing is sensitive to fork() after this point. */
/* same deal as in cgroup_attach_task */
if (retval) {
for_each_subsys(root, ss) {
+ if (ss->cancel_attach_task && (ss != failed_ss ||
+ failed_task)) {
+ for (i = 0; i < group_size; i++) {
+ tsk = flex_array_get_ptr(group, i);
+ if (tsk == failed_task)
+ break;
+ ss->cancel_attach_task(cgrp, tsk);
+ }
+ }
+
if (ss == failed_ss) {
- if (cancel_failed_ss && ss->cancel_attach)
+ if (failed_task && ss->cancel_attach)
ss->cancel_attach(ss, cgrp, leader);
break;
}