remain valid while the caller holds cgroup_mutex, and it is ensured that either
attach() or cancel_attach() will be called in the future.
-int can_attach_task(struct cgroup *cgrp, struct task_struct *tsk);
+int can_attach_task(struct cgroup *cgrp, struct cgroup *old_cgrp,
+		    struct task_struct *tsk);
(cgroup_mutex held by caller)
As can_attach, but for operations that must be run once per task to be
attached (possibly many when using cgroup_attach_proc).
Called after the task has been attached to the cgroup, to allow any
post-attachment activity that requires memory allocations or blocking.
-void attach_task(struct cgroup *cgrp, struct task_struct *tsk);
+void attach_task(struct cgroup *cgrp, struct cgroup *old_cgrp,
+		 struct task_struct *tsk);
(cgroup_mutex held by caller)
As attach, but for operations that must be run once per task to be attached,
like can_attach_task. Called before attach.
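To make the updated signatures concrete, here is a minimal sketch of the two
per-task callbacks as a subsystem might now implement them; the example_*
names are hypothetical and the bodies are illustrative only:

static int example_can_attach_task(struct cgroup *cgrp,
				   struct cgroup *old_cgrp,
				   struct task_struct *tsk)
{
	/*
	 * The task has not moved yet; old_cgrp is the cgroup it is
	 * leaving. A nonzero return aborts the whole attach.
	 */
	if (cgrp == old_cgrp)
		return 0;	/* no-op move, nothing to check */
	return 0;
}

static void example_attach_task(struct cgroup *cgrp,
				struct cgroup *old_cgrp,
				struct task_struct *tsk)
{
	/*
	 * Runs once per task after the move; per-task state would be
	 * migrated from old_cgrp to cgrp here.
	 */
}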
static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
-static int blkiocg_can_attach_task(struct cgroup *, struct task_struct *);
-static void blkiocg_attach_task(struct cgroup *, struct task_struct *);
+static int blkiocg_can_attach_task(struct cgroup *, struct cgroup *,
+				   struct task_struct *);
+static void blkiocg_attach_task(struct cgroup *, struct cgroup *,
+				struct task_struct *);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
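For context (this table itself is unchanged by the diff), the prototypes above
are wired into the blkio subsystem's operations structure; in this era of the
tree the definition in block/blk-cgroup.c looks roughly like the following,
modulo config #ifdefs:

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach_task = blkiocg_can_attach_task,
	.attach_task = blkiocg_attach_task,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
	.subsys_id = blkio_subsys_id,
	.use_id = 1,
};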
* of the main cic data structures. For now we allow a task to change
* its cgroup only if it's the only owner of its ioc.
*/
-static int blkiocg_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+static int blkiocg_can_attach_task(struct cgroup *cgrp, struct cgroup *old_cgrp,
+				   struct task_struct *tsk)
{
	struct io_context *ioc;
	int ret = 0;
	return ret;
}
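The body elided at the hunk boundary above is the single-owner check that the
comment describes. A sketch following the upstream pattern (task_lock() to
synchronize against exit_io_context(), and the io_context's task refcount)
would be:

	/* task_lock() avoids a race with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		/* the ioc is shared (CLONE_IO); refuse the migration */
		ret = -EINVAL;
	task_unlock(tsk);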
-static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+static void blkiocg_attach_task(struct cgroup *cgrp, struct cgroup *old_cgrp,
+				struct task_struct *tsk)
{
	struct io_context *ioc;
	void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
	int (*can_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
			  struct task_struct *tsk);
-	int (*can_attach_task)(struct cgroup *cgrp, struct task_struct *tsk);
+	int (*can_attach_task)(struct cgroup *cgrp, struct cgroup *old_cgrp,
+			       struct task_struct *tsk);
	void (*cancel_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
			      struct task_struct *tsk);
	void (*pre_attach)(struct cgroup *cgrp);
-	void (*attach_task)(struct cgroup *cgrp, struct task_struct *tsk);
+	void (*attach_task)(struct cgroup *cgrp, struct cgroup *old_cgrp,
+			    struct task_struct *tsk);
	void (*attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
		       struct cgroup *old_cgrp, struct task_struct *tsk);
	void (*fork)(struct cgroup_subsys *ss, struct task_struct *task);
			}
		}
		if (ss->can_attach_task) {
-			retval = ss->can_attach_task(cgrp, tsk);
+			retval = ss->can_attach_task(cgrp, oldcgrp, tsk);
			if (retval) {
				failed_ss = ss;
				goto out;
		if (ss->pre_attach)
			ss->pre_attach(cgrp);
		if (ss->attach_task)
-			ss->attach_task(cgrp, tsk);
+			ss->attach_task(cgrp, oldcgrp, tsk);
		if (ss->attach)
			ss->attach(ss, cgrp, oldcgrp, tsk);
	}
			/* run on each task in the threadgroup. */
			for (i = 0; i < group_size; i++) {
				tsk = flex_array_get_ptr(group, i);
-				retval = ss->can_attach_task(cgrp, tsk);
+				oldcgrp = task_cgroup_from_root(tsk, root);
+
+				retval = ss->can_attach_task(cgrp,
+							     oldcgrp, tsk);
				if (retval) {
					failed_ss = ss;
					cancel_failed_ss = true;
			/* attach each task to each subsystem */
			for_each_subsys(root, ss) {
				if (ss->attach_task)
-					ss->attach_task(cgrp, tsk);
+					ss->attach_task(cgrp, oldcgrp, tsk);
			}
		} else {
			BUG_ON(retval != -ESRCH);
	return 0;
}
-static int freezer_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+static int freezer_can_attach_task(struct cgroup *cgrp, struct cgroup *old_cgrp,
+				   struct task_struct *tsk)
{
	rcu_read_lock();
	if (__cgroup_freezing_or_frozen(tsk)) {
	return 0;
}
-static int cpuset_can_attach_task(struct cgroup *cgrp, struct task_struct *task)
+static int cpuset_can_attach_task(struct cgroup *cgrp, struct cgroup *old_cgrp,
+				  struct task_struct *task)
{
	return security_task_setscheduler(task);
}
}
/* Per-thread attachment work. */
-static void cpuset_attach_task(struct cgroup *cont, struct task_struct *tsk)
+static void cpuset_attach_task(struct cgroup *cont, struct cgroup *old,
+			       struct task_struct *tsk)
{
	int err;
	struct cpuset *cs = cgroup_cs(cont);
}
static void
-perf_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *task)
+perf_cgroup_attach_task(struct cgroup *cgrp, struct cgroup *old_cgrp,
+			struct task_struct *task)
{
	task_function_call(task, __perf_cgroup_move, task);
}
	if (!(task->flags & PF_EXITING))
		return;
-	perf_cgroup_attach_task(cgrp, task);
+	perf_cgroup_attach_task(cgrp, old_cgrp, task);
}
struct cgroup_subsys perf_subsys = {
}
static int
-cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct cgroup *old_cgrp,
+			   struct task_struct *tsk)
{
#ifdef CONFIG_RT_GROUP_SCHED
	if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
}
static void
-cpu_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+cpu_cgroup_attach_task(struct cgroup *cgrp, struct cgroup *old_cgrp,
+		       struct task_struct *tsk)
{
	sched_move_task(tsk);
}