/* the list of cgroups eligible for automatic release. Protected by
* release_list_lock */
static LIST_HEAD(release_list);
-static DEFINE_SPINLOCK(release_list_lock);
+static DEFINE_RAW_SPINLOCK(release_list_lock);
static void cgroup_release_agent(struct work_struct *work);
static DECLARE_WORK(release_agent_work, cgroup_release_agent);
static void check_for_release(struct cgroup *cgrp);
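
On PREEMPT_RT a plain spinlock_t is turned into a sleeping rt_mutex, but release_list_lock is taken from contexts that must not sleep there, which is presumably why this patch converts it to a raw_spinlock_t: raw locks keep true spinning semantics even on RT. A minimal sketch of the pattern, with hypothetical names (example_list/example_lock are not from the patch):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    static LIST_HEAD(example_list);
    static DEFINE_RAW_SPINLOCK(example_lock);

    static void example_queue(struct list_head *item)
    {
            /*
             * raw_spin_lock() disables preemption and really spins,
             * even on PREEMPT_RT, so keep the critical section short.
             */
            raw_spin_lock(&example_lock);
            list_add(item, &example_list);
            raw_spin_unlock(&example_lock);
    }
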
goto out_free_group_list;
/* prevent changes to the threadgroup list while we take a snapshot. */
- rcu_read_lock();
+ read_lock(&tasklist_lock);
if (!thread_group_leader(leader)) {
/*
 * a race with de_thread from another thread's exec() may strip
 * us of our leadership, if this happens, there is no choice but to
 * throw this task away and try again (from cgroup_procs_write);
* this is "double-double-toil-and-trouble-check locking".
*/
- rcu_read_unlock();
+ read_unlock(&tasklist_lock);
retval = -EAGAIN;
goto out_free_group_list;
}
} while_each_thread(leader, tsk);
/* remember the number of threads in the array for later. */
group_size = i;
- rcu_read_unlock();
+ read_unlock(&tasklist_lock);
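
Swapping rcu_read_lock() for read_lock(&tasklist_lock) changes the guarantee, not just the lock type: RCU only keeps the task structs from being freed, it does not stop de_thread() in another thread's exec() from changing which task is the group leader while the snapshot loop runs. Holding tasklist_lock for read excludes that, so the leader check plus the while_each_thread() walk see a stable threadgroup. A hedged sketch of the snapshot shape (store_task() is a hypothetical helper standing in for the flex_array bookkeeping):

    #include <linux/errno.h>
    #include <linux/sched.h>

    static void store_task(int i, struct task_struct *tsk) { /* ... */ }

    static int snapshot_threadgroup(struct task_struct *leader)
    {
            struct task_struct *tsk = leader;
            int i = 0;

            read_lock(&tasklist_lock);      /* blocks de_thread() */
            if (!thread_group_leader(leader)) {
                    read_unlock(&tasklist_lock);
                    return -EAGAIN;         /* caller retries the attach */
            }
            do {
                    store_task(i++, tsk);
            } while_each_thread(leader, tsk);
            read_unlock(&tasklist_lock);
            return i;                       /* the group_size above */
    }
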
/*
* step 1: check that we can legitimately attach to the cgroup.
oldcgrp = task_cgroup_from_root(tsk, root);
if (cgrp == oldcgrp)
continue;
- /* attach each task to each subsystem */
- for_each_subsys(root, ss) {
- if (ss->attach_task)
- ss->attach_task(cgrp, tsk);
- }
/* if the thread is PF_EXITING, it can just get skipped. */
retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, true);
- BUG_ON(retval != 0 && retval != -ESRCH);
+ if (retval == 0) {
+ /* attach each task to each subsystem */
+ for_each_subsys(root, ss) {
+ if (ss->attach_task)
+ ss->attach_task(cgrp, tsk);
+ }
+ } else {
+ BUG_ON(retval != -ESRCH);
+ }
}
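
This reorder is the substance of the hunk: ss->attach_task() now fires only for tasks that cgroup_task_migrate() actually moved. The single tolerated failure is -ESRCH, returned when the task is already exiting (the PF_EXITING case noted above), and such a task is skipped rather than attached; any other error still indicates a kernel bug, hence the narrowed BUG_ON(). Restated as a minimal self-contained sketch (try_move()/notify_subsystems() are hypothetical stand-ins):

    #include <linux/bug.h>
    #include <linux/errno.h>

    static int try_move(void) { return 0; }  /* cgroup_task_migrate() stand-in */
    static void notify_subsystems(void) { }  /* ss->attach_task() loop stand-in */

    static void move_then_notify(void)
    {
            int ret = try_move();

            if (ret == 0)
                    notify_subsystems();    /* only after a successful move */
            else
                    BUG_ON(ret != -ESRCH);  /* exiting task: skip silently */
    }
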
/* nothing is sensitive to fork() after this point. */
finish_wait(&cgroup_rmdir_waitq, &wait);
clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
- spin_lock(&release_list_lock);
+ raw_spin_lock(&release_list_lock);
set_bit(CGRP_REMOVED, &cgrp->flags);
if (!list_empty(&cgrp->release_list))
list_del_init(&cgrp->release_list);
- spin_unlock(&release_list_lock);
+ raw_spin_unlock(&release_list_lock);
cgroup_lock_hierarchy(cgrp->root);
/* delete this cgroup from parent->children */
* already queued for a userspace notification, queue
* it now */
int need_schedule_work = 0;
- spin_lock(&release_list_lock);
+ raw_spin_lock(&release_list_lock);
if (!cgroup_is_removed(cgrp) &&
list_empty(&cgrp->release_list)) {
list_add(&cgrp->release_list, &release_list);
need_schedule_work = 1;
}
- spin_unlock(&release_list_lock);
+ raw_spin_unlock(&release_list_lock);
if (need_schedule_work)
schedule_work(&release_agent_work);
}
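
check_for_release() keeps the raw-lock critical section down to the list manipulation and records the decision in need_schedule_work, so schedule_work() is only called after the lock is dropped. Note the list_empty(&cgrp->release_list) idiom: because entries are removed with list_del_init(), an unqueued cgroup's list node points to itself, and list_empty() on the node answers "is it queued?". A self-contained sketch of the queueing side (pending/pending_lock/drain_work are hypothetical):

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    static LIST_HEAD(pending);
    static DEFINE_RAW_SPINLOCK(pending_lock);
    static void drain_fn(struct work_struct *w) { } /* drain side sketched below */
    static DECLARE_WORK(drain_work, drain_fn);

    static void maybe_queue(struct list_head *node)
    {
            int kick = 0;

            raw_spin_lock(&pending_lock);
            if (list_empty(node)) {         /* self-linked => not yet queued */
                    list_add(node, &pending);
                    kick = 1;
            }
            raw_spin_unlock(&pending_lock);

            if (kick)                       /* outside the raw lock */
                    schedule_work(&drain_work);
    }
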
{
BUG_ON(work != &release_agent_work);
mutex_lock(&cgroup_mutex);
- spin_lock(&release_list_lock);
+ raw_spin_lock(&release_list_lock);
while (!list_empty(&release_list)) {
char *argv[3], *envp[3];
int i;
char *pathbuf = NULL, *agentbuf = NULL;
struct cgroup *cgrp = list_entry(release_list.next,
		struct cgroup,
		release_list);
list_del_init(&cgrp->release_list);
- spin_unlock(&release_list_lock);
+ raw_spin_unlock(&release_list_lock);
pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!pathbuf)
goto continue_free;
continue_free:
kfree(pathbuf);
kfree(agentbuf);
- spin_lock(&release_list_lock);
+ raw_spin_lock(&release_list_lock);
}
- spin_unlock(&release_list_lock);
+ raw_spin_unlock(&release_list_lock);
mutex_unlock(&cgroup_mutex);
}
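
The drain side, cgroup_release_agent(), shows the matching pattern for emptying such a list when the per-item work must sleep (the GFP_KERNEL allocation and, in the full function, call_usermodehelper()): detach one entry with list_del_init() under the raw lock, drop the lock for the sleeping part, then retake it before re-testing the loop condition. Continuing the hypothetical pending/pending_lock sketch from above, redeclared so it stands alone:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    static LIST_HEAD(pending);                     /* as in the sketch above */
    static DEFINE_RAW_SPINLOCK(pending_lock);

    static void handle(struct list_head *node) { } /* hypothetical; may sleep */

    static void drain_pending(void)
    {
            raw_spin_lock(&pending_lock);
            while (!list_empty(&pending)) {
                    struct list_head *node = pending.next;

                    list_del_init(node);            /* node is re-queueable again */
                    raw_spin_unlock(&pending_lock);

                    handle(node);                   /* sleep-safe: lock dropped */

                    raw_spin_lock(&pending_lock);   /* retake before loop test */
            }
            raw_spin_unlock(&pending_lock);
    }
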
rcu_assign_pointer(id->css, NULL);
rcu_assign_pointer(css->id, NULL);
- spin_lock(&ss->id_lock);
+ write_lock(&ss->id_lock);
idr_remove(&ss->idr, id->id);
- spin_unlock(&ss->id_lock);
+ write_unlock(&ss->id_lock);
kfree_rcu(id, rcu_head);
}
EXPORT_SYMBOL_GPL(free_css_id);
error = -ENOMEM;
goto err_out;
}
- spin_lock(&ss->id_lock);
+ write_lock(&ss->id_lock);
/* Don't use 0. allocates an ID of 1-65535 */
error = idr_get_new_above(&ss->idr, newid, 1, &myid);
- spin_unlock(&ss->id_lock);
+ write_unlock(&ss->id_lock);
/* Returns error when there are no free spaces for new ID.*/
if (error) {
return newid;
remove_idr:
error = -ENOSPC;
- spin_lock(&ss->id_lock);
+ write_lock(&ss->id_lock);
idr_remove(&ss->idr, myid);
- spin_unlock(&ss->id_lock);
+ write_unlock(&ss->id_lock);
err_out:
kfree(newid);
return ERR_PTR(error);
{
struct css_id *newid;
- spin_lock_init(&ss->id_lock);
+ rwlock_init(&ss->id_lock);
idr_init(&ss->idr);
newid = get_new_cssid(ss, 0);
* scan next entry from bitmap(tree), tmpid is updated after
* idr_get_next().
*/
- spin_lock(&ss->id_lock);
+ read_lock(&ss->id_lock);
tmp = idr_get_next(&ss->idr, &tmpid);
- spin_unlock(&ss->id_lock);
+ read_unlock(&ss->id_lock);
if (!tmp)
break;
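
The ss->id_lock change is a different kind of conversion: spinlock_t to rwlock_t rather than to a raw lock. The scan path here, idr_get_next() in the css-walking loop, only reads the idr and can run concurrently with other readers, while the rare writers, idr_get_new_above() and idr_remove(), still take the lock exclusively via write_lock(). A minimal sketch of the reader/writer split over a generic idr (map/map_lock are hypothetical names):

    #include <linux/idr.h>
    #include <linux/spinlock.h>

    static DEFINE_IDR(map);
    static DEFINE_RWLOCK(map_lock);

    static void *lookup_next(int *id)
    {
            void *p;

            read_lock(&map_lock);           /* readers proceed in parallel */
            p = idr_get_next(&map, id);
            read_unlock(&map_lock);
            return p;
    }

    static void remove_entry(int id)
    {
            write_lock(&map_lock);          /* writers remain exclusive */
            idr_remove(&map, id);
            write_unlock(&map_lock);
    }
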