/*
 *  Processor and Memory placement constraints for sets of tasks.
 *
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004 Silicon Graphics, Inc.
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *  Portions Copyright (c) 2004 Silicon Graphics, Inc.
 *
 *  2003-10-10 Written by Simon Derr <simon.derr@bull.net>
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson <pj@sgi.com>
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>

#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>

#define CPUSET_SUPER_MAGIC		0x27e0eb

struct cpuset {
        unsigned long flags;		/* "unsigned long" so bitops work */
        cpumask_t cpus_allowed;		/* CPUs allowed to tasks in cpuset */
        nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */

        /*
         * Count is atomic so can incr (fork) or decr (exit) without a lock.
         */
        atomic_t count;			/* count tasks using this cpuset */

        /*
         * We link our 'sibling' struct into our parent's 'children'.
         * Our children link their 'sibling' into our 'children'.
         */
        struct list_head sibling;	/* my parent's children */
        struct list_head children;	/* my children */

        struct cpuset *parent;		/* my parent */
        struct dentry *dentry;		/* cpuset fs entry */

        /*
         * Copy of global cpuset_mems_generation as of the most
         * recent time this cpuset changed its mems_allowed.
         */
        int mems_generation;
};

/* bits in struct cpuset flags field */
typedef enum {
        CS_CPU_EXCLUSIVE,
        CS_MEM_EXCLUSIVE,
        CS_REMOVED,
        CS_NOTIFY_ON_RELEASE
} cpuset_flagbits_t;

/* convenient tests for these bits */
static inline int is_cpu_exclusive(const struct cpuset *cs)
{
        return !!test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
        return !!test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_removed(const struct cpuset *cs)
{
        return !!test_bit(CS_REMOVED, &cs->flags);
}

static inline int notify_on_release(const struct cpuset *cs)
{
        return !!test_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
}

/*
 * Increment this atomic integer every time any cpuset changes its
 * mems_allowed value.  Users of cpusets can track this generation
 * number, and avoid having to lock and reload mems_allowed unless
 * the cpuset they're using changes generation.
 *
 * A single, global generation is needed because attach_task() could
 * reattach a task to a different cpuset, which must not have its
 * generation numbers aliased with those of that task's previous cpuset.
 *
 * Generations are needed for mems_allowed because one task cannot
 * modify another's memory placement.  So we must enable every task,
 * on every visit to __alloc_pages(), to efficiently check whether
 * its current->cpuset->mems_allowed has changed, requiring an update
 * of its current->mems_allowed.
 */
static atomic_t cpuset_mems_generation = ATOMIC_INIT(1);

static struct cpuset top_cpuset = {
        .flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
        .cpus_allowed = CPU_MASK_ALL,
        .mems_allowed = NODE_MASK_ALL,
        .count = ATOMIC_INIT(0),
        .sibling = LIST_HEAD_INIT(top_cpuset.sibling),
        .children = LIST_HEAD_INIT(top_cpuset.children),
        .parent = NULL,
        .dentry = NULL,
        .mems_generation = 0,
};

static struct vfsmount *cpuset_mount;
static struct super_block *cpuset_sb = NULL;

/*
 * We have two global cpuset semaphores below.  They can nest.
 * It is ok to first take manage_sem, then nest callback_sem.  We also
 * require taking task_lock() when dereferencing a task's cpuset pointer.
 * See "The task_lock() exception", at the end of this comment.
 *
 * A task must hold both semaphores to modify cpusets.  If a task
 * holds manage_sem, then it blocks others wanting that semaphore,
 * ensuring that it is the only task able to also acquire callback_sem
 * and be able to modify cpusets.  It can perform various checks on
 * the cpuset structure first, knowing nothing will change.  It can
 * also allocate memory while just holding manage_sem.  While it is
 * performing these checks, various callback routines can briefly
 * acquire callback_sem to query cpusets.  Once it is ready to make
 * the changes, it takes callback_sem, blocking everyone else.
 *
 * Calls to the kernel memory allocator can not be made while holding
 * callback_sem, as that would risk double tripping on callback_sem
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_sem, then it has read-only
 * access to cpusets.
 *
 * The task_struct fields mems_allowed and mems_generation may only
 * be accessed in the context of that task, so require no locks.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding manage_sem or callback_sem can't rely
 * on the count field not changing.  However, if the count goes to
 * zero, then only attach_task(), which holds both semaphores, can
 * increment it again.  Because a count of zero means that no tasks
 * are currently attached, therefore there is no way a task attached
 * to that cpuset can fork (the other way to increment the count).
 * So code holding manage_sem or callback_sem can safely assume that
 * if the count is zero, it will stay zero.  Similarly, if a task
 * holds manage_sem or callback_sem on a cpuset with zero count, it
 * knows that the cpuset won't be removed, as cpuset_rmdir() needs
 * both of those semaphores.
 *
 * A possible optimization to improve parallelism would be to make
 * callback_sem a R/W semaphore (rwsem), allowing the callback routines
 * to proceed in parallel, with read access, until the holder of
 * manage_sem needed to take this rwsem for exclusive write access
 * and modify some cpusets.
 *
 * The cpuset_common_file_write handler for operations that modify
 * the cpuset hierarchy holds manage_sem across the entire operation,
 * single threading all such cpuset modifications across the system.
 *
 * The cpuset_common_file_read() handlers only hold callback_sem across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * The fork and exit callbacks cpuset_fork() and cpuset_exit(), don't
 * (usually) take either semaphore.  These are the two most performance
 * critical pieces of code here.  The exception occurs on cpuset_exit(),
 * when a task in a notify_on_release cpuset exits.  Then manage_sem
 * is taken, and if the cpuset count is zero, a usermode call made
 * to /sbin/cpuset_release_agent with the name of the cpuset (path
 * relative to the root of cpuset file system) as the argument.
 *
 * A cpuset can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cpusets is empty.  Since all
 * tasks in the system use _some_ cpuset, and since there is always at
 * least one task in the system (init, pid == 1), therefore, top_cpuset
 * always has either children cpusets and/or using tasks.  So we don't
 * need a special hack to ensure that top_cpuset cannot be deleted.
 *
 * The above "Tale of Two Semaphores" would be complete, but for:
 *
 *	The task_lock() exception
 *
 * The need for this exception arises from the action of attach_task(),
 * which overwrites one task's cpuset pointer with another.  It does
 * so using both semaphores, however there are several performance
 * critical places that need to reference task->cpuset without the
 * expense of grabbing a system global semaphore.  Therefore except as
 * noted below, when dereferencing or, as in attach_task(), modifying
 * a task's cpuset pointer we use task_lock(), which acts on a spinlock
 * (task->alloc_lock) already in the task_struct routinely used for
 * such matters.
 */

static DECLARE_MUTEX(manage_sem);
static DECLARE_MUTEX(callback_sem);
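
/*
 * A minimal sketch of the nesting described above, as a typical
 * cpuset-modifying path would use it.  The helper example_update()
 * is hypothetical, shown only to illustrate the locking order:
 *
 *	static void example_update(struct cpuset *cs, cpumask_t newmask)
 *	{
 *		down(&manage_sem);		(checks, allocations)
 *		down(&callback_sem);		(briefly exclude readers)
 *		cs->cpus_allowed = newmask;
 *		up(&callback_sem);
 *		up(&manage_sem);
 *	}
 *
 * Query paths, such as the kernel callbacks, take only callback_sem,
 * and only around the few instructions that copy a mask back out.
 */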

/*
 * A couple of forward declarations required, due to cyclic reference loop:
 *  cpuset_mkdir -> cpuset_create -> cpuset_populate_dir -> cpuset_add_file
 *  -> cpuset_create_file -> cpuset_dir_inode_operations -> cpuset_mkdir.
 */

static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode);
static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry);

static struct backing_dev_info cpuset_backing_dev_info = {
        .ra_pages = 0,		/* No readahead */
        .capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
};

static struct inode *cpuset_new_inode(mode_t mode)
{
        struct inode *inode = new_inode(cpuset_sb);

        if (inode) {
                inode->i_mode = mode;
                inode->i_uid = current->fsuid;
                inode->i_gid = current->fsgid;
                inode->i_blksize = PAGE_CACHE_SIZE;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                inode->i_mapping->backing_dev_info = &cpuset_backing_dev_info;
        }
        return inode;
}

static void cpuset_diput(struct dentry *dentry, struct inode *inode)
{
        /* is dentry a directory ? if so, kfree() associated cpuset */
        if (S_ISDIR(inode->i_mode)) {
                struct cpuset *cs = dentry->d_fsdata;
                BUG_ON(!(is_removed(cs)));
                kfree(cs);
        }
        iput(inode);
}

static struct dentry_operations cpuset_dops = {
        .d_iput = cpuset_diput,
};

static struct dentry *cpuset_get_dentry(struct dentry *parent, const char *name)
{
        struct dentry *d = lookup_one_len(name, parent, strlen(name));

        if (!IS_ERR(d))
                d->d_op = &cpuset_dops;
        return d;
}

static void remove_dir(struct dentry *d)
{
        struct dentry *parent = dget(d->d_parent);

        d_delete(d);
        simple_rmdir(parent->d_inode, d);
        dput(parent);
}

/*
 * NOTE : the dentry must have been dget()'ed
 */
static void cpuset_d_remove_dir(struct dentry *dentry)
{
        struct list_head *node;

        spin_lock(&dcache_lock);
        node = dentry->d_subdirs.next;
        while (node != &dentry->d_subdirs) {
                struct dentry *d = list_entry(node, struct dentry, d_child);
                list_del_init(node);
                if (d->d_inode) {
                        d = dget_locked(d);
                        spin_unlock(&dcache_lock);
                        d_delete(d);
                        simple_unlink(dentry->d_inode, d);
                        dput(d);
                        spin_lock(&dcache_lock);
                }
                node = dentry->d_subdirs.next;
        }
        list_del_init(&dentry->d_child);
        spin_unlock(&dcache_lock);
        remove_dir(dentry);
}

static struct super_operations cpuset_ops = {
        .statfs = simple_statfs,
        .drop_inode = generic_delete_inode,
};

static int cpuset_fill_super(struct super_block *sb, void *unused_data,
                                                        int unused_silent)
{
        struct inode *inode;
        struct dentry *root;

        sb->s_blocksize = PAGE_CACHE_SIZE;
        sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
        sb->s_magic = CPUSET_SUPER_MAGIC;
        sb->s_op = &cpuset_ops;
        cpuset_sb = sb;

        inode = cpuset_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR);
        if (!inode)
                return -ENOMEM;
        inode->i_op = &simple_dir_inode_operations;
        inode->i_fop = &simple_dir_operations;
        /* directories start off with i_nlink == 2 (for "." entry) */
        inode->i_nlink++;

        root = d_alloc_root(inode);
        if (!root) {
                iput(inode);
                return -ENOMEM;
        }
        sb->s_root = root;
        return 0;
}

static struct super_block *cpuset_get_sb(struct file_system_type *fs_type,
                                        int flags, const char *unused_dev_name,
                                        void *data)
{
        return get_sb_single(fs_type, flags, data, cpuset_fill_super);
}

static struct file_system_type cpuset_fs_type = {
        .name = "cpuset",
        .get_sb = cpuset_get_sb,
        .kill_sb = kill_litter_super,
};

/*
 * The files in the cpuset filesystem mostly have a very simple read/write
 * handling, some common function will take care of it. Nevertheless some cases
 * (read tasks) are special and therefore I define this structure for every
 * kind of file.
 *
 * When reading/writing to a file:
 *	- the cpuset to use in file->f_dentry->d_parent->d_fsdata
 *	- the 'cftype' of the file is file->f_dentry->d_fsdata
 */

struct cftype {
        char *name;
        int private;
        int (*open) (struct inode *inode, struct file *file);
        ssize_t (*read) (struct file *file, char __user *buf, size_t nbytes,
                                                        loff_t *ppos);
        int (*write) (struct file *file, const char __user *buf, size_t nbytes,
                                                        loff_t *ppos);
        int (*release) (struct inode *inode, struct file *file);
};

static inline struct cpuset *__d_cs(struct dentry *dentry)
{
        return dentry->d_fsdata;
}

static inline struct cftype *__d_cft(struct dentry *dentry)
{
        return dentry->d_fsdata;
}

/*
 * Call with manage_sem held.  Writes path of cpuset into buf.
 * Returns 0 on success, -errno on error.
 */

static int cpuset_path(const struct cpuset *cs, char *buf, int buflen)
{
        char *start;

        start = buf + buflen;

        *--start = '\0';
        for (;;) {
                int len = cs->dentry->d_name.len;
                if ((start -= len) < buf)
                        return -ENAMETOOLONG;
                memcpy(start, cs->dentry->d_name.name, len);
                cs = cs->parent;
                if (!cs)
                        break;
                if (!cs->parent)
                        continue;
                if (--start < buf)
                        return -ENAMETOOLONG;
                *start = '/';
        }
        memmove(buf, start, buf + buflen - start);
        return 0;
}
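
/*
 * For example (illustrative only): for a cpuset whose directory is
 * /dev/cpuset/alpha/beta, cpuset_path() writes "/alpha/beta" into
 * buf, assuming the cpuset file system is mounted at /dev/cpuset.
 */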

/*
 * Notify userspace when a cpuset is released, by running
 * /sbin/cpuset_release_agent with the name of the cpuset (path
 * relative to the root of cpuset file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cpuset.
 *
 * This races with the possibility that some other task will be
 * attached to this cpuset before it is removed, or that some other
 * user task will 'mkdir' a child cpuset of this cpuset.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cpuset is no longer
 * unused, and this cpuset will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is 0, which means don't
 * wait.  The separate /sbin/cpuset_release_agent task is forked by
 * call_usermodehelper(), then control in this thread returns here,
 * without waiting for the release agent task.  We don't bother to
 * wait because the caller of this routine has no use for the exit
 * status of the /sbin/cpuset_release_agent task, so no sense holding
 * our caller up for that.
 *
 * When we had only one cpuset semaphore, we had to call this
 * without holding it, to avoid deadlock when call_usermodehelper()
 * allocated memory.  With two locks, we could now call this while
 * holding manage_sem, but we still don't, so as to minimize
 * the time manage_sem is held.
 */

static void cpuset_release_agent(const char *pathbuf)
{
        char *argv[3], *envp[3];
        int i;

        if (!pathbuf)
                return;

        i = 0;
        argv[i++] = "/sbin/cpuset_release_agent";
        argv[i++] = (char *)pathbuf;
        argv[i] = NULL;

        i = 0;
        /* minimal command environment */
        envp[i++] = "HOME=/";
        envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
        envp[i] = NULL;

        call_usermodehelper(argv[0], argv, envp, 0);
        kfree(pathbuf);
}

/*
 * Either cs->count of using tasks transitioned to zero, or the
 * cs->children list of child cpusets just became empty.  If this
 * cs is notify_on_release() and now both the user count is zero and
 * the list of children is empty, prepare cpuset path in a kmalloc'd
 * buffer, to be returned via ppathbuf, so that the caller can invoke
 * cpuset_release_agent() with it later on, once manage_sem is dropped.
 * Call here with manage_sem held.
 *
 * This check_for_release() routine is responsible for kmalloc'ing
 * pathbuf.  The above cpuset_release_agent() is responsible for
 * kfree'ing pathbuf.  The caller of these routines is responsible
 * for providing a pathbuf pointer, initialized to NULL, then
 * calling check_for_release() with manage_sem held and the address
 * of the pathbuf pointer, then dropping manage_sem, then calling
 * cpuset_release_agent() with pathbuf, as set by check_for_release().
 */

static void check_for_release(struct cpuset *cs, char **ppathbuf)
{
        if (notify_on_release(cs) && atomic_read(&cs->count) == 0 &&
            list_empty(&cs->children)) {
                char *buf;

                buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
                if (!buf)
                        return;
                if (cpuset_path(cs, buf, PAGE_SIZE) < 0)
                        kfree(buf);
                else
                        *ppathbuf = buf;
        }
}
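
/*
 * The caller protocol described above, in sketch form.  The surrounding
 * function is hypothetical, but the pattern matches how cpuset_rmdir()
 * and cpuset_exit() use these two routines:
 *
 *	char *pathbuf = NULL;
 *
 *	down(&manage_sem);
 *	check_for_release(cs, &pathbuf);
 *	up(&manage_sem);
 *	cpuset_release_agent(pathbuf);	(no-op, and no kfree, if NULL)
 */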

/*
 * Return in *pmask the portion of a cpuset's cpus_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online cpus.  If we get
 * all the way to the top and still haven't found any online cpus,
 * return cpu_online_map.  Or if passed a NULL cs from an exit'ing
 * task, return cpu_online_map.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_map.
 *
 * Call with callback_sem held.
 */

static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
{
        while (cs && !cpus_intersects(cs->cpus_allowed, cpu_online_map))
                cs = cs->parent;
        if (cs)
                cpus_and(*pmask, cs->cpus_allowed, cpu_online_map);
        else
                *pmask = cpu_online_map;
        BUG_ON(!cpus_intersects(*pmask, cpu_online_map));
}
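
/*
 * For example (illustrative numbers): if cs->cpus_allowed is 4-7 but
 * only cpus 0-3 are online, the loop above walks up to the first
 * ancestor whose cpus_allowed intersects cpu_online_map, and *pmask
 * gets that intersection; if no ancestor qualifies, *pmask is simply
 * cpu_online_map.
 */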

/*
 * Return in *pmask the portion of a cpuset's mems_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online mems.  If we get
 * all the way to the top and still haven't found any online mems,
 * return node_online_map.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_online_map.
 *
 * Call with callback_sem held.
 */

static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
{
        while (cs && !nodes_intersects(cs->mems_allowed, node_online_map))
                cs = cs->parent;
        if (cs)
                nodes_and(*pmask, cs->mems_allowed, node_online_map);
        else
                *pmask = node_online_map;
        BUG_ON(!nodes_intersects(*pmask, node_online_map));
}

/*
 * Refresh current task's mems_allowed and mems_generation from the
 * current task's cpuset.
 *
 * Call without callback_sem or task_lock() held.  May be called with
 * or without manage_sem held.  Will acquire task_lock() and might
 * acquire callback_sem during call.
 *
 * The task_lock() is required to dereference current->cpuset safely.
 * Without it, we could pick up the pointer value of current->cpuset
 * in one instruction, and then attach_task could give us a different
 * cpuset, and then the cpuset we had could be removed and freed,
 * and then on our next instruction, we could dereference a no longer
 * valid cpuset pointer to get its mems_generation field.
 *
 * This routine is needed to update the per-task mems_allowed data,
 * within the task's context, when it is trying to allocate memory
 * (in various mm/mempolicy.c routines) and notices that some other
 * task has been modifying its cpuset.
 */

static void refresh_mems(void)
{
        int my_cpusets_mem_gen;

        task_lock(current);
        my_cpusets_mem_gen = current->cpuset->mems_generation;
        task_unlock(current);

        if (current->cpuset_mems_generation != my_cpusets_mem_gen) {
                struct cpuset *cs;
                nodemask_t oldmem = current->mems_allowed;

                down(&callback_sem);
                task_lock(current);
                cs = current->cpuset;
                guarantee_online_mems(cs, &current->mems_allowed);
                current->cpuset_mems_generation = cs->mems_generation;
                task_unlock(current);
                up(&callback_sem);
                if (!nodes_equal(oldmem, current->mems_allowed))
                        numa_policy_rebind(&oldmem, &current->mems_allowed);
        }
}

/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set.  Call holding manage_sem.
 */

static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
        return	cpus_subset(p->cpus_allowed, q->cpus_allowed) &&
                nodes_subset(p->mems_allowed, q->mems_allowed) &&
                is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
                is_mem_exclusive(p) <= is_mem_exclusive(q);
}

/*
 * validate_change() - Used to validate that any proposed cpuset change
 *		       follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid?  Presumes
 * manage_sem held.
 *
 * 'cur' is the address of an actual, in-use cpuset.  Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */

static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
{
        struct cpuset *c, *par;

        /* Each of our child cpusets must be a subset of us */
        list_for_each_entry(c, &cur->children, sibling) {
                if (!is_cpuset_subset(c, trial))
                        return -EBUSY;
        }

        /* Remaining checks don't apply to root cpuset */
        if ((par = cur->parent) == NULL)
                return 0;

        /* We must be a subset of our parent cpuset */
        if (!is_cpuset_subset(trial, par))
                return -EACCES;

        /* If either I or some sibling (!= me) is exclusive, we can't overlap */
        list_for_each_entry(c, &par->children, sibling) {
                if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
                    c != cur &&
                    cpus_intersects(trial->cpus_allowed, c->cpus_allowed))
                        return -EINVAL;
                if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
                    c != cur &&
                    nodes_intersects(trial->mems_allowed, c->mems_allowed))
                        return -EINVAL;
        }

        return 0;
}
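
/*
 * Typical use, in sketch form.  This mirrors update_cpumask() and
 * update_flag() below rather than introducing any new interface:
 *
 *	struct cpuset trialcs;
 *
 *	trialcs = *cs;				(bulk structure copy)
 *	trialcs.cpus_allowed = newmask;		(proposed change)
 *	retval = validate_change(cs, &trialcs);
 *	if (retval == 0)
 *		cs->cpus_allowed = trialcs.cpus_allowed;
 */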

/*
 * For a given cpuset cur, partition the system as follows
 * a. All cpus in the parent cpuset's cpus_allowed that are not part of any
 *    exclusive child cpusets
 * b. All cpus in the current cpuset's cpus_allowed that are not part of any
 *    exclusive child cpusets
 * Build these two partitions by calling partition_sched_domains
 *
 * Call with manage_sem held.  May nest a call to the
 * lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
 */

static void update_cpu_domains(struct cpuset *cur)
{
        struct cpuset *c, *par = cur->parent;
        cpumask_t pspan, cspan;

        if (par == NULL || cpus_empty(cur->cpus_allowed))
                return;

        /*
         * Get all cpus from parent's cpus_allowed not part of exclusive
         * children
         */
        pspan = par->cpus_allowed;
        list_for_each_entry(c, &par->children, sibling) {
                if (is_cpu_exclusive(c))
                        cpus_andnot(pspan, pspan, c->cpus_allowed);
        }
        if (is_removed(cur) || !is_cpu_exclusive(cur)) {
                cpus_or(pspan, pspan, cur->cpus_allowed);
                if (cpus_equal(pspan, cur->cpus_allowed))
                        return;
                cspan = CPU_MASK_NONE;
        } else {
                if (cpus_empty(pspan))
                        return;
                cspan = cur->cpus_allowed;
                /*
                 * Get all cpus from current cpuset's cpus_allowed not part
                 * of exclusive children
                 */
                list_for_each_entry(c, &cur->children, sibling) {
                        if (is_cpu_exclusive(c))
                                cpus_andnot(cspan, cspan, c->cpus_allowed);
                }
        }

        lock_cpu_hotplug();
        partition_sched_domains(&pspan, &cspan);
        unlock_cpu_hotplug();
}

/*
 * Call with manage_sem held.  May take callback_sem during call.
 */

static int update_cpumask(struct cpuset *cs, char *buf)
{
        struct cpuset trialcs;
        int retval, cpus_unchanged;

        trialcs = *cs;
        retval = cpulist_parse(buf, trialcs.cpus_allowed);
        if (retval < 0)
                return retval;
        cpus_and(trialcs.cpus_allowed, trialcs.cpus_allowed, cpu_online_map);
        if (cpus_empty(trialcs.cpus_allowed))
                return -ENOSPC;
        retval = validate_change(cs, &trialcs);
        if (retval < 0)
                return retval;
        cpus_unchanged = cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed);
        down(&callback_sem);
        cs->cpus_allowed = trialcs.cpus_allowed;
        up(&callback_sem);
        if (is_cpu_exclusive(cs) && !cpus_unchanged)
                update_cpu_domains(cs);
        return 0;
}

/*
 * Call with manage_sem held.  May take callback_sem during call.
 */

static int update_nodemask(struct cpuset *cs, char *buf)
{
        struct cpuset trialcs;
        int retval;

        trialcs = *cs;
        retval = nodelist_parse(buf, trialcs.mems_allowed);
        if (retval < 0)
                return retval;
        nodes_and(trialcs.mems_allowed, trialcs.mems_allowed, node_online_map);
        if (nodes_empty(trialcs.mems_allowed))
                return -ENOSPC;
        retval = validate_change(cs, &trialcs);
        if (retval == 0) {
                down(&callback_sem);
                cs->mems_allowed = trialcs.mems_allowed;
                atomic_inc(&cpuset_mems_generation);
                cs->mems_generation = atomic_read(&cpuset_mems_generation);
                up(&callback_sem);
        }
        return retval;
}

/*
 * update_flag - read a 0 or a 1 in a file and update associated flag
 * bit:	the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE,
 *				CS_NOTIFY_ON_RELEASE)
 * cs:	the cpuset to update
 * buf:	the buffer where we read the 0 or 1
 *
 * Call with manage_sem held.
 */

static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
{
        int turning_on;
        struct cpuset trialcs;
        int err, cpu_exclusive_changed;

        turning_on = (simple_strtoul(buf, NULL, 10) != 0);

        trialcs = *cs;
        if (turning_on)
                set_bit(bit, &trialcs.flags);
        else
                clear_bit(bit, &trialcs.flags);

        err = validate_change(cs, &trialcs);
        if (err < 0)
                return err;
        cpu_exclusive_changed =
                (is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs));
        down(&callback_sem);
        if (turning_on)
                set_bit(bit, &cs->flags);
        else
                clear_bit(bit, &cs->flags);
        up(&callback_sem);

        if (cpu_exclusive_changed)
                update_cpu_domains(cs);
        return 0;
}
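
/*
 * From userspace this is driven by a plain write of "0" or "1", for
 * example (illustrative, assuming the cpuset fs is mounted at
 * /dev/cpuset and a child cpuset named alpha exists):
 *
 *	echo 1 > /dev/cpuset/alpha/cpu_exclusive
 *
 * which reaches here as update_flag(CS_CPU_EXCLUSIVE, cs, "1").
 */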

/*
 * Attach task specified by pid in 'pidbuf' to cpuset 'cs', possibly
 * writing the path of the old cpuset in 'ppathbuf' if it needs to be
 * notified on release.
 *
 * Call holding manage_sem.  May take callback_sem and task_lock of
 * the task 'pid' during call.
 */

static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
{
        pid_t pid;
        struct task_struct *tsk;
        struct cpuset *oldcs;
        cpumask_t cpus;

        if (sscanf(pidbuf, "%d", &pid) != 1)
                return -EIO;
        if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
                return -ENOSPC;

        if (pid) {
                read_lock(&tasklist_lock);

                tsk = find_task_by_pid(pid);
                if (!tsk || tsk->flags & PF_EXITING) {
                        read_unlock(&tasklist_lock);
                        return -ESRCH;
                }

                get_task_struct(tsk);
                read_unlock(&tasklist_lock);

                if ((current->euid) && (current->euid != tsk->uid)
                    && (current->euid != tsk->suid)) {
                        put_task_struct(tsk);
                        return -EACCES;
                }
        } else {
                tsk = current;
                get_task_struct(tsk);
        }

        down(&callback_sem);

        task_lock(tsk);
        oldcs = tsk->cpuset;
        if (!oldcs) {
                task_unlock(tsk);
                up(&callback_sem);
                put_task_struct(tsk);
                return -ESRCH;
        }
        atomic_inc(&cs->count);
        tsk->cpuset = cs;
        task_unlock(tsk);

        guarantee_online_cpus(cs, &cpus);
        set_cpus_allowed(tsk, cpus);

        up(&callback_sem);
        put_task_struct(tsk);
        if (atomic_dec_and_test(&oldcs->count))
                check_for_release(oldcs, ppathbuf);
        return 0;
}
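
/*
 * From userspace, attaching is just a pid written to the 'tasks' file,
 * for example (illustrative, assuming a mount at /dev/cpuset):
 *
 *	echo $$ > /dev/cpuset/alpha/tasks
 *
 * which arrives here via cpuset_common_file_write() as
 * attach_task(cs, "1234", &pathbuf), with "1234" standing in for the
 * writing shell's pid.
 */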

/* The various types of files and directories in a cpuset file system */

typedef enum {
        FILE_ROOT,
        FILE_DIR,
        FILE_CPULIST,
        FILE_MEMLIST,
        FILE_CPU_EXCLUSIVE,
        FILE_MEM_EXCLUSIVE,
        FILE_NOTIFY_ON_RELEASE,
        FILE_TASKLIST,
} cpuset_filetype_t;

static ssize_t cpuset_common_file_write(struct file *file, const char __user *userbuf,
                                        size_t nbytes, loff_t *unused_ppos)
{
        struct cpuset *cs = __d_cs(file->f_dentry->d_parent);
        struct cftype *cft = __d_cft(file->f_dentry);
        cpuset_filetype_t type = cft->private;
        char *buffer;
        char *pathbuf = NULL;
        int retval = 0;

        /* Crude upper limit on largest legitimate cpulist user might write. */
        if (nbytes > 100 + 6 * NR_CPUS)
                return -E2BIG;

        /* +1 for nul-terminator */
        if ((buffer = kmalloc(nbytes + 1, GFP_KERNEL)) == 0)
                return -ENOMEM;

        if (copy_from_user(buffer, userbuf, nbytes)) {
                retval = -EFAULT;
                goto out1;
        }
        buffer[nbytes] = 0;	/* nul-terminate */

        down(&manage_sem);

        if (is_removed(cs)) {
                retval = -ENODEV;
                goto out2;
        }

        switch (type) {
        case FILE_CPULIST:
                retval = update_cpumask(cs, buffer);
                break;
        case FILE_MEMLIST:
                retval = update_nodemask(cs, buffer);
                break;
        case FILE_CPU_EXCLUSIVE:
                retval = update_flag(CS_CPU_EXCLUSIVE, cs, buffer);
                break;
        case FILE_MEM_EXCLUSIVE:
                retval = update_flag(CS_MEM_EXCLUSIVE, cs, buffer);
                break;
        case FILE_NOTIFY_ON_RELEASE:
                retval = update_flag(CS_NOTIFY_ON_RELEASE, cs, buffer);
                break;
        case FILE_TASKLIST:
                retval = attach_task(cs, buffer, &pathbuf);
                break;
        default:
                retval = -EINVAL;
                goto out2;
        }

        if (retval == 0)
                retval = nbytes;
out2:
        up(&manage_sem);
        cpuset_release_agent(pathbuf);
out1:
        kfree(buffer);
        return retval;
}

static ssize_t cpuset_file_write(struct file *file, const char __user *buf,
                                                size_t nbytes, loff_t *ppos)
{
        ssize_t retval = 0;
        struct cftype *cft = __d_cft(file->f_dentry);
        if (!cft)
                return -ENODEV;

        /* special function ? */
        if (cft->write)
                retval = cft->write(file, buf, nbytes, ppos);
        else
                retval = cpuset_common_file_write(file, buf, nbytes, ppos);

        return retval;
}

/*
 * These ascii lists should be read in a single call, by using a user
 * buffer large enough to hold the entire map.  If read in smaller
 * chunks, there is no guarantee of atomicity.  Since the display format
 * used, list of ranges of sequential numbers, is variable length,
 * and since these maps can change value dynamically, one could read
 * gibberish by doing partial reads while a list was changing.
 * A single large read to a buffer that crosses a page boundary is
 * ok, because the result being copied to user land is not recomputed
 * across a page fault.
 */

static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
{
        cpumask_t mask;

        down(&callback_sem);
        mask = cs->cpus_allowed;
        up(&callback_sem);

        return cpulist_scnprintf(page, PAGE_SIZE, mask);
}

static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
{
        nodemask_t mask;

        down(&callback_sem);
        mask = cs->mems_allowed;
        up(&callback_sem);

        return nodelist_scnprintf(page, PAGE_SIZE, mask);
}
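
/*
 * The list format is the usual comma separated ranges, so a cpuset
 * allowed cpus 0,1,2,3 and 8 reads back as "0-3,8" (illustrative
 * values), and an empty mask reads back as an empty string.
 */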

static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
                                size_t nbytes, loff_t *ppos)
{
        struct cftype *cft = __d_cft(file->f_dentry);
        struct cpuset *cs = __d_cs(file->f_dentry->d_parent);
        cpuset_filetype_t type = cft->private;
        char *page;
        ssize_t retval = 0;
        char *s;

        if (!(page = (char *)__get_free_page(GFP_KERNEL)))
                return -ENOMEM;

        s = page;

        switch (type) {
        case FILE_CPULIST:
                s += cpuset_sprintf_cpulist(s, cs);
                break;
        case FILE_MEMLIST:
                s += cpuset_sprintf_memlist(s, cs);
                break;
        case FILE_CPU_EXCLUSIVE:
                *s++ = is_cpu_exclusive(cs) ? '1' : '0';
                break;
        case FILE_MEM_EXCLUSIVE:
                *s++ = is_mem_exclusive(cs) ? '1' : '0';
                break;
        case FILE_NOTIFY_ON_RELEASE:
                *s++ = notify_on_release(cs) ? '1' : '0';
                break;
        default:
                retval = -EINVAL;
                goto out;
        }
        *s++ = '\n';
        *s = '\0';

        retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
out:
        free_page((unsigned long)page);
        return retval;
}

static ssize_t cpuset_file_read(struct file *file, char __user *buf, size_t nbytes,
                                                                loff_t *ppos)
{
        ssize_t retval = 0;
        struct cftype *cft = __d_cft(file->f_dentry);
        if (!cft)
                return -ENODEV;

        /* special function ? */
        if (cft->read)
                retval = cft->read(file, buf, nbytes, ppos);
        else
                retval = cpuset_common_file_read(file, buf, nbytes, ppos);

        return retval;
}

static int cpuset_file_open(struct inode *inode, struct file *file)
{
        int err;
        struct cftype *cft;

        err = generic_file_open(inode, file);
        if (err)
                return err;

        cft = __d_cft(file->f_dentry);
        if (!cft)
                return -ENODEV;
        if (cft->open)
                err = cft->open(inode, file);
        else
                err = 0;

        return err;
}

static int cpuset_file_release(struct inode *inode, struct file *file)
{
        struct cftype *cft = __d_cft(file->f_dentry);
        if (cft->release)
                return cft->release(inode, file);
        return 0;
}

/*
 * cpuset_rename - Only allow simple rename of directories in place.
 */
static int cpuset_rename(struct inode *old_dir, struct dentry *old_dentry,
                        struct inode *new_dir, struct dentry *new_dentry)
{
        if (!S_ISDIR(old_dentry->d_inode->i_mode))
                return -ENOTDIR;
        if (new_dentry->d_inode)
                return -EEXIST;
        if (old_dir != new_dir)
                return -EIO;
        return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
}

static struct file_operations cpuset_file_operations = {
        .read = cpuset_file_read,
        .write = cpuset_file_write,
        .llseek = generic_file_llseek,
        .open = cpuset_file_open,
        .release = cpuset_file_release,
};

static struct inode_operations cpuset_dir_inode_operations = {
        .lookup = simple_lookup,
        .mkdir = cpuset_mkdir,
        .rmdir = cpuset_rmdir,
        .rename = cpuset_rename,
};

static int cpuset_create_file(struct dentry *dentry, int mode)
{
        struct inode *inode;

        if (!dentry)
                return -ENOENT;
        if (dentry->d_inode)
                return -EEXIST;

        inode = cpuset_new_inode(mode);
        if (!inode)
                return -ENOMEM;

        if (S_ISDIR(mode)) {
                inode->i_op = &cpuset_dir_inode_operations;
                inode->i_fop = &simple_dir_operations;

                /* start off with i_nlink == 2 (for "." entry) */
                inode->i_nlink++;
        } else if (S_ISREG(mode)) {
                inode->i_fop = &cpuset_file_operations;
        }

        d_instantiate(dentry, inode);
        dget(dentry);	/* Extra count - pin the dentry in core */
        return 0;
}

/*
 *	cpuset_create_dir - create a directory for an object.
 *	cs:	the cpuset we create the directory for.
 *		It must have a valid ->parent field
 *		And we are going to fill its ->dentry field.
 *	name:	The name to give to the cpuset directory. Will be copied.
 *	mode:	mode to set on new directory.
 */

static int cpuset_create_dir(struct cpuset *cs, const char *name, int mode)
{
        struct dentry *dentry = NULL;
        struct dentry *parent;
        int error = 0;

        parent = cs->parent->dentry;
        dentry = cpuset_get_dentry(parent, name);
        if (IS_ERR(dentry))
                return PTR_ERR(dentry);
        error = cpuset_create_file(dentry, S_IFDIR | mode);
        if (!error) {
                dentry->d_fsdata = cs;
                parent->d_inode->i_nlink++;
                cs->dentry = dentry;
        }
        dput(dentry);

        return error;
}

static int cpuset_add_file(struct dentry *dir, const struct cftype *cft)
{
        struct dentry *dentry;
        int error;

        down(&dir->d_inode->i_sem);
        dentry = cpuset_get_dentry(dir, cft->name);
        if (!IS_ERR(dentry)) {
                error = cpuset_create_file(dentry, 0644 | S_IFREG);
                if (!error)
                        dentry->d_fsdata = (void *)cft;
                dput(dentry);
        } else
                error = PTR_ERR(dentry);
        up(&dir->d_inode->i_sem);
        return error;
}

/*
 * Stuff for reading the 'tasks' file.
 *
 * Reading this file can return large amounts of data if a cpuset has
 * *lots* of attached tasks.  So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 *
 * Upon tasks file open(), a struct ctr_struct is allocated, that
 * will have a pointer to an array (also allocated here).  The struct
 * ctr_struct * is stored in file->private_data.  Its resources will
 * be freed by release() when the file is closed.  The array is used
 * to sprintf the PIDs and then used by read().
 */

/* cpusets_tasks_read array */
struct ctr_struct {
        char *buf;
        int bufsz;
};

/*
 * Load into 'pidarray' up to 'npids' of the tasks using cpuset 'cs'.
 * Return actual number of pids loaded.  No need to task_lock(p)
 * when reading out p->cpuset, as we don't really care if it changes
 * on the next cycle, and we are not going to try to dereference it.
 */
static inline int pid_array_load(pid_t *pidarray, int npids, struct cpuset *cs)
{
        int n = 0;
        struct task_struct *g, *p;

        read_lock(&tasklist_lock);

        do_each_thread(g, p) {
                if (p->cpuset == cs) {
                        pidarray[n++] = p->pid;
                        if (unlikely(n == npids))
                                goto array_full;
                }
        } while_each_thread(g, p);

array_full:
        read_unlock(&tasklist_lock);
        return n;
}

static int cmppid(const void *a, const void *b)
{
        return *(pid_t *)a - *(pid_t *)b;
}

/*
 * Convert array 'a' of 'npids' pid_t's to a string of newline separated
 * decimal pids in 'buf'.  Don't write more than 'sz' chars, but return
 * count 'cnt' of how many chars would be written if buf were large enough.
 */
static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids)
{
        int cnt = 0;
        int i;

        for (i = 0; i < npids; i++)
                cnt += snprintf(buf + cnt, max(sz - cnt, 0), "%d\n", a[i]);
        return cnt;
}
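
/*
 * For example (illustrative values): pids {7, 12, 303} become the
 * string "7\n12\n303\n", and cnt is 9 whether or not 'sz' was large
 * enough to hold all of it.  The tasks open() below exploits this by
 * calling pid_array_to_buf() once with a tiny buffer just to size the
 * real one, then a second time to fill it.
 */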

/*
 * Handle an open on 'tasks' file.  Prepare a buffer listing the
 * process id's of tasks currently attached to the cpuset being opened.
 *
 * Does not require any specific cpuset semaphores, and does not take any.
 */
static int cpuset_tasks_open(struct inode *unused, struct file *file)
{
        struct cpuset *cs = __d_cs(file->f_dentry->d_parent);
        struct ctr_struct *ctr;
        pid_t *pidarray;
        int npids;
        char c;

        if (!(file->f_mode & FMODE_READ))
                return 0;

        ctr = kmalloc(sizeof(*ctr), GFP_KERNEL);
        if (!ctr)
                goto err0;

        /*
         * If cpuset gets more users after we read count, we won't have
         * enough space - tough.  This race is indistinguishable to the
         * caller from the case that the additional cpuset users didn't
         * show up until sometime later on.
         */
        npids = atomic_read(&cs->count);
        pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL);
        if (!pidarray)
                goto err1;

        npids = pid_array_load(pidarray, npids, cs);
        sort(pidarray, npids, sizeof(pid_t), cmppid, NULL);

        /* Call pid_array_to_buf() twice, first just to get bufsz */
        ctr->bufsz = pid_array_to_buf(&c, sizeof(c), pidarray, npids) + 1;
        ctr->buf = kmalloc(ctr->bufsz, GFP_KERNEL);
        if (!ctr->buf)
                goto err2;
        ctr->bufsz = pid_array_to_buf(ctr->buf, ctr->bufsz, pidarray, npids);

        kfree(pidarray);
        file->private_data = ctr;
        return 0;

err2:
        kfree(pidarray);
err1:
        kfree(ctr);
err0:
        return -ENOMEM;
}

static ssize_t cpuset_tasks_read(struct file *file, char __user *buf,
                                                size_t nbytes, loff_t *ppos)
{
        struct ctr_struct *ctr = file->private_data;

        if (*ppos + nbytes > ctr->bufsz)
                nbytes = ctr->bufsz - *ppos;
        if (copy_to_user(buf, ctr->buf + *ppos, nbytes))
                return -EFAULT;
        *ppos += nbytes;
        return nbytes;
}

static int cpuset_tasks_release(struct inode *unused_inode, struct file *file)
{
        struct ctr_struct *ctr;

        if (file->f_mode & FMODE_READ) {
                ctr = file->private_data;
                kfree(ctr->buf);
                kfree(ctr);
        }
        return 0;
}

/*
 * for the common functions, 'private' gives the type of file
 */

static struct cftype cft_tasks = {
        .name = "tasks",
        .open = cpuset_tasks_open,
        .read = cpuset_tasks_read,
        .release = cpuset_tasks_release,
        .private = FILE_TASKLIST,
};

static struct cftype cft_cpus = {
        .name = "cpus",
        .private = FILE_CPULIST,
};

static struct cftype cft_mems = {
        .name = "mems",
        .private = FILE_MEMLIST,
};

static struct cftype cft_cpu_exclusive = {
        .name = "cpu_exclusive",
        .private = FILE_CPU_EXCLUSIVE,
};

static struct cftype cft_mem_exclusive = {
        .name = "mem_exclusive",
        .private = FILE_MEM_EXCLUSIVE,
};

static struct cftype cft_notify_on_release = {
        .name = "notify_on_release",
        .private = FILE_NOTIFY_ON_RELEASE,
};

static int cpuset_populate_dir(struct dentry *cs_dentry)
{
        int err;

        if ((err = cpuset_add_file(cs_dentry, &cft_cpus)) < 0)
                return err;
        if ((err = cpuset_add_file(cs_dentry, &cft_mems)) < 0)
                return err;
        if ((err = cpuset_add_file(cs_dentry, &cft_cpu_exclusive)) < 0)
                return err;
        if ((err = cpuset_add_file(cs_dentry, &cft_mem_exclusive)) < 0)
                return err;
        if ((err = cpuset_add_file(cs_dentry, &cft_notify_on_release)) < 0)
                return err;
        if ((err = cpuset_add_file(cs_dentry, &cft_tasks)) < 0)
                return err;
        return 0;
}

/*
 *	cpuset_create - create a cpuset
 *	parent:	cpuset that will be parent of the new cpuset.
 *	name:	name of the new cpuset. Will be strcpy'ed.
 *	mode:	mode to set on new inode
 *
 *	Must be called with the semaphore on the parent inode held
 */

static long cpuset_create(struct cpuset *parent, const char *name, int mode)
{
        struct cpuset *cs;
        int err;

        cs = kmalloc(sizeof(*cs), GFP_KERNEL);
        if (!cs)
                return -ENOMEM;

        down(&manage_sem);
        cs->flags = 0;
        if (notify_on_release(parent))
                set_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
        cs->cpus_allowed = CPU_MASK_NONE;
        cs->mems_allowed = NODE_MASK_NONE;
        atomic_set(&cs->count, 0);
        INIT_LIST_HEAD(&cs->sibling);
        INIT_LIST_HEAD(&cs->children);
        atomic_inc(&cpuset_mems_generation);
        cs->mems_generation = atomic_read(&cpuset_mems_generation);

        cs->parent = parent;

        down(&callback_sem);
        list_add(&cs->sibling, &cs->parent->children);
        up(&callback_sem);

        err = cpuset_create_dir(cs, name, mode);
        if (err < 0)
                goto err;

        /*
         * Release manage_sem before cpuset_populate_dir() because it
         * will down() this new directory's i_sem and if we race with
         * another mkdir, we might deadlock.
         */
        up(&manage_sem);

        err = cpuset_populate_dir(cs->dentry);
        /* If err < 0, we have a half-filled directory - oh well ;) */
        return 0;
err:
        list_del(&cs->sibling);
        up(&manage_sem);
        kfree(cs);
        return err;
}

static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
        struct cpuset *c_parent = dentry->d_parent->d_fsdata;

        /* the vfs holds inode->i_sem already */
        return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR);
}

static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
{
        struct cpuset *cs = dentry->d_fsdata;
        struct dentry *d;
        struct cpuset *parent;
        char *pathbuf = NULL;

        /* the vfs holds both inode->i_sem already */

        down(&manage_sem);
        if (atomic_read(&cs->count) > 0) {
                up(&manage_sem);
                return -EBUSY;
        }
        if (!list_empty(&cs->children)) {
                up(&manage_sem);
                return -EBUSY;
        }
        parent = cs->parent;
        down(&callback_sem);
        set_bit(CS_REMOVED, &cs->flags);
        if (is_cpu_exclusive(cs))
                update_cpu_domains(cs);
        list_del(&cs->sibling);	/* delete my sibling from parent->children */
        spin_lock(&cs->dentry->d_lock);
        d = dget(cs->dentry);
        cs->dentry = NULL;
        spin_unlock(&d->d_lock);
        cpuset_d_remove_dir(d);
        dput(d);
        up(&callback_sem);
        if (list_empty(&parent->children))
                check_for_release(parent, &pathbuf);
        up(&manage_sem);
        cpuset_release_agent(pathbuf);
        return 0;
}

/**
 * cpuset_init - initialize cpusets at system boot
 *
 * Description: Initialize top_cpuset and the cpuset internal file system,
 **/

int __init cpuset_init(void)
{
        struct dentry *root;
        int err;

        top_cpuset.cpus_allowed = CPU_MASK_ALL;
        top_cpuset.mems_allowed = NODE_MASK_ALL;

        atomic_inc(&cpuset_mems_generation);
        top_cpuset.mems_generation = atomic_read(&cpuset_mems_generation);

        init_task.cpuset = &top_cpuset;

        err = register_filesystem(&cpuset_fs_type);
        if (err < 0)
                goto out;
        cpuset_mount = kern_mount(&cpuset_fs_type);
        if (IS_ERR(cpuset_mount)) {
                printk(KERN_ERR "cpuset: could not mount!\n");
                err = PTR_ERR(cpuset_mount);
                cpuset_mount = NULL;
                goto out;
        }
        root = cpuset_mount->mnt_sb->s_root;
        root->d_fsdata = &top_cpuset;
        root->d_inode->i_nlink++;
        top_cpuset.dentry = root;
        root->d_inode->i_op = &cpuset_dir_inode_operations;
        err = cpuset_populate_dir(root);
out:
        return err;
}
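
/*
 * The internal kern_mount() above keeps the superblock alive;
 * administrators still need an ordinary mount to reach the files, for
 * example (illustrative, any mount point works):
 *
 *	mkdir /dev/cpuset
 *	mount -t cpuset cpuset /dev/cpuset
 *
 * after which /dev/cpuset/{cpus,mems,tasks,...} describe top_cpuset
 * and mkdir(2) beneath it creates child cpusets.
 */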

/**
 * cpuset_init_smp - initialize cpus_allowed
 *
 * Description: Finish top cpuset after cpu, node maps are initialized
 **/

void __init cpuset_init_smp(void)
{
        top_cpuset.cpus_allowed = cpu_online_map;
        top_cpuset.mems_allowed = node_online_map;
}

/**
 * cpuset_fork - attach newly forked task to its parent's cpuset.
 * @child: pointer to task_struct of the newly forked child process.
 *
 * Description: A task inherits its parent's cpuset at fork().
 *
 * A pointer to the shared cpuset was automatically copied in fork.c
 * by dup_task_struct().  However, we ignore that copy, since it was
 * not made under the protection of task_lock(), so might no longer be
 * a valid cpuset pointer.  attach_task() might have already changed
 * current->cpuset, allowing the previously referenced cpuset to
 * be removed and freed.  Instead, we task_lock(current) and copy
 * its present value of current->cpuset for our freshly forked child.
 *
 * At the point that cpuset_fork() is called, 'current' is the parent
 * task, and the passed argument 'child' points to the child task.
 **/

void cpuset_fork(struct task_struct *child)
{
        task_lock(current);
        child->cpuset = current->cpuset;
        atomic_inc(&child->cpuset->count);
        task_unlock(current);
}

/**
 * cpuset_exit - detach cpuset from exiting task
 * @tsk: pointer to task_struct of exiting process
 *
 * Description: Detach cpuset from @tsk and release it.
 *
 * Note that cpusets marked notify_on_release force every task in
 * them to take the global manage_sem semaphore when exiting.
 * This could impact scaling on very large systems.  Be reluctant to
 * use notify_on_release cpusets where very high task exit scaling
 * is required on large systems.
 *
 * Don't even think about dereferencing 'cs' after the cpuset use count
 * goes to zero, except inside a critical section guarded by manage_sem
 * or callback_sem.  Otherwise a zero cpuset use count is a license to
 * any other task to nuke the cpuset immediately, via cpuset_rmdir().
 *
 * This routine has to take manage_sem, not callback_sem, because
 * it is holding that semaphore while calling check_for_release(),
 * which calls kmalloc(), so can't be called holding callback_sem.
 *
 * We don't need to task_lock() this reference to tsk->cpuset,
 * because tsk is already marked PF_EXITING, so attach_task() won't
 * mess with it.
 **/

void cpuset_exit(struct task_struct *tsk)
{
        struct cpuset *cs;

        BUG_ON(!(tsk->flags & PF_EXITING));

        cs = tsk->cpuset;
        tsk->cpuset = NULL;

        if (notify_on_release(cs)) {
                char *pathbuf = NULL;

                down(&manage_sem);
                if (atomic_dec_and_test(&cs->count))
                        check_for_release(cs, &pathbuf);
                up(&manage_sem);
                cpuset_release_agent(pathbuf);
        } else {
                atomic_dec(&cs->count);
        }
}

/**
 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
 *
 * Description: Returns the cpumask_t cpus_allowed of the cpuset
 * attached to the specified @tsk.  Guaranteed to return some non-empty
 * subset of cpu_online_map, even if this means going outside the
 * task's cpuset.
 **/

cpumask_t cpuset_cpus_allowed(const struct task_struct *tsk)
{
        cpumask_t mask;

        down(&callback_sem);
        task_lock((struct task_struct *)tsk);
        guarantee_online_cpus(tsk->cpuset, &mask);
        task_unlock((struct task_struct *)tsk);
        up(&callback_sem);

        return mask;
}

void cpuset_init_current_mems_allowed(void)
{
        current->mems_allowed = NODE_MASK_ALL;
}

/**
 * cpuset_update_current_mems_allowed - update mems parameters to new values
 *
 * If the current task's cpuset's mems_allowed changed behind our backs,
 * update current->mems_allowed and mems_generation to the new value.
 * Do not call this routine if in_interrupt().
 *
 * Call without callback_sem or task_lock() held.  May be called
 * with or without manage_sem held.  Unless exiting, it will acquire
 * task_lock().  Also might acquire callback_sem during call to
 * refresh_mems().
 **/

void cpuset_update_current_mems_allowed(void)
{
        struct cpuset *cs;
        int need_to_refresh = 0;

        task_lock(current);
        cs = current->cpuset;
        if (!cs)
                goto done;
        if (current->cpuset_mems_generation != cs->mems_generation)
                need_to_refresh = 1;
done:
        task_unlock(current);
        if (need_to_refresh)
                refresh_mems();
}

/**
 * cpuset_restrict_to_mems_allowed - limit nodes to current mems_allowed
 * @nodes: pointer to a node bitmap that is and-ed with mems_allowed
 **/
void cpuset_restrict_to_mems_allowed(unsigned long *nodes)
{
        bitmap_and(nodes, nodes, nodes_addr(current->mems_allowed),
                                                        MAX_NUMNODES);
}

/**
 * cpuset_zonelist_valid_mems_allowed - check zonelist vs. current mems_allowed
 * @zl: the zonelist to be checked
 *
 * Are any of the nodes on zonelist zl allowed in current->mems_allowed?
 */
int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
{
        int i;

        for (i = 0; zl->zones[i]; i++) {
                int nid = zl->zones[i]->zone_pgdat->node_id;

                if (node_isset(nid, current->mems_allowed))
                        return 1;
        }
        return 0;
}

/*
 * nearest_exclusive_ancestor() - Returns the nearest mem_exclusive
 * ancestor to the specified cpuset.  Call holding callback_sem.
 * If no ancestor is mem_exclusive (an unusual configuration), then
 * returns the root cpuset.
 */
static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
{
        while (!is_mem_exclusive(cs) && cs->parent)
                cs = cs->parent;
        return cs;
}
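
/*
 * For example (illustrative hierarchy): if /alpha is mem_exclusive,
 * /alpha/beta is not, and a task is in /alpha/beta, then the nearest
 * exclusive ancestor of that task's cpuset is /alpha; if no ancestor
 * on the path is mem_exclusive, the walk stops at the root cpuset.
 */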

/**
 * cpuset_zone_allowed - Can we allocate memory on zone z's memory node?
 * @z: is this zone on an allowed node?
 * @gfp_mask: memory allocation flags (we use __GFP_HARDWALL)
 *
 * If we're in interrupt, yes, we can always allocate.  If zone
 * z's node is in our task's mems_allowed, yes.  If it's not a
 * __GFP_HARDWALL request and this zone's node is in the nearest
 * mem_exclusive cpuset ancestor to this task's cpuset, yes.
 * Otherwise, no.
 *
 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
 * and do not allow allocations outside the current task's cpuset.
 * GFP_KERNEL allocations are not so marked, so can escape to the
 * nearest mem_exclusive ancestor cpuset.
 *
 * Scanning up parent cpusets requires callback_sem.  The __alloc_pages()
 * routine only calls here with __GFP_HARDWALL bit _not_ set if
 * it's a GFP_KERNEL allocation, and all nodes in the current task's
 * mems_allowed came up empty on the first pass over the zonelist.
 * So only GFP_KERNEL allocations, if all nodes in the cpuset are
 * short of memory, might require taking the callback_sem semaphore.
 *
 * The first loop over the zonelist in mm/page_alloc.c:__alloc_pages()
 * calls here with __GFP_HARDWALL always set in gfp_mask, enforcing
 * hardwall cpusets - no allocation on a node outside the cpuset is
 * allowed (unless in interrupt, of course).
 *
 * The second loop doesn't even call here for GFP_ATOMIC requests
 * (if the __alloc_pages() local variable 'wait' is set).  That check
 * and the checks below have the combined effect in the second loop of
 * the __alloc_pages() routine that:
 *	in_interrupt - any node ok (current task context irrelevant)
 *	GFP_ATOMIC   - any node ok
 *	GFP_KERNEL   - any node in enclosing mem_exclusive cpuset ok
 *	GFP_USER     - only nodes in current task's mems_allowed ok.
 **/

int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        int node;			/* node that zone z is on */
        const struct cpuset *cs;	/* current cpuset ancestors */
        int allowed = 1;		/* is allocation in zone z allowed? */

        if (in_interrupt())
                return 1;
        node = z->zone_pgdat->node_id;
        if (node_isset(node, current->mems_allowed))
                return 1;
        if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
                return 0;

        /* Not hardwall and node outside mems_allowed: scan up cpusets */
        down(&callback_sem);

        if (current->flags & PF_EXITING) /* Let dying task have memory */
                goto done;
        task_lock(current);
        cs = nearest_exclusive_ancestor(current->cpuset);
        task_unlock(current);

        allowed = node_isset(node, cs->mems_allowed);
done:
        up(&callback_sem);
        return allowed;
}

/**
 * cpuset_excl_nodes_overlap - Do we overlap @p's mem_exclusive ancestors?
 * @p: pointer to task_struct of some other task.
 *
 * Description: Return true if the nearest mem_exclusive ancestor
 * cpusets of tasks @p and current overlap.  Used by oom killer to
 * determine if task @p's memory usage might impact the memory
 * available to the current task.
 *
 * Acquires callback_sem - not suitable for calling from a fast path.
 **/

int cpuset_excl_nodes_overlap(const struct task_struct *p)
{
        const struct cpuset *cs1, *cs2;	/* my and p's cpuset ancestors */
        int overlap = 0;		/* do cpusets overlap? */

        down(&callback_sem);

        task_lock(current);
        if (current->flags & PF_EXITING) {
                task_unlock(current);
                goto done;
        }
        cs1 = nearest_exclusive_ancestor(current->cpuset);
        task_unlock(current);

        task_lock((struct task_struct *)p);
        if (p->flags & PF_EXITING) {
                task_unlock((struct task_struct *)p);
                goto done;
        }
        cs2 = nearest_exclusive_ancestor(p->cpuset);
        task_unlock((struct task_struct *)p);

        overlap = nodes_intersects(cs1->mems_allowed, cs2->mems_allowed);
done:
        up(&callback_sem);

        return overlap;
}

/*
 * proc_cpuset_show()
 *  - Print task's cpuset path into seq_file.
 *  - Used for /proc/<pid>/cpuset.
 *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
 *    doesn't really matter if tsk->cpuset changes after we read it,
 *    and we take manage_sem, keeping attach_task() from changing it
 *    anyway.
 */
static int proc_cpuset_show(struct seq_file *m, void *v)
{
        struct cpuset *cs;
        struct task_struct *tsk;
        char *buf;
        int retval = 0;

        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        tsk = m->private;
        down(&manage_sem);
        cs = tsk->cpuset;
        if (!cs) {
                retval = -EINVAL;
                goto out;
        }

        retval = cpuset_path(cs, buf, PAGE_SIZE);
        if (retval < 0)
                goto out;
        seq_puts(m, buf);
        seq_putc(m, '\n');
out:
        up(&manage_sem);
        kfree(buf);
        return retval;
}

static int cpuset_open(struct inode *inode, struct file *file)
{
        struct task_struct *tsk = PROC_I(inode)->task;
        return single_open(file, proc_cpuset_show, tsk);
}

struct file_operations proc_cpuset_operations = {
        .open		= cpuset_open,
        .read		= seq_read,
        .llseek		= seq_lseek,
        .release	= single_release,
};

/* Display task cpus_allowed, mems_allowed in /proc/<pid>/status file. */
char *cpuset_task_status_allowed(struct task_struct *task, char *buffer)
{
        buffer += sprintf(buffer, "Cpus_allowed:\t");
        buffer += cpumask_scnprintf(buffer, PAGE_SIZE, task->cpus_allowed);
        buffer += sprintf(buffer, "\n");
        buffer += sprintf(buffer, "Mems_allowed:\t");
        buffer += nodemask_scnprintf(buffer, PAGE_SIZE, task->mems_allowed);
        buffer += sprintf(buffer, "\n");
        return buffer;
}
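
/*
 * The resulting /proc/<pid>/status lines use the hex mask format of
 * cpumask_scnprintf()/nodemask_scnprintf(), for example (illustrative
 * values; the field widths depend on NR_CPUS and MAX_NUMNODES):
 *
 *	Cpus_allowed:	000000ff
 *	Mems_allowed:	00000003
 */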