2 * Simple NUMA memory policy for the Linux kernel.
4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
6 * Subject to the GNU Public License, version 2.
8 * NUMA policy allows the user to give hints in which node(s) memory should
11 * Support four policies per VMA and per process:
13 * The VMA policy has priority over the process policy for a page fault.
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19 * for anonymous memory. For process policy a process counter
22 * bind Only allocate memory on a specific set of nodes,
24 * FIXME: memory is allocated starting with the first node
25 * to the last. It would be better if bind would truly restrict
26 * the allocation to memory nodes instead
28 * preferred Try a specific node first before normal fallback.
29 * As a special case node -1 here means do the allocation
30 * on the local CPU. This is normally identical to default,
31 * but useful to set in a VMA when you have a non default
34 * default Allocate on the local node first, or when on a VMA
35 * use the process policy. This is what Linux always did
36 * in a NUMA aware kernel and still does by, ahem, default.
38 * The process policy is applied for most non interrupt memory allocations
39 * in that process' context. Interrupts ignore the policies and always
40 * try to allocate on the local CPU. The VMA policy is only applied for memory
41 * allocations for a VMA in the VM.
43 * Currently there are a few corner cases in swapping where the policy
44 * is not applied, but the majority should be handled. When process policy
45 * is used it is not remembered over swap outs/swap ins.
47 * Only the highest zone in the zone hierarchy gets policied. Allocations
48 * requesting a lower zone just use default policy. This implies that
49 * on systems with highmem, kernel lowmem allocations don't get policied.
50 * Same with GFP_DMA allocations.
52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53 * all users and remembered even when nobody has memory mapped.
57 fix mmap readahead to honour policy and enable policy for any page cache
59 statistics for bigpages
60 global policy for page cache? currently it uses process policy. Requires
62 handle mremap for shared memory (currently ignored for the policy)
64 make bind policy root only? It can trigger oom much faster and the
65 kernel is not always graceful about that.
66 could replace all the switch()es with a mempolicy_ops structure.
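/*
 * Illustrative userspace sketch (not part of this file's build): setting
 * the interleave policy described above for the calling task.  Assumes
 * libnuma's <numaif.h> wrapper for set_mempolicy(2) (link with -lnuma)
 * and that nodes 0 and 1 have memory; adjust the mask for the real
 * topology.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *
 *		if (set_mempolicy(MPOL_INTERLEAVE, &nodemask,
 *				  sizeof(nodemask) * 8) != 0)
 *			perror("set_mempolicy");
 *		return 0;
 *	}
 */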
69 #include <linux/mempolicy.h>
71 #include <linux/highmem.h>
72 #include <linux/hugetlb.h>
73 #include <linux/kernel.h>
74 #include <linux/sched.h>
75 #include <linux/nodemask.h>
76 #include <linux/cpuset.h>
77 #include <linux/gfp.h>
78 #include <linux/slab.h>
79 #include <linux/string.h>
80 #include <linux/module.h>
81 #include <linux/nsproxy.h>
82 #include <linux/interrupt.h>
83 #include <linux/init.h>
84 #include <linux/compat.h>
85 #include <linux/swap.h>
86 #include <linux/seq_file.h>
87 #include <linux/proc_fs.h>
88 #include <linux/migrate.h>
89 #include <linux/rmap.h>
90 #include <linux/security.h>
91 #include <linux/syscalls.h>
93 #include <asm/tlbflush.h>
94 #include <asm/uaccess.h>
97 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
98 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
99 #define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2) /* Gather statistics */
101 static struct kmem_cache *policy_cache;
102 static struct kmem_cache *sn_cache;
104 /* Highest zone. A specific allocation for a zone below that is not
106 enum zone_type policy_zone = 0;
108 struct mempolicy default_policy = {
109 .refcnt = ATOMIC_INIT(1), /* never free it */
110 .policy = MPOL_DEFAULT,
113 static void mpol_rebind_policy(struct mempolicy *pol,
114 const nodemask_t *newmask);
116 /* Do sanity checking on a policy */
117 static int mpol_check_policy(int mode, nodemask_t *nodes)
119 int was_empty, is_empty;
125 * "Contextualize" the in-coming nodemast for cpusets:
126 * Remember whether in-coming nodemask was empty, If not,
127 * restrict the nodes to the allowed nodes in the cpuset.
128 * This is guaranteed to be a subset of nodes with memory.
130 cpuset_update_task_memory_state();
131 is_empty = was_empty = nodes_empty(*nodes);
133 nodes_and(*nodes, *nodes, cpuset_current_mems_allowed);
134 is_empty = nodes_empty(*nodes); /* after "contextualization" */
140 * require caller to specify an empty nodemask
141 * before "contextualization"
147 case MPOL_INTERLEAVE:
149 * require at least 1 valid node after "contextualization"
156 * Did caller specify invalid nodes?
157 * Don't silently accept this as "local allocation".
159 if (!was_empty && is_empty)
166 /* Check that the nodemask contains at least one populated zone */
167 static int is_valid_nodemask(nodemask_t *nodemask)
171 /* Check that there is something useful in this mask */
174 for_each_node_mask(nd, *nodemask) {
177 for (k = 0; k <= policy_zone; k++) {
178 z = &NODE_DATA(nd)->node_zones[k];
179 if (z->present_pages > 0)
187 /* Create a new policy */
188 static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
190 struct mempolicy *policy;
192 pr_debug("setting mode %d nodes[0] %lx\n",
193 mode, nodes ? nodes_addr(*nodes)[0] : -1);
195 if (mode == MPOL_DEFAULT)
197 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
199 return ERR_PTR(-ENOMEM);
200 atomic_set(&policy->refcnt, 1);
202 case MPOL_INTERLEAVE:
203 policy->v.nodes = *nodes;
204 if (nodes_weight(policy->v.nodes) == 0) {
205 kmem_cache_free(policy_cache, policy);
206 return ERR_PTR(-EINVAL);
210 policy->v.preferred_node = first_node(*nodes);
211 if (policy->v.preferred_node >= MAX_NUMNODES)
212 policy->v.preferred_node = -1;
215 if (!is_valid_nodemask(nodes)) {
216 kmem_cache_free(policy_cache, policy);
217 return ERR_PTR(-EINVAL);
219 policy->v.nodes = *nodes;
222 policy->policy = mode;
223 policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
227 static void gather_stats(struct page *, void *, int pte_dirty);
228 static void migrate_page_add(struct page *page, struct list_head *pagelist,
229 unsigned long flags);
231 /* Scan through pages checking if pages follow certain conditions. */
232 static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
233 unsigned long addr, unsigned long end,
234 const nodemask_t *nodes, unsigned long flags,
241 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
246 if (!pte_present(*pte))
248 page = vm_normal_page(vma, addr, *pte);
252 * The check for PageReserved here is important to avoid
253 * handling zero pages and other pages that may have been
254 * marked special by the system.
256 * If PageReserved were not checked here then, for example,
257 * the location of the zero page could have an influence
258 * on MPOL_MF_STRICT, zero pages would be counted in
259 * the per-node stats, and there would be useless attempts
260 * to put zero pages on the migration list.
262 if (PageReserved(page))
264 nid = page_to_nid(page);
265 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
268 if (flags & MPOL_MF_STATS)
269 gather_stats(page, private, pte_dirty(*pte));
270 else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
271 migrate_page_add(page, private, flags);
274 } while (pte++, addr += PAGE_SIZE, addr != end);
275 pte_unmap_unlock(orig_pte, ptl);
279 static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
280 unsigned long addr, unsigned long end,
281 const nodemask_t *nodes, unsigned long flags,
287 pmd = pmd_offset(pud, addr);
289 next = pmd_addr_end(addr, end);
290 if (pmd_none_or_clear_bad(pmd))
292 if (check_pte_range(vma, pmd, addr, next, nodes,
295 } while (pmd++, addr = next, addr != end);
299 static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
300 unsigned long addr, unsigned long end,
301 const nodemask_t *nodes, unsigned long flags,
307 pud = pud_offset(pgd, addr);
309 next = pud_addr_end(addr, end);
310 if (pud_none_or_clear_bad(pud))
312 if (check_pmd_range(vma, pud, addr, next, nodes,
315 } while (pud++, addr = next, addr != end);
319 static inline int check_pgd_range(struct vm_area_struct *vma,
320 unsigned long addr, unsigned long end,
321 const nodemask_t *nodes, unsigned long flags,
327 pgd = pgd_offset(vma->vm_mm, addr);
329 next = pgd_addr_end(addr, end);
330 if (pgd_none_or_clear_bad(pgd))
332 if (check_pud_range(vma, pgd, addr, next, nodes,
335 } while (pgd++, addr = next, addr != end);
340 * Check if all pages in a range are on a set of nodes.
341 * If pagelist != NULL then isolate pages from the LRU and
342 * put them on the pagelist.
344 static struct vm_area_struct *
345 check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
346 const nodemask_t *nodes, unsigned long flags, void *private)
349 struct vm_area_struct *first, *vma, *prev;
351 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
353 err = migrate_prep();
358 first = find_vma(mm, start);
360 return ERR_PTR(-EFAULT);
362 for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
363 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
364 if (!vma->vm_next && vma->vm_end < end)
365 return ERR_PTR(-EFAULT);
366 if (prev && prev->vm_end < vma->vm_start)
367 return ERR_PTR(-EFAULT);
369 if (!is_vm_hugetlb_page(vma) &&
370 ((flags & MPOL_MF_STRICT) ||
371 ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
372 vma_migratable(vma)))) {
373 unsigned long endvma = vma->vm_end;
377 if (vma->vm_start > start)
378 start = vma->vm_start;
379 err = check_pgd_range(vma, start, endvma, nodes,
382 first = ERR_PTR(err);
391 /* Apply policy to a single VMA */
392 static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
395 struct mempolicy *old = vma->vm_policy;
397 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
398 vma->vm_start, vma->vm_end, vma->vm_pgoff,
399 vma->vm_ops, vma->vm_file,
400 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
402 if (vma->vm_ops && vma->vm_ops->set_policy)
403 err = vma->vm_ops->set_policy(vma, new);
406 vma->vm_policy = new;
412 /* Step 2: apply policy to a range and do splits. */
413 static int mbind_range(struct vm_area_struct *vma, unsigned long start,
414 unsigned long end, struct mempolicy *new)
416 struct vm_area_struct *next;
420 for (; vma && vma->vm_start < end; vma = next) {
422 if (vma->vm_start < start)
423 err = split_vma(vma->vm_mm, vma, start, 1);
424 if (!err && vma->vm_end > end)
425 err = split_vma(vma->vm_mm, vma, end, 0);
427 err = policy_vma(vma, new);
435 * Update task->flags PF_MEMPOLICY bit: set iff non-default
436 * mempolicy. Allows more rapid checking of this (combined perhaps
437 * with other PF_* flag bits) on memory allocation hot code paths.
439 * If called from outside this file, the task 'p' should -only- be
440 * a newly forked child not yet visible on the task list, because
441 * manipulating the task flags of a visible task is not safe.
443 * The above limitation is why this routine has the funny name
444 * mpol_fix_fork_child_flag().
446 * It is also safe to call this with a task pointer of current,
447 * which the static wrapper mpol_set_task_struct_flag() does,
448 * for use within this file.
451 void mpol_fix_fork_child_flag(struct task_struct *p)
454 p->flags |= PF_MEMPOLICY;
456 p->flags &= ~PF_MEMPOLICY;
459 static void mpol_set_task_struct_flag(void)
461 mpol_fix_fork_child_flag(current);
464 /* Set the process memory policy */
465 static long do_set_mempolicy(int mode, nodemask_t *nodes)
467 struct mempolicy *new;
469 if (mpol_check_policy(mode, nodes))
471 new = mpol_new(mode, nodes);
474 mpol_free(current->mempolicy);
475 current->mempolicy = new;
476 mpol_set_task_struct_flag();
477 if (new && new->policy == MPOL_INTERLEAVE)
478 current->il_next = first_node(new->v.nodes);
482 /* Fill a zone bitmap for a policy */
483 static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
491 case MPOL_INTERLEAVE:
495 /* or use current node instead of memory_map? */
496 if (p->v.preferred_node < 0)
497 *nodes = node_states[N_HIGH_MEMORY];
499 node_set(p->v.preferred_node, *nodes);
506 static int lookup_node(struct mm_struct *mm, unsigned long addr)
511 err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
513 err = page_to_nid(p);
519 /* Retrieve NUMA policy */
520 static long do_get_mempolicy(int *policy, nodemask_t *nmask,
521 unsigned long addr, unsigned long flags)
524 struct mm_struct *mm = current->mm;
525 struct vm_area_struct *vma = NULL;
526 struct mempolicy *pol = current->mempolicy;
528 cpuset_update_task_memory_state();
530 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
533 if (flags & MPOL_F_MEMS_ALLOWED) {
534 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
536 *policy = 0; /* just so it's initialized */
537 *nmask = cpuset_current_mems_allowed;
541 if (flags & MPOL_F_ADDR) {
542 down_read(&mm->mmap_sem);
543 vma = find_vma_intersection(mm, addr, addr+1);
545 up_read(&mm->mmap_sem);
548 if (vma->vm_ops && vma->vm_ops->get_policy)
549 pol = vma->vm_ops->get_policy(vma, addr);
551 pol = vma->vm_policy;
556 pol = &default_policy;
558 if (flags & MPOL_F_NODE) {
559 if (flags & MPOL_F_ADDR) {
560 err = lookup_node(mm, addr);
564 } else if (pol == current->mempolicy &&
565 pol->policy == MPOL_INTERLEAVE) {
566 *policy = current->il_next;
572 *policy = pol->policy;
575 up_read(&current->mm->mmap_sem);
581 get_zonemask(pol, nmask);
585 up_read(&current->mm->mmap_sem);
589 #ifdef CONFIG_MIGRATION
593 static void migrate_page_add(struct page *page, struct list_head *pagelist,
597 * Avoid migrating a page that is shared with others.
599 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
600 isolate_lru_page(page, pagelist);
603 static struct page *new_node_page(struct page *page, unsigned long node, int **x)
605 return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
609 * Migrate pages from one node to a target node.
610 * Returns error or the number of pages not migrated.
612 static int migrate_to_node(struct mm_struct *mm, int source, int dest,
620 node_set(source, nmask);
622 check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
623 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
625 if (!list_empty(&pagelist))
626 err = migrate_pages(&pagelist, new_node_page, dest);
632 * Move pages between the two nodesets so as to preserve the physical
633 * layout as much as possible.
635 * Returns the number of pages that could not be moved.
637 int do_migrate_pages(struct mm_struct *mm,
638 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
645 down_read(&mm->mmap_sem);
647 err = migrate_vmas(mm, from_nodes, to_nodes, flags);
652 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
653 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
654 * bit in 'tmp', and return that <source, dest> pair for migration.
655 * The pair of nodemasks 'to' and 'from' define the map.
657 * If no pair of bits is found that way, fall back to picking some
658 * pair of 'source' and 'dest' bits that are not the same. If the
659 * 'source' and 'dest' bits are the same, this represents a node
660 * that will be migrating to itself, so no pages need move.
662 * If no bits are left in 'tmp', or if all remaining bits left
663 * in 'tmp' correspond to the same bit in 'to', return false
664 * (nothing left to migrate).
666 * This lets us pick a pair of nodes to migrate between, such that
667 * if possible the dest node is not already occupied by some other
668 * source node, minimizing the risk of overloading the memory on a
669 * node, which would happen if we migrated incoming memory to a node
670 * before migrating the outgoing memory sourced from that same node.
672 * A single scan of tmp is sufficient. As we go, we remember the
673 * most recent <s, d> pair that moved (s != d). If we find a pair
674 * that not only moved, but what's better, moved to an empty slot
675 * (d is not set in tmp), then we break out immediately with that pair.
676 * Otherwise, when we finish scanning tmp, we at least have the
677 * most recent <s, d> pair that moved. If we get all the way through
678 * the scan of tmp without finding any node that moved, much less
679 * moved to an empty node, then there is nothing left worth migrating.
683 while (!nodes_empty(tmp)) {
688 for_each_node_mask(s, tmp) {
689 d = node_remap(s, *from_nodes, *to_nodes);
693 source = s; /* Node moved. Memorize */
696 /* dest not in remaining from nodes? */
697 if (!node_isset(dest, tmp))
703 node_clear(source, tmp);
704 err = migrate_to_node(mm, source, dest, flags);
711 up_read(&mm->mmap_sem);
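/*
 * Illustrative sketch (assumptions noted): the pair-selection loop above
 * relies on node_remap(), which maps a node by its ordinal position among
 * the set bits of the 'from' mask to the node at the same ordinal position
 * in the 'to' mask (wrapping on the 'to' weight, identity if the node is
 * not in 'from').  A minimal userspace model of that mapping for masks
 * that fit in one unsigned long, using hypothetical helper names and the
 * GCC popcount builtin:
 *
 *	#include <stdio.h>
 *
 *	static int ord_of_bit(unsigned long mask, int bit)
 *	{
 *		int i, ord = -1;
 *
 *		if (!(mask & (1UL << bit)))
 *			return -1;
 *		for (i = 0; i <= bit; i++)
 *			if (mask & (1UL << i))
 *				ord++;
 *		return ord;
 *	}
 *
 *	static int bit_of_ord(unsigned long mask, int ord)
 *	{
 *		int i;
 *
 *		for (i = 0; i < (int)(sizeof(mask) * 8); i++)
 *			if ((mask & (1UL << i)) && ord-- == 0)
 *				return i;
 *		return -1;
 *	}
 *
 *	static int remap_node(int s, unsigned long from, unsigned long to)
 *	{
 *		int w = __builtin_popcountl(to);
 *		int n = ord_of_bit(from, s);
 *
 *		if (n < 0 || w == 0)
 *			return s;
 *		return bit_of_ord(to, n % w);
 *	}
 *
 *	int main(void)
 *	{
 *		// node 1 is ordinal 1 in {0,1}; ordinal 1 in {2,3} is node 3
 *		printf("%d\n", remap_node(1, 0x3, 0xc));	// prints 3
 *		return 0;
 *	}
 */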
719 * Allocate a new page for page migration based on vma policy.
720 * Start assuming that page is mapped by vma pointed to by @private.
721 * Search forward from there, if not. N.B., this assumes that the
722 * list of pages handed to migrate_pages()--which is how we get here--
723 * is in virtual address order.
725 static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
727 struct vm_area_struct *vma = (struct vm_area_struct *)private;
728 unsigned long uninitialized_var(address);
731 address = page_address_in_vma(page, vma);
732 if (address != -EFAULT)
738 * if !vma, alloc_page_vma() will use task or system default policy
740 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
744 static void migrate_page_add(struct page *page, struct list_head *pagelist,
749 int do_migrate_pages(struct mm_struct *mm,
750 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
755 static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
761 static long do_mbind(unsigned long start, unsigned long len,
762 unsigned long mode, nodemask_t *nmask,
765 struct vm_area_struct *vma;
766 struct mm_struct *mm = current->mm;
767 struct mempolicy *new;
772 if ((flags & ~(unsigned long)(MPOL_MF_STRICT |
773 MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
776 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
779 if (start & ~PAGE_MASK)
782 if (mode == MPOL_DEFAULT)
783 flags &= ~MPOL_MF_STRICT;
785 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
793 if (mpol_check_policy(mode, nmask))
796 new = mpol_new(mode, nmask);
801 * If we are using the default policy then operation
802 * on discontinuous address spaces is okay after all
805 flags |= MPOL_MF_DISCONTIG_OK;
807 pr_debug("mbind %lx-%lx mode:%ld nodes:%lx\n",start,start+len,
808 mode, nmask ? nodes_addr(*nmask)[0] : -1);
810 down_write(&mm->mmap_sem);
811 vma = check_range(mm, start, end, nmask,
812 flags | MPOL_MF_INVERT, &pagelist);
818 err = mbind_range(vma, start, end, new);
820 if (!list_empty(&pagelist))
821 nr_failed = migrate_pages(&pagelist, new_vma_page,
824 if (!err && nr_failed && (flags & MPOL_MF_STRICT))
828 up_write(&mm->mmap_sem);
834 * User space interface with variable sized bitmaps for nodelists.
837 /* Copy a node mask from user space. */
838 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
839 unsigned long maxnode)
842 unsigned long nlongs;
843 unsigned long endmask;
847 if (maxnode == 0 || !nmask)
849 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
852 nlongs = BITS_TO_LONGS(maxnode);
853 if ((maxnode % BITS_PER_LONG) == 0)
856 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
858 /* When the user specified more nodes than supported just check
859 if the unsupported part is all zero. */
860 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
861 if (nlongs > PAGE_SIZE/sizeof(long))
863 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
865 if (get_user(t, nmask + k))
867 if (k == nlongs - 1) {
873 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
877 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
879 nodes_addr(*nodes)[nlongs-1] &= endmask;
883 /* Copy a kernel node mask to user space */
884 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
887 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
888 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
891 if (copy > PAGE_SIZE)
893 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
897 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
900 asmlinkage long sys_mbind(unsigned long start, unsigned long len,
902 unsigned long __user *nmask, unsigned long maxnode,
908 err = get_nodes(&nodes, nmask, maxnode);
911 return do_mbind(start, len, mode, &nodes, flags);
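/*
 * Illustrative userspace sketch (not part of this kernel file): binding an
 * anonymous mapping to one node with mbind(2), the operation handled by
 * sys_mbind()/do_mbind() above.  Assumes libnuma's <numaif.h> (link with
 * -lnuma) and that node 0 has memory.
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 1 << 20;
 *		unsigned long nodemask = 1UL << 0;
 *		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (p == MAP_FAILED)
 *			return 1;
 *		if (mbind(p, len, MPOL_BIND, &nodemask,
 *			  sizeof(nodemask) * 8, MPOL_MF_MOVE) != 0)
 *			perror("mbind");
 *		return 0;
 *	}
 */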
914 /* Set the process memory policy */
915 asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
916 unsigned long maxnode)
921 if (mode < 0 || mode > MPOL_MAX)
923 err = get_nodes(&nodes, nmask, maxnode);
926 return do_set_mempolicy(mode, &nodes);
929 asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
930 const unsigned long __user *old_nodes,
931 const unsigned long __user *new_nodes)
933 struct mm_struct *mm;
934 struct task_struct *task;
937 nodemask_t task_nodes;
940 err = get_nodes(&old, old_nodes, maxnode);
944 err = get_nodes(&new, new_nodes, maxnode);
948 /* Find the mm_struct */
949 read_lock(&tasklist_lock);
950 task = pid ? find_task_by_vpid(pid) : current;
952 read_unlock(&tasklist_lock);
955 mm = get_task_mm(task);
956 read_unlock(&tasklist_lock);
962 * Check if this process has the right to modify the specified
963 * process. The right exists if the process has administrative
964 * capabilities, superuser privileges or the same
965 * userid as the target process.
967 if ((current->euid != task->suid) && (current->euid != task->uid) &&
968 (current->uid != task->suid) && (current->uid != task->uid) &&
969 !capable(CAP_SYS_NICE)) {
974 task_nodes = cpuset_mems_allowed(task);
975 /* Is the user allowed to access the target nodes? */
976 if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
981 if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
986 err = security_task_movememory(task);
990 err = do_migrate_pages(mm, &old, &new,
991 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
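/*
 * Illustrative userspace sketch (not part of this kernel file): moving all
 * pages of a target task from node 0 to node 1, the operation implemented
 * by sys_migrate_pages() above.  Assumes libnuma's <numaif.h> (link with
 * -lnuma); per the checks above the caller needs CAP_SYS_NICE or a
 * matching uid, and access to the target nodes.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	static int move_task_pages(int pid)
 *	{
 *		unsigned long old_nodes = 1UL << 0;
 *		unsigned long new_nodes = 1UL << 1;
 *
 *		if (migrate_pages(pid, sizeof(old_nodes) * 8,
 *				  &old_nodes, &new_nodes) < 0) {
 *			perror("migrate_pages");
 *			return -1;
 *		}
 *		return 0;
 *	}
 */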
998 /* Retrieve NUMA policy */
999 asmlinkage long sys_get_mempolicy(int __user *policy,
1000 unsigned long __user *nmask,
1001 unsigned long maxnode,
1002 unsigned long addr, unsigned long flags)
1005 int uninitialized_var(pval);
1008 if (nmask != NULL && maxnode < MAX_NUMNODES)
1011 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1016 if (policy && put_user(pval, policy))
1020 err = copy_nodes_to_user(nmask, maxnode, &nodes);
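/*
 * Illustrative userspace sketch (not part of this kernel file): querying
 * the calling task's policy with get_mempolicy(2), the path implemented by
 * sys_get_mempolicy()/do_get_mempolicy() above.  Assumes libnuma's
 * <numaif.h>.  Passing a NULL nodemask sidesteps the maxnode >=
 * MAX_NUMNODES requirement enforced above; the returned mode is one of
 * the MPOL_* constants.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int mode;
 *
 *		if (get_mempolicy(&mode, NULL, 0, NULL, 0) != 0) {
 *			perror("get_mempolicy");
 *			return 1;
 *		}
 *		printf("mode=%d\n", mode);
 *		return 0;
 *	}
 */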
1025 #ifdef CONFIG_COMPAT
1027 asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1028 compat_ulong_t __user *nmask,
1029 compat_ulong_t maxnode,
1030 compat_ulong_t addr, compat_ulong_t flags)
1033 unsigned long __user *nm = NULL;
1034 unsigned long nr_bits, alloc_size;
1035 DECLARE_BITMAP(bm, MAX_NUMNODES);
1037 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1038 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1041 nm = compat_alloc_user_space(alloc_size);
1043 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1045 if (!err && nmask) {
1046 err = copy_from_user(bm, nm, alloc_size);
1047 /* ensure entire bitmap is zeroed */
1048 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1049 err |= compat_put_bitmap(nmask, bm, nr_bits);
1055 asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1056 compat_ulong_t maxnode)
1059 unsigned long __user *nm = NULL;
1060 unsigned long nr_bits, alloc_size;
1061 DECLARE_BITMAP(bm, MAX_NUMNODES);
1063 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1064 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1067 err = compat_get_bitmap(bm, nmask, nr_bits);
1068 nm = compat_alloc_user_space(alloc_size);
1069 err |= copy_to_user(nm, bm, alloc_size);
1075 return sys_set_mempolicy(mode, nm, nr_bits+1);
1078 asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1079 compat_ulong_t mode, compat_ulong_t __user *nmask,
1080 compat_ulong_t maxnode, compat_ulong_t flags)
1083 unsigned long __user *nm = NULL;
1084 unsigned long nr_bits, alloc_size;
1087 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1088 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1091 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1092 nm = compat_alloc_user_space(alloc_size);
1093 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1099 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1105 * get_vma_policy(@task, @vma, @addr)
1106 * @task - task for fallback if vma policy == default
1107 * @vma - virtual memory area whose policy is sought
1108 * @addr - address in @vma for shared policy lookup
1110 * Returns effective policy for a VMA at specified address.
1111 * Falls back to @task or system default policy, as necessary.
1112 * Returned policy has extra reference count if shared, vma,
1113 * or some other task's policy [show_numa_maps() can pass
1114 * @task != current]. It is the caller's responsibility to
1115 * free the reference in these cases.
1117 static struct mempolicy * get_vma_policy(struct task_struct *task,
1118 struct vm_area_struct *vma, unsigned long addr)
1120 struct mempolicy *pol = task->mempolicy;
1124 if (vma->vm_ops && vma->vm_ops->get_policy) {
1125 pol = vma->vm_ops->get_policy(vma, addr);
1126 shared_pol = 1; /* if pol non-NULL, add ref below */
1127 } else if (vma->vm_policy &&
1128 vma->vm_policy->policy != MPOL_DEFAULT)
1129 pol = vma->vm_policy;
1132 pol = &default_policy;
1133 else if (!shared_pol && pol != current->mempolicy)
1134 mpol_get(pol); /* vma or other task's policy */
1138 /* Return a nodemask representing a mempolicy */
1139 static nodemask_t *nodemask_policy(gfp_t gfp, struct mempolicy *policy)
1141 /* Lower zones don't get a nodemask applied for MPOL_BIND */
1142 if (unlikely(policy->policy == MPOL_BIND) &&
1143 gfp_zone(gfp) >= policy_zone &&
1144 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1145 return &policy->v.nodes;
1150 /* Return a zonelist representing a mempolicy */
1151 static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
1155 switch (policy->policy) {
1156 case MPOL_PREFERRED:
1157 nd = policy->v.preferred_node;
1159 nd = numa_node_id();
1163 * Normally, MPOL_BIND allocations are node-local within the
1164 * allowed nodemask.  However, if __GFP_THISNODE is set and the
1165 * current node is not part of the mask, we use the zonelist for
1166 * the first node in the mask instead.
1168 nd = numa_node_id();
1169 if (unlikely(gfp & __GFP_THISNODE) &&
1170 unlikely(!node_isset(nd, policy->v.nodes)))
1171 nd = first_node(policy->v.nodes);
1173 case MPOL_INTERLEAVE: /* should not happen */
1175 nd = numa_node_id();
1181 return node_zonelist(nd, gfp);
1184 /* Do dynamic interleaving for a process */
1185 static unsigned interleave_nodes(struct mempolicy *policy)
1188 struct task_struct *me = current;
1191 next = next_node(nid, policy->v.nodes);
1192 if (next >= MAX_NUMNODES)
1193 next = first_node(policy->v.nodes);
1199 * Depending on the memory policy provide a node from which to allocate the
1202 unsigned slab_node(struct mempolicy *policy)
1204 int pol = policy ? policy->policy : MPOL_DEFAULT;
1207 case MPOL_INTERLEAVE:
1208 return interleave_nodes(policy);
1212 * Follow bind policy behavior and start allocation at the
1215 struct zonelist *zonelist;
1217 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1218 zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
1219 (void)first_zones_zonelist(zonelist, highest_zoneidx,
1225 case MPOL_PREFERRED:
1226 if (policy->v.preferred_node >= 0)
1227 return policy->v.preferred_node;
1231 return numa_node_id();
1235 /* Do static interleaving for a VMA with known offset. */
1236 static unsigned offset_il_node(struct mempolicy *pol,
1237 struct vm_area_struct *vma, unsigned long off)
1239 unsigned nnodes = nodes_weight(pol->v.nodes);
1240 unsigned target = (unsigned)off % nnodes;
1246 nid = next_node(nid, pol->v.nodes);
1248 } while (c <= target);
1252 /* Determine a node number for interleave */
1253 static inline unsigned interleave_nid(struct mempolicy *pol,
1254 struct vm_area_struct *vma, unsigned long addr, int shift)
1260 * for small pages, there is no difference between
1261 * shift and PAGE_SHIFT, so the bit-shift is safe.
1262 * for huge pages, since vm_pgoff is in units of small
1263 * pages, we need to shift off the always 0 bits to get
1266 BUG_ON(shift < PAGE_SHIFT);
1267 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1268 off += (addr - vma->vm_start) >> shift;
1269 return offset_il_node(pol, vma, off);
1271 return interleave_nodes(pol);
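/*
 * Illustrative sketch (assumptions noted): for VMA-based interleaving the
 * chosen node depends only on the page's offset into the backing object,
 * so the same file page always lands on the same node no matter where it
 * is mapped.  A minimal userspace model of offset_il_node() for a
 * non-empty nodemask that fits in one unsigned long, with a hypothetical
 * helper name and the GCC popcount builtin:
 *
 *	static int interleave_node_for_offset(unsigned long off,
 *					      unsigned long nodemask)
 *	{
 *		unsigned int nnodes = __builtin_popcountl(nodemask);
 *		unsigned int target = off % nnodes;
 *		int nid = -1;
 *
 *		do {
 *			do {
 *				nid++;
 *			} while (!(nodemask & (1UL << nid)));
 *		} while (target--);
 *		return nid;
 *	}
 *
 * With nodemask {0,2,3} (0xd), offsets 0,1,2,3,4,... map to nodes
 * 0,2,3,0,2,...
 */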
1274 #ifdef CONFIG_HUGETLBFS
1276 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1277 * @vma = virtual memory area whose policy is sought
1278 * @addr = address in @vma for shared policy lookup and interleave policy
1279 * @gfp_flags = for requested zone
1280 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
1281 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1283 * Returns a zonelist suitable for a huge page allocation.
1284 * If the effective policy is 'BIND, returns pointer to local node's zonelist,
1285 * and a pointer to the mempolicy's @nodemask for filtering the zonelist.
1286 * If it is also a policy for which get_vma_policy() returns an extra
1287 * reference, we must hold that reference until after the allocation.
1288 * In that case, return policy via @mpol so hugetlb allocation can drop
1289 * the reference. For non-'BIND referenced policies, we can/do drop the
1290 * reference here, so the caller doesn't need to know about the special case
1291 * for default and current task policy.
1293 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1294 gfp_t gfp_flags, struct mempolicy **mpol,
1295 nodemask_t **nodemask)
1297 struct mempolicy *pol = get_vma_policy(current, vma, addr);
1298 struct zonelist *zl;
1300 *mpol = NULL; /* probably no unref needed */
1301 *nodemask = NULL; /* assume !MPOL_BIND */
1302 if (pol->policy == MPOL_BIND) {
1303 *nodemask = &pol->v.nodes;
1304 } else if (pol->policy == MPOL_INTERLEAVE) {
1307 nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
1308 if (unlikely(pol != &default_policy &&
1309 pol != current->mempolicy))
1310 __mpol_free(pol); /* finished with pol */
1311 return node_zonelist(nid, gfp_flags);
1314 zl = zonelist_policy(GFP_HIGHUSER, pol);
1315 if (unlikely(pol != &default_policy && pol != current->mempolicy)) {
1316 if (pol->policy != MPOL_BIND)
1317 __mpol_free(pol); /* finished with pol */
1319 *mpol = pol; /* unref needed after allocation */
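/*
 * Illustrative caller sketch (not literal hugetlb code): the reference
 * contract documented above means a hugetlb allocator would use the
 * result roughly like this, dropping the mempolicy reference only after
 * the allocation when *mpol was set; the dequeue helper name is assumed.
 *
 *	struct mempolicy *mpol;
 *	nodemask_t *nodemask;
 *	struct zonelist *zl = huge_zonelist(vma, addr, gfp_mask,
 *					    &mpol, &nodemask);
 *
 *	page = dequeue_from_zonelist(zl, nodemask);	// hypothetical helper
 *	mpol_free(mpol);	// unref a 'BIND policy, no-op otherwise
 */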
1325 /* Allocate a page in interleaved policy.
1326 Own path because it needs to do special accounting. */
1327 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1330 struct zonelist *zl;
1333 zl = node_zonelist(nid, gfp);
1334 page = __alloc_pages(gfp, order, zl);
1335 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1336 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1341 * alloc_page_vma - Allocate a page for a VMA.
1344 * %GFP_USER user allocation.
1345 * %GFP_KERNEL kernel allocations,
1346 * %GFP_HIGHMEM highmem/user allocations,
1347 * %GFP_FS allocation should not call back into a file system.
1348 * %GFP_ATOMIC don't sleep.
1350 * @vma: Pointer to VMA or NULL if not available.
1351 * @addr: Virtual Address of the allocation. Must be inside the VMA.
1353 * This function allocates a page from the kernel page pool and applies
1354 * a NUMA policy associated with the VMA or the current process.
1355 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
1356 * mm_struct of the VMA to prevent it from going away. Should be used for
1357 * all allocations for pages that will be mapped into
1358 * user space. Returns NULL when no page can be allocated.
1360 * Should be called with the mm_sem of the vma held.
1363 alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
1365 struct mempolicy *pol = get_vma_policy(current, vma, addr);
1366 struct zonelist *zl;
1368 cpuset_update_task_memory_state();
1370 if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
1373 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
1374 if (unlikely(pol != &default_policy &&
1375 pol != current->mempolicy))
1376 __mpol_free(pol); /* finished with pol */
1377 return alloc_page_interleave(gfp, 0, nid);
1379 zl = zonelist_policy(gfp, pol);
1380 if (pol != &default_policy && pol != current->mempolicy) {
1382 * slow path: ref counted policy -- shared or vma
1384 struct page *page = __alloc_pages_nodemask(gfp, 0,
1385 zl, nodemask_policy(gfp, pol));
1390 * fast path: default or task policy
1392 return __alloc_pages_nodemask(gfp, 0, zl, nodemask_policy(gfp, pol));
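/*
 * Illustrative caller sketch: a typical anonymous-fault user of this API,
 * holding mmap_sem for read as required above (the surrounding fault
 * handler and its return path are assumed, not taken from this file):
 *
 *	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 *	// map 'page' into the process at 'address' ...
 */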
1396 * alloc_pages_current - Allocate pages.
1399 * %GFP_USER user allocation,
1400 * %GFP_KERNEL kernel allocation,
1401 * %GFP_HIGHMEM highmem allocation,
1402 * %GFP_FS don't call back into a file system.
1403 * %GFP_ATOMIC don't sleep.
1404 * @order: Power of two of allocation size in pages. 0 is a single page.
1406 * Allocate a page from the kernel page pool.  When not in
1407 * interrupt context, apply the current process' NUMA policy.
1408 * Returns NULL when no page can be allocated.
1410 * Don't call cpuset_update_task_memory_state() unless
1411 * 1) it's ok to take cpuset_sem (can WAIT), and
1412 * 2) allocating for current task (not interrupt).
1414 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1416 struct mempolicy *pol = current->mempolicy;
1418 if ((gfp & __GFP_WAIT) && !in_interrupt())
1419 cpuset_update_task_memory_state();
1420 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
1421 pol = &default_policy;
1422 if (pol->policy == MPOL_INTERLEAVE)
1423 return alloc_page_interleave(gfp, order, interleave_nodes(pol));
1424 return __alloc_pages_nodemask(gfp, order,
1425 zonelist_policy(gfp, pol), nodemask_policy(gfp, pol));
1427 EXPORT_SYMBOL(alloc_pages_current);
1430 * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
1431 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
1432 * with the mems_allowed returned by cpuset_mems_allowed(). This
1433 * keeps mempolicies cpuset relative after its cpuset moves. See
1434 * further kernel/cpuset.c update_nodemask().
1437 /* Slow path of a mempolicy copy */
1438 struct mempolicy *__mpol_copy(struct mempolicy *old)
1440 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1443 return ERR_PTR(-ENOMEM);
1444 if (current_cpuset_is_being_rebound()) {
1445 nodemask_t mems = cpuset_mems_allowed(current);
1446 mpol_rebind_policy(old, &mems);
1449 atomic_set(&new->refcnt, 1);
1453 /* Slow path of a mempolicy comparison */
1454 int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1458 if (a->policy != b->policy)
1460 switch (a->policy) {
1465 case MPOL_INTERLEAVE:
1466 return nodes_equal(a->v.nodes, b->v.nodes);
1467 case MPOL_PREFERRED:
1468 return a->v.preferred_node == b->v.preferred_node;
1475 /* Slow path of a mpol destructor. */
1476 void __mpol_free(struct mempolicy *p)
1478 if (!atomic_dec_and_test(&p->refcnt))
1480 p->policy = MPOL_DEFAULT;
1481 kmem_cache_free(policy_cache, p);
1485 * Shared memory backing store policy support.
1487 * Remember policies even when nobody has shared memory mapped.
1488 * The policies are kept in Red-Black tree linked from the inode.
1489 * They are protected by the sp->lock spinlock, which should be held
1490 * for any accesses to the tree.
1493 /* lookup first element intersecting start-end */
1494 /* Caller holds sp->lock */
1495 static struct sp_node *
1496 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
1498 struct rb_node *n = sp->root.rb_node;
1501 struct sp_node *p = rb_entry(n, struct sp_node, nd);
1503 if (start >= p->end)
1505 else if (end <= p->start)
1513 struct sp_node *w = NULL;
1514 struct rb_node *prev = rb_prev(n);
1517 w = rb_entry(prev, struct sp_node, nd);
1518 if (w->end <= start)
1522 return rb_entry(n, struct sp_node, nd);
1525 /* Insert a new shared policy into the list. */
1526 /* Caller holds sp->lock */
1527 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
1529 struct rb_node **p = &sp->root.rb_node;
1530 struct rb_node *parent = NULL;
1535 nd = rb_entry(parent, struct sp_node, nd);
1536 if (new->start < nd->start)
1538 else if (new->end > nd->end)
1539 p = &(*p)->rb_right;
1543 rb_link_node(&new->nd, parent, p);
1544 rb_insert_color(&new->nd, &sp->root);
1545 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
1546 new->policy ? new->policy->policy : 0);
1549 /* Find shared policy intersecting idx */
1551 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
1553 struct mempolicy *pol = NULL;
1556 if (!sp->root.rb_node)
1558 spin_lock(&sp->lock);
1559 sn = sp_lookup(sp, idx, idx+1);
1561 mpol_get(sn->policy);
1564 spin_unlock(&sp->lock);
1568 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
1570 pr_debug("deleting %lx-l%lx\n", n->start, n->end);
1571 rb_erase(&n->nd, &sp->root);
1572 mpol_free(n->policy);
1573 kmem_cache_free(sn_cache, n);
1576 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
1577 struct mempolicy *pol)
1579 struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1590 /* Replace a policy range. */
1591 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
1592 unsigned long end, struct sp_node *new)
1594 struct sp_node *n, *new2 = NULL;
1597 spin_lock(&sp->lock);
1598 n = sp_lookup(sp, start, end);
1599 /* Take care of old policies in the same range. */
1600 while (n && n->start < end) {
1601 struct rb_node *next = rb_next(&n->nd);
1602 if (n->start >= start) {
1608 /* Old policy spanning whole new range. */
1611 spin_unlock(&sp->lock);
1612 new2 = sp_alloc(end, n->end, n->policy);
1618 sp_insert(sp, new2);
1626 n = rb_entry(next, struct sp_node, nd);
1630 spin_unlock(&sp->lock);
1632 mpol_free(new2->policy);
1633 kmem_cache_free(sn_cache, new2);
1638 void mpol_shared_policy_init(struct shared_policy *info, int policy,
1639 nodemask_t *policy_nodes)
1641 info->root = RB_ROOT;
1642 spin_lock_init(&info->lock);
1644 if (policy != MPOL_DEFAULT) {
1645 struct mempolicy *newpol;
1647 /* Falls back to MPOL_DEFAULT on any error */
1648 newpol = mpol_new(policy, policy_nodes);
1649 if (!IS_ERR(newpol)) {
1650 /* Create pseudo-vma that contains just the policy */
1651 struct vm_area_struct pvma;
1653 memset(&pvma, 0, sizeof(struct vm_area_struct));
1654 /* Policy covers entire file */
1655 pvma.vm_end = TASK_SIZE;
1656 mpol_set_shared_policy(info, &pvma, newpol);
1662 int mpol_set_shared_policy(struct shared_policy *info,
1663 struct vm_area_struct *vma, struct mempolicy *npol)
1666 struct sp_node *new = NULL;
1667 unsigned long sz = vma_pages(vma);
1669 pr_debug("set_shared_policy %lx sz %lu %d %lx\n",
1671 sz, npol? npol->policy : -1,
1672 npol ? nodes_addr(npol->v.nodes)[0] : -1);
1675 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
1679 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
1681 kmem_cache_free(sn_cache, new);
1685 /* Free a backing policy store on inode delete. */
1686 void mpol_free_shared_policy(struct shared_policy *p)
1689 struct rb_node *next;
1691 if (!p->root.rb_node)
1693 spin_lock(&p->lock);
1694 next = rb_first(&p->root);
1696 n = rb_entry(next, struct sp_node, nd);
1697 next = rb_next(&n->nd);
1698 rb_erase(&n->nd, &p->root);
1699 mpol_free(n->policy);
1700 kmem_cache_free(sn_cache, n);
1702 spin_unlock(&p->lock);
1705 /* assumes fs == KERNEL_DS */
1706 void __init numa_policy_init(void)
1708 nodemask_t interleave_nodes;
1709 unsigned long largest = 0;
1710 int nid, prefer = 0;
1712 policy_cache = kmem_cache_create("numa_policy",
1713 sizeof(struct mempolicy),
1714 0, SLAB_PANIC, NULL);
1716 sn_cache = kmem_cache_create("shared_policy_node",
1717 sizeof(struct sp_node),
1718 0, SLAB_PANIC, NULL);
1721 * Set interleaving policy for system init. Interleaving is only
1722 * enabled across suitably sized nodes (default is >= 16MB); if they
1723 * are all smaller, fall back to the largest node.
1725 nodes_clear(interleave_nodes);
1726 for_each_node_state(nid, N_HIGH_MEMORY) {
1727 unsigned long total_pages = node_present_pages(nid);
1729 /* Preserve the largest node */
1730 if (largest < total_pages) {
1731 largest = total_pages;
1735 /* Interleave this node? */
1736 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
1737 node_set(nid, interleave_nodes);
1740 /* All too small, use the largest */
1741 if (unlikely(nodes_empty(interleave_nodes)))
1742 node_set(prefer, interleave_nodes);
1744 if (do_set_mempolicy(MPOL_INTERLEAVE, &interleave_nodes))
1745 printk("numa_policy_init: interleaving failed\n");
1748 /* Reset policy of current process to default */
1749 void numa_default_policy(void)
1751 do_set_mempolicy(MPOL_DEFAULT, NULL);
1754 /* Migrate a policy to a different set of nodes */
1755 static void mpol_rebind_policy(struct mempolicy *pol,
1756 const nodemask_t *newmask)
1758 nodemask_t *mpolmask;
1763 mpolmask = &pol->cpuset_mems_allowed;
1764 if (nodes_equal(*mpolmask, *newmask))
1767 switch (pol->policy) {
1772 case MPOL_INTERLEAVE:
1773 nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask);
1775 *mpolmask = *newmask;
1776 current->il_next = node_remap(current->il_next,
1777 *mpolmask, *newmask);
1779 case MPOL_PREFERRED:
1780 pol->v.preferred_node = node_remap(pol->v.preferred_node,
1781 *mpolmask, *newmask);
1782 *mpolmask = *newmask;
1791 * Wrapper for mpol_rebind_policy() that just requires task
1792 * pointer, and updates task mempolicy.
1795 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
1797 mpol_rebind_policy(tsk->mempolicy, new);
1801 * Rebind each vma in mm to new nodemask.
1803 * Call holding a reference to mm. Takes mm->mmap_sem during call.
1806 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
1808 struct vm_area_struct *vma;
1810 down_write(&mm->mmap_sem);
1811 for (vma = mm->mmap; vma; vma = vma->vm_next)
1812 mpol_rebind_policy(vma->vm_policy, new);
1813 up_write(&mm->mmap_sem);
1817 * Display pages allocated per node and memory policy via /proc.
1820 static const char * const policy_types[] =
1821 { "default", "prefer", "bind", "interleave" };
1824 * Convert a mempolicy into a string.
1825 * Returns the number of characters in buffer (if positive)
1826 * or an error (negative)
1828 static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
1833 int mode = pol ? pol->policy : MPOL_DEFAULT;
1840 case MPOL_PREFERRED:
1842 node_set(pol->v.preferred_node, nodes);
1847 case MPOL_INTERLEAVE:
1848 nodes = pol->v.nodes;
1856 l = strlen(policy_types[mode]);
1857 if (buffer + maxlen < p + l + 1)
1860 strcpy(p, policy_types[mode]);
1863 if (!nodes_empty(nodes)) {
1864 if (buffer + maxlen < p + 2)
1867 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
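/*
 * Illustrative note: the strings produced here are the policy field shown
 * in /proc/<pid>/numa_maps, e.g. (assuming a 4-node machine)
 *
 *	"default"
 *	"prefer=1"
 *	"bind=0-1"
 *	"interleave=0,2-3"
 *
 * A minimal caller sketch; a negative return means the buffer was too
 * small:
 *
 *	char buf[64];
 *
 *	if (mpol_to_str(buf, sizeof(buf), pol) < 0)
 *		buf[0] = '\0';
 */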
1873 unsigned long pages;
1875 unsigned long active;
1876 unsigned long writeback;
1877 unsigned long mapcount_max;
1878 unsigned long dirty;
1879 unsigned long swapcache;
1880 unsigned long node[MAX_NUMNODES];
1883 static void gather_stats(struct page *page, void *private, int pte_dirty)
1885 struct numa_maps *md = private;
1886 int count = page_mapcount(page);
1889 if (pte_dirty || PageDirty(page))
1892 if (PageSwapCache(page))
1895 if (PageActive(page))
1898 if (PageWriteback(page))
1904 if (count > md->mapcount_max)
1905 md->mapcount_max = count;
1907 md->node[page_to_nid(page)]++;
1910 #ifdef CONFIG_HUGETLB_PAGE
1911 static void check_huge_range(struct vm_area_struct *vma,
1912 unsigned long start, unsigned long end,
1913 struct numa_maps *md)
1918 for (addr = start; addr < end; addr += HPAGE_SIZE) {
1919 pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
1929 page = pte_page(pte);
1933 gather_stats(page, md, pte_dirty(*ptep));
1937 static inline void check_huge_range(struct vm_area_struct *vma,
1938 unsigned long start, unsigned long end,
1939 struct numa_maps *md)
1944 int show_numa_map(struct seq_file *m, void *v)
1946 struct proc_maps_private *priv = m->private;
1947 struct vm_area_struct *vma = v;
1948 struct numa_maps *md;
1949 struct file *file = vma->vm_file;
1950 struct mm_struct *mm = vma->vm_mm;
1951 struct mempolicy *pol;
1958 md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
1962 pol = get_vma_policy(priv->task, vma, vma->vm_start);
1963 mpol_to_str(buffer, sizeof(buffer), pol);
1965 * unref shared or other task's mempolicy
1967 if (pol != &default_policy && pol != current->mempolicy)
1970 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1973 seq_printf(m, " file=");
1974 seq_path(m, &file->f_path, "\n\t= ");
1975 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1976 seq_printf(m, " heap");
1977 } else if (vma->vm_start <= mm->start_stack &&
1978 vma->vm_end >= mm->start_stack) {
1979 seq_printf(m, " stack");
1982 if (is_vm_hugetlb_page(vma)) {
1983 check_huge_range(vma, vma->vm_start, vma->vm_end, md);
1984 seq_printf(m, " huge");
1986 check_pgd_range(vma, vma->vm_start, vma->vm_end,
1987 &node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
1994 seq_printf(m, " anon=%lu", md->anon);
1997 seq_printf(m, " dirty=%lu", md->dirty);
1999 if (md->pages != md->anon && md->pages != md->dirty)
2000 seq_printf(m, " mapped=%lu", md->pages);
2002 if (md->mapcount_max > 1)
2003 seq_printf(m, " mapmax=%lu", md->mapcount_max);
2006 seq_printf(m, " swapcache=%lu", md->swapcache);
2008 if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2009 seq_printf(m, " active=%lu", md->active);
2012 seq_printf(m, " writeback=%lu", md->writeback);
2014 for_each_node_state(n, N_HIGH_MEMORY)
2016 seq_printf(m, " N%d=%lu", n, md->node[n]);
2021 if (m->count < m->size)
2022 m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
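/*
 * Illustrative note: a resulting /proc/<pid>/numa_maps line might look
 * like (values made up for a 2-node machine)
 *
 *	2aaaaac00000 interleave=0-1 anon=16 dirty=16 active=16 N0=8 N1=8
 *
 * i.e. the VMA start address, the policy string from mpol_to_str(), then
 * the per-VMA counters gathered above, ending with the per-node page
 * counts.
 */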