2 * Simple NUMA memory policy for the Linux kernel.
4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
6 * Subject to the GNU Public License, version 2.
8 * NUMA policy allows the user to give hints in which node(s) memory should
11 * Support four policies per VMA and per process:
13 * The VMA policy has priority over the process policy for a page fault.
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19 * for anonymous memory. For process policy a per-process counter
22 * bind Only allocate memory on a specific set of nodes,
24 * FIXME: memory is allocated starting with the first node
25 * to the last. It would be better if bind would truly restrict
26 * the allocation to memory nodes instead
28 * preferred Try a specific node first before normal fallback.
29 * As a special case NUMA_NO_NODE here means do the allocation
30 * on the local CPU. This is normally identical to default,
31 * but useful to set in a VMA when you have a non default
34 * default Allocate on the local node first, or when on a VMA
35 * use the process policy. This is what Linux always did
36 * in a NUMA aware kernel and still does by, ahem, default.
38 * The process policy is applied for most non interrupt memory allocations
39 * in that process' context. Interrupts ignore the policies and always
40 * try to allocate on the local CPU. The VMA policy is only applied for memory
41 * allocations for a VMA in the VM.
43 * Currently there are a few corner cases in swapping where the policy
44 * is not applied, but the majority should be handled. When process policy
45 * is used it is not remembered over swap outs/swap ins.
47 * Only the highest zone in the zone hierarchy gets policied. Allocations
48 * requesting a lower zone just use default policy. This implies that
49 * on systems with highmem, kernel lowmem allocations don't get policied.
50 * Same with GFP_DMA allocations.
52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53 * all users and remembered even when nobody has memory mapped.
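 *
 * Illustrative userspace sketch (not part of this file; assumes the
 * set_mempolicy()/mbind() wrappers from libnuma's <numaif.h>):
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);
 *
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8);
 *	mbind(addr, length, MPOL_BIND, &nodes, sizeof(nodes) * 8, 0);
 *
 * The first call interleaves all further allocations of the process
 * across nodes 0 and 1; the second installs a VMA policy on
 * [addr, addr + length) that takes priority over the process policy
 * for page faults in that range.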
57 fix mmap readahead to honour policy and enable policy for any page cache
59 statistics for bigpages
60 global policy for page cache? currently it uses process policy. Requires
62 handle mremap for shared memory (currently ignored for the policy)
64 make bind policy root only? It can trigger oom much faster and the
65 kernel is not always graceful about that.
68 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
70 #include <linux/mempolicy.h>
72 #include <linux/highmem.h>
73 #include <linux/hugetlb.h>
74 #include <linux/kernel.h>
75 #include <linux/sched.h>
76 #include <linux/nodemask.h>
77 #include <linux/cpuset.h>
78 #include <linux/slab.h>
79 #include <linux/string.h>
80 #include <linux/export.h>
81 #include <linux/nsproxy.h>
82 #include <linux/interrupt.h>
83 #include <linux/init.h>
84 #include <linux/compat.h>
85 #include <linux/swap.h>
86 #include <linux/seq_file.h>
87 #include <linux/proc_fs.h>
88 #include <linux/migrate.h>
89 #include <linux/ksm.h>
90 #include <linux/rmap.h>
91 #include <linux/security.h>
92 #include <linux/syscalls.h>
93 #include <linux/ctype.h>
94 #include <linux/mm_inline.h>
95 #include <linux/mmu_notifier.h>
96 #include <linux/printk.h>
98 #include <asm/tlbflush.h>
99 #include <asm/uaccess.h>
100 #include <linux/random.h>
102 #include "internal.h"
105 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
106 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
108 static struct kmem_cache *policy_cache;
109 static struct kmem_cache *sn_cache;
111 /* Highest zone. A specific allocation for a zone below that is not
113 enum zone_type policy_zone = 0;
116 * run-time system-wide default policy => local allocation
118 static struct mempolicy default_policy = {
119 .refcnt = ATOMIC_INIT(1), /* never free it */
120 .mode = MPOL_PREFERRED,
121 .flags = MPOL_F_LOCAL,
124 static struct mempolicy preferred_node_policy[MAX_NUMNODES];
126 struct mempolicy *get_task_policy(struct task_struct *p)
128 struct mempolicy *pol = p->mempolicy;
134 node = numa_node_id();
135 if (node != NUMA_NO_NODE) {
136 pol = &preferred_node_policy[node];
137 /* preferred_node_policy is not initialised early in boot */
142 return &default_policy;
145 static const struct mempolicy_operations {
146 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
148 * If read-side task has no lock to protect task->mempolicy, write-side
149 * task will rebind the task->mempolicy in two steps. The first step is
150 * setting all the newly allowed nodes, and the second step is clearing all the
151 * disallowed nodes. In this way, we can avoid finding no node to alloc
153 * If we have a lock to protect task->mempolicy in read-side, we do
157 * MPOL_REBIND_ONCE - do rebind work at once
158 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
159 * MPOL_REBIND_STEP2 - clear all the disallowed nodes
161 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
162 enum mpol_rebind_step step);
163 } mpol_ops[MPOL_MAX];
165 static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
167 return pol->flags & MPOL_MODE_FLAGS;
170 static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
171 const nodemask_t *rel)
174 nodes_fold(tmp, *orig, nodes_weight(*rel));
175 nodes_onto(*ret, tmp, *rel);
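/*
 * Illustrative example (assumed values, not taken from this file): with a
 * relative nodemask orig = {0,2} and an allowed set rel = {4,5,6},
 * nodes_fold() first wraps orig modulo nodes_weight(rel) = 3 (still {0,2}
 * here), and nodes_onto() then maps those ordinals onto the allowed set,
 * yielding ret = {4,6}, i.e. the 0th and 2nd nodes of rel.
 */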
178 static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
180 if (nodes_empty(*nodes))
182 pol->v.nodes = *nodes;
186 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
189 pol->flags |= MPOL_F_LOCAL; /* local allocation */
190 else if (nodes_empty(*nodes))
191 return -EINVAL; /* no allowed nodes */
193 pol->v.preferred_node = first_node(*nodes);
197 static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
199 if (nodes_empty(*nodes))
201 pol->v.nodes = *nodes;
206 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
207 * any, for the new policy. mpol_new() has already validated the nodes
208 * parameter with respect to the policy mode and flags. But, we need to
209 * handle an empty nodemask with MPOL_PREFERRED here.
211 * Must be called holding task's alloc_lock to protect task's mems_allowed
212 * and mempolicy. May also be called holding the mmap_semaphore for write.
214 static int mpol_set_nodemask(struct mempolicy *pol,
215 const nodemask_t *nodes, struct nodemask_scratch *nsc)
219 /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
223 nodes_and(nsc->mask1,
224 cpuset_current_mems_allowed, node_states[N_MEMORY]);
227 if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
228 nodes = NULL; /* explicit local allocation */
230 if (pol->flags & MPOL_F_RELATIVE_NODES)
231 mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
233 nodes_and(nsc->mask2, *nodes, nsc->mask1);
235 if (mpol_store_user_nodemask(pol))
236 pol->w.user_nodemask = *nodes;
238 pol->w.cpuset_mems_allowed =
239 cpuset_current_mems_allowed;
243 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
245 ret = mpol_ops[pol->mode].create(pol, NULL);
250 * This function just creates a new policy, does some checks and simple
251 * initialization. You must invoke mpol_set_nodemask() to set nodes.
253 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
256 struct mempolicy *policy;
258 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
259 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
261 if (mode == MPOL_DEFAULT) {
262 if (nodes && !nodes_empty(*nodes))
263 return ERR_PTR(-EINVAL);
269 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
270 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
271 * All other modes require a valid pointer to a non-empty nodemask.
273 if (mode == MPOL_PREFERRED) {
274 if (nodes_empty(*nodes)) {
275 if (((flags & MPOL_F_STATIC_NODES) ||
276 (flags & MPOL_F_RELATIVE_NODES)))
277 return ERR_PTR(-EINVAL);
279 } else if (mode == MPOL_LOCAL) {
280 if (!nodes_empty(*nodes))
281 return ERR_PTR(-EINVAL);
282 mode = MPOL_PREFERRED;
283 } else if (nodes_empty(*nodes))
284 return ERR_PTR(-EINVAL);
285 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
287 return ERR_PTR(-ENOMEM);
288 atomic_set(&policy->refcnt, 1);
290 policy->flags = flags;
295 /* Slow path of a mpol destructor. */
296 void __mpol_put(struct mempolicy *p)
298 if (!atomic_dec_and_test(&p->refcnt))
300 kmem_cache_free(policy_cache, p);
303 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
304 enum mpol_rebind_step step)
310 * MPOL_REBIND_ONCE - do rebind work at once
311 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
312 * MPOL_REBIND_STEP2 - clear all the disallowed nodes
314 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
315 enum mpol_rebind_step step)
319 if (pol->flags & MPOL_F_STATIC_NODES)
320 nodes_and(tmp, pol->w.user_nodemask, *nodes);
321 else if (pol->flags & MPOL_F_RELATIVE_NODES)
322 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
325 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
328 if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
329 nodes_remap(tmp, pol->v.nodes,
330 pol->w.cpuset_mems_allowed, *nodes);
331 pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
332 } else if (step == MPOL_REBIND_STEP2) {
333 tmp = pol->w.cpuset_mems_allowed;
334 pol->w.cpuset_mems_allowed = *nodes;
339 if (nodes_empty(tmp))
342 if (step == MPOL_REBIND_STEP1)
343 nodes_or(pol->v.nodes, pol->v.nodes, tmp);
344 else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
349 if (!node_isset(current->il_next, tmp)) {
350 current->il_next = next_node(current->il_next, tmp);
351 if (current->il_next >= MAX_NUMNODES)
352 current->il_next = first_node(tmp);
353 if (current->il_next >= MAX_NUMNODES)
354 current->il_next = numa_node_id();
358 static void mpol_rebind_preferred(struct mempolicy *pol,
359 const nodemask_t *nodes,
360 enum mpol_rebind_step step)
364 if (pol->flags & MPOL_F_STATIC_NODES) {
365 int node = first_node(pol->w.user_nodemask);
367 if (node_isset(node, *nodes)) {
368 pol->v.preferred_node = node;
369 pol->flags &= ~MPOL_F_LOCAL;
371 pol->flags |= MPOL_F_LOCAL;
372 } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
373 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
374 pol->v.preferred_node = first_node(tmp);
375 } else if (!(pol->flags & MPOL_F_LOCAL)) {
376 pol->v.preferred_node = node_remap(pol->v.preferred_node,
377 pol->w.cpuset_mems_allowed,
379 pol->w.cpuset_mems_allowed = *nodes;
384 * mpol_rebind_policy - Migrate a policy to a different set of nodes
386 * If read-side task has no lock to protect task->mempolicy, write-side
387 * task will rebind the task->mempolicy in two steps. The first step is
388 * setting all the newly allowed nodes, and the second step is clearing all the
389 * disallowed nodes. In this way, we can avoid finding no node to alloc
391 * If we have a lock to protect task->mempolicy in read-side, we do
395 * MPOL_REBIND_ONCE - do rebind work at once
396 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
397 * MPOL_REBIND_STEP2 - clear all the disallowed nodes
399 static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
400 enum mpol_rebind_step step)
404 if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
405 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
408 if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
411 if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
414 if (step == MPOL_REBIND_STEP1)
415 pol->flags |= MPOL_F_REBINDING;
416 else if (step == MPOL_REBIND_STEP2)
417 pol->flags &= ~MPOL_F_REBINDING;
418 else if (step >= MPOL_REBIND_NSTEP)
421 mpol_ops[pol->mode].rebind(pol, newmask, step);
425 * Wrapper for mpol_rebind_policy() that just requires task
426 * pointer, and updates task mempolicy.
428 * Called with task's alloc_lock held.
431 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
432 enum mpol_rebind_step step)
434 mpol_rebind_policy(tsk->mempolicy, new, step);
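/*
 * Sketch of the intended two-step usage (assumed caller, modelled on the
 * cpuset code): a writer that cannot rely on a read-side lock first grows
 * the mask, updates mems_allowed, then trims the disallowed nodes:
 *
 *	mpol_rebind_task(tsk, &newmems, MPOL_REBIND_STEP1);
 *	tsk->mems_allowed = newmems;
 *	mpol_rebind_task(tsk, &newmems, MPOL_REBIND_STEP2);
 *
 * A caller that does hold a read-side lock can instead do the whole
 * rebind in one go with MPOL_REBIND_ONCE.
 */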
438 * Rebind each vma in mm to new nodemask.
440 * Call holding a reference to mm. Takes mm->mmap_sem during call.
443 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
445 struct vm_area_struct *vma;
447 down_write(&mm->mmap_sem);
448 for (vma = mm->mmap; vma; vma = vma->vm_next)
449 mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
450 up_write(&mm->mmap_sem);
453 static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
455 .rebind = mpol_rebind_default,
457 [MPOL_INTERLEAVE] = {
458 .create = mpol_new_interleave,
459 .rebind = mpol_rebind_nodemask,
462 .create = mpol_new_preferred,
463 .rebind = mpol_rebind_preferred,
466 .create = mpol_new_bind,
467 .rebind = mpol_rebind_nodemask,
471 static void migrate_page_add(struct page *page, struct list_head *pagelist,
472 unsigned long flags);
475 * Scan through pages, checking whether they meet certain conditions,
476 * and move them to the pagelist if they do.
478 static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
479 unsigned long addr, unsigned long end,
480 const nodemask_t *nodes, unsigned long flags,
487 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
492 if (!pte_present(*pte))
494 page = vm_normal_page(vma, addr, *pte);
498 * vm_normal_page() filters out zero pages, but there might
499 * still be PageReserved pages to skip, perhaps in a VDSO.
501 if (PageReserved(page))
503 nid = page_to_nid(page);
504 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
507 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
508 migrate_page_add(page, private, flags);
511 } while (pte++, addr += PAGE_SIZE, addr != end);
512 pte_unmap_unlock(orig_pte, ptl);
516 static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
517 pmd_t *pmd, const nodemask_t *nodes, unsigned long flags,
520 #ifdef CONFIG_HUGETLB_PAGE
526 ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, (pte_t *)pmd);
527 entry = huge_ptep_get((pte_t *)pmd);
528 if (!pte_present(entry))
530 page = pte_page(entry);
531 nid = page_to_nid(page);
532 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
534 /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
535 if (flags & (MPOL_MF_MOVE_ALL) ||
536 (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
537 isolate_huge_page(page, private);
545 static inline int queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud,
546 unsigned long addr, unsigned long end,
547 const nodemask_t *nodes, unsigned long flags,
553 pmd = pmd_offset(pud, addr);
555 next = pmd_addr_end(addr, end);
556 if (!pmd_present(*pmd))
558 if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
559 queue_pages_hugetlb_pmd_range(vma, pmd, nodes,
563 split_huge_page_pmd(vma, addr, pmd);
564 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
566 if (queue_pages_pte_range(vma, pmd, addr, next, nodes,
569 } while (pmd++, addr = next, addr != end);
573 static inline int queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
574 unsigned long addr, unsigned long end,
575 const nodemask_t *nodes, unsigned long flags,
581 pud = pud_offset(pgd, addr);
583 next = pud_addr_end(addr, end);
584 if (pud_huge(*pud) && is_vm_hugetlb_page(vma))
586 if (pud_none_or_clear_bad(pud))
588 if (queue_pages_pmd_range(vma, pud, addr, next, nodes,
591 } while (pud++, addr = next, addr != end);
595 static inline int queue_pages_pgd_range(struct vm_area_struct *vma,
596 unsigned long addr, unsigned long end,
597 const nodemask_t *nodes, unsigned long flags,
603 pgd = pgd_offset(vma->vm_mm, addr);
605 next = pgd_addr_end(addr, end);
606 if (pgd_none_or_clear_bad(pgd))
608 if (queue_pages_pud_range(vma, pgd, addr, next, nodes,
611 } while (pgd++, addr = next, addr != end);
615 #ifdef CONFIG_NUMA_BALANCING
617 * This is used to mark a range of virtual addresses to be inaccessible.
618 * These are later cleared by a NUMA hinting fault. Depending on these
619 * faults, pages may be migrated for better NUMA placement.
621 * This is assuming that NUMA faults are handled using PROT_NONE. If
622 * an architecture makes a different choice, it will need further
623 * changes to the core.
625 unsigned long change_prot_numa(struct vm_area_struct *vma,
626 unsigned long addr, unsigned long end)
630 nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
632 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
637 static unsigned long change_prot_numa(struct vm_area_struct *vma,
638 unsigned long addr, unsigned long end)
642 #endif /* CONFIG_NUMA_BALANCING */
645 * Walk through page tables and collect pages to be migrated.
647 * If pages found in a given range are on a set of nodes (determined by
648 * @nodes and @flags), they are isolated and queued to the pagelist which is
649 * passed via @private.
652 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
653 const nodemask_t *nodes, unsigned long flags, void *private)
656 struct vm_area_struct *vma, *prev;
658 vma = find_vma(mm, start);
662 for (; vma && vma->vm_start < end; vma = vma->vm_next) {
663 unsigned long endvma = vma->vm_end;
667 if (vma->vm_start > start)
668 start = vma->vm_start;
670 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
671 if (!vma->vm_next && vma->vm_end < end)
673 if (prev && prev->vm_end < vma->vm_start)
677 if (flags & MPOL_MF_LAZY) {
678 /* Similar to task_numa_work, skip inaccessible VMAs */
679 if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
680 change_prot_numa(vma, start, endvma);
684 if ((flags & MPOL_MF_STRICT) ||
685 ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
686 vma_migratable(vma))) {
688 err = queue_pages_pgd_range(vma, start, endvma, nodes,
700 * Apply policy to a single VMA
701 * This must be called with the mmap_sem held for writing.
703 static int vma_replace_policy(struct vm_area_struct *vma,
704 struct mempolicy *pol)
707 struct mempolicy *old;
708 struct mempolicy *new;
710 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
711 vma->vm_start, vma->vm_end, vma->vm_pgoff,
712 vma->vm_ops, vma->vm_file,
713 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
719 if (vma->vm_ops && vma->vm_ops->set_policy) {
720 err = vma->vm_ops->set_policy(vma, new);
725 old = vma->vm_policy;
726 vma->vm_policy = new; /* protected by mmap_sem */
735 /* Step 2: apply policy to a range and do splits. */
736 static int mbind_range(struct mm_struct *mm, unsigned long start,
737 unsigned long end, struct mempolicy *new_pol)
739 struct vm_area_struct *next;
740 struct vm_area_struct *prev;
741 struct vm_area_struct *vma;
744 unsigned long vmstart;
747 vma = find_vma(mm, start);
748 if (!vma || vma->vm_start > start)
752 if (start > vma->vm_start)
755 for (; vma && vma->vm_start < end; prev = vma, vma = next) {
757 vmstart = max(start, vma->vm_start);
758 vmend = min(end, vma->vm_end);
760 if (mpol_equal(vma_policy(vma), new_pol))
763 pgoff = vma->vm_pgoff +
764 ((vmstart - vma->vm_start) >> PAGE_SHIFT);
765 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
766 vma->anon_vma, vma->vm_file, pgoff,
771 if (mpol_equal(vma_policy(vma), new_pol))
773 /* vma_merge() joined vma && vma->next, case 8 */
776 if (vma->vm_start != vmstart) {
777 err = split_vma(vma->vm_mm, vma, vmstart, 1);
781 if (vma->vm_end != vmend) {
782 err = split_vma(vma->vm_mm, vma, vmend, 0);
787 err = vma_replace_policy(vma, new_pol);
796 /* Set the process memory policy */
797 static long do_set_mempolicy(unsigned short mode, unsigned short flags,
800 struct mempolicy *new, *old;
801 NODEMASK_SCRATCH(scratch);
807 new = mpol_new(mode, flags, nodes);
814 ret = mpol_set_nodemask(new, nodes, scratch);
816 task_unlock(current);
820 old = current->mempolicy;
821 current->mempolicy = new;
822 if (new && new->mode == MPOL_INTERLEAVE &&
823 nodes_weight(new->v.nodes))
824 current->il_next = first_node(new->v.nodes);
825 task_unlock(current);
829 NODEMASK_SCRATCH_FREE(scratch);
834 * Return nodemask for policy for get_mempolicy() query
836 * Called with task's alloc_lock held
838 static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
841 if (p == &default_policy)
847 case MPOL_INTERLEAVE:
851 if (!(p->flags & MPOL_F_LOCAL))
852 node_set(p->v.preferred_node, *nodes);
853 /* else return empty node mask for local allocation */
860 static int lookup_node(struct mm_struct *mm, unsigned long addr)
865 err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
867 err = page_to_nid(p);
873 /* Retrieve NUMA policy */
874 static long do_get_mempolicy(int *policy, nodemask_t *nmask,
875 unsigned long addr, unsigned long flags)
878 struct mm_struct *mm = current->mm;
879 struct vm_area_struct *vma = NULL;
880 struct mempolicy *pol = current->mempolicy;
883 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
886 if (flags & MPOL_F_MEMS_ALLOWED) {
887 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
889 *policy = 0; /* just so it's initialized */
891 *nmask = cpuset_current_mems_allowed;
892 task_unlock(current);
896 if (flags & MPOL_F_ADDR) {
898 * Do NOT fall back to task policy if the
899 * vma/shared policy at addr is NULL. We
900 * want to return MPOL_DEFAULT in this case.
902 down_read(&mm->mmap_sem);
903 vma = find_vma_intersection(mm, addr, addr+1);
905 up_read(&mm->mmap_sem);
908 if (vma->vm_ops && vma->vm_ops->get_policy)
909 pol = vma->vm_ops->get_policy(vma, addr);
911 pol = vma->vm_policy;
916 pol = &default_policy; /* indicates default behavior */
918 if (flags & MPOL_F_NODE) {
919 if (flags & MPOL_F_ADDR) {
920 err = lookup_node(mm, addr);
924 } else if (pol == current->mempolicy &&
925 pol->mode == MPOL_INTERLEAVE) {
926 *policy = current->il_next;
932 *policy = pol == &default_policy ? MPOL_DEFAULT :
935 * Internal mempolicy flags must be masked off before exposing
936 * the policy to userspace.
938 *policy |= (pol->flags & MPOL_MODE_FLAGS);
942 up_read(&current->mm->mmap_sem);
948 if (mpol_store_user_nodemask(pol)) {
949 *nmask = pol->w.user_nodemask;
952 get_policy_nodemask(pol, nmask);
953 task_unlock(current);
960 up_read(&current->mm->mmap_sem);
964 #ifdef CONFIG_MIGRATION
968 static void migrate_page_add(struct page *page, struct list_head *pagelist,
972 * Avoid migrating a page that is shared with others.
974 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
975 if (!isolate_lru_page(page)) {
976 list_add_tail(&page->lru, pagelist);
977 inc_zone_page_state(page, NR_ISOLATED_ANON +
978 page_is_file_cache(page));
983 static struct page *new_node_page(struct page *page, unsigned long node, int **x)
986 return alloc_huge_page_node(page_hstate(compound_head(page)),
989 return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
993 * Migrate pages from one node to a target node.
994 * Returns error or the number of pages not migrated.
996 static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1000 LIST_HEAD(pagelist);
1004 node_set(source, nmask);
1007 * This does not "check" the range but isolates all pages that
1008 * need migration. Between passing in the full user address
1009 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1011 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1012 queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
1013 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1015 if (!list_empty(&pagelist)) {
1016 err = migrate_pages(&pagelist, new_node_page, NULL, dest,
1017 MIGRATE_SYNC, MR_SYSCALL);
1019 putback_movable_pages(&pagelist);
1026 * Move pages between the two nodesets so as to preserve the physical
1027 * layout as much as possible.
1029 * Returns the number of pages that could not be moved.
1031 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1032 const nodemask_t *to, int flags)
1038 err = migrate_prep();
1042 down_read(&mm->mmap_sem);
1044 err = migrate_vmas(mm, from, to, flags);
1049 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1050 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
1051 * bit in 'tmp', and return that <source, dest> pair for migration.
1052 * The pair of nodemasks 'to' and 'from' define the map.
1054 * If no pair of bits is found that way, fallback to picking some
1055 * pair of 'source' and 'dest' bits that are not the same. If the
1056 * 'source' and 'dest' bits are the same, this represents a node
1057 * that will be migrating to itself, so no pages need move.
1059 * If no bits are left in 'tmp', or if all remaining bits left
1060 * in 'tmp' correspond to the same bit in 'to', return false
1061 * (nothing left to migrate).
1063 * This lets us pick a pair of nodes to migrate between, such that
1064 * if possible the dest node is not already occupied by some other
1065 * source node, minimizing the risk of overloading the memory on a
1066 * node that would happen if we migrated incoming memory to a node
1067 * before migrating outgoing memory sourced from that same node.
1069 * A single scan of tmp is sufficient. As we go, we remember the
1070 * most recent <s, d> pair that moved (s != d). If we find a pair
1071 * that not only moved, but what's better, moved to an empty slot
1072 * (d is not set in tmp), then we break out then, with that pair.
1073 * Otherwise when we finish scanning tmp, we at least have the
1074 * most recent <s, d> pair that moved. If we get all the way through
1075 * the scan of tmp without finding any node that moved, much less
1076 * moved to an empty node, then there is nothing left worth migrating.
1080 while (!nodes_empty(tmp)) {
1082 int source = NUMA_NO_NODE;
1085 for_each_node_mask(s, tmp) {
1088 * do_migrate_pages() tries to maintain the relative
1089 * node relationship of the pages established between
1090 * threads and memory areas.
1092 * However if the number of source nodes is not equal to
1093 * the number of destination nodes we can not preserve
1094 * this node relative relationship. In that case, skip
1095 * copying memory from a node that is in the destination
1098 * Example: [2,3,4] -> [3,4,5] moves everything.
1099 * [0-7] - > [3,4,5] moves only 0,1,2,6,7.
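 *
 * Worked remap for that second case (assumed trace): node_remap() maps a
 * source node by its ordinal position modulo the destination weight, so
 * 0->3, 1->4, 2->5, 6->3 and 7->4, while 3, 4 and 5 are skipped by the
 * test below because they are already members of the destination set.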
1102 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1103 (node_isset(s, *to)))
1106 d = node_remap(s, *from, *to);
1110 source = s; /* Node moved. Memorize */
1113 /* dest not in remaining from nodes? */
1114 if (!node_isset(dest, tmp))
1117 if (source == NUMA_NO_NODE)
1120 node_clear(source, tmp);
1121 err = migrate_to_node(mm, source, dest, flags);
1128 up_read(&mm->mmap_sem);
1136 * Allocate a new page for page migration based on vma policy.
1137 * Start by assuming the page is mapped by the same vma as contains @start.
1138 * Search forward from there, if not. N.B., this assumes that the
1139 * list of pages handed to migrate_pages()--which is how we get here--
1140 * is in virtual address order.
1142 static struct page *new_page(struct page *page, unsigned long start, int **x)
1144 struct vm_area_struct *vma;
1145 unsigned long uninitialized_var(address);
1147 vma = find_vma(current->mm, start);
1149 address = page_address_in_vma(page, vma);
1150 if (address != -EFAULT)
1155 if (PageHuge(page)) {
1157 return alloc_huge_page_noerr(vma, address, 1);
1160 * if !vma, alloc_page_vma() will use task or system default policy
1162 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1166 static void migrate_page_add(struct page *page, struct list_head *pagelist,
1167 unsigned long flags)
1171 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1172 const nodemask_t *to, int flags)
1177 static struct page *new_page(struct page *page, unsigned long start, int **x)
1183 static long do_mbind(unsigned long start, unsigned long len,
1184 unsigned short mode, unsigned short mode_flags,
1185 nodemask_t *nmask, unsigned long flags)
1187 struct mm_struct *mm = current->mm;
1188 struct mempolicy *new;
1191 LIST_HEAD(pagelist);
1193 if (flags & ~(unsigned long)MPOL_MF_VALID)
1195 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1198 if (start & ~PAGE_MASK)
1201 if (mode == MPOL_DEFAULT)
1202 flags &= ~MPOL_MF_STRICT;
1204 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1212 new = mpol_new(mode, mode_flags, nmask);
1214 return PTR_ERR(new);
1216 if (flags & MPOL_MF_LAZY)
1217 new->flags |= MPOL_F_MOF;
1220 * If we are using the default policy then operation
1221 * on discontinuous address spaces is okay after all
1224 flags |= MPOL_MF_DISCONTIG_OK;
1226 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1227 start, start + len, mode, mode_flags,
1228 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1230 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1232 err = migrate_prep();
1237 NODEMASK_SCRATCH(scratch);
1239 down_write(&mm->mmap_sem);
1241 err = mpol_set_nodemask(new, nmask, scratch);
1242 task_unlock(current);
1244 up_write(&mm->mmap_sem);
1247 NODEMASK_SCRATCH_FREE(scratch);
1252 err = queue_pages_range(mm, start, end, nmask,
1253 flags | MPOL_MF_INVERT, &pagelist);
1255 err = mbind_range(mm, start, end, new);
1260 if (!list_empty(&pagelist)) {
1261 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1262 nr_failed = migrate_pages(&pagelist, new_page, NULL,
1263 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1265 putback_movable_pages(&pagelist);
1268 if (nr_failed && (flags & MPOL_MF_STRICT))
1271 putback_movable_pages(&pagelist);
1273 up_write(&mm->mmap_sem);
1280 * User space interface with variable sized bitmaps for nodelists.
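 *
 * Illustrative call (assumed userspace sketch): a nodemask is passed as an
 * array of unsigned longs plus a bit count, e.g.
 *
 *	unsigned long mask[1] = { (1UL << 0) | (1UL << 2) };
 *	set_mempolicy(MPOL_BIND, mask, sizeof(mask) * 8);
 *
 * get_nodes() below copies and validates such a bitmap, and
 * copy_nodes_to_user() performs the reverse conversion for get_mempolicy().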
1283 /* Copy a node mask from user space. */
1284 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1285 unsigned long maxnode)
1288 unsigned long nlongs;
1289 unsigned long endmask;
1292 nodes_clear(*nodes);
1293 if (maxnode == 0 || !nmask)
1295 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1298 nlongs = BITS_TO_LONGS(maxnode);
1299 if ((maxnode % BITS_PER_LONG) == 0)
1302 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1304 /* When the user specified more nodes than supported just check
1305 if the non supported part is all zero. */
1306 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1307 if (nlongs > PAGE_SIZE/sizeof(long))
1309 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1311 if (get_user(t, nmask + k))
1313 if (k == nlongs - 1) {
1319 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1323 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1325 nodes_addr(*nodes)[nlongs-1] &= endmask;
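/*
 * Example (assumed values): for maxnode = 5 on a 64-bit kernel,
 * nlongs = BITS_TO_LONGS(5) = 1 and endmask = (1UL << 5) - 1 = 0x1f,
 * so only bits 0-4 of the user-supplied word survive the final mask above.
 */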
1329 /* Copy a kernel node mask to user space */
1330 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1333 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1334 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1336 if (copy > nbytes) {
1337 if (copy > PAGE_SIZE)
1339 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1343 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1346 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1347 unsigned long, mode, const unsigned long __user *, nmask,
1348 unsigned long, maxnode, unsigned, flags)
1352 unsigned short mode_flags;
1354 mode_flags = mode & MPOL_MODE_FLAGS;
1355 mode &= ~MPOL_MODE_FLAGS;
1356 if (mode >= MPOL_MAX)
1358 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1359 (mode_flags & MPOL_F_RELATIVE_NODES))
1361 err = get_nodes(&nodes, nmask, maxnode);
1364 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1367 /* Set the process memory policy */
1368 SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1369 unsigned long, maxnode)
1373 unsigned short flags;
1375 flags = mode & MPOL_MODE_FLAGS;
1376 mode &= ~MPOL_MODE_FLAGS;
1377 if ((unsigned int)mode >= MPOL_MAX)
1379 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1381 err = get_nodes(&nodes, nmask, maxnode);
1384 return do_set_mempolicy(mode, flags, &nodes);
1387 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1388 const unsigned long __user *, old_nodes,
1389 const unsigned long __user *, new_nodes)
1391 const struct cred *cred = current_cred(), *tcred;
1392 struct mm_struct *mm = NULL;
1393 struct task_struct *task;
1394 nodemask_t task_nodes;
1398 NODEMASK_SCRATCH(scratch);
1403 old = &scratch->mask1;
1404 new = &scratch->mask2;
1406 err = get_nodes(old, old_nodes, maxnode);
1410 err = get_nodes(new, new_nodes, maxnode);
1414 /* Find the mm_struct */
1416 task = pid ? find_task_by_vpid(pid) : current;
1422 get_task_struct(task);
1427 * Check if this process has the right to modify the specified
1428 * process. The right exists if the process has administrative
1429 * capabilities, superuser privileges or the same
1430 * userid as the target process.
1432 tcred = __task_cred(task);
1433 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1434 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
1435 !capable(CAP_SYS_NICE)) {
1442 task_nodes = cpuset_mems_allowed(task);
1443 /* Is the user allowed to access the target nodes? */
1444 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1449 if (!nodes_subset(*new, node_states[N_MEMORY])) {
1454 err = security_task_movememory(task);
1458 mm = get_task_mm(task);
1459 put_task_struct(task);
1466 err = do_migrate_pages(mm, old, new,
1467 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1471 NODEMASK_SCRATCH_FREE(scratch);
1476 put_task_struct(task);
1482 /* Retrieve NUMA policy */
1483 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1484 unsigned long __user *, nmask, unsigned long, maxnode,
1485 unsigned long, addr, unsigned long, flags)
1488 int uninitialized_var(pval);
1491 if (nmask != NULL && maxnode < MAX_NUMNODES)
1494 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1499 if (policy && put_user(pval, policy))
1503 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1508 #ifdef CONFIG_COMPAT
1510 COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1511 compat_ulong_t __user *, nmask,
1512 compat_ulong_t, maxnode,
1513 compat_ulong_t, addr, compat_ulong_t, flags)
1516 unsigned long __user *nm = NULL;
1517 unsigned long nr_bits, alloc_size;
1518 DECLARE_BITMAP(bm, MAX_NUMNODES);
1520 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1521 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1524 nm = compat_alloc_user_space(alloc_size);
1526 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1528 if (!err && nmask) {
1529 unsigned long copy_size;
1530 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1531 err = copy_from_user(bm, nm, copy_size);
1532 /* ensure entire bitmap is zeroed */
1533 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1534 err |= compat_put_bitmap(nmask, bm, nr_bits);
1540 COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1541 compat_ulong_t, maxnode)
1544 unsigned long __user *nm = NULL;
1545 unsigned long nr_bits, alloc_size;
1546 DECLARE_BITMAP(bm, MAX_NUMNODES);
1548 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1549 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1552 err = compat_get_bitmap(bm, nmask, nr_bits);
1553 nm = compat_alloc_user_space(alloc_size);
1554 err |= copy_to_user(nm, bm, alloc_size);
1560 return sys_set_mempolicy(mode, nm, nr_bits+1);
1563 COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1564 compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1565 compat_ulong_t, maxnode, compat_ulong_t, flags)
1568 unsigned long __user *nm = NULL;
1569 unsigned long nr_bits, alloc_size;
1572 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1573 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1576 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1577 nm = compat_alloc_user_space(alloc_size);
1578 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1584 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1589 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1592 struct mempolicy *pol = NULL;
1595 if (vma->vm_ops && vma->vm_ops->get_policy) {
1596 pol = vma->vm_ops->get_policy(vma, addr);
1597 } else if (vma->vm_policy) {
1598 pol = vma->vm_policy;
1601 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1602 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1603 * count on these policies which will be dropped by
1604 * mpol_cond_put() later
1606 if (mpol_needs_cond_ref(pol))
1615 * get_vma_policy(@vma, @addr)
1616 * @vma: virtual memory area whose policy is sought
1617 * @addr: address in @vma for shared policy lookup
1619 * Returns effective policy for a VMA at specified address.
1620 * Falls back to current->mempolicy or system default policy, as necessary.
1621 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1622 * count--added by the get_policy() vm_op, as appropriate--to protect against
1623 * freeing by another task. It is the caller's responsibility to free the
1624 * extra reference for shared policies.
1626 static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1629 struct mempolicy *pol = __get_vma_policy(vma, addr);
1632 pol = get_task_policy(current);
1637 bool vma_policy_mof(struct vm_area_struct *vma)
1639 struct mempolicy *pol;
1641 if (vma->vm_ops && vma->vm_ops->get_policy) {
1644 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1645 if (pol && (pol->flags & MPOL_F_MOF))
1652 pol = vma->vm_policy;
1654 pol = get_task_policy(current);
1656 return pol->flags & MPOL_F_MOF;
1659 static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1661 enum zone_type dynamic_policy_zone = policy_zone;
1663 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1666 * if policy->v.nodes has movable memory only,
1667 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1669 * policy->v.nodes is intersected with node_states[N_MEMORY],
1670 * so if the following test fails, it implies
1671 * policy->v.nodes has movable memory only.
1673 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1674 dynamic_policy_zone = ZONE_MOVABLE;
1676 return zone >= dynamic_policy_zone;
1680 * Return a nodemask representing a mempolicy for filtering nodes for
1683 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1685 /* Lower zones don't get a nodemask applied for MPOL_BIND */
1686 if (unlikely(policy->mode == MPOL_BIND) &&
1687 apply_policy_zone(policy, gfp_zone(gfp)) &&
1688 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1689 return &policy->v.nodes;
1694 /* Return a zonelist indicated by gfp for node representing a mempolicy */
1695 static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1698 switch (policy->mode) {
1699 case MPOL_PREFERRED:
1700 if (!(policy->flags & MPOL_F_LOCAL))
1701 nd = policy->v.preferred_node;
1705 * Normally, MPOL_BIND allocations are node-local within the
1706 * allowed nodemask. However, if __GFP_THISNODE is set and the
1707 * current node isn't part of the mask, we use the zonelist for
1708 * the first node in the mask instead.
1710 if (unlikely(gfp & __GFP_THISNODE) &&
1711 unlikely(!node_isset(nd, policy->v.nodes)))
1712 nd = first_node(policy->v.nodes);
1717 return node_zonelist(nd, gfp);
1720 /* Do dynamic interleaving for a process */
1721 static unsigned interleave_nodes(struct mempolicy *policy)
1724 struct task_struct *me = current;
1727 next = next_node(nid, policy->v.nodes);
1728 if (next >= MAX_NUMNODES)
1729 next = first_node(policy->v.nodes);
1730 if (next < MAX_NUMNODES)
1736 * Depending on the memory policy provide a node from which to allocate the
1739 unsigned int mempolicy_slab_node(void)
1741 struct mempolicy *policy;
1742 int node = numa_mem_id();
1747 policy = current->mempolicy;
1748 if (!policy || policy->flags & MPOL_F_LOCAL)
1751 switch (policy->mode) {
1752 case MPOL_PREFERRED:
1754 * handled MPOL_F_LOCAL above
1756 return policy->v.preferred_node;
1758 case MPOL_INTERLEAVE:
1759 return interleave_nodes(policy);
1763 * Follow bind policy behavior and start allocation at the
1766 struct zonelist *zonelist;
1768 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1769 zonelist = &NODE_DATA(node)->node_zonelists[0];
1770 (void)first_zones_zonelist(zonelist, highest_zoneidx,
1773 return zone ? zone->node : node;
1781 /* Do static interleaving for a VMA with known offset. */
1782 static unsigned offset_il_node(struct mempolicy *pol,
1783 struct vm_area_struct *vma, unsigned long off)
1785 unsigned nnodes = nodes_weight(pol->v.nodes);
1788 int nid = NUMA_NO_NODE;
1791 return numa_node_id();
1792 target = (unsigned int)off % nnodes;
1795 nid = next_node(nid, pol->v.nodes);
1797 } while (c <= target);
1801 /* Determine a node number for interleave */
1802 static inline unsigned interleave_nid(struct mempolicy *pol,
1803 struct vm_area_struct *vma, unsigned long addr, int shift)
1809 * for small pages, there is no difference between
1810 * shift and PAGE_SHIFT, so the bit-shift is safe.
1811 * for huge pages, since vm_pgoff is in units of small
1812 * pages, we need to shift off the always 0 bits to get
1815 BUG_ON(shift < PAGE_SHIFT);
1816 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1817 off += (addr - vma->vm_start) >> shift;
1818 return offset_il_node(pol, vma, off);
1820 return interleave_nodes(pol);
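/*
 * Worked example (assumed values): for an interleave policy over nodes
 * {0,1,2,3} (nnodes = 4), a file-backed VMA with vm_pgoff = 10 and a fault
 * 3 pages into the VMA gives off = 13, so target = 13 % 4 = 1 and
 * offset_il_node() returns the second node set in the mask, i.e. node 1.
 * The same offset always maps to the same node, which is what makes the
 * per-object interleave "static".
 */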
1824 * Return the bit number of a random bit set in the nodemask.
1825 * (returns NUMA_NO_NODE if nodemask is empty)
1827 int node_random(const nodemask_t *maskp)
1829 int w, bit = NUMA_NO_NODE;
1831 w = nodes_weight(*maskp);
1833 bit = bitmap_ord_to_pos(maskp->bits,
1834 get_random_int() % w, MAX_NUMNODES);
1838 #ifdef CONFIG_HUGETLBFS
1840 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1841 * @vma: virtual memory area whose policy is sought
1842 * @addr: address in @vma for shared policy lookup and interleave policy
1843 * @gfp_flags: for requested zone
1844 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1845 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
1847 * Returns a zonelist suitable for a huge page allocation and a pointer
1848 * to the struct mempolicy for conditional unref after allocation.
1849 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
1850 * @nodemask for filtering the zonelist.
1852 * Must be protected by read_mems_allowed_begin()
1854 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1855 gfp_t gfp_flags, struct mempolicy **mpol,
1856 nodemask_t **nodemask)
1858 struct zonelist *zl;
1860 *mpol = get_vma_policy(vma, addr);
1861 *nodemask = NULL; /* assume !MPOL_BIND */
1863 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1864 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1865 huge_page_shift(hstate_vma(vma))), gfp_flags);
1867 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
1868 if ((*mpol)->mode == MPOL_BIND)
1869 *nodemask = &(*mpol)->v.nodes;
1875 * init_nodemask_of_mempolicy
1877 * If the current task's mempolicy is "default" [NULL], return 'false'
1878 * to indicate default policy. Otherwise, extract the policy nodemask
1879 * for 'bind' or 'interleave' policy into the argument nodemask, or
1880 * initialize the argument nodemask to contain the single node for
1881 * 'preferred' or 'local' policy and return 'true' to indicate presence
1882 * of non-default mempolicy.
1884 * We don't bother with reference counting the mempolicy [mpol_get/put]
1885 * because the current task is examining its own mempolicy and a task's
1886 * mempolicy is only ever changed by the task itself.
1888 * N.B., it is the caller's responsibility to free a returned nodemask.
1890 bool init_nodemask_of_mempolicy(nodemask_t *mask)
1892 struct mempolicy *mempolicy;
1895 if (!(mask && current->mempolicy))
1899 mempolicy = current->mempolicy;
1900 switch (mempolicy->mode) {
1901 case MPOL_PREFERRED:
1902 if (mempolicy->flags & MPOL_F_LOCAL)
1903 nid = numa_node_id();
1905 nid = mempolicy->v.preferred_node;
1906 init_nodemask_of_node(mask, nid);
1911 case MPOL_INTERLEAVE:
1912 *mask = mempolicy->v.nodes;
1918 task_unlock(current);
1925 * mempolicy_nodemask_intersects
1927 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1928 * policy. Otherwise, check for intersection between mask and the policy
1929 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1930 * policy, always return true since it may allocate elsewhere on fallback.
1932 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1934 bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1935 const nodemask_t *mask)
1937 struct mempolicy *mempolicy;
1943 mempolicy = tsk->mempolicy;
1947 switch (mempolicy->mode) {
1948 case MPOL_PREFERRED:
1950 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
1951 * allocate from, they may fallback to other nodes when oom.
1952 * Thus, it's possible for tsk to have allocated memory from
1957 case MPOL_INTERLEAVE:
1958 ret = nodes_intersects(mempolicy->v.nodes, *mask);
1968 /* Allocate a page in interleaved policy.
1969 Own path because it needs to do special accounting. */
1970 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1973 struct zonelist *zl;
1976 zl = node_zonelist(nid, gfp);
1977 page = __alloc_pages(gfp, order, zl);
1978 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1979 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1984 * alloc_pages_vma - Allocate a page for a VMA.
1987 * %GFP_USER user allocation.
1988 * %GFP_KERNEL kernel allocations,
1989 * %GFP_HIGHMEM highmem/user allocations,
1990 * %GFP_FS allocation should not call back into a file system.
1991 * %GFP_ATOMIC don't sleep.
1993 * @order: Order of the GFP allocation.
1994 * @vma: Pointer to VMA or NULL if not available.
1995 * @addr: Virtual Address of the allocation. Must be inside the VMA.
1997 * This function allocates a page from the kernel page pool and applies
1998 * a NUMA policy associated with the VMA or the current process.
1999 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
2000 * mm_struct of the VMA to prevent it from going away. Should be used for
2001 * all allocations for pages that will be mapped into
2002 * user space. Returns NULL when no page can be allocated.
2004 * Should be called with the mmap_sem of the vma held.
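 *
 * Minimal usage sketch (assumed caller, names are illustrative):
 *
 *	down_read(&mm->mmap_sem);
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
 *			       numa_node_id());
 *	up_read(&mm->mmap_sem);
 *
 * which resolves the VMA or task policy for @addr and then allocates
 * through that policy's zonelist and nodemask.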
2007 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2008 unsigned long addr, int node)
2010 struct mempolicy *pol;
2012 unsigned int cpuset_mems_cookie;
2015 pol = get_vma_policy(vma, addr);
2016 cpuset_mems_cookie = read_mems_allowed_begin();
2018 if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
2021 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2023 page = alloc_page_interleave(gfp, order, nid);
2024 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2029 page = __alloc_pages_nodemask(gfp, order,
2030 policy_zonelist(gfp, pol, node),
2031 policy_nodemask(gfp, pol));
2033 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2039 * alloc_pages_current - Allocate pages.
2042 * %GFP_USER user allocation,
2043 * %GFP_KERNEL kernel allocation,
2044 * %GFP_HIGHMEM highmem allocation,
2045 * %GFP_FS don't call back into a file system.
2046 * %GFP_ATOMIC don't sleep.
2047 * @order: Power of two of allocation size in pages. 0 is a single page.
2049 * Allocate a page from the kernel page pool. When not in
2050 * interrupt context, apply the current process' NUMA policy.
2051 * Returns NULL when no page can be allocated.
2053 * Don't call cpuset_update_task_memory_state() unless
2054 * 1) it's ok to take cpuset_sem (can WAIT), and
2055 * 2) allocating for current task (not interrupt).
2057 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
2059 struct mempolicy *pol = &default_policy;
2061 unsigned int cpuset_mems_cookie;
2063 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2064 pol = get_task_policy(current);
2067 cpuset_mems_cookie = read_mems_allowed_begin();
2070 * No reference counting needed for current->mempolicy
2071 * nor system default_policy
2073 if (pol->mode == MPOL_INTERLEAVE)
2074 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2076 page = __alloc_pages_nodemask(gfp, order,
2077 policy_zonelist(gfp, pol, numa_node_id()),
2078 policy_nodemask(gfp, pol));
2080 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2085 EXPORT_SYMBOL(alloc_pages_current);
2087 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2089 struct mempolicy *pol = mpol_dup(vma_policy(src));
2092 return PTR_ERR(pol);
2093 dst->vm_policy = pol;
2098 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2099 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2100 * with the mems_allowed returned by cpuset_mems_allowed(). This
2101 * keeps mempolicies cpuset relative after its cpuset moves. See
2102 * further kernel/cpuset.c update_nodemask().
2104 * current's mempolicy may be rebound by the other task (the task that changes
2105 * cpuset's mems), so we needn't do rebind work for current task.
2108 /* Slow path of a mempolicy duplicate */
2109 struct mempolicy *__mpol_dup(struct mempolicy *old)
2111 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2114 return ERR_PTR(-ENOMEM);
2116 /* task's mempolicy is protected by alloc_lock */
2117 if (old == current->mempolicy) {
2120 task_unlock(current);
2124 if (current_cpuset_is_being_rebound()) {
2125 nodemask_t mems = cpuset_mems_allowed(current);
2126 if (new->flags & MPOL_F_REBINDING)
2127 mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2129 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
2131 atomic_set(&new->refcnt, 1);
2135 /* Slow path of a mempolicy comparison */
2136 bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2140 if (a->mode != b->mode)
2142 if (a->flags != b->flags)
2144 if (mpol_store_user_nodemask(a))
2145 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2151 case MPOL_INTERLEAVE:
2152 return !!nodes_equal(a->v.nodes, b->v.nodes);
2153 case MPOL_PREFERRED:
2154 return a->v.preferred_node == b->v.preferred_node;
2162 * Shared memory backing store policy support.
2164 * Remember policies even when nobody has shared memory mapped.
2165 * The policies are kept in Red-Black tree linked from the inode.
2166 * They are protected by the sp->lock spinlock, which should be held
2167 * for any accesses to the tree.
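 *
 * Illustrative layout (assumed values): a tmpfs file might carry
 *
 *	[0, 100)   -> MPOL_INTERLEAVE over nodes 0-3
 *	[100, 200) -> MPOL_BIND to node 1
 *
 * sp_lookup(sp, 150, 151) then returns the second sp_node, so a fault at
 * page index 150 is allocated according to MPOL_BIND on node 1,
 * regardless of which task has the file mapped.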
2170 /* lookup first element intersecting start-end */
2171 /* Caller holds sp->lock */
2172 static struct sp_node *
2173 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2175 struct rb_node *n = sp->root.rb_node;
2178 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2180 if (start >= p->end)
2182 else if (end <= p->start)
2190 struct sp_node *w = NULL;
2191 struct rb_node *prev = rb_prev(n);
2194 w = rb_entry(prev, struct sp_node, nd);
2195 if (w->end <= start)
2199 return rb_entry(n, struct sp_node, nd);
2202 /* Insert a new shared policy into the list. */
2203 /* Caller holds sp->lock */
2204 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2206 struct rb_node **p = &sp->root.rb_node;
2207 struct rb_node *parent = NULL;
2212 nd = rb_entry(parent, struct sp_node, nd);
2213 if (new->start < nd->start)
2215 else if (new->end > nd->end)
2216 p = &(*p)->rb_right;
2220 rb_link_node(&new->nd, parent, p);
2221 rb_insert_color(&new->nd, &sp->root);
2222 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2223 new->policy ? new->policy->mode : 0);
2226 /* Find shared policy intersecting idx */
2228 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2230 struct mempolicy *pol = NULL;
2233 if (!sp->root.rb_node)
2235 spin_lock(&sp->lock);
2236 sn = sp_lookup(sp, idx, idx+1);
2238 mpol_get(sn->policy);
2241 spin_unlock(&sp->lock);
2245 static void sp_free(struct sp_node *n)
2247 mpol_put(n->policy);
2248 kmem_cache_free(sn_cache, n);
2252 * mpol_misplaced - check whether current page node is valid in policy
2254 * @page: page to be checked
2255 * @vma: vm area where page mapped
2256 * @addr: virtual address where page mapped
2258 * Lookup current policy node id for vma,addr and "compare to" page's
2262 * -1 - not misplaced, page is in the right node
2263 * node - node id where the page should be
2265 * Policy determination "mimics" alloc_page_vma().
2266 * Called from fault path where we know the vma and faulting address.
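 *
 * Typical use from the fault path (assumed sketch, simplified from the
 * NUMA hinting fault handlers):
 *
 *	target_nid = mpol_misplaced(page, vma, addr);
 *	if (target_nid != -1)
 *		migrated = migrate_misplaced_page(page, vma, target_nid);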
2268 int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2270 struct mempolicy *pol;
2272 int curnid = page_to_nid(page);
2273 unsigned long pgoff;
2274 int thiscpu = raw_smp_processor_id();
2275 int thisnid = cpu_to_node(thiscpu);
2281 pol = get_vma_policy(vma, addr);
2282 if (!(pol->flags & MPOL_F_MOF))
2285 switch (pol->mode) {
2286 case MPOL_INTERLEAVE:
2287 BUG_ON(addr >= vma->vm_end);
2288 BUG_ON(addr < vma->vm_start);
2290 pgoff = vma->vm_pgoff;
2291 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2292 polnid = offset_il_node(pol, vma, pgoff);
2295 case MPOL_PREFERRED:
2296 if (pol->flags & MPOL_F_LOCAL)
2297 polnid = numa_node_id();
2299 polnid = pol->v.preferred_node;
2304 * allows binding to multiple nodes.
2305 * use current page if in policy nodemask,
2306 * else select nearest allowed node, if any.
2307 * If no allowed nodes, use current [!misplaced].
2309 if (node_isset(curnid, pol->v.nodes))
2311 (void)first_zones_zonelist(
2312 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2313 gfp_zone(GFP_HIGHUSER),
2314 &pol->v.nodes, &zone);
2315 polnid = zone->node;
2322 /* Migrate the page towards the node whose CPU is referencing it */
2323 if (pol->flags & MPOL_F_MORON) {
2326 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2330 if (curnid != polnid)
2338 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2340 pr_debug("deleting %lx-l%lx\n", n->start, n->end);
2341 rb_erase(&n->nd, &sp->root);
2345 static void sp_node_init(struct sp_node *node, unsigned long start,
2346 unsigned long end, struct mempolicy *pol)
2348 node->start = start;
2353 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2354 struct mempolicy *pol)
2357 struct mempolicy *newpol;
2359 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2363 newpol = mpol_dup(pol);
2364 if (IS_ERR(newpol)) {
2365 kmem_cache_free(sn_cache, n);
2368 newpol->flags |= MPOL_F_SHARED;
2369 sp_node_init(n, start, end, newpol);
2374 /* Replace a policy range. */
2375 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2376 unsigned long end, struct sp_node *new)
2379 struct sp_node *n_new = NULL;
2380 struct mempolicy *mpol_new = NULL;
2384 spin_lock(&sp->lock);
2385 n = sp_lookup(sp, start, end);
2386 /* Take care of old policies in the same range. */
2387 while (n && n->start < end) {
2388 struct rb_node *next = rb_next(&n->nd);
2389 if (n->start >= start) {
2395 /* Old policy spanning whole new range. */
2400 *mpol_new = *n->policy;
2401 atomic_set(&mpol_new->refcnt, 1);
2402 sp_node_init(n_new, end, n->end, mpol_new);
2404 sp_insert(sp, n_new);
2413 n = rb_entry(next, struct sp_node, nd);
2417 spin_unlock(&sp->lock);
2424 kmem_cache_free(sn_cache, n_new);
2429 spin_unlock(&sp->lock);
2431 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2434 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2441 * mpol_shared_policy_init - initialize shared policy for inode
2442 * @sp: pointer to inode shared policy
2443 * @mpol: struct mempolicy to install
2445 * Install non-NULL @mpol in inode's shared policy rb-tree.
2446 * On entry, the current task has a reference on a non-NULL @mpol.
2447 * This must be released on exit.
2448 * This is called during get_inode() calls, so we can use GFP_KERNEL.
2450 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2454 sp->root = RB_ROOT; /* empty tree == default mempolicy */
2455 spin_lock_init(&sp->lock);
2458 struct vm_area_struct pvma;
2459 struct mempolicy *new;
2460 NODEMASK_SCRATCH(scratch);
2464 /* contextualize the tmpfs mount point mempolicy */
2465 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2467 goto free_scratch; /* no valid nodemask intersection */
2470 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2471 task_unlock(current);
2475 /* Create pseudo-vma that contains just the policy */
2476 memset(&pvma, 0, sizeof(struct vm_area_struct));
2477 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2478 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2481 mpol_put(new); /* drop initial ref */
2483 NODEMASK_SCRATCH_FREE(scratch);
2485 mpol_put(mpol); /* drop our incoming ref on sb mpol */
2489 int mpol_set_shared_policy(struct shared_policy *info,
2490 struct vm_area_struct *vma, struct mempolicy *npol)
2493 struct sp_node *new = NULL;
2494 unsigned long sz = vma_pages(vma);
2496 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2498 sz, npol ? npol->mode : -1,
2499 npol ? npol->flags : -1,
2500 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
2503 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2507 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
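	/*
	 * The shared tree is indexed by page offset into the backing object,
	 * so the policy attaches to [vm_pgoff, vm_pgoff + sz) of the object
	 * itself and applies to every mapping of it, not just this vma.
	 */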
2513 /* Free a backing policy store on inode delete. */
2514 void mpol_free_shared_policy(struct shared_policy *p)
2517 struct rb_node *next;
2519 if (!p->root.rb_node)
2521 spin_lock(&p->lock);
2522 next = rb_first(&p->root);
2524 n = rb_entry(next, struct sp_node, nd);
2525 next = rb_next(&n->nd);
2528 spin_unlock(&p->lock);
2531 #ifdef CONFIG_NUMA_BALANCING
2532 static int __initdata numabalancing_override;
2534 static void __init check_numabalancing_enable(void)
2536 bool numabalancing_default = false;
2538 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2539 numabalancing_default = true;
2541 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2542 if (numabalancing_override)
2543 set_numabalancing_state(numabalancing_override == 1);
2545 if (nr_node_ids > 1 && !numabalancing_override) {
2546 pr_info("%s automatic NUMA balancing. "
2547 "Configure with numa_balancing= or the "
2548 "kernel.numa_balancing sysctl\n",
2549 numabalancing_default ? "Enabling" : "Disabling");
2550 set_numabalancing_state(numabalancing_default);
2554 static int __init setup_numabalancing(char *str)
2560 if (!strcmp(str, "enable")) {
2561 numabalancing_override = 1;
2563 } else if (!strcmp(str, "disable")) {
2564 numabalancing_override = -1;
2569 pr_warn("Unable to parse numa_balancing=\n");
2573 __setup("numa_balancing=", setup_numabalancing);
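/*
 * Example: booting with "numa_balancing=disable" sets the override to -1,
 * so check_numabalancing_enable() forces balancing off regardless of
 * CONFIG_NUMA_BALANCING_DEFAULT_ENABLED; "numa_balancing=enable" forces
 * it on.
 */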
2575 static inline void __init check_numabalancing_enable(void)
2578 #endif /* CONFIG_NUMA_BALANCING */
2580 /* assumes fs == KERNEL_DS */
2581 void __init numa_policy_init(void)
2583 nodemask_t interleave_nodes;
2584 unsigned long largest = 0;
2585 int nid, prefer = 0;
2587 policy_cache = kmem_cache_create("numa_policy",
2588 sizeof(struct mempolicy),
2589 0, SLAB_PANIC, NULL);
2591 sn_cache = kmem_cache_create("shared_policy_node",
2592 sizeof(struct sp_node),
2593 0, SLAB_PANIC, NULL);
2595 for_each_node(nid) {
2596 preferred_node_policy[nid] = (struct mempolicy) {
2597 .refcnt = ATOMIC_INIT(1),
2598 .mode = MPOL_PREFERRED,
2599 .flags = MPOL_F_MOF | MPOL_F_MORON,
2600 .v = { .preferred_node = nid, },
2605 * Set interleaving policy for system init. Interleaving is only
2606 * enabled across suitably sized nodes (default is >= 16MB); if they
2607 * are all smaller, fall back to the largest node.
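 * For example, with 4 KiB pages a node needs at least 4096 present pages
 * (4096 << PAGE_SHIFT == 16 MiB) to be included in the interleave set.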
2609 nodes_clear(interleave_nodes);
2610 for_each_node_state(nid, N_MEMORY) {
2611 unsigned long total_pages = node_present_pages(nid);
2613 /* Preserve the largest node */
2614 if (largest < total_pages) {
2615 largest = total_pages;
2619 /* Interleave this node? */
2620 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2621 node_set(nid, interleave_nodes);
2624 /* All too small, use the largest */
2625 if (unlikely(nodes_empty(interleave_nodes)))
2626 node_set(prefer, interleave_nodes);
2628 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2629 pr_err("%s: interleaving failed\n", __func__);
2631 check_numabalancing_enable();
2634 /* Reset policy of current process to default */
2635 void numa_default_policy(void)
2637 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2641 * Parse and format mempolicy from/to strings
2645 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
2647 static const char * const policy_modes[] =
2649 [MPOL_DEFAULT] = "default",
2650 [MPOL_PREFERRED] = "prefer",
2651 [MPOL_BIND] = "bind",
2652 [MPOL_INTERLEAVE] = "interleave",
2653 [MPOL_LOCAL] = "local",
2659 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2660 * @str: string containing mempolicy to parse
2661 * @mpol: pointer to struct mempolicy pointer, returned on success.
2664 * <mode>[=<flags>][:<nodelist>]
2666 * On success, returns 0, else 1
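 *
 * Example strings (as used for the tmpfs "mpol=" mount option):
 *	"bind:0,2", "interleave=static:0-3", "prefer:1", "local", "default"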
2668 int mpol_parse_str(char *str, struct mempolicy **mpol)
2670 struct mempolicy *new = NULL;
2671 unsigned short mode;
2672 unsigned short mode_flags;
2674 char *nodelist = strchr(str, ':');
2675 char *flags = strchr(str, '=');
2679 /* NUL-terminate mode or flags string */
2681 if (nodelist_parse(nodelist, nodes))
2683 if (!nodes_subset(nodes, node_states[N_MEMORY]))
2689 *flags++ = '\0'; /* terminate mode string */
2691 for (mode = 0; mode < MPOL_MAX; mode++) {
2692 if (!strcmp(str, policy_modes[mode])) {
2696 if (mode >= MPOL_MAX)
2700 case MPOL_PREFERRED:
2702 * Insist on a nodelist of one node only
2705 char *rest = nodelist;
2706 while (isdigit(*rest))
2712 case MPOL_INTERLEAVE:
2714 * Default to online nodes with memory if no nodelist
2717 nodes = node_states[N_MEMORY];
2721 * Don't allow a nodelist; mpol_new() checks flags
2725 mode = MPOL_PREFERRED;
2729 * Insist on an empty nodelist
2736 * Insist on a nodelist
2745 * Currently, we only support two mutually exclusive
2748 if (!strcmp(flags, "static"))
2749 mode_flags |= MPOL_F_STATIC_NODES;
2750 else if (!strcmp(flags, "relative"))
2751 mode_flags |= MPOL_F_RELATIVE_NODES;
2756 new = mpol_new(mode, mode_flags, &nodes);
2761 * Save nodes for mpol_to_str() to show the tmpfs mount options
2762 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2764 if (mode != MPOL_PREFERRED)
2765 new->v.nodes = nodes;
2767 new->v.preferred_node = first_node(nodes);
2769 new->flags |= MPOL_F_LOCAL;
2772 * Save nodes for contextualization: this will be used to "clone"
2773 * the mempolicy in a specific context [cpuset] at a later time.
2775 new->w.user_nodemask = nodes;
2780 /* Restore string for error message */
2789 #endif /* CONFIG_TMPFS */
2792 * mpol_to_str - format a mempolicy structure for printing
2793 * @buffer: to contain formatted mempolicy string
2794 * @maxlen: length of @buffer
2795 * @pol: pointer to mempolicy to be formatted
2797 * Convert @pol into a string. If @buffer is too short, truncate the string.
2798 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2799 * longest flag, "relative", and to display at least a few node ids.
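 * For example, an interleave policy over nodes 0-3 with the relative flag
 * is rendered as "interleave=relative:0-3", while a local preference (the
 * allocating CPU's node) is rendered as just "local".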
2801 void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
2804 nodemask_t nodes = NODE_MASK_NONE;
2805 unsigned short mode = MPOL_DEFAULT;
2806 unsigned short flags = 0;
2808 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2816 case MPOL_PREFERRED:
2817 if (flags & MPOL_F_LOCAL)
2820 node_set(pol->v.preferred_node, nodes);
2823 case MPOL_INTERLEAVE:
2824 nodes = pol->v.nodes;
2828 snprintf(p, maxlen, "unknown");
2832 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
2834 if (flags & MPOL_MODE_FLAGS) {
2835 p += snprintf(p, buffer + maxlen - p, "=");
2838 * Currently, the only defined flags are mutually exclusive
2840 if (flags & MPOL_F_STATIC_NODES)
2841 p += snprintf(p, buffer + maxlen - p, "static");
2842 else if (flags & MPOL_F_RELATIVE_NODES)
2843 p += snprintf(p, buffer + maxlen - p, "relative");
2846 if (!nodes_empty(nodes)) {
2847 p += snprintf(p, buffer + maxlen - p, ":");
2848 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);