/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA
/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted. A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage. The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	union {
		short		 preferred_node; /* preferred */
		nodemask_t	 nodes;		/* interleave/bind */
		/* undefined for default */
	} v;
	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};
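/*
 * Example (illustrative sketch, not taken from kernel sources): for an
 * MPOL_INTERLEAVE policy, "mode" selects the algorithm and "v.nodes"
 * holds the set of nodes interleaved over, so a caller could test
 * whether a given node participates with:
 *
 *	static bool interleaves_over(struct mempolicy *pol, int nid)
 *	{
 *		return pol->mode == MPOL_INTERLEAVE &&
 *		       node_isset(nid, pol->v.nodes);
 *	}
 */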
/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */
extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}
/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}
static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}
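/*
 * Example (sketch of the usual calling convention, not a quote from the
 * kernel): a fault-path lookup pairs get_vma_policy() with
 * mpol_cond_put(), which drops the reference only when the policy came
 * out of a shared policy tree (MPOL_F_SHARED):
 *
 *	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 *	... allocate pages against pol ...
 *	mpol_cond_put(pol);
 */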
extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
					  struct mempolicy *frompol);
static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol,
					       struct mempolicy *frompol)
{
	if (!frompol)
		return frompol;
	return __mpol_cond_copy(tompol, frompol);
}
extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}
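/*
 * Example (illustrative; "new_vma" is a hypothetical name, and
 * vma_policy()/vma_set_policy() are defined just below): duplicating a
 * VMA's policy, e.g. when splitting a VMA. The copy starts with its own
 * reference (refcnt == 1) and is later released with mpol_put():
 *
 *	struct mempolicy *newpol = mpol_dup(vma_policy(vma));
 *	if (IS_ERR(newpol))
 *		return PTR_ERR(newpol);
 *	vma_set_policy(new_vma, newpol);
 */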
#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))
static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}
extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return true;
	return __mpol_equal(a, b);
}
/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};
struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
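/*
 * Example (sketch of a typical caller; "info" and "index" are
 * hypothetical names): because the tree is indexed in pages, a shared
 * memory user looks a policy up by page offset into the object rather
 * than by virtual address, roughly as shmem does:
 *
 *	struct mempolicy *pol;
 *
 *	pol = mpol_shared_policy_lookup(&info->policy, index);
 *	... use pol; it carries a reference when found in the tree, so
 *	    drop it with mpol_cond_put() when done ...
 */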
struct mempolicy *get_vma_policy(struct task_struct *tsk,
		struct vm_area_struct *vma, unsigned long addr);
extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
				enum mpol_rebind_step step);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
				const nodemask_t *mask);
extern unsigned slab_node(void);

extern enum zone_type policy_zone;
static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags);
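/*
 * Example (illustrative scenario; MPOL_MF_MOVE is the real move flag,
 * the node numbers are made up): migrate_pages(2) lands here to move a
 * task's pages between node sets, e.g. from nodes 0-1 to nodes 2-3:
 *
 *	nodemask_t from = nodemask_of_node(0);
 *	nodemask_t to = nodemask_of_node(2);
 *
 *	node_set(1, from);
 *	node_set(3, to);
 *	err = do_migrate_pages(mm, &from, &to, MPOL_MF_MOVE);
 */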
extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context);

extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
			int no_context);
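/*
 * Example (format recalled from mm/mempolicy.c; double-check the grammar
 * there): mpol_parse_str() accepts strings of the form
 * <mode>[=<flags>][:<nodelist>], as seen in tmpfs "mpol=" mount options:
 *
 *	"interleave:0-3"	interleave across nodes 0,1,2,3
 *	"bind:0,2"		allocate only on nodes 0 and 2
 *	"prefer:1"		prefer node 1, fall back elsewhere
 *	"default"		revert to the process policy
 */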
/* Check if a vma is migratable */
static inline int vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO | VM_HUGETLB | VM_PFNMAP))
		return 0;
	/*
	 * Migration allocates pages in the highest zone. If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
								< policy_zone)
			return 0;
	return 1;
}
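/*
 * Example (hypothetical loop, not kernel code): migration-style walkers
 * use this to skip unsuitable mappings up front:
 *
 *	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 *		if (!vma_migratable(vma))
 *			continue;
 *		... queue this vma's pages for migration ...
 *	}
 */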
extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
#else /* CONFIG_NUMA */

struct mempolicy {};
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to,
					       struct mempolicy *from)
{
	return from;
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_dup(struct mempolicy *old)
{
	return NULL;
}
struct shared_policy {};
static inline int mpol_set_shared_policy(struct shared_policy *info,
					struct vm_area_struct *vma,
					struct mempolicy *new)
{
	return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}
#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)
static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new,
				enum mpol_rebind_step step)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline void mpol_fix_fork_child_flag(struct task_struct *p)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	return node_zonelist(0, gfp_flags);
}
static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}

static inline bool mempolicy_nodemask_intersects(struct task_struct *tsk,
			const nodemask_t *mask)
{
	return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
				   const nodemask_t *to, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}
static inline int mpol_parse_str(char *str, struct mempolicy **mpol,
				int no_context)
{
	return 1;	/* error */
}

static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
				int no_context)
{
	return 0;
}

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
				 unsigned long address)
{
	return -1; /* no node preference */
}
#endif /* CONFIG_NUMA */
#endif