#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>

#include <asm/sections.h>
#include <asm/uaccess.h>

#include "internal.h"

static inline int is_kernel_rodata(unsigned long addr)
{
	return addr >= (unsigned long)__start_rodata &&
		addr < (unsigned long)__end_rodata;
}

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Returns the source string if it is in the .rodata section, otherwise it
 * falls back to kstrdup().
 * Strings allocated by kstrdup_const should be freed by kfree_const.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);

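/*
 * Illustrative sketch (not part of this file): callers typically pair
 * kstrdup_const() with kfree_const(), so names that already live in .rodata
 * are neither copied nor freed.  "struct foo" and its field are hypothetical.
 *
 *	static int foo_set_name(struct foo *f, const char *name)
 *	{
 *		f->name = kstrdup_const(name, GFP_KERNEL);
 *		return f->name ? 0 : -ENOMEM;
 *	}
 *
 *	static void foo_release(struct foo *f)
 *	{
 *		kfree_const(f->name);
 *	}
 */
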
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

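/*
 * Illustrative sketch (not part of this file): kstrndup() bounds the copy,
 * so it suits sources that are not guaranteed to be NUL-terminated within a
 * known size.  "raw" and "RAW_LEN" are hypothetical.
 *
 *	char *label = kstrndup(raw, RAW_LEN, GFP_KERNEL);
 *	if (!label)
 *		return -ENOMEM;
 *	...
 *	kfree(label);
 */
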
/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

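/*
 * Illustrative sketch (not part of this file): kmemdup() is the usual way to
 * take a private copy of a caller-supplied structure or table.  "defaults"
 * is a hypothetical template object.
 *
 *	struct foo_config *cfg = kmemdup(defaults, sizeof(*defaults), GFP_KERNEL);
 *	if (!cfg)
 *		return -ENOMEM;
 *	...
 *	kfree(cfg);
 */
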
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

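/*
 * Illustrative sketch (not part of this file): because memdup_user() reports
 * failure through ERR_PTR(), callers check the result with IS_ERR()/PTR_ERR()
 * rather than against NULL.  "uarg" and "size" are hypothetical.
 *
 *	void *buf = memdup_user(uarg, size);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	...
 *	kfree(buf);
 */
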
/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

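/*
 * Illustrative sketch (not part of this file): strndup_user() returns either
 * a NUL-terminated kernel copy of the user string or an ERR_PTR().  Since @n
 * includes the trailing NUL, PATH_MAX-style limits can be passed directly.
 *
 *	char *name = strndup_user(uname, PATH_MAX);
 *	if (IS_ERR(name))
 *		return PTR_ERR(name);
 *	...
 *	kfree(name);
 */
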
/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

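/*
 * Illustrative sketch (not part of this file): memdup_user_nul() is a natural
 * fit for write() handlers that want to parse the user buffer as a string.
 * The handler below is hypothetical; only the file_operations signature is
 * standard.
 *
 *	static ssize_t foo_write(struct file *file, const char __user *ubuf,
 *				 size_t count, loff_t *ppos)
 *	{
 *		char *kbuf = memdup_user_nul(ubuf, count);
 *
 *		if (IS_ERR(kbuf))
 *			return PTR_ERR(kbuf);
 *		... parse kbuf ...
 *		kfree(kbuf);
 *		return count;
 *	}
 */
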
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower, so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	return get_user_pages_unlocked(start, nr_pages, write, 0, pages);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);

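/*
 * Illustrative sketch (not part of this file): a caller pins a user buffer
 * for writing, handles a short pin, and drops the page references with
 * put_page() once the I/O has completed.  "start", "npages" and "pages" are
 * hypothetical locals.
 *
 *	int i, got;
 *
 *	got = get_user_pages_fast(start, npages, 1, pages);
 *	if (got < 0)
 *		return got;
 *	if (got < npages) {
 *		for (i = 0; i < got; i++)
 *			put_page(pages[i]);
 *		return -EFAULT;
 *	}
 *	... use the pinned pages ...
 *	for (i = 0; i < got; i++)
 *		put_page(pages[i]);
 */
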
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate);
		up_write(&mm->mmap_sem);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

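/*
 * Illustrative sketch (not part of this file): kvfree() lets a caller free a
 * buffer without tracking whether it came from kmalloc() or vmalloc(), which
 * is handy for "try kmalloc, fall back to vmalloc" allocations.
 *
 *	void *buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
 *
 *	if (!buf)
 *		buf = vmalloc(size);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kvfree(buf);
 */
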
static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any subpage of compound page is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < hpage_nr_pages(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP, page->_mapcount contains the total number of mappings
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

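/*
 * Worked example (illustrative, assuming 4KiB pages): with 2097152 pages of
 * RAM (8GiB), no hugetlb pages, 524288 pages of swap (2GiB) and the default
 * sysctl_overcommit_ratio of 50, the OVERCOMMIT_NEVER limit is
 *
 *	allowed = (2097152 - 0) * 50 / 100 + 524288 = 1572864 pages (6GiB)
 *
 * A non-zero sysctl_overcommit_kbytes replaces the ratio-based term with
 * that many kilobytes converted to pages.
 */
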
/*
 * Make sure vm_committed_as is in its own cacheline and not shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting.
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long free, allowed, reserve;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_node_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_node_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure. The dentry
		 * cache and most inode caches should fall into this
		 * category.
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

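/*
 * Illustrative sketch (not part of this file): an LSM hook would typically
 * derive cap_sys_admin from a capability check and delegate the accounting
 * to this helper, roughly as the default capability code does.
 *
 *	static int example_vm_enough_memory(struct mm_struct *mm, long pages)
 *	{
 *		int cap_sys_admin = 0;
 *
 *		if (cap_capable(current_cred(), &init_user_ns, CAP_SYS_ADMIN,
 *				SECURITY_CAP_NOAUDIT) == 0)
 *			cap_sys_admin = 1;
 *		return __vm_enough_memory(mm, pages, cap_sys_admin);
 *	}
 */
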
/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	down_read(&mm->mmap_sem);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	up_read(&mm->mmap_sem);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, 0);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len, 0);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

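/*
 * Illustrative sketch (not part of this file): a typical consumer is a
 * /proc-style dumper that wants a best-effort command line for another task.
 * The 256-byte buffer is arbitrary for the example, and the result is not
 * guaranteed to be NUL-terminated, so print it with an explicit length.
 *
 *	char cmdline[256];
 *	int n = get_cmdline(task, cmdline, sizeof(cmdline));
 *
 *	pr_info("cmdline: %.*s\n", n, cmdline);
 */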