// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/random.h>

#include <linux/uaccess.h>

#include "internal.h"
/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);
/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);
/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const.
 *
 * Return: source string if it is in the .rodata section, otherwise a
 * kstrdup() copy.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
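/*
 * Illustrative sketch (editorial addition, not in the original file): the
 * typical pairing of kstrdup_const() and kfree_const() for a name that is
 * often a string literal, so no copy is made when the source lives in
 * .rodata. 'struct example_object' and 'example_set_name' are invented
 * names for this example only.
 */
struct example_object {
	const char *name;
};

static int __maybe_unused example_set_name(struct example_object *obj,
					   const char *name)
{
	kfree_const(obj->name);		/* no-op if the old name was in .rodata */
	obj->name = kstrdup_const(name, GFP_KERNEL);
	return obj->name ? 0 : -ENOMEM;
}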
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);
/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
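/*
 * Illustrative sketch (editorial addition, not in the original file): the
 * usual ERR_PTR() calling convention for memdup_user(), as seen from a
 * hypothetical ioctl-style handler. 'example_handle_buf' and 'ubuf' are
 * invented names.
 */
static int __maybe_unused example_handle_buf(const void __user *ubuf,
					     size_t len)
{
	void *buf = memdup_user(ubuf, len);

	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */

	/* ... operate on the kernel copy ... */

	kfree(buf);			/* physically contiguous: plain kfree() */
	return 0;
}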
/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result may not be
 * physically contiguous. Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);
/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}
/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}
#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}
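/*
 * Worked example (editorial note, not in the original file): with 4KiB
 * pages (PAGE_SHIFT == 12), the default mask is 0x7ff, so the offset is
 * up to 0x7ff << 12 = 0x7ff000 bytes, i.e. just under 8MiB of stack-top
 * randomization. With 64KiB pages (PAGE_SHIFT == 16), the mask shrinks to
 * 0x7ff >> 4 = 0x7f, and 0x7f << 16 is again just under 8MiB, so the
 * randomized span stays the same regardless of page size.
 */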
#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif
/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 * @task:        task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_sem is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	lockdep_assert_held_write(&mm->mmap_sem);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);
/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against, may be NULL
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	down_write(&mm->mmap_sem);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	up_write(&mm->mmap_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);
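/*
 * Illustrative sketch (editorial addition, not in the original file):
 * charging pinned pages against RLIMIT_MEMLOCK and uncharging them later.
 * 'example_pin' and 'example_unpin' are invented names; real callers
 * include VFIO-style page-pinning paths.
 */
static int __maybe_unused example_pin(struct mm_struct *mm,
				      unsigned long npages)
{
	return account_locked_vm(mm, npages, true);	/* may return -ENOMEM */
}

static void __maybe_unused example_unpin(struct mm_struct *mm,
					 unsigned long npages)
{
	account_locked_vm(mm, npages, false);	/* decrement never fails */
}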
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate, &uf);
		up_write(&mm->mmap_sem);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}
unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
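/*
 * Illustrative sketch (editorial addition, not in the original file): a
 * kernel-internal anonymous mapping set up through vm_mmap(). Note that
 * vm_mmap() returns either a userspace address or a negative errno encoded
 * in the unsigned long, hence the IS_ERR_VALUE() check.
 * 'example_map_anon' is an invented name.
 */
static unsigned long __maybe_unused example_map_anon(unsigned long size)
{
	unsigned long addr;

	addr = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, 0);
	if (IS_ERR_VALUE(addr))
		pr_debug("example: anonymous mmap failed: %ld\n", (long)addr);
	return addr;
}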
/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Please note that any gfp mask that is not compatible with GFP_KERNEL is
 * passed straight to kmalloc and never falls back to vmalloc.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
	 * so the given set of flags has to be compatible.
	 */
	if ((flags & GFP_KERNEL) != GFP_KERNEL)
		return kmalloc_node(size, flags, node);

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contributes less to long-term fragmentation than the vmalloc
	 * fallback. However make sure that larger requests are not too
	 * disruptive - no OOM killer and no allocation failure warnings as we
	 * have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fall back to vmalloc for sub-page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	return __vmalloc_node_flags_caller(size, node, flags,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
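/*
 * Illustrative sketch (editorial addition, not in the original file): the
 * common kvmalloc()/kvfree() pattern for a table whose size is driven by
 * userspace and may exceed what kmalloc can reliably provide.
 * 'example_alloc_table' and 'nr_entries' are invented names.
 */
static int __maybe_unused example_alloc_table(size_t nr_entries)
{
	u64 *table;

	table = kvmalloc_node(array_size(nr_entries, sizeof(*table)),
			      GFP_KERNEL, NUMA_NO_NODE);
	if (!table)
		return -ENOMEM;

	/* ... fill and use the table; it may be vmalloc-backed ... */

	kvfree(table);	/* works for both kmalloc and vmalloc results */
	return 0;
}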
/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);
static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}
/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any subpage of compound page is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < compound_nr(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);
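/*
 * Illustrative sketch (editorial addition, not in the original file):
 * page_mapped() is the usual "is anyone still using this via page tables?"
 * check before release-side work on a page. 'example_try_release' is an
 * invented name.
 */
static bool __maybe_unused example_try_release(struct page *page)
{
	if (page_mapped(page))
		return false;	/* still referenced by some mapping */

	/* ... safe to proceed with release-side work ... */
	return true;
}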
struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}
struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);
/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
struct address_space *page_mapping_file(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return NULL;
	return page_mapping(page);
}
/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains total number of mapping
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);
int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}
int overcommit_kbytes_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}
/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
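/*
 * Worked example (editorial note, not in the original file): on a machine
 * with 8GiB of RAM (no hugetlb pages), 2GiB of swap, 4KiB pages and the
 * default overcommit_ratio of 50, the OVERCOMMIT_NEVER limit is
 * 8GiB * 50 / 100 + 2GiB = 6GiB of committed address space, i.e. 1572864
 * pages. Setting overcommit_kbytes overrides the ratio entirely; the
 * kbytes value is converted to pages with ">> (PAGE_SHIFT - 10)".
 */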
/*
 * Make sure vm_committed_as is in one cacheline and not cacheline shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);
/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}
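/*
 * Worked example (editorial note, not in the original file): with 4KiB
 * pages, the default admin reserve of 8MiB (sysctl_admin_reserve_kbytes =
 * 1 << 13 kB) becomes 8192 >> (12 - 10) = 2048 pages, and the default user
 * reserve of 128MiB becomes 131072 >> 2 = 32768 pages. A process whose
 * total_vm is below 32 * 32768 pages (4GiB of mappings) is docked only
 * total_vm / 32 rather than the full user reserve.
 */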
/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}
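/*
 * Illustrative sketch (editorial addition, not in the original file):
 * reading the current task's command line into a stack buffer and
 * terminating it explicitly, since get_cmdline() does not guarantee an
 * ending NUL byte. 'example_log_cmdline' is an invented name.
 */
static void __maybe_unused example_log_cmdline(void)
{
	char buf[64];
	int n;

	n = get_cmdline(current, buf, sizeof(buf) - 1);
	buf[n > 0 ? n : 0] = '\0';	/* terminate explicitly */
	pr_debug("cmdline: %s\n", buf);
}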
int memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}