index 68575a315dc5adbbd5f1b3c7b80a359e9d8036ef..3ad6db9a722e07a7ff4d8805441256b7556f46f2 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -7,6 +7,7 @@
 #include <linux/err.h>
 #include <linux/sched.h>
 #include <linux/sched/mm.h>
+#include <linux/sched/signal.h>
 #include <linux/sched/task_stack.h>
 #include <linux/security.h>
 #include <linux/swap.h>
@@ -15,6 +16,13 @@
 #include <linux/hugetlb.h>
 #include <linux/vmalloc.h>
 #include <linux/userfaultfd_k.h>
+#include <linux/elf.h>
+#include <linux/elf-randomize.h>
+#include <linux/personality.h>
+#include <linux/random.h>
+#include <linux/processor.h>
+#include <linux/sizes.h>
+#include <linux/compat.h>
 
 #include <linux/uaccess.h>
 
@@ -292,7 +300,105 @@ int vma_is_stack_for_current(struct vm_area_struct *vma)
        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
 }
 
-#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
+#ifndef STACK_RND_MASK
+#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
+#endif
+
+unsigned long randomize_stack_top(unsigned long stack_top)
+{
+       unsigned long random_variable = 0;
+
+       if (current->flags & PF_RANDOMIZE) {
+               random_variable = get_random_long();
+               random_variable &= STACK_RND_MASK;
+               random_variable <<= PAGE_SHIFT;
+       }
+#ifdef CONFIG_STACK_GROWSUP
+       return PAGE_ALIGN(stack_top) + random_variable;
+#else
+       return PAGE_ALIGN(stack_top) - random_variable;
+#endif
+}
+
+#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
+unsigned long arch_randomize_brk(struct mm_struct *mm)
+{
+       /* Is the current task 32bit ? */
+       if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
+               return randomize_page(mm->brk, SZ_32M);
+
+       return randomize_page(mm->brk, SZ_1G);
+}
+
+unsigned long arch_mmap_rnd(void)
+{
+       unsigned long rnd;
+
+#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
+       if (is_compat_task())
+               rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
+       else
+#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
+               rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
+
+       return rnd << PAGE_SHIFT;
+}
+
+static int mmap_is_legacy(struct rlimit *rlim_stack)
+{
+       if (current->personality & ADDR_COMPAT_LAYOUT)
+               return 1;
+
+       if (rlim_stack->rlim_cur == RLIM_INFINITY)
+               return 1;
+
+       return sysctl_legacy_va_layout;
+}
+
+/*
+ * Leave enough space between the mmap area and the stack to honour ulimit in
+ * the face of randomisation.
+ */
+#define MIN_GAP                (SZ_128M)
+#define MAX_GAP                (STACK_TOP / 6 * 5)
+
+static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
+{
+       unsigned long gap = rlim_stack->rlim_cur;
+       unsigned long pad = stack_guard_gap;
+
+       /* Account for stack randomization if necessary */
+       if (current->flags & PF_RANDOMIZE)
+               pad += (STACK_RND_MASK << PAGE_SHIFT);
+
+       /* Values close to RLIM_INFINITY can overflow. */
+       if (gap + pad > gap)
+               gap += pad;
+
+       if (gap < MIN_GAP)
+               gap = MIN_GAP;
+       else if (gap > MAX_GAP)
+               gap = MAX_GAP;
+
+       return PAGE_ALIGN(STACK_TOP - gap - rnd);
+}
+
+void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
+{
+       unsigned long random_factor = 0UL;
+
+       if (current->flags & PF_RANDOMIZE)
+               random_factor = arch_mmap_rnd();
+
+       if (mmap_is_legacy(rlim_stack)) {
+               mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
+               mm->get_unmapped_area = arch_get_unmapped_area;
+       } else {
+               mm->mmap_base = mmap_base(random_factor, rlim_stack);
+               mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+       }
+}
+#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
 void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
 {
        mm->mmap_base = TASK_UNMAPPED_BASE;
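
The mmap_base() helper in the hunk above pads the stack rlimit for the guard gap and stack randomization, clamps the result between MIN_GAP and MAX_GAP, and places the mmap base that far below STACK_TOP. Below is a minimal userspace sketch of that arithmetic; PAGE_SHIFT, STACK_TOP and the guard-gap value are assumed, illustrative stand-ins for the real per-architecture constants, and the function simply mirrors the kernel code shown above.

/*
 * Userspace sketch of the mmap_base() arithmetic from the hunk above.
 * PAGE_SHIFT, STACK_TOP and STACK_GUARD_GAP are assumed, illustrative
 * values, not the real per-architecture kernel constants.
 */
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

#define STACK_TOP       0x7ffffffff000UL              /* assumed */
#define STACK_GUARD_GAP (256UL << PAGE_SHIFT)         /* assumed */
#define STACK_RND_MASK  (0x7ff >> (PAGE_SHIFT - 12))  /* as in the patch */

#define MIN_GAP         (128UL << 20)                 /* SZ_128M */
#define MAX_GAP         (STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, unsigned long rlim_cur,
                               int randomize)
{
        unsigned long gap = rlim_cur;
        unsigned long pad = STACK_GUARD_GAP;

        if (randomize)
                pad += (unsigned long)STACK_RND_MASK << PAGE_SHIFT;

        /* Values close to RLIM_INFINITY can overflow. */
        if (gap + pad > gap)
                gap += pad;

        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;

        return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

int main(void)
{
        /* An 8 MiB stack rlimit is padded, then clamped up to MIN_GAP. */
        printf("rlim 8M  -> mmap_base = %#lx\n", mmap_base(0, 8UL << 20, 1));
        /* An "unlimited" stack would overflow, so the pad is skipped and
         * the gap is clamped down to MAX_GAP. */
        printf("rlim inf -> mmap_base = %#lx\n", mmap_base(0, ~0UL, 1));
        return 0;
}
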
@@ -300,6 +406,80 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
 }
 #endif
 
+/**
+ * __account_locked_vm - account locked pages to an mm's locked_vm
+ * @mm:          mm to account against
+ * @pages:       number of pages to account
+ * @inc:         %true if @pages should be considered positive, %false if not
+ * @task:        task used to check RLIMIT_MEMLOCK
+ * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
+ *
+ * Assumes @task and @mm are valid (i.e. at least one reference on each), and
+ * that mmap_sem is held as writer.
+ *
+ * Return:
+ * * 0       on success
+ * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
+ */
+int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
+                       struct task_struct *task, bool bypass_rlim)
+{
+       unsigned long locked_vm, limit;
+       int ret = 0;
+
+       lockdep_assert_held_write(&mm->mmap_sem);
+
+       locked_vm = mm->locked_vm;
+       if (inc) {
+               if (!bypass_rlim) {
+                       limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+                       if (locked_vm + pages > limit)
+                               ret = -ENOMEM;
+               }
+               if (!ret)
+                       mm->locked_vm = locked_vm + pages;
+       } else {
+               WARN_ON_ONCE(pages > locked_vm);
+               mm->locked_vm = locked_vm - pages;
+       }
+
+       pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
+                (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
+                locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
+                ret ? " - exceeded" : "");
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(__account_locked_vm);
+
+/**
+ * account_locked_vm - account locked pages to an mm's locked_vm
+ * @mm:          mm to account against, may be NULL
+ * @pages:       number of pages to account
+ * @inc:         %true if @pages should be considered positive, %false if not
+ *
+ * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
+ *
+ * Return:
+ * * 0       on success, or if mm is NULL
+ * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
+ */
+int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
+{
+       int ret;
+
+       if (pages == 0 || !mm)
+               return 0;
+
+       down_write(&mm->mmap_sem);
+       ret = __account_locked_vm(mm, pages, inc, current,
+                                 capable(CAP_IPC_LOCK));
+       up_write(&mm->mmap_sem);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(account_locked_vm);
+
 unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long pgoff)
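
account_locked_vm() and __account_locked_vm(), added in the hunk above, give callers a common way to charge pinned pages against RLIMIT_MEMLOCK. A hedged sketch of the intended calling pattern follows; the demo_* names are made up, only the accounting calls come from the patch, and the declarations are assumed to be reachable via <linux/mm.h> as added elsewhere in this series.

/*
 * Hypothetical driver snippet showing the intended calling pattern for
 * account_locked_vm(); the demo_* names are made up for illustration.
 */
#include <linux/mm.h>
#include <linux/sched.h>

static int demo_pin_user_buffer(unsigned long nr_pages)
{
        int ret;

        /* Charge the pages up front; -ENOMEM if RLIMIT_MEMLOCK would be exceeded. */
        ret = account_locked_vm(current->mm, nr_pages, true);
        if (ret)
                return ret;

        /* ... actually pin the user pages here ... */

        return 0;
}

static void demo_unpin_user_buffer(unsigned long nr_pages)
{
        /* ... release the pinned pages ... */

        /* Undo the charge taken in demo_pin_user_buffer(). */
        account_locked_vm(current->mm, nr_pages, false);
}
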
@@ -446,7 +626,7 @@ bool page_mapped(struct page *page)
                return true;
        if (PageHuge(page))
                return false;
-       for (i = 0; i < (1 << compound_order(page)); i++) {
+       for (i = 0; i < compound_nr(page); i++) {
                if (atomic_read(&page[i]._mapcount) >= 0)
                        return true;
        }
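
The page_mapped() hunk above replaces the open-coded `1 << compound_order(page)` loop bound with compound_nr(page); both evaluate to the number of base pages in a compound page. A tiny, purely illustrative check of that equivalence, assuming the compound_nr() helper introduced alongside this change:

#include <linux/mm.h>
#include <linux/mmdebug.h>

/* Purely illustrative: the new loop bound equals the old open-coded one. */
static void demo_check_compound_nr(struct page *page)
{
        VM_BUG_ON_PAGE(compound_nr(page) != (1UL << compound_order(page)), page);
}
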
@@ -708,3 +888,16 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
 out:
        return res;
 }
+
+int memcmp_pages(struct page *page1, struct page *page2)
+{
+       char *addr1, *addr2;
+       int ret;
+
+       addr1 = kmap_atomic(page1);
+       addr2 = kmap_atomic(page2);
+       ret = memcmp(addr1, addr2, PAGE_SIZE);
+       kunmap_atomic(addr2);
+       kunmap_atomic(addr1);
+       return ret;
+}
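
memcmp_pages() above maps both pages with kmap_atomic() and compares a full PAGE_SIZE of data with memcmp() semantics, so a zero return means identical contents. A minimal sketch of a caller; demo_pages_identical() is a made-up wrapper, and the prototype is assumed to be exported through <linux/mm.h> by the same series.

#include <linux/mm.h>

/* demo_pages_identical() is a made-up wrapper around the helper above. */
static bool demo_pages_identical(struct page *page1, struct page *page2)
{
        /* memcmp() semantics: 0 means both pages hold identical bytes. */
        return memcmp_pages(page1, page2) == 0;
}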