diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index c7ae74ce5ff3e6f2d01824e0dbcad08d856eb9ff..1b0d7abad1d4565cf6050099d6e2c64157dca558 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
 #include <asm/tlbflush.h>
 #include "internal.h"
 
+static __always_inline
+struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
+                                   unsigned long dst_start,
+                                   unsigned long len)
+{
+       /*
+        * Make sure that the dst range is both valid and fully within a
+        * single existing vma.
+        */
+       struct vm_area_struct *dst_vma;
+
+       dst_vma = find_vma(dst_mm, dst_start);
+       if (!dst_vma)
+               return NULL;
+
+       if (dst_start < dst_vma->vm_start ||
+           dst_start + len > dst_vma->vm_end)
+               return NULL;
+
+       /*
+        * Check the vma is registered in uffd, this is required to
+        * enforce the VM_MAYWRITE check done at uffd registration
+        * time.
+        */
+       if (!dst_vma->vm_userfaultfd_ctx.ctx)
+               return NULL;
+
+       return dst_vma;
+}
+
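/*
 * A minimal caller sketch (an illustration, not part of the patch):
 * both call sites converted below follow this shape, mapping a NULL
 * return to -ENOENT before layering their own -EINVAL checks on top.
 * The helper returns NULL either when [dst_start, dst_start + len)
 * is not fully inside one existing vma or when that vma was never
 * registered with userfaultfd; "check_dst_range" is a hypothetical name.
 */
static int check_dst_range(struct mm_struct *dst_mm,
			   unsigned long dst_start, unsigned long len)
{
	struct vm_area_struct *dst_vma;

	dst_vma = find_dst_vma(dst_mm, dst_start, len);
	if (!dst_vma)
		return -ENOENT;	/* bad range or vma not uffd-registered */

	return 0;
}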
 static int mcopy_atomic_pte(struct mm_struct *dst_mm,
                            pmd_t *dst_pmd,
                            struct vm_area_struct *dst_vma,
@@ -60,7 +90,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 
        /*
         * The memory barrier inside __SetPageUptodate makes sure that
-        * preceeding stores to the page contents become visible before
+        * preceding stores to the page contents become visible before
         * the set_pte_at() write.
         */
        __SetPageUptodate(page);
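/*
 * For reference, the barrier the comment above relies on lives inside
 * __SetPageUptodate() itself; paraphrased from include/linux/page-flags.h
 * of this era (a sketch, trimmed to the relevant lines, not part of
 * this patch):
 */
static __always_inline void __SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	smp_wmb();	/* order page-content stores before the flag store */
	__set_bit(PG_uptodate, &page->flags);
}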
@@ -184,7 +214,6 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
        unsigned long src_addr, dst_addr;
        long copied;
        struct page *page;
-       struct hstate *h;
        unsigned long vma_hpagesize;
        pgoff_t idx;
        u32 hash;
@@ -221,20 +250,9 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
         */
        if (!dst_vma) {
                err = -ENOENT;
-               dst_vma = find_vma(dst_mm, dst_start);
+               dst_vma = find_dst_vma(dst_mm, dst_start, len);
                if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
                        goto out_unlock;
-               /*
-                * Check the vma is registered in uffd, this is
-                * required to enforce the VM_MAYWRITE check done at
-                * uffd registration time.
-                */
-               if (!dst_vma->vm_userfaultfd_ctx.ctx)
-                       goto out_unlock;
-
-               if (dst_start < dst_vma->vm_start ||
-                   dst_start + len > dst_vma->vm_end)
-                       goto out_unlock;
 
                err = -EINVAL;
                if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
@@ -243,10 +261,6 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
                vm_shared = dst_vma->vm_flags & VM_SHARED;
        }
 
-       if (WARN_ON(dst_addr & (vma_hpagesize - 1) ||
-                   (len - copied) & (vma_hpagesize - 1)))
-               goto out_unlock;
-
        /*
         * If not shared, ensure the dst_vma has an anon_vma.
         */
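/*
 * Why the WARN_ON() removed above was redundant (reasoning inferred
 * from the surrounding function, not stated in the patch text):
 * dst_start and len are validated as huge-page aligned on entry, and
 * the copy loop below only ever advances in whole huge pages, e.g.:
 *
 *	dst_addr += vma_hpagesize;
 *	copied   += vma_hpagesize;
 *
 * so (dst_addr | (len - copied)) & (vma_hpagesize - 1) stays 0 on
 * every iteration by construction.
 */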
@@ -256,24 +270,21 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
                        goto out_unlock;
        }
 
-       h = hstate_vma(dst_vma);
-
        while (src_addr < src_start + len) {
                pte_t dst_pteval;
 
                BUG_ON(dst_addr >= dst_start + len);
-               VM_BUG_ON(dst_addr & ~huge_page_mask(h));
 
                /*
                 * Serialize via hugetlb_fault_mutex
                 */
                idx = linear_page_index(dst_vma, dst_addr);
                mapping = dst_vma->vm_file->f_mapping;
-               hash = hugetlb_fault_mutex_hash(h, mapping, idx, dst_addr);
+               hash = hugetlb_fault_mutex_hash(mapping, idx);
                mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
                err = -ENOMEM;
-               dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
+               dst_pte = huge_pte_alloc(dst_mm, dst_addr, vma_hpagesize);
                if (!dst_pte) {
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        goto out_unlock;
@@ -300,7 +311,8 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
 
                        err = copy_huge_page_from_user(page,
                                                (const void __user *)src_addr,
-                                               pages_per_huge_page(h), true);
+                                               vma_hpagesize / PAGE_SIZE,
+                                               true);
                        if (unlikely(err)) {
                                err = -EFAULT;
                                goto out;
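/*
 * Equivalence behind the substitution above (h is the hstate the old
 * code looked up; arithmetic inferred, not spelled out in the patch):
 *
 *	vma_hpagesize == vma_kernel_pagesize(dst_vma) == huge_page_size(h)
 *	pages_per_huge_page(h) == huge_page_size(h) / PAGE_SIZE
 *
 * e.g. a 2 MiB huge page over 4 KiB base pages gives
 * 0x200000 / 0x1000 == 512 pages passed to copy_huge_page_from_user().
 */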
@@ -475,20 +487,9 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
         * both valid and fully within a single existing vma.
         */
        err = -ENOENT;
-       dst_vma = find_vma(dst_mm, dst_start);
+       dst_vma = find_dst_vma(dst_mm, dst_start, len);
        if (!dst_vma)
                goto out_unlock;
-       /*
-        * Check the vma is registered in uffd, this is required to
-        * enforce the VM_MAYWRITE check done at uffd registration
-        * time.
-        */
-       if (!dst_vma->vm_userfaultfd_ctx.ctx)
-               goto out_unlock;
-
-       if (dst_start < dst_vma->vm_start ||
-           dst_start + len > dst_vma->vm_end)
-               goto out_unlock;
 
        err = -EINVAL;
        /*