Merge tag 'for-4.20-part2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave...
diff --git a/mm/memory.c b/mm/memory.c
index 40b692fa4b9946ea19ddd0b98fe19fc3aa08e013..072139579d897021e83fe6b70344ad4acd62d664 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1520,19 +1520,16 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(vm_insert_page);
 
-static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                        pfn_t pfn, pgprot_t prot, bool mkwrite)
 {
        struct mm_struct *mm = vma->vm_mm;
-       int retval;
        pte_t *pte, entry;
        spinlock_t *ptl;
 
-       retval = -ENOMEM;
        pte = get_locked_pte(mm, addr, &ptl);
        if (!pte)
-               goto out;
-       retval = -EBUSY;
+               return VM_FAULT_OOM;
        if (!pte_none(*pte)) {
                if (mkwrite) {
                        /*
@@ -1565,11 +1562,9 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
        set_pte_at(mm, addr, pte, entry);
        update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
 
-       retval = 0;
 out_unlock:
        pte_unmap_unlock(pte, ptl);
-out:
-       return retval;
+       return VM_FAULT_NOPAGE;
 }
 
 /**
@@ -1593,8 +1588,6 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn, pgprot_t pgprot)
 {
-       int err;
-
        /*
         * Technically, architectures with pte_special can avoid all these
         * restrictions (same for remap_pfn_range).  However we would like
@@ -1615,15 +1608,8 @@ vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
 
        track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
 
-       err = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
+       return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
                        false);
-
-       if (err == -ENOMEM)
-               return VM_FAULT_OOM;
-       if (err < 0 && err != -EBUSY)
-               return VM_FAULT_SIGBUS;
-
-       return VM_FAULT_NOPAGE;
 }
 EXPORT_SYMBOL(vmf_insert_pfn_prot);
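For reference, the caller-side translation deleted above is equivalent to the sketch below; the helper name is hypothetical and not part of this patch. With insert_pfn() returning a vm_fault_t itself, VM_FAULT_OOM takes the place of -ENOMEM and VM_FAULT_NOPAGE covers both success and the benign -EBUSY case (the PTE was already populated).

/* Hypothetical helper, shown only to document the mapping the removed
 * lines performed; it is not part of this patch.
 */
static vm_fault_t errno_to_vm_fault(int err)
{
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;	/* success, or the PTE already existed */
}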
 
@@ -1668,20 +1654,21 @@ static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
        return false;
 }
 
-static int __vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
-                       pfn_t pfn, bool mkwrite)
+static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
+               unsigned long addr, pfn_t pfn, bool mkwrite)
 {
        pgprot_t pgprot = vma->vm_page_prot;
+       int err;
 
        BUG_ON(!vm_mixed_ok(vma, pfn));
 
        if (addr < vma->vm_start || addr >= vma->vm_end)
-               return -EFAULT;
+               return VM_FAULT_SIGBUS;
 
        track_pfn_insert(vma, &pgprot, pfn);
 
        if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
-               return -EACCES;
+               return VM_FAULT_SIGBUS;
 
        /*
         * If we don't have pte special, then we have to use the pfn_valid()
@@ -1700,15 +1687,10 @@ static int __vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
                 * result in pfn_t_has_page() == false.
                 */
                page = pfn_to_page(pfn_t_to_pfn(pfn));
-               return insert_page(vma, addr, page, pgprot);
+               err = insert_page(vma, addr, page, pgprot);
+       } else {
+               return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
        }
-       return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
-}
-
-vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
-               pfn_t pfn)
-{
-       int err = __vm_insert_mixed(vma, addr, pfn, false);
 
        if (err == -ENOMEM)
                return VM_FAULT_OOM;
@@ -1717,6 +1699,12 @@ vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 
        return VM_FAULT_NOPAGE;
 }
+
+vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+               pfn_t pfn)
+{
+       return __vm_insert_mixed(vma, addr, pfn, false);
+}
 EXPORT_SYMBOL(vmf_insert_mixed);
 
 /*
@@ -1724,18 +1712,10 @@ EXPORT_SYMBOL(vmf_insert_mixed);
  *  different entry in the mean time, we treat that as success as we assume
  *  the same entry was actually inserted.
  */
-
 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
                unsigned long addr, pfn_t pfn)
 {
-       int err;
-
-       err =  __vm_insert_mixed(vma, addr, pfn, true);
-       if (err == -ENOMEM)
-               return VM_FAULT_OOM;
-       if (err < 0 && err != -EBUSY)
-               return VM_FAULT_SIGBUS;
-       return VM_FAULT_NOPAGE;
+       return __vm_insert_mixed(vma, addr, pfn, true);
 }
 EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
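With __vm_insert_mixed() now performing the errno translation internally, a driver fault handler can hand the result of vmf_insert_mixed() or vmf_insert_mixed_mkwrite() straight back to the fault path. A minimal sketch, assuming a hypothetical example_get_pfn() that resolves the faulting page offset to a device PFN:

static vm_fault_t example_fault(struct vm_fault *vmf)
{
	/* example_get_pfn() is assumed: it maps vmf->pgoff to a device PFN */
	pfn_t pfn = __pfn_to_pfn_t(example_get_pfn(vmf->pgoff), PFN_DEV);

	if (vmf->flags & FAULT_FLAG_WRITE)
		return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
}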
 
@@ -3516,10 +3496,36 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
        struct vm_area_struct *vma = vmf->vma;
        vm_fault_t ret;
 
-       /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
-       if (!vma->vm_ops->fault)
-               ret = VM_FAULT_SIGBUS;
-       else if (!(vmf->flags & FAULT_FLAG_WRITE))
+       /*
+        * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
+        */
+       if (!vma->vm_ops->fault) {
+               /*
+                * If we find a migration pmd entry or a none pmd entry, which
+                * should never happen, return SIGBUS
+                */
+               if (unlikely(!pmd_present(*vmf->pmd)))
+                       ret = VM_FAULT_SIGBUS;
+               else {
+                       vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm,
+                                                      vmf->pmd,
+                                                      vmf->address,
+                                                      &vmf->ptl);
+                       /*
+                        * Make sure this is not a temporary clearing of pte
+                        * by holding ptl and checking again. A R/M/W update
+                        * of pte involves: take ptl, clearing the pte so that
+                        * we don't have concurrent modification by hardware
+                        * followed by an update.
+                        */
+                       if (unlikely(pte_none(*vmf->pte)))
+                               ret = VM_FAULT_SIGBUS;
+                       else
+                               ret = VM_FAULT_NOPAGE;
+
+                       pte_unmap_unlock(vmf->pte, vmf->ptl);
+               }
+       } else if (!(vmf->flags & FAULT_FLAG_WRITE))
                ret = do_read_fault(vmf);
        else if (!(vma->vm_flags & VM_SHARED))
                ret = do_cow_fault(vmf);
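The pte_none() recheck under ptl above guards against a transient clearing of the PTE by a concurrent read-modify-write update. A rough sketch of such an update, with pte_mkdirty() standing in for whatever modification the updater actually makes:

static void example_rmw_pte(struct mm_struct *mm, pmd_t *pmd,
			    unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte, entry;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	entry = ptep_get_and_clear(mm, addr, pte);	/* PTE briefly reads as none */
	entry = pte_mkdirty(entry);			/* example modification */
	set_pte_at(mm, addr, pte, entry);		/* new value becomes visible */
	pte_unmap_unlock(pte, ptl);
}

A lockless pte_none() check can observe that cleared window, which is why do_fault() only returns VM_FAULT_SIGBUS after taking ptl and re-checking.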