Merge branch 'akpm' (patches from Andrew)
author      Linus Torvalds <torvalds@linux-foundation.org>
            Thu, 30 Nov 2017 03:12:44 +0000 (19:12 -0800)
committer   Linus Torvalds <torvalds@linux-foundation.org>
            Thu, 30 Nov 2017 03:12:44 +0000 (19:12 -0800)
Merge misc fixes from Andrew Morton:
 "28 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (28 commits)
  fs/hugetlbfs/inode.c: change put_page/unlock_page order in hugetlbfs_fallocate()
  mm/hugetlb: fix NULL-pointer dereference on 5-level paging machine
  autofs: revert "autofs: fix AT_NO_AUTOMOUNT not being honored"
  autofs: revert "autofs: take more care to not update last_used on path walk"
  fs/fat/inode.c: fix sb_rdonly() change
  mm, memcg: fix mem_cgroup_swapout() for THPs
  mm: migrate: fix an incorrect call of prep_transhuge_page()
  kmemleak: add scheduling point to kmemleak_scan()
  scripts/bloat-o-meter: don't fail with division by 0
  fs/mbcache.c: make count_objects() more robust
  Revert "mm/page-writeback.c: print a warning if the vm dirtiness settings are illogical"
  mm/madvise.c: fix madvise() infinite loop under special circumstances
  exec: avoid RLIMIT_STACK races with prlimit()
  IB/core: disable memory registration of filesystem-dax vmas
  v4l2: disable filesystem-dax mapping support
  mm: fail get_vaddr_frames() for filesystem-dax mappings
  mm: introduce get_user_pages_longterm
  device-dax: implement ->split() to catch invalid munmap attempts
  mm, hugetlbfs: introduce ->split() to vm_operations_struct
  scripts/faddr2line: extend usage on generic arch
  ...

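Several commits in the list above -- the filesystem-dax series that ends with
"mm: introduce get_user_pages_longterm" -- share one idea: long-lived page
pins (RDMA memory registrations, V4L2 buffers) must not target fs-dax
mappings, because the filesystem can reallocate those blocks while the pin is
still held. Below is a small userspace model of that gatekeeping shape: pin,
scan the backing VMAs, back out entirely on an fs-dax hit. All names here
(my_pin_pages, pin_longterm, the stub types) are hypothetical stand-ins for
illustration, not the kernel's gup internals.

    #include <stdbool.h>
    #include <stdio.h>

    #define EOPNOTSUPP 95

    /* Stub stand-ins for kernel structures (illustrative only). */
    struct vma  { bool is_fsdax; };
    struct page { long id; };

    /* Fake "mm": two pages, the second backed by an fs-dax vma. */
    static struct vma  vma_normal = { .is_fsdax = false };
    static struct vma  vma_dax    = { .is_fsdax = true };
    static struct page pg[2]      = { { .id = 0 }, { .id = 1 } };

    /* Model of the ordinary pin: report each page and its backing vma. */
    static long my_pin_pages(long nr, struct page **pages, struct vma **vmas)
    {
            long i;

            for (i = 0; i < nr && i < 2; i++) {
                    pages[i] = &pg[i];
                    vmas[i]  = i ? &vma_dax : &vma_normal;
            }
            return i;
    }

    static void put_page_stub(struct page *p)
    {
            printf("unpinning page %ld\n", p->id);
    }

    /* Shape of the longterm variant: do the normal pin, then refuse the
     * whole request if any backing vma is filesystem-dax. */
    static long pin_longterm(long nr, struct page **pages, struct vma **vmas)
    {
            long i, rc = my_pin_pages(nr, pages, vmas);

            for (i = 0; i < rc; i++) {
                    if (!vmas[i]->is_fsdax)
                            continue;
                    /* Back out: a long-lived pin on fs-dax is unsafe. */
                    while (rc-- > 0)
                            put_page_stub(pages[rc]);
                    return -EOPNOTSUPP;
            }
            return rc;
    }

    int main(void)
    {
            struct page *pages[2];
            struct vma  *vmas[2];
            long rc = pin_longterm(2, pages, vmas);

            printf("pin_longterm: %ld\n", rc);  /* -95: fs-dax rejected */
            return 0;
    }

The kernel helper rejects such pins in the same spirit, leaving callers like
the IB and V4L2 code to fail the registration cleanly rather than hold a pin
the filesystem cannot see.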
mm/huge_memory.c
mm/memory.c

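The mm/huge_memory.c diff that follows is mostly one calling-convention
change: maybe_pmd_mkwrite() and maybe_pud_mkwrite() lose their bool dirty
parameter. Callers that previously passed dirty=true now apply
pmd_mkdirty()/pud_mkdirty() to the entry themselves, and
remove_migration_pmd() -- the lone dirty=false caller -- simply drops the
argument. A minimal userspace model of the new shape, with stub types
standing in for pmd_t and vm_area_struct (illustrative only, not kernel
code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stub stand-ins for kernel types and helpers (illustrative only). */
    typedef struct { unsigned long val; } pmd_t;
    struct vm_area_struct { unsigned long vm_flags; };

    #define VM_WRITE   0x2UL
    #define PMD_WRITE  0x1UL
    #define PMD_DIRTY  0x4UL

    static pmd_t pmd_mkwrite(pmd_t pmd) { pmd.val |= PMD_WRITE; return pmd; }
    static pmd_t pmd_mkdirty(pmd_t pmd) { pmd.val |= PMD_DIRTY; return pmd; }

    /* New shape: the helper only decides writability; dirtiness is the
     * caller's business.  Mirrors the "+" side of the diff below. */
    static pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
    {
            if (vma->vm_flags & VM_WRITE)
                    pmd = pmd_mkwrite(pmd);
            return pmd;
    }

    int main(void)
    {
            struct vm_area_struct vma = { .vm_flags = VM_WRITE };
            pmd_t entry = { 0 };

            /* Fault paths that used to pass dirty=true now mkdirty first: */
            entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), &vma);

            /* The migration path, which passed dirty=false, just drops the
             * argument and gets a non-dirty, possibly-writable entry: */
            pmd_t pmde = maybe_pmd_mkwrite((pmd_t){ 0 }, &vma);

            printf("fault entry: %#lx, migration entry: %#lx\n",
                   entry.val, pmde.val);
            return 0;
    }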
diff --combined mm/huge_memory.c
index 0e7ded98d114d184877d2fc9bd0f02c3187f2ed5,05b729f45e8a37dc668d4760eb7f7646ccdf0e79..2f2f5e77490278f58c6e9a923899255efff77551
@@@ -474,10 -474,13 +474,10 @@@ out
  }
  __setup("transparent_hugepage=", setup_transparent_hugepage);
  
 -pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma, bool dirty)
 +pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
  {
 -      if (likely(vma->vm_flags & VM_WRITE)) {
 +      if (likely(vma->vm_flags & VM_WRITE))
                pmd = pmd_mkwrite(pmd);
 -              if (dirty)
 -                      pmd = pmd_mkdirty(pmd);
 -      }
        return pmd;
  }
  
@@@ -599,7 -602,7 +599,7 @@@ static int __do_huge_pmd_anonymous_page
                }
  
                entry = mk_huge_pmd(page, vma->vm_page_prot);
 -              entry = maybe_pmd_mkwrite(entry, vma, true);
 +              entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                page_add_new_anon_rmap(page, vma, haddr, true);
                mem_cgroup_commit_charge(page, memcg, false, true);
                lru_cache_add_active_or_unevictable(page, vma);
@@@ -741,8 -744,8 +741,8 @@@ static void insert_pfn_pmd(struct vm_ar
        if (pfn_t_devmap(pfn))
                entry = pmd_mkdevmap(entry);
        if (write) {
 -              entry = pmd_mkyoung(entry);
 -              entry = maybe_pmd_mkwrite(entry, vma, true);
 +              entry = pmd_mkyoung(pmd_mkdirty(entry));
 +              entry = maybe_pmd_mkwrite(entry, vma);
        }
  
        if (pgtable) {
@@@ -788,10 -791,14 +788,10 @@@ int vmf_insert_pfn_pmd(struct vm_area_s
  EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
  
  #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 -static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma,
 -              bool dirty)
 +static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
  {
 -      if (likely(vma->vm_flags & VM_WRITE)) {
 +      if (likely(vma->vm_flags & VM_WRITE))
                pud = pud_mkwrite(pud);
 -              if (dirty)
 -                      pud = pud_mkdirty(pud);
 -      }
        return pud;
  }
  
@@@ -807,8 -814,8 +807,8 @@@ static void insert_pfn_pud(struct vm_ar
        if (pfn_t_devmap(pfn))
                entry = pud_mkdevmap(entry);
        if (write) {
 -              entry = pud_mkyoung(entry);
 -              entry = maybe_pud_mkwrite(entry, vma, true);
 +              entry = pud_mkyoung(pud_mkdirty(entry));
 +              entry = maybe_pud_mkwrite(entry, vma);
        }
        set_pud_at(mm, addr, pud, entry);
        update_mmu_cache_pud(vma, addr, pud);
@@@ -870,7 -877,7 +870,7 @@@ struct page *follow_devmap_pmd(struct v
         */
        WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
  
-       if (flags & FOLL_WRITE && !pmd_write(*pmd))
+       if (!pmd_access_permitted(*pmd, flags & FOLL_WRITE))
                return NULL;
  
        if (pmd_present(*pmd) && pmd_devmap(*pmd))
@@@ -1012,7 -1019,7 +1012,7 @@@ struct page *follow_devmap_pud(struct v
  
        assert_spin_locked(pud_lockptr(mm, pud));
  
-       if (flags & FOLL_WRITE && !pud_write(*pud))
+       if (!pud_access_permitted(*pud, flags & FOLL_WRITE))
                return NULL;
  
        if (pud_present(*pud) && pud_devmap(*pud))
@@@ -1279,7 -1286,7 +1279,7 @@@ int do_huge_pmd_wp_page(struct vm_faul
        if (reuse_swap_page(page, NULL)) {
                pmd_t entry;
                entry = pmd_mkyoung(orig_pmd);
 -              entry = maybe_pmd_mkwrite(entry, vma, true);
 +              entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry,  1))
                        update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
                ret |= VM_FAULT_WRITE;
@@@ -1349,7 -1356,7 +1349,7 @@@ alloc
        } else {
                pmd_t entry;
                entry = mk_huge_pmd(new_page, vma->vm_page_prot);
 -              entry = maybe_pmd_mkwrite(entry, vma, true);
 +              entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
                page_add_new_anon_rmap(new_page, vma, haddr, true);
                mem_cgroup_commit_charge(new_page, memcg, false, true);
@@@ -1386,7 -1393,7 +1386,7 @@@ out_unlock
   */
  static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
  {
-       return pmd_write(pmd) ||
+       return pmd_access_permitted(pmd, WRITE) ||
               ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
  }
  
@@@ -2928,7 -2935,7 +2928,7 @@@ void remove_migration_pmd(struct page_v
        if (pmd_swp_soft_dirty(*pvmw->pmd))
                pmde = pmd_mksoft_dirty(pmde);
        if (is_write_migration_entry(entry))
 -              pmde = maybe_pmd_mkwrite(pmde, vma, false);
 +              pmde = maybe_pmd_mkwrite(pmde, vma);
  
        flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
        page_add_anon_rmap(new, vma, mmun_start, true);
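The other theme, here and in mm/memory.c below, replaces open-coded checks
such as "flags & FOLL_WRITE && !pmd_write(*pmd)" with
pmd_access_permitted()/pud_access_permitted()/pte_access_permitted(). The
indirection lets an architecture veto access for reasons the write bit alone
cannot express -- x86 protection keys being the motivating case. A sketch of
what I believe the generic fallback looks like, again with stub types (the
real definitions are per-arch):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stub pte and predicates (illustrative; real ones are per-arch). */
    typedef struct { unsigned long val; } pte_t;
    #define PTE_PRESENT 0x1UL
    #define PTE_WRITE   0x2UL

    static bool pte_present(pte_t pte) { return pte.val & PTE_PRESENT; }
    static bool pte_write(pte_t pte)   { return pte.val & PTE_WRITE; }

    /* Shape of the generic fallback: the entry must be present, and
     * writable if a write is being asked for.  An arch override (e.g.
     * x86 with protection keys) can additionally deny access here. */
    static bool pte_access_permitted(pte_t pte, bool write)
    {
            return pte_present(pte) && (!write || pte_write(pte));
    }

    int main(void)
    {
            pte_t ro = { PTE_PRESENT };
            pte_t rw = { PTE_PRESENT | PTE_WRITE };
            bool foll_write = true;

            /* Old style: if (flags & FOLL_WRITE && !pte_write(pte)) fail;
             * the new predicate folds in the present check as well: */
            printf("ro+write: %d, rw+write: %d, ro+read: %d\n",
                   pte_access_permitted(ro, foll_write),
                   pte_access_permitted(rw, foll_write),
                   pte_access_permitted(ro, false));
            return 0;
    }

Note the folded-in present check: the predicate refuses a not-present entry
outright, where the old open-coded tests looked only at the write bit.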
diff --combined mm/memory.c
index 85e7a87da79fe4a5487e1f3f6216e61b9827515c,4f07acd1695fb9991d0f5de2c3cec23a7dc78eaf..5eb3d2524bdc28239b33a0ac6e385fa5a5b9aaf9
@@@ -3335,7 -3335,7 +3335,7 @@@ static int do_set_pmd(struct vm_fault *
  
        entry = mk_huge_pmd(page, vma->vm_page_prot);
        if (write)
 -              entry = maybe_pmd_mkwrite(entry, vma, true);
 +              entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
  
        add_mm_counter(vma->vm_mm, MM_FILEPAGES, HPAGE_PMD_NR);
        page_add_file_rmap(page, true);
@@@ -3948,7 -3948,7 +3948,7 @@@ static int handle_pte_fault(struct vm_f
        if (unlikely(!pte_same(*vmf->pte, entry)))
                goto unlock;
        if (vmf->flags & FAULT_FLAG_WRITE) {
-               if (!pte_write(entry))
+               if (!pte_access_permitted(entry, WRITE))
                        return do_wp_page(vmf);
                entry = pte_mkdirty(entry);
        }
@@@ -4013,7 -4013,7 +4013,7 @@@ static int __handle_mm_fault(struct vm_
  
                        /* NUMA case for anonymous PUDs would go here */
  
-                       if (dirty && !pud_write(orig_pud)) {
+                       if (dirty && !pud_access_permitted(orig_pud, WRITE)) {
                                ret = wp_huge_pud(&vmf, orig_pud);
                                if (!(ret & VM_FAULT_FALLBACK))
                                        return ret;
                        if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
                                return do_huge_pmd_numa_page(&vmf, orig_pmd);
  
-                       if (dirty && !pmd_write(orig_pmd)) {
+                       if (dirty && !pmd_access_permitted(orig_pmd, WRITE)) {
                                ret = wp_huge_pmd(&vmf, orig_pmd);
                                if (!(ret & VM_FAULT_FALLBACK))
                                        return ret;
@@@ -4336,7 -4336,7 +4336,7 @@@ int follow_phys(struct vm_area_struct *
                goto out;
        pte = *ptep;
  
-       if ((flags & FOLL_WRITE) && !pte_write(pte))
+       if (!pte_access_permitted(pte, flags & FOLL_WRITE))
                goto unlock;
  
        *prot = pgprot_val(pte_pgprot(pte));