diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 14ed6ee5e02fc8bc6acc767de9e42ed464ce5675..1cd7c1a57a144320b7d1729d7caa6ec93351cc54 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -483,11 +483,8 @@ pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 
 static inline struct list_head *page_deferred_list(struct page *page)
 {
-       /*
-        * ->lru in the tail pages is occupied by compound_head.
-        * Let's use ->mapping + ->index in the second tail page as list_head.
-        */
-       return (struct list_head *)&page[2].mapping;
+       /* ->lru in the tail pages is occupied by compound_head. */
+       return &page[2].deferred_list;
 }
 
 void prep_transhuge_page(struct page *page)
@@ -1134,8 +1131,8 @@ static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
        unsigned long mmun_start;       /* For mmu_notifiers */
        unsigned long mmun_end;         /* For mmu_notifiers */
 
-       pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
-                       GFP_KERNEL);
+       pages = kmalloc_array(HPAGE_PMD_NR, sizeof(struct page *),
+                             GFP_KERNEL);
        if (unlikely(!pages)) {
                ret |= VM_FAULT_OOM;
                goto out;
@@ -1185,7 +1182,7 @@ static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
         * mmu_notifier_invalidate_range_end() happens which can lead to a
         * device seeing memory write in different order than CPU.
         *
-        * See Documentation/vm/mmu_notifier.txt
+        * See Documentation/vm/mmu_notifier.rst
         */
        pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
 
@@ -2037,7 +2034,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
         * replacing a zero pmd write protected page with a zero pte write
         * protected page.
         *
-        * See Documentation/vm/mmu_notifier.txt
+        * See Documentation/vm/mmu_notifier.rst
         */
        pmdp_huge_clear_flush(vma, haddr, pmd);
 
@@ -2431,7 +2428,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
                __split_huge_page_tail(head, i, lruvec, list);
                /* Some pages can be beyond i_size: drop them from page cache */
                if (head[i].index >= end) {
-                       __ClearPageDirty(head + i);
+                       ClearPageDirty(head + i);
                        __delete_from_page_cache(head + i, NULL);
                        if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
                                shmem_uncharge(head->mapping->host, 1);
@@ -2925,7 +2922,10 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
                pmde = maybe_pmd_mkwrite(pmde, vma);
 
        flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
-       page_add_anon_rmap(new, vma, mmun_start, true);
+       if (PageAnon(new))
+               page_add_anon_rmap(new, vma, mmun_start, true);
+       else
+               page_add_file_rmap(new, true);
        set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
        if (vma->vm_flags & VM_LOCKED)
                mlock_vma_page(new);