diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 01cbb7078d6ca50f3dfca124309e402ba78d7aa3..9ac49ef17b4e1e5128f3db38e0a97bbc067ed1b9 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -446,15 +446,6 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
                __split_huge_pmd(walk->vma, pmd, addr, false, NULL);
                goto out;
        }
-       if (!thp_migration_supported()) {
-               get_page(page);
-               spin_unlock(ptl);
-               lock_page(page);
-               ret = split_huge_page(page);
-               unlock_page(page);
-               put_page(page);
-               goto out;
-       }
        if (!queue_pages_required(page, qp)) {
                ret = 1;
                goto unlock;
@@ -495,7 +486,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 
        if (pmd_trans_unstable(pmd))
                return 0;
-retry:
+
        pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
                if (!pte_present(*pte))
@@ -511,22 +502,6 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
                        continue;
                if (!queue_pages_required(page, qp))
                        continue;
-               if (PageTransCompound(page) && !thp_migration_supported()) {
-                       get_page(page);
-                       pte_unmap_unlock(pte, ptl);
-                       lock_page(page);
-                       ret = split_huge_page(page);
-                       unlock_page(page);
-                       put_page(page);
-                       /* Failed to split -- skip. */
-                       if (ret) {
-                               pte = pte_offset_map_lock(walk->mm, pmd,
-                                               addr, &ptl);
-                               continue;
-                       }
-                       goto retry;
-               }
-
                migrate_page_add(page, qp->pagelist, flags);
        }
        pte_unmap_unlock(pte - 1, ptl);
@@ -942,12 +917,13 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
        }
 }
 
-static struct page *new_node_page(struct page *page, unsigned long node, int **x)
+/* page allocation callback for NUMA node migration */
+struct page *alloc_new_node_page(struct page *page, unsigned long node)
 {
        if (PageHuge(page))
                return alloc_huge_page_node(page_hstate(compound_head(page)),
                                        node);
-       else if (thp_migration_supported() && PageTransHuge(page)) {
+       else if (PageTransHuge(page)) {
                struct page *thp;
 
                thp = alloc_pages_node(node,
@@ -986,7 +962,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
                        flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 
        if (!list_empty(&pagelist)) {
-               err = migrate_pages(&pagelist, new_node_page, NULL, dest,
+               err = migrate_pages(&pagelist, alloc_new_node_page, NULL, dest,
                                        MIGRATE_SYNC, MR_SYSCALL);
                if (err)
                        putback_movable_pages(&pagelist);
@@ -1107,7 +1083,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
  * list of pages handed to migrate_pages()--which is how we get here--
  * is in virtual address order.
  */
-static struct page *new_page(struct page *page, unsigned long start, int **x)
+static struct page *new_page(struct page *page, unsigned long start)
 {
        struct vm_area_struct *vma;
        unsigned long uninitialized_var(address);
@@ -1123,7 +1099,7 @@ static struct page *new_page(struct page *page, unsigned long start, int **x)
        if (PageHuge(page)) {
                return alloc_huge_page_vma(page_hstate(compound_head(page)),
                                vma, address);
-       } else if (thp_migration_supported() && PageTransHuge(page)) {
+       } else if (PageTransHuge(page)) {
                struct page *thp;
 
                thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
@@ -1152,7 +1128,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
        return -ENOSYS;
 }
 
-static struct page *new_page(struct page *page, unsigned long start, int **x)
+static struct page *new_page(struct page *page, unsigned long start)
 {
        return NULL;
 }
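
Note: the renamed alloc_new_node_page() and the pared-down new_page() above both
use the two-argument allocation-callback shape seen throughout this diff (the
page being migrated plus the opaque private value that migrate_pages() passes
through; the target node on this path), with the old third "int **" argument
dropped. As a reference point, a minimal sketch of such a callback follows. The
hypothetical name example_new_node_page and the GFP-flag / THP details are
assumptions reconstructed from the surrounding mempolicy.c context, not taken
verbatim from these hunks (the hugetlbfs case, handled via
alloc_huge_page_node() in the real callback, is omitted); it is kernel-context
code relying on linux/gfp.h, linux/huge_mm.h and linux/page-flags.h helpers.

	/* Sketch: allocate a replacement page on the requested node. */
	static struct page *example_new_node_page(struct page *page,
						  unsigned long node)
	{
		if (PageTransHuge(page)) {
			struct page *thp;

			/* Assumed flags, mirroring alloc_new_node_page(). */
			thp = alloc_pages_node(node,
					GFP_TRANSHUGE | __GFP_THISNODE,
					HPAGE_PMD_ORDER);
			if (!thp)
				return NULL;
			prep_transhuge_page(thp);
			return thp;
		}
		/* Base pages: a movable highmem page pinned to the node. */
		return __alloc_pages_node(node,
				GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
	}

Callers hand such a callback to migrate_pages() exactly as the hunk above does
with alloc_new_node_page():

	err = migrate_pages(&pagelist, alloc_new_node_page, NULL, dest,
				MIGRATE_SYNC, MR_SYSCALL);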