Merge branches 'pm-core', 'pm-qos', 'pm-domains' and 'pm-opp'

diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 0b859af06b87df4e17af4180b953a6337c251dff..1e7873e40c9a16e922d4800e6dc41486eee23540 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -96,7 +96,7 @@
 #include <linux/printk.h>
 
 #include <asm/tlbflush.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 #include "internal.h"
 
@@ -276,7 +276,9 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
                                return ERR_PTR(-EINVAL);
                }
        } else if (mode == MPOL_LOCAL) {
-               if (!nodes_empty(*nodes))
+               if (!nodes_empty(*nodes) ||
+                   (flags & MPOL_F_STATIC_NODES) ||
+                   (flags & MPOL_F_RELATIVE_NODES))
                        return ERR_PTR(-EINVAL);
                mode = MPOL_PREFERRED;
        } else if (nodes_empty(*nodes))
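
With the added checks, MPOL_LOCAL can no longer be combined with MPOL_F_STATIC_NODES or MPOL_F_RELATIVE_NODES; the combination now fails with -EINVAL instead of being accepted with the flag silently ignored, which is what the old nodes_empty()-only check allowed. A minimal userspace sketch of the new behaviour, assuming the UAPI constants from <linux/mempolicy.h> and the raw set_mempolicy(2) syscall; this demo is illustrative and not part of the patch:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/mempolicy.h>

int main(void)
{
	/* MPOL_LOCAL takes no nodemask, so pass NULL/0. */
	long ret = syscall(SYS_set_mempolicy,
			   MPOL_LOCAL | MPOL_F_STATIC_NODES, NULL, 0);

	if (ret == -1 && errno == EINVAL)
		printf("rejected as expected: %s\n", strerror(errno));
	else
		printf("accepted: kernel predates this check?\n");
	return 0;
}
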
@@ -496,7 +498,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
                        page = pmd_page(*pmd);
                        if (is_huge_zero_page(page)) {
                                spin_unlock(ptl);
-                               split_huge_pmd(vma, pmd, addr);
+                               __split_huge_pmd(vma, pmd, addr, false, NULL);
                        } else {
                                get_page(page);
                                spin_unlock(ptl);
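
split_huge_pmd() is a thin wrapper around __split_huge_pmd(), so the direct call with freeze=false and page=NULL keeps the same splitting behaviour and only skips the wrapper's unlocked re-check of the pmd, which this caller has already performed under the ptl. From memory, the wrapper in include/linux/huge_mm.h of this era looks roughly like the following; treat it as an approximation for context, not a quote:

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (pmd_trans_huge(*____pmd) || pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
					 false, NULL);			\
	} while (0)
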
@@ -1679,25 +1681,17 @@ static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
 static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
        int nd)
 {
-       switch (policy->mode) {
-       case MPOL_PREFERRED:
-               if (!(policy->flags & MPOL_F_LOCAL))
-                       nd = policy->v.preferred_node;
-               break;
-       case MPOL_BIND:
+       if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
+               nd = policy->v.preferred_node;
+       else {
                /*
-                * Normally, MPOL_BIND allocations are node-local within the
-                * allowed nodemask.  However, if __GFP_THISNODE is set and the
-                * current node isn't part of the mask, we use the zonelist for
-                * the first node in the mask instead.
+                * __GFP_THISNODE shouldn't even be used with the bind policy
+                * because we might easily break the expectation to stay on the
+                * requested node and not break the policy.
                 */
-               if (unlikely(gfp & __GFP_THISNODE) &&
-                               unlikely(!node_isset(nd, policy->v.nodes)))
-                       nd = first_node(policy->v.nodes);
-               break;
-       default:
-               BUG();
+               WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
        }
+
        return node_zonelist(nd, gfp);
 }
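
The MPOL_BIND special case can be dropped because the zonelist only decides where the allocator's fallback walk starts; the policy itself is enforced by the nodemask that callers pass alongside the zonelist. For context, that mask comes from policy_nodemask() just above, which for MPOL_BIND returns a pointer into the mempolicy object; roughly paraphrased, not part of this patch:

/* Paraphrased sketch of policy_nodemask() in this file: only MPOL_BIND
 * hands a restricting nodemask to the page allocator, and that mask
 * lives inside the mempolicy object itself. */
static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
{
	if (unlikely(policy->mode == MPOL_BIND) &&
	    apply_policy_zone(policy, gfp_zone(gfp)) &&
	    cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
		return &policy->v.nodes;	/* points into *policy */

	return NULL;
}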
 
@@ -2023,8 +2017,8 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 
        nmask = policy_nodemask(gfp, pol);
        zl = policy_zonelist(gfp, pol, node);
-       mpol_cond_put(pol);
        page = __alloc_pages_nodemask(gfp, order, zl, nmask);
+       mpol_cond_put(pol);
 out:
        if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
                goto retry_cpuset;
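
The reorder matters because nmask can point into *pol (see the policy_nodemask() sketch above), so dropping the reference with mpol_cond_put() before __alloc_pages_nodemask() reads the mask could dereference freed memory whenever that put releases the last reference. A minimal, self-contained userspace model of the ordering rule, using hypothetical names rather than kernel code:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a refcounted mempolicy whose nodemask is embedded in it. */
struct policy {
	int refcnt;
	unsigned long nodes;		/* plays the role of pol->v.nodes */
};

static unsigned long *policy_nodes(struct policy *p)
{
	return &p->nodes;		/* points INTO the refcounted object */
}

static void policy_put(struct policy *p)
{
	if (--p->refcnt == 0)
		free(p);		/* last reference: object is gone */
}

static void alloc_with_mask(const unsigned long *mask)
{
	printf("allocating with mask %#lx\n", *mask);
}

int main(void)
{
	struct policy *p = malloc(sizeof(*p));

	p->refcnt = 1;
	p->nodes = 0x3;

	unsigned long *mask = policy_nodes(p);

	/*
	 * Wrong order (the old code above): policy_put(p) followed by
	 * alloc_with_mask(mask) would read freed memory.  Correct order,
	 * as in the fix: use the mask first, then drop the reference.
	 */
	alloc_with_mask(mask);
	policy_put(p);
	return 0;
}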