diff --git a/mm/swapfile.c b/mm/swapfile.c
index 618358ad464bcecddc66397679d992a5deb27bf0..d954b71c4f9c2e842e142713e1a921addb6a4c9d 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -204,8 +204,16 @@ static void discard_swap_cluster(struct swap_info_struct *si,
 
 #ifdef CONFIG_THP_SWAP
 #define SWAPFILE_CLUSTER       HPAGE_PMD_NR
+
+#define swap_entry_size(size)  (size)
 #else
 #define SWAPFILE_CLUSTER       256
+
+/*
+ * Define swap_entry_size() as a constant to let the compiler
+ * optimize out some code if !CONFIG_THP_SWAP
+ */
+#define swap_entry_size(size)  1
 #endif
 #define LATENCY_LIMIT          256
 
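The swap_entry_size() macro is what lets the rest of this patch merge the
normal and huge code paths without runtime cost: with CONFIG_THP_SWAP
disabled it expands to the constant 1, so every "size == SWAPFILE_CLUSTER"
test below is compile-time false and the huge-entry branches are discarded
as dead code. A minimal user-space sketch of the same trick (all names are
illustrative, not taken from the kernel):

    #include <stdio.h>

    /* #define CONFIG_THP_SWAP */    /* toggle to mimic the Kconfig option */

    #ifdef CONFIG_THP_SWAP
    #define CLUSTER         512      /* stands in for HPAGE_PMD_NR */
    #define entry_size(n)   (n)      /* runtime value passes through */
    #else
    #define CLUSTER         256
    #define entry_size(n)   1        /* constant: huge branches fold away */
    #endif

    static void put_entries(int nr)
    {
            int size = entry_size(nr);

            (void)nr;   /* unused when the huge path is compiled out */

            /* Constant-false when entry_size(n) == 1, so the compiler
             * deletes this whole block, as in put_swap_page() below. */
            if (size == CLUSTER) {
                    printf("free whole cluster\n");
                    return;
            }
            printf("free %d entries one at a time\n", size);
    }

    int main(void)
    {
            put_entries(1);
            put_entries(CLUSTER);
            return 0;
    }
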
@@ -269,7 +277,9 @@ static inline void cluster_set_null(struct swap_cluster_info *info)
 
 static inline bool cluster_is_huge(struct swap_cluster_info *info)
 {
-       return info->flags & CLUSTER_FLAG_HUGE;
+       if (IS_ENABLED(CONFIG_THP_SWAP))
+               return info->flags & CLUSTER_FLAG_HUGE;
+       return false;
 }
 
 static inline void cluster_clear_huge(struct swap_cluster_info *info)
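
Using IS_ENABLED() instead of an #ifdef keeps cluster_is_huge() visible to
the compiler in both configurations while still folding to a constant false
when CONFIG_THP_SWAP is off. That is what allows later hunks to drop the
explicit "if (!IS_ENABLED(CONFIG_THP_SWAP))" early returns from the cluster
free path and from swap_page_trans_huge_swapped(): callers simply branch on
cluster_is_huge() and rely on dead-code elimination.
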
@@ -930,18 +940,18 @@ static unsigned long scan_swap_map(struct swap_info_struct *si,
 
 }
 
-int get_swap_pages(int n_goal, bool cluster, swp_entry_t swp_entries[])
+int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
 {
-       unsigned long nr_pages = cluster ? SWAPFILE_CLUSTER : 1;
+       unsigned long size = swap_entry_size(entry_size);
        struct swap_info_struct *si, *next;
        long avail_pgs;
        int n_ret = 0;
        int node;
 
        /* Only single cluster request supported */
-       WARN_ON_ONCE(n_goal > 1 && cluster);
+       WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER);
 
-       avail_pgs = atomic_long_read(&nr_swap_pages) / nr_pages;
+       avail_pgs = atomic_long_read(&nr_swap_pages) / size;
        if (avail_pgs <= 0)
                goto noswap;
 
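The signature change replaces the "bool cluster" flag with an explicit entry
size in pages, so one parameter now covers normal 4 KiB entries and, with
CONFIG_THP_SWAP, huge entries as well. A hedged sketch of how a caller such
as get_swap_page() in mm/swap_slots.c would pass it (the caller is not part
of this diff, so treat the exact shape as an assumption):

    /* Assumed caller shape, not taken verbatim from this patch. */
    swp_entry_t entry = (swp_entry_t){0};

    if (PageTransHuge(page))
            /* one huge entry covering HPAGE_PMD_NR pages */
            get_swap_pages(1, &entry, HPAGE_PMD_NR);
    else
            /* one normal single-page entry */
            get_swap_pages(1, &entry, 1);
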
@@ -951,7 +961,7 @@ int get_swap_pages(int n_goal, bool cluster, swp_entry_t swp_entries[])
        if (n_goal > avail_pgs)
                n_goal = avail_pgs;
 
-       atomic_long_sub(n_goal * nr_pages, &nr_swap_pages);
+       atomic_long_sub(n_goal * size, &nr_swap_pages);
 
        spin_lock(&swap_avail_lock);
 
@@ -978,14 +988,14 @@ int get_swap_pages(int n_goal, bool cluster, swp_entry_t swp_entries[])
                        spin_unlock(&si->lock);
                        goto nextsi;
                }
-               if (cluster) {
+               if (size == SWAPFILE_CLUSTER) {
                        if (!(si->flags & SWP_FILE))
                                n_ret = swap_alloc_cluster(si, swp_entries);
                } else
                        n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
                                                    n_goal, swp_entries);
                spin_unlock(&si->lock);
-               if (n_ret || cluster)
+               if (n_ret || size == SWAPFILE_CLUSTER)
                        goto check_out;
                pr_debug("scan_swap_map of si %d failed to find offset\n",
                        si->type);
@@ -1011,7 +1021,7 @@ int get_swap_pages(int n_goal, bool cluster, swp_entry_t swp_entries[])
 
 check_out:
        if (n_ret < n_goal)
-               atomic_long_add((long)(n_goal - n_ret) * nr_pages,
+               atomic_long_add((long)(n_goal - n_ret) * size,
                                &nr_swap_pages);
 noswap:
        return n_ret;
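
The accounting keeps its old structure: nr_swap_pages is debited up front
for n_goal entries of "size" pages each, and any shortfall is credited back
at check_out. For example, a request for one huge entry (n_goal = 1,
size = SWAPFILE_CLUSTER) that fails returns the full SWAPFILE_CLUSTER pages
to the pool, while a request for 8 normal entries that yields n_ret = 5
returns 3 pages.
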
@@ -1113,16 +1123,13 @@ static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry,
        return p;
 }
 
-static unsigned char __swap_entry_free(struct swap_info_struct *p,
-                                      swp_entry_t entry, unsigned char usage)
+static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
+                                             unsigned long offset,
+                                             unsigned char usage)
 {
-       struct swap_cluster_info *ci;
-       unsigned long offset = swp_offset(entry);
        unsigned char count;
        unsigned char has_cache;
 
-       ci = lock_cluster_or_swap_info(p, offset);
-
        count = p->swap_map[offset];
 
        has_cache = count & SWAP_HAS_CACHE;
@@ -1150,6 +1157,17 @@ static unsigned char __swap_entry_free(struct swap_info_struct *p,
        usage = count | has_cache;
        p->swap_map[offset] = usage ? : SWAP_HAS_CACHE;
 
+       return usage;
+}
+
+static unsigned char __swap_entry_free(struct swap_info_struct *p,
+                                      swp_entry_t entry, unsigned char usage)
+{
+       struct swap_cluster_info *ci;
+       unsigned long offset = swp_offset(entry);
+
+       ci = lock_cluster_or_swap_info(p, offset);
+       usage = __swap_entry_free_locked(p, offset, usage);
        unlock_cluster_or_swap_info(p, ci);
 
        return usage;
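
Splitting __swap_entry_free_locked() out of __swap_entry_free() gives the
unified put_swap_page() below a variant it can call repeatedly while it
already holds the cluster or swap_info lock; the original function shrinks
to a thin lock/unlock wrapper. A sketch of the pairing contract, assuming
the usual behavior of lock_cluster_or_swap_info() (it takes the per-cluster
lock when cluster info exists, i.e. on SSDs, and falls back to si->lock on
rotational devices):

    ci = lock_cluster_or_swap_info(si, offset);
    usage = __swap_entry_free_locked(si, offset, SWAP_HAS_CACHE);
    unlock_cluster_or_swap_info(si, ci);
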
@@ -1190,18 +1208,7 @@ void swap_free(swp_entry_t entry)
 /*
  * Called after dropping swapcache to decrease refcnt to swap entries.
  */
-static void swapcache_free(swp_entry_t entry)
-{
-       struct swap_info_struct *p;
-
-       p = _swap_info_get(entry);
-       if (p) {
-               if (!__swap_entry_free(p, entry, SWAP_HAS_CACHE))
-                       free_swap_slot(entry);
-       }
-}
-
-static void swapcache_free_cluster(swp_entry_t entry)
+void put_swap_page(struct page *page, swp_entry_t entry)
 {
        unsigned long offset = swp_offset(entry);
        unsigned long idx = offset / SWAPFILE_CLUSTER;
@@ -1210,43 +1217,45 @@ static void swapcache_free_cluster(swp_entry_t entry)
        unsigned char *map;
        unsigned int i, free_entries = 0;
        unsigned char val;
-
-       if (!IS_ENABLED(CONFIG_THP_SWAP))
-               return;
+       int size = swap_entry_size(hpage_nr_pages(page));
 
        si = _swap_info_get(entry);
        if (!si)
                return;
 
-       ci = lock_cluster(si, offset);
-       VM_BUG_ON(!cluster_is_huge(ci));
-       map = si->swap_map + offset;
-       for (i = 0; i < SWAPFILE_CLUSTER; i++) {
-               val = map[i];
-               VM_BUG_ON(!(val & SWAP_HAS_CACHE));
-               if (val == SWAP_HAS_CACHE)
-                       free_entries++;
-       }
-       if (!free_entries) {
-               for (i = 0; i < SWAPFILE_CLUSTER; i++)
-                       map[i] &= ~SWAP_HAS_CACHE;
+       ci = lock_cluster_or_swap_info(si, offset);
+       if (size == SWAPFILE_CLUSTER) {
+               VM_BUG_ON(!cluster_is_huge(ci));
+               map = si->swap_map + offset;
+               for (i = 0; i < SWAPFILE_CLUSTER; i++) {
+                       val = map[i];
+                       VM_BUG_ON(!(val & SWAP_HAS_CACHE));
+                       if (val == SWAP_HAS_CACHE)
+                               free_entries++;
+               }
+               cluster_clear_huge(ci);
+               if (free_entries == SWAPFILE_CLUSTER) {
+                       unlock_cluster_or_swap_info(si, ci);
+                       spin_lock(&si->lock);
+                       ci = lock_cluster(si, offset);
+                       memset(map, 0, SWAPFILE_CLUSTER);
+                       unlock_cluster(ci);
+                       mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
+                       swap_free_cluster(si, idx);
+                       spin_unlock(&si->lock);
+                       return;
+               }
        }
-       cluster_clear_huge(ci);
-       unlock_cluster(ci);
-       if (free_entries == SWAPFILE_CLUSTER) {
-               spin_lock(&si->lock);
-               ci = lock_cluster(si, offset);
-               memset(map, 0, SWAPFILE_CLUSTER);
-               unlock_cluster(ci);
-               mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
-               swap_free_cluster(si, idx);
-               spin_unlock(&si->lock);
-       } else if (free_entries) {
-               for (i = 0; i < SWAPFILE_CLUSTER; i++, entry.val++) {
-                       if (!__swap_entry_free(si, entry, SWAP_HAS_CACHE))
-                               free_swap_slot(entry);
+       for (i = 0; i < size; i++, entry.val++) {
+               if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) {
+                       unlock_cluster_or_swap_info(si, ci);
+                       free_swap_slot(entry);
+                       if (i == size - 1)
+                               return;
+                       lock_cluster_or_swap_info(si, offset);
                }
        }
+       unlock_cluster_or_swap_info(si, ci);
 }
 
 #ifdef CONFIG_THP_SWAP
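
This is the core of the unification: put_swap_page() absorbs both of the
old helpers, swapcache_free() and swapcache_free_cluster(). For a huge
entry it first scans the cluster under a single lock; if every slot is
cache-only (val == SWAP_HAS_CACHE) the whole cluster is freed in one shot,
otherwise it clears the huge flag and falls through to the per-entry loop
that both sizes now share. For size == 1 that loop degenerates to exactly
what swapcache_free() did: one locked decrement followed, if the entry
became free, by free_swap_slot(). Note the lock dance in the loop: the
cluster/swap_info lock is dropped before each free_swap_slot() call, which
can re-enter swapfile code and take si->lock itself, and re-taken
afterwards, except after the last entry, where the function returns with
the lock already released.
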
@@ -1266,14 +1275,6 @@ int split_swap_cluster(swp_entry_t entry)
 }
 #endif
 
-void put_swap_page(struct page *page, swp_entry_t entry)
-{
-       if (!PageTransHuge(page))
-               swapcache_free(entry);
-       else
-               swapcache_free_cluster(entry);
-}
-
 static int swp_entry_cmp(const void *ent1, const void *ent2)
 {
        const swp_entry_t *e1 = ent1, *e2 = ent2;
@@ -1424,17 +1425,14 @@ static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
        int i;
        bool ret = false;
 
-       if (!IS_ENABLED(CONFIG_THP_SWAP))
-               return swap_swapcount(si, entry) != 0;
-
        ci = lock_cluster_or_swap_info(si, offset);
        if (!ci || !cluster_is_huge(ci)) {
-               if (map[roffset] != SWAP_HAS_CACHE)
+               if (swap_count(map[roffset]))
                        ret = true;
                goto unlock_out;
        }
        for (i = 0; i < SWAPFILE_CLUSTER; i++) {
-               if (map[offset + i] != SWAP_HAS_CACHE) {
+               if (swap_count(map[offset + i])) {
                        ret = true;
                        break;
                }
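
The final hunk tightens the per-slot test in swap_page_trans_huge_swapped().
Assuming the usual definition swap_count(ent) == (ent & ~SWAP_HAS_CACHE)
from earlier in mm/swapfile.c, the two forms differ exactly for a fully
freed slot:

    value of map[i]         old test: != SWAP_HAS_CACHE    new test: swap_count()
    0 (free slot)           true, counted as swapped       0, not swapped
    SWAP_HAS_CACHE          false, not swapped             0, not swapped
    SWAP_HAS_CACHE | 1      true, swapped                  1, swapped

So swap_count() both reads more clearly and stops treating a freed slot as
a live swap reference. The removed IS_ENABLED() early return is no longer
needed because cluster_is_huge() now returns false by itself when
CONFIG_THP_SWAP is off.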