asedeno.scripts.mit.edu Git - linux.git/blobdiff - mm/swapfile.c
IB/core: Ensure we map P2P memory correctly in rdma_rw_ctx_[init|destroy]()
[linux.git] / mm / swapfile.c
index 043645e7f0b5892ffa004cdca2d85deaecfd4674..d954b71c4f9c2e842e142713e1a921addb6a4c9d 100644 (file)
@@ -940,18 +940,18 @@ static unsigned long scan_swap_map(struct swap_info_struct *si,
 
 }
 
-int get_swap_pages(int n_goal, bool cluster, swp_entry_t swp_entries[])
+int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
 {
-       unsigned long nr_pages = cluster ? SWAPFILE_CLUSTER : 1;
+       unsigned long size = swap_entry_size(entry_size);
        struct swap_info_struct *si, *next;
        long avail_pgs;
        int n_ret = 0;
        int node;
 
        /* Only single cluster request supported */
-       WARN_ON_ONCE(n_goal > 1 && cluster);
+       WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER);
 
-       avail_pgs = atomic_long_read(&nr_swap_pages) / nr_pages;
+       avail_pgs = atomic_long_read(&nr_swap_pages) / size;
        if (avail_pgs <= 0)
                goto noswap;
 
@@ -961,7 +961,7 @@ int get_swap_pages(int n_goal, bool cluster, swp_entry_t swp_entries[])
        if (n_goal > avail_pgs)
                n_goal = avail_pgs;
 
-       atomic_long_sub(n_goal * nr_pages, &nr_swap_pages);
+       atomic_long_sub(n_goal * size, &nr_swap_pages);
 
        spin_lock(&swap_avail_lock);
 
@@ -988,14 +988,14 @@ int get_swap_pages(int n_goal, bool cluster, swp_entry_t swp_entries[])
                        spin_unlock(&si->lock);
                        goto nextsi;
                }
-               if (cluster) {
+               if (size == SWAPFILE_CLUSTER) {
                        if (!(si->flags & SWP_FILE))
                                n_ret = swap_alloc_cluster(si, swp_entries);
                } else
                        n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
                                                    n_goal, swp_entries);
                spin_unlock(&si->lock);
-               if (n_ret || cluster)
+               if (n_ret || size == SWAPFILE_CLUSTER)
                        goto check_out;
                pr_debug("scan_swap_map of si %d failed to find offset\n",
                        si->type);
@@ -1021,7 +1021,7 @@ int get_swap_pages(int n_goal, bool cluster, swp_entry_t swp_entries[])
 
 check_out:
        if (n_ret < n_goal)
-               atomic_long_add((long)(n_goal - n_ret) * nr_pages,
+               atomic_long_add((long)(n_goal - n_ret) * size,
                                &nr_swap_pages);
 noswap:
        return n_ret;
@@ -1123,16 +1123,13 @@ static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry,
        return p;
 }
 
-static unsigned char __swap_entry_free(struct swap_info_struct *p,
-                                      swp_entry_t entry, unsigned char usage)
+static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
+                                             unsigned long offset,
+                                             unsigned char usage)
 {
-       struct swap_cluster_info *ci;
-       unsigned long offset = swp_offset(entry);
        unsigned char count;
        unsigned char has_cache;
 
-       ci = lock_cluster_or_swap_info(p, offset);
-
        count = p->swap_map[offset];
 
        has_cache = count & SWAP_HAS_CACHE;
@@ -1160,6 +1157,17 @@ static unsigned char __swap_entry_free(struct swap_info_struct *p,
        usage = count | has_cache;
        p->swap_map[offset] = usage ? : SWAP_HAS_CACHE;
 
+       return usage;
+}
+
+static unsigned char __swap_entry_free(struct swap_info_struct *p,
+                                      swp_entry_t entry, unsigned char usage)
+{
+       struct swap_cluster_info *ci;
+       unsigned long offset = swp_offset(entry);
+
+       ci = lock_cluster_or_swap_info(p, offset);
+       usage = __swap_entry_free_locked(p, offset, usage);
        unlock_cluster_or_swap_info(p, ci);
 
        return usage;
@@ -1215,8 +1223,8 @@ void put_swap_page(struct page *page, swp_entry_t entry)
        if (!si)
                return;
 
+       ci = lock_cluster_or_swap_info(si, offset);
        if (size == SWAPFILE_CLUSTER) {
-               ci = lock_cluster(si, offset);
                VM_BUG_ON(!cluster_is_huge(ci));
                map = si->swap_map + offset;
                for (i = 0; i < SWAPFILE_CLUSTER; i++) {
@@ -1225,13 +1233,9 @@ void put_swap_page(struct page *page, swp_entry_t entry)
                        if (val == SWAP_HAS_CACHE)
                                free_entries++;
                }
-               if (!free_entries) {
-                       for (i = 0; i < SWAPFILE_CLUSTER; i++)
-                               map[i] &= ~SWAP_HAS_CACHE;
-               }
                cluster_clear_huge(ci);
-               unlock_cluster(ci);
                if (free_entries == SWAPFILE_CLUSTER) {
+                       unlock_cluster_or_swap_info(si, ci);
                        spin_lock(&si->lock);
                        ci = lock_cluster(si, offset);
                        memset(map, 0, SWAPFILE_CLUSTER);
@@ -1242,12 +1246,16 @@ void put_swap_page(struct page *page, swp_entry_t entry)
                        return;
                }
        }
-       if (size == 1 || free_entries) {
-               for (i = 0; i < size; i++, entry.val++) {
-                       if (!__swap_entry_free(si, entry, SWAP_HAS_CACHE))
-                               free_swap_slot(entry);
+       for (i = 0; i < size; i++, entry.val++) {
+               if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) {
+                       unlock_cluster_or_swap_info(si, ci);
+                       free_swap_slot(entry);
+                       if (i == size - 1)
+                               return;
+                       lock_cluster_or_swap_info(si, offset);
                }
        }
+       unlock_cluster_or_swap_info(si, ci);
 }
 
 #ifdef CONFIG_THP_SWAP