hugetlbfs: don't retry when pool page allocations start to fail
[linux.git] / mm / swap_state.c
index 85245fdec8d9a34ff74176da9f0d6b59fde901b7..8e7ce9a9bc5ebef151adf2b1178fce24bea8503c 100644 (file)
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -73,23 +73,24 @@ unsigned long total_swapcache_pages(void)
        unsigned int i, j, nr;
        unsigned long ret = 0;
        struct address_space *spaces;
+       struct swap_info_struct *si;
 
-       rcu_read_lock();
        for (i = 0; i < MAX_SWAPFILES; i++) {
-               /*
-                * The corresponding entries in nr_swapper_spaces and
-                * swapper_spaces will be reused only after at least
-                * one grace period.  So it is impossible for them
-                * belongs to different usage.
-                */
-               nr = nr_swapper_spaces[i];
-               spaces = rcu_dereference(swapper_spaces[i]);
-               if (!nr || !spaces)
+               swp_entry_t entry = swp_entry(i, 1);
+
+               /* Avoid get_swap_device() to warn for bad swap entry */
+               if (!swp_swap_info(entry))
+                       continue;
+               /* Prevent swapoff to free swapper_spaces */
+               si = get_swap_device(entry);
+               if (!si)
                        continue;
+               nr = nr_swapper_spaces[i];
+               spaces = swapper_spaces[i];
                for (j = 0; j < nr; j++)
                        ret += spaces[j].nrpages;
+               put_swap_device(si);
        }
-       rcu_read_unlock();
        return ret;
 }
 
@@ -115,7 +116,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
-       unsigned long i, nr = 1UL << compound_order(page);
+       unsigned long i, nr = compound_nr(page);
 
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapCache(page), page);
@@ -132,7 +133,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
                for (i = 0; i < nr; i++) {
                        VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
                        set_page_private(page + i, entry.val + i);
-                       xas_store(&xas, page + i);
+                       xas_store(&xas, page);
                        xas_next(&xas);
                }
                address_space->nrpages += nr;
@@ -167,7 +168,7 @@ void __delete_from_swap_cache(struct page *page, swp_entry_t entry)
 
        for (i = 0; i < nr; i++) {
                void *entry = xas_store(&xas, NULL);
-               VM_BUG_ON_PAGE(entry != page + i, entry);
+               VM_BUG_ON_PAGE(entry != page, entry);
                set_page_private(page + i, 0);
                xas_next(&xas);
        }
@@ -310,8 +311,13 @@ struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
                               unsigned long addr)
 {
        struct page *page;
+       struct swap_info_struct *si;
 
+       si = get_swap_device(entry);
+       if (!si)
+               return NULL;
        page = find_get_page(swap_address_space(entry), swp_offset(entry));
+       put_swap_device(si);
 
        INC_CACHE_INFO(find_total);
        if (page) {
@@ -354,8 +360,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool *new_page_allocated)
 {
-       struct page *found_page, *new_page = NULL;
-       struct address_space *swapper_space = swap_address_space(entry);
+       struct page *found_page = NULL, *new_page = NULL;
+       struct swap_info_struct *si;
        int err;
        *new_page_allocated = false;
 
@@ -365,7 +371,12 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
-               found_page = find_get_page(swapper_space, swp_offset(entry));
+               si = get_swap_device(entry);
+               if (!si)
+                       break;
+               found_page = find_get_page(swap_address_space(entry),
+                                          swp_offset(entry));
+               put_swap_device(si);
                if (found_page)
                        break;
 
@@ -601,20 +612,16 @@ int init_swap_address_space(unsigned int type, unsigned long nr_pages)
                mapping_set_no_writeback_tags(space);
        }
        nr_swapper_spaces[type] = nr;
-       rcu_assign_pointer(swapper_spaces[type], spaces);
+       swapper_spaces[type] = spaces;
 
        return 0;
 }
 
 void exit_swap_address_space(unsigned int type)
 {
-       struct address_space *spaces;
-
-       spaces = swapper_spaces[type];
+       kvfree(swapper_spaces[type]);
        nr_swapper_spaces[type] = 0;
-       rcu_assign_pointer(swapper_spaces[type], NULL);
-       synchronize_rcu();
-       kvfree(spaces);
+       swapper_spaces[type] = NULL;
 }
 
 static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
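
The recurring change in this diff is to pin the swap device around swap cache accesses: get_swap_device() takes a reference on the swap_info_struct (and returns NULL if the entry's device is already being torn down by swapoff), which keeps the swapper_spaces[] array for that device from being freed while the lookup runs; put_swap_device() drops the reference afterwards. A minimal sketch of that pattern, using only the calls that appear in the hunks above and a hypothetical helper name:

/*
 * Sketch only (not part of this patch): the get_swap_device()/
 * put_swap_device() pattern that replaces the old RCU scheme.
 */
static struct page *swap_cache_lookup_pinned(swp_entry_t entry)
{
	struct swap_info_struct *si;
	struct page *page;

	/* Pin the swap device; NULL means swapoff has already run. */
	si = get_swap_device(entry);
	if (!si)
		return NULL;

	/* swapper_spaces[] for this device cannot be freed here. */
	page = find_get_page(swap_address_space(entry), swp_offset(entry));

	/* Drop the reference taken above. */
	put_swap_device(si);
	return page;
}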