page cache: Convert delete_batch to XArray
author    Matthew Wilcox <willy@infradead.org>
          Mon, 4 Dec 2017 08:59:45 +0000 (03:59 -0500)
committer Matthew Wilcox <willy@infradead.org>
          Sun, 21 Oct 2018 14:46:36 +0000 (10:46 -0400)
Rename the function from page_cache_tree_delete_batch to just
page_cache_delete_batch, and convert the iteration to the XArray:
an XA_STATE cursor with xas_for_each() replaces
radix_tree_for_each_slot(), and xas_store(NULL) replaces the
radix_tree_clear_tags()/__radix_tree_replace() pair.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
diff --git a/mm/filemap.c b/mm/filemap.c
index ae1fcaa24f97c9fdf29591849367d1ed1031809a..f7f9af1d98b01046d9013c1abcd4e5ccf9638c03 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -272,7 +272,7 @@ void delete_from_page_cache(struct page *page)
 EXPORT_SYMBOL(delete_from_page_cache);
 
 /*
- * page_cache_tree_delete_batch - delete several pages from page cache
+ * page_cache_delete_batch - delete several pages from page cache
  * @mapping: the mapping to which pages belong
  * @pvec: pagevec with pages to delete
  *
@@ -285,23 +285,18 @@ EXPORT_SYMBOL(delete_from_page_cache);
  *
  * The function expects the i_pages lock to be held.
  */
-static void
-page_cache_tree_delete_batch(struct address_space *mapping,
+static void page_cache_delete_batch(struct address_space *mapping,
                             struct pagevec *pvec)
 {
-       struct radix_tree_iter iter;
-       void **slot;
+       XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);
        int total_pages = 0;
        int i = 0, tail_pages = 0;
        struct page *page;
-       pgoff_t start;
 
-       start = pvec->pages[0]->index;
-       radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
+       mapping_set_update(&xas, mapping);
+       xas_for_each(&xas, page, ULONG_MAX) {
                if (i >= pagevec_count(pvec) && !tail_pages)
                        break;
-               page = radix_tree_deref_slot_protected(slot,
-                                                      &mapping->i_pages.xa_lock);
                if (xa_is_value(page))
                        continue;
                if (!tail_pages) {
@@ -310,8 +305,11 @@ page_cache_tree_delete_batch(struct address_space *mapping,
                         * have our pages locked so they are protected from
                         * being removed.
                         */
-                       if (page != pvec->pages[i])
+                       if (page != pvec->pages[i]) {
+                               VM_BUG_ON_PAGE(page->index >
+                                               pvec->pages[i]->index, page);
                                continue;
+                       }
                        WARN_ON_ONCE(!PageLocked(page));
                        if (PageTransHuge(page) && !PageHuge(page))
                                tail_pages = HPAGE_PMD_NR - 1;
@@ -322,11 +320,11 @@ page_cache_tree_delete_batch(struct address_space *mapping,
                         */
                        i++;
                } else {
+                       VM_BUG_ON_PAGE(page->index + HPAGE_PMD_NR - tail_pages
+                                       != pvec->pages[i]->index, page);
                        tail_pages--;
                }
-               radix_tree_clear_tags(&mapping->i_pages, iter.node, slot);
-               __radix_tree_replace(&mapping->i_pages, iter.node, slot, NULL,
-                               workingset_lookup_update(mapping));
+               xas_store(&xas, NULL);
                total_pages++;
        }
        mapping->nrpages -= total_pages;
@@ -347,7 +345,7 @@ void delete_from_page_cache_batch(struct address_space *mapping,
 
                unaccount_page_cache_page(mapping, pvec->pages[i]);
        }
-       page_cache_tree_delete_batch(mapping, pvec);
+       page_cache_delete_batch(mapping, pvec);
        xa_unlock_irqrestore(&mapping->i_pages, flags);
 
        for (i = 0; i < pagevec_count(pvec); i++)
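
For reference, below is a minimal, self-contained sketch (not part of this patch; the xarray, the helper's name and the locking are illustrative) of the XA_STATE / xas_for_each() / xas_store() pattern that page_cache_delete_batch() now uses: walk the array under its lock, skip value entries (the page cache's shadow/swap entries, checked with xa_is_value() above), and clear each remaining slot by storing NULL through the iteration state.

#include <linux/xarray.h>

/*
 * Illustrative helper (hypothetical, not in mm/filemap.c): erase every
 * present entry at index <= max from @xa, skipping value entries.
 */
static unsigned long erase_present_entries(struct xarray *xa, unsigned long max)
{
	XA_STATE(xas, xa, 0);		/* iteration cursor starting at index 0 */
	unsigned long erased = 0;
	void *entry;

	xas_lock(&xas);			/* filemap holds the i_pages lock instead */
	xas_for_each(&xas, entry, max) {
		if (xa_is_value(entry))	/* leave exceptional (value) entries alone */
			continue;
		xas_store(&xas, NULL);	/* clear the slot at the cursor's index */
		erased++;
	}
	xas_unlock(&xas);

	return erased;
}

In the patch itself, mapping_set_update(&xas, mapping) additionally registers the workingset node-update callback on the xa_state, taking over the role of the workingset_lookup_update() argument that was previously passed to __radix_tree_replace().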