diff --git a/mm/vmscan.c b/mm/vmscan.c
index e411385b304a8b4cdee9245a3785e473d4f500c4..8b920ce3ae02f206f8598d6510986ceef6f0440d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -693,7 +693,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
        BUG_ON(!PageLocked(page));
        BUG_ON(mapping != page_mapping(page));
 
-       spin_lock_irqsave(&mapping->tree_lock, flags);
+       xa_lock_irqsave(&mapping->i_pages, flags);
        /*
         * The non racy check for a busy page.
         *
@@ -717,7 +717,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
         * load is not satisfied before that of page->_refcount.
         *
         * Note that if SetPageDirty is always performed via set_page_dirty,
-        * and thus under tree_lock, then this ordering is not required.
+        * and thus under the i_pages lock, then this ordering is not required.
         */
        if (unlikely(PageTransHuge(page)) && PageSwapCache(page))
                refcount = 1 + HPAGE_PMD_NR;
@@ -735,7 +735,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
                swp_entry_t swap = { .val = page_private(page) };
                mem_cgroup_swapout(page, swap);
                __delete_from_swap_cache(page);
-               spin_unlock_irqrestore(&mapping->tree_lock, flags);
+               xa_unlock_irqrestore(&mapping->i_pages, flags);
                put_swap_page(page, swap);
        } else {
                void (*freepage)(struct page *);
@@ -756,13 +756,13 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
                 * only page cache pages found in these are zero pages
                 * covering holes, and because we don't want to mix DAX
                 * exceptional entries and shadow exceptional entries in the
-                * same page_tree.
+                * same address_space.
                 */
                if (reclaimed && page_is_file_cache(page) &&
                    !mapping_exiting(mapping) && !dax_mapping(mapping))
                        shadow = workingset_eviction(mapping, page);
                __delete_from_page_cache(page, shadow);
-               spin_unlock_irqrestore(&mapping->tree_lock, flags);
+               xa_unlock_irqrestore(&mapping->i_pages, flags);
 
                if (freepage != NULL)
                        freepage(page);
@@ -771,7 +771,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
        return 1;
 
 cannot_free:
-       spin_unlock_irqrestore(&mapping->tree_lock, flags);
+       xa_unlock_irqrestore(&mapping->i_pages, flags);
        return 0;
 }
 
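For readers unfamiliar with the new API: xa_lock_irqsave()/xa_unlock_irqrestore() are the wrappers from <linux/xarray.h> that replace taking mapping->tree_lock directly, now that the lock lives with mapping->i_pages. Below is a minimal sketch of that locking pattern; the function name and the simplified refcount handling are illustrative only, not the kernel's actual __remove_mapping():

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/xarray.h>

/*
 * Illustrative sketch only (not the kernel's __remove_mapping): take the
 * i_pages lock, formerly mapping->tree_lock, with interrupts disabled,
 * freeze the page's reference count, drop the page from the page cache,
 * and unlock again.  Dirty-page, swap-cache and freepage handling are
 * elided here.
 */
static int sketch_remove_from_page_cache(struct address_space *mapping,
					 struct page *page)
{
	unsigned long flags;

	xa_lock_irqsave(&mapping->i_pages, flags);
	if (!page_ref_freeze(page, 2))		/* page still busy, bail out */
		goto cannot_free;
	__delete_from_page_cache(page, NULL);	/* NULL: no shadow entry */
	xa_unlock_irqrestore(&mapping->i_pages, flags);
	return 1;

cannot_free:
	xa_unlock_irqrestore(&mapping->i_pages, flags);
	return 0;
}
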
@@ -902,17 +902,6 @@ static void page_check_dirty_writeback(struct page *page,
                mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
 }
 
-struct reclaim_stat {
-       unsigned nr_dirty;
-       unsigned nr_unqueued_dirty;
-       unsigned nr_congested;
-       unsigned nr_writeback;
-       unsigned nr_immediate;
-       unsigned nr_activate;
-       unsigned nr_ref_keep;
-       unsigned nr_unmap_fail;
-};
-
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
@@ -1823,12 +1812,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
                sc->nr.file_taken += nr_taken;
 
        trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
-                       nr_scanned, nr_reclaimed,
-                       stat.nr_dirty,  stat.nr_writeback,
-                       stat.nr_congested, stat.nr_immediate,
-                       stat.nr_activate, stat.nr_ref_keep,
-                       stat.nr_unmap_fail,
-                       sc->priority, file);
+                       nr_scanned, nr_reclaimed, &stat, sc->priority, file);
        return nr_reclaimed;
 }
 
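The struct reclaim_stat definition is dropped from this file, and the tracepoint now receives the whole structure by pointer instead of each counter as a separate argument; the diff does not show where the struct moved (presumably a shared header visible to the tracepoint definition). Purely as an illustration of handling the counters as one object, with the fields reproduced from the lines removed above:

/* Fields copied from the definition removed above; the struct's new
 * location is not visible in this diff.
 */
struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_activate;
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
};

/* Illustrative helper, not kernel code: pages scanned this pass but kept
 * on the LRU, either re-activated or retained because they were recently
 * referenced.
 */
static unsigned reclaim_stat_pages_kept(const struct reclaim_stat *stat)
{
	return stat->nr_activate + stat->nr_ref_keep;
}
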
@@ -2546,7 +2530,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
                                        sc->memcg_low_skipped = 1;
                                        continue;
                                }
-                               mem_cgroup_event(memcg, MEMCG_LOW);
+                               memcg_memory_event(memcg, MEMCG_LOW);
                        }
 
                        reclaimed = sc->nr_reclaimed;