asedeno.scripts.mit.edu Git - linux.git/blobdiff - mm/list_lru.c
Merge tag 'f2fs-for-4.19' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs
[linux.git] / mm / list_lru.c
index 1fc5be746e69bf1c9ccb38f01eb58242b2445d8c..5b30625fd3651a51deab140c3412897a51624f4f 100644 (file)
@@ -30,6 +30,11 @@ static void list_lru_unregister(struct list_lru *lru)
        mutex_unlock(&list_lrus_mutex);
 }
 
+static int lru_shrinker_id(struct list_lru *lru)
+{
+       return lru->shrinker_id;
+}
+
 static inline bool list_lru_memcg_aware(struct list_lru *lru)
 {
        /*
@@ -93,6 +98,11 @@ static void list_lru_unregister(struct list_lru *lru)
 {
 }
 
+static int lru_shrinker_id(struct list_lru *lru)
+{
+       return -1;
+}
+
 static inline bool list_lru_memcg_aware(struct list_lru *lru)
 {
        return false;
@@ -118,13 +128,17 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
 {
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];
+       struct mem_cgroup *memcg;
        struct list_lru_one *l;
 
        spin_lock(&nlru->lock);
        if (list_empty(item)) {
-               l = list_lru_from_kmem(nlru, item, NULL);
+               l = list_lru_from_kmem(nlru, item, &memcg);
                list_add_tail(item, &l->list);
-               l->nr_items++;
+               /* Set shrinker bit if the first element was added */
+               if (!l->nr_items++)
+                       memcg_set_shrinker_bit(memcg, nid,
+                                              lru_shrinker_id(lru));
                nlru->nr_items++;
                spin_unlock(&nlru->lock);
                return true;
@@ -195,17 +209,15 @@ unsigned long list_lru_count_node(struct list_lru *lru, int nid)
 EXPORT_SYMBOL_GPL(list_lru_count_node);
 
 static unsigned long
-__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
+__list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx,
                    list_lru_walk_cb isolate, void *cb_arg,
                    unsigned long *nr_to_walk)
 {
 
-       struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;
        struct list_head *item, *n;
        unsigned long isolated = 0;
 
-       spin_lock(&nlru->lock);
        l = list_lru_from_memcg_idx(nlru, memcg_idx);
 restart:
        list_for_each_safe(item, n, &l->list) {
@@ -251,8 +263,6 @@ __list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
                        BUG();
                }
        }
-
-       spin_unlock(&nlru->lock);
        return isolated;
 }
 
@@ -261,11 +271,32 @@ list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
                  list_lru_walk_cb isolate, void *cb_arg,
                  unsigned long *nr_to_walk)
 {
-       return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
-                                  isolate, cb_arg, nr_to_walk);
+       struct list_lru_node *nlru = &lru->node[nid];
+       unsigned long ret;
+
+       spin_lock(&nlru->lock);
+       ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
+                                 nr_to_walk);
+       spin_unlock(&nlru->lock);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(list_lru_walk_one);
 
+unsigned long
+list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
+                     list_lru_walk_cb isolate, void *cb_arg,
+                     unsigned long *nr_to_walk)
+{
+       struct list_lru_node *nlru = &lru->node[nid];
+       unsigned long ret;
+
+       spin_lock_irq(&nlru->lock);
+       ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
+                                 nr_to_walk);
+       spin_unlock_irq(&nlru->lock);
+       return ret;
+}
+
 unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
                                 list_lru_walk_cb isolate, void *cb_arg,
                                 unsigned long *nr_to_walk)
@@ -273,12 +304,18 @@ unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
        long isolated = 0;
        int memcg_idx;
 
-       isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
-                                       nr_to_walk);
+       isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
+                                     nr_to_walk);
        if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
                for_each_memcg_cache_index(memcg_idx) {
-                       isolated += __list_lru_walk_one(lru, nid, memcg_idx,
-                                               isolate, cb_arg, nr_to_walk);
+                       struct list_lru_node *nlru = &lru->node[nid];
+
+                       spin_lock(&nlru->lock);
+                       isolated += __list_lru_walk_one(nlru, memcg_idx,
+                                                       isolate, cb_arg,
+                                                       nr_to_walk);
+                       spin_unlock(&nlru->lock);
+
                        if (*nr_to_walk <= 0)
                                break;
                }
@@ -501,10 +538,13 @@ int memcg_update_all_list_lrus(int new_size)
        goto out;
 }
 
-static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
-                                     int src_idx, int dst_idx)
+static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
+                                     int src_idx, struct mem_cgroup *dst_memcg)
 {
+       struct list_lru_node *nlru = &lru->node[nid];
+       int dst_idx = dst_memcg->kmemcg_id;
        struct list_lru_one *src, *dst;
+       bool set;
 
        /*
         * Since list_lru_{add,del} may be called under an IRQ-safe lock,
@@ -516,14 +556,17 @@ static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
        dst = list_lru_from_memcg_idx(nlru, dst_idx);
 
        list_splice_init(&src->list, &dst->list);
+       set = (!dst->nr_items && src->nr_items);
        dst->nr_items += src->nr_items;
+       if (set)
+               memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
        src->nr_items = 0;
 
        spin_unlock_irq(&nlru->lock);
 }
 
 static void memcg_drain_list_lru(struct list_lru *lru,
-                                int src_idx, int dst_idx)
+                                int src_idx, struct mem_cgroup *dst_memcg)
 {
        int i;
 
@@ -531,16 +574,16 @@ static void memcg_drain_list_lru(struct list_lru *lru,
                return;
 
        for_each_node(i)
-               memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
+               memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
 }
 
-void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
+void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
 {
        struct list_lru *lru;
 
        mutex_lock(&list_lrus_mutex);
        list_for_each_entry(lru, &list_lrus, list)
-               memcg_drain_list_lru(lru, src_idx, dst_idx);
+               memcg_drain_list_lru(lru, src_idx, dst_memcg);
        mutex_unlock(&list_lrus_mutex);
 }
 #else