/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);
static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}
static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	/*
	 * This needs node 0 to be always present, even
	 * on systems supporting sparse numa ids.
	 */
	return !!lru->node[0].memcg_lrus;
}
static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * Either the lock or RCU protects the array of per-cgroup lists
	 * from relocation (see memcg_update_list_lru_node).
	 */
	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
					   lockdep_is_held(&nlru->lock));
	if (memcg_lrus && idx >= 0)
		return memcg_lrus->lru[idx];
	return &nlru->lru;
}
static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
	struct page *page;

	if (!memcg_kmem_enabled())
		return NULL;
	page = virt_to_head_page(ptr);
	return page->mem_cgroup;
}
static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	struct mem_cgroup *memcg;

	if (!nlru->memcg_lrus)
		return &nlru->lru;

	memcg = mem_cgroup_from_kmem(ptr);
	if (!memcg)
		return &nlru->lru;

	return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item);
		list_add_tail(item, &l->list);
		l->nr_items++;
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
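
/*
 * Illustrative sketch (not part of the original file): a minimal example of
 * how a caller typically uses list_lru_add()/list_lru_del(). The names
 * my_object, my_cache_lru, my_cache_make_reclaimable and my_cache_pin are
 * hypothetical; the only points taken from the API above are that the object
 * embeds an initialized, empty list_head and that add/del report whether the
 * item actually changed state.
 */
#if 0	/* example only */
struct my_object {
	struct list_head lru;	/* must be INIT_LIST_HEAD()-ed at alloc time */
	/* ... payload ... */
};

static struct list_lru my_cache_lru;	/* assumed initialized elsewhere */

static void my_cache_make_reclaimable(struct my_object *obj)
{
	/* Returns true only if the item was not already on an LRU list. */
	if (list_lru_add(&my_cache_lru, &obj->lru))
		pr_debug("object %p is now reclaimable\n", obj);
}

static void my_cache_pin(struct my_object *obj)
{
	/* Returns true only if the item was actually removed. */
	if (list_lru_del(&my_cache_lru, &obj->lru))
		pr_debug("object %p pinned, off the LRU\n", obj);
}
#endif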
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
	count = l->nr_items;
	rcu_read_unlock();

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);
unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}
unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
				   isolate, cb_arg, nr_to_walk);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);
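
/*
 * Illustrative sketch (not part of the original file): a minimal
 * list_lru_walk_cb callback, showing the contract enforced by
 * __list_lru_walk_one() above. The callback is invoked with nlru->lock held;
 * it may drop that lock, but must then re-take it and return LRU_RETRY or
 * LRU_REMOVED_RETRY so the walk restarts. my_object (from the earlier
 * sketch), my_obj_busy and my_obj_free are hypothetical names.
 */
#if 0	/* example only */
static enum lru_status my_isolate(struct list_head *item,
				  struct list_lru_one *lru,
				  spinlock_t *lock, void *cb_arg)
{
	struct my_object *obj = container_of(item, struct my_object, lru);

	if (my_obj_busy(obj))
		return LRU_ROTATE;	/* keep it, but move it to the tail */

	/* Take the item off the list; this also fixes up lru->nr_items. */
	list_lru_isolate(lru, item);
	spin_unlock(lock);

	my_obj_free(obj);		/* may sleep: the lru lock is dropped */

	spin_lock(lock);
	return LRU_REMOVED_RETRY;	/* list changed under us: restart walk */
}
#endif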
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
					nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
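
/*
 * Illustrative sketch (not part of the original file): the typical consumer
 * of the count/walk API is a NUMA- and memcg-aware shrinker. The
 * list_lru_shrink_count()/list_lru_shrink_walk() wrappers from
 * <linux/list_lru.h> forward sc->nid and sc->memcg to list_lru_count_one()
 * and list_lru_walk_one() above. my_cache_lru and my_isolate continue the
 * earlier hypothetical sketches.
 */
#if 0	/* example only */
static unsigned long my_shrink_count(struct shrinker *shrink,
				     struct shrink_control *sc)
{
	return list_lru_shrink_count(&my_cache_lru, sc);
}

static unsigned long my_shrink_scan(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	return list_lru_shrink_walk(&my_cache_lru, sc, my_isolate, NULL);
}

static struct shrinker my_shrinker = {
	.count_objects	= my_shrink_count,
	.scan_objects	= my_shrink_scan,
	.seeks		= DEFAULT_SEEKS,
	.flags		= SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};
#endif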
static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}
#ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}
static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i - 1);
	return -ENOMEM;
}
static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	int size = memcg_nr_cache_ids;

	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
			      size * sizeof(void *), GFP_KERNEL);
	if (!memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
		kvfree(memcg_lrus);
		return -ENOMEM;
	}
	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);

	return 0;
}
static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * This is called when the shrinker has already been unregistered,
	 * and nobody can use it. So, there is no need to use kvfree_rcu().
	 */
	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
	kvfree(memcg_lrus);
}
static void kvfree_rcu(struct rcu_head *head)
{
	struct list_lru_memcg *mlru;

	mlru = container_of(head, struct list_lru_memcg, rcu);
	kvfree(mlru);
}
static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = rcu_dereference_protected(nlru->memcg_lrus,
					lockdep_is_held(&list_lrus_mutex));
	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kvfree(new);
		return -ENOMEM;
	}

	memcpy(&new->lru, &old->lru, old_size * sizeof(void *));

	/*
	 * The locking below allows readers that hold nlru->lock to avoid
	 * taking rcu_read_lock (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	rcu_assign_pointer(nlru->memcg_lrus, new);
	spin_unlock_irq(&nlru->lock);

	call_rcu(&old->rcu, kvfree_rcu);
	return 0;
}
static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	struct list_lru_memcg *memcg_lrus;

	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
					       lockdep_is_held(&list_lrus_mutex));
	/*
	 * Do not bother shrinking the array back to the old size, because we
	 * cannot handle allocation failures here.
	 */
	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
}
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}
static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}
static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;

		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}
static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}
int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}
static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
				      int src_idx, int dst_idx)
{
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);
	dst->nr_items += src->nr_items;
	src->nr_items = 0;

	spin_unlock_irq(&nlru->lock);
}
static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, int dst_idx)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
}
void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_idx);
	mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;
	int err = -ENOMEM;

#ifdef CONFIG_MEMCG_KMEM
	if (shrinker)
		lru->shrinker_id = shrinker->id;
	else
		lru->shrinker_id = -1;
#endif
	memcg_get_cache_ids();

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);
void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
	lru->shrinker_id = -1;
#endif
	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);
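
/*
 * Illustrative sketch (not part of the original file): bringing a memcg-aware
 * list_lru up and tearing it down. It assumes the list_lru_init_memcg()
 * wrapper from <linux/list_lru.h> that passes memcg_aware=true and the owning
 * shrinker to __list_lru_init(); the shrinker must already have an id
 * assigned (e.g. via prealloc_shrinker()) so that lru->shrinker_id is valid
 * when the lru is initialized. my_cache_lru and my_shrinker continue the
 * earlier hypothetical sketches.
 */
#if 0	/* example only */
static int my_cache_start(void)
{
	int err;

	err = prealloc_shrinker(&my_shrinker);
	if (err)
		return err;

	err = list_lru_init_memcg(&my_cache_lru, &my_shrinker);
	if (err) {
		free_prealloced_shrinker(&my_shrinker);
		return err;
	}

	register_shrinker_prepared(&my_shrinker);
	return 0;
}

static void my_cache_stop(void)
{
	unregister_shrinker(&my_shrinker);
	list_lru_destroy(&my_cache_lru);
}
#endif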