/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return lru->shrinker_id;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	/*
	 * This relies on node 0 always being present, even on
	 * systems that support sparse NUMA ids.
	 */
	return !!lru->node[0].memcg_lrus;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * Either the lock or RCU protects the array of per-cgroup lists
	 * from relocation (see memcg_update_list_lru_node).
	 */
	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
					   lockdep_is_held(&nlru->lock));
	if (memcg_lrus && idx >= 0)
		return memcg_lrus->lru[idx];
	return &nlru->lru;
}

static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
	struct page *page;

	if (!memcg_kmem_enabled())
		return NULL;
	page = virt_to_head_page(ptr);
	return page->mem_cgroup;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	struct list_lru_one *l = &nlru->lru;
	struct mem_cgroup *memcg = NULL;

	if (!nlru->memcg_lrus)
		goto out;

	memcg = mem_cgroup_from_kmem(ptr);
	if (!memcg)
		goto out;

	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
out:
	if (memcg_ptr)
		*memcg_ptr = memcg;
	return l;
}

#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return -1;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	if (memcg_ptr)
		*memcg_ptr = NULL;
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */

bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct mem_cgroup *memcg;
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, &memcg);
		list_add_tail(item, &l->list);
		/* Set shrinker bit if the first element was added */
		if (!l->nr_items++)
			memcg_set_shrinker_bit(memcg, nid,
					       lru_shrinker_id(lru));
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, NULL);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

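/*
 * Illustrative sketch, not part of the list_lru implementation: a typical
 * caller embeds a list_head in its own object and relies on list_lru_add()
 * and list_lru_del() returning false when the item is already on (or off)
 * the list.  The structure and helpers below are hypothetical.
 */
struct example_object {
	struct list_head lru;	/* must start out INIT_LIST_HEAD-initialized */
	/* ... payload ... */
};

static __maybe_unused bool example_object_park(struct list_lru *lru,
					       struct example_object *obj)
{
	/* Returns false (no-op) if the object is already on an LRU list. */
	return list_lru_add(lru, &obj->lru);
}

static __maybe_unused bool example_object_unpark(struct list_lru *lru,
						 struct example_object *obj)
{
	/* Returns false (no-op) if the object was never added or was isolated. */
	return list_lru_del(lru, &obj->lru);
}
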
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
	count = l->nr_items;
	rcu_read_unlock();

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

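/*
 * Illustrative sketch, not part of the list_lru implementation: a shrinker's
 * count_objects callback typically just forwards to list_lru_shrink_count(),
 * the <linux/list_lru.h> wrapper around list_lru_count_one() that takes the
 * node and memcg from the shrink_control.  The structure and function names
 * below are hypothetical.
 */
struct example_cache {
	struct shrinker shrinker;
	struct list_lru lru;
};

static __maybe_unused unsigned long
example_count_objects(struct shrinker *shrink, struct shrink_control *sc)
{
	struct example_cache *cache = container_of(shrink, struct example_cache,
						   shrinker);

	return list_lru_shrink_count(&cache->lru, sc);
}
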
unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * Decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items.
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock(&nlru->lock);
	ret = __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
				  isolate, cb_arg, nr_to_walk);
	spin_unlock(&nlru->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
				      nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			struct list_lru_node *nlru = &lru->node[nid];

			spin_lock(&nlru->lock);
			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			spin_unlock(&nlru->lock);

			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

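/*
 * Illustrative sketch, not part of the list_lru implementation: a callback
 * matching list_lru_walk_cb for use with the walkers above.  It runs with
 * the nlru lock held, uses list_lru_isolate_move() so the per-list item
 * count stays consistent, and collects victims on a caller-provided list
 * passed via cb_arg.  A real callback would also try-lock the object and
 * return LRU_SKIP or LRU_ROTATE when it cannot be reclaimed right now.
 * The function name is hypothetical.
 */
static __maybe_unused enum lru_status
example_isolate(struct list_head *item, struct list_lru_one *list,
		spinlock_t *lock, void *cb_arg)
{
	struct list_head *freeable = cb_arg;	/* dispose list from the caller */

	list_lru_isolate_move(list, item, freeable);
	return LRU_REMOVED;
}
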
static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i - 1);
	return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	int size = memcg_nr_cache_ids;

	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
			      size * sizeof(void *), GFP_KERNEL);
	if (!memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
		kvfree(memcg_lrus);
		return -ENOMEM;
	}
	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);

	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * This is called when the shrinker has already been unregistered
	 * and nobody can use it, so there is no need to use kvfree_rcu().
	 */
	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
	kvfree(memcg_lrus);
}

static void kvfree_rcu(struct rcu_head *head)
{
	struct list_lru_memcg *mlru;

	mlru = container_of(head, struct list_lru_memcg, rcu);
	kvfree(mlru);
}

static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = rcu_dereference_protected(nlru->memcg_lrus,
					lockdep_is_held(&list_lrus_mutex));
	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kvfree(new);
		return -ENOMEM;
	}

	memcpy(&new->lru, &old->lru, old_size * sizeof(void *));

	/*
	 * The locking below allows readers that hold nlru->lock to avoid
	 * taking rcu_read_lock (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	rcu_assign_pointer(nlru->memcg_lrus, new);
	spin_unlock_irq(&nlru->lock);

	call_rcu(&old->rcu, kvfree_rcu);
	return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	struct list_lru_memcg *memcg_lrus;

	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
					lockdep_is_held(&list_lrus_mutex));
	/*
	 * Do not bother shrinking the array back to the old size, because
	 * we cannot handle allocation failures here.
	 */
	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;

		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}

static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
				      int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	int dst_idx = dst_memcg->kmemcg_id;
	struct list_lru_one *src, *dst;
	bool set;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);
	set = (!dst->nr_items && src->nr_items);
	dst->nr_items += src->nr_items;
	if (set)
		memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
	src->nr_items = 0;

	spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, struct mem_cgroup *dst_memcg)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
}

void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_memcg);
	mutex_unlock(&list_lrus_mutex);
}

#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;
	int err = -ENOMEM;

#ifdef CONFIG_MEMCG_KMEM
	if (shrinker)
		lru->shrinker_id = shrinker->id;
	else
		lru->shrinker_id = -1;
#endif
	memcg_get_cache_ids();

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

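/*
 * Illustrative sketch, not part of the list_lru implementation: how a
 * memcg-aware user might pair this with a shrinker.  list_lru_init_memcg()
 * from <linux/list_lru.h> wraps __list_lru_init(lru, true, NULL, shrinker);
 * the shrinker's id must already be valid (e.g. assigned by
 * prealloc_shrinker()) so the lru->shrinker_id set above is meaningful.
 * Names and error handling are hypothetical.
 */
static __maybe_unused int example_lru_setup(struct list_lru *lru,
					    struct shrinker *shrinker)
{
	return list_lru_init_memcg(lru, shrinker);
}

static __maybe_unused void example_lru_teardown(struct list_lru *lru)
{
	/* Safe even if setup failed: list_lru_destroy() checks lru->node. */
	list_lru_destroy(lru);
}
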
void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
	lru->shrinker_id = -1;
#endif
	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);