/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
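
/*
 * A list_lru keeps one struct list_lru_node per NUMA node.  When the lru
 * is memcg aware (CONFIG_MEMCG_KMEM), each node additionally carries an
 * RCU-protected array of per-memcg struct list_lru_one lists, indexed by
 * memcg_cache_id().  Items are placed on the list matching the NUMA node
 * of the backing page and, for kmem-accounted objects, their memory cgroup.
 */
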
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
        mutex_lock(&list_lrus_mutex);
        list_add(&lru->list, &list_lrus);
        mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
        mutex_lock(&list_lrus_mutex);
        list_del(&lru->list);
        mutex_unlock(&list_lrus_mutex);
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
        /*
         * This needs node 0 to always be present, even
         * on systems supporting sparse numa ids.
         */
        return !!lru->node[0].memcg_lrus;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
        struct list_lru_memcg *memcg_lrus;
        /*
         * Either lock or RCU protects the array of per cgroup lists
         * from relocation (see memcg_update_list_lru_node).
         */
        memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
                                           lockdep_is_held(&nlru->lock));
        if (memcg_lrus && idx >= 0)
                return memcg_lrus->lru[idx];
        return &nlru->lru;
}

static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
        struct page *page;

        if (!memcg_kmem_enabled())
                return NULL;
        page = virt_to_head_page(ptr);
        return page->mem_cgroup;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
                   struct mem_cgroup **memcg_ptr)
{
        struct list_lru_one *l = &nlru->lru;
        struct mem_cgroup *memcg = NULL;

        if (!nlru->memcg_lrus)
                goto out;

        memcg = mem_cgroup_from_kmem(ptr);
        if (!memcg)
                goto out;

        l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
out:
        if (memcg_ptr)
                *memcg_ptr = memcg;
        return l;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
        return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
        return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
                   struct mem_cgroup **memcg_ptr)
{
        if (memcg_ptr)
                *memcg_ptr = NULL;
        return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */

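/**
 * list_lru_add - add an element to the lru list's tail
 * @lru:  the lru pointer
 * @item: the item to be added
 *
 * If @item is already linked on a list, nothing is done.  Otherwise the
 * item is added to the tail of the list selected by its NUMA node and,
 * for memcg-aware lrus, by the memcg owning the backing object.
 *
 * Return: true if @item was added, false if it was already on a list.
 */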
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;

        spin_lock(&nlru->lock);
        if (list_empty(item)) {
                l = list_lru_from_kmem(nlru, item, NULL);
                list_add_tail(item, &l->list);
                l->nr_items++;
                nlru->nr_items++;
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

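/**
 * list_lru_del - delete an element from the lru list
 * @lru:  the lru pointer
 * @item: the item to be deleted
 *
 * The counterpart of list_lru_add(): if @item is on a list, it is removed
 * and the per-list and per-node item counts are decremented.
 *
 * Return: true if @item was removed, false if it was not on a list.
 */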
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;

        spin_lock(&nlru->lock);
        if (!list_empty(item)) {
                l = list_lru_from_kmem(nlru, item, NULL);
                list_del_init(item);
                l->nr_items--;
                nlru->nr_items--;
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

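/**
 * list_lru_isolate - remove an item while walking the lru
 * @list: the list_lru_one the item currently sits on
 * @item: the item to remove
 *
 * Meant to be called from a list_lru_walk_cb callback with the node's lru
 * lock held; only @list->nr_items is updated here, the per-node counter is
 * adjusted by the walker (see __list_lru_walk_one()).
 */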
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
        list_del_init(item);
        list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

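/**
 * list_lru_isolate_move - move an item from the lru onto a private list
 * @list: the list_lru_one the item currently sits on
 * @item: the item to move
 * @head: the private list head to move @item onto
 *
 * Like list_lru_isolate(), but moves @item onto @head instead of unlinking
 * it completely.  Also expects the node's lru lock to be held.
 */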
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
                           struct list_head *head)
{
        list_move(item, head);
        list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

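/**
 * list_lru_count_one - return the number of objects on one list
 * @lru:   the lru pointer
 * @nid:   the NUMA node to count
 * @memcg: the memcg whose list should be counted
 *
 * The count is read under rcu_read_lock() without taking the node's
 * spinlock, so it may already be stale by the time the caller uses it.
 */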
unsigned long list_lru_count_one(struct list_lru *lru,
                                 int nid, struct mem_cgroup *memcg)
{
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;
        unsigned long count;

        rcu_read_lock();
        l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
        count = l->nr_items;
        rcu_read_unlock();

        return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

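/**
 * list_lru_count_node - return the number of objects on a NUMA node
 * @lru: the lru pointer
 * @nid: the NUMA node to count
 *
 * Returns the node-wide nr_items counter, which covers the node's own
 * list as well as all of its per-memcg lists.
 */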
unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
        struct list_lru_node *nlru;

        nlru = &lru->node[nid];
        return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
                    list_lru_walk_cb isolate, void *cb_arg,
                    unsigned long *nr_to_walk)
{
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;
        struct list_head *item, *n;
        unsigned long isolated = 0;

        spin_lock(&nlru->lock);
        l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
        list_for_each_safe(item, n, &l->list) {
                enum lru_status ret;

                /*
                 * decrement nr_to_walk first so that we don't livelock if we
                 * get stuck on large numbers of LRU_RETRY items
                 */
                if (!*nr_to_walk)
                        break;
                --*nr_to_walk;

                ret = isolate(item, l, &nlru->lock, cb_arg);
                switch (ret) {
                case LRU_REMOVED_RETRY:
                        assert_spin_locked(&nlru->lock);
                        /* fall through */
                case LRU_REMOVED:
                        isolated++;
                        nlru->nr_items--;
                        /*
                         * If the lru lock has been dropped, our list
                         * traversal is now invalid and so we have to
                         * restart from scratch.
                         */
                        if (ret == LRU_REMOVED_RETRY)
                                goto restart;
                        break;
                case LRU_ROTATE:
                        list_move_tail(item, &l->list);
                        break;
                case LRU_SKIP:
                        break;
                case LRU_RETRY:
                        /*
                         * The lru lock has been dropped, our list traversal is
                         * now invalid and so we have to restart from scratch.
                         */
                        assert_spin_locked(&nlru->lock);
                        goto restart;
                default:
                        BUG();
                }
        }

        spin_unlock(&nlru->lock);
        return isolated;
}

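/**
 * list_lru_walk_one - walk one per-node, per-memcg list
 * @lru:        the lru pointer
 * @nid:        the NUMA node to walk
 * @memcg:      the memcg whose list should be walked
 * @isolate:    callback invoked for each item
 * @cb_arg:     opaque argument passed to @isolate
 * @nr_to_walk: maximum number of items to visit, decremented as we go
 *
 * Return: the number of items reported as removed by @isolate.
 */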
unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
                  list_lru_walk_cb isolate, void *cb_arg,
                  unsigned long *nr_to_walk)
{
        return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
                                   isolate, cb_arg, nr_to_walk);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

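/**
 * list_lru_walk_node - walk every list on one NUMA node
 * @lru:        the lru pointer
 * @nid:        the NUMA node to walk
 * @isolate:    callback invoked for each item
 * @cb_arg:     opaque argument passed to @isolate
 * @nr_to_walk: maximum number of items to visit across all lists
 *
 * Walks the node's own list first and then, for memcg-aware lrus, each
 * per-memcg list until @nr_to_walk is exhausted.
 *
 * Return: the total number of items reported as removed by @isolate.
 */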
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
                                 list_lru_walk_cb isolate, void *cb_arg,
                                 unsigned long *nr_to_walk)
{
        long isolated = 0;
        int memcg_idx;

        isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
                                        nr_to_walk);
        if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
                for_each_memcg_cache_index(memcg_idx) {
                        isolated += __list_lru_walk_one(lru, nid, memcg_idx,
                                                isolate, cb_arg, nr_to_walk);
                        if (*nr_to_walk <= 0)
                                break;
                }
        }
        return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
        INIT_LIST_HEAD(&l->list);
        l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
                                          int begin, int end)
{
        int i;

        for (i = begin; i < end; i++)
                kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
                                      int begin, int end)
{
        int i;

        for (i = begin; i < end; i++) {
                struct list_lru_one *l;

                l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
                if (!l)
                        goto fail;

                init_one_lru(l);
                memcg_lrus->lru[i] = l;
        }
        return 0;
fail:
        /* Free only the slots that were actually populated: [begin, i). */
        __memcg_destroy_list_lru_node(memcg_lrus, begin, i);
        return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
        struct list_lru_memcg *memcg_lrus;
        int size = memcg_nr_cache_ids;

        memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
                              size * sizeof(void *), GFP_KERNEL);
        if (!memcg_lrus)
                return -ENOMEM;

        if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
                kvfree(memcg_lrus);
                return -ENOMEM;
        }
        RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);

        return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
        struct list_lru_memcg *memcg_lrus;
        /*
         * This is called when the shrinker has already been unregistered,
         * and nobody can use it. So, there is no need to use kvfree_rcu().
         */
        memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
        __memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
        kvfree(memcg_lrus);
}

static void kvfree_rcu(struct rcu_head *head)
{
        struct list_lru_memcg *mlru;

        mlru = container_of(head, struct list_lru_memcg, rcu);
        kvfree(mlru);
}

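/*
 * Grow one node's per-memcg array from old_size to new_size entries.  The
 * new slots are initialized, the old contents are copied over, the pointer
 * is switched under the node's lock, and the old array is freed after an
 * RCU grace period so that lockless readers stay safe.
 */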
static int memcg_update_list_lru_node(struct list_lru_node *nlru,
                                      int old_size, int new_size)
{
        struct list_lru_memcg *old, *new;

        BUG_ON(old_size > new_size);

        old = rcu_dereference_protected(nlru->memcg_lrus,
                                        lockdep_is_held(&list_lrus_mutex));
        new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        if (__memcg_init_list_lru_node(new, old_size, new_size)) {
                kvfree(new);
                return -ENOMEM;
        }

        memcpy(&new->lru, &old->lru, old_size * sizeof(void *));

        /*
         * The locking below allows readers that hold nlru->lock to avoid
         * taking rcu_read_lock (see list_lru_from_memcg_idx).
         *
         * Since list_lru_{add,del} may be called under an IRQ-safe lock,
         * we have to use IRQ-safe primitives here to avoid deadlock.
         */
        spin_lock_irq(&nlru->lock);
        rcu_assign_pointer(nlru->memcg_lrus, new);
        spin_unlock_irq(&nlru->lock);

        call_rcu(&old->rcu, kvfree_rcu);
        return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
                                              int old_size, int new_size)
{
        struct list_lru_memcg *memcg_lrus;

        memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
                                               lockdep_is_held(&list_lrus_mutex));
        /*
         * Do not bother shrinking the array back to the old size, because
         * we cannot handle allocation failures here.
         */
        __memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
        int i;

        if (!memcg_aware)
                return 0;

        for_each_node(i) {
                if (memcg_init_list_lru_node(&lru->node[i]))
                        goto fail;
        }
        return 0;
fail:
        for (i = i - 1; i >= 0; i--) {
                if (!lru->node[i].memcg_lrus)
                        continue;
                memcg_destroy_list_lru_node(&lru->node[i]);
        }
        return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
                                 int old_size, int new_size)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return 0;

        for_each_node(i) {
                if (memcg_update_list_lru_node(&lru->node[i],
                                               old_size, new_size))
                        goto fail;
        }
        return 0;
fail:
        for (i = i - 1; i >= 0; i--) {
                if (!lru->node[i].memcg_lrus)
                        continue;

                memcg_cancel_update_list_lru_node(&lru->node[i],
                                                  old_size, new_size);
        }
        return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
                                         int old_size, int new_size)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_cancel_update_list_lru_node(&lru->node[i],
                                                  old_size, new_size);
}

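/*
 * Resize the per-memcg arrays of every registered memcg-aware lru to
 * new_size entries (called when the number of memcg cache ids grows).
 * On failure, updates already performed by this call are rolled back.
 */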
int memcg_update_all_list_lrus(int new_size)
{
        int ret = 0;
        struct list_lru *lru;
        int old_size = memcg_nr_cache_ids;

        mutex_lock(&list_lrus_mutex);
        list_for_each_entry(lru, &list_lrus, list) {
                ret = memcg_update_list_lru(lru, old_size, new_size);
                if (ret)
                        goto fail;
        }
out:
        mutex_unlock(&list_lrus_mutex);
        return ret;
fail:
        list_for_each_entry_continue_reverse(lru, &list_lrus, list)
                memcg_cancel_update_list_lru(lru, old_size, new_size);
        goto out;
}

static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
                                      int src_idx, int dst_idx)
{
        struct list_lru_one *src, *dst;

        /*
         * Since list_lru_{add,del} may be called under an IRQ-safe lock,
         * we have to use IRQ-safe primitives here to avoid deadlock.
         */
        spin_lock_irq(&nlru->lock);

        src = list_lru_from_memcg_idx(nlru, src_idx);
        dst = list_lru_from_memcg_idx(nlru, dst_idx);

        list_splice_init(&src->list, &dst->list);
        dst->nr_items += src->nr_items;
        src->nr_items = 0;

        spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
                                 int src_idx, int dst_idx)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
}

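/*
 * Move everything charged to the memcg with cache id src_idx onto the
 * lists of dst_idx (typically its parent), on every node of every
 * registered lru, so that items are not lost when a memcg goes away.
 */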
void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
{
        struct list_lru *lru;

        mutex_lock(&list_lrus_mutex);
        list_for_each_entry(lru, &list_lrus, list)
                memcg_drain_list_lru(lru, src_idx, dst_idx);
        mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
        return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

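/**
 * __list_lru_init - initialize a list_lru structure
 * @lru:         the lru to initialize
 * @memcg_aware: if true, allocate per-memcg lists on each node
 * @key:         optional lock class key for the per-node spinlocks
 * @shrinker:    shrinker the lru is tied to, used to remember its id
 *
 * Allocates one list_lru_node per possible NUMA node and, if requested,
 * the per-memcg arrays, then registers the lru so it can be resized and
 * drained along with the memcg infrastructure.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */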
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
                    struct lock_class_key *key, struct shrinker *shrinker)
{
        int i;
        size_t size = sizeof(*lru->node) * nr_node_ids;
        int err = -ENOMEM;

#ifdef CONFIG_MEMCG_KMEM
        if (shrinker)
                lru->shrinker_id = shrinker->id;
        else
                lru->shrinker_id = -1;
#endif
        memcg_get_cache_ids();

        lru->node = kzalloc(size, GFP_KERNEL);
        if (!lru->node)
                goto out;

        for_each_node(i) {
                spin_lock_init(&lru->node[i].lock);
                if (key)
                        lockdep_set_class(&lru->node[i].lock, key);
                init_one_lru(&lru->node[i].lru);
        }

        err = memcg_init_list_lru(lru, memcg_aware);
        if (err) {
                kfree(lru->node);
                /* Do this so a list_lru_destroy() doesn't crash: */
                lru->node = NULL;
                goto out;
        }

        list_lru_register(lru);
out:
        memcg_put_cache_ids();
        return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

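/**
 * list_lru_destroy - undo __list_lru_init()
 * @lru: the lru to tear down
 *
 * Unregisters the lru and frees the per-node and per-memcg structures.
 * Safe to call on an lru that was never initialized or has already been
 * destroyed, since lru->node is checked and reset to NULL.
 */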
void list_lru_destroy(struct list_lru *lru)
{
        /* Already destroyed or not yet initialized? */
        if (!lru->node)
                return;

        memcg_get_cache_ids();

        list_lru_unregister(lru);

        memcg_destroy_list_lru(lru);
        kfree(lru->node);
        lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
        lru->shrinker_id = -1;
#endif
        memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);