debugobjects: Scale thresholds with # of CPUs
lib/debugobjects.c
1 /*
2  * Generic infrastructure for lifetime debugging of objects.
3  *
4  * Started by Thomas Gleixner
5  *
6  * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
7  *
8  * For licencing details see kernel-base/COPYING
9  */
10
11 #define pr_fmt(fmt) "ODEBUG: " fmt
12
13 #include <linux/debugobjects.h>
14 #include <linux/interrupt.h>
15 #include <linux/sched.h>
16 #include <linux/seq_file.h>
17 #include <linux/debugfs.h>
18 #include <linux/slab.h>
19 #include <linux/hash.h>
20
21 #define ODEBUG_HASH_BITS        14
22 #define ODEBUG_HASH_SIZE        (1 << ODEBUG_HASH_BITS)
23
24 #define ODEBUG_POOL_SIZE        1024
25 #define ODEBUG_POOL_MIN_LEVEL   256
26
27 #define ODEBUG_CHUNK_SHIFT      PAGE_SHIFT
28 #define ODEBUG_CHUNK_SIZE       (1 << ODEBUG_CHUNK_SHIFT)
29 #define ODEBUG_CHUNK_MASK       (~(ODEBUG_CHUNK_SIZE - 1))
30
31 struct debug_bucket {
32         struct hlist_head       list;
33         raw_spinlock_t          lock;
34 };
35
36 static struct debug_bucket      obj_hash[ODEBUG_HASH_SIZE];
37
38 static struct debug_obj         obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
39
40 static DEFINE_RAW_SPINLOCK(pool_lock);
41
42 static HLIST_HEAD(obj_pool);
43
44 static int                      obj_pool_min_free = ODEBUG_POOL_SIZE;
45 static int                      obj_pool_free = ODEBUG_POOL_SIZE;
46 static int                      obj_pool_used;
47 static int                      obj_pool_max_used;
48 static struct kmem_cache        *obj_cache;
49
50 static int                      debug_objects_maxchain __read_mostly;
51 static int                      debug_objects_fixups __read_mostly;
52 static int                      debug_objects_warnings __read_mostly;
53 static int                      debug_objects_enabled __read_mostly
54                                 = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
55 static int                      debug_objects_pool_size __read_mostly
56                                 = ODEBUG_POOL_SIZE;
57 static int                      debug_objects_pool_min_level __read_mostly
58                                 = ODEBUG_POOL_MIN_LEVEL;
59 static struct debug_obj_descr   *descr_test  __read_mostly;
60
61 /*
62  * Track the number of kmem_cache_alloc() and kmem_cache_free() calls done.
63  */
64 static int                      debug_objects_alloc;
65 static int                      debug_objects_freed;
66
67 static void free_obj_work(struct work_struct *work);
68 static DECLARE_WORK(debug_obj_work, free_obj_work);
69
70 static int __init enable_object_debug(char *str)
71 {
72         debug_objects_enabled = 1;
73         return 0;
74 }
75
76 static int __init disable_object_debug(char *str)
77 {
78         debug_objects_enabled = 0;
79         return 0;
80 }
81
82 early_param("debug_objects", enable_object_debug);
83 early_param("no_debug_objects", disable_object_debug);
84
85 static const char *obj_states[ODEBUG_STATE_MAX] = {
86         [ODEBUG_STATE_NONE]             = "none",
87         [ODEBUG_STATE_INIT]             = "initialized",
88         [ODEBUG_STATE_INACTIVE]         = "inactive",
89         [ODEBUG_STATE_ACTIVE]           = "active",
90         [ODEBUG_STATE_DESTROYED]        = "destroyed",
91         [ODEBUG_STATE_NOTAVAILABLE]     = "not available",
92 };
93
94 static void fill_pool(void)
95 {
96         gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
97         struct debug_obj *new;
98         unsigned long flags;
99
100         if (likely(obj_pool_free >= debug_objects_pool_min_level))
101                 return;
102
103         if (unlikely(!obj_cache))
104                 return;
105
106         while (obj_pool_free < debug_objects_pool_min_level) {
107
108                 new = kmem_cache_zalloc(obj_cache, gfp);
109                 if (!new)
110                         return;
111
112                 raw_spin_lock_irqsave(&pool_lock, flags);
113                 hlist_add_head(&new->node, &obj_pool);
114                 debug_objects_alloc++;
115                 obj_pool_free++;
116                 raw_spin_unlock_irqrestore(&pool_lock, flags);
117         }
118 }
119
120 /*
121  * Lookup an object in the hash bucket.
122  */
123 static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
124 {
125         struct debug_obj *obj;
126         int cnt = 0;
127
128         hlist_for_each_entry(obj, &b->list, node) {
129                 cnt++;
130                 if (obj->object == addr)
131                         return obj;
132         }
133         if (cnt > debug_objects_maxchain)
134                 debug_objects_maxchain = cnt;
135
136         return NULL;
137 }
138
139 /*
140  * Allocate a new object. If the pool is empty, switch off the debugger.
141  * Must be called with interrupts disabled.
142  */
143 static struct debug_obj *
144 alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
145 {
146         struct debug_obj *obj = NULL;
147
148         raw_spin_lock(&pool_lock);
149         if (obj_pool.first) {
150                 obj         = hlist_entry(obj_pool.first, typeof(*obj), node);
151
152                 obj->object = addr;
153                 obj->descr  = descr;
154                 obj->state  = ODEBUG_STATE_NONE;
155                 obj->astate = 0;
156                 hlist_del(&obj->node);
157
158                 hlist_add_head(&obj->node, &b->list);
159
160                 obj_pool_used++;
161                 if (obj_pool_used > obj_pool_max_used)
162                         obj_pool_max_used = obj_pool_used;
163
164                 obj_pool_free--;
165                 if (obj_pool_free < obj_pool_min_free)
166                         obj_pool_min_free = obj_pool_free;
167         }
168         raw_spin_unlock(&pool_lock);
169
170         return obj;
171 }
172
173 /*
174  * workqueue function to free objects.
175  */
176 static void free_obj_work(struct work_struct *work)
177 {
178         struct debug_obj *obj;
179         unsigned long flags;
180
181         raw_spin_lock_irqsave(&pool_lock, flags);
182         while (obj_pool_free > debug_objects_pool_size) {
183                 obj = hlist_entry(obj_pool.first, typeof(*obj), node);
184                 hlist_del(&obj->node);
185                 obj_pool_free--;
186                 debug_objects_freed++;
187                 /*
188                  * We release pool_lock across kmem_cache_free() to
189                  * avoid contention on pool_lock.
190                  */
191                 raw_spin_unlock_irqrestore(&pool_lock, flags);
192                 kmem_cache_free(obj_cache, obj);
193                 raw_spin_lock_irqsave(&pool_lock, flags);
194         }
195         raw_spin_unlock_irqrestore(&pool_lock, flags);
196 }
197
198 /*
199  * Put the object back into the pool and schedule work to free objects
200  * if necessary.
201  */
202 static void free_object(struct debug_obj *obj)
203 {
204         unsigned long flags;
205         int sched = 0;
206
207         raw_spin_lock_irqsave(&pool_lock, flags);
208         /*
209          * schedule work when the pool is filled and the cache is
210          * initialized:
211          */
212         if (obj_pool_free > debug_objects_pool_size && obj_cache)
213                 sched = 1;
214         hlist_add_head(&obj->node, &obj_pool);
215         obj_pool_free++;
216         obj_pool_used--;
217         raw_spin_unlock_irqrestore(&pool_lock, flags);
218         if (sched)
219                 schedule_work(&debug_obj_work);
220 }
221
222 /*
223  * We ran out of memory. That means we probably have tons of objects
224  * allocated.
225  */
226 static void debug_objects_oom(void)
227 {
228         struct debug_bucket *db = obj_hash;
229         struct hlist_node *tmp;
230         HLIST_HEAD(freelist);
231         struct debug_obj *obj;
232         unsigned long flags;
233         int i;
234
235         pr_warn("Out of memory. ODEBUG disabled\n");
236
237         for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
238                 raw_spin_lock_irqsave(&db->lock, flags);
239                 hlist_move_list(&db->list, &freelist);
240                 raw_spin_unlock_irqrestore(&db->lock, flags);
241
242                 /* Now free them */
243                 hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
244                         hlist_del(&obj->node);
245                         free_object(obj);
246                 }
247         }
248 }
249
250 /*
251  * We use the pfn of the address for the hash. That way we can check
252  * for freed objects simply by checking the affected bucket.
253  */
254 static struct debug_bucket *get_bucket(unsigned long addr)
255 {
256         unsigned long hash;
257
258         hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
259         return &obj_hash[hash];
260 }
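/*
 * Illustrative note (not part of the original file): assuming a 4 KiB
 * PAGE_SIZE, ODEBUG_CHUNK_SHIFT is 12, so 0xffff880012345010 and
 * 0xffff880012345ff0 both yield chunk 0xffff880012345 and hash to the
 * same bucket, while 0xffff880012346000 is a different chunk and may
 * land elsewhere. This is what lets __debug_check_no_obj_freed() below
 * scan a freed range one bucket per ODEBUG_CHUNK_SIZE chunk.
 */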
261
262 static void debug_print_object(struct debug_obj *obj, char *msg)
263 {
264         struct debug_obj_descr *descr = obj->descr;
265         static int limit;
266
267         if (limit < 5 && descr != descr_test) {
268                 void *hint = descr->debug_hint ?
269                         descr->debug_hint(obj->object) : NULL;
270                 limit++;
271                 WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
272                                  "object type: %s hint: %pS\n",
273                         msg, obj_states[obj->state], obj->astate,
274                         descr->name, hint);
275         }
276         debug_objects_warnings++;
277 }
278
279 /*
280  * Try to repair the damage, so we have a better chance to get useful
281  * debug output.
282  */
283 static bool
284 debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
285                    void * addr, enum debug_obj_state state)
286 {
287         if (fixup && fixup(addr, state)) {
288                 debug_objects_fixups++;
289                 return true;
290         }
291         return false;
292 }
293
294 static void debug_object_is_on_stack(void *addr, int onstack)
295 {
296         int is_on_stack;
297         static int limit;
298
299         if (limit > 4)
300                 return;
301
302         is_on_stack = object_is_on_stack(addr);
303         if (is_on_stack == onstack)
304                 return;
305
306         limit++;
307         if (is_on_stack)
308                 pr_warn("object is on stack, but not annotated\n");
309         else
310                 pr_warn("object is not on stack, but annotated\n");
311         WARN_ON(1);
312 }
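/*
 * Illustrative example (not part of the original file): the check above
 * fires when the annotation and the actual storage disagree. With a
 * hypothetical type and descriptor:
 *
 *	void foo_example(void)
 *	{
 *		struct foo f;					// stack object
 *
 *		debug_object_init(&f, &foo_debug_descr);	// wrong helper
 *	}
 *
 * object_is_on_stack(&f) is true but onstack was passed as 0, so
 * "object is on stack, but not annotated" is printed. Using
 * debug_object_init_on_stack() for stack objects keeps the check quiet.
 */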
313
314 static void
315 __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
316 {
317         enum debug_obj_state state;
318         struct debug_bucket *db;
319         struct debug_obj *obj;
320         unsigned long flags;
321
322         fill_pool();
323
324         db = get_bucket((unsigned long) addr);
325
326         raw_spin_lock_irqsave(&db->lock, flags);
327
328         obj = lookup_object(addr, db);
329         if (!obj) {
330                 obj = alloc_object(addr, db, descr);
331                 if (!obj) {
332                         debug_objects_enabled = 0;
333                         raw_spin_unlock_irqrestore(&db->lock, flags);
334                         debug_objects_oom();
335                         return;
336                 }
337                 debug_object_is_on_stack(addr, onstack);
338         }
339
340         switch (obj->state) {
341         case ODEBUG_STATE_NONE:
342         case ODEBUG_STATE_INIT:
343         case ODEBUG_STATE_INACTIVE:
344                 obj->state = ODEBUG_STATE_INIT;
345                 break;
346
347         case ODEBUG_STATE_ACTIVE:
348                 debug_print_object(obj, "init");
349                 state = obj->state;
350                 raw_spin_unlock_irqrestore(&db->lock, flags);
351                 debug_object_fixup(descr->fixup_init, addr, state);
352                 return;
353
354         case ODEBUG_STATE_DESTROYED:
355                 debug_print_object(obj, "init");
356                 break;
357         default:
358                 break;
359         }
360
361         raw_spin_unlock_irqrestore(&db->lock, flags);
362 }
363
364 /**
365  * debug_object_init - debug checks when an object is initialized
366  * @addr:       address of the object
367  * @descr:      pointer to an object specific debug description structure
368  */
369 void debug_object_init(void *addr, struct debug_obj_descr *descr)
370 {
371         if (!debug_objects_enabled)
372                 return;
373
374         __debug_object_init(addr, descr, 0);
375 }
376 EXPORT_SYMBOL_GPL(debug_object_init);
377
378 /**
379  * debug_object_init_on_stack - debug checks when an object on stack is
380  *                              initialized
381  * @addr:       address of the object
382  * @descr:      pointer to an object specific debug description structure
383  */
384 void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
385 {
386         if (!debug_objects_enabled)
387                 return;
388
389         __debug_object_init(addr, descr, 1);
390 }
391 EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
392
393 /**
394  * debug_object_activate - debug checks when an object is activated
395  * @addr:       address of the object
396  * @descr:      pointer to an object specific debug description structure
397  * Returns 0 on success, -EINVAL if the check failed.
398  */
399 int debug_object_activate(void *addr, struct debug_obj_descr *descr)
400 {
401         enum debug_obj_state state;
402         struct debug_bucket *db;
403         struct debug_obj *obj;
404         unsigned long flags;
405         int ret;
406         struct debug_obj o = { .object = addr,
407                                .state = ODEBUG_STATE_NOTAVAILABLE,
408                                .descr = descr };
409
410         if (!debug_objects_enabled)
411                 return 0;
412
413         db = get_bucket((unsigned long) addr);
414
415         raw_spin_lock_irqsave(&db->lock, flags);
416
417         obj = lookup_object(addr, db);
418         if (obj) {
419                 switch (obj->state) {
420                 case ODEBUG_STATE_INIT:
421                 case ODEBUG_STATE_INACTIVE:
422                         obj->state = ODEBUG_STATE_ACTIVE;
423                         ret = 0;
424                         break;
425
426                 case ODEBUG_STATE_ACTIVE:
427                         debug_print_object(obj, "activate");
428                         state = obj->state;
429                         raw_spin_unlock_irqrestore(&db->lock, flags);
430                         ret = debug_object_fixup(descr->fixup_activate, addr, state);
431                         return ret ? 0 : -EINVAL;
432
433                 case ODEBUG_STATE_DESTROYED:
434                         debug_print_object(obj, "activate");
435                         ret = -EINVAL;
436                         break;
437                 default:
438                         ret = 0;
439                         break;
440                 }
441                 raw_spin_unlock_irqrestore(&db->lock, flags);
442                 return ret;
443         }
444
445         raw_spin_unlock_irqrestore(&db->lock, flags);
446         /*
447          * We are here when a static object is activated. We
448          * let the type specific code confirm whether this is
449          * true or not. If true, we just make sure that the
450          * static object is tracked in the object tracker. If
451          * not, this must be a bug, so we try to fix it up.
452          */
453         if (descr->is_static_object && descr->is_static_object(addr)) {
454                 /* track this static object */
455                 debug_object_init(addr, descr);
456                 debug_object_activate(addr, descr);
457         } else {
458                 debug_print_object(&o, "activate");
459                 ret = debug_object_fixup(descr->fixup_activate, addr,
460                                         ODEBUG_STATE_NOTAVAILABLE);
461                 return ret ? 0 : -EINVAL;
462         }
463         return 0;
464 }
465 EXPORT_SYMBOL_GPL(debug_object_activate);
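/*
 * Usage sketch (illustrative only, not part of the original file): a
 * subsystem that wants its objects tracked defines a struct
 * debug_obj_descr and brackets the object's lifetime with the calls
 * above. "struct foo" and the foo_* callbacks are hypothetical.
 *
 *	static struct debug_obj_descr foo_debug_descr = {
 *		.name		= "foo",
 *		.debug_hint	= foo_debug_hint,
 *		.fixup_init	= foo_fixup_init,
 *		.fixup_activate	= foo_fixup_activate,
 *		.fixup_free	= foo_fixup_free,
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *		// ... normal initialization of *f ...
 *	}
 *
 *	int foo_start(struct foo *f)
 *	{
 *		// fails with -EINVAL if the state machine is violated
 *		return debug_object_activate(f, &foo_debug_descr);
 *	}
 *
 * Stack-allocated instances would use debug_object_init_on_stack()
 * instead of debug_object_init(), and debug_object_free() is called
 * before the memory is released.
 */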
466
467 /**
468  * debug_object_deactivate - debug checks when an object is deactivated
469  * @addr:       address of the object
470  * @descr:      pointer to an object specific debug description structure
471  */
472 void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
473 {
474         struct debug_bucket *db;
475         struct debug_obj *obj;
476         unsigned long flags;
477
478         if (!debug_objects_enabled)
479                 return;
480
481         db = get_bucket((unsigned long) addr);
482
483         raw_spin_lock_irqsave(&db->lock, flags);
484
485         obj = lookup_object(addr, db);
486         if (obj) {
487                 switch (obj->state) {
488                 case ODEBUG_STATE_INIT:
489                 case ODEBUG_STATE_INACTIVE:
490                 case ODEBUG_STATE_ACTIVE:
491                         if (!obj->astate)
492                                 obj->state = ODEBUG_STATE_INACTIVE;
493                         else
494                                 debug_print_object(obj, "deactivate");
495                         break;
496
497                 case ODEBUG_STATE_DESTROYED:
498                         debug_print_object(obj, "deactivate");
499                         break;
500                 default:
501                         break;
502                 }
503         } else {
504                 struct debug_obj o = { .object = addr,
505                                        .state = ODEBUG_STATE_NOTAVAILABLE,
506                                        .descr = descr };
507
508                 debug_print_object(&o, "deactivate");
509         }
510
511         raw_spin_unlock_irqrestore(&db->lock, flags);
512 }
513 EXPORT_SYMBOL_GPL(debug_object_deactivate);
514
515 /**
516  * debug_object_destroy - debug checks when an object is destroyed
517  * @addr:       address of the object
518  * @descr:      pointer to an object specific debug description structure
519  */
520 void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
521 {
522         enum debug_obj_state state;
523         struct debug_bucket *db;
524         struct debug_obj *obj;
525         unsigned long flags;
526
527         if (!debug_objects_enabled)
528                 return;
529
530         db = get_bucket((unsigned long) addr);
531
532         raw_spin_lock_irqsave(&db->lock, flags);
533
534         obj = lookup_object(addr, db);
535         if (!obj)
536                 goto out_unlock;
537
538         switch (obj->state) {
539         case ODEBUG_STATE_NONE:
540         case ODEBUG_STATE_INIT:
541         case ODEBUG_STATE_INACTIVE:
542                 obj->state = ODEBUG_STATE_DESTROYED;
543                 break;
544         case ODEBUG_STATE_ACTIVE:
545                 debug_print_object(obj, "destroy");
546                 state = obj->state;
547                 raw_spin_unlock_irqrestore(&db->lock, flags);
548                 debug_object_fixup(descr->fixup_destroy, addr, state);
549                 return;
550
551         case ODEBUG_STATE_DESTROYED:
552                 debug_print_object(obj, "destroy");
553                 break;
554         default:
555                 break;
556         }
557 out_unlock:
558         raw_spin_unlock_irqrestore(&db->lock, flags);
559 }
560 EXPORT_SYMBOL_GPL(debug_object_destroy);
561
562 /**
563  * debug_object_free - debug checks when an object is freed
564  * @addr:       address of the object
565  * @descr:      pointer to an object specific debug description structure
566  */
567 void debug_object_free(void *addr, struct debug_obj_descr *descr)
568 {
569         enum debug_obj_state state;
570         struct debug_bucket *db;
571         struct debug_obj *obj;
572         unsigned long flags;
573
574         if (!debug_objects_enabled)
575                 return;
576
577         db = get_bucket((unsigned long) addr);
578
579         raw_spin_lock_irqsave(&db->lock, flags);
580
581         obj = lookup_object(addr, db);
582         if (!obj)
583                 goto out_unlock;
584
585         switch (obj->state) {
586         case ODEBUG_STATE_ACTIVE:
587                 debug_print_object(obj, "free");
588                 state = obj->state;
589                 raw_spin_unlock_irqrestore(&db->lock, flags);
590                 debug_object_fixup(descr->fixup_free, addr, state);
591                 return;
592         default:
593                 hlist_del(&obj->node);
594                 raw_spin_unlock_irqrestore(&db->lock, flags);
595                 free_object(obj);
596                 return;
597         }
598 out_unlock:
599         raw_spin_unlock_irqrestore(&db->lock, flags);
600 }
601 EXPORT_SYMBOL_GPL(debug_object_free);
602
603 /**
604  * debug_object_assert_init - debug checks when object should be init-ed
605  * @addr:       address of the object
606  * @descr:      pointer to an object specific debug description structure
607  */
608 void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
609 {
610         struct debug_bucket *db;
611         struct debug_obj *obj;
612         unsigned long flags;
613
614         if (!debug_objects_enabled)
615                 return;
616
617         db = get_bucket((unsigned long) addr);
618
619         raw_spin_lock_irqsave(&db->lock, flags);
620
621         obj = lookup_object(addr, db);
622         if (!obj) {
623                 struct debug_obj o = { .object = addr,
624                                        .state = ODEBUG_STATE_NOTAVAILABLE,
625                                        .descr = descr };
626
627                 raw_spin_unlock_irqrestore(&db->lock, flags);
628                 /*
629                  * Maybe the object is static, and we let the type specific
630                  * code confirm. If so, track this static object; otherwise
631                  * invoke the fixup.
632                  */
633                 if (descr->is_static_object && descr->is_static_object(addr)) {
634                         /* Track this static object */
635                         debug_object_init(addr, descr);
636                 } else {
637                         debug_print_object(&o, "assert_init");
638                         debug_object_fixup(descr->fixup_assert_init, addr,
639                                            ODEBUG_STATE_NOTAVAILABLE);
640                 }
641                 return;
642         }
643
644         raw_spin_unlock_irqrestore(&db->lock, flags);
645 }
646 EXPORT_SYMBOL_GPL(debug_object_assert_init);
647
648 /**
649  * debug_object_active_state - debug checks object usage state machine
650  * @addr:       address of the object
651  * @descr:      pointer to an object specific debug description structure
652  * @expect:     expected state
653  * @next:       state to move to if expected state is found
654  */
655 void
656 debug_object_active_state(void *addr, struct debug_obj_descr *descr,
657                           unsigned int expect, unsigned int next)
658 {
659         struct debug_bucket *db;
660         struct debug_obj *obj;
661         unsigned long flags;
662
663         if (!debug_objects_enabled)
664                 return;
665
666         db = get_bucket((unsigned long) addr);
667
668         raw_spin_lock_irqsave(&db->lock, flags);
669
670         obj = lookup_object(addr, db);
671         if (obj) {
672                 switch (obj->state) {
673                 case ODEBUG_STATE_ACTIVE:
674                         if (obj->astate == expect)
675                                 obj->astate = next;
676                         else
677                                 debug_print_object(obj, "active_state");
678                         break;
679
680                 default:
681                         debug_print_object(obj, "active_state");
682                         break;
683                 }
684         } else {
685                 struct debug_obj o = { .object = addr,
686                                        .state = ODEBUG_STATE_NOTAVAILABLE,
687                                        .descr = descr };
688
689                 debug_print_object(&o, "active_state");
690         }
691
692         raw_spin_unlock_irqrestore(&db->lock, flags);
693 }
694 EXPORT_SYMBOL_GPL(debug_object_active_state);
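/*
 * Illustrative sketch (not part of the original file): "astate" is an
 * extra, caller-defined sub-state of ODEBUG_STATE_ACTIVE. A hypothetical
 * user could define
 *
 *	#define FOO_ASTATE_IDLE		0
 *	#define FOO_ASTATE_QUEUED	1
 *
 * and call
 *
 *	debug_object_active_state(f, &foo_debug_descr,
 *				  FOO_ASTATE_IDLE, FOO_ASTATE_QUEUED);
 *
 * before queueing the object, with the reverse transition once the work
 * completes. Since debug_object_deactivate() only moves an object to
 * ODEBUG_STATE_INACTIVE while obj->astate is 0, deactivating an object
 * that is still FOO_ASTATE_QUEUED produces a warning.
 */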
695
696 #ifdef CONFIG_DEBUG_OBJECTS_FREE
697 static void __debug_check_no_obj_freed(const void *address, unsigned long size)
698 {
699         unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
700         struct hlist_node *tmp;
701         HLIST_HEAD(freelist);
702         struct debug_obj_descr *descr;
703         enum debug_obj_state state;
704         struct debug_bucket *db;
705         struct debug_obj *obj;
706         int cnt;
707
708         saddr = (unsigned long) address;
709         eaddr = saddr + size;
710         paddr = saddr & ODEBUG_CHUNK_MASK;
711         chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
712         chunks >>= ODEBUG_CHUNK_SHIFT;
713
714         for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
715                 db = get_bucket(paddr);
716
717 repeat:
718                 cnt = 0;
719                 raw_spin_lock_irqsave(&db->lock, flags);
720                 hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
721                         cnt++;
722                         oaddr = (unsigned long) obj->object;
723                         if (oaddr < saddr || oaddr >= eaddr)
724                                 continue;
725
726                         switch (obj->state) {
727                         case ODEBUG_STATE_ACTIVE:
728                                 debug_print_object(obj, "free");
729                                 descr = obj->descr;
730                                 state = obj->state;
731                                 raw_spin_unlock_irqrestore(&db->lock, flags);
732                                 debug_object_fixup(descr->fixup_free,
733                                                    (void *) oaddr, state);
734                                 goto repeat;
735                         default:
736                                 hlist_del(&obj->node);
737                                 hlist_add_head(&obj->node, &freelist);
738                                 break;
739                         }
740                 }
741                 raw_spin_unlock_irqrestore(&db->lock, flags);
742
743                 /* Now free them */
744                 hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
745                         hlist_del(&obj->node);
746                         free_object(obj);
747                 }
748
749                 if (cnt > debug_objects_maxchain)
750                         debug_objects_maxchain = cnt;
751         }
752 }
753
754 void debug_check_no_obj_freed(const void *address, unsigned long size)
755 {
756         if (debug_objects_enabled)
757                 __debug_check_no_obj_freed(address, size);
758 }
759 #endif
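/*
 * Illustrative note (not part of the original file): memory-release
 * paths are expected to call this hook before a range goes back to the
 * allocator, roughly:
 *
 *	debug_check_no_obj_freed(ptr, size);
 *	// ... actually release [ptr, ptr + size) ...
 *
 * Any tracked object inside the range that is still ODEBUG_STATE_ACTIVE
 * is reported and run through its descriptor's fixup_free() handler; all
 * other objects are simply removed from the hash and returned to the
 * object pool.
 */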
760
761 #ifdef CONFIG_DEBUG_FS
762
763 static int debug_stats_show(struct seq_file *m, void *v)
764 {
765         seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
766         seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
767         seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
768         seq_printf(m, "pool_free     :%d\n", obj_pool_free);
769         seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
770         seq_printf(m, "pool_used     :%d\n", obj_pool_used);
771         seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
772         seq_printf(m, "objects_alloc :%d\n", debug_objects_alloc);
773         seq_printf(m, "objects_freed :%d\n", debug_objects_freed);
774         return 0;
775 }
776
777 static int debug_stats_open(struct inode *inode, struct file *filp)
778 {
779         return single_open(filp, debug_stats_show, NULL);
780 }
781
782 static const struct file_operations debug_stats_fops = {
783         .open           = debug_stats_open,
784         .read           = seq_read,
785         .llseek         = seq_lseek,
786         .release        = single_release,
787 };
788
789 static int __init debug_objects_init_debugfs(void)
790 {
791         struct dentry *dbgdir, *dbgstats;
792
793         if (!debug_objects_enabled)
794                 return 0;
795
796         dbgdir = debugfs_create_dir("debug_objects", NULL);
797         if (!dbgdir)
798                 return -ENOMEM;
799
800         dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
801                                        &debug_stats_fops);
802         if (!dbgstats)
803                 goto err;
804
805         return 0;
806
807 err:
808         debugfs_remove(dbgdir);
809
810         return -ENOMEM;
811 }
812 __initcall(debug_objects_init_debugfs);
813
814 #else
815 static inline void debug_objects_init_debugfs(void) { }
816 #endif
817
818 #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
819
820 /* Random data structure for the self test */
821 struct self_test {
822         unsigned long   dummy1[6];
823         int             static_init;
824         unsigned long   dummy2[3];
825 };
826
827 static __initdata struct debug_obj_descr descr_type_test;
828
829 static bool __init is_static_object(void *addr)
830 {
831         struct self_test *obj = addr;
832
833         return obj->static_init;
834 }
835
836 /*
837  * fixup_init is called when:
838  * - an active object is initialized
839  */
840 static bool __init fixup_init(void *addr, enum debug_obj_state state)
841 {
842         struct self_test *obj = addr;
843
844         switch (state) {
845         case ODEBUG_STATE_ACTIVE:
846                 debug_object_deactivate(obj, &descr_type_test);
847                 debug_object_init(obj, &descr_type_test);
848                 return true;
849         default:
850                 return false;
851         }
852 }
853
854 /*
855  * fixup_activate is called when:
856  * - an active object is activated
857  * - an unknown non-static object is activated
858  */
859 static bool __init fixup_activate(void *addr, enum debug_obj_state state)
860 {
861         struct self_test *obj = addr;
862
863         switch (state) {
864         case ODEBUG_STATE_NOTAVAILABLE:
865                 return true;
866         case ODEBUG_STATE_ACTIVE:
867                 debug_object_deactivate(obj, &descr_type_test);
868                 debug_object_activate(obj, &descr_type_test);
869                 return true;
870
871         default:
872                 return false;
873         }
874 }
875
876 /*
877  * fixup_destroy is called when:
878  * - an active object is destroyed
879  */
880 static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
881 {
882         struct self_test *obj = addr;
883
884         switch (state) {
885         case ODEBUG_STATE_ACTIVE:
886                 debug_object_deactivate(obj, &descr_type_test);
887                 debug_object_destroy(obj, &descr_type_test);
888                 return true;
889         default:
890                 return false;
891         }
892 }
893
894 /*
895  * fixup_free is called when:
896  * - an active object is freed
897  */
898 static bool __init fixup_free(void *addr, enum debug_obj_state state)
899 {
900         struct self_test *obj = addr;
901
902         switch (state) {
903         case ODEBUG_STATE_ACTIVE:
904                 debug_object_deactivate(obj, &descr_type_test);
905                 debug_object_free(obj, &descr_type_test);
906                 return true;
907         default:
908                 return false;
909         }
910 }
911
912 static int __init
913 check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
914 {
915         struct debug_bucket *db;
916         struct debug_obj *obj;
917         unsigned long flags;
918         int res = -EINVAL;
919
920         db = get_bucket((unsigned long) addr);
921
922         raw_spin_lock_irqsave(&db->lock, flags);
923
924         obj = lookup_object(addr, db);
925         if (!obj && state != ODEBUG_STATE_NONE) {
926                 WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
927                 goto out;
928         }
929         if (obj && obj->state != state) {
930                 WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
931                        obj->state, state);
932                 goto out;
933         }
934         if (fixups != debug_objects_fixups) {
935                 WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
936                        fixups, debug_objects_fixups);
937                 goto out;
938         }
939         if (warnings != debug_objects_warnings) {
940                 WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
941                        warnings, debug_objects_warnings);
942                 goto out;
943         }
944         res = 0;
945 out:
946         raw_spin_unlock_irqrestore(&db->lock, flags);
947         if (res)
948                 debug_objects_enabled = 0;
949         return res;
950 }
951
952 static __initdata struct debug_obj_descr descr_type_test = {
953         .name                   = "selftest",
954         .is_static_object       = is_static_object,
955         .fixup_init             = fixup_init,
956         .fixup_activate         = fixup_activate,
957         .fixup_destroy          = fixup_destroy,
958         .fixup_free             = fixup_free,
959 };
960
961 static __initdata struct self_test obj = { .static_init = 0 };
962
963 static void __init debug_objects_selftest(void)
964 {
965         int fixups, oldfixups, warnings, oldwarnings;
966         unsigned long flags;
967
968         local_irq_save(flags);
969
970         fixups = oldfixups = debug_objects_fixups;
971         warnings = oldwarnings = debug_objects_warnings;
972         descr_test = &descr_type_test;
973
974         debug_object_init(&obj, &descr_type_test);
975         if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
976                 goto out;
977         debug_object_activate(&obj, &descr_type_test);
978         if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
979                 goto out;
980         debug_object_activate(&obj, &descr_type_test);
981         if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
982                 goto out;
983         debug_object_deactivate(&obj, &descr_type_test);
984         if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
985                 goto out;
986         debug_object_destroy(&obj, &descr_type_test);
987         if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
988                 goto out;
989         debug_object_init(&obj, &descr_type_test);
990         if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
991                 goto out;
992         debug_object_activate(&obj, &descr_type_test);
993         if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
994                 goto out;
995         debug_object_deactivate(&obj, &descr_type_test);
996         if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
997                 goto out;
998         debug_object_free(&obj, &descr_type_test);
999         if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1000                 goto out;
1001
1002         obj.static_init = 1;
1003         debug_object_activate(&obj, &descr_type_test);
1004         if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1005                 goto out;
1006         debug_object_init(&obj, &descr_type_test);
1007         if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
1008                 goto out;
1009         debug_object_free(&obj, &descr_type_test);
1010         if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1011                 goto out;
1012
1013 #ifdef CONFIG_DEBUG_OBJECTS_FREE
1014         debug_object_init(&obj, &descr_type_test);
1015         if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1016                 goto out;
1017         debug_object_activate(&obj, &descr_type_test);
1018         if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1019                 goto out;
1020         __debug_check_no_obj_freed(&obj, sizeof(obj));
1021         if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
1022                 goto out;
1023 #endif
1024         pr_info("selftest passed\n");
1025
1026 out:
1027         debug_objects_fixups = oldfixups;
1028         debug_objects_warnings = oldwarnings;
1029         descr_test = NULL;
1030
1031         local_irq_restore(flags);
1032 }
1033 #else
1034 static inline void debug_objects_selftest(void) { }
1035 #endif
1036
1037 /*
1038  * Called during early boot to initialize the hash buckets and link
1039  * the static object pool objects into the pool list. After this call
1040  * the object tracker is fully operational.
1041  */
1042 void __init debug_objects_early_init(void)
1043 {
1044         int i;
1045
1046         for (i = 0; i < ODEBUG_HASH_SIZE; i++)
1047                 raw_spin_lock_init(&obj_hash[i].lock);
1048
1049         for (i = 0; i < ODEBUG_POOL_SIZE; i++)
1050                 hlist_add_head(&obj_static_pool[i].node, &obj_pool);
1051 }
1052
1053 /*
1054  * Convert the statically allocated objects to dynamic ones:
1055  */
1056 static int __init debug_objects_replace_static_objects(void)
1057 {
1058         struct debug_bucket *db = obj_hash;
1059         struct hlist_node *tmp;
1060         struct debug_obj *obj, *new;
1061         HLIST_HEAD(objects);
1062         int i, cnt = 0;
1063
1064         for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
1065                 obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
1066                 if (!obj)
1067                         goto free;
1068                 hlist_add_head(&obj->node, &objects);
1069         }
1070
1071         /*
1072          * When debug_objects_mem_init() is called we know that only
1073          * one CPU is up, so disabling interrupts is enough
1074          * protection. This avoids the lockdep hell of lock ordering.
1075          */
1076         local_irq_disable();
1077
1078         /* Remove the statically allocated objects from the pool */
1079         hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
1080                 hlist_del(&obj->node);
1081         /* Move the allocated objects to the pool */
1082         hlist_move_list(&objects, &obj_pool);
1083
1084         /* Replace the active object references */
1085         for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
1086                 hlist_move_list(&db->list, &objects);
1087
1088                 hlist_for_each_entry(obj, &objects, node) {
1089                         new = hlist_entry(obj_pool.first, typeof(*obj), node);
1090                         hlist_del(&new->node);
1091                         /* copy object data */
1092                         *new = *obj;
1093                         hlist_add_head(&new->node, &db->list);
1094                         cnt++;
1095                 }
1096         }
1097         local_irq_enable();
1098
1099         pr_debug("%d of %d active objects replaced\n",
1100                  cnt, obj_pool_used);
1101         return 0;
1102 free:
1103         hlist_for_each_entry_safe(obj, tmp, &objects, node) {
1104                 hlist_del(&obj->node);
1105                 kmem_cache_free(obj_cache, obj);
1106         }
1107         return -ENOMEM;
1108 }
1109
1110 /*
1111  * Called after the kmem_caches are functional to set up a dedicated
1112  * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
1113  * prevents the debug code from being called on kmem_cache_free() for the
1114  * debug tracker objects, avoiding recursive calls.
1115  */
1116 void __init debug_objects_mem_init(void)
1117 {
1118         if (!debug_objects_enabled)
1119                 return;
1120
1121         obj_cache = kmem_cache_create("debug_objects_cache",
1122                                       sizeof (struct debug_obj), 0,
1123                                       SLAB_DEBUG_OBJECTS, NULL);
1124
1125         if (!obj_cache || debug_objects_replace_static_objects()) {
1126                 debug_objects_enabled = 0;
1127                 if (obj_cache)
1128                         kmem_cache_destroy(obj_cache);
1129                 pr_warn("out of memory.\n");
1130         } else
1131                 debug_objects_selftest();
1132
1133         /*
1134          * Increase the thresholds for allocating and freeing objects
1135          * according to the number of possible CPUs available in the system.
1136          */
1137         debug_objects_pool_size += num_possible_cpus() * 32;
1138         debug_objects_pool_min_level += num_possible_cpus() * 4;
1139         debug_objects_pool_min_level += num_possible_cpus() * 4;
1140 }
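The two statements at the end of debug_objects_mem_init() implement the scaling named in the commit subject. As a worked example (16 possible CPUs is only an illustration), starting from the defaults ODEBUG_POOL_SIZE = 1024 and ODEBUG_POOL_MIN_LEVEL = 256:

    debug_objects_pool_size      = 1024 + 16 * 32 = 1536
    debug_objects_pool_min_level =  256 + 16 *  4 =  320

so both the free_obj_work() free threshold and the fill_pool() refill watermark grow with the number of possible CPUs.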