Merge branches 'pm-core', 'pm-qos', 'pm-domains' and 'pm-opp'
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index dc78217b2199e37f089d18bce5d6795562d241b1..8c28cbd7e104b6b23dd9b87e25aac2f0b6796c44 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -59,9 +59,9 @@ static int                    debug_objects_pool_min_level __read_mostly
 static struct debug_obj_descr  *descr_test  __read_mostly;
 
 /*
- * Track numbers of kmem_cache_alloc and kmem_cache_free done.
+ * Track numbers of kmem_cache_alloc()/free() calls done.
  */
-static int                     debug_objects_alloc;
+static int                     debug_objects_allocated;
 static int                     debug_objects_freed;
 
 static void free_obj_work(struct work_struct *work);
@@ -111,7 +111,7 @@ static void fill_pool(void)
 
                raw_spin_lock_irqsave(&pool_lock, flags);
                hlist_add_head(&new->node, &obj_pool);
-               debug_objects_alloc++;
+               debug_objects_allocated++;
                obj_pool_free++;
                raw_spin_unlock_irqrestore(&pool_lock, flags);
        }
@@ -172,25 +172,39 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 
 /*
  * workqueue function to free objects.
+ *
+ * To reduce contention on the global pool_lock, the actual freeing of
+ * debug objects will be delayed if the pool_lock is busy. We also free
+ * the objects in a batch of 4 for each lock/unlock cycle.
  */
+#define ODEBUG_FREE_BATCH      4
+
 static void free_obj_work(struct work_struct *work)
 {
-       struct debug_obj *obj;
+       struct debug_obj *objs[ODEBUG_FREE_BATCH];
        unsigned long flags;
+       int i;
 
-       raw_spin_lock_irqsave(&pool_lock, flags);
-       while (obj_pool_free > debug_objects_pool_size) {
-               obj = hlist_entry(obj_pool.first, typeof(*obj), node);
-               hlist_del(&obj->node);
-               obj_pool_free--;
-               debug_objects_freed++;
+       if (!raw_spin_trylock_irqsave(&pool_lock, flags))
+               return;
+       while (obj_pool_free >= debug_objects_pool_size + ODEBUG_FREE_BATCH) {
+               for (i = 0; i < ODEBUG_FREE_BATCH; i++) {
+                       objs[i] = hlist_entry(obj_pool.first,
+                                             typeof(*objs[0]), node);
+                       hlist_del(&objs[i]->node);
+               }
+
+               obj_pool_free -= ODEBUG_FREE_BATCH;
+               debug_objects_freed += ODEBUG_FREE_BATCH;
                /*
                 * We release pool_lock across kmem_cache_free() to
                 * avoid contention on pool_lock.
                 */
                raw_spin_unlock_irqrestore(&pool_lock, flags);
-               kmem_cache_free(obj_cache, obj);
-               raw_spin_lock_irqsave(&pool_lock, flags);
+               for (i = 0; i < ODEBUG_FREE_BATCH; i++)
+                       kmem_cache_free(obj_cache, objs[i]);
+               if (!raw_spin_trylock_irqsave(&pool_lock, flags))
+                       return;
        }
        raw_spin_unlock_irqrestore(&pool_lock, flags);
 }
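
The pattern in this hunk (trylock, detach a fixed-size batch under the lock, drop the lock across the expensive free calls, then re-trylock) is not kernel-specific. Below is a minimal userspace sketch of the same idea using pthread_mutex_trylock() in place of raw_spin_trylock_irqsave(); the names pool_head, pool_free, POOL_TARGET and free_pool_excess() are hypothetical, not part of the patch:

/*
 * Userspace analogue of the batched, contention-avoiding free above.
 * FREE_BATCH mirrors ODEBUG_FREE_BATCH; POOL_TARGET is a stand-in for
 * debug_objects_pool_size. All names here are illustrative only.
 */
#include <pthread.h>
#include <stdlib.h>

#define FREE_BATCH	4
#define POOL_TARGET	64

struct node {
	struct node *next;
};

static struct node *pool_head;
static int pool_free;
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

static void free_pool_excess(void)
{
	struct node *batch[FREE_BATCH];
	int i;

	/* Back off instead of waiting if the lock is contended. */
	if (pthread_mutex_trylock(&pool_lock))
		return;
	while (pool_free >= POOL_TARGET + FREE_BATCH) {
		/* Detach a whole batch while holding the lock... */
		for (i = 0; i < FREE_BATCH; i++) {
			batch[i] = pool_head;
			pool_head = pool_head->next;
		}
		pool_free -= FREE_BATCH;
		/* ...but drop it across the expensive free() calls. */
		pthread_mutex_unlock(&pool_lock);
		for (i = 0; i < FREE_BATCH; i++)
			free(batch[i]);
		if (pthread_mutex_trylock(&pool_lock))
			return;
	}
	pthread_mutex_unlock(&pool_lock);
}

Returning early when the trylock fails is what makes this contention-friendly: the deferred worker simply tries again on its next invocation instead of queueing behind hot-path lockers, at the cost of the pool temporarily staying above its target size.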
@@ -769,8 +783,8 @@ static int debug_stats_show(struct seq_file *m, void *v)
        seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
        seq_printf(m, "pool_used     :%d\n", obj_pool_used);
        seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
-       seq_printf(m, "objects_alloc :%d\n", debug_objects_alloc);
-       seq_printf(m, "objects_freed :%d\n", debug_objects_freed);
+       seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
+       seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
        return 0;
 }
 
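
For reference, the renamed counters are visible in the debugobjects stats file (normally /sys/kernel/debug/debug_objects/stats once debugfs is mounted). A hypothetical excerpt, with made-up values and showing only the fields this hunk touches:

	pool_min_free :50
	pool_used     :147
	pool_max_used :462
	objs_allocated:40221
	objs_freed    :38995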