locking/lockdep: Refactorize check_noncircular and check_redundant
author    Yuyang Du <duyuyang@gmail.com>
          Mon, 6 May 2019 08:19:35 +0000 (16:19 +0800)
committer Ingo Molnar <mingo@kernel.org>
          Mon, 3 Jun 2019 09:55:50 +0000 (11:55 +0200)
These two functions now handle their respective check results themselves. A
new helper, check_path(), is added to check whether a path exists in the
dependency graph. No functional change.
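
As a rough, self-contained sketch of the pattern this patch introduces (all
names here are hypothetical, not the kernel code: graph, bfs_forwards() and
friends stand in for the real lock_list/__bfs_forwards() machinery), a shared
helper returns the raw search result and each caller maps it to its own
convention:

#include <stdio.h>

/*
 * Result convention mirroring __bfs_forwards():
 *   < 0  internal BFS error (e.g. search-queue overflow)
 *     0  a path from src to target exists
 *     1  no such path exists
 */
enum { PATH_FOUND = 0, PATH_NONE = 1 };

struct graph;			/* opaque stand-in for the dependency graph */

static int bfs_forwards(struct graph *g, int src, int target)
{
	(void)g; (void)src; (void)target;
	return PATH_NONE;	/* pretend the search found no path */
}

/* Shared helper: run the search, report internal errors in one place. */
static int check_path(struct graph *g, int src, int target)
{
	int ret = bfs_forwards(g, src, target);

	if (ret < 0)
		fprintf(stderr, "bfs error %d\n", ret);	/* cf. print_bfs_bug() */
	return ret;
}

/* A path here would be a cycle: report it; callers treat <= 0 as failure. */
static int check_noncircular(struct graph *g, int src, int target)
{
	int ret = check_path(g, src, target);

	if (ret == PATH_FOUND)
		fprintf(stderr, "circular dependency\n");	/* cf. print_circular_bug() */
	return ret;
}

/*
 * A path here means the new edge already exists: fold the result into the
 * caller's convention, 2 for "redundant", 0 for a BFS error, 1 otherwise.
 */
static int check_redundant(struct graph *g, int src, int target)
{
	int ret = check_path(g, src, target);

	if (ret == PATH_FOUND)
		return 2;
	return ret < 0 ? 0 : ret;
}

int main(void)
{
	struct graph *g = NULL;

	printf("noncircular: %d, redundant: %d\n",
	       check_noncircular(g, 0, 1), check_redundant(g, 0, 1));
	return 0;
}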

Signed-off-by: Yuyang Du <duyuyang@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bvanassche@acm.org
Cc: frederic@kernel.org
Cc: ming.lei@redhat.com
Cc: will.deacon@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-20-duyuyang@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/locking/lockdep.c

index 8169706df7677d7cafc728f6024b531140a86753..30a1c0e3257301040c08f7e10119922774b0e0f9 100644 (file)
@@ -1683,33 +1683,90 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
 }
 
 /*
- * Prove that the dependency graph starting at <entry> can not
- * lead to <target>. Print an error and return 0 if it does.
+ * Check that the dependency graph starting at <src> can lead to
+ * <target> or not. Print an error and return 0 if it does.
  */
 static noinline int
-check_noncircular(struct lock_list *root, struct lock_class *target,
-               struct lock_list **target_entry)
+check_path(struct lock_class *target, struct lock_list *src_entry,
+          struct lock_list **target_entry)
 {
-       int result;
+       int ret;
+
+       ret = __bfs_forwards(src_entry, (void *)target, class_equal,
+                            target_entry);
+
+       if (unlikely(ret < 0))
+               print_bfs_bug(ret);
+
+       return ret;
+}
+
+/*
+ * Prove that the dependency graph starting at <src> can not
+ * lead to <target>. If it can, there is a circle when adding
+ * <target> -> <src> dependency.
+ *
+ * Print an error and return 0 if it does.
+ */
+static noinline int
+check_noncircular(struct held_lock *src, struct held_lock *target,
+                 struct lock_trace *trace)
+{
+       int ret;
+       struct lock_list *uninitialized_var(target_entry);
+       struct lock_list src_entry = {
+               .class = hlock_class(src),
+               .parent = NULL,
+       };
 
        debug_atomic_inc(nr_cyclic_checks);
 
-       result = __bfs_forwards(root, target, class_equal, target_entry);
+       ret = check_path(hlock_class(target), &src_entry, &target_entry);
 
-       return result;
+       if (unlikely(!ret)) {
+               if (!trace->nr_entries) {
+                       /*
+                        * If save_trace fails here, the printing might
+                        * trigger a WARN but because of the !nr_entries it
+                        * should not do bad things.
+                        */
+                       save_trace(trace);
+               }
+
+               print_circular_bug(&src_entry, target_entry, src, target);
+       }
+
+       return ret;
 }
 
+/*
+ * Check that the dependency graph starting at <src> can lead to
+ * <target> or not. If it can, <src> -> <target> dependency is already
+ * in the graph.
+ *
+ * Print an error and return 2 if it does or 1 if it does not.
+ */
 static noinline int
-check_redundant(struct lock_list *root, struct lock_class *target,
-               struct lock_list **target_entry)
+check_redundant(struct held_lock *src, struct held_lock *target)
 {
-       int result;
+       int ret;
+       struct lock_list *uninitialized_var(target_entry);
+       struct lock_list src_entry = {
+               .class = hlock_class(src),
+               .parent = NULL,
+       };
 
        debug_atomic_inc(nr_redundant_checks);
 
-       result = __bfs_forwards(root, target, class_equal, target_entry);
+       ret = check_path(hlock_class(target), &src_entry, &target_entry);
 
-       return result;
+       if (!ret) {
+               debug_atomic_inc(nr_redundant);
+               ret = 2;
+       } else if (ret < 0)
+               ret = 0;
+
+       return ret;
 }
 
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -2307,9 +2364,7 @@ static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
               struct held_lock *next, int distance, struct lock_trace *trace)
 {
-       struct lock_list *uninitialized_var(target_entry);
        struct lock_list *entry;
-       struct lock_list this;
        int ret;
 
        if (!hlock_class(prev)->key || !hlock_class(next)->key) {
@@ -2340,25 +2395,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
         * MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes
         * in the graph whose neighbours are to be checked.
         */
-       this.class = hlock_class(next);
-       this.parent = NULL;
-       ret = check_noncircular(&this, hlock_class(prev), &target_entry);
-       if (unlikely(!ret)) {
-               if (!trace->nr_entries) {
-                       /*
-                        * If save_trace fails here, the printing might
-                        * trigger a WARN but because of the !nr_entries it
-                        * should not do bad things.
-                        */
-                       save_trace(trace);
-               }
-               print_circular_bug(&this, target_entry, next, prev);
+       ret = check_noncircular(next, prev, trace);
+       if (unlikely(ret <= 0))
                return 0;
-       }
-       else if (unlikely(ret < 0)) {
-               print_bfs_bug(ret);
-               return 0;
-       }
 
        if (!check_irq_usage(curr, prev, next))
                return 0;
@@ -2392,18 +2431,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
        /*
         * Is the <prev> -> <next> link redundant?
         */
-       this.class = hlock_class(prev);
-       this.parent = NULL;
-       ret = check_redundant(&this, hlock_class(next), &target_entry);
-       if (!ret) {
-               debug_atomic_inc(nr_redundant);
-               return 2;
-       }
-       if (ret < 0) {
-               print_bfs_bug(ret);
-               return 0;
-       }
-
+       ret = check_redundant(prev, next);
+       if (ret != 1)
+               return ret;
 
        if (!trace->nr_entries && !save_trace(trace))
                return 0;
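
With the results handled inside the helpers, check_prev_add() above reduces to
two condensed tests: for check_noncircular(), both 0 (cycle found and reported)
and a negative value (BFS error, reported via check_path()) mean the new
dependency must be rejected, hence the single ret <= 0 check; for
check_redundant(), any value other than 1 (2 for a redundant link, 0 for a BFS
error) can be passed straight through as check_prev_add()'s own return value.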