lockdep: Print a nicer description for simple deadlocks
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 0d2058da80f51888b503f4e92829bddbe884b264..c4cc5d1acf4833a07ee21958cb5eac91990e0316 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -490,6 +490,18 @@ void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
        usage[i] = '\0';
 }
 
+static int __print_lock_name(struct lock_class *class)
+{
+       char str[KSYM_NAME_LEN];
+       const char *name;
+
+       name = class->name;
+       if (!name)
+               name = __get_key_name(class->key, str);
+
+       return printk("%s", name);
+}
+
 static void print_lock_name(struct lock_class *class)
 {
        char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
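
The new __print_lock_name() helper prints the bare class name only. The existing print_lock_name() (visible in the context above) also appends the usage characters in braces, which would clutter the scenario diagrams added below. With a hypothetical class named "&rq->lock", the two would print roughly:

    print_lock_name():    (&rq->lock){-.-.-.}
    __print_lock_name():  &rq->lock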
@@ -1053,6 +1065,56 @@ print_circular_bug_entry(struct lock_list *target, int depth)
        return 0;
 }
 
+static void
+print_circular_lock_scenario(struct held_lock *src,
+                            struct held_lock *tgt,
+                            struct lock_list *prt)
+{
+       struct lock_class *source = hlock_class(src);
+       struct lock_class *target = hlock_class(tgt);
+       struct lock_class *parent = prt->class;
+
+       /*
+        * In the direct case, where the target lock is taken
+        * immediately under the source lock (parent == source),
+        * the deadlock scenario alone is all we need to show, as
+        * it is obvious that one lock nests inside the other.
+        *
+        * But if there is a chain instead, where the source lock
+        * takes an intermediate lock (parent) before reaching the
+        * target lock, then the lock chain is printed as well.
+        * Otherwise we would need to show a different CPU case
+        * for each link in the chain from the source lock to the
+        * target lock.
+        */
+       if (parent != source) {
+               printk("Chain exists of:\n  ");
+               __print_lock_name(source);
+               printk(" --> ");
+               __print_lock_name(parent);
+               printk(" --> ");
+               __print_lock_name(target);
+               printk("\n\n");
+       }
+
+       printk(" Possible unsafe locking scenario:\n\n");
+       printk("       CPU0                    CPU1\n");
+       printk("       ----                    ----\n");
+       printk("  lock(");
+       __print_lock_name(target);
+       printk(");\n");
+       printk("                               lock(");
+       __print_lock_name(parent);
+       printk(");\n");
+       printk("                               lock(");
+       __print_lock_name(target);
+       printk(");\n");
+       printk("  lock(");
+       __print_lock_name(source);
+       printk(");\n");
+       printk("\n *** DEADLOCK ***\n\n");
+}
+
 /*
  * When a circular dependency is detected, print the
  * header first:
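
To make the new report concrete: assuming three hypothetical lock classes, lockA (the source being acquired), lockB (the intermediate parent link) and lockC (the target already held), print_circular_lock_scenario() emits approximately:

Chain exists of:
  lockA --> lockB --> lockC

 Possible unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(lockC);
                               lock(lockB);
                               lock(lockC);
  lock(lockA);

 *** DEADLOCK ***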
@@ -1096,6 +1158,7 @@ static noinline int print_circular_bug(struct lock_list *this,
 {
        struct task_struct *curr = current;
        struct lock_list *parent;
+       struct lock_list *first_parent;
        int depth;
 
        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
@@ -1109,6 +1172,7 @@ static noinline int print_circular_bug(struct lock_list *this,
        print_circular_bug_header(target, depth, check_src, check_tgt);
 
        parent = get_lock_parent(target);
+       first_parent = parent;
 
        while (parent) {
                print_circular_bug_entry(parent, --depth);
@@ -1116,6 +1180,9 @@ static noinline int print_circular_bug(struct lock_list *this,
        }
 
        printk("\nother info that might help us debug this:\n\n");
+       print_circular_lock_scenario(check_src, check_tgt,
+                                    first_parent);
+
        lockdep_print_held_locks(curr);
 
        printk("\nstack backtrace:\n");
@@ -1325,6 +1392,62 @@ print_shortest_lock_dependencies(struct lock_list *leaf,
        return;
 }
 
+static void
+print_irq_lock_scenario(struct lock_list *safe_entry,
+                       struct lock_list *unsafe_entry,
+                       struct held_lock *prev,
+                       struct held_lock *next)
+{
+       struct lock_class *safe_class = safe_entry->class;
+       struct lock_class *unsafe_class = unsafe_entry->class;
+       struct lock_class *middle_class = hlock_class(prev);
+
+       if (middle_class == safe_class)
+               middle_class = hlock_class(next);
+
+       /*
+        * In the direct case, where the unsafe_class lock is
+        * taken directly under the safe_class lock, the deadlock
+        * scenario alone is all we need to show, as it is obvious
+        * that the unsafe lock is taken under the safe lock.
+        *
+        * But if there is a chain instead, where the safe lock
+        * takes an intermediate lock (middle_class) before
+        * reaching the unsafe lock, then the lock chain is
+        * printed as well. Otherwise we would need to show a
+        * different CPU case for each link in the chain from the
+        * safe_class lock to the unsafe_class lock.
+        */
+       if (middle_class != unsafe_class) {
+               printk("Chain exists of:\n  ");
+               __print_lock_name(safe_class);
+               printk(" --> ");
+               __print_lock_name(middle_class);
+               printk(" --> ");
+               __print_lock_name(unsafe_class);
+               printk("\n\n");
+       }
+
+       printk(" Possible interrupt unsafe locking scenario:\n\n");
+       printk("       CPU0                    CPU1\n");
+       printk("       ----                    ----\n");
+       printk("  lock(");
+       __print_lock_name(unsafe_class);
+       printk(");\n");
+       printk("                               local_irq_disable();\n");
+       printk("                               lock(");
+       __print_lock_name(safe_class);
+       printk(");\n");
+       printk("                               lock(");
+       __print_lock_name(middle_class);
+       printk(");\n");
+       printk("  <Interrupt>\n");
+       printk("    lock(");
+       __print_lock_name(safe_class);
+       printk(");\n");
+       printk("\n *** DEADLOCK ***\n\n");
+}
+
 static int
 print_bad_irq_dependency(struct task_struct *curr,
                         struct lock_list *prev_root,
@@ -1376,6 +1499,8 @@ print_bad_irq_dependency(struct task_struct *curr,
        print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
 
        printk("\nother info that might help us debug this:\n\n");
+       print_irq_lock_scenario(backwards_entry, forwards_entry, prev, next);
+
        lockdep_print_held_locks(curr);
 
        printk("\nthe dependencies between %s-irq-safe lock", irqclass);
@@ -1539,6 +1664,26 @@ static inline void inc_chains(void)
 
 #endif
 
+static void
+print_deadlock_scenario(struct held_lock *nxt,
+                       struct held_lock *prv)
+{
+       struct lock_class *next = hlock_class(nxt);
+       struct lock_class *prev = hlock_class(prv);
+
+       printk(" Possible unsafe locking scenario:\n\n");
+       printk("       CPU0\n");
+       printk("       ----\n");
+       printk("  lock(");
+       __print_lock_name(prev);
+       printk(");\n");
+       printk("  lock(");
+       __print_lock_name(next);
+       printk(");\n");
+       printk("\n *** DEADLOCK ***\n\n");
+       printk(" May be due to missing lock nesting notation\n\n");
+}
+
 static int
 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
                   struct held_lock *next)
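
For the simple recursive case, with a single hypothetical class lockA, the new function prints:

 Possible unsafe locking scenario:

       CPU0
       ----
  lock(lockA);
  lock(lockA);

 *** DEADLOCK ***

 May be due to missing lock nesting notation

The closing hint refers to the _nested lock annotations. A minimal sketch of that notation, assuming an invented struct whose callers always lock parent before child (spin_lock_nested() and SINGLE_DEPTH_NESTING are the existing kernel interfaces; everything else here is made up):

#include <linux/spinlock.h>

struct node {				/* hypothetical structure */
	spinlock_t lock;
	struct node *child;
};

static void lock_parent_and_child(struct node *parent)
{
	spin_lock(&parent->lock);
	/* Tell lockdep this same-class nesting is intentional. */
	spin_lock_nested(&parent->child->lock, SINGLE_DEPTH_NESTING);
}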
@@ -1557,6 +1702,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
        print_lock(prev);
 
        printk("\nother info that might help us debug this:\n");
+       print_deadlock_scenario(next, prev);
        lockdep_print_held_locks(curr);
 
        printk("\nstack backtrace:\n");
@@ -2309,7 +2455,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
        if (unlikely(curr->hardirqs_enabled)) {
                /*
                 * Neither irq nor preemption are disabled here
-                * so this is racy by nature but loosing one hit
+                * so this is racy by nature but losing one hit
                 * in a stat is not a big deal.
                 */
                __debug_atomic_inc(redundant_hardirqs_on);
@@ -2620,7 +2766,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
        if (!graph_lock())
                return 0;
        /*
-        * Make sure we didnt race:
+        * Make sure we didn't race:
         */
        if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
                graph_unlock();