[PATCH] lockdep: more unlock-on-error fixes
1 /*
2  * kernel/lockdep.c
3  *
4  * Runtime locking correctness validator
5  *
6  * Started by Ingo Molnar:
7  *
8  *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9  *
10  * this code maps all the lock dependencies as they occur in a live kernel
11  * and will warn about the following classes of locking bugs:
12  *
13  * - lock inversion scenarios
14  * - circular lock dependencies
15  * - hardirq/softirq safe/unsafe locking bugs
16  *
17  * Bugs are reported even if the current locking scenario does not cause
18  * any deadlock at this point.
19  *
20  * I.e. if anytime in the past two locks were taken in a different order,
21  * even if it happened for another task, even if those were different
22  * locks (but of the same class as this lock), this code will detect it.
23  *
24  * Thanks to Arjan van de Ven for coming up with the initial idea of
25  * mapping lock dependencies runtime.
26  */
27 #include <linux/mutex.h>
28 #include <linux/sched.h>
29 #include <linux/delay.h>
30 #include <linux/module.h>
31 #include <linux/proc_fs.h>
32 #include <linux/seq_file.h>
33 #include <linux/spinlock.h>
34 #include <linux/kallsyms.h>
35 #include <linux/interrupt.h>
36 #include <linux/stacktrace.h>
37 #include <linux/debug_locks.h>
38 #include <linux/irqflags.h>
39 #include <linux/utsname.h>
40
41 #include <asm/sections.h>
42
43 #include "lockdep_internals.h"
44
45 /*
46  * lockdep_lock: protects the lockdep graph, the hashes and the
47  *               class/list/hash allocators.
48  *
49  * This is one of the rare exceptions where it's justified
50  * to use a raw spinlock - we really don't want the spinlock
51  * code to recurse back into the lockdep code...
52  */
53 static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
54
55 static int graph_lock(void)
56 {
57         __raw_spin_lock(&lockdep_lock);
58         /*
59          * Make sure that if another CPU detected a bug while
60          * walking the graph we don't change it (while the other
61          * CPU is busy printing out stuff with the graph lock
62          * dropped already)
63          */
64         if (!debug_locks) {
65                 __raw_spin_unlock(&lockdep_lock);
66                 return 0;
67         }
68         return 1;
69 }
70
71 static inline int graph_unlock(void)
72 {
73         if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
74                 return DEBUG_LOCKS_WARN_ON(1);
75
76         __raw_spin_unlock(&lockdep_lock);
77         return 0;
78 }
79
80 /*
81  * Turn lock debugging off and return with 0 if it was off already,
82  * and also release the graph lock:
83  */
84 static inline int debug_locks_off_graph_unlock(void)
85 {
86         int ret = debug_locks_off();
87
88         __raw_spin_unlock(&lockdep_lock);
89
90         return ret;
91 }
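/*
 * (Editorial note, inferred from the helpers above: the error convention
 * used throughout this file is that a path which detects a problem while
 * holding the graph lock calls debug_locks_off_graph_unlock(), so the
 * potentially slow report is printed with lockdep_lock already dropped,
 * and graph_lock() refuses to hand the lock out again once debug_locks
 * has been cleared by such a path.)
 */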
92
93 static int lockdep_initialized;
94
95 unsigned long nr_list_entries;
96 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
97
98 /*
99  * Allocate a lockdep entry. (assumes the graph_lock held, returns
100  * with NULL on failure)
101  */
102 static struct lock_list *alloc_list_entry(void)
103 {
104         if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
105                 if (!debug_locks_off_graph_unlock())
106                         return NULL;
107
108                 printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
109                 printk("turning off the locking correctness validator.\n");
110                 return NULL;
111         }
112         return list_entries + nr_list_entries++;
113 }
114
115 /*
116  * All data structures here are protected by the global debug_lock.
117  *
118  * Mutex key structs only get allocated, once during bootup, and never
119  * get freed - this significantly simplifies the debugging code.
120  */
121 unsigned long nr_lock_classes;
122 static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
123
124 /*
125  * We keep a global list of all lock classes. The list only grows,
126  * never shrinks. The list is only accessed with the lockdep
127  * spinlock lock held.
128  */
129 LIST_HEAD(all_lock_classes);
130
131 /*
132  * The lockdep classes are in a hash-table as well, for fast lookup:
133  */
134 #define CLASSHASH_BITS          (MAX_LOCKDEP_KEYS_BITS - 1)
135 #define CLASSHASH_SIZE          (1UL << CLASSHASH_BITS)
136 #define CLASSHASH_MASK          (CLASSHASH_SIZE - 1)
137 #define __classhashfn(key)      ((((unsigned long)key >> CLASSHASH_BITS) + (unsigned long)key) & CLASSHASH_MASK)
138 #define classhashentry(key)     (classhash_table + __classhashfn((key)))
139
140 static struct list_head classhash_table[CLASSHASH_SIZE];
141
142 unsigned long nr_lock_chains;
143 static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
144
145 /*
146  * We put the lock dependency chains into a hash-table as well, to cache
147  * their existence:
148  */
149 #define CHAINHASH_BITS          (MAX_LOCKDEP_CHAINS_BITS-1)
150 #define CHAINHASH_SIZE          (1UL << CHAINHASH_BITS)
151 #define CHAINHASH_MASK          (CHAINHASH_SIZE - 1)
152 #define __chainhashfn(chain) \
153                 (((chain >> CHAINHASH_BITS) + chain) & CHAINHASH_MASK)
154 #define chainhashentry(chain)   (chainhash_table + __chainhashfn((chain)))
155
156 static struct list_head chainhash_table[CHAINHASH_SIZE];
157
158 /*
159  * The hash key of the lock dependency chains is a hash itself too:
160  * it's a hash of all locks taken up to that lock, including that lock.
161  * It's a 64-bit hash, because it's important for the keys to be
162  * unique.
163  */
164 #define iterate_chain_key(key1, key2) \
165         (((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
166         ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
167         (key2))
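/*
 * Illustrative sketch (not part of the original file): the chain key of a
 * held-lock stack A, B, C is built up incrementally, roughly as
 *
 *	chain_key = iterate_chain_key(0, id_A);
 *	chain_key = iterate_chain_key(chain_key, id_B);
 *	chain_key = iterate_chain_key(chain_key, id_C);
 *
 * where id_X is the lock class index (class - lock_classes), as done in
 * check_chain_key() further down.
 */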
168
169 void lockdep_off(void)
170 {
171         current->lockdep_recursion++;
172 }
173
174 EXPORT_SYMBOL(lockdep_off);
175
176 void lockdep_on(void)
177 {
178         current->lockdep_recursion--;
179 }
180
181 EXPORT_SYMBOL(lockdep_on);
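/*
 * Illustrative usage sketch (an assumption, not taken from this file):
 * a caller that cannot annotate its locking properly may bracket the
 * region so lockdep ignores it:
 *
 *	lockdep_off();
 *	... take/release locks that lockdep must not track ...
 *	lockdep_on();
 *
 * Because lockdep_recursion is a counter, such sections can nest.
 */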
182
183 /*
184  * Debugging switches:
185  */
186
187 #define VERBOSE                 0
188 #define VERY_VERBOSE            0
189
190 #if VERBOSE
191 # define HARDIRQ_VERBOSE        1
192 # define SOFTIRQ_VERBOSE        1
193 #else
194 # define HARDIRQ_VERBOSE        0
195 # define SOFTIRQ_VERBOSE        0
196 #endif
197
198 #if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
199 /*
200  * Quick filtering for interesting events:
201  */
202 static int class_filter(struct lock_class *class)
203 {
204 #if 0
205         /* Example */
206         if (class->name_version == 1 &&
207                         !strcmp(class->name, "lockname"))
208                 return 1;
209         if (class->name_version == 1 &&
210                         !strcmp(class->name, "&struct->lockfield"))
211                 return 1;
212 #endif
213         /* Filter everything else. 1 would be to allow everything else */
214         return 0;
215 }
216 #endif
217
218 static int verbose(struct lock_class *class)
219 {
220 #if VERBOSE
221         return class_filter(class);
222 #endif
223         return 0;
224 }
225
226 #ifdef CONFIG_TRACE_IRQFLAGS
227
228 static int hardirq_verbose(struct lock_class *class)
229 {
230 #if HARDIRQ_VERBOSE
231         return class_filter(class);
232 #endif
233         return 0;
234 }
235
236 static int softirq_verbose(struct lock_class *class)
237 {
238 #if SOFTIRQ_VERBOSE
239         return class_filter(class);
240 #endif
241         return 0;
242 }
243
244 #endif
245
246 /*
247  * Stack-trace: tightly packed array of stack backtrace
248  * addresses. Protected by the graph_lock.
249  */
250 unsigned long nr_stack_trace_entries;
251 static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
252
253 static int save_trace(struct stack_trace *trace)
254 {
255         trace->nr_entries = 0;
256         trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
257         trace->entries = stack_trace + nr_stack_trace_entries;
258
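	/*
	 * (Editorial note: the skipped entries below drop the lockdep-
	 * internal frames from the top of the saved trace; the exact
	 * count of 3 is an implementation detail of this call path.)
	 */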
259         trace->skip = 3;
260         trace->all_contexts = 0;
261
262         save_stack_trace(trace, NULL);
263
264         trace->max_entries = trace->nr_entries;
265
266         nr_stack_trace_entries += trace->nr_entries;
267
268         if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
269                 if (!debug_locks_off_graph_unlock())
270                         return 0;
271
272                 printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
273                 printk("turning off the locking correctness validator.\n");
274                 dump_stack();
275
276                 return 0;
277         }
278
279         return 1;
280 }
281
282 unsigned int nr_hardirq_chains;
283 unsigned int nr_softirq_chains;
284 unsigned int nr_process_chains;
285 unsigned int max_lockdep_depth;
286 unsigned int max_recursion_depth;
287
288 #ifdef CONFIG_DEBUG_LOCKDEP
289 /*
290  * We cannot printk in early bootup code. Not even early_printk()
291  * might work. So we mark any initialization errors and printk
292  * about it later on, in lockdep_info().
293  */
294 static int lockdep_init_error;
295
296 /*
297  * Various lockdep statistics:
298  */
299 atomic_t chain_lookup_hits;
300 atomic_t chain_lookup_misses;
301 atomic_t hardirqs_on_events;
302 atomic_t hardirqs_off_events;
303 atomic_t redundant_hardirqs_on;
304 atomic_t redundant_hardirqs_off;
305 atomic_t softirqs_on_events;
306 atomic_t softirqs_off_events;
307 atomic_t redundant_softirqs_on;
308 atomic_t redundant_softirqs_off;
309 atomic_t nr_unused_locks;
310 atomic_t nr_cyclic_checks;
311 atomic_t nr_cyclic_check_recursions;
312 atomic_t nr_find_usage_forwards_checks;
313 atomic_t nr_find_usage_forwards_recursions;
314 atomic_t nr_find_usage_backwards_checks;
315 atomic_t nr_find_usage_backwards_recursions;
316 # define debug_atomic_inc(ptr)          atomic_inc(ptr)
317 # define debug_atomic_dec(ptr)          atomic_dec(ptr)
318 # define debug_atomic_read(ptr)         atomic_read(ptr)
319 #else
320 # define debug_atomic_inc(ptr)          do { } while (0)
321 # define debug_atomic_dec(ptr)          do { } while (0)
322 # define debug_atomic_read(ptr)         0
323 #endif
324
325 /*
326  * Locking printouts:
327  */
328
329 static const char *usage_str[] =
330 {
331         [LOCK_USED] =                   "initial-use ",
332         [LOCK_USED_IN_HARDIRQ] =        "in-hardirq-W",
333         [LOCK_USED_IN_SOFTIRQ] =        "in-softirq-W",
334         [LOCK_ENABLED_SOFTIRQS] =       "softirq-on-W",
335         [LOCK_ENABLED_HARDIRQS] =       "hardirq-on-W",
336         [LOCK_USED_IN_HARDIRQ_READ] =   "in-hardirq-R",
337         [LOCK_USED_IN_SOFTIRQ_READ] =   "in-softirq-R",
338         [LOCK_ENABLED_SOFTIRQS_READ] =  "softirq-on-R",
339         [LOCK_ENABLED_HARDIRQS_READ] =  "hardirq-on-R",
340 };
341
342 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
343 {
344         unsigned long offs, size;
345         char *modname;
346
347         return kallsyms_lookup((unsigned long)key, &size, &offs, &modname, str);
348 }
349
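/*
 * (Editorial summary of the function below: the four characters printed
 * in lockdep reports are derived from class->usage_mask as follows --
 * '+' the lock was used in hardirq/softirq context, '-' the lock was
 * taken with hardirqs/softirqs enabled, '?' both (read-side columns
 * only), '.' neither. c1/c2 are the hardirq/softirq write-side columns,
 * c3/c4 the corresponding read-side columns.)
 */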
350 void
351 get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
352 {
353         *c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';
354
355         if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
356                 *c1 = '+';
357         else
358                 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
359                         *c1 = '-';
360
361         if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
362                 *c2 = '+';
363         else
364                 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
365                         *c2 = '-';
366
367         if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
368                 *c3 = '-';
369         if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) {
370                 *c3 = '+';
371                 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
372                         *c3 = '?';
373         }
374
375         if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
376                 *c4 = '-';
377         if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) {
378                 *c4 = '+';
379                 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
380                         *c4 = '?';
381         }
382 }
383
384 static void print_lock_name(struct lock_class *class)
385 {
386         char str[KSYM_NAME_LEN + 1], c1, c2, c3, c4;
387         const char *name;
388
389         get_usage_chars(class, &c1, &c2, &c3, &c4);
390
391         name = class->name;
392         if (!name) {
393                 name = __get_key_name(class->key, str);
394                 printk(" (%s", name);
395         } else {
396                 printk(" (%s", name);
397                 if (class->name_version > 1)
398                         printk("#%d", class->name_version);
399                 if (class->subclass)
400                         printk("/%d", class->subclass);
401         }
402         printk("){%c%c%c%c}", c1, c2, c3, c4);
403 }
404
405 static void print_lockdep_cache(struct lockdep_map *lock)
406 {
407         const char *name;
408         char str[KSYM_NAME_LEN + 1];
409
410         name = lock->name;
411         if (!name)
412                 name = __get_key_name(lock->key->subkeys, str);
413
414         printk("%s", name);
415 }
416
417 static void print_lock(struct held_lock *hlock)
418 {
419         print_lock_name(hlock->class);
420         printk(", at: ");
421         print_ip_sym(hlock->acquire_ip);
422 }
423
424 static void lockdep_print_held_locks(struct task_struct *curr)
425 {
426         int i, depth = curr->lockdep_depth;
427
428         if (!depth) {
429                 printk("no locks held by %s/%d.\n", curr->comm, curr->pid);
430                 return;
431         }
432         printk("%d lock%s held by %s/%d:\n",
433                 depth, depth > 1 ? "s" : "", curr->comm, curr->pid);
434
435         for (i = 0; i < depth; i++) {
436                 printk(" #%d: ", i);
437                 print_lock(curr->held_locks + i);
438         }
439 }
440
441 static void print_lock_class_header(struct lock_class *class, int depth)
442 {
443         int bit;
444
445         printk("%*s->", depth, "");
446         print_lock_name(class);
447         printk(" ops: %lu", class->ops);
448         printk(" {\n");
449
450         for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
451                 if (class->usage_mask & (1 << bit)) {
452                         int len = depth;
453
454                         len += printk("%*s   %s", depth, "", usage_str[bit]);
455                         len += printk(" at:\n");
456                         print_stack_trace(class->usage_traces + bit, len);
457                 }
458         }
459         printk("%*s }\n", depth, "");
460
461         printk("%*s ... key      at: ",depth,"");
462         print_ip_sym((unsigned long)class->key);
463 }
464
465 /*
466  * printk all lock dependencies starting at <entry>:
467  */
468 static void print_lock_dependencies(struct lock_class *class, int depth)
469 {
470         struct lock_list *entry;
471
472         if (DEBUG_LOCKS_WARN_ON(depth >= 20))
473                 return;
474
475         print_lock_class_header(class, depth);
476
477         list_for_each_entry(entry, &class->locks_after, entry) {
478                 if (DEBUG_LOCKS_WARN_ON(!entry->class))
479                         return;
480
481                 print_lock_dependencies(entry->class, depth + 1);
482
483                 printk("%*s ... acquired at:\n",depth,"");
484                 print_stack_trace(&entry->trace, 2);
485                 printk("\n");
486         }
487 }
488
489 /*
490  * Add a new dependency to the head of the list:
491  */
492 static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
493                             struct list_head *head, unsigned long ip)
494 {
495         struct lock_list *entry;
496         /*
497          * Lock not present yet - get a new dependency struct and
498          * add it to the list:
499          */
500         entry = alloc_list_entry();
501         if (!entry)
502                 return 0;
503
504         entry->class = this;
505         if (!save_trace(&entry->trace))
506                 return 0;
507
508         /*
509          * Since we never remove from the dependency list, the list can
510          * be walked lockless by other CPUs, it's only allocation
511          * that must be protected by the spinlock. But this also means
512          * we must make new entries visible only once writes to the
513          * entry become visible - hence the RCU op:
514          */
515         list_add_tail_rcu(&entry->entry, head);
516
517         return 1;
518 }
519
520 /*
521  * Recursive, forwards-direction lock-dependency checking, used for
522  * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
523  * checking.
524  *
525  * (to keep the stackframe of the recursive functions small we
526  *  use these global variables, and we also mark various helper
527  *  functions as noinline.)
528  */
529 static struct held_lock *check_source, *check_target;
530
531 /*
532  * Print a dependency chain entry (this is only done when a deadlock
533  * has been detected):
534  */
535 static noinline int
536 print_circular_bug_entry(struct lock_list *target, unsigned int depth)
537 {
538         if (debug_locks_silent)
539                 return 0;
540         printk("\n-> #%u", depth);
541         print_lock_name(target->class);
542         printk(":\n");
543         print_stack_trace(&target->trace, 6);
544
545         return 0;
546 }
547
548 static void print_kernel_version(void)
549 {
550         printk("%s %.*s\n", init_utsname()->release,
551                 (int)strcspn(init_utsname()->version, " "),
552                 init_utsname()->version);
553 }
554
555 /*
556  * When a circular dependency is detected, print the
557  * header first:
558  */
559 static noinline int
560 print_circular_bug_header(struct lock_list *entry, unsigned int depth)
561 {
562         struct task_struct *curr = current;
563
564         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
565                 return 0;
566
567         printk("\n=======================================================\n");
568         printk(  "[ INFO: possible circular locking dependency detected ]\n");
569         print_kernel_version();
570         printk(  "-------------------------------------------------------\n");
571         printk("%s/%d is trying to acquire lock:\n",
572                 curr->comm, curr->pid);
573         print_lock(check_source);
574         printk("\nbut task is already holding lock:\n");
575         print_lock(check_target);
576         printk("\nwhich lock already depends on the new lock.\n\n");
577         printk("\nthe existing dependency chain (in reverse order) is:\n");
578
579         print_circular_bug_entry(entry, depth);
580
581         return 0;
582 }
583
584 static noinline int print_circular_bug_tail(void)
585 {
586         struct task_struct *curr = current;
587         struct lock_list this;
588
589         if (debug_locks_silent)
590                 return 0;
591
592         this.class = check_source->class;
593         if (!save_trace(&this.trace))
594                 return 0;
595
596         print_circular_bug_entry(&this, 0);
597
598         printk("\nother info that might help us debug this:\n\n");
599         lockdep_print_held_locks(curr);
600
601         printk("\nstack backtrace:\n");
602         dump_stack();
603
604         return 0;
605 }
606
607 #define RECURSION_LIMIT 40
608
609 static int noinline print_infinite_recursion_bug(void)
610 {
611         if (!debug_locks_off_graph_unlock())
612                 return 0;
613
614         WARN_ON(1);
615
616         return 0;
617 }
618
619 /*
620  * Prove that the dependency graph starting at <entry> can not
621  * lead to <target>. Print an error and return 0 if it does.
622  */
623 static noinline int
624 check_noncircular(struct lock_class *source, unsigned int depth)
625 {
626         struct lock_list *entry;
627
628         debug_atomic_inc(&nr_cyclic_check_recursions);
629         if (depth > max_recursion_depth)
630                 max_recursion_depth = depth;
631         if (depth >= RECURSION_LIMIT)
632                 return print_infinite_recursion_bug();
633         /*
634          * Check this lock's dependency list:
635          */
636         list_for_each_entry(entry, &source->locks_after, entry) {
637                 if (entry->class == check_target->class)
638                         return print_circular_bug_header(entry, depth+1);
639                 debug_atomic_inc(&nr_cyclic_checks);
640                 if (!check_noncircular(entry->class, depth+1))
641                         return print_circular_bug_entry(entry, depth+1);
642         }
643         return 1;
644 }
645
646 static int very_verbose(struct lock_class *class)
647 {
648 #if VERY_VERBOSE
649         return class_filter(class);
650 #endif
651         return 0;
652 }
653 #ifdef CONFIG_TRACE_IRQFLAGS
654
655 /*
656  * Forwards and backwards subgraph searching, for the purposes of
657  * proving that two subgraphs can be connected by a new dependency
658  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
659  */
660 static enum lock_usage_bit find_usage_bit;
661 static struct lock_class *forwards_match, *backwards_match;
662
663 /*
664  * Find a node in the forwards-direction dependency sub-graph starting
665  * at <source> that matches <find_usage_bit>.
666  *
667  * Return 2 if such a node exists in the subgraph, and put that node
668  * into <forwards_match>.
669  *
670  * Return 1 otherwise and keep <forwards_match> unchanged.
671  * Return 0 on error.
672  */
673 static noinline int
674 find_usage_forwards(struct lock_class *source, unsigned int depth)
675 {
676         struct lock_list *entry;
677         int ret;
678
679         if (depth > max_recursion_depth)
680                 max_recursion_depth = depth;
681         if (depth >= RECURSION_LIMIT)
682                 return print_infinite_recursion_bug();
683
684         debug_atomic_inc(&nr_find_usage_forwards_checks);
685         if (source->usage_mask & (1 << find_usage_bit)) {
686                 forwards_match = source;
687                 return 2;
688         }
689
690         /*
691          * Check this lock's dependency list:
692          */
693         list_for_each_entry(entry, &source->locks_after, entry) {
694                 debug_atomic_inc(&nr_find_usage_forwards_recursions);
695                 ret = find_usage_forwards(entry->class, depth+1);
696                 if (ret == 2 || ret == 0)
697                         return ret;
698         }
699         return 1;
700 }
701
702 /*
703  * Find a node in the backwards-direction dependency sub-graph starting
704  * at <source> that matches <find_usage_bit>.
705  *
706  * Return 2 if such a node exists in the subgraph, and put that node
707  * into <backwards_match>.
708  *
709  * Return 1 otherwise and keep <backwards_match> unchanged.
710  * Return 0 on error.
711  */
712 static noinline int
713 find_usage_backwards(struct lock_class *source, unsigned int depth)
714 {
715         struct lock_list *entry;
716         int ret;
717
718         if (!__raw_spin_is_locked(&lockdep_lock))
719                 return DEBUG_LOCKS_WARN_ON(1);
720
721         if (depth > max_recursion_depth)
722                 max_recursion_depth = depth;
723         if (depth >= RECURSION_LIMIT)
724                 return print_infinite_recursion_bug();
725
726         debug_atomic_inc(&nr_find_usage_backwards_checks);
727         if (source->usage_mask & (1 << find_usage_bit)) {
728                 backwards_match = source;
729                 return 2;
730         }
731
732         /*
733          * Check this lock's dependency list:
734          */
735         list_for_each_entry(entry, &source->locks_before, entry) {
736                 debug_atomic_inc(&nr_find_usage_backwards_recursions);
737                 ret = find_usage_backwards(entry->class, depth+1);
738                 if (ret == 2 || ret == 0)
739                         return ret;
740         }
741         return 1;
742 }
743
744 static int
745 print_bad_irq_dependency(struct task_struct *curr,
746                          struct held_lock *prev,
747                          struct held_lock *next,
748                          enum lock_usage_bit bit1,
749                          enum lock_usage_bit bit2,
750                          const char *irqclass)
751 {
752         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
753                 return 0;
754
755         printk("\n======================================================\n");
756         printk(  "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
757                 irqclass, irqclass);
758         print_kernel_version();
759         printk(  "------------------------------------------------------\n");
760         printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
761                 curr->comm, curr->pid,
762                 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
763                 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
764                 curr->hardirqs_enabled,
765                 curr->softirqs_enabled);
766         print_lock(next);
767
768         printk("\nand this task is already holding:\n");
769         print_lock(prev);
770         printk("which would create a new lock dependency:\n");
771         print_lock_name(prev->class);
772         printk(" ->");
773         print_lock_name(next->class);
774         printk("\n");
775
776         printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
777                 irqclass);
778         print_lock_name(backwards_match);
779         printk("\n... which became %s-irq-safe at:\n", irqclass);
780
781         print_stack_trace(backwards_match->usage_traces + bit1, 1);
782
783         printk("\nto a %s-irq-unsafe lock:\n", irqclass);
784         print_lock_name(forwards_match);
785         printk("\n... which became %s-irq-unsafe at:\n", irqclass);
786         printk("...");
787
788         print_stack_trace(forwards_match->usage_traces + bit2, 1);
789
790         printk("\nother info that might help us debug this:\n\n");
791         lockdep_print_held_locks(curr);
792
793         printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass);
794         print_lock_dependencies(backwards_match, 0);
795
796         printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass);
797         print_lock_dependencies(forwards_match, 0);
798
799         printk("\nstack backtrace:\n");
800         dump_stack();
801
802         return 0;
803 }
804
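/*
 * (Editorial summary of check_usage() below: it verifies that the new
 * prev -> next dependency does not connect any lock marked with
 * <bit_backwards> in the backwards subgraph of <prev> to a lock marked
 * with <bit_forwards> in the forwards subgraph of <next>. It returns 1
 * if no such connection exists, and 0 on internal error or after
 * printing a bad-irq-dependency report.)
 */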
805 static int
806 check_usage(struct task_struct *curr, struct held_lock *prev,
807             struct held_lock *next, enum lock_usage_bit bit_backwards,
808             enum lock_usage_bit bit_forwards, const char *irqclass)
809 {
810         int ret;
811
812         find_usage_bit = bit_backwards;
813         /* fills in <backwards_match> */
814         ret = find_usage_backwards(prev->class, 0);
815         if (!ret || ret == 1)
816                 return ret;
817
818         find_usage_bit = bit_forwards;
819         ret = find_usage_forwards(next->class, 0);
820         if (!ret || ret == 1)
821                 return ret;
822         /* ret == 2 */
823         return print_bad_irq_dependency(curr, prev, next,
824                         bit_backwards, bit_forwards, irqclass);
825 }
826
827 #endif
828
829 static int
830 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
831                    struct held_lock *next)
832 {
833         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
834                 return 0;
835
836         printk("\n=============================================\n");
837         printk(  "[ INFO: possible recursive locking detected ]\n");
838         print_kernel_version();
839         printk(  "---------------------------------------------\n");
840         printk("%s/%d is trying to acquire lock:\n",
841                 curr->comm, curr->pid);
842         print_lock(next);
843         printk("\nbut task is already holding lock:\n");
844         print_lock(prev);
845
846         printk("\nother info that might help us debug this:\n");
847         lockdep_print_held_locks(curr);
848
849         printk("\nstack backtrace:\n");
850         dump_stack();
851
852         return 0;
853 }
854
855 /*
856  * Check whether we are holding such a class already.
857  *
858  * (Note that this has to be done separately, because the graph cannot
859  * detect such classes of deadlocks.)
860  *
861  * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
862  */
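/*
 * Illustrative example (not from the original file): taking the same
 * lock class twice in one context, e.g.
 *
 *	spin_lock(&lock);
 *	spin_lock(&lock);
 *
 * is caught here, while a recursive read such as
 *
 *	read_lock(&rwlock);
 *	read_lock(&rwlock);
 *
 * is explicitly allowed and maps to the "return 2" case below.
 */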
863 static int
864 check_deadlock(struct task_struct *curr, struct held_lock *next,
865                struct lockdep_map *next_instance, int read)
866 {
867         struct held_lock *prev;
868         int i;
869
870         for (i = 0; i < curr->lockdep_depth; i++) {
871                 prev = curr->held_locks + i;
872                 if (prev->class != next->class)
873                         continue;
874                 /*
875                  * Allow read-after-read recursion of the same
876                  * lock class (i.e. read_lock(lock)+read_lock(lock)):
877                  */
878                 if ((read == 2) && prev->read)
879                         return 2;
880                 return print_deadlock_bug(curr, prev, next);
881         }
882         return 1;
883 }
884
885 /*
886  * There was a chain-cache miss, and we are about to add a new dependency
887  * to a previous lock. We recursively validate the following rules:
888  *
889  *  - would the adding of the <prev> -> <next> dependency create a
890  *    circular dependency in the graph? [== circular deadlock]
891  *
892  *  - does the new prev->next dependency connect any hardirq-safe lock
893  *    (in the full backwards-subgraph starting at <prev>) with any
894  *    hardirq-unsafe lock (in the full forwards-subgraph starting at
895  *    <next>)? [== illegal lock inversion with hardirq contexts]
896  *
897  *  - does the new prev->next dependency connect any softirq-safe lock
898  *    (in the full backwards-subgraph starting at <prev>) with any
899  *    softirq-unsafe lock (in the full forwards-subgraph starting at
900  *    <next>)? [== illegal lock inversion with softirq contexts]
901  *
902  * any of these scenarios could lead to a deadlock.
903  *
904  * Then if all the validations pass, we add the forwards and backwards
905  * dependency.
906  */
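/*
 * Illustrative ABBA example (not from the original file): if one context
 * has already done
 *
 *	spin_lock(&A); spin_lock(&B);
 *
 * (recording the A -> B dependency), then a context doing
 *
 *	spin_lock(&B); spin_lock(&A);
 *
 * would add B -> A and close a cycle with the existing A -> B edge;
 * the circular-dependency check below reports exactly this case.
 */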
907 static int
908 check_prev_add(struct task_struct *curr, struct held_lock *prev,
909                struct held_lock *next)
910 {
911         struct lock_list *entry;
912         int ret;
913
914         /*
915          * Prove that the new <prev> -> <next> dependency would not
916          * create a circular dependency in the graph. (We do this by
917          * forward-recursing into the graph starting at <next>, and
918          * checking whether we can reach <prev>.)
919          *
920          * We are using global variables to control the recursion, to
921          * keep the stackframe size of the recursive functions low:
922          */
923         check_source = next;
924         check_target = prev;
925         if (!(check_noncircular(next->class, 0)))
926                 return print_circular_bug_tail();
927
928 #ifdef CONFIG_TRACE_IRQFLAGS
929         /*
930          * Prove that the new dependency does not connect a hardirq-safe
931          * lock with a hardirq-unsafe lock - to achieve this we search
932          * the backwards-subgraph starting at <prev>, and the
933          * forwards-subgraph starting at <next>:
934          */
935         if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
936                                         LOCK_ENABLED_HARDIRQS, "hard"))
937                 return 0;
938
939         /*
940          * Prove that the new dependency does not connect a hardirq-safe-read
941          * lock with a hardirq-unsafe lock - to achieve this we search
942          * the backwards-subgraph starting at <prev>, and the
943          * forwards-subgraph starting at <next>:
944          */
945         if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
946                                         LOCK_ENABLED_HARDIRQS, "hard-read"))
947                 return 0;
948
949         /*
950          * Prove that the new dependency does not connect a softirq-safe
951          * lock with a softirq-unsafe lock - to achieve this we search
952          * the backwards-subgraph starting at <prev>, and the
953          * forwards-subgraph starting at <next>:
954          */
955         if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
956                                         LOCK_ENABLED_SOFTIRQS, "soft"))
957                 return 0;
958         /*
959          * Prove that the new dependency does not connect a softirq-safe-read
960          * lock with a softirq-unsafe lock - to achieve this we search
961          * the backwards-subgraph starting at <prev>, and the
962          * forwards-subgraph starting at <next>:
963          */
964         if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
965                                         LOCK_ENABLED_SOFTIRQS, "soft"))
966                 return 0;
967 #endif
968         /*
969          * For recursive read-locks we do all the dependency checks,
970          * but we don't store read-triggered dependencies (only
971          * write-triggered dependencies). This ensures that only the
972          * write-side dependencies matter, and that if for example a
973          * write-lock never takes any other locks, then the reads are
974          * equivalent to a NOP.
975          */
976         if (next->read == 2 || prev->read == 2)
977                 return 1;
978         /*
979          * Is the <prev> -> <next> dependency already present?
980          *
981          * (this may occur even though this is a new chain: consider
982          *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
983          *  chains - the second one will be new, but L1 already has
984          *  L2 added to its dependency list, due to the first chain.)
985          */
986         list_for_each_entry(entry, &prev->class->locks_after, entry) {
987                 if (entry->class == next->class)
988                         return 2;
989         }
990
991         /*
992          * Ok, all validations passed, add the new lock
993          * to the previous lock's dependency list:
994          */
995         ret = add_lock_to_list(prev->class, next->class,
996                                &prev->class->locks_after, next->acquire_ip);
997         if (!ret)
998                 return 0;
999
1000         ret = add_lock_to_list(next->class, prev->class,
1001                                &next->class->locks_before, next->acquire_ip);
1002         if (!ret)
1003                 return 0;
1004
1005         /*
1006          * Debugging printouts:
1007          */
1008         if (verbose(prev->class) || verbose(next->class)) {
1009                 graph_unlock();
1010                 printk("\n new dependency: ");
1011                 print_lock_name(prev->class);
1012                 printk(" => ");
1013                 print_lock_name(next->class);
1014                 printk("\n");
1015                 dump_stack();
1016                 return graph_lock();
1017         }
1018         return 1;
1019 }
1020
1021 /*
1022  * Add the dependency to all directly-previous locks that are 'relevant'.
1023  * The ones that are relevant are (in increasing distance from curr):
1024  * all consecutive trylock entries and the final non-trylock entry - or
1025  * the end of this context's lock-chain - whichever comes first.
1026  */
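/*
 * Illustrative example (not from the original file): with a lock stack of
 *
 *	spin_lock(&L1); spin_lock(&L2); spin_trylock(&L3); spin_trylock(&L4);
 *
 * acquiring a new lock L5 records the direct dependencies L4 -> L5,
 * L3 -> L5 and L2 -> L5; L1 is reached only indirectly through L2, so
 * the walk below stops at the first non-trylock entry.
 */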
1027 static int
1028 check_prevs_add(struct task_struct *curr, struct held_lock *next)
1029 {
1030         int depth = curr->lockdep_depth;
1031         struct held_lock *hlock;
1032
1033         /*
1034          * Debugging checks.
1035          *
1036          * Depth must not be zero for a non-head lock:
1037          */
1038         if (!depth)
1039                 goto out_bug;
1040         /*
1041          * At least two relevant locks must exist for this
1042          * to be a head:
1043          */
1044         if (curr->held_locks[depth].irq_context !=
1045                         curr->held_locks[depth-1].irq_context)
1046                 goto out_bug;
1047
1048         for (;;) {
1049                 hlock = curr->held_locks + depth-1;
1050                 /*
1051                  * Only non-recursive-read entries get new dependencies
1052                  * added:
1053                  */
1054                 if (hlock->read != 2) {
1055                         if (!check_prev_add(curr, hlock, next))
1056                                 return 0;
1057                         /*
1058                          * Stop after the first non-trylock entry,
1059                          * as non-trylock entries have added their
1060                          * own direct dependencies already, so this
1061                          * lock is connected to them indirectly:
1062                          */
1063                         if (!hlock->trylock)
1064                                 break;
1065                 }
1066                 depth--;
1067                 /*
1068                  * End of lock-stack?
1069                  */
1070                 if (!depth)
1071                         break;
1072                 /*
1073                  * Stop the search if we cross into another context:
1074                  */
1075                 if (curr->held_locks[depth].irq_context !=
1076                                 curr->held_locks[depth-1].irq_context)
1077                         break;
1078         }
1079         return 1;
1080 out_bug:
1081         if (!debug_locks_off_graph_unlock())
1082                 return 0;
1083
1084         WARN_ON(1);
1085
1086         return 0;
1087 }
1088
1089
1090 /*
1091  * Is this the address of a static object:
1092  */
1093 static int static_obj(void *obj)
1094 {
1095         unsigned long start = (unsigned long) &_stext,
1096                       end   = (unsigned long) &_end,
1097                       addr  = (unsigned long) obj;
1098 #ifdef CONFIG_SMP
1099         int i;
1100 #endif
1101
1102         /*
1103          * static variable?
1104          */
1105         if ((addr >= start) && (addr < end))
1106                 return 1;
1107
1108 #ifdef CONFIG_SMP
1109         /*
1110          * percpu var?
1111          */
1112         for_each_possible_cpu(i) {
1113                 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
1114                 end   = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
1115                                         + per_cpu_offset(i);
1116
1117                 if ((addr >= start) && (addr < end))
1118                         return 1;
1119         }
1120 #endif
1121
1122         /*
1123          * module var?
1124          */
1125         return is_module_address(addr);
1126 }
1127
1128 /*
1129  * To make lock name printouts unique, we calculate a unique
1130  * class->name_version generation counter:
1131  */
1132 static int count_matching_names(struct lock_class *new_class)
1133 {
1134         struct lock_class *class;
1135         int count = 0;
1136
1137         if (!new_class->name)
1138                 return 0;
1139
1140         list_for_each_entry(class, &all_lock_classes, lock_entry) {
1141                 if (new_class->key - new_class->subclass == class->key)
1142                         return class->name_version;
1143                 if (class->name && !strcmp(class->name, new_class->name))
1144                         count = max(count, class->name_version);
1145         }
1146
1147         return count + 1;
1148 }
1149
1150 /*
1151  * Register a lock's class in the hash-table, if the class is not present
1152  * yet. Otherwise we look it up. We cache the result in the lock object
1153  * itself, so actual lookup of the hash should be once per lock object.
1154  */
1155 static inline struct lock_class *
1156 look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
1157 {
1158         struct lockdep_subclass_key *key;
1159         struct list_head *hash_head;
1160         struct lock_class *class;
1161
1162 #ifdef CONFIG_DEBUG_LOCKDEP
1163         /*
1164          * If the architecture calls into lockdep before initializing
1165          * the hashes then we'll warn about it later. (we cannot printk
1166          * right now)
1167          */
1168         if (unlikely(!lockdep_initialized)) {
1169                 lockdep_init();
1170                 lockdep_init_error = 1;
1171         }
1172 #endif
1173
1174         /*
1175          * Static locks do not have their class-keys yet - for them the key
1176          * is the lock object itself:
1177          */
1178         if (unlikely(!lock->key))
1179                 lock->key = (void *)lock;
1180
1181         /*
1182          * NOTE: the class-key must be unique. For dynamic locks, a static
1183          * lock_class_key variable is passed in through the mutex_init()
1184          * (or spin_lock_init()) call - which acts as the key. For static
1185          * locks we use the lock object itself as the key.
1186          */
1187         BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(struct lock_class));
1188
1189         key = lock->key->subkeys + subclass;
1190
1191         hash_head = classhashentry(key);
1192
1193         /*
1194          * We can walk the hash lockfree, because the hash only
1195          * grows, and we are careful when adding entries to the end:
1196          */
1197         list_for_each_entry(class, hash_head, hash_entry)
1198                 if (class->key == key)
1199                         return class;
1200
1201         return NULL;
1202 }
1203
1204 /*
1205  * Register a lock's class in the hash-table, if the class is not present
1206  * yet. Otherwise we look it up. We cache the result in the lock object
1207  * itself, so actual lookup of the hash should be once per lock object.
1208  */
1209 static inline struct lock_class *
1210 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
1211 {
1212         struct lockdep_subclass_key *key;
1213         struct list_head *hash_head;
1214         struct lock_class *class;
1215         unsigned long flags;
1216
1217         class = look_up_lock_class(lock, subclass);
1218         if (likely(class))
1219                 return class;
1220
1221         /*
1222          * Debug-check: all keys must be persistent!
1223          */
1224         if (!static_obj(lock->key)) {
1225                 debug_locks_off();
1226                 printk("INFO: trying to register non-static key.\n");
1227                 printk("the code is fine but needs lockdep annotation.\n");
1228                 printk("turning off the locking correctness validator.\n");
1229                 dump_stack();
1230
1231                 return NULL;
1232         }
1233
1234         key = lock->key->subkeys + subclass;
1235         hash_head = classhashentry(key);
1236
1237         raw_local_irq_save(flags);
1238         if (!graph_lock()) {
1239                 raw_local_irq_restore(flags);
1240                 return NULL;
1241         }
1242         /*
1243          * We have to do the hash-walk again, to avoid races
1244          * with another CPU:
1245          */
1246         list_for_each_entry(class, hash_head, hash_entry)
1247                 if (class->key == key)
1248                         goto out_unlock_set;
1249         /*
1250          * Allocate a new key from the static array, and add it to
1251          * the hash:
1252          */
1253         if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
1254                 if (!debug_locks_off_graph_unlock()) {
1255                         raw_local_irq_restore(flags);
1256                         return NULL;
1257                 }
1258                 raw_local_irq_restore(flags);
1259
1260                 printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
1261                 printk("turning off the locking correctness validator.\n");
1262                 return NULL;
1263         }
1264         class = lock_classes + nr_lock_classes++;
1265         debug_atomic_inc(&nr_unused_locks);
1266         class->key = key;
1267         class->name = lock->name;
1268         class->subclass = subclass;
1269         INIT_LIST_HEAD(&class->lock_entry);
1270         INIT_LIST_HEAD(&class->locks_before);
1271         INIT_LIST_HEAD(&class->locks_after);
1272         class->name_version = count_matching_names(class);
1273         /*
1274          * We use RCU's safe list-add method to make
1275          * parallel walking of the hash-list safe:
1276          */
1277         list_add_tail_rcu(&class->hash_entry, hash_head);
1278
1279         if (verbose(class)) {
1280                 graph_unlock();
1281                 raw_local_irq_restore(flags);
1282
1283                 printk("\nnew class %p: %s", class->key, class->name);
1284                 if (class->name_version > 1)
1285                         printk("#%d", class->name_version);
1286                 printk("\n");
1287                 dump_stack();
1288
1289                 raw_local_irq_save(flags);
1290                 if (!graph_lock()) {
1291                         raw_local_irq_restore(flags);
1292                         return NULL;
1293                 }
1294         }
1295 out_unlock_set:
1296         graph_unlock();
1297         raw_local_irq_restore(flags);
1298
1299         if (!subclass || force)
1300                 lock->class_cache = class;
1301
1302         if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
1303                 return NULL;
1304
1305         return class;
1306 }
1307
1308 /*
1309  * Look up a dependency chain. If the key is already hashed, return 0.
1310  * If the key is not present yet then add it and return 1 - in this
1311  * case the caller validates the new dependency chain.
1312  */
1313 static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class)
1314 {
1315         struct list_head *hash_head = chainhashentry(chain_key);
1316         struct lock_chain *chain;
1317
1318         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1319                 return 0;
1320         /*
1321          * We can walk it lock-free, because entries only get added
1322          * to the hash:
1323          */
1324         list_for_each_entry(chain, hash_head, entry) {
1325                 if (chain->chain_key == chain_key) {
1326 cache_hit:
1327                         debug_atomic_inc(&chain_lookup_hits);
1328                         if (very_verbose(class))
1329                                 printk("\nhash chain already cached, key: "
1330                                         "%016Lx tail class: [%p] %s\n",
1331                                         (unsigned long long)chain_key,
1332                                         class->key, class->name);
1333                         return 0;
1334                 }
1335         }
1336         if (very_verbose(class))
1337                 printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
1338                         (unsigned long long)chain_key, class->key, class->name);
1339         /*
1340          * Allocate a new chain entry from the static array, and add
1341          * it to the hash:
1342          */
1343         if (!graph_lock())
1344                 return 0;
1345         /*
1346          * We have to walk the chain again locked - to avoid duplicates:
1347          */
1348         list_for_each_entry(chain, hash_head, entry) {
1349                 if (chain->chain_key == chain_key) {
1350                         graph_unlock();
1351                         goto cache_hit;
1352                 }
1353         }
1354         if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
1355                 if (!debug_locks_off_graph_unlock())
1356                         return 0;
1357
1358                 printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
1359                 printk("turning off the locking correctness validator.\n");
1360                 return 0;
1361         }
1362         chain = lock_chains + nr_lock_chains++;
1363         chain->chain_key = chain_key;
1364         list_add_tail_rcu(&chain->entry, hash_head);
1365         debug_atomic_inc(&chain_lookup_misses);
1366 #ifdef CONFIG_TRACE_IRQFLAGS
1367         if (current->hardirq_context)
1368                 nr_hardirq_chains++;
1369         else {
1370                 if (current->softirq_context)
1371                         nr_softirq_chains++;
1372                 else
1373                         nr_process_chains++;
1374         }
1375 #else
1376         nr_process_chains++;
1377 #endif
1378
1379         return 1;
1380 }
1381
1382 /*
1383  * We are building curr_chain_key incrementally, so double-check
1384  * it from scratch, to make sure that it's done correctly:
1385  */
1386 static void check_chain_key(struct task_struct *curr)
1387 {
1388 #ifdef CONFIG_DEBUG_LOCKDEP
1389         struct held_lock *hlock, *prev_hlock = NULL;
1390         unsigned int i, id;
1391         u64 chain_key = 0;
1392
1393         for (i = 0; i < curr->lockdep_depth; i++) {
1394                 hlock = curr->held_locks + i;
1395                 if (chain_key != hlock->prev_chain_key) {
1396                         debug_locks_off();
1397                         printk("hm#1, depth: %u [%u], %016Lx != %016Lx\n",
1398                                 curr->lockdep_depth, i,
1399                                 (unsigned long long)chain_key,
1400                                 (unsigned long long)hlock->prev_chain_key);
1401                         WARN_ON(1);
1402                         return;
1403                 }
1404                 id = hlock->class - lock_classes;
1405                 if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
1406                         return;
1407
1408                 if (prev_hlock && (prev_hlock->irq_context !=
1409                                                         hlock->irq_context))
1410                         chain_key = 0;
1411                 chain_key = iterate_chain_key(chain_key, id);
1412                 prev_hlock = hlock;
1413         }
1414         if (chain_key != curr->curr_chain_key) {
1415                 debug_locks_off();
1416                 printk("hm#2, depth: %u [%u], %016Lx != %016Lx\n",
1417                         curr->lockdep_depth, i,
1418                         (unsigned long long)chain_key,
1419                         (unsigned long long)curr->curr_chain_key);
1420                 WARN_ON(1);
1421         }
1422 #endif
1423 }
1424
1425 #ifdef CONFIG_TRACE_IRQFLAGS
1426
1427 /*
1428  * print irq inversion bug:
1429  */
1430 static int
1431 print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
1432                         struct held_lock *this, int forwards,
1433                         const char *irqclass)
1434 {
1435         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1436                 return 0;
1437
1438         printk("\n=========================================================\n");
1439         printk(  "[ INFO: possible irq lock inversion dependency detected ]\n");
1440         print_kernel_version();
1441         printk(  "---------------------------------------------------------\n");
1442         printk("%s/%d just changed the state of lock:\n",
1443                 curr->comm, curr->pid);
1444         print_lock(this);
1445         if (forwards)
1446                 printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
1447         else
1448                 printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass);
1449         print_lock_name(other);
1450         printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
1451
1452         printk("\nother info that might help us debug this:\n");
1453         lockdep_print_held_locks(curr);
1454
1455         printk("\nthe first lock's dependencies:\n");
1456         print_lock_dependencies(this->class, 0);
1457
1458         printk("\nthe second lock's dependencies:\n");
1459         print_lock_dependencies(other, 0);
1460
1461         printk("\nstack backtrace:\n");
1462         dump_stack();
1463
1464         return 0;
1465 }
1466
1467 /*
1468  * Prove that in the forwards-direction subgraph starting at <this>
1469  * there is no lock matching <mask>:
1470  */
1471 static int
1472 check_usage_forwards(struct task_struct *curr, struct held_lock *this,
1473                      enum lock_usage_bit bit, const char *irqclass)
1474 {
1475         int ret;
1476
1477         find_usage_bit = bit;
1478         /* fills in <forwards_match> */
1479         ret = find_usage_forwards(this->class, 0);
1480         if (!ret || ret == 1)
1481                 return ret;
1482
1483         return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
1484 }
1485
1486 /*
1487  * Prove that in the backwards-direction subgraph starting at <this>
1488  * there is no lock matching <mask>:
1489  */
1490 static int
1491 check_usage_backwards(struct task_struct *curr, struct held_lock *this,
1492                       enum lock_usage_bit bit, const char *irqclass)
1493 {
1494         int ret;
1495
1496         find_usage_bit = bit;
1497         /* fills in <backwards_match> */
1498         ret = find_usage_backwards(this->class, 0);
1499         if (!ret || ret == 1)
1500                 return ret;
1501
1502         return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
1503 }
1504
1505 void print_irqtrace_events(struct task_struct *curr)
1506 {
1507         printk("irq event stamp: %u\n", curr->irq_events);
1508         printk("hardirqs last  enabled at (%u): ", curr->hardirq_enable_event);
1509         print_ip_sym(curr->hardirq_enable_ip);
1510         printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
1511         print_ip_sym(curr->hardirq_disable_ip);
1512         printk("softirqs last  enabled at (%u): ", curr->softirq_enable_event);
1513         print_ip_sym(curr->softirq_enable_ip);
1514         printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
1515         print_ip_sym(curr->softirq_disable_ip);
1516 }
1517
1518 #endif
1519
1520 static int
1521 print_usage_bug(struct task_struct *curr, struct held_lock *this,
1522                 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
1523 {
1524         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1525                 return 0;
1526
1527         printk("\n=================================\n");
1528         printk(  "[ INFO: inconsistent lock state ]\n");
1529         print_kernel_version();
1530         printk(  "---------------------------------\n");
1531
1532         printk("inconsistent {%s} -> {%s} usage.\n",
1533                 usage_str[prev_bit], usage_str[new_bit]);
1534
1535         printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
1536                 curr->comm, curr->pid,
1537                 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
1538                 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
1539                 trace_hardirqs_enabled(curr),
1540                 trace_softirqs_enabled(curr));
1541         print_lock(this);
1542
1543         printk("{%s} state was registered at:\n", usage_str[prev_bit]);
1544         print_stack_trace(this->class->usage_traces + prev_bit, 1);
1545
1546         print_irqtrace_events(curr);
1547         printk("\nother info that might help us debug this:\n");
1548         lockdep_print_held_locks(curr);
1549
1550         printk("\nstack backtrace:\n");
1551         dump_stack();
1552
1553         return 0;
1554 }
1555
1556 /*
1557  * Print out an error if an invalid bit is set:
1558  */
1559 static inline int
1560 valid_state(struct task_struct *curr, struct held_lock *this,
1561             enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
1562 {
1563         if (unlikely(this->class->usage_mask & (1 << bad_bit)))
1564                 return print_usage_bug(curr, this, bad_bit, new_bit);
1565         return 1;
1566 }
1567
1568 #define STRICT_READ_CHECKS      1
1569
1570 /*
1571  * Mark a lock with a usage bit, and validate the state transition:
1572  */
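/*
 * Returns 0 if the validator has been turned off, 1 on success, and
 * 2 on success for a class in verbose mode - in which case the lock,
 * the irq-trace events and a backtrace are printed once the graph
 * lock has been dropped.
 */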
1573 static int mark_lock(struct task_struct *curr, struct held_lock *this,
1574                      enum lock_usage_bit new_bit, unsigned long ip)
1575 {
1576         unsigned int new_mask = 1 << new_bit, ret = 1;
1577
1578         /*
1579          * If already set then do not dirty the cacheline,
1580          * nor do any checks:
1581          */
1582         if (likely(this->class->usage_mask & new_mask))
1583                 return 1;
1584
1585         if (!graph_lock())
1586                 return 0;
1587         /*
1588          * Make sure we didn't race:
1589          */
1590         if (unlikely(this->class->usage_mask & new_mask)) {
1591                 graph_unlock();
1592                 return 1;
1593         }
1594
1595         this->class->usage_mask |= new_mask;
1596
1597 #ifdef CONFIG_TRACE_IRQFLAGS
1598         if (new_bit == LOCK_ENABLED_HARDIRQS ||
1599                         new_bit == LOCK_ENABLED_HARDIRQS_READ)
1600                 ip = curr->hardirq_enable_ip;
1601         else if (new_bit == LOCK_ENABLED_SOFTIRQS ||
1602                         new_bit == LOCK_ENABLED_SOFTIRQS_READ)
1603                 ip = curr->softirq_enable_ip;
1604 #endif
1605         if (!save_trace(this->class->usage_traces + new_bit))
1606                 return 0;
1607
1608         switch (new_bit) {
1609 #ifdef CONFIG_TRACE_IRQFLAGS
1610         case LOCK_USED_IN_HARDIRQ:
1611                 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
1612                         return 0;
1613                 if (!valid_state(curr, this, new_bit,
1614                                  LOCK_ENABLED_HARDIRQS_READ))
1615                         return 0;
1616                 /*
1617                  * just marked it hardirq-safe, check that this lock
1618                  * took no hardirq-unsafe lock in the past:
1619                  */
1620                 if (!check_usage_forwards(curr, this,
1621                                           LOCK_ENABLED_HARDIRQS, "hard"))
1622                         return 0;
1623 #if STRICT_READ_CHECKS
1624                 /*
1625                  * just marked it hardirq-safe, check that this lock
1626                  * took no hardirq-unsafe-read lock in the past:
1627                  */
1628                 if (!check_usage_forwards(curr, this,
1629                                 LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
1630                         return 0;
1631 #endif
1632                 if (hardirq_verbose(this->class))
1633                         ret = 2;
1634                 break;
1635         case LOCK_USED_IN_SOFTIRQ:
1636                 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
1637                         return 0;
1638                 if (!valid_state(curr, this, new_bit,
1639                                  LOCK_ENABLED_SOFTIRQS_READ))
1640                         return 0;
1641                 /*
1642                  * just marked it softirq-safe, check that this lock
1643                  * took no softirq-unsafe lock in the past:
1644                  */
1645                 if (!check_usage_forwards(curr, this,
1646                                           LOCK_ENABLED_SOFTIRQS, "soft"))
1647                         return 0;
1648 #if STRICT_READ_CHECKS
1649                 /*
1650                  * just marked it softirq-safe, check that this lock
1651                  * took no softirq-unsafe-read lock in the past:
1652                  */
1653                 if (!check_usage_forwards(curr, this,
1654                                 LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
1655                         return 0;
1656 #endif
1657                 if (softirq_verbose(this->class))
1658                         ret = 2;
1659                 break;
1660         case LOCK_USED_IN_HARDIRQ_READ:
1661                 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
1662                         return 0;
1663                 /*
1664                  * just marked it hardirq-read-safe, check that this lock
1665                  * took no hardirq-unsafe lock in the past:
1666                  */
1667                 if (!check_usage_forwards(curr, this,
1668                                           LOCK_ENABLED_HARDIRQS, "hard"))
1669                         return 0;
1670                 if (hardirq_verbose(this->class))
1671                         ret = 2;
1672                 break;
1673         case LOCK_USED_IN_SOFTIRQ_READ:
1674                 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
1675                         return 0;
1676                 /*
1677                  * just marked it softirq-read-safe, check that this lock
1678                  * took no softirq-unsafe lock in the past:
1679                  */
1680                 if (!check_usage_forwards(curr, this,
1681                                           LOCK_ENABLED_SOFTIRQS, "soft"))
1682                         return 0;
1683                 if (softirq_verbose(this->class))
1684                         ret = 2;
1685                 break;
1686         case LOCK_ENABLED_HARDIRQS:
1687                 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
1688                         return 0;
1689                 if (!valid_state(curr, this, new_bit,
1690                                  LOCK_USED_IN_HARDIRQ_READ))
1691                         return 0;
1692                 /*
1693                  * just marked it hardirq-unsafe, check that no hardirq-safe
1694                  * lock in the system ever took it in the past:
1695                  */
1696                 if (!check_usage_backwards(curr, this,
1697                                            LOCK_USED_IN_HARDIRQ, "hard"))
1698                         return 0;
1699 #if STRICT_READ_CHECKS
1700                 /*
1701                  * just marked it hardirq-unsafe, check that no
1702                  * hardirq-safe-read lock in the system ever took
1703                  * it in the past:
1704                  */
1705                 if (!check_usage_backwards(curr, this,
1706                                    LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
1707                         return 0;
1708 #endif
1709                 if (hardirq_verbose(this->class))
1710                         ret = 2;
1711                 break;
1712         case LOCK_ENABLED_SOFTIRQS:
1713                 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
1714                         return 0;
1715                 if (!valid_state(curr, this, new_bit,
1716                                  LOCK_USED_IN_SOFTIRQ_READ))
1717                         return 0;
1718                 /*
1719                  * just marked it softirq-unsafe, check that no softirq-safe
1720                  * lock in the system ever took it in the past:
1721                  */
1722                 if (!check_usage_backwards(curr, this,
1723                                            LOCK_USED_IN_SOFTIRQ, "soft"))
1724                         return 0;
1725 #if STRICT_READ_CHECKS
1726                 /*
1727                  * just marked it softirq-unsafe, check that no
1728                  * softirq-safe-read lock in the system ever took
1729                  * it in the past:
1730                  */
1731                 if (!check_usage_backwards(curr, this,
1732                                    LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
1733                         return 0;
1734 #endif
1735                 if (softirq_verbose(this->class))
1736                         ret = 2;
1737                 break;
1738         case LOCK_ENABLED_HARDIRQS_READ:
1739                 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
1740                         return 0;
1741 #if STRICT_READ_CHECKS
1742                 /*
1743                  * just marked it hardirq-read-unsafe, check that no
1744                  * hardirq-safe lock in the system ever took it in the past:
1745                  */
1746                 if (!check_usage_backwards(curr, this,
1747                                            LOCK_USED_IN_HARDIRQ, "hard"))
1748                         return 0;
1749 #endif
1750                 if (hardirq_verbose(this->class))
1751                         ret = 2;
1752                 break;
1753         case LOCK_ENABLED_SOFTIRQS_READ:
1754                 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
1755                         return 0;
1756 #if STRICT_READ_CHECKS
1757                 /*
1758                  * just marked it softirq-read-unsafe, check that no
1759                  * softirq-safe lock in the system ever took it in the past:
1760                  */
1761                 if (!check_usage_backwards(curr, this,
1762                                            LOCK_USED_IN_SOFTIRQ, "soft"))
1763                         return 0;
1764 #endif
1765                 if (softirq_verbose(this->class))
1766                         ret = 2;
1767                 break;
1768 #endif
1769         case LOCK_USED:
1770                 /*
1771                  * Add it to the global list of classes:
1772                  */
1773                 list_add_tail_rcu(&this->class->lock_entry, &all_lock_classes);
1774                 debug_atomic_dec(&nr_unused_locks);
1775                 break;
1776         default:
1777                 if (!debug_locks_off_graph_unlock())
1778                         return 0;
1779                 WARN_ON(1);
1780                 return 0;
1781         }
1782
1783         graph_unlock();
1784
1785         /*
1786          * We must printk outside of the graph_lock:
1787          */
1788         if (ret == 2) {
1789                 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
1790                 print_lock(this);
1791                 print_irqtrace_events(curr);
1792                 dump_stack();
1793         }
1794
1795         return ret;
1796 }
1797
1798 #ifdef CONFIG_TRACE_IRQFLAGS
1799 /*
1800  * Mark all held locks with a usage bit:
1801  */
1802 static int
1803 mark_held_locks(struct task_struct *curr, int hardirq, unsigned long ip)
1804 {
1805         enum lock_usage_bit usage_bit;
1806         struct held_lock *hlock;
1807         int i;
1808
1809         for (i = 0; i < curr->lockdep_depth; i++) {
1810                 hlock = curr->held_locks + i;
1811
1812                 if (hardirq) {
1813                         if (hlock->read)
1814                                 usage_bit = LOCK_ENABLED_HARDIRQS_READ;
1815                         else
1816                                 usage_bit = LOCK_ENABLED_HARDIRQS;
1817                 } else {
1818                         if (hlock->read)
1819                                 usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
1820                         else
1821                                 usage_bit = LOCK_ENABLED_SOFTIRQS;
1822                 }
1823                 if (!mark_lock(curr, hlock, usage_bit, ip))
1824                         return 0;
1825         }
1826
1827         return 1;
1828 }
1829
1830 /*
1831  * Debugging helper: via this flag we know that we are in
1832  * 'early bootup code', and will warn about any invalid irqs-on event:
1833  */
1834 static int early_boot_irqs_enabled;
1835
1836 void early_boot_irqs_off(void)
1837 {
1838         early_boot_irqs_enabled = 0;
1839 }
1840
1841 void early_boot_irqs_on(void)
1842 {
1843         early_boot_irqs_enabled = 1;
1844 }
1845
1846 /*
1847  * Hardirqs will be enabled:
1848  */
1849 void trace_hardirqs_on(void)
1850 {
1851         struct task_struct *curr = current;
1852         unsigned long ip;
1853
1854         if (unlikely(!debug_locks || current->lockdep_recursion))
1855                 return;
1856
1857         if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
1858                 return;
1859
1860         if (unlikely(curr->hardirqs_enabled)) {
1861                 debug_atomic_inc(&redundant_hardirqs_on);
1862                 return;
1863         }
1864         /* we'll do an OFF -> ON transition: */
1865         curr->hardirqs_enabled = 1;
1866         ip = (unsigned long) __builtin_return_address(0);
1867
1868         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1869                 return;
1870         if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
1871                 return;
1872         /*
1873          * We are going to turn hardirqs on, so set the
1874          * usage bit for all held locks:
1875          */
1876         if (!mark_held_locks(curr, 1, ip))
1877                 return;
1878         /*
1879          * If we have softirqs enabled, then set the usage
1880          * bit for all held locks. (disabled hardirqs prevented
1881          * this bit from being set before)
1882          */
1883         if (curr->softirqs_enabled)
1884                 if (!mark_held_locks(curr, 0, ip))
1885                         return;
1886
1887         curr->hardirq_enable_ip = ip;
1888         curr->hardirq_enable_event = ++curr->irq_events;
1889         debug_atomic_inc(&hardirqs_on_events);
1890 }
1891
1892 EXPORT_SYMBOL(trace_hardirqs_on);
1893
1894 /*
1895  * Hardirqs were disabled:
1896  */
1897 void trace_hardirqs_off(void)
1898 {
1899         struct task_struct *curr = current;
1900
1901         if (unlikely(!debug_locks || current->lockdep_recursion))
1902                 return;
1903
1904         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1905                 return;
1906
1907         if (curr->hardirqs_enabled) {
1908                 /*
1909                  * We have done an ON -> OFF transition:
1910                  */
1911                 curr->hardirqs_enabled = 0;
1912                 curr->hardirq_disable_ip = _RET_IP_;
1913                 curr->hardirq_disable_event = ++curr->irq_events;
1914                 debug_atomic_inc(&hardirqs_off_events);
1915         } else
1916                 debug_atomic_inc(&redundant_hardirqs_off);
1917 }
1918
1919 EXPORT_SYMBOL(trace_hardirqs_off);
1920
1921 /*
1922  * Softirqs will be enabled:
1923  */
1924 void trace_softirqs_on(unsigned long ip)
1925 {
1926         struct task_struct *curr = current;
1927
1928         if (unlikely(!debug_locks))
1929                 return;
1930
1931         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1932                 return;
1933
1934         if (curr->softirqs_enabled) {
1935                 debug_atomic_inc(&redundant_softirqs_on);
1936                 return;
1937         }
1938
1939         /*
1940          * We'll do an OFF -> ON transition:
1941          */
1942         curr->softirqs_enabled = 1;
1943         curr->softirq_enable_ip = ip;
1944         curr->softirq_enable_event = ++curr->irq_events;
1945         debug_atomic_inc(&softirqs_on_events);
1946         /*
1947          * We are going to turn softirqs on, so set the
1948          * usage bit for all held locks, if hardirqs are
1949          * enabled too:
1950          */
1951         if (curr->hardirqs_enabled)
1952                 mark_held_locks(curr, 0, ip);
1953 }
1954
1955 /*
1956  * Softirqs were disabled:
1957  */
1958 void trace_softirqs_off(unsigned long ip)
1959 {
1960         struct task_struct *curr = current;
1961
1962         if (unlikely(!debug_locks))
1963                 return;
1964
1965         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1966                 return;
1967
1968         if (curr->softirqs_enabled) {
1969                 /*
1970                  * We have done an ON -> OFF transition:
1971                  */
1972                 curr->softirqs_enabled = 0;
1973                 curr->softirq_disable_ip = ip;
1974                 curr->softirq_disable_event = ++curr->irq_events;
1975                 debug_atomic_inc(&softirqs_off_events);
1976                 DEBUG_LOCKS_WARN_ON(!softirq_count());
1977         } else
1978                 debug_atomic_inc(&redundant_softirqs_off);
1979 }
1980
1981 #endif
1982
1983 /*
1984  * Initialize a lock instance's lock-class mapping info:
1985  */
1986 void lockdep_init_map(struct lockdep_map *lock, const char *name,
1987                       struct lock_class_key *key, int subclass)
1988 {
1989         if (unlikely(!debug_locks))
1990                 return;
1991
1992         if (DEBUG_LOCKS_WARN_ON(!key))
1993                 return;
1994         if (DEBUG_LOCKS_WARN_ON(!name))
1995                 return;
1996         /*
1997          * Sanity check, the lock-class key must be persistent:
1998          */
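        /*
         * (The key is what identifies the lock class, so it must live
         * in static storage - never on the stack or in dynamically
         * allocated memory.)
         */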
1999         if (!static_obj(key)) {
2000                 printk("BUG: key %p not in .data!\n", key);
2001                 DEBUG_LOCKS_WARN_ON(1);
2002                 return;
2003         }
2004         lock->name = name;
2005         lock->key = key;
2006         lock->class_cache = NULL;
2007         if (subclass)
2008                 register_lock_class(lock, subclass, 1);
2009 }
2010
2011 EXPORT_SYMBOL_GPL(lockdep_init_map);
2012
2013 /*
2014  * This gets called for every mutex_lock*()/spin_lock*() operation.
2015  * We maintain the dependency maps and validate the locking attempt:
2016  */
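/*
 * (subclass selects one of the MAX_LOCKDEP_SUBCLASSES per-key
 * sub-classes, read is 0 for a write-acquire and non-zero for a
 * read-acquire, and only check == 2 runs the full irq-usage and
 * dependency-chain validation below.)
 */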
2017 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2018                           int trylock, int read, int check, int hardirqs_off,
2019                           unsigned long ip)
2020 {
2021         struct task_struct *curr = current;
2022         struct lock_class *class = NULL;
2023         struct held_lock *hlock;
2024         unsigned int depth, id;
2025         int chain_head = 0;
2026         u64 chain_key;
2027
2028         if (unlikely(!debug_locks))
2029                 return 0;
2030
2031         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2032                 return 0;
2033
2034         if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
2035                 debug_locks_off();
2036                 printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
2037                 printk("turning off the locking correctness validator.\n");
2038                 return 0;
2039         }
2040
2041         if (!subclass)
2042                 class = lock->class_cache;
2043         /*
2044          * Not cached yet or subclass?
2045          */
2046         if (unlikely(!class)) {
2047                 class = register_lock_class(lock, subclass, 0);
2048                 if (!class)
2049                         return 0;
2050         }
2051         debug_atomic_inc((atomic_t *)&class->ops);
2052         if (very_verbose(class)) {
2053                 printk("\nacquire class [%p] %s", class->key, class->name);
2054                 if (class->name_version > 1)
2055                         printk("#%d", class->name_version);
2056                 printk("\n");
2057                 dump_stack();
2058         }
2059
2060         /*
2061          * Add the lock to the list of currently held locks.
2062          * (we don't increase the depth just yet, up until the
2063          * dependency checks are done)
2064          */
2065         depth = curr->lockdep_depth;
2066         if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
2067                 return 0;
2068
2069         hlock = curr->held_locks + depth;
2070
2071         hlock->class = class;
2072         hlock->acquire_ip = ip;
2073         hlock->instance = lock;
2074         hlock->trylock = trylock;
2075         hlock->read = read;
2076         hlock->check = check;
2077         hlock->hardirqs_off = hardirqs_off;
2078
2079         if (check != 2)
2080                 goto out_calc_hash;
2081 #ifdef CONFIG_TRACE_IRQFLAGS
2082         /*
2083          * If this is a non-trylock acquire in a hardirq or softirq
2084          * context, mark the lock as used in that context:
2085          */
2086         if (!trylock) {
2087                 if (read) {
2088                         if (curr->hardirq_context)
2089                                 if (!mark_lock(curr, hlock,
2090                                                 LOCK_USED_IN_HARDIRQ_READ, ip))
2091                                         return 0;
2092                         if (curr->softirq_context)
2093                                 if (!mark_lock(curr, hlock,
2094                                                 LOCK_USED_IN_SOFTIRQ_READ, ip))
2095                                         return 0;
2096                 } else {
2097                         if (curr->hardirq_context)
2098                                 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ, ip))
2099                                         return 0;
2100                         if (curr->softirq_context)
2101                                 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ, ip))
2102                                         return 0;
2103                 }
2104         }
2105         if (!hardirqs_off) {
2106                 if (read) {
2107                         if (!mark_lock(curr, hlock,
2108                                         LOCK_ENABLED_HARDIRQS_READ, ip))
2109                                 return 0;
2110                         if (curr->softirqs_enabled)
2111                                 if (!mark_lock(curr, hlock,
2112                                                 LOCK_ENABLED_SOFTIRQS_READ, ip))
2113                                         return 0;
2114                 } else {
2115                         if (!mark_lock(curr, hlock,
2116                                         LOCK_ENABLED_HARDIRQS, ip))
2117                                 return 0;
2118                         if (curr->softirqs_enabled)
2119                                 if (!mark_lock(curr, hlock,
2120                                                 LOCK_ENABLED_SOFTIRQS, ip))
2121                                         return 0;
2122                 }
2123         }
2124 #endif
2125         /* mark it as used: */
2126         if (!mark_lock(curr, hlock, LOCK_USED, ip))
2127                 return 0;
2128 out_calc_hash:
2129         /*
2130          * Calculate the chain hash: it's the combined hash of all the
2131          * lock keys along the dependency chain. We save the hash value
2132          * at every step so that we can get the current hash easily
2133          * after unlock. The chain hash is then used to cache dependency
2134          * results.
2135          *
2136          * The 'key ID' (the class index) is the most compact key value
2137          * to drive the hash, so we use it rather than class->key.
2138          */
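        /*
         * Example: if locks A and B (class ids 5 and 17) are taken in
         * that order, the task's chain key goes from 0 to
         * iterate_chain_key(0, 5) and then to iterate_chain_key(<that>, 17);
         * every held_lock records the value it started from in
         * prev_chain_key, so a later unlock can restore it.
         */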
2139         id = class - lock_classes;
2140         if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
2141                 return 0;
2142
2143         chain_key = curr->curr_chain_key;
2144         if (!depth) {
2145                 if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
2146                         return 0;
2147                 chain_head = 1;
2148         }
2149
2150         hlock->prev_chain_key = chain_key;
2151
2152 #ifdef CONFIG_TRACE_IRQFLAGS
2153         /*
2154          * Keep track of points where we cross into an interrupt context:
2155          */
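        /*
         * The value computed below is 0 in plain process context; it
         * gains +2 in hardirq context and +1 per softirq context
         * level, so acquires done in different contexts compare
         * unequal in the check further down.
         */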
2156         hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
2157                                 curr->softirq_context;
2158         if (depth) {
2159                 struct held_lock *prev_hlock;
2160
2161                 prev_hlock = curr->held_locks + depth-1;
2162                 /*
2163                  * If we cross into another context, reset the
2164                  * hash key (this also prevents the checking and the
2165                  * adding of the dependency to 'prev'):
2166                  */
2167                 if (prev_hlock->irq_context != hlock->irq_context) {
2168                         chain_key = 0;
2169                         chain_head = 1;
2170                 }
2171         }
2172 #endif
2173         chain_key = iterate_chain_key(chain_key, id);
2174         curr->curr_chain_key = chain_key;
2175
2176         /*
2177          * Trylock needs to maintain the stack of held locks, but it
2178          * does not add new dependencies, because trylock can be done
2179          * in any order.
2180          *
2181          * We look up the chain_key and do the O(N^2) check and update of
2182          * the dependencies only if this is a new dependency chain.
2183          * (If lookup_chain_cache() returns with 1 it acquires
2184          * graph_lock for us)
2185          */
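        /*
         * (A trylock cannot deadlock on its own: if the lock is held
         * by someone else the trylock simply fails instead of
         * blocking, which is why no new dependency needs to be
         * recorded for it.)
         */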
2186         if (!trylock && (check == 2) && lookup_chain_cache(chain_key, class)) {
2187                 /*
2188                  * Check whether last held lock:
2189                  *
2190                  * - is irq-safe, if this lock is irq-unsafe
2191                  * - is softirq-safe, if this lock is hardirq-unsafe
2192                  *
2193                  * And check whether the new lock's dependency graph
2194                  * could lead back to the previous lock.
2195                  *
2196                  * Any of these scenarios could lead to a deadlock. If
2197                  * all validations pass, the new dependencies are added below.
2198                  */
2199                 int ret = check_deadlock(curr, hlock, lock, read);
2200
2201                 if (!ret)
2202                         return 0;
2203                 /*
2204                  * Mark recursive read, as we jump over it when
2205                  * building dependencies (just like we jump over
2206                  * trylock entries):
2207                  */
2208                 if (ret == 2)
2209                         hlock->read = 2;
2210                 /*
2211                  * Add dependency only if this lock is not the head
2212                  * of the chain, and if it's not a secondary read-lock:
2213                  */
2214                 if (!chain_head && ret != 2)
2215                         if (!check_prevs_add(curr, hlock))
2216                                 return 0;
2217                 graph_unlock();
2218         } else
2219                 /* after lookup_chain_cache(): */
2220                 if (unlikely(!debug_locks))
2221                         return 0;
2222
2223         curr->lockdep_depth++;
2224         check_chain_key(curr);
2225         if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
2226                 debug_locks_off();
2227                 printk("BUG: MAX_LOCK_DEPTH too low!\n");
2228                 printk("turning off the locking correctness validator.\n");
2229                 return 0;
2230         }
2231
2232         if (unlikely(curr->lockdep_depth > max_lockdep_depth))
2233                 max_lockdep_depth = curr->lockdep_depth;
2234
2235         return 1;
2236 }
2237
2238 static int
2239 print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
2240                            unsigned long ip)
2241 {
2242         if (!debug_locks_off())
2243                 return 0;
2244         if (debug_locks_silent)
2245                 return 0;
2246
2247         printk("\n=====================================\n");
2248         printk(  "[ BUG: bad unlock balance detected! ]\n");
2249         printk(  "-------------------------------------\n");
2250         printk("%s/%d is trying to release lock (",
2251                 curr->comm, curr->pid);
2252         print_lockdep_cache(lock);
2253         printk(") at:\n");
2254         print_ip_sym(ip);
2255         printk("but there are no more locks to release!\n");
2256         printk("\nother info that might help us debug this:\n");
2257         lockdep_print_held_locks(curr);
2258
2259         printk("\nstack backtrace:\n");
2260         dump_stack();
2261
2262         return 0;
2263 }
2264
2265 /*
2266  * Common debugging checks for both nested and non-nested unlock:
2267  */
2268 static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
2269                         unsigned long ip)
2270 {
2271         if (unlikely(!debug_locks))
2272                 return 0;
2273         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2274                 return 0;
2275
2276         if (curr->lockdep_depth <= 0)
2277                 return print_unlock_inbalance_bug(curr, lock, ip);
2278
2279         return 1;
2280 }
2281
2282 /*
2283  * Remove the lock from the list of currently held locks in a
2284  * potentially non-nested (out of order) manner. This is a
2285  * relatively rare operation, as all the unlock APIs default
2286  * to nested mode (which uses lock_release()):
2287  */
2288 static int
2289 lock_release_non_nested(struct task_struct *curr,
2290                         struct lockdep_map *lock, unsigned long ip)
2291 {
2292         struct held_lock *hlock, *prev_hlock;
2293         unsigned int depth;
2294         int i;
2295
2296         /*
2297          * Check whether the lock exists in the current stack
2298          * of held locks:
2299          */
2300         depth = curr->lockdep_depth;
2301         if (DEBUG_LOCKS_WARN_ON(!depth))
2302                 return 0;
2303
2304         prev_hlock = NULL;
2305         for (i = depth-1; i >= 0; i--) {
2306                 hlock = curr->held_locks + i;
2307                 /*
2308                  * We must not cross into another context:
2309                  */
2310                 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2311                         break;
2312                 if (hlock->instance == lock)
2313                         goto found_it;
2314                 prev_hlock = hlock;
2315         }
2316         return print_unlock_inbalance_bug(curr, lock, ip);
2317
2318 found_it:
2319         /*
2320          * We have the right lock to unlock, 'hlock' points to it.
2321          * Now we remove it from the stack, and add back the other
2322          * entries (if any), recalculating the hash along the way:
2323          */
2324         curr->lockdep_depth = i;
2325         curr->curr_chain_key = hlock->prev_chain_key;
2326
2327         for (i++; i < depth; i++) {
2328                 hlock = curr->held_locks + i;
2329                 if (!__lock_acquire(hlock->instance,
2330                         hlock->class->subclass, hlock->trylock,
2331                                 hlock->read, hlock->check, hlock->hardirqs_off,
2332                                 hlock->acquire_ip))
2333                         return 0;
2334         }
2335
2336         if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
2337                 return 0;
2338         return 1;
2339 }
2340
2341 /*
2342  * Remove the lock from the list of currently held locks - this gets
2343  * called on mutex_unlock()/spin_unlock*() (or on a failed
2344  * mutex_lock_interruptible()). This is done for unlocks that nest
2345  * perfectly. (i.e. the current top of the lock-stack is unlocked)
2346  */
2347 static int lock_release_nested(struct task_struct *curr,
2348                                struct lockdep_map *lock, unsigned long ip)
2349 {
2350         struct held_lock *hlock;
2351         unsigned int depth;
2352
2353         /*
2354          * Pop off the top of the lock stack:
2355          */
2356         depth = curr->lockdep_depth - 1;
2357         hlock = curr->held_locks + depth;
2358
2359         /*
2360          * Is the unlock non-nested:
2361          */
2362         if (hlock->instance != lock)
2363                 return lock_release_non_nested(curr, lock, ip);
2364         curr->lockdep_depth--;
2365
2366         if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
2367                 return 0;
2368
2369         curr->curr_chain_key = hlock->prev_chain_key;
2370
2371 #ifdef CONFIG_DEBUG_LOCKDEP
2372         hlock->prev_chain_key = 0;
2373         hlock->class = NULL;
2374         hlock->acquire_ip = 0;
2375         hlock->irq_context = 0;
2376 #endif
2377         return 1;
2378 }
2379
2380 /*
2381  * Remove the lock from the list of currently held locks - this gets
2382  * called on mutex_unlock()/spin_unlock*() (or on a failed
2383  * mutex_lock_interruptible()). It handles both the common nested
2384  * case and the rarer non-nested (out of order) unlock.
2385  */
2386 static void
2387 __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
2388 {
2389         struct task_struct *curr = current;
2390
2391         if (!check_unlock(curr, lock, ip))
2392                 return;
2393
2394         if (nested) {
2395                 if (!lock_release_nested(curr, lock, ip))
2396                         return;
2397         } else {
2398                 if (!lock_release_non_nested(curr, lock, ip))
2399                         return;
2400         }
2401
2402         check_chain_key(curr);
2403 }
2404
2405 /*
2406  * Check whether we follow the irq-flags state precisely:
2407  */
2408 static void check_flags(unsigned long flags)
2409 {
2410 #if defined(CONFIG_DEBUG_LOCKDEP) && defined(CONFIG_TRACE_IRQFLAGS)
2411         if (!debug_locks)
2412                 return;
2413
2414         if (irqs_disabled_flags(flags))
2415                 DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled);
2416         else
2417                 DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled);
2418
2419         /*
2420          * We don't accurately track softirq state in e.g.
2421          * hardirq contexts (such as on 4KSTACKS), so only
2422          * check this when not in a hardirq context:
2423          */
2424         if (!hardirq_count()) {
2425                 if (softirq_count())
2426                         DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
2427                 else
2428                         DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
2429         }
2430
2431         if (!debug_locks)
2432                 print_irqtrace_events(current);
2433 #endif
2434 }
2435
2436 /*
2437  * We are not always called with irqs disabled - do that here,
2438  * and also avoid lockdep recursion:
2439  */
2440 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2441                   int trylock, int read, int check, unsigned long ip)
2442 {
2443         unsigned long flags;
2444
2445         if (unlikely(current->lockdep_recursion))
2446                 return;
2447
2448         raw_local_irq_save(flags);
2449         check_flags(flags);
2450
2451         current->lockdep_recursion = 1;
2452         __lock_acquire(lock, subclass, trylock, read, check,
2453                        irqs_disabled_flags(flags), ip);
2454         current->lockdep_recursion = 0;
2455         raw_local_irq_restore(flags);
2456 }
2457
2458 EXPORT_SYMBOL_GPL(lock_acquire);
2459
2460 void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
2461 {
2462         unsigned long flags;
2463
2464         if (unlikely(current->lockdep_recursion))
2465                 return;
2466
2467         raw_local_irq_save(flags);
2468         check_flags(flags);
2469         current->lockdep_recursion = 1;
2470         __lock_release(lock, nested, ip);
2471         current->lockdep_recursion = 0;
2472         raw_local_irq_restore(flags);
2473 }
2474
2475 EXPORT_SYMBOL_GPL(lock_release);
2476
2477 /*
2478  * Used by the testsuite - sanitize the validator state
2479  * after a simulated failure:
2480  */
2481
2482 void lockdep_reset(void)
2483 {
2484         unsigned long flags;
2485         int i;
2486
2487         raw_local_irq_save(flags);
2488         current->curr_chain_key = 0;
2489         current->lockdep_depth = 0;
2490         current->lockdep_recursion = 0;
2491         memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
2492         nr_hardirq_chains = 0;
2493         nr_softirq_chains = 0;
2494         nr_process_chains = 0;
2495         debug_locks = 1;
2496         for (i = 0; i < CHAINHASH_SIZE; i++)
2497                 INIT_LIST_HEAD(chainhash_table + i);
2498         raw_local_irq_restore(flags);
2499 }
2500
2501 static void zap_class(struct lock_class *class)
2502 {
2503         int i;
2504
2505         /*
2506          * Remove all dependencies this lock is
2507          * involved in:
2508          */
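        /*
         * (list_del_rcu() is used here so that the entries stay safe
         * to traverse for any reader that is still walking the lists
         * without holding the graph lock.)
         */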
2509         for (i = 0; i < nr_list_entries; i++) {
2510                 if (list_entries[i].class == class)
2511                         list_del_rcu(&list_entries[i].entry);
2512         }
2513         /*
2514          * Unhash the class and remove it from the all_lock_classes list:
2515          */
2516         list_del_rcu(&class->hash_entry);
2517         list_del_rcu(&class->lock_entry);
2518
2519 }
2520
2521 static inline int within(void *addr, void *start, unsigned long size)
2522 {
2523         return addr >= start && addr < start + size;
2524 }
2525
2526 void lockdep_free_key_range(void *start, unsigned long size)
2527 {
2528         struct lock_class *class, *next;
2529         struct list_head *head;
2530         unsigned long flags;
2531         int i;
2532
2533         raw_local_irq_save(flags);
2534         graph_lock();
2535
2536         /*
2537          * Unhash all classes that were created by this module:
2538          */
2539         for (i = 0; i < CLASSHASH_SIZE; i++) {
2540                 head = classhash_table + i;
2541                 if (list_empty(head))
2542                         continue;
2543                 list_for_each_entry_safe(class, next, head, hash_entry)
2544                         if (within(class->key, start, size))
2545                                 zap_class(class);
2546         }
2547
2548         graph_unlock();
2549         raw_local_irq_restore(flags);
2550 }
2551
2552 void lockdep_reset_lock(struct lockdep_map *lock)
2553 {
2554         struct lock_class *class, *next;
2555         struct list_head *head;
2556         unsigned long flags;
2557         int i, j;
2558
2559         raw_local_irq_save(flags);
2560
2561         /*
2562          * Remove all classes this lock might have:
2563          */
2564         for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
2565                 /*
2566                  * If the class exists we look it up and zap it:
2567                  */
2568                 class = look_up_lock_class(lock, j);
2569                 if (class)
2570                         zap_class(class);
2571         }
2572         /*
2573          * Debug check: in the end all mapped classes should
2574          * be gone.
2575          */
2576         graph_lock();
2577         for (i = 0; i < CLASSHASH_SIZE; i++) {
2578                 head = classhash_table + i;
2579                 if (list_empty(head))
2580                         continue;
2581                 list_for_each_entry_safe(class, next, head, hash_entry) {
2582                         if (unlikely(class == lock->class_cache)) {
2583                                 if (debug_locks_off_graph_unlock())
2584                                         WARN_ON(1);
2585                                 goto out_restore;
2586                         }
2587                 }
2588         }
2589         graph_unlock();
2590
2591 out_restore:
2592         raw_local_irq_restore(flags);
2593 }
2594
2595 void __init lockdep_init(void)
2596 {
2597         int i;
2598
2599         /*
2600          * Some architectures have their own start_kernel()
2601          * code which calls lockdep_init(), while we also
2602  * call lockdep_init() from start_kernel() itself,
2603          * and we want to initialize the hashes only once:
2604          */
2605         if (lockdep_initialized)
2606                 return;
2607
2608         for (i = 0; i < CLASSHASH_SIZE; i++)
2609                 INIT_LIST_HEAD(classhash_table + i);
2610
2611         for (i = 0; i < CHAINHASH_SIZE; i++)
2612                 INIT_LIST_HEAD(chainhash_table + i);
2613
2614         lockdep_initialized = 1;
2615 }
2616
2617 void __init lockdep_info(void)
2618 {
2619         printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
2620
2621         printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
2622         printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
2623         printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
2624         printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
2625         printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
2626         printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
2627         printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
2628
2629         printk(" memory used by lock dependency info: %lu kB\n",
2630                 (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
2631                 sizeof(struct list_head) * CLASSHASH_SIZE +
2632                 sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
2633                 sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
2634                 sizeof(struct list_head) * CHAINHASH_SIZE) / 1024);
2635
2636         printk(" per task-struct memory footprint: %lu bytes\n",
2637                 sizeof(struct held_lock) * MAX_LOCK_DEPTH);
2638
2639 #ifdef CONFIG_DEBUG_LOCKDEP
2640         if (lockdep_init_error)
2641                 printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
2642 #endif
2643 }
2644
2645 static inline int in_range(const void *start, const void *addr, const void *end)
2646 {
2647         return addr >= start && addr <= end;
2648 }
2649
2650 static void
2651 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
2652                      const void *mem_to, struct held_lock *hlock)
2653 {
2654         if (!debug_locks_off())
2655                 return;
2656         if (debug_locks_silent)
2657                 return;
2658
2659         printk("\n=========================\n");
2660         printk(  "[ BUG: held lock freed! ]\n");
2661         printk(  "-------------------------\n");
2662         printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
2663                 curr->comm, curr->pid, mem_from, mem_to-1);
2664         print_lock(hlock);
2665         lockdep_print_held_locks(curr);
2666
2667         printk("\nstack backtrace:\n");
2668         dump_stack();
2669 }
2670
2671 /*
2672  * Called when kernel memory is freed (or unmapped), or if a lock
2673  * is destroyed or reinitialized - this code checks whether there is
2674  * any held lock in the memory range of <from> to <to>:
2675  */
2676 void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
2677 {
2678         const void *mem_to = mem_from + mem_len, *lock_from, *lock_to;
2679         struct task_struct *curr = current;
2680         struct held_lock *hlock;
2681         unsigned long flags;
2682         int i;
2683
2684         if (unlikely(!debug_locks))
2685                 return;
2686
2687         local_irq_save(flags);
2688         for (i = 0; i < curr->lockdep_depth; i++) {
2689                 hlock = curr->held_locks + i;
2690
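                /*
                 * The lockdep_map embedded in the held lock occupies
                 * [lock_from, lock_to); complain if either end falls
                 * inside the memory range that is being freed.
                 */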
2691                 lock_from = (void *)hlock->instance;
2692                 lock_to = (void *)(hlock->instance + 1);
2693
2694                 if (!in_range(mem_from, lock_from, mem_to) &&
2695                                         !in_range(mem_from, lock_to, mem_to))
2696                         continue;
2697
2698                 print_freed_lock_bug(curr, mem_from, mem_to, hlock);
2699                 break;
2700         }
2701         local_irq_restore(flags);
2702 }
2703 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
2704
2705 static void print_held_locks_bug(struct task_struct *curr)
2706 {
2707         if (!debug_locks_off())
2708                 return;
2709         if (debug_locks_silent)
2710                 return;
2711
2712         printk("\n=====================================\n");
2713         printk(  "[ BUG: lock held at task exit time! ]\n");
2714         printk(  "-------------------------------------\n");
2715         printk("%s/%d is exiting with locks still held!\n",
2716                 curr->comm, curr->pid);
2717         lockdep_print_held_locks(curr);
2718
2719         printk("\nstack backtrace:\n");
2720         dump_stack();
2721 }
2722
2723 void debug_check_no_locks_held(struct task_struct *task)
2724 {
2725         if (unlikely(task->lockdep_depth > 0))
2726                 print_held_locks_bug(task);
2727 }
2728
2729 void debug_show_all_locks(void)
2730 {
2731         struct task_struct *g, *p;
2732         int count = 10;
2733         int unlock = 1;
2734
2735         printk("\nShowing all locks held in the system:\n");
2736
2737         /*
2738          * Here we try to get the tasklist_lock as hard as possible;
2739          * if we don't succeed after 2 seconds we ignore it (but keep
2740          * trying). This is to enable a debug printout even if a
2741          * tasklist_lock-holding task deadlocks or crashes.
2742          */
2743 retry:
2744         if (!read_trylock(&tasklist_lock)) {
2745                 if (count == 10)
2746                         printk("hm, tasklist_lock locked, retrying... ");
2747                 if (count) {
2748                         count--;
2749                         printk(" #%d", 10-count);
2750                         mdelay(200);
2751                         goto retry;
2752                 }
2753                 printk(" ignoring it.\n");
2754                 unlock = 0;
2755         }
2756         if (unlock && count != 10)
2757                 printk(" locked it.\n");
2758
2759         do_each_thread(g, p) {
2760                 if (p->lockdep_depth)
2761                         lockdep_print_held_locks(p);
2762                 if (!unlock)
2763                         if (read_trylock(&tasklist_lock))
2764                                 unlock = 1;
2765         } while_each_thread(g, p);
2766
2767         printk("\n");
2768         printk("=============================================\n\n");
2769
2770         if (unlock)
2771                 read_unlock(&tasklist_lock);
2772 }
2773
2774 EXPORT_SYMBOL_GPL(debug_show_all_locks);
2775
2776 void debug_show_held_locks(struct task_struct *task)
2777 {
2778         lockdep_print_held_locks(task);
2779 }
2780
2781 EXPORT_SYMBOL_GPL(debug_show_held_locks);
2782