1 /*
2  * kernel/lockdep.c
3  *
4  * Runtime locking correctness validator
5  *
6  * Started by Ingo Molnar:
7  *
8  *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
10  *
11  * this code maps all the lock dependencies as they occur in a live kernel
12  * and will warn about the following classes of locking bugs:
13  *
14  * - lock inversion scenarios
15  * - circular lock dependencies
16  * - hardirq/softirq safe/unsafe locking bugs
17  *
18  * Bugs are reported even if the current locking scenario does not cause
19  * any deadlock at this point.
20  *
21  * I.e. if at any time in the past two locks were taken in a different order,
22  * even if it happened for another task, even if those were different
23  * locks (but of the same class as this lock), this code will detect it.
24  *
25  * Thanks to Arjan van de Ven for coming up with the initial idea of
26  * mapping lock dependencies at runtime.
27  */
28 #define DISABLE_BRANCH_PROFILING
29 #include <linux/mutex.h>
30 #include <linux/sched.h>
31 #include <linux/delay.h>
32 #include <linux/module.h>
33 #include <linux/proc_fs.h>
34 #include <linux/seq_file.h>
35 #include <linux/spinlock.h>
36 #include <linux/kallsyms.h>
37 #include <linux/interrupt.h>
38 #include <linux/stacktrace.h>
39 #include <linux/debug_locks.h>
40 #include <linux/irqflags.h>
41 #include <linux/utsname.h>
42 #include <linux/hash.h>
43 #include <linux/ftrace.h>
44
45 #include <asm/sections.h>
46
47 #include "lockdep_internals.h"
48
49 #ifdef CONFIG_PROVE_LOCKING
50 int prove_locking = 1;
51 module_param(prove_locking, int, 0644);
52 #else
53 #define prove_locking 0
54 #endif
55
56 #ifdef CONFIG_LOCK_STAT
57 int lock_stat = 1;
58 module_param(lock_stat, int, 0644);
59 #else
60 #define lock_stat 0
61 #endif
62
63 /*
64  * lockdep_lock: protects the lockdep graph, the hashes and the
65  *               class/list/hash allocators.
66  *
67  * This is one of the rare exceptions where it's justified
68  * to use a raw spinlock - we really don't want the spinlock
69  * code to recurse back into the lockdep code...
70  */
71 static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
72
73 static int graph_lock(void)
74 {
75         __raw_spin_lock(&lockdep_lock);
76         /*
77          * Make sure that if another CPU detected a bug while
78          * walking the graph we don't change it (while the other
79          * CPU is busy printing out stuff with the graph lock
80          * dropped already)
81          */
82         if (!debug_locks) {
83                 __raw_spin_unlock(&lockdep_lock);
84                 return 0;
85         }
86         /* prevent any recursions within lockdep from causing deadlocks */
87         current->lockdep_recursion++;
88         return 1;
89 }
90
91 static inline int graph_unlock(void)
92 {
93         if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
94                 return DEBUG_LOCKS_WARN_ON(1);
95
96         current->lockdep_recursion--;
97         __raw_spin_unlock(&lockdep_lock);
98         return 0;
99 }
100
101 /*
102  * Turn lock debugging off and return with 0 if it was off already,
103  * and also release the graph lock:
104  */
105 static inline int debug_locks_off_graph_unlock(void)
106 {
107         int ret = debug_locks_off();
108
109         __raw_spin_unlock(&lockdep_lock);
110
111         return ret;
112 }
113
114 static int lockdep_initialized;
115
116 unsigned long nr_list_entries;
117 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
118
119 /*
120  * All data structures here are protected by the global debug_lock.
121  *
122  * Mutex key structs only get allocated once, during bootup, and never
123  * get freed - this significantly simplifies the debugging code.
124  */
125 unsigned long nr_lock_classes;
126 static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
127
128 static inline struct lock_class *hlock_class(struct held_lock *hlock)
129 {
130         if (!hlock->class_idx) {
131                 DEBUG_LOCKS_WARN_ON(1);
132                 return NULL;
133         }
134         return lock_classes + hlock->class_idx - 1;
135 }
136
137 #ifdef CONFIG_LOCK_STAT
138 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
139
140 static int lock_point(unsigned long points[], unsigned long ip)
141 {
142         int i;
143
144         for (i = 0; i < LOCKSTAT_POINTS; i++) {
145                 if (points[i] == 0) {
146                         points[i] = ip;
147                         break;
148                 }
149                 if (points[i] == ip)
150                         break;
151         }
152
153         return i;
154 }
155
156 static void lock_time_inc(struct lock_time *lt, s64 time)
157 {
158         if (time > lt->max)
159                 lt->max = time;
160
161         if (time < lt->min || !lt->min)
162                 lt->min = time;
163
164         lt->total += time;
165         lt->nr++;
166 }
167
168 static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
169 {
170         dst->min += src->min;
171         dst->max += src->max;
172         dst->total += src->total;
173         dst->nr += src->nr;
174 }
175
176 struct lock_class_stats lock_stats(struct lock_class *class)
177 {
178         struct lock_class_stats stats;
179         int cpu, i;
180
181         memset(&stats, 0, sizeof(struct lock_class_stats));
182         for_each_possible_cpu(cpu) {
183                 struct lock_class_stats *pcs =
184                         &per_cpu(lock_stats, cpu)[class - lock_classes];
185
186                 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
187                         stats.contention_point[i] += pcs->contention_point[i];
188
189                 for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
190                         stats.contending_point[i] += pcs->contending_point[i];
191
192                 lock_time_add(&pcs->read_waittime, &stats.read_waittime);
193                 lock_time_add(&pcs->write_waittime, &stats.write_waittime);
194
195                 lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
196                 lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
197
198                 for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
199                         stats.bounces[i] += pcs->bounces[i];
200         }
201
202         return stats;
203 }
204
205 void clear_lock_stats(struct lock_class *class)
206 {
207         int cpu;
208
209         for_each_possible_cpu(cpu) {
210                 struct lock_class_stats *cpu_stats =
211                         &per_cpu(lock_stats, cpu)[class - lock_classes];
212
213                 memset(cpu_stats, 0, sizeof(struct lock_class_stats));
214         }
215         memset(class->contention_point, 0, sizeof(class->contention_point));
216         memset(class->contending_point, 0, sizeof(class->contending_point));
217 }
218
219 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
220 {
221         return &get_cpu_var(lock_stats)[class - lock_classes];
222 }
223
224 static void put_lock_stats(struct lock_class_stats *stats)
225 {
226         put_cpu_var(lock_stats);
227 }
228
229 static void lock_release_holdtime(struct held_lock *hlock)
230 {
231         struct lock_class_stats *stats;
232         s64 holdtime;
233
234         if (!lock_stat)
235                 return;
236
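        /* sched_clock() returns nanoseconds, so the recorded hold times are in ns */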
237         holdtime = sched_clock() - hlock->holdtime_stamp;
238
239         stats = get_lock_stats(hlock_class(hlock));
240         if (hlock->read)
241                 lock_time_inc(&stats->read_holdtime, holdtime);
242         else
243                 lock_time_inc(&stats->write_holdtime, holdtime);
244         put_lock_stats(stats);
245 }
246 #else
247 static inline void lock_release_holdtime(struct held_lock *hlock)
248 {
249 }
250 #endif
251
252 /*
253  * We keep a global list of all lock classes. The list only grows,
254  * never shrinks. The list is only accessed with the lockdep
255  * spinlock held.
256  */
257 LIST_HEAD(all_lock_classes);
258
259 /*
260  * The lockdep classes are in a hash-table as well, for fast lookup:
261  */
262 #define CLASSHASH_BITS          (MAX_LOCKDEP_KEYS_BITS - 1)
263 #define CLASSHASH_SIZE          (1UL << CLASSHASH_BITS)
264 #define __classhashfn(key)      hash_long((unsigned long)key, CLASSHASH_BITS)
265 #define classhashentry(key)     (classhash_table + __classhashfn((key)))
266
267 static struct list_head classhash_table[CLASSHASH_SIZE];
268
269 /*
270  * We put the lock dependency chains into a hash-table as well, to cache
271  * their existence:
272  */
273 #define CHAINHASH_BITS          (MAX_LOCKDEP_CHAINS_BITS-1)
274 #define CHAINHASH_SIZE          (1UL << CHAINHASH_BITS)
275 #define __chainhashfn(chain)    hash_long(chain, CHAINHASH_BITS)
276 #define chainhashentry(chain)   (chainhash_table + __chainhashfn((chain)))
277
278 static struct list_head chainhash_table[CHAINHASH_SIZE];
279
280 /*
281  * The hash key of the lock dependency chains is a hash itself too:
282  * it's a hash of all locks taken up to that lock, including that lock.
283  * It's a 64-bit hash, because it's important for the keys to be
284  * unique.
285  */
286 #define iterate_chain_key(key1, key2) \
287         (((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
288         ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
289         (key2))
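/*
 * In other words: rotate the previous 64-bit chain key left by
 * MAX_LOCKDEP_KEYS_BITS and xor in the id of the newly taken lock class -
 * an order-sensitive rolling hash over the current stack of held locks.
 */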
290
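/*
 * lockdep_off()/lockdep_on(): disable/re-enable lock tracking for the
 * current task. While ->lockdep_recursion is non-zero, lockdep ignores
 * this task's lock events (this is also how lockdep avoids recursing
 * into itself, see graph_lock() above).
 */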
291 void lockdep_off(void)
292 {
293         current->lockdep_recursion++;
294 }
295 EXPORT_SYMBOL(lockdep_off);
296
297 void lockdep_on(void)
298 {
299         current->lockdep_recursion--;
300 }
301 EXPORT_SYMBOL(lockdep_on);
302
303 /*
304  * Debugging switches:
305  */
306
307 #define VERBOSE                 0
308 #define VERY_VERBOSE            0
309
310 #if VERBOSE
311 # define HARDIRQ_VERBOSE        1
312 # define SOFTIRQ_VERBOSE        1
313 # define RECLAIM_VERBOSE        1
314 #else
315 # define HARDIRQ_VERBOSE        0
316 # define SOFTIRQ_VERBOSE        0
317 # define RECLAIM_VERBOSE        0
318 #endif
319
320 #if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
321 /*
322  * Quick filtering for interesting events:
323  */
324 static int class_filter(struct lock_class *class)
325 {
326 #if 0
327         /* Example */
328         if (class->name_version == 1 &&
329                         !strcmp(class->name, "lockname"))
330                 return 1;
331         if (class->name_version == 1 &&
332                         !strcmp(class->name, "&struct->lockfield"))
333                 return 1;
334 #endif
335         /* Filter everything else. Return 1 here to allow everything else. */
336         return 0;
337 }
338 #endif
339
340 static int verbose(struct lock_class *class)
341 {
342 #if VERBOSE
343         return class_filter(class);
344 #endif
345         return 0;
346 }
347
348 /*
349  * Stack-trace: tightly packed array of stack backtrace
350  * addresses. Protected by the graph_lock.
351  */
352 unsigned long nr_stack_trace_entries;
353 static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
354
355 static int save_trace(struct stack_trace *trace)
356 {
357         trace->nr_entries = 0;
358         trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
359         trace->entries = stack_trace + nr_stack_trace_entries;
360
361         trace->skip = 3;
362
363         save_stack_trace(trace);
364
365         trace->max_entries = trace->nr_entries;
366
367         nr_stack_trace_entries += trace->nr_entries;
368
369         if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
370                 if (!debug_locks_off_graph_unlock())
371                         return 0;
372
373                 printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
374                 printk("turning off the locking correctness validator.\n");
375                 dump_stack();
376
377                 return 0;
378         }
379
380         return 1;
381 }
382
383 unsigned int nr_hardirq_chains;
384 unsigned int nr_softirq_chains;
385 unsigned int nr_process_chains;
386 unsigned int max_lockdep_depth;
387 unsigned int max_recursion_depth;
388
389 static unsigned int lockdep_dependency_gen_id;
390
391 static bool lockdep_dependency_visit(struct lock_class *source,
392                                      unsigned int depth)
393 {
394         if (!depth)
395                 lockdep_dependency_gen_id++;
396         if (source->dep_gen_id == lockdep_dependency_gen_id)
397                 return true;
398         source->dep_gen_id = lockdep_dependency_gen_id;
399         return false;
400 }
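/*
 * The generation counter above implements the 'visited' marking for the
 * graph walks: each new top-level traversal (depth == 0) bumps
 * lockdep_dependency_gen_id, and a class whose dep_gen_id already matches
 * the current generation has been seen in this traversal and is skipped.
 * This way no per-class flags need to be cleared between walks.
 */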
401
402 #ifdef CONFIG_DEBUG_LOCKDEP
403 /*
404  * We cannot printk in early bootup code - not even early_printk()
405  * may work. So we mark any initialization errors and printk
406  * about them later on, in lockdep_info().
407  */
408 static int lockdep_init_error;
409 static unsigned long lockdep_init_trace_data[20];
410 static struct stack_trace lockdep_init_trace = {
411         .max_entries = ARRAY_SIZE(lockdep_init_trace_data),
412         .entries = lockdep_init_trace_data,
413 };
414
415 /*
416  * Various lockdep statistics:
417  */
418 atomic_t chain_lookup_hits;
419 atomic_t chain_lookup_misses;
420 atomic_t hardirqs_on_events;
421 atomic_t hardirqs_off_events;
422 atomic_t redundant_hardirqs_on;
423 atomic_t redundant_hardirqs_off;
424 atomic_t softirqs_on_events;
425 atomic_t softirqs_off_events;
426 atomic_t redundant_softirqs_on;
427 atomic_t redundant_softirqs_off;
428 atomic_t nr_unused_locks;
429 atomic_t nr_cyclic_checks;
430 atomic_t nr_cyclic_check_recursions;
431 atomic_t nr_find_usage_forwards_checks;
432 atomic_t nr_find_usage_forwards_recursions;
433 atomic_t nr_find_usage_backwards_checks;
434 atomic_t nr_find_usage_backwards_recursions;
435 # define debug_atomic_inc(ptr)          atomic_inc(ptr)
436 # define debug_atomic_dec(ptr)          atomic_dec(ptr)
437 # define debug_atomic_read(ptr)         atomic_read(ptr)
438 #else
439 # define debug_atomic_inc(ptr)          do { } while (0)
440 # define debug_atomic_dec(ptr)          do { } while (0)
441 # define debug_atomic_read(ptr)         0
442 #endif
443
444 /*
445  * Locking printouts:
446  */
447
448 #define __STR(foo)      #foo
449 #define STR(foo)        __STR(foo)
450
451 #define __USAGE(__STATE)                                                \
452         [LOCK_USED_IN_##__STATE] = "IN-"STR(__STATE)"-W",               \
453         [LOCK_ENABLED_##__STATE] = STR(__STATE)"-ON-W",                 \
454         [LOCK_USED_IN_##__STATE##_READ] = "IN-"STR(__STATE)"-R",        \
455         [LOCK_ENABLED_##__STATE##_READ] = STR(__STATE)"-ON-R",
456
457 static const char *usage_str[] =
458 {
459 #define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
460 #include "lockdep_states.h"
461 #undef LOCKDEP_STATE
462         [LOCK_USED] = "INITIAL USE",
463 };
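/*
 * usage_str[] maps each lock_usage_bit to a human-readable name, e.g.
 * "IN-HARDIRQ-W" or "HARDIRQ-ON-R", as generated by __USAGE() above.
 */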
464
465 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
466 {
467         return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
468 }
469
470 static inline unsigned long lock_flag(enum lock_usage_bit bit)
471 {
472         return 1UL << bit;
473 }
474
475 static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
476 {
477         char c = '.';
478
479         if (class->usage_mask & lock_flag(bit + 2))
480                 c = '+';
481         if (class->usage_mask & lock_flag(bit)) {
482                 c = '-';
483                 if (class->usage_mask & lock_flag(bit + 2))
484                         c = '?';
485         }
486
487         return c;
488 }
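/*
 * The character returned above encodes this class's irq usage for the given
 * state (one character for write usage, one for read usage):
 *   '.'  never used in that irq context, never acquired with it enabled
 *   '+'  acquired with that irq type enabled (irq-unsafe usage)
 *   '-'  used in that irq context (irq-safe usage)
 *   '?'  both of the above
 */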
489
490 void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
491 {
492         int i = 0;
493
494 #define LOCKDEP_STATE(__STATE)                                          \
495         usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);     \
496         usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
497 #include "lockdep_states.h"
498 #undef LOCKDEP_STATE
499
500         usage[i] = '\0';
501 }
502
503 static void print_lock_name(struct lock_class *class)
504 {
505         char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
506         const char *name;
507
508         get_usage_chars(class, usage);
509
510         name = class->name;
511         if (!name) {
512                 name = __get_key_name(class->key, str);
513                 printk(" (%s", name);
514         } else {
515                 printk(" (%s", name);
516                 if (class->name_version > 1)
517                         printk("#%d", class->name_version);
518                 if (class->subclass)
519                         printk("/%d", class->subclass);
520         }
521         printk("){%s}", usage);
522 }
523
524 static void print_lockdep_cache(struct lockdep_map *lock)
525 {
526         const char *name;
527         char str[KSYM_NAME_LEN];
528
529         name = lock->name;
530         if (!name)
531                 name = __get_key_name(lock->key->subkeys, str);
532
533         printk("%s", name);
534 }
535
536 static void print_lock(struct held_lock *hlock)
537 {
538         print_lock_name(hlock_class(hlock));
539         printk(", at: ");
540         print_ip_sym(hlock->acquire_ip);
541 }
542
543 static void lockdep_print_held_locks(struct task_struct *curr)
544 {
545         int i, depth = curr->lockdep_depth;
546
547         if (!depth) {
548                 printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
549                 return;
550         }
551         printk("%d lock%s held by %s/%d:\n",
552                 depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
553
554         for (i = 0; i < depth; i++) {
555                 printk(" #%d: ", i);
556                 print_lock(curr->held_locks + i);
557         }
558 }
559
560 static void print_lock_class_header(struct lock_class *class, int depth)
561 {
562         int bit;
563
564         printk("%*s->", depth, "");
565         print_lock_name(class);
566         printk(" ops: %lu", class->ops);
567         printk(" {\n");
568
569         for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
570                 if (class->usage_mask & (1 << bit)) {
571                         int len = depth;
572
573                         len += printk("%*s   %s", depth, "", usage_str[bit]);
574                         len += printk(" at:\n");
575                         print_stack_trace(class->usage_traces + bit, len);
576                 }
577         }
578         printk("%*s }\n", depth, "");
579
580         printk("%*s ... key      at: ",depth,"");
581         print_ip_sym((unsigned long)class->key);
582 }
583
584 /*
585  * printk all lock dependencies starting at <entry>:
586  */
587 static void __used
588 print_lock_dependencies(struct lock_class *class, int depth)
589 {
590         struct lock_list *entry;
591
592         if (lockdep_dependency_visit(class, depth))
593                 return;
594
595         if (DEBUG_LOCKS_WARN_ON(depth >= 20))
596                 return;
597
598         print_lock_class_header(class, depth);
599
600         list_for_each_entry(entry, &class->locks_after, entry) {
601                 if (DEBUG_LOCKS_WARN_ON(!entry->class))
602                         return;
603
604                 print_lock_dependencies(entry->class, depth + 1);
605
606                 printk("%*s ... acquired at:\n",depth,"");
607                 print_stack_trace(&entry->trace, 2);
608                 printk("\n");
609         }
610 }
611
612 static void print_kernel_version(void)
613 {
614         printk("%s %.*s\n", init_utsname()->release,
615                 (int)strcspn(init_utsname()->version, " "),
616                 init_utsname()->version);
617 }
618
619 static int very_verbose(struct lock_class *class)
620 {
621 #if VERY_VERBOSE
622         return class_filter(class);
623 #endif
624         return 0;
625 }
626
627 /*
628  * Is this the address of a static object:
629  */
630 static int static_obj(void *obj)
631 {
632         unsigned long start = (unsigned long) &_stext,
633                       end   = (unsigned long) &_end,
634                       addr  = (unsigned long) obj;
635 #ifdef CONFIG_SMP
636         int i;
637 #endif
638
639         /*
640          * static variable?
641          */
642         if ((addr >= start) && (addr < end))
643                 return 1;
644
645 #ifdef CONFIG_SMP
646         /*
647          * percpu var?
648          */
649         for_each_possible_cpu(i) {
650                 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
651                 end   = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
652                                         + per_cpu_offset(i);
653
654                 if ((addr >= start) && (addr < end))
655                         return 1;
656         }
657 #endif
658
659         /*
660          * module var?
661          */
662         return is_module_address(addr);
663 }
664
665 /*
666  * To make lock name printouts unique, we calculate a unique
667  * class->name_version generation counter:
668  */
669 static int count_matching_names(struct lock_class *new_class)
670 {
671         struct lock_class *class;
672         int count = 0;
673
674         if (!new_class->name)
675                 return 0;
676
677         list_for_each_entry(class, &all_lock_classes, lock_entry) {
678                 if (new_class->key - new_class->subclass == class->key)
679                         return class->name_version;
680                 if (class->name && !strcmp(class->name, new_class->name))
681                         count = max(count, class->name_version);
682         }
683
684         return count + 1;
685 }
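/*
 * E.g. if two distinct classes are both named "&struct->lockfield", the
 * first one gets name_version 1 and is printed as "&struct->lockfield",
 * while the second gets name_version 2 and is printed as
 * "&struct->lockfield#2" (see print_lock_name() above).
 */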
686
687 /*
688  * Look up a lock's class in the hash-table. The result gets cached in the
689  * lock object (by register_lock_class() below), so the hash lookup should
690  * only happen once per lock object. Returns NULL if the class is not
691  * registered yet.
692  */
692 static inline struct lock_class *
693 look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
694 {
695         struct lockdep_subclass_key *key;
696         struct list_head *hash_head;
697         struct lock_class *class;
698
699 #ifdef CONFIG_DEBUG_LOCKDEP
700         /*
701          * If the architecture calls into lockdep before initializing
702          * the hashes then we'll warn about it later. (we cannot printk
703          * right now)
704          */
705         if (unlikely(!lockdep_initialized)) {
706                 lockdep_init();
707                 lockdep_init_error = 1;
708                 save_stack_trace(&lockdep_init_trace);
709         }
710 #endif
711
712         /*
713          * Static locks do not have their class-keys yet - for them the key
714          * is the lock object itself:
715          */
716         if (unlikely(!lock->key))
717                 lock->key = (void *)lock;
718
719         /*
720          * NOTE: the class-key must be unique. For dynamic locks, a static
721          * lock_class_key variable is passed in through the mutex_init()
722          * (or spin_lock_init()) call - which acts as the key. For static
723          * locks we use the lock object itself as the key.
724          */
725         BUILD_BUG_ON(sizeof(struct lock_class_key) >
726                         sizeof(struct lockdep_map));
727
728         key = lock->key->subkeys + subclass;
729
730         hash_head = classhashentry(key);
731
732         /*
733          * We can walk the hash lockfree, because the hash only
734          * grows, and we are careful when adding entries to the end:
735          */
736         list_for_each_entry(class, hash_head, hash_entry) {
737                 if (class->key == key) {
738                         WARN_ON_ONCE(class->name != lock->name);
739                         return class;
740                 }
741         }
742
743         return NULL;
744 }
745
746 /*
747  * Register a lock's class in the hash-table, if the class is not present
748  * yet. Otherwise we look it up. We cache the result in the lock object
749  * itself, so actual lookup of the hash should be once per lock object.
750  */
751 static inline struct lock_class *
752 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
753 {
754         struct lockdep_subclass_key *key;
755         struct list_head *hash_head;
756         struct lock_class *class;
757         unsigned long flags;
758
759         class = look_up_lock_class(lock, subclass);
760         if (likely(class))
761                 return class;
762
763         /*
764          * Debug-check: all keys must be persistent!
765          */
766         if (!static_obj(lock->key)) {
767                 debug_locks_off();
768                 printk("INFO: trying to register non-static key.\n");
769                 printk("the code is fine but needs lockdep annotation.\n");
770                 printk("turning off the locking correctness validator.\n");
771                 dump_stack();
772
773                 return NULL;
774         }
775
776         key = lock->key->subkeys + subclass;
777         hash_head = classhashentry(key);
778
779         raw_local_irq_save(flags);
780         if (!graph_lock()) {
781                 raw_local_irq_restore(flags);
782                 return NULL;
783         }
784         /*
785          * We have to do the hash-walk again, to avoid races
786          * with another CPU:
787          */
788         list_for_each_entry(class, hash_head, hash_entry)
789                 if (class->key == key)
790                         goto out_unlock_set;
791         /*
792          * Allocate a new key from the static array, and add it to
793          * the hash:
794          */
795         if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
796                 if (!debug_locks_off_graph_unlock()) {
797                         raw_local_irq_restore(flags);
798                         return NULL;
799                 }
800                 raw_local_irq_restore(flags);
801
802                 printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
803                 printk("turning off the locking correctness validator.\n");
804                 return NULL;
805         }
806         class = lock_classes + nr_lock_classes++;
807         debug_atomic_inc(&nr_unused_locks);
808         class->key = key;
809         class->name = lock->name;
810         class->subclass = subclass;
811         INIT_LIST_HEAD(&class->lock_entry);
812         INIT_LIST_HEAD(&class->locks_before);
813         INIT_LIST_HEAD(&class->locks_after);
814         class->name_version = count_matching_names(class);
815         /*
816          * We use RCU's safe list-add method to make
817          * parallel walking of the hash-list safe:
818          */
819         list_add_tail_rcu(&class->hash_entry, hash_head);
820         /*
821          * Add it to the global list of classes:
822          */
823         list_add_tail_rcu(&class->lock_entry, &all_lock_classes);
824
825         if (verbose(class)) {
826                 graph_unlock();
827                 raw_local_irq_restore(flags);
828
829                 printk("\nnew class %p: %s", class->key, class->name);
830                 if (class->name_version > 1)
831                         printk("#%d", class->name_version);
832                 printk("\n");
833                 dump_stack();
834
835                 raw_local_irq_save(flags);
836                 if (!graph_lock()) {
837                         raw_local_irq_restore(flags);
838                         return NULL;
839                 }
840         }
841 out_unlock_set:
842         graph_unlock();
843         raw_local_irq_restore(flags);
844
845         if (!subclass || force)
846                 lock->class_cache = class;
847
848         if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
849                 return NULL;
850
851         return class;
852 }
853
854 #ifdef CONFIG_PROVE_LOCKING
855 /*
856  * Allocate a lockdep entry. (Assumes the graph_lock is held; returns
857  * NULL on failure.)
858  */
859 static struct lock_list *alloc_list_entry(void)
860 {
861         if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
862                 if (!debug_locks_off_graph_unlock())
863                         return NULL;
864
865                 printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
866                 printk("turning off the locking correctness validator.\n");
867                 return NULL;
868         }
869         return list_entries + nr_list_entries++;
870 }
871
872 /*
873  * Add a new dependency to the head of the list:
874  */
875 static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
876                             struct list_head *head, unsigned long ip, int distance)
877 {
878         struct lock_list *entry;
879         /*
880          * Lock not present yet - get a new dependency struct and
881          * add it to the list:
882          */
883         entry = alloc_list_entry();
884         if (!entry)
885                 return 0;
886
887         if (!save_trace(&entry->trace))
888                 return 0;
889
890         entry->class = this;
891         entry->distance = distance;
892         /*
893          * Since we never remove from the dependency list, the list can
894          * be walked lockless by other CPUs, it's only allocation
895          * that must be protected by the spinlock. But this also means
896          * we must make new entries visible only once writes to the
897          * entry become visible - hence the RCU op:
898          */
899         list_add_tail_rcu(&entry->entry, head);
900
901         return 1;
902 }
903
904 /*
905  * Recursive, forwards-direction lock-dependency checking, used for
906  * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
907  * checking.
908  *
909  * (to keep the stackframe of the recursive functions small we
910  *  use these global variables, and we also mark various helper
911  *  functions as noinline.)
912  */
913 static struct held_lock *check_source, *check_target;
914
915 /*
916  * Print a dependency chain entry (this is only done when a deadlock
917  * has been detected):
918  */
919 static noinline int
920 print_circular_bug_entry(struct lock_list *target, unsigned int depth)
921 {
922         if (debug_locks_silent)
923                 return 0;
924         printk("\n-> #%u", depth);
925         print_lock_name(target->class);
926         printk(":\n");
927         print_stack_trace(&target->trace, 6);
928
929         return 0;
930 }
931
932 /*
933  * When a circular dependency is detected, print the
934  * header first:
935  */
936 static noinline int
937 print_circular_bug_header(struct lock_list *entry, unsigned int depth)
938 {
939         struct task_struct *curr = current;
940
941         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
942                 return 0;
943
944         printk("\n=======================================================\n");
945         printk(  "[ INFO: possible circular locking dependency detected ]\n");
946         print_kernel_version();
947         printk(  "-------------------------------------------------------\n");
948         printk("%s/%d is trying to acquire lock:\n",
949                 curr->comm, task_pid_nr(curr));
950         print_lock(check_source);
951         printk("\nbut task is already holding lock:\n");
952         print_lock(check_target);
953         printk("\nwhich lock already depends on the new lock.\n\n");
954         printk("\nthe existing dependency chain (in reverse order) is:\n");
955
956         print_circular_bug_entry(entry, depth);
957
958         return 0;
959 }
960
961 static noinline int print_circular_bug_tail(void)
962 {
963         struct task_struct *curr = current;
964         struct lock_list this;
965
966         if (debug_locks_silent)
967                 return 0;
968
969         this.class = hlock_class(check_source);
970         if (!save_trace(&this.trace))
971                 return 0;
972
973         print_circular_bug_entry(&this, 0);
974
975         printk("\nother info that might help us debug this:\n\n");
976         lockdep_print_held_locks(curr);
977
978         printk("\nstack backtrace:\n");
979         dump_stack();
980
981         return 0;
982 }
983
984 #define RECURSION_LIMIT 40
985
986 static int noinline print_infinite_recursion_bug(void)
987 {
988         if (!debug_locks_off_graph_unlock())
989                 return 0;
990
991         WARN_ON(1);
992
993         return 0;
994 }
995
996 unsigned long __lockdep_count_forward_deps(struct lock_class *class,
997                                            unsigned int depth)
998 {
999         struct lock_list *entry;
1000         unsigned long ret = 1;
1001
1002         if (lockdep_dependency_visit(class, depth))
1003                 return 0;
1004
1005         /*
1006          * Recurse this class's dependency list:
1007          */
1008         list_for_each_entry(entry, &class->locks_after, entry)
1009                 ret += __lockdep_count_forward_deps(entry->class, depth + 1);
1010
1011         return ret;
1012 }
1013
1014 unsigned long lockdep_count_forward_deps(struct lock_class *class)
1015 {
1016         unsigned long ret, flags;
1017
1018         local_irq_save(flags);
1019         __raw_spin_lock(&lockdep_lock);
1020         ret = __lockdep_count_forward_deps(class, 0);
1021         __raw_spin_unlock(&lockdep_lock);
1022         local_irq_restore(flags);
1023
1024         return ret;
1025 }
1026
1027 unsigned long __lockdep_count_backward_deps(struct lock_class *class,
1028                                             unsigned int depth)
1029 {
1030         struct lock_list *entry;
1031         unsigned long ret = 1;
1032
1033         if (lockdep_dependency_visit(class, depth))
1034                 return 0;
1035         /*
1036          * Recurse this class's dependency list:
1037          */
1038         list_for_each_entry(entry, &class->locks_before, entry)
1039                 ret += __lockdep_count_backward_deps(entry->class, depth + 1);
1040
1041         return ret;
1042 }
1043
1044 unsigned long lockdep_count_backward_deps(struct lock_class *class)
1045 {
1046         unsigned long ret, flags;
1047
1048         local_irq_save(flags);
1049         __raw_spin_lock(&lockdep_lock);
1050         ret = __lockdep_count_backward_deps(class, 0);
1051         __raw_spin_unlock(&lockdep_lock);
1052         local_irq_restore(flags);
1053
1054         return ret;
1055 }
1056
1057 /*
1058  * Prove that the dependency graph starting at <source> cannot lead to
1059  * the target lock (check_target). Print an error and return 0 if it does.
1060  */
1061 static noinline int
1062 check_noncircular(struct lock_class *source, unsigned int depth)
1063 {
1064         struct lock_list *entry;
1065
1066         if (lockdep_dependency_visit(source, depth))
1067                 return 1;
1068
1069         debug_atomic_inc(&nr_cyclic_check_recursions);
1070         if (depth > max_recursion_depth)
1071                 max_recursion_depth = depth;
1072         if (depth >= RECURSION_LIMIT)
1073                 return print_infinite_recursion_bug();
1074         /*
1075          * Check this lock's dependency list:
1076          */
1077         list_for_each_entry(entry, &source->locks_after, entry) {
1078                 if (entry->class == hlock_class(check_target))
1079                         return print_circular_bug_header(entry, depth+1);
1080                 debug_atomic_inc(&nr_cyclic_checks);
1081                 if (!check_noncircular(entry->class, depth+1))
1082                         return print_circular_bug_entry(entry, depth+1);
1083         }
1084         return 1;
1085 }
1086
1087 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1088 /*
1089  * Forwards and backwards subgraph searching, for the purposes of
1090  * proving that two subgraphs can be connected by a new dependency
1091  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1092  */
1093 static enum lock_usage_bit find_usage_bit;
1094 static struct lock_class *forwards_match, *backwards_match;
1095
1096 /*
1097  * Find a node in the forwards-direction dependency sub-graph starting
1098  * at <source> that matches <find_usage_bit>.
1099  *
1100  * Return 2 if such a node exists in the subgraph, and put that node
1101  * into <forwards_match>.
1102  *
1103  * Return 1 otherwise and keep <forwards_match> unchanged.
1104  * Return 0 on error.
1105  */
1106 static noinline int
1107 find_usage_forwards(struct lock_class *source, unsigned int depth)
1108 {
1109         struct lock_list *entry;
1110         int ret;
1111
1112         if (lockdep_dependency_visit(source, depth))
1113                 return 1;
1114
1115         if (depth > max_recursion_depth)
1116                 max_recursion_depth = depth;
1117         if (depth >= RECURSION_LIMIT)
1118                 return print_infinite_recursion_bug();
1119
1120         debug_atomic_inc(&nr_find_usage_forwards_checks);
1121         if (source->usage_mask & (1 << find_usage_bit)) {
1122                 forwards_match = source;
1123                 return 2;
1124         }
1125
1126         /*
1127          * Check this lock's dependency list:
1128          */
1129         list_for_each_entry(entry, &source->locks_after, entry) {
1130                 debug_atomic_inc(&nr_find_usage_forwards_recursions);
1131                 ret = find_usage_forwards(entry->class, depth+1);
1132                 if (ret == 2 || ret == 0)
1133                         return ret;
1134         }
1135         return 1;
1136 }
1137
1138 /*
1139  * Find a node in the backwards-direction dependency sub-graph starting
1140  * at <source> that matches <find_usage_bit>.
1141  *
1142  * Return 2 if such a node exists in the subgraph, and put that node
1143  * into <backwards_match>.
1144  *
1145  * Return 1 otherwise and keep <backwards_match> unchanged.
1146  * Return 0 on error.
1147  */
1148 static noinline int
1149 find_usage_backwards(struct lock_class *source, unsigned int depth)
1150 {
1151         struct lock_list *entry;
1152         int ret;
1153
1154         if (lockdep_dependency_visit(source, depth))
1155                 return 1;
1156
1157         if (!__raw_spin_is_locked(&lockdep_lock))
1158                 return DEBUG_LOCKS_WARN_ON(1);
1159
1160         if (depth > max_recursion_depth)
1161                 max_recursion_depth = depth;
1162         if (depth >= RECURSION_LIMIT)
1163                 return print_infinite_recursion_bug();
1164
1165         debug_atomic_inc(&nr_find_usage_backwards_checks);
1166         if (source->usage_mask & (1 << find_usage_bit)) {
1167                 backwards_match = source;
1168                 return 2;
1169         }
1170
1171         if (!source && debug_locks_off_graph_unlock()) {
1172                 WARN_ON(1);
1173                 return 0;
1174         }
1175
1176         /*
1177          * Check this lock's dependency list:
1178          */
1179         list_for_each_entry(entry, &source->locks_before, entry) {
1180                 debug_atomic_inc(&nr_find_usage_backwards_recursions);
1181                 ret = find_usage_backwards(entry->class, depth+1);
1182                 if (ret == 2 || ret == 0)
1183                         return ret;
1184         }
1185         return 1;
1186 }
1187
1188 static int
1189 print_bad_irq_dependency(struct task_struct *curr,
1190                          struct held_lock *prev,
1191                          struct held_lock *next,
1192                          enum lock_usage_bit bit1,
1193                          enum lock_usage_bit bit2,
1194                          const char *irqclass)
1195 {
1196         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1197                 return 0;
1198
1199         printk("\n======================================================\n");
1200         printk(  "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
1201                 irqclass, irqclass);
1202         print_kernel_version();
1203         printk(  "------------------------------------------------------\n");
1204         printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
1205                 curr->comm, task_pid_nr(curr),
1206                 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
1207                 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1208                 curr->hardirqs_enabled,
1209                 curr->softirqs_enabled);
1210         print_lock(next);
1211
1212         printk("\nand this task is already holding:\n");
1213         print_lock(prev);
1214         printk("which would create a new lock dependency:\n");
1215         print_lock_name(hlock_class(prev));
1216         printk(" ->");
1217         print_lock_name(hlock_class(next));
1218         printk("\n");
1219
1220         printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
1221                 irqclass);
1222         print_lock_name(backwards_match);
1223         printk("\n... which became %s-irq-safe at:\n", irqclass);
1224
1225         print_stack_trace(backwards_match->usage_traces + bit1, 1);
1226
1227         printk("\nto a %s-irq-unsafe lock:\n", irqclass);
1228         print_lock_name(forwards_match);
1229         printk("\n... which became %s-irq-unsafe at:\n", irqclass);
1230         printk("...");
1231
1232         print_stack_trace(forwards_match->usage_traces + bit2, 1);
1233
1234         printk("\nother info that might help us debug this:\n\n");
1235         lockdep_print_held_locks(curr);
1236
1237         printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass);
1238         print_lock_dependencies(backwards_match, 0);
1239
1240         printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass);
1241         print_lock_dependencies(forwards_match, 0);
1242
1243         printk("\nstack backtrace:\n");
1244         dump_stack();
1245
1246         return 0;
1247 }
1248
1249 static int
1250 check_usage(struct task_struct *curr, struct held_lock *prev,
1251             struct held_lock *next, enum lock_usage_bit bit_backwards,
1252             enum lock_usage_bit bit_forwards, const char *irqclass)
1253 {
1254         int ret;
1255
1256         find_usage_bit = bit_backwards;
1257         /* fills in <backwards_match> */
1258         ret = find_usage_backwards(hlock_class(prev), 0);
1259         if (!ret || ret == 1)
1260                 return ret;
1261
1262         find_usage_bit = bit_forwards;
1263         ret = find_usage_forwards(hlock_class(next), 0);
1264         if (!ret || ret == 1)
1265                 return ret;
1266         /* ret == 2 */
1267         return print_bad_irq_dependency(curr, prev, next,
1268                         bit_backwards, bit_forwards, irqclass);
1269 }
1270
1271 static const char *state_names[] = {
1272 #define LOCKDEP_STATE(__STATE) \
1273         STR(__STATE),
1274 #include "lockdep_states.h"
1275 #undef LOCKDEP_STATE
1276 };
1277
1278 static const char *state_rnames[] = {
1279 #define LOCKDEP_STATE(__STATE) \
1280         STR(__STATE)"-READ",
1281 #include "lockdep_states.h"
1282 #undef LOCKDEP_STATE
1283 };
1284
1285 static inline const char *state_name(enum lock_usage_bit bit)
1286 {
1287         return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
1288 }
1289
1290 static int exclusive_bit(int new_bit)
1291 {
1292         /*
1293          * USED_IN
1294          * USED_IN_READ
1295          * ENABLED
1296          * ENABLED_READ
1297          *
1298          * bit 0 - write/read
1299          * bit 1 - used_in/enabled
1300          * bit 2+  state
1301          */
1302
1303         int state = new_bit & ~3;
1304         int dir = new_bit & 2;
1305
1306         /*
1307          * keep state, bit flip the direction and strip read.
1308          */
1309         return state | (dir ^ 2);
1310 }
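/*
 * E.g. the exclusive counterpart of LOCK_USED_IN_HARDIRQ (and of
 * LOCK_USED_IN_HARDIRQ_READ) is LOCK_ENABLED_HARDIRQ, and vice versa:
 * using a lock in hardirq context conflicts with taking that same class
 * with hardirqs enabled.
 */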
1311
1312 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
1313                            struct held_lock *next, enum lock_usage_bit bit)
1314 {
1315         /*
1316          * Prove that the new dependency does not connect a hardirq-safe
1317          * lock with a hardirq-unsafe lock - to achieve this we search
1318          * the backwards-subgraph starting at <prev>, and the
1319          * forwards-subgraph starting at <next>:
1320          */
1321         if (!check_usage(curr, prev, next, bit,
1322                            exclusive_bit(bit), state_name(bit)))
1323                 return 0;
1324
1325         bit++; /* _READ */
1326
1327         /*
1328          * Prove that the new dependency does not connect a hardirq-safe-read
1329          * lock with a hardirq-unsafe lock - to achieve this we search
1330          * the backwards-subgraph starting at <prev>, and the
1331          * forwards-subgraph starting at <next>:
1332          */
1333         if (!check_usage(curr, prev, next, bit,
1334                            exclusive_bit(bit), state_name(bit)))
1335                 return 0;
1336
1337         return 1;
1338 }
1339
1340 static int
1341 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1342                 struct held_lock *next)
1343 {
1344 #define LOCKDEP_STATE(__STATE)                                          \
1345         if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
1346                 return 0;
1347 #include "lockdep_states.h"
1348 #undef LOCKDEP_STATE
1349
1350         return 1;
1351 }
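/*
 * The include above expands to one check_irq_usage() call per state in
 * lockdep_states.h (hardirq, softirq and the reclaim state), each proving
 * that the new dependency does not connect a <state>-safe lock to a
 * <state>-unsafe one.
 */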
1352
1353 static void inc_chains(void)
1354 {
1355         if (current->hardirq_context)
1356                 nr_hardirq_chains++;
1357         else {
1358                 if (current->softirq_context)
1359                         nr_softirq_chains++;
1360                 else
1361                         nr_process_chains++;
1362         }
1363 }
1364
1365 #else
1366
1367 static inline int
1368 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1369                 struct held_lock *next)
1370 {
1371         return 1;
1372 }
1373
1374 static inline void inc_chains(void)
1375 {
1376         nr_process_chains++;
1377 }
1378
1379 #endif
1380
1381 static int
1382 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
1383                    struct held_lock *next)
1384 {
1385         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1386                 return 0;
1387
1388         printk("\n=============================================\n");
1389         printk(  "[ INFO: possible recursive locking detected ]\n");
1390         print_kernel_version();
1391         printk(  "---------------------------------------------\n");
1392         printk("%s/%d is trying to acquire lock:\n",
1393                 curr->comm, task_pid_nr(curr));
1394         print_lock(next);
1395         printk("\nbut task is already holding lock:\n");
1396         print_lock(prev);
1397
1398         printk("\nother info that might help us debug this:\n");
1399         lockdep_print_held_locks(curr);
1400
1401         printk("\nstack backtrace:\n");
1402         dump_stack();
1403
1404         return 0;
1405 }
1406
1407 /*
1408  * Check whether we are holding such a class already.
1409  *
1410  * (Note that this has to be done separately, because the graph cannot
1411  * detect such classes of deadlocks.)
1412  *
1413  * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
1414  */
1415 static int
1416 check_deadlock(struct task_struct *curr, struct held_lock *next,
1417                struct lockdep_map *next_instance, int read)
1418 {
1419         struct held_lock *prev;
1420         struct held_lock *nest = NULL;
1421         int i;
1422
1423         for (i = 0; i < curr->lockdep_depth; i++) {
1424                 prev = curr->held_locks + i;
1425
1426                 if (prev->instance == next->nest_lock)
1427                         nest = prev;
1428
1429                 if (hlock_class(prev) != hlock_class(next))
1430                         continue;
1431
1432                 /*
1433                  * Allow read-after-read recursion of the same
1434                  * lock class (i.e. read_lock(lock)+read_lock(lock)):
1435                  */
1436                 if ((read == 2) && prev->read)
1437                         return 2;
1438
1439                 /*
1440                  * We're holding the nest_lock, which serializes this lock's
1441                  * nesting behaviour.
1442                  */
1443                 if (nest)
1444                         return 2;
1445
1446                 return print_deadlock_bug(curr, prev, next);
1447         }
1448         return 1;
1449 }
1450
1451 /*
1452  * There was a chain-cache miss, and we are about to add a new dependency
1453  * to a previous lock. We recursively validate the following rules:
1454  *
1455  *  - would the adding of the <prev> -> <next> dependency create a
1456  *    circular dependency in the graph? [== circular deadlock]
1457  *
1458  *  - does the new prev->next dependency connect any hardirq-safe lock
1459  *    (in the full backwards-subgraph starting at <prev>) with any
1460  *    hardirq-unsafe lock (in the full forwards-subgraph starting at
1461  *    <next>)? [== illegal lock inversion with hardirq contexts]
1462  *
1463  *  - does the new prev->next dependency connect any softirq-safe lock
1464  *    (in the full backwards-subgraph starting at <prev>) with any
1465  *    softirq-unsafe lock (in the full forwards-subgraph starting at
1466  *    <next>)? [== illegal lock inversion with softirq contexts]
1467  *
1468  * any of these scenarios could lead to a deadlock.
1469  *
1470  * Then if all the validations pass, we add the forwards and backwards
1471  * dependency.
1472  */
1473 static int
1474 check_prev_add(struct task_struct *curr, struct held_lock *prev,
1475                struct held_lock *next, int distance)
1476 {
1477         struct lock_list *entry;
1478         int ret;
1479
1480         /*
1481          * Prove that the new <prev> -> <next> dependency would not
1482          * create a circular dependency in the graph. (We do this by
1483          * forward-recursing into the graph starting at <next>, and
1484          * checking whether we can reach <prev>.)
1485          *
1486          * We are using global variables to control the recursion, to
1487          * keep the stackframe size of the recursive functions low:
1488          */
1489         check_source = next;
1490         check_target = prev;
1491         if (!(check_noncircular(hlock_class(next), 0)))
1492                 return print_circular_bug_tail();
1493
1494         if (!check_prev_add_irq(curr, prev, next))
1495                 return 0;
1496
1497         /*
1498          * For recursive read-locks we do all the dependency checks,
1499  * but we don't store read-triggered dependencies (only
1500          * write-triggered dependencies). This ensures that only the
1501          * write-side dependencies matter, and that if for example a
1502          * write-lock never takes any other locks, then the reads are
1503          * equivalent to a NOP.
1504          */
1505         if (next->read == 2 || prev->read == 2)
1506                 return 1;
1507         /*
1508          * Is the <prev> -> <next> dependency already present?
1509          *
1510          * (this may occur even though this is a new chain: consider
1511          *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
1512          *  chains - the second one will be new, but L1 already has
1513          *  L2 added to its dependency list, due to the first chain.)
1514          */
1515         list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
1516                 if (entry->class == hlock_class(next)) {
1517                         if (distance == 1)
1518                                 entry->distance = 1;
1519                         return 2;
1520                 }
1521         }
1522
1523         /*
1524          * Ok, all validations passed, add the new lock
1525          * to the previous lock's dependency list:
1526          */
1527         ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
1528                                &hlock_class(prev)->locks_after,
1529                                next->acquire_ip, distance);
1530
1531         if (!ret)
1532                 return 0;
1533
1534         ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
1535                                &hlock_class(next)->locks_before,
1536                                next->acquire_ip, distance);
1537         if (!ret)
1538                 return 0;
1539
1540         /*
1541          * Debugging printouts:
1542          */
1543         if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
1544                 graph_unlock();
1545                 printk("\n new dependency: ");
1546                 print_lock_name(hlock_class(prev));
1547                 printk(" => ");
1548                 print_lock_name(hlock_class(next));
1549                 printk("\n");
1550                 dump_stack();
1551                 return graph_lock();
1552         }
1553         return 1;
1554 }
1555
1556 /*
1557  * Add the dependency to all directly-previous locks that are 'relevant'.
1558  * The ones that are relevant are (in increasing distance from curr):
1559  * all consecutive trylock entries and the final non-trylock entry - or
1560  * the end of this context's lock-chain - whichever comes first.
1561  */
1562 static int
1563 check_prevs_add(struct task_struct *curr, struct held_lock *next)
1564 {
1565         int depth = curr->lockdep_depth;
1566         struct held_lock *hlock;
1567
1568         /*
1569          * Debugging checks.
1570          *
1571          * Depth must not be zero for a non-head lock:
1572          */
1573         if (!depth)
1574                 goto out_bug;
1575         /*
1576          * At least two relevant locks must exist for this
1577          * to be a head:
1578          */
1579         if (curr->held_locks[depth].irq_context !=
1580                         curr->held_locks[depth-1].irq_context)
1581                 goto out_bug;
1582
1583         for (;;) {
1584                 int distance = curr->lockdep_depth - depth + 1;
1585                 hlock = curr->held_locks + depth-1;
1586                 /*
1587                  * Only non-recursive-read entries get new dependencies
1588                  * added:
1589                  */
1590                 if (hlock->read != 2) {
1591                         if (!check_prev_add(curr, hlock, next, distance))
1592                                 return 0;
1593                         /*
1594                          * Stop after the first non-trylock entry,
1595                          * as non-trylock entries have added their
1596                          * own direct dependencies already, so this
1597                          * lock is connected to them indirectly:
1598                          */
1599                         if (!hlock->trylock)
1600                                 break;
1601                 }
1602                 depth--;
1603                 /*
1604                  * End of lock-stack?
1605                  */
1606                 if (!depth)
1607                         break;
1608                 /*
1609                  * Stop the search if we cross into another context:
1610                  */
1611                 if (curr->held_locks[depth].irq_context !=
1612                                 curr->held_locks[depth-1].irq_context)
1613                         break;
1614         }
1615         return 1;
1616 out_bug:
1617         if (!debug_locks_off_graph_unlock())
1618                 return 0;
1619
1620         WARN_ON(1);
1621
1622         return 0;
1623 }
1624
1625 unsigned long nr_lock_chains;
1626 struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
1627 int nr_chain_hlocks;
1628 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
1629
1630 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
1631 {
1632         return lock_classes + chain_hlocks[chain->base + i];
1633 }
1634
1635 /*
1636  * Look up a dependency chain. If the key is not present yet then
1637  * add it and return 1 - in this case the new dependency chain is
1638  * validated. If the key is already hashed, return 0.
1639  * (On return with 1 graph_lock is held.)
1640  */
1641 static inline int lookup_chain_cache(struct task_struct *curr,
1642                                      struct held_lock *hlock,
1643                                      u64 chain_key)
1644 {
1645         struct lock_class *class = hlock_class(hlock);
1646         struct list_head *hash_head = chainhashentry(chain_key);
1647         struct lock_chain *chain;
1648         struct held_lock *hlock_curr, *hlock_next;
1649         int i, j, n, cn;
1650
1651         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1652                 return 0;
1653         /*
1654          * We can walk it lock-free, because entries only get added
1655          * to the hash:
1656          */
1657         list_for_each_entry(chain, hash_head, entry) {
1658                 if (chain->chain_key == chain_key) {
1659 cache_hit:
1660                         debug_atomic_inc(&chain_lookup_hits);
1661                         if (very_verbose(class))
1662                                 printk("\nhash chain already cached, key: "
1663                                         "%016Lx tail class: [%p] %s\n",
1664                                         (unsigned long long)chain_key,
1665                                         class->key, class->name);
1666                         return 0;
1667                 }
1668         }
1669         if (very_verbose(class))
1670                 printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
1671                         (unsigned long long)chain_key, class->key, class->name);
1672         /*
1673          * Allocate a new chain entry from the static array, and add
1674          * it to the hash:
1675          */
1676         if (!graph_lock())
1677                 return 0;
1678         /*
1679          * We have to walk the chain again locked - to avoid duplicates:
1680          */
1681         list_for_each_entry(chain, hash_head, entry) {
1682                 if (chain->chain_key == chain_key) {
1683                         graph_unlock();
1684                         goto cache_hit;
1685                 }
1686         }
1687         if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
1688                 if (!debug_locks_off_graph_unlock())
1689                         return 0;
1690
1691                 printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
1692                 printk("turning off the locking correctness validator.\n");
1693                 return 0;
1694         }
1695         chain = lock_chains + nr_lock_chains++;
1696         chain->chain_key = chain_key;
1697         chain->irq_context = hlock->irq_context;
1698         /* Find the first held_lock of current chain */
1699         hlock_next = hlock;
1700         for (i = curr->lockdep_depth - 1; i >= 0; i--) {
1701                 hlock_curr = curr->held_locks + i;
1702                 if (hlock_curr->irq_context != hlock_next->irq_context)
1703                         break;
1704                 hlock_next = hlock;
1705         }
1706         i++;
1707         chain->depth = curr->lockdep_depth + 1 - i;
1708         cn = nr_chain_hlocks;
1709         while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) {
1710                 n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth);
1711                 if (n == cn)
1712                         break;
1713                 cn = n;
1714         }
1715         if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
1716                 chain->base = cn;
1717                 for (j = 0; j < chain->depth - 1; j++, i++) {
1718                         int lock_id = curr->held_locks[i].class_idx - 1;
1719                         chain_hlocks[chain->base + j] = lock_id;
1720                 }
1721                 chain_hlocks[chain->base + j] = class - lock_classes;
1722         }
1723         list_add_tail_rcu(&chain->entry, hash_head);
1724         debug_atomic_inc(&chain_lookup_misses);
1725         inc_chains();
1726
1727         return 1;
1728 }
1729
1730 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
1731                 struct held_lock *hlock, int chain_head, u64 chain_key)
1732 {
1733         /*
1734          * Trylock needs to maintain the stack of held locks, but it
1735          * does not add new dependencies, because trylock can be done
1736          * in any order.
1737          *
1738          * We look up the chain_key and do the O(N^2) check and update of
1739          * the dependencies only if this is a new dependency chain.
1740          * (If lookup_chain_cache() returns with 1 it acquires
1741          * graph_lock for us)
1742          */
1743         if (!hlock->trylock && (hlock->check == 2) &&
1744             lookup_chain_cache(curr, hlock, chain_key)) {
1745                 /*
1746                  * Check whether last held lock:
1747                  *
1748                  * - is irq-safe, if this lock is irq-unsafe
1749                  * - is softirq-safe, if this lock is hardirq-unsafe
1750                  *
1751                  * And check whether the new lock's dependency graph
1752                  * could lead back to the previous lock.
1753                  *
1754                  * Any of these scenarios could lead to a deadlock. If
1755                  * all validations pass, the new dependencies are added to the graph.
1756                  */
1757                 int ret = check_deadlock(curr, hlock, lock, hlock->read);
1758
1759                 if (!ret)
1760                         return 0;
1761                 /*
1762                  * Mark recursive read, as we jump over it when
1763                  * building dependencies (just like we jump over
1764                  * trylock entries):
1765                  */
1766                 if (ret == 2)
1767                         hlock->read = 2;
1768                 /*
1769                  * Add dependency only if this lock is not the head
1770                  * of the chain, and if it's not a secondary read-lock:
1771                  */
1772                 if (!chain_head && ret != 2)
1773                         if (!check_prevs_add(curr, hlock))
1774                                 return 0;
1775                 graph_unlock();
1776         } else
1777                 /* after lookup_chain_cache(): */
1778                 if (unlikely(!debug_locks))
1779                         return 0;
1780
1781         return 1;
1782 }
1783 #else
1784 static inline int validate_chain(struct task_struct *curr,
1785                 struct lockdep_map *lock, struct held_lock *hlock,
1786                 int chain_head, u64 chain_key)
1787 {
1788         return 1;
1789 }
1790 #endif
1791
1792 /*
1793  * We are building curr_chain_key incrementally, so double-check
1794  * it from scratch, to make sure that it's done correctly:
1795  */
1796 static void check_chain_key(struct task_struct *curr)
1797 {
1798 #ifdef CONFIG_DEBUG_LOCKDEP
1799         struct held_lock *hlock, *prev_hlock = NULL;
1800         unsigned int i, id;
1801         u64 chain_key = 0;
1802
1803         for (i = 0; i < curr->lockdep_depth; i++) {
1804                 hlock = curr->held_locks + i;
1805                 if (chain_key != hlock->prev_chain_key) {
1806                         debug_locks_off();
1807                         WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
1808                                 curr->lockdep_depth, i,
1809                                 (unsigned long long)chain_key,
1810                                 (unsigned long long)hlock->prev_chain_key);
1811                         return;
1812                 }
1813                 id = hlock->class_idx - 1;
1814                 if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
1815                         return;
1816
1817                 if (prev_hlock && (prev_hlock->irq_context !=
1818                                                         hlock->irq_context))
1819                         chain_key = 0;
1820                 chain_key = iterate_chain_key(chain_key, id);
1821                 prev_hlock = hlock;
1822         }
1823         if (chain_key != curr->curr_chain_key) {
1824                 debug_locks_off();
1825                 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
1826                         curr->lockdep_depth, i,
1827                         (unsigned long long)chain_key,
1828                         (unsigned long long)curr->curr_chain_key);
1829         }
1830 #endif
1831 }
1832
1833 static int
1834 print_usage_bug(struct task_struct *curr, struct held_lock *this,
1835                 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
1836 {
1837         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1838                 return 0;
1839
1840         printk("\n=================================\n");
1841         printk(  "[ INFO: inconsistent lock state ]\n");
1842         print_kernel_version();
1843         printk(  "---------------------------------\n");
1844
1845         printk("inconsistent {%s} -> {%s} usage.\n",
1846                 usage_str[prev_bit], usage_str[new_bit]);
1847
1848         printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
1849                 curr->comm, task_pid_nr(curr),
1850                 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
1851                 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
1852                 trace_hardirqs_enabled(curr),
1853                 trace_softirqs_enabled(curr));
1854         print_lock(this);
1855
1856         printk("{%s} state was registered at:\n", usage_str[prev_bit]);
1857         print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
1858
1859         print_irqtrace_events(curr);
1860         printk("\nother info that might help us debug this:\n");
1861         lockdep_print_held_locks(curr);
1862
1863         printk("\nstack backtrace:\n");
1864         dump_stack();
1865
1866         return 0;
1867 }
1868
1869 /*
1870  * Print out an error if an invalid bit is set:
1871  */
1872 static inline int
1873 valid_state(struct task_struct *curr, struct held_lock *this,
1874             enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
1875 {
1876         if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
1877                 return print_usage_bug(curr, this, bad_bit, new_bit);
1878         return 1;
1879 }
1880
1881 static int mark_lock(struct task_struct *curr, struct held_lock *this,
1882                      enum lock_usage_bit new_bit);
1883
1884 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1885
1886 /*
1887  * print irq inversion bug:
1888  */
1889 static int
1890 print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
1891                         struct held_lock *this, int forwards,
1892                         const char *irqclass)
1893 {
1894         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1895                 return 0;
1896
1897         printk("\n=========================================================\n");
1898         printk(  "[ INFO: possible irq lock inversion dependency detected ]\n");
1899         print_kernel_version();
1900         printk(  "---------------------------------------------------------\n");
1901         printk("%s/%d just changed the state of lock:\n",
1902                 curr->comm, task_pid_nr(curr));
1903         print_lock(this);
1904         if (forwards)
1905                 printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
1906         else
1907                 printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass);
1908         print_lock_name(other);
1909         printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
1910
1911         printk("\nother info that might help us debug this:\n");
1912         lockdep_print_held_locks(curr);
1913
1914         printk("\nthe first lock's dependencies:\n");
1915         print_lock_dependencies(hlock_class(this), 0);
1916
1917         printk("\nthe second lock's dependencies:\n");
1918         print_lock_dependencies(other, 0);
1919
1920         printk("\nstack backtrace:\n");
1921         dump_stack();
1922
1923         return 0;
1924 }
1925
1926 /*
1927  * Prove that in the forwards-direction subgraph starting at <this>
1928  * there is no lock matching <mask>:
1929  */
1930 static int
1931 check_usage_forwards(struct task_struct *curr, struct held_lock *this,
1932                      enum lock_usage_bit bit, const char *irqclass)
1933 {
1934         int ret;
1935
1936         find_usage_bit = bit;
1937         /* fills in <forwards_match> */
1938         ret = find_usage_forwards(hlock_class(this), 0);
1939         if (!ret || ret == 1)
1940                 return ret;
1941
1942         return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
1943 }
1944
1945 /*
1946  * Prove that in the backwards-direction subgraph starting at <this>
1947  * there is no lock matching <mask>:
1948  */
1949 static int
1950 check_usage_backwards(struct task_struct *curr, struct held_lock *this,
1951                       enum lock_usage_bit bit, const char *irqclass)
1952 {
1953         int ret;
1954
1955         find_usage_bit = bit;
1956         /* fills in <backwards_match> */
1957         ret = find_usage_backwards(hlock_class(this), 0);
1958         if (!ret || ret == 1)
1959                 return ret;
1960
1961         return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
1962 }
1963
1964 void print_irqtrace_events(struct task_struct *curr)
1965 {
1966         printk("irq event stamp: %u\n", curr->irq_events);
1967         printk("hardirqs last  enabled at (%u): ", curr->hardirq_enable_event);
1968         print_ip_sym(curr->hardirq_enable_ip);
1969         printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
1970         print_ip_sym(curr->hardirq_disable_ip);
1971         printk("softirqs last  enabled at (%u): ", curr->softirq_enable_event);
1972         print_ip_sym(curr->softirq_enable_ip);
1973         printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
1974         print_ip_sym(curr->softirq_disable_ip);
1975 }
1976
1977 static int HARDIRQ_verbose(struct lock_class *class)
1978 {
1979 #if HARDIRQ_VERBOSE
1980         return class_filter(class);
1981 #endif
1982         return 0;
1983 }
1984
1985 static int SOFTIRQ_verbose(struct lock_class *class)
1986 {
1987 #if SOFTIRQ_VERBOSE
1988         return class_filter(class);
1989 #endif
1990         return 0;
1991 }
1992
1993 static int RECLAIM_FS_verbose(struct lock_class *class)
1994 {
1995 #if RECLAIM_VERBOSE
1996         return class_filter(class);
1997 #endif
1998         return 0;
1999 }
2000
2001 #define STRICT_READ_CHECKS      1
2002
2003 static int (*state_verbose_f[])(struct lock_class *class) = {
2004 #define LOCKDEP_STATE(__STATE) \
2005         __STATE##_verbose,
2006 #include "lockdep_states.h"
2007 #undef LOCKDEP_STATE
2008 };
2009
2010 static inline int state_verbose(enum lock_usage_bit bit,
2011                                 struct lock_class *class)
2012 {
2013         return state_verbose_f[bit >> 2](class);
2014 }
2015
2016 typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
2017                              enum lock_usage_bit bit, const char *name);
2018
2019 static int
2020 mark_lock_irq(struct task_struct *curr, struct held_lock *this, int new_bit)
2021 {
2022         int excl_bit = exclusive_bit(new_bit);
2023         int read = new_bit & 1;
2024         int dir = new_bit & 2;
2025
2026         /*
2027          * mark USED_IN has to look forwards -- to ensure no dependency
2028          * has ENABLED state, which would allow recursion deadlocks.
2029          *
2030          * mark ENABLED has to look backwards -- to ensure no dependee
2031          * has USED_IN state, which, again, would allow  recursion deadlocks.
2032          */
2033         check_usage_f usage = dir ?
2034                 check_usage_backwards : check_usage_forwards;
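        /*
         * Illustrative instance of the rule above (hypothetical locks):
         * marking a lock L as LOCK_USED_IN_HARDIRQ (dir == 0) walks the
         * forward graph looking for LOCK_ENABLED_HARDIRQ usage. If some
         * lock M reachable from L is ever taken with hardirqs enabled,
         * a hardirq could interrupt M's holder, take L in the handler
         * and then block on M - which the interrupted context already
         * holds.
         */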
2035
2036         /*
2037          * Validate that this particular lock does not have conflicting
2038          * usage states.
2039          */
2040         if (!valid_state(curr, this, new_bit, excl_bit))
2041                 return 0;
2042
2043         /*
2044          * Validate that the lock dependencies don't have conflicting usage
2045          * states.
2046          */
2047         if ((!read || !dir || STRICT_READ_CHECKS) &&
2048                         !usage(curr, this, excl_bit, state_name(new_bit)))
2049                 return 0;
2050
2051         /*
2052          * Check for read in write conflicts
2053          */
2054         if (!read) {
2055                 if (!valid_state(curr, this, new_bit, excl_bit + 1))
2056                         return 0;
2057
2058                 if (STRICT_READ_CHECKS &&
2059                         !usage(curr, this, excl_bit + 1,
2060                                 state_name(new_bit + 1)))
2061                         return 0;
2062         }
2063
2064         if (state_verbose(new_bit, hlock_class(this)))
2065                 return 2;
2066
2067         return 1;
2068 }
2069
2070 enum mark_type {
2071 #define LOCKDEP_STATE(__STATE)  __STATE,
2072 #include "lockdep_states.h"
2073 #undef LOCKDEP_STATE
2074 };
2075
2076 /*
2077  * Mark all held locks with a usage bit:
2078  */
2079 static int
2080 mark_held_locks(struct task_struct *curr, enum mark_type mark)
2081 {
2082         enum lock_usage_bit usage_bit;
2083         struct held_lock *hlock;
2084         int i;
2085
2086         for (i = 0; i < curr->lockdep_depth; i++) {
2087                 hlock = curr->held_locks + i;
2088
2089                 usage_bit = 2 + (mark << 2); /* ENABLED */
2090                 if (hlock->read)
2091                         usage_bit += 1; /* READ */
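                /*
                 * Illustrative: usage bits come in blocks of four per
                 * state (USED_IN, USED_IN_READ, ENABLED, ENABLED_READ),
                 * so e.g. mark == SOFTIRQ on a read-held lock yields
                 * LOCK_ENABLED_SOFTIRQ_READ.
                 */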
2092
2093                 BUG_ON(usage_bit >= LOCK_USAGE_STATES);
2094
2095                 if (!mark_lock(curr, hlock, usage_bit))
2096                         return 0;
2097         }
2098
2099         return 1;
2100 }
2101
2102 /*
2103  * Debugging helper: via this flag we know that we are in
2104  * 'early bootup code', and will warn about any invalid irqs-on event:
2105  */
2106 static int early_boot_irqs_enabled;
2107
2108 void early_boot_irqs_off(void)
2109 {
2110         early_boot_irqs_enabled = 0;
2111 }
2112
2113 void early_boot_irqs_on(void)
2114 {
2115         early_boot_irqs_enabled = 1;
2116 }
2117
2118 /*
2119  * Hardirqs will be enabled:
2120  */
2121 void trace_hardirqs_on_caller(unsigned long ip)
2122 {
2123         struct task_struct *curr = current;
2124
2125         time_hardirqs_on(CALLER_ADDR0, ip);
2126
2127         if (unlikely(!debug_locks || current->lockdep_recursion))
2128                 return;
2129
2130         if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
2131                 return;
2132
2133         if (unlikely(curr->hardirqs_enabled)) {
2134                 debug_atomic_inc(&redundant_hardirqs_on);
2135                 return;
2136         }
2137         /* we'll do an OFF -> ON transition: */
2138         curr->hardirqs_enabled = 1;
2139
2140         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2141                 return;
2142         if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2143                 return;
2144         /*
2145          * We are going to turn hardirqs on, so set the
2146          * usage bit for all held locks:
2147          */
2148         if (!mark_held_locks(curr, HARDIRQ))
2149                 return;
2150         /*
2151          * If we have softirqs enabled, then set the usage
2152          * bit for all held locks. (disabled hardirqs prevented
2153          * this bit from being set before)
2154          */
2155         if (curr->softirqs_enabled)
2156                 if (!mark_held_locks(curr, SOFTIRQ))
2157                         return;
2158
2159         curr->hardirq_enable_ip = ip;
2160         curr->hardirq_enable_event = ++curr->irq_events;
2161         debug_atomic_inc(&hardirqs_on_events);
2162 }
2163 EXPORT_SYMBOL(trace_hardirqs_on_caller);
2164
2165 void trace_hardirqs_on(void)
2166 {
2167         trace_hardirqs_on_caller(CALLER_ADDR0);
2168 }
2169 EXPORT_SYMBOL(trace_hardirqs_on);
2170
2171 /*
2172  * Hardirqs were disabled:
2173  */
2174 void trace_hardirqs_off_caller(unsigned long ip)
2175 {
2176         struct task_struct *curr = current;
2177
2178         time_hardirqs_off(CALLER_ADDR0, ip);
2179
2180         if (unlikely(!debug_locks || current->lockdep_recursion))
2181                 return;
2182
2183         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2184                 return;
2185
2186         if (curr->hardirqs_enabled) {
2187                 /*
2188                  * We have done an ON -> OFF transition:
2189                  */
2190                 curr->hardirqs_enabled = 0;
2191                 curr->hardirq_disable_ip = ip;
2192                 curr->hardirq_disable_event = ++curr->irq_events;
2193                 debug_atomic_inc(&hardirqs_off_events);
2194         } else
2195                 debug_atomic_inc(&redundant_hardirqs_off);
2196 }
2197 EXPORT_SYMBOL(trace_hardirqs_off_caller);
2198
2199 void trace_hardirqs_off(void)
2200 {
2201         trace_hardirqs_off_caller(CALLER_ADDR0);
2202 }
2203 EXPORT_SYMBOL(trace_hardirqs_off);
2204
2205 /*
2206  * Softirqs will be enabled:
2207  */
2208 void trace_softirqs_on(unsigned long ip)
2209 {
2210         struct task_struct *curr = current;
2211
2212         if (unlikely(!debug_locks))
2213                 return;
2214
2215         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2216                 return;
2217
2218         if (curr->softirqs_enabled) {
2219                 debug_atomic_inc(&redundant_softirqs_on);
2220                 return;
2221         }
2222
2223         /*
2224          * We'll do an OFF -> ON transition:
2225          */
2226         curr->softirqs_enabled = 1;
2227         curr->softirq_enable_ip = ip;
2228         curr->softirq_enable_event = ++curr->irq_events;
2229         debug_atomic_inc(&softirqs_on_events);
2230         /*
2231          * We are going to turn softirqs on, so set the
2232          * usage bit for all held locks, if hardirqs are
2233          * enabled too:
2234          */
2235         if (curr->hardirqs_enabled)
2236                 mark_held_locks(curr, SOFTIRQ);
2237 }
2238
2239 /*
2240  * Softirqs were disabled:
2241  */
2242 void trace_softirqs_off(unsigned long ip)
2243 {
2244         struct task_struct *curr = current;
2245
2246         if (unlikely(!debug_locks))
2247                 return;
2248
2249         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2250                 return;
2251
2252         if (curr->softirqs_enabled) {
2253                 /*
2254                  * We have done an ON -> OFF transition:
2255                  */
2256                 curr->softirqs_enabled = 0;
2257                 curr->softirq_disable_ip = ip;
2258                 curr->softirq_disable_event = ++curr->irq_events;
2259                 debug_atomic_inc(&softirqs_off_events);
2260                 DEBUG_LOCKS_WARN_ON(!softirq_count());
2261         } else
2262                 debug_atomic_inc(&redundant_softirqs_off);
2263 }
2264
2265 void lockdep_trace_alloc(gfp_t gfp_mask)
2266 {
2267         struct task_struct *curr = current;
2268
2269         if (unlikely(!debug_locks))
2270                 return;
2271
2272         /* no reclaim without waiting on it */
2273         if (!(gfp_mask & __GFP_WAIT))
2274                 return;
2275
2276         /* this guy won't enter reclaim */
2277         if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
2278                 return;
2279
2280         /* We're only interested in __GFP_FS allocations for now */
2281         if (!(gfp_mask & __GFP_FS))
2282                 return;
2283
2284         if (DEBUG_LOCKS_WARN_ON(irqs_disabled()))
2285                 return;
2286
2287         mark_held_locks(curr, RECLAIM_FS);
2288 }
2289
2290 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2291 {
2292         /*
2293          * If non-trylock use in a hardirq or softirq context, then
2294          * mark the lock as used in these contexts:
2295          */
2296         if (!hlock->trylock) {
2297                 if (hlock->read) {
2298                         if (curr->hardirq_context)
2299                                 if (!mark_lock(curr, hlock,
2300                                                 LOCK_USED_IN_HARDIRQ_READ))
2301                                         return 0;
2302                         if (curr->softirq_context)
2303                                 if (!mark_lock(curr, hlock,
2304                                                 LOCK_USED_IN_SOFTIRQ_READ))
2305                                         return 0;
2306                 } else {
2307                         if (curr->hardirq_context)
2308                                 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
2309                                         return 0;
2310                         if (curr->softirq_context)
2311                                 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
2312                                         return 0;
2313                 }
2314         }
2315         if (!hlock->hardirqs_off) {
2316                 if (hlock->read) {
2317                         if (!mark_lock(curr, hlock,
2318                                         LOCK_ENABLED_HARDIRQ_READ))
2319                                 return 0;
2320                         if (curr->softirqs_enabled)
2321                                 if (!mark_lock(curr, hlock,
2322                                                 LOCK_ENABLED_SOFTIRQ_READ))
2323                                         return 0;
2324                 } else {
2325                         if (!mark_lock(curr, hlock,
2326                                         LOCK_ENABLED_HARDIRQ))
2327                                 return 0;
2328                         if (curr->softirqs_enabled)
2329                                 if (!mark_lock(curr, hlock,
2330                                                 LOCK_ENABLED_SOFTIRQ))
2331                                         return 0;
2332                 }
2333         }
2334
2335         /*
2336          * We reuse the irq context infrastructure more broadly as a general
2337          * context checking code. This tests GFP_FS recursion (a lock taken
2338          * during reclaim for a GFP_FS allocation is held over a GFP_FS
2339          * allocation).
2340          */
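        /*
         * Illustrative scenario (hypothetical lock): a filesystem takes
         * lock L and, still holding it, performs a GFP_FS allocation.
         * Under memory pressure that allocation can recurse into reclaim
         * and back into the filesystem, which needs L again - a
         * self-deadlock. Marking locks held over GFP_FS allocations lets
         * lockdep flag such cycles.
         */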
2341         if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
2342                 if (hlock->read) {
2343                         if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
2344                                         return 0;
2345                 } else {
2346                         if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
2347                                         return 0;
2348                 }
2349         }
2350
2351         return 1;
2352 }
2353
2354 static int separate_irq_context(struct task_struct *curr,
2355                 struct held_lock *hlock)
2356 {
2357         unsigned int depth = curr->lockdep_depth;
2358
2359         /*
2360          * Keep track of points where we cross into an interrupt context:
2361          */
2362         hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
2363                                 curr->softirq_context;
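        /*
         * Rough encoding (illustrative): 0 in process context, 1 in
         * softirq context, 2 in hardirq context (3 for a hardirq that
         * interrupted a softirq). softirq_context is a nesting count,
         * so deeper nesting yields larger values.
         */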
2364         if (depth) {
2365                 struct held_lock *prev_hlock;
2366
2367                 prev_hlock = curr->held_locks + depth-1;
2368                 /*
2369                  * If we cross into another context, reset the
2370                  * hash key (this also prevents the checking and the
2371                  * adding of the dependency to 'prev'):
2372                  */
2373                 if (prev_hlock->irq_context != hlock->irq_context)
2374                         return 1;
2375         }
2376         return 0;
2377 }
2378
2379 #else
2380
2381 static inline
2382 int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2383                 enum lock_usage_bit new_bit)
2384 {
2385         WARN_ON(1);
2386         return 1;
2387 }
2388
2389 static inline int mark_irqflags(struct task_struct *curr,
2390                 struct held_lock *hlock)
2391 {
2392         return 1;
2393 }
2394
2395 static inline int separate_irq_context(struct task_struct *curr,
2396                 struct held_lock *hlock)
2397 {
2398         return 0;
2399 }
2400
2401 #endif
2402
2403 /*
2404  * Mark a lock with a usage bit, and validate the state transition:
2405  */
2406 static int mark_lock(struct task_struct *curr, struct held_lock *this,
2407                              enum lock_usage_bit new_bit)
2408 {
2409         unsigned int new_mask = 1 << new_bit, ret = 1;
2410
2411         /*
2412          * If already set then do not dirty the cacheline,
2413          * nor do any checks:
2414          */
2415         if (likely(hlock_class(this)->usage_mask & new_mask))
2416                 return 1;
2417
2418         if (!graph_lock())
2419                 return 0;
2420         /*
2421          * Make sure we didnt race:
2422          */
2423         if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
2424                 graph_unlock();
2425                 return 1;
2426         }
2427
2428         hlock_class(this)->usage_mask |= new_mask;
2429
2430         if (!save_trace(hlock_class(this)->usage_traces + new_bit))
2431                 return 0;
2432
2433         switch (new_bit) {
2434 #define LOCKDEP_STATE(__STATE)                  \
2435         case LOCK_USED_IN_##__STATE:            \
2436         case LOCK_USED_IN_##__STATE##_READ:     \
2437         case LOCK_ENABLED_##__STATE:            \
2438         case LOCK_ENABLED_##__STATE##_READ:
2439 #include "lockdep_states.h"
2440 #undef LOCKDEP_STATE
2441                 ret = mark_lock_irq(curr, this, new_bit);
2442                 if (!ret)
2443                         return 0;
2444                 break;
2445         case LOCK_USED:
2446                 debug_atomic_dec(&nr_unused_locks);
2447                 break;
2448         default:
2449                 if (!debug_locks_off_graph_unlock())
2450                         return 0;
2451                 WARN_ON(1);
2452                 return 0;
2453         }
2454
2455         graph_unlock();
2456
2457         /*
2458          * We must printk outside of the graph_lock:
2459          */
2460         if (ret == 2) {
2461                 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
2462                 print_lock(this);
2463                 print_irqtrace_events(curr);
2464                 dump_stack();
2465         }
2466
2467         return ret;
2468 }
2469
2470 /*
2471  * Initialize a lock instance's lock-class mapping info:
2472  */
2473 void lockdep_init_map(struct lockdep_map *lock, const char *name,
2474                       struct lock_class_key *key, int subclass)
2475 {
2476         if (unlikely(!debug_locks))
2477                 return;
2478
2479         if (DEBUG_LOCKS_WARN_ON(!key))
2480                 return;
2481         if (DEBUG_LOCKS_WARN_ON(!name))
2482                 return;
2483         /*
2484          * Sanity check, the lock-class key must be persistent:
2485          */
2486         if (!static_obj(key)) {
2487                 printk("BUG: key %p not in .data!\n", key);
2488                 DEBUG_LOCKS_WARN_ON(1);
2489                 return;
2490         }
2491         lock->name = name;
2492         lock->key = key;
2493         lock->class_cache = NULL;
2494 #ifdef CONFIG_LOCK_STAT
2495         lock->cpu = raw_smp_processor_id();
2496 #endif
2497         if (subclass)
2498                 register_lock_class(lock, subclass, 1);
2499 }
2500 EXPORT_SYMBOL_GPL(lockdep_init_map);
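/*
 * Usage sketch (hypothetical lock type, for illustration only): a
 * subsystem embedding its own lockdep_map would typically do
 *
 *	struct my_lock {
 *		...underlying lock...
 *		struct lockdep_map	dep_map;
 *	};
 *
 *	static struct lock_class_key my_lock_key;	(must be persistent)
 *
 *	void my_lock_init(struct my_lock *l)
 *	{
 *		...initialize the underlying lock...
 *		lockdep_init_map(&l->dep_map, "my_lock", &my_lock_key, 0);
 *	}
 *
 * so that every my_lock instance shares one lock class keyed by
 * &my_lock_key - see the static_obj() check above.
 */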
2501
2502 /*
2503  * This gets called for every mutex_lock*()/spin_lock*() operation.
2504  * We maintain the dependency maps and validate the locking attempt:
2505  */
2506 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2507                           int trylock, int read, int check, int hardirqs_off,
2508                           struct lockdep_map *nest_lock, unsigned long ip)
2509 {
2510         struct task_struct *curr = current;
2511         struct lock_class *class = NULL;
2512         struct held_lock *hlock;
2513         unsigned int depth, id;
2514         int chain_head = 0;
2515         u64 chain_key;
2516
2517         if (!prove_locking)
2518                 check = 1;
2519
2520         if (unlikely(!debug_locks))
2521                 return 0;
2522
2523         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2524                 return 0;
2525
2526         if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
2527                 debug_locks_off();
2528                 printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
2529                 printk("turning off the locking correctness validator.\n");
2530                 return 0;
2531         }
2532
2533         if (!subclass)
2534                 class = lock->class_cache;
2535         /*
2536          * Not cached yet or subclass?
2537          */
2538         if (unlikely(!class)) {
2539                 class = register_lock_class(lock, subclass, 0);
2540                 if (!class)
2541                         return 0;
2542         }
2543         debug_atomic_inc((atomic_t *)&class->ops);
2544         if (very_verbose(class)) {
2545                 printk("\nacquire class [%p] %s", class->key, class->name);
2546                 if (class->name_version > 1)
2547                         printk("#%d", class->name_version);
2548                 printk("\n");
2549                 dump_stack();
2550         }
2551
2552         /*
2553          * Add the lock to the list of currently held locks.
2554          * (we dont increase the depth just yet, up until the
2555          * dependency checks are done)
2556          */
2557         depth = curr->lockdep_depth;
2558         if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
2559                 return 0;
2560
2561         hlock = curr->held_locks + depth;
2562         if (DEBUG_LOCKS_WARN_ON(!class))
2563                 return 0;
2564         hlock->class_idx = class - lock_classes + 1;
2565         hlock->acquire_ip = ip;
2566         hlock->instance = lock;
2567         hlock->nest_lock = nest_lock;
2568         hlock->trylock = trylock;
2569         hlock->read = read;
2570         hlock->check = check;
2571         hlock->hardirqs_off = !!hardirqs_off;
2572 #ifdef CONFIG_LOCK_STAT
2573         hlock->waittime_stamp = 0;
2574         hlock->holdtime_stamp = sched_clock();
2575 #endif
2576
2577         if (check == 2 && !mark_irqflags(curr, hlock))
2578                 return 0;
2579
2580         /* mark it as used: */
2581         if (!mark_lock(curr, hlock, LOCK_USED))
2582                 return 0;
2583
2584         /*
2585          * Calculate the chain hash: it's the combined hash of all the
2586          * lock keys along the dependency chain. We save the hash value
2587          * at every step so that we can get the current hash easily
2588          * after unlock. The chain hash is then used to cache dependency
2589          * results.
2590          *
2591          * The 'key ID' (the class index) is the most compact key value
2592          * we can use to drive the hash, rather than class->key itself.
2593          */
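        /*
         * Worked example (hypothetical class ids): acquiring a class
         * with id 12 and then one with id 7 in the same context gives
         *
         *	key0 = iterate_chain_key(0, 12);	after the first lock
         *	key1 = iterate_chain_key(key0, 7);	after the second lock
         *
         * so any task that later takes the same classes in the same
         * order hits the cached chain for key1 in validate_chain() and
         * skips the expensive graph walk.
         */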
2594         id = class - lock_classes;
2595         if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
2596                 return 0;
2597
2598         chain_key = curr->curr_chain_key;
2599         if (!depth) {
2600                 if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
2601                         return 0;
2602                 chain_head = 1;
2603         }
2604
2605         hlock->prev_chain_key = chain_key;
2606         if (separate_irq_context(curr, hlock)) {
2607                 chain_key = 0;
2608                 chain_head = 1;
2609         }
2610         chain_key = iterate_chain_key(chain_key, id);
2611
2612         if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
2613                 return 0;
2614
2615         curr->curr_chain_key = chain_key;
2616         curr->lockdep_depth++;
2617         check_chain_key(curr);
2618 #ifdef CONFIG_DEBUG_LOCKDEP
2619         if (unlikely(!debug_locks))
2620                 return 0;
2621 #endif
2622         if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
2623                 debug_locks_off();
2624                 printk("BUG: MAX_LOCK_DEPTH too low!\n");
2625                 printk("turning off the locking correctness validator.\n");
2626                 return 0;
2627         }
2628
2629         if (unlikely(curr->lockdep_depth > max_lockdep_depth))
2630                 max_lockdep_depth = curr->lockdep_depth;
2631
2632         return 1;
2633 }
2634
2635 static int
2636 print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
2637                            unsigned long ip)
2638 {
2639         if (!debug_locks_off())
2640                 return 0;
2641         if (debug_locks_silent)
2642                 return 0;
2643
2644         printk("\n=====================================\n");
2645         printk(  "[ BUG: bad unlock balance detected! ]\n");
2646         printk(  "-------------------------------------\n");
2647         printk("%s/%d is trying to release lock (",
2648                 curr->comm, task_pid_nr(curr));
2649         print_lockdep_cache(lock);
2650         printk(") at:\n");
2651         print_ip_sym(ip);
2652         printk("but there are no more locks to release!\n");
2653         printk("\nother info that might help us debug this:\n");
2654         lockdep_print_held_locks(curr);
2655
2656         printk("\nstack backtrace:\n");
2657         dump_stack();
2658
2659         return 0;
2660 }
2661
2662 /*
2663  * Common debugging checks for both nested and non-nested unlock:
2664  */
2665 static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
2666                         unsigned long ip)
2667 {
2668         if (unlikely(!debug_locks))
2669                 return 0;
2670         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2671                 return 0;
2672
2673         if (curr->lockdep_depth <= 0)
2674                 return print_unlock_inbalance_bug(curr, lock, ip);
2675
2676         return 1;
2677 }
2678
2679 static int
2680 __lock_set_class(struct lockdep_map *lock, const char *name,
2681                  struct lock_class_key *key, unsigned int subclass,
2682                  unsigned long ip)
2683 {
2684         struct task_struct *curr = current;
2685         struct held_lock *hlock, *prev_hlock;
2686         struct lock_class *class;
2687         unsigned int depth;
2688         int i;
2689
2690         depth = curr->lockdep_depth;
2691         if (DEBUG_LOCKS_WARN_ON(!depth))
2692                 return 0;
2693
2694         prev_hlock = NULL;
2695         for (i = depth-1; i >= 0; i--) {
2696                 hlock = curr->held_locks + i;
2697                 /*
2698                  * We must not cross into another context:
2699                  */
2700                 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2701                         break;
2702                 if (hlock->instance == lock)
2703                         goto found_it;
2704                 prev_hlock = hlock;
2705         }
2706         return print_unlock_inbalance_bug(curr, lock, ip);
2707
2708 found_it:
2709         lockdep_init_map(lock, name, key, 0);
2710         class = register_lock_class(lock, subclass, 0);
2711         hlock->class_idx = class - lock_classes + 1;
2712
2713         curr->lockdep_depth = i;
2714         curr->curr_chain_key = hlock->prev_chain_key;
2715
2716         for (; i < depth; i++) {
2717                 hlock = curr->held_locks + i;
2718                 if (!__lock_acquire(hlock->instance,
2719                         hlock_class(hlock)->subclass, hlock->trylock,
2720                                 hlock->read, hlock->check, hlock->hardirqs_off,
2721                                 hlock->nest_lock, hlock->acquire_ip))
2722                         return 0;
2723         }
2724
2725         if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
2726                 return 0;
2727         return 1;
2728 }
2729
2730 /*
2731  * Remove the lock from the list of currently held locks in a
2732  * potentially non-nested (out of order) manner. This is a
2733  * relatively rare operation, as all the unlock APIs default
2734  * to nested mode (which uses lock_release()):
2735  */
2736 static int
2737 lock_release_non_nested(struct task_struct *curr,
2738                         struct lockdep_map *lock, unsigned long ip)
2739 {
2740         struct held_lock *hlock, *prev_hlock;
2741         unsigned int depth;
2742         int i;
2743
2744         /*
2745          * Check whether the lock exists in the current stack
2746          * of held locks:
2747          */
2748         depth = curr->lockdep_depth;
2749         if (DEBUG_LOCKS_WARN_ON(!depth))
2750                 return 0;
2751
2752         prev_hlock = NULL;
2753         for (i = depth-1; i >= 0; i--) {
2754                 hlock = curr->held_locks + i;
2755                 /*
2756                  * We must not cross into another context:
2757                  */
2758                 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2759                         break;
2760                 if (hlock->instance == lock)
2761                         goto found_it;
2762                 prev_hlock = hlock;
2763         }
2764         return print_unlock_inbalance_bug(curr, lock, ip);
2765
2766 found_it:
2767         lock_release_holdtime(hlock);
2768
2769         /*
2770          * We have the right lock to unlock, 'hlock' points to it.
2771          * Now we remove it from the stack, and add back the other
2772          * entries (if any), recalculating the hash along the way:
2773          */
2774         curr->lockdep_depth = i;
2775         curr->curr_chain_key = hlock->prev_chain_key;
2776
2777         for (i++; i < depth; i++) {
2778                 hlock = curr->held_locks + i;
2779                 if (!__lock_acquire(hlock->instance,
2780                         hlock_class(hlock)->subclass, hlock->trylock,
2781                                 hlock->read, hlock->check, hlock->hardirqs_off,
2782                                 hlock->nest_lock, hlock->acquire_ip))
2783                         return 0;
2784         }
2785
2786         if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
2787                 return 0;
2788         return 1;
2789 }
2790
2791 /*
2792  * Remove the lock from the list of currently held locks - this gets
2793  * called on mutex_unlock()/spin_unlock*() (or on a failed
2794  * mutex_lock_interruptible()). This is done for unlocks that nest
2795  * perfectly. (i.e. the current top of the lock-stack is unlocked)
2796  */
2797 static int lock_release_nested(struct task_struct *curr,
2798                                struct lockdep_map *lock, unsigned long ip)
2799 {
2800         struct held_lock *hlock;
2801         unsigned int depth;
2802
2803         /*
2804          * Pop off the top of the lock stack:
2805          */
2806         depth = curr->lockdep_depth - 1;
2807         hlock = curr->held_locks + depth;
2808
2809         /*
2810          * Is the unlock non-nested:
2811          */
2812         if (hlock->instance != lock)
2813                 return lock_release_non_nested(curr, lock, ip);
2814         curr->lockdep_depth--;
2815
2816         if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
2817                 return 0;
2818
2819         curr->curr_chain_key = hlock->prev_chain_key;
2820
2821         lock_release_holdtime(hlock);
2822
2823 #ifdef CONFIG_DEBUG_LOCKDEP
2824         hlock->prev_chain_key = 0;
2825         hlock->class_idx = 0;
2826         hlock->acquire_ip = 0;
2827         hlock->irq_context = 0;
2828 #endif
2829         return 1;
2830 }
2831
2832 /*
2833  * Remove the lock from the list of currently held locks - this gets
2834  * called on mutex_unlock()/spin_unlock*() (or on a failed
2835  * mutex_lock_interruptible()). It handles both perfectly nested
2836  * unlocks and out-of-order ones, depending on the 'nested' argument.
2837  */
2838 static void
2839 __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
2840 {
2841         struct task_struct *curr = current;
2842
2843         if (!check_unlock(curr, lock, ip))
2844                 return;
2845
2846         if (nested) {
2847                 if (!lock_release_nested(curr, lock, ip))
2848                         return;
2849         } else {
2850                 if (!lock_release_non_nested(curr, lock, ip))
2851                         return;
2852         }
2853
2854         check_chain_key(curr);
2855 }
2856
2857 /*
2858  * Check whether we follow the irq-flags state precisely:
2859  */
2860 static void check_flags(unsigned long flags)
2861 {
2862 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
2863     defined(CONFIG_TRACE_IRQFLAGS)
2864         if (!debug_locks)
2865                 return;
2866
2867         if (irqs_disabled_flags(flags)) {
2868                 if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
2869                         printk("possible reason: unannotated irqs-off.\n");
2870                 }
2871         } else {
2872                 if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
2873                         printk("possible reason: unannotated irqs-on.\n");
2874                 }
2875         }
2876
2877         /*
2878          * We dont accurately track softirq state in e.g.
2879          * hardirq contexts (such as on 4KSTACKS), so only
2880          * check if not in hardirq contexts:
2881          */
2882         if (!hardirq_count()) {
2883                 if (softirq_count())
2884                         DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
2885                 else
2886                         DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
2887         }
2888
2889         if (!debug_locks)
2890                 print_irqtrace_events(current);
2891 #endif
2892 }
2893
2894 void lock_set_class(struct lockdep_map *lock, const char *name,
2895                     struct lock_class_key *key, unsigned int subclass,
2896                     unsigned long ip)
2897 {
2898         unsigned long flags;
2899
2900         if (unlikely(current->lockdep_recursion))
2901                 return;
2902
2903         raw_local_irq_save(flags);
2904         current->lockdep_recursion = 1;
2905         check_flags(flags);
2906         if (__lock_set_class(lock, name, key, subclass, ip))
2907                 check_chain_key(current);
2908         current->lockdep_recursion = 0;
2909         raw_local_irq_restore(flags);
2910 }
2911 EXPORT_SYMBOL_GPL(lock_set_class);
2912
2913 /*
2914  * We are not always called with irqs disabled - do that here,
2915  * and also avoid lockdep recursion:
2916  */
2917 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2918                           int trylock, int read, int check,
2919                           struct lockdep_map *nest_lock, unsigned long ip)
2920 {
2921         unsigned long flags;
2922
2923         if (unlikely(current->lockdep_recursion))
2924                 return;
2925
2926         raw_local_irq_save(flags);
2927         check_flags(flags);
2928
2929         current->lockdep_recursion = 1;
2930         __lock_acquire(lock, subclass, trylock, read, check,
2931                        irqs_disabled_flags(flags), nest_lock, ip);
2932         current->lockdep_recursion = 0;
2933         raw_local_irq_restore(flags);
2934 }
2935 EXPORT_SYMBOL_GPL(lock_acquire);
2936
2937 void lock_release(struct lockdep_map *lock, int nested,
2938                           unsigned long ip)
2939 {
2940         unsigned long flags;
2941
2942         if (unlikely(current->lockdep_recursion))
2943                 return;
2944
2945         raw_local_irq_save(flags);
2946         check_flags(flags);
2947         current->lockdep_recursion = 1;
2948         __lock_release(lock, nested, ip);
2949         current->lockdep_recursion = 0;
2950         raw_local_irq_restore(flags);
2951 }
2952 EXPORT_SYMBOL_GPL(lock_release);
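/*
 * Illustrative sketch (hypothetical wrapper, not part of this file): a
 * locking primitive brackets its real acquire/release with these hooks,
 * roughly:
 *
 *	void my_lock(struct my_lock *l)
 *	{
 *		lock_acquire(&l->dep_map, 0, 0, 0, 2, NULL, _RET_IP_);
 *		...actually take the underlying lock...
 *	}
 *
 *	void my_unlock(struct my_lock *l)
 *	{
 *		lock_release(&l->dep_map, 1, _RET_IP_);
 *		...actually drop the underlying lock...
 *	}
 *
 * i.e. subclass 0, not a trylock, a write acquisition, full checking
 * (check == 2), no nest_lock, and a nested (in-order) release.
 */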
2953
2954 void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
2955 {
2956         current->lockdep_reclaim_gfp = gfp_mask;
2957 }
2958
2959 void lockdep_clear_current_reclaim_state(void)
2960 {
2961         current->lockdep_reclaim_gfp = 0;
2962 }
2963
2964 #ifdef CONFIG_LOCK_STAT
2965 static int
2966 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
2967                            unsigned long ip)
2968 {
2969         if (!debug_locks_off())
2970                 return 0;
2971         if (debug_locks_silent)
2972                 return 0;
2973
2974         printk("\n=================================\n");
2975         printk(  "[ BUG: bad contention detected! ]\n");
2976         printk(  "---------------------------------\n");
2977         printk("%s/%d is trying to contend lock (",
2978                 curr->comm, task_pid_nr(curr));
2979         print_lockdep_cache(lock);
2980         printk(") at:\n");
2981         print_ip_sym(ip);
2982         printk("but there are no locks held!\n");
2983         printk("\nother info that might help us debug this:\n");
2984         lockdep_print_held_locks(curr);
2985
2986         printk("\nstack backtrace:\n");
2987         dump_stack();
2988
2989         return 0;
2990 }
2991
2992 static void
2993 __lock_contended(struct lockdep_map *lock, unsigned long ip)
2994 {
2995         struct task_struct *curr = current;
2996         struct held_lock *hlock, *prev_hlock;
2997         struct lock_class_stats *stats;
2998         unsigned int depth;
2999         int i, contention_point, contending_point;
3000
3001         depth = curr->lockdep_depth;
3002         if (DEBUG_LOCKS_WARN_ON(!depth))
3003                 return;
3004
3005         prev_hlock = NULL;
3006         for (i = depth-1; i >= 0; i--) {
3007                 hlock = curr->held_locks + i;
3008                 /*
3009                  * We must not cross into another context:
3010                  */
3011                 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3012                         break;
3013                 if (hlock->instance == lock)
3014                         goto found_it;
3015                 prev_hlock = hlock;
3016         }
3017         print_lock_contention_bug(curr, lock, ip);
3018         return;
3019
3020 found_it:
3021         hlock->waittime_stamp = sched_clock();
3022
3023         contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
3024         contending_point = lock_point(hlock_class(hlock)->contending_point,
3025                                       lock->ip);
3026
3027         stats = get_lock_stats(hlock_class(hlock));
3028         if (contention_point < LOCKSTAT_POINTS)
3029                 stats->contention_point[contention_point]++;
3030         if (contending_point < LOCKSTAT_POINTS)
3031                 stats->contending_point[contending_point]++;
3032         if (lock->cpu != smp_processor_id())
3033                 stats->bounces[bounce_contended + !!hlock->read]++;
3034         put_lock_stats(stats);
3035 }
3036
3037 static void
3038 __lock_acquired(struct lockdep_map *lock, unsigned long ip)
3039 {
3040         struct task_struct *curr = current;
3041         struct held_lock *hlock, *prev_hlock;
3042         struct lock_class_stats *stats;
3043         unsigned int depth;
3044         u64 now;
3045         s64 waittime = 0;
3046         int i, cpu;
3047
3048         depth = curr->lockdep_depth;
3049         if (DEBUG_LOCKS_WARN_ON(!depth))
3050                 return;
3051
3052         prev_hlock = NULL;
3053         for (i = depth-1; i >= 0; i--) {
3054                 hlock = curr->held_locks + i;
3055                 /*
3056                  * We must not cross into another context:
3057                  */
3058                 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3059                         break;
3060                 if (hlock->instance == lock)
3061                         goto found_it;
3062                 prev_hlock = hlock;
3063         }
3064         print_lock_contention_bug(curr, lock, _RET_IP_);
3065         return;
3066
3067 found_it:
3068         cpu = smp_processor_id();
3069         if (hlock->waittime_stamp) {
3070                 now = sched_clock();
3071                 waittime = now - hlock->waittime_stamp;
3072                 hlock->holdtime_stamp = now;
3073         }
3074
3075         stats = get_lock_stats(hlock_class(hlock));
3076         if (waittime) {
3077                 if (hlock->read)
3078                         lock_time_inc(&stats->read_waittime, waittime);
3079                 else
3080                         lock_time_inc(&stats->write_waittime, waittime);
3081         }
3082         if (lock->cpu != cpu)
3083                 stats->bounces[bounce_acquired + !!hlock->read]++;
3084         put_lock_stats(stats);
3085
3086         lock->cpu = cpu;
3087         lock->ip = ip;
3088 }
3089
3090 void lock_contended(struct lockdep_map *lock, unsigned long ip)
3091 {
3092         unsigned long flags;
3093
3094         if (unlikely(!lock_stat))
3095                 return;
3096
3097         if (unlikely(current->lockdep_recursion))
3098                 return;
3099
3100         raw_local_irq_save(flags);
3101         check_flags(flags);
3102         current->lockdep_recursion = 1;
3103         __lock_contended(lock, ip);
3104         current->lockdep_recursion = 0;
3105         raw_local_irq_restore(flags);
3106 }
3107 EXPORT_SYMBOL_GPL(lock_contended);
3108
3109 void lock_acquired(struct lockdep_map *lock, unsigned long ip)
3110 {
3111         unsigned long flags;
3112
3113         if (unlikely(!lock_stat))
3114                 return;
3115
3116         if (unlikely(current->lockdep_recursion))
3117                 return;
3118
3119         raw_local_irq_save(flags);
3120         check_flags(flags);
3121         current->lockdep_recursion = 1;
3122         __lock_acquired(lock, ip);
3123         current->lockdep_recursion = 0;
3124         raw_local_irq_restore(flags);
3125 }
3126 EXPORT_SYMBOL_GPL(lock_acquired);
3127 #endif
3128
3129 /*
3130  * Used by the testsuite, sanitize the validator state
3131  * after a simulated failure:
3132  */
3133
3134 void lockdep_reset(void)
3135 {
3136         unsigned long flags;
3137         int i;
3138
3139         raw_local_irq_save(flags);
3140         current->curr_chain_key = 0;
3141         current->lockdep_depth = 0;
3142         current->lockdep_recursion = 0;
3143         memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
3144         nr_hardirq_chains = 0;
3145         nr_softirq_chains = 0;
3146         nr_process_chains = 0;
3147         debug_locks = 1;
3148         for (i = 0; i < CHAINHASH_SIZE; i++)
3149                 INIT_LIST_HEAD(chainhash_table + i);
3150         raw_local_irq_restore(flags);
3151 }
3152
3153 static void zap_class(struct lock_class *class)
3154 {
3155         int i;
3156
3157         /*
3158          * Remove all dependencies this lock is
3159          * involved in:
3160          */
3161         for (i = 0; i < nr_list_entries; i++) {
3162                 if (list_entries[i].class == class)
3163                         list_del_rcu(&list_entries[i].entry);
3164         }
3165         /*
3166          * Unhash the class and remove it from the all_lock_classes list:
3167          */
3168         list_del_rcu(&class->hash_entry);
3169         list_del_rcu(&class->lock_entry);
3170
3171         class->key = NULL;
3172 }
3173
3174 static inline int within(const void *addr, void *start, unsigned long size)
3175 {
3176         return addr >= start && addr < start + size;
3177 }
3178
3179 void lockdep_free_key_range(void *start, unsigned long size)
3180 {
3181         struct lock_class *class, *next;
3182         struct list_head *head;
3183         unsigned long flags;
3184         int i;
3185         int locked;
3186
3187         raw_local_irq_save(flags);
3188         locked = graph_lock();
3189
3190         /*
3191          * Unhash all classes that were created by this module:
3192          */
3193         for (i = 0; i < CLASSHASH_SIZE; i++) {
3194                 head = classhash_table + i;
3195                 if (list_empty(head))
3196                         continue;
3197                 list_for_each_entry_safe(class, next, head, hash_entry) {
3198                         if (within(class->key, start, size))
3199                                 zap_class(class);
3200                         else if (within(class->name, start, size))
3201                                 zap_class(class);
3202                 }
3203         }
3204
3205         if (locked)
3206                 graph_unlock();
3207         raw_local_irq_restore(flags);
3208 }
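/*
 * Editorial note: the typical caller of lockdep_free_key_range() is the
 * module unload (and module load-failure) path, which must drop every
 * class whose key or name points into the image about to be freed.
 * Roughly, for a struct module *mod (shown as an illustration of the
 * call, not as a quote of kernel/module.c):
 *
 *	lockdep_free_key_range(mod->module_core, mod->core_size);
 */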
3209
3210 void lockdep_reset_lock(struct lockdep_map *lock)
3211 {
3212         struct lock_class *class, *next;
3213         struct list_head *head;
3214         unsigned long flags;
3215         int i, j;
3216         int locked;
3217
3218         raw_local_irq_save(flags);
3219
3220         /*
3221          * Remove all classes this lock might have:
3222          */
3223         for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
3224                 /*
3225                  * If the class exists, we look it up and zap it:
3226                  */
3227                 class = look_up_lock_class(lock, j);
3228                 if (class)
3229                         zap_class(class);
3230         }
3231         /*
3232          * Debug check: in the end all mapped classes should
3233          * be gone.
3234          */
3235         locked = graph_lock();
3236         for (i = 0; i < CLASSHASH_SIZE; i++) {
3237                 head = classhash_table + i;
3238                 if (list_empty(head))
3239                         continue;
3240                 list_for_each_entry_safe(class, next, head, hash_entry) {
3241                         if (unlikely(class == lock->class_cache)) {
3242                                 if (debug_locks_off_graph_unlock())
3243                                         WARN_ON(1);
3244                                 goto out_restore;
3245                         }
3246                 }
3247         }
3248         if (locked)
3249                 graph_unlock();
3250
3251 out_restore:
3252         raw_local_irq_restore(flags);
3253 }
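/*
 * Editorial note: lockdep_reset_lock() works in two phases.  First it
 * zaps every subclass (0 .. MAX_LOCKDEP_SUBCLASSES-1) registered for
 * this lockdep_map; then, under the graph lock, it scans the class hash
 * to verify that the cached class (lock->class_cache) is really gone.
 * If a stale class is still found, lock debugging is switched off and a
 * warning is emitted instead of continuing with a corrupt cache.
 */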
3254
3255 void lockdep_init(void)
3256 {
3257         int i;
3258
3259         /*
3260          * Some architectures have their own start_kernel()
3261          * code which calls lockdep_init(), while we also
3262          * call lockdep_init() from start_kernel() itself,
3263          * and we want to initialize the hashes only once:
3264          */
3265         if (lockdep_initialized)
3266                 return;
3267
3268         for (i = 0; i < CLASSHASH_SIZE; i++)
3269                 INIT_LIST_HEAD(classhash_table + i);
3270
3271         for (i = 0; i < CHAINHASH_SIZE; i++)
3272                 INIT_LIST_HEAD(chainhash_table + i);
3273
3274         lockdep_initialized = 1;
3275 }
3276
3277 void __init lockdep_info(void)
3278 {
3279         printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
3280
3281         printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
3282         printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
3283         printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
3284         printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
3285         printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
3286         printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
3287         printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
3288
3289         printk(" memory used by lock dependency info: %lu kB\n",
3290                 (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
3291                 sizeof(struct list_head) * CLASSHASH_SIZE +
3292                 sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
3293                 sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
3294                 sizeof(struct list_head) * CHAINHASH_SIZE) / 1024);
3295
3296         printk(" per task-struct memory footprint: %lu bytes\n",
3297                 sizeof(struct held_lock) * MAX_LOCK_DEPTH);
3298
3299 #ifdef CONFIG_DEBUG_LOCKDEP
3300         if (lockdep_init_error) {
3301                 printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
3302                 printk("Call stack leading to lockdep invocation was:\n");
3303                 print_stack_trace(&lockdep_init_trace, 0);
3304         }
3305 #endif
3306 }
3307
3308 static void
3309 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
3310                      const void *mem_to, struct held_lock *hlock)
3311 {
3312         if (!debug_locks_off())
3313                 return;
3314         if (debug_locks_silent)
3315                 return;
3316
3317         printk("\n=========================\n");
3318         printk(  "[ BUG: held lock freed! ]\n");
3319         printk(  "-------------------------\n");
3320         printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
3321                 curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
3322         print_lock(hlock);
3323         lockdep_print_held_locks(curr);
3324
3325         printk("\nstack backtrace:\n");
3326         dump_stack();
3327 }
3328
3329 static inline int not_in_range(const void* mem_from, unsigned long mem_len,
3330                                 const void* lock_from, unsigned long lock_len)
3331 {
3332         return lock_from + lock_len <= mem_from ||
3333                 mem_from + mem_len <= lock_from;
3334 }
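/*
 * Editorial note: not_in_range() compares the two half-open ranges
 * [mem_from, mem_from + mem_len) and [lock_from, lock_from + lock_len)
 * and returns nonzero exactly when they do not overlap, i.e. when one
 * range ends at or before the other begins.  For example, a 64-byte
 * lock at 0x100 does overlap a freed region covering 0x000-0x13f, so
 * not_in_range() returns 0 and debug_check_no_locks_freed() below
 * reports the bug.
 */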
3335
3336 /*
3337  * Called when kernel memory is freed (or unmapped), or if a lock
3338  * is destroyed or reinitialized; this code checks whether there is
3339  * any held lock in the memory range <mem_from> .. <mem_from + mem_len>:
3340  */
3341 void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
3342 {
3343         struct task_struct *curr = current;
3344         struct held_lock *hlock;
3345         unsigned long flags;
3346         int i;
3347
3348         if (unlikely(!debug_locks))
3349                 return;
3350
3351         local_irq_save(flags);
3352         for (i = 0; i < curr->lockdep_depth; i++) {
3353                 hlock = curr->held_locks + i;
3354
3355                 if (not_in_range(mem_from, mem_len, hlock->instance,
3356                                         sizeof(*hlock->instance)))
3357                         continue;
3358
3359                 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
3360                 break;
3361         }
3362         local_irq_restore(flags);
3363 }
3364 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
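/*
 * Editorial note: the memory allocators are the main callers of
 * debug_check_no_locks_freed(); a free path passes the region about to
 * be released, along the lines of (illustrative, not a quote of any
 * particular allocator):
 *
 *	debug_check_no_locks_freed(page_address(page), PAGE_SIZE << order);
 *
 * so that freeing memory that still contains a held lock (e.g. a
 * spinlock embedded in an object being recycled) is reported here.
 */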
3365
3366 static void print_held_locks_bug(struct task_struct *curr)
3367 {
3368         if (!debug_locks_off())
3369                 return;
3370         if (debug_locks_silent)
3371                 return;
3372
3373         printk("\n=====================================\n");
3374         printk(  "[ BUG: lock held at task exit time! ]\n");
3375         printk(  "-------------------------------------\n");
3376         printk("%s/%d is exiting with locks still held!\n",
3377                 curr->comm, task_pid_nr(curr));
3378         lockdep_print_held_locks(curr);
3379
3380         printk("\nstack backtrace:\n");
3381         dump_stack();
3382 }
3383
3384 void debug_check_no_locks_held(struct task_struct *task)
3385 {
3386         if (unlikely(task->lockdep_depth > 0))
3387                 print_held_locks_bug(task);
3388 }
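/*
 * Editorial note: the task-exit path is the natural caller of
 * debug_check_no_locks_held(); do_exit() passes the dying task so that
 * a lock leaked across a task's lifetime is reported before the
 * task_struct is torn down, roughly:
 *
 *	debug_check_no_locks_held(tsk);
 */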
3389
3390 void debug_show_all_locks(void)
3391 {
3392         struct task_struct *g, *p;
3393         int count = 10;
3394         int unlock = 1;
3395
3396         if (unlikely(!debug_locks)) {
3397                 printk("INFO: lockdep is turned off.\n");
3398                 return;
3399         }
3400         printk("\nShowing all locks held in the system:\n");
3401
3402         /*
3403          * Here we try hard to take the tasklist_lock; if we do not
3404          * succeed within ~2 seconds (10 attempts, 200 ms apart) we
3405          * ignore it (but keep trying). This enables a debug printout
3406          * even if a tasklist_lock-holding task deadlocks or crashes.
3407          */
3408 retry:
3409         if (!read_trylock(&tasklist_lock)) {
3410                 if (count == 10)
3411                         printk("hm, tasklist_lock locked, retrying... ");
3412                 if (count) {
3413                         count--;
3414                         printk(" #%d", 10-count);
3415                         mdelay(200);
3416                         goto retry;
3417                 }
3418                 printk(" ignoring it.\n");
3419                 unlock = 0;
3420         } else {
3421                 if (count != 10)
3422                         printk(KERN_CONT " locked it.\n");
3423         }
3424
3425         do_each_thread(g, p) {
3426                 /*
3427                  * It's not reliable to print a task's held locks
3428                  * if it's not sleeping (or if it's not the current
3429                  * task):
3430                  */
3431                 if (p->state == TASK_RUNNING && p != current)
3432                         continue;
3433                 if (p->lockdep_depth)
3434                         lockdep_print_held_locks(p);
3435                 if (!unlock)
3436                         if (read_trylock(&tasklist_lock))
3437                                 unlock = 1;
3438         } while_each_thread(g, p);
3439
3440         printk("\n");
3441         printk("=============================================\n\n");
3442
3443         if (unlock)
3444                 read_unlock(&tasklist_lock);
3445 }
3446 EXPORT_SYMBOL_GPL(debug_show_all_locks);
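/*
 * Editorial note: this dump is typically reached via the magic SysRq
 * 'd' key.  A minimal sketch of a SysRq-style hook (the handler name
 * and signature are illustrative, not copied from drivers/char/sysrq.c):
 *
 *	static void sysrq_handle_showlocks(int key, struct tty_struct *tty)
 *	{
 *		debug_show_all_locks();
 *	}
 */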
3447
3448 /*
3449  * Careful: only use this function if you are sure that
3450  * the task cannot run in parallel!
3451  */
3452 void __debug_show_held_locks(struct task_struct *task)
3453 {
3454         if (unlikely(!debug_locks)) {
3455                 printk("INFO: lockdep is turned off.\n");
3456                 return;
3457         }
3458         lockdep_print_held_locks(task);
3459 }
3460 EXPORT_SYMBOL_GPL(__debug_show_held_locks);
3461
3462 void debug_show_held_locks(struct task_struct *task)
3463 {
3464         __debug_show_held_locks(task);
3465 }
3466 EXPORT_SYMBOL_GPL(debug_show_held_locks);
3467
3468 void lockdep_sys_exit(void)
3469 {
3470         struct task_struct *curr = current;
3471
3472         if (unlikely(curr->lockdep_depth)) {
3473                 if (!debug_locks_off())
3474                         return;
3475                 printk("\n================================================\n");
3476                 printk(  "[ BUG: lock held when returning to user space! ]\n");
3477                 printk(  "------------------------------------------------\n");
3478                 printk("%s/%d is leaving the kernel with locks still held!\n",
3479                                 curr->comm, curr->pid);
3480                 lockdep_print_held_locks(curr);
3481         }
3482 }
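/*
 * Editorial note: lockdep_sys_exit() is intended to be called from the
 * architecture's system-call return path (via the LOCKDEP_SYS_EXIT hook
 * in the entry code), so that a lock accidentally held across a syscall
 * is flagged right at the kernel/user boundary rather than much later.
 */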