4 * Runtime locking correctness validator
6 * Started by Ingo Molnar:
8 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
11 * this code maps all the lock dependencies as they occur in a live kernel
12 * and will warn about the following classes of locking bugs:
14 * - lock inversion scenarios
15 * - circular lock dependencies
16 * - hardirq/softirq safe/unsafe locking bugs
18 * Bugs are reported even if the current locking scenario does not cause
19 * any deadlock at this point.
21 * I.e. if at any time in the past two locks were taken in a different order,
22 * even if it happened for another task, even if those were different
23 * locks (but of the same class as this lock), this code will detect it.
25 * Thanks to Arjan van de Ven for coming up with the initial idea of
26 * mapping lock dependencies at runtime.
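 *
 * For illustration (hypothetical locks, not part of the original text):
 * the classic pattern the validator catches is an AB-BA ordering, e.g.
 *
 *	spin_lock(&a); spin_lock(&b);	(one code path takes A, then B)
 *	...
 *	spin_lock(&b); spin_lock(&a);	(another path takes B, then A)
 *
 * The second ordering is reported as a possible circular dependency even
 * if the two code paths never actually race at runtime.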
28 #include <linux/mutex.h>
29 #include <linux/sched.h>
30 #include <linux/delay.h>
31 #include <linux/module.h>
32 #include <linux/proc_fs.h>
33 #include <linux/seq_file.h>
34 #include <linux/spinlock.h>
35 #include <linux/kallsyms.h>
36 #include <linux/interrupt.h>
37 #include <linux/stacktrace.h>
38 #include <linux/debug_locks.h>
39 #include <linux/irqflags.h>
40 #include <linux/utsname.h>
41 #include <linux/hash.h>
42 #include <linux/ftrace.h>
44 #include <asm/sections.h>
46 #include "lockdep_internals.h"
48 #ifdef CONFIG_PROVE_LOCKING
49 int prove_locking = 1;
50 module_param(prove_locking, int, 0644);
52 #define prove_locking 0
55 #ifdef CONFIG_LOCK_STAT
57 module_param(lock_stat, int, 0644);
63 * lockdep_lock: protects the lockdep graph, the hashes and the
64 * class/list/hash allocators.
66 * This is one of the rare exceptions where it's justified
67 * to use a raw spinlock - we really don't want the spinlock
68 * code to recurse back into the lockdep code...
70 static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
72 static int graph_lock(void)
74 __raw_spin_lock(&lockdep_lock);
76 * Make sure that if another CPU detected a bug while
77 * walking the graph we don't change it (while the other
78 * CPU is busy printing out stuff with the graph lock
82 __raw_spin_unlock(&lockdep_lock);
85 /* prevent any recursions within lockdep from causing deadlocks */
86 current->lockdep_recursion++;
90 static inline int graph_unlock(void)
92 if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
93 return DEBUG_LOCKS_WARN_ON(1);
95 current->lockdep_recursion--;
96 __raw_spin_unlock(&lockdep_lock);
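/*
 * Sketch of the usage pattern for the two helpers above, as used
 * throughout this file: take the graph lock, bail out if another CPU
 * already turned lock debugging off, modify the graph, then drop it:
 *
 *	if (!graph_lock())
 *		return 0;
 *	... add classes, dependencies or chains ...
 *	graph_unlock();
 */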
101 * Turn lock debugging off and return 0 if it was already off,
102 * and also release the graph lock:
104 static inline int debug_locks_off_graph_unlock(void)
106 int ret = debug_locks_off();
108 __raw_spin_unlock(&lockdep_lock);
113 static int lockdep_initialized;
115 unsigned long nr_list_entries;
116 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
119 * All data structures here are protected by the global debug_lock.
121 * Mutex key structs only get allocated once, during bootup, and never
122 * get freed - this significantly simplifies the debugging code.
124 unsigned long nr_lock_classes;
125 static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
127 static inline struct lock_class *hlock_class(struct held_lock *hlock)
129 if (!hlock->class_idx) {
130 DEBUG_LOCKS_WARN_ON(1);
133 return lock_classes + hlock->class_idx - 1;
136 #ifdef CONFIG_LOCK_STAT
137 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
139 static int lock_point(unsigned long points[], unsigned long ip)
143 for (i = 0; i < LOCKSTAT_POINTS; i++) {
144 if (points[i] == 0) {
155 static void lock_time_inc(struct lock_time *lt, s64 time)
160 if (time < lt->min || !lt->min)
167 static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
169 if (!dst->min || src->min < dst->min)
	dst->min = src->min;
170 if (src->max > dst->max)
	dst->max = src->max;
171 dst->total += src->total;
175 struct lock_class_stats lock_stats(struct lock_class *class)
177 struct lock_class_stats stats;
180 memset(&stats, 0, sizeof(struct lock_class_stats));
181 for_each_possible_cpu(cpu) {
182 struct lock_class_stats *pcs =
183 &per_cpu(lock_stats, cpu)[class - lock_classes];
185 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
186 stats.contention_point[i] += pcs->contention_point[i];
188 for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
189 stats.contending_point[i] += pcs->contending_point[i];
191 lock_time_add(&pcs->read_waittime, &stats.read_waittime);
192 lock_time_add(&pcs->write_waittime, &stats.write_waittime);
194 lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
195 lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
197 for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
198 stats.bounces[i] += pcs->bounces[i];
204 void clear_lock_stats(struct lock_class *class)
208 for_each_possible_cpu(cpu) {
209 struct lock_class_stats *cpu_stats =
210 &per_cpu(lock_stats, cpu)[class - lock_classes];
212 memset(cpu_stats, 0, sizeof(struct lock_class_stats));
214 memset(class->contention_point, 0, sizeof(class->contention_point));
215 memset(class->contending_point, 0, sizeof(class->contending_point));
218 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
220 return &get_cpu_var(lock_stats)[class - lock_classes];
223 static void put_lock_stats(struct lock_class_stats *stats)
225 put_cpu_var(lock_stats);
228 static void lock_release_holdtime(struct held_lock *hlock)
230 struct lock_class_stats *stats;
236 holdtime = sched_clock() - hlock->holdtime_stamp;
238 stats = get_lock_stats(hlock_class(hlock));
240 lock_time_inc(&stats->read_holdtime, holdtime);
242 lock_time_inc(&stats->write_holdtime, holdtime);
243 put_lock_stats(stats);
246 static inline void lock_release_holdtime(struct held_lock *hlock)
252 * We keep a global list of all lock classes. The list only grows,
253 * never shrinks. The list is only accessed with the lockdep
254 * spinlock held.
256 LIST_HEAD(all_lock_classes);
259 * The lockdep classes are in a hash-table as well, for fast lookup:
261 #define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
262 #define CLASSHASH_SIZE (1UL << CLASSHASH_BITS)
263 #define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS)
264 #define classhashentry(key) (classhash_table + __classhashfn((key)))
266 static struct list_head classhash_table[CLASSHASH_SIZE];
269 * We put the lock dependency chains into a hash-table as well, to cache
270 * their existence:
272 #define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1)
273 #define CHAINHASH_SIZE (1UL << CHAINHASH_BITS)
274 #define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS)
275 #define chainhashentry(chain) (chainhash_table + __chainhashfn((chain)))
277 static struct list_head chainhash_table[CHAINHASH_SIZE];
280 * The hash key of the lock dependency chains is a hash itself too:
281 * it's a hash of all locks taken up to that lock, including that lock.
282 * It's a 64-bit hash, because it's important for the keys to be unique.
285 #define iterate_chain_key(key1, key2) \
286 (((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
287 ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
288 (key2))
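/*
 * Illustrative sketch (hypothetical helper, not used by lockdep itself):
 * the chain key of a lock stack is built up incrementally, folding in the
 * class index of every held lock in order, the same way __lock_acquire()
 * and check_chain_key() do it below.
 */
static inline u64 example_chain_key(const unsigned int *class_ids, int depth)
{
	u64 chain_key = 0;	/* matches the initial curr_chain_key */
	int i;

	for (i = 0; i < depth; i++)
		chain_key = iterate_chain_key(chain_key, class_ids[i]);

	return chain_key;
}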
290 void lockdep_off(void)
292 current->lockdep_recursion++;
294 EXPORT_SYMBOL(lockdep_off);
296 void lockdep_on(void)
298 current->lockdep_recursion--;
300 EXPORT_SYMBOL(lockdep_on);
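/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * off/on calls must nest correctly, since they only increment and
 * decrement a per-task recursion counter.
 */
static inline void example_untracked_section(void)
{
	lockdep_off();
	/* ... locking that lockdep should neither track nor validate ... */
	lockdep_on();
}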
303 * Debugging switches:
307 #define VERY_VERBOSE 0
310 # define HARDIRQ_VERBOSE 1
311 # define SOFTIRQ_VERBOSE 1
313 # define HARDIRQ_VERBOSE 0
314 # define SOFTIRQ_VERBOSE 0
317 #if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
319 * Quick filtering for interesting events:
321 static int class_filter(struct lock_class *class)
325 if (class->name_version == 1 &&
326 !strcmp(class->name, "lockname"))
328 if (class->name_version == 1 &&
329 !strcmp(class->name, "&struct->lockfield"))
332 /* Filter everything else. Returning 1 here would allow everything else */
337 static int verbose(struct lock_class *class)
340 return class_filter(class);
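/*
 * Sketch of how the filter above is typically used while debugging
 * (hypothetical lock name): add a match for the class you care about, e.g.
 *
 *	if (class->name_version == 1 && !strcmp(class->name, "&rq->lock"))
 *		return 1;
 *
 * and build with VERBOSE and/or VERY_VERBOSE enabled.
 */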
346 * Stack-trace: tightly packed array of stack backtrace
347 * addresses. Protected by the graph_lock.
349 unsigned long nr_stack_trace_entries;
350 static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
352 static int save_trace(struct stack_trace *trace)
354 trace->nr_entries = 0;
355 trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
356 trace->entries = stack_trace + nr_stack_trace_entries;
360 save_stack_trace(trace);
362 trace->max_entries = trace->nr_entries;
364 nr_stack_trace_entries += trace->nr_entries;
366 if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
367 if (!debug_locks_off_graph_unlock())
370 printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
371 printk("turning off the locking correctness validator.\n");
380 unsigned int nr_hardirq_chains;
381 unsigned int nr_softirq_chains;
382 unsigned int nr_process_chains;
383 unsigned int max_lockdep_depth;
384 unsigned int max_recursion_depth;
386 static unsigned int lockdep_dependency_gen_id;
388 static bool lockdep_dependency_visit(struct lock_class *source,
392 lockdep_dependency_gen_id++;
393 if (source->dep_gen_id == lockdep_dependency_gen_id)
395 source->dep_gen_id = lockdep_dependency_gen_id;
399 #ifdef CONFIG_DEBUG_LOCKDEP
401 * We cannot printk in early bootup code. Even early_printk()
402 * might not work. So we mark any initialization errors and printk
403 * about them later on, in lockdep_info().
405 static int lockdep_init_error;
406 static unsigned long lockdep_init_trace_data[20];
407 static struct stack_trace lockdep_init_trace = {
408 .max_entries = ARRAY_SIZE(lockdep_init_trace_data),
409 .entries = lockdep_init_trace_data,
413 * Various lockdep statistics:
415 atomic_t chain_lookup_hits;
416 atomic_t chain_lookup_misses;
417 atomic_t hardirqs_on_events;
418 atomic_t hardirqs_off_events;
419 atomic_t redundant_hardirqs_on;
420 atomic_t redundant_hardirqs_off;
421 atomic_t softirqs_on_events;
422 atomic_t softirqs_off_events;
423 atomic_t redundant_softirqs_on;
424 atomic_t redundant_softirqs_off;
425 atomic_t nr_unused_locks;
426 atomic_t nr_cyclic_checks;
427 atomic_t nr_cyclic_check_recursions;
428 atomic_t nr_find_usage_forwards_checks;
429 atomic_t nr_find_usage_forwards_recursions;
430 atomic_t nr_find_usage_backwards_checks;
431 atomic_t nr_find_usage_backwards_recursions;
432 # define debug_atomic_inc(ptr) atomic_inc(ptr)
433 # define debug_atomic_dec(ptr) atomic_dec(ptr)
434 # define debug_atomic_read(ptr) atomic_read(ptr)
436 # define debug_atomic_inc(ptr) do { } while (0)
437 # define debug_atomic_dec(ptr) do { } while (0)
438 # define debug_atomic_read(ptr) 0
445 static const char *usage_str[] =
447 [LOCK_USED] = "initial-use ",
448 [LOCK_USED_IN_HARDIRQ] = "in-hardirq-W",
449 [LOCK_USED_IN_SOFTIRQ] = "in-softirq-W",
450 [LOCK_ENABLED_SOFTIRQS] = "softirq-on-W",
451 [LOCK_ENABLED_HARDIRQS] = "hardirq-on-W",
452 [LOCK_USED_IN_HARDIRQ_READ] = "in-hardirq-R",
453 [LOCK_USED_IN_SOFTIRQ_READ] = "in-softirq-R",
454 [LOCK_ENABLED_SOFTIRQS_READ] = "softirq-on-R",
455 [LOCK_ENABLED_HARDIRQS_READ] = "hardirq-on-R",
458 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
460 return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
464 get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
466 *c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';
468 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
471 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
474 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
477 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
480 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
482 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) {
484 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
488 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
490 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) {
492 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
497 static void print_lock_name(struct lock_class *class)
499 char str[KSYM_NAME_LEN], c1, c2, c3, c4;
502 get_usage_chars(class, &c1, &c2, &c3, &c4);
506 name = __get_key_name(class->key, str);
507 printk(" (%s", name);
509 printk(" (%s", name);
510 if (class->name_version > 1)
511 printk("#%d", class->name_version);
513 printk("/%d", class->subclass);
515 printk("){%c%c%c%c}", c1, c2, c3, c4);
518 static void print_lockdep_cache(struct lockdep_map *lock)
521 char str[KSYM_NAME_LEN];
525 name = __get_key_name(lock->key->subkeys, str);
530 static void print_lock(struct held_lock *hlock)
532 print_lock_name(hlock_class(hlock));
534 print_ip_sym(hlock->acquire_ip);
537 static void lockdep_print_held_locks(struct task_struct *curr)
539 int i, depth = curr->lockdep_depth;
542 printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
545 printk("%d lock%s held by %s/%d:\n",
546 depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
548 for (i = 0; i < depth; i++) {
550 print_lock(curr->held_locks + i);
554 static void print_lock_class_header(struct lock_class *class, int depth)
558 printk("%*s->", depth, "");
559 print_lock_name(class);
560 printk(" ops: %lu", class->ops);
563 for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
564 if (class->usage_mask & (1 << bit)) {
567 len += printk("%*s %s", depth, "", usage_str[bit]);
568 len += printk(" at:\n");
569 print_stack_trace(class->usage_traces + bit, len);
572 printk("%*s }\n", depth, "");
574 printk("%*s ... key at: ",depth,"");
575 print_ip_sym((unsigned long)class->key);
579 * printk all lock dependencies starting at <class>:
582 print_lock_dependencies(struct lock_class *class, int depth)
584 struct lock_list *entry;
586 if (lockdep_dependency_visit(class, depth))
589 if (DEBUG_LOCKS_WARN_ON(depth >= 20))
592 print_lock_class_header(class, depth);
594 list_for_each_entry(entry, &class->locks_after, entry) {
595 if (DEBUG_LOCKS_WARN_ON(!entry->class))
598 print_lock_dependencies(entry->class, depth + 1);
600 printk("%*s ... acquired at:\n",depth,"");
601 print_stack_trace(&entry->trace, 2);
606 static void print_kernel_version(void)
608 printk("%s %.*s\n", init_utsname()->release,
609 (int)strcspn(init_utsname()->version, " "),
610 init_utsname()->version);
613 static int very_verbose(struct lock_class *class)
616 return class_filter(class);
622 * Is this the address of a static object:
624 static int static_obj(void *obj)
626 unsigned long start = (unsigned long) &_stext,
627 end = (unsigned long) &_end,
628 addr = (unsigned long) obj;
636 if ((addr >= start) && (addr < end))
643 for_each_possible_cpu(i) {
644 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
645 end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
648 if ((addr >= start) && (addr < end))
656 return is_module_address(addr);
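/*
 * For illustration (hypothetical examples, not part of this file): a
 * file-scope "static DEFINE_SPINLOCK(foo_lock);" or a static
 * "struct lock_class_key" lives in one of the areas checked above
 * (kernel image, per-cpu area or module area) and therefore counts as a
 * static object. A key embedded in a kmalloc()'ed structure does not,
 * and register_lock_class() below rejects it with
 * "INFO: trying to register non-static key.".
 */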
660 * To make lock name printouts unique, we calculate a per-name
661 * class->name_version generation counter:
663 static int count_matching_names(struct lock_class *new_class)
665 struct lock_class *class;
668 if (!new_class->name)
671 list_for_each_entry(class, &all_lock_classes, lock_entry) {
672 if (new_class->key - new_class->subclass == class->key)
673 return class->name_version;
674 if (class->name && !strcmp(class->name, new_class->name))
675 count = max(count, class->name_version);
682 * Look up a lock's class in the hash-table. We cache the result in the
683 * lock object itself, so the actual hash lookup should only happen once
684 * per lock object.
686 static inline struct lock_class *
687 look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
689 struct lockdep_subclass_key *key;
690 struct list_head *hash_head;
691 struct lock_class *class;
693 #ifdef CONFIG_DEBUG_LOCKDEP
695 * If the architecture calls into lockdep before initializing
696 * the hashes then we'll warn about it later. (we cannot printk
699 if (unlikely(!lockdep_initialized)) {
701 lockdep_init_error = 1;
702 save_stack_trace(&lockdep_init_trace);
707 * Static locks do not have their class-keys yet - for them the key
708 * is the lock object itself:
710 if (unlikely(!lock->key))
711 lock->key = (void *)lock;
714 * NOTE: the class-key must be unique. For dynamic locks, a static
715 * lock_class_key variable is passed in through the mutex_init()
716 * (or spin_lock_init()) call - which acts as the key. For static
717 * locks we use the lock object itself as the key.
719 BUILD_BUG_ON(sizeof(struct lock_class_key) >
720 sizeof(struct lockdep_map));
722 key = lock->key->subkeys + subclass;
724 hash_head = classhashentry(key);
727 * We can walk the hash lockfree, because the hash only
728 * grows, and we are careful when adding entries to the end:
730 list_for_each_entry(class, hash_head, hash_entry) {
731 if (class->key == key) {
732 WARN_ON_ONCE(class->name != lock->name);
741 * Register a lock's class in the hash-table, if the class is not present
742 * yet. Otherwise we look it up. We cache the result in the lock object
743 * itself, so actual lookup of the hash should be once per lock object.
745 static inline struct lock_class *
746 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
748 struct lockdep_subclass_key *key;
749 struct list_head *hash_head;
750 struct lock_class *class;
753 class = look_up_lock_class(lock, subclass);
758 * Debug-check: all keys must be persistent!
760 if (!static_obj(lock->key)) {
762 printk("INFO: trying to register non-static key.\n");
763 printk("the code is fine but needs lockdep annotation.\n");
764 printk("turning off the locking correctness validator.\n");
770 key = lock->key->subkeys + subclass;
771 hash_head = classhashentry(key);
773 raw_local_irq_save(flags);
775 raw_local_irq_restore(flags);
779 * We have to do the hash-walk again, to avoid races
782 list_for_each_entry(class, hash_head, hash_entry)
783 if (class->key == key)
786 * Allocate a new key from the static array, and add it to
789 if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
790 if (!debug_locks_off_graph_unlock()) {
791 raw_local_irq_restore(flags);
794 raw_local_irq_restore(flags);
796 printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
797 printk("turning off the locking correctness validator.\n");
800 class = lock_classes + nr_lock_classes++;
801 debug_atomic_inc(&nr_unused_locks);
803 class->name = lock->name;
804 class->subclass = subclass;
805 INIT_LIST_HEAD(&class->lock_entry);
806 INIT_LIST_HEAD(&class->locks_before);
807 INIT_LIST_HEAD(&class->locks_after);
808 class->name_version = count_matching_names(class);
810 * We use RCU's safe list-add method to make
811 * parallel walking of the hash-list safe:
813 list_add_tail_rcu(&class->hash_entry, hash_head);
815 * Add it to the global list of classes:
817 list_add_tail_rcu(&class->lock_entry, &all_lock_classes);
819 if (verbose(class)) {
821 raw_local_irq_restore(flags);
823 printk("\nnew class %p: %s", class->key, class->name);
824 if (class->name_version > 1)
825 printk("#%d", class->name_version);
829 raw_local_irq_save(flags);
831 raw_local_irq_restore(flags);
837 raw_local_irq_restore(flags);
839 if (!subclass || force)
840 lock->class_cache = class;
842 if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
848 #ifdef CONFIG_PROVE_LOCKING
850 * Allocate a lockdep entry. (assumes the graph_lock is held, returns
851 * NULL on failure)
853 static struct lock_list *alloc_list_entry(void)
855 if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
856 if (!debug_locks_off_graph_unlock())
859 printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
860 printk("turning off the locking correctness validator.\n");
863 return list_entries + nr_list_entries++;
867 * Add a new dependency to the end of the list:
869 static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
870 struct list_head *head, unsigned long ip, int distance)
872 struct lock_list *entry;
874 * Lock not present yet - get a new dependency struct and
875 * add it to the list:
877 entry = alloc_list_entry();
881 if (!save_trace(&entry->trace))
885 entry->distance = distance;
887 * Since we never remove from the dependency list, the list can
888 * be walked locklessly by other CPUs; it's only the allocation
889 * that must be protected by the spinlock. But this also means
890 * the new entry may only become visible once the writes to the
891 * entry are complete - hence the RCU op:
893 list_add_tail_rcu(&entry->entry, head);
899 * Recursive, forwards-direction lock-dependency checking, used for
900 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe checking.
903 * (to keep the stackframe of the recursive functions small we
904 * use these global variables, and we also mark various helper
905 * functions as noinline.)
907 static struct held_lock *check_source, *check_target;
910 * Print a dependency chain entry (this is only done when a deadlock
911 * has been detected):
914 print_circular_bug_entry(struct lock_list *target, unsigned int depth)
916 if (debug_locks_silent)
918 printk("\n-> #%u", depth);
919 print_lock_name(target->class);
921 print_stack_trace(&target->trace, 6);
927 * When a circular dependency is detected, print the header first:
931 print_circular_bug_header(struct lock_list *entry, unsigned int depth)
933 struct task_struct *curr = current;
935 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
938 printk("\n=======================================================\n");
939 printk( "[ INFO: possible circular locking dependency detected ]\n");
940 print_kernel_version();
941 printk( "-------------------------------------------------------\n");
942 printk("%s/%d is trying to acquire lock:\n",
943 curr->comm, task_pid_nr(curr));
944 print_lock(check_source);
945 printk("\nbut task is already holding lock:\n");
946 print_lock(check_target);
947 printk("\nwhich lock already depends on the new lock.\n\n");
948 printk("\nthe existing dependency chain (in reverse order) is:\n");
950 print_circular_bug_entry(entry, depth);
955 static noinline int print_circular_bug_tail(void)
957 struct task_struct *curr = current;
958 struct lock_list this;
960 if (debug_locks_silent)
963 this.class = hlock_class(check_source);
964 if (!save_trace(&this.trace))
967 print_circular_bug_entry(&this, 0);
969 printk("\nother info that might help us debug this:\n\n");
970 lockdep_print_held_locks(curr);
972 printk("\nstack backtrace:\n");
978 #define RECURSION_LIMIT 40
980 static int noinline print_infinite_recursion_bug(void)
982 if (!debug_locks_off_graph_unlock())
990 unsigned long __lockdep_count_forward_deps(struct lock_class *class,
993 struct lock_list *entry;
994 unsigned long ret = 1;
996 if (lockdep_dependency_visit(class, depth))
1000 * Recurse this class's dependency list:
1002 list_for_each_entry(entry, &class->locks_after, entry)
1003 ret += __lockdep_count_forward_deps(entry->class, depth + 1);
1008 unsigned long lockdep_count_forward_deps(struct lock_class *class)
1010 unsigned long ret, flags;
1012 local_irq_save(flags);
1013 __raw_spin_lock(&lockdep_lock);
1014 ret = __lockdep_count_forward_deps(class, 0);
1015 __raw_spin_unlock(&lockdep_lock);
1016 local_irq_restore(flags);
1021 unsigned long __lockdep_count_backward_deps(struct lock_class *class,
1024 struct lock_list *entry;
1025 unsigned long ret = 1;
1027 if (lockdep_dependency_visit(class, depth))
1030 * Recurse this class's dependency list:
1032 list_for_each_entry(entry, &class->locks_before, entry)
1033 ret += __lockdep_count_backward_deps(entry->class, depth + 1);
1038 unsigned long lockdep_count_backward_deps(struct lock_class *class)
1040 unsigned long ret, flags;
1042 local_irq_save(flags);
1043 __raw_spin_lock(&lockdep_lock);
1044 ret = __lockdep_count_backward_deps(class, 0);
1045 __raw_spin_unlock(&lockdep_lock);
1046 local_irq_restore(flags);
1052 * Prove that the dependency graph starting at <source> cannot
1053 * lead to <target>. Print an error and return 0 if it does.
1056 check_noncircular(struct lock_class *source, unsigned int depth)
1058 struct lock_list *entry;
1060 if (lockdep_dependency_visit(source, depth))
1063 debug_atomic_inc(&nr_cyclic_check_recursions);
1064 if (depth > max_recursion_depth)
1065 max_recursion_depth = depth;
1066 if (depth >= RECURSION_LIMIT)
1067 return print_infinite_recursion_bug();
1069 * Check this lock's dependency list:
1071 list_for_each_entry(entry, &source->locks_after, entry) {
1072 if (entry->class == hlock_class(check_target))
1073 return print_circular_bug_header(entry, depth+1);
1074 debug_atomic_inc(&nr_cyclic_checks);
1075 if (!check_noncircular(entry->class, depth+1))
1076 return print_circular_bug_entry(entry, depth+1);
1081 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1083 * Forwards and backwards subgraph searching, for the purposes of
1084 * proving that two subgraphs can be connected by a new dependency
1085 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1087 static enum lock_usage_bit find_usage_bit;
1088 static struct lock_class *forwards_match, *backwards_match;
1091 * Find a node in the forwards-direction dependency sub-graph starting
1092 * at <source> that matches <find_usage_bit>.
1094 * Return 2 if such a node exists in the subgraph, and put that node
1095 * into <forwards_match>.
1097 * Return 1 otherwise and keep <forwards_match> unchanged.
1098 * Return 0 on error.
1101 find_usage_forwards(struct lock_class *source, unsigned int depth)
1103 struct lock_list *entry;
1106 if (lockdep_dependency_visit(source, depth))
1109 if (depth > max_recursion_depth)
1110 max_recursion_depth = depth;
1111 if (depth >= RECURSION_LIMIT)
1112 return print_infinite_recursion_bug();
1114 debug_atomic_inc(&nr_find_usage_forwards_checks);
1115 if (source->usage_mask & (1 << find_usage_bit)) {
1116 forwards_match = source;
1121 * Check this lock's dependency list:
1123 list_for_each_entry(entry, &source->locks_after, entry) {
1124 debug_atomic_inc(&nr_find_usage_forwards_recursions);
1125 ret = find_usage_forwards(entry->class, depth+1);
1126 if (ret == 2 || ret == 0)
1133 * Find a node in the backwards-direction dependency sub-graph starting
1134 * at <source> that matches <find_usage_bit>.
1136 * Return 2 if such a node exists in the subgraph, and put that node
1137 * into <backwards_match>.
1139 * Return 1 otherwise and keep <backwards_match> unchanged.
1140 * Return 0 on error.
1143 find_usage_backwards(struct lock_class *source, unsigned int depth)
1145 struct lock_list *entry;
1148 if (lockdep_dependency_visit(source, depth))
1151 if (!__raw_spin_is_locked(&lockdep_lock))
1152 return DEBUG_LOCKS_WARN_ON(1);
1154 if (depth > max_recursion_depth)
1155 max_recursion_depth = depth;
1156 if (depth >= RECURSION_LIMIT)
1157 return print_infinite_recursion_bug();
1159 debug_atomic_inc(&nr_find_usage_backwards_checks);
1160 if (source->usage_mask & (1 << find_usage_bit)) {
1161 backwards_match = source;
1165 if (!source && debug_locks_off_graph_unlock()) {
1171 * Check this lock's dependency list:
1173 list_for_each_entry(entry, &source->locks_before, entry) {
1174 debug_atomic_inc(&nr_find_usage_backwards_recursions);
1175 ret = find_usage_backwards(entry->class, depth+1);
1176 if (ret == 2 || ret == 0)
1183 print_bad_irq_dependency(struct task_struct *curr,
1184 struct held_lock *prev,
1185 struct held_lock *next,
1186 enum lock_usage_bit bit1,
1187 enum lock_usage_bit bit2,
1188 const char *irqclass)
1190 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1193 printk("\n======================================================\n");
1194 printk( "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
1195 irqclass, irqclass);
1196 print_kernel_version();
1197 printk( "------------------------------------------------------\n");
1198 printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
1199 curr->comm, task_pid_nr(curr),
1200 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
1201 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1202 curr->hardirqs_enabled,
1203 curr->softirqs_enabled);
1206 printk("\nand this task is already holding:\n");
1208 printk("which would create a new lock dependency:\n");
1209 print_lock_name(hlock_class(prev));
1211 print_lock_name(hlock_class(next));
1214 printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
1216 print_lock_name(backwards_match);
1217 printk("\n... which became %s-irq-safe at:\n", irqclass);
1219 print_stack_trace(backwards_match->usage_traces + bit1, 1);
1221 printk("\nto a %s-irq-unsafe lock:\n", irqclass);
1222 print_lock_name(forwards_match);
1223 printk("\n... which became %s-irq-unsafe at:\n", irqclass);
1226 print_stack_trace(forwards_match->usage_traces + bit2, 1);
1228 printk("\nother info that might help us debug this:\n\n");
1229 lockdep_print_held_locks(curr);
1231 printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass);
1232 print_lock_dependencies(backwards_match, 0);
1234 printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass);
1235 print_lock_dependencies(forwards_match, 0);
1237 printk("\nstack backtrace:\n");
1244 check_usage(struct task_struct *curr, struct held_lock *prev,
1245 struct held_lock *next, enum lock_usage_bit bit_backwards,
1246 enum lock_usage_bit bit_forwards, const char *irqclass)
1250 find_usage_bit = bit_backwards;
1251 /* fills in <backwards_match> */
1252 ret = find_usage_backwards(hlock_class(prev), 0);
1253 if (!ret || ret == 1)
1256 find_usage_bit = bit_forwards;
1257 ret = find_usage_forwards(hlock_class(next), 0);
1258 if (!ret || ret == 1)
1261 return print_bad_irq_dependency(curr, prev, next,
1262 bit_backwards, bit_forwards, irqclass);
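/*
 * Illustrative scenario for the checks below (hypothetical locks A and B,
 * not part of this file): if A is hardirq-safe (taken from hardirq
 * context) and B is hardirq-unsafe (taken with hardirqs enabled), then an
 * A -> B dependency is dangerous: CPU0 can hold B with hardirqs enabled
 * when an interrupt arrives and spins on A, while CPU1 already holds A
 * and is spinning on B, so neither CPU can make progress.
 */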
1266 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1267 struct held_lock *next)
1270 * Prove that the new dependency does not connect a hardirq-safe
1271 * lock with a hardirq-unsafe lock - to achieve this we search
1272 * the backwards-subgraph starting at <prev>, and the
1273 * forwards-subgraph starting at <next>:
1275 if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
1276 LOCK_ENABLED_HARDIRQS, "hard"))
1280 * Prove that the new dependency does not connect a hardirq-safe-read
1281 * lock with a hardirq-unsafe lock - to achieve this we search
1282 * the backwards-subgraph starting at <prev>, and the
1283 * forwards-subgraph starting at <next>:
1285 if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
1286 LOCK_ENABLED_HARDIRQS, "hard-read"))
1290 * Prove that the new dependency does not connect a softirq-safe
1291 * lock with a softirq-unsafe lock - to achieve this we search
1292 * the backwards-subgraph starting at <prev>, and the
1293 * forwards-subgraph starting at <next>:
1295 if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
1296 LOCK_ENABLED_SOFTIRQS, "soft"))
1299 * Prove that the new dependency does not connect a softirq-safe-read
1300 * lock with a softirq-unsafe lock - to achieve this we search
1301 * the backwards-subgraph starting at <prev>, and the
1302 * forwards-subgraph starting at <next>:
1304 if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
1305 LOCK_ENABLED_SOFTIRQS, "soft-read"))
1311 static void inc_chains(void)
1313 if (current->hardirq_context)
1314 nr_hardirq_chains++;
1316 if (current->softirq_context)
1317 nr_softirq_chains++;
1319 nr_process_chains++;
1326 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1327 struct held_lock *next)
1332 static inline void inc_chains(void)
1334 nr_process_chains++;
1340 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
1341 struct held_lock *next)
1343 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1346 printk("\n=============================================\n");
1347 printk( "[ INFO: possible recursive locking detected ]\n");
1348 print_kernel_version();
1349 printk( "---------------------------------------------\n");
1350 printk("%s/%d is trying to acquire lock:\n",
1351 curr->comm, task_pid_nr(curr));
1353 printk("\nbut task is already holding lock:\n");
1356 printk("\nother info that might help us debug this:\n");
1357 lockdep_print_held_locks(curr);
1359 printk("\nstack backtrace:\n");
1366 * Check whether we are holding such a class already.
1368 * (Note that this has to be done separately, because the graph cannot
1369 * detect such classes of deadlocks.)
1371 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
1374 check_deadlock(struct task_struct *curr, struct held_lock *next,
1375 struct lockdep_map *next_instance, int read)
1377 struct held_lock *prev;
1378 struct held_lock *nest = NULL;
1381 for (i = 0; i < curr->lockdep_depth; i++) {
1382 prev = curr->held_locks + i;
1384 if (prev->instance == next->nest_lock)
1387 if (hlock_class(prev) != hlock_class(next))
1391 * Allow read-after-read recursion of the same
1392 * lock class (i.e. read_lock(lock)+read_lock(lock)):
1394 if ((read == 2) && prev->read)
1398 * We're holding the nest_lock, which serializes this lock's
1399 * nesting behaviour.
1404 return print_deadlock_bug(curr, prev, next);
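/*
 * For illustration (hypothetical lock, not part of this file):
 *
 *	read_lock(&lock); read_lock(&lock);	(allowed: recursive read)
 *	spin_lock(&lock); spin_lock(&lock);	(reported by the check above)
 *
 * The graph walk cannot see same-class recursion, which is why this check
 * runs against the currently held locks instead.
 */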
1410 * There was a chain-cache miss, and we are about to add a new dependency
1411 * to a previous lock. We recursively validate the following rules:
1413 * - would the adding of the <prev> -> <next> dependency create a
1414 * circular dependency in the graph? [== circular deadlock]
1416 * - does the new prev->next dependency connect any hardirq-safe lock
1417 * (in the full backwards-subgraph starting at <prev>) with any
1418 * hardirq-unsafe lock (in the full forwards-subgraph starting at
1419 * <next>)? [== illegal lock inversion with hardirq contexts]
1421 * - does the new prev->next dependency connect any softirq-safe lock
1422 * (in the full backwards-subgraph starting at <prev>) with any
1423 * softirq-unsafe lock (in the full forwards-subgraph starting at
1424 * <next>)? [== illegal lock inversion with softirq contexts]
1426 * any of these scenarios could lead to a deadlock.
1428 * Then if all the validations pass, we add the forwards and backwards dependency.
1432 check_prev_add(struct task_struct *curr, struct held_lock *prev,
1433 struct held_lock *next, int distance)
1435 struct lock_list *entry;
1439 * Prove that the new <prev> -> <next> dependency would not
1440 * create a circular dependency in the graph. (We do this by
1441 * forward-recursing into the graph starting at <next>, and
1442 * checking whether we can reach <prev>.)
1444 * We are using global variables to control the recursion, to
1445 * keep the stackframe size of the recursive functions low:
1447 check_source = next;
1448 check_target = prev;
1449 if (!(check_noncircular(hlock_class(next), 0)))
1450 return print_circular_bug_tail();
1452 if (!check_prev_add_irq(curr, prev, next))
1456 * For recursive read-locks we do all the dependency checks,
1457 * but we don't store read-triggered dependencies (only
1458 * write-triggered dependencies). This ensures that only the
1459 * write-side dependencies matter, and that if for example a
1460 * write-lock never takes any other locks, then the reads are
1461 * equivalent to a NOP.
1463 if (next->read == 2 || prev->read == 2)
1466 * Is the <prev> -> <next> dependency already present?
1468 * (this may occur even though this is a new chain: consider
1469 * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
1470 * chains - the second one will be new, but L1 already has
1471 * L2 added to its dependency list, due to the first chain.)
1473 list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
1474 if (entry->class == hlock_class(next)) {
1476 entry->distance = 1;
1482 * Ok, all validations passed, add the new lock
1483 * to the previous lock's dependency list:
1485 ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
1486 &hlock_class(prev)->locks_after,
1487 next->acquire_ip, distance);
1492 ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
1493 &hlock_class(next)->locks_before,
1494 next->acquire_ip, distance);
1499 * Debugging printouts:
1501 if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
1503 printk("\n new dependency: ");
1504 print_lock_name(hlock_class(prev));
1506 print_lock_name(hlock_class(next));
1509 return graph_lock();
1515 * Add the dependency to all directly-previous locks that are 'relevant'.
1516 * The ones that are relevant are (in increasing distance from curr):
1517 * all consecutive trylock entries and the final non-trylock entry - or
1518 * the end of this context's lock-chain - whichever comes first.
1521 check_prevs_add(struct task_struct *curr, struct held_lock *next)
1523 int depth = curr->lockdep_depth;
1524 struct held_lock *hlock;
1529 * Depth must not be zero for a non-head lock:
1534 * At least two relevant locks must exist for this to be a head lock:
1537 if (curr->held_locks[depth].irq_context !=
1538 curr->held_locks[depth-1].irq_context)
1542 int distance = curr->lockdep_depth - depth + 1;
1543 hlock = curr->held_locks + depth-1;
1545 * Only non-recursive-read entries get new dependencies
1548 if (hlock->read != 2) {
1549 if (!check_prev_add(curr, hlock, next, distance))
1552 * Stop after the first non-trylock entry,
1553 * as non-trylock entries have added their
1554 * own direct dependencies already, so this
1555 * lock is connected to them indirectly:
1557 if (!hlock->trylock)
1562 * End of lock-stack?
1567 * Stop the search if we cross into another context:
1569 if (curr->held_locks[depth].irq_context !=
1570 curr->held_locks[depth-1].irq_context)
1575 if (!debug_locks_off_graph_unlock())
1583 unsigned long nr_lock_chains;
1584 struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
1585 int nr_chain_hlocks;
1586 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
1588 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
1590 return lock_classes + chain_hlocks[chain->base + i];
1594 * Look up a dependency chain. If the key is not present yet then
1595 * add it and return 1 - in this case the new dependency chain is
1596 * validated. If the key is already hashed, return 0.
1597 * (On return with 1 graph_lock is held.)
1599 static inline int lookup_chain_cache(struct task_struct *curr,
1600 struct held_lock *hlock,
1603 struct lock_class *class = hlock_class(hlock);
1604 struct list_head *hash_head = chainhashentry(chain_key);
1605 struct lock_chain *chain;
1606 struct held_lock *hlock_curr, *hlock_next;
1609 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1612 * We can walk it lock-free, because entries only get added to the hash:
1615 list_for_each_entry(chain, hash_head, entry) {
1616 if (chain->chain_key == chain_key) {
1618 debug_atomic_inc(&chain_lookup_hits);
1619 if (very_verbose(class))
1620 printk("\nhash chain already cached, key: "
1621 "%016Lx tail class: [%p] %s\n",
1622 (unsigned long long)chain_key,
1623 class->key, class->name);
1627 if (very_verbose(class))
1628 printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
1629 (unsigned long long)chain_key, class->key, class->name);
1631 * Allocate a new chain entry from the static array, and add it to the hash:
1637 * We have to walk the chain again locked - to avoid duplicates:
1639 list_for_each_entry(chain, hash_head, entry) {
1640 if (chain->chain_key == chain_key) {
1645 if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
1646 if (!debug_locks_off_graph_unlock())
1649 printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
1650 printk("turning off the locking correctness validator.\n");
1653 chain = lock_chains + nr_lock_chains++;
1654 chain->chain_key = chain_key;
1655 chain->irq_context = hlock->irq_context;
1656 /* Find the first held_lock of current chain */
1658 for (i = curr->lockdep_depth - 1; i >= 0; i--) {
1659 hlock_curr = curr->held_locks + i;
1660 if (hlock_curr->irq_context != hlock_next->irq_context)
1665 chain->depth = curr->lockdep_depth + 1 - i;
1666 cn = nr_chain_hlocks;
1667 while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) {
1668 n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth);
1673 if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
1675 for (j = 0; j < chain->depth - 1; j++, i++) {
1676 int lock_id = curr->held_locks[i].class_idx - 1;
1677 chain_hlocks[chain->base + j] = lock_id;
1679 chain_hlocks[chain->base + j] = class - lock_classes;
1681 list_add_tail_rcu(&chain->entry, hash_head);
1682 debug_atomic_inc(&chain_lookup_misses);
1688 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
1689 struct held_lock *hlock, int chain_head, u64 chain_key)
1692 * Trylock needs to maintain the stack of held locks, but it
1693 * does not add new dependencies, because trylock can be done in any order.
1696 * We look up the chain_key and do the O(N^2) check and update of
1697 * the dependencies only if this is a new dependency chain.
1698 * (If lookup_chain_cache() returns with 1 it acquires
1699 * graph_lock for us)
1701 if (!hlock->trylock && (hlock->check == 2) &&
1702 lookup_chain_cache(curr, hlock, chain_key)) {
1704 * Check whether last held lock:
1706 * - is irq-safe, if this lock is irq-unsafe
1707 * - is softirq-safe, if this lock is softirq-unsafe
1709 * And check whether the new lock's dependency graph
1710 * could lead back to the previous lock.
1712 * any of these scenarios could lead to a deadlock. If
1715 int ret = check_deadlock(curr, hlock, lock, hlock->read);
1720 * Mark recursive read, as we jump over it when
1721 * building dependencies (just like we jump over trylock entries):
1727 * Add dependency only if this lock is not the head
1728 * of the chain, and if it's not a secondary read-lock:
1730 if (!chain_head && ret != 2)
1731 if (!check_prevs_add(curr, hlock))
1735 /* after lookup_chain_cache(): */
1736 if (unlikely(!debug_locks))
1742 static inline int validate_chain(struct task_struct *curr,
1743 struct lockdep_map *lock, struct held_lock *hlock,
1744 int chain_head, u64 chain_key)
1751 * We are building curr_chain_key incrementally, so double-check
1752 * it from scratch, to make sure that it's done correctly:
1754 static void check_chain_key(struct task_struct *curr)
1756 #ifdef CONFIG_DEBUG_LOCKDEP
1757 struct held_lock *hlock, *prev_hlock = NULL;
1761 for (i = 0; i < curr->lockdep_depth; i++) {
1762 hlock = curr->held_locks + i;
1763 if (chain_key != hlock->prev_chain_key) {
1765 WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
1766 curr->lockdep_depth, i,
1767 (unsigned long long)chain_key,
1768 (unsigned long long)hlock->prev_chain_key);
1771 id = hlock->class_idx - 1;
1772 if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
1775 if (prev_hlock && (prev_hlock->irq_context !=
1776 hlock->irq_context))
1778 chain_key = iterate_chain_key(chain_key, id);
1781 if (chain_key != curr->curr_chain_key) {
1783 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
1784 curr->lockdep_depth, i,
1785 (unsigned long long)chain_key,
1786 (unsigned long long)curr->curr_chain_key);
1792 print_usage_bug(struct task_struct *curr, struct held_lock *this,
1793 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
1795 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1798 printk("\n=================================\n");
1799 printk( "[ INFO: inconsistent lock state ]\n");
1800 print_kernel_version();
1801 printk( "---------------------------------\n");
1803 printk("inconsistent {%s} -> {%s} usage.\n",
1804 usage_str[prev_bit], usage_str[new_bit]);
1806 printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
1807 curr->comm, task_pid_nr(curr),
1808 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
1809 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
1810 trace_hardirqs_enabled(curr),
1811 trace_softirqs_enabled(curr));
1814 printk("{%s} state was registered at:\n", usage_str[prev_bit]);
1815 print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
1817 print_irqtrace_events(curr);
1818 printk("\nother info that might help us debug this:\n");
1819 lockdep_print_held_locks(curr);
1821 printk("\nstack backtrace:\n");
1828 * Print out an error if an invalid bit is set:
1831 valid_state(struct task_struct *curr, struct held_lock *this,
1832 enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
1834 if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
1835 return print_usage_bug(curr, this, bad_bit, new_bit);
1839 static int mark_lock(struct task_struct *curr, struct held_lock *this,
1840 enum lock_usage_bit new_bit);
1842 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1845 * print irq inversion bug:
1848 print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
1849 struct held_lock *this, int forwards,
1850 const char *irqclass)
1852 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1855 printk("\n=========================================================\n");
1856 printk( "[ INFO: possible irq lock inversion dependency detected ]\n");
1857 print_kernel_version();
1858 printk( "---------------------------------------------------------\n");
1859 printk("%s/%d just changed the state of lock:\n",
1860 curr->comm, task_pid_nr(curr));
1863 printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
1865 printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass);
1866 print_lock_name(other);
1867 printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
1869 printk("\nother info that might help us debug this:\n");
1870 lockdep_print_held_locks(curr);
1872 printk("\nthe first lock's dependencies:\n");
1873 print_lock_dependencies(hlock_class(this), 0);
1875 printk("\nthe second lock's dependencies:\n");
1876 print_lock_dependencies(other, 0);
1878 printk("\nstack backtrace:\n");
1885 * Prove that in the forwards-direction subgraph starting at <this>
1886 * there is no lock matching <mask>:
1889 check_usage_forwards(struct task_struct *curr, struct held_lock *this,
1890 enum lock_usage_bit bit, const char *irqclass)
1894 find_usage_bit = bit;
1895 /* fills in <forwards_match> */
1896 ret = find_usage_forwards(hlock_class(this), 0);
1897 if (!ret || ret == 1)
1900 return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
1904 * Prove that in the backwards-direction subgraph starting at <this>
1905 * there is no lock matching <mask>:
1908 check_usage_backwards(struct task_struct *curr, struct held_lock *this,
1909 enum lock_usage_bit bit, const char *irqclass)
1913 find_usage_bit = bit;
1914 /* fills in <backwards_match> */
1915 ret = find_usage_backwards(hlock_class(this), 0);
1916 if (!ret || ret == 1)
1919 return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
1922 void print_irqtrace_events(struct task_struct *curr)
1924 printk("irq event stamp: %u\n", curr->irq_events);
1925 printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event);
1926 print_ip_sym(curr->hardirq_enable_ip);
1927 printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
1928 print_ip_sym(curr->hardirq_disable_ip);
1929 printk("softirqs last enabled at (%u): ", curr->softirq_enable_event);
1930 print_ip_sym(curr->softirq_enable_ip);
1931 printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
1932 print_ip_sym(curr->softirq_disable_ip);
1935 static int hardirq_verbose(struct lock_class *class)
1938 return class_filter(class);
1943 static int softirq_verbose(struct lock_class *class)
1946 return class_filter(class);
1951 #define STRICT_READ_CHECKS 1
1953 static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
1954 enum lock_usage_bit new_bit)
1959 case LOCK_USED_IN_HARDIRQ:
1960 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
1962 if (!valid_state(curr, this, new_bit,
1963 LOCK_ENABLED_HARDIRQS_READ))
1966 * just marked it hardirq-safe, check that this lock
1967 * took no hardirq-unsafe lock in the past:
1969 if (!check_usage_forwards(curr, this,
1970 LOCK_ENABLED_HARDIRQS, "hard"))
1972 #if STRICT_READ_CHECKS
1974 * just marked it hardirq-safe, check that this lock
1975 * took no hardirq-unsafe-read lock in the past:
1977 if (!check_usage_forwards(curr, this,
1978 LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
1981 if (hardirq_verbose(hlock_class(this)))
1984 case LOCK_USED_IN_SOFTIRQ:
1985 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
1987 if (!valid_state(curr, this, new_bit,
1988 LOCK_ENABLED_SOFTIRQS_READ))
1991 * just marked it softirq-safe, check that this lock
1992 * took no softirq-unsafe lock in the past:
1994 if (!check_usage_forwards(curr, this,
1995 LOCK_ENABLED_SOFTIRQS, "soft"))
1997 #if STRICT_READ_CHECKS
1999 * just marked it softirq-safe, check that this lock
2000 * took no softirq-unsafe-read lock in the past:
2002 if (!check_usage_forwards(curr, this,
2003 LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
2006 if (softirq_verbose(hlock_class(this)))
2009 case LOCK_USED_IN_HARDIRQ_READ:
2010 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
2013 * just marked it hardirq-read-safe, check that this lock
2014 * took no hardirq-unsafe lock in the past:
2016 if (!check_usage_forwards(curr, this,
2017 LOCK_ENABLED_HARDIRQS, "hard"))
2019 if (hardirq_verbose(hlock_class(this)))
2022 case LOCK_USED_IN_SOFTIRQ_READ:
2023 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
2026 * just marked it softirq-read-safe, check that this lock
2027 * took no softirq-unsafe lock in the past:
2029 if (!check_usage_forwards(curr, this,
2030 LOCK_ENABLED_SOFTIRQS, "soft"))
2032 if (softirq_verbose(hlock_class(this)))
2035 case LOCK_ENABLED_HARDIRQS:
2036 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
2038 if (!valid_state(curr, this, new_bit,
2039 LOCK_USED_IN_HARDIRQ_READ))
2042 * just marked it hardirq-unsafe, check that no hardirq-safe
2043 * lock in the system ever took it in the past:
2045 if (!check_usage_backwards(curr, this,
2046 LOCK_USED_IN_HARDIRQ, "hard"))
2048 #if STRICT_READ_CHECKS
2050 * just marked it hardirq-unsafe, check that no
2051 * hardirq-safe-read lock in the system ever took it in the past:
2054 if (!check_usage_backwards(curr, this,
2055 LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
2058 if (hardirq_verbose(hlock_class(this)))
2061 case LOCK_ENABLED_SOFTIRQS:
2062 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
2064 if (!valid_state(curr, this, new_bit,
2065 LOCK_USED_IN_SOFTIRQ_READ))
2068 * just marked it softirq-unsafe, check that no softirq-safe
2069 * lock in the system ever took it in the past:
2071 if (!check_usage_backwards(curr, this,
2072 LOCK_USED_IN_SOFTIRQ, "soft"))
2074 #if STRICT_READ_CHECKS
2076 * just marked it softirq-unsafe, check that no
2077 * softirq-safe-read lock in the system ever took it in the past:
2080 if (!check_usage_backwards(curr, this,
2081 LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
2084 if (softirq_verbose(hlock_class(this)))
2087 case LOCK_ENABLED_HARDIRQS_READ:
2088 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
2090 #if STRICT_READ_CHECKS
2092 * just marked it hardirq-read-unsafe, check that no
2093 * hardirq-safe lock in the system ever took it in the past:
2095 if (!check_usage_backwards(curr, this,
2096 LOCK_USED_IN_HARDIRQ, "hard"))
2099 if (hardirq_verbose(hlock_class(this)))
2102 case LOCK_ENABLED_SOFTIRQS_READ:
2103 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
2105 #if STRICT_READ_CHECKS
2107 * just marked it softirq-read-unsafe, check that no
2108 * softirq-safe lock in the system ever took it in the past:
2110 if (!check_usage_backwards(curr, this,
2111 LOCK_USED_IN_SOFTIRQ, "soft"))
2114 if (softirq_verbose(hlock_class(this)))
2126 * Mark all held locks with a usage bit:
2129 mark_held_locks(struct task_struct *curr, int hardirq)
2131 enum lock_usage_bit usage_bit;
2132 struct held_lock *hlock;
2135 for (i = 0; i < curr->lockdep_depth; i++) {
2136 hlock = curr->held_locks + i;
2140 usage_bit = LOCK_ENABLED_HARDIRQS_READ;
2142 usage_bit = LOCK_ENABLED_HARDIRQS;
2145 usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
2147 usage_bit = LOCK_ENABLED_SOFTIRQS;
2149 if (!mark_lock(curr, hlock, usage_bit))
2157 * Debugging helper: via this flag we know that we are in
2158 * 'early bootup code', and will warn about any invalid irqs-on event:
2160 static int early_boot_irqs_enabled;
2162 void early_boot_irqs_off(void)
2164 early_boot_irqs_enabled = 0;
2167 void early_boot_irqs_on(void)
2169 early_boot_irqs_enabled = 1;
2173 * Hardirqs will be enabled:
2175 void trace_hardirqs_on_caller(unsigned long ip)
2177 struct task_struct *curr = current;
2179 time_hardirqs_on(CALLER_ADDR0, ip);
2181 if (unlikely(!debug_locks || current->lockdep_recursion))
2184 if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
2187 if (unlikely(curr->hardirqs_enabled)) {
2188 debug_atomic_inc(&redundant_hardirqs_on);
2191 /* we'll do an OFF -> ON transition: */
2192 curr->hardirqs_enabled = 1;
2194 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2196 if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2199 * We are going to turn hardirqs on, so set the
2200 * usage bit for all held locks:
2202 if (!mark_held_locks(curr, 1))
2205 * If we have softirqs enabled, then set the usage
2206 * bit for all held locks. (disabled hardirqs prevented
2207 * this bit from being set before)
2209 if (curr->softirqs_enabled)
2210 if (!mark_held_locks(curr, 0))
2213 curr->hardirq_enable_ip = ip;
2214 curr->hardirq_enable_event = ++curr->irq_events;
2215 debug_atomic_inc(&hardirqs_on_events);
2217 EXPORT_SYMBOL(trace_hardirqs_on_caller);
2219 void trace_hardirqs_on(void)
2221 trace_hardirqs_on_caller(CALLER_ADDR0);
2223 EXPORT_SYMBOL(trace_hardirqs_on);
2226 * Hardirqs were disabled:
2228 void trace_hardirqs_off_caller(unsigned long ip)
2230 struct task_struct *curr = current;
2232 time_hardirqs_off(CALLER_ADDR0, ip);
2234 if (unlikely(!debug_locks || current->lockdep_recursion))
2237 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2240 if (curr->hardirqs_enabled) {
2242 * We have done an ON -> OFF transition:
2244 curr->hardirqs_enabled = 0;
2245 curr->hardirq_disable_ip = ip;
2246 curr->hardirq_disable_event = ++curr->irq_events;
2247 debug_atomic_inc(&hardirqs_off_events);
2249 debug_atomic_inc(&redundant_hardirqs_off);
2251 EXPORT_SYMBOL(trace_hardirqs_off_caller);
2253 void trace_hardirqs_off(void)
2255 trace_hardirqs_off_caller(CALLER_ADDR0);
2257 EXPORT_SYMBOL(trace_hardirqs_off);
2260 * Softirqs will be enabled:
2262 void trace_softirqs_on(unsigned long ip)
2264 struct task_struct *curr = current;
2266 if (unlikely(!debug_locks))
2269 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2272 if (curr->softirqs_enabled) {
2273 debug_atomic_inc(&redundant_softirqs_on);
2278 * We'll do an OFF -> ON transition:
2280 curr->softirqs_enabled = 1;
2281 curr->softirq_enable_ip = ip;
2282 curr->softirq_enable_event = ++curr->irq_events;
2283 debug_atomic_inc(&softirqs_on_events);
2285 * We are going to turn softirqs on, so set the
2286 * usage bit for all held locks, if hardirqs are enabled too:
2289 if (curr->hardirqs_enabled)
2290 mark_held_locks(curr, 0);
2294 * Softirqs were disabled:
2296 void trace_softirqs_off(unsigned long ip)
2298 struct task_struct *curr = current;
2300 if (unlikely(!debug_locks))
2303 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2306 if (curr->softirqs_enabled) {
2308 * We have done an ON -> OFF transition:
2310 curr->softirqs_enabled = 0;
2311 curr->softirq_disable_ip = ip;
2312 curr->softirq_disable_event = ++curr->irq_events;
2313 debug_atomic_inc(&softirqs_off_events);
2314 DEBUG_LOCKS_WARN_ON(!softirq_count());
2316 debug_atomic_inc(&redundant_softirqs_off);
2319 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2322 * If non-trylock use in a hardirq or softirq context, then
2323 * mark the lock as used in these contexts:
2325 if (!hlock->trylock) {
2327 if (curr->hardirq_context)
2328 if (!mark_lock(curr, hlock,
2329 LOCK_USED_IN_HARDIRQ_READ))
2331 if (curr->softirq_context)
2332 if (!mark_lock(curr, hlock,
2333 LOCK_USED_IN_SOFTIRQ_READ))
2336 if (curr->hardirq_context)
2337 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
2339 if (curr->softirq_context)
2340 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
2344 if (!hlock->hardirqs_off) {
2346 if (!mark_lock(curr, hlock,
2347 LOCK_ENABLED_HARDIRQS_READ))
2349 if (curr->softirqs_enabled)
2350 if (!mark_lock(curr, hlock,
2351 LOCK_ENABLED_SOFTIRQS_READ))
2354 if (!mark_lock(curr, hlock,
2355 LOCK_ENABLED_HARDIRQS))
2357 if (curr->softirqs_enabled)
2358 if (!mark_lock(curr, hlock,
2359 LOCK_ENABLED_SOFTIRQS))

static int separate_irq_context(struct task_struct *curr,
		struct held_lock *hlock)
{
	unsigned int depth = curr->lockdep_depth;

	/*
	 * Keep track of points where we cross into an interrupt context:
	 */
	hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
				curr->softirq_context;
	if (depth) {
		struct held_lock *prev_hlock;

		prev_hlock = curr->held_locks + depth-1;
		/*
		 * If we cross into another context, reset the
		 * hash key (this also prevents the checking and the
		 * adding of the dependency to 'prev'):
		 */
		if (prev_hlock->irq_context != hlock->irq_context)
			return 1;
	}
	return 0;
}

#else

static inline
int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit new_bit)
{
	WARN_ON(1);
	return 1;
}

static inline int mark_irqflags(struct task_struct *curr,
		struct held_lock *hlock)
{
	return 1;
}

static inline int separate_irq_context(struct task_struct *curr,
		struct held_lock *hlock)
{
	return 0;
}

#endif

/*
 * Mark a lock with a usage bit, and validate the state transition:
 */
static int mark_lock(struct task_struct *curr, struct held_lock *this,
			     enum lock_usage_bit new_bit)
{
	unsigned int new_mask = 1 << new_bit, ret = 1;

	/*
	 * If already set then do not dirty the cacheline,
	 * nor do any checks:
	 */
	if (likely(hlock_class(this)->usage_mask & new_mask))
		return 1;

	if (!graph_lock())
		return 0;
	/*
	 * Make sure we didn't race:
	 */
	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
		graph_unlock();
		return 1;
	}

	hlock_class(this)->usage_mask |= new_mask;

	if (!save_trace(hlock_class(this)->usage_traces + new_bit))
		return 0;

	switch (new_bit) {
	case LOCK_USED_IN_HARDIRQ:
	case LOCK_USED_IN_SOFTIRQ:
	case LOCK_USED_IN_HARDIRQ_READ:
	case LOCK_USED_IN_SOFTIRQ_READ:
	case LOCK_ENABLED_HARDIRQS:
	case LOCK_ENABLED_SOFTIRQS:
	case LOCK_ENABLED_HARDIRQS_READ:
	case LOCK_ENABLED_SOFTIRQS_READ:
		ret = mark_lock_irq(curr, this, new_bit);
		if (!ret)
			return 0;
		break;
	case LOCK_USED:
		debug_atomic_dec(&nr_unused_locks);
		break;
	default:
		if (!debug_locks_off_graph_unlock())
			return 0;
		WARN_ON(1);
		return 0;
	}

	graph_unlock();

	/*
	 * We must printk outside of the graph_lock:
	 */
	if (ret == 2) {
		printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
		print_lock(this);
		print_irqtrace_events(curr);
		dump_stack();
	}

	return ret;
}

/*
 * Initialize a lock instance's lock-class mapping info:
 */
void lockdep_init_map(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass)
{
	if (unlikely(!debug_locks))
		return;

	if (DEBUG_LOCKS_WARN_ON(!key))
		return;
	if (DEBUG_LOCKS_WARN_ON(!name))
		return;
	/*
	 * Sanity check, the lock-class key must be persistent:
	 */
	if (!static_obj(key)) {
		printk("BUG: key %p not in .data!\n", key);
		DEBUG_LOCKS_WARN_ON(1);
		return;
	}
	lock->name = name;
	lock->key = key;
	lock->class_cache = NULL;
#ifdef CONFIG_LOCK_STAT
	lock->cpu = raw_smp_processor_id();
#endif
	if (subclass)
		register_lock_class(lock, subclass, 1);
}
EXPORT_SYMBOL_GPL(lockdep_init_map);
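
/*
 * Illustrative sketch of how lockdep_init_map() is normally reached
 * (simplified from include/linux/mutex.h; other lock types are analogous):
 *
 *	#define mutex_init(mutex)					\
 *	do {								\
 *		static struct lock_class_key __key;			\
 *									\
 *		__mutex_init((mutex), #mutex, &__key);			\
 *	} while (0)
 *
 * The static __key lives in .bss, which is exactly what the static_obj()
 * check above insists on: the key must outlive every lock mapped to it.
 */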

/*
 * This gets called for every mutex_lock*()/spin_lock*() operation.
 * We maintain the dependency maps and validate the locking attempt:
 */
static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			  int trylock, int read, int check, int hardirqs_off,
			  struct lockdep_map *nest_lock, unsigned long ip)
{
	struct task_struct *curr = current;
	struct lock_class *class = NULL;
	struct held_lock *hlock;
	unsigned int depth, id;
	int chain_head = 0;
	u64 chain_key;

	if (!prove_locking)
		check = 1;

	if (unlikely(!debug_locks))
		return 0;

	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return 0;

	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
		debug_locks_off();
		printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
		printk("turning off the locking correctness validator.\n");
		return 0;
	}

	if (!subclass)
		class = lock->class_cache;
	/*
	 * Not cached yet or subclass?
	 */
	if (unlikely(!class)) {
		class = register_lock_class(lock, subclass, 0);
		if (!class)
			return 0;
	}
	debug_atomic_inc((atomic_t *)&class->ops);
	if (very_verbose(class)) {
		printk("\nacquire class [%p] %s", class->key, class->name);
		if (class->name_version > 1)
			printk("#%d", class->name_version);
		printk("\n");
		dump_stack();
	}

	/*
	 * Add the lock to the list of currently held locks.
	 * (we don't increase the depth just yet, up until the
	 * dependency checks are done)
	 */
	depth = curr->lockdep_depth;
	if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
		return 0;

	hlock = curr->held_locks + depth;
	if (DEBUG_LOCKS_WARN_ON(!class))
		return 0;
	hlock->class_idx = class - lock_classes + 1;
	hlock->acquire_ip = ip;
	hlock->instance = lock;
	hlock->nest_lock = nest_lock;
	hlock->trylock = trylock;
	hlock->read = read;
	hlock->check = check;
	hlock->hardirqs_off = !!hardirqs_off;
#ifdef CONFIG_LOCK_STAT
	hlock->waittime_stamp = 0;
	hlock->holdtime_stamp = sched_clock();
#endif

	if (check == 2 && !mark_irqflags(curr, hlock))
		return 0;

	/* mark it as used: */
	if (!mark_lock(curr, hlock, LOCK_USED))
		return 0;

	/*
	 * Calculate the chain hash: it's the combined hash of all the
	 * lock keys along the dependency chain. We save the hash value
	 * at every step so that we can get the current hash easily
	 * after unlock. The chain hash is then used to cache dependency
	 * results.
	 *
	 * The 'key ID' is the most compact key value we can use to drive
	 * the hash, not class->key.
	 */
	id = class - lock_classes;
	if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
		return 0;

	chain_key = curr->curr_chain_key;
	if (!depth) {
		if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
			return 0;
		chain_head = 1;
	}

	hlock->prev_chain_key = chain_key;
	if (separate_irq_context(curr, hlock)) {
		chain_key = 0;
		chain_head = 1;
	}
	chain_key = iterate_chain_key(chain_key, id);

	if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
		return 0;

	curr->curr_chain_key = chain_key;
	curr->lockdep_depth++;
	check_chain_key(curr);
#ifdef CONFIG_DEBUG_LOCKDEP
	if (unlikely(!debug_locks))
		return 0;
#endif
	if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
		debug_locks_off();
		printk("BUG: MAX_LOCK_DEPTH too low!\n");
		printk("turning off the locking correctness validator.\n");
		return 0;
	}

	if (unlikely(curr->lockdep_depth > max_lockdep_depth))
		max_lockdep_depth = curr->lockdep_depth;

	return 1;
}
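
/*
 * Illustrative sketch (simplified from include/linux/lockdep.h): the
 * per-lock-type annotation wrappers map onto lock_acquire()/lock_release()
 * roughly as:
 *
 *	spin_acquire(l, sub, try, ip)    -> lock_acquire(l, sub, try, 0, 2, NULL, ip)
 *	rwlock_acquire_read(l, sub, ...) -> lock_acquire(l, sub, try, 2, 2, NULL, ip)
 *	mutex_release(l, nested, ip)     -> lock_release(l, nested, ip)
 *
 * i.e. read=0 for exclusive use, read=2 for recursive readers, and check=2
 * requests the full irq-state validation done by mark_irqflags() above.
 */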

static int
print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
			   unsigned long ip)
{
	if (!debug_locks_off())
		return 0;
	if (debug_locks_silent)
		return 0;

	printk("\n=====================================\n");
	printk(  "[ BUG: bad unlock balance detected! ]\n");
	printk(  "-------------------------------------\n");
	printk("%s/%d is trying to release lock (",
		curr->comm, task_pid_nr(curr));
	print_lockdep_cache(lock);
	printk(") at:\n");
	print_ip_sym(ip);
	printk("but there are no more locks to release!\n");
	printk("\nother info that might help us debug this:\n");
	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

/*
 * Common debugging checks for both nested and non-nested unlock:
 */
static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
			unsigned long ip)
{
	if (unlikely(!debug_locks))
		return 0;
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return 0;

	if (curr->lockdep_depth <= 0)
		return print_unlock_inbalance_bug(curr, lock, ip);

	return 1;
}

static int
__lock_set_class(struct lockdep_map *lock, const char *name,
		 struct lock_class_key *key, unsigned int subclass,
		 unsigned long ip)
{
	struct task_struct *curr = current;
	struct held_lock *hlock, *prev_hlock;
	struct lock_class *class;
	unsigned int depth;
	int i;

	depth = curr->lockdep_depth;
	if (DEBUG_LOCKS_WARN_ON(!depth))
		return 0;

	prev_hlock = NULL;
	for (i = depth-1; i >= 0; i--) {
		hlock = curr->held_locks + i;
		/*
		 * We must not cross into another context:
		 */
		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
			break;
		if (hlock->instance == lock)
			goto found_it;
		prev_hlock = hlock;
	}
	return print_unlock_inbalance_bug(curr, lock, ip);

found_it:
	lockdep_init_map(lock, name, key, 0);
	class = register_lock_class(lock, subclass, 0);
	hlock->class_idx = class - lock_classes + 1;

	curr->lockdep_depth = i;
	curr->curr_chain_key = hlock->prev_chain_key;

	for (; i < depth; i++) {
		hlock = curr->held_locks + i;
		if (!__lock_acquire(hlock->instance,
				hlock_class(hlock)->subclass, hlock->trylock,
				hlock->read, hlock->check, hlock->hardirqs_off,
				hlock->nest_lock, hlock->acquire_ip))
			return 0;
	}

	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
		return 0;
	return 1;
}
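
/*
 * Illustrative use (assumption, simplified): __lock_set_class() above is
 * the worker behind the exported lock_set_class(), on which the
 * lock_set_subclass() helper in include/linux/lockdep.h is built:
 *
 *	lock_set_subclass(&rq->lock.dep_map, 0, _RET_IP_);
 *
 * e.g. the scheduler uses it to re-annotate a run-queue lock it acquired
 * out of order, without actually dropping and re-taking the lock.
 */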

/*
 * Remove the lock from the list of currently held locks in a
 * potentially non-nested (out of order) manner. This is a
 * relatively rare operation, as all the unlock APIs default
 * to nested mode (which uses lock_release()):
 */
static int
lock_release_non_nested(struct task_struct *curr,
			struct lockdep_map *lock, unsigned long ip)
{
	struct held_lock *hlock, *prev_hlock;
	unsigned int depth;
	int i;

	/*
	 * Check whether the lock exists in the current stack
	 * of held locks:
	 */
	depth = curr->lockdep_depth;
	if (DEBUG_LOCKS_WARN_ON(!depth))
		return 0;

	prev_hlock = NULL;
	for (i = depth-1; i >= 0; i--) {
		hlock = curr->held_locks + i;
		/*
		 * We must not cross into another context:
		 */
		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
			break;
		if (hlock->instance == lock)
			goto found_it;
		prev_hlock = hlock;
	}
	return print_unlock_inbalance_bug(curr, lock, ip);

found_it:
	lock_release_holdtime(hlock);

	/*
	 * We have the right lock to unlock, 'hlock' points to it.
	 * Now we remove it from the stack, and add back the other
	 * entries (if any), recalculating the hash along the way:
	 */
	curr->lockdep_depth = i;
	curr->curr_chain_key = hlock->prev_chain_key;

	for (i++; i < depth; i++) {
		hlock = curr->held_locks + i;
		if (!__lock_acquire(hlock->instance,
				hlock_class(hlock)->subclass, hlock->trylock,
				hlock->read, hlock->check, hlock->hardirqs_off,
				hlock->nest_lock, hlock->acquire_ip))
			return 0;
	}

	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
		return 0;
	return 1;
}

/*
 * Remove the lock from the list of currently held locks - this gets
 * called on mutex_unlock()/spin_unlock*() (or on a failed
 * mutex_lock_interruptible()). This is done for unlocks that nest
 * perfectly. (i.e. the current top of the lock-stack is unlocked)
 */
static int lock_release_nested(struct task_struct *curr,
			       struct lockdep_map *lock, unsigned long ip)
{
	struct held_lock *hlock;
	unsigned int depth;

	/*
	 * Pop off the top of the lock stack:
	 */
	depth = curr->lockdep_depth - 1;
	hlock = curr->held_locks + depth;

	/*
	 * Is the unlock non-nested:
	 */
	if (hlock->instance != lock)
		return lock_release_non_nested(curr, lock, ip);
	curr->lockdep_depth--;

	if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
		return 0;

	curr->curr_chain_key = hlock->prev_chain_key;

	lock_release_holdtime(hlock);

#ifdef CONFIG_DEBUG_LOCKDEP
	hlock->prev_chain_key = 0;
	hlock->class_idx = 0;
	hlock->acquire_ip = 0;
	hlock->irq_context = 0;
#endif
	return 1;
}

/*
 * Remove the lock from the list of currently held locks - this gets
 * called on mutex_unlock()/spin_unlock*() (or on a failed
 * mutex_lock_interruptible()). This is done for unlocks that nest
 * perfectly. (i.e. the current top of the lock-stack is unlocked)
 */
static void
__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
{
	struct task_struct *curr = current;

	if (!check_unlock(curr, lock, ip))
		return;

	if (nested) {
		if (!lock_release_nested(curr, lock, ip))
			return;
	} else {
		if (!lock_release_non_nested(curr, lock, ip))
			return;
	}

	check_chain_key(curr);
}

/*
 * Check whether we follow the irq-flags state precisely:
 */
static void check_flags(unsigned long flags)
{
#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
    defined(CONFIG_TRACE_IRQFLAGS)
	if (!debug_locks)
		return;

	if (irqs_disabled_flags(flags)) {
		if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
			printk("possible reason: unannotated irqs-off.\n");
		}
	} else {
		if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
			printk("possible reason: unannotated irqs-on.\n");
		}
	}

	/*
	 * We don't accurately track softirq state in e.g.
	 * hardirq contexts (such as on 4KSTACKS), so only
	 * check if not in hardirq contexts:
	 */
	if (!hardirq_count()) {
		if (softirq_count())
			DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
		else
			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
	}

	if (!debug_locks)
		print_irqtrace_events(current);
#endif
}
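
/*
 * Illustrative only: the usual way to trip the "unannotated irqs-off"
 * warning above is code that flips the hardware flag behind lockdep's back:
 *
 *	raw_local_irq_disable();	<- hardware irqs go off...
 *	spin_lock(&lock);		<- ...but lockdep still thinks they are on
 *
 * The fix is to use local_irq_disable()/local_irq_save() (which call the
 * trace_hardirqs_*() hooks), or to add the annotation calls by hand.
 */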

void lock_set_class(struct lockdep_map *lock, const char *name,
		    struct lock_class_key *key, unsigned int subclass,
		    unsigned long ip)
{
	unsigned long flags;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	current->lockdep_recursion = 1;
	check_flags(flags);
	if (__lock_set_class(lock, name, key, subclass, ip))
		check_chain_key(current);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_set_class);

/*
 * We are not always called with irqs disabled - do that here,
 * and also avoid lockdep recursion:
 */
void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			  int trylock, int read, int check,
			  struct lockdep_map *nest_lock, unsigned long ip)
{
	unsigned long flags;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);

	current->lockdep_recursion = 1;
	__lock_acquire(lock, subclass, trylock, read, check,
		       irqs_disabled_flags(flags), nest_lock, ip);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquire);

void lock_release(struct lockdep_map *lock, int nested,
			  unsigned long ip)
{
	unsigned long flags;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);
	current->lockdep_recursion = 1;
	__lock_release(lock, nested, ip);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_release);

#ifdef CONFIG_LOCK_STAT
static int
print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
			   unsigned long ip)
{
	if (!debug_locks_off())
		return 0;
	if (debug_locks_silent)
		return 0;

	printk("\n=================================\n");
	printk(  "[ BUG: bad contention detected! ]\n");
	printk(  "---------------------------------\n");
	printk("%s/%d is trying to contend lock (",
		curr->comm, task_pid_nr(curr));
	print_lockdep_cache(lock);
	printk(") at:\n");
	print_ip_sym(ip);
	printk("but there are no locks held!\n");
	printk("\nother info that might help us debug this:\n");
	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

static void
__lock_contended(struct lockdep_map *lock, unsigned long ip)
{
	struct task_struct *curr = current;
	struct held_lock *hlock, *prev_hlock;
	struct lock_class_stats *stats;
	unsigned int depth;
	int i, contention_point, contending_point;

	depth = curr->lockdep_depth;
	if (DEBUG_LOCKS_WARN_ON(!depth))
		return;

	prev_hlock = NULL;
	for (i = depth-1; i >= 0; i--) {
		hlock = curr->held_locks + i;
		/*
		 * We must not cross into another context:
		 */
		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
			break;
		if (hlock->instance == lock)
			goto found_it;
		prev_hlock = hlock;
	}
	print_lock_contention_bug(curr, lock, ip);
	return;

found_it:
	hlock->waittime_stamp = sched_clock();

	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
	contending_point = lock_point(hlock_class(hlock)->contending_point,
				      lock->ip);

	stats = get_lock_stats(hlock_class(hlock));
	if (contention_point < LOCKSTAT_POINTS)
		stats->contention_point[contention_point]++;
	if (contending_point < LOCKSTAT_POINTS)
		stats->contending_point[contending_point]++;
	if (lock->cpu != smp_processor_id())
		stats->bounces[bounce_contended + !!hlock->read]++;
	put_lock_stats(stats);
}

static void
__lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
	struct task_struct *curr = current;
	struct held_lock *hlock, *prev_hlock;
	struct lock_class_stats *stats;
	unsigned int depth;
	u64 now;
	s64 waittime = 0;
	int i, cpu;

	depth = curr->lockdep_depth;
	if (DEBUG_LOCKS_WARN_ON(!depth))
		return;

	prev_hlock = NULL;
	for (i = depth-1; i >= 0; i--) {
		hlock = curr->held_locks + i;
		/*
		 * We must not cross into another context:
		 */
		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
			break;
		if (hlock->instance == lock)
			goto found_it;
		prev_hlock = hlock;
	}
	print_lock_contention_bug(curr, lock, _RET_IP_);
	return;

found_it:
	cpu = smp_processor_id();
	if (hlock->waittime_stamp) {
		now = sched_clock();
		waittime = now - hlock->waittime_stamp;
		hlock->holdtime_stamp = now;
	}

	stats = get_lock_stats(hlock_class(hlock));
	if (waittime) {
		if (hlock->read)
			lock_time_inc(&stats->read_waittime, waittime);
		else
			lock_time_inc(&stats->write_waittime, waittime);
	}
	if (lock->cpu != cpu)
		stats->bounces[bounce_acquired + !!hlock->read]++;
	put_lock_stats(stats);

	lock->cpu = cpu;
	lock->ip = ip;
}

void lock_contended(struct lockdep_map *lock, unsigned long ip)
{
	unsigned long flags;

	if (unlikely(!lock_stat))
		return;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);
	current->lockdep_recursion = 1;
	__lock_contended(lock, ip);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_contended);

void lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
	unsigned long flags;

	if (unlikely(!lock_stat))
		return;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);
	current->lockdep_recursion = 1;
	__lock_acquired(lock, ip);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquired);
#endif
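
/*
 * Illustrative sketch (assumption, simplified from kernel/mutex.c): the
 * contention hooks above are driven from the lock slowpaths, e.g.:
 *
 *	__mutex_lock_common()
 *		lock_contended(&lock->dep_map, ip);	<- about to block
 *		... spin/schedule until the lock is ours ...
 *		lock_acquired(&lock->dep_map, ip);	<- got it, record wait time
 *
 * The per-class numbers recorded here are reported via /proc/lock_stat.
 */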

/*
 * Used by the testsuite, sanitize the validator state
 * after a simulated failure:
 */
void lockdep_reset(void)
{
	unsigned long flags;
	int i;

	raw_local_irq_save(flags);
	current->curr_chain_key = 0;
	current->lockdep_depth = 0;
	current->lockdep_recursion = 0;
	memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
	nr_hardirq_chains = 0;
	nr_softirq_chains = 0;
	nr_process_chains = 0;
	debug_locks = 1;
	for (i = 0; i < CHAINHASH_SIZE; i++)
		INIT_LIST_HEAD(chainhash_table + i);
	raw_local_irq_restore(flags);
}

static void zap_class(struct lock_class *class)
{
	int i;

	/*
	 * Remove all dependencies this lock is
	 * involved in:
	 */
	for (i = 0; i < nr_list_entries; i++) {
		if (list_entries[i].class == class)
			list_del_rcu(&list_entries[i].entry);
	}
	/*
	 * Unhash the class and remove it from the all_lock_classes list:
	 */
	list_del_rcu(&class->hash_entry);
	list_del_rcu(&class->lock_entry);

	class->key = NULL;
}

static inline int within(const void *addr, void *start, unsigned long size)
{
	return addr >= start && addr < start + size;
}

void lockdep_free_key_range(void *start, unsigned long size)
{
	struct lock_class *class, *next;
	struct list_head *head;
	unsigned long flags;
	int i;
	int locked;

	raw_local_irq_save(flags);
	locked = graph_lock();

	/*
	 * Unhash all classes that were created by this module:
	 */
	for (i = 0; i < CLASSHASH_SIZE; i++) {
		head = classhash_table + i;
		if (list_empty(head))
			continue;
		list_for_each_entry_safe(class, next, head, hash_entry) {
			if (within(class->key, start, size))
				zap_class(class);
			else if (within(class->name, start, size))
				zap_class(class);
		}
	}

	if (locked)
		graph_unlock();
	raw_local_irq_restore(flags);
}
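
/*
 * Assumption (callers live elsewhere): lockdep_free_key_range() is called
 * from the module unload path, roughly:
 *
 *	free_module(mod)
 *		lockdep_free_key_range(mod->module_core, mod->core_size);
 *
 * so that classes whose keys or names lived in the module image do not
 * dangle once that memory is freed.
 */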

void lockdep_reset_lock(struct lockdep_map *lock)
{
	struct lock_class *class, *next;
	struct list_head *head;
	unsigned long flags;
	int i, j;
	int locked;

	raw_local_irq_save(flags);

	/*
	 * Remove all classes this lock might have:
	 */
	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
		/*
		 * If the class exists we look it up and zap it:
		 */
		class = look_up_lock_class(lock, j);
		if (class)
			zap_class(class);
	}
	/*
	 * Debug check: in the end all mapped classes should
	 * be gone.
	 */
	locked = graph_lock();
	for (i = 0; i < CLASSHASH_SIZE; i++) {
		head = classhash_table + i;
		if (list_empty(head))
			continue;
		list_for_each_entry_safe(class, next, head, hash_entry) {
			if (unlikely(class == lock->class_cache)) {
				if (debug_locks_off_graph_unlock())
					WARN_ON(1);
				goto out_restore;
			}
		}
	}
	if (locked)
		graph_unlock();

out_restore:
	raw_local_irq_restore(flags);
}

void lockdep_init(void)
{
	int i;

	/*
	 * Some architectures have their own start_kernel()
	 * code which calls lockdep_init(), while we also
	 * call lockdep_init() from the start_kernel() itself,
	 * and we want to initialize the hashes only once:
	 */
	if (lockdep_initialized)
		return;

	for (i = 0; i < CLASSHASH_SIZE; i++)
		INIT_LIST_HEAD(classhash_table + i);

	for (i = 0; i < CHAINHASH_SIZE; i++)
		INIT_LIST_HEAD(chainhash_table + i);

	lockdep_initialized = 1;
}

void __init lockdep_info(void)
{
	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");

	printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
	printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
	printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
	printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
	printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
	printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
	printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);

	printk(" memory used by lock dependency info: %lu kB\n",
		(sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
		sizeof(struct list_head) * CLASSHASH_SIZE +
		sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
		sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
		sizeof(struct list_head) * CHAINHASH_SIZE) / 1024);

	printk(" per task-struct memory footprint: %lu bytes\n",
		sizeof(struct held_lock) * MAX_LOCK_DEPTH);

#ifdef CONFIG_DEBUG_LOCKDEP
	if (lockdep_init_error) {
		printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
		printk("Call stack leading to lockdep invocation was:\n");
		print_stack_trace(&lockdep_init_trace, 0);
	}
#endif
}

static void
print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
		     const void *mem_to, struct held_lock *hlock)
{
	if (!debug_locks_off())
		return;
	if (debug_locks_silent)
		return;

	printk("\n=========================\n");
	printk(  "[ BUG: held lock freed! ]\n");
	printk(  "-------------------------\n");
	printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
	print_lock(hlock);
	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();
}

static inline int not_in_range(const void* mem_from, unsigned long mem_len,
				const void* lock_from, unsigned long lock_len)
{
	return lock_from + lock_len <= mem_from ||
		mem_from + mem_len <= lock_from;
}

/*
 * Called when kernel memory is freed (or unmapped), or if a lock
 * is destroyed or reinitialized - this code checks whether there is
 * any held lock in the memory range of <from> to <to>:
 */
void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
{
	struct task_struct *curr = current;
	struct held_lock *hlock;
	unsigned long flags;
	int i;

	if (unlikely(!debug_locks))
		return;

	local_irq_save(flags);
	for (i = 0; i < curr->lockdep_depth; i++) {
		hlock = curr->held_locks + i;

		if (not_in_range(mem_from, mem_len, hlock->instance,
					sizeof(*hlock->instance)))
			continue;

		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
		break;
	}
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
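
/*
 * Assumption (callers live elsewhere): the slab and vmalloc free paths call
 * this hook, roughly:
 *
 *	kfree(objp) -> debug_check_no_locks_freed(objp, obj_size(cachep));
 *	vfree(addr) -> debug_check_no_locks_freed(addr, area->size);
 *
 * so freeing memory that still contains a lock held by the current task is
 * reported immediately, not when the stale lock is next used.
 */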

static void print_held_locks_bug(struct task_struct *curr)
{
	if (!debug_locks_off())
		return;
	if (debug_locks_silent)
		return;

	printk("\n=====================================\n");
	printk(  "[ BUG: lock held at task exit time! ]\n");
	printk(  "-------------------------------------\n");
	printk("%s/%d is exiting with locks still held!\n",
		curr->comm, task_pid_nr(curr));
	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();
}

void debug_check_no_locks_held(struct task_struct *task)
{
	if (unlikely(task->lockdep_depth > 0))
		print_held_locks_bug(task);
}
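
/*
 * Assumption: debug_check_no_locks_held() is called from the task exit
 * path (do_exit() in kernel/exit.c), so a lock leaked across exit is
 * reported while the owner's held-lock stack can still be printed.
 */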

void debug_show_all_locks(void)
{
	struct task_struct *g, *p;
	int count = 10;
	int unlock = 1;

	if (unlikely(!debug_locks)) {
		printk("INFO: lockdep is turned off.\n");
		return;
	}
	printk("\nShowing all locks held in the system:\n");

	/*
	 * Here we try to get the tasklist_lock as hard as possible,
	 * if not successful after 2 seconds we ignore it (but keep
	 * trying). This is to enable a debug printout even if a
	 * tasklist_lock-holding task deadlocks or crashes.
	 */
retry:
	if (!read_trylock(&tasklist_lock)) {
		if (count == 10)
			printk("hm, tasklist_lock locked, retrying... ");
		if (count) {
			count--;
			printk(" #%d", 10-count);
			mdelay(200);
			goto retry;
		}
		printk(" ignoring it.\n");
		unlock = 0;
	} else {
		if (count != 10)
			printk(KERN_CONT " locked it.\n");
	}

	do_each_thread(g, p) {
		/*
		 * It's not reliable to print a task's held locks
		 * if it's not sleeping (or if it's not the current
		 * task):
		 */
		if (p->state == TASK_RUNNING && p != current)
			continue;
		if (p->lockdep_depth)
			lockdep_print_held_locks(p);
		if (!unlock)
			if (read_trylock(&tasklist_lock))
				unlock = 1;
	} while_each_thread(g, p);

	printk("\n");
	printk("=============================================\n\n");

	if (unlock)
		read_unlock(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(debug_show_all_locks);

/*
 * Careful: only use this function if you are sure that
 * the task cannot run in parallel!
 */
void __debug_show_held_locks(struct task_struct *task)
{
	if (unlikely(!debug_locks)) {
		printk("INFO: lockdep is turned off.\n");
		return;
	}
	lockdep_print_held_locks(task);
}
EXPORT_SYMBOL_GPL(__debug_show_held_locks);

void debug_show_held_locks(struct task_struct *task)
{
	__debug_show_held_locks(task);
}
EXPORT_SYMBOL_GPL(debug_show_held_locks);
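
/*
 * Assumption: debug_show_all_locks() is what SysRq-d ("show-all-locks")
 * invokes, and the hung-task/softlockup watchdogs use it as well, which is
 * why it tries hard (but not forever) to take tasklist_lock above.
 */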

void lockdep_sys_exit(void)
{
	struct task_struct *curr = current;

	if (unlikely(curr->lockdep_depth)) {
		if (!debug_locks_off())
			return;
		printk("\n================================================\n");
		printk(  "[ BUG: lock held when returning to user space! ]\n");
		printk(  "------------------------------------------------\n");
		printk("%s/%d is leaving the kernel with locks still held!\n",
				curr->comm, curr->pid);
		lockdep_print_held_locks(curr);
	}
}
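
/*
 * Assumption: lockdep_sys_exit() is invoked from the syscall-return path
 * (the LOCKDEP_SYS_EXIT hook in the arch entry code), so a lock still held
 * when returning to user space is reported here, right before lockdep
 * turns itself off via debug_locks_off().
 */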