unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
+#ifdef CONFIG_PROVE_LOCKING
/*
* Allocate a lockdep entry. (assumes the graph_lock held, returns
* with NULL on failure)
 */
return list_entries + nr_list_entries++;
}
+#endif
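
/*
 * Illustrative sketch only, not part of the patch: the general
 * "bump allocator over a static array" pattern that the hunk above
 * wraps in CONFIG_PROVE_LOCKING - hand out slots from a fixed pool
 * and fail once it is exhausted.  All names below (pool, nr_used,
 * MAX_ENTRIES, struct entry, alloc_entry) are hypothetical.
 */
#include <stdio.h>

#define MAX_ENTRIES 16384

struct entry {
	int payload;
};

static struct entry pool[MAX_ENTRIES];
static unsigned long nr_used;

/* Hand out the next free slot, or NULL once the pool is exhausted. */
static struct entry *alloc_entry(void)
{
	if (nr_used >= MAX_ENTRIES)
		return NULL;
	return pool + nr_used++;
}

int main(void)
{
	struct entry *e = alloc_entry();

	if (e)
		e->payload = 42;
	printf("slots used: %lu\n", nr_used);
	return 0;
}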
/*
 * All data structures here are protected by the global debug_lock.
 */
static struct list_head classhash_table[CLASSHASH_SIZE];
unsigned long nr_lock_chains;
+#ifdef CONFIG_PROVE_LOCKING
static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
+#endif
/*
 * We put the lock dependency chains into a hash-table as well, to cache
 * their existence:
 */
}
}
+#ifdef CONFIG_PROVE_LOCKING
/*
* Add a new dependency to the head of the list:
*/
return 0;
}
+#endif
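
/*
 * Illustrative sketch only, not part of the patch: the hunks above
 * cache dependency chains in a hash table and add each new dependency
 * at the head of a list.  This sketch shows the add-at-head pattern,
 * with a plain singly linked list standing in for the kernel's
 * struct list_head.  All names (struct dep, add_dep, ...) are
 * hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

struct dep {
	const char *target;	/* class we depend on */
	struct dep *next;
};

/* Push a new edge at the head; return 0 on allocation failure, 1 on success. */
static int add_dep(struct dep **head, const char *target)
{
	struct dep *d = malloc(sizeof(*d));

	if (!d)
		return 0;
	d->target = target;
	d->next = *head;
	*head = d;
	return 1;
}

int main(void)
{
	struct dep *after_A = NULL;

	add_dep(&after_A, "B");
	add_dep(&after_A, "C");	/* newest entry ends up first */
	for (struct dep *d = after_A; d; d = d->next)
		printf("A -> %s\n", d->target);
	return 0;
}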
static void print_kernel_version(void)
{
	printk("%s %.*s\n", init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
}
+#ifdef CONFIG_PROVE_LOCKING
/*
* When a circular dependency is detected, print the
* header first:
 */
return 1;
}
+#endif
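
/*
 * Illustrative sketch only, not part of the patch: what "a circular
 * dependency is detected" means in the hunk above - a cycle in the
 * lock dependency graph, found here with a toy DFS that prints a
 * report header when it hits a back edge.  The graph, node names and
 * report text are all made up for illustration.
 */
#include <stdio.h>

#define N 3

static const int edge[N][N] = {	/* edge[i][j]: i was held while taking j */
	{ 0, 1, 0 },		/* A -> B */
	{ 0, 0, 1 },		/* B -> C */
	{ 1, 0, 0 },		/* C -> A closes the cycle */
};
static const char *name[N] = { "A", "B", "C" };
static int state[N];		/* 0 = unvisited, 1 = on stack, 2 = done */

static int dfs(int u)
{
	state[u] = 1;
	for (int v = 0; v < N; v++) {
		if (!edge[u][v])
			continue;
		if (state[v] == 1)
			return 1;	/* back edge: cycle found */
		if (state[v] == 0 && dfs(v))
			return 1;
	}
	state[u] = 2;
	return 0;
}

int main(void)
{
	for (int i = 0; i < N; i++) {
		if (state[i] == 0 && dfs(i)) {
			printf("=======================================\n");
			printf("circular locking dependency detected\n");
			printf("(cycle reachable from %s)\n", name[i]);
			return 0;
		}
	}
	printf("no cycles\n");
	return 0;
}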
static int very_verbose(struct lock_class *class)
{
#endif
+#ifdef CONFIG_PROVE_LOCKING
static int
print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
struct held_lock *next)
return 0;
}
-
+#endif
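
/*
 * Illustrative sketch only, not part of the patch: the kind of check
 * that leads to a "deadlock bug" report like the one printed above -
 * a task acquiring a lock whose class it already holds.  The held-lock
 * array, class numbers and function names are hypothetical.
 */
#include <stdio.h>

#define MAX_HELD 8

static int held_class[MAX_HELD];	/* classes currently held */
static int nr_held;

/* Return 1 if taking 'class' again would self-deadlock, else 0. */
static int check_recursion(int class)
{
	for (int i = 0; i < nr_held; i++)
		if (held_class[i] == class)
			return 1;
	return 0;
}

static void acquire(int class)
{
	if (check_recursion(class)) {
		printf("possible recursive locking detected (class %d)\n", class);
		return;
	}
	if (nr_held < MAX_HELD)
		held_class[nr_held++] = class;
}

int main(void)
{
	acquire(1);
	acquire(2);
	acquire(1);	/* same class already held: triggers the report */
	return 0;
}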
/*
 * Is this the address of a static object:
 */
return class;
}
+#ifdef CONFIG_PROVE_LOCKING
/*
* Look up a dependency chain. If the key is not present yet then
 * add it and return 1 - in this case the new dependency chain is
 * validated. If the key is already hashed, return 0.
 */
return 1;
}
+#endif
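
/*
 * Illustrative sketch only, not part of the patch: a lookup-or-insert
 * cache keyed by a chain key, returning 1 when the key was newly added
 * (the caller then validates the new chain) and 0 when it was already
 * cached - the shape of the dependency-chain lookup described above.
 * Table size, hash and all names are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

#define NBUCKETS 64

struct chain {
	unsigned long key;
	struct chain *next;
};

static struct chain *bucket[NBUCKETS];

/* Return 1 if 'key' was not cached yet (and cache it), 0 if it was. */
static int lookup_chain(unsigned long key)
{
	unsigned int h = key % NBUCKETS;
	struct chain *c;

	for (c = bucket[h]; c; c = c->next)
		if (c->key == key)
			return 0;	/* already known: nothing to validate */

	c = malloc(sizeof(*c));
	if (!c)
		return 0;		/* treat allocation failure as "seen" */
	c->key = key;
	c->next = bucket[h];
	bucket[h] = c;
	return 1;			/* new chain: caller validates it */
}

int main(void)
{
	printf("%d\n", lookup_chain(0x1234));	/* 1: first time */
	printf("%d\n", lookup_chain(0x1234));	/* 0: cached */
	return 0;
}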
/*
 * We are building curr_chain_key incrementally, so double-check
 * it from scratch, to make sure that it's done correctly:
 */
/*
 * On lockdep we dont want the hand-coded irq-enable of
 * _raw_spin_lock_flags() code, because lockdep assumes
* that interrupts are not re-enabled during lock-acquire:
*/
-#ifdef CONFIG_PROVE_LOCKING
+#ifdef CONFIG_LOCKDEP
_raw_spin_lock(lock);
#else
_raw_spin_lock_flags(lock, &flags);
/*
 * On lockdep we dont want the hand-coded irq-enable of
 * _raw_spin_lock_flags() code, because lockdep assumes
* that interrupts are not re-enabled during lock-acquire:
*/
-#ifdef CONFIG_PROVE_SPIN_LOCKING
+#ifdef CONFIG_LOCKDEP
_raw_spin_lock(lock);
#else
_raw_spin_lock_flags(lock, &flags);
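
/*
 * Illustrative sketch only, not part of the patch: the compile-time
 * selection pattern the two hunks above correct - when the lock
 * validator is built in, take the plain acquire path so interrupts
 * stay disabled across the acquire; otherwise use the flags-aware
 * variant that may briefly re-enable them while spinning.  Everything
 * below (DEBUG_LOCKING, struct mylock, both acquire stubs) is
 * hypothetical user-space stand-in code.
 */
#include <stdio.h>

struct mylock { int locked; };

static void plain_lock(struct mylock *l)
{
	l->locked = 1;		/* stand-in: interrupts remain off throughout */
}

static void flags_lock(struct mylock *l, unsigned long *flags)
{
	(void)flags;		/* stand-in: may re-enable IRQs from *flags while waiting */
	l->locked = 1;
}

static unsigned long lock_irqsave(struct mylock *l)
{
	unsigned long flags = 0;	/* pretend the IRQ state was saved here */

#ifdef DEBUG_LOCKING
	/* validator enabled: never re-enable interrupts mid-acquire */
	plain_lock(l);
#else
	flags_lock(l, &flags);
#endif
	return flags;
}

int main(void)
{
	struct mylock l = { 0 };

	lock_irqsave(&l);
	printf("locked: %d\n", l.locked);
	return 0;
}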