/*
 * kernel/lockdep_proc.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Code for /proc/lockdep and /proc/lockdep_stats:
 */
13 #include <linux/module.h>
14 #include <linux/proc_fs.h>
15 #include <linux/seq_file.h>
16 #include <linux/kallsyms.h>
17 #include <linux/debug_locks.h>
18 #include <linux/vmalloc.h>
19 #include <linux/sort.h>
20 #include <asm/uaccess.h>
21 #include <asm/div64.h>
23 #include "lockdep_internals.h"
25 static void *l_next(struct seq_file *m, void *v, loff_t *pos)
27 struct lock_class *class = v;
31 if (class->lock_entry.next != &all_lock_classes)
32 class = list_entry(class->lock_entry.next, struct lock_class,
41 static void *l_start(struct seq_file *m, loff_t *pos)
43 struct lock_class *class = m->private;
45 if (&class->lock_entry == all_lock_classes.next)
46 seq_printf(m, "all lock classes:\n");
51 static void l_stop(struct seq_file *m, void *v)
55 static unsigned long count_forward_deps(struct lock_class *class)
57 struct lock_list *entry;
58 unsigned long ret = 1;
61 * Recurse this class's dependency list:
63 list_for_each_entry(entry, &class->locks_after, entry)
64 ret += count_forward_deps(entry->class);
69 static unsigned long count_backward_deps(struct lock_class *class)
71 struct lock_list *entry;
72 unsigned long ret = 1;
75 * Recurse this class's dependency list:
77 list_for_each_entry(entry, &class->locks_before, entry)
78 ret += count_backward_deps(entry->class);
83 static void print_name(struct seq_file *m, struct lock_class *class)
86 const char *name = class->name;
89 name = __get_key_name(class->key, str);
90 seq_printf(m, "%s", name);
92 seq_printf(m, "%s", name);
93 if (class->name_version > 1)
94 seq_printf(m, "#%d", class->name_version);
96 seq_printf(m, "/%d", class->subclass);
100 static int l_show(struct seq_file *m, void *v)
102 unsigned long nr_forward_deps, nr_backward_deps;
103 struct lock_class *class = m->private;
104 struct lock_list *entry;
107 seq_printf(m, "%p", class->key);
108 #ifdef CONFIG_DEBUG_LOCKDEP
109 seq_printf(m, " OPS:%8ld", class->ops);
111 nr_forward_deps = count_forward_deps(class);
112 seq_printf(m, " FD:%5ld", nr_forward_deps);
114 nr_backward_deps = count_backward_deps(class);
115 seq_printf(m, " BD:%5ld", nr_backward_deps);
117 get_usage_chars(class, &c1, &c2, &c3, &c4);
118 seq_printf(m, " %c%c%c%c", c1, c2, c3, c4);
121 print_name(m, class);
124 list_for_each_entry(entry, &class->locks_after, entry) {
125 if (entry->distance == 1) {
126 seq_printf(m, " -> [%p] ", entry->class);
127 print_name(m, entry->class);
136 static const struct seq_operations lockdep_ops = {
143 static int lockdep_open(struct inode *inode, struct file *file)
145 int res = seq_open(file, &lockdep_ops);
147 struct seq_file *m = file->private_data;
149 if (!list_empty(&all_lock_classes))
150 m->private = list_entry(all_lock_classes.next,
151 struct lock_class, lock_entry);
158 static const struct file_operations proc_lockdep_operations = {
159 .open = lockdep_open,
162 .release = seq_release,
/*
 * Append the CONFIG_DEBUG_LOCKDEP-only internal counters (chain-cache,
 * graph-walk and irq-event statistics) to /proc/lockdep_stats.
 * Compiles to an empty function when CONFIG_DEBUG_LOCKDEP is off.
 */
static void lockdep_stats_debug_show(struct seq_file *m)
{
#ifdef CONFIG_DEBUG_LOCKDEP
	unsigned int hi1 = debug_atomic_read(&hardirqs_on_events),
		     hi2 = debug_atomic_read(&hardirqs_off_events),
		     hr1 = debug_atomic_read(&redundant_hardirqs_on),
		     hr2 = debug_atomic_read(&redundant_hardirqs_off),
		     si1 = debug_atomic_read(&softirqs_on_events),
		     si2 = debug_atomic_read(&softirqs_off_events),
		     sr1 = debug_atomic_read(&redundant_softirqs_on),
		     sr2 = debug_atomic_read(&redundant_softirqs_off);

	seq_printf(m, " chain lookup misses:           %11u\n",
		debug_atomic_read(&chain_lookup_misses));
	seq_printf(m, " chain lookup hits:             %11u\n",
		debug_atomic_read(&chain_lookup_hits));
	seq_printf(m, " cyclic checks:                 %11u\n",
		debug_atomic_read(&nr_cyclic_checks));
	seq_printf(m, " cyclic-check recursions:       %11u\n",
		debug_atomic_read(&nr_cyclic_check_recursions));
	seq_printf(m, " find-mask forwards checks:     %11u\n",
		debug_atomic_read(&nr_find_usage_forwards_checks));
	seq_printf(m, " find-mask forwards recursions: %11u\n",
		debug_atomic_read(&nr_find_usage_forwards_recursions));
	seq_printf(m, " find-mask backwards checks:    %11u\n",
		debug_atomic_read(&nr_find_usage_backwards_checks));
	seq_printf(m, " find-mask backwards recursions:%11u\n",
		debug_atomic_read(&nr_find_usage_backwards_recursions));

	seq_printf(m, " hardirq on events:             %11u\n", hi1);
	seq_printf(m, " hardirq off events:            %11u\n", hi2);
	seq_printf(m, " redundant hardirq ons:         %11u\n", hr1);
	seq_printf(m, " redundant hardirq offs:        %11u\n", hr2);
	seq_printf(m, " softirq on events:             %11u\n", si1);
	seq_printf(m, " softirq off events:            %11u\n", si2);
	seq_printf(m, " redundant softirq ons:         %11u\n", sr1);
	seq_printf(m, " redundant softirq offs:        %11u\n", sr2);
#endif
}
205 static int lockdep_stats_show(struct seq_file *m, void *v)
207 struct lock_class *class;
208 unsigned long nr_unused = 0, nr_uncategorized = 0,
209 nr_irq_safe = 0, nr_irq_unsafe = 0,
210 nr_softirq_safe = 0, nr_softirq_unsafe = 0,
211 nr_hardirq_safe = 0, nr_hardirq_unsafe = 0,
212 nr_irq_read_safe = 0, nr_irq_read_unsafe = 0,
213 nr_softirq_read_safe = 0, nr_softirq_read_unsafe = 0,
214 nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
215 sum_forward_deps = 0, factor = 0;
217 list_for_each_entry(class, &all_lock_classes, lock_entry) {
219 if (class->usage_mask == 0)
221 if (class->usage_mask == LOCKF_USED)
223 if (class->usage_mask & LOCKF_USED_IN_IRQ)
225 if (class->usage_mask & LOCKF_ENABLED_IRQS)
227 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
229 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
231 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
233 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
235 if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
237 if (class->usage_mask & LOCKF_ENABLED_IRQS_READ)
238 nr_irq_read_unsafe++;
239 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
240 nr_softirq_read_safe++;
241 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
242 nr_softirq_read_unsafe++;
243 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
244 nr_hardirq_read_safe++;
245 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
246 nr_hardirq_read_unsafe++;
248 sum_forward_deps += count_forward_deps(class);
250 #ifdef CONFIG_DEBUG_LOCKDEP
251 DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused);
253 seq_printf(m, " lock-classes: %11lu [max: %lu]\n",
254 nr_lock_classes, MAX_LOCKDEP_KEYS);
255 seq_printf(m, " direct dependencies: %11lu [max: %lu]\n",
256 nr_list_entries, MAX_LOCKDEP_ENTRIES);
257 seq_printf(m, " indirect dependencies: %11lu\n",
261 * Total number of dependencies:
263 * All irq-safe locks may nest inside irq-unsafe locks,
264 * plus all the other known dependencies:
266 seq_printf(m, " all direct dependencies: %11lu\n",
267 nr_irq_unsafe * nr_irq_safe +
268 nr_hardirq_unsafe * nr_hardirq_safe +
272 * Estimated factor between direct and indirect
276 factor = sum_forward_deps / nr_list_entries;
278 #ifdef CONFIG_PROVE_LOCKING
279 seq_printf(m, " dependency chains: %11lu [max: %lu]\n",
280 nr_lock_chains, MAX_LOCKDEP_CHAINS);
283 #ifdef CONFIG_TRACE_IRQFLAGS
284 seq_printf(m, " in-hardirq chains: %11u\n",
286 seq_printf(m, " in-softirq chains: %11u\n",
289 seq_printf(m, " in-process chains: %11u\n",
291 seq_printf(m, " stack-trace entries: %11lu [max: %lu]\n",
292 nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES);
293 seq_printf(m, " combined max dependencies: %11u\n",
294 (nr_hardirq_chains + 1) *
295 (nr_softirq_chains + 1) *
296 (nr_process_chains + 1)
298 seq_printf(m, " hardirq-safe locks: %11lu\n",
300 seq_printf(m, " hardirq-unsafe locks: %11lu\n",
302 seq_printf(m, " softirq-safe locks: %11lu\n",
304 seq_printf(m, " softirq-unsafe locks: %11lu\n",
306 seq_printf(m, " irq-safe locks: %11lu\n",
308 seq_printf(m, " irq-unsafe locks: %11lu\n",
311 seq_printf(m, " hardirq-read-safe locks: %11lu\n",
312 nr_hardirq_read_safe);
313 seq_printf(m, " hardirq-read-unsafe locks: %11lu\n",
314 nr_hardirq_read_unsafe);
315 seq_printf(m, " softirq-read-safe locks: %11lu\n",
316 nr_softirq_read_safe);
317 seq_printf(m, " softirq-read-unsafe locks: %11lu\n",
318 nr_softirq_read_unsafe);
319 seq_printf(m, " irq-read-safe locks: %11lu\n",
321 seq_printf(m, " irq-read-unsafe locks: %11lu\n",
324 seq_printf(m, " uncategorized locks: %11lu\n",
326 seq_printf(m, " unused locks: %11lu\n",
328 seq_printf(m, " max locking depth: %11u\n",
330 seq_printf(m, " max recursion depth: %11u\n",
331 max_recursion_depth);
332 lockdep_stats_debug_show(m);
333 seq_printf(m, " debug_locks: %11u\n",
339 static int lockdep_stats_open(struct inode *inode, struct file *file)
341 return single_open(file, lockdep_stats_show, NULL);
344 static const struct file_operations proc_lockdep_stats_operations = {
345 .open = lockdep_stats_open,
348 .release = seq_release,
351 #ifdef CONFIG_LOCK_STAT
353 struct lock_stat_data {
354 struct lock_class *class;
355 struct lock_class_stats stats;
358 struct lock_stat_seq {
359 struct lock_stat_data *iter;
360 struct lock_stat_data *iter_end;
361 struct lock_stat_data stats[MAX_LOCKDEP_KEYS];
365 * sort on absolute number of contentions
367 static int lock_stat_cmp(const void *l, const void *r)
369 const struct lock_stat_data *dl = l, *dr = r;
370 unsigned long nl, nr;
372 nl = dl->stats.read_waittime.nr + dl->stats.write_waittime.nr;
373 nr = dr->stats.read_waittime.nr + dr->stats.write_waittime.nr;
/* Emit `offset` spaces followed by `length` copies of `c`, then a newline. */
static void seq_line(struct seq_file *m, char c, int offset, int length)
{
	int i;

	for (i = 0; i < offset; i++)
		seq_puts(m, " ");
	for (i = 0; i < length; i++)
		seq_printf(m, "%c", c);
	seq_puts(m, "\n");
}
389 static void snprint_time(char *buf, size_t bufsiz, s64 nr)
393 rem = do_div(nr, 1000); /* XXX: do_div_signed */
394 snprintf(buf, bufsiz, "%lld.%02d", (long long)nr, ((int)rem+5)/10);
397 static void seq_time(struct seq_file *m, s64 time)
401 snprint_time(num, sizeof(num), time);
402 seq_printf(m, " %14s", num);
405 static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
407 seq_printf(m, "%14lu", lt->nr);
408 seq_time(m, lt->min);
409 seq_time(m, lt->max);
410 seq_time(m, lt->total);
413 static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
416 struct lock_class *class;
417 struct lock_class_stats *stats;
421 stats = &data->stats;
423 snprintf(name, 38, "%s", class->name);
424 namelen = strlen(name);
426 if (stats->write_holdtime.nr) {
427 if (stats->read_holdtime.nr)
428 seq_printf(m, "%38s-W:", name);
430 seq_printf(m, "%40s:", name);
432 seq_lock_time(m, &stats->write_waittime);
434 seq_lock_time(m, &stats->write_holdtime);
438 if (stats->read_holdtime.nr) {
439 seq_printf(m, "%38s-R:", name);
440 seq_lock_time(m, &stats->read_waittime);
442 seq_lock_time(m, &stats->read_holdtime);
446 if (stats->read_waittime.nr + stats->write_waittime.nr == 0)
449 if (stats->read_holdtime.nr)
452 for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) {
453 char sym[KSYM_SYMBOL_LEN];
456 if (class->contention_point[i] == 0)
460 seq_line(m, '-', 40-namelen, namelen);
462 sprint_symbol(sym, class->contention_point[i]);
463 snprintf(ip, sizeof(ip), "[<%p>]",
464 (void *)class->contention_point[i]);
465 seq_printf(m, "%40s %14lu %29s %s\n", name,
466 stats->contention_point[i],
471 seq_line(m, '.', 0, 40 + 1 + 8 * (14 + 1));
/* Print the /proc/lock_stat banner: version line plus the column headers. */
static void seq_header(struct seq_file *m)
{
	seq_printf(m, "lock_stat version 0.1\n");
	seq_line(m, '-', 0, 40 + 1 + 8 * (14 + 1));
	seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s\n",
			"class name",
			"contentions",
			"waittime-min",
			"waittime-max",
			"waittime-total",
			"acquisitions",
			"holdtime-min",
			"holdtime-max",
			"holdtime-total");
	seq_line(m, '-', 0, 40 + 1 + 8 * (14 + 1));
	seq_printf(m, "\n");
}
494 static void *ls_start(struct seq_file *m, loff_t *pos)
496 struct lock_stat_seq *data = m->private;
498 if (data->iter == data->stats)
504 static void *ls_next(struct seq_file *m, void *v, loff_t *pos)
506 struct lock_stat_seq *data = m->private;
512 if (data->iter == data->iter_end)
518 static void ls_stop(struct seq_file *m, void *v)
522 static int ls_show(struct seq_file *m, void *v)
524 struct lock_stat_seq *data = m->private;
526 seq_stats(m, data->iter);
530 static struct seq_operations lockstat_ops = {
537 static int lock_stat_open(struct inode *inode, struct file *file)
540 struct lock_class *class;
541 struct lock_stat_seq *data = vmalloc(sizeof(struct lock_stat_seq));
546 res = seq_open(file, &lockstat_ops);
548 struct lock_stat_data *iter = data->stats;
549 struct seq_file *m = file->private_data;
552 list_for_each_entry(class, &all_lock_classes, lock_entry) {
554 iter->stats = lock_stats(class);
557 data->iter_end = iter;
559 sort(data->stats, data->iter_end - data->iter,
560 sizeof(struct lock_stat_data),
561 lock_stat_cmp, NULL);
570 static ssize_t lock_stat_write(struct file *file, const char __user *buf,
571 size_t count, loff_t *ppos)
573 struct lock_class *class;
577 if (get_user(c, buf))
583 list_for_each_entry(class, &all_lock_classes, lock_entry)
584 clear_lock_stats(class);
589 static int lock_stat_release(struct inode *inode, struct file *file)
591 struct seq_file *seq = file->private_data;
595 return seq_release(inode, file);
598 static const struct file_operations proc_lock_stat_operations = {
599 .open = lock_stat_open,
600 .write = lock_stat_write,
603 .release = lock_stat_release,
605 #endif /* CONFIG_LOCK_STAT */
607 static int __init lockdep_proc_init(void)
609 struct proc_dir_entry *entry;
611 entry = create_proc_entry("lockdep", S_IRUSR, NULL);
613 entry->proc_fops = &proc_lockdep_operations;
615 entry = create_proc_entry("lockdep_stats", S_IRUSR, NULL);
617 entry->proc_fops = &proc_lockdep_stats_operations;
619 #ifdef CONFIG_LOCK_STAT
620 entry = create_proc_entry("lock_stat", S_IRUSR, NULL);
622 entry->proc_fops = &proc_lock_stat_operations;
628 __initcall(lockdep_proc_init);