/*
 * Copyright (C) 2009 Matt Fleming
 *
 * Based, in part, on kernel/time/clocksource.c.
 *
 * This file provides arbitration code for stack unwinders.
 *
 * Multiple stack unwinders can be available on a system, usually with
 * the most accurate unwinder being the currently active one.
 */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/unwinder.h>
#include <asm/atomic.h>
/*
 * This is the most basic stack unwinder an architecture can
 * provide. For architectures without reliable frame pointers, e.g.
 * RISC CPUs, it can be implemented by looking through the stack for
 * addresses that lie within the kernel text section; a sketch of that
 * approach follows the stack_reader definition below.
 *
 * Other CPUs, e.g. x86, can use their frame pointer register to
 * construct more accurate stack traces.
 */
static struct list_head unwinder_list;
static struct unwinder stack_reader = {
	.name = "stack-reader",
	.dump = stack_reader_dump,
	.rating = 50,
	.list = {
		.next = &unwinder_list,
		.prev = &unwinder_list,
	},
};
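
/*
 * For illustration only: a minimal sketch of the scanning approach
 * described above. The real stack_reader_dump() is declared in
 * <asm/unwinder.h> and defined elsewhere; this hypothetical version,
 * which assumes an x86-style ops->address(data, addr, reliable) hook,
 * is only meant to show the idea.
 */
#if 0
static void example_scan_dump(struct task_struct *task, struct pt_regs *regs,
			      unsigned long *sp,
			      const struct stacktrace_ops *ops, void *data)
{
	/* Walk up the stack one word at a time... */
	while (!kstack_end(sp)) {
		unsigned long addr = *sp++;

		/*
		 * ...and report anything that points into kernel text.
		 * Mark it unreliable: a text address found on the stack
		 * is not necessarily a real return address.
		 */
		if (__kernel_text_address(addr))
			ops->address(data, addr, 0);
	}
}
#endif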
/*
 * "curr_unwinder" points to the stack unwinder currently in use. This
 * is the unwinder with the highest rating.
 *
 * "unwinder_list" is a linked-list of all available unwinders, sorted
 * by rating in descending order.
 *
 * All modifications of "curr_unwinder" and "unwinder_list" must be
 * performed whilst holding "unwinder_lock".
 */
static struct unwinder *curr_unwinder = &stack_reader;

static struct list_head unwinder_list = {
	.next = &stack_reader.list,
	.prev = &stack_reader.list,
};

static DEFINE_SPINLOCK(unwinder_lock);

static atomic_t unwinder_running = ATOMIC_INIT(0);
/**
 * select_unwinder - Select the best registered stack unwinder.
 *
 * Private function. Must hold unwinder_lock when called.
 *
 * Select the stack unwinder with the best rating. This is useful for
 * setting up curr_unwinder. Returns NULL if the list is empty or if
 * the best-rated unwinder is already curr_unwinder.
 */
static struct unwinder *select_unwinder(void)
{
	struct unwinder *best;

	if (list_empty(&unwinder_list))
		return NULL;

	/* The list is sorted in descending order, so the head is best. */
	best = list_entry(unwinder_list.next, struct unwinder, list);

	/* Returning NULL signals that no change of unwinder is needed. */
	if (best == curr_unwinder)
		return NULL;

	return best;
}
/*
 * Enqueue the stack unwinder sorted by rating, highest first.
 */
static int unwinder_enqueue(struct unwinder *ops)
{
	struct list_head *tmp, *entry = &unwinder_list;

	list_for_each(tmp, &unwinder_list) {
		struct unwinder *o;

		o = list_entry(tmp, struct unwinder, list);
		if (o == ops)
			return -EBUSY;
		/* Keep track of the place where to insert */
		if (o->rating >= ops->rating)
			entry = tmp;
	}
	list_add(&ops->list, entry);

	return 0;
}
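
/*
 * For illustration only, a hypothetical sketch: starting from the
 * default list containing just stack_reader (rating 50), enqueueing
 * unwinders rated 100 and 150 yields a list ordered 150 -> 100 -> 50
 * from the head, so unwinder_list.next is always the best candidate,
 * which is exactly what select_unwinder() relies on.
 */
#if 0
static struct unwinder example_a = { .name = "example-a", .rating = 100 };
static struct unwinder example_b = { .name = "example-b", .rating = 150 };

static void example_enqueue_order(void)
{
	unwinder_enqueue(&example_a);	/* a(100), stack-reader(50) */
	unwinder_enqueue(&example_b);	/* b(150), a(100), stack-reader(50) */
}
#endif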
/**
 * unwinder_register - Used to install new stack unwinder
 * @u: unwinder to be registered
 *
 * Install the new stack unwinder on the unwinder list, which is sorted
 * by rating.
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 */
int unwinder_register(struct unwinder *u)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&unwinder_lock, flags);
	ret = unwinder_enqueue(u);
	if (!ret) {
		struct unwinder *best = select_unwinder();

		/*
		 * select_unwinder() returns NULL when the current
		 * unwinder should be kept, so only switch on a real
		 * upgrade.
		 */
		if (best)
			curr_unwinder = best;
	}
	spin_unlock_irqrestore(&unwinder_lock, flags);

	return ret;
}
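
/*
 * For illustration only: how an architecture might install a more
 * accurate unwinder, e.g. a DWARF-based one. All names below are
 * hypothetical. Because a rating of 150 beats stack_reader's 50, the
 * new unwinder becomes curr_unwinder on successful registration.
 */
#if 0
static void example_dwarf_dump(struct task_struct *task,
			       struct pt_regs *regs, unsigned long *sp,
			       const struct stacktrace_ops *ops, void *data);

static struct unwinder example_dwarf_unwinder = {
	.name	= "example-dwarf",
	.rating	= 150,
	.dump	= example_dwarf_dump,	/* arch-specific dump callback */
};

static int __init example_unwinder_init(void)
{
	return unwinder_register(&example_dwarf_unwinder);
}
#endif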
/*
 * Unwind the call stack and pass information to the stacktrace_ops
 * functions. Also handle the case where we need to switch to a new
 * stack dumper because the current one faulted unexpectedly.
 */
void unwind_stack(struct task_struct *task, struct pt_regs *regs,
		  unsigned long *sp, const struct stacktrace_ops *ops,
		  void *data)
{
	unsigned long flags;
	/*
	 * The problem with unwinders with high ratings is that they are
	 * inherently more complicated than the simple ones with lower
	 * ratings. We are therefore more likely to fault in the
	 * complicated ones, e.g. hitting BUG()s. If we fault in the
	 * code for the current stack unwinder we try to downgrade to
	 * one with a lower rating.
	 *
	 * Hopefully this will give us a semi-reliable stacktrace so we
	 * can diagnose why curr_unwinder->dump() faulted.
	 */
	if (atomic_inc_return(&unwinder_running) != 1) {
		/*
		 * A nested call means the previous invocation never
		 * returned: curr_unwinder->dump() most likely faulted,
		 * so drop it and fall back to the next best unwinder,
		 * provided there is one left to fall back to.
		 */
		spin_lock_irqsave(&unwinder_lock, flags);

		if (!list_is_singular(&unwinder_list)) {
			list_del(&curr_unwinder->list);
			curr_unwinder = select_unwinder();
		}

		spin_unlock_irqrestore(&unwinder_lock, flags);
		atomic_dec(&unwinder_running);
	}

	curr_unwinder->dump(task, regs, sp, ops, data);

	atomic_dec(&unwinder_running);
}
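
/*
 * For illustration only, a hypothetical unwinder whose dump() callback
 * faults: the resulting oops path prints a stacktrace of its own,
 * re-entering unwind_stack() while unwinder_running is still elevated,
 * which is exactly the condition the downgrade path above catches.
 */
#if 0
static void fragile_dump(struct task_struct *task, struct pt_regs *regs,
			 unsigned long *sp, const struct stacktrace_ops *ops,
			 void *data)
{
	/*
	 * BUG() traps here; when the trap handler dumps the stack it
	 * re-enters unwind_stack() with a nested unwinder_running
	 * count, so this unwinder is unlinked and a lower-rated one
	 * takes over for the retry.
	 */
	BUG();
}

static struct unwinder fragile_unwinder = {
	.name	= "fragile",
	.rating	= 200,
	.dump	= fragile_dump,
};
#endif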