]> git.karo-electronics.de Git - karo-tx-linux.git/blob - arch/sh/kernel/hw_breakpoint.c
Merge remote-tracking branch 'md/for-next'
[karo-tx-linux.git] / arch / sh / kernel / hw_breakpoint.c
1 /*
2  * arch/sh/kernel/hw_breakpoint.c
3  *
4  * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
5  *
6  * Copyright (C) 2009 - 2010  Paul Mundt
7  *
8  * This file is subject to the terms and conditions of the GNU General Public
9  * License.  See the file "COPYING" in the main directory of this archive
10  * for more details.
11  */
12 #include <linux/init.h>
13 #include <linux/perf_event.h>
14 #include <linux/hw_breakpoint.h>
15 #include <linux/percpu.h>
16 #include <linux/kallsyms.h>
17 #include <linux/notifier.h>
18 #include <linux/kprobes.h>
19 #include <linux/kdebug.h>
20 #include <linux/io.h>
21 #include <linux/clk.h>
22 #include <asm/hw_breakpoint.h>
23 #include <asm/mmu_context.h>
24 #include <asm/ptrace.h>
25 #include <asm/traps.h>
26
/*
 * Stores the breakpoint currently installed on each UBC channel's
 * address register, for each CPU.
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);

/*
 * A dummy placeholder for early accesses until the CPUs get a chance to
 * register their UBCs later in the boot process.
 */
static struct sh_ubc ubc_dummy = { .num_events = 0 };

/* Active UBC backend; points at ubc_dummy until register_sh_ubc() runs. */
static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy;
40
41 /*
42  * Install a perf counter breakpoint.
43  *
44  * We seek a free UBC channel and use it for this breakpoint.
45  *
46  * Atomic: we hold the counter->ctx->lock and we only handle variables
47  * and registers local to this cpu.
48  */
49 int arch_install_hw_breakpoint(struct perf_event *bp)
50 {
51         struct arch_hw_breakpoint *info = counter_arch_bp(bp);
52         int i;
53
54         for (i = 0; i < sh_ubc->num_events; i++) {
55                 struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
56
57                 if (!*slot) {
58                         *slot = bp;
59                         break;
60                 }
61         }
62
63         if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
64                 return -EBUSY;
65
66         clk_enable(sh_ubc->clk);
67         sh_ubc->enable(info, i);
68
69         return 0;
70 }
71
72 /*
73  * Uninstall the breakpoint contained in the given counter.
74  *
75  * First we search the debug address register it uses and then we disable
76  * it.
77  *
78  * Atomic: we hold the counter->ctx->lock and we only handle variables
79  * and registers local to this cpu.
80  */
81 void arch_uninstall_hw_breakpoint(struct perf_event *bp)
82 {
83         struct arch_hw_breakpoint *info = counter_arch_bp(bp);
84         int i;
85
86         for (i = 0; i < sh_ubc->num_events; i++) {
87                 struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
88
89                 if (*slot == bp) {
90                         *slot = NULL;
91                         break;
92                 }
93         }
94
95         if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
96                 return;
97
98         sh_ubc->disable(info, i);
99         clk_disable(sh_ubc->clk);
100 }
101
102 static int get_hbp_len(u16 hbp_len)
103 {
104         unsigned int len_in_bytes = 0;
105
106         switch (hbp_len) {
107         case SH_BREAKPOINT_LEN_1:
108                 len_in_bytes = 1;
109                 break;
110         case SH_BREAKPOINT_LEN_2:
111                 len_in_bytes = 2;
112                 break;
113         case SH_BREAKPOINT_LEN_4:
114                 len_in_bytes = 4;
115                 break;
116 #ifdef UBC_64BIT
117         case SH_BREAKPOINT_LEN_8:
118                 len_in_bytes = 8;
119                 break;
120 #endif
121         }
122         return len_in_bytes;
123 }
124
125 /*
126  * Check for virtual address in kernel space.
127  */
128 int arch_check_bp_in_kernelspace(struct perf_event *bp)
129 {
130         unsigned int len;
131         unsigned long va;
132         struct arch_hw_breakpoint *info = counter_arch_bp(bp);
133
134         va = info->address;
135         len = get_hbp_len(info->len);
136
137         return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
138 }
139
140 int arch_bp_generic_fields(int sh_len, int sh_type,
141                            int *gen_len, int *gen_type)
142 {
143         /* Len */
144         switch (sh_len) {
145         case SH_BREAKPOINT_LEN_1:
146                 *gen_len = HW_BREAKPOINT_LEN_1;
147                 break;
148         case SH_BREAKPOINT_LEN_2:
149                 *gen_len = HW_BREAKPOINT_LEN_2;
150                 break;
151         case SH_BREAKPOINT_LEN_4:
152                 *gen_len = HW_BREAKPOINT_LEN_4;
153                 break;
154 #ifdef UBC_64BIT
155         case SH_BREAKPOINT_LEN_8:
156                 *gen_len = HW_BREAKPOINT_LEN_8;
157                 break;
158 #endif
159         default:
160                 return -EINVAL;
161         }
162
163         /* Type */
164         switch (sh_type) {
165         case SH_BREAKPOINT_READ:
166                 *gen_type = HW_BREAKPOINT_R;
167         case SH_BREAKPOINT_WRITE:
168                 *gen_type = HW_BREAKPOINT_W;
169                 break;
170         case SH_BREAKPOINT_RW:
171                 *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
172                 break;
173         default:
174                 return -EINVAL;
175         }
176
177         return 0;
178 }
179
180 static int arch_build_bp_info(struct perf_event *bp)
181 {
182         struct arch_hw_breakpoint *info = counter_arch_bp(bp);
183
184         info->address = bp->attr.bp_addr;
185
186         /* Len */
187         switch (bp->attr.bp_len) {
188         case HW_BREAKPOINT_LEN_1:
189                 info->len = SH_BREAKPOINT_LEN_1;
190                 break;
191         case HW_BREAKPOINT_LEN_2:
192                 info->len = SH_BREAKPOINT_LEN_2;
193                 break;
194         case HW_BREAKPOINT_LEN_4:
195                 info->len = SH_BREAKPOINT_LEN_4;
196                 break;
197 #ifdef UBC_64BIT
198         case HW_BREAKPOINT_LEN_8:
199                 info->len = SH_BREAKPOINT_LEN_8;
200                 break;
201 #endif
202         default:
203                 return -EINVAL;
204         }
205
206         /* Type */
207         switch (bp->attr.bp_type) {
208         case HW_BREAKPOINT_R:
209                 info->type = SH_BREAKPOINT_READ;
210                 break;
211         case HW_BREAKPOINT_W:
212                 info->type = SH_BREAKPOINT_WRITE;
213                 break;
214         case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
215                 info->type = SH_BREAKPOINT_RW;
216                 break;
217         default:
218                 return -EINVAL;
219         }
220
221         return 0;
222 }
223
224 /*
225  * Validate the arch-specific HW Breakpoint register settings
226  */
227 int arch_validate_hwbkpt_settings(struct perf_event *bp)
228 {
229         struct arch_hw_breakpoint *info = counter_arch_bp(bp);
230         unsigned int align;
231         int ret;
232
233         ret = arch_build_bp_info(bp);
234         if (ret)
235                 return ret;
236
237         ret = -EINVAL;
238
239         switch (info->len) {
240         case SH_BREAKPOINT_LEN_1:
241                 align = 0;
242                 break;
243         case SH_BREAKPOINT_LEN_2:
244                 align = 1;
245                 break;
246         case SH_BREAKPOINT_LEN_4:
247                 align = 3;
248                 break;
249 #ifdef UBC_64BIT
250         case SH_BREAKPOINT_LEN_8:
251                 align = 7;
252                 break;
253 #endif
254         default:
255                 return ret;
256         }
257
258         /*
259          * For kernel-addresses, either the address or symbol name can be
260          * specified.
261          */
262         if (info->name)
263                 info->address = (unsigned long)kallsyms_lookup_name(info->name);
264
265         /*
266          * Check that the low-order bits of the address are appropriate
267          * for the alignment implied by len.
268          */
269         if (info->address & align)
270                 return -EINVAL;
271
272         return 0;
273 }
274
275 /*
276  * Release the user breakpoints used by ptrace
277  */
278 void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
279 {
280         int i;
281         struct thread_struct *t = &tsk->thread;
282
283         for (i = 0; i < sh_ubc->num_events; i++) {
284                 unregister_hw_breakpoint(t->ptrace_bps[i]);
285                 t->ptrace_bps[i] = NULL;
286         }
287 }
288
/*
 * Core UBC exception handler: walk the triggered channels, dispatch each
 * hit to its perf counter, and queue a SIGTRAP for user-space hits.
 *
 * Returns NOTIFY_STOP when every trigger was consumed here, NOTIFY_DONE
 * when at least one channel had no registered breakpoint (so other
 * debuggers in the notifier chain get a look).
 */
static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int cpu, i, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned int cmf, resume_mask;

	/*
	 * Do an early return if none of the channels triggered.
	 */
	cmf = sh_ubc->triggered_mask();
	if (unlikely(!cmf))
		return NOTIFY_DONE;

	/*
	 * By default, resume all of the active channels.
	 */
	resume_mask = sh_ubc->active_mask();

	/*
	 * Disable breakpoints during exception handling.
	 */
	sh_ubc->disable_all();

	/* Pin this CPU: bp_per_reg is per-cpu and must not migrate under us. */
	cpu = get_cpu();
	for (i = 0; i < sh_ubc->num_events; i++) {
		unsigned long event_mask = (1 << i);

		/* Skip channels that did not fire. */
		if (likely(!(cmf & event_mask)))
			continue;

		/*
		 * The counter may be concurrently released but that can only
		 * occur from a call_rcu() path. We can then safely fetch
		 * the breakpoint, use its callback, touch its counter
		 * while we are in an rcu_read_lock() path.
		 */
		rcu_read_lock();

		bp = per_cpu(bp_per_reg[i], cpu);
		if (bp)
			rc = NOTIFY_DONE;

		/*
		 * Reset the condition match flag to denote completion of
		 * exception handling.
		 */
		sh_ubc->clear_triggered_mask(event_mask);

		/*
		 * bp can be NULL due to concurrent perf counter
		 * removing.
		 */
		if (!bp) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Don't restore the channel if the breakpoint is from
		 * ptrace, as it always operates in one-shot mode.
		 */
		if (bp->overflow_handler == ptrace_triggered)
			resume_mask &= ~(1 << i);

		perf_bp_event(bp, args->regs);

		/* Deliver the signal to userspace */
		if (!arch_check_bp_in_kernelspace(bp)) {
			siginfo_t info;

			info.si_signo = args->signr;
			info.si_errno = notifier_to_errno(rc);
			info.si_code = TRAP_HWBKPT;

			force_sig_info(args->signr, &info, current);
		}

		rcu_read_unlock();
	}

	/*
	 * NOTE(review): cmf is never cleared inside the loop, and the early
	 * return above guarantees cmf != 0 here, so this branch appears
	 * unreachable — verify against the intended per-event clearing.
	 */
	if (cmf == 0)
		rc = NOTIFY_DONE;

	/* Re-arm whatever channels we decided should keep running. */
	sh_ubc->enable_all(resume_mask);

	put_cpu();

	return rc;
}
378
/*
 * Low-level trap entry for the UBC breakpoint exception; forwards the
 * event into the die-notifier chain as DIE_BREAKPOINT with SIGTRAP.
 */
BUILD_TRAP_HANDLER(breakpoint)
{
	/* The exception vector identifies which UBC trap fired. */
	unsigned long ex = lookup_exception_vector();
	TRAP_HANDLER_DECL;

	notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
}
386
387 /*
388  * Handle debug exception notifications.
389  */
390 int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
391                                     unsigned long val, void *data)
392 {
393         struct die_args *args = data;
394
395         if (val != DIE_BREAKPOINT)
396                 return NOTIFY_DONE;
397
398         /*
399          * If the breakpoint hasn't been triggered by the UBC, it's
400          * probably from a debugger, so don't do anything more here.
401          *
402          * This also permits the UBC interface clock to remain off for
403          * non-UBC breakpoints, as we don't need to check the triggered
404          * or active channel masks.
405          */
406         if (args->trapnr != sh_ubc->trap_nr)
407                 return NOTIFY_DONE;
408
409         return hw_breakpoint_handler(data);
410 }
411
/* PMU read callback for breakpoint events; intentionally unimplemented. */
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}
416
417 int register_sh_ubc(struct sh_ubc *ubc)
418 {
419         /* Bail if it's already assigned */
420         if (sh_ubc != &ubc_dummy)
421                 return -EBUSY;
422         sh_ubc = ubc;
423
424         pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name);
425
426         WARN_ON(ubc->num_events > HBP_NUM);
427
428         return 0;
429 }