/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */
#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>
/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_task_bp_pinned[HBP_NUM]);
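
/*
 * Illustrative note: nr_task_bp_pinned is a histogram rather than a plain
 * counter. nr_task_bp_pinned[n] is the number of tasks that currently have
 * n + 1 pinned breakpoints on this cpu (see toggle_bp_task_slot() and
 * max_task_bp_pinned() below). For example, with HBP_NUM == 4, one task
 * holding a single breakpoint plus one task holding three breakpoints
 * gives nr_task_bp_pinned == { 1, 0, 1, 0 }, and max_task_bp_pinned()
 * reports 3.
 */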

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
        unsigned int pinned;
        unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);
/*
 * Report the maximum number of pinned breakpoints a task
 * has on this cpu.
 */
static unsigned int max_task_bp_pinned(int cpu)
{
        int i;
        unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);

        for (i = HBP_NUM - 1; i >= 0; i--) {
                if (tsk_pinned[i] > 0)
                        return i + 1;
        }
        return 0;
}

/* Count the breakpoints currently pinned to the given task. */
static int task_bp_pinned(struct task_struct *tsk)
{
        struct perf_event_context *ctx = tsk->perf_event_ctxp;
        struct list_head *list;
        struct perf_event *bp;
        unsigned long flags;
        int count = 0;

        if (WARN_ONCE(!ctx, "No perf context for this task"))
                return 0;

        list = &ctx->event_list;

        raw_spin_lock_irqsave(&ctx->lock, flags);

        /*
         * The current breakpoint counter is not included in the list
         * at the open() callback time.
         */
        list_for_each_entry(bp, list, event_entry) {
                if (bp->attr.type == PERF_TYPE_BREAKPOINT)
                        count++;
        }

        raw_spin_unlock_irqrestore(&ctx->lock, flags);

        return count;
}
/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp)
{
        int cpu = bp->cpu;
        struct task_struct *tsk = bp->ctx->task;

        if (cpu >= 0) {
                slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
                if (!tsk)
                        slots->pinned += max_task_bp_pinned(cpu);
                else
                        slots->pinned += task_bp_pinned(tsk);
                slots->flexible = per_cpu(nr_bp_flexible, cpu);
                return;
        }

        for_each_online_cpu(cpu) {
                unsigned int nr;

                nr = per_cpu(nr_cpu_bp_pinned, cpu);
                if (!tsk)
                        nr += max_task_bp_pinned(cpu);
                else
                        nr += task_bp_pinned(tsk);

                if (nr > slots->pinned)
                        slots->pinned = nr;

                nr = per_cpu(nr_bp_flexible, cpu);
                if (nr > slots->flexible)
                        slots->flexible = nr;
        }
}
/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
{
        unsigned int *tsk_pinned;
        int count;

        count = task_bp_pinned(tsk);

        tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
        if (enable) {
                tsk_pinned[count]++;
                if (count > 0)
                        tsk_pinned[count-1]--;
        } else {
                tsk_pinned[count]--;
                if (count > 0)
                        tsk_pinned[count-1]++;
        }
}

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void toggle_bp_slot(struct perf_event *bp, bool enable)
{
        int cpu = bp->cpu;
        struct task_struct *tsk = bp->ctx->task;

        /* Pinned counter task profiling */
        if (tsk) {
                if (cpu >= 0) {
                        toggle_bp_task_slot(tsk, cpu, enable);
                        return;
                }

                for_each_online_cpu(cpu)
                        toggle_bp_task_slot(tsk, cpu, enable);
                return;
        }

        /* Pinned counter cpu profiling */
        if (enable)
                per_cpu(nr_cpu_bp_pinned, bp->cpu)++;
        else
                per_cpu(nr_cpu_bp_pinned, bp->cpu)--;
}
/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't use up all the debug registers.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible count, if any,
 *          must keep at least one register free (or the flexible counters
 *          will never get scheduled).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
 */
static int __reserve_bp_slot(struct perf_event *bp)
{
        struct bp_busy_slots slots = {0};

        fetch_bp_busy_slots(&slots, bp);

        /* Flexible counters need to keep at least one slot */
        if (slots.pinned + (!!slots.flexible) == HBP_NUM)
                return -ENOSPC;

        toggle_bp_slot(bp, true);

        return 0;
}
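
/*
 * Worked example of the check above, assuming HBP_NUM == 4 (as on x86,
 * which has four debug registers): with slots.pinned == 3 and at least
 * one flexible counter around, 3 + !!1 == 4 == HBP_NUM, so a fourth
 * pinned breakpoint is refused with -ENOSPC in order to keep one
 * register available for the flexible counters. With no flexible
 * counter, 3 + 0 != 4 and the reservation succeeds.
 */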
int reserve_bp_slot(struct perf_event *bp)
{
        int ret;

        mutex_lock(&nr_bp_mutex);
        ret = __reserve_bp_slot(bp);
        mutex_unlock(&nr_bp_mutex);

        return ret;
}

static void __release_bp_slot(struct perf_event *bp)
{
        toggle_bp_slot(bp, false);
}

void release_bp_slot(struct perf_event *bp)
{
        mutex_lock(&nr_bp_mutex);
        __release_bp_slot(bp);
        mutex_unlock(&nr_bp_mutex);
}
/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release breakpoint slot functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
        if (mutex_is_locked(&nr_bp_mutex))
                return -1;

        return __reserve_bp_slot(bp);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
        if (mutex_is_locked(&nr_bp_mutex))
                return -1;

        __release_bp_slot(bp);

        return 0;
}
static int validate_hw_breakpoint(struct perf_event *bp)
{
        int ret;

        ret = arch_validate_hwbkpt_settings(bp);
        if (ret)
                return ret;

        if (arch_check_bp_in_kernelspace(bp)) {
                if (bp->attr.exclude_kernel)
                        return -EINVAL;
                /*
                 * Don't let unprivileged users set a breakpoint in the trap
                 * path to avoid trap recursion attacks.
                 */
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
        }

        return 0;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
        int ret;

        ret = reserve_bp_slot(bp);
        if (ret)
                return ret;

        ret = validate_hw_breakpoint(bp);

        /* if arch_validate_hwbkpt_settings() fails then release bp slot */
        if (ret)
                release_bp_slot(bp);

        return ret;
}
/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
                            perf_overflow_handler_t triggered,
                            struct task_struct *tsk)
{
        return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
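
/*
 * Illustrative usage sketch (not part of this file): a kernel caller that
 * wants to watch a word in a task's address space could do roughly the
 * following. The address (user_addr), task pointer (tsk) and trigger
 * callback (my_trigger_fn) are assumptions made up for the example.
 *
 *      struct perf_event_attr attr;
 *      struct perf_event *bp;
 *
 *      hw_breakpoint_init(&attr);
 *      attr.bp_addr = user_addr;
 *      attr.bp_len = HW_BREAKPOINT_LEN_4;
 *      attr.bp_type = HW_BREAKPOINT_W;
 *
 *      bp = register_user_hw_breakpoint(&attr, my_trigger_fn, tsk);
 *      if (IS_ERR(bp))
 *              return PTR_ERR(bp);
 */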

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
        u64 old_addr = bp->attr.bp_addr;
        u64 old_len = bp->attr.bp_len;
        int old_type = bp->attr.bp_type;
        int err = 0;

        perf_event_disable(bp);

        bp->attr.bp_addr = attr->bp_addr;
        bp->attr.bp_type = attr->bp_type;
        bp->attr.bp_len = attr->bp_len;

        if (attr->disabled)
                goto end;

        err = validate_hw_breakpoint(bp);
        if (!err)
                perf_event_enable(bp);

        if (err) {
                /* Restore the previous settings on validation failure */
                bp->attr.bp_addr = old_addr;
                bp->attr.bp_type = old_type;
                bp->attr.bp_len = old_len;
                if (!bp->attr.disabled)
                        perf_event_enable(bp);

                return err;
        }

end:
        bp->attr.disabled = attr->disabled;

        return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
        if (!bp)
                return;
        perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
                            perf_overflow_handler_t triggered)
{
        struct perf_event * __percpu *cpu_events, **pevent, *bp;
        long err;
        int cpu;

        cpu_events = alloc_percpu(typeof(*cpu_events));
        if (!cpu_events)
                return (void __percpu __force *)ERR_PTR(-ENOMEM);

        get_online_cpus();
        for_each_online_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);
                *pevent = bp;

                if (IS_ERR(bp)) {
                        err = PTR_ERR(bp);
                        goto fail;
                }
        }
        put_online_cpus();

        return cpu_events;

fail:
        for_each_online_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                if (IS_ERR(*pevent))
                        break;
                unregister_hw_breakpoint(*pevent);
        }
        put_online_cpus();

        free_percpu(cpu_events);
        return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
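
/*
 * Illustrative usage sketch, modelled on the in-tree sample
 * samples/hw_breakpoint/data_breakpoint.c: place a read/write watchpoint
 * on a kernel symbol on every online cpu. The symbol name and trigger
 * callback (my_trigger_fn) are assumptions made up for the example.
 *
 *      struct perf_event_attr attr;
 *      struct perf_event * __percpu *wbp;
 *
 *      hw_breakpoint_init(&attr);
 *      attr.bp_addr = kallsyms_lookup_name("pid_max");
 *      attr.bp_len = HW_BREAKPOINT_LEN_4;
 *      attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
 *
 *      wbp = register_wide_hw_breakpoint(&attr, my_trigger_fn);
 *      if (IS_ERR((void __force *)wbp))
 *              return PTR_ERR((void __force *)wbp);
 *      ...
 *      unregister_wide_hw_breakpoint(wbp);
 */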

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
        int cpu;
        struct perf_event **pevent;

        for_each_possible_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                unregister_hw_breakpoint(*pevent);
        }
        free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);
static struct notifier_block hw_breakpoint_exceptions_nb = {
        .notifier_call = hw_breakpoint_exceptions_notify,
        /* we need to be notified first */
        .priority = 0x7fffffff
};

static int __init init_hw_breakpoint(void)
{
        return register_die_notifier(&hw_breakpoint_exceptions_nb);
}
core_initcall(init_hw_breakpoint);

struct pmu perf_ops_bp = {
        .enable         = arch_install_hw_breakpoint,
        .disable        = arch_uninstall_hw_breakpoint,
        .read           = hw_breakpoint_pmu_read,
};