/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Nov	Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
 *		for ppc64.
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <asm/code-patching.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/sections.h>
#include <linux/uaccess.h>

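/*
 * Per-CPU kprobe state: the probe currently being handled on this CPU
 * (if any), and the control block that saves MSR and re-entrancy state
 * across the breakpoint/single-step sequence.
 */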
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

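/*
 * Addresses that may never be probed: anything in the dedicated
 * .kprobes.text section, plus everything from _stext up to __head_end
 * (the low-memory exception and early head text, where taking a trap
 * would be unsafe).
 */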
bool arch_within_kprobe_blacklist(unsigned long addr)
{
	return  (addr >= (unsigned long)__kprobes_text_start &&
		 addr < (unsigned long)__kprobes_text_end) ||
		(addr >= (unsigned long)_stext &&
		 addr < (unsigned long)__head_end);
}

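/*
 * Resolve a symbol name (optionally in "<module:symbol>" form) to the
 * address that should actually be probed:
 * - ELF ABIv2: a probe at offset 0 is placed at the ftrace location
 *   (with -mprofile-kernel) or at the local entry point, rather than
 *   at the global entry point.
 * - ELF ABIv1: symbols name function descriptors, so the dot variant
 *   (e.g. ".schedule" for "schedule" -- example name only) is tried
 *   first in order to reach the actual function text.
 */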
kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
{
	kprobe_opcode_t *addr;

#ifdef PPC64_ELF_ABI_v2
	/* PPC64 ABIv2 needs local entry point */
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
	if (addr && !offset) {
#ifdef CONFIG_KPROBES_ON_FTRACE
		unsigned long faddr;

		/*
		 * Per livepatch.h, ftrace location is always within the first
		 * 16 bytes of a function on powerpc with -mprofile-kernel.
		 */
		faddr = ftrace_location_range((unsigned long)addr,
					      (unsigned long)addr + 16);
		if (faddr)
			addr = (kprobe_opcode_t *)faddr;
		else
#endif
			addr = (kprobe_opcode_t *)ppc_function_entry(addr);
	}
#elif defined(PPC64_ELF_ABI_v1)
	/*
	 * 64bit powerpc ABIv1 uses function descriptors:
	 * - Check for the dot variant of the symbol first.
	 * - If that fails, try looking up the symbol provided.
	 *
	 * This ensures we always get to the actual symbol and not
	 * the descriptor.
	 *
	 * Also handle <module:symbol> format.
	 */
	char dot_name[MODULE_NAME_LEN + 1 + KSYM_NAME_LEN];
	const char *modsym;
	bool dot_appended = false;

	if ((modsym = strchr(name, ':')) != NULL) {
		modsym++;
		if (*modsym != '\0' && *modsym != '.') {
			/* Convert to <module:.symbol> */
			strncpy(dot_name, name, modsym - name);
			dot_name[modsym - name] = '.';
			dot_name[modsym - name + 1] = '\0';
			strncat(dot_name, modsym,
				sizeof(dot_name) - (modsym - name) - 2);
			dot_appended = true;
		} else {
			dot_name[0] = '\0';
			strncat(dot_name, name, sizeof(dot_name) - 1);
		}
	} else if (name[0] != '.') {
		dot_name[0] = '.';
		dot_name[1] = '\0';
		strncat(dot_name, name, KSYM_NAME_LEN - 2);
		dot_appended = true;
	} else {
		dot_name[0] = '\0';
		strncat(dot_name, name, KSYM_NAME_LEN - 1);
	}
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name);
	if (!addr && dot_appended) {
		/* Let's try the original non-dot symbol lookup */
		addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
	}
#else
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
#endif

	return addr;
}

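/*
 * Validate the probe point and set up the out-of-line single-step
 * slot: the original instruction is copied into an executable insn
 * slot so it can be stepped (or emulated) without putting the
 * original opcode back at the probe address.
 */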
int arch_prepare_kprobe(struct kprobe *p)
{
	int ret = 0;
	kprobe_opcode_t insn = *p->addr;

	if ((unsigned long)p->addr & 0x03) {
		printk("Attempt to register kprobe at an unaligned address\n");
		ret = -EINVAL;
	} else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
		printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
		ret = -EINVAL;
	}

	/* insn must be on a special executable page on ppc64.  This is
	 * not explicitly required on ppc32 (right now), but it doesn't hurt */
	if (!ret) {
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			ret = -ENOMEM;
	}

	if (!ret) {
		memcpy(p->ainsn.insn, p->addr,
				MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
		p->opcode = *p->addr;
		flush_icache_range((unsigned long)p->ainsn.insn,
			(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
	}

	p->ainsn.boostable = 0;
	return ret;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);

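/*
 * Arming writes the breakpoint instruction over the probed text and
 * disarming restores the saved opcode; both flush the icache so all
 * CPUs see the patched instruction.
 */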
void arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
NOKPROBE_SYMBOL(arch_arm_kprobe);

void arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);

void arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}
NOKPROBE_SYMBOL(arch_remove_kprobe);

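/*
 * Point nip at the out-of-line copy of the probed instruction and
 * enable the hardware single-step facility for the step over it.
 */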
static nokprobe_inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	enable_single_step(regs);

	/*
	 * On powerpc we should single step on the original
	 * instruction even if the probed insn is a trap
	 * variant, as values in regs could determine whether
	 * the trap is taken or not.
	 */
	regs->nip = (unsigned long)p->ainsn.insn;
}

static nokprobe_inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
}

static nokprobe_inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}

static nokprobe_inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_msr = regs->msr;
}

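/*
 * An offset is "within entry" if a probe there still fires on every
 * call to the function: up to the ftrace site (16 bytes) or the local
 * entry point (8 bytes) on ABIv2, and only offset 0 elsewhere.
 */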
bool arch_function_offset_within_entry(unsigned long offset)
{
#ifdef PPC64_ELF_ABI_v2
#ifdef CONFIG_KPROBES_ON_FTRACE
	return offset <= 16;
#else
	return offset <= 8;
#endif
#else
	return !offset;
#endif
}

void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->link;

	/* Replace the return addr with trampoline addr */
	regs->link = (unsigned long)kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);

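/*
 * Try to emulate the probed instruction in software via emulate_step().
 * Returns > 0 if the instruction was emulated (nip already updated),
 * 0 if it must be single-stepped in hardware, and < 0 on failure.
 */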
int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
{
	int ret;
	unsigned int insn = *p->ainsn.insn;

	/* regs->nip is also adjusted if emulate_step returns 1 */
	ret = emulate_step(regs, insn);
	if (ret > 0) {
		/*
		 * Once this instruction has been boosted
		 * successfully, set the boostable flag
		 */
		if (unlikely(p->ainsn.boostable == 0))
			p->ainsn.boostable = 1;
	} else if (ret < 0) {
		/*
		 * We don't allow kprobes on mtmsr(d)/rfi(d), etc.
		 * So, we should never get here... but, it's still
		 * good to catch them, just in case...
		 */
		printk("Can't step on instruction %x\n", insn);
		BUG();
	} else if (ret == 0) {
		/* This instruction can't be boosted */
		p->ainsn.boostable = -1;
	}

	return ret;
}
NOKPROBE_SYMBOL(try_to_emulate);

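/*
 * Main entry point from the breakpoint exception: find the kprobe for
 * the faulting address, handle recursion (a probe hit while another is
 * being processed), run the pre_handler, and then either emulate the
 * original instruction or single-step it out of line.
 */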
int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned int *addr = (unsigned int *)regs->nip;
	struct kprobe_ctlblk *kcb;

	if (user_mode(regs))
		return 0;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			kprobe_opcode_t insn = *p->ainsn.insn;

			if (kcb->kprobe_status == KPROBE_HIT_SS &&
					is_trap(insn)) {
				/* Turn off 'trace' bits */
				regs->msr &= ~MSR_SINGLESTEP;
				regs->msr |= kcb->kprobe_saved_msr;
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			if (p->ainsn.boostable >= 0) {
				ret = try_to_emulate(p, regs);

				if (ret > 0) {
					restore_previous_kprobe(kcb);
					preempt_enable_no_resched();
					return 1;
				}
			}
			return 1;
		} else {
			if (*addr != BREAKPOINT_INSTRUCTION) {
				/* If trap variant, then it belongs not to us */
				kprobe_opcode_t cur_insn = *addr;

				if (is_trap(cur_insn))
					goto no_kprobe;
				/* The breakpoint instruction was removed by
				 * another cpu right after we hit, no further
				 * handling of this interrupt is appropriate
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __this_cpu_read(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				if (!skip_singlestep(p, regs, kcb))
					goto ss_probe;
				ret = 1;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * PowerPC has multiple variants of the "trap"
			 * instruction. If the current instruction is a
			 * trap variant, it could belong to someone else
			 */
			kprobe_opcode_t cur_insn = *addr;

			if (is_trap(cur_insn))
				goto no_kprobe;
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs, kcb);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	if (p->ainsn.boostable >= 0) {
		ret = try_to_emulate(p, regs);

		if (ret > 0) {
			if (p->post_handler)
				p->post_handler(p, regs, 0);

			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			reset_current_kprobe();
			preempt_enable_no_resched();
			return 1;
		}
	}
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
NOKPROBE_SYMBOL(kprobe_handler);

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *	  causes the handlers to fire
 */
asm(".global kretprobe_trampoline\n"
	".type kretprobe_trampoline, @function\n"
	"kretprobe_trampoline:\n"
	"nop\n"
	"blr\n"
	".size kretprobe_trampoline, .-kretprobe_trampoline\n");

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	regs->nip = orig_ret_address;
	/*
	 * Make LR point to the orig_ret_address.
	 * When the 'nop' inside the kretprobe_trampoline
	 * is optimized, we can do a 'blr' after executing the
	 * detour buffer code.
	 */
	regs->link = orig_ret_address;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
int kprobe_post_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur || user_mode(regs))
		return 0;

	/* make sure we got here for instruction we have a kprobe on */
	if (((unsigned long)cur->ainsn.insn + 4) != regs->nip)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Adjust nip to after the single-stepped instruction */
	regs->nip = (unsigned long)cur->addr + 4;
	regs->msr |= kcb->kprobe_saved_msr;

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, msr
	 * will have DE/SE set, in which case, continue the remaining
	 * processing of do_debug, as if this is not a probe hit.
	 */
	if (regs->msr & MSR_SINGLESTEP)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(kprobe_post_handler);

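/*
 * Called from the page fault path when a fault happens with a kprobe
 * active. Depending on where we were (single-stepping the copy, or in
 * a user handler), either back out of the probe or give the user
 * fault_handler and the exception tables a chance to fix things up.
 */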
int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the nip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->nip = (unsigned long)cur->addr;
		regs->msr &= ~MSR_SINGLESTEP; /* Turn off 'trace' bits */
		regs->msr |= kcb->kprobe_saved_msr;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting,
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault, this could happen
		 * if the handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if ((entry = search_exception_tables(regs->nip)) != NULL) {
			regs->nip = extable_fixup(entry);
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * Let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

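/*
 * Dereference a function pointer to its global entry point; on ABIv1
 * this reads the text address out of the function descriptor.
 */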
unsigned long arch_deref_entry_point(void *entry)
{
	return ppc_global_function_entry(entry);
}
NOKPROBE_SYMBOL(arch_deref_entry_point);

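/*
 * Jprobes: the pre-handler redirects execution to the jprobe handler
 * (which has the same signature as the probed function) after saving
 * the register state; jprobe_return() then traps back so that the
 * saved registers can be restored in longjmp_break_handler().
 */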
int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->nip = arch_deref_entry_point(jp->entry);
#ifdef PPC64_ELF_ABI_v2
	regs->gpr[12] = (unsigned long)jp->entry;
#elif defined(PPC64_ELF_ABI_v1)
	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
#endif

	return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);

void __used jprobe_return(void)
{
	asm volatile("trap" ::: "memory");
}
NOKPROBE_SYMBOL(jprobe_return);

static void __used jprobe_return_end(void)
{
}
NOKPROBE_SYMBOL(jprobe_return_end);

int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/*
	 * FIXME - we should ideally be validating that we got here 'cos
	 * of the "trap" in jprobe_return() above, before restoring the
	 * saved regs...
	 */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	preempt_enable_no_resched();
	return 1;
}
NOKPROBE_SYMBOL(longjmp_break_handler);

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return 1;

	return 0;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);