/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <kenistoj@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function return probes functionality
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
 *		and kretprobe-booster for x86-64
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>
void jprobe_return_end(void);
static void __kprobes arch_copy_kprobe(struct kprobe *p);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", },	/* This function switches only the current
				   task, but doesn't switch the kernel
				   stack. */
	{NULL, NULL}		/* Terminator */
};
const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
/*
 * Insert a jump instruction at address 'from' that jumps to address 'to'.
 */
static __always_inline void set_jmp_op(void *from, void *to)
{
	struct __arch_jmp_op {
		char op;
		s32 raddr;
	} __attribute__((packed)) *jop;
	jop = (struct __arch_jmp_op *)from;
	jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
	jop->op = RELATIVEJUMP_INSTRUCTION;
}
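
/*
 * Worked example (illustrative; not part of the original source): a
 * relative jmp is 5 bytes long, so the displacement is counted from the
 * end of the instruction. With from == 0x1000 and to == 0x1020, raddr
 * becomes 0x1020 - (0x1000 + 5) = 0x1b.
 */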
/*
 * Returns non-zero if the opcode is boostable.
 * RIP-relative instructions are adjusted at copying time.
 */
static __always_inline int can_boost(kprobe_opcode_t *opcodes)
{
#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 64))
	/*
	 * Undefined/reserved opcodes, conditional jumps, Opcode Extension
	 * Groups, and some special opcodes cannot be boosted.
	 */
	static const unsigned long twobyte_is_boostable[256 / 64] = {
		/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f      */
		/*      ----------------------------------------------      */
		W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0)| /* 00 */
		W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)| /* 10 */
		W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)| /* 20 */
		W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), /* 30 */
		W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)| /* 40 */
		W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)| /* 50 */
		W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1)| /* 60 */
		W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1), /* 70 */
		W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)| /* 80 */
		W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)| /* 90 */
		W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1)| /* a0 */
		W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1), /* b0 */
		W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1)| /* c0 */
		W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1)| /* d0 */
		W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1)| /* e0 */
		W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)  /* f0 */
		/*      -----------------------------------------------     */
		/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f      */
	};
#undef W
	kprobe_opcode_t opcode;
	kprobe_opcode_t *orig_opcodes = opcodes;

retry:
	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
		return 0;
	opcode = *(opcodes++);

	/* 2nd-byte opcode */
	if (opcode == 0x0f) {
		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
			return 0;
		return test_bit(*opcodes, twobyte_is_boostable);
	}

	switch (opcode & 0xf0) {
	case 0x40:
		goto retry; /* REX prefix is boostable */
	case 0x60:
		if (0x63 < opcode && opcode < 0x67)
			goto retry; /* prefixes */
		/* can't boost Address-size override and bound */
		return (opcode != 0x62 && opcode != 0x67);
	case 0x70:
		return 0; /* can't boost conditional jump */
	case 0xc0:
		/* can't boost software interrupts */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
			goto retry; /* lock/rep(ne) prefix */
		/* clear and set flags are boostable */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* segment override prefixes are boostable */
		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
			goto retry; /* prefixes */
		/* CS override prefix and call are not boostable */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}
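
/*
 * Illustrative examples (not from the original source): for the bytes
 * "36 48 89 c8" (ss-override, REX.W, mov %rcx,%rax), can_boost() skips
 * the 0x36 and REX prefixes via the 'retry' loop and accepts 0x89 in
 * the default case, so the instruction is boostable; a conditional
 * jump such as 0x74 (je rel8) hits case 0x70 and is rejected.
 */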
/*
 * Returns non-zero if the opcode modifies the interrupt flag.
 */
static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
{
	switch (*insn) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}

	/* A REX prefix (0x40-0x4f) may precede iretq. */
	if (*insn >= 0x40 && *insn <= 0x4f && *++insn == 0xcf)
		return 1;
	return 0;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* insn: must be on special executable page on x86_64. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn) {
		return -ENOMEM;
	}
	arch_copy_kprobe(p);
	return 0;
}
/*
 * Determine if the instruction uses the %rip-relative addressing mode.
 * If it does, return the address of the 32-bit displacement word.
 * If not, return NULL.
 */
static s32 __kprobes *is_riprel(u8 *insn)
{
#define W(row,b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,ba,bb,bc,bd,be,bf)	              \
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 64))
	static const u64 onebyte_has_modrm[256 / 64] = {
		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
		/*      -------------------------------         */
		W(0x00, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)| /* 00 */
		W(0x10, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)| /* 10 */
		W(0x20, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)| /* 20 */
		W(0x30, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0), /* 30 */
		W(0x40, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 40 */
		W(0x50, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 50 */
		W(0x60, 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0)| /* 60 */
		W(0x70, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 70 */
		W(0x80, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 80 */
		W(0x90, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 90 */
		W(0xa0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* a0 */
		W(0xb0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* b0 */
		W(0xc0, 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0)| /* c0 */
		W(0xd0, 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1)| /* d0 */
		W(0xe0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* e0 */
		W(0xf0, 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1)  /* f0 */
		/*      -------------------------------         */
		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
	};
	static const u64 twobyte_has_modrm[256 / 64] = {
		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
		/*      -------------------------------         */
		W(0x00, 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1)| /* 0f */
		W(0x10, 1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0)| /* 1f */
		W(0x20, 1,1,1,1,1,0,1,0,1,1,1,1,1,1,1,1)| /* 2f */
		W(0x30, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 3f */
		W(0x40, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 4f */
		W(0x50, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 5f */
		W(0x60, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 6f */
		W(0x70, 1,1,1,1,1,1,1,0,0,0,0,0,1,1,1,1), /* 7f */
		W(0x80, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 8f */
		W(0x90, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 9f */
		W(0xa0, 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1)| /* af */
		W(0xb0, 1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1), /* bf */
		W(0xc0, 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0)| /* cf */
		W(0xd0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* df */
		W(0xe0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* ef */
		W(0xf0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0)  /* ff */
		/*      -------------------------------         */
		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
	};
#undef W
	int need_modrm;
	/* Skip legacy instruction prefixes. */
	while (1) {
		switch (*insn) {
		case 0x66: case 0x67: case 0x2e: case 0x3e:
		case 0x26: case 0x64: case 0x65: case 0x36:
		case 0xf0: case 0xf2: case 0xf3:
			insn++;
			continue;
		}
		break;
	}

	/* Skip REX instruction prefix. */
	if ((*insn & 0xf0) == 0x40)
		++insn;

	if (*insn == 0x0f) {	/* Two-byte opcode. */
		++insn;
		need_modrm = test_bit(*insn, twobyte_has_modrm);
	} else {		/* One-byte opcode. */
		need_modrm = test_bit(*insn, onebyte_has_modrm);
	}

	if (need_modrm) {
		u8 modrm = *++insn;
		if ((modrm & 0xc7) == 0x05) { /* %rip+disp32 addressing mode */
			/* Displacement follows ModRM byte. */
			return (s32 *) ++insn;
		}
	}

	/* No %rip-relative addressing mode here. */
	return NULL;
}
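
/*
 * Worked example (illustrative; not from the original source): for the
 * 7-byte instruction "48 8b 05 78 56 34 12" (mov 0x12345678(%rip),%rax),
 * is_riprel() skips the 0x48 REX prefix, finds that one-byte opcode 0x8b
 * takes a ModRM byte, and sees ModRM == 0x05 (mod 00, r/m 101), i.e. the
 * %rip+disp32 form; it returns a pointer to the four displacement bytes.
 */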
static void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	s32 *ripdisp;

	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
	ripdisp = is_riprel(p->ainsn.insn);
	if (ripdisp) {
		/*
		 * The copied instruction uses the %rip-relative
		 * addressing mode. Adjust the displacement for the
		 * difference between the original location of this
		 * instruction and the location of the copy that will
		 * actually be run. The tricky bit here is making sure
		 * that the sign extension happens correctly in this
		 * calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the
		 * %rip value and yield the same 64-bit result that the
		 * sign-extension of the original signed 32-bit
		 * displacement would have given.
		 */
		s64 disp = (u8 *) p->addr + *ripdisp - (u8 *) p->ainsn.insn;
		BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
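		/*
		 * Worked example (illustrative; not from the original
		 * source): if the copy sits 0x1000 bytes below the
		 * original, i.e. p->ainsn.insn == p->addr - 0x1000, and
		 * the original displacement is 0x100, then disp becomes
		 * 0x100 + 0x1000 = 0x1100, so the copy reaches the same
		 * absolute target from its new location.
		 */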
		*ripdisp = disp;
	}
	if (can_boost(p->addr)) {
		p->ainsn.boostable = 0;
	} else {
		p->ainsn.boostable = -1;
	}
	p->opcode = *p->addr;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, &p->opcode, 1);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	mutex_lock(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
	mutex_unlock(&kprobe_mutex);
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_rflags = kcb->kprobe_old_rflags;
	kcb->prev_kprobe.saved_rflags = kcb->kprobe_saved_rflags;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_rflags = kcb->prev_kprobe.old_rflags;
	kcb->kprobe_saved_rflags = kcb->prev_kprobe.saved_rflags;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_rflags = kcb->kprobe_old_rflags
		= (regs->flags & (TF_MASK | IF_MASK));
	if (is_IF_modifier(p->ainsn.insn))
		kcb->kprobe_saved_rflags &= ~IF_MASK;
}
static __always_inline void clear_btf(void)
{
	if (test_thread_flag(TIF_DEBUGCTLMSR))
		wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
}

static __always_inline void restore_btf(void)
{
	if (test_thread_flag(TIF_DEBUGCTLMSR))
		wrmsrl(MSR_IA32_DEBUGCTLMSR, current->thread.debugctlmsr);
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	clear_btf();
	regs->flags |= TF_MASK;
	regs->flags &= ~IF_MASK;
	/* single-step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->ip = (unsigned long)p->addr;
	else
		regs->ip = (unsigned long)p->ainsn.insn;
}
/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	unsigned long *sara = (unsigned long *)regs->sp;

	ri->ret_addr = (kprobe_opcode_t *) *sara;
	/* Replace the return addr with trampoline addr */
	*sara = (unsigned long) &kretprobe_trampoline;
}
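
/*
 * Illustration (not from the original source): on function entry the
 * word at regs->sp is the caller's return address.
 *
 *	before:	[rsp] -> caller_ip	(saved in ri->ret_addr)
 *	after:	[rsp] -> kretprobe_trampoline
 *
 * The probed function therefore "returns" into the trampoline, which
 * invokes trampoline_handler() and then resumes at the real caller_ip.
 */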
int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check that we're not actually recursing. */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
				*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
				regs->flags &= ~TF_MASK;
				regs->flags |= kcb->kprobe_saved_rflags;
				goto no_kprobe;
			} else if (kcb->kprobe_status == KPROBE_HIT_SSDONE) {
				/* TODO: Provide re-entrancy from
				 * post_kprobes_handler() and avoid exception
				 * stack corruption while single-stepping on
				 * the instruction of the new probe.
				 */
				arch_disarm_kprobe(p);
				regs->ip = (unsigned long)p->addr;
				reset_current_kprobe();
				ret = 1;
			} else {
				/* We have reentered the kprobe_handler(),
				 * since another probe was hit while within
				 * the handler. We save the original kprobe
				 * variables and just single-step the
				 * instruction of the new probe without
				 * calling any user handlers.
				 */
				save_previous_kprobe(kcb);
				set_current_kprobe(p, regs, kcb);
				kprobes_inc_nmissed_count(p);
				prepare_singlestep(p, regs);
				kcb->kprobe_status = KPROBE_REENTER;
				return 1;
			}
		} else {
			if (*addr != BREAKPOINT_INSTRUCTION) {
				/* The breakpoint instruction was removed by
				 * another cpu right after we hit it; no
				 * further handling of this interrupt is
				 * appropriate.
				 */
				regs->ip = (unsigned long)addr;
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 * Back up over the (now missing) int3 and run
			 * the original instruction.
			 */
			regs->ip = (unsigned long)addr;
			ret = 1;
		}
		/* Not one of ours: let the kernel handle it. */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
	if (p->ainsn.boostable == 1 && !p->post_handler) {
		/* Boost up -- we can execute the copied instruction directly. */
		reset_current_kprobe();
		regs->ip = (unsigned long)p->ainsn.insn;
		preempt_enable_no_resched();
		return 1;
	}
#endif
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
/*
 * When a retprobed function returns, this code saves registers and
 * calls trampoline_handler(), which in turn calls the kretprobe's
 * handler.
 */
void __kprobes kretprobe_trampoline_holder(void)
{
	asm volatile ( ".global kretprobe_trampoline\n"
			"kretprobe_trampoline: \n"
			/* We don't bother saving the ss register */
			"	pushq %rsp\n"
			"	pushfq\n"
			/*
			 * Skip cs, ip, orig_ax.
			 * trampoline_handler() will plug in these values
			 */
			"	subq $24, %rsp\n"
			"	pushq %rdi\n"
			"	pushq %rsi\n"
			"	pushq %rdx\n"
			"	pushq %rcx\n"
			"	pushq %rax\n"
			"	pushq %r8\n"
			"	pushq %r9\n"
			"	pushq %r10\n"
			"	pushq %r11\n"
			"	pushq %rbx\n"
			"	pushq %rbp\n"
			"	pushq %r12\n"
			"	pushq %r13\n"
			"	pushq %r14\n"
			"	pushq %r15\n"
			"	call trampoline_handler\n"
			/* Replace saved sp with true return address. */
			"	movq %rax, 152(%rsp)\n"
			"	popq %r15\n"
			"	popq %r14\n"
			"	popq %r13\n"
			"	popq %r12\n"
			"	popq %rbp\n"
			"	popq %rbx\n"
			"	popq %r11\n"
			"	popq %r10\n"
			"	popq %r9\n"
			"	popq %r8\n"
			"	popq %rax\n"
			"	popq %rcx\n"
			"	popq %rdx\n"
			"	popq %rsi\n"
			"	popq %rdi\n"
			/* Skip orig_ax, ip, cs */
			"	addq $24, %rsp\n"
			"	popfq\n"
			"	ret\n");
}
/*
 * Called from kretprobe_trampoline
 */
fastcall void * __kprobes trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);
	/* fixup registers */
	regs->cs = __KERNEL_CS;
	regs->ip = trampoline_address;
	regs->orig_ax = 0xffffffffffffffff;
	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *	- instances are always inserted at the head of the list
	 *	- when multiple return probes are registered for the same
	 *	  function, the first instance's ret_addr will point to the
	 *	  real return address, and all the rest will point to
	 *	  kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler) {
			__get_cpu_var(current_kprobe) = &ri->rp->kp;
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->rp->handler(ri, regs);
			__get_cpu_var(current_kprobe) = NULL;
		}

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	spin_unlock_irqrestore(&kretprobe_lock, flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	return (void *)orig_ret_address;
}
/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt. We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new ip is relative to the copied instruction. We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed flags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 * (A worked example follows below.)
 *
 * If this is the first time we've single-stepped the instruction at
 * this probepoint, and the instruction is boostable, boost it: add a
 * jump instruction after the copied instruction that jumps to the next
 * instruction after the probepoint.
 */
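
/*
 * Worked example (illustrative; not from the original source): suppose
 * the original "call foo" lives at orig_rip and its copy at copy_rip.
 * After single-stepping the copy, the pushed return address is
 * copy_rip + 5 (the byte after the copied call), so case 0xe8 below
 * rewrites it to orig_rip + (copy_rip + 5 - copy_rip) = orig_rip + 5,
 * the byte after the original call. regs->ip is relocated the same way
 * at the end of the function.
 */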
static void __kprobes resume_execution(struct kprobe *p,
		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = (unsigned long *)regs->sp;
	unsigned long copy_rip = (unsigned long)p->ainsn.insn;
	unsigned long orig_rip = (unsigned long)p->addr;
	kprobe_opcode_t *insn = p->ainsn.insn;

	/* skip the REX prefix */
	if (*insn >= 0x40 && *insn <= 0x4f)
		insn++;

	regs->flags &= ~TF_MASK;
	switch (*insn) {
	case 0x9c:	/* pushfl */
		*tos &= ~(TF_MASK | IF_MASK);
		*tos |= kcb->kprobe_old_rflags;
		break;
	case 0xc2:	/* iret/ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
	case 0xcf:
	case 0xea:	/* jmp absolute -- ip is correct */
		/* ip is already adjusted, no more changes required */
		p->ainsn.boostable = 1;
		goto no_change;
	case 0xe8:	/* call relative - fix return addr */
		*tos = orig_rip + (*tos - copy_rip);
		break;
	case 0xff:
		if ((insn[1] & 0x30) == 0x10) {
			/* call absolute, indirect */
			/* Fix return addr; ip is correct. */
			*tos = orig_rip + (*tos - copy_rip);
			goto no_change;
		} else if (((insn[1] & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
			   ((insn[1] & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
			/* ip is correct. And this is boostable */
			p->ainsn.boostable = 1;
			goto no_change;
		}
	default:
		break;
	}

	if (p->ainsn.boostable == 0) {
		if ((regs->ip > copy_rip) &&
		    (regs->ip - copy_rip) + 5 < MAX_INSN_SIZE) {
			/*
			 * The copied instruction can be executed directly
			 * if it jumps back to the correct address.
			 */
			set_jmp_op((void *)regs->ip,
				   (void *)orig_rip + (regs->ip - copy_rip));
			p->ainsn.boostable = 1;
		} else {
			p->ainsn.boostable = -1;
		}
	}

	regs->ip = orig_rip + (regs->ip - copy_rip);

no_change:
	restore_btf();

	return;
}
int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);
	regs->flags |= kcb->kprobe_saved_rflags;
	trace_hardirqs_fixup_flags(regs->flags);

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->flags & TF_MASK)
		return 0;

	return 1;
}
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *fixup;

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe so that the ip points back to the probe
		 * address, and allow the page fault handler to continue
		 * as for a normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;
		regs->flags |= kcb->kprobe_old_rflags;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts can also be used for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault. This could happen if
		 * the handler tries to access user space via
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		fixup = search_exception_tables(regs->ip);
		if (fixup) {
			regs->ip = fixup->fixup;
			return 1;
		}

		/*
		 * fixup() could not handle it;
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	switch (val) {
	case DIE_INT3:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_GPF:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_rsp = (long *) regs->sp;
	addr = (unsigned long)(kcb->jprobe_saved_rsp);
	/*
	 * As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization. So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
	       MIN_STACK_SIZE(addr));
	regs->flags &= ~IF_MASK;
	trace_hardirqs_off();
	regs->ip = (unsigned long)(jp->entry);
	return 1;
}
void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	asm volatile ("       xchg   %%rbx,%%rsp	\n"
		      "       int3			\n"
		      "       .globl jprobe_return_end	\n"
		      "       jprobe_return_end:	\n"
		      "       nop			\n"::"b"
		      (kcb->jprobe_saved_rsp):"memory");
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->ip - 1);
	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_rsp);
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
		if ((unsigned long *)regs->sp != kcb->jprobe_saved_rsp) {
			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
			printk("current sp %p does not match saved sp %p\n",
			       (long *)regs->sp, kcb->jprobe_saved_rsp);
			printk("Saved registers for jprobe %p\n", jp);
			show_registers(saved_regs);
			printk("Current registers\n");
			show_registers(regs);
			BUG();
		}
		*regs = kcb->jprobe_saved_regs;
		memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
		       MIN_STACK_SIZE(stack_addr));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
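
/*
 * Usage sketch (illustrative; not part of this file; all names are
 * hypothetical): a jprobe handler mirrors the probed function's
 * signature and must end with jprobe_return(), whose int3 brings
 * control to longjmp_break_handler() above to restore the saved
 * context:
 *
 *	static long my_handler(int arg)		// mirrors "my_target"
 *	{
 *		printk("my_target(%d)\n", arg);
 *		jprobe_return();		// never falls through
 *		return 0;
 *	}
 *	static struct jprobe my_jprobe = {
 *		.entry = JPROBE_ENTRY(my_handler),
 *		.kp.symbol_name = "my_target",
 *	};
 *	// register_jprobe(&my_jprobe); ... unregister_jprobe(&my_jprobe);
 */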
int __init arch_init_kprobes(void)
{
	return 0;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}