/*
 * Code for Kernel probes Jump optimization.
 *
 * Copyright 2017, Anju T, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>
#define TMPL_CALL_HDLR_IDX	\
	(optprobe_template_call_handler - optprobe_template_entry)
#define TMPL_EMULATE_IDX	\
	(optprobe_template_call_emulate - optprobe_template_entry)
#define TMPL_RET_IDX		\
	(optprobe_template_ret - optprobe_template_entry)
#define TMPL_OP_IDX		\
	(optprobe_template_op_address - optprobe_template_entry)
#define TMPL_INSN_IDX		\
	(optprobe_template_insn - optprobe_template_entry)
#define TMPL_END_IDX		\
	(optprobe_template_end - optprobe_template_entry)
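
/*
 * For orientation, a sketch of the detour buffer these indices point
 * into (labels from optprobes_head.S; the slot order here is
 * indicative, see that file for the authoritative layout):
 *
 *	optprobe_template_entry:	save GPRs, build a pt_regs frame
 *	optprobe_template_op_address:	load &optimized_kprobe into r3
 *	optprobe_template_call_handler:	bl optimized_callback
 *	optprobe_template_insn:		load probed instruction into r4
 *	optprobe_template_call_emulate:	bl emulate_step
 *	optprobe_template_ret:		restore GPRs, b back past probepoint
 *	optprobe_template_end:
 */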
DEFINE_INSN_CACHE_OPS(ppc_optinsn);

static bool insn_page_in_use;
static void *__ppc_alloc_insn_page(void)
{
	if (insn_page_in_use)
		return NULL;
	insn_page_in_use = true;
	return &optinsn_slot;
}

static void __ppc_free_insn_page(void *page __maybe_unused)
{
	insn_page_in_use = false;
}
struct kprobe_insn_cache kprobe_ppc_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_ppc_optinsn_slots.mutex),
	.pages = LIST_HEAD_INIT(kprobe_ppc_optinsn_slots.pages),
	/* insn_size initialized later */
	.alloc = __ppc_alloc_insn_page,
	.free = __ppc_free_insn_page,
	.nr_garbage = 0,
};
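
/*
 * The get_ppc_optinsn_slot()/free_ppc_optinsn_slot() helpers used
 * below come from DEFINE_INSN_CACHE_OPS(ppc_optinsn) above, which
 * expands (per <linux/kprobes.h>) to inline wrappers around the
 * generic slot allocator, backed by kprobe_ppc_optinsn_slots:
 *
 *	static inline kprobe_opcode_t *get_ppc_optinsn_slot(void)
 *	{
 *		return __get_insn_slot(&kprobe_ppc_optinsn_slots);
 *	}
 *
 *	static inline void free_ppc_optinsn_slot(kprobe_opcode_t *slot,
 *						 int dirty)
 *	{
 *		__free_insn_slot(&kprobe_ppc_optinsn_slots, slot, dirty);
 *	}
 */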
/*
 * Check if we can optimize this probe. Returns NIP post-emulation if this can
 * be optimized and 0 otherwise.
 */
static unsigned long can_optimize(struct kprobe *p)
{
	struct pt_regs regs;
	struct instruction_op op;
	unsigned long nip = 0;

	/*
	 * kprobe placed for kretprobe during boot time
	 * has a 'nop' instruction, which can be emulated.
	 * So further checks can be skipped.
	 */
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return (unsigned long)p->addr + sizeof(kprobe_opcode_t);

	/*
	 * We only support optimizing kernel addresses, but not
	 * module addresses.
	 *
	 * FIXME: Optimize kprobes placed in module addresses.
	 */
	if (!is_kernel_addr((unsigned long)p->addr))
		return 0;

	memset(&regs, 0, sizeof(struct pt_regs));
	regs.nip = (unsigned long)p->addr;
	regs.trap = 0x0;
	regs.msr = MSR_KERNEL;

	/*
	 * Kprobes placed in conditional branch instructions are
	 * not optimized, as we can't predict the nip prior with
	 * dummy pt_regs and can not ensure that the return branch
	 * from detour buffer falls in the range of address (i.e 32MB).
	 * A branch back from trampoline is set up in the detour buffer
	 * to the nip returned by the analyse_instr() here.
	 *
	 * Ensure that the instruction is not a conditional branch,
	 * and that it can be emulated.
	 */
	if (!is_conditional_branch(*p->ainsn.insn) &&
	    analyse_instr(&op, &regs, *p->ainsn.insn))
		nip = regs.nip;

	return nip;
}
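
/*
 * For illustration (hypothetical probepoints): a probe on an
 * 'addi r3,r3,1' is optimizable, since analyse_instr() emulates it
 * against the dummy pt_regs and leaves regs.nip at the probed address
 * plus 4. A probe on 'beq <target>' is rejected up front: the
 * post-branch nip depends on live CR state, so no single fixed branch
 * back from the detour buffer could be patched in.
 */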
static void optimized_callback(struct optimized_kprobe *op,
			       struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long flags;

	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	local_irq_save(flags);
	hard_irq_disable();

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		__this_cpu_write(current_kprobe, &op->kp);
		regs->nip = (unsigned long)op->kp.addr;
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}

	/*
	 * No need for an explicit __hard_irq_enable() here.
	 * local_irq_restore() will re-enable interrupts,
	 * if they were hard disabled.
	 */
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(optimized_callback);
void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	if (op->optinsn.insn) {
		free_ppc_optinsn_slot(op->optinsn.insn, 1);
		op->optinsn.insn = NULL;
	}
}
/*
 * emulate_step() requires insn to be emulated as
 * second parameter. Load register 'r4' with the
 * instruction.
 */
void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
{
	/* addis r4,0,(insn)@h */
	*addr++ = PPC_INST_ADDIS | ___PPC_RT(4) |
		  ((val >> 16) & 0xffff);

	/* ori r4,r4,(insn)@l */
	*addr = PPC_INST_ORI | ___PPC_RA(4) | ___PPC_RS(4) |
		(val & 0xffff);
}
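
/*
 * Worked example with a hypothetical value: for val = 0x38210020,
 * the two patched instructions are
 *
 *	addis	r4,0,0x3821	# r4 = 0x38210000
 *	ori	r4,r4,0x0020	# r4 = 0x38210020
 *
 * With RA=0, addis uses the literal zero rather than r0, so the
 * sequence is independent of prior register state.
 */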
/*
 * Generate instructions to load provided immediate 64-bit value
 * to register 'r3' and patch these instructions at 'addr'.
 */
void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
{
	/* lis r3,(op)@highest */
	*addr++ = PPC_INST_ADDIS | ___PPC_RT(3) |
		  ((val >> 48) & 0xffff);

	/* ori r3,r3,(op)@higher */
	*addr++ = PPC_INST_ORI | ___PPC_RA(3) | ___PPC_RS(3) |
		  ((val >> 32) & 0xffff);

	/* rldicr r3,r3,32,31 */
	*addr++ = PPC_INST_RLDICR | ___PPC_RA(3) | ___PPC_RS(3) |
		  __PPC_SH64(32) | __PPC_ME64(31);

	/* oris r3,r3,(op)@h */
	*addr++ = PPC_INST_ORIS | ___PPC_RA(3) | ___PPC_RS(3) |
		  ((val >> 16) & 0xffff);

	/* ori r3,r3,(op)@l */
	*addr = PPC_INST_ORI | ___PPC_RA(3) | ___PPC_RS(3) |
		(val & 0xffff);
}
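
/*
 * Worked example with a hypothetical address,
 * val = 0xc000000001234568:
 *
 *	lis	r3,0xc000	# r3 = 0xffffffffc0000000 (sign-extended)
 *	ori	r3,r3,0x0000	# bits 32-47 of val
 *	rldicr	r3,r3,32,31	# r3 = 0xc000000000000000
 *	oris	r3,r3,0x0123	# r3 = 0xc000000001230000
 *	ori	r3,r3,0x4568	# r3 = 0xc000000001234568
 */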
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	kprobe_opcode_t *buff, branch_op_callback, branch_emulate_step;
	kprobe_opcode_t *op_callback_addr, *emulate_step_addr;
	long b_offset;
	unsigned long nip;

	kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;

	nip = can_optimize(p);
	if (!nip)
		return -EILSEQ;

	/* Allocate instruction slot for detour buffer */
	buff = get_ppc_optinsn_slot();
	if (!buff)
		return -ENOMEM;

	/*
	 * OPTPROBE uses 'b' instruction to branch to optinsn.insn.
	 *
	 * The target address has to be relatively nearby, to permit use
	 * of branch instruction in powerpc, because the address is specified
	 * in an immediate field in the instruction opcode itself, ie 24 bits
	 * in the opcode specify the address. Therefore the address should
	 * be within 32MB on either side of the current instruction.
	 */
	b_offset = (unsigned long)buff - (unsigned long)p->addr;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Check if the return address is also within 32MB range */
	b_offset = (unsigned long)(buff + TMPL_RET_IDX) -
			(unsigned long)nip;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Setup template */
	memcpy(buff, optprobe_template_entry,
			TMPL_END_IDX * sizeof(kprobe_opcode_t));
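
	/*
	 * The 32MB limit follows from the I-form branch encoding: LI is
	 * a 24-bit field that is concatenated with 0b00 (instructions
	 * are 4-byte aligned), giving a signed 26-bit displacement:
	 *
	 *	max forward offset  =  0x01fffffc (+32MB - 4)
	 *	max backward offset = -0x02000000 (-32MB)
	 *
	 * which is the range is_offset_in_branch_range() accepts.
	 */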
	/*
	 * Fixup the template with instructions to:
	 * 1. load the address of the actual probepoint
	 */
	patch_imm64_load_insns((unsigned long)op, buff + TMPL_OP_IDX);

	/*
	 * 2. branch to optimized_callback() and emulate_step()
	 */
	kprobe_lookup_name("optimized_callback", op_callback_addr);
	kprobe_lookup_name("emulate_step", emulate_step_addr);
	if (!op_callback_addr || !emulate_step_addr) {
		WARN(1, "kprobe_lookup_name() failed\n");
		goto error;
	}
	branch_op_callback = create_branch((unsigned int *)buff + TMPL_CALL_HDLR_IDX,
				(unsigned long)op_callback_addr,
				BRANCH_SET_LINK);

	branch_emulate_step = create_branch((unsigned int *)buff + TMPL_EMULATE_IDX,
				(unsigned long)emulate_step_addr,
				BRANCH_SET_LINK);

	if (!branch_op_callback || !branch_emulate_step)
		goto error;

	buff[TMPL_CALL_HDLR_IDX] = branch_op_callback;
	buff[TMPL_EMULATE_IDX] = branch_emulate_step;
	/*
	 * 3. load instruction to be emulated into relevant register, and
	 */
	patch_imm32_load_insns(*p->ainsn.insn, buff + TMPL_INSN_IDX);

	/*
	 * 4. branch back from trampoline
	 */
	buff[TMPL_RET_IDX] = create_branch((unsigned int *)buff + TMPL_RET_IDX,
				(unsigned long)nip, 0);

	flush_icache_range((unsigned long)buff,
			   (unsigned long)(&buff[TMPL_END_IDX]));

	op->optinsn.insn = buff;
	return 0;

error:
	free_ppc_optinsn_slot(buff, 0);
	return -ERANGE;
}
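
/*
 * After the four fixups above, the detour buffer for a probe with
 * post-emulation address <nip> conceptually runs (a sketch, assuming
 * the template layout in optprobes_head.S):
 *
 *	<save registers, build pt_regs>
 *	r3 = op				; TMPL_OP_IDX
 *	bl optimized_callback		; TMPL_CALL_HDLR_IDX
 *	r4 = *p->ainsn.insn		; TMPL_INSN_IDX
 *	bl emulate_step			; TMPL_EMULATE_IDX
 *	<restore registers>
 *	b <nip>				; TMPL_RET_IDX
 */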
int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
	return optinsn->insn != NULL;
}
/*
 * On powerpc, Optprobes always replaces one instruction (4 bytes
 * aligned and 4 bytes long). It is impossible to encounter another
 * kprobe in this address range. So always return 0.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	return 0;
}
void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		/*
		 * Backup instructions which will be replaced
		 * by jump address
		 */
		memcpy(op->optinsn.copied_insn, op->kp.addr,
					       RELATIVEJUMP_SIZE);
		patch_instruction(op->kp.addr,
			create_branch((unsigned int *)op->kp.addr,
				      (unsigned long)op->optinsn.insn, 0));
		list_del_init(&op->list);
	}
}
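
/*
 * For illustration with hypothetical addresses: for a probe at
 * 0xc000000000abcd00 whose detour buffer sits 1MB higher, the 4-byte
 * trap at the probepoint is replaced with
 *
 *	b	+0x100000	; probepoint -> detour buffer
 *
 * copied_insn keeps a backup of the 4 bytes overwritten here (the
 * armed kprobe trap at this point); unoptimizing simply re-arms the
 * kprobe via arch_arm_kprobe() below.
 */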
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	arch_arm_kprobe(&op->kp);
}
void arch_unoptimize_kprobes(struct list_head *oplist,
			     struct list_head *done_list)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
}
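
/*
 * Example: with RELATIVEJUMP_SIZE == 4 on powerpc, a probe at
 * 0xc000000000abcd00 claims addresses 0xc000000000abcd00 through
 * 0xc000000000abcd03; 0xc000000000abcd04 falls outside it.
 */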