arch/powerpc/kernel/optprobes.c
/*
 * Code for Kernel probes Jump optimization.
 *
 * Copyright 2017, Anju T, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>

#define TMPL_CALL_HDLR_IDX      \
        (optprobe_template_call_handler - optprobe_template_entry)
#define TMPL_EMULATE_IDX        \
        (optprobe_template_call_emulate - optprobe_template_entry)
#define TMPL_RET_IDX            \
        (optprobe_template_ret - optprobe_template_entry)
#define TMPL_OP_IDX             \
        (optprobe_template_op_address - optprobe_template_entry)
#define TMPL_INSN_IDX           \
        (optprobe_template_insn - optprobe_template_entry)
#define TMPL_END_IDX            \
        (optprobe_template_end - optprobe_template_entry)

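/*
 * For reference, a sketch of the detour buffer built from this template,
 * inferred from the patching done in arch_prepare_optimized_kprobe()
 * below (the authoritative layout lives in optprobes_head.S):
 *
 *   optprobe_template_entry:   save register state
 *   TMPL_OP_IDX:               load &optimized_kprobe into r3 (imm64)
 *   TMPL_CALL_HDLR_IDX:        bl optimized_callback
 *   TMPL_INSN_IDX:             load the probed instruction into r4 (imm32)
 *   TMPL_EMULATE_IDX:          bl emulate_step
 *   TMPL_RET_IDX:              b <nip after emulation>
 *   optprobe_template_end
 */
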
DEFINE_INSN_CACHE_OPS(ppc_optinsn);

static bool insn_page_in_use;

static void *__ppc_alloc_insn_page(void)
{
        if (insn_page_in_use)
                return NULL;
        insn_page_in_use = true;
        return &optinsn_slot;
}

static void __ppc_free_insn_page(void *page __maybe_unused)
{
        insn_page_in_use = false;
}

struct kprobe_insn_cache kprobe_ppc_optinsn_slots = {
        .mutex = __MUTEX_INITIALIZER(kprobe_ppc_optinsn_slots.mutex),
        .pages = LIST_HEAD_INIT(kprobe_ppc_optinsn_slots.pages),
        /* insn_size initialized later */
        .alloc = __ppc_alloc_insn_page,
        .free = __ppc_free_insn_page,
        .nr_garbage = 0,
};

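/*
 * Note: unlike the generic insn cache, which allocates pages on demand,
 * a single statically reserved slot (optinsn_slot) is handed out here.
 * The likely reason, an inference from the range checks in
 * arch_prepare_optimized_kprobe() below, is that the slot lives in
 * kernel text, keeping the detour buffer within the +/-32MB reach of a
 * 'b' instruction from the probed address.
 */
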
/*
 * Check if we can optimize this probe. Returns NIP post-emulation if this can
 * be optimized and 0 otherwise.
 */
static unsigned long can_optimize(struct kprobe *p)
{
        struct pt_regs regs;
        struct instruction_op op;
        unsigned long nip = 0;

        /*
         * A kprobe placed on the kretprobe trampoline during boot
         * sits on a 'nop' instruction, which can always be emulated,
         * so further checks can be skipped.
         */
        if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
                return (unsigned long)p->addr + sizeof(kprobe_opcode_t);

        /*
         * We only support optimizing kernel addresses, but not
         * module addresses.
         *
         * FIXME: Optimize kprobes placed in module addresses.
         */
        if (!is_kernel_addr((unsigned long)p->addr))
                return 0;

        memset(&regs, 0, sizeof(struct pt_regs));
        regs.nip = (unsigned long)p->addr;
        regs.trap = 0x0;
        regs.msr = MSR_KERNEL;

        /*
         * Kprobes placed on conditional branch instructions are not
         * optimized: with a dummy pt_regs we cannot predict the nip in
         * advance (e.g. whether a 'bne' is taken depends on CR0, which
         * a zeroed pt_regs cannot represent), so we cannot ensure that
         * the return branch from the detour buffer falls within the
         * +/-32MB branch range. The detour buffer ends with a branch
         * back to the nip returned by analyse_instr() here.
         *
         * Ensure that the instruction is not a conditional branch, and
         * that it can be emulated.
         */
        if (!is_conditional_branch(*p->ainsn.insn) &&
                        analyse_instr(&op, &regs, *p->ainsn.insn))
                nip = regs.nip;

        return nip;
}

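/*
 * Called from the detour buffer in place of the usual trap-based kprobe
 * path: it mimics the breakpoint handler by setting up current_kprobe
 * and invoking the pre-handler, but without taking a trap. It must not
 * be probed itself, hence the NOKPROBE_SYMBOL annotation below.
 */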
static void optimized_callback(struct optimized_kprobe *op,
                               struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        unsigned long flags;

        /* This is possible if op is under delayed unoptimizing */
        if (kprobe_disabled(&op->kp))
                return;

        local_irq_save(flags);
        hard_irq_disable();

        if (kprobe_running()) {
                kprobes_inc_nmissed_count(&op->kp);
        } else {
                __this_cpu_write(current_kprobe, &op->kp);
                regs->nip = (unsigned long)op->kp.addr;
                kcb->kprobe_status = KPROBE_HIT_ACTIVE;
                opt_pre_handler(&op->kp, regs);
                __this_cpu_write(current_kprobe, NULL);
        }

        /*
         * No need for an explicit __hard_irq_enable() here.
         * local_irq_restore() will re-enable interrupts,
         * if they were hard disabled.
         */
        local_irq_restore(flags);
}
NOKPROBE_SYMBOL(optimized_callback);

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
        if (op->optinsn.insn) {
                free_ppc_optinsn_slot(op->optinsn.insn, 1);
                op->optinsn.insn = NULL;
        }
}

/*
 * emulate_step() takes the instruction to be emulated as its second
 * parameter. Generate instructions to load the 32-bit instruction word
 * into register 'r4' and patch them at 'addr'.
 */
void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
{
        /* addis r4,0,(insn)@h */
        *addr++ = PPC_INST_ADDIS | ___PPC_RT(4) |
                  ((val >> 16) & 0xffff);

        /* ori r4,r4,(insn)@l */
        *addr = PPC_INST_ORI | ___PPC_RA(4) | ___PPC_RS(4) |
                (val & 0xffff);
}
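
/*
 * For example (an illustrative value, not one from the kernel source):
 * for insn = 0x7c0802a6 (mflr r0), this emits:
 *
 *   addis r4,0,0x7c08    # r4 = 0x7c080000
 *   ori   r4,r4,0x02a6   # r4 = 0x7c0802a6
 */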

/*
 * Generate instructions to load provided immediate 64-bit value
 * to register 'r3' and patch these instructions at 'addr'.
 */
void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
{
        /* lis r3,(op)@highest */
        *addr++ = PPC_INST_ADDIS | ___PPC_RT(3) |
                  ((val >> 48) & 0xffff);

        /* ori r3,r3,(op)@higher */
        *addr++ = PPC_INST_ORI | ___PPC_RA(3) | ___PPC_RS(3) |
                  ((val >> 32) & 0xffff);

        /* rldicr r3,r3,32,31 */
        *addr++ = PPC_INST_RLDICR | ___PPC_RA(3) | ___PPC_RS(3) |
                  __PPC_SH64(32) | __PPC_ME64(31);

        /* oris r3,r3,(op)@h */
        *addr++ = PPC_INST_ORIS | ___PPC_RA(3) | ___PPC_RS(3) |
                  ((val >> 16) & 0xffff);

        /* ori r3,r3,(op)@l */
        *addr = PPC_INST_ORI | ___PPC_RA(3) | ___PPC_RS(3) |
                (val & 0xffff);
}
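
/*
 * For example (an illustrative address, not one from the kernel): for
 * val = 0xc000000000abcdef, this emits:
 *
 *   lis    r3,0xc000        # r3 = 0xffffffffc0000000 (sign-extended)
 *   ori    r3,r3,0x0000     # r3 unchanged
 *   rldicr r3,r3,32,31      # r3 = 0xc000000000000000 (rotate, clear low half)
 *   oris   r3,r3,0x00ab     # r3 = 0xc000000000ab0000
 *   ori    r3,r3,0xcdef     # r3 = 0xc000000000abcdef
 */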

int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
        kprobe_opcode_t *buff, branch_op_callback, branch_emulate_step;
        kprobe_opcode_t *op_callback_addr, *emulate_step_addr;
        long b_offset;
        unsigned long nip;

        kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;

        nip = can_optimize(p);
        if (!nip)
                return -EILSEQ;

        /* Allocate instruction slot for detour buffer */
        buff = get_ppc_optinsn_slot();
        if (!buff)
                return -ENOMEM;

        /*
         * OPTPROBE uses 'b' instruction to branch to optinsn.insn.
         *
         * The target address has to be relatively nearby, because the
         * address is specified in an immediate field of the branch
         * instruction itself: 24 bits of the opcode encode the offset.
         * Therefore the target must be within 32MB on either side of
         * the current instruction.
         */
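        /*
         * Worked out: the 'b' opcode's LI field is 24 bits, and since
         * instructions are 4-byte aligned the two low offset bits are
         * implied, giving a signed 26-bit byte offset: +/-2^25 = +/-32MB.
         */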
        b_offset = (unsigned long)buff - (unsigned long)p->addr;
        if (!is_offset_in_branch_range(b_offset))
                goto error;

        /* Check if the return address is also within 32MB range */
        b_offset = (unsigned long)(buff + TMPL_RET_IDX) -
                        (unsigned long)nip;
        if (!is_offset_in_branch_range(b_offset))
                goto error;

        /* Setup template */
        memcpy(buff, optprobe_template_entry,
                        TMPL_END_IDX * sizeof(kprobe_opcode_t));

        /*
         * Fixup the template with instructions to:
         * 1. load the address of the actual probepoint
         */
        patch_imm64_load_insns((unsigned long)op, buff + TMPL_OP_IDX);

        /*
         * 2. branch to optimized_callback() and emulate_step()
         */
        kprobe_lookup_name("optimized_callback", op_callback_addr);
        kprobe_lookup_name("emulate_step", emulate_step_addr);
        if (!op_callback_addr || !emulate_step_addr) {
                WARN(1, "kprobe_lookup_name() failed\n");
                goto error;
        }

        branch_op_callback = create_branch((unsigned int *)buff + TMPL_CALL_HDLR_IDX,
                                (unsigned long)op_callback_addr,
                                BRANCH_SET_LINK);

        branch_emulate_step = create_branch((unsigned int *)buff + TMPL_EMULATE_IDX,
                                (unsigned long)emulate_step_addr,
                                BRANCH_SET_LINK);

        if (!branch_op_callback || !branch_emulate_step)
                goto error;

        buff[TMPL_CALL_HDLR_IDX] = branch_op_callback;
        buff[TMPL_EMULATE_IDX] = branch_emulate_step;

        /*
         * 3. load instruction to be emulated into relevant register, and
         */
        patch_imm32_load_insns(*p->ainsn.insn, buff + TMPL_INSN_IDX);

        /*
         * 4. branch back from trampoline
         */
        buff[TMPL_RET_IDX] = create_branch((unsigned int *)buff + TMPL_RET_IDX,
                                (unsigned long)nip, 0);

        flush_icache_range((unsigned long)buff,
                           (unsigned long)(&buff[TMPL_END_IDX]));

        op->optinsn.insn = buff;

        return 0;

error:
        free_ppc_optinsn_slot(buff, 0);
        return -ERANGE;
}

int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
        return optinsn->insn != NULL;
}

/*
 * On powerpc, an optprobe always replaces exactly one instruction
 * (4 bytes long and 4-byte aligned), so another kprobe can never lie
 * within this address range. Always return 0.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
        return 0;
}

void arch_optimize_kprobes(struct list_head *oplist)
{
        struct optimized_kprobe *op;
        struct optimized_kprobe *tmp;

        list_for_each_entry_safe(op, tmp, oplist, list) {
                /*
                 * Back up the instructions that will be replaced
                 * by the branch to the detour buffer.
                 */
                memcpy(op->optinsn.copied_insn, op->kp.addr,
                                               RELATIVEJUMP_SIZE);
                patch_instruction(op->kp.addr,
                        create_branch((unsigned int *)op->kp.addr,
                                      (unsigned long)op->optinsn.insn, 0));
                list_del_init(&op->list);
        }
}

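/*
 * Unoptimizing simply re-arms the regular kprobe: arch_arm_kprobe()
 * puts the breakpoint (trap) instruction back at the probed address,
 * replacing the branch to the detour buffer.
 */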
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
        arch_arm_kprobe(&op->kp);
}

void arch_unoptimize_kprobes(struct list_head *oplist,
                             struct list_head *done_list)
{
        struct optimized_kprobe *op;
        struct optimized_kprobe *tmp;

        list_for_each_entry_safe(op, tmp, oplist, list) {
                arch_unoptimize_kprobe(op);
                list_move(&op->list, done_list);
        }
}

int arch_within_optimized_kprobe(struct optimized_kprobe *op,
                                 unsigned long addr)
{
        return ((unsigned long)op->kp.addr <= addr &&
                (unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
}