/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * A code-rewriter that enables instruction single-stepping.
 * Derived from iLib's single-stepping code.
 */

#ifndef __tilegx__   /* No support for single-step yet. */

/* These functions are only used on the TILE platform */
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/types.h>
#include <linux/err.h>
#include <asm/cacheflush.h>
#include <asm/opcode-tile.h>
#include <asm/opcode_constants.h>
#include <arch/abi.h>

#define signExtend17(val) sign_extend((val), 17)
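/* Bundle bits treated as the X1 slot by the rewriting helpers below. */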
#define TILE_X1_MASK (0xffffffffULL << 31)

int unaligned_printk;

static int __init setup_unaligned_printk(char *str)
{
	long val;
	if (strict_strtol(str, 0, &val) != 0)
		return 0;
	unaligned_printk = val;
	pr_info("Printk for each unaligned data access is %s\n",
		unaligned_printk ? "enabled" : "disabled");
	return 1;
}
__setup("unaligned_printk=", setup_unaligned_printk);
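
/* Count of fixups performed; reported via /proc/sys/tile/unaligned_fixup/count. */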
unsigned int unaligned_fixup_count;

enum mem_op {
	MEMOP_NONE,
	MEMOP_LOAD,
	MEMOP_STORE,
	MEMOP_LOAD_POSTINCR,
	MEMOP_STORE_POSTINCR
};

static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, int32_t offset)
{
	tile_bundle_bits result;

	/* mask out the old offset */
	tile_bundle_bits mask = create_BrOff_X1(-1);
	result = n & (~mask);

	/* or in the new offset */
	result |= create_BrOff_X1(offset);

	return result;
}

static inline tile_bundle_bits move_X1(tile_bundle_bits n, int dest, int src)
{
	tile_bundle_bits result;
	tile_bundle_bits op;

	result = n & (~TILE_X1_MASK);

	op = create_Opcode_X1(SPECIAL_0_OPCODE_X1) |
		create_RRROpcodeExtension_X1(OR_SPECIAL_0_OPCODE_X1) |
		create_Dest_X1(dest) |
		create_SrcB_X1(TREG_ZERO) |
		create_SrcA_X1(src);

	result |= op;
	return result;
}

static inline tile_bundle_bits nop_X1(tile_bundle_bits n)
{
	return move_X1(n, TREG_ZERO, TREG_ZERO);
}

static inline tile_bundle_bits addi_X1(
	tile_bundle_bits n, int dest, int src, int imm)
{
	n &= ~TILE_X1_MASK;

	n |= (create_SrcA_X1(src) |
		create_Dest_X1(dest) |
		create_Imm8_X1(imm) |
		create_S_X1(0) |
		create_Opcode_X1(IMM_0_OPCODE_X1) |
		create_ImmOpcodeExtension_X1(ADDI_IMM_0_OPCODE_X1));

	return n;
}

static tile_bundle_bits rewrite_load_store_unaligned(
	struct single_step_state *state,
	tile_bundle_bits bundle,
	struct pt_regs *regs,
	enum mem_op mem_op,
	int size, int sign_ext)
{
	unsigned char __user *addr;
	int val_reg, addr_reg, err, val;

	/* Get address and value registers */
	if (bundle & TILE_BUNDLE_Y_ENCODING_MASK) {
		addr_reg = get_SrcA_Y2(bundle);
		val_reg = get_SrcBDest_Y2(bundle);
	} else if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		addr_reg = get_SrcA_X1(bundle);
		val_reg = get_Dest_X1(bundle);
	} else {
		addr_reg = get_SrcA_X1(bundle);
		val_reg = get_SrcB_X1(bundle);
	}

	/*
	 * If registers are not GPRs, don't try to handle it.
	 *
	 * FIXME: we could handle non-GPR loads by getting the real value
	 * from memory, writing it to the single step buffer, using a
	 * temp_reg to hold a pointer to that memory, then executing that
	 * instruction and resetting temp_reg.  For non-GPR stores, it's a
	 * little trickier; we could use the single step buffer for that
	 * too, but we'd have to add some more state bits so that we could
	 * call back in here to copy that value to the real target.  For
	 * now, we just handle the simple case.
	 */
	if ((val_reg >= PTREGS_NR_GPRS &&
	     (val_reg != TREG_ZERO ||
	      mem_op == MEMOP_LOAD ||
	      mem_op == MEMOP_LOAD_POSTINCR)) ||
	    addr_reg >= PTREGS_NR_GPRS)
		return bundle;

	/* If it's aligned, don't handle it specially */
	addr = (void __user *)regs->regs[addr_reg];
	if (((unsigned long)addr % size) == 0)
		return bundle;

#ifndef __LITTLE_ENDIAN
# error We assume little-endian representation with copy_xx_user size 2 here
#endif
	/* Handle unaligned load/store */
	if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		unsigned short val_16;
		switch (size) {
		case 2:
			err = copy_from_user(&val_16, addr, sizeof(val_16));
			val = sign_ext ? ((short)val_16) : val_16;
			break;
		case 4:
			err = copy_from_user(&val, addr, sizeof(val));
			break;
		default:
			BUG();
		}
		if (err == 0) {
			state->update_reg = val_reg;
			state->update_value = val;
			state->update = 1;
		}
	} else {
		val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
		err = copy_to_user(addr, &val, size);
	}

	if (err) {
		siginfo_t info = {
			.si_signo = SIGSEGV,
			.si_code = SEGV_MAPERR,
			.si_addr = addr
		};
		force_sig_info(info.si_signo, &info, current);
		return (tile_bundle_bits) 0;
	}

	if (unaligned_fixup == 0) {
		siginfo_t info = {
			.si_signo = SIGBUS,
			.si_code = BUS_ADRALN,
			.si_addr = addr
		};
		force_sig_info(info.si_signo, &info, current);
		return (tile_bundle_bits) 0;
	}

	if (unaligned_printk || unaligned_fixup_count == 0) {
		pr_info("Process %d/%s: PC %#lx: Fixup of"
			" unaligned %s at %#lx.\n",
			current->pid, current->comm, regs->pc,
			(mem_op == MEMOP_LOAD ||
			 mem_op == MEMOP_LOAD_POSTINCR) ?
			"load" : "store",
			(unsigned long)addr);
		if (!unaligned_printk) {
#define P pr_info
P("\n");
P("Unaligned fixups in the kernel will slow your application considerably.\n");
P("To find them, write a \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n");
P("which requests the kernel show all unaligned fixups, or write a \"0\"\n");
P("to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n");
P("access will become a SIGBUS you can debug. No further warnings will be\n");
P("shown so as to avoid additional slowdown, but you can track the number\n");
P("of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n");
P("Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n");
P("\n");
#undef P
		}
	}
	++unaligned_fixup_count;

	if (bundle & TILE_BUNDLE_Y_ENCODING_MASK) {
		/* Convert the Y2 instruction to a prefetch. */
		bundle &= ~(create_SrcBDest_Y2(-1) |
			    create_Opcode_Y2(-1));
		bundle |= (create_SrcBDest_Y2(TREG_ZERO) |
			   create_Opcode_Y2(LW_OPCODE_Y2));
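		/*
		 * A load whose destination is the zero register brings
		 * the line into the cache without changing architected
		 * state, which is why it can stand in as a prefetch.
		 */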
	/* Replace the load postincr with an addi */
	} else if (mem_op == MEMOP_LOAD_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Imm8_X1(bundle));
	/* Replace the store postincr with an addi */
	} else if (mem_op == MEMOP_STORE_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Dest_Imm8_X1(bundle));
	} else {
		/* Convert the X1 instruction to a nop. */
		bundle &= ~(create_Opcode_X1(-1) |
			    create_UnShOpcodeExtension_X1(-1) |
			    create_UnOpcodeExtension_X1(-1));
		bundle |= (create_Opcode_X1(SHUN_0_OPCODE_X1) |
			   create_UnShOpcodeExtension_X1(
				   UN_0_SHUN_0_OPCODE_X1) |
			   create_UnOpcodeExtension_X1(
				   NOP_UN_0_SHUN_0_OPCODE_X1));
	}

	return bundle;
}

/**
 * single_step_once() - entry point when single stepping has been triggered.
 * @regs: The machine register state
 *
 *  When we arrive at this routine via a trampoline, the single step
 *  engine copies the executing bundle to the single step buffer.
 *  If the instruction is a conditional branch, then the target is
 *  reset to one past the next instruction. If the instruction
 *  sets the lr, then that is noted. If the instruction is a jump
 *  or call, then the new target pc is preserved and the current
 *  bundle instruction set to null.
 *
 *  The necessary post-single-step rewriting information is stored in
 *  the single_step_state.  We use data segment values because the
 *  stack will be rewound when we run the rewritten single-stepped
 *  instruction.
 */
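/*
 * Rough layout of the step buffer built below: the (possibly rewritten)
 * user bundle comes first, followed either by two "ill" bundles when
 * single-stepping, or else by an optional addli/auli register-fixup pair
 * and a jump back to the next user instruction.
 */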
void single_step_once(struct pt_regs *regs)
{
	extern tile_bundle_bits __single_step_ill_insn;
	extern tile_bundle_bits __single_step_j_insn;
	extern tile_bundle_bits __single_step_addli_insn;
	extern tile_bundle_bits __single_step_auli_insn;
	struct thread_info *info = (void *)current_thread_info();
	struct single_step_state *state = info->step_state;
	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
	tile_bundle_bits __user *buffer, *pc;
	tile_bundle_bits bundle;
	int temp_reg;
	int target_reg = TREG_LR;
	int err;
	enum mem_op mem_op = MEMOP_NONE;
	int size = 0, sign_ext = 0;	/* happy compiler */
292 " .pushsection .rodata.single_step\n"
294 " .globl __single_step_ill_insn\n"
295 "__single_step_ill_insn:\n"
297 " .globl __single_step_addli_insn\n"
298 "__single_step_addli_insn:\n"
299 " { nop; addli r0, zero, 0 }\n"
300 " .globl __single_step_auli_insn\n"
301 "__single_step_auli_insn:\n"
302 " { nop; auli r0, r0, 0 }\n"
303 " .globl __single_step_j_insn\n"
304 "__single_step_j_insn:\n"
310 /* allocate a page of writable, executable memory */
311 state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
313 pr_err("Out of kernel memory trying to single-step\n");

		/* allocate a cache line of writable, executable memory */
		down_write(&current->mm->mmap_sem);
		buffer = (void __user *) do_mmap(NULL, 0, 64,
					  PROT_EXEC | PROT_READ | PROT_WRITE,
					  MAP_PRIVATE | MAP_ANONYMOUS,
					  0);
		up_write(&current->mm->mmap_sem);

		if (IS_ERR((void __force *)buffer)) {
			kfree(state);
			pr_err("Out of kernel pages trying to single-step\n");
			return;
		}

		state->buffer = buffer;
		state->is_enabled = 0;

		info->step_state = state;

		/* Validate our stored instruction patterns */
		BUG_ON(get_Opcode_X1(__single_step_addli_insn) !=
		       ADDLI_OPCODE_X1);
		BUG_ON(get_Opcode_X1(__single_step_auli_insn) !=
		       AULI_OPCODE_X1);
		BUG_ON(get_SrcA_X1(__single_step_addli_insn) != TREG_ZERO);
		BUG_ON(get_Dest_X1(__single_step_addli_insn) != 0);
		BUG_ON(get_JOffLong_X1(__single_step_j_insn) != 0);
	}

	/*
	 * If we are returning from a syscall, we still haven't hit the
	 * "ill" for the swint1 instruction.  So back the PC up to be
	 * pointing at the swint1, but we'll actually return directly
	 * back to the "ill" so we come back in via SIGILL as if we
	 * had "executed" the swint1 without ever being in kernel space.
	 */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc -= 8;

	pc = (tile_bundle_bits __user *)(regs->pc);
	if (get_user(bundle, pc) != 0) {
		pr_err("Couldn't read instruction at %p trying to step\n", pc);
		return;
	}

	/* We'll follow the instruction with 2 ill op bundles */
	state->orig_pc = (unsigned long)pc;
	state->next_pc = (unsigned long)(pc + 1);
	state->branch_next_pc = 0;
	state->update = 0;

	if (!(bundle & TILE_BUNDLE_Y_ENCODING_MASK)) {
		/* two wide, check for control flow */
		int opcode = get_Opcode_X1(bundle);

		switch (opcode) {
		/* branches */
		case BRANCH_OPCODE_X1:
		{
			int32_t offset = signExtend17(get_BrOff_X1(bundle));

			/*
			 * For branches, we use a rewriting trick to let the
			 * hardware evaluate whether the branch is taken or
			 * untaken.  We record the target offset and then
			 * rewrite the branch instruction to target 1 insn
			 * ahead if the branch is taken.  We then follow the
			 * rewritten branch with two bundles, each containing
			 * an "ill" instruction.  The supervisor examines the
			 * pc after the single step code is executed, and if
			 * the pc is the first ill instruction, then the
			 * branch (if any) was not taken.  If the pc is the
			 * second ill instruction, then the branch was
			 * taken.  The new pc is computed for these cases, and
			 * inserted into the registers for the thread.  If
			 * the pc is the start of the single step code, then
			 * an exception or interrupt was taken before the
			 * code started processing, and the same "original"
			 * pc is restored.  This change, different from the
			 * original implementation, has the advantage of
			 * executing a single user instruction.
			 */
			state->branch_next_pc = (unsigned long)(pc + offset);

			/* rewrite branch offset to go forward one bundle */
			bundle = set_BrOff_X1(bundle, 2);
		}
		break;
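
		/*
		 * The stepped branch thus executes against this layout:
		 *   bundle 0: the branch, offset rewritten to +2
		 *   bundle 1: ill (pc here means the branch fell through)
		 *   bundle 2: ill (pc here means the branch was taken)
		 */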

		/* jumps */
		case JALB_OPCODE_X1:
		case JALF_OPCODE_X1:
			state->update = 1;
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			break;

		case JB_OPCODE_X1:
		case JF_OPCODE_X1:
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			bundle = nop_X1(bundle);
			break;

		case SPECIAL_0_OPCODE_X1:
			switch (get_RRROpcodeExtension_X1(bundle)) {
			/* jump-register */
			case JALRP_SPECIAL_0_OPCODE_X1:
			case JALR_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				break;

			case JRP_SPECIAL_0_OPCODE_X1:
			case JR_SPECIAL_0_OPCODE_X1:
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				bundle = nop_X1(bundle);
				break;

			case LNK_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				target_reg = get_Dest_X1(bundle);
				break;

			/* stores */
			case SH_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 2;
				break;

			case SW_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 4;
				break;
			}
			break;

		/* loads and iret */
		case SHUN_0_OPCODE_X1:
			if (get_UnShOpcodeExtension_X1(bundle) ==
			    UN_0_SHUN_0_OPCODE_X1) {
				switch (get_UnOpcodeExtension_X1(bundle)) {
				case LH_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 1;
					break;

				case LH_U_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 0;
					break;

				case LW_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 4;
					break;

				case IRET_UN_0_SHUN_0_OPCODE_X1:
				{
					unsigned long ex0_0 = __insn_mfspr(
						SPR_EX_CONTEXT_0_0);
					unsigned long ex0_1 = __insn_mfspr(
						SPR_EX_CONTEXT_0_1);
					/*
					 * Special-case it if we're iret'ing
					 * to PL0 again.  Otherwise just let
					 * it run and it will generate SIGILL.
					 */
					if (EX1_PL(ex0_1) == USER_PL) {
						state->next_pc = ex0_0;
						regs->ex1 = ex0_1;
						bundle = nop_X1(bundle);
					}
				}
				}
			}
			break;

#if CHIP_HAS_WH64()
		/* postincrement operations */
		case IMM_0_OPCODE_X1:
			switch (get_ImmOpcodeExtension_X1(bundle)) {
			case LWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 4;
				break;

			case LHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 1;
				break;

			case LHADD_U_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 0;
				break;

			case SWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 4;
				break;

			case SHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 2;
				break;
			}
			break;
#endif /* CHIP_HAS_WH64() */
		}

		if (state->update) {
			/*
			 * Get an available register.  We start with a
			 * bitmask with 1's for available registers.
			 * We truncate to the low 32 registers since
			 * we are guaranteed to have set bits in the
			 * low 32 bits, then use ctz to pick the first.
			 */
			u32 mask = (u32) ~((1ULL << get_Dest_X0(bundle)) |
					   (1ULL << get_SrcA_X0(bundle)) |
					   (1ULL << get_SrcB_X0(bundle)) |
					   (1ULL << target_reg));
			temp_reg = __builtin_ctz(mask);
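			/*
			 * E.g. if the X0 slot uses r0, r1 and r2 and
			 * target_reg is lr, bits 0-2 are clear (lr's bit
			 * lies above 31 and truncates away), so ctz
			 * selects r3 as the scratch register.
			 */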
			state->update_reg = temp_reg;
			state->update_value = regs->regs[temp_reg];
			regs->regs[temp_reg] = (unsigned long) (pc+1);
			regs->flags |= PT_FLAGS_RESTORE_REGS;
			bundle = move_X1(bundle, target_reg, temp_reg);
		}
	} else {
		int opcode = get_Opcode_Y2(bundle);

		switch (opcode) {
		/* loads */
		case LH_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 1;
			break;

		case LH_U_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 0;
			break;

		case LW_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 4;
			break;

		/* stores */
		case SH_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 2;
			break;

		case SW_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 4;
			break;
		}
	}

	/*
	 * Check if we need to rewrite an unaligned load/store.
	 * Returning zero is a special value meaning we need to SIGSEGV.
	 */
	if (mem_op != MEMOP_NONE && unaligned_fixup >= 0) {
		bundle = rewrite_load_store_unaligned(state, bundle, regs,
						      mem_op, size, sign_ext);
		if (bundle == 0)
			return;
	}

	/* write the bundle to our execution area */
	buffer = state->buffer;
	err = __put_user(bundle, buffer++);

	/*
	 * If we're really single-stepping, we take an INT_ILL after.
	 * If we're just handling an unaligned access, we can just
	 * jump directly back to where we were in user code.
	 */
	if (is_single_step) {
		err |= __put_user(__single_step_ill_insn, buffer++);
		err |= __put_user(__single_step_ill_insn, buffer++);
	} else {
		long delta;

		if (state->update) {
			/* We have some state to update; do it inline */
			int ha16;
			bundle = __single_step_addli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_Imm16_X1(state->update_value);
			err |= __put_user(bundle, buffer++);
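
			/*
			 * addli sign-extends its 16-bit immediate, so the
			 * high half is biased by 0x8000 before shifting:
			 * e.g. for update_value 0x12348765, addli leaves
			 * 0xffff8765 and auli adds 0x1235 << 16, which
			 * wraps back to 0x12348765.
			 */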
			bundle = __single_step_auli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_SrcA_X1(state->update_reg);
			ha16 = (state->update_value + 0x8000) >> 16;
			bundle |= create_Imm16_X1(ha16);
			err |= __put_user(bundle, buffer++);
			state->update = 0;
		}

		/* End with a jump back to the next instruction */
		delta = ((regs->pc + TILE_BUNDLE_SIZE_IN_BYTES) -
			(unsigned long)buffer) >>
			TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES;
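		/*
		 * delta counts 8-byte bundles from the slot that will
		 * hold the jump (buffer already points there) to the
		 * bundle after the original pc.
		 */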
		bundle = __single_step_j_insn;
		bundle |= create_JOffLong_X1(delta);
		err |= __put_user(bundle, buffer++);
	}

	if (err) {
		pr_err("Fault when writing to single-step buffer\n");
		return;
	}

	/*
	 * Flush the buffer.
	 * We do a local flush only, since this is a thread-specific buffer.
	 */
	__flush_icache_range((unsigned long)state->buffer,
			     (unsigned long)buffer);

	/* Indicate enabled */
	state->is_enabled = is_single_step;
	regs->pc = (unsigned long)state->buffer;

	/* Fault immediately if we are coming back from a syscall. */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc += 8;
}

#endif /* !__tilegx__ */