/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.
 * Copyright (C) 2000 Ulf Carlsson
 *
 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
 * processes.
 */
17 #include <linux/compiler.h>
18 #include <linux/context_tracking.h>
19 #include <linux/elf.h>
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
23 #include <linux/errno.h>
24 #include <linux/ptrace.h>
25 #include <linux/regset.h>
26 #include <linux/smp.h>
27 #include <linux/security.h>
28 #include <linux/tracehook.h>
29 #include <linux/audit.h>
30 #include <linux/seccomp.h>
31 #include <linux/ftrace.h>
33 #include <asm/byteorder.h>
35 #include <asm/cpu-info.h>
38 #include <asm/mipsregs.h>
39 #include <asm/mipsmtregs.h>
40 #include <asm/pgtable.h>
42 #include <asm/syscall.h>
43 #include <asm/uaccess.h>
44 #include <asm/bootinfo.h>
47 #define CREATE_TRACE_POINTS
48 #include <trace/events/syscalls.h>
50 static void init_fp_ctx(struct task_struct *target)
52 /* If FP has been used then the target already has context */
53 if (tsk_used_math(target))
56 /* Begin with data registers set to all 1s... */
57 memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
59 /* ...and FCSR zeroed */
60 target->thread.fpu.fcr31 = 0;
63 * Record that the target has "used" math, such that the context
64 * just initialised, and any modifications made by the caller,
67 set_stopped_child_used_math(target);
71 * Called by kernel/ptrace.c when detaching..
73 * Make sure single step bits etc are not set.
75 void ptrace_disable(struct task_struct *child)
77 /* Don't load the watchpoint registers for the ex-child. */
78 clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
82 * Read a general register set. We always use the 64-bit format, even
83 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
84 * Registers are sign extended to fill the available space.
86 int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
91 if (!access_ok(VERIFY_WRITE, data, 38 * 8))
94 regs = task_pt_regs(child);
96 for (i = 0; i < 32; i++)
97 __put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
98 __put_user((long)regs->lo, (__s64 __user *)&data->lo);
99 __put_user((long)regs->hi, (__s64 __user *)&data->hi);
100 __put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
101 __put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
102 __put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
103 __put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);
109 * Write a general register set. As for PTRACE_GETREGS, we always use
110 * the 64-bit format. On a 32-bit kernel only the lower order half
111 * (according to endianness) will be used.
113 int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
115 struct pt_regs *regs;
118 if (!access_ok(VERIFY_READ, data, 38 * 8))
121 regs = task_pt_regs(child);
123 for (i = 0; i < 32; i++)
124 __get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
125 __get_user(regs->lo, (__s64 __user *)&data->lo);
126 __get_user(regs->hi, (__s64 __user *)&data->hi);
127 __get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
129 /* badvaddr, status, and cause may not be written. */
134 int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
138 if (!access_ok(VERIFY_WRITE, data, 33 * 8))
141 if (tsk_used_math(child)) {
142 union fpureg *fregs = get_fpu_regs(child);
143 for (i = 0; i < 32; i++)
144 __put_user(get_fpr64(&fregs[i], 0),
145 i + (__u64 __user *)data);
147 for (i = 0; i < 32; i++)
148 __put_user((__u64) -1, i + (__u64 __user *) data);
151 __put_user(child->thread.fpu.fcr31, data + 64);
152 __put_user(boot_cpu_data.fpu_id, data + 65);
157 int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
166 if (!access_ok(VERIFY_READ, data, 33 * 8))
170 fregs = get_fpu_regs(child);
172 for (i = 0; i < 32; i++) {
173 __get_user(fpr_val, i + (__u64 __user *)data);
174 set_fpr64(&fregs[i], 0, fpr_val);
177 __get_user(value, data + 64);
178 fcr31 = child->thread.fpu.fcr31;
179 mask = boot_cpu_data.fpu_msk31;
180 child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
182 /* FIR may not be written. */
187 int ptrace_get_watch_regs(struct task_struct *child,
188 struct pt_watch_regs __user *addr)
190 enum pt_watch_style style;
193 if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
195 if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
199 style = pt_watch_style_mips32;
200 #define WATCH_STYLE mips32
202 style = pt_watch_style_mips64;
203 #define WATCH_STYLE mips64
206 __put_user(style, &addr->style);
207 __put_user(boot_cpu_data.watch_reg_use_cnt,
208 &addr->WATCH_STYLE.num_valid);
209 for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
210 __put_user(child->thread.watch.mips3264.watchlo[i],
211 &addr->WATCH_STYLE.watchlo[i]);
212 __put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
213 &addr->WATCH_STYLE.watchhi[i]);
214 __put_user(boot_cpu_data.watch_reg_masks[i],
215 &addr->WATCH_STYLE.watch_masks[i]);
218 __put_user(0, &addr->WATCH_STYLE.watchlo[i]);
219 __put_user(0, &addr->WATCH_STYLE.watchhi[i]);
220 __put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
226 int ptrace_set_watch_regs(struct task_struct *child,
227 struct pt_watch_regs __user *addr)
230 int watch_active = 0;
231 unsigned long lt[NUM_WATCH_REGS];
232 u16 ht[NUM_WATCH_REGS];
234 if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
236 if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
238 /* Check the values. */
239 for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
240 __get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
242 if (lt[i] & __UA_LIMIT)
245 if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
246 if (lt[i] & 0xffffffff80000000UL)
249 if (lt[i] & __UA_LIMIT)
253 __get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
258 for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
261 child->thread.watch.mips3264.watchlo[i] = lt[i];
263 child->thread.watch.mips3264.watchhi[i] = ht[i];
267 set_tsk_thread_flag(child, TIF_LOAD_WATCH);
269 clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
274 /* regset get/set implementations */
276 #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
278 static int gpr32_get(struct task_struct *target,
279 const struct user_regset *regset,
280 unsigned int pos, unsigned int count,
281 void *kbuf, void __user *ubuf)
283 struct pt_regs *regs = task_pt_regs(target);
284 u32 uregs[ELF_NGREG] = {};
287 for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
288 /* k0/k1 are copied as zero. */
289 if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
292 uregs[i] = regs->regs[i - MIPS32_EF_R0];
295 uregs[MIPS32_EF_LO] = regs->lo;
296 uregs[MIPS32_EF_HI] = regs->hi;
297 uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
298 uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
299 uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
300 uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
302 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
306 static int gpr32_set(struct task_struct *target,
307 const struct user_regset *regset,
308 unsigned int pos, unsigned int count,
309 const void *kbuf, const void __user *ubuf)
311 struct pt_regs *regs = task_pt_regs(target);
312 u32 uregs[ELF_NGREG];
313 unsigned start, num_regs, i;
316 start = pos / sizeof(u32);
317 num_regs = count / sizeof(u32);
319 if (start + num_regs > ELF_NGREG)
322 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
327 for (i = start; i < num_regs; i++) {
329 * Cast all values to signed here so that if this is a 64-bit
330 * kernel, the supplied 32-bit values will be sign extended.
333 case MIPS32_EF_R1 ... MIPS32_EF_R25:
334 /* k0/k1 are ignored. */
335 case MIPS32_EF_R28 ... MIPS32_EF_R31:
336 regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
339 regs->lo = (s32)uregs[i];
342 regs->hi = (s32)uregs[i];
344 case MIPS32_EF_CP0_EPC:
345 regs->cp0_epc = (s32)uregs[i];
353 #endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
357 static int gpr64_get(struct task_struct *target,
358 const struct user_regset *regset,
359 unsigned int pos, unsigned int count,
360 void *kbuf, void __user *ubuf)
362 struct pt_regs *regs = task_pt_regs(target);
363 u64 uregs[ELF_NGREG] = {};
366 for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
367 /* k0/k1 are copied as zero. */
368 if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
371 uregs[i] = regs->regs[i - MIPS64_EF_R0];
374 uregs[MIPS64_EF_LO] = regs->lo;
375 uregs[MIPS64_EF_HI] = regs->hi;
376 uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
377 uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
378 uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
379 uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
381 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
385 static int gpr64_set(struct task_struct *target,
386 const struct user_regset *regset,
387 unsigned int pos, unsigned int count,
388 const void *kbuf, const void __user *ubuf)
390 struct pt_regs *regs = task_pt_regs(target);
391 u64 uregs[ELF_NGREG];
392 unsigned start, num_regs, i;
395 start = pos / sizeof(u64);
396 num_regs = count / sizeof(u64);
398 if (start + num_regs > ELF_NGREG)
401 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
406 for (i = start; i < num_regs; i++) {
408 case MIPS64_EF_R1 ... MIPS64_EF_R25:
409 /* k0/k1 are ignored. */
410 case MIPS64_EF_R28 ... MIPS64_EF_R31:
411 regs->regs[i - MIPS64_EF_R0] = uregs[i];
419 case MIPS64_EF_CP0_EPC:
420 regs->cp0_epc = uregs[i];
428 #endif /* CONFIG_64BIT */
430 static int fpr_get(struct task_struct *target,
431 const struct user_regset *regset,
432 unsigned int pos, unsigned int count,
433 void *kbuf, void __user *ubuf)
441 if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
442 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
444 0, sizeof(elf_fpregset_t));
446 for (i = 0; i < NUM_FPU_REGS; i++) {
447 fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
448 err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
449 &fpr_val, i * sizeof(elf_fpreg_t),
450 (i + 1) * sizeof(elf_fpreg_t));
458 static int fpr_set(struct task_struct *target,
459 const struct user_regset *regset,
460 unsigned int pos, unsigned int count,
461 const void *kbuf, const void __user *ubuf)
471 if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
472 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
474 0, sizeof(elf_fpregset_t));
476 for (i = 0; i < NUM_FPU_REGS; i++) {
477 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
478 &fpr_val, i * sizeof(elf_fpreg_t),
479 (i + 1) * sizeof(elf_fpreg_t));
482 set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
493 #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
495 static const struct user_regset mips_regsets[] = {
497 .core_note_type = NT_PRSTATUS,
499 .size = sizeof(unsigned int),
500 .align = sizeof(unsigned int),
505 .core_note_type = NT_PRFPREG,
507 .size = sizeof(elf_fpreg_t),
508 .align = sizeof(elf_fpreg_t),
514 static const struct user_regset_view user_mips_view = {
516 .e_machine = ELF_ARCH,
517 .ei_osabi = ELF_OSABI,
518 .regsets = mips_regsets,
519 .n = ARRAY_SIZE(mips_regsets),
522 #endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
526 static const struct user_regset mips64_regsets[] = {
528 .core_note_type = NT_PRSTATUS,
530 .size = sizeof(unsigned long),
531 .align = sizeof(unsigned long),
536 .core_note_type = NT_PRFPREG,
538 .size = sizeof(elf_fpreg_t),
539 .align = sizeof(elf_fpreg_t),
545 static const struct user_regset_view user_mips64_view = {
547 .e_machine = ELF_ARCH,
548 .ei_osabi = ELF_OSABI,
549 .regsets = mips64_regsets,
550 .n = ARRAY_SIZE(mips64_regsets),
553 #endif /* CONFIG_64BIT */
555 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
558 return &user_mips_view;
560 #ifdef CONFIG_MIPS32_O32
561 if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
562 return &user_mips_view;
564 return &user_mips64_view;
568 long arch_ptrace(struct task_struct *child, long request,
569 unsigned long addr, unsigned long data)
572 void __user *addrp = (void __user *) addr;
573 void __user *datavp = (void __user *) data;
574 unsigned long __user *datalp = (void __user *) data;
577 /* when I and D space are separate, these will need to be fixed. */
578 case PTRACE_PEEKTEXT: /* read word at location addr. */
579 case PTRACE_PEEKDATA:
580 ret = generic_ptrace_peekdata(child, addr, data);
583 /* Read the word at location addr in the USER area. */
584 case PTRACE_PEEKUSR: {
585 struct pt_regs *regs;
587 unsigned long tmp = 0;
589 regs = task_pt_regs(child);
590 ret = 0; /* Default return value. */
594 tmp = regs->regs[addr];
596 case FPR_BASE ... FPR_BASE + 31:
597 if (!tsk_used_math(child)) {
598 /* FP not yet used */
602 fregs = get_fpu_regs(child);
605 if (test_thread_flag(TIF_32BIT_FPREGS)) {
607 * The odd registers are actually the high
608 * order bits of the values stored in the even
609 * registers - unless we're using r2k_switch.S.
611 tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
616 tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
622 tmp = regs->cp0_cause;
625 tmp = regs->cp0_badvaddr;
633 #ifdef CONFIG_CPU_HAS_SMARTMIPS
639 tmp = child->thread.fpu.fcr31;
642 /* implementation / version register */
643 tmp = boot_cpu_data.fpu_id;
645 case DSP_BASE ... DSP_BASE + 5: {
653 dregs = __get_dsp_regs(child);
654 tmp = (unsigned long) (dregs[addr - DSP_BASE]);
663 tmp = child->thread.dsp.dspcontrol;
670 ret = put_user(tmp, datalp);
674 /* when I and D space are separate, this will have to be fixed. */
675 case PTRACE_POKETEXT: /* write the word at location addr. */
676 case PTRACE_POKEDATA:
677 ret = generic_ptrace_pokedata(child, addr, data);
680 case PTRACE_POKEUSR: {
681 struct pt_regs *regs;
683 regs = task_pt_regs(child);
687 regs->regs[addr] = data;
689 case FPR_BASE ... FPR_BASE + 31: {
690 union fpureg *fregs = get_fpu_regs(child);
694 if (test_thread_flag(TIF_32BIT_FPREGS)) {
696 * The odd registers are actually the high
697 * order bits of the values stored in the even
698 * registers - unless we're using r2k_switch.S.
700 set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
705 set_fpr64(&fregs[addr - FPR_BASE], 0, data);
709 regs->cp0_epc = data;
717 #ifdef CONFIG_CPU_HAS_SMARTMIPS
723 child->thread.fpu.fcr31 = data & ~FPU_CSR_ALL_X;
725 case DSP_BASE ... DSP_BASE + 5: {
733 dregs = __get_dsp_regs(child);
734 dregs[addr - DSP_BASE] = data;
742 child->thread.dsp.dspcontrol = data;
745 /* The rest are not allowed. */
753 ret = ptrace_getregs(child, datavp);
757 ret = ptrace_setregs(child, datavp);
760 case PTRACE_GETFPREGS:
761 ret = ptrace_getfpregs(child, datavp);
764 case PTRACE_SETFPREGS:
765 ret = ptrace_setfpregs(child, datavp);
768 case PTRACE_GET_THREAD_AREA:
769 ret = put_user(task_thread_info(child)->tp_value, datalp);
772 case PTRACE_GET_WATCH_REGS:
773 ret = ptrace_get_watch_regs(child, addrp);
776 case PTRACE_SET_WATCH_REGS:
777 ret = ptrace_set_watch_regs(child, addrp);
781 ret = ptrace_request(child, request, addr, data);
789 * Notification of system call entry/exit
790 * - triggered by current->work.syscall_trace
792 asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
797 current_thread_info()->syscall = syscall;
799 if (secure_computing() == -1)
802 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
803 tracehook_report_syscall_entry(regs))
806 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
807 trace_sys_enter(regs, regs->regs[2]);
809 audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
810 regs->regs[6], regs->regs[7]);
815 * Notification of system call entry/exit
816 * - triggered by current->work.syscall_trace
818 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
821 * We may come here right after calling schedule_user()
822 * or do_notify_resume(), in which case we can be in RCU
827 audit_syscall_exit(regs);
829 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
830 trace_sys_exit(regs, regs->regs[2]);
832 if (test_thread_flag(TIF_SYSCALL_TRACE))
833 tracehook_report_syscall_exit(regs, 0);