2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 1992 Ross Biro
7 * Copyright (C) Linus Torvalds
8 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
9 * Copyright (C) 1996 David S. Miller
10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
11 * Copyright (C) 1999 MIPS Technologies, Inc.
12 * Copyright (C) 2000 Ulf Carlsson
14 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
15 * processes.
17 #include <linux/compiler.h>
18 #include <linux/context_tracking.h>
19 #include <linux/elf.h>
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
23 #include <linux/errno.h>
24 #include <linux/ptrace.h>
25 #include <linux/regset.h>
26 #include <linux/smp.h>
27 #include <linux/security.h>
28 #include <linux/tracehook.h>
29 #include <linux/audit.h>
30 #include <linux/seccomp.h>
31 #include <linux/ftrace.h>
33 #include <asm/byteorder.h>
37 #include <asm/mipsregs.h>
38 #include <asm/mipsmtregs.h>
39 #include <asm/pgtable.h>
41 #include <asm/syscall.h>
42 #include <asm/uaccess.h>
43 #include <asm/bootinfo.h>
46 #define CREATE_TRACE_POINTS
47 #include <trace/events/syscalls.h>
49 static void init_fp_ctx(struct task_struct *target)
51 /* If FP has been used then the target already has context */
52 if (tsk_used_math(target))
55 /* Begin with data registers set to all 1s... */
56 memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
58 /* ...and FCSR zeroed */
59 target->thread.fpu.fcr31 = 0;
62 * Record that the target has "used" math, such that the context
63 * just initialised, and any modifications made by the caller,
66 set_stopped_child_used_math(target);
70 * Called by kernel/ptrace.c when detaching..
72 * Make sure single step bits etc are not set.
74 void ptrace_disable(struct task_struct *child)
76 /* Don't load the watchpoint registers for the ex-child. */
77 clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
81 * Read a general register set. We always use the 64-bit format, even
82 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
83 * Registers are sign extended to fill the available space.
85 int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
90 if (!access_ok(VERIFY_WRITE, data, 38 * 8))
93 regs = task_pt_regs(child);
95 for (i = 0; i < 32; i++)
96 __put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
97 __put_user((long)regs->lo, (__s64 __user *)&data->lo);
98 __put_user((long)regs->hi, (__s64 __user *)&data->hi);
99 __put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
100 __put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
101 __put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
102 __put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);
108 * Write a general register set. As for PTRACE_GETREGS, we always use
109 * the 64-bit format. On a 32-bit kernel only the lower order half
110 * (according to endianness) will be used.
112 int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
114 struct pt_regs *regs;
117 if (!access_ok(VERIFY_READ, data, 38 * 8))
120 regs = task_pt_regs(child);
122 for (i = 0; i < 32; i++)
123 __get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
124 __get_user(regs->lo, (__s64 __user *)&data->lo);
125 __get_user(regs->hi, (__s64 __user *)&data->hi);
126 __get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
128 /* badvaddr, status, and cause may not be written. */
133 int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
137 if (!access_ok(VERIFY_WRITE, data, 33 * 8))
140 if (tsk_used_math(child)) {
141 union fpureg *fregs = get_fpu_regs(child);
142 for (i = 0; i < 32; i++)
143 __put_user(get_fpr64(&fregs[i], 0),
144 i + (__u64 __user *)data);
146 for (i = 0; i < 32; i++)
147 __put_user((__u64) -1, i + (__u64 __user *) data);
150 __put_user(child->thread.fpu.fcr31, data + 64);
151 __put_user(boot_cpu_data.fpu_id, data + 65);
156 int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
162 if (!access_ok(VERIFY_READ, data, 33 * 8))
166 fregs = get_fpu_regs(child);
168 for (i = 0; i < 32; i++) {
169 __get_user(fpr_val, i + (__u64 __user *)data);
170 set_fpr64(&fregs[i], 0, fpr_val);
173 __get_user(child->thread.fpu.fcr31, data + 64);
174 child->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
176 /* FIR may not be written. */
181 int ptrace_get_watch_regs(struct task_struct *child,
182 struct pt_watch_regs __user *addr)
184 enum pt_watch_style style;
187 if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
189 if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
193 style = pt_watch_style_mips32;
194 #define WATCH_STYLE mips32
196 style = pt_watch_style_mips64;
197 #define WATCH_STYLE mips64
200 __put_user(style, &addr->style);
201 __put_user(boot_cpu_data.watch_reg_use_cnt,
202 &addr->WATCH_STYLE.num_valid);
203 for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
204 __put_user(child->thread.watch.mips3264.watchlo[i],
205 &addr->WATCH_STYLE.watchlo[i]);
206 __put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
207 &addr->WATCH_STYLE.watchhi[i]);
208 __put_user(boot_cpu_data.watch_reg_masks[i],
209 &addr->WATCH_STYLE.watch_masks[i]);
212 __put_user(0, &addr->WATCH_STYLE.watchlo[i]);
213 __put_user(0, &addr->WATCH_STYLE.watchhi[i]);
214 __put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
220 int ptrace_set_watch_regs(struct task_struct *child,
221 struct pt_watch_regs __user *addr)
224 int watch_active = 0;
225 unsigned long lt[NUM_WATCH_REGS];
226 u16 ht[NUM_WATCH_REGS];
228 if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
230 if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
232 /* Check the values. */
233 for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
234 __get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
236 if (lt[i] & __UA_LIMIT)
239 if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
240 if (lt[i] & 0xffffffff80000000UL)
243 if (lt[i] & __UA_LIMIT)
247 __get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
252 for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
255 child->thread.watch.mips3264.watchlo[i] = lt[i];
257 child->thread.watch.mips3264.watchhi[i] = ht[i];
261 set_tsk_thread_flag(child, TIF_LOAD_WATCH);
263 clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
268 /* regset get/set implementations */
270 #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
272 static int gpr32_get(struct task_struct *target,
273 const struct user_regset *regset,
274 unsigned int pos, unsigned int count,
275 void *kbuf, void __user *ubuf)
277 struct pt_regs *regs = task_pt_regs(target);
278 u32 uregs[ELF_NGREG] = {};
281 for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
282 /* k0/k1 are copied as zero. */
283 if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
286 uregs[i] = regs->regs[i - MIPS32_EF_R0];
289 uregs[MIPS32_EF_LO] = regs->lo;
290 uregs[MIPS32_EF_HI] = regs->hi;
291 uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
292 uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
293 uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
294 uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
296 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
300 static int gpr32_set(struct task_struct *target,
301 const struct user_regset *regset,
302 unsigned int pos, unsigned int count,
303 const void *kbuf, const void __user *ubuf)
305 struct pt_regs *regs = task_pt_regs(target);
306 u32 uregs[ELF_NGREG];
307 unsigned start, num_regs, i;
310 start = pos / sizeof(u32);
311 num_regs = count / sizeof(u32);
313 if (start + num_regs > ELF_NGREG)
316 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
321 for (i = start; i < num_regs; i++) {
323 * Cast all values to signed here so that if this is a 64-bit
324 * kernel, the supplied 32-bit values will be sign extended.
327 case MIPS32_EF_R1 ... MIPS32_EF_R25:
328 /* k0/k1 are ignored. */
329 case MIPS32_EF_R28 ... MIPS32_EF_R31:
330 regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
333 regs->lo = (s32)uregs[i];
336 regs->hi = (s32)uregs[i];
338 case MIPS32_EF_CP0_EPC:
339 regs->cp0_epc = (s32)uregs[i];
347 #endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
351 static int gpr64_get(struct task_struct *target,
352 const struct user_regset *regset,
353 unsigned int pos, unsigned int count,
354 void *kbuf, void __user *ubuf)
356 struct pt_regs *regs = task_pt_regs(target);
357 u64 uregs[ELF_NGREG] = {};
360 for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
361 /* k0/k1 are copied as zero. */
362 if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
365 uregs[i] = regs->regs[i - MIPS64_EF_R0];
368 uregs[MIPS64_EF_LO] = regs->lo;
369 uregs[MIPS64_EF_HI] = regs->hi;
370 uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
371 uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
372 uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
373 uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
375 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
379 static int gpr64_set(struct task_struct *target,
380 const struct user_regset *regset,
381 unsigned int pos, unsigned int count,
382 const void *kbuf, const void __user *ubuf)
384 struct pt_regs *regs = task_pt_regs(target);
385 u64 uregs[ELF_NGREG];
386 unsigned start, num_regs, i;
389 start = pos / sizeof(u64);
390 num_regs = count / sizeof(u64);
392 if (start + num_regs > ELF_NGREG)
395 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
400 for (i = start; i < num_regs; i++) {
402 case MIPS64_EF_R1 ... MIPS64_EF_R25:
403 /* k0/k1 are ignored. */
404 case MIPS64_EF_R28 ... MIPS64_EF_R31:
405 regs->regs[i - MIPS64_EF_R0] = uregs[i];
413 case MIPS64_EF_CP0_EPC:
414 regs->cp0_epc = uregs[i];
422 #endif /* CONFIG_64BIT */
424 static int fpr_get(struct task_struct *target,
425 const struct user_regset *regset,
426 unsigned int pos, unsigned int count,
427 void *kbuf, void __user *ubuf)
435 if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
436 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
438 0, sizeof(elf_fpregset_t));
440 for (i = 0; i < NUM_FPU_REGS; i++) {
441 fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
442 err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
443 &fpr_val, i * sizeof(elf_fpreg_t),
444 (i + 1) * sizeof(elf_fpreg_t));
452 static int fpr_set(struct task_struct *target,
453 const struct user_regset *regset,
454 unsigned int pos, unsigned int count,
455 const void *kbuf, const void __user *ubuf)
465 if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
466 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
468 0, sizeof(elf_fpregset_t));
470 for (i = 0; i < NUM_FPU_REGS; i++) {
471 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
472 &fpr_val, i * sizeof(elf_fpreg_t),
473 (i + 1) * sizeof(elf_fpreg_t));
476 set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
487 #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
489 static const struct user_regset mips_regsets[] = {
491 .core_note_type = NT_PRSTATUS,
493 .size = sizeof(unsigned int),
494 .align = sizeof(unsigned int),
499 .core_note_type = NT_PRFPREG,
501 .size = sizeof(elf_fpreg_t),
502 .align = sizeof(elf_fpreg_t),
508 static const struct user_regset_view user_mips_view = {
510 .e_machine = ELF_ARCH,
511 .ei_osabi = ELF_OSABI,
512 .regsets = mips_regsets,
513 .n = ARRAY_SIZE(mips_regsets),
516 #endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
520 static const struct user_regset mips64_regsets[] = {
522 .core_note_type = NT_PRSTATUS,
524 .size = sizeof(unsigned long),
525 .align = sizeof(unsigned long),
530 .core_note_type = NT_PRFPREG,
532 .size = sizeof(elf_fpreg_t),
533 .align = sizeof(elf_fpreg_t),
539 static const struct user_regset_view user_mips64_view = {
541 .e_machine = ELF_ARCH,
542 .ei_osabi = ELF_OSABI,
543 .regsets = mips64_regsets,
544 .n = ARRAY_SIZE(mips64_regsets),
547 #endif /* CONFIG_64BIT */
549 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
552 return &user_mips_view;
554 #ifdef CONFIG_MIPS32_O32
555 if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
556 return &user_mips_view;
558 return &user_mips64_view;
562 long arch_ptrace(struct task_struct *child, long request,
563 unsigned long addr, unsigned long data)
566 void __user *addrp = (void __user *) addr;
567 void __user *datavp = (void __user *) data;
568 unsigned long __user *datalp = (void __user *) data;
571 /* when I and D space are separate, these will need to be fixed. */
572 case PTRACE_PEEKTEXT: /* read word at location addr. */
573 case PTRACE_PEEKDATA:
574 ret = generic_ptrace_peekdata(child, addr, data);
577 /* Read the word at location addr in the USER area. */
578 case PTRACE_PEEKUSR: {
579 struct pt_regs *regs;
581 unsigned long tmp = 0;
583 regs = task_pt_regs(child);
584 ret = 0; /* Default return value. */
588 tmp = regs->regs[addr];
590 case FPR_BASE ... FPR_BASE + 31:
591 if (!tsk_used_math(child)) {
592 /* FP not yet used */
596 fregs = get_fpu_regs(child);
599 if (test_thread_flag(TIF_32BIT_FPREGS)) {
601 * The odd registers are actually the high
602 * order bits of the values stored in the even
603 * registers - unless we're using r2k_switch.S.
605 tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
610 tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
616 tmp = regs->cp0_cause;
619 tmp = regs->cp0_badvaddr;
627 #ifdef CONFIG_CPU_HAS_SMARTMIPS
633 tmp = child->thread.fpu.fcr31;
636 /* implementation / version register */
637 tmp = boot_cpu_data.fpu_id;
639 case DSP_BASE ... DSP_BASE + 5: {
647 dregs = __get_dsp_regs(child);
648 tmp = (unsigned long) (dregs[addr - DSP_BASE]);
657 tmp = child->thread.dsp.dspcontrol;
664 ret = put_user(tmp, datalp);
668 /* when I and D space are separate, this will have to be fixed. */
669 case PTRACE_POKETEXT: /* write the word at location addr. */
670 case PTRACE_POKEDATA:
671 ret = generic_ptrace_pokedata(child, addr, data);
674 case PTRACE_POKEUSR: {
675 struct pt_regs *regs;
677 regs = task_pt_regs(child);
681 regs->regs[addr] = data;
683 case FPR_BASE ... FPR_BASE + 31: {
684 union fpureg *fregs = get_fpu_regs(child);
688 if (test_thread_flag(TIF_32BIT_FPREGS)) {
690 * The odd registers are actually the high
691 * order bits of the values stored in the even
692 * registers - unless we're using r2k_switch.S.
694 set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
699 set_fpr64(&fregs[addr - FPR_BASE], 0, data);
703 regs->cp0_epc = data;
711 #ifdef CONFIG_CPU_HAS_SMARTMIPS
717 child->thread.fpu.fcr31 = data & ~FPU_CSR_ALL_X;
719 case DSP_BASE ... DSP_BASE + 5: {
727 dregs = __get_dsp_regs(child);
728 dregs[addr - DSP_BASE] = data;
736 child->thread.dsp.dspcontrol = data;
739 /* The rest are not allowed. */
747 ret = ptrace_getregs(child, datavp);
751 ret = ptrace_setregs(child, datavp);
754 case PTRACE_GETFPREGS:
755 ret = ptrace_getfpregs(child, datavp);
758 case PTRACE_SETFPREGS:
759 ret = ptrace_setfpregs(child, datavp);
762 case PTRACE_GET_THREAD_AREA:
763 ret = put_user(task_thread_info(child)->tp_value, datalp);
766 case PTRACE_GET_WATCH_REGS:
767 ret = ptrace_get_watch_regs(child, addrp);
770 case PTRACE_SET_WATCH_REGS:
771 ret = ptrace_set_watch_regs(child, addrp);
775 ret = ptrace_request(child, request, addr, data);
783 * Notification of system call entry/exit
784 * - triggered by current->work.syscall_trace
786 asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
791 current_thread_info()->syscall = syscall;
793 if (secure_computing() == -1)
796 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
797 tracehook_report_syscall_entry(regs))
800 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
801 trace_sys_enter(regs, regs->regs[2]);
803 audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
804 regs->regs[6], regs->regs[7]);
809 * Notification of system call entry/exit
810 * - triggered by current->work.syscall_trace
812 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
815 * We may come here right after calling schedule_user()
816 * or do_notify_resume(), in which case we can be in RCU
821 audit_syscall_exit(regs);
823 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
824 trace_sys_exit(regs, regs->regs[2]);
826 if (test_thread_flag(TIF_SYSCALL_TRACE))
827 tracehook_report_syscall_exit(regs, 0);