/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 1999-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2006 Intel Co
 *  2006-08-12	- IA64 Native Utrace implementation support added by
 *	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * Derived from the x86 and Alpha versions.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/tracehook.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
#endif

#include "entry.h"
/*
 * Bits in the PSR that we allow ptrace() to change:
 *	be, up, ac, mfl, mfh (the user mask; five bits total)
 *	db (debug breakpoint fault; one bit)
 *	id (instruction debug fault disable; one bit)
 *	dd (data debug fault disable; one bit)
 *	ri (restart instruction; two bits)
 *	is (instruction set; one bit)
 */
#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS \
		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)
#define MASK(nbits)	((1UL << (nbits)) - 1)	/* mask with NBITS bits set */
#define PFM_MASK	MASK(38)
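
/*
 * For example, MASK(3) == 0x7 and MASK(38) == 0x3fffffffff.  PFM_MASK
 * covers the 38 architected bits of the frame marker (sof, sol, sor
 * and the rotating-register bases) as stored in cr.ifs.
 */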
#define PTRACE_DEBUG	0

#if PTRACE_DEBUG
# define dprintk(format...)	printk(format)
#else
# define dprintk(format...)
#endif
/* Return TRUE if PT was created due to kernel-entry via a system-call.  */

static inline int
in_syscall (struct pt_regs *pt)
{
	return (long) pt->cr_ifs >= 0;
}
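
/*
 * This works because pt->cr_ifs has bit 63 (the cr.ifs valid bit) set
 * for non-syscall kernel entries (see convert_to_non_syscall() below,
 * which does pt->cr_ifs = (1UL << 63) | cfm), so the value is
 * negative, viewed as a signed long, exactly when the entry was not a
 * syscall.
 */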
/*
 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
 * bitset where bit i is set iff the NaT bit of register i is set.
 */
unsigned long
ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
{
#	define GET_BITS(first, last, unat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		unsigned long dist;					\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotr(unat, dist) & mask;				\
	})
	unsigned long val;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct_pt_regs changes, this code MUST be updated.
	 */
	val  = GET_BITS( 1,  1, scratch_unat);
	val |= GET_BITS( 2,  3, scratch_unat);
	val |= GET_BITS(12, 13, scratch_unat);
	val |= GET_BITS(14, 14, scratch_unat);
	val |= GET_BITS(15, 15, scratch_unat);
	val |= GET_BITS( 8, 11, scratch_unat);
	val |= GET_BITS(16, 31, scratch_unat);
	return val;

#	undef GET_BITS
}
/*
 * Set the NaT bits for the scratch registers according to NAT and
 * return the resulting unat (assuming the scratch registers are
 * stored in PT).
 */
unsigned long
ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
{
#	define PUT_BITS(first, last, nat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		long dist;						\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotl(nat & mask, dist);				\
	})
	unsigned long scratch_unat;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct_pt_regs changes, this code MUST be updated.
	 */
	scratch_unat  = PUT_BITS( 1,  1, nat);
	scratch_unat |= PUT_BITS( 2,  3, nat);
	scratch_unat |= PUT_BITS(12, 13, nat);
	scratch_unat |= PUT_BITS(14, 14, nat);
	scratch_unat |= PUT_BITS(15, 15, nat);
	scratch_unat |= PUT_BITS( 8, 11, nat);
	scratch_unat |= PUT_BITS(16, 31, nat);

	return scratch_unat;

#	undef PUT_BITS
}
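
/*
 * Worked example of the rotate math above (with a made-up UNAT
 * position, since the real one depends on the layout of struct
 * pt_regs): suppose ia64_unat_pos(&pt->r8) == 13.  Then
 * PUT_BITS(8, 11, nat) computes dist = 13 - 8 = 5 and rotates the NaT
 * bits for r8-r11 left by 5, moving them from positions 8-11 in the
 * NaT bitset to positions 13-16 in the unat word; GET_BITS performs
 * the inverse rotation.
 */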
#define IA64_MLX_TEMPLATE	0x2
#define IA64_MOVL_OPCODE	6
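
/*
 * An IA-64 instruction bundle is 16 bytes: a 5-bit template field in
 * bits 0-4 followed by three 41-bit instruction slots.  The code
 * below reads the first byte of the bundle and compares
 * ((w0 >> 1) & 0xf) against the major template number; 0x2 identifies
 * an MLX bundle, in which slots 1 and 2 together hold a single movl
 * instruction, so slot 2 is not a valid restart point.
 */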
void
ia64_increment_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri + 1;

	if (ri > 2) {
		ri = 0;
		regs->cr_iip += 16;
	} else if (ri == 2) {
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 0;
			regs->cr_iip += 16;
		}
	}
	ia64_psr(regs)->ri = ri;
}
void
ia64_decrement_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri - 1;

	if (ia64_psr(regs)->ri == 0) {
		regs->cr_iip -= 16;
		ri = 2;
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 1;
		}
	}
	ia64_psr(regs)->ri = ri;
}
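
/*
 * Example of the ip arithmetic above: the architectural ip is the
 * 16-byte aligned bundle address in cr.iip plus the slot number in
 * psr.ri (0, 1 or 2).  Stepping forward from slot 2 moves to slot 0
 * of the next bundle (cr_iip += 16); stepping backward from slot 0
 * moves to slot 2 of the previous bundle, or to slot 1 if that bundle
 * is MLX, since an MLX bundle has no independently executable slot 2.
 */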
/*
 * This routine is used to read the RNaT bits that are stored on the
 * kernel backing store.  Since, in general, the alignment of the user
 * and kernel backing stores is different, this is not completely
 * trivial.  In essence, we need to construct the user RNAT based on
 * up to two kernel RNAT values and/or the RNAT value saved in the
 * child's pt_regs.
 *
 * user rbs
 *
 * +--------+ <-- lowest address
 * | slot62 |
 * +--------+
 * | rnat   | 0x....1f8
 * +--------+
 * | slot00 | \
 * +--------+ |
 * | slot01 | > child_regs->ar_rnat
 * +--------+ |
 * | slot02 | /				kernel rbs
 * +--------+				+--------+
 *	    <- child_regs->ar_bspstore	| slot61 | <-- krbs
 * +- - - - +				+--------+
 * | slot62 |				| slot62 |
 * +- - - - +				+--------+
 * |        |				| rnat   |
 * +- - - - +				+--------+
 * |        |				| slot00 |
 * +- - - - +				+--------+
 * | slot00 | \
 * +--------+ |
 * | slot01 | > child_stack->ar_rnat
 * +--------+ |
 * | slot02 | /
 * +--------+
 *	      <--- child_stack->ar_bspstore
 *
 * The way to think of this code is as follows: bit 0 in the user rnat
 * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
 * values.  The kernel rnat value holding this bit is stored in
 * variable rnat0.  rnat1 is loaded with the kernel rnat value that
 * forms the upper bits of the user rnat value.
 *
 * Boundary cases:
 *
 * o when reading the rnat "below" the first rnat slot on the kernel
 *   backing store, rnat0/rnat1 are set to 0 and the low order bits are
 *   merged in from pt->ar_rnat.
 *
 * o when reading the rnat "above" the last rnat slot on the kernel
 *   backing store, rnat0/rnat1 gets its value from sw->ar_rnat.
 */
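
/*
 * Useful invariant for the code below: an RBS address's slot number
 * is ia64_rse_slot_num(addr) == (addr >> 3) & 0x3f, and every slot
 * with slot number 63 is an RNaT collection word holding the NaT bits
 * of the preceding 63 slots.  That is why urnat_addr - 63 (in slot
 * units) is the first user slot covered by the collection word at
 * urnat_addr.
 */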
static unsigned long
get_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
	unsigned long umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	if (urbs_end < urnat_addr)
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
	else
		nbits = 63;
	mask = MASK(nbits);
	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be merged in from pt->ar_rnat */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		urnat = (pt->ar_rnat & umask);
		mask &= ~umask;
		if (!mask)
			return urnat;
	}

	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		rnat0 = sw->ar_rnat;
	else if (rnat0_kaddr > krbs)
		rnat0 = *rnat0_kaddr;
	urnat |= (rnat0 & m) >> shift;

	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		rnat1 = sw->ar_rnat;
	else if (rnat1_kaddr > krbs)
		rnat1 = *rnat1_kaddr;
	urnat |= (rnat1 & m) << (63 - shift);
	return urnat;
}
/*
 * The reverse of get_rnat.
 */
static void
put_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;
	unsigned long cfm, *urbs_kargs;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	urbs_kargs = urbs_end;
	if (in_syscall(pt)) {
		/*
		 * If entered via syscall, don't allow user to set rnat bits
		 * for syscall args.
		 */
		cfm = pt->cr_ifs;
		urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
	}

	if (urbs_kargs >= urnat_addr)
		nbits = 63;
	else {
		if ((urnat_addr - 63) >= urbs_kargs)
			return;
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
	}
	mask = MASK(nbits);

	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be placed in pt->ar_rnat: */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
		mask &= ~umask;
		if (!mask)
			return;
	}
	/*
	 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
	 * rnat slot is ignored, so we don't have to clear it here.
	 */
	rnat0 = (urnat << shift);
	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
	else if (rnat0_kaddr > krbs)
		*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));

	rnat1 = (urnat >> (63 - shift));
	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
	else if (rnat1_kaddr > krbs)
		*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
}
static inline int
on_kernel_rbs (unsigned long addr, unsigned long bspstore,
	       unsigned long urbs_end)
{
	unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
						      urbs_end);
	return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
}
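
/*
 * Note that the upper bound is the RNaT collection slot covering
 * urbs_end rather than urbs_end itself: a debugger may legitimately
 * want the collection word for the last dirty registers, which lives
 * just above the last register slot.
 */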
/*
 * Read a word from the user-level backing store of task CHILD.  ADDR
 * is the user-level address to read the word from, VAL a pointer to
 * the return value, and USER_BSP gives the end of the user-level
 * backing store (i.e., it's the address that would be in ar.bsp after
 * the user executed a "cover" instruction).
 *
 * This routine takes care of accessing the kernel register backing
 * store for those registers that got spilled there.  It also takes
 * care of calculating the appropriate RNaT collection words.
 */
long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long *val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
	struct pt_regs *child_regs;
	size_t copied;
	long ret;

	urbs_end = (long *) user_rbs_end;
	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end)) {
		/*
		 * Attempt to read the RBS in an area that's actually
		 * on the kernel RBS => read the corresponding bits in
		 * the kernel RBS.
		 */
		rnat_addr = ia64_rse_rnat_addr(laddr);
		ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);

		if (laddr == rnat_addr) {
			/* return NaT collection word itself */
			*val = ret;
			return 0;
		}

		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
			/*
			 * It is implementation dependent whether the
			 * data portion of a NaT value gets saved on a
			 * st8.spill or RSE spill (e.g., see EAS 2.6,
			 * 4.4.4.6 Register Spill and Fill).  To get
			 * consistent behavior across all possible
			 * IA-64 implementations, we return zero in
			 * this case.
			 */
			*val = 0;
			return 0;
		}

		if (laddr < urbs_end) {
			/*
			 * The desired word is on the kernel RBS and
			 * is not a NaT.
			 */
			regnum = ia64_rse_num_regs(bspstore, laddr);
			*val = *ia64_rse_skip_regs(krbs, regnum);
			return 0;
		}
	}
	copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
	if (copied != sizeof(ret))
		return -EIO;
	*val = ret;
	return 0;
}
long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr;
	unsigned long *urbs_end = (long *) user_rbs_end;
	struct pt_regs *child_regs;

	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end)) {
		/*
		 * Attempt to write the RBS in an area that's actually
		 * on the kernel RBS => write the corresponding bits
		 * in the kernel RBS.
		 */
		if (ia64_rse_is_rnat_slot(laddr))
			put_rnat(child, child_stack, krbs, laddr, val,
				 urbs_end);
		else {
			if (laddr < urbs_end) {
				regnum = ia64_rse_num_regs(bspstore, laddr);
				*ia64_rse_skip_regs(krbs, regnum) = val;
			}
		}
	} else if (access_process_vm(child, addr, &val, sizeof(val), 1)
		   != sizeof(val))
		return -EIO;
	return 0;
}
/*
 * Calculate the address of the end of the user-level register backing
 * store.  This is the address that would have been stored in ar.bsp
 * if the user had executed a "cover" instruction right before
 * entering the kernel.  If CFMP is not NULL, it is used to return the
 * "current frame mask" that was active at the time the kernel was
 * entered.
 */
unsigned long
ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
		       unsigned long *cfmp)
{
	unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
	long ndirty;

	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	bspstore = (unsigned long *) pt->ar_bspstore;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	if (in_syscall(pt))
		ndirty += (cfm & 0x7f);
	else
		cfm &= ~(1UL << 63);	/* clear valid bit */

	if (cfmp)
		*cfmp = cfm;
	return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
}
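
/*
 * On the loadrs arithmetic above: pt->loadrs holds the ar.rsc.loadrs
 * field in bits 16 and up, so (pt->loadrs >> 16) is the number of
 * dirty bytes on the kernel RBS and (pt->loadrs >> 19) the number of
 * 8-byte slots (including interleaved RNaT collection slots, which
 * ia64_rse_num_regs() then discounts).  For a syscall entry,
 * cfm & 0x7f (sof, the size of the current frame) is added because
 * the syscall's stacked argument registers still belong to the
 * user-visible backing store.
 */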
/*
 * Synchronize (i.e., write) the RSE backing store living in kernel
 * space to the VM of the CHILD task.  SW and PT are the pointers to
 * the switch_stack and pt_regs structures, respectively.
 * USER_RBS_END is the user-level address at which the backing store
 * ends.
 */
long
ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
		    unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from kernel rbs to user rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
		if (ret < 0)
			return ret;
		if (access_process_vm(child, addr, &val, sizeof(val), 1)
		    != sizeof(val))
			return -EIO;
	}
	return 0;
}
static long
ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
		unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from user rbs to kernel rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		if (access_process_vm(child, addr, &val, sizeof(val), 0)
				!= sizeof(val))
			return -EIO;

		ret = ia64_poke(child, sw, user_rbs_end, addr, val);
		if (ret < 0)
			return ret;
	}
	return 0;
}
typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
			   unsigned long, unsigned long);

static void do_sync_rbs(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	unsigned long urbs_end;
	syncfunc_t fn = arg;

	if (unw_unwind_to_user(info) < 0)
		return;
	pt = task_pt_regs(info->task);
	urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);

	fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
}
/*
 * When a thread is stopped (ptraced), the debugger might change the
 * thread's user stack (by writing its memory directly), and we must
 * avoid having the RSE state stored in the kernel override the user
 * stack (the user-space RSE state is the newer one in that case).  To
 * work around the issue, we copy the kernel RBS to the user RBS
 * before the task is stopped, so the user RBS holds up-to-date data;
 * we then copy the user RBS back to the kernel after the task resumes
 * from the traced stop, so the kernel uses the newer RSE state to
 * return to user space.  TIF_RESTORE_RSE is the flag indicating that
 * we need to synchronize the user RSE state back to the kernel.
 */
void ia64_ptrace_stop(void)
{
	if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
		return;
	set_notify_resume(current);
	unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
}
/*
 * This is called to read back the register backing store.
 */
void ia64_sync_krbs(void)
{
	clear_tsk_thread_flag(current, TIF_RESTORE_RSE);

	unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
}
/*
 * After PTRACE_ATTACH, a thread's register backing store area in user
 * space is assumed to contain correct data whenever the thread is
 * stopped.  arch_ptrace_stop takes care of this on tracing stops.
 * But if the child was already stopped for job control when we attach
 * to it, then it might not ever get into ptrace_stop by the time we
 * want to examine the user memory containing the RBS.
 */
void
ptrace_attach_sync_user_rbs (struct task_struct *child)
{
	int stopped = 0;
	struct unw_frame_info info;

	/*
	 * If the child is in TASK_STOPPED, we need to change that to
	 * TASK_TRACED momentarily while we operate on it.  This ensures
	 * that the child won't be woken up and return to user mode while
	 * we are doing the sync.  (It can only be woken up for SIGKILL.)
	 */

	read_lock(&tasklist_lock);
	if (child->sighand) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_STOPPED &&
		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
			set_notify_resume(child);

			child->state = TASK_TRACED;
			stopped = 1;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!stopped)
		return;

	unw_init_from_blocked_task(&info, child);
	do_sync_rbs(&info, ia64_sync_user_rbs);

	/*
	 * Now move the child back into TASK_STOPPED if it should be in a
	 * job control stop, so that SIGCONT can be used to wake it up.
	 */
	read_lock(&tasklist_lock);
	if (child->sighand) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_TRACED &&
		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
			child->state = TASK_STOPPED;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
}
static int
thread_matches (struct task_struct *thread, unsigned long addr)
{
	unsigned long thread_rbs_end;
	struct pt_regs *thread_regs;

	if (ptrace_check_attach(thread, 0) < 0)
		/*
		 * If the thread is not in an attachable state, we'll
		 * ignore it.  The net effect is that if ADDR happens
		 * to overlap with the portion of the thread's
		 * register backing store that is currently residing
		 * on the thread's kernel stack, then ptrace() may end
		 * up accessing a stale value.  But if the thread
		 * isn't stopped, that's a problem anyhow, so we're
		 * doing as well as we can...
		 */
		return 0;

	thread_regs = task_pt_regs(thread);
	thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
	if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
		return 0;

	return 1;	/* looks like we've got a winner */
}
/*
 * Write f32-f127 back to task->thread.fph if it has been modified.
 */
inline void
ia64_flush_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	/*
	 * Prevent migrating this task while
	 * we're fiddling with the FPU state
	 */
	preempt_disable();
	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
		psr->mfh = 0;
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		ia64_save_fpu(&task->thread.fph[0]);
	}
	preempt_enable();
}
/*
 * Sync the fph state of the task so that it can be manipulated
 * through thread.fph.  If necessary, f32-f127 are written back to
 * thread.fph or, if the fph state hasn't been used before, thread.fph
 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 * ensure that the task picks up the state from thread.fph when it
 * executes again.
 */
void
ia64_sync_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	ia64_flush_fph(task);
	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
	}
	ia64_drop_fpu(task);
	psr->dfh = 1;
}
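
/*
 * Setting psr.dfh (disabled floating-point high) means the task's
 * next access to f32-f127 raises a disabled-FP fault, at which point
 * the kernel reloads the registers from thread.fph, so any values a
 * debugger stored there are picked up transparently.
 */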
/*
 * Change the machine-state of CHILD such that it will return via the normal
 * kernel exit-path, rather than the syscall-exit path.
 */
static void
convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
			unsigned long cfm)
{
	struct unw_frame_info info, prev_info;
	unsigned long ip, sp, pr;

	unw_init_from_blocked_task(&info, child);
	while (1) {
		prev_info = info;
		if (unw_unwind(&info) < 0)
			return;

		unw_get_sp(&info, &sp);
		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
		    < IA64_PT_REGS_SIZE) {
			dprintk("ptrace.%s: ran off the top of the kernel "
				"stack\n", __func__);
			return;
		}
		if (unw_get_pr (&prev_info, &pr) < 0) {
			unw_get_rp(&prev_info, &ip);
			dprintk("ptrace.%s: failed to read "
				"predicate register (ip=0x%lx)\n",
				__func__, ip);
			return;
		}
		if (unw_is_intr_frame(&info)
		    && (pr & (1UL << PRED_USER_STACK)))
			break;
	}

	/*
	 * Note: at the time of this call, the target task is blocked
	 * in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
	 * (aka, "pLvSys") we redirect execution from
	 * .work_pending_syscall_end to .work_processed_kernel.
	 */
	unw_get_pr(&prev_info, &pr);
	pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
	pr |=  (1UL << PRED_NON_SYSCALL);
	unw_set_pr(&prev_info, pr);

	pt->cr_ifs = (1UL << 63) | cfm;
	/*
	 * Clear the memory that is NOT written on syscall-entry to
	 * ensure we do not leak kernel-state to user when execution
	 * resumes.
	 */
	pt->r2 = 0;
	pt->r3 = 0;
	pt->r14 = 0;
	memset(&pt->r16, 0, 16*8);	/* clear r16-r31 */
	memset(&pt->f6, 0, 6*16);	/* clear f6-f11 */
	pt->b7 = 0;
	pt->ar_ccv = 0;
	pt->ar_csd = 0;
	pt->ar_ssd = 0;
}
static int
access_nat_bits (struct task_struct *child, struct pt_regs *pt,
		 struct unw_frame_info *info,
		 unsigned long *data, int write_access)
{
	unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
	char nat = 0;

	if (write_access) {
		nat_bits = *data;
		scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
		if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
			dprintk("ptrace: failed to set ar.unat\n");
			return -1;
		}
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			unw_set_gr(info, regnum, dummy,
				   (nat_bits >> regnum) & 1);
		}
	} else {
		if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
			dprintk("ptrace: failed to read ar.unat\n");
			return -1;
		}
		nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			nat_bits |= (nat != 0) << regnum;
		}
		*data = nat_bits;
	}
	return 0;
}
static int
access_uarea (struct task_struct *child, unsigned long addr,
	      unsigned long *data, int write_access);
static long
ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
	struct unw_frame_info info;
	struct ia64_fpreg fpval;
	struct switch_stack *sw;
	struct pt_regs *pt;
	long ret, retval = 0;
	char nat = 0;
	int i;

	if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
	    || access_uarea(child, PT_AR_EC, &ec, 0) < 0
	    || access_uarea(child, PT_AR_LC, &lc, 0) < 0
	    || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
	    || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
	    || access_uarea(child, PT_CFM, &cfm, 0) < 0
	    || access_uarea(child, PT_NAT_BITS, &nat_bits, 0) < 0)
		return -EIO;

	/* control regs */

	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __put_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __put_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->gr[i]);
	}

	/* gr8-gr11 */

	retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
	retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);

	/* b0 */

	retval |= __put_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		if (unw_access_br(&info, i, &val, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->br[i]);
	}

	/* b6-b7 */

	retval |= __put_user(pt->b6, &ppr->br[6]);
	retval |= __put_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
	}

	/* fr6-fr11 */

	retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
				 sizeof(struct ia64_fpreg) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
				 sizeof(struct ia64_fpreg) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
	}

	/* fph */

	ia64_flush_fph(child);
	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
				 sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __put_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __put_user(nat_bits, &ppr->nat);

	ret = retval ? -EIO : 0;
	return ret;
}
static long
ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
	struct unw_frame_info info;
	struct switch_stack *sw;
	struct ia64_fpreg fpval;
	struct pt_regs *pt;
	long ret, retval = 0;
	int i;

	memset(&fpval, 0, sizeof(fpval));

	if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	/* control regs */

	retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __get_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __get_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
	retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		retval |= __get_user(val, &ppr->gr[i]);
		/* NaT bit will be set via PT_NAT_BITS: */
		if (unw_set_gr(&info, i, val, 0) < 0)
			return -EIO;
	}

	/* gr8-gr11 */

	retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
	retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
	retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);

	/* b0 */

	retval |= __get_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		retval |= __get_user(val, &ppr->br[i]);
		unw_set_br(&info, i, val);
	}

	/* b6-b7 */

	retval |= __get_user(pt->b6, &ppr->br[6]);
	retval |= __get_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fr6-fr11 */

	retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
				   sizeof(ppr->fr[6]) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
				   sizeof(ppr->fr[12]) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i],
					   sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fph */

	ia64_sync_fph(child);
	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
				   sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __get_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __get_user(nat_bits, &ppr->nat);

	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
	retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
	retval |= access_uarea(child, PT_CFM, &cfm, 1);
	retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);

	ret = retval ? -EIO : 0;
	return ret;
}
void
user_enable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 1;
}

void
user_enable_block_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->tb = 1;
}

void
user_disable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	/* make sure the single step/taken-branch trap bits are not set: */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 0;
	child_psr->tb = 0;
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void
ptrace_disable (struct task_struct *child)
{
	user_disable_single_step(child);
}
long
arch_ptrace (struct task_struct *child, long request,
	     unsigned long addr, unsigned long data)
{
	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* read word at location addr */
		if (access_process_vm(child, addr, &data, sizeof(data), 0)
		    != sizeof(data))
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	/* PTRACE_POKETEXT and PTRACE_POKEDATA are handled
	 * by the generic ptrace_request().
	 */

	case PTRACE_PEEKUSR:
		/* read the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 0) < 0)
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	case PTRACE_POKEUSR:
		/* write the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 1) < 0)
			return -EIO;
		return 0;

	case PTRACE_OLD_GETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);

	case PTRACE_OLD_SETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);

	case PTRACE_GETREGS:
		return ptrace_getregs(child,
				      (struct pt_all_user_regs __user *) data);

	case PTRACE_SETREGS:
		return ptrace_setregs(child,
				      (struct pt_all_user_regs __user *) data);

	default:
		return ptrace_request(child, request, addr, data);
	}
}
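
/*
 * Illustrative call sites (not part of this file): a debugger can
 * read the stopped child's instruction pointer with
 * ptrace(PTRACE_PEEKUSR, pid, PT_CR_IIP, 0) and fetch the complete
 * register frame in one call by passing the address of a
 * struct pt_all_user_regs buffer as the data argument of
 * ptrace(PTRACE_GETREGS, pid, 0, buf).
 */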
1233 /* "asmlinkage" so the input arguments are preserved... */
1236 syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
1237 long arg4, long arg5, long arg6, long arg7,
1238 struct pt_regs regs)
1240 if (test_thread_flag(TIF_SYSCALL_TRACE))
1241 if (tracehook_report_syscall_entry(®s))
1244 /* copy user rbs to kernel rbs */
1245 if (test_thread_flag(TIF_RESTORE_RSE))
1249 audit_syscall_entry(AUDIT_ARCH_IA64, regs.r15, arg0, arg1, arg2, arg3);
1254 /* "asmlinkage" so the input arguments are preserved... */
1257 syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
1258 long arg4, long arg5, long arg6, long arg7,
1259 struct pt_regs regs)
1263 audit_syscall_exit(®s);
1265 step = test_thread_flag(TIF_SINGLESTEP);
1266 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
1267 tracehook_report_syscall_exit(®s, step);
1269 /* copy user rbs to kernel rbs */
1270 if (test_thread_flag(TIF_RESTORE_RSE))
/* Utrace implementation starts here */
struct regset_get {
	void *kbuf;
	void __user *ubuf;
};

struct regset_set {
	const void *kbuf;
	const void __user *ubuf;
};

struct regset_getset {
	struct task_struct *target;
	const struct user_regset *regset;
	union {
		struct regset_get get;
		struct regset_set set;
	} u;
	unsigned int pos;
	unsigned int count;
	int ret;
};
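
/*
 * pos/count work as a cursor over the regset layout: each
 * user_regset_copyin()/copyout() helper advances pos and decrements
 * count by the number of bytes it transferred, so the do_*_get/set
 * callbacks below proceed section by section until count reaches
 * zero or an error is recorded in ret.
 */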
static int
access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long *ptr = NULL;
	int ret;
	char nat = 0;

	pt = task_pt_regs(target);
	switch (addr) {
	case ELF_GR_OFFSET(1):
		ptr = &pt->r1;
		break;
	case ELF_GR_OFFSET(2):
	case ELF_GR_OFFSET(3):
		ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2));
		break;
	case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7):
		if (write_access) {
			/* read NaT bit first: */
			unsigned long dummy;

			ret = unw_get_gr(info, addr/8, &dummy, &nat);
			if (ret < 0)
				return ret;
		}
		return unw_access_gr(info, addr/8, data, &nat, write_access);
	case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11):
		ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8);
		break;
	case ELF_GR_OFFSET(12):
	case ELF_GR_OFFSET(13):
		ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12);
		break;
	case ELF_GR_OFFSET(14):
		ptr = &pt->r14;
		break;
	case ELF_GR_OFFSET(15):
		ptr = &pt->r15;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}
static int
access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	switch (addr) {
	case ELF_BR_OFFSET(0):
		ptr = &pt->b0;
		break;
	case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
		return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
				     data, write_access);
	case ELF_BR_OFFSET(6):
		ptr = &pt->b6;
		break;
	case ELF_BR_OFFSET(7):
		ptr = &pt->b7;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}
static int
access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long cfm, urbs_end;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
		switch (addr) {
		case ELF_AR_RSC_OFFSET:
			/* force PL3 */
			if (write_access)
				pt->ar_rsc = *data | (3 << 2);
			else
				*data = pt->ar_rsc;
			return 0;
		case ELF_AR_BSP_OFFSET:
			/*
			 * By convention, we use PT_AR_BSP to refer to
			 * the end of the user-level backing store.
			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
			 * to get the real value of ar.bsp at the time
			 * the kernel was entered.
			 *
			 * Furthermore, when changing the contents of
			 * PT_AR_BSP (or PT_CFM) while the task is
			 * blocked in a system call, convert the state
			 * so that the non-system-call exit
			 * path is used.  This ensures that the proper
			 * state will be picked up when resuming
			 * execution.  However, it *also* means that
			 * once we write PT_AR_BSP/PT_CFM, it won't be
			 * possible to modify the syscall arguments of
			 * the pending system call any longer.  This
			 * shouldn't be an issue because modifying
			 * PT_AR_BSP/PT_CFM generally implies that
			 * we're either abandoning the pending system
			 * call or that we defer its re-execution
			 * (e.g., due to GDB doing an inferior
			 * function call).
			 */
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (*data != urbs_end) {
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					/*
					 * Simulate user-level write
					 * of ar.bsp:
					 */
					pt->loadrs = 0;
					pt->ar_bspstore = *data;
				}
			} else
				*data = urbs_end;
			return 0;
		case ELF_AR_BSPSTORE_OFFSET:
			ptr = &pt->ar_bspstore;
			break;
		case ELF_AR_RNAT_OFFSET:
			ptr = &pt->ar_rnat;
			break;
		case ELF_AR_CCV_OFFSET:
			ptr = &pt->ar_ccv;
			break;
		case ELF_AR_UNAT_OFFSET:
			ptr = &pt->ar_unat;
			break;
		case ELF_AR_FPSR_OFFSET:
			ptr = &pt->ar_fpsr;
			break;
		case ELF_AR_PFS_OFFSET:
			ptr = &pt->ar_pfs;
			break;
		case ELF_AR_LC_OFFSET:
			return unw_access_ar(info, UNW_AR_LC, data,
					     write_access);
		case ELF_AR_EC_OFFSET:
			return unw_access_ar(info, UNW_AR_EC, data,
					     write_access);
		case ELF_AR_CSD_OFFSET:
			ptr = &pt->ar_csd;
			break;
		case ELF_AR_SSD_OFFSET:
			ptr = &pt->ar_ssd;
		}
	} else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
		switch (addr) {
		case ELF_CR_IIP_OFFSET:
			ptr = &pt->cr_iip;
			break;
		case ELF_CFM_OFFSET:
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (((cfm ^ *data) & PFM_MASK) != 0) {
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
						      | (*data & PFM_MASK));
				}
			} else
				*data = cfm;
			return 0;
		case ELF_CR_IPSR_OFFSET:
			if (write_access) {
				unsigned long tmp = *data;
				/* psr.ri==3 is a reserved value: SDM 2:25 */
				if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
					tmp &= ~IA64_PSR_RI;
				pt->cr_ipsr = ((tmp & IPSR_MASK)
					       | (pt->cr_ipsr & ~IPSR_MASK));
			} else
				*data = (pt->cr_ipsr & IPSR_MASK);
			return 0;
		}
	} else if (addr == ELF_NAT_OFFSET)
		return access_nat_bits(target, pt, info,
				       data, write_access);
	else if (addr == ELF_PR_OFFSET)
		ptr = &pt->pr;
	else
		return -1;

	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;

	return 0;
}
static int
access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15))
		return access_elf_gpreg(target, info, addr, data,
					write_access);
	else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
		return access_elf_breg(target, info, addr, data,
				       write_access);
	else
		return access_elf_areg(target, info, addr, data,
				       write_access);
}
void do_gpregs_get(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	struct regset_getset *dst = arg;
	elf_greg_t tmp[16];
	unsigned int i, index, min_copy;

	if (unw_unwind_to_user(info) < 0)
		return;

	/*
	 * coredump format:
	 *      r0-r31
	 *      NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
	 *      predicate registers (p0-p63)
	 *      b0-b7
	 *      ip cfm user-mask
	 *      ar.rsc ar.bsp ar.bspstore ar.rnat
	 *      ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
	 */

	/* Skip r0 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
						    &dst->u.get.kbuf,
						    &dst->u.get.ubuf,
						    0, ELF_GR_OFFSET(1));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* gr1 - gr15 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
		min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_GR_OFFSET(16);
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* r16-r31 */
	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
		pt = task_pt_regs(dst->target);
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16,
				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* nat, pr, b0 - b7 */
	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
		min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_CR_IIP_OFFSET;
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
	 */
	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
		min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_AR_END_OFFSET;
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
	}
}
void do_gpregs_set(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	struct regset_getset *dst = arg;
	elf_greg_t tmp[16];
	unsigned int i, index;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip r0 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						     &dst->u.set.kbuf,
						     &dst->u.set.ubuf,
						     0, ELF_GR_OFFSET(1));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* gr1-gr15 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
		i = dst->pos;
		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
		if (dst->ret)
			return;
		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
		if (dst->count == 0)
			return;
	}

	/* gr16-gr31 */
	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
		pt = task_pt_regs(dst->target);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16,
				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* nat, pr, b0 - b7 */
	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
		i = dst->pos;
		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
		if (dst->ret)
			return;
		for (; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
		if (dst->count == 0)
			return;
	}

	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
	 */
	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
		i = dst->pos;
		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
		if (dst->ret)
			return;
		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
	}
}
#define ELF_FP_OFFSET(i)	(i * sizeof(elf_fpreg_t))
void do_fpregs_get(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	struct task_struct *task = dst->target;
	elf_fpreg_t tmp[30];
	int index, min_copy, i;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
						    &dst->u.get.kbuf,
						    &dst->u.get.ubuf,
						    0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t);

		min_copy = min(((unsigned int)ELF_FP_OFFSET(32)),
				dst->pos + dst->count);
		for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t),
				index++)
			if (unw_get_fr(info, i / sizeof(elf_fpreg_t),
					 &tmp[index])) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fph */
	if (dst->count > 0) {
		ia64_flush_fph(dst->target);
		if (task->thread.flags & IA64_THREAD_FPH_VALID)
			dst->ret = user_regset_copyout(
				&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				&dst->target->thread.fph,
				ELF_FP_OFFSET(32), -1);
		else
			/* Zero fill instead.  */
			dst->ret = user_regset_copyout_zero(
				&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				ELF_FP_OFFSET(32), -1);
	}
}
void do_fpregs_set(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	elf_fpreg_t fpreg, tmp[30];
	int index, start, end;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						     &dst->u.set.kbuf,
						     &dst->u.set.ubuf,
						     0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		start = dst->pos;
		end = min(((unsigned int)ELF_FP_OFFSET(32)),
			 dst->pos + dst->count);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->ret)
			return;

		if (start & 0xF) { /* only write high part */
			if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
					 &fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
				= fpreg.u.bits[0];
			start &= ~0xFUL;
		}
		if (end & 0xF) { /* only write low part */
			if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
					&fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
				= fpreg.u.bits[1];
			end = (end + 0xF) & ~0xFUL;
		}

		for ( ; start < end ; start += sizeof(elf_fpreg_t)) {
			index = start / sizeof(elf_fpreg_t);
			if (unw_set_fr(info, index, tmp[index - 2])) {
				dst->ret = -EIO;
				return;
			}
		}
		if (dst->ret || dst->count == 0)
			return;
	}

	/* fph */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
		ia64_sync_fph(dst->target);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
					      &dst->u.set.kbuf,
					      &dst->u.set.ubuf,
					      &dst->target->thread.fph,
					      ELF_FP_OFFSET(32), -1);
	}
}
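
/*
 * Each elf_fpreg_t is 16 bytes (the ia64 extended FP format), so a
 * regset write that starts or ends in the middle of a register only
 * supplies half of it.  The (start & 0xF) / (end & 0xF) fix-ups above
 * read the current register value with unw_get_fr() and merge the
 * untouched half back in before unw_set_fr() stores it, so partial
 * writes never clobber the other 8 bytes of a register.
 */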
static int
do_regset_call(void (*call)(struct unw_frame_info *, void *),
	       struct task_struct *target,
	       const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct regset_getset info = { .target = target, .regset = regset,
				      .pos = pos, .count = count,
				      .u.set = { .kbuf = kbuf, .ubuf = ubuf },
				      .ret = 0 };

	if (target == current)
		unw_init_running(call, &info);
	else {
		struct unw_frame_info ufi;
		memset(&ufi, 0, sizeof(ufi));
		unw_init_from_blocked_task(&ufi, target);
		(*call)(&ufi, &info);
	}

	return info.ret;
}
static int
gpregs_get(struct task_struct *target,
	   const struct user_regset *regset,
	   unsigned int pos, unsigned int count,
	   void *kbuf, void __user *ubuf)
{
	return do_regset_call(do_gpregs_get, target, regset, pos, count,
		kbuf, ubuf);
}

static int gpregs_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_gpregs_set, target, regset, pos, count,
		kbuf, ubuf);
}
static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
{
	do_sync_rbs(info, ia64_sync_user_rbs);
}

/*
 * This is called to write back the register backing store.
 * ptrace does this before it stops, so that a tracer reading the user
 * memory after the thread stops will get the current register data.
 */
static int
gpregs_writeback(struct task_struct *target,
		 const struct user_regset *regset,
		 int now)
{
	if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
		return 0;
	set_notify_resume(target);
	return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
		NULL, NULL);
}
static int
fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
}

static int fpregs_get(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	return do_regset_call(do_fpregs_get, target, regset, pos, count,
		kbuf, ubuf);
}

static int fpregs_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_fpregs_set, target, regset, pos, count,
		kbuf, ubuf);
}
static int
access_uarea(struct task_struct *child, unsigned long addr,
	      unsigned long *data, int write_access)
{
	unsigned int pos = -1; /* an invalid value */
	int ret;
	unsigned long *ptr, regnum;

	if ((addr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
		return -1;
	}
	if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
		(addr >= PT_R7 + 8 && addr < PT_B1) ||
		(addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
		(addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
		dprintk("ptrace: rejecting access to register "
					"address 0x%lx\n", addr);
		return -1;
	}

	switch (addr) {
	case PT_F32 ... (PT_F127 + 15):
		pos = addr - PT_F32 + ELF_FP_OFFSET(32);
		break;
	case PT_F2 ... (PT_F5 + 15):
		pos = addr - PT_F2 + ELF_FP_OFFSET(2);
		break;
	case PT_F10 ... (PT_F31 + 15):
		pos = addr - PT_F10 + ELF_FP_OFFSET(10);
		break;
	case PT_F6 ... (PT_F9 + 15):
		pos = addr - PT_F6 + ELF_FP_OFFSET(6);
		break;
	}

	if (pos != -1) {
		if (write_access)
			ret = fpregs_set(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		else
			ret = fpregs_get(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		if (ret != 0)
			return -1;
		return 0;
	}

	switch (addr) {
	case PT_NAT_BITS:
		pos = ELF_NAT_OFFSET;
		break;
	case PT_R4 ... PT_R7:
		pos = addr - PT_R4 + ELF_GR_OFFSET(4);
		break;
	case PT_B1 ... PT_B5:
		pos = addr - PT_B1 + ELF_BR_OFFSET(1);
		break;
	case PT_AR_EC:
		pos = ELF_AR_EC_OFFSET;
		break;
	case PT_AR_LC:
		pos = ELF_AR_LC_OFFSET;
		break;
	case PT_CR_IPSR:
		pos = ELF_CR_IPSR_OFFSET;
		break;
	case PT_CR_IIP:
		pos = ELF_CR_IIP_OFFSET;
		break;
	case PT_CFM:
		pos = ELF_CFM_OFFSET;
		break;
	case PT_AR_UNAT:
		pos = ELF_AR_UNAT_OFFSET;
		break;
	case PT_AR_PFS:
		pos = ELF_AR_PFS_OFFSET;
		break;
	case PT_AR_RSC:
		pos = ELF_AR_RSC_OFFSET;
		break;
	case PT_AR_RNAT:
		pos = ELF_AR_RNAT_OFFSET;
		break;
	case PT_AR_BSPSTORE:
		pos = ELF_AR_BSPSTORE_OFFSET;
		break;
	case PT_PR:
		pos = ELF_PR_OFFSET;
		break;
	case PT_B6:
		pos = ELF_BR_OFFSET(6);
		break;
	case PT_AR_BSP:
		pos = ELF_AR_BSP_OFFSET;
		break;
	case PT_R1 ... PT_R3:
		pos = addr - PT_R1 + ELF_GR_OFFSET(1);
		break;
	case PT_R12 ... PT_R15:
		pos = addr - PT_R12 + ELF_GR_OFFSET(12);
		break;
	case PT_R8 ... PT_R11:
		pos = addr - PT_R8 + ELF_GR_OFFSET(8);
		break;
	case PT_R16 ... PT_R31:
		pos = addr - PT_R16 + ELF_GR_OFFSET(16);
		break;
	case PT_AR_CCV:
		pos = ELF_AR_CCV_OFFSET;
		break;
	case PT_AR_FPSR:
		pos = ELF_AR_FPSR_OFFSET;
		break;
	case PT_B0:
		pos = ELF_BR_OFFSET(0);
		break;
	case PT_B7:
		pos = ELF_BR_OFFSET(7);
		break;
	case PT_AR_CSD:
		pos = ELF_AR_CSD_OFFSET;
		break;
	case PT_AR_SSD:
		pos = ELF_AR_SSD_OFFSET;
		break;
	}

	if (pos != -1) {
		if (write_access)
			ret = gpregs_set(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		else
			ret = gpregs_get(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		if (ret != 0)
			return -1;
		return 0;
	}

	/* access debug registers */
	if (addr >= PT_IBR) {
		regnum = (addr - PT_IBR) >> 3;
		ptr = &child->thread.ibr[0];
	} else {
		regnum = (addr - PT_DBR) >> 3;
		ptr = &child->thread.dbr[0];
	}

	if (regnum >= 8) {
		dprintk("ptrace: rejecting access to register "
				"address 0x%lx\n", addr);
		return -1;
	}
#ifdef CONFIG_PERFMON
	/*
	 * Check if debug registers are used by perfmon. This
	 * test must be done once we know that we can do the
	 * operation, i.e. the arguments are all valid, but
	 * before we start modifying the state.
	 *
	 * Perfmon needs to keep a count of how many processes
	 * are trying to modify the debug registers for system
	 * wide monitoring sessions.
	 *
	 * We also include read access here, because they may
	 * cause the PMU-installed debug register state
	 * (dbr[], ibr[]) to be reset. The two arrays are also
	 * used by perfmon, but we do not use
	 * IA64_THREAD_DBG_VALID. The registers are restored
	 * by the PMU context switch code.
	 */
	if (pfm_use_debug_registers(child))
		return -1;
#endif

	if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
		child->thread.flags |= IA64_THREAD_DBG_VALID;
		memset(child->thread.dbr, 0,
				sizeof(child->thread.dbr));
		memset(child->thread.ibr, 0,
				sizeof(child->thread.ibr));
	}

	ptr += regnum;

	if ((regnum & 1) && write_access) {
		/* don't let the user set kernel-level breakpoints: */
		*ptr = *data & ~(7UL << 56);
		return 0;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}
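
/*
 * For example, access_uarea(child, PT_R4, &val, 0) translates the
 * user-visible offset PT_R4 to ELF_GR_OFFSET(4) and routes the read
 * through gpregs_get(), which unwinds to the user frame and fetches
 * the preserved register r4 from wherever it was actually saved.
 */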
static const struct user_regset native_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
		.get = gpregs_get, .set = gpregs_set,
		.writeback = gpregs_writeback
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
		.get = fpregs_get, .set = fpregs_set, .active = fpregs_active
	},
};

static const struct user_regset_view user_ia64_view = {
	.name = "ia64",
	.e_machine = EM_IA_64,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
{
	return &user_ia64_view;
}
struct syscall_get_set_args {
	unsigned int i;
	unsigned int n;
	unsigned long *args;
	struct pt_regs *regs;
	int rw;
};

static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
{
	struct syscall_get_set_args *args = data;
	struct pt_regs *pt = args->regs;
	unsigned long *krbs, cfm, ndirty;
	int i, count;

	if (unw_unwind_to_user(info) < 0)
		return;

	cfm = pt->cr_ifs;
	krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	count = 0;
	if (in_syscall(pt))
		count = min_t(int, args->n, cfm & 0x7f);

	for (i = 0; i < count; i++) {
		if (args->rw)
			*ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
				args->args[i];
		else
			args->args[i] = *ia64_rse_skip_regs(krbs,
				ndirty + i + args->i);
	}

	if (!args->rw) {
		while (i < args->n) {
			args->args[i] = 0;
			i++;
		}
	}
}

void ia64_syscall_get_set_arguments(struct task_struct *task,
	struct pt_regs *regs, unsigned int i, unsigned int n,
	unsigned long *args, int rw)
{
	struct syscall_get_set_args data = {
		.i = i,
		.n = n,
		.args = args,
		.regs = regs,
		.rw = rw,
	};

	if (task == current)
		unw_init_running(syscall_get_set_args_cb, &data);
	else {
		struct unw_frame_info ufi;
		memset(&ufi, 0, sizeof(ufi));
		unw_init_from_blocked_task(&ufi, task);
		syscall_get_set_args_cb(&ufi, &data);
	}
}
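
/*
 * This is the backend the generic syscall_get_arguments()/
 * syscall_set_arguments() helpers use on ia64 (see
 * arch/ia64/include/asm/syscall.h): syscall arguments live in the
 * stacked registers r32 and up, which sit in the dirty partition of
 * the kernel RBS at slots ndirty + i rather than in struct pt_regs,
 * so fetching or storing them requires the RSE slot arithmetic above
 * instead of a simple structure field access.
 */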