2 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * Copyright (C) 2001 IBM
7 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
10 * Derived from "arch/i386/kernel/signal.c"
11 * Copyright (C) 1991, 1992 Linus Torvalds
12 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
20 #include <linux/sched.h>
22 #include <linux/smp.h>
23 #include <linux/kernel.h>
24 #include <linux/signal.h>
25 #include <linux/errno.h>
26 #include <linux/elf.h>
27 #include <linux/ptrace.h>
28 #include <linux/ratelimit.h>
30 #include <linux/syscalls.h>
31 #include <linux/compat.h>
33 #include <linux/wait.h>
34 #include <linux/unistd.h>
35 #include <linux/stddef.h>
36 #include <linux/tty.h>
37 #include <linux/binfmts.h>
40 #include <asm/uaccess.h>
41 #include <asm/cacheflush.h>
42 #include <asm/syscalls.h>
43 #include <asm/sigcontext.h>
45 #include <asm/switch_to.h>
49 #include <asm/unistd.h>
51 #include <asm/ucontext.h>
52 #include <asm/pgtable.h>
60 #define sys_sigsuspend compat_sys_sigsuspend
61 #define sys_rt_sigsuspend compat_sys_rt_sigsuspend
62 #define sys_rt_sigreturn compat_sys_rt_sigreturn
63 #define sys_sigaction compat_sys_sigaction
64 #define sys_swapcontext compat_sys_swapcontext
65 #define sys_sigreturn compat_sys_sigreturn
67 #define old_sigaction old_sigaction32
68 #define sigcontext sigcontext32
69 #define mcontext mcontext32
70 #define ucontext ucontext32
73 * Userspace code may pass a ucontext which doesn't include VSX added
74 * at the end. We need to check for this case.
76 #define UCONTEXTSIZEWITHOUTVSX \
77 (sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
80 * Returning 0 means we return to userspace via
81 * ret_from_except and thus restore all user
82 * registers from *regs. This is what we need
83 * to do when a signal has been delivered.
86 #define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
87 #undef __SIGNAL_FRAMESIZE
88 #define __SIGNAL_FRAMESIZE __SIGNAL_FRAMESIZE32
90 #define ELF_NVRREG ELF_NVRREG32
93 * Functions for flipping sigsets (thanks to brain dead generic
94 * implementation that makes things simple for little endian only)
96 static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
100 switch (_NSIG_WORDS) {
101 case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
102 cset.sig[7] = set->sig[3] >> 32;
103 case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
104 cset.sig[5] = set->sig[2] >> 32;
105 case 2: cset.sig[2] = set->sig[1] & 0xffffffffull;
106 cset.sig[3] = set->sig[1] >> 32;
107 case 1: cset.sig[0] = set->sig[0] & 0xffffffffull;
108 cset.sig[1] = set->sig[0] >> 32;
110 return copy_to_user(uset, &cset, sizeof(*uset));
113 static inline int get_sigset_t(sigset_t *set,
114 const compat_sigset_t __user *uset)
118 if (copy_from_user(&s32, uset, sizeof(*uset)))
122 * Swap the 2 words of the 64-bit sigset_t (they are stored
123 * in the "wrong" endian in 32-bit user storage).
125 switch (_NSIG_WORDS) {
126 case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
127 case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
128 case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
129 case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
134 static inline int get_old_sigaction(struct k_sigaction *new_ka,
135 struct old_sigaction __user *act)
137 compat_old_sigset_t mask;
138 compat_uptr_t handler, restorer;
140 if (get_user(handler, &act->sa_handler) ||
141 __get_user(restorer, &act->sa_restorer) ||
142 __get_user(new_ka->sa.sa_flags, &act->sa_flags) ||
143 __get_user(mask, &act->sa_mask))
145 new_ka->sa.sa_handler = compat_ptr(handler);
146 new_ka->sa.sa_restorer = compat_ptr(restorer);
147 siginitset(&new_ka->sa.sa_mask, mask);
151 #define to_user_ptr(p) ptr_to_compat(p)
152 #define from_user_ptr(p) compat_ptr(p)
154 static inline int save_general_regs(struct pt_regs *regs,
155 struct mcontext __user *frame)
157 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
160 WARN_ON(!FULL_REGS(regs));
162 for (i = 0; i <= PT_RESULT; i ++) {
163 if (i == 14 && !FULL_REGS(regs))
165 if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
171 static inline int restore_general_regs(struct pt_regs *regs,
172 struct mcontext __user *sr)
174 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
177 for (i = 0; i <= PT_RESULT; i++) {
178 if ((i == PT_MSR) || (i == PT_SOFTE))
180 if (__get_user(gregs[i], &sr->mc_gregs[i]))
186 #else /* CONFIG_PPC64 */
188 #define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
190 static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
192 return copy_to_user(uset, set, sizeof(*uset));
195 static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
197 return copy_from_user(set, uset, sizeof(*uset));
200 static inline int get_old_sigaction(struct k_sigaction *new_ka,
201 struct old_sigaction __user *act)
205 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
206 __get_user(new_ka->sa.sa_handler, &act->sa_handler) ||
207 __get_user(new_ka->sa.sa_restorer, &act->sa_restorer) ||
208 __get_user(new_ka->sa.sa_flags, &act->sa_flags) ||
209 __get_user(mask, &act->sa_mask))
211 siginitset(&new_ka->sa.sa_mask, mask);
215 #define to_user_ptr(p) ((unsigned long)(p))
216 #define from_user_ptr(p) ((void __user *)(p))
218 static inline int save_general_regs(struct pt_regs *regs,
219 struct mcontext __user *frame)
221 WARN_ON(!FULL_REGS(regs));
222 return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
225 static inline int restore_general_regs(struct pt_regs *regs,
226 struct mcontext __user *sr)
228 /* copy up to but not including MSR */
229 if (__copy_from_user(regs, &sr->mc_gregs,
230 PT_MSR * sizeof(elf_greg_t)))
232 /* copy from orig_r3 (the word after the MSR) up to the end */
233 if (__copy_from_user(®s->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
234 GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
239 #endif /* CONFIG_PPC64 */
242 * Atomically swap in the new signal mask, and wait for a signal.
244 long sys_sigsuspend(old_sigset_t mask)
247 siginitset(&blocked, mask);
248 return sigsuspend(&blocked);
251 long sys_sigaction(int sig, struct old_sigaction __user *act,
252 struct old_sigaction __user *oact)
254 struct k_sigaction new_ka, old_ka;
263 if (get_old_sigaction(&new_ka, act))
267 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
269 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
270 __put_user(to_user_ptr(old_ka.sa.sa_handler),
271 &oact->sa_handler) ||
272 __put_user(to_user_ptr(old_ka.sa.sa_restorer),
273 &oact->sa_restorer) ||
274 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
275 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
283 * When we have signals to deliver, we set up on the
284 * user stack, going down from the original stack pointer:
285 * an ABI gap of 56 words
287 * a sigcontext struct
288 * a gap of __SIGNAL_FRAMESIZE bytes
290 * Each of these things must be a multiple of 16 bytes in size. The following
291 * structure represent all of this except the __SIGNAL_FRAMESIZE gap
295 struct sigcontext sctx; /* the sigcontext */
296 struct mcontext mctx; /* all the register values */
297 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
298 struct sigcontext sctx_transact;
299 struct mcontext mctx_transact;
302 * Programs using the rs6000/xcoff abi can save up to 19 gp
303 * regs and 18 fp regs below sp before decrementing it.
308 /* We use the mc_pad field for the signal return trampoline. */
312 * When we have rt signals to deliver, we set up on the
313 * user stack, going down from the original stack pointer:
314 * one rt_sigframe struct (siginfo + ucontext + ABI gap)
315 * a gap of __SIGNAL_FRAMESIZE+16 bytes
316 * (the +16 is to get the siginfo and ucontext in the same
317 * positions as in older kernels).
319 * Each of these things must be a multiple of 16 bytes in size.
324 compat_siginfo_t info;
329 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
330 struct ucontext uc_transact;
333 * Programs using the rs6000/xcoff abi can save up to 19 gp
334 * regs and 18 fp regs below sp before decrementing it.
340 unsigned long copy_fpr_to_user(void __user *to,
341 struct task_struct *task)
343 double buf[ELF_NFPREG];
346 /* save FPR copy to local buffer then write to the thread_struct */
347 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
348 buf[i] = task->thread.TS_FPR(i);
349 memcpy(&buf[i], &task->thread.fpscr, sizeof(double));
350 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
353 unsigned long copy_fpr_from_user(struct task_struct *task,
356 double buf[ELF_NFPREG];
359 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
361 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
362 task->thread.TS_FPR(i) = buf[i];
363 memcpy(&task->thread.fpscr, &buf[i], sizeof(double));
368 unsigned long copy_vsx_to_user(void __user *to,
369 struct task_struct *task)
371 double buf[ELF_NVSRHALFREG];
374 /* save FPR copy to local buffer then write to the thread_struct */
375 for (i = 0; i < ELF_NVSRHALFREG; i++)
376 buf[i] = task->thread.fpr[i][TS_VSRLOWOFFSET];
377 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
380 unsigned long copy_vsx_from_user(struct task_struct *task,
383 double buf[ELF_NVSRHALFREG];
386 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
388 for (i = 0; i < ELF_NVSRHALFREG ; i++)
389 task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
393 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
394 unsigned long copy_transact_fpr_to_user(void __user *to,
395 struct task_struct *task)
397 double buf[ELF_NFPREG];
400 /* save FPR copy to local buffer then write to the thread_struct */
401 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
402 buf[i] = task->thread.TS_TRANS_FPR(i);
403 memcpy(&buf[i], &task->thread.transact_fpscr, sizeof(double));
404 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
407 unsigned long copy_transact_fpr_from_user(struct task_struct *task,
410 double buf[ELF_NFPREG];
413 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
415 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
416 task->thread.TS_TRANS_FPR(i) = buf[i];
417 memcpy(&task->thread.transact_fpscr, &buf[i], sizeof(double));
422 unsigned long copy_transact_vsx_to_user(void __user *to,
423 struct task_struct *task)
425 double buf[ELF_NVSRHALFREG];
428 /* save FPR copy to local buffer then write to the thread_struct */
429 for (i = 0; i < ELF_NVSRHALFREG; i++)
430 buf[i] = task->thread.transact_fpr[i][TS_VSRLOWOFFSET];
431 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
434 unsigned long copy_transact_vsx_from_user(struct task_struct *task,
437 double buf[ELF_NVSRHALFREG];
440 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
442 for (i = 0; i < ELF_NVSRHALFREG ; i++)
443 task->thread.transact_fpr[i][TS_VSRLOWOFFSET] = buf[i];
446 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
448 inline unsigned long copy_fpr_to_user(void __user *to,
449 struct task_struct *task)
451 return __copy_to_user(to, task->thread.fpr,
452 ELF_NFPREG * sizeof(double));
455 inline unsigned long copy_fpr_from_user(struct task_struct *task,
458 return __copy_from_user(task->thread.fpr, from,
459 ELF_NFPREG * sizeof(double));
462 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
463 inline unsigned long copy_transact_fpr_to_user(void __user *to,
464 struct task_struct *task)
466 return __copy_to_user(to, task->thread.transact_fpr,
467 ELF_NFPREG * sizeof(double));
470 inline unsigned long copy_transact_fpr_from_user(struct task_struct *task,
473 return __copy_from_user(task->thread.transact_fpr, from,
474 ELF_NFPREG * sizeof(double));
476 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
480 * Save the current user registers on the user stack.
481 * We only save the altivec/spe registers if the process has used
482 * altivec/spe instructions at some point.
484 static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
485 int sigret, int ctx_has_vsx_region)
487 unsigned long msr = regs->msr;
489 /* Make sure floating point registers are stored in regs */
490 flush_fp_to_thread(current);
492 /* save general registers */
493 if (save_general_regs(regs, frame))
496 #ifdef CONFIG_ALTIVEC
497 /* save altivec registers */
498 if (current->thread.used_vr) {
499 flush_altivec_to_thread(current);
500 if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
501 ELF_NVRREG * sizeof(vector128)))
503 /* set MSR_VEC in the saved MSR value to indicate that
504 frame->mc_vregs contains valid data */
507 /* else assert((regs->msr & MSR_VEC) == 0) */
509 /* We always copy to/from vrsave, it's 0 if we don't have or don't
510 * use altivec. Since VSCR only contains 32 bits saved in the least
511 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
512 * most significant bits of that same vector. --BenH
514 if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
516 #endif /* CONFIG_ALTIVEC */
517 if (copy_fpr_to_user(&frame->mc_fregs, current))
521 * Copy VSR 0-31 upper half from thread_struct to local
522 * buffer, then write that to userspace. Also set MSR_VSX in
523 * the saved MSR value to indicate that frame->mc_vregs
524 * contains valid data
526 if (current->thread.used_vsr && ctx_has_vsx_region) {
527 __giveup_vsx(current);
528 if (copy_vsx_to_user(&frame->mc_vsregs, current))
532 #endif /* CONFIG_VSX */
534 /* save spe registers */
535 if (current->thread.used_spe) {
536 flush_spe_to_thread(current);
537 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
538 ELF_NEVRREG * sizeof(u32)))
540 /* set MSR_SPE in the saved MSR value to indicate that
541 frame->mc_vregs contains valid data */
544 /* else assert((regs->msr & MSR_SPE) == 0) */
546 /* We always copy to/from spefscr */
547 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
549 #endif /* CONFIG_SPE */
551 if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
554 /* Set up the sigreturn trampoline: li r0,sigret; sc */
555 if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
556 || __put_user(0x44000002UL, &frame->tramp[1]))
558 flush_icache_range((unsigned long) &frame->tramp[0],
559 (unsigned long) &frame->tramp[2]);
565 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
567 * Save the current user registers on the user stack.
568 * We only save the altivec/spe registers if the process has used
569 * altivec/spe instructions at some point.
570 * We also save the transactional registers to a second ucontext in the
573 * See save_user_regs() and signal_64.c:setup_tm_sigcontexts().
575 static int save_tm_user_regs(struct pt_regs *regs,
576 struct mcontext __user *frame,
577 struct mcontext __user *tm_frame, int sigret)
579 unsigned long msr = regs->msr;
581 /* tm_reclaim rolls back all reg states, updating thread.ckpt_regs,
582 * thread.transact_fpr[], thread.transact_vr[], etc.
585 tm_reclaim(¤t->thread, msr, TM_CAUSE_SIGNAL);
587 /* Make sure floating point registers are stored in regs */
588 flush_fp_to_thread(current);
590 /* Save both sets of general registers */
591 if (save_general_regs(¤t->thread.ckpt_regs, frame)
592 || save_general_regs(regs, tm_frame))
595 /* Stash the top half of the 64bit MSR into the 32bit MSR word
596 * of the transactional mcontext. This way we have a backward-compatible
597 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
598 * also look at what type of transaction (T or S) was active at the
599 * time of the signal.
601 if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
604 #ifdef CONFIG_ALTIVEC
605 /* save altivec registers */
606 if (current->thread.used_vr) {
607 flush_altivec_to_thread(current);
608 if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
609 ELF_NVRREG * sizeof(vector128)))
612 if (__copy_to_user(&tm_frame->mc_vregs,
613 current->thread.transact_vr,
614 ELF_NVRREG * sizeof(vector128)))
617 if (__copy_to_user(&tm_frame->mc_vregs,
619 ELF_NVRREG * sizeof(vector128)))
623 /* set MSR_VEC in the saved MSR value to indicate that
624 * frame->mc_vregs contains valid data
629 /* We always copy to/from vrsave, it's 0 if we don't have or don't
630 * use altivec. Since VSCR only contains 32 bits saved in the least
631 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
632 * most significant bits of that same vector. --BenH
634 if (__put_user(current->thread.vrsave,
635 (u32 __user *)&frame->mc_vregs[32]))
638 if (__put_user(current->thread.transact_vrsave,
639 (u32 __user *)&tm_frame->mc_vregs[32]))
642 if (__put_user(current->thread.vrsave,
643 (u32 __user *)&tm_frame->mc_vregs[32]))
646 #endif /* CONFIG_ALTIVEC */
648 if (copy_fpr_to_user(&frame->mc_fregs, current))
651 if (copy_transact_fpr_to_user(&tm_frame->mc_fregs, current))
654 if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
660 * Copy VSR 0-31 upper half from thread_struct to local
661 * buffer, then write that to userspace. Also set MSR_VSX in
662 * the saved MSR value to indicate that frame->mc_vregs
663 * contains valid data
665 if (current->thread.used_vsr) {
666 __giveup_vsx(current);
667 if (copy_vsx_to_user(&frame->mc_vsregs, current))
670 if (copy_transact_vsx_to_user(&tm_frame->mc_vsregs,
674 if (copy_vsx_to_user(&tm_frame->mc_vsregs, current))
680 #endif /* CONFIG_VSX */
682 /* SPE regs are not checkpointed with TM, so this section is
683 * simply the same as in save_user_regs().
685 if (current->thread.used_spe) {
686 flush_spe_to_thread(current);
687 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
688 ELF_NEVRREG * sizeof(u32)))
690 /* set MSR_SPE in the saved MSR value to indicate that
691 * frame->mc_vregs contains valid data */
695 /* We always copy to/from spefscr */
696 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
698 #endif /* CONFIG_SPE */
700 if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
703 /* Set up the sigreturn trampoline: li r0,sigret; sc */
704 if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
705 || __put_user(0x44000002UL, &frame->tramp[1]))
707 flush_icache_range((unsigned long) &frame->tramp[0],
708 (unsigned long) &frame->tramp[2]);
716 * Restore the current user register values from the user stack,
719 static long restore_user_regs(struct pt_regs *regs,
720 struct mcontext __user *sr, int sig)
723 unsigned int save_r2 = 0;
730 * restore general registers but not including MSR or SOFTE. Also
731 * take care of keeping r2 (TLS) intact if not a signal
734 save_r2 = (unsigned int)regs->gpr[2];
735 err = restore_general_regs(regs, sr);
737 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
739 regs->gpr[2] = (unsigned long) save_r2;
743 /* if doing signal return, restore the previous little-endian mode */
745 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
748 * Do this before updating the thread state in
749 * current->thread.fpr/vr/evr. That way, if we get preempted
750 * and another task grabs the FPU/Altivec/SPE, it won't be
751 * tempted to save the current CPU state into the thread_struct
752 * and corrupt what we are writing there.
754 discard_lazy_cpu_state();
756 #ifdef CONFIG_ALTIVEC
758 * Force the process to reload the altivec registers from
759 * current->thread when it next does altivec instructions
761 regs->msr &= ~MSR_VEC;
763 /* restore altivec registers from the stack */
764 if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
765 sizeof(sr->mc_vregs)))
767 } else if (current->thread.used_vr)
768 memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
770 /* Always get VRSAVE back */
771 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
773 #endif /* CONFIG_ALTIVEC */
774 if (copy_fpr_from_user(current, &sr->mc_fregs))
779 * Force the process to reload the VSX registers from
780 * current->thread when it next does VSX instruction.
782 regs->msr &= ~MSR_VSX;
785 * Restore altivec registers from the stack to a local
786 * buffer, then write this out to the thread_struct
788 if (copy_vsx_from_user(current, &sr->mc_vsregs))
790 } else if (current->thread.used_vsr)
791 for (i = 0; i < 32 ; i++)
792 current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
793 #endif /* CONFIG_VSX */
795 * force the process to reload the FP registers from
796 * current->thread when it next does FP instructions
798 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
801 /* force the process to reload the spe registers from
802 current->thread when it next does spe instructions */
803 regs->msr &= ~MSR_SPE;
805 /* restore spe registers from the stack */
806 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
807 ELF_NEVRREG * sizeof(u32)))
809 } else if (current->thread.used_spe)
810 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
812 /* Always get SPEFSCR back */
813 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
815 #endif /* CONFIG_SPE */
820 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
822 * Restore the current user register values from the user stack, except for
823 * MSR, and recheckpoint the original checkpointed register state for processes
826 static long restore_tm_user_regs(struct pt_regs *regs,
827 struct mcontext __user *sr,
828 struct mcontext __user *tm_sr)
837 * restore general registers but not including MSR or SOFTE. Also
838 * take care of keeping r2 (TLS) intact if not a signal.
839 * See comment in signal_64.c:restore_tm_sigcontexts();
840 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
841 * were set by the signal delivery.
843 err = restore_general_regs(regs, tm_sr);
844 err |= restore_general_regs(¤t->thread.ckpt_regs, sr);
846 err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);
848 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
852 /* Restore the previous little-endian mode */
853 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
856 * Do this before updating the thread state in
857 * current->thread.fpr/vr/evr. That way, if we get preempted
858 * and another task grabs the FPU/Altivec/SPE, it won't be
859 * tempted to save the current CPU state into the thread_struct
860 * and corrupt what we are writing there.
862 discard_lazy_cpu_state();
864 #ifdef CONFIG_ALTIVEC
865 regs->msr &= ~MSR_VEC;
867 /* restore altivec registers from the stack */
868 if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
869 sizeof(sr->mc_vregs)) ||
870 __copy_from_user(current->thread.transact_vr,
872 sizeof(sr->mc_vregs)))
874 } else if (current->thread.used_vr) {
875 memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
876 memset(current->thread.transact_vr, 0,
877 ELF_NVRREG * sizeof(vector128));
880 /* Always get VRSAVE back */
881 if (__get_user(current->thread.vrsave,
882 (u32 __user *)&sr->mc_vregs[32]) ||
883 __get_user(current->thread.transact_vrsave,
884 (u32 __user *)&tm_sr->mc_vregs[32]))
886 #endif /* CONFIG_ALTIVEC */
888 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
890 if (copy_fpr_from_user(current, &sr->mc_fregs) ||
891 copy_transact_fpr_from_user(current, &tm_sr->mc_fregs))
895 regs->msr &= ~MSR_VSX;
898 * Restore altivec registers from the stack to a local
899 * buffer, then write this out to the thread_struct
901 if (copy_vsx_from_user(current, &sr->mc_vsregs) ||
902 copy_transact_vsx_from_user(current, &tm_sr->mc_vsregs))
904 } else if (current->thread.used_vsr)
905 for (i = 0; i < 32 ; i++) {
906 current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
907 current->thread.transact_fpr[i][TS_VSRLOWOFFSET] = 0;
909 #endif /* CONFIG_VSX */
912 /* SPE regs are not checkpointed with TM, so this section is
913 * simply the same as in restore_user_regs().
915 regs->msr &= ~MSR_SPE;
917 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
918 ELF_NEVRREG * sizeof(u32)))
920 } else if (current->thread.used_spe)
921 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
923 /* Always get SPEFSCR back */
924 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
927 #endif /* CONFIG_SPE */
929 /* Now, recheckpoint. This loads up all of the checkpointed (older)
930 * registers, including FP and V[S]Rs. After recheckpointing, the
931 * transactional versions should be loaded.
934 /* This loads the checkpointed FP/VEC state, if used */
935 tm_recheckpoint(¤t->thread, msr);
936 /* The task has moved into TM state S, so ensure MSR reflects this */
937 regs->msr = (regs->msr & ~MSR_TS_MASK) | MSR_TS_S;
939 /* This loads the speculative FP/VEC state, if used */
941 do_load_up_transact_fpu(¤t->thread);
942 regs->msr |= (MSR_FP | current->thread.fpexc_mode);
945 do_load_up_transact_altivec(¤t->thread);
946 regs->msr |= MSR_VEC;
954 long compat_sys_rt_sigaction(int sig, const struct sigaction32 __user *act,
955 struct sigaction32 __user *oact, size_t sigsetsize)
957 struct k_sigaction new_ka, old_ka;
960 /* XXX: Don't preclude handling different sized sigset_t's. */
961 if (sigsetsize != sizeof(compat_sigset_t))
965 compat_uptr_t handler;
967 ret = get_user(handler, &act->sa_handler);
968 new_ka.sa.sa_handler = compat_ptr(handler);
969 ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask);
970 ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
975 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
977 ret = put_user(to_user_ptr(old_ka.sa.sa_handler), &oact->sa_handler);
978 ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask);
979 ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
985 * Note: it is necessary to treat how as an unsigned int, with the
986 * corresponding cast to a signed int to insure that the proper
987 * conversion (sign extension) between the register representation
988 * of a signed int (msr in 32-bit mode) and the register representation
989 * of a signed int (msr in 64-bit mode) is performed.
991 long compat_sys_rt_sigprocmask(u32 how, compat_sigset_t __user *set,
992 compat_sigset_t __user *oset, size_t sigsetsize)
997 mm_segment_t old_fs = get_fs();
1000 if (get_sigset_t(&s, set))
1005 /* This is valid because of the set_fs() */
1006 up = (sigset_t __user *) &s;
1007 ret = sys_rt_sigprocmask((int)how, set ? up : NULL, oset ? up : NULL,
1013 if (put_sigset_t(oset, &s))
1019 long compat_sys_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
1023 mm_segment_t old_fs = get_fs();
1026 /* The __user pointer cast is valid because of the set_fs() */
1027 ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
1030 if (put_sigset_t(set, &s))
1037 int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
1041 if (!access_ok (VERIFY_WRITE, d, sizeof(*d)))
1044 /* If you change siginfo_t structure, please be sure
1045 * this code is fixed accordingly.
1046 * It should never copy any pad contained in the structure
1047 * to avoid security leaks, but must copy the generic
1048 * 3 ints plus the relevant union member.
1049 * This routine must convert siginfo from 64bit to 32bit as well
1052 err = __put_user(s->si_signo, &d->si_signo);
1053 err |= __put_user(s->si_errno, &d->si_errno);
1054 err |= __put_user((short)s->si_code, &d->si_code);
1056 err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad,
1058 else switch(s->si_code >> 16) {
1059 case __SI_CHLD >> 16:
1060 err |= __put_user(s->si_pid, &d->si_pid);
1061 err |= __put_user(s->si_uid, &d->si_uid);
1062 err |= __put_user(s->si_utime, &d->si_utime);
1063 err |= __put_user(s->si_stime, &d->si_stime);
1064 err |= __put_user(s->si_status, &d->si_status);
1066 case __SI_FAULT >> 16:
1067 err |= __put_user((unsigned int)(unsigned long)s->si_addr,
1070 case __SI_POLL >> 16:
1071 err |= __put_user(s->si_band, &d->si_band);
1072 err |= __put_user(s->si_fd, &d->si_fd);
1074 case __SI_TIMER >> 16:
1075 err |= __put_user(s->si_tid, &d->si_tid);
1076 err |= __put_user(s->si_overrun, &d->si_overrun);
1077 err |= __put_user(s->si_int, &d->si_int);
1079 case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
1080 case __SI_MESGQ >> 16:
1081 err |= __put_user(s->si_int, &d->si_int);
1083 case __SI_KILL >> 16:
1085 err |= __put_user(s->si_pid, &d->si_pid);
1086 err |= __put_user(s->si_uid, &d->si_uid);
1092 #define copy_siginfo_to_user copy_siginfo_to_user32
1094 int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
1096 memset(to, 0, sizeof *to);
1098 if (copy_from_user(to, from, 3*sizeof(int)) ||
1099 copy_from_user(to->_sifields._pad,
1100 from->_sifields._pad, SI_PAD_SIZE32))
1107 * Note: it is necessary to treat pid and sig as unsigned ints, with the
1108 * corresponding cast to a signed int to insure that the proper conversion
1109 * (sign extension) between the register representation of a signed int
1110 * (msr in 32-bit mode) and the register representation of a signed int
1111 * (msr in 64-bit mode) is performed.
1113 long compat_sys_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo)
1117 mm_segment_t old_fs = get_fs();
1119 ret = copy_siginfo_from_user32(&info, uinfo);
1124 /* The __user pointer cast is valid becasuse of the set_fs() */
1125 ret = sys_rt_sigqueueinfo((int)pid, (int)sig, (siginfo_t __user *) &info);
1130 * Start Alternate signal stack support
1133 * sigaltatck compat_sys_sigaltstack
1136 int compat_sys_sigaltstack(u32 __new, u32 __old, int r5,
1137 int r6, int r7, int r8, struct pt_regs *regs)
1139 stack_32_t __user * newstack = compat_ptr(__new);
1140 stack_32_t __user * oldstack = compat_ptr(__old);
1143 mm_segment_t old_fs;
1145 compat_uptr_t ss_sp;
1148 * set sp to the user stack on entry to the system call
1149 * the system call router sets R9 to the saved registers
1153 /* Put new stack info in local 64 bit stack struct */
1155 if (get_user(ss_sp, &newstack->ss_sp) ||
1156 __get_user(uss.ss_flags, &newstack->ss_flags) ||
1157 __get_user(uss.ss_size, &newstack->ss_size))
1159 uss.ss_sp = compat_ptr(ss_sp);
1164 /* The __user pointer casts are valid because of the set_fs() */
1165 ret = do_sigaltstack(
1166 newstack ? (stack_t __user *) &uss : NULL,
1167 oldstack ? (stack_t __user *) &uoss : NULL,
1170 /* Copy the stack information to the user output buffer */
1171 if (!ret && oldstack &&
1172 (put_user(ptr_to_compat(uoss.ss_sp), &oldstack->ss_sp) ||
1173 __put_user(uoss.ss_flags, &oldstack->ss_flags) ||
1174 __put_user(uoss.ss_size, &oldstack->ss_size)))
1178 #endif /* CONFIG_PPC64 */
1181 * Set up a signal frame for a "real-time" signal handler
1182 * (one which gets siginfo).
/*
 * handle_rt_signal32() - deliver a 32-bit "real-time" (siginfo-carrying)
 * signal: build an rt_sigframe on the user stack, save the register
 * state into it, and redirect the task to ka->sa.sa_handler.
 *
 * @sig:    signal number being delivered
 * @ka:     sigaction describing the handler to invoke
 * @info:   siginfo copied out to the user frame
 * @oldset: signal mask saved into the ucontext for sigreturn
 * @regs:   user register state at signal delivery time
 *
 * Any fault while writing the user stack lands on the badframe path at
 * the bottom, which force-delivers SIGSEGV instead.
 */
1184 int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
1185 siginfo_t *info, sigset_t *oldset,
1186 struct pt_regs *regs)
1188 struct rt_sigframe __user *rt_sf;
1189 struct mcontext __user *frame;
1191 unsigned long newsp = 0;
1193 unsigned long tramp;
1195 /* Set up Signal Frame */
1196 /* Put a Real Time Context onto stack */
1197 rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf), 1);
1199 if (unlikely(rt_sf == NULL))
1202 /* Put the siginfo & fill in most of the ucontext */
1203 if (copy_siginfo_to_user(&rt_sf->info, info)
1204 || __put_user(0, &rt_sf->uc.uc_flags)
1205 || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
1206 || __put_user(sas_ss_flags(regs->gpr[1]),
1207 &rt_sf->uc.uc_stack.ss_flags)
1208 || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
1209 || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
1211 || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
1214 /* Save user registers on the stack */
1215 frame = &rt_sf->uc.uc_mcontext;
/*
 * Prefer the rt_sigreturn trampoline in the VDSO when one is mapped;
 * otherwise fall back to the trampoline embedded in the frame itself
 * (sigret/__NR_rt_sigreturn path).
 */
1217 if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
1219 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
1221 sigret = __NR_rt_sigreturn;
1222 tramp = (unsigned long) frame->tramp;
1225 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * In an active transaction, save both the checkpointed state
 * (uc.uc_mcontext) and the transactional state
 * (uc_transact.uc_mcontext).
 */
1226 if (MSR_TM_ACTIVE(regs->msr)) {
1227 if (save_tm_user_regs(regs, &rt_sf->uc.uc_mcontext,
1228 &rt_sf->uc_transact.uc_mcontext, sigret))
1233 if (save_user_regs(regs, frame, sigret, 1))
1237 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Link the transactional ucontext into the frame so sys_rt_sigreturn
 * can locate it via uc.uc_link / uc_transact.uc_regs.
 */
1238 if (MSR_TM_ACTIVE(regs->msr)) {
1239 if (__put_user((unsigned long)&rt_sf->uc_transact,
1241 || __put_user(to_user_ptr(&rt_sf->uc_transact.uc_mcontext),
1242 &rt_sf->uc_transact.uc_regs))
1247 if (__put_user(0, &rt_sf->uc.uc_link))
1250 current->thread.fpscr.val = 0; /* turn off all fp exceptions */
1252 /* create a stack frame for the caller of the handler */
1253 newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
1254 addr = (void __user *)regs->gpr[1];
/* Store the back chain so the handler's frame links to the old stack. */
1255 if (put_user(regs->gpr[1], (u32 __user *)newsp))
1258 /* Fill registers for signal handler */
1259 regs->gpr[1] = newsp;
1261 regs->gpr[4] = (unsigned long) &rt_sf->info;
1262 regs->gpr[5] = (unsigned long) &rt_sf->uc;
1263 regs->gpr[6] = (unsigned long) rt_sf;
1264 regs->nip = (unsigned long) ka->sa.sa_handler;
1265 /* enter the signal handler in big-endian mode */
1266 regs->msr &= ~MSR_LE;
1267 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1268 /* Remove TM bits from thread's MSR. The MSR in the sigcontext
1269 * just indicates to userland that we were doing a transaction, but we
1270 * don't want to return in transactional state:
1272 regs->msr &= ~MSR_TS_MASK;
/* badframe path: a user-stack access above faulted; report and kill. */
1278 printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
1279 regs, frame, newsp);
1281 if (show_unhandled_signals)
1282 printk_ratelimited(KERN_INFO
1283 "%s[%d]: bad frame in handle_rt_signal32: "
1284 "%p nip %08lx lr %08lx\n",
1285 current->comm, current->pid,
1286 addr, regs->nip, regs->link);
1288 force_sigsegv(sig, current);
/*
 * do_setcontext() - install the signal mask and user register state
 * from a user-supplied ucontext.  @sig selects whether restore_user_regs
 * treats this as a signal-return restore.
 */
1292 static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
1295 	struct mcontext __user *mcp;
1297 	if (get_sigset_t(&set, &ucp->uc_sigmask))
/*
 * Compat path: uc_regs holds a 32-bit user address (cmcp); widen it to
 * a kernel pointer.  Presumably the PPC64 branch of an elided #ifdef.
 */
1303 	if (__get_user(cmcp, &ucp->uc_regs))
1305 	mcp = (struct mcontext __user *)(u64)cmcp;
1306 	/* no need to check access_ok(mcp), since mcp < 4GB */
/* Native 32-bit path: read the mcontext pointer directly and verify it. */
1309 	if (__get_user(mcp, &ucp->uc_regs))
1311 	if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
/* Block the new mask before restoring registers from user memory. */
1314 	set_current_blocked(&set);
1315 	if (restore_user_regs(regs, mcp, sig))
1321 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * do_setcontext_tm() - TM variant of do_setcontext(): install the
 * signal mask, then restore both the checkpointed context (@ucp) and
 * the transactional context (@tm_ucp) via restore_tm_user_regs().
 */
1322 static int do_setcontext_tm(struct ucontext __user *ucp,
1323 struct ucontext __user *tm_ucp,
1324 struct pt_regs *regs)
1327 	struct mcontext __user *mcp;
1328 	struct mcontext __user *tm_mcp;
1332 	if (get_sigset_t(&set, &ucp->uc_sigmask))
/* Both uc_regs fields are 32-bit user addresses; widen each to a pointer. */
1335 	if (__get_user(cmcp, &ucp->uc_regs) ||
1336 	    __get_user(tm_cmcp, &tm_ucp->uc_regs))
1338 	mcp = (struct mcontext __user *)(u64)cmcp;
1339 	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
1340 	/* no need to check access_ok(mcp), since mcp < 4GB */
1342 	set_current_blocked(&set);
1343 	if (restore_tm_user_regs(regs, mcp, tm_mcp))
/*
 * sys_swapcontext() - save the current user context into @old_ctx (when
 * non-NULL) and switch to @new_ctx (when non-NULL).
 *
 * @ctx_size is the userland's idea of the ucontext size; contexts built
 * without VSX state are smaller (UCONTEXTSIZEWITHOUTVSX), so the size is
 * validated against the MSR_VSX bit of the incoming context before any
 * register state is trusted.  r6/r7/r8 are unused syscall ABI slots.
 *
 * Fix: line 1412 previously read "¤t->blocked" — the "&current" token
 * had been corrupted through the HTML entity "&curren;" → "¤".  Restored
 * to &current->blocked; all other visible lines are unchanged.
 */
1350 long sys_swapcontext(struct ucontext __user *old_ctx,
1351 struct ucontext __user *new_ctx,
1352 int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
1355 int ctx_has_vsx_region = 0;
1358 unsigned long new_msr = 0;
1361 struct mcontext __user *mcp;
1365 * Get pointer to the real mcontext. No need for
1366 * access_ok since we are dealing with compat
1369 if (__get_user(cmcp, &new_ctx->uc_regs))
1371 mcp = (struct mcontext __user *)(u64)cmcp;
1372 if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
1376 * Check that the context is not smaller than the original
1377 * size (with VMX but without VSX)
1379 if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
1382 * If the new context state sets the MSR VSX bits but
1383 * it doesn't provide VSX state.
1385 if ((ctx_size < sizeof(struct ucontext)) &&
1386 (new_msr & MSR_VSX))
1388 /* Does the context have enough room to store VSX data? */
1389 if (ctx_size >= sizeof(struct ucontext))
1390 ctx_has_vsx_region = 1;
1392 /* Context size is for future use. Right now, we only make sure
1393 * we are passed something we understand
1395 if (ctx_size < sizeof(struct ucontext))
1398 if (old_ctx != NULL) {
1399 struct mcontext __user *mctx;
1402 * old_ctx might not be 16-byte aligned, in which
1403 * case old_ctx->uc_mcontext won't be either.
1404 * Because we have the old_ctx->uc_pad2 field
1405 * before old_ctx->uc_mcontext, we need to round down
1406 * from &old_ctx->uc_mcontext to a 16-byte boundary.
1408 mctx = (struct mcontext __user *)
1409 ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
1410 if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
1411 || save_user_regs(regs, mctx, 0, ctx_has_vsx_region)
1412 || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
1413 || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
1416 if (new_ctx == NULL)
/*
 * Probe the first and last byte of the new context now, so that a
 * fault during do_setcontext() can only mean OOM or a racing unmap
 * (see the comment below).
 */
1418 if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
1419 || __get_user(tmp, (u8 __user *) new_ctx)
1420 || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
1424 * If we get a fault copying the context into the kernel's
1425 * image of the user's registers, we can't just return -EFAULT
1426 * because the user's registers will be corrupted. For instance
1427 * the NIP value may have been updated but not some of the
1428 * other registers. Given that we have done the access_ok
1429 * and successfully read the first and last bytes of the region
1430 * above, this should only happen in an out-of-memory situation
1431 * or if another thread unmaps the region containing the context.
1432 * We kill the task with a SIGSEGV in this situation.
1434 if (do_setcontext(new_ctx, regs, 0))
1437 set_thread_flag(TIF_RESTOREALL);
/*
 * sys_rt_sigreturn() - return from a "real-time" signal handler: locate
 * the rt_sigframe that handle_rt_signal32() pushed just above the
 * current stack pointer, restore signal mask and registers from it
 * (taking the TM path when a transactional context is linked in), and
 * restore the sigaltstack setting.  r3..r8 are unused syscall ABI slots.
 * A bad frame lands on the SIGSEGV path at the bottom.
 */
1441 long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1442 struct pt_regs *regs)
1444 struct rt_sigframe __user *rt_sf;
1445 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1446 struct ucontext __user *uc_transact;
1447 unsigned long msr_hi;
1451 /* Always make any pending restarted system calls return -EINTR */
1452 current_thread_info()->restart_block.fn = do_no_restart_syscall;
/* The frame sits at sp + frame size + 16, mirroring handle_rt_signal32(). */
1454 rt_sf = (struct rt_sigframe __user *)
1455 (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
1456 if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
1458 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* uc_link was set by handle_rt_signal32() when a transaction was active. */
1459 if (__get_user(tmp, &rt_sf->uc.uc_link))
1461 uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
1464 struct mcontext __user *mcp;
1466 if (__get_user(cmcp, &uc_transact->uc_regs))
1468 mcp = (struct mcontext __user *)(u64)cmcp;
1469 /* The top 32 bits of the MSR are stashed in the transactional
1471 if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
1474 if (MSR_TM_SUSPENDED(msr_hi<<32)) {
1475 /* We only recheckpoint on return if we're
1479 if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
1484 /* Fall through, for non-TM restore */
1486 if (do_setcontext(&rt_sf->uc, regs, 1))
1490 * It's not clear whether or why it is desirable to save the
1491 * sigaltstack setting on signal delivery and restore it on
1492 * signal return. But other architectures do this and we have
1493 * always done it up until now so it is probably better not to
1494 * change it. -- paulus
1498 * We use the compat_sys_ version that does the 32/64 bits conversion
1499 * and takes userland pointer directly. What about error checking ?
1500 * nobody does any...
1502 compat_sys_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs);
1504 do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]);
1506 set_thread_flag(TIF_RESTOREALL);
/* bad-frame path: report (ratelimited) and deliver SIGSEGV. */
1510 if (show_unhandled_signals)
1511 printk_ratelimited(KERN_INFO
1512 "%s[%d]: bad frame in sys_rt_sigreturn: "
1513 "%p nip %08lx lr %08lx\n",
1514 current->comm, current->pid,
1515 rt_sf, regs->nip, regs->link);
1517 force_sig(SIGSEGV, current);
/*
 * sys_debug_setcontext() - like setcontext, but first applies an array
 * of @ndbg debug operations (@dbg) to the thread's debug state
 * (single-stepping / branch tracing), then installs the supplied
 * ucontext.  The debug values are validated in the loop before any of
 * them are committed to the thread, so a bad op leaves state untouched.
 * r6/r7/r8 are unused syscall ABI slots.
 */
1522 int sys_debug_setcontext(struct ucontext __user *ctx,
1523 int ndbg, struct sig_dbg_op __user *dbg,
1524 int r6, int r7, int r8,
1525 struct pt_regs *regs)
1527 struct sig_dbg_op op;
1530 unsigned long new_msr = regs->msr;
1531 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1532 unsigned long new_dbcr0 = current->thread.dbcr0;
/* Validate and accumulate each requested debug operation. */
1535 for (i=0; i<ndbg; i++) {
1536 if (copy_from_user(&op, dbg + i, sizeof(op)))
1538 switch (op.dbg_type) {
1539 case SIG_DBG_SINGLE_STEPPING:
1540 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1543 new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
1545 new_dbcr0 &= ~DBCR0_IC;
/* Drop the debug-mode enable bit when no debug events remain active. */
1546 if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
1547 current->thread.dbcr1)) {
1549 new_dbcr0 &= ~DBCR0_IDM;
1559 case SIG_DBG_BRANCH_TRACING:
1560 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1575 /* We wait until here to actually install the values in the
1576 registers so if we fail in the above loop, it will not
1577 affect the contents of these registers. After this point,
1578 failure is a problem, anyway, and it's very unlikely unless
1579 the user is really doing something wrong. */
1580 regs->msr = new_msr;
1581 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1582 current->thread.dbcr0 = new_dbcr0;
/* Probe first and last byte of the user context before restoring it. */
1585 if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
1586 || __get_user(tmp, (u8 __user *) ctx)
1587 || __get_user(tmp, (u8 __user *) (ctx + 1) - 1)
1591 * If we get a fault copying the context into the kernel's
1592 * image of the user's registers, we can't just return -EFAULT
1593 * because the user's registers will be corrupted. For instance
1594 * the NIP value may have been updated but not some of the
1595 * other registers. Given that we have done the access_ok
1596 * and successfully read the first and last bytes of the region
1597 * above, this should only happen in an out-of-memory situation
1598 * or if another thread unmaps the region containing the context.
1599 * We kill the task with a SIGSEGV in this situation.
1601 if (do_setcontext(ctx, regs, 1)) {
1602 if (show_unhandled_signals)
1603 printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
1604 "sys_debug_setcontext: %p nip %08lx "
1606 current->comm, current->pid,
1607 ctx, regs->nip, regs->link);
1609 force_sig(SIGSEGV, current);
1614 * It's not clear whether or why it is desirable to save the
1615 * sigaltstack setting on signal delivery and restore it on
1616 * signal return. But other architectures do this and we have
1617 * always done it up until now so it is probably better not to
1618 * change it. -- paulus
1620 do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]);
1622 set_thread_flag(TIF_RESTOREALL);
1629 * OK, we're invoking a handler
/*
 * handle_signal32() - deliver a classic (non-RT) 32-bit signal: build a
 * sigframe with a sigcontext on the user stack, save the register
 * state, and point the NIP at ka->sa.sa_handler.  The upper half of the
 * 64-bit sigmask is stashed in sc->_unused[3] (see the matching read in
 * sys_sigreturn).  Faults writing the user stack take the badframe path
 * and force SIGSEGV.
 */
1631 int handle_signal32(unsigned long sig, struct k_sigaction *ka,
1632 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
1634 struct sigcontext __user *sc;
1635 struct sigframe __user *frame;
1636 unsigned long newsp = 0;
1638 unsigned long tramp;
1640 /* Set up Signal Frame */
1641 frame = get_sigframe(ka, regs, sizeof(*frame), 1);
1642 if (unlikely(frame == NULL))
1644 sc = (struct sigcontext __user *) &frame->sctx;
1647 #error "Please adjust handle_signal()"
/* Fill the sigcontext; the two _unused[3] stores are the PPC64 vs PPC32
 * alternatives for saving the rest of the old sigmask. */
1649 if (__put_user(to_user_ptr(ka->sa.sa_handler), &sc->handler)
1650 || __put_user(oldset->sig[0], &sc->oldmask)
1652 || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
1654 || __put_user(oldset->sig[1], &sc->_unused[3])
1656 || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
1657 || __put_user(sig, &sc->signal))
/* Prefer the VDSO sigreturn trampoline; else the one in the frame. */
1660 if (vdso32_sigtramp && current->mm->context.vdso_base) {
1662 tramp = current->mm->context.vdso_base + vdso32_sigtramp;
1664 sigret = __NR_sigreturn;
1665 tramp = (unsigned long) frame->mctx.tramp;
1668 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* Active transaction: save checkpointed and transactional state. */
1669 if (MSR_TM_ACTIVE(regs->msr)) {
1670 if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
1676 if (save_user_regs(regs, &frame->mctx, sigret, 1))
1681 current->thread.fpscr.val = 0; /* turn off all fp exceptions */
1683 /* create a stack frame for the caller of the handler */
1684 newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
/* Write the back chain linking the new frame to the old stack. */
1685 if (put_user(regs->gpr[1], (u32 __user *)newsp))
1688 regs->gpr[1] = newsp;
1690 regs->gpr[4] = (unsigned long) sc;
1691 regs->nip = (unsigned long) ka->sa.sa_handler;
1692 /* enter the signal handler in big-endian mode */
1693 regs->msr &= ~MSR_LE;
1694 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1695 /* Remove TM bits from thread's MSR. The MSR in the sigcontext
1696 * just indicates to userland that we were doing a transaction, but we
1697 * don't want to return in transactional state:
1699 regs->msr &= ~MSR_TS_MASK;
/* badframe path: a user-stack write faulted; report and kill. */
1705 printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
1706 regs, frame, newsp);
1708 if (show_unhandled_signals)
1709 printk_ratelimited(KERN_INFO
1710 "%s[%d]: bad frame in handle_signal32: "
1711 "%p nip %08lx lr %08lx\n",
1712 current->comm, current->pid,
1713 frame, regs->nip, regs->link);
1715 force_sigsegv(sig, current);
1720 * Do a signal return; undo the signal stack.
1722 long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1723 struct pt_regs *regs)
1725 struct sigcontext __user *sc;
1726 struct sigcontext sigctx;
1727 struct mcontext __user *sr;
1731 /* Always make any pending restarted system calls return -EINTR */
1732 current_thread_info()->restart_block.fn = do_no_restart_syscall;
1734 sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
1736 if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
1741 * Note that PPC32 puts the upper 32 bits of the sigmask in the
1742 * unused part of the signal stackframe
1744 set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
1746 set.sig[0] = sigctx.oldmask;
1747 set.sig[1] = sigctx._unused[3];
1749 set_current_blocked(&set);
1751 sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
1753 if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
1754 || restore_user_regs(regs, sr, 1))
1757 set_thread_flag(TIF_RESTOREALL);
1761 if (show_unhandled_signals)
1762 printk_ratelimited(KERN_INFO
1763 "%s[%d]: bad frame in sys_sigreturn: "
1764 "%p nip %08lx lr %08lx\n",
1765 current->comm, current->pid,
1766 addr, regs->nip, regs->link);
1768 force_sig(SIGSEGV, current);