git.karo-electronics.de Git - mv-sheeva.git/commitdiff
Merge branch 'x86/uaccess' into core/percpu
author    Ingo Molnar <mingo@elte.hu>
          Mon, 9 Feb 2009 23:40:48 +0000 (00:40 +0100)
committer Ingo Molnar <mingo@elte.hu>
          Mon, 9 Feb 2009 23:40:48 +0000 (00:40 +0100)
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/thread_info.h
arch/x86/kernel/io_apic.c
arch/x86/kernel/signal.c

index c85e7475e1719a1aa4bf4592b62e82b9d800b76c,32bc6c2c1386469782cffcb0e69374ed78115830..ff691736f5e9657db383f7e0a2a13d6d2b36b02b
  #define CLBR_EAX  (1 << 0)
  #define CLBR_ECX  (1 << 1)
  #define CLBR_EDX  (1 << 2)
 +#define CLBR_EDI  (1 << 3)
  
 -#ifdef CONFIG_X86_64
 -#define CLBR_RSI  (1 << 3)
 -#define CLBR_RDI  (1 << 4)
 +#ifdef CONFIG_X86_32
 +/* CLBR_ANY should match all regs platform has. For i386, that's just it */
 +#define CLBR_ANY  ((1 << 4) - 1)
 +
 +#define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX)
 +#define CLBR_RET_REG  (CLBR_EAX | CLBR_EDX)
 +#define CLBR_SCRATCH  (0)
 +#else
 +#define CLBR_RAX  CLBR_EAX
 +#define CLBR_RCX  CLBR_ECX
 +#define CLBR_RDX  CLBR_EDX
 +#define CLBR_RDI  CLBR_EDI
 +#define CLBR_RSI  (1 << 4)
  #define CLBR_R8   (1 << 5)
  #define CLBR_R9   (1 << 6)
  #define CLBR_R10  (1 << 7)
  #define CLBR_R11  (1 << 8)
 +
  #define CLBR_ANY  ((1 << 9) - 1)
 +
 +#define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
 +                       CLBR_RCX | CLBR_R8 | CLBR_R9)
 +#define CLBR_RET_REG  (CLBR_RAX)
 +#define CLBR_SCRATCH  (CLBR_R10 | CLBR_R11)
 +
  #include <asm/desc_defs.h>
 -#else
 -/* CLBR_ANY should match all regs platform has. For i386, that's just it */
 -#define CLBR_ANY  ((1 << 3) - 1)
  #endif /* X86_64 */
  
 +#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
 +
  #ifndef __ASSEMBLY__
  #include <linux/types.h>
  #include <linux/cpumask.h>
@@@ -57,14 -40,6 +57,14 @@@ struct tss_struct
  struct mm_struct;
  struct desc_struct;
  
 +/*
 + * Wrapper type for pointers to code which uses the non-standard
 + * calling convention.  See PV_CALLEE_SAVE_REGS_THUNK below.
 + */
 +struct paravirt_callee_save {
 +      void *func;
 +};
 +
  /* general info */
  struct pv_info {
        unsigned int kernel_rpl;
@@@ -214,15 -189,11 +214,15 @@@ struct pv_irq_ops 
         * expected to use X86_EFLAGS_IF; all other bits
         * returned from save_fl are undefined, and may be ignored by
         * restore_fl.
 +       *
 +       * NOTE: Callers of these functions expect the callee to preserve
 +       * more registers than the standard C calling convention requires.
         */
 -      unsigned long (*save_fl)(void);
 -      void (*restore_fl)(unsigned long);
 -      void (*irq_disable)(void);
 -      void (*irq_enable)(void);
 +      struct paravirt_callee_save save_fl;
 +      struct paravirt_callee_save restore_fl;
 +      struct paravirt_callee_save irq_disable;
 +      struct paravirt_callee_save irq_enable;
 +
        void (*safe_halt)(void);
        void (*halt)(void);
  
@@@ -273,8 -244,7 +273,8 @@@ struct pv_mmu_ops 
        void (*flush_tlb_user)(void);
        void (*flush_tlb_kernel)(void);
        void (*flush_tlb_single)(unsigned long addr);
 -      void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
 +      void (*flush_tlb_others)(const struct cpumask *cpus,
 +                               struct mm_struct *mm,
                                 unsigned long va);
  
        /* Hooks for allocating and freeing a pagetable top-level */
        void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte);
  
 -      pteval_t (*pte_val)(pte_t);
 -      pteval_t (*pte_flags)(pte_t);
 -      pte_t (*make_pte)(pteval_t pte);
 +      struct paravirt_callee_save pte_val;
 +      struct paravirt_callee_save make_pte;
  
 -      pgdval_t (*pgd_val)(pgd_t);
 -      pgd_t (*make_pgd)(pgdval_t pgd);
 +      struct paravirt_callee_save pgd_val;
 +      struct paravirt_callee_save make_pgd;
  
  #if PAGETABLE_LEVELS >= 3
  #ifdef CONFIG_X86_PAE
  
        void (*set_pud)(pud_t *pudp, pud_t pudval);
  
 -      pmdval_t (*pmd_val)(pmd_t);
 -      pmd_t (*make_pmd)(pmdval_t pmd);
 +      struct paravirt_callee_save pmd_val;
 +      struct paravirt_callee_save make_pmd;
  
  #if PAGETABLE_LEVELS == 4
 -      pudval_t (*pud_val)(pud_t);
 -      pud_t (*make_pud)(pudval_t pud);
 +      struct paravirt_callee_save pud_val;
 +      struct paravirt_callee_save make_pud;
  
        void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
  #endif        /* PAGETABLE_LEVELS == 4 */
@@@ -417,8 -388,6 +417,8 @@@ extern struct pv_lock_ops pv_lock_ops
        asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
  
  unsigned paravirt_patch_nop(void);
 +unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
 +unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
  unsigned paravirt_patch_ignore(unsigned len);
  unsigned paravirt_patch_call(void *insnbuf,
                             const void *target, u16 tgt_clobbers,
@@@ -510,45 -479,25 +510,45 @@@ int paravirt_disable_iospace(void)
   * makes sure the incoming and outgoing types are always correct.
   */
  #ifdef CONFIG_X86_32
 -#define PVOP_VCALL_ARGS                       unsigned long __eax, __edx, __ecx
 +#define PVOP_VCALL_ARGS                               \
 +      unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
  #define PVOP_CALL_ARGS                        PVOP_VCALL_ARGS
 +
 +#define PVOP_CALL_ARG1(x)             "a" ((unsigned long)(x))
 +#define PVOP_CALL_ARG2(x)             "d" ((unsigned long)(x))
 +#define PVOP_CALL_ARG3(x)             "c" ((unsigned long)(x))
 +
  #define PVOP_VCALL_CLOBBERS           "=a" (__eax), "=d" (__edx),     \
                                        "=c" (__ecx)
  #define PVOP_CALL_CLOBBERS            PVOP_VCALL_CLOBBERS
 +
 +#define PVOP_VCALLEE_CLOBBERS         "=a" (__eax), "=d" (__edx)
 +#define PVOP_CALLEE_CLOBBERS          PVOP_VCALLEE_CLOBBERS
 +
  #define EXTRA_CLOBBERS
  #define VEXTRA_CLOBBERS
 -#else
 -#define PVOP_VCALL_ARGS               unsigned long __edi, __esi, __edx, __ecx
 +#else  /* CONFIG_X86_64 */
 +#define PVOP_VCALL_ARGS                                       \
 +      unsigned long __edi = __edi, __esi = __esi,     \
 +              __edx = __edx, __ecx = __ecx
  #define PVOP_CALL_ARGS                PVOP_VCALL_ARGS, __eax
 +
 +#define PVOP_CALL_ARG1(x)             "D" ((unsigned long)(x))
 +#define PVOP_CALL_ARG2(x)             "S" ((unsigned long)(x))
 +#define PVOP_CALL_ARG3(x)             "d" ((unsigned long)(x))
 +#define PVOP_CALL_ARG4(x)             "c" ((unsigned long)(x))
 +
  #define PVOP_VCALL_CLOBBERS   "=D" (__edi),                           \
                                "=S" (__esi), "=d" (__edx),             \
                                "=c" (__ecx)
 -
  #define PVOP_CALL_CLOBBERS    PVOP_VCALL_CLOBBERS, "=a" (__eax)
  
 +#define PVOP_VCALLEE_CLOBBERS "=a" (__eax)
 +#define PVOP_CALLEE_CLOBBERS  PVOP_VCALLEE_CLOBBERS
 +
  #define EXTRA_CLOBBERS         , "r8", "r9", "r10", "r11"
  #define VEXTRA_CLOBBERS        , "rax", "r8", "r9", "r10", "r11"
 -#endif
 +#endif        /* CONFIG_X86_32 */
  
  #ifdef CONFIG_PARAVIRT_DEBUG
  #define PVOP_TEST_NULL(op)    BUG_ON(op == NULL)
  #define PVOP_TEST_NULL(op)    ((void)op)
  #endif
  
 -#define __PVOP_CALL(rettype, op, pre, post, ...)                      \
 +#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,               \
 +                    pre, post, ...)                                   \
        ({                                                              \
                rettype __ret;                                          \
 -              PVOP_CALL_ARGS;                                 \
 +              PVOP_CALL_ARGS;                                         \
                PVOP_TEST_NULL(op);                                     \
                /* This is 32-bit specific, but is okay in 64-bit */    \
                /* since this condition will never hold */              \
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
 -                                   : PVOP_CALL_CLOBBERS               \
 +                                   : call_clbr                        \
                                     : paravirt_type(op),               \
 -                                     paravirt_clobber(CLBR_ANY),      \
 +                                     paravirt_clobber(clbr),          \
                                       ##__VA_ARGS__                    \
 -                                   : "memory", "cc" EXTRA_CLOBBERS);  \
 +                                   : "memory", "cc" extra_clbr);      \
                        __ret = (rettype)((((u64)__edx) << 32) | __eax); \
                } else {                                                \
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
 -                                   : PVOP_CALL_CLOBBERS               \
 +                                   : call_clbr                        \
                                     : paravirt_type(op),               \
 -                                     paravirt_clobber(CLBR_ANY),      \
 +                                     paravirt_clobber(clbr),          \
                                       ##__VA_ARGS__                    \
 -                                   : "memory", "cc" EXTRA_CLOBBERS);  \
 +                                   : "memory", "cc" extra_clbr);      \
                        __ret = (rettype)__eax;                         \
                }                                                       \
                __ret;                                                  \
        })
 -#define __PVOP_VCALL(op, pre, post, ...)                              \
 +
 +#define __PVOP_CALL(rettype, op, pre, post, ...)                      \
 +      ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,        \
 +                    EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
 +
 +#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)                        \
 +      ____PVOP_CALL(rettype, op.func, CLBR_RET_REG,                   \
 +                    PVOP_CALLEE_CLOBBERS, ,                           \
 +                    pre, post, ##__VA_ARGS__)
 +
 +
 +#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...)       \
        ({                                                              \
                PVOP_VCALL_ARGS;                                        \
                PVOP_TEST_NULL(op);                                     \
                asm volatile(pre                                        \
                             paravirt_alt(PARAVIRT_CALL)                \
                             post                                       \
 -                           : PVOP_VCALL_CLOBBERS                      \
 +                           : call_clbr                                \
                             : paravirt_type(op),                       \
 -                             paravirt_clobber(CLBR_ANY),              \
 +                             paravirt_clobber(clbr),                  \
                               ##__VA_ARGS__                            \
 -                           : "memory", "cc" VEXTRA_CLOBBERS);         \
 +                           : "memory", "cc" extra_clbr);              \
        })
  
 +#define __PVOP_VCALL(op, pre, post, ...)                              \
 +      ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,               \
 +                     VEXTRA_CLOBBERS,                                 \
 +                     pre, post, ##__VA_ARGS__)
 +
 +#define __PVOP_VCALLEESAVE(op, pre, post, ...)                                \
 +      ____PVOP_VCALL(op.func, CLBR_RET_REG,                           \
 +                     PVOP_VCALLEE_CLOBBERS, ,                         \
 +                     pre, post, ##__VA_ARGS__)
 +
 +
 +
  #define PVOP_CALL0(rettype, op)                                               \
        __PVOP_CALL(rettype, op, "", "")
  #define PVOP_VCALL0(op)                                                       \
        __PVOP_VCALL(op, "", "")
  
 +#define PVOP_CALLEE0(rettype, op)                                     \
 +      __PVOP_CALLEESAVE(rettype, op, "", "")
 +#define PVOP_VCALLEE0(op)                                             \
 +      __PVOP_VCALLEESAVE(op, "", "")
 +
 +
  #define PVOP_CALL1(rettype, op, arg1)                                 \
 -      __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)))
 +      __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
  #define PVOP_VCALL1(op, arg1)                                         \
 -      __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)))
 +      __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
 +
 +#define PVOP_CALLEE1(rettype, op, arg1)                                       \
 +      __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
 +#define PVOP_VCALLEE1(op, arg1)                                               \
 +      __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
 +
  
  #define PVOP_CALL2(rettype, op, arg1, arg2)                           \
 -      __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
 -      "1" ((unsigned long)(arg2)))
 +      __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),          \
 +                  PVOP_CALL_ARG2(arg2))
  #define PVOP_VCALL2(op, arg1, arg2)                                   \
 -      __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
 -      "1" ((unsigned long)(arg2)))
 +      __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),                  \
 +                   PVOP_CALL_ARG2(arg2))
 +
 +#define PVOP_CALLEE2(rettype, op, arg1, arg2)                         \
 +      __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),    \
 +                        PVOP_CALL_ARG2(arg2))
 +#define PVOP_VCALLEE2(op, arg1, arg2)                                 \
 +      __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),            \
 +                         PVOP_CALL_ARG2(arg2))
 +
  
  #define PVOP_CALL3(rettype, op, arg1, arg2, arg3)                     \
 -      __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
 -      "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
 +      __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),          \
 +                  PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
  #define PVOP_VCALL3(op, arg1, arg2, arg3)                             \
 -      __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
 -      "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
 +      __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),                  \
 +                   PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
  
  /* This is the only difference in x86_64. We can make it much simpler */
  #ifdef CONFIG_X86_32
  #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                       \
        __PVOP_CALL(rettype, op,                                        \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
 -                  "0" ((u32)(arg1)), "1" ((u32)(arg2)),               \
 -                  "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
 +                  PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),         \
 +                  PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
  #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                               \
        __PVOP_VCALL(op,                                                \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
                    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
  #else
  #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                       \
 -      __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
 -      "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)),         \
 -      "3"((unsigned long)(arg4)))
 +      __PVOP_CALL(rettype, op, "", "",                                \
 +                  PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),         \
 +                  PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
  #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                               \
 -      __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
 -      "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)),         \
 -      "3"((unsigned long)(arg4)))
 +      __PVOP_VCALL(op, "", "",                                        \
 +                   PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),        \
 +                   PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
  #endif
  
  static inline int paravirt_enabled(void)
@@@ -1079,11 -984,10 +1079,11 @@@ static inline void __flush_tlb_single(u
        PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
  }
  
 -static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 +static inline void flush_tlb_others(const struct cpumask *cpumask,
 +                                  struct mm_struct *mm,
                                    unsigned long va)
  {
 -      PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
 +      PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
  }
  
  static inline int paravirt_pgd_alloc(struct mm_struct *mm)
@@@ -1155,13 -1059,13 +1155,13 @@@ static inline pte_t __pte(pteval_t val
        pteval_t ret;
  
        if (sizeof(pteval_t) > sizeof(long))
 -              ret = PVOP_CALL2(pteval_t,
 -                               pv_mmu_ops.make_pte,
 -                               val, (u64)val >> 32);
 +              ret = PVOP_CALLEE2(pteval_t,
 +                                 pv_mmu_ops.make_pte,
 +                                 val, (u64)val >> 32);
        else
 -              ret = PVOP_CALL1(pteval_t,
 -                               pv_mmu_ops.make_pte,
 -                               val);
 +              ret = PVOP_CALLEE1(pteval_t,
 +                                 pv_mmu_ops.make_pte,
 +                                 val);
  
        return (pte_t) { .pte = ret };
  }
@@@ -1171,12 -1075,29 +1171,12 @@@ static inline pteval_t pte_val(pte_t pt
        pteval_t ret;
  
        if (sizeof(pteval_t) > sizeof(long))
 -              ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val,
 -                               pte.pte, (u64)pte.pte >> 32);
 -      else
 -              ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
 -                               pte.pte);
 -
 -      return ret;
 -}
 -
 -static inline pteval_t pte_flags(pte_t pte)
 -{
 -      pteval_t ret;
 -
 -      if (sizeof(pteval_t) > sizeof(long))
 -              ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
 -                               pte.pte, (u64)pte.pte >> 32);
 +              ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
 +                                 pte.pte, (u64)pte.pte >> 32);
        else
 -              ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
 -                               pte.pte);
 +              ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
 +                                 pte.pte);
  
 -#ifdef CONFIG_PARAVIRT_DEBUG
 -      BUG_ON(ret & PTE_PFN_MASK);
 -#endif
        return ret;
  }
  
@@@ -1185,11 -1106,11 +1185,11 @@@ static inline pgd_t __pgd(pgdval_t val
        pgdval_t ret;
  
        if (sizeof(pgdval_t) > sizeof(long))
 -              ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd,
 -                               val, (u64)val >> 32);
 +              ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
 +                                 val, (u64)val >> 32);
        else
 -              ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd,
 -                               val);
 +              ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
 +                                 val);
  
        return (pgd_t) { ret };
  }
@@@ -1199,11 -1120,11 +1199,11 @@@ static inline pgdval_t pgd_val(pgd_t pg
        pgdval_t ret;
  
        if (sizeof(pgdval_t) > sizeof(long))
 -              ret =  PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val,
 -                                pgd.pgd, (u64)pgd.pgd >> 32);
 +              ret =  PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
 +                                  pgd.pgd, (u64)pgd.pgd >> 32);
        else
 -              ret =  PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val,
 -                                pgd.pgd);
 +              ret =  PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
 +                                  pgd.pgd);
  
        return ret;
  }
@@@ -1267,11 -1188,11 +1267,11 @@@ static inline pmd_t __pmd(pmdval_t val
        pmdval_t ret;
  
        if (sizeof(pmdval_t) > sizeof(long))
 -              ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
 -                               val, (u64)val >> 32);
 +              ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
 +                                 val, (u64)val >> 32);
        else
 -              ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
 -                               val);
 +              ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
 +                                 val);
  
        return (pmd_t) { ret };
  }
@@@ -1281,11 -1202,11 +1281,11 @@@ static inline pmdval_t pmd_val(pmd_t pm
        pmdval_t ret;
  
        if (sizeof(pmdval_t) > sizeof(long))
 -              ret =  PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
 -                                pmd.pmd, (u64)pmd.pmd >> 32);
 +              ret =  PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
 +                                  pmd.pmd, (u64)pmd.pmd >> 32);
        else
 -              ret =  PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
 -                                pmd.pmd);
 +              ret =  PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
 +                                  pmd.pmd);
  
        return ret;
  }
@@@ -1307,11 -1228,11 +1307,11 @@@ static inline pud_t __pud(pudval_t val
        pudval_t ret;
  
        if (sizeof(pudval_t) > sizeof(long))
 -              ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
 -                               val, (u64)val >> 32);
 +              ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
 +                                 val, (u64)val >> 32);
        else
 -              ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
 -                               val);
 +              ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
 +                                 val);
  
        return (pud_t) { ret };
  }
@@@ -1321,11 -1242,11 +1321,11 @@@ static inline pudval_t pud_val(pud_t pu
        pudval_t ret;
  
        if (sizeof(pudval_t) > sizeof(long))
 -              ret =  PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
 -                                pud.pud, (u64)pud.pud >> 32);
 +              ret =  PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
 +                                  pud.pud, (u64)pud.pud >> 32);
        else
 -              ret =  PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
 -                                pud.pud);
 +              ret =  PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
 +                                  pud.pud);
  
        return ret;
  }
@@@ -1466,13 -1387,8 +1466,11 @@@ static inline void __set_fixmap(unsigne
  }
  
  void _paravirt_nop(void);
 +u32 _paravirt_ident_32(u32);
 +u64 _paravirt_ident_64(u64);
 +
  #define paravirt_nop  ((void *)_paravirt_nop)
  
- void paravirt_use_bytelocks(void);
  #ifdef CONFIG_SMP
  
  static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
@@@ -1520,37 -1436,12 +1518,37 @@@ extern struct paravirt_patch_site __par
        __parainstructions_end[];
  
  #ifdef CONFIG_X86_32
 -#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
 -#define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
 +#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
 +#define PV_RESTORE_REGS "popl %edx; popl %ecx;"
 +
 +/* save and restore all caller-save registers, except return value */
 +#define PV_SAVE_ALL_CALLER_REGS               "pushl %ecx;"
 +#define PV_RESTORE_ALL_CALLER_REGS    "popl  %ecx;"
 +
  #define PV_FLAGS_ARG "0"
  #define PV_EXTRA_CLOBBERS
  #define PV_VEXTRA_CLOBBERS
  #else
 +/* save and restore all caller-save registers, except return value */
 +#define PV_SAVE_ALL_CALLER_REGS                                               \
 +      "push %rcx;"                                                    \
 +      "push %rdx;"                                                    \
 +      "push %rsi;"                                                    \
 +      "push %rdi;"                                                    \
 +      "push %r8;"                                                     \
 +      "push %r9;"                                                     \
 +      "push %r10;"                                                    \
 +      "push %r11;"
 +#define PV_RESTORE_ALL_CALLER_REGS                                    \
 +      "pop %r11;"                                                     \
 +      "pop %r10;"                                                     \
 +      "pop %r9;"                                                      \
 +      "pop %r8;"                                                      \
 +      "pop %rdi;"                                                     \
 +      "pop %rsi;"                                                     \
 +      "pop %rdx;"                                                     \
 +      "pop %rcx;"
 +
  /* Saving every register would be too much; instead we clobber all
   * caller-saved registers except the argument register */
  #define PV_SAVE_REGS "pushq %%rdi;"
  #define PV_FLAGS_ARG "D"
  #endif
  
 +/*
 + * Generate a thunk around a function which saves all caller-save
 + * registers except for the return value.  This allows C functions to
 + * be called from assembler code where fewer than normal registers are
 + * available.  It may also help code generation around calls from C
 + * code if the common case doesn't use many registers.
 + *
 + * When a callee is wrapped in a thunk, the caller can assume that all
 + * arg regs and all scratch registers are preserved across the
 + * call. The return value in rax/eax will not be saved, even for void
 + * functions.
 + */
 +#define PV_CALLEE_SAVE_REGS_THUNK(func)                                       \
 +      extern typeof(func) __raw_callee_save_##func;                   \
 +      static void *__##func##__ __used = func;                        \
 +                                                                      \
 +      asm(".pushsection .text;"                                       \
 +          "__raw_callee_save_" #func ": "                             \
 +          PV_SAVE_ALL_CALLER_REGS                                     \
 +          "call " #func ";"                                           \
 +          PV_RESTORE_ALL_CALLER_REGS                                  \
 +          "ret;"                                                      \
 +          ".popsection")
 +
 +/* Get a reference to a callee-save function */
 +#define PV_CALLEE_SAVE(func)                                          \
 +      ((struct paravirt_callee_save) { __raw_callee_save_##func })
 +
 +/* Promise that "func" already uses the right calling convention */
 +#define __PV_IS_CALLEE_SAVE(func)                     \
 +      ((struct paravirt_callee_save) { func })
 +
  static inline unsigned long __raw_local_save_flags(void)
  {
        unsigned long f;
  
 -      asm volatile(paravirt_alt(PV_SAVE_REGS
 -                                PARAVIRT_CALL
 -                                PV_RESTORE_REGS)
 +      asm volatile(paravirt_alt(PARAVIRT_CALL)
                     : "=a"(f)
                     : paravirt_type(pv_irq_ops.save_fl),
                       paravirt_clobber(CLBR_EAX)
 -                   : "memory", "cc" PV_VEXTRA_CLOBBERS);
 +                   : "memory", "cc");
        return f;
  }
  
  static inline void raw_local_irq_restore(unsigned long f)
  {
 -      asm volatile(paravirt_alt(PV_SAVE_REGS
 -                                PARAVIRT_CALL
 -                                PV_RESTORE_REGS)
 +      asm volatile(paravirt_alt(PARAVIRT_CALL)
                     : "=a"(f)
                     : PV_FLAGS_ARG(f),
                       paravirt_type(pv_irq_ops.restore_fl),
                       paravirt_clobber(CLBR_EAX)
 -                   : "memory", "cc" PV_EXTRA_CLOBBERS);
 +                   : "memory", "cc");
  }
  
  static inline void raw_local_irq_disable(void)
  {
 -      asm volatile(paravirt_alt(PV_SAVE_REGS
 -                                PARAVIRT_CALL
 -                                PV_RESTORE_REGS)
 +      asm volatile(paravirt_alt(PARAVIRT_CALL)
                     :
                     : paravirt_type(pv_irq_ops.irq_disable),
                       paravirt_clobber(CLBR_EAX)
 -                   : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
 +                   : "memory", "eax", "cc");
  }
  
  static inline void raw_local_irq_enable(void)
  {
 -      asm volatile(paravirt_alt(PV_SAVE_REGS
 -                                PARAVIRT_CALL
 -                                PV_RESTORE_REGS)
 +      asm volatile(paravirt_alt(PARAVIRT_CALL)
                     :
                     : paravirt_type(pv_irq_ops.irq_enable),
                       paravirt_clobber(CLBR_EAX)
 -                   : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
 +                   : "memory", "eax", "cc");
  }
  
  static inline unsigned long __raw_local_irq_save(void)
        .popsection
  
  
 +#define COND_PUSH(set, mask, reg)                     \
 +      .if ((~(set)) & mask); push %reg; .endif
 +#define COND_POP(set, mask, reg)                      \
 +      .if ((~(set)) & mask); pop %reg; .endif
 +
  #ifdef CONFIG_X86_64
 -#define PV_SAVE_REGS                          \
 -      push %rax;                              \
 -      push %rcx;                              \
 -      push %rdx;                              \
 -      push %rsi;                              \
 -      push %rdi;                              \
 -      push %r8;                               \
 -      push %r9;                               \
 -      push %r10;                              \
 -      push %r11
 -#define PV_RESTORE_REGS                               \
 -      pop %r11;                               \
 -      pop %r10;                               \
 -      pop %r9;                                \
 -      pop %r8;                                \
 -      pop %rdi;                               \
 -      pop %rsi;                               \
 -      pop %rdx;                               \
 -      pop %rcx;                               \
 -      pop %rax
 +
 +#define PV_SAVE_REGS(set)                     \
 +      COND_PUSH(set, CLBR_RAX, rax);          \
 +      COND_PUSH(set, CLBR_RCX, rcx);          \
 +      COND_PUSH(set, CLBR_RDX, rdx);          \
 +      COND_PUSH(set, CLBR_RSI, rsi);          \
 +      COND_PUSH(set, CLBR_RDI, rdi);          \
 +      COND_PUSH(set, CLBR_R8, r8);            \
 +      COND_PUSH(set, CLBR_R9, r9);            \
 +      COND_PUSH(set, CLBR_R10, r10);          \
 +      COND_PUSH(set, CLBR_R11, r11)
 +#define PV_RESTORE_REGS(set)                  \
 +      COND_POP(set, CLBR_R11, r11);           \
 +      COND_POP(set, CLBR_R10, r10);           \
 +      COND_POP(set, CLBR_R9, r9);             \
 +      COND_POP(set, CLBR_R8, r8);             \
 +      COND_POP(set, CLBR_RDI, rdi);           \
 +      COND_POP(set, CLBR_RSI, rsi);           \
 +      COND_POP(set, CLBR_RDX, rdx);           \
 +      COND_POP(set, CLBR_RCX, rcx);           \
 +      COND_POP(set, CLBR_RAX, rax)
 +
  #define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
  #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
  #define PARA_INDIRECT(addr)   *addr(%rip)
  #else
 -#define PV_SAVE_REGS   pushl %eax; pushl %edi; pushl %ecx; pushl %edx
 -#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
 +#define PV_SAVE_REGS(set)                     \
 +      COND_PUSH(set, CLBR_EAX, eax);          \
 +      COND_PUSH(set, CLBR_EDI, edi);          \
 +      COND_PUSH(set, CLBR_ECX, ecx);          \
 +      COND_PUSH(set, CLBR_EDX, edx)
 +#define PV_RESTORE_REGS(set)                  \
 +      COND_POP(set, CLBR_EDX, edx);           \
 +      COND_POP(set, CLBR_ECX, ecx);           \
 +      COND_POP(set, CLBR_EDI, edi);           \
 +      COND_POP(set, CLBR_EAX, eax)
 +
  #define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
  #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
  #define PARA_INDIRECT(addr)   *%cs:addr
  
  #define DISABLE_INTERRUPTS(clobbers)                                  \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
 -                PV_SAVE_REGS;                                         \
 +                PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);    \
 -                PV_RESTORE_REGS;)                     \
 +                PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
  
  #define ENABLE_INTERRUPTS(clobbers)                                   \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
 -                PV_SAVE_REGS;                                         \
 +                PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);     \
 -                PV_RESTORE_REGS;)
 +                PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
  
  #define USERGS_SYSRET32                                                       \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),       \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  swapgs)
  
 +/*
 + * Note: swapgs is very special, and in practice it is either going to
 + * be implemented with a single "swapgs" instruction or something very
 + * special.  Either way, we don't need to save any registers for
 + * it.
 + */
  #define SWAPGS                                                                \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
 -                PV_SAVE_REGS;                                         \
 -                call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs);         \
 -                PV_RESTORE_REGS                                       \
 +                call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)          \
                 )
  
  #define GET_CR2_INTO_RCX                              \
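
To make the callee-save convention above concrete: a backend wraps a plain C
handler with PV_CALLEE_SAVE_REGS_THUNK() and publishes it via PV_CALLEE_SAVE(),
while code that already obeys the convention is registered with
__PV_IS_CALLEE_SAVE(). A minimal sketch, assuming a hypothetical backend
function my_save_fl (illustrative, not part of this commit):

	/* plain C handler; the name is illustrative only */
	static unsigned long my_save_fl(void)
	{
		return 0;	/* e.g. report the guest's virtual IF flag */
	}

	/* emits __raw_callee_save_my_save_fl, which pushes and pops all
	 * caller-save registers except the return value around the call */
	PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);

	static const struct pv_irq_ops my_irq_ops = {
		/* callers may now assume arg and scratch regs survive */
		.save_fl = PV_CALLEE_SAVE(my_save_fl),
	};
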
index b46f8ca007b5754ce1282809d6a7e8669104ed81,3f90aeb456bc19a83b23334ab875a9c56de1bbd3..df9d5f78385e442ae8c92b8556928e86c270021c
@@@ -40,6 -40,7 +40,7 @@@ struct thread_info 
                                                */
        __u8                    supervisor_stack[0];
  #endif
+       int                     uaccess_err;
  };
  
  #define INIT_THREAD_INFO(tsk)                 \
@@@ -194,21 -195,25 +195,21 @@@ static inline struct thread_info *curre
  
  #else /* X86_32 */
  
 -#include <asm/pda.h>
 +#include <asm/percpu.h>
 +#define KERNEL_STACK_OFFSET (5*8)
  
  /*
   * macros/functions for gaining access to the thread information structure
   * preempt_count needs to be 1 initially, until the scheduler is functional.
   */
  #ifndef __ASSEMBLY__
 -static inline struct thread_info *current_thread_info(void)
 -{
 -      struct thread_info *ti;
 -      ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE);
 -      return ti;
 -}
 +DECLARE_PER_CPU(unsigned long, kernel_stack);
  
 -/* do not use in interrupt context */
 -static inline struct thread_info *stack_thread_info(void)
 +static inline struct thread_info *current_thread_info(void)
  {
        struct thread_info *ti;
 -      asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1)));
 +      ti = (void *)(percpu_read(kernel_stack) +
 +                    KERNEL_STACK_OFFSET - THREAD_SIZE);
        return ti;
  }
  
  
  /* how to get the thread information struct from ASM */
  #define GET_THREAD_INFO(reg) \
 -      movq %gs:pda_kernelstack,reg ; \
 -      subq $(THREAD_SIZE-PDA_STACKOFFSET),reg
 +      movq PER_CPU_VAR(kernel_stack),reg ; \
 +      subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
  
  #endif
  
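
The thread_info change replaces the PDA-based lookup with a per-cpu
kernel_stack pointer kept KERNEL_STACK_OFFSET bytes below the top of the
current stack, with thread_info at the bottom of the THREAD_SIZE area. A
standalone model of the arithmetic (the userspace framing and the THREAD_SIZE
value are illustrative, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	#define THREAD_SIZE		(4 * 4096UL)	/* illustrative */
	#define KERNEL_STACK_OFFSET	(5 * 8UL)

	static uintptr_t kernel_stack;	/* stands in for the per-cpu variable */

	static uintptr_t current_thread_info(void)
	{
		/* same arithmetic as the inline function in the diff */
		return kernel_stack + KERNEL_STACK_OFFSET - THREAD_SIZE;
	}

	int main(void)
	{
		uintptr_t stack_base = 0x100000;	/* arbitrary stack area */

		kernel_stack = stack_base + THREAD_SIZE - KERNEL_STACK_OFFSET;
		/* prints the stack base: thread_info sits at the bottom */
		printf("thread_info at %#lx\n",
		       (unsigned long)current_thread_info());
		return 0;
	}
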
index 0a7f6d6b1206f5b9aa9dbc5978eb624cf3ce1f6a,157aafa4558305ca1d6cc76ef52d019fc2198a61..c0498daf01c32cc939316035c7ed45a6e6e77997
@@@ -357,7 -357,7 +357,7 @@@ set_extra_move_desc(struct irq_desc *de
  
        if (!cfg->move_in_progress) {
                /* it means that domain is not changed */
 -              if (!cpumask_intersects(&desc->affinity, mask))
 +              if (!cpumask_intersects(desc->affinity, mask))
                        cfg->move_desc_pending = 1;
        }
  }
@@@ -580,9 -580,9 +580,9 @@@ set_desc_affinity(struct irq_desc *desc
        if (assign_irq_vector(irq, cfg, mask))
                return BAD_APICID;
  
 -      cpumask_and(&desc->affinity, cfg->domain, mask);
 +      cpumask_and(desc->affinity, cfg->domain, mask);
        set_extra_move_desc(desc, mask);
 -      return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask);
 +      return cpu_mask_to_apicid_and(desc->affinity, cpu_online_mask);
  }
  
  static void
@@@ -2382,7 -2382,7 +2382,7 @@@ migrate_ioapic_irq_desc(struct irq_des
        if (cfg->move_in_progress)
                send_cleanup_vector(cfg);
  
 -      cpumask_copy(&desc->affinity, mask);
 +      cpumask_copy(desc->affinity, mask);
  }
  
  static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
        }
  
        /* everything is clear. we have right of way */
 -      migrate_ioapic_irq_desc(desc, &desc->pending_mask);
 +      migrate_ioapic_irq_desc(desc, desc->pending_mask);
  
        ret = 0;
        desc->status &= ~IRQ_MOVE_PENDING;
 -      cpumask_clear(&desc->pending_mask);
 +      cpumask_clear(desc->pending_mask);
  
  unmask:
        unmask_IO_APIC_irq_desc(desc);
@@@ -2433,7 -2433,7 +2433,7 @@@ static void ir_irq_migration(struct wor
                                continue;
                        }
  
 -                      desc->chip->set_affinity(irq, &desc->pending_mask);
 +                      desc->chip->set_affinity(irq, desc->pending_mask);
                        spin_unlock_irqrestore(&desc->lock, flags);
                }
        }
@@@ -2447,7 -2447,7 +2447,7 @@@ static void set_ir_ioapic_affinity_irq_
  {
        if (desc->status & IRQ_LEVEL) {
                desc->status |= IRQ_MOVE_PENDING;
 -              cpumask_copy(&desc->pending_mask, mask);
 +              cpumask_copy(desc->pending_mask, mask);
                migrate_irq_remapped_level_desc(desc);
                return;
        }
@@@ -2515,7 -2515,7 +2515,7 @@@ static void irq_complete_move(struct ir
  
                /* domain has not changed, but affinity did */
                me = smp_processor_id();
 -              if (cpu_isset(me, desc->affinity)) {
 +              if (cpumask_test_cpu(me, desc->affinity)) {
                        *descp = desc = move_irq_desc(desc, me);
                        /* get the new one */
                        cfg = desc->chip_data;
  
        vector = ~get_irq_regs()->orig_ax;
        me = smp_processor_id();
 +
 +      if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) {
  #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
                *descp = desc = move_irq_desc(desc, me);
                /* get the new one */
                cfg = desc->chip_data;
  #endif
 -
 -      if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
                send_cleanup_vector(cfg);
 +      }
  }
  #else
  static inline void irq_complete_move(struct irq_desc **descp) {}
@@@ -3183,7 -3182,7 +3183,7 @@@ unsigned int create_irq_nr(unsigned in
  
        irq = 0;
        spin_lock_irqsave(&vector_lock, flags);
 -      for (new = irq_want; new < NR_IRQS; new++) {
 +      for (new = irq_want; new < nr_irqs; new++) {
                if (platform_legacy_irq(new))
                        continue;
  
@@@ -3258,9 -3257,6 +3258,9 @@@ static int msi_compose_msg(struct pci_d
        int err;
        unsigned dest;
  
 +      if (disable_apic)
 +              return -ENXIO;
 +
        cfg = irq_cfg(irq);
        err = assign_irq_vector(irq, cfg, TARGET_CPUS);
        if (err)
@@@ -3466,40 -3462,6 +3466,6 @@@ static int setup_msi_irq(struct pci_de
        return 0;
  }
  
- int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc)
- {
-       unsigned int irq;
-       int ret;
-       unsigned int irq_want;
-       irq_want = nr_irqs_gsi;
-       irq = create_irq_nr(irq_want);
-       if (irq == 0)
-               return -1;
- #ifdef CONFIG_INTR_REMAP
-       if (!intr_remapping_enabled)
-               goto no_ir;
-       ret = msi_alloc_irte(dev, irq, 1);
-       if (ret < 0)
-               goto error;
- no_ir:
- #endif
-       ret = setup_msi_irq(dev, msidesc, irq);
-       if (ret < 0) {
-               destroy_irq(irq);
-               return ret;
-       }
-       return 0;
- #ifdef CONFIG_INTR_REMAP
- error:
-       destroy_irq(irq);
-       return ret;
- #endif
- }
  int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
  {
        unsigned int irq;
@@@ -3729,9 -3691,6 +3695,9 @@@ int arch_setup_ht_irq(unsigned int irq
        struct irq_cfg *cfg;
        int err;
  
 +      if (disable_apic)
 +              return -ENXIO;
 +
        cfg = irq_cfg(irq);
        err = assign_irq_vector(irq, cfg, TARGET_CPUS);
        if (!err) {
  }
  #endif /* CONFIG_HT_IRQ */
  
 -#ifdef CONFIG_X86_64
 +#ifdef CONFIG_X86_UV
  /*
   * Re-target the irq to the specified CPU and enable the specified MMR located
   * on the specified blade to allow the sending of MSIs to the specified CPU.
@@@ -3856,22 -3815,6 +3822,22 @@@ void __init probe_nr_irqs_gsi(void
                nr_irqs_gsi = nr;
  }
  
 +#ifdef CONFIG_SPARSE_IRQ
 +int __init arch_probe_nr_irqs(void)
 +{
 +      int nr;
 +
 +      nr = ((8 * nr_cpu_ids) > (32 * nr_ioapics) ?
 +              (NR_VECTORS + (8 * nr_cpu_ids)) :
 +              (NR_VECTORS + (32 * nr_ioapics)));
 +
 +      if (nr < nr_irqs && nr > nr_irqs_gsi)
 +              nr_irqs = nr;
 +
 +      return 0;
 +}
 +#endif
 +
  /* --------------------------------------------------------------------------
                            ACPI-based IOAPIC Configuration
     -------------------------------------------------------------------------- */
@@@ -4061,7 -4004,7 +4027,7 @@@ void __init setup_ioapic_dest(void
                         */
                        if (desc->status &
                            (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
 -                              mask = &desc->affinity;
 +                              mask = desc->affinity;
                        else
                                mask = TARGET_CPUS;
  
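The io_apic.c hunks above are mechanical fallout from irq_desc's affinity and
pending_mask becoming struct cpumask pointers (cpumask_var_t): the cpumask
helpers now take them directly rather than by address, and the old cpu_isset()
test becomes cpumask_test_cpu(). A representative before/after fragment,
condensed from the hunks above:

	/* old: embedded cpumask_t, passed by address */
	if (cpu_isset(me, desc->affinity))
		cpumask_copy(&desc->affinity, mask);

	/* new: struct cpumask *, passed directly */
	if (cpumask_test_cpu(me, desc->affinity))
		cpumask_copy(desc->affinity, mask);
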
diff --combined arch/x86/kernel/signal.c
index df0587f24c547eac4a45a0aa64ea73a5dd49c61e,cf34eb37fbeed58d4309bf7f5557358213f7e8fb..7fc78b0198150f72fa4185a9c7f365a488f72a04
  #endif
  
  #define COPY(x)                       {               \
-       err |= __get_user(regs->x, &sc->x);     \
+       get_user_ex(regs->x, &sc->x);           \
  }
  
  #define COPY_SEG(seg)         {                       \
                unsigned short tmp;                     \
-               err |= __get_user(tmp, &sc->seg);       \
+               get_user_ex(tmp, &sc->seg);             \
                regs->seg = tmp;                        \
  }
  
  #define COPY_SEG_CPL3(seg)    {                       \
                unsigned short tmp;                     \
-               err |= __get_user(tmp, &sc->seg);       \
+               get_user_ex(tmp, &sc->seg);             \
                regs->seg = tmp | 3;                    \
  }
  
  #define GET_SEG(seg)          {                       \
                unsigned short tmp;                     \
-               err |= __get_user(tmp, &sc->seg);       \
+               get_user_ex(tmp, &sc->seg);             \
                loadsegment(seg, tmp);                  \
  }
  
@@@ -83,45 -83,49 +83,49 @@@ restore_sigcontext(struct pt_regs *regs
        /* Always make any pending restarted system calls return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;
  
+       get_user_try {
  #ifdef CONFIG_X86_32
-       GET_SEG(gs);
-       COPY_SEG(fs);
-       COPY_SEG(es);
-       COPY_SEG(ds);
+               GET_SEG(gs);
+               COPY_SEG(fs);
+               COPY_SEG(es);
+               COPY_SEG(ds);
  #endif /* CONFIG_X86_32 */
  
-       COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
-       COPY(dx); COPY(cx); COPY(ip);
+               COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
+               COPY(dx); COPY(cx); COPY(ip);
  
  #ifdef CONFIG_X86_64
-       COPY(r8);
-       COPY(r9);
-       COPY(r10);
-       COPY(r11);
-       COPY(r12);
-       COPY(r13);
-       COPY(r14);
-       COPY(r15);
+               COPY(r8);
+               COPY(r9);
+               COPY(r10);
+               COPY(r11);
+               COPY(r12);
+               COPY(r13);
+               COPY(r14);
+               COPY(r15);
  #endif /* CONFIG_X86_64 */
  
  #ifdef CONFIG_X86_32
-       COPY_SEG_CPL3(cs);
-       COPY_SEG_CPL3(ss);
+               COPY_SEG_CPL3(cs);
+               COPY_SEG_CPL3(ss);
  #else /* !CONFIG_X86_32 */
-       /* Kernel saves and restores only the CS segment register on signals,
-        * which is the bare minimum needed to allow mixed 32/64-bit code.
-        * App's signal handler can save/restore other segments if needed. */
-       COPY_SEG_CPL3(cs);
+               /* Kernel saves and restores only the CS segment register on signals,
+                * which is the bare minimum needed to allow mixed 32/64-bit code.
+                * App's signal handler can save/restore other segments if needed. */
+               COPY_SEG_CPL3(cs);
  #endif /* CONFIG_X86_32 */
  
-       err |= __get_user(tmpflags, &sc->flags);
-       regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
-       regs->orig_ax = -1;             /* disable syscall checks */
+               get_user_ex(tmpflags, &sc->flags);
+               regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
+               regs->orig_ax = -1;             /* disable syscall checks */
+               get_user_ex(buf, &sc->fpstate);
+               err |= restore_i387_xstate(buf);
  
-       err |= __get_user(buf, &sc->fpstate);
-       err |= restore_i387_xstate(buf);
+               get_user_ex(*pax, &sc->ax);
+       } get_user_catch(err);
  
-       err |= __get_user(*pax, &sc->ax);
        return err;
  }
  
@@@ -131,57 -135,60 +135,60 @@@ setup_sigcontext(struct sigcontext __us
  {
        int err = 0;
  
- #ifdef CONFIG_X86_32
-       {
-               unsigned int tmp;
+       put_user_try {
  
-               savesegment(gs, tmp);
-               err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
-       }
-       err |= __put_user(regs->fs, (unsigned int __user *)&sc->fs);
-       err |= __put_user(regs->es, (unsigned int __user *)&sc->es);
-       err |= __put_user(regs->ds, (unsigned int __user *)&sc->ds);
+ #ifdef CONFIG_X86_32
+               {
+                       unsigned int tmp;
+                       savesegment(gs, tmp);
+                       put_user_ex(tmp, (unsigned int __user *)&sc->gs);
+               }
+               put_user_ex(regs->fs, (unsigned int __user *)&sc->fs);
+               put_user_ex(regs->es, (unsigned int __user *)&sc->es);
+               put_user_ex(regs->ds, (unsigned int __user *)&sc->ds);
  #endif /* CONFIG_X86_32 */
  
-       err |= __put_user(regs->di, &sc->di);
-       err |= __put_user(regs->si, &sc->si);
-       err |= __put_user(regs->bp, &sc->bp);
-       err |= __put_user(regs->sp, &sc->sp);
-       err |= __put_user(regs->bx, &sc->bx);
-       err |= __put_user(regs->dx, &sc->dx);
-       err |= __put_user(regs->cx, &sc->cx);
-       err |= __put_user(regs->ax, &sc->ax);
+               put_user_ex(regs->di, &sc->di);
+               put_user_ex(regs->si, &sc->si);
+               put_user_ex(regs->bp, &sc->bp);
+               put_user_ex(regs->sp, &sc->sp);
+               put_user_ex(regs->bx, &sc->bx);
+               put_user_ex(regs->dx, &sc->dx);
+               put_user_ex(regs->cx, &sc->cx);
+               put_user_ex(regs->ax, &sc->ax);
  #ifdef CONFIG_X86_64
-       err |= __put_user(regs->r8, &sc->r8);
-       err |= __put_user(regs->r9, &sc->r9);
-       err |= __put_user(regs->r10, &sc->r10);
-       err |= __put_user(regs->r11, &sc->r11);
-       err |= __put_user(regs->r12, &sc->r12);
-       err |= __put_user(regs->r13, &sc->r13);
-       err |= __put_user(regs->r14, &sc->r14);
-       err |= __put_user(regs->r15, &sc->r15);
+               put_user_ex(regs->r8, &sc->r8);
+               put_user_ex(regs->r9, &sc->r9);
+               put_user_ex(regs->r10, &sc->r10);
+               put_user_ex(regs->r11, &sc->r11);
+               put_user_ex(regs->r12, &sc->r12);
+               put_user_ex(regs->r13, &sc->r13);
+               put_user_ex(regs->r14, &sc->r14);
+               put_user_ex(regs->r15, &sc->r15);
  #endif /* CONFIG_X86_64 */
  
-       err |= __put_user(current->thread.trap_no, &sc->trapno);
-       err |= __put_user(current->thread.error_code, &sc->err);
-       err |= __put_user(regs->ip, &sc->ip);
+               put_user_ex(current->thread.trap_no, &sc->trapno);
+               put_user_ex(current->thread.error_code, &sc->err);
+               put_user_ex(regs->ip, &sc->ip);
  #ifdef CONFIG_X86_32
-       err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs);
-       err |= __put_user(regs->flags, &sc->flags);
-       err |= __put_user(regs->sp, &sc->sp_at_signal);
-       err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss);
+               put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
+               put_user_ex(regs->flags, &sc->flags);
+               put_user_ex(regs->sp, &sc->sp_at_signal);
+               put_user_ex(regs->ss, (unsigned int __user *)&sc->ss);
  #else /* !CONFIG_X86_32 */
-       err |= __put_user(regs->flags, &sc->flags);
-       err |= __put_user(regs->cs, &sc->cs);
-       err |= __put_user(0, &sc->gs);
-       err |= __put_user(0, &sc->fs);
+               put_user_ex(regs->flags, &sc->flags);
+               put_user_ex(regs->cs, &sc->cs);
+               put_user_ex(0, &sc->gs);
+               put_user_ex(0, &sc->fs);
  #endif /* CONFIG_X86_32 */
  
-       err |= __put_user(fpstate, &sc->fpstate);
+               put_user_ex(fpstate, &sc->fpstate);
  
-       /* non-iBCS2 extensions.. */
-       err |= __put_user(mask, &sc->oldmask);
-       err |= __put_user(current->thread.cr2, &sc->cr2);
+               /* non-iBCS2 extensions.. */
+               put_user_ex(mask, &sc->oldmask);
+               put_user_ex(current->thread.cr2, &sc->cr2);
+       } put_user_catch(err);
  
        return err;
  }
@@@ -336,43 -343,41 +343,41 @@@ static int __setup_rt_frame(int sig, st
        if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
                return -EFAULT;
  
-       err |= __put_user(sig, &frame->sig);
-       err |= __put_user(&frame->info, &frame->pinfo);
-       err |= __put_user(&frame->uc, &frame->puc);
-       err |= copy_siginfo_to_user(&frame->info, info);
-       if (err)
-               return -EFAULT;
-       /* Create the ucontext.  */
-       if (cpu_has_xsave)
-               err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
-       else
-               err |= __put_user(0, &frame->uc.uc_flags);
-       err |= __put_user(0, &frame->uc.uc_link);
-       err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
-       err |= __put_user(sas_ss_flags(regs->sp),
-                         &frame->uc.uc_stack.ss_flags);
-       err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
-       err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
-                               regs, set->sig[0]);
-       err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
-       if (err)
-               return -EFAULT;
-       /* Set up to return from userspace.  */
-       restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
-       if (ka->sa.sa_flags & SA_RESTORER)
-               restorer = ka->sa.sa_restorer;
-       err |= __put_user(restorer, &frame->pretcode);
-       /*
-        * This is movl $__NR_rt_sigreturn, %ax ; int $0x80
-        *
-        * WE DO NOT USE IT ANY MORE! It's only left here for historical
-        * reasons and because gdb uses it as a signature to notice
-        * signal handler stack frames.
-        */
-       err |= __put_user(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
+       put_user_try {
+               put_user_ex(sig, &frame->sig);
+               put_user_ex(&frame->info, &frame->pinfo);
+               put_user_ex(&frame->uc, &frame->puc);
+               err |= copy_siginfo_to_user(&frame->info, info);
+               /* Create the ucontext.  */
+               if (cpu_has_xsave)
+                       put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
+               else
+                       put_user_ex(0, &frame->uc.uc_flags);
+               put_user_ex(0, &frame->uc.uc_link);
+               put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+               put_user_ex(sas_ss_flags(regs->sp),
+                           &frame->uc.uc_stack.ss_flags);
+               put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+               err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
+                                       regs, set->sig[0]);
+               err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+               /* Set up to return from userspace.  */
+               restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
+               if (ka->sa.sa_flags & SA_RESTORER)
+                       restorer = ka->sa.sa_restorer;
+               put_user_ex(restorer, &frame->pretcode);
+               /*
+                * This is movl $__NR_rt_sigreturn, %ax ; int $0x80
+                *
+                * WE DO NOT USE IT ANY MORE! It's only left here for historical
+                * reasons and because gdb uses it as a signature to notice
+                * signal handler stack frames.
+                */
+               put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
+       } put_user_catch(err);
  
        if (err)
                return -EFAULT;
@@@ -436,28 -441,30 +441,30 @@@ static int __setup_rt_frame(int sig, st
                        return -EFAULT;
        }
  
-       /* Create the ucontext.  */
-       if (cpu_has_xsave)
-               err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
-       else
-               err |= __put_user(0, &frame->uc.uc_flags);
-       err |= __put_user(0, &frame->uc.uc_link);
-       err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
-       err |= __put_user(sas_ss_flags(regs->sp),
-                         &frame->uc.uc_stack.ss_flags);
-       err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
-       err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
-       err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
-       /* Set up to return from userspace.  If provided, use a stub
-          already in userspace.  */
-       /* x86-64 should always use SA_RESTORER. */
-       if (ka->sa.sa_flags & SA_RESTORER) {
-               err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
-       } else {
-               /* could use a vstub here */
-               return -EFAULT;
-       }
+       put_user_try {
+               /* Create the ucontext.  */
+               if (cpu_has_xsave)
+                       put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
+               else
+                       put_user_ex(0, &frame->uc.uc_flags);
+               put_user_ex(0, &frame->uc.uc_link);
+               put_user_ex(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+               put_user_ex(sas_ss_flags(regs->sp),
+                           &frame->uc.uc_stack.ss_flags);
+               put_user_ex(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
+               err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
+               err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+               /* Set up to return from userspace.  If provided, use a stub
+                  already in userspace.  */
+               /* x86-64 should always use SA_RESTORER. */
+               if (ka->sa.sa_flags & SA_RESTORER) {
+                       put_user_ex(ka->sa.sa_restorer, &frame->pretcode);
+               } else {
+                       /* could use a vstub here */
+                       err |= -EFAULT;
+               }
+       } put_user_catch(err);
  
        if (err)
                return -EFAULT;
@@@ -509,31 -516,41 +516,41 @@@ sys_sigaction(int sig, const struct old
              struct old_sigaction __user *oact)
  {
        struct k_sigaction new_ka, old_ka;
-       int ret;
+       int ret = 0;
  
        if (act) {
                old_sigset_t mask;
  
-               if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
-                   __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-                   __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+               if (!access_ok(VERIFY_READ, act, sizeof(*act)))
                        return -EFAULT;
  
-               __get_user(new_ka.sa.sa_flags, &act->sa_flags);
-               __get_user(mask, &act->sa_mask);
+               get_user_try {
+                       get_user_ex(new_ka.sa.sa_handler, &act->sa_handler);
+                       get_user_ex(new_ka.sa.sa_flags, &act->sa_flags);
+                       get_user_ex(mask, &act->sa_mask);
+                       get_user_ex(new_ka.sa.sa_restorer, &act->sa_restorer);
+               } get_user_catch(ret);
+               if (ret)
+                       return -EFAULT;
                siginitset(&new_ka.sa.sa_mask, mask);
        }
  
        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
  
        if (!ret && oact) {
-               if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
-                   __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-                   __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+               if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
                        return -EFAULT;
  
-               __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
-               __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+               put_user_try {
+                       put_user_ex(old_ka.sa.sa_handler, &oact->sa_handler);
+                       put_user_ex(old_ka.sa.sa_flags, &oact->sa_flags);
+                       put_user_ex(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+                       put_user_ex(old_ka.sa.sa_restorer, &oact->sa_restorer);
+               } put_user_catch(ret);
+               if (ret)
+                       return -EFAULT;
        }
  
        return ret;
@@@ -632,16 -649,9 +649,16 @@@ badframe
  }
  
  #ifdef CONFIG_X86_32
 -asmlinkage int sys_rt_sigreturn(struct pt_regs regs)
 +/*
 + * Note: do not pass in pt_regs directly: with tail-call optimization,
 + * GCC will incorrectly stomp on the caller's frame and corrupt user-space
 + * register state:
 + */
 +asmlinkage int sys_rt_sigreturn(unsigned long __unused)
  {
 -      return do_rt_sigreturn(&regs);
 +      struct pt_regs *regs = (struct pt_regs *)&__unused;
 +
 +      return do_rt_sigreturn(regs);
  }
  #else /* !CONFIG_X86_32 */
  asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
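
The pattern running through the signal.c conversion is the exception-table
based uaccess API merged from the x86/uaccess branch: get_user_try/put_user_try
open a region in which each get_user_ex()/put_user_ex() is a straight access, a
fault is recorded via the uaccess_err field added to thread_info above, and
get_user_catch()/put_user_catch() fold the outcome into a single error code,
replacing the per-access "err |= __get_user(...)" branches. A minimal sketch of
the shape (the struct and function names are illustrative, not from this
commit):

	struct two_words {			/* illustrative */
		unsigned long a, b;
	};

	static int copy_in_two_words(struct two_words *dst,
				     const struct two_words __user *src)
	{
		int err = 0;

		get_user_try {
			get_user_ex(dst->a, &src->a);	/* no per-access branch */
			get_user_ex(dst->b, &src->b);
		} get_user_catch(err);	/* nonzero if any access faulted */

		return err;
	}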