X-Git-Url: https://git.karo-electronics.de/?a=blobdiff_plain;f=arch%2Ffrv%2Fkernel%2Ftraps.c;h=1d2dfe67d4426b7805e595f75293b5b46e5f6a8f;hb=451a3c24b0135bce54542009b5fde43846c7cf67;hp=2e6098c855787de54e82953bb740110be9ed0011;hpb=ae3e0218621db0590163b2d5c424ef1f340e3cc6;p=karo-tx-linux.git

diff --git a/arch/frv/kernel/traps.c b/arch/frv/kernel/traps.c
index 2e6098c85578..1d2dfe67d442 100644
--- a/arch/frv/kernel/traps.c
+++ b/arch/frv/kernel/traps.c
@@ -49,7 +49,7 @@ asmlinkage void insn_access_error(unsigned long esfr1, unsigned long epcr0, unsi
 	info.si_signo = SIGSEGV;
 	info.si_code = SEGV_ACCERR;
 	info.si_errno = 0;
-	info.si_addr = (void *) ((epcr0 & EPCR0_V) ? (epcr0 & EPCR0_PC) : __frame->pc);
+	info.si_addr = (void __user *) ((epcr0 & EPCR0_V) ? (epcr0 & EPCR0_PC) : __frame->pc);
 
 	force_sig_info(info.si_signo, &info, current);
 } /* end insn_access_error() */
@@ -73,7 +73,7 @@ asmlinkage void illegal_instruction(unsigned long esfr1, unsigned long epcr0, un
 	       epcr0, esr0, esfr1);
 
 	info.si_errno = 0;
-	info.si_addr = (void *) ((epcr0 & EPCR0_PC) ? (epcr0 & EPCR0_PC) : __frame->pc);
+	info.si_addr = (void __user *) ((epcr0 & EPCR0_V) ? (epcr0 & EPCR0_PC) : __frame->pc);
 
 	switch (__frame->tbr & TBR_TT) {
 	case TBR_TT_ILLEGAL_INSTR:
@@ -100,6 +100,234 @@ asmlinkage void illegal_instruction(unsigned long esfr1, unsigned long epcr0, un
 	force_sig_info(info.si_signo, &info, current);
 } /* end illegal_instruction() */
 
+/*****************************************************************************/
+/*
+ * handle atomic operations with errors
+ * - arguments in gr8, gr9, gr10
+ * - original memory value placed in gr5
+ * - replacement memory value placed in gr9
+ */
+asmlinkage void atomic_operation(unsigned long esfr1, unsigned long epcr0,
+				 unsigned long esr0)
+{
+	static DEFINE_SPINLOCK(atomic_op_lock);
+	unsigned long x, y, z;
+	unsigned long __user *p;
+	mm_segment_t oldfs;
+	siginfo_t info;
+	int ret;
+
+	y = 0;
+	z = 0;
+
+	oldfs = get_fs();
+	if (!user_mode(__frame))
+		set_fs(KERNEL_DS);
+
+	switch (__frame->tbr & TBR_TT) {
+		/* TIRA gr0,#120
+		 * u32 __atomic_user_cmpxchg32(u32 *ptr, u32 test, u32 new)
+		 */
+	case TBR_TT_ATOMIC_CMPXCHG32:
+		p = (unsigned long __user *) __frame->gr8;
+		x = __frame->gr9;
+		y = __frame->gr10;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			if (z != x)
+				goto done;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				if (z != x)
+					goto done2;
+
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#121
+		 * u32 __atomic_kernel_xchg32(void *v, u32 new)
+		 */
+	case TBR_TT_ATOMIC_XCHG32:
+		p = (unsigned long __user *) __frame->gr8;
+		y = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#122
+		 * ulong __atomic_kernel_XOR_return(ulong i, ulong *v)
+		 */
+	case TBR_TT_ATOMIC_XOR:
+		p = (unsigned long __user *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = x ^ z;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#123
+		 * ulong __atomic_kernel_OR_return(ulong i, ulong *v)
+		 */
+	case TBR_TT_ATOMIC_OR:
+		p = (unsigned long __user *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = x ^ z;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#124
+		 * ulong __atomic_kernel_AND_return(ulong i, ulong *v)
+		 */
+	case TBR_TT_ATOMIC_AND:
+		p = (unsigned long __user *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = x & z;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#125
+		 * int __atomic_user_sub_return(atomic_t *v, int i)
+		 */
+	case TBR_TT_ATOMIC_SUB:
+		p = (unsigned long __user *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = z - x;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#126
+		 * int __atomic_user_add_return(atomic_t *v, int i)
+		 */
+	case TBR_TT_ATOMIC_ADD:
+		p = (unsigned long __user *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = z + x;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+	default:
+		BUG();
+	}
+
+done2:
+	spin_unlock_irq(&atomic_op_lock);
+done:
+	if (!user_mode(__frame))
+		set_fs(oldfs);
+	__frame->gr5 = z;
+	__frame->gr9 = y;
+	return;
+
+error2:
+	spin_unlock_irq(&atomic_op_lock);
+error:
+	if (!user_mode(__frame))
+		set_fs(oldfs);
+	__frame->pc -= 4;
+
+	die_if_kernel("-- Atomic Op Error --\n");
+
+	info.si_signo = SIGSEGV;
+	info.si_code = SEGV_ACCERR;
+	info.si_errno = 0;
+	info.si_addr = (void __user *) __frame->pc;
+
+	force_sig_info(info.si_signo, &info, current);
+}
+
 /*****************************************************************************/
 /*
  *
@@ -116,7 +344,7 @@ asmlinkage void media_exception(unsigned long msr0, unsigned long msr1)
 	info.si_signo = SIGFPE;
 	info.si_code = FPE_MDAOVF;
 	info.si_errno = 0;
-	info.si_addr = (void *) __frame->pc;
+	info.si_addr = (void __user *) __frame->pc;
 
 	force_sig_info(info.si_signo, &info, current);
 } /* end media_exception() */
@@ -134,11 +362,8 @@ asmlinkage void memory_access_exception(unsigned long esr0,
 #ifdef CONFIG_MMU
 	unsigned long fixup;
 
-	if ((esr0 & ESRx_EC) == ESRx_EC_DATA_ACCESS)
-		if (handle_misalignment(esr0, ear0, epcr0) == 0)
-			return;
-
-	if ((fixup = search_exception_table(__frame->pc)) != 0) {
+	fixup = search_exception_table(__frame->pc);
+	if (fixup) {
 		__frame->pc = fixup;
 		return;
 	}
@@ -156,7 +381,7 @@ asmlinkage void memory_access_exception(unsigned long esr0,
 	info.si_addr = NULL;
 
 	if ((esr0 & (ESRx_VALID | ESR0_EAV)) == (ESRx_VALID | ESR0_EAV))
-		info.si_addr = (void *) ear0;
+		info.si_addr = (void __user *) ear0;
 
 	force_sig_info(info.si_signo, &info, current);
 
@@ -185,7 +410,7 @@ asmlinkage void data_access_error(unsigned long esfr1, unsigned long esr15, unsi
 	info.si_signo = SIGSEGV;
 	info.si_code = SEGV_ACCERR;
 	info.si_errno = 0;
-	info.si_addr = (void *)
+	info.si_addr = (void __user *)
 		(((esr15 & (ESRx_VALID|ESR15_EAV)) == (ESRx_VALID|ESR15_EAV)) ?
 		 ear15 : 0);
 
 	force_sig_info(info.si_signo, &info, current);
@@ -219,7 +444,7 @@ asmlinkage void division_exception(unsigned long esfr1, unsigned long esr0, unsi
 	info.si_signo = SIGFPE;
 	info.si_code = FPE_INTDIV;
 	info.si_errno = 0;
-	info.si_addr = (void *) __frame->pc;
+	info.si_addr = (void __user *) __frame->pc;
 
 	force_sig_info(info.si_signo, &info, current);
 } /* end division_exception() */
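
Note on the atomic_operation() handler added above: as its block comment states, the faulting helper passes its operands in gr8, gr9 and gr10, and on success the handler hands the original memory value back in gr5 and the value it wrote back in gr9. The sketch below is only an illustration of those value semantics in ordinary user-space C; it is not code from this patch. emu_cmpxchg32(), emu_or_return() and atomic_emu_lock are invented names, the pthread mutex merely stands in for the handler's atomic_op_lock spinlock, and the OR helper is assumed to perform a bitwise OR, even though the TBR_TT_ATOMIC_OR case in the hunk computes x ^ z, the same expression as the XOR case.

#include <stdint.h>
#include <stdio.h>
#include <pthread.h>

/* Stand-in for the handler's atomic_op_lock (illustration only). */
static pthread_mutex_t atomic_emu_lock = PTHREAD_MUTEX_INITIALIZER;

/* Semantics of TIRA gr0,#120: write new_val only if *ptr still holds test;
 * the value that was in memory beforehand is what the handler puts in gr5. */
static uint32_t emu_cmpxchg32(uint32_t *ptr, uint32_t test, uint32_t new_val)
{
	uint32_t old;

	pthread_mutex_lock(&atomic_emu_lock);
	old = *ptr;
	if (old == test)
		*ptr = new_val;
	pthread_mutex_unlock(&atomic_emu_lock);

	return old;
}

/* Assumed semantics of TIRA gr0,#123: *v |= i and return the new value,
 * which is what the handler puts in gr9. */
static uint32_t emu_or_return(uint32_t i, uint32_t *v)
{
	uint32_t val;

	pthread_mutex_lock(&atomic_emu_lock);
	val = *v | i;
	*v = val;
	pthread_mutex_unlock(&atomic_emu_lock);

	return val;
}

int main(void)
{
	uint32_t word = 0x10;

	printf("cmpxchg old=%#x, word now %#x\n",
	       (unsigned) emu_cmpxchg32(&word, 0x10, 0x20), (unsigned) word);
	printf("or_return new=%#x\n", (unsigned) emu_or_return(0x3, &word));
	return 0;
}

Running the sketch prints the pre-exchange value and the OR result; in the kernel the equivalent results reach the trapping code through gr5 and gr9 rather than through a C return value.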