/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if \syscall == 1
	/*
	 * Save/restore needed during syscalls.  Restore syscall arguments from
	 * the values already saved on stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm
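
/*
 * Note (illustrative, not from the original file): context_tracking_user_exit()
 * is an ordinary C function, so under AAPCS64 it may clobber the caller-saved
 * registers x0-x17. On the syscall path those registers still carry the
 * syscall arguments, which is why ct_user_exit reloads x0-x7 from the pt_regs
 * frame that kernel_entry saved. In C terms, roughly:
 *
 *	struct pt_regs *regs = (struct pt_regs *)sp;
 *	for (int i = 0; i < 8; i++)
 *		arg[i] = regs->regs[i];	// the ldp pairs do this two at a time
 */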
/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	mrs	x21, sp_el0
	mov	tsk, sp
	and	tsk, tsk, #~(THREAD_SIZE - 1)	// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TI_FLAGS]		// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.

	mov	x29, xzr			// fp pointed to user-space
	.else
	add	x21, sp, #S_FRAME_SIZE
	get_thread_info tsk
	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
	ldr	x20, [tsk, #TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	mov	x20, #TASK_SIZE_64
	str	x20, [tsk, #TI_ADDR_LIMIT]
	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Set sp_el0 to current thread_info.
	 */
	.if	\el == 0
	msr	sp_el0, tsk
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
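
/*
 * Illustrative sketch (not from the original file): the frame kernel_entry
 * builds corresponds to the 4.x-era struct pt_regs from asm/ptrace.h; the
 * S_* offsets used above are generated from it by asm-offsets.c. Roughly
 * (field order abbreviated):
 *
 *	struct pt_regs {
 *		u64 regs[31];		// x0..x30, at [sp, #16 * 0] onwards
 *		u64 sp;			// S_SP: aborted SP
 *		u64 pc;			// S_PC: ELR_EL1, the return address
 *		u64 pstate;		// SPSR_EL1
 *		u64 orig_x0;		// S_ORIG_X0: x0 at syscall entry
 *		u64 syscallno;		// S_SYSCALLNO: -1 if not a syscall
 *		u64 orig_addr_limit;	// S_ORIG_ADDR_LIMIT
 *	};
 */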
	.macro	kernel_exit, el
	.if	\el != 0
	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TI_ADDR_LIMIT]

	/* No need to restore UAO, it will be restored from SPSR_EL1 */
	.endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
	tbz	x22, #4, 1f
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
1:
alternative_else_nop_endif
#endif
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret					// return to kernel
	.endm
	.macro	get_thread_info, rd
	mrs	\rd, sp_el0
	.endm
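
/*
 * Illustrative sketch (not from the original file): with sp_el0 holding the
 * current thread_info pointer while in the kernel (see kernel_entry above),
 * the C side can recover it without any stack masking, along the lines of:
 *
 *	static inline struct thread_info *current_thread_info(void)
 *	{
 *		unsigned long sp_el0;
 *
 *		asm ("mrs %0, sp_el0" : "=r" (sp_el0));
 *		return (struct thread_info *)sp_el0;
 *	}
 */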
	.macro	irq_stack_entry
	mov	x19, sp			// preserve the original sp

	/*
	 * Compare sp with the current thread_info, if the top
	 * ~(THREAD_SIZE - 1) bits match, we are on a task stack, and
	 * should switch to the irq stack.
	 */
	and	x25, x19, #~(THREAD_SIZE - 1)
	cmp	x25, tsk
	b.ne	9998f

	this_cpu_ptr irq_stack, x25, x26
	mov	x26, #IRQ_STACK_START_SP
	add	x26, x25, x26

	/* switch to the irq stack */
	mov	sp, x26

	/*
	 * Add a dummy stack frame, this non-standard format is fixed up
	 * by unwind_frame()
	 */
	stp	x29, x19, [sp, #-16]!
	mov	x29, sp

9998:
	.endm

	/*
	 * x19 should be preserved between irq_stack_entry and
	 * irq_stack_exit.
	 */
	.macro	irq_stack_exit
	mov	sp, x19
	.endm
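
/*
 * Illustrative sketch (not from the original file): assuming THREAD_SIZE-
 * aligned task stacks, the check in irq_stack_entry is, in C terms:
 *
 *	bool on_task_stack = (sp & ~(THREAD_SIZE - 1)) == (unsigned long)tsk;
 *	if (on_task_stack)
 *		sp = this_cpu_irq_stack_base + IRQ_STACK_START_SP;
 *
 * A nested IRQ taken while already on the irq stack fails the comparison
 * and keeps the current sp.
 */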
/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info
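
/*
 * Note (illustrative, not from the original file): x25-x28 are callee-saved
 * under AAPCS64, so these aliases survive the C helpers called below; for
 * example, after "bl syscall_trace_enter" the scno/stbl/sc_nr values are
 * still intact, while caller-saved registers such as x0-x17 would not be.
 */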
/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr_l	x1, handle_arch_irq
	mov	x0, sp
	irq_stack_entry
	blr	x1
	irq_stack_exit
	.endm
	.pushsection ".entry.text", "ax"

	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)
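
/*
 * Illustrative note (not from the original file): VBAR_EL1 points at this
 * table, which is why it is aligned to 2KB (.align 11); each ventry expands
 * to a branch placed in its own 128-byte slot. The four groups are: current
 * EL using SP_EL0 (EL1t), current EL using SP_ELx (EL1h), lower EL in
 * AArch64, and lower EL in AArch32. The hardware dispatch is, in C terms:
 *
 *	handler = vbar_el1 + 0x80 * (4 * group + kind); // kind: sync/irq/fiq/serror
 */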
/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)
/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
	b.eq	el1_ia
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv

el1_ia:
	/*
	 * Fall through to the Data abort case
	 */
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling
	 */
	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x2, x1
	mov	x1, #BAD_SYNC
	b	bad_mode
ENDPROC(el1_sync)
	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif
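
/*
 * Illustrative sketch (not from the original file): the CONFIG_PREEMPT block
 * above implements, in C terms:
 *
 *	if (!ti->preempt_count && (ti->flags & _TIF_NEED_RESCHED))
 *		do {
 *			preempt_schedule_irq();
 *		} while (ti->flags & _TIF_NEED_RESCHED);
 *
 * The loop re-reads TI_FLAGS because another task may have run and set
 * TIF_NEED_RESCHED again before preempt_schedule_irq() returns.
 */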
/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el0_sys
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked
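
/*
 * Note (illustrative, not from the original file): AArch32 EABI passes the
 * syscall number in r7 (read here as w7), whereas native AArch64 uses w8;
 * both entry paths therefore converge on el0_svc_naked below with scno,
 * stbl and sc_nr already pointing at the right syscall table.
 */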
	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	bic	x0, x26, #(0xff << 56)
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_acc
	b	ret_to_user
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_exc
	b	ret_to_user
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_sp_pc_abort
	b	ret_to_user
el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, sp
	bl	do_undefinstr
	b	ret_to_user
el0_sys:
	/*
	 * System instructions, for trapped cache maintenance instructions
	 */
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_sysinstr
	b	ret_to_user
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	ct_user_exit
	b	ret_to_user
el0_inv:
	enable_dbg
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mov	x2, x25
	bl	bad_mode
	b	ret_to_user
ENDPROC(el0_sync)
	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)
/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 * x0 = previous task_struct (must be preserved across the switch)
 * x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
ENTRY(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	and	x9, x9, #~(THREAD_SIZE - 1)
	msr	sp_el0, x9
	ret
ENDPROC(cpu_switch_to)
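
/*
 * Illustrative sketch (not from the original file): THREAD_CPU_CONTEXT is the
 * asm-offsets constant for task_struct.thread.cpu_context, which in this era
 * is laid out to match the stp/ldp pairs above, roughly:
 *
 *	struct cpu_context {
 *		unsigned long x19, x20, x21, x22, x23, x24;
 *		unsigned long x25, x26, x27, x28;
 *		unsigned long fp;	// x29
 *		unsigned long sp;
 *		unsigned long pc;	// loaded into lr, so "ret" resumes there
 *	};
 *
 * Only callee-saved state is switched here; everything else was already
 * saved by kernel_entry on the way into the kernel.
 */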
/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	str	x0, [sp, #S_X0]			// returned x0
	ldr	x1, [tsk, #TI_FLAGS]		// re-check for syscall tracing
	and	x2, x1, #_TIF_SYSCALL_WORK
	cbnz	x2, ret_fast_syscall_trace
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
	kernel_exit 0
ret_fast_syscall_trace:
	enable_irq				// enable interrupts
	b	__sys_trace_return_skipped	// we already saved x0
/*
 * Ok, we need to do extra processing, enter the slow path.
 */
work_pending:
	mov	x0, sp				// 'regs'
	bl	do_notify_resume
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on		// enabled while in userspace
#endif
	ldr	x1, [tsk, #TI_FLAGS]		// re-check for single-step
	b	finish_ret_to_user
/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
finish_ret_to_user:
	enable_step_tsk x1, x2
	kernel_exit 0
ENDPROC(ret_to_user)
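
/*
 * Note (illustrative, not from the original file): _TIF_WORK_MASK bundles the
 * "work before returning to user" flags; in this era it typically covers
 * TIF_NEED_RESCHED, TIF_SIGPENDING, TIF_NOTIFY_RESUME and TIF_FOREIGN_FPSTATE,
 * all handled on the C side, so the return path is roughly:
 *
 *	while (ti->flags & _TIF_WORK_MASK)
 *		do_notify_resume(regs, ti->flags);	// reschedule, signals, ...
 */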
/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_dbg_and_irq
	ct_user_exit 1

	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine
	b	ret_fast_syscall
ni_sys:
	mov	x0, sp
	bl	do_ni_syscall
	b	ret_fast_syscall
ENDPROC(el0_svc)
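
/*
 * Illustrative sketch (not from the original file): the dispatch above is the
 * assembly form of a bounds-checked array lookup, with x0-x5 still holding
 * the user's arguments:
 *
 *	if (scno < sc_nr) {
 *		syscall_fn_t fn = sys_call_table[scno]; // ldr x16, [stbl, scno, lsl #3]
 *		regs->regs[0] = fn(x0, x1, x2, x3, x4, x5);
 *	} else {
 *		regs->regs[0] = do_ni_syscall(regs);
 *	}
 *
 * (syscall_fn_t is a hypothetical typedef for illustration; each table entry
 * is one 8-byte function pointer, hence the "lsl #3" scaling.)
 */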
	/*
	 * This is the really slow path.  We're going to be doing context
	 * switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	w0, #-1				// set default errno for
	cmp	scno, x0			// user-issued syscall(-1)
	b.ne	1f
	mov	x0, #-ENOSYS
	str	x0, [sp, #S_X0]
1:	mov	x0, sp
	bl	syscall_trace_enter
	cmp	w0, #-1				// skip the syscall?
	b.eq	__sys_trace_return_skipped
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	__ni_sys_trace
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp, #S_X0]			// save returned x0
__sys_trace_return_skipped:
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user

__ni_sys_trace:
	mov	x0, sp
	bl	do_ni_syscall
	b	__sys_trace_return
	.popsection				// .entry.text

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)