/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Linux interrupt vectors.
 */
#include <linux/linkage.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/asm-offsets.h>
#include <asm/types.h>
#include <asm/signal.h>
#include <hv/hypervisor.h>
#include <arch/interrupts.h>
#include <arch/spr_def.h>
#ifdef CONFIG_PREEMPT
# error "No support for kernel preemption currently"
#endif
#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)

#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
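
/*
 * Note: PTREGS_PTR() works because, once the stack frame is set up,
 * sp sits exactly C_ABI_SAVE_AREA_SIZE bytes below the saved struct
 * pt_regs, so e.g. PTREGS_PTR(r0, PTREGS_OFFSET_PC) yields the address
 * of the saved PC with a single addli.
 */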
	.macro  push_reg reg, ptr=sp, delta=-8
	{
	 st     \ptr, \reg
	 addli  \ptr, \ptr, \delta
	}
	.endm

	.macro  pop_reg reg, ptr=sp, delta=8
	{
	 ld     \reg, \ptr
	 addli  \ptr, \ptr, \delta
	}
	.endm

	.macro  pop_reg_zero reg, zreg, ptr=sp, delta=8
	{
	 ld     \reg, \ptr
	 move   \zreg, zero
	 addi   \ptr, \ptr, \delta
	}
	.endm
	.macro  push_extra_callee_saves reg
	PTREGS_PTR(\reg, PTREGS_OFFSET_REG(51))
	push_reg r51, \reg
	push_reg r50, \reg
	push_reg r49, \reg
	push_reg r48, \reg
	push_reg r47, \reg
	push_reg r46, \reg
	push_reg r45, \reg
	push_reg r44, \reg
	push_reg r43, \reg
	push_reg r42, \reg
	push_reg r41, \reg
	push_reg r40, \reg
	push_reg r39, \reg
	push_reg r38, \reg
	push_reg r37, \reg
	push_reg r36, \reg
	push_reg r35, \reg
	push_reg r34, \reg, PTREGS_OFFSET_BASE - PTREGS_OFFSET_REG(34)
	.endm

	.macro  panic str
	.pushsection .rodata, "a"
1:
	.asciz  "\str"
	.popsection
	{
	 moveli r0, hw2_last(1b)
	}
	{
	 shl16insli r0, r0, hw1(1b)
	}
	{
	 shl16insli r0, r0, hw0(1b)
	 jal    panic
	}
	.endm
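
/*
 * Note: the moveli/shl16insli sequence above is the standard TILE-Gx
 * idiom for materializing a wide constant: moveli loads the
 * sign-extended top chunk (hw2_last), and each shl16insli shifts left
 * 16 bits and inserts the next 16-bit chunk (hw1, then hw0).
 */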

#ifdef __COLLECT_LINKER_FEEDBACK__
	.pushsection .text.intvec_feedback,"ax"
intvec_feedback:
	.popsection
#endif

/*
 * Default interrupt handler.
 *
 * vecnum is where we'll put this code.
 * c_routine is the C routine we'll call.
 *
 * The C routine is passed two arguments:
 * - A pointer to the pt_regs state.
 * - The interrupt vector number.
 *
 * The "processing" argument specifies the code for processing
 * the interrupt.  Defaults to "handle_interrupt".
 */
	.macro  int_hand vecnum, vecname, c_routine, processing=handle_interrupt
	.org    (\vecnum << 8)
intvec_\vecname:
	/* Temporarily save a register so we have somewhere to work. */
	mtspr   SPR_SYSTEM_SAVE_K_1, r0
	mfspr   r0, SPR_EX_CONTEXT_K_1
	andi    r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
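	/*
	 * r0 now holds the interrupted protection level; it is zero if
	 * and only if we came from user space (PL0).
	 */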
	.ifc    \vecnum, INT_DOUBLE_FAULT
	/*
	 * For double-faults from user-space, fall through to the normal
	 * register save and stack setup path.  Otherwise, it's the
	 * hypervisor giving us one last chance to dump diagnostics, and we
	 * branch to the kernel_double_fault routine to do so.
	 */
	bnez    r0, 1f
	j       _kernel_double_fault
1:
	.endif

	/*
	 * If we're coming from user-space, then set sp to the top of
	 * the kernel stack.  Otherwise, assume sp is already valid.
	 */
	{
	 bnez   r0, 0f
	 move   r0, sp
	}
	.ifc    \c_routine, do_page_fault
	/*
	 * The page_fault handler may be downcalled directly by the
	 * hypervisor even when Linux is running and has ICS set.
	 *
	 * In this case the contents of EX_CONTEXT_K_1 reflect the
	 * previous fault and can't be relied on to choose whether or
	 * not to reinitialize the stack pointer.  So we add a test
	 * to see whether SYSTEM_SAVE_K_2 has the high bit set,
	 * and if so we don't reinitialize sp, since we must be coming
	 * from Linux.  (In fact the precise case is !(val & ~1),
	 * but any Linux PC has to have the high bit set.)
	 *
	 * Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for
	 * any path that turns into a downcall to one of our TLB handlers.
	 *
	 * FIXME: if we end up never using this path, perhaps we should
	 * prevent the hypervisor from generating downcalls in this case.
	 * The advantage of getting a downcall is we can panic in Linux.
	 */
	mfspr   r0, SPR_SYSTEM_SAVE_K_2
	{
	 bltz   r0, 0f  /* high bit in S_S_1_2 is for a PC to use */
	 move   r0, sp
	}
	.endif

	/*
	 * SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and
	 * the current stack top in the higher bits.  So we recover
	 * our stack top by just masking off the low bits, then
	 * point sp at the top aligned address on the actual stack page.
	 */
	mfspr   r0, SPR_SYSTEM_SAVE_K_0
	mm      r0, zero, LOG2_THREAD_SIZE, 63

0:
	/*
	 * Align the stack mod 64 so we can properly predict what
	 * cache lines we need to write-hint to reduce memory fetch
	 * latency as we enter the kernel.  The layout of memory is
	 * as follows, with cache line 0 at the lowest VA, and cache
	 * line 8 just below the r0 value this "andi" computes.
	 * Note that we never write to cache line 8, and we skip
	 * cache lines 1-3 for syscalls.
	 *
	 *    cache line 8: ptregs padding (two words)
	 *    cache line 7: sp, lr, pc, ex1, faultnum, orig_r0, flags, cmpexch
	 *    cache line 6: r46...r53 (tp)
	 *    cache line 5: r38...r45
	 *    cache line 4: r30...r37
	 *    cache line 3: r22...r29
	 *    cache line 2: r14...r21
	 *    cache line 1: r6...r13
	 *    cache line 0: 2 x frame, r0..r5
	 */
	andi    r0, r0, -64
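
	/*
	 * The wh64 instructions used below "write-hint" an entire 64-byte
	 * cache line: the line is allocated locally without fetching its
	 * old contents from memory, which is safe because we go on to
	 * overwrite the whole line.
	 */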

	/*
	 * Push the first four registers on the stack, so that we can set
	 * them to vector-unique values before we jump to the common code.
	 *
	 * Registers are pushed on the stack as a struct pt_regs,
	 * with the sp initially just above the struct, and when we're
	 * done, sp points to the base of the struct, minus
	 * C_ABI_SAVE_AREA_SIZE, so we can directly jal to C code.
	 *
	 * This routine saves just the first four registers, plus the
	 * stack context so we can do proper backtracing right away,
	 * and defers to handle_interrupt to save the rest.
	 * The backtracer needs pc, ex1, lr, sp, r52, and faultnum,
	 * and needs sp set to its final location at the bottom of
	 * the big stack frame.
	 */
	addli   r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
	wh64    r0              /* cache line 7 */
	{
	 st     r0, lr
	 addli  r0, r0, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
	}
	{
	 st     r0, sp
	 addli  sp, r0, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_SP
	}
	wh64    sp              /* cache line 6 */
	{
	 st     sp, r52
	 addli  sp, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(52)
	}
	wh64    sp              /* cache line 0 */
	{
	 st     sp, r1
	 addli  sp, sp, PTREGS_OFFSET_REG(2) - PTREGS_OFFSET_REG(1)
	}
	{
	 st     sp, r2
	 addli  sp, sp, PTREGS_OFFSET_REG(3) - PTREGS_OFFSET_REG(2)
	}
	{
	 st     sp, r3
	 addli  sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
	}
	mfspr   r0, SPR_EX_CONTEXT_K_0
	.ifc \processing,handle_syscall
	/*
	 * Bump the saved PC by one bundle so that when we return, we won't
	 * execute the same swint instruction again.  We need to do this while
	 * we're in the critical section.
	 */
	addi    r0, r0, 8
	.endif
	{
	 st     sp, r0
	 addli  sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
	}
	mfspr   r0, SPR_EX_CONTEXT_K_1
	{
	 st     sp, r0
	 addi   sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
	}

	/*
	 * Use r0 for syscalls so it's a temporary; use r1 for interrupts
	 * so that it gets passed through unchanged to the handler routine.
	 * Note that the .if conditional confusingly spans bundles.
	 */
	.ifc \processing,handle_syscall
	{
	 movei  r0, \vecnum
	 st     sp, r0
	}
	.else
	{
	 movei  r1, \vecnum
	 st     sp, r1
	}
	.endif
	addli   sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
	mfspr   r0, SPR_SYSTEM_SAVE_K_1   /* Original r0 */
	{
	 st     sp, r0
	 addi   sp, sp, -PTREGS_OFFSET_REG(0) - 8
	}
	st      sp, zero        /* write zero into "Next SP" frame pointer */
	addi    sp, sp, -8      /* leave SP pointing at bottom of frame */
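
	/*
	 * (C_ABI_SAVE_AREA_SIZE is the two-word, 16-byte save area the
	 * TILE-Gx C ABI reserves at the bottom of every stack frame --
	 * the "2 x frame" in the cache-line diagram above; leaving sp
	 * that far below pt_regs is what lets PTREGS_PTR compute field
	 * addresses with a single addli.)
	 */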

	.ifc \processing,handle_syscall
	j       handle_syscall
	.else
	/* Capture per-interrupt SPR context to registers. */
	.ifc \c_routine, do_page_fault
	mfspr   r2, SPR_SYSTEM_SAVE_K_3   /* address of page fault */
	mfspr   r3, SPR_SYSTEM_SAVE_K_2   /* info about page fault */
	.else
	.ifc \vecnum, INT_ILL_TRANS
	mfspr   r2, ILL_TRANS_REASON
	.else
	.ifc \vecnum, INT_DOUBLE_FAULT
	mfspr   r2, SPR_SYSTEM_SAVE_K_2   /* double fault info from HV */
	.else
	.ifc \c_routine, do_trap
	mfspr   r2, GPV_REASON
	.else
	.ifc \c_routine, op_handle_perf_interrupt
	mfspr   r2, PERF_COUNT_STS
#if CHIP_HAS_AUX_PERF_COUNTERS()
	.else
	.ifc \c_routine, op_handle_aux_perf_interrupt
	mfspr   r2, AUX_PERF_COUNT_STS
	.endif
#endif
	.endif
	.endif
	.endif
	.endif
	.endif
	/* Put function pointer in r0 */
	moveli  r0, hw2_last(\c_routine)
	shl16insli r0, r0, hw1(\c_routine)
	{
	 shl16insli r0, r0, hw0(\c_routine)
	 j      \processing
	}
	.endif
	ENDPROC(intvec_\vecname)

#ifdef __COLLECT_LINKER_FEEDBACK__
	.pushsection .text.intvec_feedback,"ax"
	.org    (\vecnum << 5)
	FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt1, 1 << 8)
	jrp     lr
	.popsection
#endif

	.endm

/*
 * Save the rest of the registers that we didn't save in the actual
 * vector itself.  We can't use r0-r10 inclusive here.
 */
	.macro  finish_interrupt_save, function

	/* If it's a syscall, save a proper orig_r0, otherwise just zero. */
	PTREGS_PTR(r52, PTREGS_OFFSET_ORIG_R0)
	.ifc \function,handle_syscall
	st      r52, r0
	.else
	st      r52, zero
	.endif

	PTREGS_PTR(r52, PTREGS_OFFSET_TP)
	st      r52, tp
	mfspr   tp, CMPEXCH_VALUE
	PTREGS_PTR(r52, PTREGS_OFFSET_CMPEXCH)
	st      r52, tp
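
	/*
	 * (tp itself was just saved, so it is free here to ferry the
	 * CMPEXCH_VALUE SPR into pt_regs; it is reloaded with the
	 * per-cpu offset further below.)
	 */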

	/*
	 * For ordinary syscalls, we save neither caller- nor callee-
	 * save registers, since the syscall invoker doesn't expect the
	 * caller-saves to be saved, and the called kernel functions will
	 * take care of saving the callee-saves for us.
	 *
	 * For interrupts we save just the caller-save registers.  Saving
	 * them is required (since the "caller" can't save them).  Again,
	 * the called kernel functions will restore the callee-save
	 * registers for us appropriately.
	 *
	 * On return, we normally restore nothing special for syscalls,
	 * and just the caller-save registers for interrupts.
	 *
	 * However, there are some important caveats to all this:
	 *
	 * - We always save a few callee-save registers to give us
	 *   some scratchpad registers to carry across function calls.
	 *
	 * - fork/vfork/etc require us to save all the callee-save
	 *   registers, which we do in PTREGS_SYSCALL_ALL_REGS, below.
	 *
	 * - We always save r0..r5 and r10 for syscalls, since we need
	 *   to reload them a bit later for the actual kernel call, and
	 *   since we might need them for -ERESTARTNOINTR, etc.
	 *
	 * - Before invoking a signal handler, we save the unsaved
	 *   callee-save registers so they are visible to the
	 *   signal handler or any ptracer.
	 *
	 * - If the unsaved callee-save registers are modified, we set
	 *   a bit in pt_regs so we know to reload them from pt_regs
	 *   and not just rely on the kernel function unwinding.
	 *   (Done for ptrace register writes and SA_SIGINFO handler.)
	 */
	PTREGS_PTR(r52, PTREGS_OFFSET_REG(33))
	wh64    r52             /* cache line 4 */
	push_reg r33, r52
	push_reg r32, r52
	push_reg r31, r52
	.ifc \function,handle_syscall
	push_reg r30, r52, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(30)
	push_reg TREG_SYSCALL_NR_NAME, r52, \
		 PTREGS_OFFSET_REG(5) - PTREGS_OFFSET_SYSCALL
	.else
	push_reg r30, r52, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(30)
	wh64    r52             /* cache line 3 */
	push_reg r29, r52
	push_reg r28, r52
	push_reg r27, r52
	push_reg r26, r52
	push_reg r25, r52
	push_reg r24, r52
	push_reg r23, r52
	push_reg r22, r52
	wh64    r52             /* cache line 2 */
	push_reg r21, r52
	push_reg r20, r52
	push_reg r19, r52
	push_reg r18, r52
	push_reg r17, r52
	push_reg r16, r52
	push_reg r15, r52
	push_reg r14, r52
	wh64    r52             /* cache line 1 */
	push_reg r13, r52
	push_reg r12, r52
	push_reg r11, r52
	push_reg r10, r52
	push_reg r9, r52
	push_reg r8, r52
	push_reg r7, r52
	push_reg r6, r52
	.endif

	/*
	 * If we will be returning to the kernel, we will need to
	 * reset the interrupt masks to the state they had before.
	 * Set DISABLE_IRQ in flags iff we came from PL1 with irqs disabled.
	 */
	mfspr   r32, SPR_EX_CONTEXT_K_1
	{
	 andi   r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
	 PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
	}
	beqzt   r32, 1f         /* zero if from user space */
	IRQS_DISABLED(r32)      /* zero if irqs enabled */
#if PT_FLAGS_DISABLE_IRQ != 1
# error Value of IRQS_DISABLED used to set PT_FLAGS_DISABLE_IRQ; fix
#endif
1:
	.ifnc \function,handle_syscall
	/* Record the fact that we saved the caller-save registers above. */
	ori     r32, r32, PT_FLAGS_CALLER_SAVES
	.endif
	st      r21, r32

	/*
	 * At this point we've captured enough state to the stack
	 * (including in particular our EX_CONTEXT state) that we can
	 * now release the interrupt critical section and replace it
	 * with our standard "interrupts disabled" mask value.  This
	 * allows synchronous interrupts (and profile interrupts) to
	 * punch through from this point onwards.
	 *
	 * It's important that no code before this point touch memory
	 * other than our own stack (to keep the invariant that this
	 * is all that gets touched under ICS), and that no code after
	 * this point reference any interrupt-specific SPR, in particular
	 * the EX_CONTEXT_K_ values.
	 */
	.ifc \function,handle_nmi
	IRQ_DISABLE_ALL(r20)
	.else
	IRQ_DISABLE(r20, r21)
	.endif
	mtspr   INTERRUPT_CRITICAL_SECTION, zero

	/* Load tp with our per-cpu offset. */
#ifdef CONFIG_SMP
	{
	 mfspr  r20, SPR_SYSTEM_SAVE_K_0
	 moveli r21, hw2_last(__per_cpu_offset)
	}
	{
	 shl16insli r21, r21, hw1(__per_cpu_offset)
	 bfextu r20, r20, 0, LOG2_THREAD_SIZE-1
	}
	shl16insli r21, r21, hw0(__per_cpu_offset)
	shl3add r20, r20, r21
	ld      tp, r20
#else
	move    tp, zero
#endif
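
	/*
	 * (In the SMP sequence above, bfextu extracts the cpu number from
	 * the low bits of SYSTEM_SAVE_K_0, and shl3add computes
	 * r20 * 8 + r21, i.e. the address of this cpu's slot in the
	 * __per_cpu_offset table of 8-byte entries.)
	 */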

#ifdef __COLLECT_LINKER_FEEDBACK__
	/*
	 * Notify the feedback routines that we were in the
	 * appropriate fixed interrupt vector area.  Note that we
	 * still have ICS set at this point, so we can't invoke any
	 * atomic operations or we will panic.  The feedback
	 * routines internally preserve r0..r10 and r30 up.
	 */
	.ifnc \function,handle_syscall
	shli    r20, r1, 5
	.else
	moveli  r20, INT_SWINT_1 << 5
	.endif
	moveli  r21, hw2_last(intvec_feedback)
	shl16insli r21, r21, hw1(intvec_feedback)
	shl16insli r21, r21, hw0(intvec_feedback)
	add     r20, r20, r21
	jalr    r20

	/* And now notify the feedback routines that we are here. */
	FEEDBACK_ENTER(\function)
#endif

	/*
	 * Prepare the first 256 stack bytes to be rapidly accessible
	 * without having to fetch the background data.
	 */
	addi    r52, sp, -64
	{
	 wh64   r52
	 addi   r52, r52, -64
	}
	{
	 wh64   r52
	 addi   r52, r52, -64
	}
	{
	 wh64   r52
	 addi   r52, r52, -64
	}
	wh64    r52

#ifdef CONFIG_TRACE_IRQFLAGS
	.ifnc \function,handle_nmi
	/*
	 * We finally have enough state set up to notify the irq
	 * tracing code that irqs were disabled on entry to the handler.
	 * The TRACE_IRQS_OFF call clobbers registers r0-r29.
	 * For syscalls, we already have the register state saved away
	 * on the stack, so we don't bother to do any register saves here,
	 * and later we pop the registers back off the kernel stack.
	 * For interrupt handlers, save r0-r3 in callee-saved registers.
	 */
	.ifnc \function,handle_syscall
	{ move r30, r0; move r31, r1 }
	{ move r32, r2; move r33, r3 }
	.endif
	TRACE_IRQS_OFF
	.ifnc \function,handle_syscall
	{ move r0, r30; move r1, r31 }
	{ move r2, r32; move r3, r33 }
	.endif
	.endif
#endif

	.endm

/*
 * Redispatch a downcall.
 */
	.macro  dc_dispatch vecnum, vecname
	.org    (\vecnum << 8)
intvec_\vecname:
	j       hv_downcall_dispatch
	ENDPROC(intvec_\vecname)
	.endm

/*
 * Common code for most interrupts.  The C function we're eventually
 * going to is in r0, and the faultnum is in r1; the original
 * values for those registers are on the stack.
 */
	.pushsection .text.handle_interrupt,"ax"
handle_interrupt:
	finish_interrupt_save handle_interrupt

	/* Jump to the C routine; it should enable irqs as soon as possible. */
	{
	 jalr   r0
	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	}
	FEEDBACK_REENTER(handle_interrupt)
	{
	 movei  r30, 0               /* not an NMI */
	 j      interrupt_return
	}
	STD_ENDPROC(handle_interrupt)

/*
 * This routine takes a boolean in r30 indicating if this is an NMI.
 * If so, we also expect a boolean in r31 indicating whether to
 * re-enable the oprofile interrupts.
 *
 * Note that .Lresume_userspace is jumped to directly in several
 * places, and we need to make sure r30 is set correctly in those
 * callers as well.
 */
STD_ENTRY(interrupt_return)
	/* If we're resuming to kernel space, don't check thread flags. */
	{
	 bnez   r30, .Lrestore_all  /* NMIs don't special-case user-space */
	 PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
	}
	ld      r29, r29
	andi    r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
	{
	 beqzt  r29, .Lresume_userspace
	 PTREGS_PTR(r29, PTREGS_OFFSET_PC)
	}

	/* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
	moveli  r27, hw2_last(_cpu_idle_nap)
	{
	 ld     r28, r29
	 shl16insli r27, r27, hw1(_cpu_idle_nap)
	}
	{
	 shl16insli r27, r27, hw0(_cpu_idle_nap)
	 cmpeq  r27, r27, r28
	}
	{
	 blbc   r27, .Lrestore_all
	 addi   r28, r28, 8
	}
	st      r29, r28
	j       .Lrestore_all

.Lresume_userspace:
	FEEDBACK_REENTER(interrupt_return)

	/*
	 * Use r33 to hold whether we have already loaded the callee-saves
	 * into ptregs.  We don't want to do it twice in this loop, since
	 * then we'd clobber whatever changes are made by ptrace, etc.
	 */
	{
	 movei  r33, 0
	 move   r32, sp
	}

	/* Get base of stack in r32. */
	EXTRACT_THREAD_INFO(r32)

.Lretry_work_pending:
	/*
	 * Disable interrupts so as to make sure we don't
	 * miss an interrupt that sets any of the thread flags (like
	 * need_resched or sigpending) between sampling and the iret.
	 * Routines like schedule() or do_signal() may re-enable
	 * interrupts before returning.
	 */
	IRQ_DISABLE(r20, r21)
	TRACE_IRQS_OFF  /* Note: clobbers registers r0-r29 */

	/* Check to see if there is any work to do before returning to user. */
	{
	 addi   r29, r32, THREAD_INFO_FLAGS_OFFSET
	 moveli r1, hw1_last(_TIF_ALLWORK_MASK)
	}
	{
	 ld     r29, r29
	 shl16insli r1, r1, hw0(_TIF_ALLWORK_MASK)
	}
	and     r1, r29, r1
	beqzt   r1, .Lrestore_all

	/*
	 * Make sure we have all the registers saved for signal
	 * handling or notify-resume.  Call out to C code to figure out
	 * exactly what we need to do for each flag bit, then if
	 * necessary, reload the flags and recheck.
	 */
	{
	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	 bnez   r33, 1f
	}
	push_extra_callee_saves r0
	movei   r33, 1
1:	jal     do_work_pending
	bnez    r0, .Lretry_work_pending

	/*
	 * In the NMI case we
	 * omit the call to single_process_check_nohz, which normally checks
	 * to see if we should start or stop the scheduler tick, because
	 * we can't call arbitrary Linux code from an NMI context.
	 * We always call the homecache TLB deferral code to re-trigger
	 * the deferral mechanism.
	 *
	 * The other chunk of responsibility this code has is to reset the
	 * interrupt masks appropriately to reset irqs and NMIs.  We have
	 * to call TRACE_IRQS_OFF and TRACE_IRQS_ON to support all the
	 * lockdep-type stuff, but we can't set ICS until afterwards, since
	 * ICS can only be used in very tight chunks of code to avoid
	 * tripping over various assertions that it is off.
	 */
.Lrestore_all:
	PTREGS_PTR(r0, PTREGS_OFFSET_EX1)
	{
	 ld     r0, r0
	 PTREGS_PTR(r32, PTREGS_OFFSET_FLAGS)
	}
	{
	 andi   r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK
	 ld     r32, r32
	}
	bnez    r0, 1f
	j       2f
#if PT_FLAGS_DISABLE_IRQ != 1
# error Assuming PT_FLAGS_DISABLE_IRQ == 1 so we can use blbct below
#endif
1:	blbct   r32, 2f         /* PT_FLAGS_DISABLE_IRQ clear: enable irqs */
	IRQ_DISABLE(r20, r21)
	TRACE_IRQS_OFF
	movei   r0, 1
	mtspr   INTERRUPT_CRITICAL_SECTION, r0
	beqzt   r30, .Lrestore_regs
	j       3f
2:	TRACE_IRQS_ON
	IRQ_ENABLE_LOAD(r20, r21)
	movei   r0, 1
	mtspr   INTERRUPT_CRITICAL_SECTION, r0
	IRQ_ENABLE_APPLY(r20, r21)
	beqzt   r30, .Lrestore_regs
3:

	/*
	 * We now commit to returning from this interrupt, since we will be
	 * doing things like setting EX_CONTEXT SPRs and unwinding the stack
	 * frame.  No calls should be made to any other code after this point.
	 * This code should only be entered with ICS set.
	 * r32 must still be set to ptregs.flags.
	 * We launch loads to each cache line separately first, so we can
	 * get some parallelism out of the memory subsystem.
	 * We start zeroing caller-saved registers throughout, since
	 * that will save some cycles if this turns out to be a syscall.
	 */
.Lrestore_regs:

	/*
	 * Rotate so we have one high bit and one low bit to test.
	 * - low bit says whether to restore all the callee-saved registers,
	 *   or just r30-r33, and r52 up.
	 * - high bit (i.e. sign bit) says whether to restore all the
	 *   caller-saved registers, or just r0.
	 */
#if PT_FLAGS_CALLER_SAVES != 2 || PT_FLAGS_RESTORE_REGS != 4
# error Rotate trick does not work :-)
#endif
	{
	 rotli  r20, r32, 62
	 PTREGS_PTR(sp, PTREGS_OFFSET_REG(0))
	}
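
	/*
	 * (With PT_FLAGS_CALLER_SAVES == 2 and PT_FLAGS_RESTORE_REGS == 4,
	 * the rotli by 62 above is a rotate right by two: it parks the
	 * "restore callee-saves" bit in bit 0, testable with blbs/blbc,
	 * and the "caller-saves were saved" bit in the sign bit, testable
	 * with bltzt/bgezt.)
	 */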

	/*
	 * Load cache lines 0, 4, 6 and 7, in that order, then use
	 * the last loaded value, which makes it likely that the other
	 * cache lines have also loaded, at which point we should be
	 * able to safely read all the remaining words on those cache
	 * lines without waiting for the memory subsystem.
	 */
	pop_reg r0, sp, PTREGS_OFFSET_REG(30) - PTREGS_OFFSET_REG(0)
	pop_reg r30, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_REG(30)
	pop_reg_zero r52, r3, sp, PTREGS_OFFSET_CMPEXCH - PTREGS_OFFSET_REG(52)
	pop_reg_zero r21, r27, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_CMPEXCH
	pop_reg_zero lr, r2, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_EX1
	{
	 mtspr  CMPEXCH_VALUE, r21
	 move   r4, zero
	}
	pop_reg r21, sp, PTREGS_OFFSET_REG(31) - PTREGS_OFFSET_PC
	{
	 mtspr  SPR_EX_CONTEXT_K_1, lr
	 andi   lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
	}
	{
	 mtspr  SPR_EX_CONTEXT_K_0, r21
	 move   r5, zero
	}
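
	/*
	 * (lr was popped from the saved EX1 slot and r21 from the saved
	 * PC slot, so the two mtspr instructions above stage the
	 * interrupted context back into EX_CONTEXT_K_{1,0} for the
	 * final iret.)
	 */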
	/* Restore callee-saveds that we actually use. */
	pop_reg_zero r31, r6
	pop_reg_zero r32, r7
	pop_reg_zero r33, r8, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(33)

	/*
	 * If we modified other callee-saveds, restore them now.
	 * This is rare, but could be via ptrace or signal handler.
	 */
	blbs    r20, .Lrestore_callees

.Lcontinue_restore_regs:

	/* Check if we're returning from a syscall. */
	{
	 move   r10, zero
	 bltzt  r20, 1f         /* no, so go restore callee-save registers */
	}

	/*
	 * Check if we're returning to userspace.
	 * Note that if we're not, we don't worry about zeroing everything.
	 */
	{
	 addli  sp, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(29)
	 bnez   lr, .Lkernel_return
	}

	/*
	 * On return from syscall, we've restored r0 from pt_regs, but we
	 * clear the remainder of the caller-saved registers.  We could
	 * restore the syscall arguments, but there's not much point,
	 * and it ensures user programs aren't trying to use the
	 * caller-saves if we clear them, as well as avoiding leaking
	 * kernel pointers into userspace.
	 */
	pop_reg_zero lr, r11, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
	pop_reg_zero tp, r12, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
	{
	 ld     sp, sp
	 move   r13, zero
	 move   r14, zero
	}
	{ move r15, zero; move r16, zero }
	{ move r17, zero; move r18, zero }
	{ move r19, zero; move r20, zero }
	{ move r21, zero; move r22, zero }
	{ move r23, zero; move r24, zero }
	{ move r25, zero; move r26, zero }

	/* Set r1 to errno if we are returning an error, otherwise zero. */
	{
	 moveli r29, 4096
	 sub    r1, zero, r0
	}
	{
	 move   r28, zero
	 cmpltu r29, r1, r29
	}
	{
	 mnz    r1, r29, r1
	 move   r29, zero
	}
	iret

	/*
	 * Not a syscall, so restore caller-saved registers.
	 * First kick off loads for cache lines 1-3, which we're touching
	 * for the first time here.
	 */
	.align 64
1:	pop_reg r29, sp, PTREGS_OFFSET_REG(21) - PTREGS_OFFSET_REG(29)
	pop_reg r21, sp, PTREGS_OFFSET_REG(13) - PTREGS_OFFSET_REG(21)
	pop_reg r13, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(13)
	pop_reg r1
	pop_reg r2
	pop_reg r3
	pop_reg r4
	pop_reg r5
	pop_reg r6
	pop_reg r7
	pop_reg r8
	pop_reg r9
	pop_reg r10
	pop_reg r11
	pop_reg r12, sp, 16
	/* r13 already restored above */
	pop_reg r14
	pop_reg r15
	pop_reg r16
	pop_reg r17
	pop_reg r18
	pop_reg r19
	pop_reg r20, sp, 16
	/* r21 already restored above */
	pop_reg r22
	pop_reg r23
	pop_reg r24
	pop_reg r25
	pop_reg r26
	pop_reg r27
	pop_reg r28, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(28)
	/* r29 already restored above */
	bnez    lr, .Lkernel_return
	pop_reg lr, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
	pop_reg tp, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
	ld      sp, sp
	iret

	/*
	 * We can't restore tp when in kernel mode, since a thread might
	 * have migrated from another cpu and brought a stale tp value.
	 */
.Lkernel_return:
	pop_reg lr, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
	ld      sp, sp
	iret

	/* Restore callee-saved registers from r34 to r51. */
.Lrestore_callees:
	addli   sp, sp, PTREGS_OFFSET_REG(34) - PTREGS_OFFSET_REG(29)
	pop_reg r34
	pop_reg r35
	pop_reg r36
	pop_reg r37
	pop_reg r38
	pop_reg r39
	pop_reg r40
	pop_reg r41
	pop_reg r42
	pop_reg r43
	pop_reg r44
	pop_reg r45
	pop_reg r46
	pop_reg r47
	pop_reg r48
	pop_reg r49
	pop_reg r50
	pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51)
	j       .Lcontinue_restore_regs
	STD_ENDPROC(interrupt_return)
954 * "NMI" interrupts mask ALL interrupts before calling the
955 * handler, and don't check thread flags, etc., on the way
956 * back out. In general, the only things we do here for NMIs
957 * are register save/restore and dataplane kernel-TLB management.
958 * We don't (for example) deal with start/stop of the sched tick.
960 .pushsection .text.handle_nmi,"ax"
962 finish_interrupt_save handle_nmi
965 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
967 FEEDBACK_REENTER(handle_nmi)
973 STD_ENDPROC(handle_nmi)

/*
 * Parallel code for syscalls to handle_interrupt.
 */
	.pushsection .text.handle_syscall,"ax"
handle_syscall:
	finish_interrupt_save handle_syscall

	/* Enable irqs. */
	TRACE_IRQS_ON
	IRQ_ENABLE(r20, r21)

	/* Bump the counter for syscalls made on this tile. */
	moveli  r20, hw2_last(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
	shl16insli r20, r20, hw1(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
	shl16insli r20, r20, hw0(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
	add     r20, r20, tp
	ld4s    r21, r20
	{
	 addi   r21, r21, 1
	 move   r31, sp
	}
	{
	 st4    r20, r21
	 EXTRACT_THREAD_INFO(r31)
	}

	/* Trace syscalls, if requested. */
	addi    r31, r31, THREAD_INFO_FLAGS_OFFSET
	ld      r30, r31
	andi    r30, r30, _TIF_SYSCALL_TRACE
	{
	 addi   r30, r31, THREAD_INFO_STATUS_OFFSET - THREAD_INFO_FLAGS_OFFSET
	 beqzt  r30, .Lrestore_syscall_regs
	}
	jal     do_syscall_trace
	FEEDBACK_REENTER(handle_syscall)

	/*
	 * We always reload our registers from the stack at this
	 * point.  They might be valid, if we didn't build with
	 * TRACE_IRQFLAGS, and this isn't a dataplane tile, and we're not
	 * doing syscall tracing, but there are enough cases now that it
	 * seems simplest just to do the reload unconditionally.
	 */
.Lrestore_syscall_regs:
	{
	 ld     r30, r30  /* reload syscall status word */
	 PTREGS_PTR(r11, PTREGS_OFFSET_REG(0))
	}
	pop_reg r0, r11
	pop_reg r1, r11
	pop_reg r2, r11
	pop_reg r3, r11
	pop_reg r4, r11
	pop_reg r5, r11, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(5)
	{
	 ld     TREG_SYSCALL_NR_NAME, r11
	 moveli r21, __NR_syscalls
	}

	/* Ensure that the syscall number is within the legal range. */
	{
	 moveli r20, hw2(sys_call_table)
	 blbs   r30, .Lcompat_syscall
	}
	{
	 cmpltu r21, TREG_SYSCALL_NR_NAME, r21
	 shl16insli r20, r20, hw1(sys_call_table)
	}
	{
	 blbc   r21, .Linvalid_syscall
	 shl16insli r20, r20, hw0(sys_call_table)
	}
.Lload_syscall_pointer:
	shl3add r20, TREG_SYSCALL_NR_NAME, r20
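
	/*
	 * (shl3add scales the syscall number by 8 and adds the table
	 * base, giving the address of the 64-bit table entry; the
	 * ld/jalr pair below then fetches the handler address and
	 * calls it.)
	 */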
	ld      r20, r20

	/* Jump to syscall handler. */
	jalr    r20
.Lhandle_syscall_link: /* value of "lr" after "jalr r20" above */

	/*
	 * Write our r0 onto the stack so it gets restored instead
	 * of whatever the user had there before.
	 * In compat mode, sign-extend r0 before storing it.
	 */
	{
	 PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
	 blbct  r30, 1f
	}
	addxi   r0, r0, 0
1:	st      r29, r0

.Lsyscall_sigreturn_skip:
	FEEDBACK_REENTER(handle_syscall)

	/* Do syscall trace again, if requested. */
	ld      r30, r31
	andi    r0, r30, _TIF_SYSCALL_TRACE
	{
	 andi   r0, r30, _TIF_SINGLESTEP
	 beqzt  r0, 1f
	}
	jal     do_syscall_trace
	FEEDBACK_REENTER(handle_syscall)
	andi    r0, r30, _TIF_SINGLESTEP

1:	beqzt   r0, 2f

	/* Single stepping -- notify ptrace. */
	{
	 movei  r0, SIGTRAP
	 jal    ptrace_notify
	}
	FEEDBACK_REENTER(handle_syscall)

2:	movei   r30, 0               /* not an NMI */
	j       .Lresume_userspace   /* jump into middle of interrupt_return */

.Lcompat_syscall:
	/*
	 * Load the base of the compat syscall table in r20, and
	 * range-check the syscall number (duplicated from 64-bit path).
	 * Sign-extend all the user's passed arguments to make them consistent.
	 * Also save the original "r(n)" values away in "r(11+n)" in
	 * case the syscall table entry wants to validate them.
	 */
	moveli  r20, hw2(compat_sys_call_table)
	{
	 cmpltu r21, TREG_SYSCALL_NR_NAME, r21
	 shl16insli r20, r20, hw1(compat_sys_call_table)
	}
	{
	 blbc   r21, .Linvalid_syscall
	 shl16insli r20, r20, hw0(compat_sys_call_table)
	}
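
	/*
	 * (The "x" instruction forms operate on 32-bit values and
	 * sign-extend their results, so each addxi rN, rN, 0 below
	 * sign-extends the low 32 bits of an argument register in place.)
	 */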
	{ move r11, r0; addxi r0, r0, 0 }
	{ move r12, r1; addxi r1, r1, 0 }
	{ move r13, r2; addxi r2, r2, 0 }
	{ move r14, r3; addxi r3, r3, 0 }
	{ move r15, r4; addxi r4, r4, 0 }
	{ move r16, r5; addxi r5, r5, 0 }
	j       .Lload_syscall_pointer

	/* Report an invalid syscall back to the user program */
.Linvalid_syscall:
	PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
	movei   r28, -ENOSYS
	st      r29, r28
	{
	 movei  r30, 0               /* not an NMI */
	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
	}
	STD_ENDPROC(handle_syscall)

	/* Return the address for oprofile to suppress in backtraces. */
STD_ENTRY_SECTION(handle_syscall_link_address, .text.handle_syscall)
	lnk     r0
	{
	 addli  r0, r0, .Lhandle_syscall_link - .
	 jrp    lr
	}
	STD_ENDPROC(handle_syscall_link_address)

STD_ENTRY(ret_from_fork)
	jal     sim_notify_fork
	jal     schedule_tail
	FEEDBACK_REENTER(ret_from_fork)
	{
	 movei  r30, 0               /* not an NMI */
	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
	}
	STD_ENDPROC(ret_from_fork)

STD_ENTRY(ret_from_kernel_thread)
	jal     sim_notify_fork
	jal     schedule_tail
	FEEDBACK_REENTER(ret_from_fork)
	{
	 move   r0, r31
	 jalr   r30
	}
	FEEDBACK_REENTER(ret_from_kernel_thread)
	{
	 movei  r30, 0               /* not an NMI */
	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
	}
	STD_ENDPROC(ret_from_kernel_thread)

/* Various stub interrupt handlers and syscall handlers */

STD_ENTRY_LOCAL(_kernel_double_fault)
	mfspr   r1, SPR_EX_CONTEXT_K_0
	move    r2, lr
	move    r3, sp
	move    r4, r52
	addi    sp, sp, -C_ABI_SAVE_AREA_SIZE
	j       kernel_double_fault
	STD_ENDPROC(_kernel_double_fault)

STD_ENTRY_LOCAL(bad_intr)
	mfspr   r2, SPR_EX_CONTEXT_K_0
	panic   "Unhandled interrupt %#x: PC %#lx"
	STD_ENDPROC(bad_intr)

/*
 * Special-case sigreturn to not write r0 to the stack on return.
 * This is technically more efficient, but it also avoids difficulties
 * in the 64-bit OS when handling 32-bit compat code, since we must not
 * sign-extend r0 for the sigreturn return-value case.
 */
#define PTREGS_SYSCALL_SIGRETURN(x, reg)                \
	STD_ENTRY(_##x);                                \
	addli   lr, lr, .Lsyscall_sigreturn_skip - .Lhandle_syscall_link; \
	{                                               \
	 PTREGS_PTR(reg, PTREGS_OFFSET_BASE);           \
	 j      x                                       \
	};                                              \
	STD_ENDPROC(_##x)

PTREGS_SYSCALL_SIGRETURN(sys_rt_sigreturn, r0)
#ifdef CONFIG_COMPAT
PTREGS_SYSCALL_SIGRETURN(compat_sys_rt_sigreturn, r0)
#endif

/* Save additional callee-saves to pt_regs and jump to standard function. */
STD_ENTRY(_sys_clone)
	push_extra_callee_saves r4
	j       sys_clone
	STD_ENDPROC(_sys_clone)

/* The single-step support may need to read all the registers. */
int_unalign:
	push_extra_callee_saves r0
	j       do_trap

/* Fill the return address stack with nonzero entries. */
STD_ENTRY(fill_ra_stack)
	{
	 move   r0, lr
	 jal    1f
	}
1:	jal     2f
2:	jal     3f
3:	jal     4f
4:	jrp     r0
	STD_ENDPROC(fill_ra_stack)

/* Include .intrpt1 array of interrupt vectors */
	.section ".intrpt1", "ax"

#define op_handle_perf_interrupt bad_intr
#define op_handle_aux_perf_interrupt bad_intr

#ifndef CONFIG_HARDWALL
#define do_hardwall_trap bad_intr
#endif

	int_hand     INT_MEM_ERROR, MEM_ERROR, do_trap
	int_hand     INT_SINGLE_STEP_3, SINGLE_STEP_3, bad_intr
#if CONFIG_KERNEL_PL == 2
	int_hand     INT_SINGLE_STEP_2, SINGLE_STEP_2, gx_singlestep_handle
	int_hand     INT_SINGLE_STEP_1, SINGLE_STEP_1, bad_intr
#else
	int_hand     INT_SINGLE_STEP_2, SINGLE_STEP_2, bad_intr
	int_hand     INT_SINGLE_STEP_1, SINGLE_STEP_1, gx_singlestep_handle
#endif
	int_hand     INT_SINGLE_STEP_0, SINGLE_STEP_0, bad_intr
	int_hand     INT_IDN_COMPLETE, IDN_COMPLETE, bad_intr
	int_hand     INT_UDN_COMPLETE, UDN_COMPLETE, bad_intr
	int_hand     INT_ITLB_MISS, ITLB_MISS, do_page_fault
	int_hand     INT_ILL, ILL, do_trap
	int_hand     INT_GPV, GPV, do_trap
	int_hand     INT_IDN_ACCESS, IDN_ACCESS, do_trap
	int_hand     INT_UDN_ACCESS, UDN_ACCESS, do_trap
	int_hand     INT_SWINT_3, SWINT_3, do_trap
	int_hand     INT_SWINT_2, SWINT_2, do_trap
	int_hand     INT_SWINT_1, SWINT_1, SYSCALL, handle_syscall
	int_hand     INT_SWINT_0, SWINT_0, do_trap
	int_hand     INT_ILL_TRANS, ILL_TRANS, do_trap
	int_hand     INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
	int_hand     INT_DTLB_MISS, DTLB_MISS, do_page_fault
	int_hand     INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
	int_hand     INT_IDN_FIREWALL, IDN_FIREWALL, do_hardwall_trap
	int_hand     INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
	int_hand     INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
	int_hand     INT_IDN_TIMER, IDN_TIMER, bad_intr
	int_hand     INT_UDN_TIMER, UDN_TIMER, bad_intr
	int_hand     INT_IDN_AVAIL, IDN_AVAIL, bad_intr
	int_hand     INT_UDN_AVAIL, UDN_AVAIL, bad_intr
	int_hand     INT_IPI_3, IPI_3, bad_intr
#if CONFIG_KERNEL_PL == 2
	int_hand     INT_IPI_2, IPI_2, tile_dev_intr
	int_hand     INT_IPI_1, IPI_1, bad_intr
#else
	int_hand     INT_IPI_2, IPI_2, bad_intr
	int_hand     INT_IPI_1, IPI_1, tile_dev_intr
#endif
	int_hand     INT_IPI_0, IPI_0, bad_intr
	int_hand     INT_PERF_COUNT, PERF_COUNT, \
		     op_handle_perf_interrupt, handle_nmi
	int_hand     INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \
		     op_handle_aux_perf_interrupt, handle_nmi
	int_hand     INT_INTCTRL_3, INTCTRL_3, bad_intr
#if CONFIG_KERNEL_PL == 2
	dc_dispatch  INT_INTCTRL_2, INTCTRL_2
	int_hand     INT_INTCTRL_1, INTCTRL_1, bad_intr
#else
	int_hand     INT_INTCTRL_2, INTCTRL_2, bad_intr
	dc_dispatch  INT_INTCTRL_1, INTCTRL_1
#endif
	int_hand     INT_INTCTRL_0, INTCTRL_0, bad_intr
	int_hand     INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
		     hv_message_intr
	int_hand     INT_DEV_INTR_DWNCL, DEV_INTR_DWNCL, bad_intr
	int_hand     INT_I_ASID, I_ASID, bad_intr
	int_hand     INT_D_ASID, D_ASID, bad_intr
	int_hand     INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap

	/* Synthetic interrupt delivered only by the simulator */
	int_hand     INT_BREAKPOINT, BREAKPOINT, do_breakpoint