/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; that is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: the architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: like a partial stack frame, but with all registers saved.
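 *
 * An illustrative layout sketch (added for orientation; offsets assume the
 * pt_regs ordering from asm/ptrace.h), from the top of the kernel stack down:
 *
 *	SS, RSP, EFLAGS, CS, RIP	<- top of stack (hardware frame)
 *	ORIG_RAX			<- syscall number / error code slot
 *	RDI ... R11			<- partial stack frame (SAVE_ARGS)
 *	RBX ... R15			<- full stack frame adds these (SAVE_REST)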
 *
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers.
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers are
 *   not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQS_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */
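
/*
 * Minimal usage sketch (illustrative, mirroring the stubs later in this
 * file): a simple exception vector composes the macros above as
 *
 *	ENTRY(coprocessor_error)
 *		zeroentry do_coprocessor_error
 *	END(coprocessor_error)
 */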
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT	0x80000000
#define __AUDIT_ARCH_LE		0x40000000
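
/*
 * For reference: with EM_X86_64 == 62, AUDIT_ARCH_X86_64 evaluates to
 * 0xc000003e, i.e. EM_X86_64 with the 64-bit and little-endian flags set.
 */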

/*
 * Some macros to hide the most frequently occurring CFI annotations.
 */
	.macro CFI_PUSHQ reg
	pushq \reg
	CFI_ADJUST_CFA_OFFSET 8
	.endm

	.macro CFI_POPQ reg
	popq \reg
	CFI_ADJUST_CFA_OFFSET -8
	.endm

	.macro CFI_MOVQ reg offset=0
	movq %\reg, \offset(%rsp)
	CFI_REL_OFFSET \reg, \offset
	.endm

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

	/* taken from glibc */
	subq $MCOUNT_INSN_SIZE, %rdi
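	/* %rdi now holds the address of the mcount call site in the traced
	   function: the saved return address minus the call insn size */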

#else /* ! CONFIG_DYNAMIC_FTRACE */

	cmpq $ftrace_stub, ftrace_trace_function

	/* taken from glibc */
	movq 0x38(%rsp), %rdi
	subq $MCOUNT_INSN_SIZE, %rdi

	call *ftrace_trace_function

#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
#endif /* CONFIG_PARAVIRT */

	.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc 1f
	TRACE_IRQS_ON
1:
#endif
	.endm

/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

	/* %rsp: at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp
	movq %gs:pda_oldrsp,\tmp
	movq \tmp,RSP(%rsp)
	movq $__USER_DS,SS(%rsp)
	movq $__USER_CS,CS(%rsp)
	movq $-1,RCX(%rsp)
	movq R11(%rsp),\tmp	/* get eflags */
	movq \tmp,EFLAGS(%rsp)
	.endm

	.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq RSP-\offset(%rsp),\tmp
	movq \tmp,%gs:pda_oldrsp
	movq EFLAGS-\offset(%rsp),\tmp
	movq \tmp,R11-\offset(%rsp)
	.endm

	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl %eax, %eax
	pushq $__KERNEL_DS /* ss */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET ss,0*/
	pushq %rax /* rsp */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rsp,0
	pushq $(1<<9) /* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET rflags,0*/
	pushq $__KERNEL_CS /* cs */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET cs,0*/
	pushq \child_rip /* rip */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip,0
	pushq %rax /* orig rax */
	CFI_ADJUST_CFA_OFFSET 8
	.endm

	.macro UNFAKE_STACK_FRAME
	addq $8*6, %rsp
	CFI_ADJUST_CFA_OFFSET -(6*8)
	.endm

/*
 * empty frame
 */
	.macro EMPTY_FRAME start=1 offset=0
	.if \start
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,8+\offset
	.else
	CFI_DEF_CFA_OFFSET 8+\offset
	.endif
	.endm

/*
 * initial frame state for interrupts (and exceptions without error code)
 */
	.macro INTR_FRAME start=1 offset=0
	EMPTY_FRAME \start, (SS+8-RIP)+\offset
	/*CFI_REL_OFFSET ss, SS-RIP+\offset*/
	CFI_REL_OFFSET rsp, RSP-RIP+\offset
	/*CFI_REL_OFFSET rflags, EFLAGS-RIP+\offset*/
	/*CFI_REL_OFFSET cs, CS-RIP+\offset*/
	CFI_REL_OFFSET rip, RIP-RIP+\offset
	.endm

/*
 * initial frame state for exceptions with error code (and interrupts
 * with vector already pushed)
 */
	.macro XCPT_FRAME start=1 offset=0
	INTR_FRAME \start, (RIP-ORIG_RAX)+\offset
	/*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/
	.endm

/*
 * frame that enables calling into C.
 */
	.macro PARTIAL_FRAME start=1 offset=0
	XCPT_FRAME \start, (ORIG_RAX-ARGOFFSET)+\offset
	CFI_REL_OFFSET rdi, (RDI-ARGOFFSET)+\offset
	CFI_REL_OFFSET rsi, (RSI-ARGOFFSET)+\offset
	CFI_REL_OFFSET rdx, (RDX-ARGOFFSET)+\offset
	CFI_REL_OFFSET rcx, (RCX-ARGOFFSET)+\offset
	CFI_REL_OFFSET rax, (RAX-ARGOFFSET)+\offset
	CFI_REL_OFFSET r8, (R8-ARGOFFSET)+\offset
	CFI_REL_OFFSET r9, (R9-ARGOFFSET)+\offset
	CFI_REL_OFFSET r10, (R10-ARGOFFSET)+\offset
	CFI_REL_OFFSET r11, (R11-ARGOFFSET)+\offset
	.endm

/*
 * frame that enables passing a complete pt_regs to a C function.
 */
	.macro DEFAULT_FRAME start=1 offset=0
	PARTIAL_FRAME \start, (R11-R15)+\offset
	CFI_REL_OFFSET rbx, RBX+\offset
	CFI_REL_OFFSET rbp, RBP+\offset
	CFI_REL_OFFSET r12, R12+\offset
	CFI_REL_OFFSET r13, R13+\offset
	CFI_REL_OFFSET r14, R14+\offset
	CFI_REL_OFFSET r15, R15+\offset
	.endm
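
/*
 * Orientation note: the frame annotation macros nest. DEFAULT_FRAME builds
 * on PARTIAL_FRAME, which builds on XCPT_FRAME, which builds on INTR_FRAME,
 * which builds on EMPTY_FRAME; each layer annotates the additional registers
 * it expects to find on the stack.
 */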

/* save partial stack frame */
ENTRY(save_args)
	XCPT_FRAME
	cld
	CFI_MOVQ rdi, (RDI-ARGOFFSET)+16
	CFI_MOVQ rsi, (RSI-ARGOFFSET)+16
	CFI_MOVQ rdx, (RDX-ARGOFFSET)+16
	CFI_MOVQ rcx, (RCX-ARGOFFSET)+16
	CFI_MOVQ rax, (RAX-ARGOFFSET)+16
	CFI_MOVQ r8, (R8-ARGOFFSET)+16
	CFI_MOVQ r9, (R9-ARGOFFSET)+16
	CFI_MOVQ r10, (R10-ARGOFFSET)+16
	CFI_MOVQ r11, (R11-ARGOFFSET)+16
	leaq -ARGOFFSET+16(%rsp),%rdi	/* arg1 for handler */
	CFI_MOVQ rbp, 8			/* push %rbp */
	leaq 8(%rsp), %rbp		/* mov %rsp, %rbp */
	testl $3, CS(%rdi)
	je 1f
	SWAPGS
	/*
	 * irqcount is used to check if a CPU is already on an interrupt stack
	 * or not. While this is essentially redundant with preempt_count it is
	 * a little cheaper to use a separate counter in the PDA (short of
	 * moving irq_enter into assembly, which would be too much work).
	 */
1:	incl %gs:pda_irqcount
	jne 2f
	CFI_POPQ %rax			/* move return address... */
	mov %gs:pda_irqstackptr,%rsp
	EMPTY_FRAME 0
	CFI_PUSHQ %rax			/* ... to the new stack */
	/*
	 * We entered an interrupt context - irqs are off:
	 */
2:	TRACE_IRQS_OFF
	ret
	CFI_ENDPROC
END(save_args)

/*
 * A newly forked process directly context switches into this.
 */
ENTRY(ret_from_fork)
	push kernel_eflags(%rip)
	CFI_ADJUST_CFA_OFFSET 8
	popf				# reset kernel eflags
	CFI_ADJUST_CFA_OFFSET -8
	call schedule_tail
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jnz rff_trace
rff_action:
	RESTORE_REST
	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je int_ret_from_sys_call
	testl $_TIF_IA32,TI_flags(%rcx)
	jnz int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp ret_from_sys_call
rff_trace:
	movq %rsp,%rdi
	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)
	jmp rff_action
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 *
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX if we had a free scratch register we could save the RSP into the stack
 * frame and report it properly in ps. Unfortunately we don't have one.
 *
 * When the user can change the frames, always force IRET. That is because
 * it deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
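
/*
 * Worked example (illustrative, not from the original comment): for
 * write(fd, buf, count), user space executes SYSCALL with
 * rax = __NR_write (1), rdi = fd, rsi = buf, rdx = count;
 * the return value comes back in rax.
 */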

ENTRY(system_call)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,PDA_STACKOFFSET
	CFI_REGISTER rip,rcx
	/*CFI_REGISTER rflags,r11*/

	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
	SWAPGS_UNSAFE_STACK
ENTRY(system_call_after_swapgs)

	movq %rsp,%gs:pda_oldrsp
	movq %gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,1
	movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
	jnz tracesys
system_call_fastpath:
	cmpq $__NR_syscall_max,%rax
	ja badsys
	movq %r10,%rcx
	call *sys_call_table(,%rax,8)	# XXX: rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)

/*
 * Syscall return path ending with SYSRET (fast path).
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi: flagmask */
sysret_check:
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz sysret_careful
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq RIP-ARGOFFSET(%rsp),%rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER rflags,r11*/
	movq %gs:pda_oldrsp, %rsp
	USERGS_SYSRET64

	/* Handle reschedules */
	/* edx: work, edi: workmask */
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp sysret_check

	/* Handle a signal */
sysret_signal:
	ENABLE_INTERRUPTS(CLBR_NONE)
#ifdef CONFIG_AUDITSYSCALL
	bt $TIF_SYSCALL_AUDIT,%edx
	jc sysret_audit
#endif
	/* edx: work flags (arg3) */
	leaq do_notify_resume(%rip),%rax
	leaq -ARGOFFSET(%rsp),%rdi	# &pt_regs -> arg1
	xorl %esi,%esi			# oldset -> arg2
	call ptregscall_common
	movl $_TIF_WORK_MASK,%edi
	/* Use IRET because user could have changed frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	DISABLE_INTERRUPTS(CLBR_NONE)
	jmp int_with_check

badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call

#ifdef CONFIG_AUDITSYSCALL
	/*
	 * Fast path for syscall audit without full syscall trace.
	 * We just call audit_syscall_entry() directly, and then
	 * jump back to the normal fast path.
	 */
auditsys:
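	/*
	 * For orientation, assuming the era's C prototype (roughly):
	 *	audit_syscall_entry(int arch, long major,
	 *			    unsigned long a0, a1, a2, a3)
	 * hence the register shuffling below.
	 */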
	movq %r10,%r9			/* 6th arg: 4th syscall arg */
	movq %rdx,%r8			/* 5th arg: 3rd syscall arg */
	movq %rsi,%rcx			/* 4th arg: 2nd syscall arg */
	movq %rdi,%rdx			/* 3rd arg: 1st syscall arg */
	movq %rax,%rsi			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_X86_64,%edi	/* 1st arg: audit arch */
	call audit_syscall_entry
	LOAD_ARGS 0			/* reload call-clobbered registers */
	jmp system_call_fastpath

	/*
	 * Return fast path for syscall audit. Call audit_syscall_exit()
	 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
	 * masked off.
	 */
sysret_audit:
	movq %rax,%rsi		/* second arg, syscall return value */
	cmpq $0,%rax		/* is it < 0? */
	setl %al		/* 1 if so, 0 if not */
	movzbl %al,%edi		/* zero-extend that into %edi */
	inc %edi		/* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
	call audit_syscall_exit
	movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
	jmp sysret_check
#endif /* CONFIG_AUDITSYSCALL */

	/* Do syscall tracing */
tracesys:
#ifdef CONFIG_AUDITSYSCALL
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jz auditsys
#endif
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp)	/* ptrace can change this for a bad syscall */
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	/*
	 * Reload arg registers from stack in case ptrace changed them.
	 * We don't reload %rax because syscall_trace_enter() returned
	 * the value it wants us to use in the table lookup.
	 */
	LOAD_ARGS ARGOFFSET, 1
	RESTORE_REST
	cmpq $__NR_syscall_max,%rax
	ja int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
	movq %r10,%rcx			/* fixup for C */
	call *sys_call_table(,%rax,8)
	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because user could have changed frame */

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
	.globl int_ret_from_sys_call
	.globl int_with_check
int_ret_from_sys_call:
	DISABLE_INTERRUPTS(CLBR_NONE)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi: mask to check */
int_with_check:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz int_careful
	andl $~TS_COMPAT,TI_status(%rcx)
	jmp retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx: work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc int_very_careful
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	DISABLE_INTERRUPTS(CLBR_NONE)
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	/* Check for syscall exit trace */
	testl $_TIF_WORK_SYSCALL_EXIT,%edx
	jz int_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi		# &pt_regs -> arg1
	call syscall_trace_leave
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
	jmp int_restore_rest

int_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f
	movq %rsp,%rdi			# &pt_regs -> arg1
	xorl %esi,%esi			# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_WORK_MASK,%edi
int_restore_rest:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	jmp int_with_check
END(system_call)

/*
 * Certain special system calls need to save a complete full stack frame.
 */
	.macro PTREGSCALL label,func,arg
	.globl \label
\label:
	leaq \func(%rip),%rax
	leaq -ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
	jmp ptregscall_common
END(\label)
	.endm

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	movq %r11, %r15
	CFI_REGISTER rip, r15
	FIXUP_TOP_OF_STACK %r11
	call *%rax
	RESTORE_TOP_OF_STACK %r11
	movq %r15, %r11
	CFI_REGISTER rip, r11
	RESTORE_REST
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip, 0
	ret
END(ptregscall_common)

ENTRY(stub_execve)
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	movq %rsp, %rcx
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	addq $8, %rsp
	CFI_ADJUST_CFA_OFFSET -8
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp)	# fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
END(stub_rt_sigreturn)

/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
	.section .init.rodata,"a"
ENTRY(interrupt)
	.text
	.p2align 5
	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
	INTR_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
	.balign 32
  .rept	7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -8
      .endif
1:	pushq $(~vector+0x80)	/* Note: always in signed byte range */
	CFI_ADJUST_CFA_OFFSET 8
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
	jmp 2f
      .endif
      .previous
	.quad 1b
      .text
vector=vector+1
    .endif
  .endr
2:	jmp common_interrupt
.endr
	CFI_ENDPROC
END(irq_entries_start)

.previous
END(interrupt)
.previous
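
/*
 * Packing arithmetic, for orientation: the first six stubs in a chunk are
 * each a 2-byte "pushq $imm8" plus a 2-byte short "jmp 2f" (keeping the
 * pushed value in signed byte range is what keeps the push at 2 bytes);
 * the seventh pushes and falls through to the shared 5-byte
 * "jmp common_interrupt". 6*4 + 2 + 5 = 31 bytes, so 7 stubs fit in 32.
 */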

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
	.macro interrupt func
	subq $10*8, %rsp
	CFI_ADJUST_CFA_OFFSET 10*8
	call save_args
	PARTIAL_FRAME 0
	call \func
	.endm

	/*
	 * The interrupt stubs push (~vector+0x80) onto the stack and
	 * then jump to common_interrupt.
	 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	XCPT_FRAME
	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	decl %gs:pda_irqcount
	leaveq
	CFI_DEF_CFA_REGISTER rsp
	CFI_ADJUST_CFA_OFFSET -8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp restore_args

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 0,8,0

irq_return:
	INTERRUPT_RETURN

	.section __ex_table, "a"
	.quad irq_return, bad_iret
	.previous

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iretq

	.section __ex_table,"a"
	.quad native_iret, bad_iret
	.previous
#endif

	.section .fixup,"ax"
bad_iret:
	/*
	 * The iret traps when the %cs or %ss being restored is bogus.
	 * We've lost the original trap vector and error code.
	 * #GPF is the most likely one to get for an invalid selector.
	 * So pretend we completed the iret and took the #GPF in user mode.
	 *
	 * We are now running with the kernel GS after exception recovery.
	 * But error_entry expects us to have user GS to match the user %cs,
	 * so swap back.
	 */
	pushq $0
	SWAPGS
	jmp general_protection
	.previous

	/* edi: workmask, edx: work */
retint_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc retint_signal
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	jmp retint_check

retint_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz retint_swapgs
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	GET_THREAD_INFO(%rcx)
	jmp retint_with_reschedule

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx: threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl $0,TI_preempt_count(%rcx)
	jnz retint_restore_args
	bt $TIF_NEED_RESCHED,TI_flags(%rcx)
	jnc retint_restore_args
	bt $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif
END(common_interrupt)

/*
 * APIC interrupts.
 */
	.macro apicinterrupt num,func
	INTR_FRAME
	pushq $~(\num)
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \func
	jmp ret_from_intr
	CFI_ENDPROC
	.endm

ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
	.endm

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
ENTRY(call_function_single_interrupt)
	apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
END(call_function_single_interrupt)
ENTRY(irq_move_cleanup_interrupt)
	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)

ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(uv_bau_message_intr1)
	apicinterrupt 220,uv_bau_message_interrupt
END(uv_bau_message_intr1)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)

/*
 * Exception entry points.
 */
	.macro zeroentry sym
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	CFI_PUSHQ $-1		/* ORIG_RAX: no syscall to restart */
	subq $15*8,%rsp
	CFI_ADJUST_CFA_OFFSET 15*8
	call error_entry
	DEFAULT_FRAME 0
	movq %rsp,%rdi		/* pt_regs pointer */
	xorl %esi,%esi		/* no error code */
	call \sym
	jmp error_exit		/* %ebx: no swapgs flag */
	CFI_ENDPROC
	.endm

	.macro errorentry sym
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	subq $15*8,%rsp
	CFI_ADJUST_CFA_OFFSET 15*8
	call error_entry
	DEFAULT_FRAME 0
	movq %rsp,%rdi			/* pt_regs pointer */
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
	call \sym
	jmp error_exit			/* %ebx: no swapgs flag */
	CFI_ENDPROC
	.endm

	/* error code is on the stack already */
	/* handle NMI like exceptions that can happen everywhere */
	.macro paranoidentry sym, ist=0, irqtrace=1
	SAVE_ALL
	cld
	movl $1,%ebx
	movl $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js 1f
	SWAPGS
	xorl %ebx,%ebx
1:	.if \ist
	movq %gs:pda_data_offset, %rbp
	.endif
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi
	movq $-1,ORIG_RAX(%rsp)
	.if \ist
	subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	call \sym
	.if \ist
	addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
	.endm

/*
 * "Paranoid" exit path from exception stack.
 * Paranoid because this is used by NMIs and cannot take
 * any kernel state for granted.
 * We don't do kernel preemption checks here, because only
 * NMI should be common and it does not enable IRQs and
 * cannot get reschedule ticks.
 *
 * "trace" is 0 for the NMI handler only, because irq-tracing
 * is fundamentally NMI-unsafe. (we cannot change the soft and
 * hard flags at once, atomically)
 */
	.macro paranoidexit trace=1
	/* ebx: no swapgs flag */
paranoid_exit\trace:
	testl %ebx,%ebx			/* swapgs needed? */
	jnz paranoid_restore\trace
	testl $3,CS(%rsp)
	jnz paranoid_userspace\trace
paranoid_swapgs\trace:
	.if \trace
	TRACE_IRQS_IRETQ 0
	.endif
	SWAPGS_UNSAFE_STACK
paranoid_restore\trace:
	RESTORE_ALL 8
	jmp irq_return
paranoid_userspace\trace:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs\trace
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule\trace
	movl %ebx,%edx			/* arg3: thread flags */
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi			/* arg2: oldset */
	movq %rsp,%rdi			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
paranoid_schedule\trace:
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
	CFI_ENDPROC
	.endm

/*
 * Exception entry point. This expects an error code/orig_rax on the stack.
 * Returns the "no swapgs flag" in %ebx.
 */
KPROBE_ENTRY(error_entry)
	XCPT_FRAME
	CFI_ADJUST_CFA_OFFSET 15*8
	/* oldrax contains error code */
	cld
	xorl %ebx,%ebx
	testl $3,CS+8(%rsp)
	je error_kernelspace
error_swapgs:
	SWAPGS
error_sti:
	TRACE_IRQS_OFF
	ret

/*
 * There are two places in the kernel that can potentially fault with
 * usergs. Handle them here. The exception handlers after iret run with
 * kernel gs again, so don't set the user space flag. B stepping K8s
 * sometimes report a truncated RIP for IRET exceptions returning to
 * compat mode. Check for these here too.
 */
error_kernelspace:
	incl %ebx
	leaq irq_return(%rip),%rcx
	cmpq %rcx,RIP+8(%rsp)
	je error_swapgs
	movl %ecx,%ecx		/* zero extend */
	cmpq %rcx,RIP+8(%rsp)
	je error_swapgs
	cmpq $gs_change,RIP+8(%rsp)
	je error_swapgs
	jmp error_sti
KPROBE_END(error_entry)

	/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
KPROBE_ENTRY(error_exit)
	DEFAULT_FRAME
	movl %ebx,%eax
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne retint_kernel
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	movl $_TIF_WORK_MASK,%edi
	andl %edi,%edx
	jnz retint_careful
	jmp retint_swapgs
KPROBE_END(error_exit)

	/* Reload gs selector with exception handling */
	/* edi: new selector */
ENTRY(native_load_gs_index)
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
	SWAPGS
gs_change:
	movl %edi,%gs
2:	mfence			/* workaround */
	SWAPGS
	popf
	CFI_ADJUST_CFA_OFFSET -8
	ret
ENDPROC(native_load_gs_index)

	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS			/* switch back to user gs */
	movl %eax,%gs
	jmp 2b
	.previous

/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
ENTRY(kernel_thread)
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx

	xorl %r8d,%r8d
	xorl %r9d,%r9d

	# clone now
	call do_fork
	movq %rax,RAX(%rsp)
	xorl %edi,%edi

	/*
	 * It isn't worth checking for reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not to reschedule the child before returning; this avoids the need
	 * for hacks, for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
ENDPROC(kernel_thread)
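
/*
 * Illustrative C-side usage (a sketch, not from this file): spawn a kernel
 * thread running fn(arg), e.g.
 *	kernel_thread(my_fn, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
 * where my_fn is a hypothetical int (*)(void *) worker.
 */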

child_rip:
	pushq $0		# fake return address
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq %rdi, %rax
	movq %rsi, %rdi
	call *%rax
	# exit
	mov %eax, %edi
	call do_exit
ENDPROC(child_rip)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all
 * state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
ENTRY(kernel_execve)
	FAKE_STACK_FRAME $0
	SAVE_ALL
	movq %rsp,%rcx
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
ENDPROC(kernel_execve)

KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry do_device_not_available
END(device_not_available)

	/* runs on exception stack */
KPROBE_ENTRY(debug)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_debug, DEBUG_STACK
	paranoidexit
KPROBE_END(debug)

	/* runs on exception stack */
KPROBE_ENTRY(nmi)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $-1
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
	paranoidexit 0
#else
	jmp paranoid_exit1
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_int3, DEBUG_STACK
	jmp paranoid_exit1
KPROBE_END(int3)

ENTRY(overflow)
	zeroentry do_overflow
END(overflow)

ENTRY(invalid_op)
	zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

	/* runs on exception stack */
ENTRY(double_fault)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	paranoidentry do_double_fault
	jmp paranoid_exit1
END(double_fault)

ENTRY(invalid_TSS)
	errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)

	/* runs on exception stack */
ENTRY(stack_segment)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	paranoidentry do_stack_segment
	jmp paranoid_exit1
END(stack_segment)

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
	zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_machine_check
	jmp paranoid_exit1
END(machine_check)
#endif

	/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	push %rbp
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rbp,0
	mov %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl %gs:pda_irqcount
	cmove %gs:pda_irqstackptr,%rsp
	push %rbp			# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER rsp
	CFI_ADJUST_CFA_OFFSET -8
	decl %gs:pda_irqcount
	ret
ENDPROC(call_softirq)
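
/*
 * Orientation note: pda_irqcount is biased to -1 when no IRQ stack is in
 * use, so the incl above sets ZF only on the outermost entry; cmove then
 * switches %rsp to the IRQ stack exactly once, and nested entries keep
 * the stack they are on.
 */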

KPROBE_ENTRY(ignore_sysret)
	mov $-ENOSYS,%eax
	sysret
ENDPROC(ignore_sysret)

#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
	zeroentry xen_do_hypervisor_callback
END(xen_hypervisor_callback)

# A note on the "critical region" in our callback handler.
# We want to avoid stacking callback handlers due to events occurring
# during handling of the last event. To do this, we keep events disabled
# until we've done all processing. HOWEVER, we must enable events before
# popping the stack frame (can't be done atomically) and so it would still
# be possible to get enough handler activations to overflow the stack.
# Although unlikely, bugs of that kind are hard to track down, so we'd
# like to avoid the possibility.
# So, on entry to the handler we detect whether we interrupted an
# existing activation in its critical region -- if so, we pop the current
# activation and restart the handler using the previous one.
ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct pt_regs *)
	/* Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *)
	   will see the correct pointer to the pt_regs */
	movq %rdi, %rsp			# we don't return, adjust the stack frame
11:	incl %gs:pda_irqcount
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	cmovzq %gs:pda_irqstackptr,%rsp
	pushq %rbp			# backlink for old unwinder
	call xen_evtchn_do_upcall
	popq %rsp
	CFI_DEF_CFA_REGISTER rsp
	decl %gs:pda_irqcount
	jmp error_exit
END(do_hypervisor_callback)

# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we do not need to fix up as Xen has already reloaded all segment
# registers that could be reloaded and zeroed the others.
# Category 2 we fix up by killing the current process. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by comparing each saved segment register
# with its current contents: any discrepancy means we are in category 1.
ENTRY(xen_failsafe_callback)
	/*CFI_REL_OFFSET gs,GS*/
	/*CFI_REL_OFFSET fs,FS*/
	/*CFI_REL_OFFSET es,ES*/
	/*CFI_REL_OFFSET ds,DS*/
	CFI_REL_OFFSET r11,8
	CFI_REL_OFFSET rcx,0
	movw %ds,%cx
	cmpw %cx,0x10(%rsp)
	jne 1f
	movw %es,%cx
	cmpw %cx,0x18(%rsp)
	jne 1f
	movw %fs,%cx
	cmpw %cx,0x20(%rsp)
	jne 1f
	movw %gs,%cx
	cmpw %cx,0x28(%rsp)
	jne 1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq (%rsp),%rcx
	movq 8(%rsp),%r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	CFI_PUSHQ $0	/* RIP */
	CFI_PUSHQ %r11
	CFI_PUSHQ %rcx
	jmp general_protection
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq (%rsp),%rcx
	movq 8(%rsp),%r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	CFI_PUSHQ $0
	SAVE_ALL
	jmp error_exit
END(xen_failsafe_callback)

#endif /* CONFIG_XEN */