/*
 * Copyright (C) 1991,1992  Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout while running C code:
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 */

#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/frame.h>

	.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization. The following will never clobber any registers:
 *	INTERRUPT_RETURN (aka. "iret")
 *	GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *	ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
# define preempt_stop(clobbers)
# define resume_kernel		restore_all

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)		# interrupts off?
/*
 * User gs save/restore
 *
 * %gs is used for userland TLS; the kernel only uses it for the stack
 * canary, which gcc requires to be at %gs:20. Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
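/*
 * Illustrative aside (not part of the original file): a minimal C sketch of
 * how the %gs:20 canary slot mentioned above is reached. gcc's
 * -fstack-protector on 32-bit x86 loads the canary from %gs:20, which the
 * kernel points at the stack_canary slot. The helper name is hypothetical.
 *
 *	#include <stdint.h>
 *
 *	static inline uintptr_t read_stack_canary(void)
 *	{
 *		uintptr_t canary;
 *
 *		// Same addressing gcc emits for its stack-protector checks.
 *		asm volatile("movl %%gs:20, %0" : "=r" (canary));
 *		return canary;
 *	}
 */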
#ifdef CONFIG_X86_32_LAZY_GS

	/* unfortunately, push/pop can't be no-ops */
	addl	$(4 + \pop), %esp

	/* all the rest are no-ops */

.macro REG_TO_PTGS reg
.macro SET_KERNEL_GS reg

#else	/* CONFIG_X86_32_LAZY_GS */

.pushsection .fixup, "ax"
_ASM_EXTABLE(98b, 99b)
98:	mov	PT_GS(%esp), %gs
.pushsection .fixup, "ax"
99:	movl	$0, PT_GS(%esp)
_ASM_EXTABLE(98b, 99b)

.macro REG_TO_PTGS reg
	movl	\reg, PT_GS(%esp)
.macro SET_KERNEL_GS reg
	movl	$(__KERNEL_STACK_CANARY), \reg

#endif	/* CONFIG_X86_32_LAZY_GS */

.macro SAVE_ALL pt_regs_ax=%eax
	movl	$(__USER_DS), %edx
	movl	$(__KERNEL_PERCPU), %edx

/*
 * This is a sneaky trick to help the unwinder find pt_regs on the stack. The
 * frame pointer is replaced with an encoded pointer to pt_regs. The encoding
 * is just setting the LSB, which makes it an invalid stack address and is also
 * a signal to the unwinder that it's a pt_regs pointer in disguise.
 *
 * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the
 * frame pointer in %ebp.
 */
.macro ENCODE_FRAME_POINTER
#ifdef CONFIG_FRAME_POINTER
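/*
 * Illustrative aside (not part of the original file): the LSB-encoding trick
 * described above, sketched in C. struct pt_regs is left opaque here; the
 * helper names are hypothetical, not the kernel's.
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	struct pt_regs;
 *
 *	// Stack slots are at least 4-byte aligned, so bit 0 is free to use
 *	// as a "this is really a pt_regs pointer" marker.
 *	static inline uintptr_t encode_frame_pointer(struct pt_regs *regs)
 *	{
 *		return (uintptr_t)regs | 1;
 *	}
 *
 *	static inline bool is_encoded_pt_regs(uintptr_t bp)
 *	{
 *		return bp & 1;
 *	}
 *
 *	static inline struct pt_regs *decode_frame_pointer(uintptr_t bp)
 *	{
 *		return (struct pt_regs *)(bp & ~(uintptr_t)1);
 *	}
 */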
.macro RESTORE_INT_REGS
.macro RESTORE_REGS pop=0
.pushsection .fixup, "ax"

ENTRY(__switch_to_asm)
	/*
	 * Save callee-saved registers
	 * This must match the order in struct inactive_task_frame
	 */
	movl	%esp, TASK_threadsp(%eax)
	movl	TASK_threadsp(%edx), %esp

#ifdef CONFIG_CC_STACKPROTECTOR
	movl	TASK_stack_canary(%edx), %ebx
	movl	%ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset

	/* restore callee-saved registers */

/*
 * A newly forked process directly context switches into this address.
 *
 * eax: prev task we switched from
 * ebx: kernel thread func (NULL for user thread)
 * edi: kernel thread arg
 */
	FRAME_BEGIN			/* help unwinder find end of stack */
	/*
	 * schedule_tail() is asmlinkage so we have to put its 'prev'
	 * argument on the stack.
	 */
	jnz	1f			/* kernel threads are uncommon */

	/* When we fork, we trace the syscall return in the child, too. */
	leal	FRAME_OFFSET(%esp), %eax
	call	syscall_return_slowpath

	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve(). Exit to userspace to complete the execve()
	 * syscall.
	 */
	movl	$0, PT_EAX(%esp)

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	preempt_stop(CLBR_ANY)

	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax

	/*
	 * We can be coming here from a child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax

	jb	resume_kernel			# not returning to v8086 or userspace
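/*
 * Illustrative aside (not part of the original file): the EFLAGS/CS mixing
 * trick above, restated as C. Constant values are the architectural ones;
 * the function name is hypothetical.
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	#define X86_EFLAGS_VM		0x00020000u	// virtual-8086 mode flag
 *	#define SEGMENT_RPL_MASK	0x3u
 *	#define USER_RPL		0x3u
 *
 *	static bool returning_to_user_or_vm86(uint32_t eflags, uint16_t cs)
 *	{
 *		// Fold VM and the CS privilege level into one value; anything
 *		// below USER_RPL means we are returning to the kernel.
 *		uint32_t mixed = (eflags & X86_EFLAGS_VM) | (cs & SEGMENT_RPL_MASK);
 *
 *		return mixed >= USER_RPL;
 *	}
 */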
ENTRY(resume_userspace)
	DISABLE_INTERRUPTS(CLBR_ANY)
	call	prepare_exit_to_usermode
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	call	preempt_schedule_irq

GLOBAL(__begin_SYSENTER_singlestep_region)
/*
 * All code from here through __end_SYSENTER_singlestep_region is subject
 * to being single-stepped if a user program sets TF and executes SYSENTER.
 * There is absolutely nothing that we can do to prevent this from happening
 * (thanks Intel!). To keep our handling of this situation as simple as
 * possible, we handle TF just like AC and NT, except that our #DB handler
 * will ignore all of the single-step traps generated in this range.
 */

/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
ENTRY(xen_sysenter_target)
	addl	$5*4, %esp			/* remove xen-provided frame */
	jmp	.Lsysenter_past_esp

/*
 * 32-bit SYSENTER entry.
 *
 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 * if X86_FEATURE_SEP is available. This is the preferred system call
 * entry on 32-bit systems.
 *
 * The SYSENTER instruction, in principle, should *only* occur in the
 * vDSO. In practice, a small number of Android devices were shipped
 * with a copy of Bionic that inlined a SYSENTER instruction. This
 * never happened in any of Google's Bionic versions -- it only happened
 * in a narrow range of Intel-provided versions.
 *
 * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
 * IF and VM in EFLAGS are cleared (IOW: interrupts are off).
 * SYSENTER does not save anything on the stack,
 * and does not save old EIP (!!!), ESP, or EFLAGS.
 *
 * To avoid losing track of EFLAGS.VM (and thus potentially corrupting
 * user and/or vm86 state), we explicitly disable the SYSENTER
 * instruction in vm86 mode by reprogramming the MSRs.
 *
 * Arguments:
 * eax	system call number
 */
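/*
 * Illustrative aside (not part of the original file): a rough C sketch of the
 * MSR programming that makes SYSENTER land here. The wrmsr() helper and the
 * sysenter_stack_top value are placeholders; the real setup lives in the
 * kernel's CPU init code and may differ in detail.
 *
 *	#include <stdint.h>
 *
 *	#define MSR_IA32_SYSENTER_CS	0x00000174
 *	#define MSR_IA32_SYSENTER_ESP	0x00000175
 *	#define MSR_IA32_SYSENTER_EIP	0x00000176
 *
 *	extern void wrmsr(uint32_t msr, uint32_t lo, uint32_t hi);
 *	extern void entry_SYSENTER_32(void);
 *
 *	static void sketch_enable_sysenter(uint32_t kernel_cs,
 *					   uint32_t sysenter_stack_top)
 *	{
 *		// CS, ESP and EIP the CPU loads when SYSENTER executes.
 *		wrmsr(MSR_IA32_SYSENTER_CS, kernel_cs, 0);
 *		wrmsr(MSR_IA32_SYSENTER_ESP, sysenter_stack_top, 0);
 *		wrmsr(MSR_IA32_SYSENTER_EIP, (uint32_t)entry_SYSENTER_32, 0);
 *	}
 */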
ENTRY(entry_SYSENTER_32)
	movl	TSS_sysenter_sp0(%esp), %esp
	pushl	$__USER_DS		/* pt_regs->ss */
	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
	pushfl				/* pt_regs->flags (except IF = 0) */
	orl	$X86_EFLAGS_IF, (%esp)	/* Fix IF */
	pushl	$__USER_CS		/* pt_regs->cs */
	pushl	$0			/* pt_regs->ip = 0 (placeholder) */
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */
	/*
	 * SYSENTER doesn't filter flags, so we need to clear NT, AC
	 * and TF ourselves. To save a few cycles, we can check whether
	 * either was set instead of doing an unconditional popfl.
	 * This needs to happen before enabling interrupts so that
	 * we don't get preempted with NT set.
	 *
	 * If TF is set, we will single-step all the way to here -- do_debug
	 * will ignore all the traps. (Yes, this is slow, but so is
	 * single-stepping in general. This allows us to avoid having
	 * more complicated code to handle the case where a user program
	 * forces us to single-step through the SYSENTER entry code.)
	 *
	 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
	 * out-of-line as an optimization: NT is unlikely to be set in the
	 * vast majority of cases, so instead of polluting the I$ unnecessarily,
	 * we keep that code behind a branch which will be predicted
	 * not-taken and therefore its instructions won't be fetched.
	 */
	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
	jnz	.Lsysenter_fix_flags
.Lsysenter_flags_fixed:
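/*
 * Illustrative aside (not part of the original file): the flags test above as
 * C. Flag values are the architectural EFLAGS bits; the function name is
 * hypothetical.
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	#define X86_EFLAGS_TF	0x00000100u	// trap flag (single-step)
 *	#define X86_EFLAGS_NT	0x00004000u	// nested task
 *	#define X86_EFLAGS_AC	0x00040000u	// alignment check
 *
 *	static bool sysenter_flags_need_fixing(uint32_t saved_eflags)
 *	{
 *		// Rarely true, hence the out-of-line fixup path above.
 *		return saved_eflags & (X86_EFLAGS_NT | X86_EFLAGS_AC | X86_EFLAGS_TF);
 *	}
 */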
	/*
	 * User mode is traced as though IRQs are on, and SYSENTER
	 * turned them off.
	 */
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV

/* Opportunistic SYSEXIT */
	TRACE_IRQS_ON			/* User mode traces as IRQs on. */
	movl	PT_EIP(%esp), %edx	/* pt_regs->ip */
	movl	PT_OLDESP(%esp), %ecx	/* pt_regs->sp */
1:	mov	PT_FS(%esp), %fs
	popl	%ebx			/* pt_regs->bx */
	addl	$2*4, %esp		/* skip pt_regs->cx and pt_regs->dx */
	popl	%esi			/* pt_regs->si */
	popl	%edi			/* pt_regs->di */
	popl	%ebp			/* pt_regs->bp */
	popl	%eax			/* pt_regs->ax */

	/*
	 * Restore all flags except IF. (We restore IF separately because
	 * STI gives a one-instruction window in which we won't be interrupted,
	 * whereas POPF does not.)
	 */
	addl	$PT_EFLAGS-PT_DS, %esp	/* point esp at pt_regs->flags */
	btr	$X86_EFLAGS_IF_BIT, (%esp)
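/*
 * Illustrative aside (not part of the original file): what the BTR above does
 * to the flags image that will later be restored, sketched in C. The helper
 * name is hypothetical.
 *
 *	#include <stdint.h>
 *
 *	#define X86_EFLAGS_IF_BIT	9	// interrupt enable flag
 *
 *	static uint32_t flags_image_for_popf(uint32_t saved_eflags)
 *	{
 *		// Drop IF so POPF leaves interrupts disabled; the STI issued
 *		// right before SYSEXIT re-enables them with a one-instruction
 *		// shadow, which POPF would not provide.
 *		return saved_eflags & ~(UINT32_C(1) << X86_EFLAGS_IF_BIT);
 *	}
 */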
	/*
	 * Return to the vDSO, which will pop ecx and edx.
	 * Don't bother with DS and ES (they already contain __USER_DS).
	 */

.pushsection .fixup, "ax"
2:	movl	$0, PT_FS(%esp)

.Lsysenter_fix_flags:
	pushl	$X86_EFLAGS_FIXED
	jmp	.Lsysenter_flags_fixed
GLOBAL(__end_SYSENTER_singlestep_region)
ENDPROC(entry_SYSENTER_32)

/*
 * 32-bit legacy system call entry.
 *
 * 32-bit x86 Linux system calls traditionally used the INT $0x80
 * instruction. INT $0x80 lands here.
 *
 * This entry point can be used by any 32-bit program to perform system calls.
 * Instances of INT $0x80 can be found inline in various programs and
 * libraries. It is also used by the vDSO's __kernel_vsyscall
 * fallback for hardware that doesn't support a faster entry method.
 * Restarted 32-bit system calls also fall back to INT $0x80
 * regardless of what instruction was originally used to do the system
 * call. (64-bit programs can use INT $0x80 as well, but they can
 * only run on 64-bit kernels and therefore land in
 * entry_INT80_compat.)
 *
 * This is considered a slow path. It is not used by most libc
 * implementations on modern hardware except during process startup.
 *
 * Arguments:
 * eax	system call number
 */
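/*
 * Illustrative aside (not part of the original file): a minimal userspace
 * example of the register convention used at this entry point (eax = syscall
 * number, ebx/ecx/edx/esi/edi/ebp = arguments, result back in eax), shown
 * here with getpid. Real programs normally go through libc or the vDSO.
 *
 *	#include <stdio.h>
 *	#include <sys/syscall.h>
 *
 *	int main(void)
 *	{
 *		long pid;
 *
 *		// 32-bit only: trap into entry_INT80_32 directly.
 *		asm volatile("int $0x80"
 *			     : "=a" (pid)
 *			     : "a" (SYS_getpid)
 *			     : "memory");
 *		printf("pid=%ld\n", pid);
 *		return 0;
 *	}
 */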
ENTRY(entry_INT80_32)
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */

	/*
	 * User mode is traced as though IRQs are on, and the interrupt gate
	 * turned them off.
	 */
	call	do_int80_syscall_32

.Lrestore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
	ALTERNATIVE "jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX

	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb	PT_OLDSS(%esp), %ah
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	je	.Lldt_ss			# returning to user-space with LDT SS

	RESTORE_REGS 4				# skip orig_eax/error_code

.section .fixup, "ax"
	pushl	$0				# no error code
	_ASM_EXTABLE(.Lirq_return, iret_exc)

#ifdef CONFIG_X86_ESPFIX32
	/*
	 * Set up and switch to the ESPFIX stack.
	 *
	 * We're returning to userspace with a 16-bit stack. The CPU will not
	 * restore the high word of ESP for us on executing iret... This is an
	 * "official" bug of all the x86-compatible CPUs, which we can work
	 * around to make dosemu and wine happy. We do this by preloading the
	 * high word of ESP with the high word of the userspace ESP while
	 * compensating for the offset by changing to the ESPFIX segment with
	 * a base address that makes up the difference. See the sketch below.
	 */
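/*
 * Illustrative aside (not part of the original file): the arithmetic below,
 * restated in C. Function and struct names are hypothetical; only the
 * high-word/low-word split and the segment-base offset are the point.
 *
 *	#include <stdint.h>
 *
 *	struct espfix_setup {
 *		uint32_t new_esp;	// loaded into %esp via lss
 *		uint32_t seg_base;	// programmed into the ESPFIX GDT entry
 *	};
 *
 *	static struct espfix_setup espfix_compute(uint32_t kernel_esp,
 *						  uint32_t user_esp)
 *	{
 *		struct espfix_setup s;
 *
 *		// High word from the user stack, low word from the kernel stack.
 *		s.new_esp  = (user_esp & 0xffff0000u) | (kernel_esp & 0x0000ffffu);
 *		// Base chosen so base + new_esp still addresses the kernel
 *		// stack; its low word is always zero.
 *		s.seg_base = kernel_esp - s.new_esp;
 *		return s;
 *	}
 *
 * With this, the 16-bit iret only has to restore the low word of ESP; the
 * high word already holds the user value, and the segment base kept the
 * kernel stack reachable in the meantime.
 */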
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov	%esp, %edx			/* load kernel esp */
	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
	mov	%dx, %ax			/* eax: new kernel esp */
	sub	%eax, %edx			/* offset (low word is 0) */
	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
	pushl	%eax				/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	lss	(%esp), %esp			/* switch to espfix segment */
	jmp	.Lrestore_nocheck
ENDPROC(entry_INT80_32)
.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack.
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT, switches to the
 * normal stack, and adjusts ESP by the matching offset.
 */
#ifdef CONFIG_X86_ESPFIX32
	/* fixup the stack */
	mov	GDT_ESPFIX_SS + 4, %al		/* bits 16..23 */
	mov	GDT_ESPFIX_SS + 7, %ah		/* bits 24..31 */
	addl	%esp, %eax			/* the adjusted stack pointer */
	lss	(%esp), %esp			/* switch to the normal stack segment */

.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
	/* see if on espfix stack */
	cmpw	$__ESPFIX_SS, %ax
	movl	$__KERNEL_DS, %eax
	/* switch to normal stack */

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushl	$(~vector+0x80)			/* Note: always in signed byte range */
END(irq_entries_start)
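/*
 * Illustrative aside (not part of the original file): the vector encoding used
 * by the stubs above and undone by common_interrupt's "addl $-0x80", checked
 * in C. Names are hypothetical.
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	// Value each stub pushes: fits a signed byte for vectors 0x20..0xff,
 *	// so every stub gets the short "push imm8" encoding.
 *	static int8_t stub_push_value(unsigned int vector)
 *	{
 *		return (int8_t)(~vector + 0x80);
 *	}
 *
 *	// After "addl $-0x80" the stack holds ~vector, i.e. a value in
 *	// [-256, -1]; the IRQ code recovers the vector by complementing it.
 *	static unsigned int decode_vector(int32_t adjusted_orig_ax)
 *	{
 *		return (unsigned int)~adjusted_orig_ax & 0xff;
 *	}
 *
 *	int main(void)
 *	{
 *		for (unsigned int v = 0x20; v <= 0xff; v++)
 *			assert(decode_vector(stub_push_value(v) - 0x80) == v);
 *		return 0;
 *	}
 */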
/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
ENDPROC(common_interrupt)

#define BUILD_INTERRUPT3(name, nr, fn)	\
	ENCODE_FRAME_POINTER;		\

#ifdef CONFIG_TRACING
# define TRACE_BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
# define TRACE_BUILD_INTERRUPT(name, nr)

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\
	TRACE_BUILD_INTERRUPT(name, nr)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
	pushl	$do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
	ALTERNATIVE "pushl $do_general_protection",	\
		    "pushl $do_simd_coprocessor_error",	\
	pushl	$do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	pushl	$-1				# mark this as an int
	pushl	$do_device_not_available
END(device_not_available)

#ifdef CONFIG_PARAVIRT
	_ASM_EXTABLE(native_iret, iret_exc)

ENTRY(coprocessor_segment_overrun)
	pushl	$do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

	pushl	$do_invalid_TSS

ENTRY(segment_not_present)
	pushl	$do_segment_not_present
END(segment_not_present)

	pushl	$do_stack_segment

ENTRY(alignment_check)
	pushl	$do_alignment_check

	pushl	$0				# no error code
	pushl	$do_divide_error

#ifdef CONFIG_X86_MCE
	pushl	machine_check_vector

ENTRY(spurious_interrupt_bug)
	pushl	$do_spurious_interrupt_bug
END(spurious_interrupt_bug)

ENTRY(xen_hypervisor_callback)
	pushl	$-1				/* orig_ax = -1 => not a system call */
	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've re-enabled
	 * events and checked for pending events. This simulates the
	 * iret instruction's behaviour where it delivers a
	 * pending interrupt when enabling interrupts:
	 */
	movl	PT_EIP(%esp), %eax
	cmpl	$xen_iret_start_crit, %eax
	cmpl	$xen_iret_end_crit, %eax
	jmp	xen_iret_crit_fixup
	call	xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
ENDPROC(xen_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
 */
ENTRY(xen_failsafe_callback)
	/* EAX == 0 => Category 1 (Bad segment)
	   EAX != 0 => Category 2 (Bad IRET) */
5:	pushl	$-1				/* orig_ax = -1 => not a system call */
	jmp	ret_from_exception

.section .fixup, "ax"
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 xen_evtchn_do_upcall)

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)

BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 hyperv_vector_handler)

#endif /* CONFIG_HYPERV */

#ifdef CONFIG_TRACING
ENTRY(trace_page_fault)
	pushl	$trace_do_page_fault
END(trace_page_fault)

	/* the function address is in %gs's slot on the stack */
	movl	$(__KERNEL_PERCPU), %ecx
	movl	PT_GS(%esp), %edi		# get the function address
	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart
	movl	$(__USER_DS), %ecx
	movl	%esp, %eax			# pt_regs pointer
	jmp	ret_from_exception
END(common_exception)

/*
 * #DB can happen at the first instruction of
 * entry_SYSENTER_32 or in Xen's SYSENTER prologue. If this
 * happens, then we will be running on a very small stack. We
 * need to detect this condition and switch to the thread
 * stack before calling any C code at all.
 *
 * If you edit this code, keep in mind that NMIs can happen in here.
 */
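/*
 * Illustrative aside (not part of the original file): the "are we on the
 * SYSENTER stack?" test performed a few lines below (and repeated in the NMI
 * handler), restated in C. Names are hypothetical.
 *
 *	#include <stdbool.h>
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	static bool on_sysenter_stack(uintptr_t esp, uintptr_t stack_end,
 *				      size_t stack_size)
 *	{
 *		// The stack occupies [stack_end - stack_size, stack_end); an
 *		// unsigned "end - esp" below the size means esp lies inside
 *		// it, which is what the subl/cmpl/jb sequence checks.
 *		return (stack_end - esp) < stack_size;
 *	}
 */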
	pushl	$-1				# mark this as an int
	xorl	%edx, %edx			# error code 0
	movl	%esp, %eax			# pt_regs pointer

	/* Are we currently on the SYSENTER stack? */
	PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
	subl	%eax, %ecx			/* ecx = (end of SYSENTER_stack) - esp */
	cmpl	$SIZEOF_SYSENTER_stack, %ecx
	jb	.Ldebug_from_sysenter_stack

	jmp	ret_from_exception

.Ldebug_from_sysenter_stack:
	/* We're on the SYSENTER stack. Switch off. */
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	jmp	ret_from_exception

/*
 * NMI is doubly nasty. It can happen on the first instruction of
 * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning
 * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32
 * switched stacks. We handle both conditions by simply checking whether we
 * interrupted kernel code running on the SYSENTER stack.
 */
#ifdef CONFIG_X86_ESPFIX32
	cmpw	$__ESPFIX_SS, %ax
	je	.Lnmi_espfix_stack

	pushl	%eax				# pt_regs->orig_ax
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer

	/* Are we currently on the SYSENTER stack? */
	PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
	subl	%eax, %ecx			/* ecx = (end of SYSENTER_stack) - esp */
	cmpl	$SIZEOF_SYSENTER_stack, %ecx
	jb	.Lnmi_from_sysenter_stack

	/* Not on SYSENTER stack. */
	jmp	.Lrestore_all_notrace

.Lnmi_from_sysenter_stack:
	/*
	 * We're on the SYSENTER stack. Switch off. No one (not even debug)
	 * is using the thread stack right now, so it's safe for us to use it.
	 */
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	jmp	.Lrestore_all_notrace

#ifdef CONFIG_X86_ESPFIX32
	/*
	 * Create the ss:esp pointer that lss will use to switch back
	 * to the espfix stack.
	 */
	/* copy the iret frame of 12 bytes */
	ENCODE_FRAME_POINTER
	FIXUP_ESPFIX_STACK			# %eax == %esp
	xorl	%edx, %edx			# zero error code
	lss	12+4(%esp), %esp		# back to espfix stack

	pushl	$-1				# mark this as an int
	ENCODE_FRAME_POINTER
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	jmp	ret_from_exception

ENTRY(general_protection)
	pushl	$do_general_protection
	jmp	common_exception
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	pushl	$do_async_page_fault
	jmp	common_exception
END(async_page_fault)

ENTRY(rewind_stack_do_exit)
	/* Prevent any naive code from trying to unwind to our caller. */
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esi
	leal	-TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp
END(rewind_stack_do_exit)