From: Andy Lutomirski
Date: Fri, 3 Jul 2015 19:44:29 +0000 (-0700)
Subject: x86/asm/entry/64: Save all regs on interrupt entry
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=ff467594f2a4be01a0fa5e9ffc223fa930d232dd;p=linux-beck.git

x86/asm/entry/64: Save all regs on interrupt entry

To prepare for the big rewrite of the error and interrupt exit paths,
we will need pt_regs completely filled in.

It's already completely filled in when error_exit runs, so rearrange
interrupt handling to match it.  This will slow down interrupt
handling very slightly (eight instructions), but the simplification
it enables will be more than worth it.

Signed-off-by: Andy Lutomirski
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Denys Vlasenko
Cc: Denys Vlasenko
Cc: Frederic Weisbecker
Cc: H. Peter Anvin
Cc: Kees Cook
Cc: Linus Torvalds
Cc: Oleg Nesterov
Cc: Peter Zijlstra
Cc: Rik van Riel
Cc: Thomas Gleixner
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/d8a766a7f558b30e6e01352854628a2d9943460c.1435952415.git.luto@kernel.org
Signed-off-by: Ingo Molnar
---

diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 519207f2ee76..3c71dd947c7b 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -135,9 +135,6 @@ For 32-bit we have the following conventions - kernel is built with
 	movq %rbp, 4*8+\offset(%rsp)
 	movq %rbx, 5*8+\offset(%rsp)
 	.endm
-	.macro SAVE_EXTRA_REGS_RBP offset=0
-	movq %rbp, 4*8+\offset(%rsp)
-	.endm
 
 	.macro RESTORE_EXTRA_REGS offset=0
 	movq 0*8+\offset(%rsp), %r15
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 4ca5b782ed70..65029f48bcc4 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -502,21 +502,13 @@ END(irq_entries_start)
 /* 0(%rsp): ~(interrupt number) */
 	.macro interrupt func
 	cld
-	/*
-	 * Since nothing in interrupt handling code touches r12...r15 members
-	 * of "struct pt_regs", and since interrupts can nest, we can save
-	 * four stack slots and simultaneously provide
-	 * an unwind-friendly stack layout by saving "truncated" pt_regs
-	 * exactly up to rbp slot, without these members.
-	 */
-	ALLOC_PT_GPREGS_ON_STACK -RBP
-	SAVE_C_REGS -RBP
-	/* this goes to 0(%rsp) for unwinder, not for saving the value: */
-	SAVE_EXTRA_REGS_RBP -RBP
+	ALLOC_PT_GPREGS_ON_STACK
+	SAVE_C_REGS
+	SAVE_EXTRA_REGS
 
-	leaq	-RBP(%rsp), %rdi	/* arg1 for \func (pointer to pt_regs) */
+	movq	%rsp,%rdi	/* arg1 for \func (pointer to pt_regs) */
 
-	testb	$3, CS-RBP(%rsp)
+	testb	$3, CS(%rsp)
 	jz	1f
 	SWAPGS
 1:
@@ -553,9 +545,7 @@ ret_from_intr:
 	decl	PER_CPU_VAR(irq_count)
 
 	/* Restore saved previous stack */
-	popq	%rsi
-	/* return code expects complete pt_regs - adjust rsp accordingly: */
-	leaq	-RBP(%rsi), %rsp
+	popq	%rsp
 
 	testb	$3, CS(%rsp)
 	jz	retint_kernel
@@ -580,7 +570,7 @@ retint_swapgs:	/* return to user-space */
 	TRACE_IRQS_IRETQ
 
 	SWAPGS
-	jmp	restore_c_regs_and_iret
+	jmp	restore_regs_and_iret
 
 /* Returning to kernel space */
 retint_kernel:
@@ -604,6 +594,8 @@ retint_kernel:
 	 * At this label, code paths which return to kernel and to user,
 	 * which come from interrupts/exception and from syscalls, merge.
 	 */
+restore_regs_and_iret:
+	RESTORE_EXTRA_REGS
 restore_c_regs_and_iret:
 	RESTORE_C_REGS
 	REMOVE_PT_GPREGS_FROM_STACK 8
@@ -674,12 +666,10 @@ retint_signal:
 	jz	retint_swapgs
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
-	SAVE_EXTRA_REGS
 	movq	$-1, ORIG_RAX(%rsp)
 	xorl	%esi, %esi		/* oldset */
 	movq	%rsp, %rdi		/* &pt_regs */
 	call	do_notify_resume
-	RESTORE_EXTRA_REGS
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	GET_THREAD_INFO(%rcx)
@@ -1160,7 +1150,6 @@ END(error_entry)
  */
 ENTRY(error_exit)
 	movl	%ebx, %eax
-	RESTORE_EXTRA_REGS
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	testl	%eax, %eax
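
For reference, a rough sketch (not part of the patch) of what "pt_regs
completely filled in" means in stack terms: slot numbers follow the
0*8..14*8 offsets used by SAVE_EXTRA_REGS and SAVE_C_REGS in
arch/x86/entry/calling.h, and the field names are assumed to mirror the
x86-64 struct pt_regs in arch/x86/include/asm/ptrace.h of this era. The
struct name below is made up for illustration only.

	/* Illustrative sketch only -- not part of this patch.
	 * Mirrors the x86-64 pt_regs layout that the interrupt entry
	 * path now fills in completely before calling the handler.
	 */
	struct pt_regs_sketch {
		/* slots 0*8..5*8: callee-saved regs, now written by SAVE_EXTRA_REGS */
		unsigned long r15;
		unsigned long r14;
		unsigned long r13;
		unsigned long r12;
		unsigned long bp;
		unsigned long bx;
		/* slots 6*8..14*8: caller-clobbered regs, written by SAVE_C_REGS */
		unsigned long r11;
		unsigned long r10;
		unsigned long r9;
		unsigned long r8;
		unsigned long ax;
		unsigned long cx;
		unsigned long dx;
		unsigned long si;
		unsigned long di;
		/* pushed before the macros run: ~(interrupt number) from the
		 * IRQ stub, then the hardware iret frame */
		unsigned long orig_ax;
		unsigned long ip;
		unsigned long cs;
		unsigned long flags;
		unsigned long sp;
		unsigned long ss;
	};

With all fifteen GP slots valid, %rsp itself points at a complete
pt_regs, which is why the old "leaq -RBP(%rsp), %rdi" adjustment can
become a plain "movq %rsp, %rdi" and the nested-interrupt stack switch
can simply "popq %rsp".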