From 2641f08bb7fc63a636a2b18173221d7040a3512e Mon Sep 17 00:00:00 2001
From: David Woodhouse <dwmw@amazon.co.uk>
Date: Thu, 11 Jan 2018 21:46:28 +0000
Subject: x86/retpoline/entry: Convert entry assembler indirect jumps

From: David Woodhouse <dwmw@amazon.co.uk>

commit 2641f08bb7fc63a636a2b18173221d7040a3512e upstream.

Convert indirect jumps in core 32/64bit entry assembler code to use
non-speculative sequences when CONFIG_RETPOLINE is enabled.

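For reference, the non-speculative sequence that JMP_NOSPEC/CALL_NOSPEC
expand to is, roughly, the classic retpoline below (a simplified sketch
of the asm/nospec-branch.h infrastructure, shown here for %rax; label
names are illustrative):

	call	.Ldo_rop	/* pushes &.Lspec_trap, then jumps below */
.Lspec_trap:
	pause			/* speculation of the 'ret' lands here and spins */
	jmp	.Lspec_trap
.Ldo_rop:
	mov	%rax, (%rsp)	/* replace the return address with the real target */
	ret			/* architecturally jumps to *%rax */
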
Don't use CALL_NOSPEC in entry_SYSCALL_64_fastpath because the return
address after the 'call' instruction must be *precisely* at the
.Lentry_SYSCALL_64_after_fastpath_call label for stub_ptregs_64 to
work, and the use of alternatives will mess that up unless we play
horrid games to prepend with NOPs and make the variants the same
length. It's not worth it; in the case where we ALTERNATIVE out the
retpoline, the first instruction at __x86_indirect_thunk_rax is going
to be a bare jmp *%rax anyway.

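To see why calling the thunk directly is fine, note that the thunk body
(simplified here, omitting the CFI annotations; the real one is
generated per-register in arch/x86/lib/retpoline.S by the earlier
infrastructure patch) is just:

ENTRY(__x86_indirect_thunk_rax)
	JMP_NOSPEC %rax		/* alternatives patch this down to a bare
				 * 'jmp *%rax' when retpoline is disabled */
ENDPROC(__x86_indirect_thunk_rax)

so the fastpath's direct 'call __x86_indirect_thunk_rax' keeps the
return address fixed while still getting the retpoline (or the bare
indirect jump) inside the thunk.
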
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: gnomes@lxorguk.ukuu.org.uk
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: thomas.lendacky@amd.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Jiri Kosina <jikos@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Kees Cook <keescook@google.com>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
Cc: Paul Turner <pjt@google.com>
Link: https://lkml.kernel.org/r/1515707194-20531-7-git-send-email-dwmw@amazon.co.uk
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/entry/entry_32.S |  5 +++--
 arch/x86/entry/entry_64.S | 12 +++++++++---
 2 files changed, 12 insertions(+), 5 deletions(-)

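(For context, and not part of the diff itself: the NOSPEC macros used
below come from asm/nospec-branch.h and are patched at boot via
alternatives. A simplified sketch of the jump variant, omitting the
objtool annotations:

.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ALTERNATIVE_2 __stringify(jmp *\reg), \
		__stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \
		__stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
#else
	jmp	*\reg
#endif
.endm

CALL_NOSPEC is the analogous sequence built around 'call'.)
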
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -44,6 +44,7 @@
 #include <asm/asm.h>
 #include <asm/smap.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>

 	.section .entry.text, "ax"

@@ -290,7 +291,7 @@ ENTRY(ret_from_fork)

 	/* kernel thread */
 1:	movl	%edi, %eax
-	call	*%ebx
+	CALL_NOSPEC %ebx
 	/*
 	 * A kernel thread is allowed to return here after successfully
 	 * calling do_execve(). Exit to userspace to complete the execve()
@@ -919,7 +920,7 @@ common_exception:
 	movl	%ecx, %es
 	TRACE_IRQS_OFF
 	movl	%esp, %eax		# pt_regs pointer
-	call	*%edi
+	CALL_NOSPEC %edi
 	jmp	ret_from_exception
 END(common_exception)

--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -37,6 +37,7 @@
 #include <asm/pgtable_types.h>
 #include <asm/export.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>
 #include <linux/err.h>

 #include "calling.h"
@@ -187,7 +188,7 @@ ENTRY(entry_SYSCALL_64_trampoline)
 	 */
 	pushq	%rdi
 	movq	$entry_SYSCALL_64_stage2, %rdi
-	jmp	*%rdi
+	JMP_NOSPEC %rdi
 END(entry_SYSCALL_64_trampoline)

 	.popsection
@@ -266,7 +267,12 @@ entry_SYSCALL_64_fastpath:
 	 * It might end up jumping to the slow path. If it jumps, RAX
 	 * and all argument registers are clobbered.
 	 */
+#ifdef CONFIG_RETPOLINE
+	movq	sys_call_table(, %rax, 8), %rax
+	call	__x86_indirect_thunk_rax
+#else
 	call	*sys_call_table(, %rax, 8)
+#endif
 .Lentry_SYSCALL_64_after_fastpath_call:

 	movq	%rax, RAX(%rsp)
@@ -438,7 +444,7 @@ ENTRY(stub_ptregs_64)
 	jmp	entry_SYSCALL64_slow_path

 1:
-	jmp	*%rax			/* Called from C */
+	JMP_NOSPEC %rax			/* Called from C */
 END(stub_ptregs_64)

 .macro ptregs_stub func
@@ -517,7 +523,7 @@ ENTRY(ret_from_fork)
 1:
 	/* kernel thread */
 	movq	%r12, %rdi
-	call	*%rbx
+	CALL_NOSPEC %rbx
 	/*
 	 * A kernel thread is allowed to return here after successfully
 	 * calling do_execve(). Exit to userspace to complete the execve()