From 9697fa39efd3fc3692f2949d4045f393ec58450b Mon Sep 17 00:00:00 2001
From: David Woodhouse <dwmw@amazon.co.uk>
Date: Thu, 11 Jan 2018 21:46:27 +0000
Subject: x86/retpoline/crypto: Convert crypto assembler indirect jumps

From: David Woodhouse <dwmw@amazon.co.uk>

commit 9697fa39efd3fc3692f2949d4045f393ec58450b upstream.

Convert all indirect jumps in crypto assembler code to use non-speculative
sequences when CONFIG_RETPOLINE is enabled.
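
For reference, a rough sketch of what CALL_NOSPEC %r11 expands to when
retpolines are active (simplified from <asm/nospec-branch.h>; the real
macro uses ALTERNATIVE to patch back to a plain "call *%r11" on CPUs
that don't need it, and the label names below are illustrative only):

	jmp	.Ldo_call		# skip over the thunk body
.Lthunk:
	call	.Lset_target		# pushes .Lspec_trap as return address
.Lspec_trap:
	pause				# the RSB predicts the ret below
	lfence				# returns here, so speculation spins
	jmp	.Lspec_trap		# harmlessly in this trap
.Lset_target:
	mov	%r11, (%rsp)		# overwrite the pushed address with
	ret				# the real target; ret branches to *%r11
.Ldo_call:
	call	.Lthunk			# architecturally equals "call *%r11"

JMP_NOSPEC (used for the crc32c jump table below) is the same trapping
sequence without the outer call, replacing a plain "jmp *reg".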

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Arjan van de Ven <arjan@linux.intel.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Cc: gnomes@lxorguk.ukuu.org.uk
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: thomas.lendacky@amd.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Jiri Kosina <jikos@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Kees Cook <keescook@google.com>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
Cc: Paul Turner <pjt@google.com>
Link: https://lkml.kernel.org/r/1515707194-20531-6-git-send-email-dwmw@amazon.co.uk
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/crypto/aesni-intel_asm.S            |    5 +++--
 arch/x86/crypto/camellia-aesni-avx-asm_64.S  |    3 ++-
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S |    3 ++-
 arch/x86/crypto/crc32c-pcl-intel-asm_64.S    |    3 ++-
 4 files changed, 9 insertions(+), 5 deletions(-)

--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -32,6 +32,7 @@
 #include <linux/linkage.h>
 #include <asm/inst.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>
 
 /*
  * The following macros are used to move an (un)aligned 16 byte value to/from
@@ -2884,7 +2885,7 @@ ENTRY(aesni_xts_crypt8)
 	pxor INC, STATE4
 	movdqu IV, 0x30(OUTP)
 
-	call *%r11
+	CALL_NOSPEC %r11
 
 	movdqu 0x00(OUTP), INC
 	pxor INC, STATE1
@@ -2929,7 +2930,7 @@ ENTRY(aesni_xts_crypt8)
 	_aesni_gf128mul_x_ble()
 	movups IV, (IVP)
 
-	call *%r11
+	CALL_NOSPEC %r11
 
 	movdqu 0x40(OUTP), INC
 	pxor INC, STATE1
--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -17,6 +17,7 @@
 
 #include <linux/linkage.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>
 
 #define CAMELLIA_TABLE_BYTE_LEN 272
 
@@ -1227,7 +1228,7 @@ camellia_xts_crypt_16way:
 	vpxor 14 * 16(%rax), %xmm15, %xmm14;
 	vpxor 15 * 16(%rax), %xmm15, %xmm15;
 
-	call *%r9;
+	CALL_NOSPEC %r9;
 
 	addq $(16 * 16), %rsp;
 
--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
@@ -12,6 +12,7 @@
 
 #include <linux/linkage.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>
 
 #define CAMELLIA_TABLE_BYTE_LEN 272
 
@@ -1343,7 +1344,7 @@ camellia_xts_crypt_32way:
 	vpxor 14 * 32(%rax), %ymm15, %ymm14;
 	vpxor 15 * 32(%rax), %ymm15, %ymm15;
 
-	call *%r9;
+	CALL_NOSPEC %r9;
 
 	addq $(16 * 32), %rsp;
 
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -45,6 +45,7 @@
 
 #include <asm/inst.h>
 #include <linux/linkage.h>
+#include <asm/nospec-branch.h>
 
 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
 
@@ -172,7 +173,7 @@ continue_block:
 	movzxw	(bufp, %rax, 2), len
 	lea	crc_array(%rip), bufp
 	lea	(bufp, len, 1), bufp
-	jmp	*bufp
+	JMP_NOSPEC bufp
 
 ################################################################
 ## 2a) PROCESS FULL BLOCKS: