| From 04633df0c43d710e5f696b06539c100898678235 Mon Sep 17 00:00:00 2001 |
| From: Borislav Petkov <bp@suse.de> |
| Date: Thu, 5 Nov 2015 16:57:56 +0100 |
| Subject: x86/cpu: Call verify_cpu() after having entered long mode too |
| |
| commit 04633df0c43d710e5f696b06539c100898678235 upstream. |
| |
| When we get loaded by a 64-bit bootloader, kernel entry point is |
| startup_64 in head_64.S. We don't trust any and all bootloaders because |
| some will fiddle with CPU configuration so we go ahead and massage each |
| CPU into sanity again. |
| |
| For example, some Dell BIOSes have this XD disable feature which sets |
| IA32_MISC_ENABLE[34] and disables NX. This might be some dumb workaround |
| for other OSes but Linux sure doesn't need it. |
| |
| A similar thing is present in the Surface 3 firmware - see |
| https://bugzilla.kernel.org/show_bug.cgi?id=106051 - which sets this bit |
| only on the BSP: |
| |
| # rdmsr -a 0x1a0 |
| 400850089 |
| 850089 |
| 850089 |
| 850089 |
| |
| I know, right?! |
| |
| There's not even an off switch in there. |
| |
| So fix all those cases by sanitizing the 64-bit entry point too. For |
| that, make verify_cpu() callable in 64-bit mode also. |
| |
| Requested-and-debugged-by: "H. Peter Anvin" <hpa@zytor.com> |
| Reported-and-tested-by: Bastien Nocera <bugzilla@hadess.net> |
| Signed-off-by: Borislav Petkov <bp@suse.de> |
| Cc: Matt Fleming <matt@codeblueprint.co.uk> |
| Cc: Peter Zijlstra <peterz@infradead.org> |
| Link: http://lkml.kernel.org/r/1446739076-21303-1-git-send-email-bp@alien8.de |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| [lizf: Backported to 3.4: adjust context] |
| Signed-off-by: Zefan Li <lizefan@huawei.com> |
| --- |
| arch/x86/kernel/head_64.S | 8 ++++++++ |
| arch/x86/kernel/verify_cpu.S | 12 +++++++----- |
| 2 files changed, 15 insertions(+), 5 deletions(-) |
| |
| --- a/arch/x86/kernel/head_64.S |
| +++ b/arch/x86/kernel/head_64.S |
| @@ -45,6 +45,9 @@ L3_START_KERNEL = pud_index(__START_KERN |
| .globl startup_64 |
| startup_64: |
| |
| + /* Sanitize CPU configuration */ |
| + call verify_cpu |
| + |
| /* |
| * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 1, |
| * and someone has loaded an identity mapped page table |
| @@ -160,6 +163,9 @@ ENTRY(secondary_startup_64) |
| * after the boot processor executes this code. |
| */ |
| |
| + /* Sanitize CPU configuration */ |
| + call verify_cpu |
| + |
| /* Enable PAE mode and PGE */ |
| movl $(X86_CR4_PAE | X86_CR4_PGE), %eax |
| movq %rax, %cr4 |
| @@ -253,6 +259,8 @@ ENTRY(secondary_startup_64) |
| pushq %rax # target address in negative space |
| lretq |
| |
| +#include "verify_cpu.S" |
| + |
| /* SMP bootup changes these two */ |
| __REFDATA |
| .align 8 |
| --- a/arch/x86/kernel/verify_cpu.S |
| +++ b/arch/x86/kernel/verify_cpu.S |
| @@ -34,10 +34,11 @@ |
| #include <asm/msr-index.h> |
| |
| verify_cpu: |
| - pushfl # Save caller passed flags |
| - pushl $0 # Kill any dangerous flags |
| - popfl |
| + pushf # Save caller passed flags |
| + push $0 # Kill any dangerous flags |
| + popf |
| |
| +#ifndef __x86_64__ |
| pushfl # standard way to check for cpuid |
| popl %eax |
| movl %eax,%ebx |
| @@ -48,6 +49,7 @@ verify_cpu: |
| popl %eax |
| cmpl %eax,%ebx |
| jz verify_cpu_no_longmode # cpu has no cpuid |
| +#endif |
| |
| movl $0x0,%eax # See if cpuid 1 is implemented |
| cpuid |
| @@ -130,10 +132,10 @@ verify_cpu_sse_test: |
| jmp verify_cpu_sse_test # try again |
| |
| verify_cpu_no_longmode: |
| - popfl # Restore caller passed flags |
| + popf # Restore caller passed flags |
| movl $1,%eax |
| ret |
| verify_cpu_sse_ok: |
| - popfl # Restore caller passed flags |
| + popf # Restore caller passed flags |
| xorl %eax, %eax |
| ret |