| From foo@baz Tue Aug 14 16:14:56 CEST 2018 |
| From: Thomas Gleixner <tglx@linutronix.de> |
| Date: Fri, 13 Jul 2018 16:23:16 +0200 |
| Subject: x86/l1tf: Introduce vmx status variable |
| |
| From: Thomas Gleixner <tglx@linutronix.de> |
| |
| commit 72c6d2db64fa18c996ece8f06e499509e6c9a37e upstream |
| |
| Store the effective mitigation of VMX in a status variable and use it to |
| report the VMX state in the l1tf sysfs file. |
| |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| Tested-by: Jiri Kosina <jkosina@suse.cz> |
| Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com> |
| Link: https://lkml.kernel.org/r/20180713142322.433098358@linutronix.de |
| Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| --- |
| arch/x86/include/asm/vmx.h | 9 +++++++++ |
| arch/x86/kernel/cpu/bugs.c | 36 ++++++++++++++++++++++++++++++++++-- |
| arch/x86/kvm/vmx.c | 22 +++++++++++----------- |
| 3 files changed, 54 insertions(+), 13 deletions(-) |
| |
| --- a/arch/x86/include/asm/vmx.h |
| +++ b/arch/x86/include/asm/vmx.h |
| @@ -499,4 +499,13 @@ enum vm_instruction_error_number { |
| VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28, |
| }; |
| |
| +enum vmx_l1d_flush_state { |
| + VMENTER_L1D_FLUSH_AUTO, |
| + VMENTER_L1D_FLUSH_NEVER, |
| + VMENTER_L1D_FLUSH_COND, |
| + VMENTER_L1D_FLUSH_ALWAYS, |
| +}; |
| + |
| +extern enum vmx_l1d_flush_state l1tf_vmx_mitigation; |
| + |
| #endif |
| --- a/arch/x86/kernel/cpu/bugs.c |
| +++ b/arch/x86/kernel/cpu/bugs.c |
| @@ -21,6 +21,7 @@ |
| #include <asm/processor-flags.h> |
| #include <asm/fpu/internal.h> |
| #include <asm/msr.h> |
| +#include <asm/vmx.h> |
| #include <asm/paravirt.h> |
| #include <asm/alternative.h> |
| #include <asm/pgtable.h> |
| @@ -635,6 +636,12 @@ void x86_spec_ctrl_setup_ap(void) |
| |
| #undef pr_fmt |
| #define pr_fmt(fmt) "L1TF: " fmt |
| + |
| +#if IS_ENABLED(CONFIG_KVM_INTEL) |
| +enum vmx_l1d_flush_state l1tf_vmx_mitigation __ro_after_init = VMENTER_L1D_FLUSH_AUTO; |
| +EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); |
| +#endif |
| + |
| static void __init l1tf_select_mitigation(void) |
| { |
| u64 half_pa; |
| @@ -664,6 +671,32 @@ static void __init l1tf_select_mitigatio |
| |
| #ifdef CONFIG_SYSFS |
| |
| +#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion" |
| + |
| +#if IS_ENABLED(CONFIG_KVM_INTEL) |
| +static const char *l1tf_vmx_states[] = { |
| + [VMENTER_L1D_FLUSH_AUTO] = "auto", |
| + [VMENTER_L1D_FLUSH_NEVER] = "vulnerable", |
| + [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes", |
| + [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes", |
| +}; |
| + |
| +static ssize_t l1tf_show_state(char *buf) |
| +{ |
| + if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) |
| + return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG); |
| + |
| + return sprintf(buf, "%s; VMX: SMT %s, L1D %s\n", L1TF_DEFAULT_MSG, |
| + cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled", |
| + l1tf_vmx_states[l1tf_vmx_mitigation]); |
| +} |
| +#else |
| +static ssize_t l1tf_show_state(char *buf) |
| +{ |
| + return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG); |
| +} |
| +#endif |
| + |
| static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, |
| char *buf, unsigned int bug) |
| { |
| @@ -691,9 +724,8 @@ static ssize_t cpu_show_common(struct de |
| |
| case X86_BUG_L1TF: |
| if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) |
| - return sprintf(buf, "Mitigation: Page Table Inversion\n"); |
| + return l1tf_show_state(buf); |
| break; |
| - |
| default: |
| break; |
| } |
| --- a/arch/x86/kvm/vmx.c |
| +++ b/arch/x86/kvm/vmx.c |
| @@ -194,19 +194,13 @@ extern const ulong vmx_return; |
| |
| static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush); |
| |
| -/* These MUST be in sync with vmentry_l1d_param order. */ |
| -enum vmx_l1d_flush_state { |
| - VMENTER_L1D_FLUSH_NEVER, |
| - VMENTER_L1D_FLUSH_COND, |
| - VMENTER_L1D_FLUSH_ALWAYS, |
| -}; |
| - |
| static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush = VMENTER_L1D_FLUSH_COND; |
| |
| static const struct { |
| const char *option; |
| enum vmx_l1d_flush_state cmd; |
| } vmentry_l1d_param[] = { |
| + {"auto", VMENTER_L1D_FLUSH_AUTO}, |
| {"never", VMENTER_L1D_FLUSH_NEVER}, |
| {"cond", VMENTER_L1D_FLUSH_COND}, |
| {"always", VMENTER_L1D_FLUSH_ALWAYS}, |
| @@ -11690,8 +11684,12 @@ static int __init vmx_setup_l1d_flush(vo |
| { |
| struct page *page; |
| |
| + if (!boot_cpu_has_bug(X86_BUG_L1TF)) |
| + return 0; |
| + |
| + l1tf_vmx_mitigation = vmentry_l1d_flush; |
| + |
| if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER || |
| - !boot_cpu_has_bug(X86_BUG_L1TF) || |
| vmx_l1d_use_msr_save_list()) |
| return 0; |
| |
| @@ -11706,12 +11704,14 @@ static int __init vmx_setup_l1d_flush(vo |
| return 0; |
| } |
| |
| -static void vmx_free_l1d_flush_pages(void) |
| +static void vmx_cleanup_l1d_flush(void) |
| { |
| if (vmx_l1d_flush_pages) { |
| free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER); |
| vmx_l1d_flush_pages = NULL; |
| } |
| + /* Restore state so sysfs ignores VMX */ |
| + l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; |
| } |
| |
| static int __init vmx_init(void) |
| @@ -11725,7 +11725,7 @@ static int __init vmx_init(void) |
| r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), |
| __alignof__(struct vcpu_vmx), THIS_MODULE); |
| if (r) { |
| - vmx_free_l1d_flush_pages(); |
| + vmx_cleanup_l1d_flush(); |
| return r; |
| } |
| |
| @@ -11746,7 +11746,7 @@ static void __exit vmx_exit(void) |
| |
| kvm_exit(); |
| |
| - vmx_free_l1d_flush_pages(); |
| + vmx_cleanup_l1d_flush(); |
| } |
| |
| module_init(vmx_init) |