From foo@baz Tue Aug 14 16:14:56 CEST 2018
From: Nicolai Stange <nstange@suse.de>
Date: Wed, 18 Jul 2018 19:07:38 +0200
Subject: x86/KVM/VMX: Initialize the vmx_l1d_flush_pages' content

From: Nicolai Stange <nstange@suse.de>

commit 288d152c23dcf3c09da46c5c481903ca10ebfef7 upstream

The slow path in vmx_l1d_flush() reads from vmx_l1d_flush_pages in order
to evict the L1d cache.

However, these pages are never cleared and, in theory, their data could be
leaked.

More importantly, KSM could merge a nested hypervisor's vmx_l1d_flush_pages
to fewer than 1 << L1D_CACHE_ORDER host physical pages and this would break
the L1d flushing algorithm: L1D on x86_64 is tagged by physical addresses.
Fix this by initializing each of the vmx_l1d_flush_pages with a
different pattern.

Rename the "empty_zp" asm constraint identifier in vmx_l1d_flush() to
"flush_pages" to reflect this change.

Fixes: a47dd5f06714 ("x86/KVM/VMX: Add L1D flush algorithm")
Signed-off-by: Nicolai Stange <nstange@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
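[Editor's note, not part of the upstream commit: the KSM hazard is easy to
reproduce from userspace. KSM only merges pages whose contents are
identical, so giving each flush page its own byte pattern, as the hunk
below does, keeps the pages on distinct host physical pages. This matters
for a nested hypervisor because its "kernel" pages are ordinary guest
memory on the host, typically anonymous VMM memory marked MADV_MERGEABLE.
The standalone sketch below is a hypothetical illustration; it assumes a
kernel with CONFIG_KSM and ksmd enabled via /sys/kernel/mm/ksm/run, and
merge activity can be watched in /sys/kernel/mm/ksm/pages_sharing.]

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define NPAGES 16	/* stand-in for 1 << L1D_CACHE_ORDER */

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	unsigned int i;
	char *buf;

	buf = mmap(NULL, NPAGES * page_size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/*
	 * A different pattern per page defeats content-based merging;
	 * memset everything to the same value instead and ksmd may
	 * collapse all NPAGES onto a single host physical page.
	 */
	for (i = 0; i < NPAGES; i++)
		memset(buf + i * page_size, i + 1, page_size);

	/* Opt the range into KSM scanning, as VM memory often is. */
	if (madvise(buf, NPAGES * page_size, MADV_MERGEABLE)) {
		perror("madvise");
		return 1;
	}

	pause();	/* keep the mapping alive while ksmd scans */
	return 0;
}
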
 arch/x86/kvm/vmx.c |   17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -212,6 +212,7 @@ static void *vmx_l1d_flush_pages;
 static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
 {
 	struct page *page;
+	unsigned int i;
 
 	if (!enable_ept) {
 		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
@@ -244,6 +245,16 @@ static int vmx_setup_l1d_flush(enum vmx_
 		if (!page)
 			return -ENOMEM;
 		vmx_l1d_flush_pages = page_address(page);
+
+		/*
+		 * Initialize each page with a different pattern in
+		 * order to protect against KSM in the nested
+		 * virtualization case.
+		 */
+		for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
+			memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
+			       PAGE_SIZE);
+		}
 	}
 
 	l1tf_vmx_mitigation = l1tf;
@@ -8675,7 +8686,7 @@ static void vmx_l1d_flush(struct kvm_vcp
 		/* First ensure the pages are in the TLB */
 		"xorl %%eax, %%eax\n"
 		".Lpopulate_tlb:\n\t"
-		"movzbl (%[empty_zp], %%" _ASM_AX "), %%ecx\n\t"
+		"movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
 		"addl $4096, %%eax\n\t"
 		"cmpl %%eax, %[size]\n\t"
 		"jne .Lpopulate_tlb\n\t"
@@ -8684,12 +8695,12 @@ static void vmx_l1d_flush(struct kvm_vcp
 		/* Now fill the cache */
 		"xorl %%eax, %%eax\n"
 		".Lfill_cache:\n"
-		"movzbl (%[empty_zp], %%" _ASM_AX "), %%ecx\n\t"
+		"movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
 		"addl $64, %%eax\n\t"
 		"cmpl %%eax, %[size]\n\t"
 		"jne .Lfill_cache\n\t"
 		"lfence\n"
-		:: [empty_zp] "r" (vmx_l1d_flush_pages),
+		:: [flush_pages] "r" (vmx_l1d_flush_pages),
 		   [size] "r" (size)
 		: "eax", "ebx", "ecx", "edx");
 }
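
[Editor's note, not part of the upstream commit: for readers following the
asm above, here is a rough C equivalent of the two read loops. It is a
sketch only; the kernel keeps this sequence in asm precisely so the
compiler cannot reorder, combine, or elide the loads. "flush_pages" and
"size" mirror the [flush_pages] and [size] asm operands.]

/*
 * Sketch of the slow path in vmx_l1d_flush(). Illustrative only;
 * the real sequence must stay in asm.
 */
static void l1d_flush_sketch(const volatile char *flush_pages,
			     unsigned int size)
{
	unsigned int offset;

	/* First pass: one read per 4K page populates the TLB entries. */
	for (offset = 0; offset < size; offset += 4096)
		(void)flush_pages[offset];

	/* Second pass: one read per 64-byte line fills the L1D. */
	for (offset = 0; offset < size; offset += 64)
		(void)flush_pages[offset];
}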