| From foo@baz Tue Aug 14 16:14:56 CEST 2018 |
| From: Andi Kleen <ak@linux.intel.com> |
| Date: Tue, 7 Aug 2018 15:09:38 -0700 |
| Subject: x86/mm/kmmio: Make the tracer robust against L1TF |
| |
| From: Andi Kleen <ak@linux.intel.com> |
| |
| commit 1063711b57393c1999248cccb57bebfaf16739e7 upstream |
| |
| The mmio tracer sets io mapping PTEs and PMDs to non present when enabled |
| without inverting the address bits, which makes the PTE entry vulnerable |
| for L1TF. |
| |
| Make it use the right low level macros to actually invert the address bits |
| to protect against L1TF. |
| |
| In principle this could be avoided because MMIO tracing is not likely to be |
| enabled on production machines, but the fix is straightforward and for |
| consistency's sake it's better to get rid of the open coded PTE manipulation. |
| |
| Signed-off-by: Andi Kleen <ak@linux.intel.com> |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| --- |
| arch/x86/mm/kmmio.c | 25 +++++++++++++++---------- |
| 1 file changed, 15 insertions(+), 10 deletions(-) |
| |
| --- a/arch/x86/mm/kmmio.c |
| +++ b/arch/x86/mm/kmmio.c |
| @@ -125,24 +125,29 @@ static struct kmmio_fault_page *get_kmmi |
| |
| static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old) |
| { |
| + pmd_t new_pmd; |
| pmdval_t v = pmd_val(*pmd); |
| if (clear) { |
| - *old = v & _PAGE_PRESENT; |
| - v &= ~_PAGE_PRESENT; |
| - } else /* presume this has been called with clear==true previously */ |
| - v |= *old; |
| - set_pmd(pmd, __pmd(v)); |
| + *old = v; |
| + new_pmd = pmd_mknotpresent(*pmd); |
| + } else { |
| + /* Presume this has been called with clear==true previously */ |
| + new_pmd = __pmd(*old); |
| + } |
| + set_pmd(pmd, new_pmd); |
| } |
| |
| static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old) |
| { |
| pteval_t v = pte_val(*pte); |
| if (clear) { |
| - *old = v & _PAGE_PRESENT; |
| - v &= ~_PAGE_PRESENT; |
| - } else /* presume this has been called with clear==true previously */ |
| - v |= *old; |
| - set_pte_atomic(pte, __pte(v)); |
| + *old = v; |
| + /* Nothing should care about address */ |
| + pte_clear(&init_mm, 0, pte); |
| + } else { |
| + /* Presume this has been called with clear==true previously */ |
| + set_pte_atomic(pte, __pte(*old)); |
| + } |
| } |
| |
| static int clear_page_presence(struct kmmio_fault_page *f, bool clear) |