| From foo@baz Tue Aug 14 16:14:56 CEST 2018 |
| From: Andi Kleen <ak@linux.intel.com> |
| Date: Tue, 7 Aug 2018 15:09:39 -0700 |
| Subject: x86/mm/pat: Make set_memory_np() L1TF safe |
| |
| From: Andi Kleen <ak@linux.intel.com> |
| |
| commit 958f79b9ee55dfaf00c8106ed1c22a2919e0028b upstream |
| |
| set_memory_np() is used to mark kernel mappings not present, but it has |
| its own open coded mechanism which does not have the L1TF protection of |
| inverting the address bits. |
| |
| Replace the open coded PTE manipulation with the L1TF protecting low level |
| PTE routines. |
| |
| Passes the CPA self test. |
| |
| Signed-off-by: Andi Kleen <ak@linux.intel.com> |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| [ dwmw2: Pull in pud_mkhuge() from commit a00cc7d9dd, and pfn_pud() ] |
| Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| --- |
| arch/x86/include/asm/pgtable.h | 27 +++++++++++++++++++++++++++ |
| arch/x86/mm/pageattr.c | 8 ++++---- |
| 2 files changed, 31 insertions(+), 4 deletions(-) |
| |
| --- a/arch/x86/include/asm/pgtable.h |
| +++ b/arch/x86/include/asm/pgtable.h |
| @@ -413,12 +413,39 @@ static inline pmd_t pfn_pmd(unsigned lon |
| return __pmd(pfn | massage_pgprot(pgprot)); |
| } |
| |
| +static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot) |
| +{ |
| + phys_addr_t pfn = page_nr << PAGE_SHIFT; |
| + pfn ^= protnone_mask(pgprot_val(pgprot)); |
| + pfn &= PHYSICAL_PUD_PAGE_MASK; |
| + return __pud(pfn | massage_pgprot(pgprot)); |
| +} |
| + |
| static inline pmd_t pmd_mknotpresent(pmd_t pmd) |
| { |
| return pfn_pmd(pmd_pfn(pmd), |
| __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE))); |
| } |
| |
| +static inline pud_t pud_set_flags(pud_t pud, pudval_t set) |
| +{ |
| + pudval_t v = native_pud_val(pud); |
| + |
| + return __pud(v | set); |
| +} |
| + |
| +static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear) |
| +{ |
| + pudval_t v = native_pud_val(pud); |
| + |
| + return __pud(v & ~clear); |
| +} |
| + |
| +static inline pud_t pud_mkhuge(pud_t pud) |
| +{ |
| + return pud_set_flags(pud, _PAGE_PSE); |
| +} |
| + |
| static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask); |
| |
| static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
| --- a/arch/x86/mm/pageattr.c |
| +++ b/arch/x86/mm/pageattr.c |
| @@ -1001,8 +1001,8 @@ static long populate_pmd(struct cpa_data |
| |
| pmd = pmd_offset(pud, start); |
| |
| - set_pmd(pmd, __pmd(cpa->pfn << PAGE_SHIFT | _PAGE_PSE | |
| - massage_pgprot(pmd_pgprot))); |
| + set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn, |
| + canon_pgprot(pmd_pgprot)))); |
| |
| start += PMD_SIZE; |
| cpa->pfn += PMD_SIZE >> PAGE_SHIFT; |
| @@ -1074,8 +1074,8 @@ static long populate_pud(struct cpa_data |
| * Map everything starting from the Gb boundary, possibly with 1G pages |
| */ |
| while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) { |
| - set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE | |
| - massage_pgprot(pud_pgprot))); |
| + set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn, |
| + canon_pgprot(pud_pgprot)))); |
| |
| start += PUD_SIZE; |
| cpa->pfn += PUD_SIZE >> PAGE_SHIFT; |