blob: 27e181b6e8723de189d5c6141a17f185d87a1a8a [file] [log] [blame]
From: Andrea Arcangeli <aarcange@redhat.com>
Date: Tue, 5 Dec 2017 21:15:07 +0100
Subject: x86/mm/kaiser: re-enable vsyscalls
Re-enable vsyscalls under KAISER to avoid breaking the kernel ABI.
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
[Hugh Dickins: Backported to 3.2:
- Leave out the PVCLOCK_FIXMAP user mapping, which does not apply to
this tree
- For safety added vsyscall_pgprot, and a BUG_ON if _PAGE_USER
outside of FIXMAP.]
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
---
arch/x86/include/asm/vsyscall.h | 1 +
arch/x86/kernel/hpet.c | 3 +++
arch/x86/kernel/vsyscall_64.c | 7 ++++---
arch/x86/mm/kaiser.c | 14 +++++++++++---
4 files changed, 19 insertions(+), 6 deletions(-)
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index eaea1d31f753..143e98b28081 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -22,6 +22,7 @@ enum vsyscall_num {
/* kernel space (writeable) */
extern int vgetcpu_mode;
extern struct timezone sys_tz;
+extern unsigned long vsyscall_pgprot;
#include <asm/vvar.h>
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 4970ef070f2f..02fd03bf15dd 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -12,6 +12,7 @@
#include <linux/cpu.h>
#include <linux/pm.h>
#include <linux/io.h>
+#include <linux/kaiser.h>
#include <asm/fixmap.h>
#include <asm/hpet.h>
@@ -74,6 +75,8 @@ static inline void hpet_set_mapping(void)
hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
#ifdef CONFIG_X86_64
__set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VVAR_NOCACHE);
+ kaiser_add_mapping(__fix_to_virt(VSYSCALL_HPET), PAGE_SIZE,
+ __PAGE_KERNEL_VVAR_NOCACHE);
#endif
}
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index e4d4a22e8b94..3178f308609a 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -58,6 +58,7 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
};
static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
+unsigned long vsyscall_pgprot = __PAGE_KERNEL_VSYSCALL;
static int __init vsyscall_setup(char *str)
{
@@ -274,10 +275,10 @@ void __init map_vsyscall(void)
extern char __vvar_page;
unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
+ if (vsyscall_mode != NATIVE)
+ vsyscall_pgprot = __PAGE_KERNEL_VVAR;
__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
- vsyscall_mode == NATIVE
- ? PAGE_KERNEL_VSYSCALL
- : PAGE_KERNEL_VVAR);
+ __pgprot(vsyscall_pgprot));
BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
(unsigned long)VSYSCALL_START);
diff --git a/arch/x86/mm/kaiser.c b/arch/x86/mm/kaiser.c
index 79b0222ffa74..ab1dfa607546 100644
--- a/arch/x86/mm/kaiser.c
+++ b/arch/x86/mm/kaiser.c
@@ -16,6 +16,7 @@ extern struct mm_struct init_mm;
#include <asm/kaiser.h>
#include <asm/tlbflush.h> /* to verify its kaiser declarations */
+#include <asm/vsyscall.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/desc.h>
@@ -133,7 +134,7 @@ static pte_t *kaiser_pagetable_walk(unsigned long address)
return NULL;
spin_lock(&shadow_table_allocation_lock);
if (pud_none(*pud)) {
- set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
+ set_pud(pud, __pud(_PAGE_TABLE | __pa(new_pmd_page)));
__inc_zone_page_state(virt_to_page((void *)
new_pmd_page), NR_KAISERTABLE);
} else
@@ -153,7 +154,7 @@ static pte_t *kaiser_pagetable_walk(unsigned long address)
return NULL;
spin_lock(&shadow_table_allocation_lock);
if (pmd_none(*pmd)) {
- set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
+ set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(new_pte_page)));
__inc_zone_page_state(virt_to_page((void *)
new_pte_page), NR_KAISERTABLE);
} else
@@ -174,6 +175,9 @@ int kaiser_add_user_map(const void *__start_addr, unsigned long size,
unsigned long end_addr = PAGE_ALIGN(start_addr + size);
unsigned long target_address;
+ if (flags & _PAGE_USER)
+ BUG_ON(address < FIXADDR_START || end_addr >= FIXADDR_TOP);
+
for (; address < end_addr; address += PAGE_SIZE) {
target_address = get_pa_from_mapping(address);
if (target_address == -1) {
@@ -227,7 +231,7 @@ static void __init kaiser_init_all_pgds(void)
break;
}
inc_zone_page_state(virt_to_page(pud), NR_KAISERTABLE);
- new_pgd = __pgd(_KERNPG_TABLE |__pa(pud));
+ new_pgd = __pgd(_PAGE_TABLE |__pa(pud));
/*
* Make sure not to stomp on some other pgd entry.
*/
@@ -285,6 +289,10 @@ void __init kaiser_init(void)
kaiser_add_user_map_early((void *)idt_descr.address,
sizeof(gate_desc) * NR_VECTORS,
__PAGE_KERNEL_RO);
+ kaiser_add_user_map_early((void *)VVAR_ADDRESS, PAGE_SIZE,
+ __PAGE_KERNEL_VVAR);
+ kaiser_add_user_map_early((void *)VSYSCALL_START, PAGE_SIZE,
+ vsyscall_pgprot);
kaiser_add_user_map_early(&x86_cr3_pcid_noflush,
sizeof(x86_cr3_pcid_noflush),
__PAGE_KERNEL);