blob: 5c8bc94837ae4748da016d4a5fd8c1dcd8c3605c [file] [log] [blame]
Subject: x86: highmem: Make it work on RT really
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 08 Jan 2013 12:50:19 +0100
It had been enabled for quite some time, but never really worked.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/x86/include/asm/highmem.h | 23 +++++++++++++++++++++++
arch/x86/mm/highmem_32.c | 12 ++++++++++++
include/linux/highmem.h | 2 ++
mm/highmem.c | 21 +++++++++++++++++----
4 files changed, 54 insertions(+), 4 deletions(-)
Index: linux-stable/arch/x86/include/asm/highmem.h
===================================================================
--- linux-stable.orig/arch/x86/include/asm/highmem.h
+++ linux-stable/arch/x86/include/asm/highmem.h
@@ -56,16 +56,39 @@ extern unsigned long highstart_pfn, high
extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
+extern void *kmap_high_prot(struct page *page, pgprot_t prot);
void *kmap(struct page *page);
void kunmap(struct page *page);
+#ifndef CONFIG_PREEMPT_RT_FULL
void *kmap_atomic_prot(struct page *page, pgprot_t prot);
void *kmap_atomic(struct page *page);
void __kunmap_atomic(void *kvaddr);
void *kmap_atomic_pfn(unsigned long pfn);
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
struct page *kmap_atomic_to_page(void *ptr);
+#else
+void *__kmap_prot(struct page *page, pgprot_t prot);
+# define kmap_atomic(page) \
+ ({ pagefault_disable(); kmap(page); })
+
+# define kmap_atomic_pfn(pfn) \
+ ({ pagefault_disable(); kmap(pfn_to_page(pfn)); })
+
+# define __kunmap_atomic(kvaddr) \
+ do { kunmap(kmap_to_page(kvaddr)); pagefault_enable(); } while(0)
+
+# define kmap_atomic_prot(page, prot) \
+ ({ pagefault_disable(); __kmap_prot(page, prot); })
+
+# define kmap_atomic_prot_pfn(pfn, prot) \
+ ({ pagefault_disable(); __kmap_prot(pfn_to_page(pfn), prot); })
+
+# define kmap_atomic_to_page(kvaddr) \
+ kmap_to_page(kvaddr)
+
+#endif
#define flush_cache_kmaps() do { } while (0)
Index: linux-stable/arch/x86/mm/highmem_32.c
===================================================================
--- linux-stable.orig/arch/x86/mm/highmem_32.c
+++ linux-stable/arch/x86/mm/highmem_32.c
@@ -21,6 +21,17 @@ void kunmap(struct page *page)
}
EXPORT_SYMBOL(kunmap);
+#ifdef CONFIG_PREEMPT_RT_FULL
+void *__kmap_prot(struct page *page, pgprot_t prot)
+{
+ might_sleep();
+ if (!PageHighMem(page))
+ return page_address(page);
+ return kmap_high_prot(page, prot);
+}
+#endif
+
+#ifndef CONFIG_PREEMPT_RT_FULL
/*
* kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
* no global lock is needed and because the kmap code must perform a global TLB
@@ -115,6 +126,7 @@ struct page *kmap_atomic_to_page(void *p
return pte_page(*pte);
}
EXPORT_SYMBOL(kmap_atomic_to_page);
+#endif
void __init set_highmem_pages_init(void)
{
Index: linux-stable/include/linux/highmem.h
===================================================================
--- linux-stable.orig/include/linux/highmem.h
+++ linux-stable/include/linux/highmem.h
@@ -59,6 +59,8 @@ static inline void *kmap(struct page *pa
return page_address(page);
}
+#define __kmap_prot(page, prot) kmap(page)
+
static inline void kunmap(struct page *page)
{
}
Index: linux-stable/mm/highmem.c
===================================================================
--- linux-stable.orig/mm/highmem.c
+++ linux-stable/mm/highmem.c
@@ -157,7 +157,7 @@ void kmap_flush_unused(void)
unlock_kmap();
}
-static inline unsigned long map_new_virtual(struct page *page)
+static inline unsigned long map_new_virtual(struct page *page, pgprot_t prot)
{
unsigned long vaddr;
int count;
@@ -199,7 +199,7 @@ start:
}
vaddr = PKMAP_ADDR(last_pkmap_nr);
set_pte_at(&init_mm, vaddr,
- &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
+ &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, prot));
pkmap_count[last_pkmap_nr] = 1;
set_page_address(page, (void *)vaddr);
@@ -215,7 +215,7 @@ start:
*
* We cannot call this from interrupts, as it may block.
*/
-void *kmap_high(struct page *page)
+void *kmap_high_prot(struct page *page, pgprot_t prot)
{
unsigned long vaddr;
@@ -226,13 +226,26 @@ void *kmap_high(struct page *page)
lock_kmap();
vaddr = (unsigned long)page_address(page);
if (!vaddr)
- vaddr = map_new_virtual(page);
+ vaddr = map_new_virtual(page, prot);
pkmap_count[PKMAP_NR(vaddr)]++;
BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
unlock_kmap();
return (void*) vaddr;
}
+EXPORT_SYMBOL(kmap_high_prot);
+/**
+ * kmap_high - map a highmem page into memory
+ * @page: &struct page to map
+ *
+ * Returns the page's virtual memory address.
+ *
+ * We cannot call this from interrupts, as it may block.
+ */
+void *kmap_high(struct page *page)
+{
+ return kmap_high_prot(page, kmap_prot);
+}
EXPORT_SYMBOL(kmap_high);
#ifdef ARCH_NEEDS_KMAP_HIGH_GET