blob: 34b8fb05c173a6eb5e62baa5dcc7e801b24402c0 [file] [log] [blame]
Subject: arm: highmem: Use kmap for PREEMPT_RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 11 Oct 2012 22:59:24 +0200
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/arm/Kconfig | 2 +-
arch/arm/include/asm/highmem.h | 15 +++++++++++++++
arch/arm/mm/highmem.c | 2 ++
3 files changed, 18 insertions(+), 1 deletion(-)
Index: linux-stable/arch/arm/Kconfig
===================================================================
--- linux-stable.orig/arch/arm/Kconfig
+++ linux-stable/arch/arm/Kconfig
@@ -1749,7 +1749,7 @@ config HAVE_ARCH_PFN_VALID
config HIGHMEM
bool "High Memory Support"
- depends on MMU && !PREEMPT_RT_FULL
+ depends on MMU
help
The address space of ARM processors is only 4 Gigabytes large
and it has to accommodate user address space, kernel address
Index: linux-stable/arch/arm/include/asm/highmem.h
===================================================================
--- linux-stable.orig/arch/arm/include/asm/highmem.h
+++ linux-stable/arch/arm/include/asm/highmem.h
@@ -57,10 +57,25 @@ static inline void *kmap_high_get(struct
#ifdef CONFIG_HIGHMEM
extern void *kmap(struct page *page);
extern void kunmap(struct page *page);
+# ifndef CONFIG_PREEMPT_RT_FULL
extern void *kmap_atomic(struct page *page);
extern void __kunmap_atomic(void *kvaddr);
extern void *kmap_atomic_pfn(unsigned long pfn);
extern struct page *kmap_atomic_to_page(const void *ptr);
+# else
+# define kmap_atomic(page) \
+ ({ pagefault_disable(); kmap(page); })
+
+# define kmap_atomic_pfn(pfn) \
+ ({ pagefault_disable(); kmap(pfn_to_page(pfn)) })
+
+#  define __kunmap_atomic(kvaddr)	\
+	do { kunmap(kmap_to_page(kvaddr)); pagefault_enable(); } while (0)
+
+# define kmap_atomic_to_page(kvaddr) \
+ kmap_to_page(kvaddr)
+
+# endif
#endif
#endif
Index: linux-stable/arch/arm/mm/highmem.c
===================================================================
--- linux-stable.orig/arch/arm/mm/highmem.c
+++ linux-stable/arch/arm/mm/highmem.c
@@ -36,6 +36,7 @@ void kunmap(struct page *page)
}
EXPORT_SYMBOL(kunmap);
+#ifndef CONFIG_PREEMPT_RT_FULL
void *kmap_atomic(struct page *page)
{
unsigned int idx;
@@ -135,3 +136,4 @@ struct page *kmap_atomic_to_page(const v
return pte_page(get_top_pte(vaddr));
}
+#endif