patches-3.6.11-rt29.tar.xz

md5sum:
b2b735c14c8613620bd00527dad06b58  patches-3.6.11-rt29.tar.xz

Announce:
  ------------------
 Dear RT Folks,

 I'm pleased to announce the 3.6.11-rt29 release.

 Changes since 3.6.11-rt28:

    1) Really fix the RT highmem implementation on x86 this time. The
       issue I was seeing with kmap_atomic and friends actually
       occurred when CONFIG_HIGHMEM was disabled: x86_32 uses the
       atomic maps for io_mapping_map_atomic_wc() even when
       CONFIG_HIGHMEM is off.

    2) Modify the kmap_atomic per-thread storage mechanism to reduce
       the code in switch_to().

    3) Rewrite the RT highmem support for ARM to use the same
       kmap_atomic switch mechanism as x86_32.

 This is probably the last release for 3.6 from my side. Steven might
 keep it maintained until 3.8-rt stabilizes, but that's not yet
 decided.

 The delta patch against 3.6.11-rt28 is appended below and can be found
 here:

   http://www.kernel.org/pub/linux/kernel/projects/rt/3.6/incr/patch-3.6.11-rt28-rt29.patch.xz

 The RT patch against 3.6.11 can be found here:

   http://www.kernel.org/pub/linux/kernel/projects/rt/3.6/patch-3.6.11-rt29.patch.xz

 The split quilt queue is available at:

   http://www.kernel.org/pub/linux/kernel/projects/rt/3.6/patches-3.6.11-rt29.tar.xz

 Enjoy,

 	tglx

 [Delta patch snipped]
 ------------------

http://marc.info/?l=linux-rt-users&m=136076482920178&w=2
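
For reference, the net effect of the three changes is that a task's
outstanding kmap_atomic mappings now live directly in its task_struct
and are torn down and replayed on every context switch. A condensed
sketch of the mechanism, assembled from the patches below (x86_32
flavor; not a standalone file -- the real code is spread over
include/linux/highmem.h, include/linux/sched.h and
arch/x86/kernel/process_32.c):

/* task_struct carries the kmap stack directly on PREEMPT_RT_FULL: */
struct task_struct {
	/* ... */
	int kmap_idx;			/* depth of the kmap_atomic stack */
	pte_t kmap_pte[KM_TYPE_NR];	/* saved pte of each nested map */
	/* ... */
};

/* Index management needs no per-cpu variable on RT: */
static inline int kmap_atomic_idx_push(void)
{
	return current->kmap_idx++;
}

/* kmap_atomic_prot() records the pte before installing it: */
	type = kmap_atomic_idx_push();
	idx  = type + KM_TYPE_NR * smp_processor_id();
	current->kmap_pte[type] = pte;
	set_pte(kmap_pte - idx, pte);

/* On context switch the per-cpu fixmap slots are simply replayed: */
static void switch_kmaps(struct task_struct *prev_p,
			 struct task_struct *next_p)
{
	int i;

	/* Tear down @prev_p's still-active atomic mappings ... */
	for (i = 0; i < prev_p->kmap_idx; i++) {
		int idx = i + KM_TYPE_NR * smp_processor_id();

		kpte_clear_flush(kmap_pte - idx,
				 __fix_to_virt(FIX_KMAP_BEGIN + idx));
	}
	/* ... and reinstall @next_p's mappings from its saved ptes. */
	for (i = 0; i < next_p->kmap_idx; i++) {
		int idx = i + KM_TYPE_NR * smp_processor_id();

		set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
	}
}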

Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
diff --git a/arm-enable-highmem-for-rt.patch b/arm-enable-highmem-for-rt.patch
new file mode 100644
index 0000000..0653dcc
--- /dev/null
+++ b/arm-enable-highmem-for-rt.patch
@@ -0,0 +1,137 @@
+Subject: arm-enable-highmem-for-rt.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 13 Feb 2013 11:03:11 +0100
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/arm/Kconfig                 |    2 +-
+ arch/arm/include/asm/switch_to.h |    9 +++++++++
+ arch/arm/mm/highmem.c            |   37 +++++++++++++++++++++++++++++++++++--
+ include/linux/highmem.h          |    1 +
+ 4 files changed, 46 insertions(+), 3 deletions(-)
+
+Index: linux-stable/arch/arm/Kconfig
+===================================================================
+--- linux-stable.orig/arch/arm/Kconfig
++++ linux-stable/arch/arm/Kconfig
+@@ -1749,7 +1749,7 @@ config HAVE_ARCH_PFN_VALID
+ 
+ config HIGHMEM
+ 	bool "High Memory Support"
+-	depends on MMU && !PREEMPT_RT_FULL
++	depends on MMU
+ 	help
+ 	  The address space of ARM processors is only 4 Gigabytes large
+ 	  and it has to accommodate user address space, kernel address
+Index: linux-stable/arch/arm/include/asm/switch_to.h
+===================================================================
+--- linux-stable.orig/arch/arm/include/asm/switch_to.h
++++ linux-stable/arch/arm/include/asm/switch_to.h
+@@ -3,6 +3,14 @@
+ 
+ #include <linux/thread_info.h>
+ 
++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
++#else
++static inline void
++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
++#endif
++
++
+ /*
+  * switch_to(prev, next) should switch from task `prev' to `next'
+  * `prev' will never be the same as `next'.  schedule() itself
+@@ -12,6 +20,7 @@ extern struct task_struct *__switch_to(s
+ 
+ #define switch_to(prev,next,last)					\
+ do {									\
++	switch_kmaps(prev, next);					\
+ 	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
+ } while (0)
+ 
+Index: linux-stable/arch/arm/mm/highmem.c
+===================================================================
+--- linux-stable.orig/arch/arm/mm/highmem.c
++++ linux-stable/arch/arm/mm/highmem.c
+@@ -38,6 +38,7 @@ EXPORT_SYMBOL(kunmap);
+ 
+ void *kmap_atomic(struct page *page)
+ {
++	pte_t pte = mk_pte(page, kmap_prot);
+ 	unsigned int idx;
+ 	unsigned long vaddr;
+ 	void *kmap;
+@@ -76,7 +77,10 @@ void *kmap_atomic(struct page *page)
+ 	 * in place, so the contained TLB flush ensures the TLB is updated
+ 	 * with the new mapping.
+ 	 */
+-	set_top_pte(vaddr, mk_pte(page, kmap_prot));
++#ifdef CONFIG_PREEMPT_RT_FULL
++	current->kmap_pte[type] = pte;
++#endif
++	set_top_pte(vaddr, pte);
+ 
+ 	return (void *)vaddr;
+ }
+@@ -110,6 +114,7 @@ EXPORT_SYMBOL(__kunmap_atomic);
+ 
+ void *kmap_atomic_pfn(unsigned long pfn)
+ {
++	pte_t pte = pfn_pte(pfn, kmap_prot);
+ 	unsigned long vaddr;
+ 	int idx, type;
+ 
+@@ -121,7 +126,10 @@ void *kmap_atomic_pfn(unsigned long pfn)
+ #ifdef CONFIG_DEBUG_HIGHMEM
+ 	BUG_ON(!pte_none(get_top_pte(vaddr)));
+ #endif
+-	set_top_pte(vaddr, pfn_pte(pfn, kmap_prot));
++#ifdef CONFIG_PREEMPT_RT_FULL
++	current->kmap_pte[type] = pte;
++#endif
++	set_top_pte(vaddr, pte);
+ 
+ 	return (void *)vaddr;
+ }
+@@ -135,3 +143,28 @@ struct page *kmap_atomic_to_page(const v
+ 
+ 	return pte_page(get_top_pte(vaddr));
+ }
++
++#if defined CONFIG_PREEMPT_RT_FULL
++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
++{
++	int i;
++
++	/*
++	 * Clear @prev's kmap_atomic mappings
++	 */
++	for (i = 0; i < prev_p->kmap_idx; i++) {
++		int idx = i + KM_TYPE_NR * smp_processor_id();
++
++		set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx), __pte(0));
++	}
++	/*
++	 * Restore @next_p's kmap_atomic mappings
++	 */
++	for (i = 0; i < next_p->kmap_idx; i++) {
++		int idx = i + KM_TYPE_NR * smp_processor_id();
++
++		set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx),
++			    next_p->kmap_pte[i]);
++	}
++}
++#endif
+Index: linux-stable/include/linux/highmem.h
+===================================================================
+--- linux-stable.orig/include/linux/highmem.h
++++ linux-stable/include/linux/highmem.h
+@@ -7,6 +7,7 @@
+ #include <linux/mm.h>
+ #include <linux/uaccess.h>
+ #include <linux/hardirq.h>
++#include <linux/sched.h>
+ 
+ #include <asm/cacheflush.h>
+ 
diff --git a/arm-highmem-use-kmap-on-rt.patch b/arm-highmem-use-kmap-on-rt.patch
deleted file mode 100644
index 34b8fb0..0000000
--- a/arm-highmem-use-kmap-on-rt.patch
+++ /dev/null
@@ -1,71 +0,0 @@
-Subject: arm: highmem: Use kmap for PREEMPT_RT
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Thu, 11 Oct 2012 22:59:24 +0200
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- arch/arm/Kconfig               |    2 +-
- arch/arm/include/asm/highmem.h |   15 +++++++++++++++
- arch/arm/mm/highmem.c          |    2 ++
- 3 files changed, 18 insertions(+), 1 deletion(-)
-
-Index: linux-stable/arch/arm/Kconfig
-===================================================================
---- linux-stable.orig/arch/arm/Kconfig
-+++ linux-stable/arch/arm/Kconfig
-@@ -1749,7 +1749,7 @@ config HAVE_ARCH_PFN_VALID
- 
- config HIGHMEM
- 	bool "High Memory Support"
--	depends on MMU && !PREEMPT_RT_FULL
-+	depends on MMU
- 	help
- 	  The address space of ARM processors is only 4 Gigabytes large
- 	  and it has to accommodate user address space, kernel address
-Index: linux-stable/arch/arm/include/asm/highmem.h
-===================================================================
---- linux-stable.orig/arch/arm/include/asm/highmem.h
-+++ linux-stable/arch/arm/include/asm/highmem.h
-@@ -57,10 +57,25 @@ static inline void *kmap_high_get(struct
- #ifdef CONFIG_HIGHMEM
- extern void *kmap(struct page *page);
- extern void kunmap(struct page *page);
-+# ifndef CONFIG_PREEMPT_RT_FULL
- extern void *kmap_atomic(struct page *page);
- extern void __kunmap_atomic(void *kvaddr);
- extern void *kmap_atomic_pfn(unsigned long pfn);
- extern struct page *kmap_atomic_to_page(const void *ptr);
-+# else
-+#  define kmap_atomic(page)	\
-+	({ pagefault_disable(); kmap(page); })
-+
-+#  define kmap_atomic_pfn(pfn)	\
-+	({ pagefault_disable(); kmap(pfn_to_page(pfn)) })
-+
-+#  define __kunmap_atomic(kvaddr)	\
-+	do { kunmap(kmap_to_page(kvaddr)); pagefault_enable(); } while(0)
-+
-+#  define kmap_atomic_to_page(kvaddr)	\
-+	kmap_to_page(kvaddr)
-+
-+# endif
- #endif
- 
- #endif
-Index: linux-stable/arch/arm/mm/highmem.c
-===================================================================
---- linux-stable.orig/arch/arm/mm/highmem.c
-+++ linux-stable/arch/arm/mm/highmem.c
-@@ -36,6 +36,7 @@ void kunmap(struct page *page)
- }
- EXPORT_SYMBOL(kunmap);
- 
-+#ifndef CONFIG_PREEMPT_RT_FULL
- void *kmap_atomic(struct page *page)
- {
- 	unsigned int idx;
-@@ -135,3 +136,4 @@ struct page *kmap_atomic_to_page(const v
- 
- 	return pte_page(get_top_pte(vaddr));
- }
-+#endif
diff --git a/highmem-rt-store-per-task-ptes-directly.patch b/highmem-rt-store-per-task-ptes-directly.patch
new file mode 100644
index 0000000..f8101f8
--- /dev/null
+++ b/highmem-rt-store-per-task-ptes-directly.patch
@@ -0,0 +1,231 @@
+Subject: highmem: Store ptes right away in the task struct
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 12 Feb 2013 11:32:38 +0100
+
+Get rid of the per cpu variable and store the idx and the pte content
+right away in the task struct. Shortens the context switch code.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/x86/kernel/process_32.c |   63 +++++++++++++++++++------------------------
+ arch/x86/mm/highmem_32.c     |    6 +++-
+ arch/x86/mm/iomap_32.c       |    6 +++-
+ include/linux/highmem.h      |   25 ++++++++++++++---
+ mm/highmem.c                 |    6 ++--
+ 5 files changed, 64 insertions(+), 42 deletions(-)
+
+Index: linux-stable/arch/x86/kernel/process_32.c
+===================================================================
+--- linux-stable.orig/arch/x86/kernel/process_32.c
++++ linux-stable/arch/x86/kernel/process_32.c
+@@ -198,6 +198,34 @@ start_thread(struct pt_regs *regs, unsig
+ }
+ EXPORT_SYMBOL_GPL(start_thread);
+ 
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
++{
++	int i;
++
++	/*
++	 * Clear @prev's kmap_atomic mappings
++	 */
++	for (i = 0; i < prev_p->kmap_idx; i++) {
++		int idx = i + KM_TYPE_NR * smp_processor_id();
++		pte_t *ptep = kmap_pte - idx;
++
++		kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
++	}
++	/*
++	 * Restore @next_p's kmap_atomic mappings
++	 */
++	for (i = 0; i < next_p->kmap_idx; i++) {
++		int idx = i + KM_TYPE_NR * smp_processor_id();
++
++		set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
++	}
++}
++#else
++static inline void
++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
++#endif
++
+ 
+ /*
+  *	switch_to(x,y) should switch tasks from x to y.
+@@ -277,40 +305,7 @@ __switch_to(struct task_struct *prev_p, 
+ 		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
+ 		__switch_to_xtra(prev_p, next_p, tss);
+ 
+-#ifdef CONFIG_PREEMPT_RT_FULL
+-	/*
+-	 * Save @prev's kmap_atomic stack
+-	 */
+-	prev_p->kmap_idx = __this_cpu_read(__kmap_atomic_idx);
+-	if (unlikely(prev_p->kmap_idx)) {
+-		int i;
+-
+-		for (i = 0; i < prev_p->kmap_idx; i++) {
+-			int idx = i + KM_TYPE_NR * smp_processor_id();
+-
+-			pte_t *ptep = kmap_pte - idx;
+-			prev_p->kmap_pte[i] = *ptep;
+-			kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
+-		}
+-
+-		__this_cpu_write(__kmap_atomic_idx, 0);
+-	}
+-
+-	/*
+-	 * Restore @next_p's kmap_atomic stack
+-	 */
+-	if (unlikely(next_p->kmap_idx)) {
+-		int i;
+-
+-		__this_cpu_write(__kmap_atomic_idx, next_p->kmap_idx);
+-
+-		for (i = 0; i < next_p->kmap_idx; i++) {
+-			int idx = i + KM_TYPE_NR * smp_processor_id();
+-
+-			set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
+-		}
+-	}
+-#endif
++	switch_kmaps(prev_p, next_p);
+ 
+ 	/*
+ 	 * Leave lazy mode, flushing any hypercalls made here.
+Index: linux-stable/arch/x86/mm/highmem_32.c
+===================================================================
+--- linux-stable.orig/arch/x86/mm/highmem_32.c
++++ linux-stable/arch/x86/mm/highmem_32.c
+@@ -31,6 +31,7 @@ EXPORT_SYMBOL(kunmap);
+  */
+ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+ {
++	pte_t pte = mk_pte(page, prot);
+ 	unsigned long vaddr;
+ 	int idx, type;
+ 
+@@ -44,7 +45,10 @@ void *kmap_atomic_prot(struct page *page
+ 	idx = type + KM_TYPE_NR*smp_processor_id();
+ 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ 	WARN_ON(!pte_none(*(kmap_pte-idx)));
+-	set_pte(kmap_pte-idx, mk_pte(page, prot));
++#ifdef CONFIG_PREEMPT_RT_FULL
++	current->kmap_pte[type] = pte;
++#endif
++	set_pte(kmap_pte-idx, pte);
+ 	arch_flush_lazy_mmu_mode();
+ 
+ 	return (void *)vaddr;
+Index: linux-stable/arch/x86/mm/iomap_32.c
+===================================================================
+--- linux-stable.orig/arch/x86/mm/iomap_32.c
++++ linux-stable/arch/x86/mm/iomap_32.c
+@@ -56,6 +56,7 @@ EXPORT_SYMBOL_GPL(iomap_free);
+ 
+ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
+ {
++	pte_t pte = pfn_pte(pfn, prot);
+ 	unsigned long vaddr;
+ 	int idx, type;
+ 
+@@ -64,7 +65,10 @@ void *kmap_atomic_prot_pfn(unsigned long
+ 	type = kmap_atomic_idx_push();
+ 	idx = type + KM_TYPE_NR * smp_processor_id();
+ 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+-	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
++#ifdef CONFIG_PREEMPT_RT_FULL
++	current->kmap_pte[type] = pte;
++#endif
++	set_pte(kmap_pte - idx, pte);
+ 	arch_flush_lazy_mmu_mode();
+ 
+ 	return (void *)vaddr;
+Index: linux-stable/include/linux/highmem.h
+===================================================================
+--- linux-stable.orig/include/linux/highmem.h
++++ linux-stable/include/linux/highmem.h
+@@ -85,32 +85,49 @@ static inline void __kunmap_atomic(void 
+ 
+ #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ DECLARE_PER_CPU(int, __kmap_atomic_idx);
++#endif
+ 
+ static inline int kmap_atomic_idx_push(void)
+ {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ 	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
+ 
+-#ifdef CONFIG_DEBUG_HIGHMEM
++# ifdef CONFIG_DEBUG_HIGHMEM
+ 	WARN_ON_ONCE(in_irq() && !irqs_disabled());
+ 	BUG_ON(idx > KM_TYPE_NR);
+-#endif
++# endif
+ 	return idx;
++#else
++	return current->kmap_idx++;
++#endif
+ }
+ 
+ static inline int kmap_atomic_idx(void)
+ {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ 	return __this_cpu_read(__kmap_atomic_idx) - 1;
++#else
++	return current->kmap_idx - 1;
++#endif
+ }
+ 
+ static inline void kmap_atomic_idx_pop(void)
+ {
+-#ifdef CONFIG_DEBUG_HIGHMEM
++#ifndef CONFIG_PREEMPT_RT_FULL
++# ifdef CONFIG_DEBUG_HIGHMEM
+ 	int idx = __this_cpu_dec_return(__kmap_atomic_idx);
+ 
+ 	BUG_ON(idx < 0);
+-#else
++# else
+ 	__this_cpu_dec(__kmap_atomic_idx);
++# endif
++#else
++	current->kmap_idx--;
++# ifdef CONFIG_DEBUG_HIGHMEM
++	BUG_ON(current->kmap_idx < 0);
++# endif
+ #endif
+ }
+ 
+Index: linux-stable/mm/highmem.c
+===================================================================
+--- linux-stable.orig/mm/highmem.c
++++ linux-stable/mm/highmem.c
+@@ -29,10 +29,11 @@
+ #include <linux/kgdb.h>
+ #include <asm/tlbflush.h>
+ 
+-
++#ifndef CONFIG_PREEMPT_RT_FULL
+ #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+ DEFINE_PER_CPU(int, __kmap_atomic_idx);
+ #endif
++#endif
+ 
+ /*
+  * Virtual_count is not a pure "count".
+@@ -47,8 +48,9 @@ DEFINE_PER_CPU(int, __kmap_atomic_idx);
+ unsigned long totalhigh_pages __read_mostly;
+ EXPORT_SYMBOL(totalhigh_pages);
+ 
+-
++#ifndef CONFIG_PREEMPT_RT_FULL
+ EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
++#endif
+ 
+ unsigned int nr_free_highpages (void)
+ {
diff --git a/localversion.patch b/localversion.patch
index cc0efea..3650b62 100644
--- a/localversion.patch
+++ b/localversion.patch
@@ -14,4 +14,4 @@
 --- /dev/null
 +++ linux-stable/localversion-rt
 @@ -0,0 +1 @@
-+-rt28
++-rt29
diff --git a/series b/series
index 35511cd..767e63c 100644
--- a/series
+++ b/series
@@ -631,10 +631,11 @@
 mm-swap-fix-initialization.patch
 wait-simple-rework-for-completions.patch
 completion-use-simple-wait-queues.patch
-
-x86-highmem-make-it-work.patch
-arm-highmem-use-kmap-on-rt.patch
-
 mm-make-pagefault-dis-enable-export-symbol.patch
+
+x86_32-use-kmap-switch-for-non-highmem-as-well.patch
+highmem-rt-store-per-task-ptes-directly.patch
+arm-enable-highmem-for-rt.patch
+
 kconfig-disable-a-few-options-rt.patch
 kconfig-preempt-rt-full.patch
diff --git a/x86-highmem-make-it-work.patch b/x86-highmem-make-it-work.patch
deleted file mode 100644
index 5c8bc94..0000000
--- a/x86-highmem-make-it-work.patch
+++ /dev/null
@@ -1,160 +0,0 @@
-Subject: x86: highmem: Make it work on RT really
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Tue, 08 Jan 2013 12:50:19 +0100
-
-It had been enabled quite some time, but never really worked.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- arch/x86/include/asm/highmem.h |   23 +++++++++++++++++++++++
- arch/x86/mm/highmem_32.c       |   12 ++++++++++++
- include/linux/highmem.h        |    2 ++
- mm/highmem.c                   |   21 +++++++++++++++++----
- 4 files changed, 54 insertions(+), 4 deletions(-)
-
-Index: linux-stable/arch/x86/include/asm/highmem.h
-===================================================================
---- linux-stable.orig/arch/x86/include/asm/highmem.h
-+++ linux-stable/arch/x86/include/asm/highmem.h
-@@ -56,16 +56,39 @@ extern unsigned long highstart_pfn, high
- 
- extern void *kmap_high(struct page *page);
- extern void kunmap_high(struct page *page);
-+extern void *kmap_high_prot(struct page *page, pgprot_t prot);
- 
- void *kmap(struct page *page);
- void kunmap(struct page *page);
- 
-+#ifndef CONFIG_PREEMPT_RT_FULL
- void *kmap_atomic_prot(struct page *page, pgprot_t prot);
- void *kmap_atomic(struct page *page);
- void __kunmap_atomic(void *kvaddr);
- void *kmap_atomic_pfn(unsigned long pfn);
- void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
- struct page *kmap_atomic_to_page(void *ptr);
-+#else
-+void *__kmap_prot(struct page *page, pgprot_t prot);
-+# define kmap_atomic(page)			\
-+	({ pagefault_disable(); kmap(page); })
-+
-+# define kmap_atomic_pfn(pfn)			\
-+	({ pagefault_disable(); kmap(pfn_to_page(pfn)) })
-+
-+# define __kunmap_atomic(kvaddr)		\
-+	do { kunmap(kmap_to_page(kvaddr)); pagefault_enable(); } while(0)
-+
-+# define kmap_atomic_prot(page, prot)		\
-+	({ pagefault_disable(); __kmap_prot(page, prot); })
-+
-+# define kmap_atomic_prot_pfn(pfn, prot)	\
-+	({ pagefault_disable(); __kmap_prot(pfn_to_page(pfn), prot); })
-+
-+# define kmap_atomic_to_page(kvaddr)		\
-+	kmap_to_page(kvaddr)
-+
-+#endif
- 
- #define flush_cache_kmaps()	do { } while (0)
- 
-Index: linux-stable/arch/x86/mm/highmem_32.c
-===================================================================
---- linux-stable.orig/arch/x86/mm/highmem_32.c
-+++ linux-stable/arch/x86/mm/highmem_32.c
-@@ -21,6 +21,17 @@ void kunmap(struct page *page)
- }
- EXPORT_SYMBOL(kunmap);
- 
-+#ifdef CONFIF_PREEMPT_RT_FULL
-+void *__kmap_prot(struct page *page, pgprot_t prot)
-+{
-+	might_sleep();
-+	if (!PageHighMem(page))
-+		return page_address(page);
-+	return kmap_high_prot(page, prot);
-+}
-+#endif
-+
-+#ifndef CONFIG_PREEMPT_RT_FULL
- /*
-  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
-  * no global lock is needed and because the kmap code must perform a global TLB
-@@ -115,6 +126,7 @@ struct page *kmap_atomic_to_page(void *p
- 	return pte_page(*pte);
- }
- EXPORT_SYMBOL(kmap_atomic_to_page);
-+#endif
- 
- void __init set_highmem_pages_init(void)
- {
-Index: linux-stable/include/linux/highmem.h
-===================================================================
---- linux-stable.orig/include/linux/highmem.h
-+++ linux-stable/include/linux/highmem.h
-@@ -59,6 +59,8 @@ static inline void *kmap(struct page *pa
- 	return page_address(page);
- }
- 
-+#define __kmap_prot(page, prot)	kmap(page)
-+
- static inline void kunmap(struct page *page)
- {
- }
-Index: linux-stable/mm/highmem.c
-===================================================================
---- linux-stable.orig/mm/highmem.c
-+++ linux-stable/mm/highmem.c
-@@ -157,7 +157,7 @@ void kmap_flush_unused(void)
- 	unlock_kmap();
- }
- 
--static inline unsigned long map_new_virtual(struct page *page)
-+static inline unsigned long map_new_virtual(struct page *page, pgprot_t prot)
- {
- 	unsigned long vaddr;
- 	int count;
-@@ -199,7 +199,7 @@ start:
- 	}
- 	vaddr = PKMAP_ADDR(last_pkmap_nr);
- 	set_pte_at(&init_mm, vaddr,
--		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
-+		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, prot));
- 
- 	pkmap_count[last_pkmap_nr] = 1;
- 	set_page_address(page, (void *)vaddr);
-@@ -215,7 +215,7 @@ start:
-  *
-  * We cannot call this from interrupts, as it may block.
-  */
--void *kmap_high(struct page *page)
-+void *kmap_high_prot(struct page *page, pgprot_t prot)
- {
- 	unsigned long vaddr;
- 
-@@ -226,13 +226,26 @@ void *kmap_high(struct page *page)
- 	lock_kmap();
- 	vaddr = (unsigned long)page_address(page);
- 	if (!vaddr)
--		vaddr = map_new_virtual(page);
-+		vaddr = map_new_virtual(page, prot);
- 	pkmap_count[PKMAP_NR(vaddr)]++;
- 	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
- 	unlock_kmap();
- 	return (void*) vaddr;
- }
-+EXPORT_SYMBOL(kmap_high_prot);
- 
-+/**
-+ * kmap_high - map a highmem page into memory
-+ * @page: &struct page to map
-+ *
-+ * Returns the page's virtual memory address.
-+ *
-+ * We cannot call this from interrupts, as it may block.
-+ */
-+void *kmap_high(struct page *page)
-+{
-+	return kmap_high_prot(page, kmap_prot);
-+}
- EXPORT_SYMBOL(kmap_high);
- 
- #ifdef ARCH_NEEDS_KMAP_HIGH_GET
diff --git a/x86_32-use-kmap-switch-for-non-highmem-as-well.patch b/x86_32-use-kmap-switch-for-non-highmem-as-well.patch
new file mode 100644
index 0000000..73d111b
--- /dev/null
+++ b/x86_32-use-kmap-switch-for-non-highmem-as-well.patch
@@ -0,0 +1,44 @@
+Subject: x86/32: Use kmap switch for non highmem as well
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 13 Feb 2013 10:59:53 +0100
+
+Even with CONFIG_HIGHMEM=n we need to take care of the "atomic"
+mappings which are installed via iomap_atomic.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable-rt@vger.kernel.org
+---
+ arch/x86/kernel/process_32.c |    2 +-
+ include/linux/sched.h        |    4 +++-
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+Index: linux-stable/arch/x86/kernel/process_32.c
+===================================================================
+--- linux-stable.orig/arch/x86/kernel/process_32.c
++++ linux-stable/arch/x86/kernel/process_32.c
+@@ -277,7 +277,7 @@ __switch_to(struct task_struct *prev_p, 
+ 		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
+ 		__switch_to_xtra(prev_p, next_p, tss);
+ 
+-#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
++#ifdef CONFIG_PREEMPT_RT_FULL
+ 	/*
+ 	 * Save @prev's kmap_atomic stack
+ 	 */
+Index: linux-stable/include/linux/sched.h
+===================================================================
+--- linux-stable.orig/include/linux/sched.h
++++ linux-stable/include/linux/sched.h
+@@ -1621,9 +1621,11 @@ struct task_struct {
+ 	int softirq_nestcnt;
+ 	unsigned int softirqs_raised;
+ #endif
+-#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
++#ifdef CONFIG_PREEMPT_RT_FULL
++# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32
+ 	int kmap_idx;
+ 	pte_t kmap_pte[KM_TYPE_NR];
++# endif
+ #endif
+ 
+ #ifdef CONFIG_DEBUG_PREEMPT