patches-3.6.11-rt31.tar.xz

 *** NOTE ***

     3.6-rt, as of this commit, transitions from development mode to
     sustaining/stable mode (see announce details below) and hence
     this will be the last commit made to this patch repository!

     For further 3.6-rt updates, please see:
	http://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/

 ************

md5sum:
f37a3f5434dab76f9f8ae5cee07fdfdc  patches-3.6.11-rt31.tar.xz

Announce:
 --------------
 Dear RT Folks,

 I'm pleased to announce the 3.6.11-rt31 release.

 Changes since 3.6.11-rt30:

    1) Highmem fixes (Sebastian Siewior)

    2) Futex locking fix

    3) Add a missing timer check (Zhao Hongjiang)

    4) Make simple wait exports EXPORT_SYMBOL. As much as I hate it,
       but we have the policy that we do not break out of tree stuff
       when we move an existing interface to some other internal
       infrastructure. For further information see:
       http://www.youtube.com/watch?v=_36yNWw_07g

    5) irq poll fixup (Paul Gortmaker)

 All credits for this update go to Sebastian Siewior, AKA bigeasy, who
 took up the work to get this out. He's on my company's engineering
 team and I hope you trust him as much as I do.

 This is the last release for the 3.6 kernel from my side. Steven is
 going to take over and maintain it until 3.8-rt is stabilized.

 The delta patch against 3.6.11-rt30 is appended below and can be found
 here:

   http://www.kernel.org/pub/linux/kernel/projects/rt/3.6/incr/patch-3.6.11-rt30-rt31.patch.xz

 The RT patch against 3.6.11 can be found here:

   http://www.kernel.org/pub/linux/kernel/projects/rt/3.6/patch-3.6.11-rt31.patch.xz

 The split quilt queue is available at:

   http://www.kernel.org/pub/linux/kernel/projects/rt/3.6/patches-3.6.11-rt31.tar.xz

 Enjoy,

         tglx
 --------------

Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
diff --git a/0001-x86-highmem-close-race-between-clear-set-ptes.patch b/0001-x86-highmem-close-race-between-clear-set-ptes.patch
new file mode 100644
index 0000000..bd193cc
--- /dev/null
+++ b/0001-x86-highmem-close-race-between-clear-set-ptes.patch
@@ -0,0 +1,89 @@
+From aae299cf8b01326162cd07e2b9818d2e9b2daa3b Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 11 Mar 2013 17:08:49 +0100
+Subject: [PATCH 1/6] x86/highmem: close race between clear/set ptes
+
+If the task is interrupted during a kmap_atomic() / kunmap_atomic() (or
+the same code in kmap_atomic_prot_pfn() and its counter part) it may
+race against switch_kmaps() and trigger a false positive warning.
+In kmap_atomic_prot() we first grab a new index via
+kmap_atomic_idx_push() and then check if the slot is already in use.
+
+If we get interrupted after taking the index then switch_kmaps() will
+assume that the index is in use and write the old entry from the
+kmap_pte member. Since __kunmap_atomic() never invalidates this member
+it might write an old entry and now it looks like this entry is already
+in use and a WARN_ON() is seen.
+This patch sets the shadow pte entry to 0 so pte_none() doesn't trigger
+a warning.
+While here, I add a BUG_ON() to kmap_atomic_idx_push() which is also in
+the non-RT case.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/kernel/process_32.c |    3 ++-
+ arch/x86/mm/highmem_32.c     |    3 +++
+ arch/x86/mm/iomap_32.c       |    3 +++
+ include/linux/highmem.h      |    4 +++-
+ 4 files changed, 11 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
+index 33e5d14..ebcee60 100644
+--- a/arch/x86/kernel/process_32.c
++++ b/arch/x86/kernel/process_32.c
+@@ -218,7 +218,8 @@ static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
+ 	for (i = 0; i < next_p->kmap_idx; i++) {
+ 		int idx = i + KM_TYPE_NR * smp_processor_id();
+ 
+-		set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
++		if (!pte_none(next_p->kmap_pte[i]))
++			set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
+ 	}
+ }
+ #else
+diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
+index 0935789..6e5ac8b 100644
+--- a/arch/x86/mm/highmem_32.c
++++ b/arch/x86/mm/highmem_32.c
+@@ -91,6 +91,9 @@ void __kunmap_atomic(void *kvaddr)
+ 		 * is a bad idea also, in case the page changes cacheability
+ 		 * attributes or becomes a protected page in a hypervisor.
+ 		 */
++#ifdef CONFIG_PREEMPT_RT_FULL
++		current->kmap_pte[type] = __pte(0);
++#endif
+ 		kpte_clear_flush(kmap_pte-idx, vaddr);
+ 		kmap_atomic_idx_pop();
+ 		arch_flush_lazy_mmu_mode();
+diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
+index 4e1d4d5..0c953e3 100644
+--- a/arch/x86/mm/iomap_32.c
++++ b/arch/x86/mm/iomap_32.c
+@@ -114,6 +114,9 @@ iounmap_atomic(void __iomem *kvaddr)
+ 		 * is a bad idea also, in case the page changes cacheability
+ 		 * attributes or becomes a protected page in a hypervisor.
+ 		 */
++#ifdef CONFIG_PREEMPT_RT_FULL
++		current->kmap_pte[type] = __pte(0);
++#endif
+ 		kpte_clear_flush(kmap_pte-idx, vaddr);
+ 		kmap_atomic_idx_pop();
+ 	}
+diff --git a/include/linux/highmem.h b/include/linux/highmem.h
+index acdd321..84223de 100644
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -101,7 +101,9 @@ static inline int kmap_atomic_idx_push(void)
+ # endif
+ 	return idx;
+ #else
+-	return current->kmap_idx++;
++	current->kmap_idx++;
++	BUG_ON(current->kmap_idx > KM_TYPE_NR);
++	return current->kmap_idx - 1;
+ #endif
+ }
+ 
+-- 
+1.7.10.4
+
diff --git a/0002-x86-highmem-add-a-already-used-pte-check.patch b/0002-x86-highmem-add-a-already-used-pte-check.patch
new file mode 100644
index 0000000..582c76a
--- /dev/null
+++ b/0002-x86-highmem-add-a-already-used-pte-check.patch
@@ -0,0 +1,28 @@
+From 65513f34449eedb6b84c24a3583266534c1627e4 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 11 Mar 2013 17:09:55 +0100
+Subject: [PATCH 2/6] x86/highmem: add a "already used pte" check
+
+This is a copy from kmap_atomic_prot().
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/mm/iomap_32.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
+index 0c953e3..62377d6 100644
+--- a/arch/x86/mm/iomap_32.c
++++ b/arch/x86/mm/iomap_32.c
+@@ -65,6 +65,8 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
+ 	type = kmap_atomic_idx_push();
+ 	idx = type + KM_TYPE_NR * smp_processor_id();
+ 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++	WARN_ON(!pte_none(*(kmap_pte - idx)));
++
+ #ifdef CONFIG_PREEMPT_RT_FULL
+ 	current->kmap_pte[type] = pte;
+ #endif
+-- 
+1.7.10.4
+
diff --git a/0003-arm-highmem-flush-tlb-on-unmap.patch b/0003-arm-highmem-flush-tlb-on-unmap.patch
new file mode 100644
index 0000000..04cc016
--- /dev/null
+++ b/0003-arm-highmem-flush-tlb-on-unmap.patch
@@ -0,0 +1,33 @@
+From e2ca4d092d9c6e6b07b465b4d81da207bbcc7437 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 11 Mar 2013 21:37:27 +0100
+Subject: [PATCH 3/6] arm/highmem: flush tlb on unmap
+
+The tlb should be flushed on unmap and thus make the mapping entry
+invalid. This is only done in the non-debug case which does not look
+right.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm/mm/highmem.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
+index a654e0c..c8d6d9a 100644
+--- a/arch/arm/mm/highmem.c
++++ b/arch/arm/mm/highmem.c
+@@ -99,10 +99,10 @@ void __kunmap_atomic(void *kvaddr)
+ 			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+ #ifdef CONFIG_DEBUG_HIGHMEM
+ 		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+-		set_top_pte(vaddr, __pte(0));
+ #else
+ 		(void) idx;  /* to kill a warning */
+ #endif
++		set_top_pte(vaddr, __pte(0));
+ 		kmap_atomic_idx_pop();
+ 	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
+ 		/* this address was obtained through kmap_high_get() */
+-- 
+1.7.10.4
+
diff --git a/0004-arm-highmem-close-race-between-clear-set-pte.patch b/0004-arm-highmem-close-race-between-clear-set-pte.patch
new file mode 100644
index 0000000..d74bb37
--- /dev/null
+++ b/0004-arm-highmem-close-race-between-clear-set-pte.patch
@@ -0,0 +1,44 @@
+From 444eb1a699bacfe6d78b1c2234c754734496ef23 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 11 Mar 2013 21:43:53 +0100
+Subject: [PATCH 4/6] arm/highmem: close race between clear/set pte
+
+This patch aims to close the same race in kmap_atomic() /
+kunmap_atomic() vs switch_kmaps() as on x86. In contrast to x86 the
+warning has not been seen and the fix has only been compile tested. The
+pattern is same so the bug should present here as well.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm/mm/highmem.c |    8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
+index c8d6d9a..bd41dd8 100644
+--- a/arch/arm/mm/highmem.c
++++ b/arch/arm/mm/highmem.c
+@@ -97,6 +97,9 @@ void __kunmap_atomic(void *kvaddr)
+ 
+ 		if (cache_is_vivt())
+ 			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
++#ifdef CONFIG_PREEMPT_RT_FULL
++		current->kmap_pte[type] = __pte(0);
++#endif
+ #ifdef CONFIG_DEBUG_HIGHMEM
+ 		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+ #else
+@@ -163,8 +166,9 @@ void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
+ 	for (i = 0; i < next_p->kmap_idx; i++) {
+ 		int idx = i + KM_TYPE_NR * smp_processor_id();
+ 
+-		set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx),
+-			    next_p->kmap_pte[i]);
++		if (!pte_none(next_p->kmap_pte[i]))
++			set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx),
++					next_p->kmap_pte[i]);
+ 	}
+ }
+ #endif
+-- 
+1.7.10.4
+
diff --git a/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch b/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
new file mode 100644
index 0000000..2e7fd94
--- /dev/null
+++ b/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
@@ -0,0 +1,49 @@
+From eef09918aff670a6162d2ae5fe87b393698ef57d Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 1 Mar 2013 11:17:42 +0100
+Subject: [PATCH 5/6] futex: Ensure lock/unlock symetry versus pi_lock and
+ hash bucket lock
+
+In exit_pi_state_list() we have the following locking construct:
+
+   spin_lock(&hb->lock);
+   raw_spin_lock_irq(&curr->pi_lock);
+
+   ...
+   spin_unlock(&hb->lock);
+
+In !RT this works, but on RT the migrate_enable() function which is
+called from spin_unlock() sees atomic context due to the held pi_lock
+and just decrements the migrate_disable_atomic counter of the
+task. Now the next call to migrate_disable() sees the counter being
+negative and issues a warning. That check should be in
+migrate_enable() already.
+
+Fix this by dropping pi_lock before unlocking hb->lock and reaquire
+pi_lock after that again. This is safe as the loop code reevaluates
+head again under the pi_lock.
+
+Reported-by: Yong Zhang <yong.zhang@windriver.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/futex.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 9e26e87..daada3d 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -568,7 +568,9 @@ void exit_pi_state_list(struct task_struct *curr)
+ 		 * task still owns the PI-state:
+ 		 */
+ 		if (head->next != next) {
++			raw_spin_unlock_irq(&curr->pi_lock);
+ 			spin_unlock(&hb->lock);
++			raw_spin_lock_irq(&curr->pi_lock);
+ 			continue;
+ 		}
+ 
+-- 
+1.7.10.4
+
diff --git a/0006-hrtimer-fix-the-miss-of-hrtimer_peek_ahead_timers-in.patch b/0006-hrtimer-fix-the-miss-of-hrtimer_peek_ahead_timers-in.patch
new file mode 100644
index 0000000..b9fd23a
--- /dev/null
+++ b/0006-hrtimer-fix-the-miss-of-hrtimer_peek_ahead_timers-in.patch
@@ -0,0 +1,90 @@
+From 14359e50a5d86eeed1ce9cac634d5d7cc36486c5 Mon Sep 17 00:00:00 2001
+From: Zhao Hongjiang <zhaohongjiang@huawei.com>
+Date: Fri, 8 Mar 2013 15:03:42 +0800
+Subject: [PATCH 6/6] hrtimer:fix the miss of hrtimer_peek_ahead_timers in
+ nort code
+
+When we run the test "taskset -c 1 stress -m 1 --vm-bytes 50M" and
+"taskset -c 1 cyclictest -t 1 -p 80 -n -m -v" with rt closed 3.4.26-rt38 kernel,
+the cyclictest's result is anomalous as follow:
+       0:     278:       1
+       0:     279:       2
+       0:     280:      17
+       0:     281:    1657
+       0:     282:    1680
+       0:     283:    4678
+       0:     284:    7026
+       0:     285:    6678
+       0:     286:    9677
+       0:     287:   12678
+       0:     288:   13090
+       0:     289:   14678
+       0:     290:   17677
+       0:     291:   20155
+       0:     292:   19678
+       0:     293:   22679
+       0:     294:   25676
+       0:     295:   26218
+       0:     296:   27679
+       0:     297:   30677
+       0:     298:   33283
+       0:     299:   32677
+       0:     300:   35676
+and the result is increase continuously forever. We find that the patch
+("hrtimer: fixup hrtimer callback changes for preempt-rt") has missed
+hrtimer_peek_ahead_timers() in hrtimer_rt_run_pending() fuction in nort
+code. We fix this and the test result isn't increase continuously as same
+as the result with nort 3.4.26 kernel as follow:
+       0:     636:       8
+       0:     637:       7
+       0:     638:       8
+       0:     639:       9
+       0:     640:       7
+       0:     641:       9
+       0:     642:       8
+       0:     643:       8
+       0:     644:       8
+       0:     645:      32
+       0:     646:    8683
+       0:     647:    7760
+       0:     648:    6784
+       0:     649:    5797
+       0:     650:    4810
+       0:     651:    3823
+       0:     652:    2836
+       0:     653:    1849
+       0:     654:     862
+       0:     655:       7
+       0:     656:       7
+       0:     657:       7
+       0:     658:       6
+       0:     659:       8
+       0:     660:       7
+
+Signed-off-by: Zhao Hongjiang <zhaohongjiang@huawei.com>
+Signed-off-by: Weng Meiling <wengmeiling.weng@huawei.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/hrtimer.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
+index 0153aec..be493ed 100644
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -1427,7 +1427,11 @@ static int hrtimer_rt_defer(struct hrtimer *timer)
+ 
+ #else
+ 
+-static inline void hrtimer_rt_run_pending(void) { }
++static inline void hrtimer_rt_run_pending(void)
++{
++	hrtimer_peek_ahead_timers();
++}
++
+ static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
+ 
+ #endif
+-- 
+1.7.10.4
+
diff --git a/genirq-disable-irqpoll-on-rt.patch b/genirq-disable-irqpoll-on-rt.patch
index 4531923..a28fb81 100644
--- a/genirq-disable-irqpoll-on-rt.patch
+++ b/genirq-disable-irqpoll-on-rt.patch
@@ -21,7 +21,7 @@
  {
 +#ifdef CONFIG_PREEMPT_RT_BASE
 +	printk(KERN_WARNING "irqfixup boot option not supported "
-+		"w/ CONFIG_PREEMPT_RT\n");
++		"w/ CONFIG_PREEMPT_RT_BASE\n");
 +	return 1;
 +#endif
  	irqfixup = 1;
@@ -33,7 +33,7 @@
  {
 +#ifdef CONFIG_PREEMPT_RT_BASE
 +	printk(KERN_WARNING "irqpoll boot option not supported "
-+		"w/ CONFIG_PREEMPT_RT\n");
++		"w/ CONFIG_PREEMPT_RT_BASE\n");
 +	return 1;
 +#endif
  	irqfixup = 2;
diff --git a/kconfig-preempt-rt-full.patch b/kconfig-preempt-rt-full.patch
index ca7261d..92fc433 100644
--- a/kconfig-preempt-rt-full.patch
+++ b/kconfig-preempt-rt-full.patch
@@ -23,7 +23,7 @@
 ===================================================================
 --- linux-stable.orig/kernel/Kconfig.preempt
 +++ linux-stable/kernel/Kconfig.preempt
-@@ -73,6 +73,13 @@ config PREEMPT_RTB
+@@ -73,6 +73,14 @@ config PREEMPT_RTB
  	  enables changes which are preliminary for the full preemptiple
  	  RT kernel.
  
@@ -31,6 +31,7 @@
 +	bool "Fully Preemptible Kernel (RT)"
 +	depends on IRQ_FORCED_THREADING
 +	select PREEMPT_RT_BASE
++	select PREEMPT_RCU
 +	help
 +	  All and everything
 +
diff --git a/localversion.patch b/localversion.patch
index b0fb40f..9996ceb 100644
--- a/localversion.patch
+++ b/localversion.patch
@@ -9,9 +9,7 @@
  localversion-rt |    1 +
  1 file changed, 1 insertion(+)
 
-Index: linux-stable/localversion-rt
-===================================================================
 --- /dev/null
-+++ linux-stable/localversion-rt
++++ b/localversion-rt
 @@ -0,0 +1 @@
-+-rt30
++-rt31
diff --git a/rcu-force-preempt-rcu-for-rt.patch b/rcu-force-preempt-rcu-for-rt.patch
deleted file mode 100644
index 5de6462..0000000
--- a/rcu-force-preempt-rcu-for-rt.patch
+++ /dev/null
@@ -1,28 +0,0 @@
-Subject: RCU: Force PREEMPT_RCU for PREEMPT-RT
-From: Ingo Molnar <mingo@elte.hu>
-Date: Fri, 3 Jul 2009 08:30:30 -0500
-
-PREEMPT_RT relies on PREEMPT_RCU - only allow RCU to be configured
-interactively in the !PREEMPT_RT case.
-
-Signed-off-by: Ingo Molnar <mingo@elte.hu>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
-Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
-Link: http://lkml.kernel.org/n/tip-j1y0phicu6s6pu8guku2vca0@git.kernel.org
----
- init/Kconfig |    1 -
- 1 file changed, 1 deletion(-)
-
-Index: linux-stable/init/Kconfig
-===================================================================
---- linux-stable.orig/init/Kconfig
-+++ linux-stable/init/Kconfig
-@@ -806,7 +806,6 @@ config RT_GROUP_SCHED
- 	bool "Group scheduling for SCHED_RR/FIFO"
- 	depends on EXPERIMENTAL
- 	depends on CGROUP_SCHED
--	depends on !PREEMPT_RT_FULL
- 	default n
- 	help
- 	  This feature lets you explicitly allocate real CPU bandwidth
diff --git a/rt-disable-rt-group-sched.patch b/rt-disable-rt-group-sched.patch
deleted file mode 100644
index 418dd9d..0000000
--- a/rt-disable-rt-group-sched.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From f9e7eb3419db82245b3396074c137b687b42df06 Mon Sep 17 00:00:00 2001
-From: Carsten Emde <C.Emde@osadl.org>
-Date: Wed, 11 Jul 2012 22:05:18 +0000
-Subject: Disable RT_GROUP_SCHED in PREEMPT_RT_FULL
-
-Strange CPU stalls have been observed in RT when RT_GROUP_SCHED
-was configured.
-
-Disable it for now.
-
-Signed-off-by: Carsten Emde <C.Emde@osadl.org>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
----
- init/Kconfig |    1 +
- 1 file changed, 1 insertion(+)
-
-Index: linux-stable/init/Kconfig
-===================================================================
---- linux-stable.orig/init/Kconfig
-+++ linux-stable/init/Kconfig
-@@ -806,6 +806,7 @@ config RT_GROUP_SCHED
- 	bool "Group scheduling for SCHED_RR/FIFO"
- 	depends on EXPERIMENTAL
- 	depends on CGROUP_SCHED
-+	depends on !PREEMPT_RT_FULL
- 	default n
- 	help
- 	  This feature lets you explicitly allocate real CPU bandwidth
diff --git a/series b/series
index d45a993..542097e 100644
--- a/series
+++ b/series
@@ -330,6 +330,7 @@
 # HRTIMERS
 hrtimers-prepare-full-preemption.patch
 hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
+0006-hrtimer-fix-the-miss-of-hrtimer_peek_ahead_timers-in.patch
 peter_zijlstra-frob-hrtimer.patch
 hrtimer-add-missing-debug_activate-aid.patch
 hrtimer-fix-reprogram-madness.patch
@@ -435,7 +436,6 @@
 timer.c-fix-build-fail-for-RT_FULL.patch
 
 # RCU
-rcu-force-preempt-rcu-for-rt.patch
 peter_zijlstra-frob-rcu.patch
 rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
 rcu-tiny-merge-bh.patch
@@ -570,7 +570,6 @@
 lockdep-selftest-convert-spinlock-to-raw-spinlock.patch
 lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
 
-rt-disable-rt-group-sched.patch
 fs-jbd-pull-plug-when-waiting-for-space.patch
 perf-make-swevent-hrtimer-irqsafe.patch
 cpu-rt-rework-cpu-down.patch
@@ -635,10 +634,17 @@
 
 x86_32-use-kmap-switch-for-non-highmem-as-well.patch
 highmem-rt-store-per-task-ptes-directly.patch
+0001-x86-highmem-close-race-between-clear-set-ptes.patch
+0002-x86-highmem-add-a-already-used-pte-check.patch
 arm-enable-highmem-for-rt.patch
+0003-arm-highmem-flush-tlb-on-unmap.patch
+0004-arm-highmem-close-race-between-clear-set-pte.patch
 acpi-rt-convert-acpi-lock-back-to-a-raw_spinlock_t.patch
 
 fix-rq-3elock-vs-logbuf_lock-unlock-race.patch
 serial-imx-fix-recursive-locking-bug.patch
+0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
+
 kconfig-disable-a-few-options-rt.patch
 kconfig-preempt-rt-full.patch
+
diff --git a/wait-simple-implementation.patch b/wait-simple-implementation.patch
index 1b7745e..f1420ec 100644
--- a/wait-simple-implementation.patch
+++ b/wait-simple-implementation.patch
@@ -266,7 +266,7 @@
 +	lockdep_set_class(&head->lock, key);
 +	INIT_LIST_HEAD(&head->list);
 +}
-+EXPORT_SYMBOL_GPL(__init_swait_head);
++EXPORT_SYMBOL(__init_swait_head);
 +
 +void swait_prepare(struct swait_head *head, struct swaiter *w, int state)
 +{
@@ -279,7 +279,7 @@
 +	set_current_state(state);
 +	raw_spin_unlock_irqrestore(&head->lock, flags);
 +}
-+EXPORT_SYMBOL_GPL(swait_prepare);
++EXPORT_SYMBOL(swait_prepare);
 +
 +void swait_finish(struct swait_head *head, struct swaiter *w)
 +{
@@ -292,7 +292,7 @@
 +		raw_spin_unlock_irqrestore(&head->lock, flags);
 +	}
 +}
-+EXPORT_SYMBOL_GPL(swait_finish);
++EXPORT_SYMBOL(swait_finish);
 +
 +int __swait_wake(struct swait_head *head, unsigned int state)
 +{
@@ -313,4 +313,4 @@
 +	raw_spin_unlock_irqrestore(&head->lock, flags);
 +	return woken;
 +}
-+EXPORT_SYMBOL_GPL(__swait_wake);
++EXPORT_SYMBOL(__swait_wake);
diff --git a/wait-simple-rework-for-completions.patch b/wait-simple-rework-for-completions.patch
index d029142..b8600cb 100644
--- a/wait-simple-rework-for-completions.patch
+++ b/wait-simple-rework-for-completions.patch
@@ -128,7 +128,7 @@
  	raw_spin_lock_init(&head->lock);
 @@ -20,19 +38,31 @@ void __init_swait_head(struct swait_head
  }
- EXPORT_SYMBOL_GPL(__init_swait_head);
+ EXPORT_SYMBOL(__init_swait_head);
  
 +void swait_prepare_locked(struct swait_head *head, struct swaiter *w)
 +{
@@ -150,7 +150,7 @@
 +	__set_current_state(state);
  	raw_spin_unlock_irqrestore(&head->lock, flags);
  }
- EXPORT_SYMBOL_GPL(swait_prepare);
+ EXPORT_SYMBOL(swait_prepare);
  
 +void swait_finish_locked(struct swait_head *head, struct swaiter *w)
 +{
@@ -164,7 +164,7 @@
  	unsigned long flags;
 @@ -46,22 +76,43 @@ void swait_finish(struct swait_head *hea
  }
- EXPORT_SYMBOL_GPL(swait_finish);
+ EXPORT_SYMBOL(swait_finish);
  
 -int __swait_wake(struct swait_head *head, unsigned int state)
 +unsigned int