[ANNOUNCE] v4.9-rt1

Dear RT folks!

I'm pleased to announce the v4.9-rt1 patch set.

Please don't download and boot this before Christmas Eve.

Changes since v4.8.15-rt10:

  - rebase to v4.9

Known issues
	- CPU hotplug got a little better but can still deadlock.

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.9-rt1
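
For example, check out the release tag directly (a sketch; git's -b
option accepts tags as well as branches):

    git clone -b v4.9-rt1 git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git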

The RT patch against v4.9 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patch-4.9-rt1.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9-rt1.tar.xz
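
Either form applies on top of a vanilla v4.9 tree. Run from inside the
source tree, that looks roughly like this (a sketch; the quilt variant
assumes the tarball unpacks into a patches/ directory with a series
file):

    xz -dc ../patch-4.9-rt1.patch.xz | patch -p1

or, for the split queue:

    tar xf ../patches-4.9-rt1.tar.xz && quilt push -a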

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
diff --git a/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch b/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
index 0b9be8c..9d1b301 100644
--- a/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
+++ b/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
@@ -12,7 +12,7 @@
 
 --- a/kernel/printk/printk.c
 +++ b/kernel/printk/printk.c
-@@ -1399,6 +1399,7 @@ static int syslog_print_all(char __user
+@@ -1396,6 +1396,7 @@ static int syslog_print_all(char __user
  {
  	char *text;
  	int len = 0;
@@ -20,7 +20,7 @@
  
  	text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
  	if (!text)
-@@ -1410,6 +1411,14 @@ static int syslog_print_all(char __user
+@@ -1407,6 +1408,14 @@ static int syslog_print_all(char __user
  		u64 seq;
  		u32 idx;
  		enum log_flags prev;
@@ -35,7 +35,7 @@
  
  		/*
  		 * Find first record that fits, including all following records,
-@@ -1425,6 +1434,14 @@ static int syslog_print_all(char __user
+@@ -1422,6 +1431,14 @@ static int syslog_print_all(char __user
  			prev = msg->flags;
  			idx = log_next(idx);
  			seq++;
@@ -50,7 +50,7 @@
  		}
  
  		/* move first record forward until length fits into the buffer */
-@@ -1438,6 +1455,14 @@ static int syslog_print_all(char __user
+@@ -1435,6 +1452,14 @@ static int syslog_print_all(char __user
  			prev = msg->flags;
  			idx = log_next(idx);
  			seq++;
@@ -65,7 +65,7 @@
  		}
  
  		/* last message fitting into this dump */
-@@ -1478,6 +1503,7 @@ static int syslog_print_all(char __user
+@@ -1475,6 +1500,7 @@ static int syslog_print_all(char __user
  		clear_seq = log_next_seq;
  		clear_idx = log_next_idx;
  	}
diff --git a/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch b/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
index 202beba..a8bb2ec 100644
--- a/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
+++ b/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
@@ -22,7 +22,7 @@
 
 --- a/arch/arm/kvm/arm.c
 +++ b/arch/arm/kvm/arm.c
-@@ -584,7 +584,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -619,7 +619,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
  		 * involves poking the GIC, which must be done in a
  		 * non-preemptible context.
  		 */
@@ -31,7 +31,7 @@
  		kvm_pmu_flush_hwstate(vcpu);
  		kvm_timer_flush_hwstate(vcpu);
  		kvm_vgic_flush_hwstate(vcpu);
-@@ -605,7 +605,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -640,7 +640,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
  			kvm_pmu_sync_hwstate(vcpu);
  			kvm_timer_sync_hwstate(vcpu);
  			kvm_vgic_sync_hwstate(vcpu);
@@ -40,7 +40,7 @@
  			continue;
  		}
  
-@@ -661,7 +661,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -696,7 +696,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
  
  		kvm_vgic_sync_hwstate(vcpu);
  
diff --git a/patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch b/patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
index 26032cc..9ccfeb4 100644
--- a/patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
+++ b/patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
@@ -15,7 +15,7 @@
 
 --- a/arch/x86/kvm/lapic.c
 +++ b/arch/x86/kvm/lapic.c
-@@ -1938,6 +1938,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
+@@ -1939,6 +1939,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
  	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
  		     HRTIMER_MODE_ABS_PINNED);
  	apic->lapic_timer.timer.function = apic_timer_fn;
diff --git a/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch b/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
index 97d1170..bbd96e1 100644
--- a/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
+++ b/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
@@ -46,7 +46,7 @@
  		put_nfs_open_context(ctx);
 --- a/fs/nfs/nfs4_fs.h
 +++ b/fs/nfs/nfs4_fs.h
-@@ -107,7 +107,7 @@ struct nfs4_state_owner {
+@@ -111,7 +111,7 @@ struct nfs4_state_owner {
  	unsigned long	     so_flags;
  	struct list_head     so_states;
  	struct nfs_seqid_counter so_seqid;
@@ -57,7 +57,7 @@
  
 --- a/fs/nfs/nfs4proc.c
 +++ b/fs/nfs/nfs4proc.c
-@@ -2525,7 +2525,7 @@ static int _nfs4_open_and_get_state(stru
+@@ -2697,7 +2697,7 @@ static int _nfs4_open_and_get_state(stru
  	unsigned int seq;
  	int ret;
  
@@ -66,7 +66,7 @@
  
  	ret = _nfs4_proc_open(opendata);
  	if (ret != 0)
-@@ -2561,7 +2561,7 @@ static int _nfs4_open_and_get_state(stru
+@@ -2735,7 +2735,7 @@ static int _nfs4_open_and_get_state(stru
  	ctx->state = state;
  	if (d_inode(dentry) == state->inode) {
  		nfs_inode_attach_open_context(ctx);
@@ -86,7 +86,7 @@
  	mutex_init(&sp->so_delegreturn_mutex);
  	return sp;
  }
-@@ -1459,8 +1459,12 @@ static int nfs4_reclaim_open_state(struc
+@@ -1497,8 +1497,12 @@ static int nfs4_reclaim_open_state(struc
  	 * recovering after a network partition or a reboot from a
  	 * server that doesn't support a grace period.
  	 */
@@ -100,7 +100,7 @@
  restart:
  	list_for_each_entry(state, &sp->so_states, open_states) {
  		if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
-@@ -1528,14 +1532,20 @@ static int nfs4_reclaim_open_state(struc
+@@ -1567,14 +1571,20 @@ static int nfs4_reclaim_open_state(struc
  		spin_lock(&sp->so_lock);
  		goto restart;
  	}
diff --git a/patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch b/patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
index f374772..172e6cd 100644
--- a/patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
+++ b/patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
@@ -142,7 +142,7 @@
  	/* Delete the reader/writer lock */
 --- a/include/acpi/platform/aclinux.h
 +++ b/include/acpi/platform/aclinux.h
-@@ -131,6 +131,7 @@
+@@ -133,6 +133,7 @@
  
  #define acpi_cache_t                        struct kmem_cache
  #define acpi_spinlock                       spinlock_t *
@@ -150,7 +150,7 @@
  #define acpi_cpu_flags                      unsigned long
  
  /* Use native linux version of acpi_os_allocate_zeroed */
-@@ -149,6 +150,20 @@
+@@ -151,6 +152,20 @@
  #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id
  #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock
  
diff --git a/patches/arch-arm64-Add-lazy-preempt-support.patch b/patches/arch-arm64-Add-lazy-preempt-support.patch
index 34de1d1..596f6f2 100644
--- a/patches/arch-arm64-Add-lazy-preempt-support.patch
+++ b/patches/arch-arm64-Add-lazy-preempt-support.patch
@@ -12,14 +12,15 @@
 Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
 ---
  arch/arm64/Kconfig                   |    1 +
- arch/arm64/include/asm/thread_info.h |    6 +++++-
+ arch/arm64/include/asm/thread_info.h |    7 ++++++-
  arch/arm64/kernel/asm-offsets.c      |    1 +
- arch/arm64/kernel/entry.S            |   13 ++++++++++---
- 4 files changed, 17 insertions(+), 4 deletions(-)
+ arch/arm64/kernel/entry.S            |   12 +++++++++---
+ arch/arm64/kernel/signal.c           |    2 +-
+ 5 files changed, 18 insertions(+), 5 deletions(-)
 
 --- a/arch/arm64/Kconfig
 +++ b/arch/arm64/Kconfig
-@@ -90,6 +90,7 @@ config ARM64
+@@ -91,6 +91,7 @@ config ARM64
  	select HAVE_PERF_EVENTS
  	select HAVE_PERF_REGS
  	select HAVE_PERF_USER_STACK_DUMP
@@ -37,7 +38,7 @@
  	int			cpu;		/* cpu */
  };
  
-@@ -109,6 +110,7 @@ static inline struct thread_info *curren
+@@ -112,6 +113,7 @@ static inline struct thread_info *curren
  #define TIF_NEED_RESCHED	1
  #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
  #define TIF_FOREIGN_FPSTATE	3	/* CPU's FP state is not current's */
@@ -45,7 +46,7 @@
  #define TIF_NOHZ		7
  #define TIF_SYSCALL_TRACE	8
  #define TIF_SYSCALL_AUDIT	9
-@@ -124,6 +126,7 @@ static inline struct thread_info *curren
+@@ -127,6 +129,7 @@ static inline struct thread_info *curren
  #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
  #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
  #define _TIF_FOREIGN_FPSTATE	(1 << TIF_FOREIGN_FPSTATE)
@@ -53,19 +54,20 @@
  #define _TIF_NOHZ		(1 << TIF_NOHZ)
  #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
  #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
-@@ -132,7 +135,8 @@ static inline struct thread_info *curren
+@@ -135,7 +138,9 @@ static inline struct thread_info *curren
  #define _TIF_32BIT		(1 << TIF_32BIT)
  
  #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
 -				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
 +				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
 +				 _TIF_NEED_RESCHED_LAZY)
++#define _TIF_NEED_RESCHED_MASK	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
  
  #define _TIF_SYSCALL_WORK	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
  				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
 --- a/arch/arm64/kernel/asm-offsets.c
 +++ b/arch/arm64/kernel/asm-offsets.c
-@@ -37,6 +37,7 @@ int main(void)
+@@ -38,6 +38,7 @@ int main(void)
    BLANK();
    DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
    DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
@@ -75,7 +77,7 @@
    DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
 --- a/arch/arm64/kernel/entry.S
 +++ b/arch/arm64/kernel/entry.S
-@@ -434,11 +434,16 @@ ENDPROC(el1_sync)
+@@ -428,11 +428,16 @@ ENDPROC(el1_sync)
  
  #ifdef CONFIG_PREEMPT
  	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
@@ -95,7 +97,7 @@
  #endif
  #ifdef CONFIG_TRACE_IRQFLAGS
  	bl	trace_hardirqs_on
-@@ -452,6 +457,7 @@ ENDPROC(el1_irq)
+@@ -446,6 +451,7 @@ ENDPROC(el1_irq)
  1:	bl	preempt_schedule_irq		// irq en/disable is done inside
  	ldr	x0, [tsk, #TI_FLAGS]		// get new tasks TI_FLAGS
  	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
@@ -103,11 +105,14 @@
  	ret	x24
  #endif
  
-@@ -708,6 +714,7 @@ ENDPROC(cpu_switch_to)
-  */
- work_pending:
- 	tbnz	x1, #TIF_NEED_RESCHED, work_resched
-+	tbnz	x1, #TIF_NEED_RESCHED_LAZY, work_resched
- 	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
- 	mov	x0, sp				// 'regs'
- 	enable_irq				// enable interrupts for do_notify_resume()
+--- a/arch/arm64/kernel/signal.c
++++ b/arch/arm64/kernel/signal.c
+@@ -409,7 +409,7 @@ asmlinkage void do_notify_resume(struct
+ 	 */
+ 	trace_hardirqs_off();
+ 	do {
+-		if (thread_flags & _TIF_NEED_RESCHED) {
++		if (thread_flags & _TIF_NEED_RESCHED_MASK) {
+ 			schedule();
+ 		} else {
+ 			local_irq_enable();
diff --git a/patches/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch b/patches/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch
index 8606755..0c5f9d4 100644
--- a/patches/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch
+++ b/patches/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch
@@ -44,7 +44,7 @@
  
  	/* update clocksource counter */
  	data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR));
-@@ -211,15 +220,6 @@ static int __init at91sam926x_pit_common
+@@ -230,15 +239,6 @@ static int __init at91sam926x_pit_dt_ini
  		return ret;
  	}
  
diff --git a/patches/arm-include-definition-for-cpumask_t.patch b/patches/arm-include-definition-for-cpumask_t.patch
new file mode 100644
index 0000000..3750303
--- /dev/null
+++ b/patches/arm-include-definition-for-cpumask_t.patch
@@ -0,0 +1,24 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 22 Dec 2016 17:28:33 +0100
+Subject: [PATCH] arm: include definition for cpumask_t
+
+This definition gets pulled in by other files. With the (later) split of
+RCU and spinlock.h it won't compile anymore.
+The split is done in ("rbtree: don't include the rcu header").
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm/include/asm/irq.h |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm/include/asm/irq.h
++++ b/arch/arm/include/asm/irq.h
+@@ -22,6 +22,8 @@
+ #endif
+ 
+ #ifndef __ASSEMBLY__
++#include <linux/cpumask.h>
++
+ struct irqaction;
+ struct pt_regs;
+ extern void migrate_irqs(void);
diff --git a/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch b/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch
index 9761860..f156168 100644
--- a/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch
+++ b/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch
@@ -12,7 +12,7 @@
 
 --- a/arch/arm64/Kconfig
 +++ b/arch/arm64/Kconfig
-@@ -689,7 +689,7 @@ config XEN_DOM0
+@@ -694,7 +694,7 @@ config XEN_DOM0
  
  config XEN
  	bool "Xen guest support on ARM64"
diff --git a/patches/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch b/patches/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch
deleted file mode 100644
index 55bbfbd..0000000
--- a/patches/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch
+++ /dev/null
@@ -1,83 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Sat, 3 May 2014 11:00:29 +0200
-Subject: blk-mq: revert raw locks, post pone notifier to POST_DEAD
-
-The blk_mq_cpu_notify_lock should be raw because some CPU down levels
-are called with interrupts off. The notifier itself calls currently one
-function that is blk_mq_hctx_notify().
-That function acquires the ctx->lock lock which is sleeping and I would
-prefer to keep it that way. That function only moves IO-requests from
-the CPU that is going offline to another CPU and it is currently the
-only one. Therefore I revert the list lock back to sleeping spinlocks
-and let the notifier run at POST_DEAD time.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- block/blk-mq-cpu.c |   17 ++++++++++-------
- block/blk-mq.c     |    2 +-
- 2 files changed, 11 insertions(+), 8 deletions(-)
-
---- a/block/blk-mq-cpu.c
-+++ b/block/blk-mq-cpu.c
-@@ -16,7 +16,7 @@
- #include "blk-mq.h"
- 
- static LIST_HEAD(blk_mq_cpu_notify_list);
--static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock);
-+static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock);
- 
- static int blk_mq_main_cpu_notify(struct notifier_block *self,
- 				  unsigned long action, void *hcpu)
-@@ -25,7 +25,10 @@ static int blk_mq_main_cpu_notify(struct
- 	struct blk_mq_cpu_notifier *notify;
- 	int ret = NOTIFY_OK;
- 
--	raw_spin_lock(&blk_mq_cpu_notify_lock);
-+	if (action != CPU_POST_DEAD)
-+		return NOTIFY_OK;
-+
-+	spin_lock(&blk_mq_cpu_notify_lock);
- 
- 	list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) {
- 		ret = notify->notify(notify->data, action, cpu);
-@@ -33,7 +36,7 @@ static int blk_mq_main_cpu_notify(struct
- 			break;
- 	}
- 
--	raw_spin_unlock(&blk_mq_cpu_notify_lock);
-+	spin_unlock(&blk_mq_cpu_notify_lock);
- 	return ret;
- }
- 
-@@ -41,16 +44,16 @@ void blk_mq_register_cpu_notifier(struct
- {
- 	BUG_ON(!notifier->notify);
- 
--	raw_spin_lock(&blk_mq_cpu_notify_lock);
-+	spin_lock(&blk_mq_cpu_notify_lock);
- 	list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
--	raw_spin_unlock(&blk_mq_cpu_notify_lock);
-+	spin_unlock(&blk_mq_cpu_notify_lock);
- }
- 
- void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
- {
--	raw_spin_lock(&blk_mq_cpu_notify_lock);
-+	spin_lock(&blk_mq_cpu_notify_lock);
- 	list_del(&notifier->list);
--	raw_spin_unlock(&blk_mq_cpu_notify_lock);
-+	spin_unlock(&blk_mq_cpu_notify_lock);
- }
- 
- void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
---- a/block/blk-mq.c
-+++ b/block/blk-mq.c
-@@ -1687,7 +1687,7 @@ static int blk_mq_hctx_notify(void *data
- {
- 	struct blk_mq_hw_ctx *hctx = data;
- 
--	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
-+	if (action == CPU_POST_DEAD)
- 		return blk_mq_hctx_cpu_offline(hctx, cpu);
- 
- 	/*
diff --git a/patches/block-blk-mq-use-swait.patch b/patches/block-blk-mq-use-swait.patch
index a12466d..a366d69 100644
--- a/patches/block-blk-mq-use-swait.patch
+++ b/patches/block-blk-mq-use-swait.patch
@@ -74,7 +74,7 @@
  	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
 --- a/block/blk-mq.c
 +++ b/block/blk-mq.c
-@@ -92,7 +92,7 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_st
+@@ -72,7 +72,7 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_st
  
  static void blk_mq_freeze_queue_wait(struct request_queue *q)
  {
@@ -83,7 +83,7 @@
  }
  
  /*
-@@ -130,7 +130,7 @@ void blk_mq_unfreeze_queue(struct reques
+@@ -110,7 +110,7 @@ void blk_mq_unfreeze_queue(struct reques
  	WARN_ON_ONCE(freeze_depth < 0);
  	if (!freeze_depth) {
  		percpu_ref_reinit(&q->q_usage_counter);
@@ -92,7 +92,7 @@
  	}
  }
  EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
-@@ -149,7 +149,7 @@ void blk_mq_wake_waiters(struct request_
+@@ -129,7 +129,7 @@ void blk_mq_wake_waiters(struct request_
  	 * dying, we need to ensure that processes currently waiting on
  	 * the queue are notified as well.
  	 */
diff --git a/patches/block-mq-don-t-complete-requests-via-IPI.patch b/patches/block-mq-don-t-complete-requests-via-IPI.patch
index ef9764e..e3f65ac 100644
--- a/patches/block-mq-don-t-complete-requests-via-IPI.patch
+++ b/patches/block-mq-don-t-complete-requests-via-IPI.patch
@@ -9,9 +9,9 @@
 ---
  block/blk-core.c       |    3 +++
  block/blk-mq.c         |   20 ++++++++++++++++++++
- include/linux/blk-mq.h |    1 +
+ include/linux/blk-mq.h |    2 +-
  include/linux/blkdev.h |    1 +
- 4 files changed, 25 insertions(+)
+ 4 files changed, 25 insertions(+), 1 deletion(-)
 
 --- a/block/blk-core.c
 +++ b/block/blk-core.c
@@ -27,7 +27,7 @@
  	rq->__sector = (sector_t) -1;
 --- a/block/blk-mq.c
 +++ b/block/blk-mq.c
-@@ -197,6 +197,9 @@ static void blk_mq_rq_ctx_init(struct re
+@@ -177,6 +177,9 @@ static void blk_mq_rq_ctx_init(struct re
  	rq->resid_len = 0;
  	rq->sense = NULL;
  
@@ -37,7 +37,7 @@
  	INIT_LIST_HEAD(&rq->timeout_list);
  	rq->timeout = 0;
  
-@@ -379,6 +382,17 @@ void blk_mq_end_request(struct request *
+@@ -345,6 +348,17 @@ void blk_mq_end_request(struct request *
  }
  EXPORT_SYMBOL(blk_mq_end_request);
  
@@ -55,7 +55,7 @@
  static void __blk_mq_complete_request_remote(void *data)
  {
  	struct request *rq = data;
-@@ -386,6 +400,8 @@ static void __blk_mq_complete_request_re
+@@ -352,6 +366,8 @@ static void __blk_mq_complete_request_re
  	rq->q->softirq_done_fn(rq);
  }
  
@@ -64,7 +64,7 @@
  static void blk_mq_ipi_complete_request(struct request *rq)
  {
  	struct blk_mq_ctx *ctx = rq->mq_ctx;
-@@ -402,10 +418,14 @@ static void blk_mq_ipi_complete_request(
+@@ -368,10 +384,14 @@ static void blk_mq_ipi_complete_request(
  		shared = cpus_share_cache(cpu, ctx->cpu);
  
  	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
@@ -81,14 +81,15 @@
  	}
 --- a/include/linux/blk-mq.h
 +++ b/include/linux/blk-mq.h
-@@ -222,6 +222,7 @@ static inline u16 blk_mq_unique_tag_to_t
+@@ -209,7 +209,7 @@ static inline u16 blk_mq_unique_tag_to_t
+ 	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
+ }
  
- struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
- struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
+-
 +void __blk_mq_complete_request_remote_work(struct work_struct *work);
- 
  int blk_mq_request_started(struct request *rq);
  void blk_mq_start_request(struct request *rq);
+ void blk_mq_end_request(struct request *rq, int error);
 --- a/include/linux/blkdev.h
 +++ b/include/linux/blkdev.h
 @@ -89,6 +89,7 @@ struct request {
diff --git a/patches/block-mq-drop-preempt-disable.patch b/patches/block-mq-drop-preempt-disable.patch
index 19f888d..93e5bc0 100644
--- a/patches/block-mq-drop-preempt-disable.patch
+++ b/patches/block-mq-drop-preempt-disable.patch
@@ -13,7 +13,7 @@
 
 --- a/block/blk-mq.c
 +++ b/block/blk-mq.c
-@@ -397,7 +397,7 @@ static void blk_mq_ipi_complete_request(
+@@ -363,7 +363,7 @@ static void blk_mq_ipi_complete_request(
  		return;
  	}
  
@@ -22,7 +22,7 @@
  	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
  		shared = cpus_share_cache(cpu, ctx->cpu);
  
-@@ -409,7 +409,7 @@ static void blk_mq_ipi_complete_request(
+@@ -375,7 +375,7 @@ static void blk_mq_ipi_complete_request(
  	} else {
  		rq->q->softirq_done_fn(rq);
  	}
@@ -31,10 +31,10 @@
  }
  
  static void __blk_mq_complete_request(struct request *rq)
-@@ -938,14 +938,14 @@ void blk_mq_run_hw_queue(struct blk_mq_h
+@@ -917,14 +917,14 @@ void blk_mq_run_hw_queue(struct blk_mq_h
  		return;
  
- 	if (!async) {
+ 	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
 -		int cpu = get_cpu();
 +		int cpu = get_cpu_light();
  		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
@@ -48,4 +48,4 @@
 +		put_cpu_light();
  	}
  
- 	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
+ 	kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
diff --git a/patches/block-mq-use-cpu_light.patch b/patches/block-mq-use-cpu_light.patch
index f943165..fa68d62 100644
--- a/patches/block-mq-use-cpu_light.patch
+++ b/patches/block-mq-use-cpu_light.patch
@@ -12,7 +12,7 @@
 
 --- a/block/blk-mq.h
 +++ b/block/blk-mq.h
-@@ -86,12 +86,12 @@ static inline struct blk_mq_ctx *__blk_m
+@@ -72,12 +72,12 @@ static inline struct blk_mq_ctx *__blk_m
   */
  static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
  {
diff --git a/patches/block-shorten-interrupt-disabled-regions.patch b/patches/block-shorten-interrupt-disabled-regions.patch
index b4c8af1..763d2d8 100644
--- a/patches/block-shorten-interrupt-disabled-regions.patch
+++ b/patches/block-shorten-interrupt-disabled-regions.patch
@@ -47,7 +47,7 @@
 
 --- a/block/blk-core.c
 +++ b/block/blk-core.c
-@@ -3171,7 +3171,7 @@ static void queue_unplugged(struct reque
+@@ -3177,7 +3177,7 @@ static void queue_unplugged(struct reque
  		blk_run_queue_async(q);
  	else
  		__blk_run_queue(q);
@@ -56,7 +56,7 @@
  }
  
  static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
-@@ -3219,7 +3219,6 @@ EXPORT_SYMBOL(blk_check_plugged);
+@@ -3225,7 +3225,6 @@ EXPORT_SYMBOL(blk_check_plugged);
  void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
  {
  	struct request_queue *q;
@@ -64,7 +64,7 @@
  	struct request *rq;
  	LIST_HEAD(list);
  	unsigned int depth;
-@@ -3239,11 +3238,6 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3245,11 +3244,6 @@ void blk_flush_plug_list(struct blk_plug
  	q = NULL;
  	depth = 0;
  
@@ -76,7 +76,7 @@
  	while (!list_empty(&list)) {
  		rq = list_entry_rq(list.next);
  		list_del_init(&rq->queuelist);
-@@ -3256,7 +3250,7 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3262,7 +3256,7 @@ void blk_flush_plug_list(struct blk_plug
  				queue_unplugged(q, depth, from_schedule);
  			q = rq->q;
  			depth = 0;
@@ -85,7 +85,7 @@
  		}
  
  		/*
-@@ -3283,8 +3277,6 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3289,8 +3283,6 @@ void blk_flush_plug_list(struct blk_plug
  	 */
  	if (q)
  		queue_unplugged(q, depth, from_schedule);
diff --git a/patches/btrfs-drop-trace_btrfs_all_work_done-from-normal_wor.patch b/patches/btrfs-drop-trace_btrfs_all_work_done-from-normal_wor.patch
new file mode 100644
index 0000000..ca7952d
--- /dev/null
+++ b/patches/btrfs-drop-trace_btrfs_all_work_done-from-normal_wor.patch
@@ -0,0 +1,38 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 14 Dec 2016 14:44:18 +0100
+Subject: [PATCH] btrfs: drop trace_btrfs_all_work_done() from
+ normal_work_helper()
+
+For btrfs_scrubparity_helper() the ->func() is set to
+scrub_parity_bio_endio_worker(). This function invokes
+scrub_free_parity() which kfrees() the worked object. All is good as
+long as trace events are not enabled because we boom with a backtrace
+like this:
+| Workqueue: btrfs-endio btrfs_endio_helper
+| RIP: 0010:[<ffffffff812f81ae>]  [<ffffffff812f81ae>] trace_event_raw_event_btrfs__work__done+0x4e/0xa0
+| Call Trace:
+|  [<ffffffff8136497d>] btrfs_scrubparity_helper+0x59d/0x780
+|  [<ffffffff81364c49>] btrfs_endio_helper+0x9/0x10
+|  [<ffffffff8108af8e>] process_one_work+0x26e/0x7b0
+|  [<ffffffff8108b516>] worker_thread+0x46/0x560
+|  [<ffffffff81091c4e>] kthread+0xee/0x110
+|  [<ffffffff818e166a>] ret_from_fork+0x2a/0x40
+
+So in order to avoid this, I remove the trace point.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ fs/btrfs/async-thread.c |    2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/fs/btrfs/async-thread.c
++++ b/fs/btrfs/async-thread.c
+@@ -318,8 +318,6 @@ static void normal_work_helper(struct bt
+ 		set_bit(WORK_DONE_BIT, &work->flags);
+ 		run_ordered_work(wq);
+ 	}
+-	if (!need_order)
+-		trace_btrfs_all_work_done(work);
+ }
+ 
+ void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
diff --git a/patches/btrfs-swap-free-and-trace-point-in-run_ordered_work.patch b/patches/btrfs-swap-free-and-trace-point-in-run_ordered_work.patch
new file mode 100644
index 0000000..41d0070
--- /dev/null
+++ b/patches/btrfs-swap-free-and-trace-point-in-run_ordered_work.patch
@@ -0,0 +1,33 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 14 Dec 2016 12:28:52 +0100
+Subject: [PATCH] btrfs: swap free() and trace point in run_ordered_work()
+
+The previous patch removed a trace point due to a use after free problem
+with tracing enabled. While looking at the backtrace it took me a while
+to find the right spot. While doing so I noticed that this trace point
+could be used with two clean-up functions in run_ordered_work():
+- run_one_async_free()
+- async_cow_free()
+
+Both of them free the `work' item so a later use in the tracepoint is
+not possible.
+This patch swaps the order so we first have the trace point and then
+free the struct.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ fs/btrfs/async-thread.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/btrfs/async-thread.c
++++ b/fs/btrfs/async-thread.c
+@@ -288,8 +288,8 @@ static void run_ordered_work(struct __bt
+ 		 * we don't want to call the ordered free functions
+ 		 * with the lock held though
+ 		 */
+-		work->ordered_free(work);
+ 		trace_btrfs_all_work_done(work);
++		work->ordered_free(work);
+ 	}
+ 	spin_unlock_irqrestore(lock, flags);
+ }
diff --git a/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch b/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch
index c952188..5c367f1 100644
--- a/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch
+++ b/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch
@@ -42,7 +42,7 @@
 
 --- a/mm/memcontrol.c
 +++ b/mm/memcontrol.c
-@@ -1727,6 +1727,7 @@ struct memcg_stock_pcp {
+@@ -1697,6 +1697,7 @@ struct memcg_stock_pcp {
  #define FLUSHING_CACHED_CHARGE	0
  };
  static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
@@ -50,7 +50,7 @@
  static DEFINE_MUTEX(percpu_charge_mutex);
  
  /**
-@@ -1749,7 +1750,7 @@ static bool consume_stock(struct mem_cgr
+@@ -1719,7 +1720,7 @@ static bool consume_stock(struct mem_cgr
  	if (nr_pages > CHARGE_BATCH)
  		return ret;
  
@@ -59,7 +59,7 @@
  
  	stock = this_cpu_ptr(&memcg_stock);
  	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
-@@ -1757,7 +1758,7 @@ static bool consume_stock(struct mem_cgr
+@@ -1727,7 +1728,7 @@ static bool consume_stock(struct mem_cgr
  		ret = true;
  	}
  
@@ -68,7 +68,7 @@
  
  	return ret;
  }
-@@ -1784,13 +1785,13 @@ static void drain_local_stock(struct wor
+@@ -1754,13 +1755,13 @@ static void drain_local_stock(struct wor
  	struct memcg_stock_pcp *stock;
  	unsigned long flags;
  
@@ -84,7 +84,7 @@
  }
  
  /*
-@@ -1802,7 +1803,7 @@ static void refill_stock(struct mem_cgro
+@@ -1772,7 +1773,7 @@ static void refill_stock(struct mem_cgro
  	struct memcg_stock_pcp *stock;
  	unsigned long flags;
  
@@ -93,7 +93,7 @@
  
  	stock = this_cpu_ptr(&memcg_stock);
  	if (stock->cached != memcg) { /* reset if necessary */
-@@ -1811,7 +1812,7 @@ static void refill_stock(struct mem_cgro
+@@ -1781,7 +1782,7 @@ static void refill_stock(struct mem_cgro
  	}
  	stock->nr_pages += nr_pages;
  
diff --git a/patches/cgroups-use-simple-wait-in-css_release.patch b/patches/cgroups-use-simple-wait-in-css_release.patch
index aee9727..a048884 100644
--- a/patches/cgroups-use-simple-wait-in-css_release.patch
+++ b/patches/cgroups-use-simple-wait-in-css_release.patch
@@ -52,7 +52,7 @@
  /*
 --- a/kernel/cgroup.c
 +++ b/kernel/cgroup.c
-@@ -5027,10 +5027,10 @@ static void css_free_rcu_fn(struct rcu_h
+@@ -5040,10 +5040,10 @@ static void css_free_rcu_fn(struct rcu_h
  	queue_work(cgroup_destroy_wq, &css->destroy_work);
  }
  
@@ -65,7 +65,7 @@
  	struct cgroup_subsys *ss = css->ss;
  	struct cgroup *cgrp = css->cgroup;
  
-@@ -5071,8 +5071,8 @@ static void css_release(struct percpu_re
+@@ -5086,8 +5086,8 @@ static void css_release(struct percpu_re
  	struct cgroup_subsys_state *css =
  		container_of(ref, struct cgroup_subsys_state, refcnt);
  
@@ -76,7 +76,7 @@
  }
  
  static void init_and_link_css(struct cgroup_subsys_state *css,
-@@ -5716,6 +5716,7 @@ static int __init cgroup_wq_init(void)
+@@ -5742,6 +5742,7 @@ static int __init cgroup_wq_init(void)
  	 */
  	cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
  	BUG_ON(!cgroup_destroy_wq);
diff --git a/patches/completion-use-simple-wait-queues.patch b/patches/completion-use-simple-wait-queues.patch
index 2d5eee0..e6e2564 100644
--- a/patches/completion-use-simple-wait-queues.patch
+++ b/patches/completion-use-simple-wait-queues.patch
@@ -35,7 +35,7 @@
  		break;
 --- a/drivers/usb/gadget/function/f_fs.c
 +++ b/drivers/usb/gadget/function/f_fs.c
-@@ -1590,7 +1590,7 @@ static void ffs_data_put(struct ffs_data
+@@ -1593,7 +1593,7 @@ static void ffs_data_put(struct ffs_data
  		pr_info("%s(): freeing\n", __func__);
  		ffs_data_clear(ffs);
  		BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
@@ -136,7 +136,7 @@
  struct mm_struct;
 --- a/kernel/power/hibernate.c
 +++ b/kernel/power/hibernate.c
-@@ -681,6 +681,10 @@ static int load_image_and_restore(void)
+@@ -683,6 +683,10 @@ static int load_image_and_restore(void)
  	return error;
  }
  
@@ -147,7 +147,7 @@
  /**
   * hibernate - Carry out system hibernation, including saving the image.
   */
-@@ -694,6 +698,8 @@ int hibernate(void)
+@@ -696,6 +700,8 @@ int hibernate(void)
  		return -EPERM;
  	}
  
@@ -156,7 +156,7 @@
  	lock_system_sleep();
  	/* The snapshot device should not be opened while we're running */
  	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
-@@ -771,6 +777,7 @@ int hibernate(void)
+@@ -773,6 +779,7 @@ int hibernate(void)
  	atomic_inc(&snapshot_device_available);
   Unlock:
  	unlock_system_sleep();
@@ -166,7 +166,7 @@
  
 --- a/kernel/power/suspend.c
 +++ b/kernel/power/suspend.c
-@@ -523,6 +523,8 @@ static int enter_state(suspend_state_t s
+@@ -531,6 +531,8 @@ static int enter_state(suspend_state_t s
  	return error;
  }
  
@@ -175,7 +175,7 @@
  /**
   * pm_suspend - Externally visible function for suspending the system.
   * @state: System sleep state to enter.
-@@ -537,6 +539,8 @@ int pm_suspend(suspend_state_t state)
+@@ -545,6 +547,8 @@ int pm_suspend(suspend_state_t state)
  	if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
  		return -EINVAL;
  
@@ -184,7 +184,7 @@
  	error = enter_state(state);
  	if (error) {
  		suspend_stats.fail++;
-@@ -544,6 +548,7 @@ int pm_suspend(suspend_state_t state)
+@@ -552,6 +556,7 @@ int pm_suspend(suspend_state_t state)
  	} else {
  		suspend_stats.success++;
  	}
@@ -286,7 +286,7 @@
  EXPORT_SYMBOL(completion_done);
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -3317,7 +3317,10 @@ void migrate_disable(void)
+@@ -3323,7 +3323,10 @@ void migrate_disable(void)
  	}
  
  #ifdef CONFIG_SCHED_DEBUG
@@ -298,7 +298,7 @@
  #endif
  
  	if (p->migrate_disable) {
-@@ -3344,7 +3347,10 @@ void migrate_enable(void)
+@@ -3350,7 +3353,10 @@ void migrate_enable(void)
  	}
  
  #ifdef CONFIG_SCHED_DEBUG
diff --git a/patches/cond-resched-softirq-rt.patch b/patches/cond-resched-softirq-rt.patch
index d1563b8..89532ca 100644
--- a/patches/cond-resched-softirq-rt.patch
+++ b/patches/cond-resched-softirq-rt.patch
@@ -15,7 +15,7 @@
 
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -3258,12 +3258,16 @@ extern int __cond_resched_lock(spinlock_
+@@ -3366,12 +3366,16 @@ extern int __cond_resched_lock(spinlock_
  	__cond_resched_lock(lock);				\
  })
  
@@ -34,7 +34,7 @@
  {
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -5021,6 +5021,7 @@ int __cond_resched_lock(spinlock_t *lock
+@@ -5050,6 +5050,7 @@ int __cond_resched_lock(spinlock_t *lock
  }
  EXPORT_SYMBOL(__cond_resched_lock);
  
@@ -42,7 +42,7 @@
  int __sched __cond_resched_softirq(void)
  {
  	BUG_ON(!in_softirq());
-@@ -5034,6 +5035,7 @@ int __sched __cond_resched_softirq(void)
+@@ -5063,6 +5064,7 @@ int __sched __cond_resched_softirq(void)
  	return 0;
  }
  EXPORT_SYMBOL(__cond_resched_softirq);
diff --git a/patches/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch b/patches/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
index 44973e9..ff69c16 100644
--- a/patches/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
+++ b/patches/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
@@ -38,7 +38,7 @@
 
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -187,6 +187,14 @@ struct hotplug_pcp {
+@@ -260,6 +260,14 @@ struct hotplug_pcp {
  	int grab_lock;
  	struct completion synced;
  #ifdef CONFIG_PREEMPT_RT_FULL
diff --git a/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch b/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
index ee85da7..8d3c7d0 100644
--- a/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
+++ b/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
@@ -24,7 +24,7 @@
 
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -137,10 +137,16 @@ static int cpu_hotplug_disabled;
+@@ -210,10 +210,16 @@ static int cpu_hotplug_disabled;
  
  static struct {
  	struct task_struct *active_writer;
@@ -41,7 +41,7 @@
  	/*
  	 * Also blocks the new readers during
  	 * an ongoing cpu hotplug operation.
-@@ -153,12 +159,24 @@ static struct {
+@@ -226,12 +232,24 @@ static struct {
  } cpu_hotplug = {
  	.active_writer = NULL,
  	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
@@ -51,7 +51,7 @@
  	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
 +#endif
  #ifdef CONFIG_DEBUG_LOCK_ALLOC
- 	.dep_map = {.name = "cpu_hotplug.lock" },
+ 	.dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
  #endif
  };
  
@@ -66,7 +66,7 @@
  /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
  #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
  #define cpuhp_lock_acquire_tryread() \
-@@ -195,8 +213,8 @@ void pin_current_cpu(void)
+@@ -268,8 +286,8 @@ void pin_current_cpu(void)
  		return;
  	}
  	preempt_enable();
@@ -77,7 +77,7 @@
  	preempt_disable();
  	goto retry;
  }
-@@ -269,9 +287,9 @@ void get_online_cpus(void)
+@@ -342,9 +360,9 @@ void get_online_cpus(void)
  	if (cpu_hotplug.active_writer == current)
  		return;
  	cpuhp_lock_acquire_read();
@@ -89,7 +89,7 @@
  }
  EXPORT_SYMBOL_GPL(get_online_cpus);
  
-@@ -324,11 +342,11 @@ void cpu_hotplug_begin(void)
+@@ -397,11 +415,11 @@ void cpu_hotplug_begin(void)
  	cpuhp_lock_acquire();
  
  	for (;;) {
@@ -103,7 +103,7 @@
  		schedule();
  	}
  	finish_wait(&cpu_hotplug.wq, &wait);
-@@ -337,7 +355,7 @@ void cpu_hotplug_begin(void)
+@@ -410,7 +428,7 @@ void cpu_hotplug_begin(void)
  void cpu_hotplug_done(void)
  {
  	cpu_hotplug.active_writer = NULL;
diff --git a/patches/cpu-rt-rework-cpu-down.patch b/patches/cpu-rt-rework-cpu-down.patch
index fa4b849..8a91b7f 100644
--- a/patches/cpu-rt-rework-cpu-down.patch
+++ b/patches/cpu-rt-rework-cpu-down.patch
@@ -50,13 +50,13 @@
 
 ---
  include/linux/sched.h |    7 +
- kernel/cpu.c          |  238 +++++++++++++++++++++++++++++++++++++++++---------
+ kernel/cpu.c          |  236 +++++++++++++++++++++++++++++++++++++++++---------
  kernel/sched/core.c   |   78 ++++++++++++++++
- 3 files changed, 281 insertions(+), 42 deletions(-)
+ 3 files changed, 280 insertions(+), 41 deletions(-)
 
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -2429,6 +2429,10 @@ extern void do_set_cpus_allowed(struct t
+@@ -2473,6 +2473,10 @@ extern void do_set_cpus_allowed(struct t
  
  extern int set_cpus_allowed_ptr(struct task_struct *p,
  				const struct cpumask *new_mask);
@@ -67,7 +67,7 @@
  #else
  static inline void do_set_cpus_allowed(struct task_struct *p,
  				      const struct cpumask *new_mask)
-@@ -2441,6 +2445,9 @@ static inline int set_cpus_allowed_ptr(s
+@@ -2485,6 +2489,9 @@ static inline int set_cpus_allowed_ptr(s
  		return -EINVAL;
  	return 0;
  }
@@ -79,7 +79,7 @@
  #ifdef CONFIG_NO_HZ_COMMON
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -137,16 +137,10 @@ static int cpu_hotplug_disabled;
+@@ -210,16 +210,10 @@ static int cpu_hotplug_disabled;
  
  static struct {
  	struct task_struct *active_writer;
@@ -96,19 +96,17 @@
  	/*
  	 * Also blocks the new readers during
  	 * an ongoing cpu hotplug operation.
-@@ -158,25 +152,13 @@ static struct {
- #endif
+@@ -232,24 +226,12 @@ static struct {
  } cpu_hotplug = {
  	.active_writer = NULL,
--	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
+ 	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
 -#ifdef CONFIG_PREEMPT_RT_FULL
 -	.lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock),
 -#else
  	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
 -#endif
-+	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
  #ifdef CONFIG_DEBUG_LOCK_ALLOC
- 	.dep_map = {.name = "cpu_hotplug.lock" },
+ 	.dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
  #endif
  };
  
@@ -123,7 +121,7 @@
  /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
  #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
  #define cpuhp_lock_acquire_tryread() \
-@@ -184,12 +166,42 @@ static struct {
+@@ -257,12 +239,42 @@ static struct {
  #define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
  #define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
  
@@ -166,7 +164,7 @@
  static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
  
  /**
-@@ -203,18 +215,39 @@ static DEFINE_PER_CPU(struct hotplug_pcp
+@@ -276,18 +288,39 @@ static DEFINE_PER_CPU(struct hotplug_pcp
  void pin_current_cpu(void)
  {
  	struct hotplug_pcp *hp;
@@ -210,7 +208,7 @@
  	preempt_disable();
  	goto retry;
  }
-@@ -235,26 +268,84 @@ void unpin_current_cpu(void)
+@@ -308,26 +341,84 @@ void unpin_current_cpu(void)
  		wake_up_process(hp->unplug);
  }
  
@@ -302,7 +300,7 @@
  /*
   * Start the sync_unplug_thread on the target cpu and wait for it to
   * complete.
-@@ -262,23 +353,83 @@ static int sync_unplug_thread(void *data
+@@ -335,23 +426,83 @@ static int sync_unplug_thread(void *data
  static int cpu_unplug_begin(unsigned int cpu)
  {
  	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
@@ -393,7 +391,7 @@
  }
  
  void get_online_cpus(void)
-@@ -287,9 +438,9 @@ void get_online_cpus(void)
+@@ -360,9 +511,9 @@ void get_online_cpus(void)
  	if (cpu_hotplug.active_writer == current)
  		return;
  	cpuhp_lock_acquire_read();
@@ -405,7 +403,7 @@
  }
  EXPORT_SYMBOL_GPL(get_online_cpus);
  
-@@ -342,11 +493,11 @@ void cpu_hotplug_begin(void)
+@@ -415,11 +566,11 @@ void cpu_hotplug_begin(void)
  	cpuhp_lock_acquire();
  
  	for (;;) {
@@ -419,7 +417,7 @@
  		schedule();
  	}
  	finish_wait(&cpu_hotplug.wq, &wait);
-@@ -355,7 +506,7 @@ void cpu_hotplug_begin(void)
+@@ -428,7 +579,7 @@ void cpu_hotplug_begin(void)
  void cpu_hotplug_done(void)
  {
  	cpu_hotplug.active_writer = NULL;
@@ -428,7 +426,7 @@
  	cpuhp_lock_release();
  }
  
-@@ -828,6 +979,9 @@ static int takedown_cpu(unsigned int cpu
+@@ -907,6 +1058,9 @@ static int takedown_cpu(unsigned int cpu
  	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
  	smpboot_park_threads(cpu);
  
@@ -440,8 +438,8 @@
  	 * interrupt affinities.
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -1129,6 +1129,84 @@ void do_set_cpus_allowed(struct task_str
- 		enqueue_task(rq, p, ENQUEUE_RESTORE);
+@@ -1140,6 +1140,84 @@ void do_set_cpus_allowed(struct task_str
+ 		set_curr_task(rq, p);
  }
  
 +static DEFINE_PER_CPU(struct cpumask, sched_cpumasks);
diff --git a/patches/cpu_down_move_migrate_enable_back.patch b/patches/cpu_down_move_migrate_enable_back.patch
index 8d6f033..c725446 100644
--- a/patches/cpu_down_move_migrate_enable_back.patch
+++ b/patches/cpu_down_move_migrate_enable_back.patch
@@ -34,7 +34,7 @@
 
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -1117,6 +1117,7 @@ static int __ref _cpu_down(unsigned int
+@@ -1195,6 +1195,7 @@ static int __ref _cpu_down(unsigned int
  		goto restore_cpus;
  	}
  
@@ -42,7 +42,7 @@
  	cpu_hotplug_begin();
  	ret = cpu_unplug_begin(cpu);
  	if (ret) {
-@@ -1164,7 +1165,6 @@ static int __ref _cpu_down(unsigned int
+@@ -1242,7 +1243,6 @@ static int __ref _cpu_down(unsigned int
  	cpu_unplug_done(cpu);
  out_cancel:
  	cpu_hotplug_done();
diff --git a/patches/cpumask-disable-offstack-on-rt.patch b/patches/cpumask-disable-offstack-on-rt.patch
index 1e9f534..e36168f 100644
--- a/patches/cpumask-disable-offstack-on-rt.patch
+++ b/patches/cpumask-disable-offstack-on-rt.patch
@@ -46,7 +46,7 @@
 
 --- a/arch/x86/Kconfig
 +++ b/arch/x86/Kconfig
-@@ -888,7 +888,7 @@ config IOMMU_HELPER
+@@ -900,7 +900,7 @@ config IOMMU_HELPER
  config MAXSMP
  	bool "Enable Maximum number of SMP Processors and NUMA Nodes"
  	depends on X86_64 && SMP && DEBUG_KERNEL
diff --git a/patches/dm-make-rt-aware.patch b/patches/dm-make-rt-aware.patch
index 0133d5a..08238ec 100644
--- a/patches/dm-make-rt-aware.patch
+++ b/patches/dm-make-rt-aware.patch
@@ -15,10 +15,10 @@
 
 --- a/drivers/md/dm-rq.c
 +++ b/drivers/md/dm-rq.c
-@@ -811,7 +811,7 @@ static void dm_old_request_fn(struct req
+@@ -832,7 +832,7 @@ static void dm_old_request_fn(struct req
  		/* Establish tio->ti before queuing work (map_tio_request) */
  		tio->ti = ti;
- 		queue_kthread_work(&md->kworker, &tio->work);
+ 		kthread_queue_work(&md->kworker, &tio->work);
 -		BUG_ON(!irqs_disabled());
 +		BUG_ON_NONRT(!irqs_disabled());
  	}
diff --git a/patches/drivers-tty-pl011-irq-disable-madness.patch b/patches/drivers-tty-pl011-irq-disable-madness.patch
index dd90606..1f0748c 100644
--- a/patches/drivers-tty-pl011-irq-disable-madness.patch
+++ b/patches/drivers-tty-pl011-irq-disable-madness.patch
@@ -12,7 +12,7 @@
 
 --- a/drivers/tty/serial/amba-pl011.c
 +++ b/drivers/tty/serial/amba-pl011.c
-@@ -2167,13 +2167,19 @@ pl011_console_write(struct console *co,
+@@ -2194,13 +2194,19 @@ pl011_console_write(struct console *co,
  
  	clk_enable(uap->clk);
  
@@ -35,7 +35,7 @@
  
  	/*
  	 *	First save the CR then disable the interrupts
-@@ -2197,8 +2203,7 @@ pl011_console_write(struct console *co,
+@@ -2224,8 +2230,7 @@ pl011_console_write(struct console *co,
  		pl011_write(old_cr, uap, REG_CR);
  
  	if (locked)
diff --git a/patches/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch b/patches/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
index 219bbe1..e15a00e 100644
--- a/patches/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
+++ b/patches/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
@@ -46,7 +46,7 @@
 
 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
-@@ -1302,7 +1302,9 @@ i915_gem_ringbuffer_submission(struct i9
+@@ -1537,7 +1537,9 @@ execbuf_submit(struct i915_execbuffer_pa
  	if (ret)
  		return ret;
  
diff --git "a/patches/drmi915_Use_local_lockunlock_irq\050\051_in_intel_pipe_update_startend\050\051.patch" "b/patches/drmi915_Use_local_lockunlock_irq\050\051_in_intel_pipe_update_startend\050\051.patch"
index 9e89971..c574a09 100644
--- "a/patches/drmi915_Use_local_lockunlock_irq\050\051_in_intel_pipe_update_startend\050\051.patch"
+++ "b/patches/drmi915_Use_local_lockunlock_irq\050\051_in_intel_pipe_update_startend\050\051.patch"
@@ -61,15 +61,15 @@
 
 --- a/drivers/gpu/drm/i915/intel_sprite.c
 +++ b/drivers/gpu/drm/i915/intel_sprite.c
-@@ -38,6 +38,7 @@
- #include "intel_drv.h"
- #include <drm/i915_drm.h>
- #include "i915_drv.h"
+@@ -35,6 +35,7 @@
+ #include <drm/drm_rect.h>
+ #include <drm/drm_atomic.h>
+ #include <drm/drm_plane_helper.h>
 +#include <linux/locallock.h>
- 
- static bool
- format_is_yuv(uint32_t format)
-@@ -64,6 +65,8 @@ int intel_usecs_to_scanlines(const struc
+ #include "intel_drv.h"
+ #include "intel_frontbuffer.h"
+ #include <drm/i915_drm.h>
+@@ -65,6 +66,8 @@ int intel_usecs_to_scanlines(const struc
  			    1000 * adjusted_mode->crtc_htotal);
  }
  
@@ -78,7 +78,7 @@
  /**
   * intel_pipe_update_start() - start update of a set of display registers
   * @crtc: the crtc of which the registers are going to be updated
-@@ -94,7 +97,7 @@ void intel_pipe_update_start(struct inte
+@@ -95,7 +98,7 @@ void intel_pipe_update_start(struct inte
  	min = vblank_start - intel_usecs_to_scanlines(adjusted_mode, 100);
  	max = vblank_start - 1;
  
@@ -87,7 +87,7 @@
  
  	if (min <= 0 || max <= 0)
  		return;
-@@ -124,11 +127,11 @@ void intel_pipe_update_start(struct inte
+@@ -125,11 +128,11 @@ void intel_pipe_update_start(struct inte
  			break;
  		}
  
@@ -101,7 +101,7 @@
  	}
  
  	finish_wait(wq, &wait);
-@@ -180,7 +183,7 @@ void intel_pipe_update_end(struct intel_
+@@ -181,7 +184,7 @@ void intel_pipe_update_end(struct intel_
  		crtc->base.state->event = NULL;
  	}
  
diff --git "a/patches/drmradeoni915_Use_preempt_disableenable_rt\050\051_where_recommended.patch" "b/patches/drmradeoni915_Use_preempt_disableenable_rt\050\051_where_recommended.patch"
index 0298621..abc871e 100644
--- "a/patches/drmradeoni915_Use_preempt_disableenable_rt\050\051_where_recommended.patch"
+++ "b/patches/drmradeoni915_Use_preempt_disableenable_rt\050\051_where_recommended.patch"
@@ -33,7 +33,7 @@
  
 --- a/drivers/gpu/drm/radeon/radeon_display.c
 +++ b/drivers/gpu/drm/radeon/radeon_display.c
-@@ -1869,6 +1869,7 @@ int radeon_get_crtc_scanoutpos(struct dr
+@@ -1845,6 +1845,7 @@ int radeon_get_crtc_scanoutpos(struct dr
  	struct radeon_device *rdev = dev->dev_private;
  
  	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
@@ -41,7 +41,7 @@
  
  	/* Get optional system timestamp before query. */
  	if (stime)
-@@ -1961,6 +1962,7 @@ int radeon_get_crtc_scanoutpos(struct dr
+@@ -1937,6 +1938,7 @@ int radeon_get_crtc_scanoutpos(struct dr
  		*etime = ktime_get();
  
  	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
diff --git a/patches/dump-stack-don-t-disable-preemption-during-trace.patch b/patches/dump-stack-don-t-disable-preemption-during-trace.patch
deleted file mode 100644
index 94597b5..0000000
--- a/patches/dump-stack-don-t-disable-preemption-during-trace.patch
+++ /dev/null
@@ -1,77 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Sun, 16 Aug 2015 14:27:50 +0200
-Subject: dump stack: don't disable preemption during trace
-
-I see here large latencies during a stack dump on x86. The
-preempt_disable() and get_cpu() should forbid moving the task to another
-CPU during a stack dump and avoiding two stack traces in parallel on the
-same CPU. However a stack trace from a second CPU may still happen in
-parallel. Also nesting is allowed so a stack trace happens in
-process-context and we may have another one from IRQ context. With migrate
-disable we keep this code preemptible and allow a second backtrace on
-the same CPU by another task.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/kernel/dumpstack_32.c |    4 ++--
- arch/x86/kernel/dumpstack_64.c |    8 ++++----
- 2 files changed, 6 insertions(+), 6 deletions(-)
-
---- a/arch/x86/kernel/dumpstack_32.c
-+++ b/arch/x86/kernel/dumpstack_32.c
-@@ -42,7 +42,7 @@ void dump_trace(struct task_struct *task
- 		unsigned long *stack, unsigned long bp,
- 		const struct stacktrace_ops *ops, void *data)
- {
--	const unsigned cpu = get_cpu();
-+	const unsigned cpu = get_cpu_light();
- 	int graph = 0;
- 	u32 *prev_esp;
- 
-@@ -84,7 +84,7 @@ void dump_trace(struct task_struct *task
- 			break;
- 		touch_nmi_watchdog();
- 	}
--	put_cpu();
-+	put_cpu_light();
- }
- EXPORT_SYMBOL(dump_trace);
- 
---- a/arch/x86/kernel/dumpstack_64.c
-+++ b/arch/x86/kernel/dumpstack_64.c
-@@ -152,7 +152,7 @@ void dump_trace(struct task_struct *task
- 		unsigned long *stack, unsigned long bp,
- 		const struct stacktrace_ops *ops, void *data)
- {
--	const unsigned cpu = get_cpu();
-+	const unsigned cpu = get_cpu_light();
- 	unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
- 	unsigned long dummy;
- 	unsigned used = 0;
-@@ -239,7 +239,7 @@ void dump_trace(struct task_struct *task
- 	 * This handles the process stack:
- 	 */
- 	bp = ops->walk_stack(task, stack, bp, ops, data, NULL, &graph);
--	put_cpu();
-+	put_cpu_light();
- }
- EXPORT_SYMBOL(dump_trace);
- 
-@@ -253,7 +253,7 @@ show_stack_log_lvl(struct task_struct *t
- 	int cpu;
- 	int i;
- 
--	preempt_disable();
-+	migrate_disable();
- 	cpu = smp_processor_id();
- 
- 	irq_stack_end	= (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
-@@ -299,7 +299,7 @@ show_stack_log_lvl(struct task_struct *t
- 		stack++;
- 		touch_nmi_watchdog();
- 	}
--	preempt_enable();
-+	migrate_enable();
- 
- 	pr_cont("\n");
- 	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
diff --git a/patches/fs-aio-simple-simple-work.patch b/patches/fs-aio-simple-simple-work.patch
index fddfd68..a0cd8a8 100644
--- a/patches/fs-aio-simple-simple-work.patch
+++ b/patches/fs-aio-simple-simple-work.patch
@@ -54,7 +54,7 @@
  	aio_mnt = kern_mount(&aio_fs);
  	if (IS_ERR(aio_mnt))
  		panic("Failed to create aio fs mount.");
-@@ -578,9 +580,9 @@ static int kiocb_cancel(struct aio_kiocb
+@@ -581,9 +583,9 @@ static int kiocb_cancel(struct aio_kiocb
  	return cancel(&kiocb->common);
  }
  
@@ -66,7 +66,7 @@
  
  	pr_debug("freeing %p\n", ctx);
  
-@@ -599,8 +601,8 @@ static void free_ioctx_reqs(struct percp
+@@ -602,8 +604,8 @@ static void free_ioctx_reqs(struct percp
  	if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
  		complete(&ctx->rq_wait->comp);
  
@@ -77,7 +77,7 @@
  }
  
  /*
-@@ -608,9 +610,9 @@ static void free_ioctx_reqs(struct percp
+@@ -611,9 +613,9 @@ static void free_ioctx_reqs(struct percp
   * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
   * now it's safe to cancel any that need to be.
   */
@@ -89,7 +89,7 @@
  	struct aio_kiocb *req;
  
  	spin_lock_irq(&ctx->ctx_lock);
-@@ -629,6 +631,14 @@ static void free_ioctx_users(struct perc
+@@ -632,6 +634,14 @@ static void free_ioctx_users(struct perc
  	percpu_ref_put(&ctx->reqs);
  }
  
diff --git a/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch b/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch
index ede7118..6c42780 100644
--- a/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch
+++ b/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch
@@ -17,7 +17,7 @@
 
 --- a/fs/autofs4/autofs_i.h
 +++ b/fs/autofs4/autofs_i.h
-@@ -30,6 +30,7 @@
+@@ -31,6 +31,7 @@
  #include <linux/sched.h>
  #include <linux/mount.h>
  #include <linux/namei.h>
@@ -96,7 +96,7 @@
  #include <linux/security.h>
  #include <linux/idr.h>
  #include <linux/init.h>		/* init_rootfs */
-@@ -355,7 +356,7 @@ int __mnt_want_write(struct vfsmount *m)
+@@ -358,7 +359,7 @@ int __mnt_want_write(struct vfsmount *m)
  	smp_mb();
  	while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
  		preempt_enable();
diff --git a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
index 4833676..af46f7d 100644
--- a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
+++ b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
@@ -80,7 +80,7 @@
  	INIT_HLIST_NODE(&dentry->d_u.d_alias);
 --- a/fs/fuse/dir.c
 +++ b/fs/fuse/dir.c
-@@ -1174,7 +1174,7 @@ static int fuse_direntplus_link(struct f
+@@ -1191,7 +1191,7 @@ static int fuse_direntplus_link(struct f
  	struct inode *dir = d_inode(parent);
  	struct fuse_conn *fc;
  	struct inode *inode;
@@ -120,7 +120,7 @@
  	struct dentry *dentry;
  	struct dentry *alias;
  	struct inode *dir = d_inode(parent);
-@@ -1490,7 +1490,7 @@ int nfs_atomic_open(struct inode *dir, s
+@@ -1498,7 +1498,7 @@ int nfs_atomic_open(struct inode *dir, s
  		    struct file *file, unsigned open_flags,
  		    umode_t mode, int *opened)
  {
@@ -151,7 +151,7 @@
  	spin_lock(&dentry->d_lock);
 --- a/fs/proc/base.c
 +++ b/fs/proc/base.c
-@@ -1819,7 +1819,7 @@ bool proc_fill_cache(struct file *file,
+@@ -1834,7 +1834,7 @@ bool proc_fill_cache(struct file *file,
  
  	child = d_hash_and_lookup(dir, &qname);
  	if (!child) {
@@ -162,7 +162,7 @@
  			goto end_instantiate;
 --- a/fs/proc/proc_sysctl.c
 +++ b/fs/proc/proc_sysctl.c
-@@ -627,7 +627,7 @@ static bool proc_sys_fill_cache(struct f
+@@ -632,7 +632,7 @@ static bool proc_sys_fill_cache(struct f
  
  	child = d_lookup(dir, &qname);
  	if (!child) {
@@ -193,7 +193,7 @@
  extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
 --- a/include/linux/nfs_xdr.h
 +++ b/include/linux/nfs_xdr.h
-@@ -1484,7 +1484,7 @@ struct nfs_unlinkdata {
+@@ -1490,7 +1490,7 @@ struct nfs_unlinkdata {
  	struct nfs_removeargs args;
  	struct nfs_removeres res;
  	struct dentry *dentry;
diff --git a/patches/fs-namespace-preemption-fix.patch b/patches/fs-namespace-preemption-fix.patch
index 4051119..b9434a1 100644
--- a/patches/fs-namespace-preemption-fix.patch
+++ b/patches/fs-namespace-preemption-fix.patch
@@ -15,7 +15,7 @@
 
 --- a/fs/namespace.c
 +++ b/fs/namespace.c
-@@ -353,8 +353,11 @@ int __mnt_want_write(struct vfsmount *m)
+@@ -356,8 +356,11 @@ int __mnt_want_write(struct vfsmount *m)
  	 * incremented count after it has set MNT_WRITE_HOLD.
  	 */
  	smp_mb();
diff --git a/patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch b/patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
index 04f8ebd..d3b2660 100644
--- a/patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
+++ b/patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
@@ -21,7 +21,7 @@
 
 --- a/fs/nfs/dir.c
 +++ b/fs/nfs/dir.c
-@@ -1805,7 +1805,11 @@ int nfs_rmdir(struct inode *dir, struct
+@@ -1813,7 +1813,11 @@ int nfs_rmdir(struct inode *dir, struct
  
  	trace_nfs_rmdir_enter(dir, dentry);
  	if (d_really_is_positive(dentry)) {
@@ -33,7 +33,7 @@
  		error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
  		/* Ensure the VFS deletes this inode */
  		switch (error) {
-@@ -1815,7 +1819,11 @@ int nfs_rmdir(struct inode *dir, struct
+@@ -1823,7 +1827,11 @@ int nfs_rmdir(struct inode *dir, struct
  		case -ENOENT:
  			nfs_dentry_handle_enoent(dentry);
  		}
diff --git a/patches/fs-replace-bh_uptodate_lock-for-rt.patch b/patches/fs-replace-bh_uptodate_lock-for-rt.patch
index f1ad50c..e30f00d 100644
--- a/patches/fs-replace-bh_uptodate_lock-for-rt.patch
+++ b/patches/fs-replace-bh_uptodate_lock-for-rt.patch
@@ -73,7 +73,7 @@
  }
  EXPORT_SYMBOL(end_buffer_async_write);
  
-@@ -3384,6 +3376,7 @@ struct buffer_head *alloc_buffer_head(gf
+@@ -3383,6 +3375,7 @@ struct buffer_head *alloc_buffer_head(gf
  	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
  	if (ret) {
  		INIT_LIST_HEAD(&ret->b_assoc_buffers);
diff --git a/patches/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch b/patches/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
index edc01e4..415a19d 100644
--- a/patches/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
+++ b/patches/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
@@ -30,7 +30,7 @@
 
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -895,7 +895,9 @@ void exit_pi_state_list(struct task_stru
+@@ -904,7 +904,9 @@ void exit_pi_state_list(struct task_stru
  		 * task still owns the PI-state:
  		 */
  		if (head->next != next) {
diff --git a/patches/genirq-force-threading.patch b/patches/genirq-force-threading.patch
index a20c2ee..022c3f6 100644
--- a/patches/genirq-force-threading.patch
+++ b/patches/genirq-force-threading.patch
@@ -13,7 +13,7 @@
 
 --- a/include/linux/interrupt.h
 +++ b/include/linux/interrupt.h
-@@ -398,9 +398,13 @@ extern int irq_set_irqchip_state(unsigne
+@@ -406,9 +406,13 @@ extern int irq_set_irqchip_state(unsigne
  				 bool state);
  
  #ifdef CONFIG_IRQ_FORCED_THREADING
diff --git a/patches/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch b/patches/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
index 9ef8650..fca9ea6 100644
--- a/patches/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
+++ b/patches/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
@@ -35,7 +35,7 @@
 
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -345,7 +345,7 @@ static int sync_unplug_thread(void *data
+@@ -418,7 +418,7 @@ static int sync_unplug_thread(void *data
  	 * we don't want any more work on this CPU.
  	 */
  	current->flags &= ~PF_NO_SETAFFINITY;
diff --git a/patches/hotplug-light-get-online-cpus.patch b/patches/hotplug-light-get-online-cpus.patch
index 0297231..998480b 100644
--- a/patches/hotplug-light-get-online-cpus.patch
+++ b/patches/hotplug-light-get-online-cpus.patch
@@ -18,7 +18,7 @@
 
 --- a/include/linux/cpu.h
 +++ b/include/linux/cpu.h
-@@ -185,9 +185,6 @@ static inline void cpu_notifier_register
+@@ -180,9 +180,6 @@ static inline void cpu_notifier_register
  #endif /* CONFIG_SMP */
  extern struct bus_type cpu_subsys;
  
@@ -28,7 +28,7 @@
  #ifdef CONFIG_HOTPLUG_CPU
  /* Stop CPUs going up and down. */
  
-@@ -197,6 +194,8 @@ extern void get_online_cpus(void);
+@@ -192,6 +189,8 @@ extern void get_online_cpus(void);
  extern void put_online_cpus(void);
  extern void cpu_hotplug_disable(void);
  extern void cpu_hotplug_enable(void);
@@ -37,7 +37,7 @@
  #define hotcpu_notifier(fn, pri)	cpu_notifier(fn, pri)
  #define __hotcpu_notifier(fn, pri)	__cpu_notifier(fn, pri)
  #define register_hotcpu_notifier(nb)	register_cpu_notifier(nb)
-@@ -214,6 +213,8 @@ static inline void cpu_hotplug_done(void
+@@ -209,6 +208,8 @@ static inline void cpu_hotplug_done(void
  #define put_online_cpus()	do { } while (0)
  #define cpu_hotplug_disable()	do { } while (0)
  #define cpu_hotplug_enable()	do { } while (0)
@@ -48,7 +48,7 @@
  /* These aren't inline functions due to a GCC bug. */
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -166,6 +166,100 @@ static struct {
+@@ -239,6 +239,100 @@ static struct {
  #define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
  #define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
  
@@ -149,7 +149,7 @@
  
  void get_online_cpus(void)
  {
-@@ -799,6 +893,8 @@ static int __ref _cpu_down(unsigned int
+@@ -877,6 +971,8 @@ static int __ref _cpu_down(unsigned int
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  	int prev_state, ret = 0;
  	bool hasdied = false;
@@ -158,7 +158,7 @@
  
  	if (num_online_cpus() == 1)
  		return -EBUSY;
-@@ -806,7 +902,27 @@ static int __ref _cpu_down(unsigned int
+@@ -884,7 +980,27 @@ static int __ref _cpu_down(unsigned int
  	if (!cpu_present(cpu))
  		return -EINVAL;
  
@@ -186,7 +186,7 @@
  
  	cpuhp_tasks_frozen = tasks_frozen;
  
-@@ -845,6 +961,8 @@ static int __ref _cpu_down(unsigned int
+@@ -923,6 +1039,8 @@ static int __ref _cpu_down(unsigned int
  
  	hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
  out:
diff --git a/patches/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch b/patches/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
index 217c46e..3035be2 100644
--- a/patches/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
+++ b/patches/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
@@ -13,7 +13,7 @@
 
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -245,7 +245,7 @@ static int cpu_unplug_begin(unsigned int
+@@ -318,7 +318,7 @@ static int cpu_unplug_begin(unsigned int
  	struct task_struct *tsk;
  
  	init_completion(&hp->synced);
diff --git a/patches/hotplug-use-migrate-disable.patch b/patches/hotplug-use-migrate-disable.patch
index 2c0fa02..7902343 100644
--- a/patches/hotplug-use-migrate-disable.patch
+++ b/patches/hotplug-use-migrate-disable.patch
@@ -12,7 +12,7 @@
 
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -910,14 +910,13 @@ static int __ref _cpu_down(unsigned int
+@@ -988,14 +988,13 @@ static int __ref _cpu_down(unsigned int
  	cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
  	set_cpus_allowed_ptr(current, cpumask);
  	free_cpumask_var(cpumask);
@@ -29,7 +29,7 @@
  
  	cpu_hotplug_begin();
  	ret = cpu_unplug_begin(cpu);
-@@ -966,6 +965,7 @@ static int __ref _cpu_down(unsigned int
+@@ -1044,6 +1043,7 @@ static int __ref _cpu_down(unsigned int
  	cpu_unplug_done(cpu);
  out_cancel:
  	cpu_hotplug_done();
diff --git a/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch b/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
index 98c116e..081a6b7 100644
--- a/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
+++ b/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
@@ -320,7 +320,7 @@
  /**
 --- a/kernel/time/tick-sched.c
 +++ b/kernel/time/tick-sched.c
-@@ -1195,6 +1195,7 @@ void tick_setup_sched_timer(void)
+@@ -1198,6 +1198,7 @@ void tick_setup_sched_timer(void)
  	 * Emulate tick processing via per-CPU hrtimers:
  	 */
  	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
diff --git a/patches/hwlat-detector-Don-t-ignore-threshold-module-paramet.patch b/patches/hwlat-detector-Don-t-ignore-threshold-module-paramet.patch
deleted file mode 100644
index e4cba99..0000000
--- a/patches/hwlat-detector-Don-t-ignore-threshold-module-paramet.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-From: Mike Galbraith <bitbucket@online.de>
-Date: Fri, 30 Aug 2013 07:57:25 +0200
-Subject: hwlat-detector: Don't ignore threshold module parameter
-
-If the user specified a threshold at module load time, use it.
-
-
-Acked-by: Steven Rostedt <rostedt@goodmis.org>
-Signed-off-by: Mike Galbraith <bitbucket@online.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/misc/hwlat_detector.c |    2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/misc/hwlat_detector.c
-+++ b/drivers/misc/hwlat_detector.c
-@@ -414,7 +414,7 @@ static int init_stats(void)
- 		goto out;
- 
- 	__reset_stats();
--	data.threshold = DEFAULT_LAT_THRESHOLD;	    /* threshold us */
-+	data.threshold = threshold ?: DEFAULT_LAT_THRESHOLD; /* threshold us */
- 	data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */
- 	data.sample_width = DEFAULT_SAMPLE_WIDTH;   /* width us */
- 
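
The one-liner above leans on GCC's binary conditional ("elvis")
operator: a ?: b evaluates to a when a is non-zero, otherwise to b.
A minimal user-space sketch of that semantics (illustrative names, not
the driver code; ?: is a GNU extension, so build with gcc or clang):

#include <stdio.h>

#define DEFAULT_LAT_THRESHOLD	10	/* 10us, as in the driver */

static int threshold;	/* 0 unless set, like an unset module_param */

int main(void)
{
	threshold = 0;
	printf("unset -> %d\n", threshold ?: DEFAULT_LAT_THRESHOLD); /* 10 */

	threshold = 100;
	printf("set   -> %d\n", threshold ?: DEFAULT_LAT_THRESHOLD); /* 100 */
	return 0;
}
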
diff --git a/patches/hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch b/patches/hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch
deleted file mode 100644
index 58f97a8..0000000
--- a/patches/hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch
+++ /dev/null
@@ -1,125 +0,0 @@
-From: Steven Rostedt <rostedt@goodmis.org>
-Date: Mon, 19 Aug 2013 17:33:25 -0400
-Subject: hwlat-detector: Update hwlat_detector to add outer loop detection
-
-The hwlat_detector reads two timestamps in a row, then reports any
-gap between those calls. The problem is, it misses everything between
-the second reading of the time stamp and the first reading of the time stamp
-in the next loop. That's where most of the time is spent, which means
-it is likely to miss all hardware latencies. This
-defeats the purpose.
-
-By also testing the gap between the second time stamp of the previous
-loop and the first time stamp of the next loop (the outer loop), we are
-more likely to find a latency.
-
-Setting the threshold to 1, here's what the report now looks like:
-
-1347415723.0232202770	0	2
-1347415725.0234202822	0	2
-1347415727.0236202875	0	2
-1347415729.0238202928	0	2
-1347415731.0240202980	0	2
-1347415734.0243203061	0	2
-1347415736.0245203113	0	2
-1347415738.0247203166	2	0
-1347415740.0249203219	0	3
-1347415742.0251203272	0	3
-1347415743.0252203299	0	3
-1347415745.0254203351	0	2
-1347415747.0256203404	0	2
-1347415749.0258203457	0	2
-1347415751.0260203510	0	2
-1347415754.0263203589	0	2
-1347415756.0265203642	0	2
-1347415758.0267203695	0	2
-1347415760.0269203748	0	2
-1347415762.0271203801	0	2
-1347415764.0273203853	2	0
-
-There's some hardware latency that takes 2 microseconds to run.
-
-Signed-off-by: Steven Rostedt <srostedt@redhat.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/misc/hwlat_detector.c |   32 ++++++++++++++++++++++++++------
- 1 file changed, 26 insertions(+), 6 deletions(-)
-
---- a/drivers/misc/hwlat_detector.c
-+++ b/drivers/misc/hwlat_detector.c
-@@ -143,6 +143,7 @@ static void detector_exit(void);
- struct sample {
- 	u64		seqnum;		/* unique sequence */
- 	u64		duration;	/* ktime delta */
-+	u64		outer_duration;	/* ktime delta (outer loop) */
- 	struct timespec	timestamp;	/* wall time */
- 	unsigned long   lost;
- };
-@@ -219,11 +220,13 @@ static struct sample *buffer_get_sample(
-  */
- static int get_sample(void *unused)
- {
--	ktime_t start, t1, t2;
-+	ktime_t start, t1, t2, last_t2;
- 	s64 diff, total = 0;
- 	u64 sample = 0;
-+	u64 outer_sample = 0;
- 	int ret = 1;
- 
-+	last_t2.tv64 = 0;
- 	start = ktime_get(); /* start timestamp */
- 
- 	do {
-@@ -231,7 +234,22 @@ static int get_sample(void *unused)
- 		t1 = ktime_get();	/* we'll look for a discontinuity */
- 		t2 = ktime_get();
- 
-+		if (last_t2.tv64) {
-+			/* Check the delta from outer loop (t2 to next t1) */
-+			diff = ktime_to_us(ktime_sub(t1, last_t2));
-+			/* This shouldn't happen */
-+			if (diff < 0) {
-+				pr_err(BANNER "time running backwards\n");
-+				goto out;
-+			}
-+			if (diff > outer_sample)
-+				outer_sample = diff;
-+		}
-+		last_t2 = t2;
-+
- 		total = ktime_to_us(ktime_sub(t2, start)); /* sample width */
-+
-+		/* This checks the inner loop (t1 to t2) */
- 		diff = ktime_to_us(ktime_sub(t2, t1));     /* current diff */
- 
- 		/* This shouldn't happen */
-@@ -246,12 +264,13 @@ static int get_sample(void *unused)
- 	} while (total <= data.sample_width);
- 
- 	/* If we exceed the threshold value, we have found a hardware latency */
--	if (sample > data.threshold) {
-+	if (sample > data.threshold || outer_sample > data.threshold) {
- 		struct sample s;
- 
- 		data.count++;
- 		s.seqnum = data.count;
- 		s.duration = sample;
-+		s.outer_duration = outer_sample;
- 		s.timestamp = CURRENT_TIME;
- 		__buffer_add_sample(&s);
- 
-@@ -738,10 +757,11 @@ static ssize_t debug_sample_fread(struct
- 		}
- 	}
- 
--	len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\n",
--		      sample->timestamp.tv_sec,
--		      sample->timestamp.tv_nsec,
--		      sample->duration);
-+	len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\t%llu\n",
-+		       sample->timestamp.tv_sec,
-+		       sample->timestamp.tv_nsec,
-+		       sample->duration,
-+		       sample->outer_duration);
- 
- 
- 	/* handling partial reads is more trouble than it's worth */
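
For reference, a user-space sketch of the inner/outer sampling idea the
patch describes, with clock_gettime(CLOCK_MONOTONIC) standing in for
ktime_get(); the names and the 0.5s sample width are illustrative:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

static int64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

int main(void)
{
	int64_t t1, t2, last_t2 = 0;
	int64_t inner_max = 0, outer_max = 0;
	int64_t start = now_us();

	do {
		t1 = now_us();	/* inner window: t1 -> t2 */
		t2 = now_us();

		/* outer window: previous t2 -> this t1 */
		if (last_t2 && t1 - last_t2 > outer_max)
			outer_max = t1 - last_t2;
		last_t2 = t2;

		if (t2 - t1 > inner_max)
			inner_max = t2 - t1;
	} while (now_us() - start < 500000);	/* 0.5s sample width */

	printf("max inner gap %lld us, max outer gap %lld us\n",
	       (long long)inner_max, (long long)outer_max);
	return 0;
}
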
diff --git a/patches/hwlat-detector-Use-thread-instead-of-stop-machine.patch b/patches/hwlat-detector-Use-thread-instead-of-stop-machine.patch
deleted file mode 100644
index fe1a435..0000000
--- a/patches/hwlat-detector-Use-thread-instead-of-stop-machine.patch
+++ /dev/null
@@ -1,183 +0,0 @@
-From: Steven Rostedt <rostedt@goodmis.org>
-Date: Mon, 19 Aug 2013 17:33:27 -0400
-Subject: hwlat-detector: Use thread instead of stop machine
-
-There's no reason to use stop_machine to search for hardware latency.
-Simply disabling interrupts while running the loop is enough to detect
-anything that sneaks in despite interrupts being off, which is exactly
-what stop_machine gives us anyway.
-
-Instead of using stop machine, just have the thread disable interrupts
-while it checks for hardware latency.
-
-Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/misc/hwlat_detector.c |   60 ++++++++++++++++++------------------------
- 1 file changed, 26 insertions(+), 34 deletions(-)
-
---- a/drivers/misc/hwlat_detector.c
-+++ b/drivers/misc/hwlat_detector.c
-@@ -41,7 +41,6 @@
- #include <linux/module.h>
- #include <linux/init.h>
- #include <linux/ring_buffer.h>
--#include <linux/stop_machine.h>
- #include <linux/time.h>
- #include <linux/hrtimer.h>
- #include <linux/kthread.h>
-@@ -107,7 +106,6 @@ struct data;					/* Global state */
- /* Sampling functions */
- static int __buffer_add_sample(struct sample *sample);
- static struct sample *buffer_get_sample(struct sample *sample);
--static int get_sample(void *unused);
- 
- /* Threading and state */
- static int kthread_fn(void *unused);
-@@ -149,7 +147,7 @@ struct sample {
- 	unsigned long   lost;
- };
- 
--/* keep the global state somewhere. Mostly used under stop_machine. */
-+/* keep the global state somewhere. */
- static struct data {
- 
- 	struct mutex lock;		/* protect changes */
-@@ -172,7 +170,7 @@ static struct data {
-  * @sample: The new latency sample value
-  *
-  * This receives a new latency sample and records it in a global ring buffer.
-- * No additional locking is used in this case - suited for stop_machine use.
-+ * No additional locking is used in this case.
-  */
- static int __buffer_add_sample(struct sample *sample)
- {
-@@ -229,18 +227,18 @@ static struct sample *buffer_get_sample(
- #endif
- /**
-  * get_sample - sample the CPU TSC and look for likely hardware latencies
-- * @unused: This is not used but is a part of the stop_machine API
-  *
-  * Used to repeatedly capture the CPU TSC (or similar), looking for potential
-- * hardware-induced latency. Called under stop_machine, with data.lock held.
-+ * hardware-induced latency. Called with interrupts disabled and with
-+ * data.lock held.
-  */
--static int get_sample(void *unused)
-+static int get_sample(void)
- {
- 	time_type start, t1, t2, last_t2;
- 	s64 diff, total = 0;
- 	u64 sample = 0;
- 	u64 outer_sample = 0;
--	int ret = 1;
-+	int ret = -1;
- 
- 	init_time(last_t2, 0);
- 	start = time_get(); /* start timestamp */
-@@ -279,10 +277,14 @@ static int get_sample(void *unused)
- 
- 	} while (total <= data.sample_width);
- 
-+	ret = 0;
-+
- 	/* If we exceed the threshold value, we have found a hardware latency */
- 	if (sample > data.threshold || outer_sample > data.threshold) {
- 		struct sample s;
- 
-+		ret = 1;
-+
- 		data.count++;
- 		s.seqnum = data.count;
- 		s.duration = sample;
-@@ -295,7 +297,6 @@ static int get_sample(void *unused)
- 			data.max_sample = sample;
- 	}
- 
--	ret = 0;
- out:
- 	return ret;
- }
-@@ -305,32 +306,30 @@ static int get_sample(void *unused)
-  * @unused: A required part of the kthread API.
-  *
-  * Used to periodically sample the CPU TSC via a call to get_sample. We
-- * use stop_machine, whith does (intentionally) introduce latency since we
-+ * disable interrupts, which does (intentionally) introduce latency since we
-  * need to ensure nothing else might be running (and thus pre-empting).
-  * Obviously this should never be used in production environments.
-  *
-- * stop_machine will schedule us typically only on CPU0 which is fine for
-- * almost every real-world hardware latency situation - but we might later
-- * generalize this if we find there are any actualy systems with alternate
-- * SMI delivery or other non CPU0 hardware latencies.
-+ * Currently this runs on whichever CPU it was scheduled on, but most
-+ * real-world hardware latency situations occur across several CPUs,
-+ * though we might later generalize this if we find there are any actual
-+ * systems with alternate SMI delivery or other hardware latencies.
-  */
- static int kthread_fn(void *unused)
- {
--	int err = 0;
--	u64 interval = 0;
-+	int ret;
-+	u64 interval;
- 
- 	while (!kthread_should_stop()) {
- 
- 		mutex_lock(&data.lock);
- 
--		err = stop_machine(get_sample, unused, 0);
--		if (err) {
--			/* Houston, we have a problem */
--			mutex_unlock(&data.lock);
--			goto err_out;
--		}
-+		local_irq_disable();
-+		ret = get_sample();
-+		local_irq_enable();
- 
--		wake_up(&data.wq); /* wake up reader(s) */
-+		if (ret > 0)
-+			wake_up(&data.wq); /* wake up reader(s) */
- 
- 		interval = data.sample_window - data.sample_width;
- 		do_div(interval, USEC_PER_MSEC); /* modifies interval value */
-@@ -338,15 +337,10 @@ static int kthread_fn(void *unused)
- 		mutex_unlock(&data.lock);
- 
- 		if (msleep_interruptible(interval))
--			goto out;
-+			break;
- 	}
--		goto out;
--err_out:
--	pr_err(BANNER "could not call stop_machine, disabling\n");
--	enabled = 0;
--out:
--	return err;
- 
-+	return 0;
- }
- 
- /**
-@@ -442,8 +436,7 @@ static int init_stats(void)
-  * This function provides a generic read implementation for the global state
-  * "data" structure debugfs filesystem entries. It would be nice to use
-  * simple_attr_read directly, but we need to make sure that the data.lock
-- * spinlock is held during the actual read (even though we likely won't ever
-- * actually race here as the updater runs under a stop_machine context).
-+ * is held during the actual read.
-  */
- static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
- 				size_t cnt, loff_t *ppos, const u64 *entry)
-@@ -478,8 +471,7 @@ static ssize_t simple_data_read(struct f
-  * This function provides a generic write implementation for the global state
-  * "data" structure debugfs filesystem entries. It would be nice to use
-  * simple_attr_write directly, but we need to make sure that the data.lock
-- * spinlock is held during the actual write (even though we likely won't ever
-- * actually race here as the updater runs under a stop_machine context).
-+ * is held during the actual write.
-  */
- static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
- 				 size_t cnt, loff_t *ppos, u64 *entry)
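
The resulting loop shape, as a kernel-style sketch (sample_wq, sleep_ms
and the stubbed-out get_sample() stand in for the driver's own symbols;
this shows the structure the patch creates, not the actual driver --
module init would start it with kthread_run(sampling_fn, NULL, ...)):

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/irqflags.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(sample_wq);
static unsigned long sleep_ms = 500;	/* window minus width, in ms */

static int get_sample(void)
{
	/* stand-in: the real one busy-polls timestamps with IRQs off
	 * and returns >0 when a latency above the threshold was seen */
	return 0;
}

static int sampling_fn(void *unused)
{
	int ret;

	while (!kthread_should_stop()) {
		local_irq_disable();	/* quiesce only this CPU */
		ret = get_sample();
		local_irq_enable();

		if (ret > 0)
			wake_up(&sample_wq);	/* readers: new sample */

		/* idle out the remainder of the window */
		if (msleep_interruptible(sleep_ms))
			break;		/* signal: stop sampling */
	}
	return 0;
}
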
diff --git a/patches/hwlat-detector-Use-trace_clock_local-if-available.patch b/patches/hwlat-detector-Use-trace_clock_local-if-available.patch
deleted file mode 100644
index a45adaa..0000000
--- a/patches/hwlat-detector-Use-trace_clock_local-if-available.patch
+++ /dev/null
@@ -1,92 +0,0 @@
-From: Steven Rostedt <rostedt@goodmis.org>
-Date: Mon, 19 Aug 2013 17:33:26 -0400
-Subject: hwlat-detector: Use trace_clock_local if available
-
-As ktime_get() calls into the timing code, which does a read_seq(), it
-may be affected by other CPUs that touch that lock. To remove this
-dependency, use the trace_clock_local() which is already exported
-for module use. If CONFIG_TRACING is enabled, use that as the clock,
-otherwise use ktime_get().
-
-Signed-off-by: Steven Rostedt <srostedt@redhat.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/misc/hwlat_detector.c |   34 +++++++++++++++++++++++++---------
- 1 file changed, 25 insertions(+), 9 deletions(-)
-
---- a/drivers/misc/hwlat_detector.c
-+++ b/drivers/misc/hwlat_detector.c
-@@ -51,6 +51,7 @@
- #include <linux/version.h>
- #include <linux/delay.h>
- #include <linux/slab.h>
-+#include <linux/trace_clock.h>
- 
- #define BUF_SIZE_DEFAULT	262144UL		/* 8K*(sizeof(entry)) */
- #define BUF_FLAGS		(RB_FL_OVERWRITE)	/* no block on full */
-@@ -211,6 +212,21 @@ static struct sample *buffer_get_sample(
- 	return sample;
- }
- 
-+#ifndef CONFIG_TRACING
-+#define time_type	ktime_t
-+#define time_get()	ktime_get()
-+#define time_to_us(x)	ktime_to_us(x)
-+#define time_sub(a, b)	ktime_sub(a, b)
-+#define init_time(a, b)	(a).tv64 = b
-+#define time_u64(a)	((a).tv64)
-+#else
-+#define time_type	u64
-+#define time_get()	trace_clock_local()
-+#define time_to_us(x)	div_u64(x, 1000)
-+#define time_sub(a, b)	((a) - (b))
-+#define init_time(a, b)	(a = b)
-+#define time_u64(a)	a
-+#endif
- /**
-  * get_sample - sample the CPU TSC and look for likely hardware latencies
-  * @unused: This is not used but is a part of the stop_machine API
-@@ -220,23 +236,23 @@ static struct sample *buffer_get_sample(
-  */
- static int get_sample(void *unused)
- {
--	ktime_t start, t1, t2, last_t2;
-+	time_type start, t1, t2, last_t2;
- 	s64 diff, total = 0;
- 	u64 sample = 0;
- 	u64 outer_sample = 0;
- 	int ret = 1;
- 
--	last_t2.tv64 = 0;
--	start = ktime_get(); /* start timestamp */
-+	init_time(last_t2, 0);
-+	start = time_get(); /* start timestamp */
- 
- 	do {
- 
--		t1 = ktime_get();	/* we'll look for a discontinuity */
--		t2 = ktime_get();
-+		t1 = time_get();	/* we'll look for a discontinuity */
-+		t2 = time_get();
- 
--		if (last_t2.tv64) {
-+		if (time_u64(last_t2)) {
- 			/* Check the delta from outer loop (t2 to next t1) */
--			diff = ktime_to_us(ktime_sub(t1, last_t2));
-+			diff = time_to_us(time_sub(t1, last_t2));
- 			/* This shouldn't happen */
- 			if (diff < 0) {
- 				pr_err(BANNER "time running backwards\n");
-@@ -247,10 +263,10 @@ static int get_sample(void *unused)
- 		}
- 		last_t2 = t2;
- 
--		total = ktime_to_us(ktime_sub(t2, start)); /* sample width */
-+		total = time_to_us(time_sub(t2, start)); /* sample width */
- 
- 		/* This checks the inner loop (t1 to t2) */
--		diff = ktime_to_us(ktime_sub(t2, t1));     /* current diff */
-+		diff = time_to_us(time_sub(t2, t1));     /* current diff */
- 
- 		/* This shouldn't happen */
- 		if (diff < 0) {
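
A user-space sketch of the same clock-selection trick, with
CLOCK_MONOTONIC/CLOCK_MONOTONIC_COARSE standing in for ktime_get() and
trace_clock_local(), and an invented USE_COARSE_CLOCK define in place
of CONFIG_TRACING; the point is that the measurement code is written
once against time_get_ns() and never names the clock it uses:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

static int64_t time_get_ns(void)
{
#ifdef USE_COARSE_CLOCK
	const clockid_t clk = CLOCK_MONOTONIC_COARSE;	/* cheap, coarse */
#else
	const clockid_t clk = CLOCK_MONOTONIC;		/* precise */
#endif
	struct timespec ts;

	clock_gettime(clk, &ts);
	return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
	int64_t t1 = time_get_ns();
	int64_t t2 = time_get_ns();

	printf("back-to-back delta: %lld ns\n", (long long)(t2 - t1));
	return 0;
}
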
diff --git a/patches/hwlatdetect.patch b/patches/hwlatdetect.patch
deleted file mode 100644
index 93df5c5..0000000
--- a/patches/hwlatdetect.patch
+++ /dev/null
@@ -1,1347 +0,0 @@
-Subject: hwlatdetect.patch
-From: Carsten Emde <C.Emde@osadl.org>
-Date: Tue, 19 Jul 2011 13:53:12 +0100
-
-Jon Masters developed this wonderful SMI detector. For details please
-consult Documentation/hwlat_detector.txt. It could be ported to Linux
-3.0 RT without any major change.
-
-Signed-off-by: Carsten Emde <C.Emde@osadl.org>
-
----
- Documentation/hwlat_detector.txt |   64 ++
- drivers/misc/Kconfig             |   29 
- drivers/misc/Makefile            |    1 
- drivers/misc/hwlat_detector.c    | 1212 +++++++++++++++++++++++++++++++++++++++
- 4 files changed, 1306 insertions(+)
-
---- /dev/null
-+++ b/Documentation/hwlat_detector.txt
-@@ -0,0 +1,64 @@
-+Introduction:
-+-------------
-+
-+The module hwlat_detector is a special purpose kernel module that is used to
-+detect large system latencies induced by the behavior of certain underlying
-+hardware or firmware, independent of Linux itself. The code was developed
-+originally to detect SMIs (System Management Interrupts) on x86 systems,
-+however there is nothing x86 specific about this patchset. It was
-+originally written for use by the "RT" patch since the Real Time
-+kernel is highly latency sensitive.
-+
-+SMIs are usually not serviced by the Linux kernel, which typically does not
-+even know that they are occurring. SMIs are instead set up by BIOS code
-+and serviced by BIOS code, usually for "critical" events such as
-+management of thermal sensors and fans. Sometimes though, SMIs are used for
-+other tasks and those tasks can spend an inordinate amount of time in the
-+handler (sometimes measured in milliseconds). Obviously this is a problem if
-+you are trying to keep event service latencies down in the microsecond range.
-+
-+The hardware latency detector works by hogging all of the CPUs for configurable
-+amounts of time (by calling stop_machine()), polling the CPU Time Stamp Counter
-+for some period, then looking for gaps in the TSC data. Any gap indicates a
-+time when the polling was interrupted; since the machine is stopped and
-+interrupts are turned off, the only thing that could do that would be an SMI.
-+
-+Note that the SMI detector should *NEVER* be used in a production environment.
-+It is intended to be run manually to determine if the hardware platform has a
-+problem with long system firmware service routines.
-+
-+Usage:
-+------
-+
-+Loading the module hwlat_detector with the parameter "enabled=1" (or toggling
-+on the "enable" entry in the "hwlat_detector" debugfs directory) is the only
-+step required to start the hwlat_detector. It is possible to redefine the
-+threshold in microseconds (us) above which latency spikes will be taken
-+into account (parameter "threshold=").
-+
-+Example:
-+
-+	# modprobe hwlat_detector enabled=1 threshold=100
-+
-+After the module is loaded, it creates a directory named "hwlat_detector" under
-+the debugfs mountpoint ("/debug/hwlat_detector" in this text). It is necessary
-+to have debugfs mounted, which might be on /sys/debug on your system.
-+
-+The /debug/hwlat_detector interface contains the following files:
-+
-+count			- number of latency spikes observed since last reset
-+enable			- a global enable/disable toggle (0/1), resets count
-+max			- maximum hardware latency actually observed (usecs)
-+sample			- a pipe from which to read current raw sample data
-+			  in the format <timestamp> <latency observed usecs>
-+			  (can be opened O_NONBLOCK for a single sample)
-+threshold		- minimum latency value to be considered (usecs)
-+width			- time period to sample with CPUs held (usecs)
-+			  must be less than the total window size (enforced)
-+window			- total period of sampling, width being inside (usecs)
-+
-+By default we will set width to 500,000 and window to 1,000,000, meaning that
-+we will sample every 1,000,000 usecs (1s) for 500,000 usecs (0.5s). If we
-+observe any latencies that exceed the threshold (initially 100 usecs),
-+then we write to a global sample ring buffer of 8K samples, which is
-+consumed by reading from the "sample" (pipe) debugfs file interface.
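
To illustrate the single-sample O_NONBLOCK read described above, a
small sketch (assumes debugfs is mounted at /debug, as in this text):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd = open("/debug/hwlat_detector/sample", O_RDONLY | O_NONBLOCK);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);	/* errno EAGAIN: no sample */
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);	/* "<timestamp>\t<latency us>" */
	}
	close(fd);
	return 0;
}
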
---- a/drivers/misc/Kconfig
-+++ b/drivers/misc/Kconfig
-@@ -122,6 +122,35 @@ config IBM_ASM
- 	  for information on the specific driver level and support statement
- 	  for your IBM server.
- 
-+config HWLAT_DETECTOR
-+	tristate "Testing module to detect hardware-induced latencies"
-+	depends on DEBUG_FS
-+	depends on RING_BUFFER
-+	default m
-+	---help---
-+	  A simple hardware latency detector. Use this module to detect
-+	  large latencies introduced by the behavior of the underlying
-+	  system firmware external to Linux. We do this using periodic
-+	  use of stop_machine to grab all available CPUs and measure
-+	  for unexplainable gaps in the CPU timestamp counter(s). By
-+	  default, the module is not enabled until the "enable" file
-+	  within the "hwlat_detector" debugfs directory is toggled.
-+
-+	  This module is often used to detect SMI (System Management
-+	  Interrupts) on x86 systems, though is not x86 specific. To
-+	  this end, we default to using a sample window of 1 second,
-+	  during which we will sample for 0.5 seconds. If an SMI or
-+	  similar event occurs during that time, it is recorded
-+	  into an 8K-sample global ring buffer until retrieved.
-+
-+	  WARNING: This software should never be enabled (it can be built
-+	  but should not be turned on after it is loaded) in a production
-+	  environment where high latencies are a concern since the
-+	  sampling mechanism actually introduces latencies for
-+	  regular tasks while the CPU(s) are being held.
-+
-+	  If unsure, say N
-+
- config PHANTOM
- 	tristate "Sensable PHANToM (PCI)"
- 	depends on PCI
---- a/drivers/misc/Makefile
-+++ b/drivers/misc/Makefile
-@@ -38,6 +38,7 @@ obj-$(CONFIG_C2PORT)		+= c2port/
- obj-$(CONFIG_HMC6352)		+= hmc6352.o
- obj-y				+= eeprom/
- obj-y				+= cb710/
-+obj-$(CONFIG_HWLAT_DETECTOR)	+= hwlat_detector.o
- obj-$(CONFIG_SPEAR13XX_PCIE_GADGET)	+= spear13xx_pcie_gadget.o
- obj-$(CONFIG_VMWARE_BALLOON)	+= vmw_balloon.o
- obj-$(CONFIG_ARM_CHARLCD)	+= arm-charlcd.o
---- /dev/null
-+++ b/drivers/misc/hwlat_detector.c
-@@ -0,0 +1,1212 @@
-+/*
-+ * hwlat_detector.c - A simple Hardware Latency detector.
-+ *
-+ * Use this module to detect large system latencies induced by the behavior of
-+ * certain underlying system hardware or firmware, independent of Linux itself.
-+ * The code was developed originally to detect the presence of SMIs on Intel
-+ * and AMD systems, although there is no dependency upon x86 herein.
-+ *
-+ * The classical example usage of this module is in detecting the presence of
-+ * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a
-+ * somewhat special form of hardware interrupt spawned from earlier CPU debug
-+ * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge
-+ * LPC (or other device) to generate a special interrupt under certain
-+ * circumstances, for example, upon expiration of a special SMI timer device,
-+ * due to certain external thermal readings, on certain I/O address accesses,
-+ * and other situations. An SMI hits a special CPU pin, triggers a special
-+ * SMI mode (complete with special memory map), and the OS is unaware.
-+ *
-+ * Although certain hardware-inducing latencies are necessary (for example,
-+ * a modern system often requires an SMI handler for correct thermal control
-+ * and remote management) they can wreak havoc upon any OS-level performance
-+ * guarantees toward low-latency, especially when the OS is not even made
-+ * aware of the presence of these interrupts. For this reason, we need a
-+ * somewhat brute force mechanism to detect these interrupts. In this case,
-+ * we do it by hogging all of the CPU(s) for configurable timer intervals,
-+ * sampling the built-in CPU timer, looking for discontiguous readings.
-+ *
-+ * WARNING: This implementation necessarily introduces latencies. Therefore,
-+ *          you should NEVER use this module in a production environment
-+ *          requiring any kind of low-latency performance guarantee(s).
-+ *
-+ * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
-+ *
-+ * Includes useful feedback from Clark Williams <clark@redhat.com>
-+ *
-+ * This file is licensed under the terms of the GNU General Public
-+ * License version 2. This program is licensed "as is" without any
-+ * warranty of any kind, whether express or implied.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/ring_buffer.h>
-+#include <linux/stop_machine.h>
-+#include <linux/time.h>
-+#include <linux/hrtimer.h>
-+#include <linux/kthread.h>
-+#include <linux/debugfs.h>
-+#include <linux/seq_file.h>
-+#include <linux/uaccess.h>
-+#include <linux/version.h>
-+#include <linux/delay.h>
-+#include <linux/slab.h>
-+
-+#define BUF_SIZE_DEFAULT	262144UL		/* 8K*(sizeof(entry)) */
-+#define BUF_FLAGS		(RB_FL_OVERWRITE)	/* no block on full */
-+#define U64STR_SIZE		22			/* 20 digits max */
-+
-+#define VERSION			"1.0.0"
-+#define BANNER			"hwlat_detector: "
-+#define DRVNAME			"hwlat_detector"
-+#define DEFAULT_SAMPLE_WINDOW	1000000			/* 1s */
-+#define DEFAULT_SAMPLE_WIDTH	500000			/* 0.5s */
-+#define DEFAULT_LAT_THRESHOLD	10			/* 10us */
-+
-+/* Module metadata */
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jon Masters <jcm@redhat.com>");
-+MODULE_DESCRIPTION("A simple hardware latency detector");
-+MODULE_VERSION(VERSION);
-+
-+/* Module parameters */
-+
-+static int debug;
-+static int enabled;
-+static int threshold;
-+
-+module_param(debug, int, 0);			/* enable debug */
-+module_param(enabled, int, 0);			/* enable detector */
-+module_param(threshold, int, 0);		/* latency threshold */
-+
-+/* Buffering and sampling */
-+
-+static struct ring_buffer *ring_buffer;		/* sample buffer */
-+static DEFINE_MUTEX(ring_buffer_mutex);		/* lock changes */
-+static unsigned long buf_size = BUF_SIZE_DEFAULT;
-+static struct task_struct *kthread;		/* sampling thread */
-+
-+/* DebugFS filesystem entries */
-+
-+static struct dentry *debug_dir;		/* debugfs directory */
-+static struct dentry *debug_max;		/* maximum TSC delta */
-+static struct dentry *debug_count;		/* total detect count */
-+static struct dentry *debug_sample_width;	/* sample width us */
-+static struct dentry *debug_sample_window;	/* sample window us */
-+static struct dentry *debug_sample;		/* raw samples us */
-+static struct dentry *debug_threshold;		/* threshold us */
-+static struct dentry *debug_enable;		/* enable/disable */
-+
-+/* Individual samples and global state */
-+
-+struct sample;					/* latency sample */
-+struct data;					/* Global state */
-+
-+/* Sampling functions */
-+static int __buffer_add_sample(struct sample *sample);
-+static struct sample *buffer_get_sample(struct sample *sample);
-+static int get_sample(void *unused);
-+
-+/* Threading and state */
-+static int kthread_fn(void *unused);
-+static int start_kthread(void);
-+static int stop_kthread(void);
-+static void __reset_stats(void);
-+static int init_stats(void);
-+
-+/* Debugfs interface */
-+static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
-+				size_t cnt, loff_t *ppos, const u64 *entry);
-+static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
-+				 size_t cnt, loff_t *ppos, u64 *entry);
-+static int debug_sample_fopen(struct inode *inode, struct file *filp);
-+static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
-+				  size_t cnt, loff_t *ppos);
-+static int debug_sample_release(struct inode *inode, struct file *filp);
-+static int debug_enable_fopen(struct inode *inode, struct file *filp);
-+static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf,
-+				  size_t cnt, loff_t *ppos);
-+static ssize_t debug_enable_fwrite(struct file *file,
-+				   const char __user *user_buffer,
-+				   size_t user_size, loff_t *offset);
-+
-+/* Initialization functions */
-+static int init_debugfs(void);
-+static void free_debugfs(void);
-+static int detector_init(void);
-+static void detector_exit(void);
-+
-+/* Individual latency samples are stored here when detected and packed into
-+ * the ring_buffer circular buffer, where they are overwritten when
-+ * more than buf_size/sizeof(sample) samples are received. */
-+struct sample {
-+	u64		seqnum;		/* unique sequence */
-+	u64		duration;	/* ktime delta */
-+	struct timespec	timestamp;	/* wall time */
-+	unsigned long   lost;
-+};
-+
-+/* keep the global state somewhere. Mostly used under stop_machine. */
-+static struct data {
-+
-+	struct mutex lock;		/* protect changes */
-+
-+	u64	count;			/* total since reset */
-+	u64	max_sample;		/* max hardware latency */
-+	u64	threshold;		/* sample threshold level */
-+
-+	u64	sample_window;		/* total sampling window (on+off) */
-+	u64	sample_width;		/* active sampling portion of window */
-+
-+	atomic_t sample_open;		/* whether the sample file is open */
-+
-+	wait_queue_head_t wq;		/* waitqueue for new sample values */
-+
-+} data;
-+
-+/**
-+ * __buffer_add_sample - add a new latency sample recording to the ring buffer
-+ * @sample: The new latency sample value
-+ *
-+ * This receives a new latency sample and records it in a global ring buffer.
-+ * No additional locking is used in this case - suited for stop_machine use.
-+ */
-+static int __buffer_add_sample(struct sample *sample)
-+{
-+	return ring_buffer_write(ring_buffer,
-+				 sizeof(struct sample), sample);
-+}
-+
-+/**
-+ * buffer_get_sample - remove a hardware latency sample from the ring buffer
-+ * @sample: Pre-allocated storage for the sample
-+ *
-+ * This retrieves a hardware latency sample from the global circular buffer
-+ */
-+static struct sample *buffer_get_sample(struct sample *sample)
-+{
-+	struct ring_buffer_event *e = NULL;
-+	struct sample *s = NULL;
-+	unsigned int cpu = 0;
-+
-+	if (!sample)
-+		return NULL;
-+
-+	mutex_lock(&ring_buffer_mutex);
-+	for_each_online_cpu(cpu) {
-+		e = ring_buffer_consume(ring_buffer, cpu, NULL, &sample->lost);
-+		if (e)
-+			break;
-+	}
-+
-+	if (e) {
-+		s = ring_buffer_event_data(e);
-+		memcpy(sample, s, sizeof(struct sample));
-+	} else
-+		sample = NULL;
-+	mutex_unlock(&ring_buffer_mutex);
-+
-+	return sample;
-+}
-+
-+/**
-+ * get_sample - sample the CPU TSC and look for likely hardware latencies
-+ * @unused: This is not used but is a part of the stop_machine API
-+ *
-+ * Used to repeatedly capture the CPU TSC (or similar), looking for potential
-+ * hardware-induced latency. Called under stop_machine, with data.lock held.
-+ */
-+static int get_sample(void *unused)
-+{
-+	ktime_t start, t1, t2;
-+	s64 diff, total = 0;
-+	u64 sample = 0;
-+	int ret = 1;
-+
-+	start = ktime_get(); /* start timestamp */
-+
-+	do {
-+
-+		t1 = ktime_get();	/* we'll look for a discontinuity */
-+		t2 = ktime_get();
-+
-+		total = ktime_to_us(ktime_sub(t2, start)); /* sample width */
-+		diff = ktime_to_us(ktime_sub(t2, t1));     /* current diff */
-+
-+		/* This shouldn't happen */
-+		if (diff < 0) {
-+			pr_err(BANNER "time running backwards\n");
-+			goto out;
-+		}
-+
-+		if (diff > sample)
-+			sample = diff; /* only want highest value */
-+
-+	} while (total <= data.sample_width);
-+
-+	/* If we exceed the threshold value, we have found a hardware latency */
-+	if (sample > data.threshold) {
-+		struct sample s;
-+
-+		data.count++;
-+		s.seqnum = data.count;
-+		s.duration = sample;
-+		s.timestamp = CURRENT_TIME;
-+		__buffer_add_sample(&s);
-+
-+		/* Keep a running maximum ever recorded hardware latency */
-+		if (sample > data.max_sample)
-+			data.max_sample = sample;
-+	}
-+
-+	ret = 0;
-+out:
-+	return ret;
-+}
-+
-+/*
-+ * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
-+ * @unused: A required part of the kthread API.
-+ *
-+ * Used to periodically sample the CPU TSC via a call to get_sample. We
-+ * use stop_machine, whith does (intentionally) introduce latency since we
-+ * need to ensure nothing else might be running (and thus pre-empting).
-+ * Obviously this should never be used in production environments.
-+ *
-+ * stop_machine will schedule us typically only on CPU0 which is fine for
-+ * almost every real-world hardware latency situation - but we might later
-+ * generalize this if we find there are any actualy systems with alternate
-+ * SMI delivery or other non CPU0 hardware latencies.
-+ */
-+static int kthread_fn(void *unused)
-+{
-+	int err = 0;
-+	u64 interval = 0;
-+
-+	while (!kthread_should_stop()) {
-+
-+		mutex_lock(&data.lock);
-+
-+		err = stop_machine(get_sample, unused, 0);
-+		if (err) {
-+			/* Houston, we have a problem */
-+			mutex_unlock(&data.lock);
-+			goto err_out;
-+		}
-+
-+		wake_up(&data.wq); /* wake up reader(s) */
-+
-+		interval = data.sample_window - data.sample_width;
-+		do_div(interval, USEC_PER_MSEC); /* modifies interval value */
-+
-+		mutex_unlock(&data.lock);
-+
-+		if (msleep_interruptible(interval))
-+			goto out;
-+	}
-+		goto out;
-+err_out:
-+	pr_err(BANNER "could not call stop_machine, disabling\n");
-+	enabled = 0;
-+out:
-+	return err;
-+
-+}
-+
-+/**
-+ * start_kthread - Kick off the hardware latency sampling/detector kthread
-+ *
-+ * This starts a kernel thread that will sit and sample the CPU timestamp
-+ * counter (TSC or similar) and look for potential hardware latencies.
-+ */
-+static int start_kthread(void)
-+{
-+	kthread = kthread_run(kthread_fn, NULL,
-+					DRVNAME);
-+	if (IS_ERR(kthread)) {
-+		pr_err(BANNER "could not start sampling thread\n");
-+		enabled = 0;
-+		return -ENOMEM;
-+	}
-+
-+	return 0;
-+}
-+
-+/**
-+ * stop_kthread - Inform the hardware latency sampling/detector kthread to stop
-+ *
-+ * This kicks the running hardware latency sampling/detector kernel thread and
-+ * tells it to stop sampling now. Use this on unload and at system shutdown.
-+ */
-+static int stop_kthread(void)
-+{
-+	int ret;
-+
-+	ret = kthread_stop(kthread);
-+
-+	return ret;
-+}
-+
-+/**
-+ * __reset_stats - Reset statistics for the hardware latency detector
-+ *
-+ * We use data to store various statistics and global state. We call this
-+ * function in order to reset those when "enable" is toggled on or off, and
-+ * also at initialization. Should be called with data.lock held.
-+ */
-+static void __reset_stats(void)
-+{
-+	data.count = 0;
-+	data.max_sample = 0;
-+	ring_buffer_reset(ring_buffer); /* flush out old sample entries */
-+}
-+
-+/**
-+ * init_stats - Setup global state statistics for the hardware latency detector
-+ *
-+ * We use data to store various statistics and global state. We also use
-+ * a global ring buffer (ring_buffer) to keep raw samples of detected hardware
-+ * induced system latencies. This function initializes these structures and
-+ * allocates the global ring buffer also.
-+ */
-+static int init_stats(void)
-+{
-+	int ret = -ENOMEM;
-+
-+	mutex_init(&data.lock);
-+	init_waitqueue_head(&data.wq);
-+	atomic_set(&data.sample_open, 0);
-+
-+	ring_buffer = ring_buffer_alloc(buf_size, BUF_FLAGS);
-+
-+	if (WARN(!ring_buffer, KERN_ERR BANNER
-+			       "failed to allocate ring buffer!\n"))
-+		goto out;
-+
-+	__reset_stats();
-+	data.threshold = DEFAULT_LAT_THRESHOLD;	    /* threshold us */
-+	data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */
-+	data.sample_width = DEFAULT_SAMPLE_WIDTH;   /* width us */
-+
-+	ret = 0;
-+
-+out:
-+	return ret;
-+
-+}
-+
-+/*
-+ * simple_data_read - Wrapper read function for global state debugfs entries
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The userspace provided buffer to read value into
-+ * @cnt: The maximum number of bytes to read
-+ * @ppos: The current "file" position
-+ * @entry: The entry to read from
-+ *
-+ * This function provides a generic read implementation for the global state
-+ * "data" structure debugfs filesystem entries. It would be nice to use
-+ * simple_attr_read directly, but we need to make sure that the data.lock
-+ * spinlock is held during the actual read (even though we likely won't ever
-+ * actually race here as the updater runs under a stop_machine context).
-+ */
-+static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
-+				size_t cnt, loff_t *ppos, const u64 *entry)
-+{
-+	char buf[U64STR_SIZE];
-+	u64 val = 0;
-+	int len = 0;
-+
-+	memset(buf, 0, sizeof(buf));
-+
-+	if (!entry)
-+		return -EFAULT;
-+
-+	mutex_lock(&data.lock);
-+	val = *entry;
-+	mutex_unlock(&data.lock);
-+
-+	len = snprintf(buf, sizeof(buf), "%llu\n", (unsigned long long)val);
-+
-+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
-+
-+}
-+
-+/*
-+ * simple_data_write - Wrapper write function for global state debugfs entries
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The userspace provided buffer to write value from
-+ * @cnt: The maximum number of bytes to write
-+ * @ppos: The current "file" position
-+ * @entry: The entry to write to
-+ *
-+ * This function provides a generic write implementation for the global state
-+ * "data" structure debugfs filesystem entries. It would be nice to use
-+ * simple_attr_write directly, but we need to make sure that the data.lock
-+ * spinlock is held during the actual write (even though we likely won't ever
-+ * actually race here as the updater runs under a stop_machine context).
-+ */
-+static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
-+				 size_t cnt, loff_t *ppos, u64 *entry)
-+{
-+	char buf[U64STR_SIZE];
-+	int csize = min(cnt, sizeof(buf));
-+	u64 val = 0;
-+	int err = 0;
-+
-+	memset(buf, '\0', sizeof(buf));
-+	if (copy_from_user(buf, ubuf, csize))
-+		return -EFAULT;
-+
-+	buf[U64STR_SIZE-1] = '\0';			/* just in case */
-+	err = kstrtoull(buf, 10, &val);
-+	if (err)
-+		return -EINVAL;
-+
-+	mutex_lock(&data.lock);
-+	*entry = val;
-+	mutex_unlock(&data.lock);
-+
-+	return csize;
-+}
-+
-+/**
-+ * debug_count_fopen - Open function for "count" debugfs entry
-+ * @inode: The in-kernel inode representation of the debugfs "file"
-+ * @filp: The active open file structure for the debugfs "file"
-+ *
-+ * This function provides an open implementation for the "count" debugfs
-+ * interface to the hardware latency detector.
-+ */
-+static int debug_count_fopen(struct inode *inode, struct file *filp)
-+{
-+	return 0;
-+}
-+
-+/**
-+ * debug_count_fread - Read function for "count" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The userspace provided buffer to read value into
-+ * @cnt: The maximum number of bytes to read
-+ * @ppos: The current "file" position
-+ *
-+ * This function provides a read implementation for the "count" debugfs
-+ * interface to the hardware latency detector. Can be used to read the
-+ * number of latency readings exceeding the configured threshold since
-+ * the detector was last reset (e.g. by writing a zero into "count").
-+ */
-+static ssize_t debug_count_fread(struct file *filp, char __user *ubuf,
-+				     size_t cnt, loff_t *ppos)
-+{
-+	return simple_data_read(filp, ubuf, cnt, ppos, &data.count);
-+}
-+
-+/**
-+ * debug_count_fwrite - Write function for "count" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The user buffer that contains the value to write
-+ * @cnt: The maximum number of bytes to write to "file"
-+ * @ppos: The current position in the debugfs "file"
-+ *
-+ * This function provides a write implementation for the "count" debugfs
-+ * interface to the hardware latency detector. Can be used to write a
-+ * desired value, especially to zero the total count.
-+ */
-+static ssize_t  debug_count_fwrite(struct file *filp,
-+				       const char __user *ubuf,
-+				       size_t cnt,
-+				       loff_t *ppos)
-+{
-+	return simple_data_write(filp, ubuf, cnt, ppos, &data.count);
-+}
-+
-+/**
-+ * debug_enable_fopen - Dummy open function for "enable" debugfs interface
-+ * @inode: The in-kernel inode representation of the debugfs "file"
-+ * @filp: The active open file structure for the debugfs "file"
-+ *
-+ * This function provides an open implementation for the "enable" debugfs
-+ * interface to the hardware latency detector.
-+ */
-+static int debug_enable_fopen(struct inode *inode, struct file *filp)
-+{
-+	return 0;
-+}
-+
-+/**
-+ * debug_enable_fread - Read function for "enable" debugfs interface
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The userspace provided buffer to read value into
-+ * @cnt: The maximum number of bytes to read
-+ * @ppos: The current "file" position
-+ *
-+ * This function provides a read implementation for the "enable" debugfs
-+ * interface to the hardware latency detector. Can be used to determine
-+ * whether the detector is currently enabled ("0\n" or "1\n" returned).
-+ */
-+static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf,
-+				      size_t cnt, loff_t *ppos)
-+{
-+	char buf[4];
-+
-+	if ((cnt < sizeof(buf)) || (*ppos))
-+		return 0;
-+
-+	buf[0] = enabled ? '1' : '0';
-+	buf[1] = '\n';
-+	buf[2] = '\0';
-+	if (copy_to_user(ubuf, buf, strlen(buf)))
-+		return -EFAULT;
-+	return *ppos = strlen(buf);
-+}
-+
-+/**
-+ * debug_enable_fwrite - Write function for "enable" debugfs interface
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The user buffer that contains the value to write
-+ * @cnt: The maximum number of bytes to write to "file"
-+ * @ppos: The current position in the debugfs "file"
-+ *
-+ * This function provides a write implementation for the "enable" debugfs
-+ * interface to the hardware latency detector. Can be used to enable or
-+ * disable the detector, which will have the side-effect of possibly
-+ * also resetting the global stats and kicking off the measuring
-+ * kthread (on an enable) or the converse (upon a disable).
-+ */
-+static ssize_t  debug_enable_fwrite(struct file *filp,
-+					const char __user *ubuf,
-+					size_t cnt,
-+					loff_t *ppos)
-+{
-+	char buf[4];
-+	int csize = min(cnt, sizeof(buf));
-+	long val = 0;
-+	int err = 0;
-+
-+	memset(buf, '\0', sizeof(buf));
-+	if (copy_from_user(buf, ubuf, csize))
-+		return -EFAULT;
-+
-+	buf[sizeof(buf)-1] = '\0';			/* just in case */
-+	err = kstrtoul(buf, 10, &val);
-+	if (err)
-+		return -EINVAL;
-+
-+	if (val) {
-+		if (enabled)
-+			goto unlock;
-+		enabled = 1;
-+		__reset_stats();
-+		if (start_kthread())
-+			return -EFAULT;
-+	} else {
-+		if (!enabled)
-+			goto unlock;
-+		enabled = 0;
-+		err = stop_kthread();
-+		if (err) {
-+			pr_err(BANNER "cannot stop kthread\n");
-+			return -EFAULT;
-+		}
-+		wake_up(&data.wq);		/* reader(s) should return */
-+	}
-+unlock:
-+	return csize;
-+}
-+
-+/**
-+ * debug_max_fopen - Open function for "max" debugfs entry
-+ * @inode: The in-kernel inode representation of the debugfs "file"
-+ * @filp: The active open file structure for the debugfs "file"
-+ *
-+ * This function provides an open implementation for the "max" debugfs
-+ * interface to the hardware latency detector.
-+ */
-+static int debug_max_fopen(struct inode *inode, struct file *filp)
-+{
-+	return 0;
-+}
-+
-+/**
-+ * debug_max_fread - Read function for "max" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The userspace provided buffer to read value into
-+ * @cnt: The maximum number of bytes to read
-+ * @ppos: The current "file" position
-+ *
-+ * This function provides a read implementation for the "max" debugfs
-+ * interface to the hardware latency detector. Can be used to determine
-+ * the maximum latency value observed since it was last reset.
-+ */
-+static ssize_t debug_max_fread(struct file *filp, char __user *ubuf,
-+				   size_t cnt, loff_t *ppos)
-+{
-+	return simple_data_read(filp, ubuf, cnt, ppos, &data.max_sample);
-+}
-+
-+/**
-+ * debug_max_fwrite - Write function for "max" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The user buffer that contains the value to write
-+ * @cnt: The maximum number of bytes to write to "file"
-+ * @ppos: The current position in the debugfs "file"
-+ *
-+ * This function provides a write implementation for the "max" debugfs
-+ * interface to the hardware latency detector. Can be used to reset the
-+ * maximum or set it to some other desired value - if, then, subsequent
-+ * measurements exceed this value, the maximum will be updated.
-+ */
-+static ssize_t  debug_max_fwrite(struct file *filp,
-+				     const char __user *ubuf,
-+				     size_t cnt,
-+				     loff_t *ppos)
-+{
-+	return simple_data_write(filp, ubuf, cnt, ppos, &data.max_sample);
-+}
-+
-+
-+/**
-+ * debug_sample_fopen - An open function for "sample" debugfs interface
-+ * @inode: The in-kernel inode representation of this debugfs "file"
-+ * @filp: The active open file structure for the debugfs "file"
-+ *
-+ * This function handles opening the "sample" file within the hardware
-+ * latency detector debugfs directory interface. This file is used to read
-+ * raw samples from the global ring_buffer and allows the user to see a
-+ * running latency history. It can be opened blocking or non-blocking,
-+ * which determines whether reads on it block waiting for new samples.
-+ * Implements simple locking to prevent multiple simultaneous use.
-+ */
-+static int debug_sample_fopen(struct inode *inode, struct file *filp)
-+{
-+	if (!atomic_add_unless(&data.sample_open, 1, 1))
-+		return -EBUSY;
-+	else
-+		return 0;
-+}
-+
-+/**
-+ * debug_sample_fread - A read function for "sample" debugfs interface
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The user buffer that will contain the samples read
-+ * @cnt: The maximum bytes to read from the debugfs "file"
-+ * @ppos: The current position in the debugfs "file"
-+ *
-+ * This function handles reading from the "sample" file within the hardware
-+ * latency detector debugfs directory interface. This file is used to read
-+ * raw samples from the global ring_buffer and allows the user to see a
-+ * running latency history. By default this will block pending a new
-+ * value written into the sample buffer, unless there are already a
-+ * number of value(s) waiting in the buffer, or the sample file was
-+ * previously opened in a non-blocking mode of operation.
-+ */
-+static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
-+					size_t cnt, loff_t *ppos)
-+{
-+	int len = 0;
-+	char buf[64];
-+	struct sample *sample = NULL;
-+
-+	if (!enabled)
-+		return 0;
-+
-+	sample = kzalloc(sizeof(struct sample), GFP_KERNEL);
-+	if (!sample)
-+		return -ENOMEM;
-+
-+	while (!buffer_get_sample(sample)) {
-+
-+		DEFINE_WAIT(wait);
-+
-+		if (filp->f_flags & O_NONBLOCK) {
-+			len = -EAGAIN;
-+			goto out;
-+		}
-+
-+		prepare_to_wait(&data.wq, &wait, TASK_INTERRUPTIBLE);
-+		schedule();
-+		finish_wait(&data.wq, &wait);
-+
-+		if (signal_pending(current)) {
-+			len = -EINTR;
-+			goto out;
-+		}
-+
-+		if (!enabled) {			/* enable was toggled */
-+			len = 0;
-+			goto out;
-+		}
-+	}
-+
-+	len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\n",
-+		      sample->timestamp.tv_sec,
-+		      sample->timestamp.tv_nsec,
-+		      sample->duration);
-+
-+
-+	/* handling partial reads is more trouble than it's worth */
-+	if (len > cnt)
-+		goto out;
-+
-+	if (copy_to_user(ubuf, buf, len))
-+		len = -EFAULT;
-+
-+out:
-+	kfree(sample);
-+	return len;
-+}
-+
-+/**
-+ * debug_sample_release - Release function for "sample" debugfs interface
-+ * @inode: The in-kernel inode representation of the debugfs "file"
-+ * @filp: The active open file structure for the debugfs "file"
-+ *
-+ * This function completes the close of the debugfs interface "sample" file.
-+ * Frees the sample_open "lock" so that other users may open the interface.
-+ */
-+static int debug_sample_release(struct inode *inode, struct file *filp)
-+{
-+	atomic_dec(&data.sample_open);
-+
-+	return 0;
-+}
-+
-+/**
-+ * debug_threshold_fopen - Open function for "threshold" debugfs entry
-+ * @inode: The in-kernel inode representation of the debugfs "file"
-+ * @filp: The active open file structure for the debugfs "file"
-+ *
-+ * This function provides an open implementation for the "threshold" debugfs
-+ * interface to the hardware latency detector.
-+ */
-+static int debug_threshold_fopen(struct inode *inode, struct file *filp)
-+{
-+	return 0;
-+}
-+
-+/**
-+ * debug_threshold_fread - Read function for "threshold" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The userspace provided buffer to read value into
-+ * @cnt: The maximum number of bytes to read
-+ * @ppos: The current "file" position
-+ *
-+ * This function provides a read implementation for the "threshold" debugfs
-+ * interface to the hardware latency detector. It can be used to determine
-+ * the current threshold level at which a latency will be recorded in the
-+ * global ring buffer, typically on the order of 10us.
-+ */
-+static ssize_t debug_threshold_fread(struct file *filp, char __user *ubuf,
-+					 size_t cnt, loff_t *ppos)
-+{
-+	return simple_data_read(filp, ubuf, cnt, ppos, &data.threshold);
-+}
-+
-+/**
-+ * debug_threshold_fwrite - Write function for "threshold" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The user buffer that contains the value to write
-+ * @cnt: The maximum number of bytes to write to "file"
-+ * @ppos: The current position in the debugfs "file"
-+ *
-+ * This function provides a write implementation for the "threshold" debugfs
-+ * interface to the hardware latency detector. It can be used to configure
-+ * the threshold level at which any subsequently detected latencies will
-+ * be recorded into the global ring buffer.
-+ */
-+static ssize_t  debug_threshold_fwrite(struct file *filp,
-+					const char __user *ubuf,
-+					size_t cnt,
-+					loff_t *ppos)
-+{
-+	int ret;
-+
-+	ret = simple_data_write(filp, ubuf, cnt, ppos, &data.threshold);
-+
-+	if (enabled)
-+		wake_up_process(kthread);
-+
-+	return ret;
-+}
-+
-+/**
-+ * debug_width_fopen - Open function for "width" debugfs entry
-+ * @inode: The in-kernel inode representation of the debugfs "file"
-+ * @filp: The active open file structure for the debugfs "file"
-+ *
-+ * This function provides an open implementation for the "width" debugfs
-+ * interface to the hardware latency detector.
-+ */
-+static int debug_width_fopen(struct inode *inode, struct file *filp)
-+{
-+	return 0;
-+}
-+
-+/**
-+ * debug_width_fread - Read function for "width" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The userspace provided buffer to read value into
-+ * @cnt: The maximum number of bytes to read
-+ * @ppos: The current "file" position
-+ *
-+ * This function provides a read implementation for the "width" debugfs
-+ * interface to the hardware latency detector. It can be used to determine
-+ * for how many us of the total window we will actively sample for any
-+ * hardware-induced latency periods. Obviously, it is not possible to
-+ * sample constantly and have the system respond to a sample reader, or,
-+ * worse, without having the system appear to have gone out to lunch.
-+ */
-+static ssize_t debug_width_fread(struct file *filp, char __user *ubuf,
-+				     size_t cnt, loff_t *ppos)
-+{
-+	return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_width);
-+}
-+
-+/**
-+ * debug_width_fwrite - Write function for "width" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The user buffer that contains the value to write
-+ * @cnt: The maximum number of bytes to write to "file"
-+ * @ppos: The current position in the debugfs "file"
-+ *
-+ * This function provides a write implementation for the "width" debugfs
-+ * interface to the hardware latency detector. It can be used to configure
-+ * for how many us of the total window we will actively sample for any
-+ * hardware-induced latency periods. Obviously, it is not possible to
-+ * sample constantly and have the system respond to a sample reader, or,
-+ * worse, without having the system appear to have gone out to lunch. It
-+ * is enforced that width is less that the total window size.
-+ */
-+static ssize_t  debug_width_fwrite(struct file *filp,
-+				       const char __user *ubuf,
-+				       size_t cnt,
-+				       loff_t *ppos)
-+{
-+	char buf[U64STR_SIZE];
-+	int csize = min(cnt, sizeof(buf));
-+	u64 val = 0;
-+	int err = 0;
-+
-+	memset(buf, '\0', sizeof(buf));
-+	if (copy_from_user(buf, ubuf, csize))
-+		return -EFAULT;
-+
-+	buf[U64STR_SIZE-1] = '\0';			/* just in case */
-+	err = kstrtoull(buf, 10, &val);
-+	if (err)
-+		return -EINVAL;
-+
-+	mutex_lock(&data.lock);
-+	if (val < data.sample_window)
-+		data.sample_width = val;
-+	else {
-+		mutex_unlock(&data.lock);
-+		return -EINVAL;
-+	}
-+	mutex_unlock(&data.lock);
-+
-+	if (enabled)
-+		wake_up_process(kthread);
-+
-+	return csize;
-+}
-+
-+/**
-+ * debug_window_fopen - Open function for "window" debugfs entry
-+ * @inode: The in-kernel inode representation of the debugfs "file"
-+ * @filp: The active open file structure for the debugfs "file"
-+ *
-+ * This function provides an open implementation for the "window" debugfs
-+ * interface to the hardware latency detector. The window is the total time
-+ * in us that will be considered one sample period. Conceptually, windows
-+ * occur back-to-back and contain a sample width period during which
-+ * actual sampling occurs.
-+ */
-+static int debug_window_fopen(struct inode *inode, struct file *filp)
-+{
-+	return 0;
-+}
-+
-+/**
-+ * debug_window_fread - Read function for "window" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The userspace provided buffer to read value into
-+ * @cnt: The maximum number of bytes to read
-+ * @ppos: The current "file" position
-+ *
-+ * This function provides a read implementation for the "window" debugfs
-+ * interface to the hardware latency detector. The window is the total time
-+ * in us that will be considered one sample period. Conceptually, windows
-+ * occur back-to-back and contain a sample width period during which
-+ * actual sampling occurs. Can be used to read the total window size.
-+ */
-+static ssize_t debug_window_fread(struct file *filp, char __user *ubuf,
-+				      size_t cnt, loff_t *ppos)
-+{
-+	return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_window);
-+}
-+
-+/**
-+ * debug_window_fwrite - Write function for "window" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The user buffer that contains the value to write
-+ * @cnt: The maximum number of bytes to write to "file"
-+ * @ppos: The current position in the debugfs "file"
-+ *
-+ * This function provides a write implementation for the "window" debufds
-+ * interface to the hardware latency detetector. The window is the total time
-+ * in us that will be considered one sample period. Conceptually, windows
-+ * occur back-to-back and contain a sample width period during which
-+ * actual sampling occurs. Can be used to write a new total window size. It
-+ * is enfoced that any value written must be greater than the sample width
-+ * size, or an error results.
-+ */
-+static ssize_t  debug_window_fwrite(struct file *filp,
-+					const char __user *ubuf,
-+					size_t cnt,
-+					loff_t *ppos)
-+{
-+	char buf[U64STR_SIZE];
-+	int csize = min(cnt, sizeof(buf));
-+	u64 val = 0;
-+	int err = 0;
-+
-+	memset(buf, '\0', sizeof(buf));
-+	if (copy_from_user(buf, ubuf, csize))
-+		return -EFAULT;
-+
-+	buf[U64STR_SIZE-1] = '\0';			/* just in case */
-+	err = kstrtoull(buf, 10, &val);
-+	if (err)
-+		return -EINVAL;
-+
-+	mutex_lock(&data.lock);
-+	if (data.sample_width < val)
-+		data.sample_window = val;
-+	else {
-+		mutex_unlock(&data.lock);
-+		return -EINVAL;
-+	}
-+	mutex_unlock(&data.lock);
-+
-+	return csize;
-+}
-+
-+/*
-+ * Function pointers for the "count" debugfs file operations
-+ */
-+static const struct file_operations count_fops = {
-+	.open		= debug_count_fopen,
-+	.read		= debug_count_fread,
-+	.write		= debug_count_fwrite,
-+	.owner		= THIS_MODULE,
-+};
-+
-+/*
-+ * Function pointers for the "enable" debugfs file operations
-+ */
-+static const struct file_operations enable_fops = {
-+	.open		= debug_enable_fopen,
-+	.read		= debug_enable_fread,
-+	.write		= debug_enable_fwrite,
-+	.owner		= THIS_MODULE,
-+};
-+
-+/*
-+ * Function pointers for the "max" debugfs file operations
-+ */
-+static const struct file_operations max_fops = {
-+	.open		= debug_max_fopen,
-+	.read		= debug_max_fread,
-+	.write		= debug_max_fwrite,
-+	.owner		= THIS_MODULE,
-+};
-+
-+/*
-+ * Function pointers for the "sample" debugfs file operations
-+ */
-+static const struct file_operations sample_fops = {
-+	.open		= debug_sample_fopen,
-+	.read		= debug_sample_fread,
-+	.release	= debug_sample_release,
-+	.owner		= THIS_MODULE,
-+};
-+
-+/*
-+ * Function pointers for the "threshold" debugfs file operations
-+ */
-+static const struct file_operations threshold_fops = {
-+	.open		= debug_threshold_fopen,
-+	.read		= debug_threshold_fread,
-+	.write		= debug_threshold_fwrite,
-+	.owner		= THIS_MODULE,
-+};
-+
-+/*
-+ * Function pointers for the "width" debugfs file operations
-+ */
-+static const struct file_operations width_fops = {
-+	.open		= debug_width_fopen,
-+	.read		= debug_width_fread,
-+	.write		= debug_width_fwrite,
-+	.owner		= THIS_MODULE,
-+};
-+
-+/*
-+ * Function pointers for the "window" debugfs file operations
-+ */
-+static const struct file_operations window_fops = {
-+	.open		= debug_window_fopen,
-+	.read		= debug_window_fread,
-+	.write		= debug_window_fwrite,
-+	.owner		= THIS_MODULE,
-+};
-+
-+/**
-+ * init_debugfs - A function to initialize the debugfs interface files
-+ *
-+ * This function creates entries in debugfs for "hwlat_detector", including
-+ * files to read values from the detector, current samples, and the
-+ * maximum sample that has been captured since the hardware latency
-+ * detector was started.
-+ */
-+static int init_debugfs(void)
-+{
-+	int ret = -ENOMEM;
-+
-+	debug_dir = debugfs_create_dir(DRVNAME, NULL);
-+	if (!debug_dir)
-+		goto err_debug_dir;
-+
-+	debug_sample = debugfs_create_file("sample", 0444,
-+					       debug_dir, NULL,
-+					       &sample_fops);
-+	if (!debug_sample)
-+		goto err_sample;
-+
-+	debug_count = debugfs_create_file("count", 0444,
-+					      debug_dir, NULL,
-+					      &count_fops);
-+	if (!debug_count)
-+		goto err_count;
-+
-+	debug_max = debugfs_create_file("max", 0444,
-+					    debug_dir, NULL,
-+					    &max_fops);
-+	if (!debug_max)
-+		goto err_max;
-+
-+	debug_sample_window = debugfs_create_file("window", 0644,
-+						      debug_dir, NULL,
-+						      &window_fops);
-+	if (!debug_sample_window)
-+		goto err_window;
-+
-+	debug_sample_width = debugfs_create_file("width", 0644,
-+						     debug_dir, NULL,
-+						     &width_fops);
-+	if (!debug_sample_width)
-+		goto err_width;
-+
-+	debug_threshold = debugfs_create_file("threshold", 0644,
-+						  debug_dir, NULL,
-+						  &threshold_fops);
-+	if (!debug_threshold)
-+		goto err_threshold;
-+
-+	debug_enable = debugfs_create_file("enable", 0644,
-+					       debug_dir, &enabled,
-+					       &enable_fops);
-+	if (!debug_enable)
-+		goto err_enable;
-+
-+	else {
-+		ret = 0;
-+		goto out;
-+	}
-+
-+err_enable:
-+	debugfs_remove(debug_threshold);
-+err_threshold:
-+	debugfs_remove(debug_sample_width);
-+err_width:
-+	debugfs_remove(debug_sample_window);
-+err_window:
-+	debugfs_remove(debug_max);
-+err_max:
-+	debugfs_remove(debug_count);
-+err_count:
-+	debugfs_remove(debug_sample);
-+err_sample:
-+	debugfs_remove(debug_dir);
-+err_debug_dir:
-+out:
-+	return ret;
-+}
-+
-+/**
-+ * free_debugfs - A function to cleanup the debugfs file interface
-+ */
-+static void free_debugfs(void)
-+{
-+	/* could also use a debugfs_remove_recursive */
-+	debugfs_remove(debug_enable);
-+	debugfs_remove(debug_threshold);
-+	debugfs_remove(debug_sample_width);
-+	debugfs_remove(debug_sample_window);
-+	debugfs_remove(debug_max);
-+	debugfs_remove(debug_count);
-+	debugfs_remove(debug_sample);
-+	debugfs_remove(debug_dir);
-+}
-+
-+/**
-+ * detector_init - Standard module initialization code
-+ */
-+static int detector_init(void)
-+{
-+	int ret = -ENOMEM;
-+
-+	pr_info(BANNER "version %s\n", VERSION);
-+
-+	ret = init_stats();
-+	if (ret)
-+		goto out;
-+
-+	ret = init_debugfs();
-+	if (ret)
-+		goto err_stats;
-+
-+	if (enabled)
-+		ret = start_kthread();
-+
-+	goto out;
-+
-+err_stats:
-+	ring_buffer_free(ring_buffer);
-+out:
-+	return ret;
-+
-+}
-+
-+/**
-+ * detector_exit - Standard module cleanup code
-+ */
-+static void detector_exit(void)
-+{
-+	int err;
-+
-+	if (enabled) {
-+		enabled = 0;
-+		err = stop_kthread();
-+		if (err)
-+			pr_err(BANNER "cannot stop kthread\n");
-+	}
-+
-+	free_debugfs();
-+	ring_buffer_free(ring_buffer);	/* free up the ring buffer */
-+
-+}
-+
-+module_init(detector_init);
-+module_exit(detector_exit);
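As a side note on the debugfs pattern the detector above used (a directory
plus per-value files, with the error unwinding in init_debugfs()), here is a
minimal sketch using the generic u64 helper instead of hand-rolled
file_operations; every name in it is a placeholder:

	#include <linux/debugfs.h>
	#include <linux/module.h>

	static struct dentry *example_dir;	/* placeholder names */
	static u64 example_value;

	static int __init example_init(void)
	{
		example_dir = debugfs_create_dir("example", NULL);
		if (!example_dir)
			return -ENOMEM;
		/* one file backed directly by the u64 above */
		if (!debugfs_create_u64("value", 0644, example_dir,
					&example_value)) {
			debugfs_remove(example_dir);
			return -ENOMEM;
		}
		return 0;
	}

	static void __exit example_exit(void)
	{
		/* tears down the whole directory in one call, as the
		 * comment in free_debugfs() above hints */
		debugfs_remove_recursive(example_dir);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");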
diff --git a/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch b/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
index fc84a9d..0c7e8eb 100644
--- a/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
+++ b/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
@@ -18,7 +18,7 @@
 
 --- a/drivers/gpu/drm/i915/intel_display.c
 +++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -11670,7 +11670,7 @@ void intel_check_page_flip(struct drm_i9
+@@ -12142,7 +12142,7 @@ void intel_check_page_flip(struct drm_i9
  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  	struct intel_flip_work *work;
  
diff --git a/patches/introduce_migrate_disable_cpu_light.patch b/patches/introduce_migrate_disable_cpu_light.patch
index 6a913cd..94a266d 100644
--- a/patches/introduce_migrate_disable_cpu_light.patch
+++ b/patches/introduce_migrate_disable_cpu_light.patch
@@ -41,7 +41,7 @@
 
 --- a/include/linux/cpu.h
 +++ b/include/linux/cpu.h
-@@ -185,6 +185,9 @@ static inline void cpu_notifier_register
+@@ -180,6 +180,9 @@ static inline void cpu_notifier_register
  #endif /* CONFIG_SMP */
  extern struct bus_type cpu_subsys;
  
@@ -76,7 +76,7 @@
  #ifdef CONFIG_PREEMPT_NOTIFIERS
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1495,6 +1495,12 @@ struct task_struct {
+@@ -1520,6 +1520,12 @@ struct task_struct {
  #endif
  
  	unsigned int policy;
@@ -89,8 +89,8 @@
  	int nr_cpus_allowed;
  	cpumask_t cpus_allowed;
  
-@@ -1946,14 +1952,6 @@ extern int arch_task_struct_size __read_
- # define arch_task_struct_size (sizeof(struct task_struct))
+@@ -1990,14 +1996,6 @@ static inline struct vm_struct *task_sta
+ }
  #endif
  
 -/* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -104,7 +104,7 @@
  #define TNF_MIGRATED	0x01
  #define TNF_NO_GROUP	0x02
  #define TNF_SHARED	0x04
-@@ -3394,6 +3392,31 @@ static inline void set_task_cpu(struct t
+@@ -3515,6 +3513,31 @@ static inline void set_task_cpu(struct t
  
  #endif /* CONFIG_SMP */
  
@@ -150,7 +150,7 @@
   * boot command line:
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -1089,6 +1089,11 @@ void do_set_cpus_allowed(struct task_str
+@@ -1100,6 +1100,11 @@ void do_set_cpus_allowed(struct task_str
  
  	lockdep_assert_held(&p->pi_lock);
  
@@ -162,7 +162,7 @@
  	queued = task_on_rq_queued(p);
  	running = task_current(rq, p);
  
-@@ -1168,7 +1173,7 @@ static int __set_cpus_allowed_ptr(struct
+@@ -1179,7 +1184,7 @@ static int __set_cpus_allowed_ptr(struct
  	}
  
  	/* Can the task run on the task's current CPU? If so, we're done */
@@ -171,8 +171,8 @@
  		goto out;
  
  	dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
-@@ -3237,6 +3242,69 @@ static inline void schedule_debug(struct
- 	schedstat_inc(this_rq(), sched_count);
+@@ -3252,6 +3257,69 @@ static inline void schedule_debug(struct
+ 	schedstat_inc(this_rq()->sched_count);
  }
  
 +#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
@@ -243,7 +243,7 @@
   */
 --- a/kernel/sched/debug.c
 +++ b/kernel/sched/debug.c
-@@ -552,6 +552,9 @@ void print_rt_rq(struct seq_file *m, int
+@@ -558,6 +558,9 @@ void print_rt_rq(struct seq_file *m, int
  	P(rt_throttled);
  	PN(rt_time);
  	PN(rt_runtime);
@@ -253,7 +253,7 @@
  
  #undef PN
  #undef P
-@@ -947,6 +950,10 @@ void proc_sched_show_task(struct task_st
+@@ -953,6 +956,10 @@ void proc_sched_show_task(struct task_st
  #endif
  	P(policy);
  	P(prio);
@@ -261,9 +261,9 @@
 +	P(migrate_disable);
 +#endif
 +	P(nr_cpus_allowed);
+ #undef PN_SCHEDSTAT
  #undef PN
  #undef __PN
- #undef P
 --- a/lib/smp_processor_id.c
 +++ b/lib/smp_processor_id.c
 @@ -39,8 +39,9 @@ notrace static unsigned int check_preemp
diff --git a/patches/iommu-amd--Use-WARN_ON_NORT.patch b/patches/iommu-amd--Use-WARN_ON_NORT.patch
index 25a55c9..6cc705c 100644
--- a/patches/iommu-amd--Use-WARN_ON_NORT.patch
+++ b/patches/iommu-amd--Use-WARN_ON_NORT.patch
@@ -16,7 +16,7 @@
 
 --- a/drivers/iommu/amd_iommu.c
 +++ b/drivers/iommu/amd_iommu.c
-@@ -1835,10 +1835,10 @@ static int __attach_device(struct iommu_
+@@ -1923,10 +1923,10 @@ static int __attach_device(struct iommu_
  	int ret;
  
  	/*
@@ -30,7 +30,7 @@
  
  	/* lock domain */
  	spin_lock(&domain->lock);
-@@ -2006,10 +2006,10 @@ static void __detach_device(struct iommu
+@@ -2094,10 +2094,10 @@ static void __detach_device(struct iommu
  	struct protection_domain *domain;
  
  	/*
diff --git a/patches/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch b/patches/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
index 407649f..bd84f97 100644
--- a/patches/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
+++ b/patches/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
@@ -35,7 +35,7 @@
  
  /* bitmap for indexing intel_iommus */
  static int g_num_of_iommus;
-@@ -3649,10 +3649,8 @@ static void add_unmap(struct dmar_domain
+@@ -3696,10 +3696,8 @@ static void add_unmap(struct dmar_domain
  	struct intel_iommu *iommu;
  	struct deferred_flush_entry *entry;
  	struct deferred_flush_data *flush_data;
@@ -47,7 +47,7 @@
  
  	/* Flush all CPUs' entries to avoid deferring too much.  If
  	 * this becomes a bottleneck, can just flush us, and rely on
-@@ -3685,8 +3683,6 @@ static void add_unmap(struct dmar_domain
+@@ -3732,8 +3730,6 @@ static void add_unmap(struct dmar_domain
  	}
  	flush_data->size++;
  	spin_unlock_irqrestore(&flush_data->lock, flags);
diff --git a/patches/ipc-msg-Implement-lockless-pipelined-wakeups.patch b/patches/ipc-msg-Implement-lockless-pipelined-wakeups.patch
deleted file mode 100644
index dcab30e..0000000
--- a/patches/ipc-msg-Implement-lockless-pipelined-wakeups.patch
+++ /dev/null
@@ -1,227 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 30 Oct 2015 11:59:07 +0100
-Subject: ipc/msg: Implement lockless pipelined wakeups
-
-This patch moves the wake_up_process() invocation so it is not done under
-the perm->lock by making use of a lockless wake_q. With this change, the
-waiter is woken up once the message has been assigned and it does not
-need to loop on SMP if the message points to NULL. In the signal case we
-still need to check the pointer under the lock to verify the state.
-
-This change should also avoid the introduction of preempt_disable() on
--RT, which would otherwise busy-loop polling for the NULL -> !NULL
-change if the waiter has a higher priority than the waker.
-
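For illustration, a minimal sketch of the wake_q pattern this refers to;
the lock and task are placeholders, not the actual ipc/msg code:

	/* on-stack wake queue (v4.9 API) */
	WAKE_Q(wake_q);

	spin_lock(&some_lock);			/* placeholder lock */
	/* queue the task for wakeup; nothing is woken yet */
	wake_q_add(&wake_q, some_task);
	spin_unlock(&some_lock);

	/* the real wake_up_process() calls happen outside the lock */
	wake_up_q(&wake_q);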
-Cc: Davidlohr Bueso <dave@stgolabs.net>
-Cc: Manfred Spraul <manfred@colorfullife.com>
-Cc: Andrew Morton <akpm@linux-foundation.org>
-Cc: George Spelvin <linux@horizon.com>
-Cc: Thomas Gleixner <tglx@linutronix.de>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
-
- ipc/msg.c |  101 +++++++++++++++++---------------------------------------------
- 1 file changed, 28 insertions(+), 73 deletions(-)
-
---- a/ipc/msg.c
-+++ b/ipc/msg.c
-@@ -183,20 +183,14 @@ static void ss_wakeup(struct list_head *
- 	}
- }
- 
--static void expunge_all(struct msg_queue *msq, int res)
-+static void expunge_all(struct msg_queue *msq, int res,
-+			struct wake_q_head *wake_q)
- {
- 	struct msg_receiver *msr, *t;
- 
- 	list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
--		msr->r_msg = NULL; /* initialize expunge ordering */
--		wake_up_process(msr->r_tsk);
--		/*
--		 * Ensure that the wakeup is visible before setting r_msg as
--		 * the receiving end depends on it: either spinning on a nil,
--		 * or dealing with -EAGAIN cases. See lockless receive part 1
--		 * and 2 in do_msgrcv().
--		 */
--		smp_wmb(); /* barrier (B) */
-+
-+		wake_q_add(wake_q, msr->r_tsk);
- 		msr->r_msg = ERR_PTR(res);
- 	}
- }
-@@ -213,11 +207,13 @@ static void freeque(struct ipc_namespace
- {
- 	struct msg_msg *msg, *t;
- 	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
-+	WAKE_Q(wake_q);
- 
--	expunge_all(msq, -EIDRM);
-+	expunge_all(msq, -EIDRM, &wake_q);
- 	ss_wakeup(&msq->q_senders, 1);
- 	msg_rmid(ns, msq);
- 	ipc_unlock_object(&msq->q_perm);
-+	wake_up_q(&wake_q);
- 	rcu_read_unlock();
- 
- 	list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
-@@ -342,6 +338,7 @@ static int msgctl_down(struct ipc_namesp
- 	struct kern_ipc_perm *ipcp;
- 	struct msqid64_ds uninitialized_var(msqid64);
- 	struct msg_queue *msq;
-+	WAKE_Q(wake_q);
- 	int err;
- 
- 	if (cmd == IPC_SET) {
-@@ -389,7 +386,7 @@ static int msgctl_down(struct ipc_namesp
- 		/* sleeping receivers might be excluded by
- 		 * stricter permissions.
- 		 */
--		expunge_all(msq, -EAGAIN);
-+		expunge_all(msq, -EAGAIN, &wake_q);
- 		/* sleeping senders might be able to send
- 		 * due to a larger queue size.
- 		 */
-@@ -402,6 +399,7 @@ static int msgctl_down(struct ipc_namesp
- 
- out_unlock0:
- 	ipc_unlock_object(&msq->q_perm);
-+	wake_up_q(&wake_q);
- out_unlock1:
- 	rcu_read_unlock();
- out_up:
-@@ -566,7 +564,8 @@ static int testmsg(struct msg_msg *msg,
- 	return 0;
- }
- 
--static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
-+static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg,
-+				 struct wake_q_head *wake_q)
- {
- 	struct msg_receiver *msr, *t;
- 
-@@ -577,27 +576,13 @@ static inline int pipelined_send(struct
- 
- 			list_del(&msr->r_list);
- 			if (msr->r_maxsize < msg->m_ts) {
--				/* initialize pipelined send ordering */
--				msr->r_msg = NULL;
--				wake_up_process(msr->r_tsk);
--				/* barrier (B) see barrier comment below */
--				smp_wmb();
-+				wake_q_add(wake_q, msr->r_tsk);
- 				msr->r_msg = ERR_PTR(-E2BIG);
- 			} else {
--				msr->r_msg = NULL;
- 				msq->q_lrpid = task_pid_vnr(msr->r_tsk);
- 				msq->q_rtime = get_seconds();
--				wake_up_process(msr->r_tsk);
--				/*
--				 * Ensure that the wakeup is visible before
--				 * setting r_msg, as the receiving can otherwise
--				 * exit - once r_msg is set, the receiver can
--				 * continue. See lockless receive part 1 and 2
--				 * in do_msgrcv(). Barrier (B).
--				 */
--				smp_wmb();
-+				wake_q_add(wake_q, msr->r_tsk);
- 				msr->r_msg = msg;
--
- 				return 1;
- 			}
- 		}
-@@ -613,6 +598,7 @@ long do_msgsnd(int msqid, long mtype, vo
- 	struct msg_msg *msg;
- 	int err;
- 	struct ipc_namespace *ns;
-+	WAKE_Q(wake_q);
- 
- 	ns = current->nsproxy->ipc_ns;
- 
-@@ -698,7 +684,7 @@ long do_msgsnd(int msqid, long mtype, vo
- 	msq->q_lspid = task_tgid_vnr(current);
- 	msq->q_stime = get_seconds();
- 
--	if (!pipelined_send(msq, msg)) {
-+	if (!pipelined_send(msq, msg, &wake_q)) {
- 		/* no one is waiting for this message, enqueue it */
- 		list_add_tail(&msg->m_list, &msq->q_messages);
- 		msq->q_cbytes += msgsz;
-@@ -712,6 +698,7 @@ long do_msgsnd(int msqid, long mtype, vo
- 
- out_unlock0:
- 	ipc_unlock_object(&msq->q_perm);
-+	wake_up_q(&wake_q);
- out_unlock1:
- 	rcu_read_unlock();
- 	if (msg != NULL)
-@@ -932,57 +919,25 @@ long do_msgrcv(int msqid, void __user *b
- 		rcu_read_lock();
- 
- 		/* Lockless receive, part 2:
--		 * Wait until pipelined_send or expunge_all are outside of
--		 * wake_up_process(). There is a race with exit(), see
--		 * ipc/mqueue.c for the details. The correct serialization
--		 * ensures that a receiver cannot continue without the wakeup
--		 * being visible _before_ setting r_msg:
-+		 * The work in pipelined_send() and expunge_all():
-+		 * - Set pointer to message
-+		 * - Queue the receiver task for later wakeup
-+		 * - Wake up the process after the lock is dropped.
- 		 *
--		 * CPU 0                             CPU 1
--		 * <loop receiver>
--		 *   smp_rmb(); (A) <-- pair -.      <waker thread>
--		 *   <load ->r_msg>           |        msr->r_msg = NULL;
--		 *                            |        wake_up_process();
--		 * <continue>                 `------> smp_wmb(); (B)
--		 *                                     msr->r_msg = msg;
--		 *
--		 * Where (A) orders the message value read and where (B) orders
--		 * the write to the r_msg -- done in both pipelined_send and
--		 * expunge_all.
-+		 * Should the process wake up before this wakeup (due to a
-+		 * signal) it will either see the message and continue …
- 		 */
--		for (;;) {
--			/*
--			 * Pairs with writer barrier in pipelined_send
--			 * or expunge_all.
--			 */
--			smp_rmb(); /* barrier (A) */
--			msg = (struct msg_msg *)msr_d.r_msg;
--			if (msg)
--				break;
- 
--			/*
--			 * The cpu_relax() call is a compiler barrier
--			 * which forces everything in this loop to be
--			 * re-loaded.
--			 */
--			cpu_relax();
--		}
--
--		/* Lockless receive, part 3:
--		 * If there is a message or an error then accept it without
--		 * locking.
--		 */
-+		msg = (struct msg_msg *)msr_d.r_msg;
- 		if (msg != ERR_PTR(-EAGAIN))
- 			goto out_unlock1;
- 
--		/* Lockless receive, part 3:
--		 * Acquire the queue spinlock.
--		 */
-+		 /*
-+		  * … or see -EAGAIN, acquire the lock to check the message
-+		  * again.
-+		  */
- 		ipc_lock_object(&msq->q_perm);
- 
--		/* Lockless receive, part 4:
--		 * Repeat test after acquiring the spinlock.
--		 */
- 		msg = (struct msg_msg *)msr_d.r_msg;
- 		if (msg != ERR_PTR(-EAGAIN))
- 			goto out_unlock0;
diff --git a/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch b/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
index 142eafe..d7e6788 100644
--- a/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
+++ b/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
@@ -128,7 +128,7 @@
  	return desc->status_use_accessors & _IRQ_PER_CPU;
 --- a/kernel/softirq.c
 +++ b/kernel/softirq.c
-@@ -578,6 +578,15 @@ void __local_bh_enable(void)
+@@ -589,6 +589,15 @@ void __local_bh_enable(void)
  }
  EXPORT_SYMBOL(__local_bh_enable);
  
diff --git a/patches/irqwork-Move-irq-safe-work-to-irq-context.patch b/patches/irqwork-Move-irq-safe-work-to-irq-context.patch
index f2e9b6f..33b7c13 100644
--- a/patches/irqwork-Move-irq-safe-work-to-irq-context.patch
+++ b/patches/irqwork-Move-irq-safe-work-to-irq-context.patch
@@ -64,7 +64,7 @@
  	if (in_irq())
  		irq_work_tick();
  #endif
-@@ -1684,9 +1684,7 @@ static void run_timer_softirq(struct sof
+@@ -1684,9 +1684,7 @@ static __latent_entropy void run_timer_s
  {
  	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
  
diff --git a/patches/irqwork-push_most_work_into_softirq_context.patch b/patches/irqwork-push_most_work_into_softirq_context.patch
index 5b84fd6..7af3770 100644
--- a/patches/irqwork-push_most_work_into_softirq_context.patch
+++ b/patches/irqwork-push_most_work_into_softirq_context.patch
@@ -153,7 +153,7 @@
 	/* We start in dequeued state, because no RT tasks are queued */
 --- a/kernel/time/tick-sched.c
 +++ b/kernel/time/tick-sched.c
-@@ -217,6 +217,7 @@ static void nohz_full_kick_func(struct i
+@@ -220,6 +220,7 @@ static void nohz_full_kick_func(struct i
  
  static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
  	.func = nohz_full_kick_func,
@@ -172,7 +172,7 @@
  	if (in_irq())
  		irq_work_tick();
  #endif
-@@ -1684,6 +1684,10 @@ static void run_timer_softirq(struct sof
+@@ -1684,6 +1684,10 @@ static __latent_entropy void run_timer_s
  {
  	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
  
diff --git a/patches/kconfig-preempt-rt-full.patch b/patches/kconfig-preempt-rt-full.patch
index b43504e..81c6784 100644
--- a/patches/kconfig-preempt-rt-full.patch
+++ b/patches/kconfig-preempt-rt-full.patch
@@ -13,7 +13,7 @@
 
 --- a/init/Makefile
 +++ b/init/Makefile
-@@ -33,4 +33,4 @@ mounts-$(CONFIG_BLK_DEV_MD)	+= do_mounts
+@@ -35,4 +35,4 @@ mounts-$(CONFIG_BLK_DEV_MD)	+= do_mounts
  include/generated/compile.h: FORCE
  	@$($(quiet)chk_compile.h)
  	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
diff --git a/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch b/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
index fcdaec6..06355cd 100644
--- a/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
+++ b/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
@@ -26,7 +26,7 @@
 
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -186,6 +186,7 @@ struct hotplug_pcp {
+@@ -259,6 +259,7 @@ struct hotplug_pcp {
  	int refcount;
  	int grab_lock;
  	struct completion synced;
@@ -34,7 +34,7 @@
  #ifdef CONFIG_PREEMPT_RT_FULL
  	/*
  	 * Note, on PREEMPT_RT, the hotplug lock must save the state of
-@@ -289,6 +290,7 @@ static int sync_unplug_thread(void *data
+@@ -362,6 +363,7 @@ static int sync_unplug_thread(void *data
  {
  	struct hotplug_pcp *hp = data;
  
@@ -42,7 +42,7 @@
  	preempt_disable();
  	hp->unplug = current;
  	wait_for_pinned_cpus(hp);
-@@ -354,6 +356,14 @@ static void __cpu_unplug_sync(struct hot
+@@ -427,6 +429,14 @@ static void __cpu_unplug_sync(struct hot
  	wait_for_completion(&hp->synced);
  }
  
@@ -57,7 +57,7 @@
  /*
   * Start the sync_unplug_thread on the target cpu and wait for it to
   * complete.
-@@ -377,6 +387,7 @@ static int cpu_unplug_begin(unsigned int
+@@ -450,6 +460,7 @@ static int cpu_unplug_begin(unsigned int
  	tell_sched_cpu_down_begin(cpu);
  
  	init_completion(&hp->synced);
@@ -65,7 +65,7 @@
  
  	hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
  	if (IS_ERR(hp->sync_tsk)) {
-@@ -392,8 +403,7 @@ static int cpu_unplug_begin(unsigned int
+@@ -465,8 +476,7 @@ static int cpu_unplug_begin(unsigned int
  	 * wait for tasks that are going to enter these sections and
  	 * we must not have them block.
  	 */
@@ -75,7 +75,7 @@
  	return 0;
  }
  
-@@ -983,6 +993,7 @@ static int takedown_cpu(unsigned int cpu
+@@ -1062,6 +1072,7 @@ static int takedown_cpu(unsigned int cpu
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  	int err;
  
diff --git a/patches/kernel-futex-don-t-deboost-too-early.patch b/patches/kernel-futex-don-t-deboost-too-early.patch
index 33f0772..d902342 100644
--- a/patches/kernel-futex-don-t-deboost-too-early.patch
+++ b/patches/kernel-futex-don-t-deboost-too-early.patch
@@ -58,7 +58,7 @@
  	do {						\
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -1368,7 +1368,7 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1377,7 +1377,7 @@ static int wake_futex_pi(u32 __user *uad
  	 * deboost first (and lose our higher priority), then the task might get
  	 * scheduled away before the wake up can take place.
  	 */
diff --git a/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch b/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
index 4ad41d5..0d01ea9 100644
--- a/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
+++ b/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
@@ -15,7 +15,7 @@
 
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -1088,6 +1088,7 @@ static int __ref _cpu_down(unsigned int
+@@ -1166,6 +1166,7 @@ static int __ref _cpu_down(unsigned int
  	bool hasdied = false;
  	int mycpu;
  	cpumask_var_t cpumask;
@@ -23,7 +23,7 @@
  
  	if (num_online_cpus() == 1)
  		return -EBUSY;
-@@ -1098,6 +1099,12 @@ static int __ref _cpu_down(unsigned int
+@@ -1176,6 +1177,12 @@ static int __ref _cpu_down(unsigned int
  	/* Move the downtaker off the unplug cpu */
  	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
  		return -ENOMEM;
@@ -36,7 +36,7 @@
  	cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
  	set_cpus_allowed_ptr(current, cpumask);
  	free_cpumask_var(cpumask);
-@@ -1106,7 +1113,8 @@ static int __ref _cpu_down(unsigned int
+@@ -1184,7 +1191,8 @@ static int __ref _cpu_down(unsigned int
  	if (mycpu == cpu) {
 		printk(KERN_ERR "Yuck! Still on unplug CPU!\n");
  		migrate_enable();
@@ -46,7 +46,7 @@
  	}
  
  	cpu_hotplug_begin();
-@@ -1160,6 +1168,9 @@ static int __ref _cpu_down(unsigned int
+@@ -1238,6 +1246,9 @@ static int __ref _cpu_down(unsigned int
  	/* This post dead nonsense must die */
  	if (!ret && hasdied)
  		cpu_notify_nofail(CPU_POST_DEAD, cpu);
diff --git a/patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch b/patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
index cc4a5a2..d5756a1 100644
--- a/patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
+++ b/patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
@@ -13,7 +13,7 @@
 
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -3293,7 +3293,7 @@ void migrate_disable(void)
+@@ -3299,7 +3299,7 @@ void migrate_disable(void)
  {
  	struct task_struct *p = current;
  
@@ -22,7 +22,7 @@
  #ifdef CONFIG_SCHED_DEBUG
  		p->migrate_disable_atomic++;
  #endif
-@@ -3320,7 +3320,7 @@ void migrate_enable(void)
+@@ -3326,7 +3326,7 @@ void migrate_enable(void)
  {
  	struct task_struct *p = current;
  
diff --git a/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch b/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
index f7495d8..03f935c 100644
--- a/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
+++ b/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
@@ -14,7 +14,7 @@
 
 --- a/kernel/events/core.c
 +++ b/kernel/events/core.c
-@@ -1042,6 +1042,7 @@ static void __perf_mux_hrtimer_init(stru
+@@ -1050,6 +1050,7 @@ static void __perf_mux_hrtimer_init(stru
  	raw_spin_lock_init(&cpuctx->hrtimer_lock);
  	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
  	timer->function = perf_mux_hrtimer_handler;
diff --git a/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch b/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
index 651f7a5..b89ee34 100644
--- a/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
+++ b/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
@@ -15,7 +15,7 @@
 
 --- a/kernel/printk/printk.c
 +++ b/kernel/printk/printk.c
-@@ -1631,6 +1631,11 @@ static void call_console_drivers(int lev
+@@ -1628,6 +1628,11 @@ static void call_console_drivers(int lev
  	if (!console_drivers)
  		return;
  
@@ -27,7 +27,7 @@
  	migrate_disable();
  	for_each_console(con) {
  		if (exclusive_console && con != exclusive_console)
-@@ -2565,6 +2570,11 @@ void console_unblank(void)
+@@ -2556,6 +2561,11 @@ void console_unblank(void)
  {
  	struct console *c;
  
diff --git a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
new file mode 100644
index 0000000..986d5e1
--- /dev/null
+++ b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
@@ -0,0 +1,60 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 21 Nov 2016 19:31:08 +0100
+Subject: [PATCH] kernel/sched: move stack + kprobe clean up to
+ __put_task_struct()
+
+There is no need to free the stack before the task struct. This also
+comes in handy on -RT because we can't free memory in a preempt-disabled
+region.
+
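+A sketch of the reference-drop path this relies on, using the stock task
+refcount helpers (the surrounding code is made up):
+
+	struct task_struct *t = current;
+
+	get_task_struct(t);	/* take a reference */
+
+	/* ... possibly much later, from a preemptible context ... */
+
+	/*
+	 * If this drops the last reference, __put_task_struct() now runs
+	 * kprobe_flush_task() and put_task_stack() here rather than in
+	 * finish_task_switch(), which on -RT is a region where we must
+	 * not free memory.
+	 */
+	put_task_struct(t);
+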
+Cc: stable-rt@vger.kernel.org #for kprobe_flush_task()
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/fork.c       |   10 ++++++++++
+ kernel/sched/core.c |    9 ---------
+ 2 files changed, 10 insertions(+), 9 deletions(-)
+
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -76,6 +76,7 @@
+ #include <linux/compiler.h>
+ #include <linux/sysctl.h>
+ #include <linux/kcov.h>
++#include <linux/kprobes.h>
+ 
+ #include <asm/pgtable.h>
+ #include <asm/pgalloc.h>
+@@ -385,6 +386,15 @@ void __put_task_struct(struct task_struc
+ 	WARN_ON(atomic_read(&tsk->usage));
+ 	WARN_ON(tsk == current);
+ 
++	/*
++	 * Remove function-return probe instances associated with this
++	 * task and put them back on the free list.
++	 */
++	kprobe_flush_task(tsk);
++
++	/* Task is done with its stack. */
++	put_task_stack(tsk);
++
+ 	cgroup_free(tsk);
+ 	task_numa_free(tsk);
+ 	security_task_free(tsk);
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -2795,15 +2795,6 @@ static struct rq *finish_task_switch(str
+ 		if (prev->sched_class->task_dead)
+ 			prev->sched_class->task_dead(prev);
+ 
+-		/*
+-		 * Remove function-return probe instances associated with this
+-		 * task and put them back on the free list.
+-		 */
+-		kprobe_flush_task(prev);
+-
+-		/* Task is done with its stack. */
+-		put_task_stack(prev);
+-
+ 		put_task_struct(prev);
+ 	}
+ 
diff --git a/patches/kernel-softirq-unlock-with-irqs-on.patch b/patches/kernel-softirq-unlock-with-irqs-on.patch
index a54b5cb..8ad12c5 100644
--- a/patches/kernel-softirq-unlock-with-irqs-on.patch
+++ b/patches/kernel-softirq-unlock-with-irqs-on.patch
@@ -13,7 +13,7 @@
 
 --- a/kernel/softirq.c
 +++ b/kernel/softirq.c
-@@ -549,8 +549,10 @@ static void do_current_softirqs(void)
+@@ -560,8 +560,10 @@ static void do_current_softirqs(void)
  			do_single_softirq(i);
  		}
  		softirq_clr_runner(i);
diff --git a/patches/kgb-serial-hackaround.patch b/patches/kgb-serial-hackaround.patch
index ee10c9d..ccc19d7 100644
--- a/patches/kgb-serial-hackaround.patch
+++ b/patches/kgb-serial-hackaround.patch
@@ -33,7 +33,7 @@
  #include <linux/uaccess.h>
  #include <linux/pm_runtime.h>
  #include <linux/timer.h>
-@@ -3111,6 +3112,8 @@ void serial8250_console_write(struct uar
+@@ -3146,6 +3147,8 @@ void serial8250_console_write(struct uar
  
  	if (port->sysrq || oops_in_progress)
  		locked = 0;
diff --git a/patches/latency-hist.patch b/patches/latency-hist.patch
index 169c81d..7469413 100644
--- a/patches/latency-hist.patch
+++ b/patches/latency-hist.patch
@@ -236,7 +236,7 @@
  	int				start_pid;
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1892,6 +1892,12 @@ struct task_struct {
+@@ -1917,6 +1917,12 @@ struct task_struct {
  	/* bitmask and counter of trace recursion */
  	unsigned long trace_recursion;
  #endif /* CONFIG_TRACING */
@@ -411,7 +411,7 @@
  			 * minimizing wakeups, not running timers at the
 --- a/kernel/trace/Kconfig
 +++ b/kernel/trace/Kconfig
-@@ -187,6 +187,24 @@ config IRQSOFF_TRACER
+@@ -182,6 +182,24 @@ config IRQSOFF_TRACER
  	  enabled. This option and the preempt-off timing option can be
  	  used together or separately.)
  
@@ -436,7 +436,7 @@
  config PREEMPT_TRACER
  	bool "Preemption-off Latency Tracer"
  	default n
-@@ -212,6 +230,24 @@ config PREEMPT_TRACER
+@@ -206,6 +224,24 @@ config PREEMPT_TRACER
  	  enabled. This option and the irqs-off timing option can be
  	  used together or separately.)
  
@@ -461,9 +461,9 @@
  config SCHED_TRACER
  	bool "Scheduling Latency Tracer"
  	select GENERIC_TRACER
-@@ -222,6 +258,74 @@ config SCHED_TRACER
- 	  This tracer tracks the latency of the highest priority task
- 	  to be scheduled in, starting from the point it has woken up.
+@@ -251,6 +287,74 @@ config HWLAT_TRACER
+ 	 file. Every time a latency is greater than tracing_thresh, it will
+ 	 be recorded into the ring buffer.
  
 +config WAKEUP_LATENCY_HIST
 +	bool "Scheduling Latency Histogram"
@@ -538,10 +538,10 @@
  	depends on !GENERIC_TRACER
 --- a/kernel/trace/Makefile
 +++ b/kernel/trace/Makefile
-@@ -37,6 +37,10 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace_f
- obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
+@@ -38,6 +38,10 @@ obj-$(CONFIG_IRQSOFF_TRACER) += trace_ir
  obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
  obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
+ obj-$(CONFIG_HWLAT_TRACER) += trace_hwlat.o
 +obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o
 +obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o
 +obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o
diff --git a/patches/lglocks-rt.patch b/patches/lglocks-rt.patch
deleted file mode 100644
index e024ff3..0000000
--- a/patches/lglocks-rt.patch
+++ /dev/null
@@ -1,199 +0,0 @@
-Subject: lglocks: Provide a RT safe variant
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Wed, 15 Jun 2011 11:02:21 +0200
-
-lglocks by themselves will spin in order to get the lock. This will end
-up badly if a task with the highest priority keeps spinning while a task
-with the lowest priority owns the lock.
-
-Let's replace them with rt_mutex-based locks so they can sleep, track
-the owner and boost it if needed.
-
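-A hypothetical usage sketch (my_lglock and the data it protects are
-placeholders; lg_lock_init() must still be run once for lockdep):
-
-	DEFINE_STATIC_LGLOCK(my_lglock);
-
-	static void touch_this_cpus_data(void)
-	{
-		/*
-		 * On -RT this now takes this CPU's rt_mutex under
-		 * migrate_disable() instead of spinning on an
-		 * arch_spinlock_t, so a preempted low-priority owner
-		 * can boost and make progress.
-		 */
-		lg_local_lock(&my_lglock);
-		/* ... modify this CPU's slice of the data ... */
-		lg_local_unlock(&my_lglock);
-	}
-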
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- include/linux/lglock.h  |   18 +++++++++++++
- kernel/locking/lglock.c |   62 ++++++++++++++++++++++++++++++------------------
- 2 files changed, 58 insertions(+), 22 deletions(-)
-
---- a/include/linux/lglock.h
-+++ b/include/linux/lglock.h
-@@ -34,13 +34,30 @@
- #endif
- 
- struct lglock {
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+	struct rt_mutex __percpu *lock;
-+#else
- 	arch_spinlock_t __percpu *lock;
-+#endif
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- 	struct lock_class_key lock_key;
- 	struct lockdep_map    lock_dep_map;
- #endif
- };
- 
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+# define DEFINE_LGLOCK(name)						\
-+	static DEFINE_PER_CPU(struct rt_mutex, name ## _lock)		\
-+	= __RT_MUTEX_INITIALIZER( name ## _lock);			\
-+	struct lglock name = { .lock = &name ## _lock }
-+
-+# define DEFINE_STATIC_LGLOCK(name)					\
-+	static DEFINE_PER_CPU(struct rt_mutex, name ## _lock)		\
-+	= __RT_MUTEX_INITIALIZER( name ## _lock);			\
-+	static struct lglock name = { .lock = &name ## _lock }
-+
-+#else
-+
- #define DEFINE_LGLOCK(name)						\
- 	static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock)		\
- 	= __ARCH_SPIN_LOCK_UNLOCKED;					\
-@@ -50,6 +67,7 @@ struct lglock {
- 	static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock)		\
- 	= __ARCH_SPIN_LOCK_UNLOCKED;					\
- 	static struct lglock name = { .lock = &name ## _lock }
-+#endif
- 
- void lg_lock_init(struct lglock *lg, char *name);
- 
---- a/kernel/locking/lglock.c
-+++ b/kernel/locking/lglock.c
-@@ -4,6 +4,15 @@
- #include <linux/cpu.h>
- #include <linux/string.h>
- 
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+# define lg_lock_ptr		arch_spinlock_t
-+# define lg_do_lock(l)		arch_spin_lock(l)
-+# define lg_do_unlock(l)	arch_spin_unlock(l)
-+#else
-+# define lg_lock_ptr		struct rt_mutex
-+# define lg_do_lock(l)		__rt_spin_lock__no_mg(l)
-+# define lg_do_unlock(l)	__rt_spin_unlock(l)
-+#endif
- /*
-  * Note there is no uninit, so lglocks cannot be defined in
-  * modules (but it's fine to use them from there)
-@@ -12,51 +21,60 @@
- 
- void lg_lock_init(struct lglock *lg, char *name)
- {
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+	int i;
-+
-+	for_each_possible_cpu(i) {
-+		struct rt_mutex *lock = per_cpu_ptr(lg->lock, i);
-+
-+		rt_mutex_init(lock);
-+	}
-+#endif
- 	LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0);
- }
- EXPORT_SYMBOL(lg_lock_init);
- 
- void lg_local_lock(struct lglock *lg)
- {
--	arch_spinlock_t *lock;
-+	lg_lock_ptr *lock;
- 
--	preempt_disable();
-+	migrate_disable();
- 	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
- 	lock = this_cpu_ptr(lg->lock);
--	arch_spin_lock(lock);
-+	lg_do_lock(lock);
- }
- EXPORT_SYMBOL(lg_local_lock);
- 
- void lg_local_unlock(struct lglock *lg)
- {
--	arch_spinlock_t *lock;
-+	lg_lock_ptr *lock;
- 
- 	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
- 	lock = this_cpu_ptr(lg->lock);
--	arch_spin_unlock(lock);
--	preempt_enable();
-+	lg_do_unlock(lock);
-+	migrate_enable();
- }
- EXPORT_SYMBOL(lg_local_unlock);
- 
- void lg_local_lock_cpu(struct lglock *lg, int cpu)
- {
--	arch_spinlock_t *lock;
-+	lg_lock_ptr *lock;
- 
--	preempt_disable();
-+	preempt_disable_nort();
- 	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
- 	lock = per_cpu_ptr(lg->lock, cpu);
--	arch_spin_lock(lock);
-+	lg_do_lock(lock);
- }
- EXPORT_SYMBOL(lg_local_lock_cpu);
- 
- void lg_local_unlock_cpu(struct lglock *lg, int cpu)
- {
--	arch_spinlock_t *lock;
-+	lg_lock_ptr *lock;
- 
- 	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
- 	lock = per_cpu_ptr(lg->lock, cpu);
--	arch_spin_unlock(lock);
--	preempt_enable();
-+	lg_do_unlock(lock);
-+	preempt_enable_nort();
- }
- EXPORT_SYMBOL(lg_local_unlock_cpu);
- 
-@@ -70,15 +88,15 @@ void lg_double_lock(struct lglock *lg, i
- 
- 	preempt_disable();
- 	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
--	arch_spin_lock(per_cpu_ptr(lg->lock, cpu1));
--	arch_spin_lock(per_cpu_ptr(lg->lock, cpu2));
-+	lg_do_lock(per_cpu_ptr(lg->lock, cpu1));
-+	lg_do_lock(per_cpu_ptr(lg->lock, cpu2));
- }
- 
- void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2)
- {
- 	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
--	arch_spin_unlock(per_cpu_ptr(lg->lock, cpu1));
--	arch_spin_unlock(per_cpu_ptr(lg->lock, cpu2));
-+	lg_do_unlock(per_cpu_ptr(lg->lock, cpu1));
-+	lg_do_unlock(per_cpu_ptr(lg->lock, cpu2));
- 	preempt_enable();
- }
- 
-@@ -86,12 +104,12 @@ void lg_global_lock(struct lglock *lg)
- {
- 	int i;
- 
--	preempt_disable();
-+	preempt_disable_nort();
- 	lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
- 	for_each_possible_cpu(i) {
--		arch_spinlock_t *lock;
-+		lg_lock_ptr *lock;
- 		lock = per_cpu_ptr(lg->lock, i);
--		arch_spin_lock(lock);
-+		lg_do_lock(lock);
- 	}
- }
- EXPORT_SYMBOL(lg_global_lock);
-@@ -102,10 +120,10 @@ void lg_global_unlock(struct lglock *lg)
- 
- 	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
- 	for_each_possible_cpu(i) {
--		arch_spinlock_t *lock;
-+		lg_lock_ptr *lock;
- 		lock = per_cpu_ptr(lg->lock, i);
--		arch_spin_unlock(lock);
-+		lg_do_unlock(lock);
- 	}
--	preempt_enable();
-+	preempt_enable_nort();
- }
- EXPORT_SYMBOL(lg_global_unlock);
diff --git a/patches/localversion.patch b/patches/localversion.patch
index e16fb07..a02382e 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@
 --- /dev/null
 +++ b/localversion-rt
 @@ -0,0 +1 @@
-+-rt10
++-rt1
diff --git a/patches/lockdep-Quiet-gcc-about-dangerous-__builtin_return_a.patch b/patches/lockdep-Quiet-gcc-about-dangerous-__builtin_return_a.patch
deleted file mode 100644
index 62db27d..0000000
--- a/patches/lockdep-Quiet-gcc-about-dangerous-__builtin_return_a.patch
+++ /dev/null
@@ -1,110 +0,0 @@
-From: Steven Rostedt <rostedt@goodmis.org>
-Date: Thu, 8 Sep 2016 12:34:33 -0400
-Subject: [PATCH] lockdep: Quiet gcc about dangerous __builtin_return_address()
- operations
-
-[
-  Boris, does this quiet gcc for you?
-  I haven't fully tested this yet, as I still don't have a compiler
-  that does the warning.
-]
-
-Gcc's new warnings about __builtin_return_address(n) operations with
-n > 0 are popping up around the kernel. The operation is dangerous, and
-the warning is "good to know". But there are instances where we use
-__builtin_return_address(n) with n > 0, are aware of the issues, and
-work around them. It is used mostly for tracing and debugging. In
-these cases, the warning becomes a distraction and is not helpful.
-
-To get better lock issue traces, a function like get_lock_parent_ip()
-uses __builtin_return_address() to find the caller of the lock, and
-skip over the internal callers of the lock itself. Currently it is only
-used in the kernel/ directory and only if certain configs are enabled.
-
-Create a new config called CONFIG_USING_GET_LOCK_PARENT_IP that gets
-selected when another config relies on get_lock_parent_ip(), and this
-will now enable the function get_lock_parent_ip(); otherwise it won't be
-defined. It will also disable the frame-address warnings from gcc in
-the kernel directory.
-
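-A sketch of what the helper is for; my_lock_debug() is hypothetical,
-get_lock_parent_ip() is the real function guarded by the new config:
-
-	static void my_lock_debug(void)
-	{
-		/*
-		 * CALLER_ADDR0 is __builtin_return_address(0) and would
-		 * report the lock wrapper itself. get_lock_parent_ip()
-		 * walks up to CALLER_ADDR1/CALLER_ADDR2 to find the real
-		 * lock site, which is exactly the n > 0 use gcc warns
-		 * about.
-		 */
-		unsigned long ip = get_lock_parent_ip();
-
-		pr_debug("lock acquired at %pS\n", (void *)ip);
-	}
-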
-Reported-by: Borislav Petkov <bp@alien8.de>
-Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
----
- include/linux/ftrace.h |    2 ++
- kernel/Makefile        |    7 +++++++
- kernel/trace/Kconfig   |    1 +
- lib/Kconfig.debug      |   10 ++++++++++
- 4 files changed, 20 insertions(+)
-
---- a/include/linux/ftrace.h
-+++ b/include/linux/ftrace.h
-@@ -714,6 +714,7 @@ static inline void __ftrace_enabled_rest
- #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
- #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
- 
-+#ifdef CONFIG_USING_GET_LOCK_PARENT_IP
- static inline unsigned long get_lock_parent_ip(void)
- {
- 	unsigned long addr = CALLER_ADDR0;
-@@ -725,6 +726,7 @@ static inline unsigned long get_lock_par
- 		return addr;
- 	return CALLER_ADDR2;
- }
-+#endif
- 
- #ifdef CONFIG_IRQSOFF_TRACER
-   extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
---- a/kernel/Makefile
-+++ b/kernel/Makefile
-@@ -11,6 +11,13 @@ obj-y     = fork.o exec_domain.o panic.o
- 	    notifier.o ksysfs.o cred.o reboot.o \
- 	    async.o range.o smpboot.o
- 
-+# Tracing may do some dangerous __builtin_return_address() operations
-+# We know they are dangerous, we don't need gcc telling us that.
-+ifdef CONFIG_USING_GET_LOCK_PARENT_IP
-+FRAME_CFLAGS := $(call cc-disable-warning,frame-address)
-+KBUILD_CFLAGS += $(FRAME_CFLAGS)
-+endif
-+
- obj-$(CONFIG_MULTIUSER) += groups.o
- 
- ifdef CONFIG_FUNCTION_TRACER
---- a/kernel/trace/Kconfig
-+++ b/kernel/trace/Kconfig
-@@ -197,6 +197,7 @@ config PREEMPT_TRACER
- 	select RING_BUFFER_ALLOW_SWAP
- 	select TRACER_SNAPSHOT
- 	select TRACER_SNAPSHOT_PER_CPU_SWAP
-+	select USING_GET_LOCK_PARENT_IP
- 	help
- 	  This option measures the time spent in preemption-off critical
- 	  sections, with microsecond accuracy.
---- a/lib/Kconfig.debug
-+++ b/lib/Kconfig.debug
-@@ -977,6 +977,7 @@ config TIMER_STATS
- config DEBUG_PREEMPT
- 	bool "Debug preemptible kernel"
- 	depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
-+	select USING_GET_LOCK_PARENT_IP
- 	default y
- 	help
- 	  If you say Y here then the kernel will use a debug variant of the
-@@ -1159,8 +1160,17 @@ config LOCK_TORTURE_TEST
- 
- endmenu # lock debugging
- 
-+config USING_GET_LOCK_PARENT_IP
-+        bool
-+	help
-+	  Enables the use of the function get_lock_parent_ip() that
-+	  will use __builtin_return_address(n) with n > 0 causing
-+	  some gcc warnings. When this is selected, those warnings
-+	  will be suppressed.
-+
- config TRACE_IRQFLAGS
- 	bool
-+	select USING_GET_LOCK_PARENT_IP
- 	help
- 	  Enables hooks to interrupt enabling and disabling for
- 	  either tracing or lock debugging.
diff --git a/patches/lockdep-no-softirq-accounting-on-rt.patch b/patches/lockdep-no-softirq-accounting-on-rt.patch
index 8a1d3cd..6ec5dd2 100644
--- a/patches/lockdep-no-softirq-accounting-on-rt.patch
+++ b/patches/lockdep-no-softirq-accounting-on-rt.patch
@@ -40,7 +40,7 @@
  #if defined(CONFIG_IRQSOFF_TRACER) || \
 --- a/kernel/locking/lockdep.c
 +++ b/kernel/locking/lockdep.c
-@@ -3686,6 +3686,7 @@ static void check_flags(unsigned long fl
+@@ -3689,6 +3689,7 @@ static void check_flags(unsigned long fl
  		}
  	}
  
@@ -48,7 +48,7 @@
  	/*
  	 * We dont accurately track softirq state in e.g.
  	 * hardirq contexts (such as on 4KSTACKS), so only
-@@ -3700,6 +3701,7 @@ static void check_flags(unsigned long fl
+@@ -3703,6 +3704,7 @@ static void check_flags(unsigned long fl
  			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
  		}
  	}
diff --git a/patches/locking-percpu-rwsem-use-swait-for-the-wating-writer.patch b/patches/locking-percpu-rwsem-use-swait-for-the-wating-writer.patch
new file mode 100644
index 0000000..0071ac7
--- /dev/null
+++ b/patches/locking-percpu-rwsem-use-swait-for-the-wating-writer.patch
@@ -0,0 +1,72 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 21 Nov 2016 19:26:15 +0100
+Subject: [PATCH] locking/percpu-rwsem: use swait for the waiting writer
+
+Use struct swait_queue_head instead of wait_queue_head_t for the waiting
+writer. The swait implementation is smaller and more lightweight than
+wait_queue_head_t.
+
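+A minimal sketch of the swait API being adopted; readers_done() stands in
+for the real readers_active_check():
+
+	static DECLARE_SWAIT_QUEUE_HEAD(my_swait);
+
+	/* writer side: sleep until all active readers are gone */
+	swait_event(my_swait, readers_done());
+
+	/* reader side: prod the writer to recheck */
+	swake_up(&my_swait);
+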
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/percpu-rwsem.h  |    6 +++---
+ kernel/locking/percpu-rwsem.c |    6 +++---
+ 2 files changed, 6 insertions(+), 6 deletions(-)
+
+--- a/include/linux/percpu-rwsem.h
++++ b/include/linux/percpu-rwsem.h
+@@ -4,7 +4,7 @@
+ #include <linux/atomic.h>
+ #include <linux/rwsem.h>
+ #include <linux/percpu.h>
+-#include <linux/wait.h>
++#include <linux/swait.h>
+ #include <linux/rcu_sync.h>
+ #include <linux/lockdep.h>
+ 
+@@ -12,7 +12,7 @@ struct percpu_rw_semaphore {
+ 	struct rcu_sync		rss;
+ 	unsigned int __percpu	*read_count;
+ 	struct rw_semaphore	rw_sem;
+-	wait_queue_head_t	writer;
++	struct swait_queue_head	writer;
+ 	int			readers_block;
+ };
+ 
+@@ -22,7 +22,7 @@ static struct percpu_rw_semaphore name =
+ 	.rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC),	\
+ 	.read_count = &__percpu_rwsem_rc_##name,			\
+ 	.rw_sem = __RWSEM_INITIALIZER(name.rw_sem),			\
+-	.writer = __WAIT_QUEUE_HEAD_INITIALIZER(name.writer),		\
++	.writer = __SWAIT_QUEUE_HEAD_INITIALIZER(name.writer),		\
+ }
+ 
+ extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
+--- a/kernel/locking/percpu-rwsem.c
++++ b/kernel/locking/percpu-rwsem.c
+@@ -18,7 +18,7 @@ int __percpu_init_rwsem(struct percpu_rw
+ 	/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
+ 	rcu_sync_init(&sem->rss, RCU_SCHED_SYNC);
+ 	__init_rwsem(&sem->rw_sem, name, rwsem_key);
+-	init_waitqueue_head(&sem->writer);
++	init_swait_queue_head(&sem->writer);
+ 	sem->readers_block = 0;
+ 	return 0;
+ }
+@@ -103,7 +103,7 @@ void __percpu_up_read(struct percpu_rw_s
+ 	__this_cpu_dec(*sem->read_count);
+ 
+ 	/* Prod writer to recheck readers_active */
+-	wake_up(&sem->writer);
++	swake_up(&sem->writer);
+ }
+ EXPORT_SYMBOL_GPL(__percpu_up_read);
+ 
+@@ -160,7 +160,7 @@ void percpu_down_write(struct percpu_rw_
+ 	 */
+ 
+ 	/* Wait for all now active readers to complete. */
+-	wait_event(sem->writer, readers_active_check(sem));
++	swait_event(sem->writer, readers_active_check(sem));
+ }
+ EXPORT_SYMBOL_GPL(percpu_down_write);
+ 
diff --git a/patches/lockinglglocks_Use_preempt_enabledisable_nort.patch b/patches/lockinglglocks_Use_preempt_enabledisable_nort.patch
deleted file mode 100644
index 8c13841..0000000
--- a/patches/lockinglglocks_Use_preempt_enabledisable_nort.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-Subject: locking/lglocks: Use preempt_enable/disable_nort() in lg_double_[un]lock
-From: Mike Galbraith <umgwanakikbuti@gmail.com>
-Date: Sat, 27 Feb 2016 08:34:43 +0100
-
-Let's not do that when snagging an rtmutex.
-
-Signed-off-by: Mike Galbraith <umgwanakilbuti@gmail.com>
-Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Cc: linux-rt-users <linux-rt-users@vger.kernel.org>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- kernel/locking/lglock.c |    4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/kernel/locking/lglock.c
-+++ b/kernel/locking/lglock.c
-@@ -86,7 +86,7 @@ void lg_double_lock(struct lglock *lg, i
- 	if (cpu2 < cpu1)
- 		swap(cpu1, cpu2);
- 
--	preempt_disable();
-+	preempt_disable_nort();
- 	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
- 	lg_do_lock(per_cpu_ptr(lg->lock, cpu1));
- 	lg_do_lock(per_cpu_ptr(lg->lock, cpu2));
-@@ -97,7 +97,7 @@ void lg_double_unlock(struct lglock *lg,
- 	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
- 	lg_do_unlock(per_cpu_ptr(lg->lock, cpu1));
- 	lg_do_unlock(per_cpu_ptr(lg->lock, cpu2));
--	preempt_enable();
-+	preempt_enable_nort();
- }
- 
- void lg_global_lock(struct lglock *lg)
diff --git a/patches/md-raid5-percpu-handling-rt-aware.patch b/patches/md-raid5-percpu-handling-rt-aware.patch
index 35b9d2b..2593aa1 100644
--- a/patches/md-raid5-percpu-handling-rt-aware.patch
+++ b/patches/md-raid5-percpu-handling-rt-aware.patch
@@ -14,9 +14,9 @@
 Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
 
 ---
- drivers/md/raid5.c |    7 +++++--
+ drivers/md/raid5.c |    8 +++++---
  drivers/md/raid5.h |    1 +
- 2 files changed, 6 insertions(+), 2 deletions(-)
+ 2 files changed, 6 insertions(+), 3 deletions(-)
 
 --- a/drivers/md/raid5.c
 +++ b/drivers/md/raid5.c
@@ -41,14 +41,22 @@
  }
  
  static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
-@@ -6438,6 +6440,7 @@ static int raid5_alloc_percpu(struct r5c
- 			       __func__, cpu);
- 			break;
- 		}
-+		spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock);
+@@ -6391,6 +6393,7 @@ static int raid456_cpu_up_prepare(unsign
+ 		       __func__, cpu);
+ 		return -ENOMEM;
  	}
- 	put_online_cpus();
++	spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock);
+ 	return 0;
+ }
  
+@@ -6401,7 +6404,6 @@ static int raid5_alloc_percpu(struct r5c
+ 	conf->percpu = alloc_percpu(struct raid5_percpu);
+ 	if (!conf->percpu)
+ 		return -ENOMEM;
+-
+ 	err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
+ 	if (!err) {
+ 		conf->scribble_disks = max(conf->raid_disks,
 --- a/drivers/md/raid5.h
 +++ b/drivers/md/raid5.h
 @@ -504,6 +504,7 @@ struct r5conf {
diff --git a/patches/mips-disable-highmem-on-rt.patch b/patches/mips-disable-highmem-on-rt.patch
index 8424c9f..a8ebd9a 100644
--- a/patches/mips-disable-highmem-on-rt.patch
+++ b/patches/mips-disable-highmem-on-rt.patch
@@ -11,7 +11,7 @@
 
 --- a/arch/mips/Kconfig
 +++ b/arch/mips/Kconfig
-@@ -2480,7 +2480,7 @@ config MIPS_ASID_BITS_VARIABLE
+@@ -2514,7 +2514,7 @@ config MIPS_ASID_BITS_VARIABLE
  #
  config HIGHMEM
  	bool "High Memory Support"
diff --git a/patches/mm-convert-swap-to-percpu-locked.patch b/patches/mm-convert-swap-to-percpu-locked.patch
index 899ded4..980ef9c 100644
--- a/patches/mm-convert-swap-to-percpu-locked.patch
+++ b/patches/mm-convert-swap-to-percpu-locked.patch
@@ -17,7 +17,7 @@
 
 --- a/include/linux/swap.h
 +++ b/include/linux/swap.h
-@@ -290,6 +290,7 @@ extern unsigned long nr_free_pagecache_p
+@@ -293,6 +293,7 @@ extern unsigned long nr_free_pagecache_p
  
  
  /* linux/mm/swap.c */
@@ -27,7 +27,7 @@
  extern void lru_cache_add_file(struct page *page);
 --- a/mm/compaction.c
 +++ b/mm/compaction.c
-@@ -1585,10 +1585,12 @@ static enum compact_result compact_zone(
+@@ -1612,10 +1612,12 @@ static enum compact_result compact_zone(
  				block_start_pfn(cc->migrate_pfn, cc->order);
  
  			if (cc->last_migrated_pfn < current_block_start) {
@@ -44,7 +44,7 @@
  			}
 --- a/mm/page_alloc.c
 +++ b/mm/page_alloc.c
-@@ -6600,7 +6600,9 @@ static int page_alloc_cpu_notify(struct
+@@ -6559,7 +6559,9 @@ static int page_alloc_cpu_notify(struct
  	int cpu = (unsigned long)hcpu;
  
  	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
diff --git a/patches/mm-disable-sloub-rt.patch b/patches/mm-disable-sloub-rt.patch
index e538973..b081c35 100644
--- a/patches/mm-disable-sloub-rt.patch
+++ b/patches/mm-disable-sloub-rt.patch
@@ -13,7 +13,7 @@
 
 --- a/init/Kconfig
 +++ b/init/Kconfig
-@@ -1748,6 +1748,7 @@ choice
+@@ -1759,6 +1759,7 @@ choice
  
  config SLAB
  	bool "SLAB"
@@ -21,7 +21,7 @@
  	select HAVE_HARDENED_USERCOPY_ALLOCATOR
  	help
  	  The regular slab allocator that is established and known to work
-@@ -1768,6 +1769,7 @@ config SLUB
+@@ -1779,6 +1780,7 @@ config SLUB
  config SLOB
  	depends on EXPERT
  	bool "SLOB (Simple Allocator)"
diff --git a/patches/mm-enable-slub.patch b/patches/mm-enable-slub.patch
index e6940b5..f81c4a4 100644
--- a/patches/mm-enable-slub.patch
+++ b/patches/mm-enable-slub.patch
@@ -27,7 +27,7 @@
  	struct list_head slabs_partial;	/* partial list first, better asm code */
 --- a/mm/slub.c
 +++ b/mm/slub.c
-@@ -1145,7 +1145,7 @@ static noinline int free_debug_processin
+@@ -1141,7 +1141,7 @@ static noinline int free_debug_processin
  	unsigned long uninitialized_var(flags);
  	int ret = 0;
  
@@ -36,7 +36,7 @@
  	slab_lock(page);
  
  	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
-@@ -1180,7 +1180,7 @@ static noinline int free_debug_processin
+@@ -1176,7 +1176,7 @@ static noinline int free_debug_processin
  			 bulk_cnt, cnt);
  
  	slab_unlock(page);
@@ -45,7 +45,7 @@
  	if (!ret)
  		slab_fix(s, "Object at 0x%p not freed", object);
  	return ret;
-@@ -1308,6 +1308,12 @@ static inline void dec_slabs_node(struct
+@@ -1304,6 +1304,12 @@ static inline void dec_slabs_node(struct
  
  #endif /* CONFIG_SLUB_DEBUG */
  
@@ -58,7 +58,7 @@
  /*
   * Hooks for other subsystems that check memory allocations. In a typical
   * production configuration these hooks all should produce no code at all.
-@@ -1530,7 +1536,11 @@ static struct page *allocate_slab(struct
+@@ -1526,7 +1532,11 @@ static struct page *allocate_slab(struct
  
  	flags &= gfp_allowed_mask;
  
@@ -70,7 +70,7 @@
  		local_irq_enable();
  
  	flags |= s->allocflags;
-@@ -1605,7 +1615,11 @@ static struct page *allocate_slab(struct
+@@ -1601,7 +1611,11 @@ static struct page *allocate_slab(struct
  	page->frozen = 1;
  
  out:
@@ -82,7 +82,7 @@
  		local_irq_disable();
  	if (!page)
  		return NULL;
-@@ -1664,6 +1678,16 @@ static void __free_slab(struct kmem_cach
+@@ -1660,6 +1674,16 @@ static void __free_slab(struct kmem_cach
  	__free_pages(page, order);
  }
  
@@ -99,7 +99,7 @@
  #define need_reserve_slab_rcu						\
  	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
  
-@@ -1695,6 +1719,12 @@ static void free_slab(struct kmem_cache
+@@ -1691,6 +1715,12 @@ static void free_slab(struct kmem_cache
  		}
  
  		call_rcu(head, rcu_free_slab);
@@ -112,7 +112,7 @@
  	} else
  		__free_slab(s, page);
  }
-@@ -1802,7 +1832,7 @@ static void *get_partial_node(struct kme
+@@ -1798,7 +1828,7 @@ static void *get_partial_node(struct kme
  	if (!n || !n->nr_partial)
  		return NULL;
  
@@ -121,7 +121,7 @@
  	list_for_each_entry_safe(page, page2, &n->partial, lru) {
  		void *t;
  
-@@ -1827,7 +1857,7 @@ static void *get_partial_node(struct kme
+@@ -1823,7 +1853,7 @@ static void *get_partial_node(struct kme
  			break;
  
  	}
@@ -130,7 +130,7 @@
  	return object;
  }
  
-@@ -2073,7 +2103,7 @@ static void deactivate_slab(struct kmem_
+@@ -2069,7 +2099,7 @@ static void deactivate_slab(struct kmem_
  			 * that acquire_slab() will see a slab page that
  			 * is frozen
  			 */
@@ -139,7 +139,7 @@
  		}
  	} else {
  		m = M_FULL;
-@@ -2084,7 +2114,7 @@ static void deactivate_slab(struct kmem_
+@@ -2080,7 +2110,7 @@ static void deactivate_slab(struct kmem_
  			 * slabs from diagnostic functions will not see
  			 * any frozen slabs.
  			 */
@@ -148,7 +148,7 @@
  		}
  	}
  
-@@ -2119,7 +2149,7 @@ static void deactivate_slab(struct kmem_
+@@ -2115,7 +2145,7 @@ static void deactivate_slab(struct kmem_
  		goto redo;
  
  	if (lock)
@@ -157,7 +157,7 @@
  
  	if (m == M_FREE) {
  		stat(s, DEACTIVATE_EMPTY);
-@@ -2151,10 +2181,10 @@ static void unfreeze_partials(struct kme
+@@ -2147,10 +2177,10 @@ static void unfreeze_partials(struct kme
  		n2 = get_node(s, page_to_nid(page));
  		if (n != n2) {
  			if (n)
@@ -170,7 +170,7 @@
  		}
  
  		do {
-@@ -2183,7 +2213,7 @@ static void unfreeze_partials(struct kme
+@@ -2179,7 +2209,7 @@ static void unfreeze_partials(struct kme
  	}
  
  	if (n)
@@ -179,7 +179,7 @@
  
  	while (discard_page) {
  		page = discard_page;
-@@ -2222,14 +2252,21 @@ static void put_cpu_partial(struct kmem_
+@@ -2218,14 +2248,21 @@ static void put_cpu_partial(struct kmem_
  			pobjects = oldpage->pobjects;
  			pages = oldpage->pages;
  			if (drain && pobjects > s->cpu_partial) {
@@ -201,7 +201,7 @@
  				oldpage = NULL;
  				pobjects = 0;
  				pages = 0;
-@@ -2301,7 +2338,22 @@ static bool has_cpu_slab(int cpu, void *
+@@ -2297,7 +2334,22 @@ static bool has_cpu_slab(int cpu, void *
  
  static void flush_all(struct kmem_cache *s)
  {
@@ -224,7 +224,7 @@
  }
  
  /*
-@@ -2337,10 +2389,10 @@ static unsigned long count_partial(struc
+@@ -2352,10 +2404,10 @@ static unsigned long count_partial(struc
  	unsigned long x = 0;
  	struct page *page;
  
@@ -237,7 +237,7 @@
  	return x;
  }
  #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
-@@ -2478,8 +2530,10 @@ static inline void *get_freelist(struct
+@@ -2493,8 +2545,10 @@ static inline void *get_freelist(struct
   * already disabled (which is the case for bulk allocation).
   */
  static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
@@ -249,7 +249,7 @@
  	void *freelist;
  	struct page *page;
  
-@@ -2539,6 +2593,13 @@ static void *___slab_alloc(struct kmem_c
+@@ -2554,6 +2608,13 @@ static void *___slab_alloc(struct kmem_c
  	VM_BUG_ON(!c->page->frozen);
  	c->freelist = get_freepointer(s, freelist);
  	c->tid = next_tid(c->tid);
@@ -263,7 +263,7 @@
  	return freelist;
  
  new_slab:
-@@ -2570,7 +2631,7 @@ static void *___slab_alloc(struct kmem_c
+@@ -2585,7 +2646,7 @@ static void *___slab_alloc(struct kmem_c
  	deactivate_slab(s, page, get_freepointer(s, freelist));
  	c->page = NULL;
  	c->freelist = NULL;
@@ -272,7 +272,7 @@
  }
  
  /*
-@@ -2582,6 +2643,7 @@ static void *__slab_alloc(struct kmem_ca
+@@ -2597,6 +2658,7 @@ static void *__slab_alloc(struct kmem_ca
  {
  	void *p;
  	unsigned long flags;
@@ -280,7 +280,7 @@
  
  	local_irq_save(flags);
  #ifdef CONFIG_PREEMPT
-@@ -2593,8 +2655,9 @@ static void *__slab_alloc(struct kmem_ca
+@@ -2608,8 +2670,9 @@ static void *__slab_alloc(struct kmem_ca
  	c = this_cpu_ptr(s->cpu_slab);
  #endif
  
@@ -291,7 +291,7 @@
  	return p;
  }
  
-@@ -2780,7 +2843,7 @@ static void __slab_free(struct kmem_cach
+@@ -2795,7 +2858,7 @@ static void __slab_free(struct kmem_cach
  
  	do {
  		if (unlikely(n)) {
@@ -300,7 +300,7 @@
  			n = NULL;
  		}
  		prior = page->freelist;
-@@ -2812,7 +2875,7 @@ static void __slab_free(struct kmem_cach
+@@ -2827,7 +2890,7 @@ static void __slab_free(struct kmem_cach
  				 * Otherwise the list_lock will synchronize with
  				 * other processors updating the list of slabs.
  				 */
@@ -309,7 +309,7 @@
  
  			}
  		}
-@@ -2854,7 +2917,7 @@ static void __slab_free(struct kmem_cach
+@@ -2869,7 +2932,7 @@ static void __slab_free(struct kmem_cach
  		add_partial(n, page, DEACTIVATE_TO_TAIL);
  		stat(s, FREE_ADD_PARTIAL);
  	}
@@ -318,7 +318,7 @@
  	return;
  
  slab_empty:
-@@ -2869,7 +2932,7 @@ static void __slab_free(struct kmem_cach
+@@ -2884,7 +2947,7 @@ static void __slab_free(struct kmem_cach
  		remove_full(s, n, page);
  	}
  
@@ -327,7 +327,7 @@
  	stat(s, FREE_SLAB);
  	discard_slab(s, page);
  }
-@@ -3074,6 +3137,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3089,6 +3152,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
  			  void **p)
  {
  	struct kmem_cache_cpu *c;
@@ -335,7 +335,7 @@
  	int i;
  
  	/* memcg and kmem_cache debug support */
-@@ -3097,7 +3161,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3112,7 +3176,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
  			 * of re-populating per CPU c->freelist
  			 */
  			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
@@ -344,7 +344,7 @@
  			if (unlikely(!p[i]))
  				goto error;
  
-@@ -3109,6 +3173,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3124,6 +3188,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
  	}
  	c->tid = next_tid(c->tid);
  	local_irq_enable();
@@ -352,7 +352,7 @@
  
  	/* Clear memory outside IRQ disabled fastpath loop */
  	if (unlikely(flags & __GFP_ZERO)) {
-@@ -3256,7 +3321,7 @@ static void
+@@ -3271,7 +3336,7 @@ static void
  init_kmem_cache_node(struct kmem_cache_node *n)
  {
  	n->nr_partial = 0;
@@ -361,7 +361,7 @@
  	INIT_LIST_HEAD(&n->partial);
  #ifdef CONFIG_SLUB_DEBUG
  	atomic_long_set(&n->nr_slabs, 0);
-@@ -3600,6 +3665,10 @@ static void list_slab_objects(struct kme
+@@ -3615,6 +3680,10 @@ static void list_slab_objects(struct kme
  							const char *text)
  {
  #ifdef CONFIG_SLUB_DEBUG
@@ -372,7 +372,7 @@
  	void *addr = page_address(page);
  	void *p;
  	unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
-@@ -3620,6 +3689,7 @@ static void list_slab_objects(struct kme
+@@ -3635,6 +3704,7 @@ static void list_slab_objects(struct kme
  	slab_unlock(page);
  	kfree(map);
  #endif
@@ -380,7 +380,7 @@
  }
  
  /*
-@@ -3633,7 +3703,7 @@ static void free_partial(struct kmem_cac
+@@ -3648,7 +3718,7 @@ static void free_partial(struct kmem_cac
  	struct page *page, *h;
  
  	BUG_ON(irqs_disabled());
@@ -389,7 +389,7 @@
  	list_for_each_entry_safe(page, h, &n->partial, lru) {
  		if (!page->inuse) {
  			remove_partial(n, page);
-@@ -3643,7 +3713,7 @@ static void free_partial(struct kmem_cac
+@@ -3658,7 +3728,7 @@ static void free_partial(struct kmem_cac
  			"Objects remaining in %s on __kmem_cache_shutdown()");
  		}
  	}
@@ -398,7 +398,7 @@
  
  	list_for_each_entry_safe(page, h, &discard, lru)
  		discard_slab(s, page);
-@@ -3901,7 +3971,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -3916,7 +3986,7 @@ int __kmem_cache_shrink(struct kmem_cach
  		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
  			INIT_LIST_HEAD(promote + i);
  
@@ -407,7 +407,7 @@
  
  		/*
  		 * Build lists of slabs to discard or promote.
-@@ -3932,7 +4002,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -3947,7 +4017,7 @@ int __kmem_cache_shrink(struct kmem_cach
  		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
  			list_splice(promote + i, &n->partial);
  
@@ -416,7 +416,7 @@
  
  		/* Release empty slabs */
  		list_for_each_entry_safe(page, t, &discard, lru)
-@@ -4108,6 +4178,12 @@ void __init kmem_cache_init(void)
+@@ -4123,6 +4193,12 @@ void __init kmem_cache_init(void)
  {
  	static __initdata struct kmem_cache boot_kmem_cache,
  		boot_kmem_cache_node;
@@ -429,7 +429,7 @@
  
  	if (debug_guardpage_minorder())
  		slub_max_order = 0;
-@@ -4354,7 +4430,7 @@ static int validate_slab_node(struct kme
+@@ -4331,7 +4407,7 @@ static int validate_slab_node(struct kme
  	struct page *page;
  	unsigned long flags;
  
@@ -438,7 +438,7 @@
  
  	list_for_each_entry(page, &n->partial, lru) {
  		validate_slab_slab(s, page, map);
-@@ -4376,7 +4452,7 @@ static int validate_slab_node(struct kme
+@@ -4353,7 +4429,7 @@ static int validate_slab_node(struct kme
  		       s->name, count, atomic_long_read(&n->nr_slabs));
  
  out:
@@ -447,7 +447,7 @@
  	return count;
  }
  
-@@ -4564,12 +4640,12 @@ static int list_locations(struct kmem_ca
+@@ -4541,12 +4617,12 @@ static int list_locations(struct kmem_ca
  		if (!atomic_long_read(&n->nr_slabs))
  			continue;
  
diff --git a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
index 7f94932..b68edad 100644
--- a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
+++ b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
@@ -48,7 +48,7 @@
 
 --- a/mm/memcontrol.c
 +++ b/mm/memcontrol.c
-@@ -1824,7 +1824,7 @@ static void drain_all_stock(struct mem_c
+@@ -1794,7 +1794,7 @@ static void drain_all_stock(struct mem_c
  		return;
  	/* Notify other cpus that system-wide "drain" is running */
  	get_online_cpus();
@@ -57,7 +57,7 @@
  	for_each_online_cpu(cpu) {
  		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
  		struct mem_cgroup *memcg;
-@@ -1841,7 +1841,7 @@ static void drain_all_stock(struct mem_c
+@@ -1811,7 +1811,7 @@ static void drain_all_stock(struct mem_c
  				schedule_work_on(cpu, &stock->work);
  		}
  	}
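
The point of this patch: schedule_work_on() may block on RT, so it must
not be called from the preempt-disabled region that get_cpu() opens.
The elided one-line changes presumably switch drain_all_stock() to the
migrate-disable based _light variants, roughly:

	curcpu = get_cpu_light();	/* preemptible on RT */
	for_each_online_cpu(cpu) {
		/* ... */
		schedule_work_on(cpu, &stock->work);	/* safe now */
	}
	put_cpu_light();
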
diff --git a/patches/mm-memcontrol-do_not_disable_irq.patch b/patches/mm-memcontrol-do_not_disable_irq.patch
index 258f8a0..a631d24 100644
--- a/patches/mm-memcontrol-do_not_disable_irq.patch
+++ b/patches/mm-memcontrol-do_not_disable_irq.patch
@@ -29,7 +29,7 @@
  /* Whether legacy memory+swap accounting is active */
  static bool do_memsw_account(void)
  {
-@@ -4575,12 +4578,12 @@ static int mem_cgroup_move_account(struc
+@@ -4550,12 +4553,12 @@ static int mem_cgroup_move_account(struc
  
  	ret = 0;
  
@@ -44,7 +44,7 @@
  out_unlock:
  	unlock_page(page);
  out:
-@@ -5453,10 +5456,10 @@ void mem_cgroup_commit_charge(struct pag
+@@ -5430,10 +5433,10 @@ void mem_cgroup_commit_charge(struct pag
  
  	commit_charge(page, memcg, lrucare);
  
@@ -57,7 +57,7 @@
  
  	if (do_memsw_account() && PageSwapCache(page)) {
  		swp_entry_t entry = { .val = page_private(page) };
-@@ -5512,14 +5515,14 @@ static void uncharge_batch(struct mem_cg
+@@ -5489,14 +5492,14 @@ static void uncharge_batch(struct mem_cg
  		memcg_oom_recover(memcg);
  	}
  
@@ -74,7 +74,7 @@
  
  	if (!mem_cgroup_is_root(memcg))
  		css_put_many(&memcg->css, nr_pages);
-@@ -5854,6 +5857,7 @@ void mem_cgroup_swapout(struct page *pag
+@@ -5834,6 +5837,7 @@ void mem_cgroup_swapout(struct page *pag
  {
  	struct mem_cgroup *memcg, *swap_memcg;
  	unsigned short oldid;
@@ -82,7 +82,7 @@
  
  	VM_BUG_ON_PAGE(PageLRU(page), page);
  	VM_BUG_ON_PAGE(page_count(page), page);
-@@ -5894,12 +5898,16 @@ void mem_cgroup_swapout(struct page *pag
+@@ -5874,12 +5878,16 @@ void mem_cgroup_swapout(struct page *pag
  	 * important here to have the interrupts disabled because it is the
 	 * only synchronisation we have for updating the per-CPU variables.
  	 */
diff --git a/patches/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch b/patches/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
index e18ec7c..13fbced 100644
--- a/patches/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
+++ b/patches/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
@@ -14,7 +14,7 @@
 
 --- a/mm/memcontrol.c
 +++ b/mm/memcontrol.c
-@@ -5677,10 +5677,10 @@ void mem_cgroup_migrate(struct page *old
+@@ -5654,10 +5654,10 @@ void mem_cgroup_migrate(struct page *old
  
  	commit_charge(newpage, memcg, false);
  
diff --git a/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch b/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch
index a208a16..152606c 100644
--- a/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch
+++ b/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch
@@ -13,7 +13,7 @@
 
 --- a/mm/page_alloc.c
 +++ b/mm/page_alloc.c
-@@ -281,9 +281,9 @@ static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
+@@ -286,9 +286,9 @@ static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
  
  #ifdef CONFIG_PREEMPT_RT_BASE
  # define cpu_lock_irqsave(cpu, flags)		\
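
The pa_lock above is the local-lock idiom most of the mm queue relies
on: a section that is local_irq_save() on mainline becomes a per-CPU
spinlock on RT and therefore stays preemptible. Usage sketch, with the
locallock.h API this patch set provides:

	#include <linux/locallock.h>

	static DEFINE_LOCAL_IRQ_LOCK(pa_lock);

	static void pcp_update(void)
	{
		unsigned long flags;

		/* !RT: local_irq_save(); RT: per-CPU spinlock */
		local_lock_irqsave(pa_lock, flags);
		/* ... touch this_cpu_ptr(zone->pageset) ... */
		local_unlock_irqrestore(pa_lock, flags);
	}
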
diff --git a/patches/mm-page_alloc-reduce-lock-sections-further.patch b/patches/mm-page_alloc-reduce-lock-sections-further.patch
index 30c1024..63a3a83 100644
--- a/patches/mm-page_alloc-reduce-lock-sections-further.patch
+++ b/patches/mm-page_alloc-reduce-lock-sections-further.patch
@@ -13,7 +13,7 @@
 
 --- a/mm/page_alloc.c
 +++ b/mm/page_alloc.c
-@@ -1069,7 +1069,7 @@ static bool bulkfree_pcp_prepare(struct
+@@ -1085,7 +1085,7 @@ static bool bulkfree_pcp_prepare(struct
  #endif /* CONFIG_DEBUG_VM */
  
  /*
@@ -22,7 +22,7 @@
   * Assumes all pages on list are in same zone, and of same order.
   * count is the number of pages to free.
   *
-@@ -1080,19 +1080,58 @@ static bool bulkfree_pcp_prepare(struct
+@@ -1096,19 +1096,58 @@ static bool bulkfree_pcp_prepare(struct
   * pinned" detection logic.
   */
  static void free_pcppages_bulk(struct zone *zone, int count,
@@ -85,7 +85,7 @@
  	while (count) {
  		struct page *page;
  		struct list_head *list;
-@@ -1108,7 +1147,7 @@ static void free_pcppages_bulk(struct zo
+@@ -1124,7 +1163,7 @@ static void free_pcppages_bulk(struct zo
  			batch_free++;
  			if (++migratetype == MIGRATE_PCPTYPES)
  				migratetype = 0;
@@ -94,7 +94,7 @@
  		} while (list_empty(list));
  
  		/* This is the only non-empty list. Free them all. */
-@@ -1116,27 +1155,12 @@ static void free_pcppages_bulk(struct zo
+@@ -1132,27 +1171,12 @@ static void free_pcppages_bulk(struct zo
  			batch_free = count;
  
  		do {
@@ -123,7 +123,7 @@
  }
  
  static void free_one_page(struct zone *zone,
-@@ -1145,7 +1169,9 @@ static void free_one_page(struct zone *z
+@@ -1161,7 +1185,9 @@ static void free_one_page(struct zone *z
  				int migratetype)
  {
  	unsigned long nr_scanned;
@@ -134,7 +134,7 @@
  	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
  	if (nr_scanned)
  		__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
-@@ -1155,7 +1181,7 @@ static void free_one_page(struct zone *z
+@@ -1171,7 +1197,7 @@ static void free_one_page(struct zone *z
  		migratetype = get_pfnblock_migratetype(page, pfn);
  	}
  	__free_one_page(page, pfn, zone, order, migratetype);
@@ -143,7 +143,7 @@
  }
  
  static void __meminit __init_single_page(struct page *page, unsigned long pfn,
-@@ -2232,16 +2258,18 @@ static int rmqueue_bulk(struct zone *zon
+@@ -2251,16 +2277,18 @@ static int rmqueue_bulk(struct zone *zon
  void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
  {
  	unsigned long flags;
@@ -163,7 +163,7 @@
  }
  #endif
  
-@@ -2257,16 +2285,21 @@ static void drain_pages_zone(unsigned in
+@@ -2276,16 +2304,21 @@ static void drain_pages_zone(unsigned in
  	unsigned long flags;
  	struct per_cpu_pageset *pset;
  	struct per_cpu_pages *pcp;
@@ -187,7 +187,7 @@
  }
  
  /*
-@@ -2448,8 +2481,13 @@ void free_hot_cold_page(struct page *pag
+@@ -2467,8 +2500,13 @@ void free_hot_cold_page(struct page *pag
  	pcp->count++;
  	if (pcp->count >= pcp->high) {
  		unsigned long batch = READ_ONCE(pcp->batch);
diff --git a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index 80cadd2..7066fcb 100644
--- a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -25,7 +25,7 @@
  #include <linux/page_owner.h>
  #include <linux/kthread.h>
  #include <linux/memcontrol.h>
-@@ -276,6 +277,18 @@ EXPORT_SYMBOL(nr_node_ids);
+@@ -281,6 +282,18 @@ EXPORT_SYMBOL(nr_node_ids);
  EXPORT_SYMBOL(nr_online_nodes);
  #endif
  
@@ -44,7 +44,7 @@
  int page_group_by_mobility_disabled __read_mostly;
  
  #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-@@ -1228,10 +1241,10 @@ static void __free_pages_ok(struct page
+@@ -1244,10 +1257,10 @@ static void __free_pages_ok(struct page
  		return;
  
  	migratetype = get_pfnblock_migratetype(page, pfn);
@@ -57,7 +57,7 @@
  }
  
  static void __init __free_pages_boot_core(struct page *page, unsigned int order)
-@@ -2221,14 +2234,14 @@ void drain_zone_pages(struct zone *zone,
+@@ -2240,14 +2253,14 @@ void drain_zone_pages(struct zone *zone,
  	unsigned long flags;
  	int to_drain, batch;
  
@@ -74,7 +74,7 @@
  }
  #endif
  
-@@ -2245,7 +2258,7 @@ static void drain_pages_zone(unsigned in
+@@ -2264,7 +2277,7 @@ static void drain_pages_zone(unsigned in
  	struct per_cpu_pageset *pset;
  	struct per_cpu_pages *pcp;
  
@@ -83,7 +83,7 @@
  	pset = per_cpu_ptr(zone->pageset, cpu);
  
  	pcp = &pset->pcp;
-@@ -2253,7 +2266,7 @@ static void drain_pages_zone(unsigned in
+@@ -2272,7 +2285,7 @@ static void drain_pages_zone(unsigned in
  		free_pcppages_bulk(zone, pcp->count, pcp);
  		pcp->count = 0;
  	}
@@ -92,7 +92,7 @@
  }
  
  /*
-@@ -2339,8 +2352,17 @@ void drain_all_pages(struct zone *zone)
+@@ -2358,8 +2371,17 @@ void drain_all_pages(struct zone *zone)
  		else
  			cpumask_clear_cpu(cpu, &cpus_with_pcps);
  	}
@@ -110,7 +110,7 @@
  }
  
  #ifdef CONFIG_HIBERNATION
-@@ -2400,7 +2422,7 @@ void free_hot_cold_page(struct page *pag
+@@ -2419,7 +2441,7 @@ void free_hot_cold_page(struct page *pag
  
  	migratetype = get_pfnblock_migratetype(page, pfn);
  	set_pcppage_migratetype(page, migratetype);
@@ -119,7 +119,7 @@
  	__count_vm_event(PGFREE);
  
  	/*
-@@ -2431,7 +2453,7 @@ void free_hot_cold_page(struct page *pag
+@@ -2450,7 +2472,7 @@ void free_hot_cold_page(struct page *pag
  	}
  
  out:
@@ -128,7 +128,7 @@
  }
  
  /*
-@@ -2568,7 +2590,7 @@ struct page *buffered_rmqueue(struct zon
+@@ -2592,7 +2614,7 @@ struct page *buffered_rmqueue(struct zon
  		struct per_cpu_pages *pcp;
  		struct list_head *list;
  
@@ -137,7 +137,7 @@
  		do {
  			pcp = &this_cpu_ptr(zone->pageset)->pcp;
  			list = &pcp->lists[migratetype];
-@@ -2595,7 +2617,7 @@ struct page *buffered_rmqueue(struct zon
+@@ -2619,7 +2641,7 @@ struct page *buffered_rmqueue(struct zon
  		 * allocate greater than order-1 page units with __GFP_NOFAIL.
  		 */
  		WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
@@ -146,7 +146,7 @@
  
  		do {
  			page = NULL;
-@@ -2607,22 +2629,24 @@ struct page *buffered_rmqueue(struct zon
+@@ -2631,22 +2653,24 @@ struct page *buffered_rmqueue(struct zon
  			if (!page)
  				page = __rmqueue(zone, order, migratetype);
  		} while (page && check_new_pages(page, order));
@@ -175,7 +175,7 @@
  	return NULL;
  }
  
-@@ -6564,6 +6588,7 @@ static int page_alloc_cpu_notify(struct
+@@ -6523,6 +6547,7 @@ static int page_alloc_cpu_notify(struct
  void __init page_alloc_init(void)
  {
  	hotcpu_notifier(page_alloc_cpu_notify, 0);
@@ -183,7 +183,7 @@
  }
  
  /*
-@@ -7380,7 +7405,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -7351,7 +7376,7 @@ void zone_pcp_reset(struct zone *zone)
  	struct per_cpu_pageset *pset;
  
  	/* avoid races with drain_pages()  */
@@ -192,7 +192,7 @@
  	if (zone->pageset != &boot_pageset) {
  		for_each_online_cpu(cpu) {
  			pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -7389,7 +7414,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -7360,7 +7385,7 @@ void zone_pcp_reset(struct zone *zone)
  		free_percpu(zone->pageset);
  		zone->pageset = &boot_pageset;
  	}
diff --git a/patches/mm-protect-activate-switch-mm.patch b/patches/mm-protect-activate-switch-mm.patch
index e0e005c..0cbc888 100644
--- a/patches/mm-protect-activate-switch-mm.patch
+++ b/patches/mm-protect-activate-switch-mm.patch
@@ -36,7 +36,7 @@
 
 --- a/fs/exec.c
 +++ b/fs/exec.c
-@@ -1012,12 +1012,14 @@ static int exec_mmap(struct mm_struct *m
+@@ -1017,12 +1017,14 @@ static int exec_mmap(struct mm_struct *m
  		}
  	}
  	task_lock(tsk);
diff --git a/patches/mm-rt-kmap-atomic-scheduling.patch b/patches/mm-rt-kmap-atomic-scheduling.patch
index 8d776a5..fd1b49f 100644
--- a/patches/mm-rt-kmap-atomic-scheduling.patch
+++ b/patches/mm-rt-kmap-atomic-scheduling.patch
@@ -38,7 +38,7 @@
  
  #include <asm/pgtable.h>
  #include <asm/ldt.h>
-@@ -210,6 +211,35 @@ start_thread(struct pt_regs *regs, unsig
+@@ -195,6 +196,35 @@ start_thread(struct pt_regs *regs, unsig
  }
  EXPORT_SYMBOL_GPL(start_thread);
  
@@ -74,7 +74,7 @@
  
  /*
   *	switch_to(x,y) should switch tasks from x to y.
-@@ -286,6 +316,8 @@ EXPORT_SYMBOL_GPL(start_thread);
+@@ -271,6 +301,8 @@ EXPORT_SYMBOL_GPL(start_thread);
  		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
  		__switch_to_xtra(prev_p, next_p, tss);
  
@@ -229,7 +229,7 @@
  
  #include <asm/page.h>
  #include <asm/ptrace.h>
-@@ -1954,6 +1955,12 @@ struct task_struct {
+@@ -1979,6 +1980,12 @@ struct task_struct {
  	int softirq_nestcnt;
  	unsigned int softirqs_raised;
  #endif
diff --git a/patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch b/patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
index 363450e..3b838eb 100644
--- a/patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
+++ b/patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
@@ -25,7 +25,7 @@
  #include <asm/page.h>
  
  struct notifier_block;
-@@ -243,7 +244,8 @@ struct swap_info_struct {
+@@ -246,7 +247,8 @@ struct swap_info_struct {
  void *workingset_eviction(struct address_space *mapping, struct page *page);
  bool workingset_refault(void *shadow);
  void workingset_activation(struct page *page);
diff --git a/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch b/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
index 341b189..57552f2 100644
--- a/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
+++ b/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
@@ -28,7 +28,7 @@
 
 --- a/include/linux/netdevice.h
 +++ b/include/linux/netdevice.h
-@@ -395,7 +395,19 @@ typedef enum rx_handler_result rx_handle
+@@ -396,7 +396,19 @@ typedef enum rx_handler_result rx_handle
  typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
  
  void __napi_schedule(struct napi_struct *n);
@@ -50,7 +50,7 @@
  {
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -4906,6 +4906,7 @@ void __napi_schedule(struct napi_struct
+@@ -4914,6 +4914,7 @@ void __napi_schedule(struct napi_struct
  }
  EXPORT_SYMBOL(__napi_schedule);
  
@@ -58,7 +58,7 @@
  /**
   * __napi_schedule_irqoff - schedule for receive
   * @n: entry to schedule
-@@ -4917,6 +4918,7 @@ void __napi_schedule_irqoff(struct napi_
+@@ -4925,6 +4926,7 @@ void __napi_schedule_irqoff(struct napi_
  	____napi_schedule(this_cpu_ptr(&softnet_data), n);
  }
  EXPORT_SYMBOL(__napi_schedule_irqoff);
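
Context: with forced-threaded interrupts the "irqoff" callers do not
actually run with interrupts disabled on RT, so the variant has to fall
back to the IRQ-safe __napi_schedule(). The netdevice.h addition above
presumably takes this shape:

	#ifdef CONFIG_PREEMPT_RT_FULL
	static inline void __napi_schedule_irqoff(struct napi_struct *n)
	{
		__napi_schedule(n);	/* handles the IRQ state itself */
	}
	#else
	void __napi_schedule_irqoff(struct napi_struct *n);
	#endif
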
diff --git a/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch b/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch
index b27c745..fd46e55 100644
--- a/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch
+++ b/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch
@@ -107,16 +107,16 @@
  
  struct Qdisc_ops;
  struct qdisc_walker;
-@@ -78,7 +79,7 @@ struct Qdisc {
+@@ -86,7 +87,7 @@ struct Qdisc {
  	struct sk_buff		*gso_skb ____cacheline_aligned_in_smp;
- 	struct sk_buff_head	q;
+ 	struct qdisc_skb_head	q;
  	struct gnet_stats_basic_packed bstats;
 -	seqcount_t		running;
 +	net_seqlock_t		running;
  	struct gnet_stats_queue	qstats;
  	unsigned long		state;
  	struct Qdisc            *next_sched;
-@@ -90,13 +91,22 @@ struct Qdisc {
+@@ -98,13 +99,22 @@ struct Qdisc {
  	spinlock_t		busylock ____cacheline_aligned_in_smp;
  };
  
@@ -140,7 +140,7 @@
  	if (qdisc_is_running(qdisc))
  		return false;
  	/* Variant of write_seqcount_begin() telling lockdep a trylock
-@@ -105,11 +115,16 @@ static inline bool qdisc_run_begin(struc
+@@ -113,11 +123,16 @@ static inline bool qdisc_run_begin(struc
  	raw_write_seqcount_begin(&qdisc->running);
  	seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
  	return true;
@@ -157,7 +157,7 @@
  }
  
  static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
-@@ -300,7 +315,7 @@ static inline spinlock_t *qdisc_root_sle
+@@ -308,7 +323,7 @@ static inline spinlock_t *qdisc_root_sle
  	return qdisc_lock(root);
  }
  
@@ -230,7 +230,7 @@
  		      struct gnet_stats_basic_packed *b)
 --- a/net/sched/sch_api.c
 +++ b/net/sched/sch_api.c
-@@ -975,7 +975,7 @@ qdisc_create(struct net_device *dev, str
+@@ -981,7 +981,7 @@ static struct Qdisc *qdisc_create(struct
  			rcu_assign_pointer(sch->stab, stab);
  		}
  		if (tca[TCA_RATE]) {
@@ -241,8 +241,8 @@
  			if (sch->flags & TCQ_F_MQROOT)
 --- a/net/sched/sch_generic.c
 +++ b/net/sched/sch_generic.c
-@@ -426,7 +426,11 @@ struct Qdisc noop_qdisc = {
- 	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
+@@ -425,7 +425,11 @@ struct Qdisc noop_qdisc = {
+ 	.ops		=	&noop_qdisc_ops,
  	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
  	.dev_queue	=	&noop_netdev_queue,
 +#ifdef CONFIG_PREEMPT_RT_BASE
@@ -253,7 +253,7 @@
  	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
  };
  EXPORT_SYMBOL(noop_qdisc);
-@@ -620,9 +624,17 @@ struct Qdisc *qdisc_alloc(struct netdev_
+@@ -624,9 +628,17 @@ struct Qdisc *qdisc_alloc(struct netdev_
  	lockdep_set_class(&sch->busylock,
  			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
  
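
Why a seqlock: if a seqcount writer is preempted mid-update on RT,
readers would spin on the sequence number with nothing to boost or
block on. Wrapping the write side in a real lock fixes that; the
net_seqlock_t type added above presumably reduces to:

	#ifdef CONFIG_PREEMPT_RT_BASE
	typedef seqlock_t net_seqlock_t;
	# define net_seq_begin(__r)		read_seqbegin(__r)
	# define net_seq_retry(__r, __s)	read_seqretry(__r, __s)
	#else
	typedef seqcount_t net_seqlock_t;
	# define net_seq_begin(__r)		read_seqcount_begin(__r)
	# define net_seq_retry(__r, __s)	read_seqcount_retry(__r, __s)
	#endif
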
diff --git a/patches/net-add-back-the-missing-serialization-in-ip_send_un.patch b/patches/net-add-back-the-missing-serialization-in-ip_send_un.patch
index 5653133..047d570 100644
--- a/patches/net-add-back-the-missing-serialization-in-ip_send_un.patch
+++ b/patches/net-add-back-the-missing-serialization-in-ip_send_un.patch
@@ -50,7 +50,7 @@
  
  #include <net/net_namespace.h>
  #include <net/icmp.h>
-@@ -565,6 +566,7 @@ void tcp_v4_send_check(struct sock *sk,
+@@ -564,6 +565,7 @@ void tcp_v4_send_check(struct sock *sk,
  }
  EXPORT_SYMBOL(tcp_v4_send_check);
  
@@ -58,7 +58,7 @@
  /*
   *	This routine will send an RST to the other tcp.
   *
-@@ -692,6 +694,8 @@ static void tcp_v4_send_reset(const stru
+@@ -691,6 +693,8 @@ static void tcp_v4_send_reset(const stru
  		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));
  
  	arg.tos = ip_hdr(skb)->tos;
@@ -67,7 +67,7 @@
  	local_bh_disable();
  	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
  			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
-@@ -701,6 +705,7 @@ static void tcp_v4_send_reset(const stru
+@@ -700,6 +704,7 @@ static void tcp_v4_send_reset(const stru
  	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
  	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
  	local_bh_enable();
@@ -75,7 +75,7 @@
  
  #ifdef CONFIG_TCP_MD5SIG
  out:
-@@ -776,6 +781,7 @@ static void tcp_v4_send_ack(struct net *
+@@ -775,6 +780,7 @@ static void tcp_v4_send_ack(struct net *
  	if (oif)
  		arg.bound_dev_if = oif;
  	arg.tos = tos;
@@ -83,7 +83,7 @@
  	local_bh_disable();
  	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
  			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
-@@ -784,6 +790,7 @@ static void tcp_v4_send_ack(struct net *
+@@ -783,6 +789,7 @@ static void tcp_v4_send_ack(struct net *
  
  	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
  	local_bh_enable();
diff --git a/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch b/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
index 4ac3f6b..c5e393e 100644
--- a/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
+++ b/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
@@ -35,7 +35,7 @@
 
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -8011,7 +8011,7 @@ static int dev_cpu_callback(struct notif
+@@ -8022,7 +8022,7 @@ static int dev_cpu_callback(struct notif
  		netif_rx_ni(skb);
  		input_queue_head_incr(oldsd);
  	}
diff --git a/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch b/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
index da834f3..a553cb7 100644
--- a/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
+++ b/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
@@ -20,7 +20,7 @@
 
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -3085,7 +3085,11 @@ static inline int __dev_xmit_skb(struct
+@@ -3080,7 +3080,11 @@ static inline int __dev_xmit_skb(struct
  	 * This permits qdisc->running owner to get the lock more
  	 * often and dequeue packets faster.
  	 */
diff --git a/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch b/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
index 13ec1f2..e66f42d 100644
--- a/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
+++ b/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
@@ -53,11 +53,12 @@
  /*
 --- a/net/netfilter/core.c
 +++ b/net/netfilter/core.c
-@@ -22,11 +22,17 @@
+@@ -22,12 +22,18 @@
  #include <linux/proc_fs.h>
  #include <linux/mutex.h>
  #include <linux/slab.h>
 +#include <linux/locallock.h>
+ #include <linux/rcupdate.h>
  #include <net/net_namespace.h>
  #include <net/sock.h>
  
diff --git a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
index 2af0743..300688f 100644
--- a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
+++ b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
@@ -23,7 +23,7 @@
 
 --- a/include/linux/netdevice.h
 +++ b/include/linux/netdevice.h
-@@ -2446,14 +2446,53 @@ void netdev_freemem(struct net_device *d
+@@ -2461,14 +2461,53 @@ void netdev_freemem(struct net_device *d
  void synchronize_net(void);
  int init_dummy_netdev(struct net_device *dev);
  
@@ -80,7 +80,7 @@
  struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1957,6 +1957,9 @@ struct task_struct {
+@@ -1982,6 +1982,9 @@ struct task_struct {
  #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
  	unsigned long	task_state_change;
  #endif
@@ -92,7 +92,7 @@
  	struct task_struct *oom_reaper_list;
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -3148,8 +3148,10 @@ static void skb_update_prio(struct sk_bu
+@@ -3143,8 +3143,10 @@ static void skb_update_prio(struct sk_bu
  #define skb_update_prio(skb)
  #endif
  
@@ -103,7 +103,7 @@
  
  /**
   *	dev_loopback_xmit - loop back @skb
-@@ -3393,8 +3395,7 @@ static int __dev_queue_xmit(struct sk_bu
+@@ -3378,8 +3380,7 @@ static int __dev_queue_xmit(struct sk_bu
  		int cpu = smp_processor_id(); /* ok because BHs are off */
  
  		if (txq->xmit_lock_owner != cpu) {
@@ -113,7 +113,7 @@
  				goto recursion_alert;
  
  			skb = validate_xmit_skb(skb, dev);
-@@ -3404,9 +3405,9 @@ static int __dev_queue_xmit(struct sk_bu
+@@ -3389,9 +3390,9 @@ static int __dev_queue_xmit(struct sk_bu
  			HARD_TX_LOCK(dev, txq, cpu);
  
  			if (!netif_xmit_stopped(txq)) {
@@ -127,7 +127,7 @@
  					goto out;
 --- a/net/core/filter.c
 +++ b/net/core/filter.c
-@@ -1592,7 +1592,7 @@ static inline int __bpf_tx_skb(struct ne
+@@ -1645,7 +1645,7 @@ static inline int __bpf_tx_skb(struct ne
  {
  	int ret;
  
@@ -136,7 +136,7 @@
  		net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
  		kfree_skb(skb);
  		return -ENETDOWN;
-@@ -1600,9 +1600,9 @@ static inline int __bpf_tx_skb(struct ne
+@@ -1653,9 +1653,9 @@ static inline int __bpf_tx_skb(struct ne
  
  	skb->dev = dev;
  
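
Background for this one: with threaded interrupts and preemptible
softirqs the xmit path is no longer pinned to a CPU, so a per-CPU
recursion counter can misfire; the counter moves into task_struct (the
three added sched.h lines above). A sketch of the assumed RT helpers:

	#ifdef CONFIG_PREEMPT_RT_FULL
	static inline int xmit_rec_read(void)
	{
		return current->xmit_recursion;
	}

	static inline void xmit_rec_inc(void)
	{
		current->xmit_recursion++;
	}

	static inline void xmit_rec_dec(void)
	{
		current->xmit_recursion--;
	}
	#endif
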
diff --git a/patches/net-prevent-abba-deadlock.patch b/patches/net-prevent-abba-deadlock.patch
index 72c538c..279a0c8 100644
--- a/patches/net-prevent-abba-deadlock.patch
+++ b/patches/net-prevent-abba-deadlock.patch
@@ -95,7 +95,7 @@
 
 --- a/net/core/sock.c
 +++ b/net/core/sock.c
-@@ -2510,12 +2510,11 @@ void lock_sock_nested(struct sock *sk, i
+@@ -2488,12 +2488,11 @@ void lock_sock_nested(struct sock *sk, i
  	if (sk->sk_lock.owned)
  		__lock_sock(sk);
  	sk->sk_lock.owned = 1;
diff --git a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
index d166a8b..0913999 100644
--- a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
+++ b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
@@ -20,7 +20,7 @@
 
 --- a/include/linux/interrupt.h
 +++ b/include/linux/interrupt.h
-@@ -488,6 +488,14 @@ extern void thread_do_softirq(void);
+@@ -496,6 +496,14 @@ extern void thread_do_softirq(void);
  extern void open_softirq(int nr, void (*action)(struct softirq_action *));
  extern void softirq_init(void);
  extern void __raise_softirq_irqoff(unsigned int nr);
@@ -37,7 +37,7 @@
  extern void raise_softirq(unsigned int nr);
 --- a/kernel/softirq.c
 +++ b/kernel/softirq.c
-@@ -675,6 +675,27 @@ void __raise_softirq_irqoff(unsigned int
+@@ -685,6 +685,27 @@ void __raise_softirq_irqoff(unsigned int
  }
  
  /*
@@ -67,7 +67,7 @@
  void raise_softirq_irqoff(unsigned int nr)
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -5247,7 +5247,7 @@ static void net_rx_action(struct softirq
+@@ -5255,7 +5255,7 @@ static __latent_entropy void net_rx_acti
  	list_splice_tail(&repoll, &list);
  	list_splice(&list, &sd->poll_list);
  	if (!list_empty(&sd->poll_list))
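
The idea here: when net_rx_action() runs out of budget on RT it should
not re-raise NET_RX_SOFTIRQ against the current context (which may be a
task) but hand the remainder to ksoftirqd. The interrupt.h hunk adds
eight lines, presumably of this shape:

	#ifdef CONFIG_PREEMPT_RT_FULL
	extern void __raise_softirq_irqoff_ksoft(unsigned int nr);
	#else
	static inline void __raise_softirq_irqoff_ksoft(unsigned int nr)
	{
		__raise_softirq_irqoff(nr);
	}
	#endif
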
diff --git a/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch b/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
index 60ef1ab..c436869 100644
--- a/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
+++ b/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
@@ -46,7 +46,7 @@
 
 --- a/net/sched/sch_generic.c
 +++ b/net/sched/sch_generic.c
-@@ -917,7 +917,7 @@ void dev_deactivate_many(struct list_hea
+@@ -925,7 +925,7 @@ void dev_deactivate_many(struct list_hea
  	/* Wait for outstanding qdisc_run calls. */
  	list_for_each_entry(dev, head, close_list)
  		while (some_qdisc_is_busy(dev))
diff --git a/patches/net-wireless-warn-nort.patch b/patches/net-wireless-warn-nort.patch
index 0a9eb50..1305ef2 100644
--- a/patches/net-wireless-warn-nort.patch
+++ b/patches/net-wireless-warn-nort.patch
@@ -12,7 +12,7 @@
 
 --- a/net/mac80211/rx.c
 +++ b/net/mac80211/rx.c
-@@ -4070,7 +4070,7 @@ void ieee80211_rx_napi(struct ieee80211_
+@@ -4156,7 +4156,7 @@ void ieee80211_rx_napi(struct ieee80211_
  	struct ieee80211_supported_band *sband;
  	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
  
diff --git a/patches/oleg-signal-rt-fix.patch b/patches/oleg-signal-rt-fix.patch
index 192679b..e321750 100644
--- a/patches/oleg-signal-rt-fix.patch
+++ b/patches/oleg-signal-rt-fix.patch
@@ -38,7 +38,7 @@
 
 --- a/arch/x86/entry/common.c
 +++ b/arch/x86/entry/common.c
-@@ -155,6 +155,13 @@ static void exit_to_usermode_loop(struct
+@@ -148,6 +148,13 @@ static void exit_to_usermode_loop(struct
  		if (cached_flags & _TIF_NEED_RESCHED)
  			schedule();
  
@@ -54,9 +54,9 @@
  
 --- a/arch/x86/include/asm/signal.h
 +++ b/arch/x86/include/asm/signal.h
-@@ -23,6 +23,19 @@ typedef struct {
- 	unsigned long sig[_NSIG_WORDS];
- } sigset_t;
+@@ -27,6 +27,19 @@ typedef struct {
+ #define SA_IA32_ABI	0x02000000u
+ #define SA_X32_ABI	0x01000000u
  
 +/*
 + * Because some traps use the IST stack, we must keep preemption
@@ -76,7 +76,7 @@
  #endif
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1670,6 +1670,10 @@ struct task_struct {
+@@ -1695,6 +1695,10 @@ struct task_struct {
  	sigset_t blocked, real_blocked;
  	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
  	struct sigpending pending;
diff --git a/patches/panic-disable-random-on-rt.patch b/patches/panic-disable-random-on-rt.patch
index 25e0141..c21c7d0 100644
--- a/patches/panic-disable-random-on-rt.patch
+++ b/patches/panic-disable-random-on-rt.patch
@@ -12,7 +12,7 @@
 
 --- a/kernel/panic.c
 +++ b/kernel/panic.c
-@@ -449,9 +449,11 @@ static u64 oops_id;
+@@ -482,9 +482,11 @@ static u64 oops_id;
  
  static int init_oops_id(void)
  {
diff --git a/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch b/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
index 7d85b3e..9b870f1 100644
--- a/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
+++ b/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
@@ -41,10 +41,10 @@
 -#endif
  void rcu_check_callbacks(int user);
  void rcu_report_dead(unsigned int cpu);
- 
+ void rcu_cpu_starting(unsigned int cpu);
 --- a/kernel/rcu/tree.c
 +++ b/kernel/rcu/tree.c
-@@ -259,7 +259,14 @@ void rcu_sched_qs(void)
+@@ -257,7 +257,14 @@ void rcu_sched_qs(void)
  			   this_cpu_ptr(&rcu_sched_data), true);
  }
  
diff --git a/patches/perf-make-swevent-hrtimer-irqsafe.patch b/patches/perf-make-swevent-hrtimer-irqsafe.patch
index 8c65a20..eed1470 100644
--- a/patches/perf-make-swevent-hrtimer-irqsafe.patch
+++ b/patches/perf-make-swevent-hrtimer-irqsafe.patch
@@ -58,7 +58,7 @@
 
 --- a/kernel/events/core.c
 +++ b/kernel/events/core.c
-@@ -8217,6 +8217,7 @@ static void perf_swevent_init_hrtimer(st
+@@ -8335,6 +8335,7 @@ static void perf_swevent_init_hrtimer(st
  
  	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  	hwc->hrtimer.function = perf_swevent_hrtimer;
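
The single added line (elided above) presumably marks the hrtimer as
irqsafe, i.e. its handler keeps running in hard interrupt context on RT
instead of being pushed into the timer softirq thread:

	hwc->hrtimer.irqsafe = 1;
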
diff --git a/patches/peterz-percpu-rwsem-rt.patch b/patches/peterz-percpu-rwsem-rt.patch
new file mode 100644
index 0000000..ec31b61
--- /dev/null
+++ b/patches/peterz-percpu-rwsem-rt.patch
@@ -0,0 +1,218 @@
+Subject: locking/percpu-rwsem: Remove preempt_disable variants
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Wed Nov 23 16:29:32 CET 2016
+
+Effectively revert commit:
+
+  87709e28dc7c ("fs/locks: Use percpu_down_read_preempt_disable()")
+
+This is causing major pain for PREEMPT_RT; reverting it costs only a
+very small amount of performance for PREEMPT=y.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+---
+---
+ fs/locks.c                   |   32 ++++++++++++++++----------------
+ include/linux/percpu-rwsem.h |   24 ++++--------------------
+ 2 files changed, 20 insertions(+), 36 deletions(-)
+
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -935,7 +935,7 @@ static int flock_lock_inode(struct inode
+ 			return -ENOMEM;
+ 	}
+ 
+-	percpu_down_read_preempt_disable(&file_rwsem);
++	percpu_down_read(&file_rwsem);
+ 	spin_lock(&ctx->flc_lock);
+ 	if (request->fl_flags & FL_ACCESS)
+ 		goto find_conflict;
+@@ -976,7 +976,7 @@ static int flock_lock_inode(struct inode
+ 
+ out:
+ 	spin_unlock(&ctx->flc_lock);
+-	percpu_up_read_preempt_enable(&file_rwsem);
++	percpu_up_read(&file_rwsem);
+ 	if (new_fl)
+ 		locks_free_lock(new_fl);
+ 	locks_dispose_list(&dispose);
+@@ -1013,7 +1013,7 @@ static int posix_lock_inode(struct inode
+ 		new_fl2 = locks_alloc_lock();
+ 	}
+ 
+-	percpu_down_read_preempt_disable(&file_rwsem);
++	percpu_down_read(&file_rwsem);
+ 	spin_lock(&ctx->flc_lock);
+ 	/*
+ 	 * New lock request. Walk all POSIX locks and look for conflicts. If
+@@ -1185,7 +1185,7 @@ static int posix_lock_inode(struct inode
+ 	}
+  out:
+ 	spin_unlock(&ctx->flc_lock);
+-	percpu_up_read_preempt_enable(&file_rwsem);
++	percpu_up_read(&file_rwsem);
+ 	/*
+ 	 * Free any unused locks.
+ 	 */
+@@ -1460,7 +1460,7 @@ int __break_lease(struct inode *inode, u
+ 		return error;
+ 	}
+ 
+-	percpu_down_read_preempt_disable(&file_rwsem);
++	percpu_down_read(&file_rwsem);
+ 	spin_lock(&ctx->flc_lock);
+ 
+ 	time_out_leases(inode, &dispose);
+@@ -1512,13 +1512,13 @@ int __break_lease(struct inode *inode, u
+ 	locks_insert_block(fl, new_fl);
+ 	trace_break_lease_block(inode, new_fl);
+ 	spin_unlock(&ctx->flc_lock);
+-	percpu_up_read_preempt_enable(&file_rwsem);
++	percpu_up_read(&file_rwsem);
+ 
+ 	locks_dispose_list(&dispose);
+ 	error = wait_event_interruptible_timeout(new_fl->fl_wait,
+ 						!new_fl->fl_next, break_time);
+ 
+-	percpu_down_read_preempt_disable(&file_rwsem);
++	percpu_down_read(&file_rwsem);
+ 	spin_lock(&ctx->flc_lock);
+ 	trace_break_lease_unblock(inode, new_fl);
+ 	locks_delete_block(new_fl);
+@@ -1535,7 +1535,7 @@ int __break_lease(struct inode *inode, u
+ 	}
+ out:
+ 	spin_unlock(&ctx->flc_lock);
+-	percpu_up_read_preempt_enable(&file_rwsem);
++	percpu_up_read(&file_rwsem);
+ 	locks_dispose_list(&dispose);
+ 	locks_free_lock(new_fl);
+ 	return error;
+@@ -1609,7 +1609,7 @@ int fcntl_getlease(struct file *filp)
+ 
+ 	ctx = smp_load_acquire(&inode->i_flctx);
+ 	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
+-		percpu_down_read_preempt_disable(&file_rwsem);
++		percpu_down_read(&file_rwsem);
+ 		spin_lock(&ctx->flc_lock);
+ 		time_out_leases(inode, &dispose);
+ 		list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
+@@ -1619,7 +1619,7 @@ int fcntl_getlease(struct file *filp)
+ 			break;
+ 		}
+ 		spin_unlock(&ctx->flc_lock);
+-		percpu_up_read_preempt_enable(&file_rwsem);
++		percpu_up_read(&file_rwsem);
+ 
+ 		locks_dispose_list(&dispose);
+ 	}
+@@ -1694,7 +1694,7 @@ generic_add_lease(struct file *filp, lon
+ 		return -EINVAL;
+ 	}
+ 
+-	percpu_down_read_preempt_disable(&file_rwsem);
++	percpu_down_read(&file_rwsem);
+ 	spin_lock(&ctx->flc_lock);
+ 	time_out_leases(inode, &dispose);
+ 	error = check_conflicting_open(dentry, arg, lease->fl_flags);
+@@ -1765,7 +1765,7 @@ generic_add_lease(struct file *filp, lon
+ 		lease->fl_lmops->lm_setup(lease, priv);
+ out:
+ 	spin_unlock(&ctx->flc_lock);
+-	percpu_up_read_preempt_enable(&file_rwsem);
++	percpu_up_read(&file_rwsem);
+ 	locks_dispose_list(&dispose);
+ 	if (is_deleg)
+ 		inode_unlock(inode);
+@@ -1788,7 +1788,7 @@ static int generic_delete_lease(struct f
+ 		return error;
+ 	}
+ 
+-	percpu_down_read_preempt_disable(&file_rwsem);
++	percpu_down_read(&file_rwsem);
+ 	spin_lock(&ctx->flc_lock);
+ 	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
+ 		if (fl->fl_file == filp &&
+@@ -1801,7 +1801,7 @@ static int generic_delete_lease(struct f
+ 	if (victim)
+ 		error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
+ 	spin_unlock(&ctx->flc_lock);
+-	percpu_up_read_preempt_enable(&file_rwsem);
++	percpu_up_read(&file_rwsem);
+ 	locks_dispose_list(&dispose);
+ 	return error;
+ }
+@@ -2532,13 +2532,13 @@ locks_remove_lease(struct file *filp, st
+ 	if (list_empty(&ctx->flc_lease))
+ 		return;
+ 
+-	percpu_down_read_preempt_disable(&file_rwsem);
++	percpu_down_read(&file_rwsem);
+ 	spin_lock(&ctx->flc_lock);
+ 	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
+ 		if (filp == fl->fl_file)
+ 			lease_modify(fl, F_UNLCK, &dispose);
+ 	spin_unlock(&ctx->flc_lock);
+-	percpu_up_read_preempt_enable(&file_rwsem);
++	percpu_up_read(&file_rwsem);
+ 
+ 	locks_dispose_list(&dispose);
+ }
+--- a/include/linux/percpu-rwsem.h
++++ b/include/linux/percpu-rwsem.h
+@@ -28,7 +28,7 @@ static struct percpu_rw_semaphore name =
+ extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
+ extern void __percpu_up_read(struct percpu_rw_semaphore *);
+ 
+-static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem)
++static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
+ {
+ 	might_sleep();
+ 
+@@ -46,16 +46,10 @@ static inline void percpu_down_read_pree
+ 	__this_cpu_inc(*sem->read_count);
+ 	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
+ 		__percpu_down_read(sem, false); /* Unconditional memory barrier */
+-	barrier();
+ 	/*
+-	 * The barrier() prevents the compiler from
++	 * The preempt_enable() prevents the compiler from
+ 	 * bleeding the critical section out.
+ 	 */
+-}
+-
+-static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
+-{
+-	percpu_down_read_preempt_disable(sem);
+ 	preempt_enable();
+ }
+ 
+@@ -82,13 +76,9 @@ static inline int percpu_down_read_trylo
+ 	return ret;
+ }
+ 
+-static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem)
++static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
+ {
+-	/*
+-	 * The barrier() prevents the compiler from
+-	 * bleeding the critical section out.
+-	 */
+-	barrier();
++	preempt_disable();
+ 	/*
+ 	 * Same as in percpu_down_read().
+ 	 */
+@@ -101,12 +91,6 @@ static inline void percpu_up_read_preemp
+ 	rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
+ }
+ 
+-static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
+-{
+-	preempt_disable();
+-	percpu_up_read_preempt_enable(sem);
+-}
+-
+ extern void percpu_down_write(struct percpu_rw_semaphore *);
+ extern void percpu_up_write(struct percpu_rw_semaphore *);
+ 
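
Why this revert matters for RT: spin_lock() maps to a sleeping rtmutex
under PREEMPT_RT, so it must not be taken inside a preempt_disable()d
region. After the revert the read side of file_rwsem is plainly
preemptible, e.g. (caller pattern as in the fs/locks.c hunks above):

	percpu_down_read(&file_rwsem);	/* may sleep, preemption stays on */
	spin_lock(&ctx->flc_lock);	/* sleeping lock on RT: now legal */
	/* ... walk ctx->flc_lease ... */
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
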
diff --git a/patches/ping-sysrq.patch b/patches/ping-sysrq.patch
index f1d81bf..c337982 100644
--- a/patches/ping-sysrq.patch
+++ b/patches/ping-sysrq.patch
@@ -42,7 +42,7 @@
  'b'     - Will immediately reboot the system without syncing or unmounting
 --- a/include/net/netns/ipv4.h
 +++ b/include/net/netns/ipv4.h
-@@ -70,6 +70,7 @@ struct netns_ipv4 {
+@@ -69,6 +69,7 @@ struct netns_ipv4 {
  
  	int sysctl_icmp_echo_ignore_all;
  	int sysctl_icmp_echo_ignore_broadcasts;
diff --git a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
index 74db6d2..9aa01ce 100644
--- a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
+++ b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
@@ -19,7 +19,7 @@
 
 --- a/include/linux/init_task.h
 +++ b/include/linux/init_task.h
-@@ -148,6 +148,12 @@ extern struct task_group root_task_group
+@@ -150,6 +150,12 @@ extern struct task_group root_task_group
  # define INIT_PERF_EVENTS(tsk)
  #endif
  
@@ -32,7 +32,7 @@
  #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
  # define INIT_VTIME(tsk)						\
  	.vtime_seqcount = SEQCNT_ZERO(tsk.vtime_seqcount),	\
-@@ -239,6 +245,7 @@ extern struct task_group root_task_group
+@@ -250,6 +256,7 @@ extern struct task_group root_task_group
  	.cpu_timers	= INIT_CPU_TIMERS(tsk.cpu_timers),		\
  	.pi_lock	= __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock),	\
  	.timer_slack_ns = 50000, /* 50 usec default slack */		\
@@ -42,7 +42,7 @@
  		[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID),		\
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1635,6 +1635,9 @@ struct task_struct {
+@@ -1660,6 +1660,9 @@ struct task_struct {
  
  	struct task_cputime cputime_expires;
  	struct list_head cpu_timers[3];
@@ -54,7 +54,7 @@
  	const struct cred __rcu *real_cred; /* objective and real subjective task
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
-@@ -1274,6 +1274,9 @@ static void rt_mutex_init_task(struct ta
+@@ -1423,6 +1423,9 @@ static void rt_mutex_init_task(struct ta
   */
  static void posix_cpu_timers_init(struct task_struct *tsk)
  {
diff --git a/patches/power-disable-highmem-on-rt.patch b/patches/power-disable-highmem-on-rt.patch
index 01feea4..3141f5b 100644
--- a/patches/power-disable-highmem-on-rt.patch
+++ b/patches/power-disable-highmem-on-rt.patch
@@ -11,7 +11,7 @@
 
 --- a/arch/powerpc/Kconfig
 +++ b/arch/powerpc/Kconfig
-@@ -327,7 +327,7 @@ menu "Kernel options"
+@@ -322,7 +322,7 @@ menu "Kernel options"
  
  config HIGHMEM
  	bool "High memory support"
diff --git a/patches/power-use-generic-rwsem-on-rt.patch b/patches/power-use-generic-rwsem-on-rt.patch
index 2027a69..6e83ca9 100644
--- a/patches/power-use-generic-rwsem-on-rt.patch
+++ b/patches/power-use-generic-rwsem-on-rt.patch
@@ -11,7 +11,7 @@
 
 --- a/arch/powerpc/Kconfig
 +++ b/arch/powerpc/Kconfig
-@@ -57,10 +57,11 @@ config LOCKDEP_SUPPORT
+@@ -52,10 +52,11 @@ config LOCKDEP_SUPPORT
  
  config RWSEM_GENERIC_SPINLOCK
  	bool
diff --git a/patches/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch b/patches/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
index c9da582..e50543c 100644
--- a/patches/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
+++ b/patches/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
@@ -27,7 +27,7 @@
 
 --- a/arch/powerpc/kvm/Kconfig
 +++ b/arch/powerpc/kvm/Kconfig
-@@ -172,6 +172,7 @@ config KVM_E500MC
+@@ -175,6 +175,7 @@ config KVM_E500MC
  config KVM_MPIC
  	bool "KVM in-kernel MPIC emulation"
  	depends on KVM && E500
diff --git a/patches/powerpc-preempt-lazy-support.patch b/patches/powerpc-preempt-lazy-support.patch
index d0ab226..09aba56 100644
--- a/patches/powerpc-preempt-lazy-support.patch
+++ b/patches/powerpc-preempt-lazy-support.patch
@@ -15,7 +15,7 @@
 
 --- a/arch/powerpc/Kconfig
 +++ b/arch/powerpc/Kconfig
-@@ -141,6 +141,7 @@ config PPC
+@@ -135,6 +135,7 @@ config PPC
  	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
  	select GENERIC_STRNCPY_FROM_USER
  	select GENERIC_STRNLEN_USER
@@ -133,7 +133,7 @@
  	beq	restore_user
 --- a/arch/powerpc/kernel/entry_64.S
 +++ b/arch/powerpc/kernel/entry_64.S
-@@ -657,7 +657,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
+@@ -656,7 +656,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
  	bl	restore_math
  	b	restore
  #endif
@@ -142,7 +142,7 @@
  	beq	2f
  	bl	restore_interrupts
  	SCHEDULE_USER
-@@ -719,10 +719,18 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
+@@ -718,10 +718,18 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
  
  #ifdef CONFIG_PREEMPT
  	/* Check if we need to preempt */
@@ -162,7 +162,7 @@
  	cmpwi	cr1,r8,0
  	ld	r0,SOFTE(r1)
  	cmpdi	r0,0
-@@ -739,7 +747,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
+@@ -738,7 +746,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
  	/* Re-test flags and eventually loop */
  	CURRENT_THREAD_INFO(r9, r1)
  	ld	r4,TI_FLAGS(r9)
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 4648464..4a968d4 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -127,7 +127,7 @@
  
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -3238,6 +3238,43 @@ static inline int test_tsk_need_resched(
+@@ -3342,6 +3342,43 @@ static inline int test_tsk_need_resched(
  	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
  }
  
@@ -173,7 +173,7 @@
  	set_tsk_thread_flag(current, TIF_SIGPENDING);
 --- a/include/linux/thread_info.h
 +++ b/include/linux/thread_info.h
-@@ -103,7 +103,17 @@ static inline int test_ti_thread_flag(st
+@@ -107,7 +107,17 @@ static inline int test_ti_thread_flag(st
  #define test_thread_flag(flag) \
  	test_ti_thread_flag(current_thread_info(), flag)
  
@@ -258,7 +258,7 @@
  void resched_cpu(int cpu)
  {
  	struct rq *rq = cpu_rq(cpu);
-@@ -2522,6 +2554,9 @@ int sched_fork(unsigned long clone_flags
+@@ -2531,6 +2563,9 @@ int sched_fork(unsigned long clone_flags
  	p->on_cpu = 0;
  #endif
  	init_task_preempt_count(p);
@@ -268,7 +268,7 @@
  #ifdef CONFIG_SMP
  	plist_node_init(&p->pushable_tasks, MAX_PRIO);
  	RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -3356,6 +3391,7 @@ void migrate_disable(void)
+@@ -3362,6 +3397,7 @@ void migrate_disable(void)
  	}
  
  	preempt_disable();
@@ -276,7 +276,7 @@
  	pin_current_cpu();
  	p->migrate_disable = 1;
  	preempt_enable();
-@@ -3395,6 +3431,7 @@ void migrate_enable(void)
+@@ -3401,6 +3437,7 @@ void migrate_enable(void)
  
  	unpin_current_cpu();
  	preempt_enable();
@@ -284,7 +284,7 @@
  }
  EXPORT_SYMBOL(migrate_enable);
  #endif
-@@ -3535,6 +3572,7 @@ static void __sched notrace __schedule(b
+@@ -3530,6 +3567,7 @@ static void __sched notrace __schedule(b
  
  	next = pick_next_task(rq, prev, cookie);
  	clear_tsk_need_resched(prev);
@@ -292,7 +292,7 @@
  	clear_preempt_need_resched();
  	rq->clock_skip_update = 0;
  
-@@ -3654,6 +3692,30 @@ static void __sched notrace preempt_sche
+@@ -3675,6 +3713,30 @@ static void __sched notrace preempt_sche
  	} while (need_resched());
  }
  
@@ -323,7 +323,7 @@
  #ifdef CONFIG_PREEMPT
  /*
   * this is the entry point to schedule() from in-kernel preemption
-@@ -3668,7 +3730,8 @@ asmlinkage __visible void __sched notrac
+@@ -3689,7 +3751,8 @@ asmlinkage __visible void __sched notrac
  	 */
  	if (likely(!preemptible()))
  		return;
@@ -333,7 +333,7 @@
  	preempt_schedule_common();
  }
  NOKPROBE_SYMBOL(preempt_schedule);
-@@ -3695,6 +3758,9 @@ asmlinkage __visible void __sched notrac
+@@ -3716,6 +3779,9 @@ asmlinkage __visible void __sched notrac
  	if (likely(!preemptible()))
  		return;
  
@@ -343,7 +343,7 @@
  	do {
  		/*
  		 * Because the function tracer can trace preempt_count_sub()
-@@ -5458,7 +5524,9 @@ void init_idle(struct task_struct *idle,
+@@ -5481,7 +5547,9 @@ void init_idle(struct task_struct *idle,
  
  	/* Set the preempt count _outside_ the spinlocks! */
  	init_idle_preempt_count(idle, cpu);
@@ -356,7 +356,7 @@
  	 */
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
-@@ -3508,7 +3508,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -3518,7 +3518,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
  	ideal_runtime = sched_slice(cfs_rq, curr);
  	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
  	if (delta_exec > ideal_runtime) {
@@ -365,7 +365,7 @@
  		/*
  		 * The current task ran long enough, ensure it doesn't get
  		 * re-elected due to buddy favours.
-@@ -3532,7 +3532,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -3542,7 +3542,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
  		return;
  
  	if (delta > ideal_runtime)
@@ -374,7 +374,7 @@
  }
  
  static void
-@@ -3677,7 +3677,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+@@ -3684,7 +3684,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
  	 * validating it and just reschedule.
  	 */
  	if (queued) {
@@ -383,7 +383,7 @@
  		return;
  	}
  	/*
-@@ -3859,7 +3859,7 @@ static void __account_cfs_rq_runtime(str
+@@ -3866,7 +3866,7 @@ static void __account_cfs_rq_runtime(str
  	 * hierarchy can be throttled
  	 */
  	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
@@ -392,7 +392,7 @@
  }
  
  static __always_inline
-@@ -4487,7 +4487,7 @@ static void hrtick_start_fair(struct rq
+@@ -4494,7 +4494,7 @@ static void hrtick_start_fair(struct rq
  
  		if (delta < 0) {
  			if (rq->curr == p)
@@ -401,7 +401,7 @@
  			return;
  		}
  		hrtick_start(rq, delta);
-@@ -5676,7 +5676,7 @@ static void check_preempt_wakeup(struct
+@@ -5905,7 +5905,7 @@ static void check_preempt_wakeup(struct
  	return;
  
  preempt:
@@ -410,7 +410,7 @@
  	/*
  	 * Only set the backward buddy when the current task is still
  	 * on the rq. This can happen when a wakeup gets interleaved
-@@ -8402,7 +8402,7 @@ static void task_fork_fair(struct task_s
+@@ -8631,7 +8631,7 @@ static void task_fork_fair(struct task_s
  		 * 'current' within the tree based on its new key value.
  		 */
  		swap(curr->vruntime, se->vruntime);
@@ -419,7 +419,7 @@
  	}
  
  	se->vruntime -= cfs_rq->min_vruntime;
-@@ -8426,7 +8426,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -8655,7 +8655,7 @@ prio_changed_fair(struct rq *rq, struct
  	 */
  	if (rq->curr == p) {
  		if (p->prio > oldprio)
@@ -442,7 +442,7 @@
  /*
 --- a/kernel/sched/sched.h
 +++ b/kernel/sched/sched.h
-@@ -1317,6 +1317,15 @@ extern void init_sched_fair_class(void);
+@@ -1347,6 +1347,15 @@ extern void init_sched_fair_class(void);
  extern void resched_curr(struct rq *rq);
  extern void resched_cpu(int cpu);
  
@@ -527,7 +527,7 @@
  void
 --- a/kernel/trace/trace.h
 +++ b/kernel/trace/trace.h
-@@ -123,6 +123,7 @@ struct kretprobe_trace_entry_head {
+@@ -124,6 +124,7 @@ struct kretprobe_trace_entry_head {
   *  NEED_RESCHED	- reschedule is requested
   *  HARDIRQ		- inside an interrupt handler
   *  SOFTIRQ		- inside a softirq handler
@@ -535,7 +535,7 @@
   */
  enum trace_flag_type {
  	TRACE_FLAG_IRQS_OFF		= 0x01,
-@@ -132,6 +133,7 @@ enum trace_flag_type {
+@@ -133,6 +134,7 @@ enum trace_flag_type {
  	TRACE_FLAG_SOFTIRQ		= 0x10,
  	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
  	TRACE_FLAG_NMI			= 0x40,
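
For readers new to lazy preemption: the patch introduces a second, soft
resched flag. A wakeup of a SCHED_OTHER task only sets
TIF_NEED_RESCHED_LAZY, so the running task keeps the CPU until it
reaches a natural preemption point, while RT tasks still set
TIF_NEED_RESCHED and preempt immediately. A hypothetical sketch of the
resulting check (helper and flag names are assumptions, the real hunk
bodies are elided above):

	static inline bool need_resched_now(void)
	{
		/* hard request: always honoured, used for RT tasks */
		if (test_thread_flag(TIF_NEED_RESCHED))
			return true;
		/* soft request: honoured once no lazy section is held */
		return test_thread_flag(TIF_NEED_RESCHED_LAZY) &&
		       preempt_lazy_count() == 0;
	}
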
diff --git a/patches/printk-kill.patch b/patches/printk-kill.patch
index 177b483..4d47410 100644
--- a/patches/printk-kill.patch
+++ b/patches/printk-kill.patch
@@ -14,7 +14,7 @@
 
 --- a/include/linux/printk.h
 +++ b/include/linux/printk.h
-@@ -125,9 +125,11 @@ struct va_format {
+@@ -126,9 +126,11 @@ struct va_format {
  #ifdef CONFIG_EARLY_PRINTK
  extern asmlinkage __printf(1, 2)
  void early_printk(const char *fmt, ...);
@@ -87,7 +87,7 @@
  #ifdef CONFIG_PRINTK
  DECLARE_WAIT_QUEUE_HEAD(log_wait);
  /* the next printk record to read by syslog(READ) or /proc/kmsg */
-@@ -1750,6 +1802,13 @@ asmlinkage int vprintk_emit(int facility
+@@ -1781,6 +1833,13 @@ asmlinkage int vprintk_emit(int facility
  	/* cpu currently holding logbuf_lock in this function */
  	static unsigned int logbuf_cpu = UINT_MAX;
  
@@ -101,7 +101,7 @@
  	if (level == LOGLEVEL_SCHED) {
  		level = LOGLEVEL_DEFAULT;
  		in_sched = true;
-@@ -2023,26 +2082,6 @@ DEFINE_PER_CPU(printk_func_t, printk_fun
+@@ -2014,26 +2073,6 @@ DEFINE_PER_CPU(printk_func_t, printk_fun
  
  #endif /* CONFIG_PRINTK */
  
diff --git a/patches/printk-rt-aware.patch b/patches/printk-rt-aware.patch
index e376f29..88f73f7 100644
--- a/patches/printk-rt-aware.patch
+++ b/patches/printk-rt-aware.patch
@@ -12,7 +12,7 @@
 
 --- a/kernel/printk/printk.c
 +++ b/kernel/printk/printk.c
-@@ -1631,6 +1631,7 @@ static void call_console_drivers(int lev
+@@ -1628,6 +1628,7 @@ static void call_console_drivers(int lev
  	if (!console_drivers)
  		return;
  
@@ -20,7 +20,7 @@
  	for_each_console(con) {
  		if (exclusive_console && con != exclusive_console)
  			continue;
-@@ -1646,6 +1647,7 @@ static void call_console_drivers(int lev
+@@ -1643,6 +1644,7 @@ static void call_console_drivers(int lev
  		else
  			con->write(con, text, len);
  	}
@@ -28,7 +28,7 @@
  }
  
  /*
-@@ -1960,13 +1962,23 @@ asmlinkage int vprintk_emit(int facility
+@@ -1951,13 +1953,23 @@ asmlinkage int vprintk_emit(int facility
  
  	/* If called from the scheduler, we can not call up(). */
  	if (!in_sched) {
@@ -53,7 +53,7 @@
  			console_unlock();
  		lockdep_on();
  	}
-@@ -2358,11 +2370,16 @@ static void console_cont_flush(char *tex
+@@ -2349,11 +2361,16 @@ static void console_cont_flush(char *tex
  		goto out;
  
  	len = cont_print_text(text, size);
@@ -70,7 +70,7 @@
  	return;
  out:
  	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-@@ -2486,13 +2503,17 @@ void console_unlock(void)
+@@ -2477,13 +2494,17 @@ void console_unlock(void)
  		console_idx = log_next(console_idx);
  		console_seq++;
  		console_prev = msg->flags;
diff --git a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index 9fff059..4bfc71e 100644
--- a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -41,7 +41,7 @@
  #define task_contributes_to_load(task)	\
  				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
  				 (task->flags & PF_FROZEN) == 0 && \
-@@ -3255,6 +3252,51 @@ static inline int signal_pending_state(l
+@@ -3359,6 +3356,51 @@ static inline int signal_pending_state(l
  	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
  }
  
@@ -95,7 +95,7 @@
   * explicit rescheduling in places that are safe. The return
 --- a/kernel/ptrace.c
 +++ b/kernel/ptrace.c
-@@ -128,7 +128,14 @@ static bool ptrace_freeze_traced(struct
+@@ -130,7 +130,14 @@ static bool ptrace_freeze_traced(struct
  
  	spin_lock_irq(&task->sighand->siglock);
  	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
@@ -113,7 +113,7 @@
  	spin_unlock_irq(&task->sighand->siglock);
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -1373,6 +1373,18 @@ int migrate_swap(struct task_struct *cur
+@@ -1384,6 +1384,18 @@ int migrate_swap(struct task_struct *cur
  	return ret;
  }
  
@@ -132,7 +132,7 @@
  /*
   * wait_task_inactive - wait for a thread to unschedule.
   *
-@@ -1417,7 +1429,7 @@ unsigned long wait_task_inactive(struct
+@@ -1428,7 +1440,7 @@ unsigned long wait_task_inactive(struct
  		 * is actually now running somewhere else!
  		 */
  		while (task_running(rq, p)) {
@@ -141,7 +141,7 @@
  				return 0;
  			cpu_relax();
  		}
-@@ -1432,7 +1444,8 @@ unsigned long wait_task_inactive(struct
+@@ -1443,7 +1455,8 @@ unsigned long wait_task_inactive(struct
  		running = task_running(rq, p);
  		queued = task_on_rq_queued(p);
  		ncsw = 0;
diff --git a/patches/random-make-it-work-on-rt.patch b/patches/random-make-it-work-on-rt.patch
index 8683445..8e66a85 100644
--- a/patches/random-make-it-work-on-rt.patch
+++ b/patches/random-make-it-work-on-rt.patch
@@ -76,7 +76,7 @@
  
 --- a/include/linux/irqdesc.h
 +++ b/include/linux/irqdesc.h
-@@ -64,6 +64,7 @@ struct irq_desc {
+@@ -66,6 +66,7 @@ struct irq_desc {
  	unsigned int		irqs_unhandled;
  	atomic_t		threads_handled;
  	int			threads_handled_last;
@@ -86,12 +86,12 @@
  	const struct cpumask	*percpu_affinity;
 --- a/include/linux/random.h
 +++ b/include/linux/random.h
-@@ -20,7 +20,7 @@ struct random_ready_callback {
- extern void add_device_randomness(const void *, unsigned int);
+@@ -31,7 +31,7 @@ static inline void add_latent_entropy(vo
+ 
  extern void add_input_randomness(unsigned int type, unsigned int code,
- 				 unsigned int value);
--extern void add_interrupt_randomness(int irq, int irq_flags);
-+extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip);
+ 				 unsigned int value) __latent_entropy;
+-extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
++extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) __latent_entropy;
  
  extern void get_random_bytes(void *buf, int nbytes);
  extern int add_random_ready_callback(struct random_ready_callback *rdy);
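
An aside on the hunk above: the visible change is add_interrupt_randomness()
growing a __u64 ip argument. The likely reason (not quoted in this refresh) is
that with forced-threaded interrupts on RT the entropy code no longer runs in
the hard-irq context it wants to sample, so the interrupted instruction
pointer has to be captured at hard-irq time and handed to the deferred
consumer. A minimal userspace analogue of that capture-then-defer shape, with
every name invented for illustration:

    /* Illustrative userspace analogue: sample context-sensitive data at
     * event time, consume it later in a worker context where the original
     * information is no longer available. Not kernel code. */
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    struct irq_event {
            int irq;
            uint64_t ip;    /* captured where the event fired */
    };

    static uint64_t fake_entropy_pool;

    /* Runs later, possibly in a thread: must not re-sample the context. */
    static void add_interrupt_randomness(int irq, uint64_t ip)
    {
            fake_entropy_pool ^= (ip << 7) ^ (uint64_t)irq;
    }

    /* Simulated hard-irq entry: the only place 'ip' is meaningful. */
    static struct irq_event handle_irq(int irq)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            /* stand-in for the interrupted instruction pointer */
            return (struct irq_event){ .irq = irq, .ip = (uint64_t)ts.tv_nsec };
    }

    int main(void)
    {
            struct irq_event ev = handle_irq(11);

            add_interrupt_randomness(ev.irq, ev.ip);  /* deferred consumer */
            printf("pool: %#llx\n", (unsigned long long)fake_entropy_pool);
            return 0;
    }
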
diff --git a/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch b/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
index 6b3df0d..b50477d 100644
--- a/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
+++ b/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
@@ -23,7 +23,7 @@
 
 --- a/kernel/rcu/tree.c
 +++ b/kernel/rcu/tree.c
-@@ -56,6 +56,11 @@
+@@ -55,6 +55,11 @@
  #include <linux/random.h>
  #include <linux/trace_events.h>
  #include <linux/suspend.h>
@@ -39,8 +39,8 @@
  /*
   * Do RCU core processing for the current CPU.
   */
--static void rcu_process_callbacks(struct softirq_action *unused)
-+static void rcu_process_callbacks(void)
+-static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
++static __latent_entropy void rcu_process_callbacks(void)
  {
  	struct rcu_state *rsp;
  
@@ -168,7 +168,7 @@
  
  /*
   * Handle any core-RCU processing required by a call_rcu() invocation.
-@@ -4237,7 +4328,6 @@ void __init rcu_init(void)
+@@ -4261,7 +4352,6 @@ void __init rcu_init(void)
  	if (dump_tree)
  		rcu_dump_rcu_node_tree(&rcu_sched_state);
  	__rcu_init_preempt();
@@ -178,7 +178,7 @@
  	 * We don't need protection against CPU-hotplug here because
 --- a/kernel/rcu/tree.h
 +++ b/kernel/rcu/tree.h
-@@ -595,12 +595,10 @@ extern struct rcu_state rcu_bh_state;
+@@ -596,12 +596,10 @@ extern struct rcu_state rcu_bh_state;
  extern struct rcu_state rcu_preempt_state;
  #endif /* #ifdef CONFIG_PREEMPT_RCU */
  
@@ -191,7 +191,7 @@
  
  #ifndef RCU_TREE_NONCORE
  
-@@ -620,10 +618,9 @@ void call_rcu(struct rcu_head *head, rcu
+@@ -621,10 +619,9 @@ void call_rcu(struct rcu_head *head, rcu
  static void __init __rcu_init_preempt(void);
  static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
  static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
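
The patch this refresh touches moves RCU core processing out of RCU_SOFTIRQ
into per-CPU kthreads; rcu_process_callbacks() losing its softirq_action
argument is the visible seam. The general shape, work raised from a context
that cannot sleep but executed by a thread that can, looks roughly like this
in userspace (illustrative names only, not the kernel's implementation):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
    static bool work_pending;
    static bool stop;

    static void process_callbacks(void)
    {
            printf("processing callbacks in thread context\n");
    }

    static void *rcu_worker(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&lock);
            while (!stop) {
                    while (!work_pending && !stop)
                            pthread_cond_wait(&kick, &lock);
                    if (work_pending) {
                            work_pending = false;
                            pthread_mutex_unlock(&lock);
                            process_callbacks();    /* may sleep here */
                            pthread_mutex_lock(&lock);
                    }
            }
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    /* Replaces raise_softirq(RCU_SOFTIRQ): just wake the worker. */
    static void invoke_rcu_core(void)
    {
            pthread_mutex_lock(&lock);
            work_pending = true;
            pthread_cond_signal(&kick);
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, rcu_worker, NULL);
            invoke_rcu_core();
            pthread_mutex_lock(&lock);
            stop = true;
            pthread_cond_signal(&kick);
            pthread_mutex_unlock(&lock);
            pthread_join(t, NULL);
            return 0;
    }
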
diff --git a/patches/rcu-disable-rcu-fast-no-hz-on-rt.patch b/patches/rcu-disable-rcu-fast-no-hz-on-rt.patch
index 768a8d4..b9f24c8 100644
--- a/patches/rcu-disable-rcu-fast-no-hz-on-rt.patch
+++ b/patches/rcu-disable-rcu-fast-no-hz-on-rt.patch
@@ -13,7 +13,7 @@
 
 --- a/init/Kconfig
 +++ b/init/Kconfig
-@@ -613,7 +613,7 @@ config RCU_FANOUT_LEAF
+@@ -623,7 +623,7 @@ config RCU_FANOUT_LEAF
  
  config RCU_FAST_NO_HZ
  	bool "Accelerate last non-dyntick-idle CPU's grace periods"
diff --git a/patches/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch b/patches/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
index 9f3b4c4..59a7dd8 100644
--- a/patches/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
+++ b/patches/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
@@ -18,7 +18,7 @@
 
 --- a/kernel/rcu/update.c
 +++ b/kernel/rcu/update.c
-@@ -63,7 +63,7 @@ MODULE_ALIAS("rcupdate");
+@@ -62,7 +62,7 @@
  #ifndef CONFIG_TINY_RCU
  module_param(rcu_expedited, int, 0);
  module_param(rcu_normal, int, 0);
diff --git a/patches/rcu-make-RCU_BOOST-default-on-RT.patch b/patches/rcu-make-RCU_BOOST-default-on-RT.patch
index d7e3e6d..27053c1 100644
--- a/patches/rcu-make-RCU_BOOST-default-on-RT.patch
+++ b/patches/rcu-make-RCU_BOOST-default-on-RT.patch
@@ -14,7 +14,7 @@
 
 --- a/init/Kconfig
 +++ b/init/Kconfig
-@@ -496,7 +496,7 @@ config TINY_RCU
+@@ -506,7 +506,7 @@ config TINY_RCU
  
  config RCU_EXPERT
  	bool "Make expert-level adjustments to RCU configuration"
@@ -23,7 +23,7 @@
  	help
  	  This option needs to be enabled if you wish to make
  	  expert-level adjustments to RCU configuration.  By default,
-@@ -640,7 +640,7 @@ config TREE_RCU_TRACE
+@@ -650,7 +650,7 @@ config TREE_RCU_TRACE
  config RCU_BOOST
  	bool "Enable RCU priority boosting"
  	depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT
diff --git a/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch b/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
index 4c35a15..5af9b28 100644
--- a/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
+++ b/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
@@ -63,8 +63,8 @@
 +#endif
  void rcu_check_callbacks(int user);
  void rcu_report_dead(unsigned int cpu);
- 
-@@ -508,7 +516,14 @@ extern struct lockdep_map rcu_callback_m
+ void rcu_cpu_starting(unsigned int cpu);
+@@ -509,7 +517,14 @@ extern struct lockdep_map rcu_callback_m
  int debug_lockdep_rcu_enabled(void);
  
  int rcu_read_lock_held(void);
@@ -79,7 +79,7 @@
  
  /**
   * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
-@@ -906,10 +921,14 @@ static inline void rcu_read_unlock(void)
+@@ -907,10 +922,14 @@ static inline void rcu_read_unlock(void)
  static inline void rcu_read_lock_bh(void)
  {
  	local_bh_disable();
@@ -94,7 +94,7 @@
  }
  
  /*
-@@ -919,10 +938,14 @@ static inline void rcu_read_lock_bh(void
+@@ -920,10 +939,14 @@ static inline void rcu_read_lock_bh(void
   */
  static inline void rcu_read_unlock_bh(void)
  {
@@ -195,7 +195,7 @@
   * The names includes "busted", and they really means it!
 --- a/kernel/rcu/tree.c
 +++ b/kernel/rcu/tree.c
-@@ -259,6 +259,7 @@ void rcu_sched_qs(void)
+@@ -257,6 +257,7 @@ void rcu_sched_qs(void)
  			   this_cpu_ptr(&rcu_sched_data), true);
  }
  
@@ -203,7 +203,7 @@
  void rcu_bh_qs(void)
  {
  	if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
-@@ -268,6 +269,7 @@ void rcu_bh_qs(void)
+@@ -266,6 +267,7 @@ void rcu_bh_qs(void)
  		__this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
  	}
  }
@@ -211,7 +211,7 @@
  
  static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
  
-@@ -448,11 +450,13 @@ EXPORT_SYMBOL_GPL(rcu_batches_started_sc
+@@ -446,11 +448,13 @@ EXPORT_SYMBOL_GPL(rcu_batches_started_sc
  /*
   * Return the number of RCU BH batches started thus far for debug & stats.
   */
@@ -225,7 +225,7 @@
  
  /*
   * Return the number of RCU batches completed thus far for debug & stats.
-@@ -472,6 +476,7 @@ unsigned long rcu_batches_completed_sche
+@@ -470,6 +474,7 @@ unsigned long rcu_batches_completed_sche
  }
  EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
  
@@ -233,7 +233,7 @@
  /*
   * Return the number of RCU BH batches completed thus far for debug & stats.
   */
-@@ -480,6 +485,7 @@ unsigned long rcu_batches_completed_bh(v
+@@ -478,6 +483,7 @@ unsigned long rcu_batches_completed_bh(v
  	return rcu_bh_state.completed;
  }
  EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
@@ -241,7 +241,7 @@
  
  /*
   * Return the number of RCU expedited batches completed thus far for
-@@ -503,6 +509,7 @@ unsigned long rcu_exp_batches_completed_
+@@ -501,6 +507,7 @@ unsigned long rcu_exp_batches_completed_
  }
  EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
  
@@ -249,7 +249,7 @@
  /*
   * Force a quiescent state.
   */
-@@ -521,6 +528,13 @@ void rcu_bh_force_quiescent_state(void)
+@@ -519,6 +526,13 @@ void rcu_bh_force_quiescent_state(void)
  }
  EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
  
@@ -263,7 +263,7 @@
  /*
   * Force a quiescent state for RCU-sched.
   */
-@@ -571,9 +585,11 @@ void rcutorture_get_gp_data(enum rcutort
+@@ -569,9 +583,11 @@ void rcutorture_get_gp_data(enum rcutort
  	case RCU_FLAVOR:
  		rsp = rcu_state_p;
  		break;
@@ -323,7 +323,7 @@
  
  /**
   * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
-@@ -4196,7 +4218,9 @@ void __init rcu_init(void)
+@@ -4220,7 +4242,9 @@ void __init rcu_init(void)
  
  	rcu_bootup_announce();
  	rcu_init_geometry();
@@ -335,7 +335,7 @@
  		rcu_dump_rcu_node_tree(&rcu_sched_state);
 --- a/kernel/rcu/tree.h
 +++ b/kernel/rcu/tree.h
-@@ -587,7 +587,9 @@ extern struct list_head rcu_struct_flavo
+@@ -588,7 +588,9 @@ extern struct list_head rcu_struct_flavo
   */
  extern struct rcu_state rcu_sched_state;
  
@@ -347,7 +347,7 @@
  extern struct rcu_state rcu_preempt_state;
 --- a/kernel/rcu/update.c
 +++ b/kernel/rcu/update.c
-@@ -293,6 +293,7 @@ int rcu_read_lock_held(void)
+@@ -292,6 +292,7 @@ int rcu_read_lock_held(void)
  }
  EXPORT_SYMBOL_GPL(rcu_read_lock_held);
  
@@ -355,7 +355,7 @@
  /**
   * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
   *
-@@ -319,6 +320,7 @@ int rcu_read_lock_bh_held(void)
+@@ -318,6 +319,7 @@ int rcu_read_lock_bh_held(void)
  	return in_softirq() || irqs_disabled();
  }
  EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
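
What the rcu_read_lock_bh()/rcu_read_unlock_bh() hunks amount to: on RT the
BH flavour of RCU is folded into preemptible RCU, so the _bh primitives keep
their bh-disable side effect but take the ordinary read lock. A compile-time
sketch of that mapping with stubbed primitives (not the kernel's actual
definitions; flip the macro to 0 for the mainline shape):

    #include <stdio.h>

    #define CONFIG_PREEMPT_RT_FULL 1

    static void local_bh_disable(void) { puts("bh off"); }
    static void local_bh_enable(void)  { puts("bh on"); }
    static void rcu_read_lock(void)    { puts("rcu read lock"); }
    static void rcu_read_unlock(void)  { puts("rcu read unlock"); }
    static void __rcu_read_lock_bh(void)   { puts("rcu-bh read lock"); }
    static void __rcu_read_unlock_bh(void) { puts("rcu-bh read unlock"); }

    static void rcu_read_lock_bh(void)
    {
            local_bh_disable();
    #if CONFIG_PREEMPT_RT_FULL
            rcu_read_lock();        /* RT: one flavour for bh and preempt */
    #else
            __rcu_read_lock_bh();
    #endif
    }

    static void rcu_read_unlock_bh(void)
    {
    #if CONFIG_PREEMPT_RT_FULL
            rcu_read_unlock();
    #else
            __rcu_read_unlock_bh();
    #endif
            local_bh_enable();
    }

    int main(void)
    {
            rcu_read_lock_bh();
            /* read-side critical section */
            rcu_read_unlock_bh();
            return 0;
    }
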
diff --git a/patches/rcu-update-make-RCU_EXPEDITE_BOOT-default.patch b/patches/rcu-update-make-RCU_EXPEDITE_BOOT-default.patch
index d3fd1f5..324d402 100644
--- a/patches/rcu-update-make-RCU_EXPEDITE_BOOT-default.patch
+++ b/patches/rcu-update-make-RCU_EXPEDITE_BOOT-default.patch
@@ -16,7 +16,7 @@
 
 --- a/init/Kconfig
 +++ b/init/Kconfig
-@@ -771,19 +771,6 @@ config RCU_NOCB_CPU_ALL
+@@ -781,19 +781,6 @@ config RCU_NOCB_CPU_ALL
  
  endchoice
  
@@ -38,7 +38,7 @@
  config BUILD_BIN2C
 --- a/kernel/rcu/update.c
 +++ b/kernel/rcu/update.c
-@@ -130,8 +130,7 @@ bool rcu_gp_is_normal(void)
+@@ -129,8 +129,7 @@ bool rcu_gp_is_normal(void)
  }
  EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
  
@@ -48,7 +48,7 @@
  
  /*
   * Should normal grace-period primitives be expedited?  Intended for
-@@ -179,8 +178,7 @@ EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
+@@ -178,8 +177,7 @@ EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
   */
  void rcu_end_inkernel_boot(void)
  {
diff --git a/patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch b/patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
index 58dc9da..cd17d44 100644
--- a/patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
+++ b/patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
@@ -33,7 +33,7 @@
 
 --- a/kernel/rcu/tree.c
 +++ b/kernel/rcu/tree.c
-@@ -264,7 +264,12 @@ static void rcu_preempt_qs(void);
+@@ -262,7 +262,12 @@ static void rcu_preempt_qs(void);
  
  void rcu_bh_qs(void)
  {
diff --git a/patches/re-migrate_disable-race-with-cpu-hotplug-3f.patch b/patches/re-migrate_disable-race-with-cpu-hotplug-3f.patch
index 66ddf64..897b408 100644
--- a/patches/re-migrate_disable-race-with-cpu-hotplug-3f.patch
+++ b/patches/re-migrate_disable-race-with-cpu-hotplug-3f.patch
@@ -19,7 +19,7 @@
 
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -184,9 +184,11 @@ static DEFINE_PER_CPU(struct hotplug_pcp
+@@ -257,9 +257,11 @@ static DEFINE_PER_CPU(struct hotplug_pcp
   */
  void pin_current_cpu(void)
  {
diff --git a/patches/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch b/patches/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
index 250dc3c..55290c0 100644
--- a/patches/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
+++ b/patches/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
@@ -35,7 +35,7 @@
 
 --- a/arch/arm/kernel/process.c
 +++ b/arch/arm/kernel/process.c
-@@ -323,6 +323,30 @@ unsigned long arch_randomize_brk(struct
+@@ -322,6 +322,30 @@ unsigned long arch_randomize_brk(struct
  }
  
  #ifdef CONFIG_MMU
diff --git a/patches/relay-fix-timer-madness.patch b/patches/relay-fix-timer-madness.patch
deleted file mode 100644
index 4ac41bf..0000000
--- a/patches/relay-fix-timer-madness.patch
+++ /dev/null
@@ -1,52 +0,0 @@
-From: Ingo Molnar <mingo@elte.hu>
-Date: Fri, 3 Jul 2009 08:44:07 -0500
-Subject: relay: Fix timer madness
-
-remove timer calls (!!!) from deep within the tracing infrastructure.
-This was totally bogus code that can cause lockups and worse.  Poll
-the buffer every 2 jiffies for now.
-
-Signed-off-by: Ingo Molnar <mingo@elte.hu>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
----
- kernel/relay.c |   14 +++++---------
- 1 file changed, 5 insertions(+), 9 deletions(-)
-
---- a/kernel/relay.c
-+++ b/kernel/relay.c
-@@ -336,6 +336,10 @@ static void wakeup_readers(unsigned long
- {
- 	struct rchan_buf *buf = (struct rchan_buf *)data;
- 	wake_up_interruptible(&buf->read_wait);
-+	/*
-+	 * Stupid polling for now:
-+	 */
-+	mod_timer(&buf->timer, jiffies + 1);
- }
- 
- /**
-@@ -353,6 +357,7 @@ static void __relay_reset(struct rchan_b
- 		init_waitqueue_head(&buf->read_wait);
- 		kref_init(&buf->kref);
- 		setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
-+		mod_timer(&buf->timer, jiffies + 1);
- 	} else
- 		del_timer_sync(&buf->timer);
- 
-@@ -767,15 +772,6 @@ size_t relay_switch_subbuf(struct rchan_
- 		else
- 			buf->early_bytes += buf->chan->subbuf_size -
- 					    buf->padding[old_subbuf];
--		smp_mb();
--		if (waitqueue_active(&buf->read_wait))
--			/*
--			 * Calling wake_up_interruptible() from here
--			 * will deadlock if we happen to be logging
--			 * from the scheduler (trying to re-grab
--			 * rq->lock), so defer it.
--			 */
--			mod_timer(&buf->timer, jiffies + 1);
- 	}
- 
- 	old = buf->data;
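
The queue drops relay-fix-timer-madness.patch here (its series entry is
removed further down); why it is no longer needed is not stated in this
refresh. The workaround it carried is still a pattern worth knowing: when
waking a waiter directly would deadlock (here, logging from scheduler context
while holding rq->lock), only set a flag and let a periodic timer do the
wakeup. A userspace rendition, assuming nothing beyond POSIX:

    /* The event context may not take the waitqueue lock, so it only sets
     * a flag; a periodic poller (the 1-jiffy timer in the original)
     * performs the actual wakeup. Illustrative only. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_bool data_ready;

    /* Called from the context where taking locks would deadlock. */
    static void wakeup_needed(void)
    {
            atomic_store(&data_ready, true);
    }

    /* Stands in for the mod_timer(&buf->timer, jiffies + 1) polling. */
    static void *poller(void *arg)
    {
            (void)arg;
            for (int i = 0; i < 50; i++) {
                    if (atomic_exchange(&data_ready, false))
                            printf("waking readers (safe context)\n");
                    usleep(1000);   /* ~1 jiffy at HZ=1000 */
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, poller, NULL);
            wakeup_needed();        /* no locks taken here */
            pthread_join(t, NULL);
            return 0;
    }
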
diff --git a/patches/rt-add-rt-locks.patch b/patches/rt-add-rt-locks.patch
index fe0c62a..7740aa9 100644
--- a/patches/rt-add-rt-locks.patch
+++ b/patches/rt-add-rt-locks.patch
@@ -612,7 +612,7 @@
  /* Task command name length */
  #define TASK_COMM_LEN 16
  
-@@ -1009,8 +1014,18 @@ struct wake_q_head {
+@@ -1013,8 +1018,18 @@ struct wake_q_head {
  	struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
  
  extern void wake_q_add(struct wake_q_head *head,
@@ -915,7 +915,7 @@
 +#endif
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -1292,6 +1292,7 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1301,6 +1301,7 @@ static int wake_futex_pi(u32 __user *uad
  	struct futex_pi_state *pi_state = this->pi_state;
  	u32 uninitialized_var(curval), newval;
  	WAKE_Q(wake_q);
@@ -923,7 +923,7 @@
  	bool deboost;
  	int ret = 0;
  
-@@ -1358,7 +1359,8 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1367,7 +1368,8 @@ static int wake_futex_pi(u32 __user *uad
  
  	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
  
@@ -933,7 +933,7 @@
  
  	/*
  	 * First unlock HB so the waiter does not spin on it once he got woken
-@@ -1368,6 +1370,7 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1377,6 +1379,7 @@ static int wake_futex_pi(u32 __user *uad
  	 */
  	spin_unlock(&hb->lock);
  	wake_up_q(&wake_q);
@@ -941,7 +941,7 @@
  	if (deboost)
  		rt_mutex_adjust_prio(current);
  
-@@ -2842,10 +2845,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2851,10 +2854,7 @@ static int futex_wait_requeue_pi(u32 __u
  	 * The waiter is allocated on our stack, manipulated by the requeue
  	 * code while we sleep on uaddr.
  	 */
@@ -976,7 +976,7 @@
  obj-$(CONFIG_LOCKDEP) += lockdep.o
  ifeq ($(CONFIG_PROC_FS),y)
  obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
-@@ -25,7 +29,10 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
+@@ -24,7 +28,10 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
  obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
  obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
  obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
diff --git a/patches/rtmutex-futex-prepare-rt.patch b/patches/rtmutex-futex-prepare-rt.patch
index e2887fa..6259f82 100644
--- a/patches/rtmutex-futex-prepare-rt.patch
+++ b/patches/rtmutex-futex-prepare-rt.patch
@@ -15,7 +15,7 @@
 
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
-@@ -1915,6 +1915,16 @@ static int futex_requeue(u32 __user *uad
+@@ -1924,6 +1924,16 @@ static int futex_requeue(u32 __user *uad
  				requeue_pi_wake_futex(this, &key2, hb2);
  				drop_count++;
  				continue;
@@ -32,7 +32,7 @@
  			} else if (ret) {
  				/*
  				 * rt_mutex_start_proxy_lock() detected a
-@@ -2805,7 +2815,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2814,7 +2824,7 @@ static int futex_wait_requeue_pi(u32 __u
  	struct hrtimer_sleeper timeout, *to = NULL;
  	struct rt_mutex_waiter rt_waiter;
  	struct rt_mutex *pi_mutex = NULL;
@@ -41,7 +41,7 @@
  	union futex_key key2 = FUTEX_KEY_INIT;
  	struct futex_q q = futex_q_init;
  	int res, ret;
-@@ -2864,20 +2874,55 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2873,20 +2883,55 @@ static int futex_wait_requeue_pi(u32 __u
  	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
  	futex_wait_queue_me(hb, &q, to);
  
@@ -108,7 +108,7 @@
  
  	/* Check if the requeue code acquired the second futex for us. */
  	if (!q.rt_waiter) {
-@@ -2886,14 +2931,15 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2895,14 +2940,15 @@ static int futex_wait_requeue_pi(u32 __u
  		 * did a lock-steal - fix up the PI-state in that case.
  		 */
  		if (q.pi_state && (q.pi_state->owner != current)) {
@@ -126,7 +126,7 @@
  		}
  	} else {
  		/*
-@@ -2906,7 +2952,8 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2915,7 +2961,8 @@ static int futex_wait_requeue_pi(u32 __u
  		ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
  		debug_rt_mutex_free_waiter(&rt_waiter);
  
diff --git a/patches/rtmutex_dont_include_rcu.patch b/patches/rtmutex_dont_include_rcu.patch
index 494f141..7159472 100644
--- a/patches/rtmutex_dont_include_rcu.patch
+++ b/patches/rtmutex_dont_include_rcu.patch
@@ -101,7 +101,7 @@
  
  #include <asm/barrier.h>
  
-@@ -628,54 +629,6 @@ static inline void rcu_preempt_sleep_che
+@@ -629,54 +630,6 @@ static inline void rcu_preempt_sleep_che
  })
  
  /**
diff --git a/patches/sc16is7xx_Drop_bogus_use_of_IRQF_ONESHOT.patch b/patches/sc16is7xx_Drop_bogus_use_of_IRQF_ONESHOT.patch
index aeb61bf..eecfc3a 100644
--- a/patches/sc16is7xx_Drop_bogus_use_of_IRQF_ONESHOT.patch
+++ b/patches/sc16is7xx_Drop_bogus_use_of_IRQF_ONESHOT.patch
@@ -43,7 +43,7 @@
 
 --- a/drivers/tty/serial/sc16is7xx.c
 +++ b/drivers/tty/serial/sc16is7xx.c
-@@ -1240,7 +1240,7 @@ static int sc16is7xx_probe(struct device
+@@ -1264,7 +1264,7 @@ static int sc16is7xx_probe(struct device
  
  	/* Setup interrupt */
  	ret = devm_request_irq(dev, irq, sc16is7xx_irq,
diff --git a/patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch b/patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
index 7620c76..03a11a1 100644
--- a/patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
+++ b/patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
@@ -12,7 +12,7 @@
 
 --- a/kernel/sched/deadline.c
 +++ b/kernel/sched/deadline.c
-@@ -697,6 +697,7 @@ void init_dl_task_timer(struct sched_dl_
+@@ -687,6 +687,7 @@ void init_dl_task_timer(struct sched_dl_
  
  	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  	timer->function = dl_task_timer;
diff --git a/patches/sched-delay-put-task.patch b/patches/sched-delay-put-task.patch
index 2c4f1b6..d5466d2 100644
--- a/patches/sched-delay-put-task.patch
+++ b/patches/sched-delay-put-task.patch
@@ -13,7 +13,7 @@
 
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1936,6 +1936,9 @@ struct task_struct {
+@@ -1961,6 +1961,9 @@ struct task_struct {
  	unsigned int	sequential_io;
  	unsigned int	sequential_io_avg;
  #endif
@@ -23,7 +23,7 @@
  #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
  	unsigned long	task_state_change;
  #endif
-@@ -2174,6 +2177,15 @@ extern struct pid *cad_pid;
+@@ -2218,6 +2221,15 @@ extern struct pid *cad_pid;
  extern void free_task(struct task_struct *tsk);
  #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
  
@@ -39,7 +39,7 @@
  extern void __put_task_struct(struct task_struct *t);
  
  static inline void put_task_struct(struct task_struct *t)
-@@ -2181,6 +2193,7 @@ static inline void put_task_struct(struc
+@@ -2225,6 +2237,7 @@ static inline void put_task_struct(struc
  	if (atomic_dec_and_test(&t->usage))
  		__put_task_struct(t);
  }
@@ -49,7 +49,7 @@
  struct task_struct *try_get_task_struct(struct task_struct **ptask);
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
-@@ -251,7 +251,9 @@ static inline void put_signal_struct(str
+@@ -376,7 +376,9 @@ static inline void put_signal_struct(str
  	if (atomic_dec_and_test(&sig->sigcnt))
  		free_signal_struct(sig);
  }
@@ -60,7 +60,7 @@
  void __put_task_struct(struct task_struct *tsk)
  {
  	WARN_ON(!tsk->exit_state);
-@@ -268,7 +270,18 @@ void __put_task_struct(struct task_struc
+@@ -393,7 +395,18 @@ void __put_task_struct(struct task_struc
  	if (!profile_handoff_task(tsk))
  		free_task(tsk);
  }
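
The refresh above is only context drift; the patch itself makes
__put_task_struct() deferrable, roughly because on RT the final put can land
in a context that must not sleep while the teardown may. The same deferral
shape returns in sched-mmdrop-delayed.patch just below. Stripped of the
kernel machinery (the real patch defers via an RCU callback), the idea is:

    /* Userspace analogue: when the last reference is dropped in a context
     * that must not perform the teardown, queue the object and let another
     * context free it. Illustrative names; single-threaded, so no locking
     * on the reap list. */
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct task {
            atomic_int usage;
            struct task *next_reap; /* stands in for the rcu_head */
    };

    static struct task *reap_list;

    static void __put_task(struct task *t)
    {
            printf("tearing down %p\n", (void *)t);
            free(t);
    }

    /* Drop a reference where immediate teardown would be unsafe. */
    static void put_task_atomic(struct task *t)
    {
            if (atomic_fetch_sub(&t->usage, 1) == 1) {
                    t->next_reap = reap_list;   /* defer, like call_rcu() */
                    reap_list = t;
            }
    }

    /* Runs later, where the teardown is allowed to sleep. */
    static void reap(void)
    {
            while (reap_list) {
                    struct task *t = reap_list;

                    reap_list = t->next_reap;
                    __put_task(t);
            }
    }

    int main(void)
    {
            struct task *t = calloc(1, sizeof(*t));

            atomic_store(&t->usage, 1);
            put_task_atomic(t);     /* no free here */
            reap();                 /* freed now */
            return 0;
    }
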
diff --git a/patches/sched-disable-rt-group-sched-on-rt.patch b/patches/sched-disable-rt-group-sched-on-rt.patch
index 165ca21..4ee8a19 100644
--- a/patches/sched-disable-rt-group-sched-on-rt.patch
+++ b/patches/sched-disable-rt-group-sched-on-rt.patch
@@ -18,7 +18,7 @@
 
 --- a/init/Kconfig
 +++ b/init/Kconfig
-@@ -1041,6 +1041,7 @@ config CFS_BANDWIDTH
+@@ -1051,6 +1051,7 @@ config CFS_BANDWIDTH
  config RT_GROUP_SCHED
  	bool "Group scheduling for SCHED_RR/FIFO"
  	depends on CGROUP_SCHED
diff --git a/patches/sched-might-sleep-do-not-account-rcu-depth.patch b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
index 0ed3168..1aa1c71 100644
--- a/patches/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -36,7 +36,7 @@
  /* Internal to kernel */
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -7697,7 +7697,7 @@ void __init sched_init(void)
+@@ -7820,7 +7820,7 @@ void __init sched_init(void)
  #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
  static inline int preempt_count_equals(int preempt_offset)
  {
diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch
index 769d328..f2a93de 100644
--- a/patches/sched-mmdrop-delayed.patch
+++ b/patches/sched-mmdrop-delayed.patch
@@ -35,7 +35,7 @@
  	void __user *bd_addr;
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -2857,6 +2857,17 @@ static inline void mmdrop(struct mm_stru
+@@ -2905,6 +2905,17 @@ static inline void mmdrop(struct mm_stru
  		__mmdrop(mm);
  }
  
@@ -50,12 +50,12 @@
 +# define mmdrop_delayed(mm)	mmdrop(mm)
 +#endif
 +
- static inline bool mmget_not_zero(struct mm_struct *mm)
+ static inline void mmdrop_async_fn(struct work_struct *work)
  {
- 	return atomic_inc_not_zero(&mm->mm_users);
+ 	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
-@@ -715,6 +715,19 @@ void __mmdrop(struct mm_struct *mm)
+@@ -862,6 +862,19 @@ void __mmdrop(struct mm_struct *mm)
  }
  EXPORT_SYMBOL_GPL(__mmdrop);
  
@@ -77,7 +77,7 @@
  	VM_BUG_ON(atomic_read(&mm->mm_users));
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -2776,8 +2776,12 @@ static struct rq *finish_task_switch(str
+@@ -2785,8 +2785,12 @@ static struct rq *finish_task_switch(str
  	finish_arch_post_lock_switch();
  
  	fire_sched_in_preempt_notifiers(current);
@@ -91,7 +91,7 @@
  	if (unlikely(prev_state == TASK_DEAD)) {
  		if (prev->sched_class->task_dead)
  			prev->sched_class->task_dead(prev);
-@@ -5513,6 +5517,8 @@ void sched_setnuma(struct task_struct *p
+@@ -5545,6 +5549,8 @@ void sched_setnuma(struct task_struct *p
  #endif /* CONFIG_NUMA_BALANCING */
  
  #ifdef CONFIG_HOTPLUG_CPU
@@ -100,7 +100,7 @@
  /*
   * Ensures that the idle task is using init_mm right before its cpu goes
   * offline.
-@@ -5527,7 +5533,12 @@ void idle_task_exit(void)
+@@ -5559,7 +5565,12 @@ void idle_task_exit(void)
  		switch_mm_irqs_off(mm, &init_mm, current);
  		finish_arch_post_lock_switch();
  	}
@@ -114,7 +114,7 @@
  }
  
  /*
-@@ -7402,6 +7413,10 @@ int sched_cpu_dying(unsigned int cpu)
+@@ -7505,6 +7516,10 @@ int sched_cpu_dying(unsigned int cpu)
  	update_max_interval();
  	nohz_balance_exit_idle(cpu);
  	hrtick_clear(rq);
diff --git a/patches/sched-rt-mutex-wakeup.patch b/patches/sched-rt-mutex-wakeup.patch
index 7ac693f..12f200d 100644
--- a/patches/sched-rt-mutex-wakeup.patch
+++ b/patches/sched-rt-mutex-wakeup.patch
@@ -17,15 +17,15 @@
 
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1459,6 +1459,7 @@ struct tlbflush_unmap_batch {
- 
- struct task_struct {
+@@ -1481,6 +1481,7 @@ struct task_struct {
+ 	struct thread_info thread_info;
+ #endif
  	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
-+	volatile long saved_state;	/* saved state for "spinlock sleepers" */
++	volatile long saved_state; /* saved state for "spinlock sleepers" */
  	void *stack;
  	atomic_t usage;
  	unsigned int flags;	/* per process flags, defined below */
-@@ -2649,6 +2650,7 @@ extern void xtime_update(unsigned long t
+@@ -2697,6 +2698,7 @@ extern void xtime_update(unsigned long t
  
  extern int wake_up_state(struct task_struct *tsk, unsigned int state);
  extern int wake_up_process(struct task_struct *tsk);
@@ -35,7 +35,7 @@
   extern void kick_process(struct task_struct *tsk);
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -2023,8 +2023,25 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2033,8 +2033,25 @@ try_to_wake_up(struct task_struct *p, un
  	 */
  	smp_mb__before_spinlock();
  	raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -62,7 +62,7 @@
  
  	trace_sched_waking(p);
  
-@@ -2172,6 +2189,18 @@ int wake_up_process(struct task_struct *
+@@ -2181,6 +2198,18 @@ int wake_up_process(struct task_struct *
  }
  EXPORT_SYMBOL(wake_up_process);
  
@@ -83,7 +83,7 @@
  	return try_to_wake_up(p, state, 0);
 --- a/kernel/sched/sched.h
 +++ b/kernel/sched/sched.h
-@@ -1138,6 +1138,7 @@ static inline void finish_lock_switch(st
+@@ -1163,6 +1163,7 @@ static inline void finish_lock_switch(st
  #define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
  #define WF_FORK		0x02		/* child wakeup after fork */
  #define WF_MIGRATED	0x4		/* internal use, task got migrated */
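
This is the patch that introduces saved_state, the field the ptrace fix
earlier in this queue also consults: a task blocking on a sleeping spinlock
parks its real sleep state in saved_state, and wake_up_lock_sleeper() only
disturbs the lock sleep, so an unrelated wakeup arriving in between is
remembered rather than lost. A toy model of the state bookkeeping (values
and names illustrative, no concurrency shown):

    #include <stdio.h>

    #define TASK_RUNNING            0
    #define TASK_INTERRUPTIBLE      1
    #define TASK_UNINTERRUPTIBLE    2

    struct task {
            long state;
            long saved_state;
    };

    /* Entering the "spinlock sleeper" path. */
    static void save_state(struct task *t)
    {
            t->saved_state = t->state;
            t->state = TASK_UNINTERRUPTIBLE;
    }

    /* Ordinary wakeup: lands in saved_state if the task lock-sleeps. */
    static void wake_up(struct task *t, long match)
    {
            if (t->state & match)
                    t->state = TASK_RUNNING;
            else if (t->saved_state & match)
                    t->saved_state = TASK_RUNNING;  /* remembered */
    }

    /* Leaving the lock slowpath: a remembered wakeup takes effect. */
    static void restore_state(struct task *t)
    {
            t->state = t->saved_state;
    }

    int main(void)
    {
            struct task t = { .state = TASK_INTERRUPTIBLE };

            save_state(&t);                  /* blocks on a spinlock */
            wake_up(&t, TASK_INTERRUPTIBLE); /* signal arrives meanwhile */
            restore_state(&t);
            printf("state after slowpath: %ld (0 == RUNNING)\n", t.state);
            return 0;
    }
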
diff --git a/patches/sched-ttwu-ensure-success-return-is-correct.patch b/patches/sched-ttwu-ensure-success-return-is-correct.patch
index ef541e1..62eea3f 100644
--- a/patches/sched-ttwu-ensure-success-return-is-correct.patch
+++ b/patches/sched-ttwu-ensure-success-return-is-correct.patch
@@ -20,7 +20,7 @@
 
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -2030,8 +2030,10 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2040,8 +2040,10 @@ try_to_wake_up(struct task_struct *p, un
  		 * if the wakeup condition is true.
  		 */
  		if (!(wake_flags & WF_LOCK_SLEEPER)) {
diff --git a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
index 6a31d23..d781b0f 100644
--- a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
+++ b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
@@ -23,7 +23,7 @@
 
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -3482,8 +3482,10 @@ static void __sched notrace __schedule(b
+@@ -3477,8 +3477,10 @@ static void __sched notrace __schedule(b
  			 * If a worker went to sleep, notify and ask workqueue
  			 * whether it wants to wake up a task to maintain
  			 * concurrency.
diff --git a/patches/series b/patches/series
index a09de6c..dea119b 100644
--- a/patches/series
+++ b/patches/series
@@ -34,10 +34,12 @@
 fs-dcache-init-in_lookup_hashtable.patch
 iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch
 iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
-lockdep-Quiet-gcc-about-dangerous-__builtin_return_a.patch
 x86-apic-get-rid-of-warning-acpi_ioapic_lock-defined.patch
 rxrpc-remove-unused-static-variables.patch
 rcu-update-make-RCU_EXPEDITE_BOOT-default.patch
+locking-percpu-rwsem-use-swait-for-the-wating-writer.patch
+btrfs-drop-trace_btrfs_all_work_done-from-normal_wor.patch
+btrfs-swap-free-and-trace-point-in-run_ordered_work.patch
 
 # Wants a different fix for upstream
 NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
@@ -154,13 +156,6 @@
 trace-latency-hist-Consider-new-argument-when-probin.patch
 trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch
 
-# HW LATENCY DETECTOR - this really wants a rewrite
-hwlatdetect.patch
-hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch
-hwlat-detector-Use-trace_clock_local-if-available.patch
-hwlat-detector-Use-thread-instead-of-stop-machine.patch
-hwlat-detector-Don-t-ignore-threshold-module-paramet.patch
-
 ##################################################
 # REAL RT STUFF starts here
 ##################################################
@@ -257,12 +252,6 @@
 # PANIC
 panic-disable-random-on-rt.patch
 
-# IPC
-ipc-msg-Implement-lockless-pipelined-wakeups.patch
-
-# RELAY
-relay-fix-timer-madness.patch
-
 # TIMERS
 timers-prepare-for-full-preemption.patch
 timer-delay-waking-softirqs-from-the-jiffy-tick.patch
@@ -282,6 +271,7 @@
 sched-delay-put-task.patch
 sched-limit-nr-migrate.patch
 sched-mmdrop-delayed.patch
+kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
 sched-rt-mutex-wakeup.patch
 sched-might-sleep-do-not-account-rcu-depth.patch
 cond-resched-softirq-rt.patch
@@ -330,6 +320,7 @@
 
 # RTMUTEX
 pid.h-include-atomic.h.patch
+arm-include-definition-for-cpumask_t.patch
 locking-locktorture-Do-NOT-include-rwlock.h-directly.patch
 rtmutex-lock-killable.patch
 spinlock-types-separate-raw.patch
@@ -346,14 +337,6 @@
 patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
 rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
 
-# LGLOCKS - lovely
-lglocks-rt.patch
-lockinglglocks_Use_preempt_enabledisable_nort.patch
-
-# STOP machine (depend on lglock & rtmutex)
-stomp-machine-create-lg_global_trylock_relax-primiti.patch
-stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch
-
 # DRIVERS SERIAL
 drivers-tty-fix-omap-lock-crap.patch
 drivers-tty-pl011-irq-disable-madness.patch
@@ -369,6 +352,7 @@
 hrtimer-Move-schedule_work-call-to-helper-thread.patch
 
 # FS
+peterz-percpu-rwsem-rt.patch
 fs-namespace-preemption-fix.patch
 mm-protect-activate-switch-mm.patch
 fs-block-rt-support.patch
@@ -389,7 +373,6 @@
 block-mq-use-cpu_light.patch
 block-mq-drop-preempt-disable.patch
 block-mq-don-t-complete-requests-via-IPI.patch
-dump-stack-don-t-disable-preemption-during-trace.patch
 md-raid5-percpu-handling-rt-aware.patch
 
 # CPU CHILL
@@ -397,7 +380,6 @@
 cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
 
 # block
-blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch
 block-blk-mq-use-swait.patch
 
 # BLOCK LIVELOCK PREVENTION
@@ -411,7 +393,6 @@
 # WORKQUEUE more fixes
 workqueue-use-rcu.patch
 workqueue-use-locallock.patch
-workqueue-use-rcu_readlock-in-put_pwq_unlocked.patch
 work-queue-work-around-irqsafe-timer-optimization.patch
 workqueue-distangle-from-rq-lock.patch
 
@@ -431,7 +412,6 @@
 # NETWORKING
 sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
 skbufhead-raw-lock.patch
-skbufhead-raw-lock-free-skbs.patch
 net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
 net-move-xmit_recursion-to-per-task-variable-on-RT.patch
 net-provide-a-way-to-delegate-processing-a-softirq-t.patch
diff --git a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index 5a90242..bdbe97b 100644
--- a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -17,7 +17,7 @@
 
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1659,6 +1659,7 @@ struct task_struct {
+@@ -1684,6 +1684,7 @@ struct task_struct {
  /* signal handlers */
  	struct signal_struct *signal;
  	struct sighand_struct *sighand;
@@ -48,7 +48,7 @@
  
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
-@@ -1399,6 +1399,7 @@ static struct task_struct *copy_process(
+@@ -1549,6 +1549,7 @@ static __latent_entropy struct task_stru
  	spin_lock_init(&p->alloc_lock);
  
  	init_sigpending(&p->pending);
diff --git a/patches/skbufhead-raw-lock-free-skbs.patch b/patches/skbufhead-raw-lock-free-skbs.patch
deleted file mode 100644
index 76863f5..0000000
--- a/patches/skbufhead-raw-lock-free-skbs.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 05 Dec 2016 18:09:34 +0100
-Subject: net: free the skbs in skbufhead
-
-In "skbufhead-raw-lock.patch" we moved the memory to a list and the hunk that
-cleared the list got misplaced.
-
-Cc: stable@vger.kernel.org
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- net/core/dev.c |    8 ++++++++
- 1 file changed, 8 insertions(+)
-
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -5201,13 +5201,21 @@ static void net_rx_action(struct softirq
- 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
- 	unsigned long time_limit = jiffies + 2;
- 	int budget = netdev_budget;
-+	struct sk_buff_head tofree_q;
-+	struct sk_buff *skb;
- 	LIST_HEAD(list);
- 	LIST_HEAD(repoll);
- 
-+	__skb_queue_head_init(&tofree_q);
-+
- 	local_irq_disable();
-+	skb_queue_splice_init(&sd->tofree_queue, &tofree_q);
- 	list_splice_init(&sd->poll_list, &list);
- 	local_irq_enable();
- 
-+	while ((skb = __skb_dequeue(&tofree_q)))
-+		kfree_skb(skb);
-+
- 	for (;;) {
- 		struct napi_struct *n;
- 
diff --git a/patches/skbufhead-raw-lock.patch b/patches/skbufhead-raw-lock.patch
index 18cc081..4bb689f 100644
--- a/patches/skbufhead-raw-lock.patch
+++ b/patches/skbufhead-raw-lock.patch
@@ -10,12 +10,12 @@
 ---
  include/linux/netdevice.h |    1 +
  include/linux/skbuff.h    |    7 +++++++
- net/core/dev.c            |   19 +++++++++++++------
- 3 files changed, 21 insertions(+), 6 deletions(-)
+ net/core/dev.c            |   31 ++++++++++++++++++++++++-------
+ 3 files changed, 32 insertions(+), 7 deletions(-)
 
 --- a/include/linux/netdevice.h
 +++ b/include/linux/netdevice.h
-@@ -2831,6 +2831,7 @@ struct softnet_data {
+@@ -2846,6 +2846,7 @@ struct softnet_data {
  	unsigned int		dropped;
  	struct sk_buff_head	input_pkt_queue;
  	struct napi_struct	backlog;
@@ -33,7 +33,7 @@
  };
  
  struct sk_buff;
-@@ -1565,6 +1566,12 @@ static inline void skb_queue_head_init(s
+@@ -1573,6 +1574,12 @@ static inline void skb_queue_head_init(s
  	__skb_queue_head_init(list);
  }
  
@@ -65,31 +65,75 @@
  #endif
  }
  
-@@ -4323,7 +4323,7 @@ static void flush_backlog(void *arg)
+@@ -4315,7 +4315,7 @@ static void flush_backlog(struct work_st
  	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
- 		if (skb->dev == dev) {
+ 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
  			__skb_unlink(skb, &sd->input_pkt_queue);
 -			kfree_skb(skb);
 +			__skb_queue_tail(&sd->tofree_queue, skb);
  			input_queue_head_incr(sd);
  		}
  	}
-@@ -4332,10 +4332,13 @@ static void flush_backlog(void *arg)
+@@ -4325,11 +4325,14 @@ static void flush_backlog(struct work_st
  	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
- 		if (skb->dev == dev) {
+ 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
  			__skb_unlink(skb, &sd->process_queue);
 -			kfree_skb(skb);
 +			__skb_queue_tail(&sd->tofree_queue, skb);
  			input_queue_head_incr(sd);
  		}
  	}
-+
 +	if (!skb_queue_empty(&sd->tofree_queue))
 +		raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ 	local_bh_enable();
++
  }
  
- static int napi_gro_complete(struct sk_buff *skb)
-@@ -8004,6 +8007,9 @@ static int dev_cpu_callback(struct notif
+ static void flush_all_backlogs(void)
+@@ -4852,7 +4855,9 @@ static int process_backlog(struct napi_s
+ 	while (again) {
+ 		struct sk_buff *skb;
+ 
++		local_irq_disable();
+ 		while ((skb = __skb_dequeue(&sd->process_queue))) {
++			local_irq_enable();
+ 			rcu_read_lock();
+ 			__netif_receive_skb(skb);
+ 			rcu_read_unlock();
+@@ -4860,9 +4865,9 @@ static int process_backlog(struct napi_s
+ 			if (++work >= quota)
+ 				return work;
+ 
++			local_irq_disable();
+ 		}
+ 
+-		local_irq_disable();
+ 		rps_lock(sd);
+ 		if (skb_queue_empty(&sd->input_pkt_queue)) {
+ 			/*
+@@ -5204,13 +5209,21 @@ static __latent_entropy void net_rx_acti
+ 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+ 	unsigned long time_limit = jiffies + 2;
+ 	int budget = netdev_budget;
++	struct sk_buff_head tofree_q;
++	struct sk_buff *skb;
+ 	LIST_HEAD(list);
+ 	LIST_HEAD(repoll);
+ 
++	__skb_queue_head_init(&tofree_q);
++
+ 	local_irq_disable();
++	skb_queue_splice_init(&sd->tofree_queue, &tofree_q);
+ 	list_splice_init(&sd->poll_list, &list);
+ 	local_irq_enable();
+ 
++	while ((skb = __skb_dequeue(&tofree_q)))
++		kfree_skb(skb);
++
+ 	for (;;) {
+ 		struct napi_struct *n;
+ 
+@@ -8013,6 +8026,9 @@ static int dev_cpu_callback(struct notif
  		netif_rx_ni(skb);
  		input_queue_head_incr(oldsd);
  	}
@@ -99,9 +143,9 @@
  
  	return NOTIFY_OK;
  }
-@@ -8305,8 +8311,9 @@ static int __init net_dev_init(void)
- 	for_each_possible_cpu(i) {
- 		struct softnet_data *sd = &per_cpu(softnet_data, i);
+@@ -8317,8 +8333,9 @@ static int __init net_dev_init(void)
+ 
+ 		INIT_WORK(flush, flush_backlog);
  
 -		skb_queue_head_init(&sd->input_pkt_queue);
 -		skb_queue_head_init(&sd->process_queue);
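
The refreshed hunks fold the just-deleted skbufhead-raw-lock-free-skbs.patch
back into this patch: skbs are parked on sd->tofree_queue in atomic context,
and net_rx_action() splices that queue onto a local list with IRQs disabled,
then frees with IRQs back on. The splice-then-free pattern in plain userspace
C (a mutex standing in for local_irq_disable(); names illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct skb {
            struct skb *next;
            int id;
    };

    static struct skb *tofree_queue;    /* shared, protected by 'lock' */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Producer side: queue instead of freeing in the atomic path. */
    static void defer_free(struct skb *s)
    {
            pthread_mutex_lock(&lock);
            s->next = tofree_queue;
            tofree_queue = s;
            pthread_mutex_unlock(&lock);
    }

    /* Consumer side, the net_rx_action() shape. */
    static void rx_action(void)
    {
            struct skb *local, *s;

            pthread_mutex_lock(&lock);      /* local_irq_disable() */
            local = tofree_queue;           /* skb_queue_splice_init() */
            tofree_queue = NULL;
            pthread_mutex_unlock(&lock);    /* local_irq_enable() */

            while ((s = local)) {           /* free with the lock dropped */
                    local = s->next;
                    printf("freeing skb %d\n", s->id);
                    free(s);
            }
    }

    int main(void)
    {
            for (int i = 0; i < 3; i++) {
                    struct skb *s = malloc(sizeof(*s));

                    s->id = i;
                    defer_free(s);
            }
            rx_action();
            return 0;
    }
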
diff --git a/patches/slub-disable-SLUB_CPU_PARTIAL.patch b/patches/slub-disable-SLUB_CPU_PARTIAL.patch
index 571a433..4cdb5a6 100644
--- a/patches/slub-disable-SLUB_CPU_PARTIAL.patch
+++ b/patches/slub-disable-SLUB_CPU_PARTIAL.patch
@@ -36,7 +36,7 @@
 
 --- a/init/Kconfig
 +++ b/init/Kconfig
-@@ -1788,7 +1788,7 @@ config SLAB_FREELIST_RANDOM
+@@ -1799,7 +1799,7 @@ config SLAB_FREELIST_RANDOM
  
  config SLUB_CPU_PARTIAL
  	default y
diff --git a/patches/slub-enable-irqs-for-no-wait.patch b/patches/slub-enable-irqs-for-no-wait.patch
index b4af195..9d975d8 100644
--- a/patches/slub-enable-irqs-for-no-wait.patch
+++ b/patches/slub-enable-irqs-for-no-wait.patch
@@ -12,7 +12,7 @@
 
 --- a/mm/slub.c
 +++ b/mm/slub.c
-@@ -1533,14 +1533,17 @@ static struct page *allocate_slab(struct
+@@ -1529,14 +1529,17 @@ static struct page *allocate_slab(struct
  	void *start, *p;
  	int idx, order;
  	bool shuffle;
@@ -32,7 +32,7 @@
  		local_irq_enable();
  
  	flags |= s->allocflags;
-@@ -1615,11 +1618,7 @@ static struct page *allocate_slab(struct
+@@ -1611,11 +1614,7 @@ static struct page *allocate_slab(struct
  	page->frozen = 1;
  
  out:
diff --git a/patches/softirq-disable-softirq-stacks-for-rt.patch b/patches/softirq-disable-softirq-stacks-for-rt.patch
index 49ad29a..2ba3305 100644
--- a/patches/softirq-disable-softirq-stacks-for-rt.patch
+++ b/patches/softirq-disable-softirq-stacks-for-rt.patch
@@ -19,7 +19,7 @@
 
 --- a/arch/powerpc/kernel/irq.c
 +++ b/arch/powerpc/kernel/irq.c
-@@ -633,6 +633,7 @@ void irq_ctx_init(void)
+@@ -638,6 +638,7 @@ void irq_ctx_init(void)
  	}
  }
  
@@ -27,7 +27,7 @@
  void do_softirq_own_stack(void)
  {
  	struct thread_info *curtp, *irqtp;
-@@ -650,6 +651,7 @@ void do_softirq_own_stack(void)
+@@ -655,6 +656,7 @@ void do_softirq_own_stack(void)
  	if (irqtp->flags)
  		set_bits(irqtp->flags, &curtp->flags);
  }
@@ -37,7 +37,7 @@
  {
 --- a/arch/powerpc/kernel/misc_32.S
 +++ b/arch/powerpc/kernel/misc_32.S
-@@ -40,6 +40,7 @@
+@@ -41,6 +41,7 @@
   * We store the saved ksp_limit in the unused part
   * of the STACK_FRAME_OVERHEAD
   */
@@ -45,7 +45,7 @@
  _GLOBAL(call_do_softirq)
  	mflr	r0
  	stw	r0,4(r1)
-@@ -56,6 +57,7 @@
+@@ -57,6 +58,7 @@
  	stw	r10,THREAD+KSP_LIMIT(r2)
  	mtlr	r0
  	blr
@@ -55,7 +55,7 @@
   * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
 --- a/arch/powerpc/kernel/misc_64.S
 +++ b/arch/powerpc/kernel/misc_64.S
-@@ -30,6 +30,7 @@
+@@ -31,6 +31,7 @@
  
  	.text
  
@@ -63,7 +63,7 @@
  _GLOBAL(call_do_softirq)
  	mflr	r0
  	std	r0,16(r1)
-@@ -40,6 +41,7 @@
+@@ -41,6 +42,7 @@
  	ld	r0,16(r1)
  	mtlr	r0
  	blr
@@ -109,7 +109,7 @@
  void fixup_irqs(void)
 --- a/arch/x86/entry/entry_64.S
 +++ b/arch/x86/entry/entry_64.S
-@@ -817,6 +817,7 @@ END(native_load_gs_index)
+@@ -894,6 +894,7 @@ EXPORT_SYMBOL(native_load_gs_index)
  	jmp	2b
  	.previous
  
@@ -117,7 +117,7 @@
  /* Call softirq on interrupt stack. Interrupts are off. */
  ENTRY(do_softirq_own_stack)
  	pushq	%rbp
-@@ -829,6 +830,7 @@ ENTRY(do_softirq_own_stack)
+@@ -906,6 +907,7 @@ ENTRY(do_softirq_own_stack)
  	decl	PER_CPU_VAR(irq_count)
  	ret
  END(do_softirq_own_stack)
@@ -145,7 +145,7 @@
  {
 --- a/include/linux/interrupt.h
 +++ b/include/linux/interrupt.h
-@@ -464,7 +464,7 @@ struct softirq_action
+@@ -472,7 +472,7 @@ struct softirq_action
  asmlinkage void do_softirq(void);
  asmlinkage void __do_softirq(void);
  
diff --git a/patches/softirq-preempt-fix-3-re.patch b/patches/softirq-preempt-fix-3-re.patch
index e5f2e99..4c7a11b 100644
--- a/patches/softirq-preempt-fix-3-re.patch
+++ b/patches/softirq-preempt-fix-3-re.patch
@@ -30,15 +30,15 @@
  }
  
  /*
-@@ -93,6 +94,7 @@ static int blk_cpu_notify(struct notifie
- 				 this_cpu_ptr(&blk_cpu_done));
- 		raise_softirq_irqoff(BLOCK_SOFTIRQ);
- 		local_irq_enable();
-+		preempt_check_resched_rt();
- 	}
+@@ -89,6 +90,7 @@ static int blk_softirq_cpu_dead(unsigned
+ 			 this_cpu_ptr(&blk_cpu_done));
+ 	raise_softirq_irqoff(BLOCK_SOFTIRQ);
+ 	local_irq_enable();
++	preempt_check_resched_rt();
  
- 	return NOTIFY_OK;
-@@ -150,6 +152,7 @@ void __blk_complete_request(struct reque
+ 	return 0;
+ }
+@@ -141,6 +143,7 @@ void __blk_complete_request(struct reque
  		goto do_local;
  
  	local_irq_restore(flags);
@@ -85,7 +85,7 @@
  }
  EXPORT_SYMBOL(irq_poll_complete);
  
-@@ -95,6 +97,7 @@ static void irq_poll_softirq(struct soft
+@@ -95,6 +97,7 @@ static void __latent_entropy irq_poll_so
  		}
  
  		local_irq_enable();
@@ -93,7 +93,7 @@
  
  		/* Even though interrupts have been re-enabled, this
  		 * access is safe because interrupts can only add new
-@@ -132,6 +135,7 @@ static void irq_poll_softirq(struct soft
+@@ -132,6 +135,7 @@ static void __latent_entropy irq_poll_so
  		__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
  
  	local_irq_enable();
@@ -101,17 +101,17 @@
  }
  
  /**
-@@ -199,6 +203,7 @@ static int irq_poll_cpu_notify(struct no
- 				 this_cpu_ptr(&blk_cpu_iopoll));
- 		__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
- 		local_irq_enable();
-+		preempt_check_resched_rt();
- 	}
+@@ -195,6 +199,7 @@ static int irq_poll_cpu_dead(unsigned in
+ 			 this_cpu_ptr(&blk_cpu_iopoll));
+ 	__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+ 	local_irq_enable();
++	preempt_check_resched_rt();
  
- 	return NOTIFY_OK;
+ 	return 0;
+ }
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -2268,6 +2268,7 @@ static void __netif_reschedule(struct Qd
+@@ -2263,6 +2263,7 @@ static void __netif_reschedule(struct Qd
  	sd->output_queue_tailp = &q->next_sched;
  	raise_softirq_irqoff(NET_TX_SOFTIRQ);
  	local_irq_restore(flags);
@@ -119,7 +119,7 @@
  }
  
  void __netif_schedule(struct Qdisc *q)
-@@ -2349,6 +2350,7 @@ void __dev_kfree_skb_irq(struct sk_buff
+@@ -2344,6 +2345,7 @@ void __dev_kfree_skb_irq(struct sk_buff
  	__this_cpu_write(softnet_data.completion_queue, skb);
  	raise_softirq_irqoff(NET_TX_SOFTIRQ);
  	local_irq_restore(flags);
@@ -127,7 +127,7 @@
  }
  EXPORT_SYMBOL(__dev_kfree_skb_irq);
  
-@@ -3778,6 +3780,7 @@ static int enqueue_to_backlog(struct sk_
+@@ -3763,6 +3765,7 @@ static int enqueue_to_backlog(struct sk_
  	rps_unlock(sd);
  
  	local_irq_restore(flags);
@@ -135,7 +135,7 @@
  
  	atomic_long_inc(&skb->dev->rx_dropped);
  	kfree_skb(skb);
-@@ -4797,6 +4800,7 @@ static void net_rps_action_and_irq_enabl
+@@ -4807,6 +4810,7 @@ static void net_rps_action_and_irq_enabl
  		sd->rps_ipi_list = NULL;
  
  		local_irq_enable();
@@ -143,7 +143,7 @@
  
  		/* Send pending IPI's to kick RPS processing on remote cpus. */
  		while (remsd) {
-@@ -4810,6 +4814,7 @@ static void net_rps_action_and_irq_enabl
+@@ -4820,6 +4824,7 @@ static void net_rps_action_and_irq_enabl
  	} else
  #endif
  		local_irq_enable();
@@ -151,7 +151,7 @@
  }
  
  static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -4891,6 +4896,7 @@ void __napi_schedule(struct napi_struct
+@@ -4897,6 +4902,7 @@ void __napi_schedule(struct napi_struct
  	local_irq_save(flags);
  	____napi_schedule(this_cpu_ptr(&softnet_data), n);
  	local_irq_restore(flags);
@@ -159,7 +159,7 @@
  }
  EXPORT_SYMBOL(__napi_schedule);
  
-@@ -7989,6 +7995,7 @@ static int dev_cpu_callback(struct notif
+@@ -7998,6 +8004,7 @@ static int dev_cpu_callback(struct notif
  
  	raise_softirq_irqoff(NET_TX_SOFTIRQ);
  	local_irq_enable();
diff --git a/patches/softirq-split-locks.patch b/patches/softirq-split-locks.patch
index 5325d64..ab2c5bf 100644
--- a/patches/softirq-split-locks.patch
+++ b/patches/softirq-split-locks.patch
@@ -29,10 +29,10 @@
  include/linux/preempt.h     |   15 +
  include/linux/sched.h       |    3 
  init/main.c                 |    1 
- kernel/softirq.c            |  488 +++++++++++++++++++++++++++++++++++++-------
+ kernel/softirq.c            |  492 +++++++++++++++++++++++++++++++++++++-------
  kernel/time/tick-sched.c    |    9 
  net/core/dev.c              |    6 
- 8 files changed, 477 insertions(+), 94 deletions(-)
+ 8 files changed, 480 insertions(+), 95 deletions(-)
 
 --- a/include/linux/bottom_half.h
 +++ b/include/linux/bottom_half.h
@@ -85,7 +85,7 @@
  #endif /* _LINUX_BH_H */
 --- a/include/linux/interrupt.h
 +++ b/include/linux/interrupt.h
-@@ -461,10 +461,11 @@ struct softirq_action
+@@ -469,10 +469,11 @@ struct softirq_action
  	void	(*action)(struct softirq_action *);
  };
  
@@ -99,7 +99,7 @@
  void do_softirq_own_stack(void);
  #else
  static inline void do_softirq_own_stack(void)
-@@ -472,6 +473,9 @@ static inline void do_softirq_own_stack(
+@@ -480,6 +481,9 @@ static inline void do_softirq_own_stack(
  	__do_softirq();
  }
  #endif
@@ -109,7 +109,7 @@
  
  extern void open_softirq(int nr, void (*action)(struct softirq_action *));
  extern void softirq_init(void);
-@@ -479,6 +483,7 @@ extern void __raise_softirq_irqoff(unsig
+@@ -487,6 +491,7 @@ extern void __raise_softirq_irqoff(unsig
  
  extern void raise_softirq_irqoff(unsigned int nr);
  extern void raise_softirq(unsigned int nr);
@@ -117,7 +117,7 @@
  
  DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
  
-@@ -636,6 +641,12 @@ void tasklet_hrtimer_cancel(struct taskl
+@@ -644,6 +649,12 @@ void tasklet_hrtimer_cancel(struct taskl
  	tasklet_kill(&ttimer->tasklet);
  }
  
@@ -172,7 +172,7 @@
   * Are we in NMI context?
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1939,6 +1939,8 @@ struct task_struct {
+@@ -1964,6 +1964,8 @@ struct task_struct {
  #endif
  #ifdef CONFIG_PREEMPT_RT_BASE
  	struct rcu_head put_rcu;
@@ -181,7 +181,7 @@
  #endif
  #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
  	unsigned long	task_state_change;
-@@ -2236,6 +2238,7 @@ extern void thread_group_cputime_adjuste
+@@ -2280,6 +2282,7 @@ extern void thread_group_cputime_adjuste
  /*
   * Per process flags
   */
@@ -308,7 +308,7 @@
  /*
   * we cannot loop indefinitely here to avoid userspace starvation,
   * but we also don't want to introduce a worst case 1/HZ latency
-@@ -78,6 +171,68 @@ static void wakeup_softirqd(void)
+@@ -78,6 +171,26 @@ static void wakeup_softirqd(void)
  		wake_up_process(tsk);
  }
  
@@ -332,6 +332,13 @@
 +	}
 +}
 +
+ /*
+  * If ksoftirqd is scheduled, we do not want to process pending softirqs
+  * right now. Let ksoftirqd handle this at its own rate, to get fairness.
+@@ -89,6 +202,48 @@ static bool ksoftirqd_running(void)
+ 	return tsk && (tsk->state == TASK_RUNNING);
+ }
+ 
 +#ifndef CONFIG_PREEMPT_RT_FULL
 +static inline int ksoftirqd_softirq_pending(void)
 +{
@@ -377,7 +384,7 @@
  /*
   * preempt_count and SOFTIRQ_OFFSET usage:
   * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
-@@ -233,10 +388,8 @@ asmlinkage __visible void __softirq_entr
+@@ -244,10 +399,8 @@ asmlinkage __visible void __softirq_entr
  	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
  	unsigned long old_flags = current->flags;
  	int max_restart = MAX_SOFTIRQ_RESTART;
@@ -388,7 +395,7 @@
  
  	/*
  	 * Mask out PF_MEMALLOC s current task context is borrowed for the
-@@ -255,36 +408,7 @@ asmlinkage __visible void __softirq_entr
+@@ -266,36 +419,7 @@ asmlinkage __visible void __softirq_entr
  	/* Reset the pending bitmask before enabling irqs */
  	set_softirq_pending(0);
  
@@ -426,7 +433,7 @@
  
  	pending = local_softirq_pending();
  	if (pending) {
-@@ -321,6 +445,246 @@ asmlinkage __visible void do_softirq(voi
+@@ -332,6 +456,246 @@ asmlinkage __visible void do_softirq(voi
  }
  
  /*
@@ -673,7 +680,7 @@
   * Enter an interrupt context.
   */
  void irq_enter(void)
-@@ -331,9 +695,9 @@ void irq_enter(void)
+@@ -342,9 +706,9 @@ void irq_enter(void)
  		 * Prevent raise_softirq from needlessly waking up ksoftirqd
  		 * here, as softirq will be serviced on return from interrupt.
  		 */
@@ -685,20 +692,26 @@
  	}
  
  	__irq_enter();
-@@ -341,6 +705,7 @@ void irq_enter(void)
+@@ -352,9 +716,13 @@ void irq_enter(void)
  
  static inline void invoke_softirq(void)
  {
++#ifdef CONFIG_PREEMPT_RT_FULL
++	unsigned long flags;
++#endif
++
+ 	if (ksoftirqd_running())
+ 		return;
+-
 +#ifndef CONFIG_PREEMPT_RT_FULL
  	if (!force_irqthreads) {
  #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
  		/*
-@@ -360,6 +725,15 @@ static inline void invoke_softirq(void)
+@@ -374,6 +742,14 @@ static inline void invoke_softirq(void)
  	} else {
  		wakeup_softirqd();
  	}
 +#else /* PREEMPT_RT_FULL */
-+	unsigned long flags;
 +
 +	local_irq_save(flags);
 +	if (__this_cpu_read(ksoftirqd) &&
@@ -709,7 +722,7 @@
  }
  
  static inline void tick_irq_exit(void)
-@@ -396,26 +770,6 @@ void irq_exit(void)
+@@ -410,26 +786,6 @@ void irq_exit(void)
  	trace_hardirq_exit(); /* must be last! */
  }
  
@@ -736,7 +749,7 @@
  void raise_softirq(unsigned int nr)
  {
  	unsigned long flags;
-@@ -425,12 +779,6 @@ void raise_softirq(unsigned int nr)
+@@ -439,12 +795,6 @@ void raise_softirq(unsigned int nr)
  	local_irq_restore(flags);
  }
  
@@ -749,7 +762,7 @@
  void open_softirq(int nr, void (*action)(struct softirq_action *))
  {
  	softirq_vec[nr].action = action;
-@@ -733,23 +1081,7 @@ EXPORT_SYMBOL(tasklet_unlock_wait);
+@@ -747,23 +1097,7 @@ EXPORT_SYMBOL(tasklet_unlock_wait);
  
  static int ksoftirqd_should_run(unsigned int cpu)
  {
@@ -774,7 +787,7 @@
  }
  
  #ifdef CONFIG_HOTPLUG_CPU
-@@ -831,6 +1163,8 @@ static struct notifier_block cpu_nfb = {
+@@ -830,6 +1164,8 @@ static int takeover_tasklets(unsigned in
  
  static struct smp_hotplug_thread softirq_threads = {
  	.store			= &ksoftirqd,
@@ -785,7 +798,7 @@
  	.thread_comm		= "ksoftirqd/%u",
 --- a/kernel/time/tick-sched.c
 +++ b/kernel/time/tick-sched.c
-@@ -879,14 +879,7 @@ static bool can_stop_idle_tick(int cpu,
+@@ -882,14 +882,7 @@ static bool can_stop_idle_tick(int cpu,
  		return false;
  
  	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
@@ -803,7 +816,7 @@
  
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -3849,11 +3849,9 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3834,11 +3834,9 @@ int netif_rx_ni(struct sk_buff *skb)
  
  	trace_netif_rx_ni_entry(skb);
  
diff --git a/patches/softirq-split-timer-softirqs-out-of-ksoftirqd.patch b/patches/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
index 449b78f..8428868 100644
--- a/patches/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
+++ b/patches/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
@@ -23,7 +23,7 @@
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 ---
  kernel/softirq.c |   85 +++++++++++++++++++++++++++++++++++++++++++++++--------
- 1 file changed, 74 insertions(+), 11 deletions(-)
+ 1 file changed, 73 insertions(+), 12 deletions(-)
 
 --- a/kernel/softirq.c
 +++ b/kernel/softirq.c
@@ -32,12 +32,12 @@
  
  DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 +#ifdef CONFIG_PREEMPT_RT_FULL
-+#define TIMER_SOFTIRQS	((1 << TIMER_SOFTIRQ) | (1 << HRTIMER_SOFTIRQ))
++#define TIMER_SOFTIRQS ((1 << TIMER_SOFTIRQ) | (1 << HRTIMER_SOFTIRQ))
 +DEFINE_PER_CPU(struct task_struct *, ktimer_softirqd);
 +#endif
  
  const char * const softirq_to_name[NR_SOFTIRQS] = {
- 	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
+ 	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
 @@ -171,6 +175,17 @@ static void wakeup_softirqd(void)
  		wake_up_process(tsk);
  }
@@ -56,7 +56,7 @@
  static void handle_softirq(unsigned int vec_nr)
  {
  	struct softirq_action *h = softirq_vec + vec_nr;
-@@ -473,7 +488,6 @@ void __raise_softirq_irqoff(unsigned int
+@@ -484,7 +499,6 @@ void __raise_softirq_irqoff(unsigned int
  static inline void local_bh_disable_nort(void) { local_bh_disable(); }
  static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
  static void ksoftirqd_set_sched_params(unsigned int cpu) { }
@@ -64,7 +64,7 @@
  
  #else /* !PREEMPT_RT_FULL */
  
-@@ -620,8 +634,12 @@ void thread_do_softirq(void)
+@@ -631,8 +645,12 @@ void thread_do_softirq(void)
  
  static void do_raise_softirq_irqoff(unsigned int nr)
  {
@@ -78,7 +78,7 @@
  
  	/*
  	 * If we are not in a hard interrupt and inside a bh disabled
-@@ -630,16 +648,30 @@ static void do_raise_softirq_irqoff(unsi
+@@ -641,16 +659,29 @@ static void do_raise_softirq_irqoff(unsi
  	 * delegate it to ksoftirqd.
  	 */
  	if (!in_irq() && current->softirq_nestcnt)
@@ -103,7 +103,6 @@
 +		wakeup_softirqd();
  }
  
-+
  void __raise_softirq_irqoff(unsigned int nr)
  {
  	do_raise_softirq_irqoff(nr);
@@ -113,7 +112,7 @@
  }
  
  /*
-@@ -665,7 +697,7 @@ void raise_softirq_irqoff(unsigned int n
+@@ -676,7 +707,7 @@ void raise_softirq_irqoff(unsigned int n
  	 * raise a WARN() if the condition is met.
  	 */
  	if (!current->softirq_nestcnt)
@@ -122,7 +121,7 @@
  }
  
  static inline int ksoftirqd_softirq_pending(void)
-@@ -678,22 +710,37 @@ static inline void _local_bh_enable_nort
+@@ -689,22 +720,37 @@ static inline void _local_bh_enable_nort
  
  static inline void ksoftirqd_set_sched_params(unsigned int cpu)
  {
@@ -163,7 +162,7 @@
  #endif /* PREEMPT_RT_FULL */
  /*
   * Enter an interrupt context.
-@@ -743,6 +790,9 @@ static inline void invoke_softirq(void)
+@@ -759,6 +805,9 @@ static inline void invoke_softirq(void)
  	if (__this_cpu_read(ksoftirqd) &&
  			__this_cpu_read(ksoftirqd)->softirqs_raised)
  		wakeup_softirqd();
@@ -173,7 +172,7 @@
  	local_irq_restore(flags);
  #endif
  }
-@@ -1175,17 +1225,30 @@ static struct notifier_block cpu_nfb = {
+@@ -1176,18 +1225,30 @@ static int takeover_tasklets(unsigned in
  static struct smp_hotplug_thread softirq_threads = {
  	.store			= &ksoftirqd,
  	.setup			= ksoftirqd_set_sched_params,
@@ -196,12 +195,13 @@
 +
  static __init int spawn_ksoftirqd(void)
  {
- 	register_cpu_notifier(&cpu_nfb);
- 
+ 	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
+ 				  takeover_tasklets);
  	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
+-
 +#ifdef CONFIG_PREEMPT_RT_FULL
 +	BUG_ON(smpboot_register_percpu_thread(&softirq_timer_threads));
 +#endif
- 
  	return 0;
  }
+ early_initcall(spawn_ksoftirqd);
diff --git a/patches/sparc64-use-generic-rwsem-spinlocks-rt.patch b/patches/sparc64-use-generic-rwsem-spinlocks-rt.patch
index 66c1f9b..519070e 100644
--- a/patches/sparc64-use-generic-rwsem-spinlocks-rt.patch
+++ b/patches/sparc64-use-generic-rwsem-spinlocks-rt.patch
@@ -10,7 +10,7 @@
 
 --- a/arch/sparc/Kconfig
 +++ b/arch/sparc/Kconfig
-@@ -187,12 +187,10 @@ config NR_CPUS
+@@ -194,12 +194,10 @@ config NR_CPUS
  source kernel/Kconfig.hz
  
  config RWSEM_GENERIC_SPINLOCK
diff --git a/patches/stomp-machine-create-lg_global_trylock_relax-primiti.patch b/patches/stomp-machine-create-lg_global_trylock_relax-primiti.patch
deleted file mode 100644
index ce3ee6f..0000000
--- a/patches/stomp-machine-create-lg_global_trylock_relax-primiti.patch
+++ /dev/null
@@ -1,86 +0,0 @@
-From: Mike Galbraith <umgwanakikbuti@gmail.com>
-Date: Fri, 2 May 2014 13:13:22 +0200
-Subject: stomp-machine: create lg_global_trylock_relax() primitive
-
-Create lg_global_trylock_relax() for use by the stopper thread when it
-cannot schedule, to deal with stop_cpus_lock, which is now an lglock.
-
-Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/lglock.h      |    6 ++++++
- include/linux/spinlock_rt.h |    1 +
- kernel/locking/lglock.c     |   25 +++++++++++++++++++++++++
- kernel/locking/rtmutex.c    |    5 +++++
- 4 files changed, 37 insertions(+)
-
---- a/include/linux/lglock.h
-+++ b/include/linux/lglock.h
-@@ -82,6 +82,12 @@ void lg_double_unlock(struct lglock *lg,
- void lg_global_lock(struct lglock *lg);
- void lg_global_unlock(struct lglock *lg);
- 
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+#define lg_global_trylock_relax(name)	lg_global_lock(name)
-+#else
-+void lg_global_trylock_relax(struct lglock *lg);
-+#endif
-+
- #else
- /* When !CONFIG_SMP, map lglock to spinlock */
- #define lglock spinlock
---- a/include/linux/spinlock_rt.h
-+++ b/include/linux/spinlock_rt.h
-@@ -40,6 +40,7 @@ extern int atomic_dec_and_spin_lock(atom
- extern void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock);
- extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
- extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
-+extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock);
- 
- #define spin_lock(lock)			rt_spin_lock(lock)
- 
---- a/kernel/locking/lglock.c
-+++ b/kernel/locking/lglock.c
-@@ -127,3 +127,28 @@ void lg_global_unlock(struct lglock *lg)
- 	preempt_enable_nort();
- }
- EXPORT_SYMBOL(lg_global_unlock);
-+
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+/*
-+ * HACK: If you use this, you get to keep the pieces.
-+ * Used in queue_stop_cpus_work() when stop machinery
-+ * is called from inactive CPU, so we can't schedule.
-+ */
-+# define lg_do_trylock_relax(l)			\
-+	do {					\
-+		while (!__rt_spin_trylock(l))	\
-+			cpu_relax();		\
-+	} while (0)
-+
-+void lg_global_trylock_relax(struct lglock *lg)
-+{
-+	int i;
-+
-+	lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
-+	for_each_possible_cpu(i) {
-+		lg_lock_ptr *lock;
-+		lock = per_cpu_ptr(lg->lock, i);
-+		lg_do_trylock_relax(lock);
-+	}
-+}
-+#endif
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -1279,6 +1279,11 @@ void __lockfunc rt_spin_unlock_wait(spin
- }
- EXPORT_SYMBOL(rt_spin_unlock_wait);
- 
-+int __lockfunc __rt_spin_trylock(struct rt_mutex *lock)
-+{
-+	return rt_mutex_trylock(lock);
-+}
-+
- int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock)
- {
- 	int ret;
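
The patch above is gone from the queue: with v4.9, stop_machine no
longer uses the stop_cpus_lock lglock (the refreshed
stop-machine-raw-lock.patch below now meets stop_cpus_in_progress
instead), so there is nothing left for lg_global_trylock_relax() to
wrap. What it implemented was a plain trylock/relax spin across a set
of per-CPU locks. A rough, self-contained userspace analogue of that
idiom — pthread names stand in for the kernel primitives, this is not
the removed code:

    #include <pthread.h>
    #include <sched.h>

    /*
     * Take every lock in a set without ever sleeping: spin on
     * trylock and yield between attempts. sched_yield() stands in
     * for cpu_relax(); the kernel loop busy-waits instead.
     */
    static void global_trylock_relax(pthread_mutex_t *locks, int nlocks)
    {
            for (int i = 0; i < nlocks; i++)
                    while (pthread_mutex_trylock(&locks[i]) != 0)
                            sched_yield();
    }
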
diff --git a/patches/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch b/patches/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch
deleted file mode 100644
index dc0ba8d..0000000
--- a/patches/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch
+++ /dev/null
@@ -1,73 +0,0 @@
-From: Mike Galbraith <umgwanakikbuti@gmail.com>
-Date: Fri, 2 May 2014 13:13:34 +0200
-Subject: stomp-machine: use lg_global_trylock_relax() to deal with stop_cpus_lock lglock
-
-If the stop machinery is called from an inactive CPU we cannot use
-lg_global_lock(), because some other stomp machine invocation might be
-in progress and the lock can be contended.  We cannot schedule from this
-context, so use the lovely new lg_global_trylock_relax() primitive to
-do what we used to do via one mutex_trylock()/cpu_relax() loop.  We
-now do that trylock()/relax() across an entire herd of locks. Joy.
-
-Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/stop_machine.c |   19 ++++++++++++-------
- 1 file changed, 12 insertions(+), 7 deletions(-)
-
---- a/kernel/stop_machine.c
-+++ b/kernel/stop_machine.c
-@@ -321,18 +321,21 @@ static DEFINE_MUTEX(stop_cpus_mutex);
- 
- static bool queue_stop_cpus_work(const struct cpumask *cpumask,
- 				 cpu_stop_fn_t fn, void *arg,
--				 struct cpu_stop_done *done)
-+				 struct cpu_stop_done *done, bool inactive)
- {
- 	struct cpu_stop_work *work;
- 	unsigned int cpu;
- 	bool queued = false;
- 
- 	/*
--	 * Disable preemption while queueing to avoid getting
--	 * preempted by a stopper which might wait for other stoppers
--	 * to enter @fn which can lead to deadlock.
-+	 * Make sure that all work is queued on all cpus before
-+	 * any of the cpus can execute it.
- 	 */
--	lg_global_lock(&stop_cpus_lock);
-+	if (!inactive)
-+		lg_global_lock(&stop_cpus_lock);
-+	else
-+		lg_global_trylock_relax(&stop_cpus_lock);
-+
- 	for_each_cpu(cpu, cpumask) {
- 		work = &per_cpu(cpu_stopper.stop_work, cpu);
- 		work->fn = fn;
-@@ -352,7 +355,7 @@ static int __stop_cpus(const struct cpum
- 	struct cpu_stop_done done;
- 
- 	cpu_stop_init_done(&done, cpumask_weight(cpumask));
--	if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
-+	if (!queue_stop_cpus_work(cpumask, fn, arg, &done, false))
- 		return -ENOENT;
- 	wait_for_completion(&done.completion);
- 	return done.ret;
-@@ -540,6 +543,8 @@ static int __init cpu_stop_init(void)
- 		INIT_LIST_HEAD(&stopper->works);
- 	}
- 
-+	lg_lock_init(&stop_cpus_lock, "stop_cpus_lock");
-+
- 	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
- 	stop_machine_unpark(raw_smp_processor_id());
- 	stop_machine_initialized = true;
-@@ -634,7 +639,7 @@ int stop_machine_from_inactive_cpu(cpu_s
- 	set_state(&msdata, MULTI_STOP_PREPARE);
- 	cpu_stop_init_done(&done, num_active_cpus());
- 	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
--			     &done);
-+			     &done, true);
- 	ret = multi_cpu_stop(&msdata);
- 
- 	/* Busy wait for completion. */
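
Its companion above is dropped for the same reason: no lglock, no herd
of locks to queue the stop works under. The call-site pattern it used
— block when the caller may sleep, busy-spin on the inactive-CPU path
where it must not — looks like this in the same userspace sketch
(illustrative names only):

    #include <pthread.h>
    #include <sched.h>

    /* Block in normal context; on the "inactive CPU" path, where
     * scheduling is not allowed, fall back to trylock plus yield. */
    static void lock_maybe_atomic(pthread_mutex_t *lock, int may_sleep)
    {
            if (may_sleep)
                    pthread_mutex_lock(lock);
            else
                    while (pthread_mutex_trylock(lock) != 0)
                            sched_yield();
    }
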
diff --git a/patches/stop-machine-raw-lock.patch b/patches/stop-machine-raw-lock.patch
index f52693b..412d5bc 100644
--- a/patches/stop-machine-raw-lock.patch
+++ b/patches/stop-machine-raw-lock.patch
@@ -6,12 +6,12 @@
 
 Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 ---
- kernel/stop_machine.c |   24 ++++++++++++------------
- 1 file changed, 12 insertions(+), 12 deletions(-)
+ kernel/stop_machine.c |   34 +++++++++++++---------------------
+ 1 file changed, 13 insertions(+), 21 deletions(-)
 
 --- a/kernel/stop_machine.c
 +++ b/kernel/stop_machine.c
-@@ -37,7 +37,7 @@ struct cpu_stop_done {
+@@ -36,7 +36,7 @@ struct cpu_stop_done {
  struct cpu_stopper {
  	struct task_struct	*thread;
  
@@ -20,7 +20,7 @@
  	bool			enabled;	/* is this stopper enabled? */
  	struct list_head	works;		/* list of pending works */
  
-@@ -83,14 +83,14 @@ static bool cpu_stop_queue_work(unsigned
+@@ -78,14 +78,14 @@ static bool cpu_stop_queue_work(unsigned
  	unsigned long flags;
  	bool enabled;
  
@@ -37,10 +37,10 @@
  	return enabled;
  }
  
-@@ -232,8 +232,8 @@ static int cpu_stop_queue_two_works(int
+@@ -231,8 +231,8 @@ static int cpu_stop_queue_two_works(int
+ 	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
  	int err;
- 
- 	lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
+ retry:
 -	spin_lock_irq(&stopper1->lock);
 -	spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
 +	raw_spin_lock_irq(&stopper1->lock);
@@ -48,7 +48,7 @@
  
  	err = -ENOENT;
  	if (!stopper1->enabled || !stopper2->enabled)
-@@ -243,8 +243,8 @@ static int cpu_stop_queue_two_works(int
+@@ -255,8 +255,8 @@ static int cpu_stop_queue_two_works(int
  	__cpu_stop_queue_work(stopper1, work1);
  	__cpu_stop_queue_work(stopper2, work2);
  unlock:
@@ -56,10 +56,10 @@
 -	spin_unlock_irq(&stopper1->lock);
 +	raw_spin_unlock(&stopper2->lock);
 +	raw_spin_unlock_irq(&stopper1->lock);
- 	lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);
  
- 	return err;
-@@ -433,9 +433,9 @@ static int cpu_stop_should_run(unsigned
+ 	if (unlikely(err == -EDEADLK)) {
+ 		while (stop_cpus_in_progress)
+@@ -448,9 +448,9 @@ static int cpu_stop_should_run(unsigned
  	unsigned long flags;
  	int run;
  
@@ -71,7 +71,7 @@
  	return run;
  }
  
-@@ -446,13 +446,13 @@ static void cpu_stopper_thread(unsigned
+@@ -461,13 +461,13 @@ static void cpu_stopper_thread(unsigned
  
  repeat:
  	work = NULL;
@@ -87,7 +87,24 @@
  
  	if (work) {
  		cpu_stop_fn_t fn = work->fn;
-@@ -536,7 +536,7 @@ static int __init cpu_stop_init(void)
+@@ -475,15 +475,7 @@ static void cpu_stopper_thread(unsigned
+ 		struct cpu_stop_done *done = work->done;
+ 		int ret;
+ 
+-		/*
+-		 * Wait until the stopper finished scheduling on all
+-		 * cpus
+-		 */
+-		lg_global_lock(&stop_cpus_lock);
+-		/*
+-		 * Let other cpu threads continue as well
+-		 */
+-		lg_global_unlock(&stop_cpus_lock);
++		/* XXX */
+ 
+ 		/* cpu stop callbacks must not sleep, make in_atomic() == T */
+ 		preempt_count_inc();
+@@ -551,7 +543,7 @@ static int __init cpu_stop_init(void)
  	for_each_possible_cpu(cpu) {
  		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
  
diff --git a/patches/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch b/patches/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
index 83e60c5..3f01729 100644
--- a/patches/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
+++ b/patches/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
@@ -15,7 +15,7 @@
 
 --- a/kernel/stop_machine.c
 +++ b/kernel/stop_machine.c
-@@ -460,6 +460,16 @@ static void cpu_stopper_thread(unsigned
+@@ -475,6 +475,16 @@ static void cpu_stopper_thread(unsigned
  		struct cpu_stop_done *done = work->done;
  		int ret;
  
diff --git a/patches/suspend-prevernt-might-sleep-splats.patch b/patches/suspend-prevernt-might-sleep-splats.patch
index 512b07a..452a52a 100644
--- a/patches/suspend-prevernt-might-sleep-splats.patch
+++ b/patches/suspend-prevernt-might-sleep-splats.patch
@@ -25,7 +25,7 @@
 
 --- a/include/linux/kernel.h
 +++ b/include/linux/kernel.h
-@@ -491,6 +491,7 @@ extern enum system_states {
+@@ -488,6 +488,7 @@ extern enum system_states {
  	SYSTEM_HALT,
  	SYSTEM_POWER_OFF,
  	SYSTEM_RESTART,
@@ -44,7 +44,7 @@
  	error = syscore_suspend();
  	if (error) {
  		printk(KERN_ERR "PM: Some system devices failed to power down, "
-@@ -315,6 +317,7 @@ static int create_image(int platform_mod
+@@ -317,6 +319,7 @@ static int create_image(int platform_mod
  	syscore_resume();
  
   Enable_irqs:
@@ -52,7 +52,7 @@
  	local_irq_enable();
  
   Enable_cpus:
-@@ -444,6 +447,7 @@ static int resume_target_kernel(bool pla
+@@ -446,6 +449,7 @@ static int resume_target_kernel(bool pla
  		goto Enable_cpus;
  
  	local_irq_disable();
@@ -60,7 +60,7 @@
  
  	error = syscore_suspend();
  	if (error)
-@@ -477,6 +481,7 @@ static int resume_target_kernel(bool pla
+@@ -479,6 +483,7 @@ static int resume_target_kernel(bool pla
  	syscore_resume();
  
   Enable_irqs:
@@ -68,7 +68,7 @@
  	local_irq_enable();
  
   Enable_cpus:
-@@ -562,6 +567,7 @@ int hibernation_platform_enter(void)
+@@ -564,6 +569,7 @@ int hibernation_platform_enter(void)
  		goto Enable_cpus;
  
  	local_irq_disable();
@@ -76,7 +76,7 @@
  	syscore_suspend();
  	if (pm_wakeup_pending()) {
  		error = -EAGAIN;
-@@ -574,6 +580,7 @@ int hibernation_platform_enter(void)
+@@ -576,6 +582,7 @@ int hibernation_platform_enter(void)
  
   Power_up:
  	syscore_resume();
@@ -86,7 +86,7 @@
   Enable_cpus:
 --- a/kernel/power/suspend.c
 +++ b/kernel/power/suspend.c
-@@ -361,6 +361,8 @@ static int suspend_enter(suspend_state_t
+@@ -369,6 +369,8 @@ static int suspend_enter(suspend_state_t
  	arch_suspend_disable_irqs();
  	BUG_ON(!irqs_disabled());
  
@@ -95,7 +95,7 @@
  	error = syscore_suspend();
  	if (!error) {
  		*wakeup = pm_wakeup_pending();
-@@ -377,6 +379,8 @@ static int suspend_enter(suspend_state_t
+@@ -385,6 +387,8 @@ static int suspend_enter(suspend_state_t
  		syscore_resume();
  	}
  
diff --git a/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch b/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
index 803a0bd..808d6cd 100644
--- a/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
+++ b/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
@@ -43,7 +43,7 @@
 
 --- a/include/linux/interrupt.h
 +++ b/include/linux/interrupt.h
-@@ -500,8 +500,9 @@ static inline struct task_struct *this_c
+@@ -508,8 +508,9 @@ static inline struct task_struct *this_c
       to be executed on some cpu at least once after this.
     * If the tasklet is already scheduled, but its execution is still not
       started, it will be executed only once.
@@ -55,7 +55,7 @@
     * Tasklet is strictly serialized wrt itself, but not
       wrt another tasklets. If client needs some intertask synchronization,
       he makes it with spinlocks.
-@@ -526,27 +527,36 @@ struct tasklet_struct name = { NULL, 0,
+@@ -534,27 +535,36 @@ struct tasklet_struct name = { NULL, 0,
  enum
  {
  	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
@@ -98,7 +98,7 @@
  #define tasklet_unlock_wait(t) do { } while (0)
  #define tasklet_unlock(t) do { } while (0)
  #endif
-@@ -595,12 +605,7 @@ static inline void tasklet_disable(struc
+@@ -603,12 +613,7 @@ static inline void tasklet_disable(struc
  	smp_mb();
  }
  
@@ -122,7 +122,7 @@
  #include <linux/ftrace.h>
  #include <linux/smp.h>
  #include <linux/smpboot.h>
-@@ -446,15 +447,45 @@ struct tasklet_head {
+@@ -460,15 +461,45 @@ struct tasklet_head {
  static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
  static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
  
@@ -172,7 +172,7 @@
  	local_irq_restore(flags);
  }
  EXPORT_SYMBOL(__tasklet_schedule);
-@@ -464,10 +495,7 @@ void __tasklet_hi_schedule(struct taskle
+@@ -478,10 +509,7 @@ void __tasklet_hi_schedule(struct taskle
  	unsigned long flags;
  
  	local_irq_save(flags);
@@ -184,7 +184,7 @@
  	local_irq_restore(flags);
  }
  EXPORT_SYMBOL(__tasklet_hi_schedule);
-@@ -476,82 +504,122 @@ void __tasklet_hi_schedule_first(struct
+@@ -490,82 +518,122 @@ void __tasklet_hi_schedule_first(struct
  {
  	BUG_ON(!irqs_disabled());
  
@@ -195,7 +195,7 @@
  }
  EXPORT_SYMBOL(__tasklet_hi_schedule_first);
  
--static void tasklet_action(struct softirq_action *a)
+-static __latent_entropy void tasklet_action(struct softirq_action *a)
 +void  tasklet_enable(struct tasklet_struct *t)
  {
 -	struct tasklet_struct *list;
@@ -316,7 +316,7 @@
 +	__tasklet_action(a, list);
 +}
 +
- static void tasklet_hi_action(struct softirq_action *a)
+ static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
  {
  	struct tasklet_struct *list;
  
@@ -356,7 +356,7 @@
  }
  
  void tasklet_init(struct tasklet_struct *t,
-@@ -572,7 +640,7 @@ void tasklet_kill(struct tasklet_struct
+@@ -586,7 +654,7 @@ void tasklet_kill(struct tasklet_struct
  
  	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
  		do {
@@ -365,7 +365,7 @@
  		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
  	}
  	tasklet_unlock_wait(t);
-@@ -646,6 +714,23 @@ void __init softirq_init(void)
+@@ -660,6 +728,23 @@ void __init softirq_init(void)
  	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
  }
  
diff --git a/patches/thermal-Defer-thermal-wakups-to-threads.patch b/patches/thermal-Defer-thermal-wakups-to-threads.patch
index fe12a36..8cc51be 100644
--- a/patches/thermal-Defer-thermal-wakups-to-threads.patch
+++ b/patches/thermal-Defer-thermal-wakups-to-threads.patch
@@ -36,7 +36,7 @@
  #include <asm/cpu_device_id.h>
  #include <asm/mce.h>
  
-@@ -352,7 +353,7 @@ static void pkg_temp_thermal_threshold_w
+@@ -353,7 +354,7 @@ static void pkg_temp_thermal_threshold_w
  	}
  }
  
@@ -45,7 +45,7 @@
  {
  	unsigned long flags;
  	int cpu = smp_processor_id();
-@@ -369,7 +370,7 @@ static int pkg_temp_thermal_platform_the
+@@ -370,7 +371,7 @@ static int pkg_temp_thermal_platform_the
  			pkg_work_scheduled[phy_id]) {
  		disable_pkg_thres_interrupt();
  		spin_unlock_irqrestore(&pkg_work_lock, flags);
@@ -54,7 +54,7 @@
  	}
  	pkg_work_scheduled[phy_id] = 1;
  	spin_unlock_irqrestore(&pkg_work_lock, flags);
-@@ -378,9 +379,48 @@ static int pkg_temp_thermal_platform_the
+@@ -379,9 +380,48 @@ static int pkg_temp_thermal_platform_the
  	schedule_delayed_work_on(cpu,
  				&per_cpu(pkg_temp_thermal_threshold_work, cpu),
  				msecs_to_jiffies(notify_delay_ms));
@@ -103,7 +103,7 @@
  static int find_siblings_cpu(int cpu)
  {
  	int i;
-@@ -584,6 +624,9 @@ static int __init pkg_temp_thermal_init(
+@@ -585,6 +625,9 @@ static int __init pkg_temp_thermal_init(
  	if (!x86_match_cpu(pkg_temp_thermal_ids))
  		return -ENODEV;
  
@@ -113,7 +113,7 @@
  	spin_lock_init(&pkg_work_lock);
  	platform_thermal_package_notify =
  			pkg_temp_thermal_platform_thermal_notify;
-@@ -608,7 +651,7 @@ static int __init pkg_temp_thermal_init(
+@@ -609,7 +652,7 @@ static int __init pkg_temp_thermal_init(
  	kfree(pkg_work_scheduled);
  	platform_thermal_package_notify = NULL;
  	platform_thermal_package_rate_control = NULL;
@@ -122,7 +122,7 @@
  	return -ENODEV;
  }
  
-@@ -633,6 +676,7 @@ static void __exit pkg_temp_thermal_exit
+@@ -634,6 +677,7 @@ static void __exit pkg_temp_thermal_exit
  	mutex_unlock(&phy_dev_list_mutex);
  	platform_thermal_package_notify = NULL;
  	platform_thermal_package_rate_control = NULL;
diff --git a/patches/timekeeping-split-jiffies-lock.patch b/patches/timekeeping-split-jiffies-lock.patch
index d7dbc04..7c1f4f4 100644
--- a/patches/timekeeping-split-jiffies-lock.patch
+++ b/patches/timekeeping-split-jiffies-lock.patch
@@ -114,7 +114,7 @@
  	return period;
  }
  
-@@ -670,10 +675,10 @@ static ktime_t tick_nohz_stop_sched_tick
+@@ -673,10 +678,10 @@ static ktime_t tick_nohz_stop_sched_tick
  
  	/* Read jiffies and the time when jiffies were updated last */
  	do {
diff --git a/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch b/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch
index 4ff67b8..9cc6599 100644
--- a/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch
+++ b/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch
@@ -27,7 +27,7 @@
 
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -3553,7 +3553,16 @@ asmlinkage __visible void __sched notrac
+@@ -3583,7 +3583,16 @@ asmlinkage __visible void __sched notrac
  		 * an infinite recursion.
  		 */
  		prev_ctx = exception_enter();
diff --git a/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch b/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
index 8db7ac2..977fcb3 100644
--- a/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
+++ b/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
@@ -14,7 +14,7 @@
 
 --- a/drivers/tty/serial/8250/8250_port.c
 +++ b/drivers/tty/serial/8250/8250_port.c
-@@ -3109,10 +3109,8 @@ void serial8250_console_write(struct uar
+@@ -3144,10 +3144,8 @@ void serial8250_console_write(struct uar
  
  	serial8250_rpm_get(up);
  
diff --git a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
index ae3b301..66284f8 100644
--- a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
+++ b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
@@ -37,7 +37,7 @@
 
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -3804,7 +3804,7 @@ static int netif_rx_internal(struct sk_b
+@@ -3789,7 +3789,7 @@ static int netif_rx_internal(struct sk_b
  		struct rps_dev_flow voidflow, *rflow = &voidflow;
  		int cpu;
  
@@ -46,7 +46,7 @@
  		rcu_read_lock();
  
  		cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -3814,13 +3814,13 @@ static int netif_rx_internal(struct sk_b
+@@ -3799,13 +3799,13 @@ static int netif_rx_internal(struct sk_b
  		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
  
  		rcu_read_unlock();
diff --git a/patches/usb-use-_nort-in-giveback.patch b/patches/usb-use-_nort-in-giveback.patch
index 6469b6d..88e1dfe 100644
--- a/patches/usb-use-_nort-in-giveback.patch
+++ b/patches/usb-use-_nort-in-giveback.patch
@@ -43,7 +43,7 @@
 
 --- a/drivers/usb/core/hcd.c
 +++ b/drivers/usb/core/hcd.c
-@@ -1760,9 +1760,9 @@ static void __usb_hcd_giveback_urb(struc
+@@ -1761,9 +1761,9 @@ static void __usb_hcd_giveback_urb(struc
  	 * and no one may trigger the above deadlock situation when
  	 * running complete() in tasklet.
  	 */
diff --git a/patches/workqueue-distangle-from-rq-lock.patch b/patches/workqueue-distangle-from-rq-lock.patch
index 071360f..ad9e7ee 100644
--- a/patches/workqueue-distangle-from-rq-lock.patch
+++ b/patches/workqueue-distangle-from-rq-lock.patch
@@ -31,7 +31,7 @@
 
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -1701,10 +1701,6 @@ static inline void ttwu_activate(struct
+@@ -1711,10 +1711,6 @@ static inline void ttwu_activate(struct
  {
  	activate_task(rq, p, en_flags);
  	p->on_rq = TASK_ON_RQ_QUEUED;
@@ -42,12 +42,13 @@
  }
  
  /*
-@@ -2143,53 +2139,6 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2152,53 +2148,6 @@ try_to_wake_up(struct task_struct *p, un
  }
  
  /**
 - * try_to_wake_up_local - try to wake up a local task with rq lock held
 - * @p: the thread to be awakened
+- * @cookie: context's cookie for pinning
 - *
 - * Put @p on the run-queue if it's not already there. The caller must
 - * ensure that this_rq() is locked, @p is bound to this_rq() and not
@@ -86,8 +87,7 @@
 -		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 -
 -	ttwu_do_wakeup(rq, p, 0, cookie);
--	if (schedstat_enabled())
--		ttwu_stat(p, smp_processor_id(), 0);
+-	ttwu_stat(p, smp_processor_id(), 0);
 -out:
 -	raw_spin_unlock(&p->pi_lock);
 -}
@@ -96,7 +96,7 @@
   * wake_up_process - Wake up a specific process
   * @p: The process to be woken up.
   *
-@@ -3499,21 +3448,6 @@ static void __sched notrace __schedule(b
+@@ -3494,21 +3443,6 @@ static void __sched notrace __schedule(b
  		} else {
  			deactivate_task(rq, prev, DEQUEUE_SLEEP);
  			prev->on_rq = 0;
@@ -118,7 +118,7 @@
  		}
  		switch_count = &prev->nvcsw;
  	}
-@@ -3546,6 +3480,14 @@ static inline void sched_submit_work(str
+@@ -3567,6 +3501,14 @@ static inline void sched_submit_work(str
  {
  	if (!tsk->state || tsk_is_pi_blocked(tsk))
  		return;
@@ -133,7 +133,7 @@
  	/*
  	 * If we are going to sleep and we have plugged IO queued,
  	 * make sure to submit it to avoid deadlocks.
-@@ -3554,6 +3496,12 @@ static inline void sched_submit_work(str
+@@ -3575,6 +3517,12 @@ static inline void sched_submit_work(str
  		blk_schedule_flush_plug(tsk);
  }
  
@@ -146,7 +146,7 @@
  asmlinkage __visible void __sched schedule(void)
  {
  	struct task_struct *tsk = current;
-@@ -3564,6 +3512,7 @@ asmlinkage __visible void __sched schedu
+@@ -3585,6 +3533,7 @@ asmlinkage __visible void __sched schedu
  		__schedule(false);
  		sched_preempt_enable_no_resched();
  	} while (need_resched());
diff --git a/patches/workqueue-prevent-deadlock-stall.patch b/patches/workqueue-prevent-deadlock-stall.patch
index 914b003..a8e412e 100644
--- a/patches/workqueue-prevent-deadlock-stall.patch
+++ b/patches/workqueue-prevent-deadlock-stall.patch
@@ -43,7 +43,7 @@
 
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -3594,9 +3594,8 @@ STACK_FRAME_NON_STANDARD(__schedule); /*
+@@ -3615,9 +3615,8 @@ void __noreturn do_task_dead(void)
  
  static inline void sched_submit_work(struct task_struct *tsk)
  {
@@ -54,7 +54,7 @@
  	/*
  	 * If a worker went to sleep, notify and ask workqueue whether
  	 * it wants to wake up a task to maintain concurrency.
-@@ -3604,6 +3603,10 @@ static inline void sched_submit_work(str
+@@ -3625,6 +3624,10 @@ static inline void sched_submit_work(str
  	if (tsk->flags & PF_WQ_WORKER)
  		wq_worker_sleeping(tsk);
  
diff --git a/patches/workqueue-use-locallock.patch b/patches/workqueue-use-locallock.patch
index e03df16..ffc72b9 100644
--- a/patches/workqueue-use-locallock.patch
+++ b/patches/workqueue-use-locallock.patch
@@ -7,8 +7,8 @@
 
 Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 ---
- kernel/workqueue.c |   31 +++++++++++++++++--------------
- 1 file changed, 17 insertions(+), 14 deletions(-)
+ kernel/workqueue.c |   33 +++++++++++++++++++--------------
+ 1 file changed, 19 insertions(+), 14 deletions(-)
 
 --- a/kernel/workqueue.c
 +++ b/kernel/workqueue.c
@@ -29,19 +29,21 @@
  static int worker_thread(void *__worker);
  static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
  
-@@ -1101,9 +1104,9 @@ static void put_pwq_unlocked(struct pool
+@@ -1101,9 +1104,11 @@ static void put_pwq_unlocked(struct pool
  		 * As both pwqs and pools are RCU protected, the
  		 * following lock operations are safe.
  		 */
 -		spin_lock_irq(&pwq->pool->lock);
++		rcu_read_lock();
 +		local_spin_lock_irq(pendingb_lock, &pwq->pool->lock);
  		put_pwq(pwq);
 -		spin_unlock_irq(&pwq->pool->lock);
 +		local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock);
++		rcu_read_unlock();
  	}
  }
  
-@@ -1207,7 +1210,7 @@ static int try_to_grab_pending(struct wo
+@@ -1207,7 +1212,7 @@ static int try_to_grab_pending(struct wo
  	struct worker_pool *pool;
  	struct pool_workqueue *pwq;
  
@@ -50,7 +52,7 @@
  
  	/* try to steal the timer if it exists */
  	if (is_dwork) {
-@@ -1271,7 +1274,7 @@ static int try_to_grab_pending(struct wo
+@@ -1271,7 +1276,7 @@ static int try_to_grab_pending(struct wo
  	spin_unlock(&pool->lock);
  fail:
  	rcu_read_unlock();
@@ -59,7 +61,7 @@
  	if (work_is_canceling(work))
  		return -ENOENT;
  	cpu_relax();
-@@ -1376,7 +1379,7 @@ static void __queue_work(int cpu, struct
+@@ -1376,7 +1381,7 @@ static void __queue_work(int cpu, struct
  	 * queued or lose PENDING.  Grabbing PENDING and queueing should
  	 * happen with IRQ disabled.
  	 */
@@ -68,7 +70,7 @@
  
  	debug_work_activate(work);
  
-@@ -1482,14 +1485,14 @@ bool queue_work_on(int cpu, struct workq
+@@ -1482,14 +1487,14 @@ bool queue_work_on(int cpu, struct workq
  	bool ret = false;
  	unsigned long flags;
  
@@ -85,7 +87,7 @@
  	return ret;
  }
  EXPORT_SYMBOL(queue_work_on);
-@@ -1556,14 +1559,14 @@ bool queue_delayed_work_on(int cpu, stru
+@@ -1556,14 +1561,14 @@ bool queue_delayed_work_on(int cpu, stru
  	unsigned long flags;
  
  	/* read the comment in __queue_work() */
@@ -102,7 +104,7 @@
  	return ret;
  }
  EXPORT_SYMBOL(queue_delayed_work_on);
-@@ -1598,7 +1601,7 @@ bool mod_delayed_work_on(int cpu, struct
+@@ -1598,7 +1603,7 @@ bool mod_delayed_work_on(int cpu, struct
  
  	if (likely(ret >= 0)) {
  		__queue_delayed_work(cpu, wq, dwork, delay);
@@ -111,7 +113,7 @@
  	}
  
  	/* -ENOENT from try_to_grab_pending() becomes %true */
-@@ -2916,7 +2919,7 @@ static bool __cancel_work_timer(struct w
+@@ -2916,7 +2921,7 @@ static bool __cancel_work_timer(struct w
  
  	/* tell other tasks trying to grab @work to back off */
  	mark_work_canceling(work);
@@ -120,7 +122,7 @@
  
  	flush_work(work);
  	clear_work_data(work);
-@@ -2971,10 +2974,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
+@@ -2971,10 +2976,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
   */
  bool flush_delayed_work(struct delayed_work *dwork)
  {
@@ -133,12 +135,12 @@
  	return flush_work(&dwork->work);
  }
  EXPORT_SYMBOL(flush_delayed_work);
-@@ -3009,7 +3012,7 @@ bool cancel_delayed_work(struct delayed_
+@@ -2992,7 +2997,7 @@ static bool __cancel_work(struct work_st
+ 		return false;
  
- 	set_work_pool_and_clear_pending(&dwork->work,
- 					get_work_pool_id(&dwork->work));
+ 	set_work_pool_and_clear_pending(work, get_work_pool_id(work));
 -	local_irq_restore(flags);
 +	local_unlock_irqrestore(pendingb_lock, flags);
  	return ret;
  }
- EXPORT_SYMBOL(cancel_delayed_work);
+ 
diff --git a/patches/workqueue-use-rcu.patch b/patches/workqueue-use-rcu.patch
index 43499ef..5bd35b6 100644
--- a/patches/workqueue-use-rcu.patch
+++ b/patches/workqueue-use-rcu.patch
@@ -207,7 +207,7 @@
  	return false;
  }
  
-@@ -3233,7 +3238,7 @@ static void rcu_free_pool(struct rcu_hea
+@@ -3245,7 +3250,7 @@ static void rcu_free_pool(struct rcu_hea
   * put_unbound_pool - put a worker_pool
   * @pool: worker_pool to put
   *
@@ -216,7 +216,7 @@
   * safe manner.  get_unbound_pool() calls this function on its failure path
   * and this function should be able to release pools which went through,
   * successfully or not, init_worker_pool().
-@@ -3287,8 +3292,8 @@ static void put_unbound_pool(struct work
+@@ -3299,8 +3304,8 @@ static void put_unbound_pool(struct work
  	del_timer_sync(&pool->idle_timer);
  	del_timer_sync(&pool->mayday_timer);
  
@@ -227,7 +227,7 @@
  }
  
  /**
-@@ -3395,14 +3400,14 @@ static void pwq_unbound_release_workfn(s
+@@ -3407,14 +3412,14 @@ static void pwq_unbound_release_workfn(s
  	put_unbound_pool(pool);
  	mutex_unlock(&wq_pool_mutex);
  
@@ -244,7 +244,7 @@
  }
  
  /**
-@@ -4052,7 +4057,7 @@ void destroy_workqueue(struct workqueue_
+@@ -4064,7 +4069,7 @@ void destroy_workqueue(struct workqueue_
  		 * The base ref is never dropped on per-cpu pwqs.  Directly
  		 * schedule RCU free.
  		 */
@@ -253,7 +253,7 @@
  	} else {
  		/*
  		 * We're the sole accessor of @wq at this point.  Directly
-@@ -4145,7 +4150,8 @@ bool workqueue_congested(int cpu, struct
+@@ -4157,7 +4162,8 @@ bool workqueue_congested(int cpu, struct
  	struct pool_workqueue *pwq;
  	bool ret;
  
@@ -263,7 +263,7 @@
  
  	if (cpu == WORK_CPU_UNBOUND)
  		cpu = smp_processor_id();
-@@ -4156,7 +4162,8 @@ bool workqueue_congested(int cpu, struct
+@@ -4168,7 +4174,8 @@ bool workqueue_congested(int cpu, struct
  		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
  
  	ret = !list_empty(&pwq->delayed_works);
@@ -273,7 +273,7 @@
  
  	return ret;
  }
-@@ -4182,15 +4189,15 @@ unsigned int work_busy(struct work_struc
+@@ -4194,15 +4201,15 @@ unsigned int work_busy(struct work_struc
  	if (work_pending(work))
  		ret |= WORK_BUSY_PENDING;
  
@@ -293,7 +293,7 @@
  
  	return ret;
  }
-@@ -4379,7 +4386,7 @@ void show_workqueue_state(void)
+@@ -4391,7 +4398,7 @@ void show_workqueue_state(void)
  	unsigned long flags;
  	int pi;
  
@@ -302,7 +302,7 @@
  
  	pr_info("Showing busy workqueues and worker pools:\n");
  
-@@ -4432,7 +4439,7 @@ void show_workqueue_state(void)
+@@ -4444,7 +4451,7 @@ void show_workqueue_state(void)
  		spin_unlock_irqrestore(&pool->lock, flags);
  	}
  
@@ -311,7 +311,7 @@
  }
  
  /*
-@@ -4770,16 +4777,16 @@ bool freeze_workqueues_busy(void)
+@@ -4782,16 +4789,16 @@ bool freeze_workqueues_busy(void)
  		 * nr_active is monotonically decreasing.  It's safe
  		 * to peek without lock.
  		 */
@@ -331,7 +331,7 @@
  	}
  out_unlock:
  	mutex_unlock(&wq_pool_mutex);
-@@ -4969,7 +4976,8 @@ static ssize_t wq_pool_ids_show(struct d
+@@ -4981,7 +4988,8 @@ static ssize_t wq_pool_ids_show(struct d
  	const char *delim = "";
  	int node, written = 0;
  
@@ -341,7 +341,7 @@
  	for_each_node(node) {
  		written += scnprintf(buf + written, PAGE_SIZE - written,
  				     "%s%d:%d", delim, node,
-@@ -4977,7 +4985,8 @@ static ssize_t wq_pool_ids_show(struct d
+@@ -4989,7 +4997,8 @@ static ssize_t wq_pool_ids_show(struct d
  		delim = " ";
  	}
  	written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
diff --git a/patches/workqueue-use-rcu_readlock-in-put_pwq_unlocked.patch b/patches/workqueue-use-rcu_readlock-in-put_pwq_unlocked.patch
deleted file mode 100644
index 1c0ddee..0000000
--- a/patches/workqueue-use-rcu_readlock-in-put_pwq_unlocked.patch
+++ /dev/null
@@ -1,49 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 12 Dec 2016 16:14:18 +0100
-Subject: [PATCH] workqueue: use rcu_readlock() in put_pwq_unlocked()
-
-The RCU sched protection was changed to plain RCU, and so all IRQ-off
-and preempt-off disabled regions were changed to the relevant rcu-read-lock
-primitives. One was missed and triggered:
-|[ BUG: bad unlock balance detected! ]
-|4.4.30-rt41 #51 Tainted: G        W
-|btattach/345 is trying to release lock (
-|Unable to handle kernel paging request at virtual address 6b6b6bbb
-|Backtrace:
-|[<c016b5a0>] (lock_release) from [<c0804844>] (rt_spin_unlock+0x20/0x30)
-|[<c0804824>] (rt_spin_unlock) from [<c0138954>] (put_pwq_unlocked+0xa4/0x118)
-|[<c01388b0>] (put_pwq_unlocked) from [<c0138b2c>] (destroy_workqueue+0x164/0x1b0)
-|[<c01389c8>] (destroy_workqueue) from [<c078e1ac>] (hci_unregister_dev+0x120/0x21c)
-|[<c078e08c>] (hci_unregister_dev) from [<c054f658>] (hci_uart_tty_close+0x90/0xbc)
-|[<c054f5c8>] (hci_uart_tty_close) from [<c03a2be8>] (tty_ldisc_close+0x50/0x58)
-|[<c03a2b98>] (tty_ldisc_close) from [<c03a2cb4>] (tty_ldisc_kill+0x18/0x78)
-|[<c03a2c9c>] (tty_ldisc_kill) from [<c03a3528>] (tty_ldisc_release+0x100/0x134)
-|[<c03a3428>] (tty_ldisc_release) from [<c039cd68>] (tty_release+0x3bc/0x460)
-|[<c039c9ac>] (tty_release) from [<c020cc08>] (__fput+0xe0/0x1b4)
-|[<c020cb28>] (__fput) from [<c020cd3c>] (____fput+0x10/0x14)
-|[<c020cd2c>] (____fput) from [<c013e0d4>] (task_work_run+0xa4/0xb8)
-|[<c013e030>] (task_work_run) from [<c0121754>] (do_exit+0x40c/0x8b0)
-|[<c0121348>] (do_exit) from [<c0122ff8>] (do_group_exit+0x54/0xc4)
-
-Cc: stable-rt@vger.kernel.org
-Reported-by: John Keeping <john@metanate.com>
-Tested-by: John Keeping <john@metanate.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/workqueue.c |    2 ++
- 1 file changed, 2 insertions(+)
-
---- a/kernel/workqueue.c
-+++ b/kernel/workqueue.c
-@@ -1104,9 +1104,11 @@ static void put_pwq_unlocked(struct pool
- 		 * As both pwqs and pools are RCU protected, the
- 		 * following lock operations are safe.
- 		 */
-+		rcu_read_lock();
- 		local_spin_lock_irq(pendingb_lock, &pwq->pool->lock);
- 		put_pwq(pwq);
- 		local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock);
-+		rcu_read_unlock();
- 	}
- }
- 
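
The standalone fix above is not lost, it is folded into
workqueue-use-locallock.patch: the rcu_read_lock()/rcu_read_unlock()
pair now appears directly in that patch's put_pwq_unlocked() hunk (see
its refresh earlier in this diff). The rule being enforced, as a small
liburcu sketch — types and names are illustrative, and the calling
thread is assumed to be rcu_register_thread()'d:

    #include <pthread.h>
    #include <urcu.h>               /* userspace RCU, default flavour */

    struct pool { pthread_mutex_t lock; };
    static struct pool *active_pool;

    /*
     * An RCU-freed object may only be dereferenced inside a read-side
     * critical section. On RT the sleeping spinlock no longer implies
     * such a section, hence the explicit rcu_read_lock() pair.
     */
    static void poke_pool(void)
    {
            struct pool *p;

            rcu_read_lock();
            p = rcu_dereference(active_pool);
            pthread_mutex_lock(&p->lock);
            /* ... touch *p ... */
            pthread_mutex_unlock(&p->lock);
            rcu_read_unlock();      /* only now may an updater free p */
    }
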
diff --git a/patches/x86-UV-raw_spinlock-conversion.patch b/patches/x86-UV-raw_spinlock-conversion.patch
index d25fdfc..60e206e 100644
--- a/patches/x86-UV-raw_spinlock-conversion.patch
+++ b/patches/x86-UV-raw_spinlock-conversion.patch
@@ -15,7 +15,7 @@
 
 --- a/arch/x86/include/asm/uv/uv_bau.h
 +++ b/arch/x86/include/asm/uv/uv_bau.h
-@@ -615,9 +615,9 @@ struct bau_control {
+@@ -624,9 +624,9 @@ struct bau_control {
  	cycles_t		send_message;
  	cycles_t		period_end;
  	cycles_t		period_time;
@@ -28,7 +28,7 @@
  	/* tunables */
  	int			max_concurr;
  	int			max_concurr_const;
-@@ -776,15 +776,15 @@ static inline int atom_asr(short i, stru
+@@ -815,15 +815,15 @@ static inline int atom_asr(short i, stru
   * to be lowered below the current 'v'.  atomic_add_unless can only stop
   * on equal.
   */
@@ -50,7 +50,7 @@
  
 --- a/arch/x86/platform/uv/tlb_uv.c
 +++ b/arch/x86/platform/uv/tlb_uv.c
-@@ -729,9 +729,9 @@ static void destination_plugged(struct b
+@@ -748,9 +748,9 @@ static void destination_plugged(struct b
  
  		quiesce_local_uvhub(hmaster);
  
@@ -62,7 +62,7 @@
  
  		end_uvhub_quiesce(hmaster);
  
-@@ -751,9 +751,9 @@ static void destination_timeout(struct b
+@@ -770,9 +770,9 @@ static void destination_timeout(struct b
  
  		quiesce_local_uvhub(hmaster);
  
@@ -74,7 +74,7 @@
  
  		end_uvhub_quiesce(hmaster);
  
-@@ -774,7 +774,7 @@ static void disable_for_period(struct ba
+@@ -793,7 +793,7 @@ static void disable_for_period(struct ba
  	cycles_t tm1;
  
  	hmaster = bcp->uvhub_master;
@@ -83,7 +83,7 @@
  	if (!bcp->baudisabled) {
  		stat->s_bau_disabled++;
  		tm1 = get_cycles();
-@@ -787,7 +787,7 @@ static void disable_for_period(struct ba
+@@ -806,7 +806,7 @@ static void disable_for_period(struct ba
  			}
  		}
  	}
@@ -92,7 +92,7 @@
  }
  
  static void count_max_concurr(int stat, struct bau_control *bcp,
-@@ -850,7 +850,7 @@ static void record_send_stats(cycles_t t
+@@ -869,7 +869,7 @@ static void record_send_stats(cycles_t t
   */
  static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
  {
@@ -101,7 +101,7 @@
  	atomic_t *v;
  
  	v = &hmaster->active_descriptor_count;
-@@ -983,7 +983,7 @@ static int check_enable(struct bau_contr
+@@ -1002,7 +1002,7 @@ static int check_enable(struct bau_contr
  	struct bau_control *hmaster;
  
  	hmaster = bcp->uvhub_master;
@@ -110,7 +110,7 @@
  	if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
  		stat->s_bau_reenabled++;
  		for_each_present_cpu(tcpu) {
-@@ -995,10 +995,10 @@ static int check_enable(struct bau_contr
+@@ -1014,10 +1014,10 @@ static int check_enable(struct bau_contr
  				tbcp->period_giveups = 0;
  			}
  		}
@@ -123,10 +123,10 @@
  	return -1;
  }
  
-@@ -1916,9 +1916,9 @@ static void __init init_per_cpu_tunables
+@@ -1940,9 +1940,9 @@ static void __init init_per_cpu_tunables
  		bcp->cong_reps			= congested_reps;
- 		bcp->disabled_period =		sec_2_cycles(disabled_period);
- 		bcp->giveup_limit =		giveup_limit;
+ 		bcp->disabled_period		= sec_2_cycles(disabled_period);
+ 		bcp->giveup_limit		= giveup_limit;
 -		spin_lock_init(&bcp->queue_lock);
 -		spin_lock_init(&bcp->uvhub_lock);
 -		spin_lock_init(&bcp->disable_lock);
diff --git a/patches/x86-kvm-require-const-tsc-for-rt.patch b/patches/x86-kvm-require-const-tsc-for-rt.patch
index 0e21bf7..0aabd96 100644
--- a/patches/x86-kvm-require-const-tsc-for-rt.patch
+++ b/patches/x86-kvm-require-const-tsc-for-rt.patch
@@ -14,7 +14,7 @@
 
 --- a/arch/x86/kvm/x86.c
 +++ b/arch/x86/kvm/x86.c
-@@ -5877,6 +5877,13 @@ int kvm_arch_init(void *opaque)
+@@ -5925,6 +5925,13 @@ int kvm_arch_init(void *opaque)
  		goto out;
  	}
  
diff --git a/patches/x86-mce-timer-hrtimer.patch b/patches/x86-mce-timer-hrtimer.patch
index 1cd6e72..7e7159a 100644
--- a/patches/x86-mce-timer-hrtimer.patch
+++ b/patches/x86-mce-timer-hrtimer.patch
@@ -31,10 +31,10 @@
  #include <linux/irq_work.h>
  #include <linux/export.h>
 +#include <linux/jiffies.h>
+ #include <linux/jump_label.h>
  
  #include <asm/processor.h>
- #include <asm/traps.h>
-@@ -1291,7 +1292,7 @@ void mce_log_therm_throt_event(__u64 sta
+@@ -1317,7 +1318,7 @@ void mce_log_therm_throt_event(__u64 sta
  static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
  
  static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
@@ -43,7 +43,7 @@
  
  static unsigned long mce_adjust_timer_default(unsigned long interval)
  {
-@@ -1300,32 +1301,18 @@ static unsigned long mce_adjust_timer_de
+@@ -1326,32 +1327,18 @@ static unsigned long mce_adjust_timer_de
  
  static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
  
@@ -82,7 +82,7 @@
  	iv = __this_cpu_read(mce_next_interval);
  
  	if (mce_available(this_cpu_ptr(&cpu_info))) {
-@@ -1348,7 +1335,7 @@ static void mce_timer_fn(unsigned long d
+@@ -1374,7 +1361,7 @@ static void mce_timer_fn(unsigned long d
  
  done:
  	__this_cpu_write(mce_next_interval, iv);
@@ -91,7 +91,7 @@
  }
  
  /*
-@@ -1356,7 +1343,7 @@ static void mce_timer_fn(unsigned long d
+@@ -1382,7 +1369,7 @@ static void mce_timer_fn(unsigned long d
   */
  void mce_timer_kick(unsigned long interval)
  {
@@ -100,7 +100,7 @@
  	unsigned long iv = __this_cpu_read(mce_next_interval);
  
  	__restart_timer(t, interval);
-@@ -1371,7 +1358,7 @@ static void mce_timer_delete_all(void)
+@@ -1397,7 +1384,7 @@ static void mce_timer_delete_all(void)
  	int cpu;
  
  	for_each_online_cpu(cpu)
@@ -109,7 +109,7 @@
  }
  
  static void mce_do_trigger(struct work_struct *work)
-@@ -1717,7 +1704,7 @@ static void __mcheck_cpu_clear_vendor(st
+@@ -1732,7 +1719,7 @@ static void __mcheck_cpu_clear_vendor(st
  	}
  }
  
@@ -118,7 +118,7 @@
  {
  	unsigned long iv = check_interval * HZ;
  
-@@ -1726,16 +1713,17 @@ static void mce_start_timer(unsigned int
+@@ -1741,16 +1728,17 @@ static void mce_start_timer(unsigned int
  
  	per_cpu(mce_next_interval, cpu) = iv;
  
@@ -140,7 +140,7 @@
  	mce_start_timer(cpu, t);
  }
  
-@@ -2459,6 +2447,8 @@ static void mce_disable_cpu(void *h)
+@@ -2475,6 +2463,8 @@ static void mce_disable_cpu(void *h)
  	if (!mce_available(raw_cpu_ptr(&cpu_info)))
  		return;
  
@@ -149,7 +149,7 @@
  	if (!(action & CPU_TASKS_FROZEN))
  		cmci_clear();
  
-@@ -2481,6 +2471,7 @@ static void mce_reenable_cpu(void *h)
+@@ -2497,6 +2487,7 @@ static void mce_reenable_cpu(void *h)
  		if (b->init)
  			wrmsrl(msr_ops.ctl(i), b->ctl);
  	}
@@ -157,7 +157,7 @@
  }
  
  /* Get notified when a cpu comes on/off. Be hotplug friendly. */
-@@ -2488,7 +2479,6 @@ static int
+@@ -2504,7 +2495,6 @@ static int
  mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
  {
  	unsigned int cpu = (unsigned long)hcpu;
@@ -165,7 +165,7 @@
  
  	switch (action & ~CPU_TASKS_FROZEN) {
  	case CPU_ONLINE:
-@@ -2508,11 +2498,9 @@ mce_cpu_callback(struct notifier_block *
+@@ -2524,11 +2514,9 @@ mce_cpu_callback(struct notifier_block *
  		break;
  	case CPU_DOWN_PREPARE:
  		smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
diff --git a/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch b/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch
index ac91085..1cca142 100644
--- a/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch
+++ b/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch
@@ -65,10 +65,10 @@
  #include <linux/export.h>
  #include <linux/jiffies.h>
 +#include <linux/swork.h>
+ #include <linux/jump_label.h>
  
  #include <asm/processor.h>
- #include <asm/traps.h>
-@@ -1368,6 +1369,56 @@ static void mce_do_trigger(struct work_s
+@@ -1394,6 +1395,56 @@ static void mce_do_trigger(struct work_s
  
  static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
  
@@ -125,7 +125,7 @@
  /*
   * Notify the user(s) about new machine check events.
   * Can be called from interrupt context, but not from machine check/NMI
-@@ -1375,19 +1426,8 @@ static DECLARE_WORK(mce_trigger_work, mc
+@@ -1401,19 +1452,8 @@ static DECLARE_WORK(mce_trigger_work, mc
   */
  int mce_notify_irq(void)
  {
@@ -146,7 +146,7 @@
  		return 1;
  	}
  	return 0;
-@@ -2539,6 +2579,10 @@ static __init int mcheck_init_device(voi
+@@ -2555,6 +2595,10 @@ static __init int mcheck_init_device(voi
  		goto err_out;
  	}
  
diff --git a/patches/x86-preempt-lazy.patch b/patches/x86-preempt-lazy.patch
index 023abde..96d6afe 100644
--- a/patches/x86-preempt-lazy.patch
+++ b/patches/x86-preempt-lazy.patch
@@ -11,9 +11,9 @@
  arch/x86/entry/entry_32.S          |   17 +++++++++++++++++
  arch/x86/entry/entry_64.S          |   16 ++++++++++++++++
  arch/x86/include/asm/preempt.h     |   31 ++++++++++++++++++++++++++++++-
- arch/x86/include/asm/thread_info.h |   10 ++++++++++
+ arch/x86/include/asm/thread_info.h |   11 +++++++++++
  arch/x86/kernel/asm-offsets.c      |    2 ++
- 7 files changed, 78 insertions(+), 3 deletions(-)
+ 7 files changed, 79 insertions(+), 3 deletions(-)
 
 --- a/arch/x86/Kconfig
 +++ b/arch/x86/Kconfig
@@ -27,7 +27,7 @@
  	select ANON_INODES
 --- a/arch/x86/entry/common.c
 +++ b/arch/x86/entry/common.c
-@@ -136,7 +136,7 @@ static long syscall_trace_enter(struct p
+@@ -129,7 +129,7 @@ static long syscall_trace_enter(struct p
  
  #define EXIT_TO_USERMODE_LOOP_FLAGS				\
  	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
@@ -36,7 +36,7 @@
  
  static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
  {
-@@ -152,7 +152,7 @@ static void exit_to_usermode_loop(struct
+@@ -145,7 +145,7 @@ static void exit_to_usermode_loop(struct
  		/* We have work to do. */
  		local_irq_enable();
  
@@ -47,7 +47,7 @@
  #ifdef ARCH_RT_DELAYS_SIGNAL_SEND
 --- a/arch/x86/entry/entry_32.S
 +++ b/arch/x86/entry/entry_32.S
-@@ -271,8 +271,25 @@ END(ret_from_exception)
+@@ -308,8 +308,25 @@ END(ret_from_exception)
  ENTRY(resume_kernel)
  	DISABLE_INTERRUPTS(CLBR_ANY)
  need_resched:
@@ -62,11 +62,11 @@
 +	cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
 +	jne restore_all
 +
-+	GET_THREAD_INFO(%ebp)
-+	cmpl $0,TI_preempt_lazy_count(%ebp)	# non-zero preempt_lazy_count ?
++	movl    PER_CPU_VAR(current_task), %ebp
++	cmpl $0,TASK_TI_preempt_lazy_count(%ebp)	# non-zero preempt_lazy_count ?
 +	jnz restore_all
 +
-+	testl $_TIF_NEED_RESCHED_LAZY, TI_flags(%ebp)
++	testl $_TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%ebp)
 +	jz restore_all
 +test_int_off:
 +#endif
@@ -75,7 +75,7 @@
  	call	preempt_schedule_irq
 --- a/arch/x86/entry/entry_64.S
 +++ b/arch/x86/entry/entry_64.S
-@@ -512,7 +512,23 @@ GLOBAL(retint_user)
+@@ -546,7 +546,23 @@ GLOBAL(retint_user)
  	bt	$9, EFLAGS(%rsp)		/* were interrupts off? */
  	jnc	1f
  0:	cmpl	$0, PER_CPU_VAR(__preempt_count)
@@ -88,11 +88,11 @@
 +	cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
 +	jnz	1f
 +
-+	GET_THREAD_INFO(%rcx)
-+	cmpl	$0, TI_preempt_lazy_count(%rcx)
++	movq	PER_CPU_VAR(current_task), %rcx
++	cmpl	$0, TASK_TI_preempt_lazy_count(%rcx)
 +	jnz	1f
 +
-+	bt	$TIF_NEED_RESCHED_LAZY,TI_flags(%rcx)
++	bt	$TIF_NEED_RESCHED_LAZY,TASK_TI_flags(%rcx)
 +	jnc	1f
 +do_preempt_schedule_irq:
 +#endif
@@ -151,16 +151,22 @@
  #ifdef CONFIG_PREEMPT
 --- a/arch/x86/include/asm/thread_info.h
 +++ b/arch/x86/include/asm/thread_info.h
-@@ -57,6 +57,8 @@ struct thread_info {
- 	__u32			flags;		/* low level flags */
- 	__u32			status;		/* thread synchronous flags */
- 	__u32			cpu;		/* current CPU */
-+	int			preempt_lazy_count;	/* 0 => lazy preemptable
+@@ -54,11 +54,14 @@ struct task_struct;
+ 
+ struct thread_info {
+ 	unsigned long		flags;		/* low level flags */
++	int                     preempt_lazy_count;     /* 0 => lazy preemptable
 +							   <0 => BUG */
  };
  
  #define INIT_THREAD_INFO(tsk)			\
-@@ -73,6 +75,10 @@ struct thread_info {
+ {						\
+ 	.flags		= 0,			\
++	.preempt_lazy_count = 0,		\
+ }
+ 
+ #define init_stack		(init_thread_union.stack)
+@@ -67,6 +70,10 @@ struct thread_info {
  
  #include <asm/asm-offsets.h>
  
@@ -171,7 +177,7 @@
  #endif
  
  /*
-@@ -91,6 +97,7 @@ struct thread_info {
+@@ -85,6 +92,7 @@ struct thread_info {
  #define TIF_SYSCALL_EMU		6	/* syscall emulation active */
  #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
  #define TIF_SECCOMP		8	/* secure computing */
@@ -179,7 +185,7 @@
  #define TIF_USER_RETURN_NOTIFY	11	/* notify kernel of userspace return */
  #define TIF_UPROBE		12	/* breakpointed or singlestepping */
  #define TIF_NOTSC		16	/* TSC is not accessible in userland */
-@@ -115,6 +122,7 @@ struct thread_info {
+@@ -108,6 +116,7 @@ struct thread_info {
  #define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
  #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
  #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
@@ -187,7 +193,7 @@
  #define _TIF_USER_RETURN_NOTIFY	(1 << TIF_USER_RETURN_NOTIFY)
  #define _TIF_UPROBE		(1 << TIF_UPROBE)
  #define _TIF_NOTSC		(1 << TIF_NOTSC)
-@@ -151,6 +159,8 @@ struct thread_info {
+@@ -143,6 +152,8 @@ struct thread_info {
  #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
  #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
  
@@ -198,15 +204,15 @@
  /*
 --- a/arch/x86/kernel/asm-offsets.c
 +++ b/arch/x86/kernel/asm-offsets.c
-@@ -31,6 +31,7 @@ void common(void) {
- 	BLANK();
- 	OFFSET(TI_flags, thread_info, flags);
- 	OFFSET(TI_status, thread_info, status);
-+	OFFSET(TI_preempt_lazy_count, thread_info, preempt_lazy_count);
+@@ -36,6 +36,7 @@ void common(void) {
  
  	BLANK();
+ 	OFFSET(TASK_TI_flags, task_struct, thread_info.flags);
++	OFFSET(TASK_TI_preempt_lazy_count, task_struct, thread_info.preempt_lazy_count);
  	OFFSET(TASK_addr_limit, task_struct, thread.addr_limit);
-@@ -88,4 +89,5 @@ void common(void) {
+ 
+ 	BLANK();
+@@ -91,4 +92,5 @@ void common(void) {
  
  	BLANK();
  	DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
diff --git a/patches/x86-signal-delay-calling-signals-on-32bit.patch b/patches/x86-signal-delay-calling-signals-on-32bit.patch
index bd37c47..f16a66c 100644
--- a/patches/x86-signal-delay-calling-signals-on-32bit.patch
+++ b/patches/x86-signal-delay-calling-signals-on-32bit.patch
@@ -31,7 +31,7 @@
 
 --- a/arch/x86/include/asm/signal.h
 +++ b/arch/x86/include/asm/signal.h
-@@ -32,7 +32,7 @@ typedef struct {
+@@ -36,7 +36,7 @@ typedef struct {
   * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the
   * trap.
   */
diff --git a/patches/x86-use-gen-rwsem-spinlocks-rt.patch b/patches/x86-use-gen-rwsem-spinlocks-rt.patch
index fe2b08a..22e3659 100644
--- a/patches/x86-use-gen-rwsem-spinlocks-rt.patch
+++ b/patches/x86-use-gen-rwsem-spinlocks-rt.patch
@@ -13,7 +13,7 @@
 
 --- a/arch/x86/Kconfig
 +++ b/arch/x86/Kconfig
-@@ -231,8 +231,11 @@ config ARCH_MAY_HAVE_PC_FDC
+@@ -232,8 +232,11 @@ config ARCH_MAY_HAVE_PC_FDC
  	def_bool y
  	depends on ISA_DMA_API