[ANNOUNCE] 4.4.12-rt19

Dear RT folks!

I'm pleased to announce the v4.4.12-rt19 patch set. I'm doing this
release mostly due to the preemption check fix on non-x86 architectures
and the perf/rapl patch.

Changes since v4.4.12-rt18:
  - On return from interrupt on ARM we could schedule with lazy preempt
    count > 0 under some circumstances. It isn't toxic but it shouldn't
    happen. Noticed by Thomas Gleixner.

  - The way the preempt counter is accessed on non-x86 architectures
    allowed the compiler to reorder the code slightly. This led to
    decrementing the preempt counter, checking for the need resched bit
    followed by writing the counter back. An interrupt between the last
    two steps will lead to a missing preemption point and thus high
    latencies. Patch by Peter Zijlstra.

  - It is now ensured that there are no attempts to print from IRQ or
    NMI context. On certain events such as hard-lockup-detector we would
    attempt to grab sleeping locks.

  - The lock used in the perf/rapl driver is now a raw lock. This change
    is part of v4.6-rc1 and therefore not mentioned in the v4.6 series.
    Carsten Emde asked for this change to become part of v4.4.

Known issues
	- CPU hotplug got a little better but can deadlock.

The delta patch against 4.4.12-rt18 is appended below and can be found here:

     https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/incr/patch-4.4.12-rt18-rt19.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.4.12-rt19

The RT patch against 4.4.12 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/patch-4.4.12-rt19.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.12-rt19.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
diff --git a/patches/0002-kbuild-Add-option-to-turn-incompatible-pointer-check.patch b/patches/0002-kbuild-Add-option-to-turn-incompatible-pointer-check.patch
index e77f4c5..bc40f11 100644
--- a/patches/0002-kbuild-Add-option-to-turn-incompatible-pointer-check.patch
+++ b/patches/0002-kbuild-Add-option-to-turn-incompatible-pointer-check.patch
@@ -40,7 +40,7 @@
 
 --- a/Makefile
 +++ b/Makefile
-@@ -767,6 +767,9 @@ KBUILD_CFLAGS   += $(call cc-option,-Wer
+@@ -768,6 +768,9 @@ KBUILD_CFLAGS   += $(call cc-option,-Wer
  # Prohibit date/time macros, which would make the build non-deterministic
  KBUILD_CFLAGS   += $(call cc-option,-Werror=date-time)
  
diff --git a/patches/arm-lazy-preempt-correct-resched-condition.patch b/patches/arm-lazy-preempt-correct-resched-condition.patch
new file mode 100644
index 0000000..b7f4f1a
--- /dev/null
+++ b/patches/arm-lazy-preempt-correct-resched-condition.patch
@@ -0,0 +1,31 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 24 May 2016 12:56:38 +0200
+Subject: [PATCH] arm: lazy preempt: correct resched condition
+
+If we get out of preempt_schedule_irq() then we check for NEED_RESCHED
+and call the former function again if set because the preemption counter
+has to be zero at this point.
+However the counter for lazy-preempt might not be zero therefore we have
+to check the counter before looking at the need_resched_lazy flag.
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm/kernel/entry-armv.S |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -244,7 +244,11 @@ ENDPROC(__irq_svc)
+ 	bne	1b
+ 	tst	r0, #_TIF_NEED_RESCHED_LAZY
+ 	reteq	r8				@ go again
+-	b	1b
++	ldr	r0, [tsk, #TI_PREEMPT_LAZY]	@ get preempt lazy count
++	teq	r0, #0				@ if preempt lazy count != 0
++	beq	1b
++	ret	r8				@ go again
++
+ #endif
+ 
+ __und_fault:
diff --git a/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch b/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
new file mode 100644
index 0000000..5d5aea3
--- /dev/null
+++ b/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
@@ -0,0 +1,41 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 19 May 2016 17:45:27 +0200
+Subject: [PATCH] kernel/printk: Don't try to print from IRQ/NMI region
+
+On -RT we try to acquire sleeping locks which might lead to warnings
+from lockdep or a warn_on() from spin_try_lock() (which is a rtmutex on
+RT).
+We don't print in general from an IRQ-off region so we should not try
+this via console_unblank() / bust_spinlocks() as well.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c |   10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1502,6 +1502,11 @@ static void call_console_drivers(int lev
+ 	if (!console_drivers)
+ 		return;
+ 
++	if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) {
++		if (in_irq() || in_nmi())
++			return;
++	}
++
+ 	migrate_disable();
+ 	for_each_console(con) {
+ 		if (exclusive_console && con != exclusive_console)
+@@ -2439,6 +2444,11 @@ void console_unblank(void)
+ {
+ 	struct console *c;
+ 
++	if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) {
++		if (in_irq() || in_nmi())
++			return;
++	}
++
+ 	/*
+ 	 * console_unblank can no longer be called in interrupt context unless
+ 	 * oops_in_progress is set to 1..
diff --git a/patches/kernel-rtmutex-only-warn-once-on-a-try-lock-from-bad.patch b/patches/kernel-rtmutex-only-warn-once-on-a-try-lock-from-bad.patch
new file mode 100644
index 0000000..f279849
--- /dev/null
+++ b/patches/kernel-rtmutex-only-warn-once-on-a-try-lock-from-bad.patch
@@ -0,0 +1,26 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 19 May 2016 17:12:34 +0200
+Subject: [PATCH] kernel/rtmutex: only warn once on a try lock from bad
+ context
+
+One warning should be enough to get one motivated to fix this. It is
+possible that this happens more than once and so starts flooding the
+output. Later the prints will be suppressed so we only get half of it.
+Depending on the console system used it might not be helpful.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/locking/rtmutex.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1479,7 +1479,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
+ int __sched rt_mutex_trylock(struct rt_mutex *lock)
+ {
+ #ifdef CONFIG_PREEMPT_RT_FULL
+-	if (WARN_ON(in_irq() || in_nmi()))
++	if (WARN_ON_ONCE(in_irq() || in_nmi()))
+ #else
+ 	if (WARN_ON(in_irq() || in_nmi() || in_serving_softirq()))
+ #endif
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 4cba19d..3740e683 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -1,4 +1,4 @@
-Subject: v4.4.12-rt18
+Subject: v4.4.12-rt19
 From: Thomas Gleixner <tglx@linutronix.de>
 Date: Fri, 08 Jul 2011 20:25:16 +0200
 
@@ -10,4 +10,4 @@
 --- /dev/null
 +++ b/localversion-rt
 @@ -0,0 +1 @@
-+-rt18
++-rt19
diff --git a/patches/mm-memcontrol-do_not_disable_irq.patch b/patches/mm-memcontrol-do_not_disable_irq.patch
index 9d7f3cc..a54b3e9 100644
--- a/patches/mm-memcontrol-do_not_disable_irq.patch
+++ b/patches/mm-memcontrol-do_not_disable_irq.patch
@@ -25,7 +25,7 @@
  extern void lru_cache_add_file(struct page *page);
 --- a/mm/compaction.c
 +++ b/mm/compaction.c
-@@ -1443,10 +1443,12 @@ static int compact_zone(struct zone *zon
+@@ -1435,10 +1435,12 @@ static int compact_zone(struct zone *zon
  				cc->migrate_pfn & ~((1UL << cc->order) - 1);
  
  			if (cc->last_migrated_pfn < current_block_start) {
diff --git a/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch b/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
index b90553d..7eaf626 100644
--- a/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
+++ b/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
@@ -46,7 +46,7 @@
 
 --- a/net/sched/sch_generic.c
 +++ b/net/sched/sch_generic.c
-@@ -890,7 +890,7 @@ void dev_deactivate_many(struct list_hea
+@@ -893,7 +893,7 @@ void dev_deactivate_many(struct list_hea
  	/* Wait for outstanding qdisc_run calls. */
  	list_for_each_entry(dev, head, close_list)
  		while (some_qdisc_is_busy(dev))
diff --git a/patches/perf-make-swevent-hrtimer-irqsafe.patch b/patches/perf-make-swevent-hrtimer-irqsafe.patch
index 7963bd8..f7d5541 100644
--- a/patches/perf-make-swevent-hrtimer-irqsafe.patch
+++ b/patches/perf-make-swevent-hrtimer-irqsafe.patch
@@ -58,7 +58,7 @@
 
 --- a/kernel/events/core.c
 +++ b/kernel/events/core.c
-@@ -7228,6 +7228,7 @@ static void perf_swevent_init_hrtimer(st
+@@ -7219,6 +7219,7 @@ static void perf_swevent_init_hrtimer(st
  
  	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  	hwc->hrtimer.function = perf_swevent_hrtimer;
diff --git a/patches/perf-x86-intel-rapl-Make-PMU-lock-raw.patch b/patches/perf-x86-intel-rapl-Make-PMU-lock-raw.patch
new file mode 100644
index 0000000..759cf25
--- /dev/null
+++ b/patches/perf-x86-intel-rapl-Make-PMU-lock-raw.patch
@@ -0,0 +1,114 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 22 Feb 2016 22:19:25 +0000
+Subject: [PATCH] perf/x86/intel/rapl: Make PMU lock raw
+
+Upstream commit: a208749c6426 ("perf/x86/intel/rapl: Make PMU lock raw")
+
+This lock is taken in hard interrupt context even on Preempt-RT. Make it raw
+so RT does not have to patch it.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Andi Kleen <andi.kleen@intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Harish Chegondi <harish.chegondi@intel.com>
+Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Kan Liang <kan.liang@intel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Cc: linux-kernel@vger.kernel.org
+Cc: stable-rt@vger.kernel.org
+Link: http://lkml.kernel.org/r/20160222221012.669411833@linutronix.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/kernel/cpu/perf_event_intel_rapl.c |   20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+@@ -117,7 +117,7 @@ static struct perf_pmu_events_attr event
+ };
+ 
+ struct rapl_pmu {
+-	spinlock_t	 lock;
++	raw_spinlock_t	 lock;
+ 	int		 n_active; /* number of active events */
+ 	struct list_head active_list;
+ 	struct pmu	 *pmu; /* pointer to rapl_pmu_class */
+@@ -220,13 +220,13 @@ static enum hrtimer_restart rapl_hrtimer
+ 	if (!pmu->n_active)
+ 		return HRTIMER_NORESTART;
+ 
+-	spin_lock_irqsave(&pmu->lock, flags);
++	raw_spin_lock_irqsave(&pmu->lock, flags);
+ 
+ 	list_for_each_entry(event, &pmu->active_list, active_entry) {
+ 		rapl_event_update(event);
+ 	}
+ 
+-	spin_unlock_irqrestore(&pmu->lock, flags);
++	raw_spin_unlock_irqrestore(&pmu->lock, flags);
+ 
+ 	hrtimer_forward_now(hrtimer, pmu->timer_interval);
+ 
+@@ -263,9 +263,9 @@ static void rapl_pmu_event_start(struct
+ 	struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(&pmu->lock, flags);
++	raw_spin_lock_irqsave(&pmu->lock, flags);
+ 	__rapl_pmu_event_start(pmu, event);
+-	spin_unlock_irqrestore(&pmu->lock, flags);
++	raw_spin_unlock_irqrestore(&pmu->lock, flags);
+ }
+ 
+ static void rapl_pmu_event_stop(struct perf_event *event, int mode)
+@@ -274,7 +274,7 @@ static void rapl_pmu_event_stop(struct p
+ 	struct hw_perf_event *hwc = &event->hw;
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(&pmu->lock, flags);
++	raw_spin_lock_irqsave(&pmu->lock, flags);
+ 
+ 	/* mark event as deactivated and stopped */
+ 	if (!(hwc->state & PERF_HES_STOPPED)) {
+@@ -299,7 +299,7 @@ static void rapl_pmu_event_stop(struct p
+ 		hwc->state |= PERF_HES_UPTODATE;
+ 	}
+ 
+-	spin_unlock_irqrestore(&pmu->lock, flags);
++	raw_spin_unlock_irqrestore(&pmu->lock, flags);
+ }
+ 
+ static int rapl_pmu_event_add(struct perf_event *event, int mode)
+@@ -308,14 +308,14 @@ static int rapl_pmu_event_add(struct per
+ 	struct hw_perf_event *hwc = &event->hw;
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(&pmu->lock, flags);
++	raw_spin_lock_irqsave(&pmu->lock, flags);
+ 
+ 	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+ 
+ 	if (mode & PERF_EF_START)
+ 		__rapl_pmu_event_start(pmu, event);
+ 
+-	spin_unlock_irqrestore(&pmu->lock, flags);
++	raw_spin_unlock_irqrestore(&pmu->lock, flags);
+ 
+ 	return 0;
+ }
+@@ -603,7 +603,7 @@ static int rapl_cpu_prepare(int cpu)
+ 	pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
+ 	if (!pmu)
+ 		return -1;
+-	spin_lock_init(&pmu->lock);
++	raw_spin_lock_init(&pmu->lock);
+ 
+ 	INIT_LIST_HEAD(&pmu->active_list);
+ 
diff --git a/patches/sched-preempt-Fix-preempt_count-manipulations.patch b/patches/sched-preempt-Fix-preempt_count-manipulations.patch
new file mode 100644
index 0000000..c46c098
--- /dev/null
+++ b/patches/sched-preempt-Fix-preempt_count-manipulations.patch
@@ -0,0 +1,51 @@
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 16 May 2016 15:01:11 +0200
+Subject: [PATCH] sched,preempt: Fix preempt_count manipulations
+
+Vikram reported that his ARM64 compiler managed to 'optimize' away the
+preempt_count manipulations in code like:
+
+	preempt_enable_no_resched();
+	put_user();
+	preempt_disable();
+
+Irrespective of that fact that that is horrible code that should be
+fixed for many reasons, it does highlight a deficiency in the generic
+preempt_count manipulators. As it is never right to combine/elide
+preempt_count manipulations like this.
+
+Therefore sprinkle some volatile in the two generic accessors to
+ensure the compiler is aware of the fact that the preempt_count is
+observed outside of the regular program-order view and thus cannot be
+optimized away like this.
+
+x86; the only arch not using the generic code is not affected as we
+do all this in asm in order to use the segment base per-cpu stuff.
+
+Cc: stable@vger.kernel.org
+Cc: stable-rt@vger.kernel.org
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Fixes: a787870924db ("sched, arch: Create asm/preempt.h")
+Reported-by: Vikram Mulukutla <markivx@codeaurora.org>
+Tested-by: Vikram Mulukutla <markivx@codeaurora.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/asm-generic/preempt.h |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/include/asm-generic/preempt.h
++++ b/include/asm-generic/preempt.h
+@@ -7,10 +7,10 @@
+ 
+ static __always_inline int preempt_count(void)
+ {
+-	return current_thread_info()->preempt_count;
++	return READ_ONCE(current_thread_info()->preempt_count);
+ }
+ 
+-static __always_inline int *preempt_count_ptr(void)
++static __always_inline volatile int *preempt_count_ptr(void)
+ {
+ 	return &current_thread_info()->preempt_count;
+ }
diff --git a/patches/series b/patches/series
index d754ddd..156d174 100644
--- a/patches/series
+++ b/patches/series
@@ -13,6 +13,7 @@
 panic-change-nmi_panic-from-macro-to-function.patch
 sched-cputime-Clarify-vtime-symbols-and-document-the.patch
 sched-cputime-Convert-vtime_seqlock-to-seqcount.patch
+perf-x86-intel-rapl-Make-PMU-lock-raw.patch
 
 # AT91 queue in ARM-SOC
 0001-clk-at91-make-use-of-syscon-to-share-PMC-registers-i.patch
@@ -49,6 +50,7 @@
 sc16is7xx_Drop_bogus_use_of_IRQF_ONESHOT.patch
 f2fs_Mutex_cant_be_used_by_down_write_nest_lock().patch
 ARM-imx-always-use-TWD-on-IMX6Q.patch
+sched-preempt-Fix-preempt_count-manipulations.patch
 
 # Those two should vanish soon (not use PIT during bootup)
 at91_dont_enable_disable_clock.patch
@@ -345,6 +347,7 @@
 irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
 softirq-split-timer-softirqs-out-of-ksoftirqd.patch
 rtmutex-trylock-is-okay-on-RT.patch
+kernel-rtmutex-only-warn-once-on-a-try-lock-from-bad.patch
 
 # RAID5
 md-raid5-percpu-handling-rt-aware.patch
@@ -481,6 +484,7 @@
 
 # CONSOLE. NEEDS more thought !!!
 printk-rt-aware.patch
+kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
 HACK-printk-drop-the-logbuf_lock-more-often.patch
 
 # POWERC
@@ -588,6 +592,7 @@
 preempt-lazy-check-preempt_schedule.patch
 x86-preempt-lazy.patch
 arm-preempt-lazy-support.patch
+arm-lazy-preempt-correct-resched-condition.patch
 powerpc-preempt-lazy-support.patch
 arch-arm64-Add-lazy-preempt-support.patch
 arm-arm64-lazy-preempt-add-TIF_NEED_RESCHED_LAZY-to-.patch
diff --git a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index bf6bc95..7ae4985 100644
--- a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -27,7 +27,7 @@
  	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
 --- a/include/linux/signal.h
 +++ b/include/linux/signal.h
-@@ -218,6 +218,7 @@ static inline void init_sigpending(struc
+@@ -233,6 +233,7 @@ static inline void init_sigpending(struc
  }
  
  extern void flush_sigqueue(struct sigpending *queue);
diff --git a/patches/workqueue-use-rcu.patch b/patches/workqueue-use-rcu.patch
index 8a70b6c..7b580a1 100644
--- a/patches/workqueue-use-rcu.patch
+++ b/patches/workqueue-use-rcu.patch
@@ -312,7 +312,7 @@
  }
  
  /*
-@@ -4711,16 +4719,16 @@ bool freeze_workqueues_busy(void)
+@@ -4722,16 +4730,16 @@ bool freeze_workqueues_busy(void)
  		 * nr_active is monotonically decreasing.  It's safe
  		 * to peek without lock.
  		 */
@@ -332,7 +332,7 @@
  	}
  out_unlock:
  	mutex_unlock(&wq_pool_mutex);
-@@ -4910,7 +4918,8 @@ static ssize_t wq_pool_ids_show(struct d
+@@ -4921,7 +4929,8 @@ static ssize_t wq_pool_ids_show(struct d
  	const char *delim = "";
  	int node, written = 0;
  
@@ -342,7 +342,7 @@
  	for_each_node(node) {
  		written += scnprintf(buf + written, PAGE_SIZE - written,
  				     "%s%d:%d", delim, node,
-@@ -4918,7 +4927,8 @@ static ssize_t wq_pool_ids_show(struct d
+@@ -4929,7 +4938,8 @@ static ssize_t wq_pool_ids_show(struct d
  		delim = " ";
  	}
  	written += scnprintf(buf + written, PAGE_SIZE - written, "\n");