patches-3.8.4-rt2.tar.xz

md5sum:
5f2ea9060211d969ee2b18c397b5c610  patches-3.8.4-rt2.tar.xz

Announce:
 ----------------
 Dear RT Folks,

 I'm pleased to announce the 3.8.4-rt2 release.

 changes since v3.8.4-rt1:
 - build fix for i915 (reported by "Luis Claudio R. Goncalves")
 - build fix for fscache (reported by tglx)
 - build fix for !RT (kernel/softirq.c did not compile)
 - per-cpu rwsem fixed for RT (required only by uprobes so far)
 - slub: delay the execution of the ->ctor() hook for newly created
   objects. This lowers the worst-case latencies.

 Known issues:

       - SLxB is broken on PowerPC.

 The delta patch against v3.8.4-rt1 is appended below and can be found
 here:

   https://www.kernel.org/pub/linux/kernel/projects/rt/3.8/incr/patch-3.8.4-rt1-rt2.patch.xz

 The RT patch against 3.8.4 can be found here:

   https://www.kernel.org/pub/linux/kernel/projects/rt/3.8/patch-3.8.4-rt2.patch.xz

 The split quilt queue is available at:

   https://www.kernel.org/pub/linux/kernel/projects/rt/3.8/patches-3.8.4-rt2.tar.xz

 Sebastian
 ----------------

http://marc.info/?l=linux-rt-users&m=136432904720493&w=2

Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
diff --git a/patches/fscache_compile_fix.patch b/patches/fscache_compile_fix.patch
new file mode 100644
index 0000000..7cf833c
--- /dev/null
+++ b/patches/fscache_compile_fix.patch
@@ -0,0 +1,34 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: fs/fscache: don't merge spin_lock() into the while() condition
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+--- a/fs/fscache/page.c
++++ b/fs/fscache/page.c
+@@ -796,11 +796,13 @@ void fscache_invalidate_writes(struct fs
+ 
+ 	_enter("");
+ 
+-	while (spin_lock(&cookie->stores_lock),
+-	       n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
+-					      ARRAY_SIZE(results),
+-					      FSCACHE_COOKIE_PENDING_TAG),
+-	       n > 0) {
++	do {
++		spin_lock(&cookie->stores_lock);
++		n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
++				ARRAY_SIZE(results),
++				FSCACHE_COOKIE_PENDING_TAG);
++		if (n == 0)
++			break;
+ 		for (i = n - 1; i >= 0; i--) {
+ 			page = results[i];
+ 			radix_tree_delete(&cookie->stores, page->index);
+@@ -810,7 +812,7 @@ void fscache_invalidate_writes(struct fs
+ 
+ 		for (i = n - 1; i >= 0; i--)
+ 			page_cache_release(results[i]);
+-	}
++	} while (1);
+ 
+ 	spin_unlock(&cookie->stores_lock);
+ 	_leave("");
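
For context on why the loop changed shape instead of just keeping the old
condition: on RT, spin_lock() is a statement-like macro (a do/while block
that also disables migration), not an expression, so it can no longer sit
inside the comma expression of the while() condition. A minimal userspace
sketch of the same restructuring; lock(), unlock() and lookup() are
hypothetical stand-ins for the locking and the radix-tree lookup:

    #include <stdio.h>

    static int remaining = 3;

    static void lock(void)   { /* spin_lock(&cookie->stores_lock) */ }
    static void unlock(void) { /* spin_unlock(&cookie->stores_lock) */ }
    static int  lookup(void) { return remaining > 0 ? remaining-- : 0; }

    int main(void)
    {
        int n;

        /* Before: while (lock(), n = lookup(), n > 0) { ... }
         * That needs lock() to be an expression, which it is not on RT.
         */
        do {
            lock();
            n = lookup();
            if (n == 0)
                break;
            printf("processing batch of %d\n", n);
            unlock();   /* dropped inside the loop, as in the original */
        } while (1);

        unlock();       /* an empty lookup leaves the lock held, matching
                         * the spin_unlock() after the loop in
                         * fscache_invalidate_writes() */
        return 0;
    }
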
diff --git a/patches/i915_compile_fix.patch b/patches/i915_compile_fix.patch
new file mode 100644
index 0000000..0b20079
--- /dev/null
+++ b/patches/i915_compile_fix.patch
@@ -0,0 +1,48 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: gpu/i915: don't open code these things
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -91,7 +91,6 @@ i915_gem_wait_for_error(struct drm_devic
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct completion *x = &dev_priv->error_completion;
+-	unsigned long flags;
+ 	int ret;
+ 
+ 	if (!atomic_read(&dev_priv->mm.wedged))
+@@ -116,9 +115,7 @@ i915_gem_wait_for_error(struct drm_devic
+ 		 * end up waiting upon a subsequent completion event that
+ 		 * will never happen.
+ 		 */
+-		spin_lock_irqsave(&x->wait.lock, flags);
+-		x->done++;
+-		spin_unlock_irqrestore(&x->wait.lock, flags);
++		complete(x);
+ 	}
+ 	return 0;
+ }
+@@ -946,12 +943,9 @@ i915_gem_check_wedge(struct drm_i915_pri
+ 	if (atomic_read(&dev_priv->mm.wedged)) {
+ 		struct completion *x = &dev_priv->error_completion;
+ 		bool recovery_complete;
+-		unsigned long flags;
+ 
+ 		/* Give the error handler a chance to run. */
+-		spin_lock_irqsave(&x->wait.lock, flags);
+-		recovery_complete = x->done > 0;
+-		spin_unlock_irqrestore(&x->wait.lock, flags);
++		recovery_complete = completion_done(x);
+ 
+ 		/* Non-interruptible callers can't handle -EAGAIN, hence return
+ 		 * -EIO unconditionally for these. */
+@@ -4366,7 +4360,7 @@ static bool mutex_is_locked_by(struct mu
+ 	if (!mutex_is_locked(mutex))
+ 		return false;
+ 
+-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
++#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE)
+ 	return mutex->owner == task;
+ #else
+ 	/* Since UP may be pre-empted, we cannot assume that we own the lock */
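
This one is a build fix rather than a pure cleanup: the series reworks
completions (wait-simple-rework-for-completions.patch in the series file),
after which poking at x->wait.lock directly no longer matches the completion
internals on RT. The public API expresses the same two operations; a sketch
using nothing beyond the stock <linux/completion.h> interface:

    #include <linux/completion.h>

    /* was: spin_lock_irqsave(&x->wait.lock, flags);
     *      x->done++;
     *      spin_unlock_irqrestore(&x->wait.lock, flags);
     */
    static void error_signal(struct completion *x)
    {
        /* complete() also wakes one waiter, which the open-coded
         * increment did not; since the point here is to release
         * waiters, that should be harmless.
         */
        complete(x);
    }

    /* was: spin_lock_irqsave(&x->wait.lock, flags);
     *      recovery_complete = x->done > 0;
     *      spin_unlock_irqrestore(&x->wait.lock, flags);
     */
    static bool error_recovery_complete(struct completion *x)
    {
        return completion_done(x);  /* true iff ->done is non-zero */
    }
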
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 56edefb..2f31c4f 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -12,4 +12,4 @@
 --- /dev/null
 +++ b/localversion-rt
 @@ -0,0 +1 @@
-+-rt1
++-rt2
diff --git a/patches/percpu-rwsem-compilefix.patch b/patches/percpu-rwsem-compilefix.patch
new file mode 100644
index 0000000..d6f1fd8
--- /dev/null
+++ b/patches/percpu-rwsem-compilefix.patch
@@ -0,0 +1,19 @@
+---
+ lib/percpu-rwsem.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/lib/percpu-rwsem.c
++++ b/lib/percpu-rwsem.c
+@@ -84,8 +84,12 @@ void percpu_down_read(struct percpu_rw_s
+ 
+ 	down_read(&brw->rw_sem);
+ 	atomic_inc(&brw->slow_read_ctr);
++#ifdef CONFIG_PREEMPT_RT_FULL
++	up_read(&brw->rw_sem);
++#else
+ 	/* avoid up_read()->rwsem_release() */
+ 	__up_read(&brw->rw_sem);
++#endif
+ }
+ 
+ void percpu_up_read(struct percpu_rw_semaphore *brw)
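
Why __up_read() needs an #ifdef at all: with PREEMPT_RT_FULL the whole
rw_semaphore is replaced by an rtmutex-based wrapper (see the
rwsem-add-rt-variant.patch hunks below), roughly this shape:

    /* sketch of the RT rw_semaphore from include/linux/rwsem_rt.h */
    struct rw_semaphore {
        struct rt_mutex     lock;
        int                 read_depth;
    #ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map  dep_map;
    #endif
    };

There is no __up_read() fast-path helper in that world, so the
lockdep-avoiding shortcut is only available on !RT; on RT, plain up_read()
with its rwsem_release() annotation is the only way to drop the lock.
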
diff --git a/patches/rt-add-rt-locks.patch b/patches/rt-add-rt-locks.patch
index 072cd99..1cad15e 100644
--- a/patches/rt-add-rt-locks.patch
+++ b/patches/rt-add-rt-locks.patch
@@ -790,7 +790,7 @@
 +}
 +EXPORT_SYMBOL(rt_down_read_nested);
 +
-+void  __rt_rwsem_init(struct rw_semaphore *rwsem, char *name,
++void  __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
 +			      struct lock_class_key *key)
 +{
 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/patches/rwsem-add-rt-variant.patch b/patches/rwsem-add-rt-variant.patch
index 04e16dc..79c0dfb 100644
--- a/patches/rwsem-add-rt-variant.patch
+++ b/patches/rwsem-add-rt-variant.patch
@@ -5,9 +5,9 @@
 Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 ---
  include/linux/rwsem.h    |    6 ++
- include/linux/rwsem_rt.h |  121 +++++++++++++++++++++++++++++++++++++++++++++++
+ include/linux/rwsem_rt.h |  128 +++++++++++++++++++++++++++++++++++++++++++++++
  lib/Makefile             |    3 +
- 3 files changed, 130 insertions(+)
+ 3 files changed, 137 insertions(+)
 
 --- a/include/linux/rwsem.h
 +++ b/include/linux/rwsem.h
@@ -31,7 +31,7 @@
  #endif /* _LINUX_RWSEM_H */
 --- /dev/null
 +++ b/include/linux/rwsem_rt.h
-@@ -0,0 +1,121 @@
+@@ -0,0 +1,128 @@
 +#ifndef _LINUX_RWSEM_RT_H
 +#define _LINUX_RWSEM_RT_H
 +
@@ -67,15 +67,22 @@
 +#define DECLARE_RWSEM(lockname) \
 +	struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
 +
-+extern void  __rt_rwsem_init(struct rw_semaphore *rwsem, char *name,
++extern void  __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
 +				     struct lock_class_key *key);
 +
++#define __rt_init_rwsem(sem, name, key)			\
++	do {						\
++		rt_mutex_init(&(sem)->lock);		\
++		__rt_rwsem_init((sem), (name), (key));\
++	} while (0)
++
++#define __init_rwsem(sem, name, key) __rt_init_rwsem(sem, name, key)
++
 +# define rt_init_rwsem(sem)				\
 +do {							\
 +	static struct lock_class_key __key;		\
 +							\
-+	rt_mutex_init(&(sem)->lock);			\
-+	__rt_rwsem_init((sem), #sem, &__key);		\
++	__rt_init_rwsem((sem), #sem, &__key);		\
 +} while (0)
 +
 +extern void  rt_down_write(struct rw_semaphore *rwsem);
@@ -155,7 +162,7 @@
 +#endif
 --- a/lib/Makefile
 +++ b/lib/Makefile
-@@ -38,9 +38,12 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o
+@@ -38,8 +38,11 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o
  obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
  obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
  obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
@@ -163,8 +170,7 @@
 +ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
  lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
  lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
- lib-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
 +endif
+ lib-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
  
  CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
- obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
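
For reference, why RT now has to provide __init_rwsem() rather than only its
own rt_init_rwsem(): in mainline, init_rwsem() is a thin wrapper that expands
to __init_rwsem(), so any code initializing an rwsem through either spelling
ends up there -- lib/percpu-rwsem.c, pulled in by uprobes, being the case at
hand. The mainline wrapper, for comparison:

    /* include/linux/rwsem.h (mainline) */
    #define init_rwsem(sem)                                 \
    do {                                                    \
        static struct lock_class_key __key;                 \
                                                            \
        __init_rwsem((sem), #sem, &__key);                  \
    } while (0)

With the new __rt_init_rwsem()/__init_rwsem() pair above, both spellings
funnel into __rt_rwsem_init() on RT. Note also the Makefile hunk:
percpu-rwsem.o moves out of the !RT-only block so that it is built on RT in
the first place.
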
diff --git a/patches/series b/patches/series
index 068b68b..b72d275 100644
--- a/patches/series
+++ b/patches/series
@@ -398,6 +398,7 @@
 rt-add-rt-to-mutex-headers.patch
 rwsem-add-rt-variant.patch
 rt-add-rt-locks.patch
+percpu-rwsem-compilefix.patch
 
 # RTMUTEX Fallout
 tasklist-lock-fix-section-conflict.patch
@@ -580,6 +581,8 @@
 i2c-omap-drop-the-lock-hard-irq-context.patch
 spi-omap-mcspi-check-condition-also-after-timeout.patch
 HACK-printk-drop-the-logbuf_lock-more-often.patch
+fscache_compile_fix.patch
+i915_compile_fix.patch
 
 # Enable full RT
 powerpc-preempt-lazy-support.patch
@@ -594,6 +597,7 @@
 mm-bounce-local-irq-save-nort.patch
 mmci-remove-bogus-irq-save.patch
 slub-enable-irqs-for-no-wait.patch
+slub_delay_ctor_on_rt.patch
 idle-state.patch
 might-sleep-check-for-idle.patch
 wait-simple-rework-for-completions.patch
diff --git a/patches/slub_delay_ctor_on_rt.patch b/patches/slub_delay_ctor_on_rt.patch
new file mode 100644
index 0000000..1b6a420
--- /dev/null
+++ b/patches/slub_delay_ctor_on_rt.patch
@@ -0,0 +1,31 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: slub: delay ctor until the object is requested
+
+It seems that allocating a large batch of objects causes latency spikes on
+ARM, since that code cannot be preempted.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1346,8 +1346,10 @@ static void setup_object(struct kmem_cac
+ 				void *object)
+ {
+ 	setup_object_debug(s, page, object);
++#ifndef CONFIG_PREEMPT_RT_FULL
+ 	if (unlikely(s->ctor))
+ 		s->ctor(object);
++#endif
+ }
+ 
+ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
+@@ -2437,6 +2439,10 @@ redo:
+ 
+ 	if (unlikely(gfpflags & __GFP_ZERO) && object)
+ 		memset(object, 0, s->object_size);
++#ifdef CONFIG_PREEMPT_RT_FULL
++	if (unlikely(s->ctor) && object)
++		s->ctor(object);
++#endif
+ 
+ 	slab_post_alloc_hook(s, gfpflags, object);
+ 
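
What the delay buys: without it, the constructor runs from setup_object()
for every object on a freshly allocated slab page, inside a section that
cannot be preempted; with CONFIG_PREEMPT_RT_FULL it moves to the tail of
slab_alloc(), so it runs once, only for the object actually handed out. A
hypothetical cache to mark the two call sites (struct thing and thing_ctor
are made up for illustration):

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/slab.h>

    struct thing {
        int x;
    };

    static void thing_ctor(void *obj)
    {
        struct thing *t = obj;

        t->x = 42;
    }

    static struct kmem_cache *thing_cache;

    static int __init thing_init(void)
    {
        struct thing *t;

        thing_cache = kmem_cache_create("thing", sizeof(struct thing),
                                        0, 0, thing_ctor);
        if (!thing_cache)
            return -ENOMEM;

        /* !RT: thing_ctor() already ran for every object on the new
         *      slab page, from setup_object() in new_slab().
         *  RT: thing_ctor() runs inside the allocation below, for the
         *      returned object only.
         */
        t = kmem_cache_alloc(thing_cache, GFP_KERNEL);
        if (t)
            kmem_cache_free(thing_cache, t);
        return 0;
    }
    module_init(thing_init);
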
diff --git a/patches/softirq-split-locks.patch b/patches/softirq-split-locks.patch
index 905f673..3947450 100644
--- a/patches/softirq-split-locks.patch
+++ b/patches/softirq-split-locks.patch
@@ -40,15 +40,20 @@
  # if defined CONFIG_HIGHMEM || defined CONFIG_X86_32
 --- a/kernel/softirq.c
 +++ b/kernel/softirq.c
-@@ -162,6 +162,7 @@ static void handle_softirq(unsigned int
+@@ -162,6 +162,12 @@ static void handle_softirq(unsigned int
  		rcu_bh_qs(cpu);
  }
  
 +#ifndef CONFIG_PREEMPT_RT_FULL
++static inline int ksoftirqd_softirq_pending(void)
++{
++	return local_softirq_pending();
++}
++
  static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs)
  {
  	unsigned int vec_nr;
-@@ -174,7 +175,19 @@ static void handle_pending_softirqs(u32
+@@ -174,7 +180,19 @@ static void handle_pending_softirqs(u32
  	local_irq_disable();
  }
  
@@ -69,7 +74,7 @@
  /*
   * preempt_count and SOFTIRQ_OFFSET usage:
   * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
-@@ -367,28 +380,117 @@ asmlinkage void do_softirq(void)
+@@ -367,6 +385,32 @@ asmlinkage void do_softirq(void)
  
  #endif
  
@@ -102,13 +107,7 @@
  static inline void local_bh_disable_nort(void) { local_bh_disable(); }
  static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
  static void ksoftirqd_set_sched_params(unsigned int cpu) { }
- static void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) { }
- 
-+static inline int ksoftirqd_softirq_pending(void)
-+{
-+	return local_softirq_pending();
-+}
-+
+@@ -375,20 +419,78 @@ static void ksoftirqd_clr_sched_params(u
  #else /* !PREEMPT_RT_FULL */
  
  /*