patches-3.6.4-rt12.tar.xz
md5sum:
d01e679782006f198d963499a23cad29 patches-3.6.4-rt12.tar.xz
No announce (but see the rt14 announce for a retroactive
perspective on rt12).
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
diff --git a/arm-preempt-lazy-support.patch b/arm-preempt-lazy-support.patch
new file mode 100644
index 0000000..669ea6c
--- /dev/null
+++ b/arm-preempt-lazy-support.patch
@@ -0,0 +1,105 @@
+Subject: arm-preempt-lazy-support.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 31 Oct 2012 12:04:11 +0100
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/arm/Kconfig | 1 +
+ arch/arm/include/asm/thread_info.h | 3 +++
+ arch/arm/kernel/asm-offsets.c | 1 +
+ arch/arm/kernel/entry-armv.S | 8 ++++++++
+ arch/arm/kernel/signal.c | 3 ++-
+ 5 files changed, 15 insertions(+), 1 deletion(-)
+
+Index: linux-stable/arch/arm/Kconfig
+===================================================================
+--- linux-stable.orig/arch/arm/Kconfig
++++ linux-stable/arch/arm/Kconfig
+@@ -50,6 +50,7 @@ config ARM
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
+ select DCACHE_WORD_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && !CPU_BIG_ENDIAN
++ select HAVE_PREEMPT_LAZY
+ help
+ The ARM series is a line of low-power-consumption RISC chip designs
+ licensed by ARM Ltd and targeted at embedded applications and
+Index: linux-stable/arch/arm/include/asm/thread_info.h
+===================================================================
+--- linux-stable.orig/arch/arm/include/asm/thread_info.h
++++ linux-stable/arch/arm/include/asm/thread_info.h
+@@ -50,6 +50,7 @@ struct cpu_context_save {
+ struct thread_info {
+ unsigned long flags; /* low level flags */
+ int preempt_count; /* 0 => preemptable, <0 => bug */
++ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
+ mm_segment_t addr_limit; /* address limit */
+ struct task_struct *task; /* main task structure */
+ struct exec_domain *exec_domain; /* execution domain */
+@@ -146,6 +147,7 @@ extern int vfp_restore_user_hwstate(stru
+ #define TIF_SIGPENDING 0
+ #define TIF_NEED_RESCHED 1
+ #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
++#define TIF_NEED_RESCHED_LAZY 3
+ #define TIF_SYSCALL_TRACE 8
+ #define TIF_SYSCALL_AUDIT 9
+ #define TIF_POLLING_NRFLAG 16
+@@ -158,6 +160,7 @@ extern int vfp_restore_user_hwstate(stru
+ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+ #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
+ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+ #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
+Index: linux-stable/arch/arm/kernel/asm-offsets.c
+===================================================================
+--- linux-stable.orig/arch/arm/kernel/asm-offsets.c
++++ linux-stable/arch/arm/kernel/asm-offsets.c
+@@ -50,6 +50,7 @@ int main(void)
+ BLANK();
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
++ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
+ DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
+Index: linux-stable/arch/arm/kernel/entry-armv.S
+===================================================================
+--- linux-stable.orig/arch/arm/kernel/entry-armv.S
++++ linux-stable/arch/arm/kernel/entry-armv.S
+@@ -221,6 +221,12 @@ __irq_svc:
+ movne r0, #0 @ force flags to 0
+ tst r0, #_TIF_NEED_RESCHED
+ blne svc_preempt
++ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
++ ldr r0, [tsk, #TI_FLAGS] @ get flags
++ teq r8, #0 @ if preempt lazy count != 0
++ movne r0, #0 @ force flags to 0
++ tst r0, #_TIF_NEED_RESCHED_LAZY
++ blne svc_preempt
+ #endif
+
+ #ifdef CONFIG_TRACE_IRQFLAGS
+@@ -240,6 +246,8 @@ svc_preempt:
+ 1: bl preempt_schedule_irq @ irq en/disable is done inside
+ ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
+ tst r0, #_TIF_NEED_RESCHED
++ bne 1b
++ tst r0, #_TIF_NEED_RESCHED_LAZY
+ moveq pc, r8 @ go again
+ b 1b
+ #endif
+Index: linux-stable/arch/arm/kernel/signal.c
+===================================================================
+--- linux-stable.orig/arch/arm/kernel/signal.c
++++ linux-stable/arch/arm/kernel/signal.c
+@@ -639,7 +639,8 @@ asmlinkage int
+ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
+ {
+ do {
+- if (likely(thread_flags & _TIF_NEED_RESCHED)) {
++ if (likely(thread_flags & (_TIF_NEED_RESCHED |
++ _TIF_NEED_RESCHED_LAZY))) {
+ schedule();
+ } else {
+ if (unlikely(!user_mode(regs)))
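
The two __irq_svc blocks above do the same dance twice: once for the
hard resched flag gated by preempt_count, and once for the lazy flag
gated by preempt_lazy_count. A compilable C sketch of that decision,
using the ARM bit positions from the thread_info.h hunk (the struct
and helper are illustrative, not kernel API):

#include <stdbool.h>

/* Bit positions from the thread_info.h hunk above (ARM). */
#define _TIF_NEED_RESCHED	(1UL << 1)
#define _TIF_NEED_RESCHED_LAZY	(1UL << 3)

/* Illustrative stand-in for the fields __irq_svc reads. */
struct ti_sketch {
	unsigned long flags;
	int preempt_count;	/* TI_PREEMPT */
	int preempt_lazy_count;	/* TI_PREEMPT_LAZY */
};

static bool irq_exit_should_preempt(const struct ti_sketch *ti)
{
	/* teq r8, #0 / movne r0, #0: a non-zero count masks the flag. */
	if (ti->preempt_count == 0 && (ti->flags & _TIF_NEED_RESCHED))
		return true;
	/* Same test again, with the lazy count gating the lazy flag. */
	if (ti->preempt_lazy_count == 0 && (ti->flags & _TIF_NEED_RESCHED_LAZY))
		return true;
	return false;
}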
diff --git a/fix-random-fallout.patch b/fix-random-fallout.patch
new file mode 100644
index 0000000..11051d9
--- /dev/null
+++ b/fix-random-fallout.patch
@@ -0,0 +1,27 @@
+Subject: genirq: Fix 32bit random changes fallout
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 31 Oct 2012 17:06:19 +0100
+
+On 32bit systems pointers are surprisingly 32bit wide. So gcc complains
+correctly about a cast to a different size. Use a cast to unsigned
+long instead, which handles this correctly for both 32 and 64 bit.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable-rt@vger.kernel.org
+---
+ kernel/irq/manage.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-stable/kernel/irq/manage.c
+===================================================================
+--- linux-stable.orig/kernel/irq/manage.c
++++ linux-stable/kernel/irq/manage.c
+@@ -855,7 +855,7 @@ static int irq_thread(void *data)
+ #ifdef CONFIG_PREEMPT_RT_FULL
+ migrate_disable();
+ add_interrupt_randomness(action->irq, 0,
+- desc->random_ip ^ (u64) action);
++ desc->random_ip ^ (unsigned long) action);
+ migrate_enable();
+ #endif
+ wake_threads_waitq(desc);
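
The cast works because unsigned long always matches the pointer width
on the data models Linux supports, while u64 is wider than a pointer
on 32bit. A userspace reproduction of the warning (hypothetical demo,
not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int irq_action;

	/*
	 * On a 32-bit build gcc emits "cast from pointer to integer of
	 * different size" for the commented line:
	 *
	 *	uint64_t bad = (uint64_t)&irq_action;
	 */
	unsigned long ok = (unsigned long)&irq_action;	/* pointer-sized */
	uint64_t widened = ok;	/* widening an integer is warning-free */

	printf("%llu\n", (unsigned long long)widened);
	return 0;
}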
diff --git a/kconfig-preempt-rt-full.patch b/kconfig-preempt-rt-full.patch
index f6d537d..ca7261d 100644
--- a/kconfig-preempt-rt-full.patch
+++ b/kconfig-preempt-rt-full.patch
@@ -23,7 +23,7 @@
===================================================================
--- linux-stable.orig/kernel/Kconfig.preempt
+++ linux-stable/kernel/Kconfig.preempt
-@@ -70,6 +70,13 @@ config PREEMPT_RTB
+@@ -73,6 +73,13 @@ config PREEMPT_RTB
enables changes which are preliminary for the full preemptiple
RT kernel.
diff --git a/localversion.patch b/localversion.patch
index daab3f6..aa55f5b 100644
--- a/localversion.patch
+++ b/localversion.patch
@@ -14,4 +14,4 @@
--- /dev/null
+++ linux-stable/localversion-rt
@@ -0,0 +1 @@
-+-rt11
++-rt12
diff --git a/of-convert-devtree-lock.patch b/of-convert-devtree-lock.patch
index 4d901b5..965e002 100644
--- a/of-convert-devtree-lock.patch
+++ b/of-convert-devtree-lock.patch
@@ -5,9 +5,9 @@
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/sparc/kernel/prom_common.c | 4 -
- drivers/of/base.c | 92 ++++++++++++++++++++++------------------
+ drivers/of/base.c | 96 ++++++++++++++++++++++------------------
include/linux/of.h | 2
- 3 files changed, 55 insertions(+), 43 deletions(-)
+ 3 files changed, 57 insertions(+), 45 deletions(-)
Index: linux-stable/arch/sparc/kernel/prom_common.c
===================================================================
@@ -142,6 +142,24 @@
return next;
}
EXPORT_SYMBOL(of_get_next_child);
+@@ -412,7 +417,7 @@ struct device_node *of_get_next_availabl
+ {
+ struct device_node *next;
+
+- read_lock(&devtree_lock);
++ raw_spin_lock(&devtree_lock);
+ next = prev ? prev->sibling : node->child;
+ for (; next; next = next->sibling) {
+ if (!of_device_is_available(next))
+@@ -421,7 +426,7 @@ struct device_node *of_get_next_availabl
+ break;
+ }
+ of_node_put(prev);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock(&devtree_lock);
+ return next;
+ }
+ EXPORT_SYMBOL(of_get_next_available_child);
@@ -436,14 +441,15 @@ EXPORT_SYMBOL(of_get_next_available_chil
struct device_node *of_find_node_by_path(const char *path)
{
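
For context on the refresh above: under PREEMPT_RT, rwlock_t turns
into a sleeping lock, but devtree_lock is also taken from contexts
that must not sleep, so the patch converts the device tree to a raw
spinlock, which keeps spinning semantics even on RT. The declaration
side of the conversion (not visible in this excerpt) looks roughly
like:

#include <linux/spinlock.h>

/*
 * Before: readers/writers, but a sleeping lock once PREEMPT_RT
 * substitutes rtmutex-based rwlocks:
 *
 *	DEFINE_RWLOCK(devtree_lock);
 *
 * After: a raw spinlock never sleeps, even on PREEMPT_RT.
 */
DEFINE_RAW_SPINLOCK(devtree_lock);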
diff --git a/powerpc-preempt-lazy-support.patch b/powerpc-preempt-lazy-support.patch
new file mode 100644
index 0000000..3bbabc8
--- /dev/null
+++ b/powerpc-preempt-lazy-support.patch
@@ -0,0 +1,179 @@
+Subject: powerpc-preempt-lazy-support.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 01 Nov 2012 10:14:11 +0100
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/powerpc/Kconfig | 1 +
+ arch/powerpc/include/asm/thread_info.h | 7 ++++++-
+ arch/powerpc/kernel/asm-offsets.c | 1 +
+ arch/powerpc/kernel/entry_32.S | 19 +++++++++++++------
+ arch/powerpc/kernel/entry_64.S | 17 +++++++++++------
+ 5 files changed, 32 insertions(+), 13 deletions(-)
+
+Index: linux-stable/arch/powerpc/Kconfig
+===================================================================
+--- linux-stable.orig/arch/powerpc/Kconfig
++++ linux-stable/arch/powerpc/Kconfig
+@@ -140,6 +140,7 @@ config PPC
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
++ select HAVE_PREEMPT_LAZY
+
+ config EARLY_PRINTK
+ bool
+Index: linux-stable/arch/powerpc/include/asm/thread_info.h
+===================================================================
+--- linux-stable.orig/arch/powerpc/include/asm/thread_info.h
++++ linux-stable/arch/powerpc/include/asm/thread_info.h
+@@ -43,6 +43,8 @@ struct thread_info {
+ int cpu; /* cpu we're on */
+ int preempt_count; /* 0 => preemptable,
+ <0 => BUG */
++ int preempt_lazy_count; /* 0 => preemptable,
++ <0 => BUG */
+ struct restart_block restart_block;
+ unsigned long local_flags; /* private flags for thread */
+
+@@ -102,12 +104,14 @@ static inline struct thread_info *curren
+ #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
+ #define TIF_NOERROR 12 /* Force successful syscall return */
+ #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
++#define TIF_NEED_RESCHED_LAZY 14 /* lazy rescheduling necessary */
+ #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
+
+ /* as above, but as bit values */
+ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+ #define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
++#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY)
+ #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
+ #define _TIF_32BIT (1<<TIF_32BIT)
+ #define _TIF_PERFMON_WORK (1<<TIF_PERFMON_WORK)
+@@ -123,8 +127,9 @@ static inline struct thread_info *curren
+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
+
+ #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+- _TIF_NOTIFY_RESUME)
++ _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED_LAZY)
+ #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
++#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
+
+ /* Bits in local_flags */
+ /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
+Index: linux-stable/arch/powerpc/kernel/asm-offsets.c
+===================================================================
+--- linux-stable.orig/arch/powerpc/kernel/asm-offsets.c
++++ linux-stable/arch/powerpc/kernel/asm-offsets.c
+@@ -124,6 +124,7 @@ int main(void)
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
+ DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
++ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+
+Index: linux-stable/arch/powerpc/kernel/entry_32.S
+===================================================================
+--- linux-stable.orig/arch/powerpc/kernel/entry_32.S
++++ linux-stable/arch/powerpc/kernel/entry_32.S
+@@ -843,8 +843,15 @@ resume_kernel:
+ bne restore
+ lwz r0,TI_FLAGS(r9)
+ andi. r0,r0,_TIF_NEED_RESCHED
++ bne+ 1f
++ lwz r0,TI_PREEMPT_LAZY(r9)
++ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
++ bne restore
++ lwz r0,TI_FLAGS(r9)
++ andi. r0,r0,_TIF_NEED_RESCHED_LAZY
+ beq+ restore
+- andi. r0,r3,MSR_EE /* interrupts off? */
++
++1: andi. r0,r3,MSR_EE /* interrupts off? */
+ beq restore /* don't schedule if so */
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ /* Lockdep thinks irqs are enabled, we need to call
+@@ -853,11 +860,11 @@ resume_kernel:
+ */
+ bl trace_hardirqs_off
+ #endif
+-1: bl preempt_schedule_irq
++2: bl preempt_schedule_irq
+ CURRENT_THREAD_INFO(r9, r1)
+ lwz r3,TI_FLAGS(r9)
+- andi. r0,r3,_TIF_NEED_RESCHED
+- bne- 1b
++ andi. r0,r3,_TIF_NEED_RESCHED_MASK
++ bne- 2b
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ /* And now, to properly rebalance the above, we tell lockdep they
+ * are being turned back on, which will happen when we return
+@@ -1180,7 +1187,7 @@ global_dbcr0:
+ #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
+
+ do_work: /* r10 contains MSR_KERNEL here */
+- andi. r0,r9,_TIF_NEED_RESCHED
++ andi. r0,r9,_TIF_NEED_RESCHED_MASK
+ beq do_user_signal
+
+ do_resched: /* r10 contains MSR_KERNEL here */
+@@ -1201,7 +1208,7 @@ recheck:
+ MTMSRD(r10) /* disable interrupts */
+ CURRENT_THREAD_INFO(r9, r1)
+ lwz r9,TI_FLAGS(r9)
+- andi. r0,r9,_TIF_NEED_RESCHED
++ andi. r0,r9,_TIF_NEED_RESCHED_MASK
+ bne- do_resched
+ andi. r0,r9,_TIF_USER_WORK_MASK
+ beq restore_user
+Index: linux-stable/arch/powerpc/kernel/entry_64.S
+===================================================================
+--- linux-stable.orig/arch/powerpc/kernel/entry_64.S
++++ linux-stable/arch/powerpc/kernel/entry_64.S
+@@ -580,7 +580,7 @@ _GLOBAL(ret_from_except_lite)
+ andi. r0,r4,_TIF_USER_WORK_MASK
+ beq restore
+
+- andi. r0,r4,_TIF_NEED_RESCHED
++ andi. r0,r4,_TIF_NEED_RESCHED_MASK
+ beq 1f
+ bl .restore_interrupts
+ bl .schedule
+@@ -595,11 +595,16 @@ _GLOBAL(ret_from_except_lite)
+ resume_kernel:
+ #ifdef CONFIG_PREEMPT
+ /* Check if we need to preempt */
++ lwz r8,TI_PREEMPT(r9)
+ andi. r0,r4,_TIF_NEED_RESCHED
++ bne+ 1f
++
++ andi. r0,r4,_TIF_NEED_RESCHED_LAZY
+ beq+ restore
++ lwz r8,TI_PREEMPT_LAZY(r9)
++
+ /* Check that preempt_count() == 0 and interrupts are enabled */
+- lwz r8,TI_PREEMPT(r9)
+- cmpwi cr1,r8,0
++1: cmpwi cr1,r8,0
+ ld r0,SOFTE(r1)
+ cmpdi r0,0
+ crandc eq,cr1*4+eq,eq
+@@ -610,13 +615,13 @@ resume_kernel:
+ * sure we are soft-disabled first
+ */
+ SOFT_DISABLE_INTS(r3,r4)
+-1: bl .preempt_schedule_irq
++2: bl .preempt_schedule_irq
+
+ /* Re-test flags and eventually loop */
+ CURRENT_THREAD_INFO(r9, r1)
+ ld r4,TI_FLAGS(r9)
+- andi. r0,r4,_TIF_NEED_RESCHED
+- bne 1b
++ andi. r0,r4,_TIF_NEED_RESCHED_MASK
++ bne 2b
+ #endif /* CONFIG_PREEMPT */
+
+ .globl fast_exc_return_irq
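
The entry_64.S hunk is the subtlest of the three: it picks which
counter to load into r8 depending on which flag fired, then falls
into the pre-existing "counter is zero and soft-enabled" test. In C
the control flow reads roughly as below (the helper name and the
_TIF_NEED_RESCHED position are illustrative; TIF_NEED_RESCHED_LAZY ==
14 is from the thread_info.h hunk above):

#include <stdbool.h>

#define _TIF_NEED_RESCHED	(1UL << 2)	/* illustrative */
#define _TIF_NEED_RESCHED_LAZY	(1UL << 14)	/* from the hunk above */

static bool resume_kernel_should_preempt(unsigned long flags,
					 int preempt_count,
					 int preempt_lazy_count,
					 bool soft_enabled)
{
	int gate;

	if (flags & _TIF_NEED_RESCHED)		/* bne+ 1f */
		gate = preempt_count;		/* lwz r8,TI_PREEMPT(r9) */
	else if (flags & _TIF_NEED_RESCHED_LAZY)
		gate = preempt_lazy_count;	/* lwz r8,TI_PREEMPT_LAZY(r9) */
	else
		return false;			/* beq+ restore */

	/* 1: cmpwi cr1,r8,0 / ld r0,SOFTE(r1) / crandc ... */
	return gate == 0 && soft_enabled;
}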
diff --git a/preempt-lazy-support.patch b/preempt-lazy-support.patch
index 937392f..7176edf 100644
--- a/preempt-lazy-support.patch
+++ b/preempt-lazy-support.patch
@@ -52,119 +52,19 @@
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- arch/x86/Kconfig | 1
- arch/x86/include/asm/thread_info.h | 4 ++
- arch/x86/kernel/asm-offsets.c | 1
- arch/x86/kernel/entry_32.S | 8 ++++-
- arch/x86/kernel/entry_64.S | 10 +++++-
- include/linux/ftrace_event.h | 1
- include/linux/preempt.h | 38 +++++++++++++++++++++++++-
- include/linux/sched.h | 51 +++++++++++++++++++++++++++++++---
- kernel/Kconfig.preempt | 3 ++
- kernel/sched/core.c | 54 +++++++++++++++++++++++++++++++++++++
- kernel/sched/fair.c | 16 +++++-----
- kernel/sched/features.h | 2 +
- kernel/sched/sched.h | 9 ++++++
- kernel/trace/trace.c | 41 ++++++++++++++++------------
- kernel/trace/trace.h | 2 +
- kernel/trace/trace_output.c | 13 +++++++-
- 16 files changed, 218 insertions(+), 36 deletions(-)
+ include/linux/ftrace_event.h | 1
+ include/linux/preempt.h | 38 ++++++++++++++++++++++++++-
+ include/linux/sched.h | 51 ++++++++++++++++++++++++++++++++----
+ kernel/Kconfig.preempt | 6 ++++
+ kernel/sched/core.c | 60 ++++++++++++++++++++++++++++++++++++++++++-
+ kernel/sched/fair.c | 16 +++++------
+ kernel/sched/features.h | 4 ++
+ kernel/sched/sched.h | 9 ++++++
+ kernel/trace/trace.c | 41 +++++++++++++++++------------
+ kernel/trace/trace.h | 2 +
+ kernel/trace/trace_output.c | 13 +++++++--
+ 11 files changed, 207 insertions(+), 34 deletions(-)
-Index: linux-stable/arch/x86/Kconfig
-===================================================================
---- linux-stable.orig/arch/x86/Kconfig
-+++ linux-stable/arch/x86/Kconfig
-@@ -97,6 +97,7 @@ config X86
- select KTIME_SCALAR if X86_32
- select GENERIC_STRNCPY_FROM_USER
- select GENERIC_STRNLEN_USER
-+ select HAVE_PREEMPT_LAZY
-
- config INSTRUCTION_DECODER
- def_bool (KPROBES || PERF_EVENTS || UPROBES)
-Index: linux-stable/arch/x86/include/asm/thread_info.h
-===================================================================
---- linux-stable.orig/arch/x86/include/asm/thread_info.h
-+++ linux-stable/arch/x86/include/asm/thread_info.h
-@@ -31,6 +31,8 @@ struct thread_info {
- __u32 cpu; /* current CPU */
- int preempt_count; /* 0 => preemptable,
- <0 => BUG */
-+ int preempt_lazy_count; /* 0 => lazy preemptable,
-+ <0 => BUG */
- mm_segment_t addr_limit;
- struct restart_block restart_block;
- void __user *sysenter_return;
-@@ -83,6 +85,7 @@ struct thread_info {
- #define TIF_SYSCALL_EMU 6 /* syscall emulation active */
- #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
- #define TIF_SECCOMP 8 /* secure computing */
-+#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */
- #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */
- #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
- #define TIF_UPROBE 12 /* breakpointed or singlestepping */
-@@ -108,6 +111,7 @@ struct thread_info {
- #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
- #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
- #define _TIF_SECCOMP (1 << TIF_SECCOMP)
-+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
- #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY)
- #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
- #define _TIF_UPROBE (1 << TIF_UPROBE)
-Index: linux-stable/arch/x86/kernel/asm-offsets.c
-===================================================================
---- linux-stable.orig/arch/x86/kernel/asm-offsets.c
-+++ linux-stable/arch/x86/kernel/asm-offsets.c
-@@ -33,6 +33,7 @@ void common(void) {
- OFFSET(TI_status, thread_info, status);
- OFFSET(TI_addr_limit, thread_info, addr_limit);
- OFFSET(TI_preempt_count, thread_info, preempt_count);
-+ OFFSET(TI_preempt_lazy_count, thread_info, preempt_lazy_count);
-
- BLANK();
- OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
-Index: linux-stable/arch/x86/kernel/entry_32.S
-===================================================================
---- linux-stable.orig/arch/x86/kernel/entry_32.S
-+++ linux-stable/arch/x86/kernel/entry_32.S
-@@ -352,8 +352,14 @@ ENTRY(resume_kernel)
- need_resched:
- movl TI_flags(%ebp), %ecx # need_resched set ?
- testb $_TIF_NEED_RESCHED, %cl
-+ jnz 1f
-+
-+ cmpl $0,TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ?
-+ jnz restore_all
-+ testb $_TIF_NEED_RESCHED_LAZY, %cl
- jz restore_all
-- testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ?
-+
-+1: testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ?
- jz restore_all
- call preempt_schedule_irq
- jmp need_resched
-Index: linux-stable/arch/x86/kernel/entry_64.S
-===================================================================
---- linux-stable.orig/arch/x86/kernel/entry_64.S
-+++ linux-stable/arch/x86/kernel/entry_64.S
-@@ -1003,9 +1003,15 @@ retint_signal:
- ENTRY(retint_kernel)
- cmpl $0,TI_preempt_count(%rcx)
- jnz retint_restore_args
-- bt $TIF_NEED_RESCHED,TI_flags(%rcx)
-+ bt $TIF_NEED_RESCHED,TI_flags(%rcx)
-+ jc 1f
-+
-+ cmpl $0,TI_preempt_lazy_count(%rcx)
-+ jnz retint_restore_args
-+ bt $TIF_NEED_RESCHED_LAZY,TI_flags(%rcx)
- jnc retint_restore_args
-- bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
-+
-+1: bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
- jnc retint_restore_args
- call preempt_schedule_irq
- jmp exit_intr
Index: linux-stable/include/linux/ftrace_event.h
===================================================================
--- linux-stable.orig/include/linux/ftrace_event.h
@@ -185,7 +85,7 @@
#define preempt_count() (current_thread_info()->preempt_count)
-+#ifdef CONFIG_HAVE_PREEMPT_LAZY
++#ifdef CONFIG_PREEMPT_LAZY
+#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0)
+#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0)
+#define inc_preempt_lazy_count() add_preempt_lazy_count(1)
@@ -203,7 +103,7 @@
asmlinkage void preempt_schedule(void);
-+# ifdef CONFIG_HAVE_PREEMPT_LAZY
++# ifdef CONFIG_PREEMPT_LAZY
+#define preempt_check_resched() \
+do { \
+ if (unlikely(test_thread_flag(TIF_NEED_RESCHED) || \
@@ -256,7 +156,7 @@
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
-+#ifdef CONFIG_HAVE_PREEMPT_LAZY
++#ifdef CONFIG_PREEMPT_LAZY
+static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
+{
+ set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
@@ -321,13 +221,16 @@
===================================================================
--- linux-stable.orig/kernel/Kconfig.preempt
+++ linux-stable/kernel/Kconfig.preempt
-@@ -6,6 +6,9 @@ config PREEMPT_RT_BASE
+@@ -6,6 +6,12 @@ config PREEMPT_RT_BASE
bool
select PREEMPT
+config HAVE_PREEMPT_LAZY
+ bool
+
++config PREEMPT_LAZY
++	def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL
++
choice
prompt "Preemption Model"
default PREEMPT_NONE
@@ -339,7 +242,7 @@
smp_send_reschedule(cpu);
}
-+#ifdef CONFIG_HAVE_PREEMPT_LAZY
++#ifdef CONFIG_PREEMPT_LAZY
+void resched_task_lazy(struct task_struct *p)
+{
+ int cpu;
@@ -377,7 +280,7 @@
assert_raw_spin_locked(&task_rq(p)->lock);
set_tsk_need_resched(p);
}
-+#ifdef CONFIG_HAVE_PREEMPT_LAZY
++#ifdef CONFIG_PREEMPT_LAZY
+void resched_task_lazy(struct task_struct *p)
+{
+ if (!sched_feat(PREEMPT_LAZY)) {
@@ -391,7 +294,17 @@
#endif /* CONFIG_SMP */
#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
-@@ -3448,6 +3490,7 @@ void migrate_disable(void)
+@@ -1838,6 +1880,9 @@ void sched_fork(struct task_struct *p)
+ /* Want to start with kernel preemption disabled. */
+ task_thread_info(p)->preempt_count = 1;
+ #endif
++#ifdef CONFIG_HAVE_PREEMPT_LAZY
++	task_thread_info(p)->preempt_lazy_count = 0;
++#endif
+ #ifdef CONFIG_SMP
+ plist_node_init(&p->pushable_tasks, MAX_PRIO);
+ #endif
+@@ -3448,6 +3493,7 @@ void migrate_disable(void)
return;
}
@@ -399,7 +312,7 @@
pin_current_cpu();
p->migrate_disable = 1;
preempt_enable();
-@@ -3503,6 +3546,7 @@ void migrate_enable(void)
+@@ -3503,6 +3549,7 @@ void migrate_enable(void)
unpin_current_cpu();
preempt_enable();
@@ -407,7 +320,7 @@
}
EXPORT_SYMBOL(migrate_enable);
#else
-@@ -3603,6 +3647,7 @@ need_resched:
+@@ -3603,6 +3650,7 @@ need_resched:
put_prev_task(rq, prev);
next = pick_next_task(rq);
clear_tsk_need_resched(prev);
@@ -415,11 +328,11 @@
rq->skip_clock_update = 0;
if (likely(prev != next)) {
-@@ -3724,6 +3769,14 @@ asmlinkage void __sched notrace preempt_
+@@ -3724,6 +3772,14 @@ asmlinkage void __sched notrace preempt_
if (likely(ti->preempt_count || irqs_disabled()))
return;
-+#ifdef CONFIG_HAVE_PREEMPT_LAZY
++#ifdef CONFIG_PREEMPT_LAZY
+ /*
+ * Check for lazy preemption
+ */
@@ -430,14 +343,17 @@
do {
add_preempt_count_notrace(PREEMPT_ACTIVE);
/*
-@@ -5331,6 +5384,7 @@ void __cpuinit init_idle(struct task_str
+@@ -5331,7 +5387,9 @@ void __cpuinit init_idle(struct task_str
/* Set the preempt count _outside_ the spinlocks! */
task_thread_info(idle)->preempt_count = 0;
+-
++#ifdef CONFIG_HAVE_PREEMPT_LAZY
+ task_thread_info(idle)->preempt_lazy_count = 0;
-
++#endif
/*
* The idle tasks have their own, simple scheduling class:
+ */
Index: linux-stable/kernel/sched/fair.c
===================================================================
--- linux-stable.orig/kernel/sched/fair.c
@@ -518,11 +434,13 @@
===================================================================
--- linux-stable.orig/kernel/sched/features.h
+++ linux-stable/kernel/sched/features.h
-@@ -68,8 +68,10 @@ SCHED_FEAT(NONTASK_POWER, true)
+@@ -68,8 +68,12 @@ SCHED_FEAT(NONTASK_POWER, true)
SCHED_FEAT(TTWU_QUEUE, true)
#else
SCHED_FEAT(TTWU_QUEUE, false)
++# ifdef CONFIG_PREEMPT_LAZY
+SCHED_FEAT(PREEMPT_LAZY, true)
++# endif
#endif
SCHED_FEAT(FORCE_SD_OVERLAP, false)
@@ -537,7 +455,7 @@
extern void resched_task(struct task_struct *p);
extern void resched_cpu(int cpu);
-+#ifdef CONFIG_HAVE_PREEMPT_LAZY
++#ifdef CONFIG_PREEMPT_LAZY
+extern void resched_task_lazy(struct task_struct *tsk);
+#else
+static inline void resched_task_lazy(struct task_struct *tsk)
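
Taken together, the core hunks change the policy rather than the
mechanics: SCHED_OTHER preemption requests now go through
resched_task_lazy(), which only sets TIF_NEED_RESCHED_LAZY, while RT
tasks keep getting an immediate TIF_NEED_RESCHED. A sketch assembled
from the fragments above (abbreviated, not the full kernel function):

void resched_task_lazy(struct task_struct *p)
{
	/* Feature off: behave exactly like resched_task(), as quoted above. */
	if (!sched_feat(PREEMPT_LAZY)) {
		resched_task(p);
		return;
	}
	/* Already pending in either form: nothing to do. */
	if (test_tsk_need_resched(p) || test_tsk_need_resched_lazy(p))
		return;
	/*
	 * Only the lazy flag is set; it is honoured at preemption points
	 * once preempt_lazy_count has dropped back to zero, letting a
	 * SCHED_OTHER task finish its lazy-protected section first.
	 */
	set_tsk_need_resched_lazy(p);
}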
diff --git a/series b/series
index 4aa7d32..a859a63 100644
--- a/series
+++ b/series
@@ -604,12 +604,14 @@
softirq-add-more-debug.patch
net-netif-rx-ni-use-local-bh-disable.patch
-# CHECKME
-#rt-replace-rt-spin-lock-to-raw-one-in-res_counter.patch
+fix-random-fallout.patch
+
+preempt-lazy-support.patch
+x86-preempt-lazy.patch
+arm-preempt-lazy-support.patch
# Enable full RT
-preempt-lazy-support.patch
+powerpc-preempt-lazy-support.patch
kconfig-disable-a-few-options-rt.patch
kconfig-preempt-rt-full.patch
-#preempt-lazy-support.patch
diff --git a/x86-preempt-lazy.patch b/x86-preempt-lazy.patch
new file mode 100644
index 0000000..9224415
--- /dev/null
+++ b/x86-preempt-lazy.patch
@@ -0,0 +1,187 @@
+Subject: x86-preempt-lazy.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 01 Nov 2012 11:03:47 +0100
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/x86/Kconfig | 1 +
+ arch/x86/include/asm/thread_info.h | 6 ++++++
+ arch/x86/kernel/asm-offsets.c | 1 +
+ arch/x86/kernel/entry_32.S | 18 +++++++++++++-----
+ arch/x86/kernel/entry_64.S | 24 +++++++++++++++---------
+ 5 files changed, 36 insertions(+), 14 deletions(-)
+
+Index: linux-stable/arch/x86/Kconfig
+===================================================================
+--- linux-stable.orig/arch/x86/Kconfig
++++ linux-stable/arch/x86/Kconfig
+@@ -97,6 +97,7 @@ config X86
+ select KTIME_SCALAR if X86_32
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
++ select HAVE_PREEMPT_LAZY
+
+ config INSTRUCTION_DECODER
+ def_bool (KPROBES || PERF_EVENTS || UPROBES)
+Index: linux-stable/arch/x86/include/asm/thread_info.h
+===================================================================
+--- linux-stable.orig/arch/x86/include/asm/thread_info.h
++++ linux-stable/arch/x86/include/asm/thread_info.h
+@@ -31,6 +31,8 @@ struct thread_info {
+ __u32 cpu; /* current CPU */
+ int preempt_count; /* 0 => preemptable,
+ <0 => BUG */
++ int preempt_lazy_count; /* 0 => lazy preemptable,
++ <0 => BUG */
+ mm_segment_t addr_limit;
+ struct restart_block restart_block;
+ void __user *sysenter_return;
+@@ -83,6 +85,7 @@ struct thread_info {
+ #define TIF_SYSCALL_EMU 6 /* syscall emulation active */
+ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
+ #define TIF_SECCOMP 8 /* secure computing */
++#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */
+ #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */
+ #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
+ #define TIF_UPROBE 12 /* breakpointed or singlestepping */
+@@ -108,6 +111,7 @@ struct thread_info {
+ #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+ #define _TIF_SECCOMP (1 << TIF_SECCOMP)
++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
+ #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY)
+ #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
+ #define _TIF_UPROBE (1 << TIF_UPROBE)
+@@ -155,6 +159,8 @@ struct thread_info {
+ #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
+ #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
+
++#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
++
+ #define PREEMPT_ACTIVE 0x10000000
+
+ #ifdef CONFIG_X86_32
+Index: linux-stable/arch/x86/kernel/asm-offsets.c
+===================================================================
+--- linux-stable.orig/arch/x86/kernel/asm-offsets.c
++++ linux-stable/arch/x86/kernel/asm-offsets.c
+@@ -33,6 +33,7 @@ void common(void) {
+ OFFSET(TI_status, thread_info, status);
+ OFFSET(TI_addr_limit, thread_info, addr_limit);
+ OFFSET(TI_preempt_count, thread_info, preempt_count);
++ OFFSET(TI_preempt_lazy_count, thread_info, preempt_lazy_count);
+
+ BLANK();
+ OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
+Index: linux-stable/arch/x86/kernel/entry_32.S
+===================================================================
+--- linux-stable.orig/arch/x86/kernel/entry_32.S
++++ linux-stable/arch/x86/kernel/entry_32.S
+@@ -349,14 +349,22 @@ ENTRY(resume_kernel)
+ DISABLE_INTERRUPTS(CLBR_ANY)
+ cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
+ jnz restore_all
+-need_resched:
+ movl TI_flags(%ebp), %ecx # need_resched set ?
+ testb $_TIF_NEED_RESCHED, %cl
++ jnz 1f
++
++ cmpl $0,TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ?
++ jnz restore_all
++	testl $_TIF_NEED_RESCHED_LAZY, %ecx
+ jz restore_all
+- testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ?
++
++1: testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ?
+ jz restore_all
+ call preempt_schedule_irq
+- jmp need_resched
++ movl TI_flags(%ebp), %ecx # need_resched set ?
++	testl $_TIF_NEED_RESCHED_MASK, %ecx
++ jnz 1b
++ jmp restore_all
+ END(resume_kernel)
+ #endif
+ CFI_ENDPROC
+@@ -589,7 +597,7 @@ ENDPROC(system_call)
+ ALIGN
+ RING0_PTREGS_FRAME # can't unwind into user space anyway
+ work_pending:
+- testb $_TIF_NEED_RESCHED, %cl
++	testl $_TIF_NEED_RESCHED_MASK, %ecx
+ jz work_notifysig
+ work_resched:
+ call schedule
+@@ -602,7 +610,7 @@ work_resched:
+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
+ # than syscall tracing?
+ jz restore_all
+- testb $_TIF_NEED_RESCHED, %cl
++	testl $_TIF_NEED_RESCHED_MASK, %ecx
+ jnz work_resched
+
+ work_notifysig: # deal with pending signals and
+Index: linux-stable/arch/x86/kernel/entry_64.S
+===================================================================
+--- linux-stable.orig/arch/x86/kernel/entry_64.S
++++ linux-stable/arch/x86/kernel/entry_64.S
+@@ -560,8 +560,8 @@ sysret_check:
+ /* Handle reschedules */
+ /* edx: work, edi: workmask */
+ sysret_careful:
+- bt $TIF_NEED_RESCHED,%edx
+- jnc sysret_signal
++ testl $_TIF_NEED_RESCHED_MASK,%edx
++ jz sysret_signal
+ TRACE_IRQS_ON
+ ENABLE_INTERRUPTS(CLBR_NONE)
+ pushq_cfi %rdi
+@@ -673,8 +673,8 @@ GLOBAL(int_with_check)
+ /* First do a reschedule test. */
+ /* edx: work, edi: workmask */
+ int_careful:
+- bt $TIF_NEED_RESCHED,%edx
+- jnc int_very_careful
++ testl $_TIF_NEED_RESCHED_MASK,%edx
++ jz int_very_careful
+ TRACE_IRQS_ON
+ ENABLE_INTERRUPTS(CLBR_NONE)
+ pushq_cfi %rdi
+@@ -969,8 +969,8 @@ bad_iret:
+ /* edi: workmask, edx: work */
+ retint_careful:
+ CFI_RESTORE_STATE
+- bt $TIF_NEED_RESCHED,%edx
+- jnc retint_signal
++ testl $_TIF_NEED_RESCHED_MASK,%edx
++ jz retint_signal
+ TRACE_IRQS_ON
+ ENABLE_INTERRUPTS(CLBR_NONE)
+ pushq_cfi %rdi
+@@ -1003,9 +1003,15 @@ retint_signal:
+ ENTRY(retint_kernel)
+ cmpl $0,TI_preempt_count(%rcx)
+ jnz retint_restore_args
+- bt $TIF_NEED_RESCHED,TI_flags(%rcx)
++ bt $TIF_NEED_RESCHED,TI_flags(%rcx)
++ jc 1f
++
++ cmpl $0,TI_preempt_lazy_count(%rcx)
++ jnz retint_restore_args
++ bt $TIF_NEED_RESCHED_LAZY,TI_flags(%rcx)
+ jnc retint_restore_args
+- bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
++
++1: bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
+ jnc retint_restore_args
+ call preempt_schedule_irq
+ jmp exit_intr
+@@ -1437,7 +1443,7 @@ paranoid_userspace:
+ movq %rsp,%rdi /* &pt_regs */
+ call sync_regs
+ movq %rax,%rsp /* switch stack for scheduling */
+- testl $_TIF_NEED_RESCHED,%ebx
++ testl $_TIF_NEED_RESCHED_MASK,%ebx
+ jnz paranoid_schedule
+ movl %ebx,%edx /* arg3: thread flags */
+ TRACE_IRQS_ON
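
One detail worth flagging in the entry_32.S hunks:
_TIF_NEED_RESCHED_MASK spans bit 9, so the flag tests must read the
full %ecx rather than the low byte %cl that the old testb used. The
arithmetic, with TIF_NEED_RESCHED_LAZY from the thread_info.h hunk
above and the mainline x86 TIF_NEED_RESCHED of this era:

#include <assert.h>

#define TIF_NEED_RESCHED	3	/* mainline x86, this era */
#define TIF_NEED_RESCHED_LAZY	9	/* from the hunk above */

#define _TIF_NEED_RESCHED	(1u << TIF_NEED_RESCHED)
#define _TIF_NEED_RESCHED_LAZY	(1u << TIF_NEED_RESCHED_LAZY)
#define _TIF_NEED_RESCHED_MASK	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)

int main(void)
{
	/*
	 * 0x208: bit 9 lies outside the low byte, so a testb on %cl can
	 * never see TIF_NEED_RESCHED_LAZY; the exit paths need %ecx.
	 */
	assert(_TIF_NEED_RESCHED_MASK == 0x208);
	assert((_TIF_NEED_RESCHED_MASK & 0xffu) == _TIF_NEED_RESCHED);
	return 0;
}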