Subject: powerpc-preempt-lazy-support.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 01 Nov 2012 10:14:11 +0100
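
Add the powerpc bits for lazy preemption: select HAVE_PREEMPT_LAZY,
put a preempt_lazy_count next to preempt_count in thread_info, define
TIF_NEED_RESCHED_LAZY plus a combined _TIF_NEED_RESCHED_MASK, and
teach the 32 and 64 bit exception return paths to preempt either when
TIF_NEED_RESCHED is set, or when TIF_NEED_RESCHED_LAZY is set and the
lazy counter is zero.
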
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/powerpc/Kconfig                   |    1 +
 arch/powerpc/include/asm/thread_info.h |    7 ++++++-
 arch/powerpc/kernel/asm-offsets.c      |    1 +
 arch/powerpc/kernel/entry_32.S         |   19 +++++++++++++------
 arch/powerpc/kernel/entry_64.S         |   17 +++++++++++------
 5 files changed, 32 insertions(+), 13 deletions(-)
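
The rule both entry paths below implement, rendered as C (a sketch:
the names are the ones this patch introduces; the control flow follows
the 32 bit version, which also keeps the existing preempt_count gate):

	static int should_preempt(struct thread_info *ti)
	{
		if (ti->preempt_count)			/* preemption disabled */
			return 0;
		if (ti->flags & _TIF_NEED_RESCHED)	/* preempt right away */
			return 1;
		if (ti->preempt_lazy_count)		/* lazy preemption disabled */
			return 0;
		return !!(ti->flags & _TIF_NEED_RESCHED_LAZY);
	}
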
Index: linux-stable/arch/powerpc/Kconfig
===================================================================
--- linux-stable.orig/arch/powerpc/Kconfig
+++ linux-stable/arch/powerpc/Kconfig
@@ -140,6 +140,7 @@ config PPC
select GENERIC_CLOCKEVENTS
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
+ select HAVE_PREEMPT_LAZY

config EARLY_PRINTK
bool
Index: linux-stable/arch/powerpc/include/asm/thread_info.h
===================================================================
--- linux-stable.orig/arch/powerpc/include/asm/thread_info.h
+++ linux-stable/arch/powerpc/include/asm/thread_info.h
@@ -43,6 +43,8 @@ struct thread_info {
int cpu; /* cpu we're on */
int preempt_count; /* 0 => preemptable,
<0 => BUG */
+ int preempt_lazy_count; /* 0 => preemptable,
+ <0 => BUG */
struct restart_block restart_block;
unsigned long local_flags; /* private flags for thread */

@@ -102,12 +104,14 @@ static inline struct thread_info *curren
#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
#define TIF_NOERROR 12 /* Force successful syscall return */
#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
+#define TIF_NEED_RESCHED_LAZY 14 /* lazy rescheduling necessary */
#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */

/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_32BIT (1<<TIF_32BIT)
#define _TIF_PERFMON_WORK (1<<TIF_PERFMON_WORK)
@@ -123,8 +127,9 @@ static inline struct thread_info *curren
_TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)

#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
- _TIF_NOTIFY_RESUME)
+ _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED_LAZY)
#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)

/* Bits in local_flags */
/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
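
A note on the flag placement: the exit paths test both bits with a
single andi., whose immediate is 16 bit unsigned, so the new flag has
to stay within the low 16 flag bits. With TIF_NEED_RESCHED at bit 2
(the existing powerpc value, assumed here) and the new bit 14, the
combined mask is 0x4004. As a compile-time sketch:

	#include <assert.h>
	#define TIF_NEED_RESCHED	2	/* existing value, assumed */
	#define TIF_NEED_RESCHED_LAZY	14	/* added by this patch */
	#define _TIF_NEED_RESCHED_MASK \
		((1 << TIF_NEED_RESCHED) | (1 << TIF_NEED_RESCHED_LAZY))
	static_assert(_TIF_NEED_RESCHED_MASK <= 0xffff,
		      "mask must fit andi.'s 16-bit unsigned immediate");
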
Index: linux-stable/arch/powerpc/kernel/asm-offsets.c
===================================================================
--- linux-stable.orig/arch/powerpc/kernel/asm-offsets.c
+++ linux-stable/arch/powerpc/kernel/asm-offsets.c
@@ -124,6 +124,7 @@ int main(void)
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
+ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
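
asm-offsets.c is how the assembly learns the struct layout: each
DEFINE() emits a marker into the compiler's assembler output, from
which the build generates asm-offsets.h, and that is what lets the
entry code do lwz rN,TI_PREEMPT_LAZY(r9). The mechanism, roughly
(the DEFINE macro from include/linux/kbuild.h):

	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))
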
Index: linux-stable/arch/powerpc/kernel/entry_32.S
===================================================================
--- linux-stable.orig/arch/powerpc/kernel/entry_32.S
+++ linux-stable/arch/powerpc/kernel/entry_32.S
@@ -843,8 +843,15 @@ resume_kernel:
bne restore
lwz r0,TI_FLAGS(r9)
andi. r0,r0,_TIF_NEED_RESCHED
+ bne+ 1f
+ lwz r0,TI_PREEMPT_LAZY(r9)
+ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
+ bne restore
+ lwz r0,TI_FLAGS(r9)
+ andi. r0,r0,_TIF_NEED_RESCHED_LAZY
beq+ restore
- andi. r0,r3,MSR_EE /* interrupts off? */
+
+1: andi. r0,r3,MSR_EE /* interrupts off? */
beq restore /* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
/* Lockdep thinks irqs are enabled, we need to call
@@ -853,11 +860,11 @@ resume_kernel:
*/
bl trace_hardirqs_off
#endif
-1: bl preempt_schedule_irq
+2: bl preempt_schedule_irq
CURRENT_THREAD_INFO(r9, r1)
lwz r3,TI_FLAGS(r9)
- andi. r0,r3,_TIF_NEED_RESCHED
- bne- 1b
+ andi. r0,r3,_TIF_NEED_RESCHED_MASK
+ bne- 2b
#ifdef CONFIG_TRACE_IRQFLAGS
/* And now, to properly rebalance the above, we tell lockdep they
* are being turned back on, which will happen when we return
@@ -1180,7 +1187,7 @@ global_dbcr0:
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work: /* r10 contains MSR_KERNEL here */
- andi. r0,r9,_TIF_NEED_RESCHED
+ andi. r0,r9,_TIF_NEED_RESCHED_MASK
beq do_user_signal

do_resched: /* r10 contains MSR_KERNEL here */
@@ -1201,7 +1208,7 @@ recheck:
MTMSRD(r10) /* disable interrupts */
CURRENT_THREAD_INFO(r9, r1)
lwz r9,TI_FLAGS(r9)
- andi. r0,r9,_TIF_NEED_RESCHED
+ andi. r0,r9,_TIF_NEED_RESCHED_MASK
bne- do_resched
andi. r0,r9,_TIF_USER_WORK_MASK
beq restore_user
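
Read back as C, the patched 32 bit resume_kernel above does roughly
the following (a sketch; the function name is made up here, "restore"
means the register-restore exit, and regs->msr stands for the saved
MSR the asm keeps in r3):

	static void resume_kernel_sketch(struct thread_info *ti, struct pt_regs *regs)
	{
		if (ti->preempt_count)
			return;				/* -> restore */
		if (!(ti->flags & _TIF_NEED_RESCHED)) {
			if (ti->preempt_lazy_count)
				return;			/* -> restore */
			if (!(ti->flags & _TIF_NEED_RESCHED_LAZY))
				return;			/* -> restore */
		}
		if (!(regs->msr & MSR_EE))		/* interrupts were off */
			return;				/* -> restore */
		do {
			preempt_schedule_irq();
		} while (current_thread_info()->flags & _TIF_NEED_RESCHED_MASK);
	}
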
Index: linux-stable/arch/powerpc/kernel/entry_64.S
===================================================================
--- linux-stable.orig/arch/powerpc/kernel/entry_64.S
+++ linux-stable/arch/powerpc/kernel/entry_64.S
@@ -580,7 +580,7 @@ _GLOBAL(ret_from_except_lite)
andi. r0,r4,_TIF_USER_WORK_MASK
beq restore

- andi. r0,r4,_TIF_NEED_RESCHED
+ andi. r0,r4,_TIF_NEED_RESCHED_MASK
beq 1f
bl .restore_interrupts
bl .schedule
@@ -595,11 +595,16 @@ _GLOBAL(ret_from_except_lite)
resume_kernel:
#ifdef CONFIG_PREEMPT
/* Check if we need to preempt */
+ lwz r8,TI_PREEMPT(r9)
andi. r0,r4,_TIF_NEED_RESCHED
+ bne+ 1f
+
+ andi. r0,r4,_TIF_NEED_RESCHED_LAZY
beq+ restore
+ lwz r8,TI_PREEMPT_LAZY(r9)
+
/* Check that preempt_count() == 0 and interrupts are enabled */
- lwz r8,TI_PREEMPT(r9)
- cmpwi cr1,r8,0
+1: cmpwi cr1,r8,0
ld r0,SOFTE(r1)
cmpdi r0,0
crandc eq,cr1*4+eq,eq
@@ -610,13 +615,13 @@ resume_kernel:
* sure we are soft-disabled first
*/
SOFT_DISABLE_INTS(r3,r4)
-1: bl .preempt_schedule_irq
+2: bl .preempt_schedule_irq

/* Re-test flags and eventually loop */
CURRENT_THREAD_INFO(r9, r1)
ld r4,TI_FLAGS(r9)
- andi. r0,r4,_TIF_NEED_RESCHED
- bne 1b
+ andi. r0,r4,_TIF_NEED_RESCHED_MASK
+ bne 2b
#endif /* CONFIG_PREEMPT */

.globl fast_exc_return_irq
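
The 64 bit variant differs in two details: the gating counter is
loaded into r8 up front (preempt_count for a hard resched,
preempt_lazy_count for a lazy one), and the interrupt state comes from
the soft-enable word SOFTE(r1) rather than MSR_EE. Roughly (a sketch;
the function name and the soft_enabled flag are illustrative):

	static void resume_kernel_64_sketch(struct thread_info *ti, int soft_enabled)
	{
		int count;

		if (ti->flags & _TIF_NEED_RESCHED)
			count = ti->preempt_count;
		else if (ti->flags & _TIF_NEED_RESCHED_LAZY)
			count = ti->preempt_lazy_count;
		else
			return;				/* -> restore */
		if (count || !soft_enabled)		/* cmpwi/cmpdi + crandc above */
			return;				/* -> restore */
		do {
			preempt_schedule_irq();		/* irqs soft-disabled first */
		} while (current_thread_info()->flags & _TIF_NEED_RESCHED_MASK);
	}
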