| From: Thomas Gleixner <tglx@linutronix.de> |
| Date: Thu, 1 Nov 2012 10:14:11 +0100 |
| Subject: powerpc: Add support for lazy preemption |
| |
| Implement the powerpc pieces for lazy preemption: select HAVE_PREEMPT_LAZY, |
| add a preempt_lazy_count field to thread_info, introduce |
| TIF_NEED_RESCHED_LAZY (moving TIF_POLLING_NRFLAG to bit 19), and make the |
| 32-bit and 64-bit interrupt return paths check the combined |
| _TIF_NEED_RESCHED_MASK so that a lazy reschedule request is acted on only |
| once preempt_lazy_count has dropped to zero. |
| |
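| For reference, a minimal C sketch of the decision the assembly below makes |
| on the interrupt-return path (should_preempt() is an illustrative helper, |
| not part of the patch; the fields and flags are the ones added by the diff): |
| |
|   static bool should_preempt(struct thread_info *ti) |
|   { |
|           /* Hard preemption disabled: never preempt. */ |
|           if (ti->preempt_count) |
|                   return false; |
|           /* An immediate reschedule request always wins. */ |
|           if (ti->flags & _TIF_NEED_RESCHED) |
|                   return true; |
|           /* Lazy requests only count outside lazy-disabled sections. */ |
|           if (ti->preempt_lazy_count) |
|                   return false; |
|           return !!(ti->flags & _TIF_NEED_RESCHED_LAZY); |
|   } |
| |
| The entry code additionally requires that the interrupted context had |
| interrupts enabled before it calls preempt_schedule_irq(). |
| |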
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| --- |
| arch/powerpc/Kconfig | 1 + |
| arch/powerpc/include/asm/thread_info.h | 11 ++++++++--- |
| arch/powerpc/kernel/asm-offsets.c | 1 + |
| arch/powerpc/kernel/entry_32.S | 17 ++++++++++++----- |
| arch/powerpc/kernel/entry_64.S | 14 +++++++++++--- |
| 5 files changed, 33 insertions(+), 11 deletions(-) |
| |
| --- a/arch/powerpc/Kconfig |
| +++ b/arch/powerpc/Kconfig |
| @@ -139,6 +139,7 @@ config PPC |
| select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST |
| select GENERIC_STRNCPY_FROM_USER |
| select GENERIC_STRNLEN_USER |
| + select HAVE_PREEMPT_LAZY |
| select HAVE_MOD_ARCH_SPECIFIC |
| select MODULES_USE_ELF_RELA |
| select CLONE_BACKWARDS |
| --- a/arch/powerpc/include/asm/thread_info.h |
| +++ b/arch/powerpc/include/asm/thread_info.h |
| @@ -42,6 +42,8 @@ struct thread_info { |
| int cpu; /* cpu we're on */ |
| int preempt_count; /* 0 => preemptable, |
| <0 => BUG */ |
| + int preempt_lazy_count; /* 0 => preemptable, |
| + <0 => BUG */ |
| unsigned long local_flags; /* private flags for thread */ |
| |
| /* low level flags - has atomic operations done on it */ |
| @@ -82,8 +84,7 @@ static inline struct thread_info *curren |
| #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ |
| #define TIF_SIGPENDING 1 /* signal pending */ |
| #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ |
| -#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling |
| - TIF_NEED_RESCHED */ |
| +#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling necessary */ |
| #define TIF_32BIT 4 /* 32 bit binary */ |
| #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */ |
| #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ |
| @@ -101,6 +102,8 @@ static inline struct thread_info *curren |
| #if defined(CONFIG_PPC64) |
| #define TIF_ELF2ABI 18 /* function descriptors must die! */ |
| #endif |
| +#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling |
| + TIF_NEED_RESCHED */ |
| |
| /* as above, but as bit values */ |
| #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) |
| @@ -119,14 +122,16 @@ static inline struct thread_info *curren |
| #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) |
| #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE) |
| #define _TIF_NOHZ (1<<TIF_NOHZ) |
| +#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY) |
| #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ |
| _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \ |
| _TIF_NOHZ) |
| |
| #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ |
| _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ |
| - _TIF_RESTORE_TM) |
| + _TIF_RESTORE_TM | _TIF_NEED_RESCHED_LAZY) |
| #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR) |
| +#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) |
| |
| /* Bits in local_flags */ |
| /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */ |
| --- a/arch/powerpc/kernel/asm-offsets.c |
| +++ b/arch/powerpc/kernel/asm-offsets.c |
| @@ -160,6 +160,7 @@ int main(void) |
| DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); |
| DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags)); |
| DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); |
| + DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count)); |
| DEFINE(TI_TASK, offsetof(struct thread_info, task)); |
| DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); |
| |
| --- a/arch/powerpc/kernel/entry_32.S |
| +++ b/arch/powerpc/kernel/entry_32.S |
| @@ -813,7 +813,14 @@ user_exc_return: /* r10 contains MSR_KE |
| cmpwi 0,r0,0 /* if non-zero, just restore regs and return */ |
| bne restore |
| andi. r8,r8,_TIF_NEED_RESCHED |
| + bne+ 1f |
| + lwz r0,TI_PREEMPT_LAZY(r9) |
| + cmpwi 0,r0,0 /* if non-zero, just restore regs and return */ |
| + bne restore |
| + lwz r0,TI_FLAGS(r9) |
| + andi. r0,r0,_TIF_NEED_RESCHED_LAZY |
| beq+ restore |
| +1: |
| lwz r3,_MSR(r1) |
| andi. r0,r3,MSR_EE /* interrupts off? */ |
| beq restore /* don't schedule if so */ |
| @@ -824,11 +831,11 @@ user_exc_return: /* r10 contains MSR_KE |
| */ |
| bl trace_hardirqs_off |
| #endif |
| -1: bl preempt_schedule_irq |
| +2: bl preempt_schedule_irq |
| CURRENT_THREAD_INFO(r9, r1) |
| lwz r3,TI_FLAGS(r9) |
| - andi. r0,r3,_TIF_NEED_RESCHED |
| - bne- 1b |
| + andi. r0,r3,_TIF_NEED_RESCHED_MASK |
| + bne- 2b |
| #ifdef CONFIG_TRACE_IRQFLAGS |
| /* And now, to properly rebalance the above, we tell lockdep they |
| * are being turned back on, which will happen when we return |
| @@ -1149,7 +1156,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE |
| #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */ |
| |
| do_work: /* r10 contains MSR_KERNEL here */ |
| - andi. r0,r9,_TIF_NEED_RESCHED |
| + andi. r0,r9,_TIF_NEED_RESCHED_MASK |
| beq do_user_signal |
| |
| do_resched: /* r10 contains MSR_KERNEL here */ |
| @@ -1170,7 +1177,7 @@ do_resched: /* r10 contains MSR_KERNEL |
| MTMSRD(r10) /* disable interrupts */ |
| CURRENT_THREAD_INFO(r9, r1) |
| lwz r9,TI_FLAGS(r9) |
| - andi. r0,r9,_TIF_NEED_RESCHED |
| + andi. r0,r9,_TIF_NEED_RESCHED_MASK |
| bne- do_resched |
| andi. r0,r9,_TIF_USER_WORK_MASK |
| beq restore_user |
| --- a/arch/powerpc/kernel/entry_64.S |
| +++ b/arch/powerpc/kernel/entry_64.S |
| @@ -636,7 +636,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR) |
| #else |
| beq restore |
| #endif |
| -1: andi. r0,r4,_TIF_NEED_RESCHED |
| +1: andi. r0,r4,_TIF_NEED_RESCHED_MASK |
| beq 2f |
| bl restore_interrupts |
| SCHEDULE_USER |
| @@ -698,10 +698,18 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR) |
| |
| #ifdef CONFIG_PREEMPT |
| /* Check if we need to preempt */ |
| + lwz r8,TI_PREEMPT(r9) |
| + cmpwi 0,r8,0 /* if non-zero, just restore regs and return */ |
| + bne restore |
| andi. r0,r4,_TIF_NEED_RESCHED |
| + bne+ check_count |
| + |
| + andi. r0,r4,_TIF_NEED_RESCHED_LAZY |
| beq+ restore |
| + lwz r8,TI_PREEMPT_LAZY(r9) |
| + |
| /* Check that preempt_count() == 0 and interrupts are enabled */ |
| - lwz r8,TI_PREEMPT(r9) |
| +check_count: |
| cmpwi cr1,r8,0 |
| ld r0,SOFTE(r1) |
| cmpdi r0,0 |
| @@ -718,7 +726,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR) |
| /* Re-test flags and eventually loop */ |
| CURRENT_THREAD_INFO(r9, r1) |
| ld r4,TI_FLAGS(r9) |
| - andi. r0,r4,_TIF_NEED_RESCHED |
| + andi. r0,r4,_TIF_NEED_RESCHED_MASK |
| bne 1b |
| |
| /* |