| From a608ba4dbdfc249f58585852efe1569ab960b63f Mon Sep 17 00:00:00 2001 |
| From: Thomas Gleixner <tglx@linutronix.de> |
| Date: Thu, 1 Nov 2012 10:14:11 +0100 |
| Subject: [PATCH] powerpc: Add support for lazy preemption |
| |
| Implement the powerpc pieces for lazy preemption: add the |
| TIF_NEED_RESCHED_LAZY flag and a preempt_lazy_count field to |
| struct thread_info (relocating TIF_POLLING_NRFLAG to bit 19 to |
| free bit 3), export the new field offset via asm-offsets, and |
| teach the 32-bit and 64-bit exception-return paths to invoke the |
| scheduler when either the regular or the lazy resched flag is |
| set (_TIF_NEED_RESCHED_MASK), honouring preempt_lazy_count for |
| the lazy case. |
| |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| |
| diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig |
| index fff65b147584..52edc92421b7 100644 |
| --- a/arch/powerpc/Kconfig |
| +++ b/arch/powerpc/Kconfig |
| @@ -154,6 +154,7 @@ config PPC |
| select HAVE_PERF_EVENTS_NMI if PPC64 |
| select HAVE_PERF_REGS |
| select HAVE_PERF_USER_STACK_DUMP |
| + select HAVE_PREEMPT_LAZY |
| select HAVE_RCU_TABLE_FREE if SMP |
| select HAVE_REGS_AND_STACK_ACCESS_API |
| select HAVE_SYSCALL_TRACEPOINTS |
| diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h |
| index 6fc6464f7421..2245bfc02bd4 100644 |
| --- a/arch/powerpc/include/asm/thread_info.h |
| +++ b/arch/powerpc/include/asm/thread_info.h |
| @@ -43,6 +43,8 @@ struct thread_info { |
| int cpu; /* cpu we're on */ |
| int preempt_count; /* 0 => preemptable, |
| <0 => BUG */ |
| + int preempt_lazy_count; /* 0 => preemptable, |
| + <0 => BUG */ |
| unsigned long local_flags; /* private flags for thread */ |
| #ifdef CONFIG_LIVEPATCH |
| unsigned long *livepatch_sp; |
| @@ -88,8 +90,7 @@ static inline struct thread_info *current_thread_info(void) |
| #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ |
| #define TIF_SIGPENDING 1 /* signal pending */ |
| #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ |
| -#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling |
| - TIF_NEED_RESCHED */ |
| +#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling necessary */ |
| #define TIF_32BIT 4 /* 32 bit binary */ |
| #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */ |
| #define TIF_PATCH_PENDING 6 /* pending live patching update */ |
| @@ -108,6 +109,8 @@ static inline struct thread_info *current_thread_info(void) |
| #if defined(CONFIG_PPC64) |
| #define TIF_ELF2ABI 18 /* function descriptors must die! */ |
| #endif |
| +#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling |
| + TIF_NEED_RESCHED */ |
| |
| /* as above, but as bit values */ |
| #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) |
| @@ -127,14 +130,17 @@ static inline struct thread_info *current_thread_info(void) |
| #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) |
| #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE) |
| #define _TIF_NOHZ (1<<TIF_NOHZ) |
| +#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY) |
| #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ |
| _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \ |
| _TIF_NOHZ) |
| |
| #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ |
| _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ |
| - _TIF_RESTORE_TM | _TIF_PATCH_PENDING) |
| + _TIF_RESTORE_TM | _TIF_PATCH_PENDING | \ |
| + _TIF_NEED_RESCHED_LAZY) |
| #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR) |
| +#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) |
| |
| /* Bits in local_flags */ |
| /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */ |
| diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c |
| index 4367e7df51a1..016d03766921 100644 |
| --- a/arch/powerpc/kernel/asm-offsets.c |
| +++ b/arch/powerpc/kernel/asm-offsets.c |
| @@ -156,6 +156,7 @@ int main(void) |
| OFFSET(TI_FLAGS, thread_info, flags); |
| OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags); |
| OFFSET(TI_PREEMPT, thread_info, preempt_count); |
| + OFFSET(TI_PREEMPT_LAZY, thread_info, preempt_lazy_count); |
| OFFSET(TI_TASK, thread_info, task); |
| OFFSET(TI_CPU, thread_info, cpu); |
| |
| diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S |
| index a38600949f3a..c7bf018a0a83 100644 |
| --- a/arch/powerpc/kernel/entry_32.S |
| +++ b/arch/powerpc/kernel/entry_32.S |
| @@ -845,7 +845,14 @@ resume_kernel: |
| cmpwi 0,r0,0 /* if non-zero, just restore regs and return */ |
| bne restore |
| andi. r8,r8,_TIF_NEED_RESCHED |
| + bne+ 1f |
| + lwz r0,TI_PREEMPT_LAZY(r9) |
| + cmpwi 0,r0,0 /* if non-zero, just restore regs and return */ |
| + bne restore |
| + lwz r0,TI_FLAGS(r9) |
| + andi. r0,r0,_TIF_NEED_RESCHED_LAZY |
| beq+ restore |
| +1: |
| lwz r3,_MSR(r1) |
| andi. r0,r3,MSR_EE /* interrupts off? */ |
| beq restore /* don't schedule if so */ |
| @@ -856,11 +863,11 @@ resume_kernel: |
| */ |
| bl trace_hardirqs_off |
| #endif |
| -1: bl preempt_schedule_irq |
| +2: bl preempt_schedule_irq |
| CURRENT_THREAD_INFO(r9, r1) |
| lwz r3,TI_FLAGS(r9) |
| - andi. r0,r3,_TIF_NEED_RESCHED |
| - bne- 1b |
| + andi. r0,r3,_TIF_NEED_RESCHED_MASK |
| + bne- 2b |
| #ifdef CONFIG_TRACE_IRQFLAGS |
| /* And now, to properly rebalance the above, we tell lockdep they |
| * are being turned back on, which will happen when we return |
| @@ -1183,7 +1190,7 @@ global_dbcr0: |
| #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */ |
| |
| do_work: /* r10 contains MSR_KERNEL here */ |
| - andi. r0,r9,_TIF_NEED_RESCHED |
| + andi. r0,r9,_TIF_NEED_RESCHED_MASK |
| beq do_user_signal |
| |
| do_resched: /* r10 contains MSR_KERNEL here */ |
| @@ -1204,7 +1211,7 @@ recheck: |
| MTMSRD(r10) /* disable interrupts */ |
| CURRENT_THREAD_INFO(r9, r1) |
| lwz r9,TI_FLAGS(r9) |
| - andi. r0,r9,_TIF_NEED_RESCHED |
| + andi. r0,r9,_TIF_NEED_RESCHED_MASK |
| bne- do_resched |
| andi. r0,r9,_TIF_USER_WORK_MASK |
| beq restore_user |
| diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S |
| index 767ef6d68c9e..2cb4d5552319 100644 |
| --- a/arch/powerpc/kernel/entry_64.S |
| +++ b/arch/powerpc/kernel/entry_64.S |
| @@ -656,7 +656,7 @@ _GLOBAL(ret_from_except_lite) |
| bl restore_math |
| b restore |
| #endif |
| -1: andi. r0,r4,_TIF_NEED_RESCHED |
| +1: andi. r0,r4,_TIF_NEED_RESCHED_MASK |
| beq 2f |
| bl restore_interrupts |
| SCHEDULE_USER |
| @@ -718,10 +718,18 @@ resume_kernel: |
| |
| #ifdef CONFIG_PREEMPT |
| /* Check if we need to preempt */ |
| + lwz r8,TI_PREEMPT(r9) |
| + cmpwi 0,r8,0 /* if non-zero, just restore regs and return */ |
| + bne restore |
| andi. r0,r4,_TIF_NEED_RESCHED |
| + bne+ check_count |
| + |
| + andi. r0,r4,_TIF_NEED_RESCHED_LAZY |
| beq+ restore |
| + lwz r8,TI_PREEMPT_LAZY(r9) |
| + |
| /* Check that preempt_count() == 0 and interrupts are enabled */ |
| - lwz r8,TI_PREEMPT(r9) |
| +check_count: |
| cmpwi cr1,r8,0 |
| ld r0,SOFTE(r1) |
| cmpdi r0,0 |
| @@ -738,7 +746,7 @@ resume_kernel: |
| /* Re-test flags and eventually loop */ |
| CURRENT_THREAD_INFO(r9, r1) |
| ld r4,TI_FLAGS(r9) |
| - andi. r0,r4,_TIF_NEED_RESCHED |
| + andi. r0,r4,_TIF_NEED_RESCHED_MASK |
| bne 1b |
| |
| /* |
| -- |
| 2.1.4 |
| |