| From: Thomas Gleixner <tglx@linutronix.de> |
| Date: Thu, 1 Nov 2012 10:14:11 +0100 |
| Subject: [PATCH] powerpc: Add support for lazy preemption |
| |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| --- |
| arch/powerpc/Kconfig | 1 |
| arch/powerpc/include/asm/thread_info.h | 12 ++++++-- |
| arch/powerpc/kernel/asm-offsets.c | 1 |
| arch/powerpc/kernel/entry_32.S | 17 ++++++++--- |
| arch/powerpc/kernel/entry_64.S | 48 +++++++++++++++++++-------------- |
| 5 files changed, 51 insertions(+), 28 deletions(-) |
| |
| --- a/arch/powerpc/Kconfig |
| +++ b/arch/powerpc/Kconfig |
| @@ -133,6 +133,7 @@ config PPC |
| select GENERIC_CLOCKEVENTS |
| select GENERIC_STRNCPY_FROM_USER |
| select GENERIC_STRNLEN_USER |
| + select HAVE_PREEMPT_LAZY |
| select HAVE_MOD_ARCH_SPECIFIC |
| select MODULES_USE_ELF_RELA |
| select CLONE_BACKWARDS |
| --- a/arch/powerpc/include/asm/thread_info.h |
| +++ b/arch/powerpc/include/asm/thread_info.h |
| @@ -43,6 +43,8 @@ struct thread_info { |
| int cpu; /* cpu we're on */ |
| int preempt_count; /* 0 => preemptable, |
| <0 => BUG */ |
| + int preempt_lazy_count; /* 0 => preemptable, |
| + <0 => BUG */ |
| struct restart_block restart_block; |
| unsigned long local_flags; /* private flags for thread */ |
| |
| @@ -90,8 +92,7 @@ static inline struct thread_info *curren |
| #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ |
| #define TIF_SIGPENDING 1 /* signal pending */ |
| #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ |
| -#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling |
| - TIF_NEED_RESCHED */ |
| +#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling necessary */ |
| #define TIF_32BIT 4 /* 32 bit binary */ |
| #define TIF_PERFMON_WORK 5 /* work for pfm_handle_work() */ |
| #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */ |
| @@ -107,6 +108,8 @@ static inline struct thread_info *curren |
| #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation |
| for stack store? */ |
| #define TIF_MEMDIE 17 /* is terminating due to OOM killer */ |
| +#define TIF_POLLING_NRFLAG 18 /* true if poll_idle() is polling |
| + TIF_NEED_RESCHED */ |
| |
| /* as above, but as bit values */ |
| #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) |
| @@ -126,13 +129,16 @@ static inline struct thread_info *curren |
| #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) |
| #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE) |
| #define _TIF_NOHZ (1<<TIF_NOHZ) |
| +#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY) |
| #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ |
| _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \ |
| _TIF_NOHZ) |
| |
| #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ |
| - _TIF_NOTIFY_RESUME | _TIF_UPROBE) |
| + _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ |
| + _TIF_NEED_RESCHED_LAZY) |
| #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR) |
| +#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) |
| |
| /* Bits in local_flags */ |
| /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */ |
| --- a/arch/powerpc/kernel/asm-offsets.c |
| +++ b/arch/powerpc/kernel/asm-offsets.c |
| @@ -162,6 +162,7 @@ int main(void) |
| DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); |
| DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags)); |
| DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); |
| + DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count)); |
| DEFINE(TI_TASK, offsetof(struct thread_info, task)); |
| DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); |
| |
| --- a/arch/powerpc/kernel/entry_32.S |
| +++ b/arch/powerpc/kernel/entry_32.S |
| @@ -890,7 +890,14 @@ resume_kernel: |
| cmpwi 0,r0,0 /* if non-zero, just restore regs and return */ |
| bne restore |
| andi. r8,r8,_TIF_NEED_RESCHED |
| + bne+ 1f |
| + lwz r0,TI_PREEMPT_LAZY(r9) |
| + cmpwi 0,r0,0 /* if non-zero, just restore regs and return */ |
| + bne restore |
| + lwz r0,TI_FLAGS(r9) |
| + andi. r0,r0,_TIF_NEED_RESCHED_LAZY |
| beq+ restore |
| +1: |
| lwz r3,_MSR(r1) |
| andi. r0,r3,MSR_EE /* interrupts off? */ |
| beq restore /* don't schedule if so */ |
| @@ -901,11 +908,11 @@ resume_kernel: |
| */ |
| bl trace_hardirqs_off |
| #endif |
| -1: bl preempt_schedule_irq |
| +2: bl preempt_schedule_irq |
| CURRENT_THREAD_INFO(r9, r1) |
| lwz r3,TI_FLAGS(r9) |
| - andi. r0,r3,_TIF_NEED_RESCHED |
| - bne- 1b |
| + andi. r0,r3,_TIF_NEED_RESCHED_MASK |
| + bne- 2b |
| #ifdef CONFIG_TRACE_IRQFLAGS |
| /* And now, to properly rebalance the above, we tell lockdep they |
| * are being turned back on, which will happen when we return |
| @@ -1226,7 +1233,7 @@ global_dbcr0: |
| #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */ |
| |
| do_work: /* r10 contains MSR_KERNEL here */ |
| - andi. r0,r9,_TIF_NEED_RESCHED |
| + andi. r0,r9,_TIF_NEED_RESCHED_MASK |
| beq do_user_signal |
| |
| do_resched: /* r10 contains MSR_KERNEL here */ |
| @@ -1247,7 +1254,7 @@ recheck: |
| MTMSRD(r10) /* disable interrupts */ |
| CURRENT_THREAD_INFO(r9, r1) |
| lwz r9,TI_FLAGS(r9) |
| - andi. r0,r9,_TIF_NEED_RESCHED |
| + andi. r0,r9,_TIF_NEED_RESCHED_MASK |
| bne- do_resched |
| andi. r0,r9,_TIF_USER_WORK_MASK |
| beq restore_user |
| --- a/arch/powerpc/kernel/entry_64.S |
| +++ b/arch/powerpc/kernel/entry_64.S |
| @@ -1,5 +1,5 @@ |
| /* |
| - * PowerPC version |
| + * PowerPC version |
| * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) |
| * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP |
| * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> |
| @@ -239,12 +239,12 @@ END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECK |
| RFI |
| b . /* prevent speculative execution */ |
| |
| -syscall_error: |
| +syscall_error: |
| oris r5,r5,0x1000 /* Set SO bit in CR */ |
| neg r3,r3 |
| std r5,_CCR(r1) |
| b .Lsyscall_error_cont |
| - |
| + |
| /* Traced system call support */ |
| syscall_dotrace: |
| bl .save_nvgprs |
| @@ -270,7 +270,7 @@ syscall_dotrace: |
| syscall_enosys: |
| li r3,-ENOSYS |
| b syscall_exit |
| - |
| + |
| syscall_exit_work: |
| #ifdef CONFIG_PPC_BOOK3S |
| mtmsrd r10,1 /* Restore RI */ |
| @@ -333,7 +333,7 @@ _GLOBAL(save_nvgprs) |
| std r0,_TRAP(r1) |
| blr |
| |
| - |
| + |
| /* |
| * The sigsuspend and rt_sigsuspend system calls can call do_signal |
| * and thus put the process into the stopped state where we might |
| @@ -406,7 +406,7 @@ DSCR_DEFAULT: |
| * the fork code also. |
| * |
| * The code which creates the new task context is in 'copy_thread' |
| - * in arch/powerpc/kernel/process.c |
| + * in arch/powerpc/kernel/process.c |
| */ |
| .align 7 |
| _GLOBAL(_switch) |
| @@ -637,7 +637,7 @@ _GLOBAL(ret_from_except_lite) |
| andi. r0,r4,_TIF_USER_WORK_MASK |
| beq restore |
| |
| - andi. r0,r4,_TIF_NEED_RESCHED |
| + andi. r0,r4,_TIF_NEED_RESCHED_MASK |
| beq 1f |
| bl .restore_interrupts |
| SCHEDULE_USER |
| @@ -687,10 +687,18 @@ resume_kernel: |
| |
| #ifdef CONFIG_PREEMPT |
| /* Check if we need to preempt */ |
| + lwz r8,TI_PREEMPT(r9) |
| + cmpwi 0,r8,0 /* if non-zero, just restore regs and return */ |
| + bne restore |
| andi. r0,r4,_TIF_NEED_RESCHED |
| + bne+ check_count |
| + |
| + andi. r0,r4,_TIF_NEED_RESCHED_LAZY |
| beq+ restore |
| + lwz r8,TI_PREEMPT_LAZY(r9) |
| + |
| /* Check that preempt_count() == 0 and interrupts are enabled */ |
| - lwz r8,TI_PREEMPT(r9) |
| +check_count: |
| cmpwi cr1,r8,0 |
| ld r0,SOFTE(r1) |
| cmpdi r0,0 |
| @@ -707,7 +715,7 @@ resume_kernel: |
| /* Re-test flags and eventually loop */ |
| CURRENT_THREAD_INFO(r9, r1) |
| ld r4,TI_FLAGS(r9) |
| - andi. r0,r4,_TIF_NEED_RESCHED |
| + andi. r0,r4,_TIF_NEED_RESCHED_MASK |
| bne 1b |
| |
| /* |
| @@ -874,7 +882,7 @@ restore_check_irq_replay: |
| bl .__check_irq_replay |
| cmpwi cr0,r3,0 |
| beq restore_no_replay |
| - |
| + |
| /* |
| * We need to re-emit an interrupt. We do so by re-using our |
| * existing exception frame. We first change the trap value, |
| @@ -916,7 +924,7 @@ restore_check_irq_replay: |
| b .ret_from_except |
| #endif /* CONFIG_PPC_DOORBELL */ |
| 1: b .ret_from_except /* What else to do here ? */ |
| - |
| + |
| unrecov_restore: |
| addi r3,r1,STACK_FRAME_OVERHEAD |
| bl .unrecoverable_exception |
| @@ -928,7 +936,7 @@ unrecov_restore: |
| * called with the MMU off. |
| * |
| * In addition, we need to be in 32b mode, at least for now. |
| - * |
| + * |
| * Note: r3 is an input parameter to rtas, so don't trash it... |
| */ |
| _GLOBAL(enter_rtas) |
| @@ -962,7 +970,7 @@ _GLOBAL(enter_rtas) |
| li r0,0 |
| mtcr r0 |
| |
| -#ifdef CONFIG_BUG |
| +#ifdef CONFIG_BUG |
| /* There is no way it is acceptable to get here with interrupts enabled, |
| * check it with the asm equivalent of WARN_ON |
| */ |
| @@ -970,7 +978,7 @@ _GLOBAL(enter_rtas) |
| 1: tdnei r0,0 |
| EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING |
| #endif |
| - |
| + |
| /* Hard-disable interrupts */ |
| mfmsr r6 |
| rldicl r7,r6,48,1 |
| @@ -984,7 +992,7 @@ _GLOBAL(enter_rtas) |
| std r1,PACAR1(r13) |
| std r6,PACASAVEDMSR(r13) |
| |
| - /* Setup our real return addr */ |
| + /* Setup our real return addr */ |
| LOAD_REG_ADDR(r4,.rtas_return_loc) |
| clrldi r4,r4,2 /* convert to realmode address */ |
| mtlr r4 |
| @@ -992,7 +1000,7 @@ _GLOBAL(enter_rtas) |
| li r0,0 |
| ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI |
| andc r0,r6,r0 |
| - |
| + |
| li r9,1 |
| rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG) |
| ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI |
| @@ -1003,7 +1011,7 @@ _GLOBAL(enter_rtas) |
| LOAD_REG_ADDR(r4, rtas) |
| ld r5,RTASENTRY(r4) /* get the rtas->entry value */ |
| ld r4,RTASBASE(r4) /* get the rtas->base value */ |
| - |
| + |
| mtspr SPRN_SRR0,r5 |
| mtspr SPRN_SRR1,r6 |
| rfid |
| @@ -1021,9 +1029,9 @@ _STATIC(rtas_return_loc) |
| mfmsr r6 |
| li r0,MSR_RI |
| andc r6,r6,r0 |
| - sync |
| + sync |
| mtmsrd r6 |
| - |
| + |
| ld r1,PACAR1(r4) /* Restore our SP */ |
| ld r4,PACASAVEDMSR(r4) /* Restore our MSR */ |
| |
| @@ -1121,7 +1129,7 @@ _GLOBAL(enter_prom) |
| REST_10GPRS(22, r1) |
| ld r4,_CCR(r1) |
| mtcr r4 |
| - |
| + |
| addi r1,r1,PROM_FRAME_SIZE |
| ld r0,16(r1) |
| mtlr r0 |