|  | Subject: arm: Add support for lazy preemption | 
|  | From: Thomas Gleixner <tglx@linutronix.de> | 
|  | Date: Wed, 31 Oct 2012 12:04:11 +0100 | 
|  |  | 
|  | Implement the arm pieces for lazy preempt. | 
|  |  | 
|  | Signed-off-by: Thomas Gleixner <tglx@linutronix.de> | 
|  | --- | 
|  | arch/arm/Kconfig                   |    1 + | 
|  | arch/arm/include/asm/thread_info.h |    8 ++++++-- | 
|  | arch/arm/kernel/asm-offsets.c      |    1 + | 
|  | arch/arm/kernel/entry-armv.S       |   19 ++++++++++++++++--- | 
|  | arch/arm/kernel/entry-common.S     |    9 +++++++-- | 
|  | arch/arm/kernel/signal.c           |    3 ++- | 
|  | 6 files changed, 33 insertions(+), 8 deletions(-) | 
|  |  | 
|  | --- a/arch/arm/Kconfig | 
|  | +++ b/arch/arm/Kconfig | 
|  | @@ -88,6 +88,7 @@ config ARM | 
|  | select HAVE_PERF_EVENTS | 
|  | select HAVE_PERF_REGS | 
|  | select HAVE_PERF_USER_STACK_DUMP | 
|  | +	select HAVE_PREEMPT_LAZY | 
|  | select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE) | 
|  | select HAVE_REGS_AND_STACK_ACCESS_API | 
|  | select HAVE_SYSCALL_TRACEPOINTS | 
|  | --- a/arch/arm/include/asm/thread_info.h | 
|  | +++ b/arch/arm/include/asm/thread_info.h | 
|  | @@ -49,6 +49,7 @@ struct cpu_context_save { | 
|  | struct thread_info { | 
|  | unsigned long		flags;		/* low level flags */ | 
|  | int			preempt_count;	/* 0 => preemptable, <0 => bug */ | 
|  | +	int			preempt_lazy_count; /* 0 => preemptable, <0 => bug */ | 
|  | mm_segment_t		addr_limit;	/* address limit */ | 
|  | struct task_struct	*task;		/* main task structure */ | 
|  | __u32			cpu;		/* cpu */ | 
|  | @@ -139,7 +140,8 @@ extern int vfp_restore_user_hwstate(stru | 
|  | #define TIF_SYSCALL_TRACE	4	/* syscall trace active */ | 
|  | #define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */ | 
|  | #define TIF_SYSCALL_TRACEPOINT	6	/* syscall tracepoint instrumentation */ | 
|  | -#define TIF_SECCOMP		7	/* seccomp syscall filtering active */ | 
|  | +#define TIF_SECCOMP		8	/* seccomp syscall filtering active */ | 
|  | +#define TIF_NEED_RESCHED_LAZY	7 | 
|  |  | 
|  | #define TIF_NOHZ		12	/* in adaptive nohz mode */ | 
|  | #define TIF_USING_IWMMXT	17 | 
|  | @@ -149,6 +151,7 @@ extern int vfp_restore_user_hwstate(stru | 
|  | #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING) | 
|  | #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED) | 
|  | #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME) | 
|  | +#define _TIF_NEED_RESCHED_LAZY	(1 << TIF_NEED_RESCHED_LAZY) | 
|  | #define _TIF_UPROBE		(1 << TIF_UPROBE) | 
|  | #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE) | 
|  | #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT) | 
|  | @@ -164,7 +167,8 @@ extern int vfp_restore_user_hwstate(stru | 
|  | * Change these and you break ASM code in entry-common.S | 
|  | */ | 
|  | #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ | 
|  | -				 _TIF_NOTIFY_RESUME | _TIF_UPROBE) | 
|  | +				 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ | 
|  | +				 _TIF_NEED_RESCHED_LAZY) | 
|  |  | 
|  | #endif /* __KERNEL__ */ | 
|  | #endif /* __ASM_ARM_THREAD_INFO_H */ | 
|  | --- a/arch/arm/kernel/asm-offsets.c | 
|  | +++ b/arch/arm/kernel/asm-offsets.c | 
|  | @@ -67,6 +67,7 @@ int main(void) | 
|  | BLANK(); | 
|  | DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags)); | 
|  | DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count)); | 
|  | +  DEFINE(TI_PREEMPT_LAZY,	offsetof(struct thread_info, preempt_lazy_count)); | 
|  | DEFINE(TI_ADDR_LIMIT,		offsetof(struct thread_info, addr_limit)); | 
|  | DEFINE(TI_TASK,		offsetof(struct thread_info, task)); | 
|  | DEFINE(TI_CPU,		offsetof(struct thread_info, cpu)); | 
|  | --- a/arch/arm/kernel/entry-armv.S | 
|  | +++ b/arch/arm/kernel/entry-armv.S | 
|  | @@ -216,11 +216,18 @@ ENDPROC(__dabt_svc) | 
|  |  | 
|  | #ifdef CONFIG_PREEMPT | 
|  | ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count | 
|  | -	ldr	r0, [tsk, #TI_FLAGS]		@ get flags | 
|  | teq	r8, #0				@ if preempt count != 0 | 
|  | +	bne	1f				@ return from exception | 
|  | +	ldr	r0, [tsk, #TI_FLAGS]		@ get flags | 
|  | +	tst	r0, #_TIF_NEED_RESCHED		@ if NEED_RESCHED is set | 
|  | +	blne	svc_preempt			@ preempt! | 
|  | + | 
|  | +	ldr	r8, [tsk, #TI_PREEMPT_LAZY]	@ get preempt lazy count | 
|  | +	teq	r8, #0				@ if preempt lazy count != 0 | 
|  | movne	r0, #0				@ force flags to 0 | 
|  | -	tst	r0, #_TIF_NEED_RESCHED | 
|  | +	tst	r0, #_TIF_NEED_RESCHED_LAZY | 
|  | blne	svc_preempt | 
|  | +1: | 
|  | #endif | 
|  |  | 
|  | svc_exit r5, irq = 1			@ return from exception | 
|  | @@ -235,8 +242,14 @@ ENDPROC(__irq_svc) | 
|  | 1:	bl	preempt_schedule_irq		@ irq en/disable is done inside | 
|  | ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS | 
|  | tst	r0, #_TIF_NEED_RESCHED | 
|  | +	bne	1b | 
|  | +	tst	r0, #_TIF_NEED_RESCHED_LAZY | 
|  | reteq	r8				@ go again | 
|  | -	b	1b | 
|  | +	ldr	r0, [tsk, #TI_PREEMPT_LAZY]	@ get preempt lazy count | 
|  | +	teq	r0, #0				@ if preempt lazy count != 0 | 
|  | +	beq	1b | 
|  | +	ret	r8				@ go again | 
|  | + | 
|  | #endif | 
|  |  | 
|  | __und_fault: | 
|  | --- a/arch/arm/kernel/entry-common.S | 
|  | +++ b/arch/arm/kernel/entry-common.S | 
|  | @@ -54,7 +54,9 @@ saved_pc	.req	lr | 
|  | cmp	r2, #TASK_SIZE | 
|  | blne	addr_limit_check_failed | 
|  | ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing | 
|  | -	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK | 
|  | +	tst	r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP) | 
|  | +	bne	fast_work_pending | 
|  | +	tst	r1, #_TIF_SECCOMP | 
|  | bne	fast_work_pending | 
|  |  | 
|  |  | 
|  | @@ -84,8 +86,11 @@ ENDPROC(ret_fast_syscall) | 
|  | cmp	r2, #TASK_SIZE | 
|  | blne	addr_limit_check_failed | 
|  | ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing | 
|  | -	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK | 
|  | +	tst	r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP) | 
|  | +	bne	do_slower_path | 
|  | +	tst	r1, #_TIF_SECCOMP | 
|  | beq	no_work_pending | 
|  | +do_slower_path: | 
|  | UNWIND(.fnend		) | 
|  | ENDPROC(ret_fast_syscall) | 
|  |  | 
|  | --- a/arch/arm/kernel/signal.c | 
|  | +++ b/arch/arm/kernel/signal.c | 
|  | @@ -638,7 +638,8 @@ do_work_pending(struct pt_regs *regs, un | 
|  | */ | 
|  | trace_hardirqs_off(); | 
|  | do { | 
|  | -		if (likely(thread_flags & _TIF_NEED_RESCHED)) { | 
|  | +		if (likely(thread_flags & (_TIF_NEED_RESCHED | | 
|  | +					   _TIF_NEED_RESCHED_LAZY))) { | 
|  | schedule(); | 
|  | } else { | 
|  | if (unlikely(!user_mode(regs))) |