From 77d7d0115176a7dab41f24c2f5d82cdbee99bc0d Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 1 Nov 2012 11:03:47 +0100
Subject: [PATCH] x86: Support for lazy preemption

Implement the x86 pieces for lazy preemption:

 - select HAVE_PREEMPT_LAZY

 - add TIF_NEED_RESCHED_LAZY, its _TIF_NEED_RESCHED_LAZY mask bit and a
   preempt_lazy_count field in thread_info

 - check the combined _TIF_NEED_RESCHED_MASK on the exit-to-usermode
   path, and honour a pending lazy reschedule on the 32 and 64 bit
   kernel-exit paths, in __preempt_count_dec_and_test() and in
   should_resched(), but only when the real preempt count and
   preempt_lazy_count are both zero

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

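---

Not part of the commit message: as a review aid, here is the decision the
entry_32.S/entry_64.S hunks below implement, written as a small standalone
C model. It is a sketch only: the constants mirror x86's folded preempt
count, where PREEMPT_NEED_RESCHED is an *inverted* bit, so
__preempt_count == 0 means "count zero and NEED_RESCHED pending" while
__preempt_count == PREEMPT_ENABLED means "count zero, nothing pending".
struct ti_model and want_preempt_schedule_irq() are made-up names standing
in for thread_info and the assembly, not kernel interfaces, and the
additional X86_EFLAGS_IF test in the entry code is left out.

	#include <stdbool.h>
	#include <stdint.h>

	#define PREEMPT_NEED_RESCHED	0x80000000u	/* inverted bit: clear => resched pending */
	#define PREEMPT_ENABLED		PREEMPT_NEED_RESCHED
	#define _TIF_NEED_RESCHED_LAZY	(1u << 9)	/* TIF_NEED_RESCHED_LAZY */

	struct ti_model {			/* stand-in for thread_info */
		uint32_t flags;
		int	 preempt_lazy_count;
	};

	/*
	 * resume_kernel/retint_kernel in C: preempt immediately when the
	 * folded count hit zero (count 0 + NEED_RESCHED); otherwise take
	 * the lazy path only if the real count is zero, lazy preemption
	 * is not disabled and TIF_NEED_RESCHED_LAZY is pending.
	 */
	static bool want_preempt_schedule_irq(uint32_t preempt_count,
					      const struct ti_model *ti)
	{
		if (preempt_count == 0)
			return true;
		if (preempt_count != PREEMPT_ENABLED)
			return false;
		if (ti->preempt_lazy_count)
			return false;
		return ti->flags & _TIF_NEED_RESCHED_LAZY;
	}

	int main(void)
	{
		struct ti_model ti = {
			.flags = _TIF_NEED_RESCHED_LAZY,
			.preempt_lazy_count = 0,
		};

		/* A lazy request is honoured once the real count is zero. */
		return want_preempt_schedule_irq(PREEMPT_ENABLED, &ti) ? 0 : 1;
	}

The idea behind the split, from the core lazy-preemption patch this builds
on: wakeups of SCHED_OTHER tasks set only the lazy bit, so the running
task is not preempted at the next preempt_enable() while inside a lazily
protected section; real-time tasks keep using TIF_NEED_RESCHED and
preempt exactly as before.
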
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index f3aec424009b..7af373e8ef6f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -161,6 +161,7 @@ config X86
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE		if X86_64 && FRAME_POINTER && STACK_VALIDATION
+	select HAVE_PREEMPT_LAZY
 	select HAVE_STACK_VALIDATION		if X86_64
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_UNSTABLE_SCHED_CLOCK
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 964d9986e20e..263f48d215b4 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -131,7 +131,7 @@ static long syscall_trace_enter(struct pt_regs *regs)
 
 #define EXIT_TO_USERMODE_LOOP_FLAGS				\
 	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
-	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)
+	 _TIF_NEED_RESCHED_MASK | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)
 
 static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
 {
@@ -146,7 +146,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
 		/* We have work to do. */
 		local_irq_enable();
 
-		if (cached_flags & _TIF_NEED_RESCHED)
+		if (cached_flags & _TIF_NEED_RESCHED_MASK)
 			schedule();
 
 #ifdef ARCH_RT_DELAYS_SIGNAL_SEND
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 50bc26949e9e..1a0914133635 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -329,8 +329,25 @@ END(ret_from_exception)
 ENTRY(resume_kernel)
 	DISABLE_INTERRUPTS(CLBR_ANY)
 .Lneed_resched:
+	# preempt count == 0 and NEED_RESCHED set ?
 	cmpl	$0, PER_CPU_VAR(__preempt_count)
+#ifndef CONFIG_PREEMPT_LAZY
 	jnz	restore_all
+#else
+	jz	test_int_off
+
+	# at least preempt count == 0 ?
+	cmpl	$_PREEMPT_ENABLED, PER_CPU_VAR(__preempt_count)
+	jne	restore_all
+
+	movl	PER_CPU_VAR(current_task), %ebp
+	cmpl	$0, TASK_TI_preempt_lazy_count(%ebp)	# non-zero preempt_lazy_count ?
+	jnz	restore_all
+
+	testl	$_TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%ebp)
+	jz	restore_all
+test_int_off:
+#endif
 	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
 	jz	restore_all
 	call	preempt_schedule_irq
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 24af36ed722c..774fe2f03c1c 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -541,7 +541,23 @@ retint_kernel:
 	bt	$9, EFLAGS(%rsp)	/* were interrupts off? */
 	jnc	1f
 0:	cmpl	$0, PER_CPU_VAR(__preempt_count)
+#ifndef CONFIG_PREEMPT_LAZY
 	jnz	1f
+#else
+	jz	do_preempt_schedule_irq
+
+	# at least preempt count == 0 ?
+	cmpl	$_PREEMPT_ENABLED, PER_CPU_VAR(__preempt_count)
+	jnz	1f
+
+	movq	PER_CPU_VAR(current_task), %rcx
+	cmpl	$0, TASK_TI_preempt_lazy_count(%rcx)	# non-zero preempt_lazy_count ?
+	jnz	1f
+
+	bt	$TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%rcx)
+	jnc	1f
+do_preempt_schedule_irq:
+#endif
 	call	preempt_schedule_irq
 	jmp	0b
 1:
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index ec1f3c651150..d9cd82a824a6 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -85,17 +85,46 @@ static __always_inline void __preempt_count_sub(int val)
  * a decrement which hits zero means we have no preempt_count and should
  * reschedule.
  */
-static __always_inline bool __preempt_count_dec_and_test(void)
+static __always_inline bool ____preempt_count_dec_and_test(void)
 {
 	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
 }
 
+static __always_inline bool __preempt_count_dec_and_test(void)
+{
+	if (____preempt_count_dec_and_test())
+		return true;
+#ifdef CONFIG_PREEMPT_LAZY
+	if (current_thread_info()->preempt_lazy_count)
+		return false;
+	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+#else
+	return false;
+#endif
+}
+
 /*
  * Returns true when we need to resched and can (barring IRQ state).
  */
 static __always_inline bool should_resched(int preempt_offset)
 {
+#ifdef CONFIG_PREEMPT_LAZY
+	u32 tmp;
+
+	tmp = raw_cpu_read_4(__preempt_count);
+	if (tmp == preempt_offset)
+		return true;
+
+	/* preempt count == 0 ? */
+	tmp &= ~PREEMPT_NEED_RESCHED;
+	if (tmp)
+		return false;
+	if (current_thread_info()->preempt_lazy_count)
+		return false;
+	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+#else
 	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
+#endif
 }
 
 #ifdef CONFIG_PREEMPT
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index e00e1bd6e7b3..a22b5e86eeed 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -54,11 +54,14 @@ struct task_struct;
 
 struct thread_info {
 	unsigned long		flags;			/* low level flags */
+	int			preempt_lazy_count;	/* 0 => lazy preemptable
+							   <0 => BUG */
 };
 
 #define INIT_THREAD_INFO(tsk)			\
 {						\
 	.flags		= 0,			\
+	.preempt_lazy_count = 0,		\
 }
 
 #define init_stack		(init_thread_union.stack)
@@ -67,6 +70,10 @@ struct thread_info {
 
 #include <asm/asm-offsets.h>
 
+#define GET_THREAD_INFO(reg) \
+	_ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \
+	_ASM_SUB $(THREAD_SIZE),reg ;
+
 #endif
 
 /*
@@ -82,6 +89,7 @@ struct thread_info {
 #define TIF_SYSCALL_EMU		6	/* syscall emulation active */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SECCOMP		8	/* secure computing */
+#define TIF_NEED_RESCHED_LAZY	9	/* lazy rescheduling necessary */
 #define TIF_USER_RETURN_NOTIFY	11	/* notify kernel of userspace return */
 #define TIF_UPROBE		12	/* breakpointed or singlestepping */
 #define TIF_PATCH_PENDING	13	/* pending live patching update */
@@ -107,6 +115,7 @@ struct thread_info {
 #define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
+#define _TIF_NEED_RESCHED_LAZY	(1 << TIF_NEED_RESCHED_LAZY)
 #define _TIF_USER_RETURN_NOTIFY	(1 << TIF_USER_RETURN_NOTIFY)
 #define _TIF_UPROBE		(1 << TIF_UPROBE)
 #define _TIF_PATCH_PENDING	(1 << TIF_PATCH_PENDING)
@@ -146,6 +155,8 @@ struct thread_info {
 #define _TIF_WORK_CTXSW_PREV	(_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
 #define _TIF_WORK_CTXSW_NEXT	(_TIF_WORK_CTXSW)
 
+#define _TIF_NEED_RESCHED_MASK	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
+
 #define STACK_WARN		(THREAD_SIZE/8)
 
 /*
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index de827d6ac8c2..41fd80368774 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -36,6 +36,7 @@ void common(void) {
 
 	BLANK();
 	OFFSET(TASK_TI_flags, task_struct, thread_info.flags);
+	OFFSET(TASK_TI_preempt_lazy_count, task_struct, thread_info.preempt_lazy_count);
 	OFFSET(TASK_addr_limit, task_struct, thread.addr_limit);
 
 	BLANK();
@@ -92,4 +93,5 @@ void common(void) {
 
 	BLANK();
 	DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
+	DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED);
 }
-- 
2.1.4
