| Subject: x86: Support for lazy preemption |
| From: Thomas Gleixner <tglx@linutronix.de> |
| Date: Thu, 01 Nov 2012 11:03:47 +0100 |
| |
| Implement the x86 pieces for lazy preempt. |
| |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| --- |
| arch/x86/Kconfig | 1 + |
| arch/x86/entry/common.c | 4 ++-- |
| arch/x86/entry/entry_32.S | 16 ++++++++++++++++ |
| arch/x86/entry/entry_64.S | 16 ++++++++++++++++ |
| arch/x86/include/asm/thread_info.h | 6 ++++++ |
| arch/x86/kernel/asm-offsets.c | 2 ++ |
| 6 files changed, 43 insertions(+), 2 deletions(-) |
| |
| --- a/arch/x86/Kconfig |
| +++ b/arch/x86/Kconfig |
| @@ -17,6 +17,7 @@ config X86_64 |
| ### Arch settings |
| config X86 |
| def_bool y |
| + select HAVE_PREEMPT_LAZY |
| select ACPI_LEGACY_TABLES_LOOKUP if ACPI |
| select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI |
| select ANON_INODES |
| --- a/arch/x86/entry/common.c |
| +++ b/arch/x86/entry/common.c |
| @@ -220,7 +220,7 @@ long syscall_trace_enter(struct pt_regs |
| |
| #define EXIT_TO_USERMODE_LOOP_FLAGS \ |
| (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ |
| - _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY) |
| + _TIF_NEED_RESCHED_MASK | _TIF_USER_RETURN_NOTIFY) |
| |
| static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags) |
| { |
| @@ -236,7 +236,7 @@ static void exit_to_usermode_loop(struct |
| /* We have work to do. */ |
| local_irq_enable(); |
| |
| - if (cached_flags & _TIF_NEED_RESCHED) |
| + if (cached_flags & _TIF_NEED_RESCHED_MASK) |
| schedule(); |
| |
| #ifdef ARCH_RT_DELAYS_SIGNAL_SEND |
| --- a/arch/x86/entry/entry_32.S |
| +++ b/arch/x86/entry/entry_32.S |
| @@ -278,8 +278,24 @@ END(ret_from_exception) |
| ENTRY(resume_kernel) |
| DISABLE_INTERRUPTS(CLBR_ANY) |
| need_resched: |
| + # preempt count == 0 and NEED_RESCHED set? |
| cmpl $0, PER_CPU_VAR(__preempt_count) |
| +#ifndef CONFIG_PREEMPT_LAZY |
| jnz restore_all |
| +#else |
| + jz test_int_off |
| + |
| + # at least preempt count == 0 ? |
| + cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count) |
| + jne restore_all |
| + |
| + cmpl $0,TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ? |
| + jnz restore_all |
| + |
| + testl $_TIF_NEED_RESCHED_LAZY, TI_flags(%ebp) |
| + jz restore_all |
| +test_int_off: |
| +#endif |
| testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ? |
| jz restore_all |
| call preempt_schedule_irq |
| --- a/arch/x86/entry/entry_64.S |
| +++ b/arch/x86/entry/entry_64.S |
| @@ -579,7 +579,23 @@ GLOBAL(retint_user) |
| bt $9, EFLAGS(%rsp) /* were interrupts off? */ |
| jnc 1f |
| 0: cmpl $0, PER_CPU_VAR(__preempt_count) |
| +#ifndef CONFIG_PREEMPT_LAZY |
| jnz 1f |
| +#else |
| + jz do_preempt_schedule_irq |
| + |
| + # at least preempt count == 0 ? |
| + cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count) |
| + jnz 1f |
| + |
| + GET_THREAD_INFO(%rcx) |
| + cmpl $0, TI_preempt_lazy_count(%rcx) |
| + jnz 1f |
| + |
| + bt $TIF_NEED_RESCHED_LAZY,TI_flags(%rcx) |
| + jnc 1f |
| +do_preempt_schedule_irq: |
| +#endif |
| call preempt_schedule_irq |
| jmp 0b |
| 1: |
| --- a/arch/x86/include/asm/thread_info.h |
| +++ b/arch/x86/include/asm/thread_info.h |
| @@ -58,6 +58,8 @@ struct thread_info { |
| __u32 status; /* thread synchronous flags */ |
| __u32 cpu; /* current CPU */ |
| mm_segment_t addr_limit; |
| + int preempt_lazy_count; /* 0 => lazy preemptable |
| + <0 => BUG */ |
| unsigned int sig_on_uaccess_error:1; |
| unsigned int uaccess_err:1; /* uaccess failed */ |
| }; |
| @@ -95,6 +97,7 @@ struct thread_info { |
| #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ |
| #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ |
| #define TIF_SECCOMP 8 /* secure computing */ |
| +#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */ |
| #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ |
| #define TIF_UPROBE 12 /* breakpointed or singlestepping */ |
| #define TIF_NOTSC 16 /* TSC is not accessible in userland */ |
| @@ -119,6 +122,7 @@ struct thread_info { |
| #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) |
| #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) |
| #define _TIF_SECCOMP (1 << TIF_SECCOMP) |
| +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) |
| #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY) |
| #define _TIF_UPROBE (1 << TIF_UPROBE) |
| #define _TIF_NOTSC (1 << TIF_NOTSC) |
| @@ -152,6 +156,8 @@ struct thread_info { |
| #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) |
| #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) |
| |
| +#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) |
| + |
| #define STACK_WARN (THREAD_SIZE/8) |
| |
| /* |
| --- a/arch/x86/kernel/asm-offsets.c |
| +++ b/arch/x86/kernel/asm-offsets.c |
| @@ -32,6 +32,7 @@ void common(void) { |
| OFFSET(TI_flags, thread_info, flags); |
| OFFSET(TI_status, thread_info, status); |
| OFFSET(TI_addr_limit, thread_info, addr_limit); |
| + OFFSET(TI_preempt_lazy_count, thread_info, preempt_lazy_count); |
| |
| BLANK(); |
| OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); |
| @@ -89,4 +90,5 @@ void common(void) { |
| |
| BLANK(); |
| DEFINE(PTREGS_SIZE, sizeof(struct pt_regs)); |
| + DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED); |
| } |