| From: Yong Zhang <yong.zhang0@gmail.com> |
| Date: Tue, 15 May 2012 13:53:56 +0800 |
| Subject: mm: Protect activate_mm() by preempt_[disable&enable]_rt() |
| |
| Use preempt_*_rt instead of local_irq_*_rt, otherwise there will be |
| warning on ARM like below: |
| |
| WARNING: at build/linux/kernel/smp.c:459 smp_call_function_many+0x98/0x264() |
| Modules linked in: |
| [<c0013bb4>] (unwind_backtrace+0x0/0xe4) from [<c001be94>] (warn_slowpath_common+0x4c/0x64) |
| [<c001be94>] (warn_slowpath_common+0x4c/0x64) from [<c001bec4>] (warn_slowpath_null+0x18/0x1c) |
| [<c001bec4>] (warn_slowpath_null+0x18/0x1c) from [<c0053ff8>](smp_call_function_many+0x98/0x264) |
| [<c0053ff8>] (smp_call_function_many+0x98/0x264) from [<c0054364>] (smp_call_function+0x44/0x6c) |
| [<c0054364>] (smp_call_function+0x44/0x6c) from [<c0017d50>] (__new_context+0xbc/0x124) |
| [<c0017d50>] (__new_context+0xbc/0x124) from [<c009e49c>] (flush_old_exec+0x460/0x5e4) |
| [<c009e49c>] (flush_old_exec+0x460/0x5e4) from [<c00d61ac>] (load_elf_binary+0x2e0/0x11ac) |
| [<c00d61ac>] (load_elf_binary+0x2e0/0x11ac) from [<c009d060>] (search_binary_handler+0x94/0x2a4) |
| [<c009d060>] (search_binary_handler+0x94/0x2a4) from [<c009e8fc>] (do_execve+0x254/0x364) |
| [<c009e8fc>] (do_execve+0x254/0x364) from [<c0010e84>] (sys_execve+0x34/0x54) |
| [<c0010e84>] (sys_execve+0x34/0x54) from [<c000da00>] (ret_fast_syscall+0x0/0x30) |
| ---[ end trace 0000000000000002 ]--- |
| |
| The reason is that ARM needs irqs enabled when doing activate_mm(). |
| According to mm-protect-activate-switch-mm.patch, actually |
| preempt_[disable|enable]_rt() is sufficient. |
| |
| Inspired-by: Steven Rostedt <rostedt@goodmis.org> |
| Signed-off-by: Yong Zhang <yong.zhang0@gmail.com> |
| Cc: Steven Rostedt <rostedt@goodmis.org> |
| Link: http://lkml.kernel.org/r/1337061236-1766-1-git-send-email-yong.zhang0@gmail.com |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| --- |
| fs/exec.c | 2 ++ |
| mm/mmu_context.c | 2 ++ |
| 2 files changed, 4 insertions(+) |
| |
| --- a/fs/exec.c |
| +++ b/fs/exec.c |
| @@ -961,12 +961,14 @@ static int exec_mmap(struct mm_struct *m |
| } |
| } |
| task_lock(tsk); |
| + preempt_disable_rt(); |
| active_mm = tsk->active_mm; |
| tsk->mm = mm; |
| tsk->active_mm = mm; |
| activate_mm(active_mm, mm); |
| tsk->mm->vmacache_seqnum = 0; |
| vmacache_flush(tsk); |
| + preempt_enable_rt(); |
| task_unlock(tsk); |
| if (old_mm) { |
| up_read(&old_mm->mmap_sem); |
| --- a/mm/mmu_context.c |
| +++ b/mm/mmu_context.c |
| @@ -23,6 +23,7 @@ void use_mm(struct mm_struct *mm) |
| struct task_struct *tsk = current; |
| |
| task_lock(tsk); |
| + preempt_disable_rt(); |
| active_mm = tsk->active_mm; |
| if (active_mm != mm) { |
| atomic_inc(&mm->mm_count); |
| @@ -30,6 +31,7 @@ void use_mm(struct mm_struct *mm) |
| } |
| tsk->mm = mm; |
| switch_mm(active_mm, mm, tsk); |
| + preempt_enable_rt(); |
| task_unlock(tsk); |
| #ifdef finish_arch_post_lock_switch |
| finish_arch_post_lock_switch(); |