From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:30:37 -0500
Subject: mm: Prepare decoupling the page fault disabling logic

Add a pagefault_disabled variable to task_struct to allow decoupling
the pagefault-disabled logic from the preempt count.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

---
 include/linux/sched.h   |    1 +
 include/linux/uaccess.h |   33 +++------------------------------
 kernel/fork.c           |    1 +
 mm/memory.c             |   29 +++++++++++++++++++++++++++++
 4 files changed, 34 insertions(+), 30 deletions(-)
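
The point of the per-task counter is that a follow-up patch can key
the fault handler's "may we fault here?" check to the task flag
instead of inferring it from the preempt count. A minimal sketch of
such a check, using a hypothetical helper name that is not part of
this patch:

	/*
	 * Hypothetical follow-up, sketch only (fault_disabled() is
	 * not introduced by this patch): once the fault path is
	 * converted, it can test the per-task flag rather than
	 * deducing the state from in_atomic().
	 */
	static inline bool fault_disabled(void)
	{
		return current->pagefault_disabled != 0;
	}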

--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1453,6 +1453,7 @@ struct task_struct {
 /* mutex deadlock detection */
 	struct mutex_waiter *blocked_on;
 #endif
+	int pagefault_disabled;
 #ifdef CONFIG_TRACE_IRQFLAGS
 	unsigned int irq_events;
 	unsigned long hardirq_enable_ip;
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -6,37 +6,10 @@
 
 /*
  * These routines enable/disable the pagefault handler in that
- * it will not take any locks and go straight to the fixup table.
- *
- * They have great resemblance to the preempt_disable/enable calls
- * and in fact they are identical; this is because currently there is
- * no other way to make the pagefault handlers do this. So we do
- * disable preemption but we don't necessarily care about that.
+ * it will not take any MM locks and go straight to the fixup table.
  */
-static inline void pagefault_disable(void)
-{
-	inc_preempt_count();
-	/*
-	 * make sure to have issued the store before a pagefault
-	 * can hit.
-	 */
-	barrier();
-}
-
-static inline void pagefault_enable(void)
-{
-	/*
-	 * make sure to issue those last loads/stores before enabling
-	 * the pagefault handler again.
-	 */
-	barrier();
-	dec_preempt_count();
-	/*
-	 * make sure we do..
-	 */
-	barrier();
-	preempt_check_resched();
-}
+extern void pagefault_disable(void);
+extern void pagefault_enable(void);
 
 #ifndef ARCH_HAS_NOCACHE_UACCESS
 
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1285,6 +1285,7 @@ static struct task_struct *copy_process(
 	p->hardirq_context = 0;
 	p->softirq_context = 0;
 #endif
+	p->pagefault_disabled = 0;
 #ifdef CONFIG_LOCKDEP
 	p->lockdep_depth = 0; /* no locks held yet */
 	p->curr_chain_key = 0;
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3717,6 +3717,35 @@ unlock:
 	return 0;
 }
 
+void pagefault_disable(void)
+{
+	inc_preempt_count();
+	current->pagefault_disabled++;
+	/*
+	 * make sure to have issued the store before a pagefault
+	 * can hit.
+	 */
+	barrier();
+}
+EXPORT_SYMBOL(pagefault_disable);
+
+void pagefault_enable(void)
+{
+	/*
+	 * make sure to issue those last loads/stores before enabling
+	 * the pagefault handler again.
+	 */
+	barrier();
+	current->pagefault_disabled--;
+	dec_preempt_count();
+	/*
+	 * make sure we do..
+	 */
+	barrier();
+	preempt_check_resched();
+}
+EXPORT_SYMBOL(pagefault_enable);
+
 /*
  * By the time we get here, we already hold the mm semaphore
  */
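
The calling convention is unchanged: users keep bracketing atomic
user-space accesses with the pair, and because pagefault_disabled is
a counter rather than a flag, disabled sections still nest. Sketch of
the existing usage pattern (dst, src and size are placeholders):

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, size);
	pagefault_enable();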