| Subject: mm, rt: kmap_atomic scheduling |
| From: Peter Zijlstra <peterz@infradead.org> |
| Date: Thu, 28 Jul 2011 10:43:51 +0200 |
| |
| In fact, with migrate_disable() existing, one could play games with |
| kmap_atomic. You could save/restore the kmap_atomic slots on context |
| switch (if there are any in use, of course); this should be especially |
| easy now that we have a kmap_atomic stack. |
| |
| Something like the below... it requires replacing all the |
| preempt_disable() stuff with pagefault_disable() && migrate_disable(), |
| of course, but then you can flip kmaps around as shown below. |
| |
| Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> |
| [dvhart@linux.intel.com: build fix] |
| Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins |
| --- |
| arch/x86/kernel/process_32.c | 36 ++++++++++++++++++++++++++++++++++++ |
| include/linux/sched.h | 5 +++++ |
| mm/memory.c | 2 ++ |
| 3 files changed, 43 insertions(+) |
| |
| Index: linux-stable/arch/x86/kernel/process_32.c |
| =================================================================== |
| --- linux-stable.orig/arch/x86/kernel/process_32.c |
| +++ linux-stable/arch/x86/kernel/process_32.c |
| @@ -36,6 +36,7 @@ |
| #include <linux/uaccess.h> |
| #include <linux/io.h> |
| #include <linux/kdebug.h> |
| +#include <linux/highmem.h> |
| |
| #include <asm/pgtable.h> |
| #include <asm/ldt.h> |
| @@ -276,6 +277,41 @@ __switch_to(struct task_struct *prev_p, |
| task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) |
| __switch_to_xtra(prev_p, next_p, tss); |
| |
| +#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM |
| + /* |
| + * Save @prev's kmap_atomic stack |
| + */ |
| + prev_p->kmap_idx = __this_cpu_read(__kmap_atomic_idx); |
| + if (unlikely(prev_p->kmap_idx)) { |
| + int i; |
| + |
| + for (i = 0; i < prev_p->kmap_idx; i++) { |
| + int idx = i + KM_TYPE_NR * smp_processor_id(); |
| + |
| + pte_t *ptep = kmap_pte - idx; |
| + prev_p->kmap_pte[i] = *ptep; |
| + kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx)); |
| + } |
| + |
| + __this_cpu_write(__kmap_atomic_idx, 0); |
| + } |
| + |
| + /* |
| + * Restore @next_p's kmap_atomic stack |
| + */ |
| + if (unlikely(next_p->kmap_idx)) { |
| + int i; |
| + |
| + __this_cpu_write(__kmap_atomic_idx, next_p->kmap_idx); |
| + |
| + for (i = 0; i < next_p->kmap_idx; i++) { |
| + int idx = i + KM_TYPE_NR * smp_processor_id(); |
| + |
| + set_pte(kmap_pte - idx, next_p->kmap_pte[i]); |
| + } |
| + } |
| +#endif |
| + |
| /* |
| * Leave lazy mode, flushing any hypercalls made here. |
| * This must be done before restoring TLS segments so |
| Index: linux-stable/include/linux/sched.h |
| =================================================================== |
| --- linux-stable.orig/include/linux/sched.h |
| +++ linux-stable/include/linux/sched.h |
| @@ -63,6 +63,7 @@ struct sched_param { |
| #include <linux/nodemask.h> |
| #include <linux/mm_types.h> |
| |
| +#include <asm/kmap_types.h> |
| #include <asm/page.h> |
| #include <asm/ptrace.h> |
| #include <asm/cputime.h> |
| @@ -1619,6 +1620,10 @@ struct task_struct { |
| struct rcu_head put_rcu; |
| int softirq_nestcnt; |
| #endif |
| +#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM |
| + int kmap_idx; |
| + pte_t kmap_pte[KM_TYPE_NR]; |
| +#endif |
| }; |
| |
| #ifdef CONFIG_PREEMPT_RT_FULL |
| Index: linux-stable/mm/memory.c |
| =================================================================== |
| --- linux-stable.orig/mm/memory.c |
| +++ linux-stable/mm/memory.c |
| @@ -3487,6 +3487,7 @@ unlock: |
| #ifdef CONFIG_PREEMPT_RT_FULL |
| void pagefault_disable(void) |
| { |
| + migrate_disable(); |
| current->pagefault_disabled++; |
| /* |
| * make sure to have issued the store before a pagefault |
| @@ -3504,6 +3505,7 @@ void pagefault_enable(void) |
| */ |
| barrier(); |
| current->pagefault_disabled--; |
| + migrate_enable(); |
| } |
| EXPORT_SYMBOL_GPL(pagefault_enable); |
| #endif |