Subject: sched: Move mmdrop to RCU on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 06 Jun 2011 12:20:33 +0200

__mmdrop() takes sleeping locks and calls into the memory allocator,
which is nothing we want to do during task switch or in other atomic
contexts. On RT, defer the final drop to an RCU callback instead.
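
Purely as illustration (plain C11 userspace, not part of the patch;
the callback list below stands in for the RCU grace period machinery,
and the kernel names are mimicked), the pattern boils down to: the
last dropper only does an atomic decrement and queues a callback, and
the expensive teardown runs later from a context that may block:

/*
 * Standalone sketch of the delayed-drop pattern. Illustrative only:
 * mmdrop_delayed() does an atomic decrement plus a list insertion,
 * both fine in atomic context; the real teardown runs later.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *);
};

struct mm_struct {
	atomic_int mm_count;
	struct rcu_head delayed_drop;
	/* page tables, counters etc. elided */
};

static struct rcu_head *cb_list;	/* simulated RCU callback list */

static void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *))
{
	head->func = func;
	head->next = cb_list;
	cb_list = head;
}

/* The expensive part: in the kernel this may take sleeping locks. */
static void __mmdrop(struct mm_struct *mm)
{
	printf("full teardown of mm %p, blocking is fine here\n", (void *)mm);
	free(mm);
}

static void __mmdrop_delayed(struct rcu_head *rhp)
{
	/* open-coded container_of() */
	struct mm_struct *mm = (struct mm_struct *)
		((char *)rhp - offsetof(struct mm_struct, delayed_drop));

	__mmdrop(mm);
}

/* Safe from atomic context: an atomic dec plus a list insertion. */
static void mmdrop_delayed(struct mm_struct *mm)
{
	if (atomic_fetch_sub(&mm->mm_count, 1) == 1)
		call_rcu(&mm->delayed_drop, __mmdrop_delayed);
}

/* Stand-in for the grace period machinery invoking callbacks. */
static void run_deferred_callbacks(void)
{
	while (cb_list) {
		struct rcu_head *head = cb_list;

		cb_list = head->next;
		head->func(head);
	}
}

int main(void)
{
	struct mm_struct *mm = calloc(1, sizeof(*mm));

	atomic_init(&mm->mm_count, 1);
	mmdrop_delayed(mm);		/* "task switch": queue only */
	run_deferred_callbacks();	/* "grace period": teardown */
	return 0;
}

Note that the #else path in the sched.h hunk below maps
mmdrop_delayed() straight back to mmdrop(), so non-RT kernels are
unaffected.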

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/mm_types.h |    4 ++++
 include/linux/sched.h    |   11 +++++++++++
 kernel/fork.c            |   13 +++++++++++++
 kernel/sched/core.c      |   19 +++++++++++++++++--
 4 files changed, 45 insertions(+), 2 deletions(-)

--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -11,6 +11,7 @@
 #include <linux/completion.h>
 #include <linux/cpumask.h>
 #include <linux/uprobes.h>
+#include <linux/rcupdate.h>
 #include <linux/page-flags-layout.h>
 #include <linux/workqueue.h>
 #include <asm/page.h>
@@ -508,6 +509,9 @@ struct mm_struct {
 	bool tlb_flush_pending;
 #endif
 	struct uprobes_state uprobes_state;
+#ifdef CONFIG_PREEMPT_RT_BASE
+	struct rcu_head delayed_drop;
+#endif
 #ifdef CONFIG_X86_INTEL_MPX
 	/* address of the bounds directory */
 	void __user *bd_addr;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2857,6 +2857,17 @@ static inline void mmdrop(struct mm_stru
 		__mmdrop(mm);
 }
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+extern void __mmdrop_delayed(struct rcu_head *rhp);
+static inline void mmdrop_delayed(struct mm_struct *mm)
+{
+	if (atomic_dec_and_test(&mm->mm_count))
+		call_rcu(&mm->delayed_drop, __mmdrop_delayed);
+}
+#else
+# define mmdrop_delayed(mm)	mmdrop(mm)
+#endif
+
 static inline bool mmget_not_zero(struct mm_struct *mm)
 {
 	return atomic_inc_not_zero(&mm->mm_users);
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -715,6 +715,19 @@ void __mmdrop(struct mm_struct *mm)
 }
 EXPORT_SYMBOL_GPL(__mmdrop);
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+/*
+ * RCU callback for delayed mm drop. Not strictly rcu, but we don't
+ * want another facility to make this work.
+ */
+void __mmdrop_delayed(struct rcu_head *rhp)
+{
+	struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
+
+	__mmdrop(mm);
+}
+#endif
+
 static inline void __mmput(struct mm_struct *mm)
 {
 	VM_BUG_ON(atomic_read(&mm->mm_users));
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2776,8 +2776,12 @@ static struct rq *finish_task_switch(str
 	finish_arch_post_lock_switch();
 
 	fire_sched_in_preempt_notifiers(current);
+	/*
+	 * Use mmdrop_delayed() here so the full __mmdrop() is not
+	 * run from this atomic context when we are the last user.
+	 */
 	if (mm)
-		mmdrop(mm);
+		mmdrop_delayed(mm);
 	if (unlikely(prev_state == TASK_DEAD)) {
 		if (prev->sched_class->task_dead)
 			prev->sched_class->task_dead(prev);
@@ -5513,6 +5517,8 @@ void sched_setnuma(struct task_struct *p
 #endif /* CONFIG_NUMA_BALANCING */
 
 #ifdef CONFIG_HOTPLUG_CPU
+static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm);
+
 /*
  * Ensures that the idle task is using init_mm right before its cpu goes
  * offline.
@@ -5527,7 +5533,12 @@ void idle_task_exit(void)
 		switch_mm_irqs_off(mm, &init_mm, current);
 		finish_arch_post_lock_switch();
 	}
-	mmdrop(mm);
+	/*
+	 * Defer the cleanup to a still online cpu. On RT we can
+	 * neither call mmdrop() nor mmdrop_delayed() from here.
+	 */
+	per_cpu(idle_last_mm, smp_processor_id()) = mm;
+
 }
 
 /*
@@ -7402,6 +7413,10 @@ int sched_cpu_dying(unsigned int cpu)
 	update_max_interval();
 	nohz_balance_exit_idle(cpu);
 	hrtick_clear(rq);
+	if (per_cpu(idle_last_mm, cpu)) {
+		mmdrop_delayed(per_cpu(idle_last_mm, cpu));
+		per_cpu(idle_last_mm, cpu) = NULL;
+	}
 	return 0;
 }
 #endif
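
For completeness, a compact sketch of the cpu-hotplug hand-off added
in the core.c hunks above (again plain C with illustrative names, not
kernel code): the dying cpu's idle_task_exit() path only parks the mm
pointer in per-cpu storage, and sched_cpu_dying() later picks it up
and queues the deferred drop:

/*
 * Sketch of the per-cpu hand-off. park_idle_mm() and reap_idle_mm()
 * are hypothetical names standing in for the idle_task_exit() and
 * sched_cpu_dying() pieces of the patch.
 */
#include <stdio.h>

#define NR_CPUS 4

struct mm_struct;				/* opaque here */

static struct mm_struct *idle_last_mm[NR_CPUS];	/* "per-cpu" slots */

/* Stand-in for mmdrop_delayed(): queue the deferred drop. */
static void mmdrop_delayed(struct mm_struct *mm)
{
	printf("queueing deferred drop of mm %p\n", (void *)mm);
}

/* Dying cpu: queueing the drop is not safe here, so park the mm. */
static void park_idle_mm(int cpu, struct mm_struct *mm)
{
	idle_last_mm[cpu] = mm;
}

/* Later hotplug stage: pick up the parked mm and drop it. */
static void reap_idle_mm(int cpu)
{
	if (idle_last_mm[cpu]) {
		mmdrop_delayed(idle_last_mm[cpu]);
		idle_last_mm[cpu] = NULL;
	}
}

int main(void)
{
	static int dummy;
	struct mm_struct *mm = (struct mm_struct *)&dummy;

	park_idle_mm(1, mm);	/* idle_task_exit() on the dying cpu */
	reap_idle_mm(1);	/* sched_cpu_dying() afterwards */
	return 0;
}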