Subject: sched-mmdrop-delayed.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 06 Jun 2011 12:20:33 +0200

Needs thread context (pgd_lock) -> ifdeffed. Workqueues won't work
with RT, so the final drop is deferred via an RCU callback instead.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/mm_types.h |    4 ++++
 include/linux/sched.h    |   12 ++++++++++++
 kernel/fork.c            |   15 ++++++++++++++-
 kernel/sched/core.c      |   19 +++++++++++++++++--
 4 files changed, 47 insertions(+), 3 deletions(-)

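[ Illustration, not part of the patch: the change relies on the standard
  refcount plus call_rcu() deferral pattern. The last reference holder
  does not tear the object down itself; it queues an RCU callback, which
  on PREEMPT_RT runs in thread context where taking pgd_lock is fine.
  Below is a minimal stand-alone sketch of that pattern using
  hypothetical names (my_obj, my_obj_put_deferred); it is not the API
  added by this patch, just the same shape:

	#include <linux/atomic.h>
	#include <linux/kernel.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct my_obj {
		atomic_t refcount;
		struct rcu_head rcu;	/* only used to defer the final free */
	};

	/* Runs from RCU callback context, after a grace period. */
	static void my_obj_free_rcu(struct rcu_head *rhp)
	{
		struct my_obj *obj = container_of(rhp, struct my_obj, rcu);

		kfree(obj);
	}

	static void my_obj_put_deferred(struct my_obj *obj)
	{
		/* Last user: defer the teardown instead of doing it here. */
		if (atomic_dec_and_test(&obj->refcount))
			call_rcu(&obj->rcu, my_obj_free_rcu);
	}

  mmdrop_delayed()/__mmdrop_delayed() below follow this shape, with
  mm_struct::delayed_drop as the rcu_head and __mmdrop() as the final
  teardown. ]
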
Index: linux-stable/include/linux/mm_types.h
===================================================================
--- linux-stable.orig/include/linux/mm_types.h
+++ linux-stable/include/linux/mm_types.h
@@ -12,6 +12,7 @@
 #include <linux/completion.h>
 #include <linux/cpumask.h>
 #include <linux/page-debug-flags.h>
+#include <linux/rcupdate.h>
 #include <linux/uprobes.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
@@ -409,6 +410,9 @@ struct mm_struct {
 	struct cpumask cpumask_allocation;
 #endif
 	struct uprobes_state uprobes_state;
+#ifdef CONFIG_PREEMPT_RT_BASE
+	struct rcu_head delayed_drop;
+#endif
 };
 
 static inline void mm_init_cpumask(struct mm_struct *mm)
Index: linux-stable/include/linux/sched.h
===================================================================
--- linux-stable.orig/include/linux/sched.h
+++ linux-stable/include/linux/sched.h
@@ -2324,12 +2324,24 @@ extern struct mm_struct * mm_alloc(void)
 
 /* mmdrop drops the mm and the page tables */
 extern void __mmdrop(struct mm_struct *);
+
 static inline void mmdrop(struct mm_struct * mm)
 {
 	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
 		__mmdrop(mm);
 }
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+extern void __mmdrop_delayed(struct rcu_head *rhp);
+static inline void mmdrop_delayed(struct mm_struct *mm)
+{
+	if (atomic_dec_and_test(&mm->mm_count))
+		call_rcu(&mm->delayed_drop, __mmdrop_delayed);
+}
+#else
+# define mmdrop_delayed(mm)	mmdrop(mm)
+#endif
+
 /* mmput gets rid of the mappings and all user-space */
 extern void mmput(struct mm_struct *);
 /* Grab a reference to a task's mm, if it is not already going away */
Index: linux-stable/kernel/fork.c
===================================================================
--- linux-stable.orig/kernel/fork.c
+++ linux-stable/kernel/fork.c
@@ -249,7 +249,7 @@ EXPORT_SYMBOL_GPL(__put_task_struct);
 #else
 void __put_task_struct_cb(struct rcu_head *rhp)
 {
-	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
+	struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu);
 
 	__put_task_struct(tsk);
 
@@ -608,6 +608,19 @@ void __mmdrop(struct mm_struct *mm)
 }
 EXPORT_SYMBOL_GPL(__mmdrop);
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+/*
+ * RCU callback for delayed mm drop. Not strictly rcu, but we don't
+ * want another facility to make this work.
+ */
+void __mmdrop_delayed(struct rcu_head *rhp)
+{
+	struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
+
+	__mmdrop(mm);
+}
+#endif
+
 /*
  * Decrement the use count and release all resources for an mm.
  */
Index: linux-stable/kernel/sched/core.c
===================================================================
--- linux-stable.orig/kernel/sched/core.c
+++ linux-stable/kernel/sched/core.c
@@ -1970,8 +1970,12 @@ static void finish_task_switch(struct rq
 	finish_arch_post_lock_switch();
 
 	fire_sched_in_preempt_notifiers(current);
+	/*
+	 * We use mmdrop_delayed() here so we don't have to do the
+	 * full __mmdrop() when we are the last user.
+	 */
 	if (mm)
-		mmdrop(mm);
+		mmdrop_delayed(mm);
 	if (unlikely(prev_state == TASK_DEAD)) {
 		/*
 		 * Remove function-return probe instances associated with this
@@ -5302,6 +5306,8 @@ static int migration_cpu_stop(void *data
 
 #ifdef CONFIG_HOTPLUG_CPU
 
+static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm);
+
 /*
 * Ensures that the idle task is using init_mm right before its cpu goes
 * offline.
@@ -5314,7 +5320,12 @@ void idle_task_exit(void)
 
 	if (mm != &init_mm)
 		switch_mm(mm, &init_mm, current);
-	mmdrop(mm);
+
+	/*
+	 * Defer the cleanup to an alive cpu. On RT we can neither
+	 * call mmdrop() nor mmdrop_delayed() from here.
+	 */
+	per_cpu(idle_last_mm, smp_processor_id()) = mm;
 }
 
 /*
@@ -5622,6 +5633,10 @@ migration_call(struct notifier_block *nf
 
 	case CPU_DEAD:
 		calc_load_migrate(rq);
+		if (per_cpu(idle_last_mm, cpu)) {
+			mmdrop(per_cpu(idle_last_mm, cpu));
+			per_cpu(idle_last_mm, cpu) = NULL;
+		}
 		break;
 #endif
 	}