Subject: sched-migrate-disable.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 16 Jun 2011 13:26:08 +0200

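Implement migrate_disable()/migrate_enable(). A task which calls
migrate_disable() is pinned to its current CPU, but unlike with
preempt_disable() it stays preemptible, so it can still take
sleeping locks. The state is kept as a nesting counter in
task_struct.

While migration is disabled, tsk_cpus_allowed() reports
cpumask_of(task_cpu(p)), so the scheduler, the load balancer and
the single-CPU check in debug_smp_processor_id() all see an
affinity of exactly one CPU. That requires converting the accessor
from a macro to an inline function and moving it below the
declaration of task_cpu(). Affinity changes which arrive through
do_set_cpus_allowed() while migration is disabled are merely
recorded in ->cpus_allowed and take effect when migrate_enable()
removes the pinning. pin_current_cpu()/unpin_current_cpu() (from
the RT hotplug rework) hold off CPU unplug while a task is pinned,
and the migrate_disable count is printed by
debug_smp_processor_id() next to the preempt count.

The typical use case is per-CPU data access from otherwise
preemptible context. A minimal sketch of a caller (illustrative
only, not part of this patch; struct my_stats and count_event()
are made-up names):

	#include <linux/percpu.h>
	#include <linux/preempt.h>	/* migrate_disable()/migrate_enable() */

	struct my_stats {		/* hypothetical per-CPU data */
		unsigned long	events;
	};
	static DEFINE_PER_CPU(struct my_stats, my_stats);

	static void count_event(void)
	{
		migrate_disable();	/* pinned to this CPU, still preemptible */
		/*
		 * smp_processor_id() and this_cpu_ptr() are stable here,
		 * but other tasks on this CPU can still preempt us, so
		 * concurrent access to the data must be serialized
		 * separately (e.g. by a sleeping lock on RT).
		 */
		this_cpu_ptr(&my_stats)->events++;
		migrate_enable();
	}

On !SMP the two calls compile away to no-ops (see the preempt.h
hunk below), so such code needs no ifdeffery.
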
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/preempt.h |    8 ++++
 include/linux/sched.h   |   13 +++++--
 kernel/sched/core.c     |   88 +++++++++++++++++++++++++++++++++++++++++++++---
 lib/smp_processor_id.c  |    6 +--
 4 files changed, 104 insertions(+), 11 deletions(-)

Index: linux-stable/include/linux/preempt.h
===================================================================
--- linux-stable.orig/include/linux/preempt.h
+++ linux-stable/include/linux/preempt.h
@@ -108,6 +108,14 @@ do { \
 
 #endif /* CONFIG_PREEMPT_COUNT */
 
+#ifdef CONFIG_SMP
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+#else
+# define migrate_disable()		do { } while (0)
+# define migrate_enable()		do { } while (0)
+#endif
+
 #ifdef CONFIG_PREEMPT_RT_FULL
 # define preempt_disable_rt()		preempt_disable()
 # define preempt_enable_rt()		preempt_enable()
Index: linux-stable/include/linux/sched.h
===================================================================
--- linux-stable.orig/include/linux/sched.h
+++ linux-stable/include/linux/sched.h
@@ -1275,6 +1275,7 @@ struct task_struct {
 #endif
 
 	unsigned int policy;
+	int migrate_disable;
 	int nr_cpus_allowed;
 	cpumask_t cpus_allowed;
 
@@ -1614,9 +1615,6 @@ struct task_struct {
 #endif
 };
 
-/* Future-safe accessor for struct task_struct's cpus_allowed. */
-#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
-
 #ifdef CONFIG_PREEMPT_RT_FULL
 static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; }
 #else
@@ -2773,6 +2771,15 @@ static inline void set_task_cpu(struct t
 
 #endif /* CONFIG_SMP */
 
+/* Future-safe accessor for struct task_struct's cpus_allowed. */
+static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
+{
+	if (p->migrate_disable)
+		return cpumask_of(task_cpu(p));
+
+	return &p->cpus_allowed;
+}
+
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
Index: linux-stable/kernel/sched/core.c
===================================================================
--- linux-stable.orig/kernel/sched/core.c
+++ linux-stable/kernel/sched/core.c
@@ -5203,11 +5203,12 @@ void __cpuinit init_idle(struct task_str
 #ifdef CONFIG_SMP
 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
-	if (p->sched_class && p->sched_class->set_cpus_allowed)
-		p->sched_class->set_cpus_allowed(p, new_mask);
-
+	if (!p->migrate_disable) {
+		if (p->sched_class && p->sched_class->set_cpus_allowed)
+			p->sched_class->set_cpus_allowed(p, new_mask);
+		p->nr_cpus_allowed = cpumask_weight(new_mask);
+	}
 	cpumask_copy(&p->cpus_allowed, new_mask);
-	p->nr_cpus_allowed = cpumask_weight(new_mask);
 }
 
 /*
@@ -5258,7 +5259,7 @@ int set_cpus_allowed_ptr(struct task_str
 	do_set_cpus_allowed(p, new_mask);
 
 	/* Can the task run on the task's current CPU? If so, we're done */
-	if (cpumask_test_cpu(task_cpu(p), new_mask))
+	if (cpumask_test_cpu(task_cpu(p), new_mask) || p->migrate_disable)
 		goto out;
 
 	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
@@ -5277,6 +5278,83 @@ out:
 }
 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
+void migrate_disable(void)
+{
+	struct task_struct *p = current;
+	const struct cpumask *mask;
+	unsigned long flags;
+	struct rq *rq;
+
+	preempt_disable();
+	if (p->migrate_disable) {
+		p->migrate_disable++;
+		preempt_enable();
+		return;
+	}
+
+	pin_current_cpu();
+	if (unlikely(!scheduler_running)) {
+		p->migrate_disable = 1;
+		preempt_enable();
+		return;
+	}
+	rq = task_rq_lock(p, &flags);
+	p->migrate_disable = 1;
+	mask = tsk_cpus_allowed(p);
+
+	WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
+
+	if (!cpumask_equal(&p->cpus_allowed, mask)) {
+		if (p->sched_class->set_cpus_allowed)
+			p->sched_class->set_cpus_allowed(p, mask);
+		p->nr_cpus_allowed = cpumask_weight(mask);
+	}
+	task_rq_unlock(rq, p, &flags);
+	preempt_enable();
+}
+EXPORT_SYMBOL(migrate_disable);
+
+void migrate_enable(void)
+{
+	struct task_struct *p = current;
+	const struct cpumask *mask;
+	unsigned long flags;
+	struct rq *rq;
+
+	WARN_ON_ONCE(p->migrate_disable <= 0);
+
+	preempt_disable();
+	if (p->migrate_disable > 1) {
+		p->migrate_disable--;
+		preempt_enable();
+		return;
+	}
+
+	if (unlikely(!scheduler_running)) {
+		p->migrate_disable = 0;
+		unpin_current_cpu();
+		preempt_enable();
+		return;
+	}
+
+	rq = task_rq_lock(p, &flags);
+	p->migrate_disable = 0;
+	mask = tsk_cpus_allowed(p);
+
+	WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
+
+	if (!cpumask_equal(&p->cpus_allowed, mask)) {
+		if (p->sched_class->set_cpus_allowed)
+			p->sched_class->set_cpus_allowed(p, mask);
+		p->nr_cpus_allowed = cpumask_weight(mask);
+	}
+
+	task_rq_unlock(rq, p, &flags);
+	unpin_current_cpu();
+	preempt_enable();
+}
+EXPORT_SYMBOL(migrate_enable);
+
 /*
  * Move (not current) task off this cpu, onto dest cpu. We're doing
  * this because either it can't run here any more (set_cpus_allowed()
Index: linux-stable/lib/smp_processor_id.c
===================================================================
--- linux-stable.orig/lib/smp_processor_id.c
+++ linux-stable/lib/smp_processor_id.c
@@ -39,9 +39,9 @@ notrace unsigned int debug_smp_processor
 	if (!printk_ratelimit())
 		goto out_enable;
 
-	printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] "
-			"code: %s/%d\n",
-			preempt_count() - 1, current->comm, current->pid);
+	printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x %08x] "
+	       "code: %s/%d\n", preempt_count() - 1,
+	       current->migrate_disable, current->comm, current->pid);
 	print_symbol("caller is %s\n", (long)__builtin_return_address(0));
 	dump_stack();
 