From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Sat, 27 May 2017 19:02:06 +0200
Subject: kernel/sched/core: add migrate_disable()

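Add migrate_disable()/migrate_enable(): a task that calls
migrate_disable() is pinned to the CPU it is currently running on
but, unlike with preempt_disable(), stays preemptible.
migrate_disable() points p->cpus_ptr at a one-CPU mask and leaves the
wide p->cpus_mask untouched; migrate_enable() restores the pointer.
An affinity change that arrives while migration is disabled is
recorded via p->migrate_disable_update and replayed by
migrate_enable(), which then migrates the task away if its current
CPU is no longer in the new mask. On !SMP or !CONFIG_PREEMPT_COUNT
configurations both calls degrade to barrier().

get_cpu_light()/put_cpu_light() are added as the migration-disabling
counterparts of get_cpu()/put_cpu(), for code that must stay on one
CPU but may sleep, e.g. because it takes a spinlock that becomes a
sleeping lock on RT. A minimal usage sketch (illustration only, not
part of this patch; the per-CPU data and function names are made up,
and lock initialization is omitted for brevity):

	struct light_data {
		spinlock_t lock;
		int count;
	};
	static DEFINE_PER_CPU(struct light_data, light_data);

	static void light_count(void)
	{
		struct light_data *ld;

		/* stay on this CPU, but remain preemptible */
		ld = &per_cpu(light_data, get_cpu_light());
		spin_lock(&ld->lock);	/* may sleep on RT */
		ld->count++;
		spin_unlock(&ld->lock);
		put_cpu_light();
	}
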
---
 include/linux/preempt.h |   23 ++++++
 include/linux/sched.h   |    7 ++
 include/linux/smp.h     |    3 +
 kernel/sched/core.c     |  131 ++++++++++++++++++++++++++++++++++++++++++++-
 kernel/sched/debug.c    |    4 +
 5 files changed, 167 insertions(+), 1 deletion(-)

--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -184,6 +184,22 @@ do { \

 #define preemptible()	(preempt_count() == 0 && !irqs_disabled())

+#ifdef CONFIG_SMP
+
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+
+int __migrate_disabled(struct task_struct *p);
+
+#else
+#define migrate_disable()		barrier()
+#define migrate_enable()		barrier()
+static inline int __migrate_disabled(struct task_struct *p)
+{
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_PREEMPT
 #define preempt_enable() \
 do { \
@@ -252,6 +268,13 @@ do { \
 #define preempt_enable_notrace()	barrier()
 #define preemptible()			0

+#define migrate_disable()		barrier()
+#define migrate_enable()		barrier()
+
+static inline int __migrate_disabled(struct task_struct *p)
+{
+	return 0;
+}
 #endif /* CONFIG_PREEMPT_COUNT */

 #ifdef MODULE
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -537,6 +537,13 @@ struct task_struct {
 	int				nr_cpus_allowed;
 	const cpumask_t			*cpus_ptr;
 	cpumask_t			cpus_mask;
+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+	int				migrate_disable;
+	int				migrate_disable_update;
+# ifdef CONFIG_SCHED_DEBUG
+	int				migrate_disable_atomic;
+# endif
+#endif

 #ifdef CONFIG_PREEMPT_RCU
 	int				rcu_read_lock_nesting;
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -197,6 +197,9 @@ static inline int get_boot_cpu_id(void)
 #define get_cpu()		({ preempt_disable(); smp_processor_id(); })
 #define put_cpu()		preempt_enable()

+#define get_cpu_light()		({ migrate_disable(); smp_processor_id(); })
+#define put_cpu_light()		migrate_enable()
+
 /*
  * Callback to arch code if there's nosmp or maxcpus=0 on the
  * boot command line:
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1047,7 +1047,15 @@ void set_cpus_allowed_common(struct task
 	p->nr_cpus_allowed = cpumask_weight(new_mask);
 }

-void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+int __migrate_disabled(struct task_struct *p)
+{
+	return p->migrate_disable;
+}
+#endif
+
+static void __do_set_cpus_allowed_tail(struct task_struct *p,
+				       const struct cpumask *new_mask)
 {
 	struct rq *rq = task_rq(p);
 	bool queued, running;
@@ -1076,6 +1084,20 @@ void do_set_cpus_allowed(struct task_str
 		set_curr_task(rq, p);
 }

+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+	if (__migrate_disabled(p)) {
+		lockdep_assert_held(&p->pi_lock);
+
+		cpumask_copy(&p->cpus_mask, new_mask);
+		p->migrate_disable_update = 1;
+		return;
+	}
+#endif
+	__do_set_cpus_allowed_tail(p, new_mask);
+}
+
 /*
  * Change a given task's CPU affinity. Migrate the thread to a
  * proper CPU and schedule it away if the CPU it's executing on
@@ -1134,9 +1156,16 @@ static int __set_cpus_allowed_ptr(struct
 	}

 	/* Can the task run on the task's current CPU? If so, we're done */
 	if (cpumask_test_cpu(task_cpu(p), new_mask))
 		goto out;

+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+	if (__migrate_disabled(p)) {
+		p->migrate_disable_update = 1;
+		goto out;
+	}
+#endif
+
 	dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
 	if (task_running(rq, p) || p->state == TASK_WAKING) {
 		struct migration_arg arg = { p, dest_cpu };
@@ -7357,3 +7386,103 @@ const u32 sched_prio_to_wmult[40] = {
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
 };
+
+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+
+void migrate_disable(void)
+{
+	struct task_struct *p = current;
+
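+	/* In atomic or IRQ-off context migration cannot happen anyway. */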
+	if (in_atomic() || irqs_disabled()) {
+#ifdef CONFIG_SCHED_DEBUG
+		p->migrate_disable_atomic++;
+#endif
+		return;
+	}
+#ifdef CONFIG_SCHED_DEBUG
+	WARN_ON_ONCE(p->migrate_disable_atomic);
+#endif
+
+	if (p->migrate_disable) {
+		p->migrate_disable++;
+		return;
+	}
+
+	preempt_disable();
+	p->migrate_disable = 1;
+
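+	/* Narrow the scheduler-visible affinity to the current CPU only. */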
+	p->cpus_ptr = cpumask_of(smp_processor_id());
+	p->nr_cpus_allowed = 1;
+
+	preempt_enable();
+}
+EXPORT_SYMBOL(migrate_disable);
+
+void migrate_enable(void)
+{
+	struct task_struct *p = current;
+
+	if (in_atomic() || irqs_disabled()) {
+#ifdef CONFIG_SCHED_DEBUG
+		p->migrate_disable_atomic--;
+#endif
+		return;
+	}
+
+#ifdef CONFIG_SCHED_DEBUG
+	WARN_ON_ONCE(p->migrate_disable_atomic);
+#endif
+
+	WARN_ON_ONCE(p->migrate_disable <= 0);
+	if (p->migrate_disable > 1) {
+		p->migrate_disable--;
+		return;
+	}
+
+	preempt_disable();
+
+	p->cpus_ptr = &p->cpus_mask;
+	p->nr_cpus_allowed = cpumask_weight(&p->cpus_mask);
+	p->migrate_disable = 0;
+
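+	/* Replay an affinity change that arrived while migration was disabled. */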
+	if (p->migrate_disable_update) {
+		struct rq *rq;
+		struct rq_flags rf;
+
+		rq = task_rq_lock(p, &rf);
+		update_rq_clock(rq);
+
+		__do_set_cpus_allowed_tail(p, &p->cpus_mask);
+		task_rq_unlock(rq, p, &rf);
+
+		p->migrate_disable_update = 0;
+
+		WARN_ON(smp_processor_id() != task_cpu(p));
+		if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
+			const struct cpumask *cpu_valid_mask = cpu_active_mask;
+			struct migration_arg arg;
+			unsigned int dest_cpu;
+
+			if (p->flags & PF_KTHREAD) {
+				/*
+				 * Kernel threads are allowed on online && !active CPUs
+				 */
+				cpu_valid_mask = cpu_online_mask;
+			}
+			dest_cpu = cpumask_any_and(cpu_valid_mask, &p->cpus_mask);
+			arg.task = p;
+			arg.dest_cpu = dest_cpu;
+
+			preempt_enable();
+			stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
+			tlb_migrate_finish(p->mm);
+			return;
+		}
+	}
+	preempt_enable();
+}
+EXPORT_SYMBOL(migrate_enable);
+#endif
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -958,6 +958,10 @@ void proc_sched_show_task(struct task_st
 		P(dl.runtime);
 		P(dl.deadline);
 	}
+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+	P(migrate_disable);
+#endif
+	P(nr_cpus_allowed);
 #undef PN_SCHEDSTAT
 #undef PN
 #undef __PN