From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 16 Feb 2018 11:45:13 +0100
Subject: [PATCH] RCU: skip the "schedule() in RCU section" warning on UP, too

| In "RCU: we need to skip that warning but only on sleeping locks" we |
| skipped a warning on SMP systems in case we schedule out in a RCU |
| section while attempt to obtain a sleeping lock. This is also required |
| on UP systems. |
| In order to do so, I introduce a tiny version of migrate_disable() + |
| _enable() which only update the counters which we then can check against |
| on RT && !SMP. |
| |
Cc: stable-rt@vger.kernel.org
Reported-by: Grygorii Strashko <grygorii.strashko@ti.com>
Tested-by: Grygorii Strashko <grygorii.strashko@ti.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 include/linux/preempt.h  |  9 +++++++++
 include/linux/sched.h    |  6 ++++++
 kernel/rcu/tree_plugin.h |  2 +-
 kernel/sched/core.c      | 45 +++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 61 insertions(+), 1 deletion(-)

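For reference, a schematic sketch of the sequence that triggers the
warning on UP without this patch (not part of the applied diff;
rt_spin_lock() stands in for any sleeping lock on RT, the exact call
chain depends on the tree):

	rcu_read_lock();
	rt_spin_lock(&lock);	/* lock is contended */
	-> migrate_disable();	/* current->migrate_disable++ */
	-> schedule();		/* schedules out inside the RCU
				 * read-side section */

rcu_preempt_note_context_switch() then sees t->migrate_disable != 0 and
stays quiet. On UP, migrate_disable() used to be a plain barrier(), so
the counter stayed at 0 and the warning fired.
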
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -211,6 +211,15 @@ extern void migrate_enable(void);
 
 int __migrate_disabled(struct task_struct *p);
 
+#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+static inline int __migrate_disabled(struct task_struct *p)
+{
+	return 0;
+}
+
 #else
 #define migrate_disable()		barrier()
 #define migrate_enable()		barrier()
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -604,6 +604,12 @@ struct task_struct {
 # ifdef CONFIG_SCHED_DEBUG
 	int				migrate_disable_atomic;
 # endif
+
+#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+	int				migrate_disable;
+# ifdef CONFIG_SCHED_DEBUG
+	int				migrate_disable_atomic;
+# endif
 #endif
 
 #ifdef CONFIG_PREEMPT_RCU
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -328,7 +328,7 @@ static void rcu_preempt_note_context_swi
 	int mg_counter = 0;
 
 	lockdep_assert_irqs_disabled();
-#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+#if defined(CONFIG_PREEMPT_RT_BASE)
 	mg_counter = t->migrate_disable;
 #endif
 	WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0 && !mg_counter);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7283,4 +7283,49 @@ void migrate_enable(void)
 	preempt_enable();
 }
 EXPORT_SYMBOL(migrate_enable);
+
+#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+void migrate_disable(void)
+{
+	struct task_struct *p = current;
+
+	if (in_atomic() || irqs_disabled()) {
+#ifdef CONFIG_SCHED_DEBUG
+		p->migrate_disable_atomic++;
+#endif
+		return;
+	}
+#ifdef CONFIG_SCHED_DEBUG
+	if (unlikely(p->migrate_disable_atomic)) {
+		tracing_off();
+		WARN_ON_ONCE(1);
+	}
+#endif
+
+	p->migrate_disable++;
+}
+EXPORT_SYMBOL(migrate_disable);
+
+void migrate_enable(void)
+{
+	struct task_struct *p = current;
+
+	if (in_atomic() || irqs_disabled()) {
+#ifdef CONFIG_SCHED_DEBUG
+		p->migrate_disable_atomic--;
+#endif
+		return;
+	}
+
+#ifdef CONFIG_SCHED_DEBUG
+	if (unlikely(p->migrate_disable_atomic)) {
+		tracing_off();
+		WARN_ON_ONCE(1);
+	}
+#endif
+
+	WARN_ON_ONCE(p->migrate_disable <= 0);
+	p->migrate_disable--;
+}
+EXPORT_SYMBOL(migrate_enable);
 #endif