Subject: rcu: Merge RCU-bh into RCU-preempt
Date: Wed, 5 Oct 2011 11:59:38 -0700
From: Thomas Gleixner <tglx@linutronix.de>

The Linux kernel has long RCU-bh read-side critical sections that
intolerably increase scheduling latency under mainline's RCU-bh rules,
which make RCU-bh read-side critical sections non-preemptible. This
patch therefore arranges for RCU-bh to be implemented in terms of
RCU-preempt for CONFIG_PREEMPT_RT_FULL=y.
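
In outline, for CONFIG_PREEMPT_RT_FULL=y this patch maps the RCU-bh API
onto RCU-preempt roughly as follows (a simplified sketch of the
definitions in the diff below, not a literal excerpt):

	#ifdef CONFIG_PREEMPT_RT_FULL
	/* Update-side RCU-bh primitives collapse into RCU-preempt. */
	#define call_rcu_bh		call_rcu
	#define synchronize_rcu_bh	synchronize_rcu
	#define rcu_barrier_bh		rcu_barrier

	/* Read side: softirqs remain excluded, but the critical section
	 * is now also an RCU-preempt read-side critical section, and
	 * therefore preemptible. */
	static inline void rcu_read_lock_bh(void)
	{
		local_bh_disable();
		rcu_read_lock();
	}

	static inline void rcu_read_unlock_bh(void)
	{
		rcu_read_unlock();
		local_bh_enable();
	}
	#endif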

This has the downside of defeating the purpose of RCU-bh, namely,
handling the case where the system is subjected to a network-based
denial-of-service attack that keeps at least one CPU doing full-time
softirq processing. This issue will be fixed by a later commit.

The current commit will need some work to make it appropriate for
mainline use; for example, it needs to be extended to cover Tiny RCU.

[ paulmck: Added a useful changelog ]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/20111005185938.GA20403@linux.vnet.ibm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

---
 include/linux/rcupdate.h |   25 +++++++++++++++++++++++++
 include/linux/rcutree.h  |   18 ++++++++++++++++--
 kernel/rcu/tree.c        |   16 ++++++++++++++++
 kernel/rcu/update.c      |    2 ++
 4 files changed, 59 insertions(+), 2 deletions(-)

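For illustration, a typical RCU-bh user needs no source changes under
this scheme (hypothetical caller, not part of this patch; struct foo,
gp, and foo_free_cb are made-up names):

	struct foo {
		int val;
		struct rcu_head rh;
	};

	static void foo_free_cb(struct rcu_head *rh)
	{
		kfree(container_of(rh, struct foo, rh));
	}

	/* Reader: with CONFIG_PREEMPT_RT_FULL=y this critical section
	 * becomes preemptible; local_bh_disable() still keeps softirqs
	 * out. */
	rcu_read_lock_bh();
	p = rcu_dereference_bh(gp);
	/* ... use p ... */
	rcu_read_unlock_bh();

	/* Updater, after unpublishing "old" from gp: call_rcu_bh is now
	 * call_rcu, so the callback runs after an RCU-preempt grace
	 * period. */
	call_rcu_bh(&old->rh, foo_free_cb);
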
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -167,6 +167,9 @@ void call_rcu(struct rcu_head *head,
 
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+#define call_rcu_bh	call_rcu
+#else
 /**
  * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
  * @head: structure to be used for queueing the RCU updates.
@@ -190,6 +193,7 @@ void call_rcu(struct rcu_head *head,
  */
 void call_rcu_bh(struct rcu_head *head,
 		 void (*func)(struct rcu_head *head));
+#endif
 
 /**
  * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
@@ -296,7 +300,13 @@ static inline int rcu_preempt_depth(void
 void rcu_init(void);
 void rcu_end_inkernel_boot(void);
 void rcu_sched_qs(void);
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline void rcu_bh_qs(void) { }
+#else
 void rcu_bh_qs(void);
+#endif
+
 void rcu_check_callbacks(int user);
 struct notifier_block;
 void rcu_idle_enter(void);
@@ -470,7 +480,14 @@ extern struct lockdep_map rcu_callback_m
 int debug_lockdep_rcu_enabled(void);
 
 int rcu_read_lock_held(void);
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline int rcu_read_lock_bh_held(void)
+{
+	return rcu_read_lock_held();
+}
+#else
 int rcu_read_lock_bh_held(void);
+#endif
 
 /**
  * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
@@ -997,10 +1014,14 @@ static inline void rcu_read_unlock(void)
 static inline void rcu_read_lock_bh(void)
 {
 	local_bh_disable();
+#ifdef CONFIG_PREEMPT_RT_FULL
+	rcu_read_lock();
+#else
 	__acquire(RCU_BH);
 	rcu_lock_acquire(&rcu_bh_lock_map);
 	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_lock_bh() used illegally while idle");
+#endif
 }
 
 /*
@@ -1010,10 +1031,14 @@ static inline void rcu_read_lock_bh(void
  */
 static inline void rcu_read_unlock_bh(void)
 {
+#ifdef CONFIG_PREEMPT_RT_FULL
+	rcu_read_unlock();
+#else
 	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_unlock_bh() used illegally while idle");
 	rcu_lock_release(&rcu_bh_lock_map);
 	__release(RCU_BH);
+#endif
 	local_bh_enable();
 }
 
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -46,7 +46,11 @@ static inline void rcu_virt_note_context
 	rcu_note_context_switch();
 }
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define synchronize_rcu_bh	synchronize_rcu
+#else
 void synchronize_rcu_bh(void);
+#endif
 void synchronize_sched_expedited(void);
 void synchronize_rcu_expedited(void);
 
@@ -74,7 +78,11 @@ static inline void synchronize_rcu_bh_ex
 }
 
 void rcu_barrier(void);
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define rcu_barrier_bh		rcu_barrier
+#else
 void rcu_barrier_bh(void);
+#endif
 void rcu_barrier_sched(void);
 unsigned long get_state_synchronize_rcu(void);
 void cond_synchronize_rcu(unsigned long oldstate);
@@ -85,12 +93,10 @@ unsigned long rcu_batches_started(void);
 unsigned long rcu_batches_started_bh(void);
 unsigned long rcu_batches_started_sched(void);
 unsigned long rcu_batches_completed(void);
-unsigned long rcu_batches_completed_bh(void);
 unsigned long rcu_batches_completed_sched(void);
 void show_rcu_gp_kthreads(void);
 
 void rcu_force_quiescent_state(void);
-void rcu_bh_force_quiescent_state(void);
 void rcu_sched_force_quiescent_state(void);
 
 void exit_rcu(void);
@@ -100,6 +106,14 @@ extern int rcu_scheduler_active __read_m
 
 bool rcu_is_watching(void);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
+void rcu_bh_force_quiescent_state(void);
+unsigned long rcu_batches_completed_bh(void);
+#else
+# define rcu_bh_force_quiescent_state	rcu_force_quiescent_state
+# define rcu_batches_completed_bh	rcu_batches_completed
+#endif
+
 void rcu_all_qs(void);
 
 #endif /* __LINUX_RCUTREE_H */
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -220,6 +220,7 @@ void rcu_sched_qs(void)
 	}
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 void rcu_bh_qs(void)
 {
 	if (!__this_cpu_read(rcu_bh_data.passed_quiesce)) {
@@ -229,6 +230,7 @@ void rcu_bh_qs(void)
 		__this_cpu_write(rcu_bh_data.passed_quiesce, 1);
 	}
 }
+#endif
 
 static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
 
@@ -404,6 +406,7 @@ unsigned long rcu_batches_completed_sche
 }
 EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /*
  * Return the number of RCU BH batches completed thus far for debug & stats.
  */
@@ -431,6 +434,13 @@ void rcu_bh_force_quiescent_state(void)
 }
 EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
 
+#else
+void rcu_force_quiescent_state(void)
+{
+}
+EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
+#endif
+
 /*
  * Force a quiescent state for RCU-sched.
  */
@@ -3040,6 +3050,7 @@ void call_rcu_sched(struct rcu_head *hea
 }
 EXPORT_SYMBOL_GPL(call_rcu_sched);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /*
  * Queue an RCU callback for invocation after a quicker grace period.
  */
@@ -3048,6 +3059,7 @@ void call_rcu_bh(struct rcu_head *head,
 	__call_rcu(head, func, &rcu_bh_state, -1, 0);
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
+#endif
 
 /*
  * Queue an RCU callback for lazy invocation after a grace period.
@@ -3139,6 +3151,7 @@ void synchronize_sched(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_sched);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /**
  * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
  *
@@ -3165,6 +3178,7 @@ void synchronize_rcu_bh(void)
 		wait_rcu_gp(call_rcu_bh);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
+#endif
 
 /**
  * get_state_synchronize_rcu - Snapshot current RCU state
@@ -3677,6 +3691,7 @@ static void _rcu_barrier(struct rcu_stat
 	mutex_unlock(&rsp->barrier_mutex);
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /**
  * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
  */
@@ -3685,6 +3700,7 @@ void rcu_barrier_bh(void)
 	_rcu_barrier(&rcu_bh_state);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_bh);
+#endif
 
 /**
  * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -227,6 +227,7 @@ int rcu_read_lock_held(void)
 }
 EXPORT_SYMBOL_GPL(rcu_read_lock_held);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /**
  * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
  *
@@ -253,6 +254,7 @@ int rcu_read_lock_bh_held(void)
 	return in_softirq() || irqs_disabled();
 }
 EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
+#endif
 
 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */