| From 615d09440f8136acefebb59a54a64cc68dc3379f Mon Sep 17 00:00:00 2001 |
| From: Thomas Gleixner <tglx@linutronix.de> |
| Date: Wed, 5 Oct 2011 11:59:38 -0700 |
| Subject: [PATCH] rcu: Merge RCU-bh into RCU-preempt |
| |
| The Linux kernel has long RCU-bh read-side critical sections that |
| intolerably increase scheduling latency under mainline's RCU-bh rules, |
| which include RCU-bh read-side critical sections being non-preemptible. |
| This patch therefore arranges for RCU-bh to be implemented in terms of |
| RCU-preempt for CONFIG_PREEMPT_RT_FULL=y. |
| |
| This has the downside of defeating the purpose of RCU-bh, namely, |
| handling the case where the system is subjected to a network-based |
| denial-of-service attack that keeps at least one CPU doing full-time |
| softirq processing. This issue will be fixed by a later commit. |
| |
| The current commit will need some work to make it appropriate for |
| mainline use; for example, it needs to be extended to cover Tiny RCU. |
| |
| [ paulmck: Added a useful changelog ] |
| |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com> |
| Link: http://lkml.kernel.org/r/20111005185938.GA20403@linux.vnet.ibm.com |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| |
| diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h |
| index 00859dfeefb5..580c894766ea 100644 |
| --- a/include/linux/rcupdate.h |
| +++ b/include/linux/rcupdate.h |
| @@ -178,6 +178,9 @@ void call_rcu(struct rcu_head *head, |
| |
| #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ |
| |
| +#ifdef CONFIG_PREEMPT_RT_FULL |
| +#define call_rcu_bh call_rcu |
| +#else |
| /** |
| * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. |
| * @head: structure to be used for queueing the RCU updates. |
| @@ -201,6 +204,7 @@ void call_rcu(struct rcu_head *head, |
| */ |
| void call_rcu_bh(struct rcu_head *head, |
| rcu_callback_t func); |
| +#endif |
| |
| /** |
| * call_rcu_sched() - Queue an RCU for invocation after sched grace period. |
| @@ -338,7 +342,11 @@ static inline int rcu_preempt_depth(void) |
| /* Internal to kernel */ |
| void rcu_init(void); |
| void rcu_sched_qs(void); |
| +#ifdef CONFIG_PREEMPT_RT_FULL |
| +static inline void rcu_bh_qs(void) { } |
| +#else |
| void rcu_bh_qs(void); |
| +#endif |
| void rcu_check_callbacks(int user); |
| void rcu_report_dead(unsigned int cpu); |
| |
| @@ -507,7 +515,14 @@ extern struct lockdep_map rcu_callback_map; |
| int debug_lockdep_rcu_enabled(void); |
| |
| int rcu_read_lock_held(void); |
| +#ifdef CONFIG_PREEMPT_RT_FULL |
| +static inline int rcu_read_lock_bh_held(void) |
| +{ |
| + return rcu_read_lock_held(); |
| +} |
| +#else |
| int rcu_read_lock_bh_held(void); |
| +#endif |
| |
| /** |
| * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section? |
| @@ -953,10 +968,14 @@ static inline void rcu_read_unlock(void) |
| static inline void rcu_read_lock_bh(void) |
| { |
| local_bh_disable(); |
| +#ifdef CONFIG_PREEMPT_RT_FULL |
| + rcu_read_lock(); |
| +#else |
| __acquire(RCU_BH); |
| rcu_lock_acquire(&rcu_bh_lock_map); |
| RCU_LOCKDEP_WARN(!rcu_is_watching(), |
| "rcu_read_lock_bh() used illegally while idle"); |
| +#endif |
| } |
| |
| /* |
| @@ -966,10 +985,14 @@ static inline void rcu_read_lock_bh(void) |
| */ |
| static inline void rcu_read_unlock_bh(void) |
| { |
| +#ifdef CONFIG_PREEMPT_RT_FULL |
| + rcu_read_unlock(); |
| +#else |
| RCU_LOCKDEP_WARN(!rcu_is_watching(), |
| "rcu_read_unlock_bh() used illegally while idle"); |
| rcu_lock_release(&rcu_bh_lock_map); |
| __release(RCU_BH); |
| +#endif |
| local_bh_enable(); |
| } |
| |
| diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h |
| index 63a4e4cf40a5..80c130ef20d3 100644 |
| --- a/include/linux/rcutree.h |
| +++ b/include/linux/rcutree.h |
| @@ -44,7 +44,11 @@ static inline void rcu_virt_note_context_switch(int cpu) |
| rcu_note_context_switch(); |
| } |
| |
| +#ifdef CONFIG_PREEMPT_RT_FULL |
| +# define synchronize_rcu_bh synchronize_rcu |
| +#else |
| void synchronize_rcu_bh(void); |
| +#endif |
| void synchronize_sched_expedited(void); |
| void synchronize_rcu_expedited(void); |
| |
| @@ -72,7 +76,11 @@ static inline void synchronize_rcu_bh_expedited(void) |
| } |
| |
| void rcu_barrier(void); |
| +#ifdef CONFIG_PREEMPT_RT_FULL |
| +# define rcu_barrier_bh rcu_barrier |
| +#else |
| void rcu_barrier_bh(void); |
| +#endif |
| void rcu_barrier_sched(void); |
| unsigned long get_state_synchronize_rcu(void); |
| void cond_synchronize_rcu(unsigned long oldstate); |
| @@ -85,14 +93,12 @@ unsigned long rcu_batches_started(void); |
| unsigned long rcu_batches_started_bh(void); |
| unsigned long rcu_batches_started_sched(void); |
| unsigned long rcu_batches_completed(void); |
| -unsigned long rcu_batches_completed_bh(void); |
| unsigned long rcu_batches_completed_sched(void); |
| unsigned long rcu_exp_batches_completed(void); |
| unsigned long rcu_exp_batches_completed_sched(void); |
| void show_rcu_gp_kthreads(void); |
| |
| void rcu_force_quiescent_state(void); |
| -void rcu_bh_force_quiescent_state(void); |
| void rcu_sched_force_quiescent_state(void); |
| |
| void rcu_idle_enter(void); |
| @@ -109,6 +115,14 @@ extern int rcu_scheduler_active __read_mostly; |
| |
| bool rcu_is_watching(void); |
| |
| +#ifndef CONFIG_PREEMPT_RT_FULL |
| +void rcu_bh_force_quiescent_state(void); |
| +unsigned long rcu_batches_completed_bh(void); |
| +#else |
| +# define rcu_bh_force_quiescent_state rcu_force_quiescent_state |
| +# define rcu_batches_completed_bh rcu_batches_completed |
| +#endif |
| + |
| void rcu_all_qs(void); |
| |
| /* RCUtree hotplug events */ |
| diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c |
| index 5d80925e7fc8..fc57846be21e 100644 |
| --- a/kernel/rcu/tree.c |
| +++ b/kernel/rcu/tree.c |
| @@ -259,6 +259,7 @@ void rcu_sched_qs(void) |
| this_cpu_ptr(&rcu_sched_data), true); |
| } |
| |
| +#ifndef CONFIG_PREEMPT_RT_FULL |
| void rcu_bh_qs(void) |
| { |
| if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) { |
| @@ -268,6 +269,7 @@ void rcu_bh_qs(void) |
| __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false); |
| } |
| } |
| +#endif |
| |
| static DEFINE_PER_CPU(int, rcu_sched_qs_mask); |
| |
| @@ -472,6 +474,7 @@ unsigned long rcu_batches_completed_sched(void) |
| } |
| EXPORT_SYMBOL_GPL(rcu_batches_completed_sched); |
| |
| +#ifndef CONFIG_PREEMPT_RT_FULL |
| /* |
| * Return the number of RCU BH batches completed thus far for debug & stats. |
| */ |
| @@ -521,6 +524,13 @@ void rcu_bh_force_quiescent_state(void) |
| } |
| EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); |
| |
| +#else |
| +void rcu_force_quiescent_state(void) |
| +{ |
| +} |
| +EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); |
| +#endif |
| + |
| /* |
| * Force a quiescent state for RCU-sched. |
| */ |
| @@ -3192,6 +3202,7 @@ void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) |
| } |
| EXPORT_SYMBOL_GPL(call_rcu_sched); |
| |
| +#ifndef CONFIG_PREEMPT_RT_FULL |
| /* |
| * Queue an RCU callback for invocation after a quicker grace period. |
| */ |
| @@ -3200,6 +3211,7 @@ void call_rcu_bh(struct rcu_head *head, rcu_callback_t func) |
| __call_rcu(head, func, &rcu_bh_state, -1, 0); |
| } |
| EXPORT_SYMBOL_GPL(call_rcu_bh); |
| +#endif |
| |
| /* |
| * Queue an RCU callback for lazy invocation after a grace period. |
| @@ -3291,6 +3303,7 @@ void synchronize_sched(void) |
| } |
| EXPORT_SYMBOL_GPL(synchronize_sched); |
| |
| +#ifndef CONFIG_PREEMPT_RT_FULL |
| /** |
| * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. |
| * |
| @@ -3317,6 +3330,7 @@ void synchronize_rcu_bh(void) |
| wait_rcu_gp(call_rcu_bh); |
| } |
| EXPORT_SYMBOL_GPL(synchronize_rcu_bh); |
| +#endif |
| |
| /** |
| * get_state_synchronize_rcu - Snapshot current RCU state |
| @@ -3695,6 +3709,7 @@ static void _rcu_barrier(struct rcu_state *rsp) |
| mutex_unlock(&rsp->barrier_mutex); |
| } |
| |
| +#ifndef CONFIG_PREEMPT_RT_FULL |
| /** |
| * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. |
| */ |
| @@ -3703,6 +3718,7 @@ void rcu_barrier_bh(void) |
| _rcu_barrier(&rcu_bh_state); |
| } |
| EXPORT_SYMBOL_GPL(rcu_barrier_bh); |
| +#endif |
| |
| /** |
| * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. |
| diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c |
| index f0d8322bc3ec..b40d3468ba4e 100644 |
| --- a/kernel/rcu/update.c |
| +++ b/kernel/rcu/update.c |
| @@ -295,6 +295,7 @@ int rcu_read_lock_held(void) |
| } |
| EXPORT_SYMBOL_GPL(rcu_read_lock_held); |
| |
| +#ifndef CONFIG_PREEMPT_RT_FULL |
| /** |
| * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section? |
| * |
| @@ -321,6 +322,7 @@ int rcu_read_lock_bh_held(void) |
| return in_softirq() || irqs_disabled(); |
| } |
| EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); |
| +#endif |
| |
| #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
| |
| -- |
| 2.5.0 |
| |