| From 4f99beb761417b0d897b48d7eb39ad7f3131c10c Mon Sep 17 00:00:00 2001 |
| From: Thomas Gleixner <tglx@linutronix.de> |
| Date: Fri, 3 Jul 2009 13:16:38 -0500 |
| Subject: [PATCH 158/274] softirq: Sanitize softirq pending for NOHZ/RT |
| |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| --- |
| include/linux/interrupt.h | 2 ++ |
| kernel/softirq.c | 61 +++++++++++++++++++++++++++++++++++++++++++++ |
| kernel/time/tick-sched.c | 8 +----- |
| 3 files changed, 64 insertions(+), 7 deletions(-) |
| |
| diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h |
| index 9027bde..d3b1727 100644 |
| --- a/include/linux/interrupt.h |
| +++ b/include/linux/interrupt.h |
| @@ -464,6 +464,8 @@ extern void __raise_softirq_irqoff(unsigned int nr); |
| extern void raise_softirq_irqoff(unsigned int nr); |
| extern void raise_softirq(unsigned int nr); |
| |
| +extern void softirq_check_pending_idle(void); |
| + |
| /* This is the worklist that queues up per-cpu softirq work. |
| * |
| * send_remote_sendirq() adds work to these lists, and |
| diff --git a/kernel/softirq.c b/kernel/softirq.c |
| index 6ec3a78..6f84f09 100644 |
| --- a/kernel/softirq.c |
| +++ b/kernel/softirq.c |
| @@ -61,6 +61,67 @@ char *softirq_to_name[NR_SOFTIRQS] = { |
| "TASKLET", "SCHED", "HRTIMER", "RCU" |
| }; |
| |
| +#ifdef CONFIG_NO_HZ |
| +# ifdef CONFIG_PREEMPT_RT_FULL |
| +/* |
| + * On preempt-rt a softirq might be blocked on a lock. There might be |
| + * no other runnable task on this CPU because the lock owner runs on |
| + * some other CPU. So we have to go into idle with the pending bit |
| + * set. Therefore we need to check this; otherwise we warn about false |
| + * positives, which confuses users and defeats the whole purpose of |
| + * this test. |
| + * |
| + * This code is called with interrupts disabled. |
| + */ |
| +void softirq_check_pending_idle(void) |
| +{ |
| + static int rate_limit; |
| + u32 warnpending = 0, pending = local_softirq_pending(); |
| + |
| + if (rate_limit >= 10) |
| + return; |
| + |
| + if (pending) { |
| + struct task_struct *tsk; |
| + |
| + tsk = __get_cpu_var(ksoftirqd); |
| + /* |
| + * The wakeup code in rtmutex.c wakes up the task |
| + * _before_ it sets pi_blocked_on to NULL under |
| + * tsk->pi_lock. So we need to check for both: state |
| + * and pi_blocked_on. |
| + */ |
| + raw_spin_lock(&tsk->pi_lock); |
| + |
| + if (!tsk->pi_blocked_on && !(tsk->state == TASK_RUNNING)) |
| + warnpending = 1; |
| + |
| + raw_spin_unlock(&tsk->pi_lock); |
| + } |
| + |
| + if (warnpending) { |
| + printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", |
| + pending); |
| + rate_limit++; |
| + } |
| +} |
| +# else |
| +/* |
| + * On !PREEMPT_RT we just printk rate limited: |
| + */ |
| +void softirq_check_pending_idle(void) |
| +{ |
| + static int rate_limit; |
| + |
| + if (rate_limit < 10) { |
| + printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", |
| + local_softirq_pending()); |
| + rate_limit++; |
| + } |
| +} |
| +# endif |
| +#endif |
| + |
| /* |
| * we cannot loop indefinitely here to avoid userspace starvation, |
| * but we also don't want to introduce a worst case 1/HZ latency |
| diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c |
| index ad58d13..50d74fe 100644 |
| --- a/kernel/time/tick-sched.c |
| +++ b/kernel/time/tick-sched.c |
| @@ -308,13 +308,7 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts) |
| return; |
| |
| if (unlikely(local_softirq_pending() && cpu_online(cpu))) { |
| - static int ratelimit; |
| - |
| - if (ratelimit < 10) { |
| - printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", |
| - (unsigned int) local_softirq_pending()); |
| - ratelimit++; |
| - } |
| + softirq_check_pending_idle(); |
| return; |
| } |
| |
| -- |
| 1.7.10.4 |
| |