blob: f1599ff8109df09aebdcf1351af40ab5481f7dda [file] [log] [blame]
Subject: softirq: Adapt NOHZ softirq pending check to new RT scheme
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 28 Oct 2012 13:46:16 +0000
We can't rely on ksoftirqd anymore, so we need to check the tasks
which run a particular softirq; if such a task is pi-blocked, ignore
the other pending bits of that task as well.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/softirq.c | 83 ++++++++++++++++++++++++++++++++++++++-----------------
1 file changed, 58 insertions(+), 25 deletions(-)
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -66,46 +66,71 @@ char *softirq_to_name[NR_SOFTIRQS] = {
#ifdef CONFIG_NO_HZ
# ifdef CONFIG_PREEMPT_RT_FULL
+
+struct softirq_runner {
+ struct task_struct *runner[NR_SOFTIRQS];
+};
+
+static DEFINE_PER_CPU(struct softirq_runner, softirq_runners);
+
+static inline void softirq_set_runner(unsigned int sirq)
+{
+ struct softirq_runner *sr = &__get_cpu_var(softirq_runners);
+
+ sr->runner[sirq] = current;
+}
+
+static inline void softirq_clr_runner(unsigned int sirq)
+{
+ struct softirq_runner *sr = &__get_cpu_var(softirq_runners);
+
+ sr->runner[sirq] = NULL;
+}
+
/*
- * On preempt-rt a softirq might be blocked on a lock. There might be
- * no other runnable task on this CPU because the lock owner runs on
- * some other CPU. So we have to go into idle with the pending bit
- * set. Therefor we need to check this otherwise we warn about false
- * positives which confuses users and defeats the whole purpose of
- * this test.
+ * On preempt-rt a softirq running context might be blocked on a
+ * lock. There might be no other runnable task on this CPU because the
+ * lock owner runs on some other CPU. So we have to go into idle with
+ * the pending bit set. Therefore we need to check this, otherwise we
+ * warn about false positives which confuses users and defeats the
+ * whole purpose of this test.
*
* This code is called with interrupts disabled.
*/
void softirq_check_pending_idle(void)
{
static int rate_limit;
- u32 warnpending = 0, pending;
+ struct softirq_runner *sr = &__get_cpu_var(softirq_runners);
+ u32 warnpending;
+ int i;
if (rate_limit >= 10)
return;
- pending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK;
- if (pending) {
- struct task_struct *tsk;
+ warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK;
+ for (i = 0; i < NR_SOFTIRQS; i++) {
+ struct task_struct *tsk = sr->runner[i];
- tsk = __get_cpu_var(ksoftirqd);
/*
* The wakeup code in rtmutex.c wakes up the task
* _before_ it sets pi_blocked_on to NULL under
* tsk->pi_lock. So we need to check for both: state
* and pi_blocked_on.
*/
- raw_spin_lock(&tsk->pi_lock);
-
- if (!tsk->pi_blocked_on && !(tsk->state == TASK_RUNNING))
- warnpending = 1;
-
- raw_spin_unlock(&tsk->pi_lock);
+ if (tsk) {
+ raw_spin_lock(&tsk->pi_lock);
+ if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) {
+ /* Clear all bits pending in that task */
+ warnpending &= ~(tsk->softirqs_raised);
+ warnpending &= ~(1 << i);
+ }
+ raw_spin_unlock(&tsk->pi_lock);
+ }
}
if (warnpending) {
printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
- pending);
+ warnpending);
rate_limit++;
}
}
@@ -125,6 +150,10 @@ void softirq_check_pending_idle(void)
}
}
# endif
+
+#else /* !NO_HZ */
+static inline void softirq_set_runner(unsigned int sirq) { }
+static inline void softirq_clr_runner(unsigned int sirq) { }
#endif
/*
@@ -478,6 +507,7 @@ static void do_current_softirqs(int need
*/
lock_softirq(i);
local_irq_disable();
+ softirq_set_runner(i);
/*
* Check with the local_softirq_pending() bits,
* whether we need to process this still or if someone
@@ -488,6 +518,7 @@ static void do_current_softirqs(int need
set_softirq_pending(pending & ~mask);
do_single_softirq(i, need_rcu_bh_qs);
}
+ softirq_clr_runner(i);
unlock_softirq(i);
WARN_ON(current->softirq_nestcnt != 1);
}
@@ -558,7 +589,7 @@ void thread_do_softirq(void)
}
}
-void __raise_softirq_irqoff(unsigned int nr)
+static void do_raise_softirq_irqoff(unsigned int nr)
{
trace_softirq_raise(nr);
or_softirq_pending(1UL << nr);
@@ -575,12 +606,19 @@ void __raise_softirq_irqoff(unsigned int
__this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr);
}
+void __raise_softirq_irqoff(unsigned int nr)
+{
+ do_raise_softirq_irqoff(nr);
+ if (!in_irq() && !current->softirq_nestcnt)
+ wakeup_softirqd();
+}
+
/*
* This function must run with irqs disabled!
*/
void raise_softirq_irqoff(unsigned int nr)
{
- __raise_softirq_irqoff(nr);
+ do_raise_softirq_irqoff(nr);
/*
* If we're in an hard interrupt we let irq return code deal
@@ -602,11 +640,6 @@ void raise_softirq_irqoff(unsigned int n
wakeup_softirqd();
}
-void do_raise_softirq_irqoff(unsigned int nr)
-{
- raise_softirq_irqoff(nr);
-}
-
static inline int ksoftirqd_softirq_pending(void)
{
return current->softirqs_raised;