blob: 78064ac3f772d790c56e39ea30e1741381b2ab3d [file] [log] [blame]
From 9af4a9aa9787a44ac3bbd8df0b5ae047b757e3dc Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 24 Jul 2009 10:22:02 +0200
Subject: [PATCH] sched: Debug missed preemption checks
commit 514e0e295511c6a4a54eb0228ccbb519162cc088 in tip.
Developers use preempt_enable_no_resched() in places where the code
calls schedule() immediately which is correct. But there are places
where preempt_enable_no_resched() is not followed by schedule().
Add debug infrastructure to find the offending code. The identified
correct users are converted to use __preempt_enable_no_resched().
For the ever repeating "preempt_enable_no_resched(); schedule();"
sequences a convenience macro preempt_enable_and_schedule() is
introduced.
Based on a previous patch from Ingo Molnar <mingo@elte.hu>
[PG: kernel/spinlock.c doesn't have the patched lines in 34+]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 2e681d9..5c7dba8 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -33,12 +33,24 @@ do { \
barrier(); \
} while (0)
-#define preempt_enable_no_resched() \
+#define __preempt_enable_no_resched() \
do { \
barrier(); \
dec_preempt_count(); \
} while (0)
+#ifdef CONFIG_DEBUG_PREEMPT
+extern void notrace preempt_enable_no_resched(void);
+#else
+# define preempt_enable_no_resched() __preempt_enable_no_resched()
+#endif
+
+#define preempt_enable_and_schedule() \
+do { \
+ __preempt_enable_no_resched(); \
+ schedule(); \
+} while (0)
+
#define preempt_check_resched() \
do { \
if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
@@ -47,7 +59,7 @@ do { \
#define preempt_enable() \
do { \
- preempt_enable_no_resched(); \
+ __preempt_enable_no_resched(); \
barrier(); \
preempt_check_resched(); \
} while (0)
@@ -84,6 +96,8 @@ do { \
#define preempt_disable() do { } while (0)
#define preempt_enable_no_resched() do { } while (0)
+#define __preempt_enable_no_resched() do { } while (0)
+#define preempt_enable_and_schedule() schedule()
#define preempt_enable() do { } while (0)
#define preempt_check_resched() do { } while (0)
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index af1f472..d05112d 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -40,7 +40,7 @@
do { preempt_enable(); __release(lock); (void)(lock); } while (0)
#define __UNLOCK_BH(lock) \
- do { preempt_enable_no_resched(); local_bh_enable(); \
+ do { __preempt_enable_no_resched(); local_bh_enable(); \
__release(lock); (void)(lock); } while (0)
#define __UNLOCK_IRQ(lock) \
diff --git a/init/main.c b/init/main.c
index 4cb47a1..edabbff 100644
--- a/init/main.c
+++ b/init/main.c
@@ -424,8 +424,7 @@ static noinline void __init_refok rest_init(void)
* at least once to get things moving:
*/
init_idle_bootup_task(current);
- preempt_enable_no_resched();
- schedule();
+ preempt_enable_and_schedule();
preempt_disable();
/* Call into cpu_idle with preempt disabled */
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 632f04c..90ed15f 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -249,8 +249,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
/* didnt get the lock, go to sleep: */
spin_unlock_mutex(&lock->wait_lock, flags);
- preempt_enable_no_resched();
- schedule();
+ preempt_enable_and_schedule();
preempt_disable();
spin_lock_mutex(&lock->wait_lock, flags);
}
diff --git a/kernel/sched.c b/kernel/sched.c
index d6b9704..07bc2d2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5327,6 +5327,19 @@ notrace unsigned long get_parent_ip(unsigned long addr)
return addr;
}
+#ifdef CONFIG_DEBUG_PREEMPT
+void notrace preempt_enable_no_resched(void)
+{
+ barrier();
+ dec_preempt_count();
+
+ WARN_ONCE(!preempt_count(),
+ KERN_ERR "BUG: %s:%d task might have lost a preemption check!\n",
+ current->comm, current->pid);
+}
+EXPORT_SYMBOL(preempt_enable_no_resched);
+#endif
+
#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
defined(CONFIG_PREEMPT_TRACER))
@@ -5547,7 +5560,7 @@ need_resched_nonpreemptible:
goto need_resched_nonpreemptible;
}
- preempt_enable_no_resched();
+ __preempt_enable_no_resched();
if (need_resched())
goto need_resched;
}
@@ -6730,9 +6743,8 @@ SYSCALL_DEFINE0(sched_yield)
__release(rq->lock);
spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
do_raw_spin_unlock(&rq->lock);
- preempt_enable_no_resched();
- schedule();
+ preempt_enable_and_schedule();
return 0;
}
diff --git a/kernel/signal.c b/kernel/signal.c
index 549df9d..b02ed64 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1641,8 +1641,7 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
*/
preempt_disable();
read_unlock(&tasklist_lock);
- preempt_enable_no_resched();
- schedule();
+ preempt_enable_and_schedule();
} else {
/*
* By the time we got the lock, our tracer went away.
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 55cf435..19ef218 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -308,7 +308,7 @@ void irq_exit(void)
if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
tick_nohz_stop_sched_tick(0);
#endif
- preempt_enable_no_resched();
+ __preempt_enable_no_resched();
}
/*
@@ -699,8 +699,7 @@ static int run_ksoftirqd(void * __bind_cpu)
while (!kthread_should_stop()) {
preempt_disable();
if (!local_softirq_pending()) {
- preempt_enable_no_resched();
- schedule();
+ preempt_enable_and_schedule();
preempt_disable();
}
@@ -713,7 +712,7 @@ static int run_ksoftirqd(void * __bind_cpu)
if (cpu_is_offline((long)__bind_cpu))
goto wait_to_die;
do_softirq();
- preempt_enable_no_resched();
+ __preempt_enable_no_resched();
cond_resched();
preempt_disable();
rcu_sched_qs((long)__bind_cpu);
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index b135d04..5354922 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -53,7 +53,7 @@ int __lockfunc __reacquire_kernel_lock(void)
void __lockfunc __release_kernel_lock(void)
{
do_raw_spin_unlock(&kernel_flag);
- preempt_enable_no_resched();
+ __preempt_enable_no_resched();
}
/*
--
1.7.1.1