From 89f3f6e92b72f07991b7f1e716ea3f97ccd5b2ba Mon Sep 17 00:00:00 2001
From: Gregory Haskins <ghaskins@novell.com>
Date: Fri, 3 Jul 2009 08:44:21 -0500
Subject: [PATCH] sched: make task->oncpu available in all configurations

commit 529d35d4c98a136eb811607fe418df5781395db6 in tip.

We will use this later in the series to eliminate the need for a function
call.

[ Steven Rostedt: added task_is_current function ]

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
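
Note on the intent: with ->oncpu defined whenever CONFIG_SMP is set,
task_is_current() becomes a header-only read of task->oncpu instead of an
out-of-line call into kernel/sched.c that has to compare task_rq(task)->curr
against the task. The sketch below is a hypothetical caller, not part of this
patch or the rest of the series; needs_resched_ipi() is an invented name used
purely for illustration, and it assumes CONFIG_SMP (where the new inline
exists).

#include <linux/sched.h>

/*
 * Hypothetical helper (illustration only, CONFIG_SMP assumed): with the
 * inline task_is_current() from this patch, the check is a couple of
 * loads rather than a function call into kernel/sched.c.
 */
static inline bool needs_resched_ipi(struct task_struct *p, int this_cpu)
{
	/* Only worth an IPI if @p is currently running, and on another CPU. */
	return task_is_current(p) && task_cpu(p) != this_cpu;
}
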
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f342f61..f743779 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1287,10 +1287,8 @@ struct task_struct {
int lock_depth; /* BKL lock depth */
#ifdef CONFIG_SMP
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
int oncpu;
#endif
-#endif
int prio, static_prio, normal_prio;
unsigned int rt_priority;
@@ -2084,6 +2082,13 @@ extern void set_curr_task(int cpu, struct task_struct *p);
void yield(void);
void __yield(void);
+#ifdef CONFIG_SMP
+static inline int task_is_current(struct task_struct *task)
+{
+ return task->oncpu;
+}
+#endif
+
/*
* The default (Linux) execution domain.
*/
@@ -2742,8 +2747,6 @@ static inline unsigned long rlimit_max(unsigned int limit)
return task_rlimit_max(current, limit);
}
-extern int task_is_current(struct task_struct *task);
-
#endif /* __KERNEL__ */
#endif
diff --git a/kernel/sched.c b/kernel/sched.c
index c486ea2..9d1c4cf 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -697,10 +697,12 @@ inline void update_rq_clock(struct rq *rq)
rq->clock = sched_clock_cpu(cpu_of(rq));
}
+#ifndef CONFIG_SMP
int task_is_current(struct task_struct *task)
{
return task_rq(task)->curr == task;
}
+#endif
/*
* Tunables that become constants when CONFIG_SCHED_DEBUG is off:
@@ -918,18 +920,39 @@ static inline int task_current(struct rq *rq, struct task_struct *p)
return rq->curr == p;
}
-#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline int task_running(struct rq *rq, struct task_struct *p)
{
+#ifdef CONFIG_SMP
+ return p->oncpu;
+#else
return task_current(rq, p);
+#endif
}
+#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
+#ifdef CONFIG_SMP
+ /*
+ * We can optimise this out completely for !SMP, because the
+ * SMP rebalancing from interrupt is the only thing that cares
+ * here.
+ */
+ next->oncpu = 1;
+#endif
}
static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
+#ifdef CONFIG_SMP
+ /*
+ * After ->oncpu is cleared, the task can be moved to a different CPU.
+ * We must ensure this doesn't happen until the switch is completely
+ * finished.
+ */
+ smp_wmb();
+ prev->oncpu = 0;
+#endif
#ifdef CONFIG_DEBUG_SPINLOCK
/* this is a valid case when another task releases the spinlock */
rq->lock.owner = current;
@@ -945,14 +968,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
}
#else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline int task_running(struct rq *rq, struct task_struct *p)
-{
-#ifdef CONFIG_SMP
- return p->oncpu;
-#else
- return task_current(rq, p);
-#endif
-}
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
@@ -2751,7 +2766,7 @@ void sched_fork(struct task_struct *p, int clone_flags)
if (likely(sched_info_on()))
memset(&p->sched_info, 0, sizeof(p->sched_info));
#endif
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP)
p->oncpu = 0;
#endif
#ifdef CONFIG_PREEMPT
@@ -7380,7 +7395,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
__set_task_cpu(idle, cpu);
rq->curr = rq->idle = idle;
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP)
idle->oncpu = 1;
#endif
raw_spin_unlock_irqrestore(&rq->lock, flags);
--
1.7.1.1
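
For readers following the barrier in the finish_lock_switch() hunk above, here
is a user-space analogue of the ->oncpu publication protocol; it is an
illustrative sketch, not kernel code, and fake_task, switcher() and migrator()
are invented names. The switching side finishes its writes, issues the write
barrier, and only then clears oncpu; an observer that sees oncpu == 0 pairs
that with a read/acquire barrier before touching the task's state, which is
what keeps a remote CPU from migrating the task mid-switch.

/* Build with: cc -std=c11 -pthread oncpu_sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct fake_task {
	int state;		/* stands in for the switched-out task state */
	atomic_int oncpu;	/* analogue of task_struct::oncpu */
};

static struct fake_task t = { .state = 0, .oncpu = 1 };

/* The CPU switching away from the task (finish_lock_switch() analogue). */
static void *switcher(void *arg)
{
	t.state = 42;					/* complete the switch */
	atomic_thread_fence(memory_order_release);	/* analogue of smp_wmb() */
	atomic_store_explicit(&t.oncpu, 0, memory_order_relaxed);
	return NULL;
}

/* A remote CPU waiting for the task to go off-CPU before migrating it. */
static void *migrator(void *arg)
{
	while (atomic_load_explicit(&t.oncpu, memory_order_relaxed))
		;					/* still running: wait */
	atomic_thread_fence(memory_order_acquire);	/* pairs with the release */
	printf("task state after switch: %d\n", t.state); /* always prints 42 */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, migrator, NULL);
	pthread_create(&b, NULL, switcher, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}
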