From ba404fe58481603a411847fc718eb3366ccd4fc6 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:51 -0500
Subject: [PATCH] percpu: add percpu locked infrastructure
commit 9566018270e0c6cee0388e1a577ae8b6f15f4e8d in tip.
RT needs per cpu data structures protected by per cpu locks instead of
disabling preemption. Add the infrastructure for per cpu locked data.
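
As a minimal usage sketch (illustrative only; the variable 'my_count'
and the function below are invented for this changelog, not part of
the patch):

	/* a per-CPU counter protected by a per-CPU lock */
	DEFINE_PER_CPU_LOCKED(int, my_count);

	static void bump_my_count(void)
	{
		int cpu;

		/*
		 * Take this CPU's lock instead of disabling preemption;
		 * on PREEMPT_RT the spinlock may sleep.
		 */
		get_cpu_var_locked(my_count, &cpu)++;
		put_cpu_var_locked(my_count, cpu);
	}
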
[PG: fold in a couple of small deltas from the big 33rt merge commit;
also note the removal of the per_cpu_## prefix in dd17c8f72993f9461e9c19250]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 04f91c2..d1d3488 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -5,6 +5,9 @@
#include <linux/threads.h>
#include <linux/percpu-defs.h>
+#define __per_cpu_var_lock(var) per_cpu__lock_##var##_locked
+#define __per_cpu_var_lock_var(var) per_cpu__##var##_locked
+
#ifdef CONFIG_SMP
/*
@@ -60,10 +63,18 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
#define __raw_get_cpu_var(var) \
(*SHIFT_PERCPU_PTR(&(var), __my_cpu_offset))
+#define per_cpu_lock(var, cpu) \
+ (*SHIFT_PERCPU_PTR(&__per_cpu_var_lock(var), per_cpu_offset(cpu)))
+#define per_cpu_var_locked(var, cpu) \
+ (*SHIFT_PERCPU_PTR(&__per_cpu_var_lock_var(var), per_cpu_offset(cpu)))
+#define __get_cpu_lock(var, cpu) \
+ per_cpu_lock(var, cpu)
+#define __get_cpu_var_locked(var, cpu) \
+ per_cpu_var_locked(var, cpu)
+
#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
#define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
-
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void setup_per_cpu_areas(void);
#endif
@@ -71,6 +82,10 @@ extern void setup_per_cpu_areas(void);
#else /* ! SMP */
#define per_cpu(var, cpu) (*((void)(cpu), &(var)))
+#define per_cpu_var_locked(var, cpu) \
+ (*((void)(cpu), &__per_cpu_var_lock_var(var)))
+#define __get_cpu_lock(var, cpu) __per_cpu_var_lock(var)
+#define __get_cpu_var_locked(var, cpu) __per_cpu_var_lock_var(var)
#define __get_cpu_var(var) (var)
#define __raw_get_cpu_var(var) (var)
#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 68567c0..2187c24 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -91,6 +91,22 @@
DEFINE_PER_CPU_SECTION(type, name, "")
/*
+ * The next three macros were added for the RT patch.
+ * (Do we need corresponding DECLARE_*'s? -- clrkwllms)
+ */
+#define DEFINE_PER_CPU_SPINLOCK(name, sec) \
+ __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES \
+ __DEFINE_SPINLOCK(per_cpu__lock_##name##_locked)
+
+#define DECLARE_PER_CPU_LOCKED(type, name) \
+ extern PER_CPU_ATTRIBUTES spinlock_t __per_cpu_var_lock(name); \
+ extern PER_CPU_ATTRIBUTES __typeof__(type) __per_cpu_var_lock_var(name)
+
+#define DEFINE_PER_CPU_LOCKED(type, name) \
+ DEFINE_PER_CPU_SPINLOCK(name, ""); \
+ DEFINE_PER_CPU_SECTION(type, name##_locked, "")
+
+/*
* Declaration/definition used for per-CPU variables that must come first in
* the set of variables.
*/
@@ -145,10 +161,14 @@
*/
#ifndef __CHECKER__
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var)
+#define EXPORT_PER_CPU_LOCKED_SYMBOL(var) EXPORT_SYMBOL(var##_locked)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var)
+#define EXPORT_PER_CPU_LOCKED_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var##_locked)
#else
#define EXPORT_PER_CPU_SYMBOL(var)
+#define EXPORT_PER_CPU_LOCKED_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var)
+#define EXPORT_PER_CPU_LOCKED_SYMBOL_GPL(var)
#endif
#endif /* _LINUX_PERCPU_DEFS_H */
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index a93e5bf..35f9a30 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -39,6 +39,29 @@
preempt_enable(); \
} while (0)
+/*
+ * Per-CPU data structures with an additional lock - useful for
+ * PREEMPT_RT code that wants to be able to reschedule while
+ * manipulating per-CPU data structures.
+ *
+ * 'cpu' gets updated with the CPU the task is currently executing on.
+ *
+ * NOTE: on normal !PREEMPT_RT kernels these per-CPU variables are the
+ * same as the normal per-CPU variables, so there is no runtime
+ * overhead.
+ */
+#define get_cpu_var_locked(var, cpuptr) \
+(*({ \
+ int __cpu = raw_smp_processor_id(); \
+ \
+ *(cpuptr) = __cpu; \
+ spin_lock(&__get_cpu_lock(var, __cpu)); \
+ &__get_cpu_var_locked(var, __cpu); \
+}))
+
+#define put_cpu_var_locked(var, cpu) \
+ do { (void)cpu; spin_unlock(&__get_cpu_lock(var, cpu)); } while (0)
+
#ifdef CONFIG_SMP
/* minimum unit size, also is the maximum supported allocation size */
--
1.7.1.1
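
For reference, given the macros above (with made-up names 'struct foo'
and 'bar'), writing:

	DEFINE_PER_CPU_LOCKED(struct foo, bar);

defines a per-CPU spinlock plus the per-CPU variable it protects,
roughly:

	DEFINE_PER_CPU_SPINLOCK(bar, "");			/* per_cpu__lock_bar_locked */
	DEFINE_PER_CPU_SECTION(struct foo, bar_locked, "");	/* per_cpu__bar_locked */

get_cpu_var_locked(bar, &cpu) then takes per_cpu__lock_bar_locked on
the current CPU and evaluates to that CPU's per_cpu__bar_locked lvalue;
put_cpu_var_locked(bar, cpu) releases the lock.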