From 6317c3b794e0e24a71a33a22033ea2cc17c07858 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 26 Jul 2009 15:35:40 +0200
Subject: [PATCH] rw_semaphores: Add rw_anon_semaphores
commit 7715da0b24745db15637f9d380f65386e3d9478b in tip.
Most of the rw_semaphores have strict owner semantics. i_alloc_sem and
two xfs ones are used as both semaphores and completions. That makes
it hard to convert for preempt-rt. Add an rw_anon_semaphore type
which is not converted on -rt.
[PG: fold addition of anon to rwsem_is_locked() from merge commit]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
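---

The intent is that ordinary rw_semaphores keep strict owner semantics
(and can be converted to rt aware locks on -rt), while the handful of
completion-style users (i_alloc_sem and the two xfs rwsems mentioned
above) move to the anon API introduced here. A minimal sketch of such a
completion-style user, assuming this patch is applied; the semaphore
and function names below are illustrative only, not taken from the
kernel:

	/* released from a context other than the one that acquired it */
	static DECLARE_ANON_RWSEM(example_io_rwsem);

	static void example_submit_io(void)
	{
		/* take a read reference before handing the I/O to another context */
		anon_down_read_non_owner(&example_io_rwsem);
		/* ... queue the request ... */
	}

	static void example_io_done(void)
	{
		/* completion path, possibly a different task */
		anon_up_read_non_owner(&example_io_rwsem);
	}

	static void example_wait_for_io(void)
	{
		/* write acquisition waits for all outstanding readers, completion-like */
		anon_down_write(&example_io_rwsem);
		anon_up_write(&example_io_rwsem);
	}

Plain rw_semaphore users are unaffected: down_read()/up_read() and
friends now forward to the anon implementation via the inline wrappers
added to include/linux/rwsem.h.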
diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h
index 1570c0b..55f4f13 100644
--- a/arch/alpha/include/asm/rwsem.h
+++ b/arch/alpha/include/asm/rwsem.h
@@ -18,15 +18,18 @@
struct rwsem_waiter;
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+extern struct rw_anon_semaphore *
+rwsem_down_read_failed(struct rw_anon_semaphore *sem);
+extern struct rw_anon_semaphore *
+rwsem_down_write_failed(struct rw_anon_semaphore *sem);
+extern struct rw_anon_semaphore *rwsem_wake(struct rw_anon_semaphore *);
+extern struct rw_anon_semaphore *
+rwsem_downgrade_wake(struct rw_anon_semaphore *sem);
/*
* the semaphore definition
*/
-struct rw_semaphore {
+struct rw_anon_semaphore {
long count;
#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
#define RWSEM_ACTIVE_BIAS 0x0000000000000001L
@@ -38,6 +41,31 @@ struct rw_semaphore {
struct list_head wait_list;
};
+#define __RWSEM_ANON_INITIALIZER(name) \
+ { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
+ LIST_HEAD_INIT((name).wait_list) }
+
+#define DECLARE_ANON_RWSEM(name) \
+ struct rw_anon_semaphore name = __RWSEM_ANON_INITIALIZER(name)
+
+static inline void init_anon_rwsem(struct rw_anon_semaphore *sem)
+{
+ sem->count = RWSEM_UNLOCKED_VALUE;
+ spin_lock_init(&sem->wait_lock);
+ INIT_LIST_HEAD(&sem->wait_list);
+}
+
+static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
+struct rw_semaphore {
+ long count;
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+};
+
#define __RWSEM_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
LIST_HEAD_INIT((name).wait_list) }
@@ -47,12 +75,15 @@ struct rw_semaphore {
static inline void init_rwsem(struct rw_semaphore *sem)
{
- sem->count = RWSEM_UNLOCKED_VALUE;
- spin_lock_init(&sem->wait_lock);
- INIT_LIST_HEAD(&sem->wait_list);
+ init_anon_rwsem((struct rw_anon_semaphore *)sem);
}
-static inline void __down_read(struct rw_semaphore *sem)
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
+static inline void __down_read(struct rw_anon_semaphore *sem)
{
long oldcount;
#ifndef CONFIG_SMP
@@ -79,7 +110,7 @@ static inline void __down_read(struct rw_semaphore *sem)
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
-static inline int __down_read_trylock(struct rw_semaphore *sem)
+static inline int __down_read_trylock(struct rw_anon_semaphore *sem)
{
long old, new, res;
@@ -94,7 +125,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
return res >= 0 ? 1 : 0;
}
-static inline void __down_write(struct rw_semaphore *sem)
+static inline void __down_write(struct rw_anon_semaphore *sem)
{
long oldcount;
#ifndef CONFIG_SMP
@@ -121,7 +152,7 @@ static inline void __down_write(struct rw_semaphore *sem)
/*
* trylock for writing -- returns 1 if successful, 0 if contention
*/
-static inline int __down_write_trylock(struct rw_semaphore *sem)
+static inline int __down_write_trylock(struct rw_anon_semaphore *sem)
{
long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
RWSEM_ACTIVE_WRITE_BIAS);
@@ -130,7 +161,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
return 0;
}
-static inline void __up_read(struct rw_semaphore *sem)
+static inline void __up_read(struct rw_anon_semaphore *sem)
{
long oldcount;
#ifndef CONFIG_SMP
@@ -155,7 +186,7 @@ static inline void __up_read(struct rw_semaphore *sem)
rwsem_wake(sem);
}
-static inline void __up_write(struct rw_semaphore *sem)
+static inline void __up_write(struct rw_anon_semaphore *sem)
{
long count;
#ifndef CONFIG_SMP
@@ -184,7 +215,7 @@ static inline void __up_write(struct rw_semaphore *sem)
/*
* downgrade write lock to read lock
*/
-static inline void __downgrade_write(struct rw_semaphore *sem)
+static inline void __downgrade_write(struct rw_anon_semaphore *sem)
{
long oldcount;
#ifndef CONFIG_SMP
@@ -208,7 +239,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
rwsem_downgrade_wake(sem);
}
-static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem)
+static inline void rwsem_atomic_add(long val, struct rw_anon_semaphore *sem)
{
#ifndef CONFIG_SMP
sem->count += val;
@@ -227,7 +258,7 @@ static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem)
#endif
}
-static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
+static inline long rwsem_atomic_update(long val, struct rw_anon_semaphore *sem)
{
#ifndef CONFIG_SMP
sem->count += val;
@@ -250,10 +281,5 @@ static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
#endif
}
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
- return (sem->count != 0);
-}
-
#endif /* __KERNEL__ */
#endif /* _ALPHA_RWSEM_H */
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
index e876268..a48df05 100644
--- a/arch/ia64/include/asm/rwsem.h
+++ b/arch/ia64/include/asm/rwsem.h
@@ -33,7 +33,7 @@
/*
* the semaphore definition
*/
-struct rw_semaphore {
+struct rw_anon_semaphore {
signed long count;
spinlock_t wait_lock;
struct list_head wait_list;
@@ -51,26 +51,47 @@ struct rw_semaphore {
LIST_HEAD_INIT((name).wait_list) }
-#define DECLARE_RWSEM(name) \
- struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-static inline void
-init_rwsem (struct rw_semaphore *sem)
+#define DECLARE_ANON_RWSEM(name) \
+ struct rw_anon_semaphore name = __RWSEM_INITIALIZER(name)
+
+extern struct rw_anon_semaphore *
+rwsem_down_read_failed(struct rw_anon_semaphore *sem);
+extern struct rw_anon_semaphore *
+rwsem_down_write_failed(struct rw_anon_semaphore *sem);
+extern struct rw_anon_semaphore *
+rwsem_wake(struct rw_anon_semaphore *sem);
+extern struct rw_anon_semaphore *
+rwsem_downgrade_wake(struct rw_anon_semaphore *sem);
+
+static inline void init_anon_rwsem (struct rw_anon_semaphore *sem)
{
sem->count = RWSEM_UNLOCKED_VALUE;
spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
+struct rw_semaphore {
+ signed long count;
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+};
+
+#define __RWSEM_INITIALIZER(name) \
+ { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
+ LIST_HEAD_INIT((name).wait_list) }
+
+#define DECLARE_RWSEM(name) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+static inline void init_rwsem(struct rw_semaphore *sem)
+{
+ init_anon_rwsem((struct rw_anon_semaphore *)sem);
+}
+
/*
* lock for reading
*/
static inline void
-__down_read (struct rw_semaphore *sem)
+__down_read (struct rw_anon_semaphore *sem)
{
long result = ia64_fetchadd8_acq((unsigned long *)&sem->count, 1);
@@ -82,7 +103,7 @@ __down_read (struct rw_semaphore *sem)
* lock for writing
*/
static inline void
-__down_write (struct rw_semaphore *sem)
+__down_write (struct rw_anon_semaphore *sem)
{
long old, new;
@@ -99,7 +120,7 @@ __down_write (struct rw_semaphore *sem)
* unlock after reading
*/
static inline void
-__up_read (struct rw_semaphore *sem)
+__up_read (struct rw_anon_semaphore *sem)
{
long result = ia64_fetchadd8_rel((unsigned long *)&sem->count, -1);
@@ -111,7 +132,7 @@ __up_read (struct rw_semaphore *sem)
* unlock after writing
*/
static inline void
-__up_write (struct rw_semaphore *sem)
+__up_write (struct rw_anon_semaphore *sem)
{
long old, new;
@@ -128,7 +149,7 @@ __up_write (struct rw_semaphore *sem)
* trylock for reading -- returns 1 if successful, 0 if contention
*/
static inline int
-__down_read_trylock (struct rw_semaphore *sem)
+__down_read_trylock (struct rw_anon_semaphore *sem)
{
long tmp;
while ((tmp = sem->count) >= 0) {
@@ -143,7 +164,7 @@ __down_read_trylock (struct rw_semaphore *sem)
* trylock for writing -- returns 1 if successful, 0 if contention
*/
static inline int
-__down_write_trylock (struct rw_semaphore *sem)
+__down_write_trylock (struct rw_anon_semaphore *sem)
{
long tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
RWSEM_ACTIVE_WRITE_BIAS);
@@ -154,7 +175,7 @@ __down_write_trylock (struct rw_semaphore *sem)
* downgrade write lock to read lock
*/
static inline void
-__downgrade_write (struct rw_semaphore *sem)
+__downgrade_write (struct rw_anon_semaphore *sem)
{
long old, new;
@@ -174,6 +195,11 @@ __downgrade_write (struct rw_semaphore *sem)
#define rwsem_atomic_add(delta, sem) atomic64_add(delta, (atomic64_t *)(&(sem)->count))
#define rwsem_atomic_update(delta, sem) atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
+static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
return (sem->count != 0);
diff --git a/arch/powerpc/include/asm/rwsem.h b/arch/powerpc/include/asm/rwsem.h
index 24cd928..564c373 100644
--- a/arch/powerpc/include/asm/rwsem.h
+++ b/arch/powerpc/include/asm/rwsem.h
@@ -21,7 +21,7 @@
/*
* the semaphore definition
*/
-struct rw_semaphore {
+struct rw_anon_semaphore {
/* XXX this should be able to be an atomic_t -- paulus */
signed int count;
#define RWSEM_UNLOCKED_VALUE 0x00000000
@@ -38,43 +38,47 @@ struct rw_semaphore {
};
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+# define __RWSEM_ANON_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
+# define __RWSEM_ANON_DEP_MAP_INIT(lockname)
#endif
-#define __RWSEM_INITIALIZER(name) \
+#define __RWSEM_ANON_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
- LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+ LIST_HEAD_INIT((name).wait_list) __RWSEM_ANON_DEP_MAP_INIT(name) }
-#define DECLARE_RWSEM(name) \
- struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+#define DECLARE_ANON_RWSEM(name) \
+ struct rw_anon_semaphore name = __RWSEM_ANON_INITIALIZER(name)
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+extern struct rw_anon_semaphore *
+rwsem_down_read_failed(struct rw_anon_semaphore *sem);
+extern struct rw_anon_semaphore *
+rwsem_down_write_failed(struct rw_anon_semaphore *sem);
+extern struct rw_anon_semaphore *
+rwsem_wake(struct rw_anon_semaphore *sem);
+extern struct rw_anon_semaphore *
+rwsem_downgrade_wake(struct rw_anon_semaphore *sem);
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+extern void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name,
struct lock_class_key *key);
-#define init_rwsem(sem) \
+#define init_anon_rwsem(sem) \
do { \
static struct lock_class_key __key; \
\
- __init_rwsem((sem), #sem, &__key); \
+ __init_anon_rwsem((sem), #sem, &__key); \
} while (0)
/*
* lock for reading
*/
-static inline void __down_read(struct rw_semaphore *sem)
+static inline void __down_read(struct rw_anon_semaphore *sem)
{
if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0))
rwsem_down_read_failed(sem);
}
-static inline int __down_read_trylock(struct rw_semaphore *sem)
+static inline int __down_read_trylock(struct rw_anon_semaphore *sem)
{
int tmp;
@@ -90,7 +94,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
/*
* lock for writing
*/
-static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+static inline void __down_write_nested(struct rw_anon_semaphore *sem, int subclass)
{
int tmp;
@@ -100,12 +104,12 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
rwsem_down_write_failed(sem);
}
-static inline void __down_write(struct rw_semaphore *sem)
+static inline void __down_write(struct rw_anon_semaphore *sem)
{
__down_write_nested(sem, 0);
}
-static inline int __down_write_trylock(struct rw_semaphore *sem)
+static inline int __down_write_trylock(struct rw_anon_semaphore *sem)
{
int tmp;
@@ -117,7 +121,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
/*
* unlock after reading
*/
-static inline void __up_read(struct rw_semaphore *sem)
+static inline void __up_read(struct rw_anon_semaphore *sem)
{
int tmp;
@@ -129,7 +133,7 @@ static inline void __up_read(struct rw_semaphore *sem)
/*
* unlock after writing
*/
-static inline void __up_write(struct rw_semaphore *sem)
+static inline void __up_write(struct rw_anon_semaphore *sem)
{
if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
(atomic_t *)(&sem->count)) < 0))
@@ -139,7 +143,7 @@ static inline void __up_write(struct rw_semaphore *sem)
/*
* implement atomic add functionality
*/
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+static inline void rwsem_atomic_add(int delta, struct rw_anon_semaphore *sem)
{
atomic_add(delta, (atomic_t *)(&sem->count));
}
@@ -147,7 +151,7 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
/*
* downgrade write lock to read lock
*/
-static inline void __downgrade_write(struct rw_semaphore *sem)
+static inline void __downgrade_write(struct rw_anon_semaphore *sem)
{
int tmp;
@@ -159,11 +163,52 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
/*
* implement exchange and add functionality
*/
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+static inline int rwsem_atomic_update(int delta, struct rw_anon_semaphore *sem)
{
return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
+static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
+struct rw_semaphore {
+ /* XXX this should be able to be an atomic_t -- paulus */
+ signed int count;
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
+#define __RWSEM_INITIALIZER(name) \
+ { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
+ LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+
+#define DECLARE_RWSEM(name) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+static inline void __init_rwsem(struct rw_semaphore *sem, const char *name,
+ struct lock_class_key *key)
+{
+ __init_anon_rwsem((struct rw_anon_semaphore *)sem, name, key);
+}
+
+#define init_rwsem(sem) \
+ do { \
+ static struct lock_class_key __key; \
+ \
+ __init_rwsem((sem), #sem, &__key); \
+ } while (0)
+
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
return (sem->count != 0);
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index 423fdda..43e95a2 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -48,16 +48,21 @@
struct rwsem_waiter;
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_write(struct rw_semaphore *);
+extern struct rw_anon_semaphore *
+rwsem_down_read_failed(struct rw_anon_semaphore *);
+extern struct rw_anon_semaphore *
+rwsem_down_write_failed(struct rw_anon_semaphore *);
+extern struct rw_anon_semaphore *
+rwsem_wake(struct rw_anon_semaphore *);
+extern struct rw_anon_semaphore *
+rwsem_downgrade_wake(struct rw_anon_semaphore *);
+extern struct rw_anon_semaphore *
+rwsem_downgrade_write(struct rw_anon_semaphore *);
/*
* the semaphore definition
*/
-struct rw_semaphore {
+struct rw_anon_semaphore {
signed long count;
spinlock_t wait_lock;
struct list_head wait_list;
@@ -85,40 +90,40 @@ struct rw_semaphore {
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+# define __RWSEM_ANON_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
+# define __RWSEM_ANON_DEP_MAP_INIT(lockname)
#endif
-#define __RWSEM_INITIALIZER(name) \
+#define __RWSEM_ANON_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait.lock), \
- LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+ LIST_HEAD_INIT((name).wait_list) __RWSEM_ANON_DEP_MAP_INIT(name) }
-#define DECLARE_RWSEM(name) \
- struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+#define DECLARE_ANON_RWSEM(name) \
+ struct rw_anon_semaphore name = __RWSEM_ANON_INITIALIZER(name)
-static inline void init_rwsem(struct rw_semaphore *sem)
+static inline void init_anon_rwsem(struct rw_anon_semaphore *sem)
{
sem->count = RWSEM_UNLOCKED_VALUE;
spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
- struct lock_class_key *key);
+extern void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name,
+ struct lock_class_key *key);
-#define init_rwsem(sem) \
+#define init_anon_rwsem(sem) \
do { \
static struct lock_class_key __key; \
\
- __init_rwsem((sem), #sem, &__key); \
+ __init_anon_rwsem((sem), #sem, &__key); \
} while (0)
/*
* lock for reading
*/
-static inline void __down_read(struct rw_semaphore *sem)
+static inline void __down_read(struct rw_anon_semaphore *sem)
{
signed long old, new;
@@ -146,7 +151,7 @@ static inline void __down_read(struct rw_semaphore *sem)
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
-static inline int __down_read_trylock(struct rw_semaphore *sem)
+static inline int __down_read_trylock(struct rw_anon_semaphore *sem)
{
signed long old, new;
@@ -177,7 +182,8 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
/*
* lock for writing
*/
-static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+static inline void
+__down_write_nested(struct rw_anon_semaphore *sem, int subclass)
{
signed long old, new, tmp;
@@ -203,7 +209,7 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
rwsem_down_write_failed(sem);
}
-static inline void __down_write(struct rw_semaphore *sem)
+static inline void __down_write(struct rw_anon_semaphore *sem)
{
__down_write_nested(sem, 0);
}
@@ -211,7 +217,7 @@ static inline void __down_write(struct rw_semaphore *sem)
/*
* trylock for writing -- returns 1 if successful, 0 if contention
*/
-static inline int __down_write_trylock(struct rw_semaphore *sem)
+static inline int __down_write_trylock(struct rw_anon_semaphore *sem)
{
signed long old;
@@ -239,7 +245,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
/*
* unlock after reading
*/
-static inline void __up_read(struct rw_semaphore *sem)
+static inline void __up_read(struct rw_anon_semaphore *sem)
{
signed long old, new;
@@ -268,7 +274,7 @@ static inline void __up_read(struct rw_semaphore *sem)
/*
* unlock after writing
*/
-static inline void __up_write(struct rw_semaphore *sem)
+static inline void __up_write(struct rw_anon_semaphore *sem)
{
signed long old, new, tmp;
@@ -298,7 +304,7 @@ static inline void __up_write(struct rw_semaphore *sem)
/*
* downgrade write lock to read lock
*/
-static inline void __downgrade_write(struct rw_semaphore *sem)
+static inline void __downgrade_write(struct rw_anon_semaphore *sem)
{
signed long old, new, tmp;
@@ -327,7 +333,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
/*
* implement atomic add functionality
*/
-static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
+static inline void rwsem_atomic_add(long delta, struct rw_anon_semaphore *sem)
{
signed long old, new;
@@ -353,7 +359,8 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
/*
* implement exchange and add functionality
*/
-static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
+static inline long
+rwsem_atomic_update(long delta, struct rw_anon_semaphore *sem)
{
signed long old, new;
@@ -377,10 +384,52 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
return new;
}
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
+static inline int rwsem_is_locked(struct rw_anon_semaphore *sem)
{
return (sem->count != 0);
}
+struct rw_semaphore {
+ signed long count;
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
+#define __RWSEM_INITIALIZER(name) \
+ { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait.lock), \
+ LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+
+#define DECLARE_RWSEM(name) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+static inline void init_rwsem(struct rw_semaphore *sem)
+{
+ sem->count = RWSEM_UNLOCKED_VALUE;
+ spin_lock_init(&sem->wait_lock);
+ INIT_LIST_HEAD(&sem->wait_list);
+}
+
+static inline void __init_rwsem(struct rw_semaphore *sem, const char *name,
+ struct lock_class_key *key)
+{
+ __init_anon_rwsem((struct rw_anon_semaphore *)sem, name, key);
+}
+
+#define init_rwsem(sem) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __init_rwsem((sem), #sem, &__key); \
+} while (0)
+
#endif /* __KERNEL__ */
#endif /* _S390_RWSEM_H */
diff --git a/arch/sh/include/asm/rwsem.h b/arch/sh/include/asm/rwsem.h
index 06e2251..47a3460 100644
--- a/arch/sh/include/asm/rwsem.h
+++ b/arch/sh/include/asm/rwsem.h
@@ -19,7 +19,7 @@
/*
* the semaphore definition
*/
-struct rw_semaphore {
+struct rw_anon_semaphore {
long count;
#define RWSEM_UNLOCKED_VALUE 0x00000000
#define RWSEM_ACTIVE_BIAS 0x00000001
@@ -35,35 +35,38 @@ struct rw_semaphore {
};
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+# define __RWSEM_ANON_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
+# define __RWSEM_ANON_DEP_MAP_INIT(lockname)
#endif
-#define __RWSEM_INITIALIZER(name) \
+#define __RWSEM_ANON_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
LIST_HEAD_INIT((name).wait_list) \
- __RWSEM_DEP_MAP_INIT(name) }
+ __RWSEM_ANON_DEP_MAP_INIT(name) }
-#define DECLARE_RWSEM(name) \
- struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+#define DECLARE_ANON_RWSEM(name) \
+ struct rw_anon_semaphore name = __RWSEM_ANON_INITIALIZER(name)
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+extern struct rw_anon_semaphore *
+rwsem_down_read_failed(struct rw_anon_semaphore *sem);
+extern struct rw_anon_semaphore *
+rwsem_down_write_failed(struct rw_anon_semaphore *sem);
+extern struct rw_anon_semaphore *rwsem_wake(struct rw_anon_semaphore *sem);
+extern struct rw_anon_semaphore *
+rwsem_downgrade_wake(struct rw_anon_semaphore *sem);
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
- struct lock_class_key *key);
+extern void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name,
+ struct lock_class_key *key);
-#define init_rwsem(sem) \
+#define init_anon_rwsem(sem) \
do { \
static struct lock_class_key __key; \
\
- __init_rwsem((sem), #sem, &__key); \
+ __init_anon_rwsem((sem), #sem, &__key); \
} while (0)
-static inline void init_rwsem(struct rw_semaphore *sem)
+static inline void init_anon_rwsem(struct rw_anon_semaphore *sem)
{
sem->count = RWSEM_UNLOCKED_VALUE;
spin_lock_init(&sem->wait_lock);
@@ -73,7 +76,7 @@ static inline void init_rwsem(struct rw_semaphore *sem)
/*
* lock for reading
*/
-static inline void __down_read(struct rw_semaphore *sem)
+static inline void __down_read(struct rw_anon_semaphore *sem)
{
if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
smp_wmb();
@@ -81,7 +84,7 @@ static inline void __down_read(struct rw_semaphore *sem)
rwsem_down_read_failed(sem);
}
-static inline int __down_read_trylock(struct rw_semaphore *sem)
+static inline int __down_read_trylock(struct rw_anon_semaphore *sem)
{
int tmp;
@@ -98,7 +101,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
/*
* lock for writing
*/
-static inline void __down_write(struct rw_semaphore *sem)
+static inline void __down_write(struct rw_anon_semaphore *sem)
{
int tmp;
@@ -110,7 +113,7 @@ static inline void __down_write(struct rw_semaphore *sem)
rwsem_down_write_failed(sem);
}
-static inline int __down_write_trylock(struct rw_semaphore *sem)
+static inline int __down_write_trylock(struct rw_anon_semaphore *sem)
{
int tmp;
@@ -123,7 +126,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
/*
* unlock after reading
*/
-static inline void __up_read(struct rw_semaphore *sem)
+static inline void __up_read(struct rw_anon_semaphore *sem)
{
int tmp;
@@ -136,7 +139,7 @@ static inline void __up_read(struct rw_semaphore *sem)
/*
* unlock after writing
*/
-static inline void __up_write(struct rw_semaphore *sem)
+static inline void __up_write(struct rw_anon_semaphore *sem)
{
smp_wmb();
if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
@@ -147,7 +150,7 @@ static inline void __up_write(struct rw_semaphore *sem)
/*
* implement atomic add functionality
*/
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+static inline void rwsem_atomic_add(int delta, struct rw_anon_semaphore *sem)
{
atomic_add(delta, (atomic_t *)(&sem->count));
}
@@ -155,7 +158,7 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
/*
* downgrade write lock to read lock
*/
-static inline void __downgrade_write(struct rw_semaphore *sem)
+static inline void __downgrade_write(struct rw_anon_semaphore *sem)
{
int tmp;
@@ -165,7 +168,8 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
rwsem_downgrade_wake(sem);
}
-static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+static inline void
+ __down_write_nested(struct rw_anon_semaphore *sem, int subclass)
{
__down_write(sem);
}
@@ -173,12 +177,60 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
/*
* implement exchange and add functionality
*/
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+static inline int rwsem_atomic_update(int delta, struct rw_anon_semaphore *sem)
{
smp_mb();
return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
+static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
+struct rw_semaphore {
+ long count;
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
+#define __RWSEM_INITIALIZER(name) \
+ { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
+ LIST_HEAD_INIT((name).wait_list) \
+ __RWSEM_DEP_MAP_INIT(name) }
+
+#define DECLARE_RWSEM(name) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+static inline void __init_rwsem(struct rw_semaphore *sem, const char *name,
+ struct lock_class_key *key)
+{
+ __init_anon_rwsem((struct rw_anon_semaphore *)sem, name, key);
+}
+
+#define init_rwsem(sem) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __init_rwsem((sem), #sem, &__key); \
+} while (0)
+
+static inline void init_rwsem(struct rw_semaphore *sem)
+{
+ sem->count = RWSEM_UNLOCKED_VALUE;
+ spin_lock_init(&sem->wait_lock);
+ INIT_LIST_HEAD(&sem->wait_list);
+}
+
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
return (sem->count != 0);
diff --git a/arch/sparc/include/asm/rwsem.h b/arch/sparc/include/asm/rwsem.h
index 6e56210..7a90aa2 100644
--- a/arch/sparc/include/asm/rwsem.h
+++ b/arch/sparc/include/asm/rwsem.h
@@ -19,7 +19,7 @@
struct rwsem_waiter;
-struct rw_semaphore {
+struct rw_anon_semaphore {
signed int count;
spinlock_t wait_lock;
struct list_head wait_list;
@@ -29,51 +29,92 @@ struct rw_semaphore {
};
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+# define __RWSEM_ANON_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
+# define __RWSEM_ANON_DEP_MAP_INIT(lockname)
#endif
-#define __RWSEM_INITIALIZER(name) \
+#define __RWSEM_ANON_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
- LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+ LIST_HEAD_INIT((name).wait_list) __RWSEM_ANON_DEP_MAP_INIT(name) }
-#define DECLARE_RWSEM(name) \
- struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+#define DECLARE_ANON_RWSEM(name) \
+ struct rw_anon_semaphore name = __RWSEM_ANON_INITIALIZER(name)
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
- struct lock_class_key *key);
+extern void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name,
+ struct lock_class_key *key);
-#define init_rwsem(sem) \
+#define init_anon_rwsem(sem) \
do { \
static struct lock_class_key __key; \
\
- __init_rwsem((sem), #sem, &__key); \
+ __init_anon_rwsem((sem), #sem, &__key); \
} while (0)
-extern void __down_read(struct rw_semaphore *sem);
-extern int __down_read_trylock(struct rw_semaphore *sem);
-extern void __down_write(struct rw_semaphore *sem);
-extern int __down_write_trylock(struct rw_semaphore *sem);
-extern void __up_read(struct rw_semaphore *sem);
-extern void __up_write(struct rw_semaphore *sem);
-extern void __downgrade_write(struct rw_semaphore *sem);
+extern void __down_read(struct rw_anon_semaphore *sem);
+extern int __down_read_trylock(struct rw_anon_semaphore *sem);
+extern void __down_write(struct rw_anon_semaphore *sem);
+extern int __down_write_trylock(struct rw_anon_semaphore *sem);
+extern void __up_read(struct rw_anon_semaphore *sem);
+extern void __up_write(struct rw_anon_semaphore *sem);
+extern void __downgrade_write(struct rw_anon_semaphore *sem);
-static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+static inline void
+__down_write_nested(struct rw_anon_semaphore *sem, int subclass)
{
__down_write(sem);
}
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+static inline int rwsem_atomic_update(int delta, struct rw_anon_semaphore *sem)
{
return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+static inline void rwsem_atomic_add(int delta, struct rw_anon_semaphore *sem)
{
atomic_add(delta, (atomic_t *)(&sem->count));
}
+static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
+struct rw_semaphore {
+ signed int count;
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
+#define __RWSEM_INITIALIZER(name) \
+{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
+ LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+
+#define DECLARE_RWSEM(name) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+static inline void __init_rwsem(struct rw_semaphore *sem, const char *name,
+ struct lock_class_key *key)
+{
+ __init_anon_rwsem((struct rw_anon_semaphore *)sem, name, key);
+}
+
+#define init_rwsem(sem) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __init_rwsem((sem), #sem, &__key); \
+} while (0)
+
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
return (sem->count != 0);
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index 606ede1..0d686f7 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -45,14 +45,14 @@
struct rwsem_waiter;
-extern asmregparm struct rw_semaphore *
- rwsem_down_read_failed(struct rw_semaphore *sem);
-extern asmregparm struct rw_semaphore *
- rwsem_down_write_failed(struct rw_semaphore *sem);
-extern asmregparm struct rw_semaphore *
- rwsem_wake(struct rw_semaphore *);
-extern asmregparm struct rw_semaphore *
- rwsem_downgrade_wake(struct rw_semaphore *sem);
+extern asmregparm struct rw_anon_semaphore *
+ rwsem_down_read_failed(struct rw_anon_semaphore *sem);
+extern asmregparm struct rw_anon_semaphore *
+ rwsem_down_write_failed(struct rw_anon_semaphore *sem);
+extern asmregparm struct rw_anon_semaphore *
+ rwsem_wake(struct rw_anon_semaphore *);
+extern asmregparm struct rw_anon_semaphore *
+ rwsem_downgrade_wake(struct rw_anon_semaphore *sem);
/*
* the semaphore definition
@@ -76,7 +76,7 @@ extern asmregparm struct rw_semaphore *
typedef signed long rwsem_count_t;
-struct rw_semaphore {
+struct rw_anon_semaphore {
rwsem_count_t count;
spinlock_t wait_lock;
struct list_head wait_list;
@@ -86,35 +86,34 @@ struct rw_semaphore {
};
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+# define __RWSEM_ANON_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
+# define __RWSEM_ANON_DEP_MAP_INIT(lockname)
#endif
-
-#define __RWSEM_INITIALIZER(name) \
+#define __RWSEM_ANON_INITIALIZER(name) \
{ \
RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) \
}
-#define DECLARE_RWSEM(name) \
- struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+#define DECLARE_ANON_RWSEM(name) \
+ struct rw_anon_semaphore name = __RWSEM_ANON_INITIALIZER(name)
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
- struct lock_class_key *key);
+extern void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name,
+ struct lock_class_key *key);
-#define init_rwsem(sem) \
+#define init_anon_rwsem(sem) \
do { \
static struct lock_class_key __key; \
\
- __init_rwsem((sem), #sem, &__key); \
+ __init_anon_rwsem((sem), #sem, &__key); \
} while (0)
/*
* lock for reading
*/
-static inline void __down_read(struct rw_semaphore *sem)
+static inline void __down_read(struct rw_anon_semaphore *sem)
{
asm volatile("# beginning down_read\n\t"
LOCK_PREFIX _ASM_INC "(%1)\n\t"
@@ -131,7 +130,7 @@ static inline void __down_read(struct rw_semaphore *sem)
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
-static inline int __down_read_trylock(struct rw_semaphore *sem)
+static inline int __down_read_trylock(struct rw_anon_semaphore *sem)
{
rwsem_count_t result, tmp;
asm volatile("# beginning __down_read_trylock\n\t"
@@ -153,7 +152,8 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
/*
* lock for writing
*/
-static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+static inline void
+__down_write_nested(struct rw_anon_semaphore *sem, int subclass)
{
rwsem_count_t tmp;
@@ -172,7 +172,7 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
: "memory", "cc");
}
-static inline void __down_write(struct rw_semaphore *sem)
+static inline void __down_write(struct rw_anon_semaphore *sem)
{
__down_write_nested(sem, 0);
}
@@ -180,7 +180,7 @@ static inline void __down_write(struct rw_semaphore *sem)
/*
* trylock for writing -- returns 1 if successful, 0 if contention
*/
-static inline int __down_write_trylock(struct rw_semaphore *sem)
+static inline int __down_write_trylock(struct rw_anon_semaphore *sem)
{
rwsem_count_t ret = cmpxchg(&sem->count,
RWSEM_UNLOCKED_VALUE,
@@ -193,7 +193,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
/*
* unlock after reading
*/
-static inline void __up_read(struct rw_semaphore *sem)
+static inline void __up_read(struct rw_anon_semaphore *sem)
{
rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
asm volatile("# beginning __up_read\n\t"
@@ -211,7 +211,7 @@ static inline void __up_read(struct rw_semaphore *sem)
/*
* unlock after writing
*/
-static inline void __up_write(struct rw_semaphore *sem)
+static inline void __up_write(struct rw_anon_semaphore *sem)
{
rwsem_count_t tmp;
asm volatile("# beginning __up_write\n\t"
@@ -230,7 +230,7 @@ static inline void __up_write(struct rw_semaphore *sem)
/*
* downgrade write lock to read lock
*/
-static inline void __downgrade_write(struct rw_semaphore *sem)
+static inline void __downgrade_write(struct rw_anon_semaphore *sem)
{
asm volatile("# beginning __downgrade_write\n\t"
LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
@@ -251,7 +251,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
* implement atomic add functionality
*/
static inline void rwsem_atomic_add(rwsem_count_t delta,
- struct rw_semaphore *sem)
+ struct rw_anon_semaphore *sem)
{
asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
: "+m" (sem->count)
@@ -262,7 +262,7 @@ static inline void rwsem_atomic_add(rwsem_count_t delta,
* implement exchange and add functionality
*/
static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
- struct rw_semaphore *sem)
+ struct rw_anon_semaphore *sem)
{
rwsem_count_t tmp = delta;
@@ -273,6 +273,47 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
return tmp + delta;
}
+static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
+struct rw_semaphore {
+ signed long count;
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
+#define __RWSEM_INITIALIZER(name) \
+{ 0, __SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \
+ __RWSEM_DEP_MAP_INIT(name) }
+
+#define DECLARE_RWSEM(name) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+static inline void __init_rwsem(struct rw_semaphore *sem, const char *name,
+ struct lock_class_key *key)
+{
+ __init_anon_rwsem((struct rw_anon_semaphore *)sem, name, key);
+}
+
+#define init_rwsem(sem) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __init_rwsem((sem), #sem, &__key); \
+} while (0)
+
+
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
return (sem->count != 0);
diff --git a/arch/xtensa/include/asm/rwsem.h b/arch/xtensa/include/asm/rwsem.h
index e39edf5..32c5e28 100644
--- a/arch/xtensa/include/asm/rwsem.h
+++ b/arch/xtensa/include/asm/rwsem.h
@@ -25,7 +25,7 @@
/*
* the semaphore definition
*/
-struct rw_semaphore {
+struct rw_anon_semaphore {
signed long count;
#define RWSEM_UNLOCKED_VALUE 0x00000000
#define RWSEM_ACTIVE_BIAS 0x00000001
@@ -37,29 +37,37 @@ struct rw_semaphore {
struct list_head wait_list;
};
-#define __RWSEM_INITIALIZER(name) \
+#define __RWSEM_ANON_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
LIST_HEAD_INIT((name).wait_list) }
-#define DECLARE_RWSEM(name) \
- struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+#define DECLARE_ANON_RWSEM(name) \
+ struct rw_anon_semaphore name = __RWSEM_ANON_INITIALIZER(name)
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+extern struct rw_anon_semaphore *
+rwsem_down_read_failed(struct rw_anon_semaphore *sem);
+extern struct rw_anon_semaphore *
+rwsem_down_write_failed(struct rw_anon_semaphore *sem);
+extern struct rw_anon_semaphore *rwsem_wake(struct rw_anon_semaphore *sem);
+extern struct rw_anon_semaphore *
+rwsem_downgrade_wake(struct rw_anon_semaphore *sem);
-static inline void init_rwsem(struct rw_semaphore *sem)
+static inline void init_anon_rwsem(struct rw_anon_semaphore *sem)
{
sem->count = RWSEM_UNLOCKED_VALUE;
spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
+static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
/*
* lock for reading
*/
-static inline void __down_read(struct rw_semaphore *sem)
+static inline void __down_read(struct rw_anon_semaphore *sem)
{
if (atomic_add_return(1,(atomic_t *)(&sem->count)) > 0)
smp_wmb();
@@ -67,7 +75,7 @@ static inline void __down_read(struct rw_semaphore *sem)
rwsem_down_read_failed(sem);
}
-static inline int __down_read_trylock(struct rw_semaphore *sem)
+static inline int __down_read_trylock(struct rw_anon_semaphore *sem)
{
int tmp;
@@ -84,7 +92,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
/*
* lock for writing
*/
-static inline void __down_write(struct rw_semaphore *sem)
+static inline void __down_write(struct rw_anon_semaphore *sem)
{
int tmp;
@@ -96,7 +104,7 @@ static inline void __down_write(struct rw_semaphore *sem)
rwsem_down_write_failed(sem);
}
-static inline int __down_write_trylock(struct rw_semaphore *sem)
+static inline int __down_write_trylock(struct rw_anon_semaphore *sem)
{
int tmp;
@@ -109,7 +117,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
/*
* unlock after reading
*/
-static inline void __up_read(struct rw_semaphore *sem)
+static inline void __up_read(struct rw_anon_semaphore *sem)
{
int tmp;
@@ -122,7 +130,7 @@ static inline void __up_read(struct rw_semaphore *sem)
/*
* unlock after writing
*/
-static inline void __up_write(struct rw_semaphore *sem)
+static inline void __up_write(struct rw_anon_semaphore *sem)
{
smp_wmb();
if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
@@ -133,7 +141,7 @@ static inline void __up_write(struct rw_semaphore *sem)
/*
* implement atomic add functionality
*/
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+static inline void rwsem_atomic_add(int delta, struct rw_anon_semaphore *sem)
{
atomic_add(delta, (atomic_t *)(&sem->count));
}
@@ -141,7 +149,7 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
/*
* downgrade write lock to read lock
*/
-static inline void __downgrade_write(struct rw_semaphore *sem)
+static inline void __downgrade_write(struct rw_anon_semaphore *sem)
{
int tmp;
@@ -154,12 +162,37 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
/*
* implement exchange and add functionality
*/
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+static inline int rwsem_atomic_update(int delta, struct rw_anon_semaphore *sem)
{
smp_mb();
return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
+static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
+struct rw_semaphore {
+ signed long count;
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+};
+
+#define __RWSEM_INITIALIZER(name) \
+ { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
+ LIST_HEAD_INIT((name).wait_list) }
+
+#define DECLARE_RWSEM(name) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+static inline void init_rwsem(struct rw_semaphore *sem)
+{
+ sem->count = RWSEM_UNLOCKED_VALUE;
+ spin_lock_init(&sem->wait_lock);
+ INIT_LIST_HEAD(&sem->wait_list);
+}
+
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
return (sem->count != 0);
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index bdfcc25..38a8f95 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -22,6 +22,68 @@
struct rwsem_waiter;
/*
+ * the rw-anon-semaphore definition
+ * - if activity is 0 then there are no active readers or writers
+ * - if activity is +ve then that is the number of active readers
+ * - if activity is -1 then there is one active writer
+ * - if wait_list is not empty, then there are processes waiting for the semaphore
+ *
+ * the anon in the name documents that the semaphore has no strict
+ * ownership restrictions.
+ */
+struct rw_anon_semaphore {
+ __s32 activity;
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_ANON_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_ANON_DEP_MAP_INIT(lockname)
+#endif
+
+#define __RWSEM_ANON_INITIALIZER(name) \
+{ 0, __SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \
+ __RWSEM_ANON_DEP_MAP_INIT(name) }
+
+#define DECLARE_ANON_RWSEM(name) \
+ struct rw_anon_semaphore name = __RWSEM_ANON_INITIALIZER(name)
+
+extern void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name,
+ struct lock_class_key *key);
+
+#define init_anon_rwsem(sem) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __init_anon_rwsem((sem), #sem, &__key); \
+} while (0)
+
+extern void __down_read(struct rw_anon_semaphore *sem);
+extern int __down_read_trylock(struct rw_anon_semaphore *sem);
+extern void __down_write(struct rw_anon_semaphore *sem);
+extern void __down_write_nested(struct rw_anon_semaphore *sem, int subclass);
+extern int __down_write_trylock(struct rw_anon_semaphore *sem);
+extern void __up_read(struct rw_anon_semaphore *sem);
+extern void __up_write(struct rw_anon_semaphore *sem);
+extern void __downgrade_write(struct rw_anon_semaphore *sem);
+
+static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
+{
+ return (sem->activity != 0);
+}
+
+/*
+ * Non preempt-rt implementation of rw_semaphore. Same as above, but
+ * restricted vs. ownership, i.e. an ownerless locked state and non-owner
+ * release are not allowed.
+ */
+
+/*
* the rw-semaphore definition
* - if activity is 0 then there are no active readers or writers
* - if activity is +ve then that is the number of active readers
@@ -50,8 +112,11 @@ struct rw_semaphore {
#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
- struct lock_class_key *key);
+static inline void __init_rwsem(struct rw_semaphore *sem, const char *name,
+ struct lock_class_key *key)
+{
+ __init_anon_rwsem((struct rw_anon_semaphore *)sem, name, key);
+}
#define init_rwsem(sem) \
do { \
@@ -60,14 +125,6 @@ do { \
__init_rwsem((sem), #sem, &__key); \
} while (0)
-extern void __down_read(struct rw_semaphore *sem);
-extern int __down_read_trylock(struct rw_semaphore *sem);
-extern void __down_write(struct rw_semaphore *sem);
-extern void __down_write_nested(struct rw_semaphore *sem, int subclass);
-extern int __down_write_trylock(struct rw_semaphore *sem);
-extern void __up_read(struct rw_semaphore *sem);
-extern void __up_write(struct rw_semaphore *sem);
-extern void __downgrade_write(struct rw_semaphore *sem);
extern int rwsem_is_locked(struct rw_semaphore *sem);
#endif /* __KERNEL__ */
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index efd348f..70e2f4b 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -14,6 +14,7 @@
#include <asm/system.h>
#include <asm/atomic.h>
+struct rw_anon_semaphore;
struct rw_semaphore;
#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
@@ -25,37 +26,37 @@ struct rw_semaphore;
/*
* lock for reading
*/
-extern void down_read(struct rw_semaphore *sem);
+extern void anon_down_read(struct rw_anon_semaphore *sem);
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
-extern int down_read_trylock(struct rw_semaphore *sem);
+extern int anon_down_read_trylock(struct rw_anon_semaphore *sem);
/*
* lock for writing
*/
-extern void down_write(struct rw_semaphore *sem);
+extern void anon_down_write(struct rw_anon_semaphore *sem);
/*
* trylock for writing -- returns 1 if successful, 0 if contention
*/
-extern int down_write_trylock(struct rw_semaphore *sem);
+extern int anon_down_write_trylock(struct rw_anon_semaphore *sem);
/*
* release a read lock
*/
-extern void up_read(struct rw_semaphore *sem);
+extern void anon_up_read(struct rw_anon_semaphore *sem);
/*
* release a write lock
*/
-extern void up_write(struct rw_semaphore *sem);
+extern void anon_up_write(struct rw_anon_semaphore *sem);
/*
* downgrade write lock to read lock
*/
-extern void downgrade_write(struct rw_semaphore *sem);
+extern void anon_downgrade_write(struct rw_anon_semaphore *sem);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
@@ -71,21 +72,69 @@ extern void downgrade_write(struct rw_semaphore *sem);
* lockdep_set_class() at lock initialization time.
* See Documentation/lockdep-design.txt for more details.)
*/
-extern void down_read_nested(struct rw_semaphore *sem, int subclass);
-extern void down_write_nested(struct rw_semaphore *sem, int subclass);
+extern void anon_down_read_nested(struct rw_anon_semaphore *sem, int subclass);
+extern void anon_down_write_nested(struct rw_anon_semaphore *sem, int subclass);
/*
* Take/release a lock when not the owner will release it.
*
* [ This API should be avoided as much as possible - the
* proper abstraction for this case is completions. ]
*/
-extern void down_read_non_owner(struct rw_semaphore *sem);
-extern void up_read_non_owner(struct rw_semaphore *sem);
+extern void anon_down_read_non_owner(struct rw_anon_semaphore *sem);
+extern void anon_up_read_non_owner(struct rw_anon_semaphore *sem);
#else
-# define down_read_nested(sem, subclass) down_read(sem)
-# define down_write_nested(sem, subclass) down_write(sem)
-# define down_read_non_owner(sem) down_read(sem)
-# define up_read_non_owner(sem) up_read(sem)
+# define anon_down_read_nested(sem, subclass) anon_down_read(sem)
+# define anon_down_write_nested(sem, subclass) anon_down_write(sem)
+# define anon_down_read_non_owner(sem) anon_down_read(sem)
+# define anon_up_read_non_owner(sem) anon_up_read(sem)
#endif
+/*
+ * Non preempt-rt implementations
+ */
+static inline void down_read(struct rw_semaphore *sem)
+{
+ anon_down_read((struct rw_anon_semaphore *)sem);
+}
+
+static inline int down_read_trylock(struct rw_semaphore *sem)
+{
+ return anon_down_read_trylock((struct rw_anon_semaphore *)sem);
+}
+
+static inline void down_write(struct rw_semaphore *sem)
+{
+ anon_down_write((struct rw_anon_semaphore *)sem);
+}
+
+static inline int down_write_trylock(struct rw_semaphore *sem)
+{
+ return anon_down_write_trylock((struct rw_anon_semaphore *)sem);
+}
+
+static inline void up_read(struct rw_semaphore *sem)
+{
+ anon_up_read((struct rw_anon_semaphore *)sem);
+}
+
+static inline void up_write(struct rw_semaphore *sem)
+{
+ anon_up_write((struct rw_anon_semaphore *)sem);
+}
+
+static inline void downgrade_write(struct rw_semaphore *sem)
+{
+ anon_downgrade_write((struct rw_anon_semaphore *)sem);
+}
+
+static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
+{
+ return anon_down_read_nested((struct rw_anon_semaphore *)sem, subclass);
+}
+
+static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
+{
+ anon_down_write_nested((struct rw_anon_semaphore *)sem, subclass);
+}
+
#endif /* _LINUX_RWSEM_H */
diff --git a/kernel/rwsem.c b/kernel/rwsem.c
index cae050b..6c6e7fa 100644
--- a/kernel/rwsem.c
+++ b/kernel/rwsem.c
@@ -16,20 +16,19 @@
/*
* lock for reading
*/
-void __sched down_read(struct rw_semaphore *sem)
+void __sched anon_down_read(struct rw_anon_semaphore *sem)
{
might_sleep();
rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
-
-EXPORT_SYMBOL(down_read);
+EXPORT_SYMBOL(anon_down_read);
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
-int down_read_trylock(struct rw_semaphore *sem)
+int anon_down_read_trylock(struct rw_anon_semaphore *sem)
{
int ret = __down_read_trylock(sem);
@@ -37,26 +36,24 @@ int down_read_trylock(struct rw_semaphore *sem)
rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
return ret;
}
-
-EXPORT_SYMBOL(down_read_trylock);
+EXPORT_SYMBOL(anon_down_read_trylock);
/*
* lock for writing
*/
-void __sched down_write(struct rw_semaphore *sem)
+void __sched anon_down_write(struct rw_anon_semaphore *sem)
{
might_sleep();
rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
-
-EXPORT_SYMBOL(down_write);
+EXPORT_SYMBOL(anon_down_write);
/*
* trylock for writing -- returns 1 if successful, 0 if contention
*/
-int down_write_trylock(struct rw_semaphore *sem)
+int anon_down_write_trylock(struct rw_anon_semaphore *sem)
{
int ret = __down_write_trylock(sem);
@@ -64,37 +61,34 @@ int down_write_trylock(struct rw_semaphore *sem)
rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
return ret;
}
-
-EXPORT_SYMBOL(down_write_trylock);
+EXPORT_SYMBOL(anon_down_write_trylock);
/*
* release a read lock
*/
-void up_read(struct rw_semaphore *sem)
+void anon_up_read(struct rw_anon_semaphore *sem)
{
rwsem_release(&sem->dep_map, 1, _RET_IP_);
__up_read(sem);
}
-
-EXPORT_SYMBOL(up_read);
+EXPORT_SYMBOL(anon_up_read);
/*
* release a write lock
*/
-void up_write(struct rw_semaphore *sem)
+void anon_up_write(struct rw_anon_semaphore *sem)
{
rwsem_release(&sem->dep_map, 1, _RET_IP_);
__up_write(sem);
}
-
-EXPORT_SYMBOL(up_write);
+EXPORT_SYMBOL(anon_up_write);
/*
* downgrade write lock to read lock
*/
-void downgrade_write(struct rw_semaphore *sem)
+void anon_downgrade_write(struct rw_anon_semaphore *sem)
{
/*
* lockdep: a downgraded write will live on as a write
@@ -102,46 +96,41 @@ void downgrade_write(struct rw_semaphore *sem)
*/
__downgrade_write(sem);
}
-
-EXPORT_SYMBOL(downgrade_write);
+EXPORT_SYMBOL(anon_downgrade_write);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-void down_read_nested(struct rw_semaphore *sem, int subclass)
+void anon_down_read_nested(struct rw_anon_semaphore *sem, int subclass)
{
might_sleep();
rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
+EXPORT_SYMBOL(anon_down_read_nested);
-EXPORT_SYMBOL(down_read_nested);
-
-void down_read_non_owner(struct rw_semaphore *sem)
+void anon_down_read_non_owner(struct rw_anon_semaphore *sem)
{
might_sleep();
__down_read(sem);
}
+EXPORT_SYMBOL(anon_down_read_non_owner);
-EXPORT_SYMBOL(down_read_non_owner);
-
-void down_write_nested(struct rw_semaphore *sem, int subclass)
+void anon_down_write_nested(struct rw_anon_semaphore *sem, int subclass)
{
might_sleep();
rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
+EXPORT_SYMBOL(anon_down_write_nested);
-EXPORT_SYMBOL(down_write_nested);
-
-void up_read_non_owner(struct rw_semaphore *sem)
+void anon_up_read_non_owner(struct rw_anon_semaphore *sem)
{
__up_read(sem);
}
-
-EXPORT_SYMBOL(up_read_non_owner);
+EXPORT_SYMBOL(anon_up_read_non_owner);
#endif
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index ccf95bf..8d84e75 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -17,7 +17,7 @@ struct rwsem_waiter {
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};
-int rwsem_is_locked(struct rw_semaphore *sem)
+int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
{
int ret = 1;
unsigned long flags;
@@ -28,13 +28,13 @@ int rwsem_is_locked(struct rw_semaphore *sem)
}
return ret;
}
-EXPORT_SYMBOL(rwsem_is_locked);
+EXPORT_SYMBOL(anon_rwsem_is_locked);
/*
* initialise the semaphore
*/
-void __init_rwsem(struct rw_semaphore *sem, const char *name,
- struct lock_class_key *key)
+void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name,
+ struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
@@ -47,7 +47,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
-EXPORT_SYMBOL(__init_rwsem);
+EXPORT_SYMBOL(__init_anon_rwsem);
/*
* handle the lock release when processes blocked on it that can now run
@@ -58,8 +58,8 @@ EXPORT_SYMBOL(__init_rwsem);
* - woken process blocks are discarded from the list after having task zeroed
* - writers are only woken if wakewrite is non-zero
*/
-static inline struct rw_semaphore *
-__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
+static inline struct rw_anon_semaphore *
+__rwsem_do_wake(struct rw_anon_semaphore *sem, int wakewrite)
{
struct rwsem_waiter *waiter;
struct task_struct *tsk;
@@ -117,8 +117,8 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
/*
* wake a single writer
*/
-static inline struct rw_semaphore *
-__rwsem_wake_one_writer(struct rw_semaphore *sem)
+static inline struct rw_anon_semaphore *
+__rwsem_wake_one_writer(struct rw_anon_semaphore *sem)
{
struct rwsem_waiter *waiter;
struct task_struct *tsk;
@@ -139,7 +139,7 @@ __rwsem_wake_one_writer(struct rw_semaphore *sem)
/*
* get a read lock on the semaphore
*/
-void __sched __down_read(struct rw_semaphore *sem)
+void __sched __down_read(struct rw_anon_semaphore *sem)
{
struct rwsem_waiter waiter;
struct task_struct *tsk;
@@ -182,7 +182,7 @@ void __sched __down_read(struct rw_semaphore *sem)
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
-int __down_read_trylock(struct rw_semaphore *sem)
+int __down_read_trylock(struct rw_anon_semaphore *sem)
{
unsigned long flags;
int ret = 0;
@@ -205,7 +205,7 @@ int __down_read_trylock(struct rw_semaphore *sem)
* get a write lock on the semaphore
* - we increment the waiting count anyway to indicate an exclusive lock
*/
-void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
+void __sched __down_write_nested(struct rw_anon_semaphore *sem, int subclass)
{
struct rwsem_waiter waiter;
struct task_struct *tsk;
@@ -245,7 +245,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
;
}
-void __sched __down_write(struct rw_semaphore *sem)
+void __sched __down_write(struct rw_anon_semaphore *sem)
{
__down_write_nested(sem, 0);
}
@@ -253,7 +253,7 @@ void __sched __down_write(struct rw_semaphore *sem)
/*
* trylock for writing -- returns 1 if successful, 0 if contention
*/
-int __down_write_trylock(struct rw_semaphore *sem)
+int __down_write_trylock(struct rw_anon_semaphore *sem)
{
unsigned long flags;
int ret = 0;
@@ -274,7 +274,7 @@ int __down_write_trylock(struct rw_semaphore *sem)
/*
* release a read lock on the semaphore
*/
-void __up_read(struct rw_semaphore *sem)
+void __up_read(struct rw_anon_semaphore *sem)
{
unsigned long flags;
@@ -289,7 +289,7 @@ void __up_read(struct rw_semaphore *sem)
/*
* release a write lock on the semaphore
*/
-void __up_write(struct rw_semaphore *sem)
+void __up_write(struct rw_anon_semaphore *sem)
{
unsigned long flags;
@@ -306,7 +306,7 @@ void __up_write(struct rw_semaphore *sem)
* downgrade a write lock into a read lock
* - just wake up any readers at the front of the queue
*/
-void __downgrade_write(struct rw_semaphore *sem)
+void __downgrade_write(struct rw_anon_semaphore *sem)
{
unsigned long flags;
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 3e3365e..72eaba5 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -11,8 +11,8 @@
/*
* Initialize an rwsem:
*/
-void __init_rwsem(struct rw_semaphore *sem, const char *name,
- struct lock_class_key *key)
+void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name,
+ struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
@@ -25,8 +25,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
-
-EXPORT_SYMBOL(__init_rwsem);
+EXPORT_SYMBOL(__init_anon_rwsem);
struct rwsem_waiter {
struct list_head list;
@@ -46,8 +45,8 @@ struct rwsem_waiter {
* - woken process blocks are discarded from the list after having task zeroed
* - writers are only woken if downgrading is false
*/
-static inline struct rw_semaphore *
-__rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
+static inline struct rw_anon_semaphore *
+__rwsem_do_wake(struct rw_anon_semaphore *sem, int downgrading)
{
struct rwsem_waiter *waiter;
struct task_struct *tsk;
@@ -146,9 +145,9 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
/*
* wait for a lock to be granted
*/
-static struct rw_semaphore __sched *
-rwsem_down_failed_common(struct rw_semaphore *sem,
- struct rwsem_waiter *waiter, signed long adjustment)
+static struct rw_anon_semaphore __sched *
+rwsem_down_failed_common(struct rw_anon_semaphore *sem,
+ struct rwsem_waiter *waiter, signed long adjustment)
{
struct task_struct *tsk = current;
signed long count;
@@ -187,8 +186,8 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
/*
* wait for the read lock to be granted
*/
-asmregparm struct rw_semaphore __sched *
-rwsem_down_read_failed(struct rw_semaphore *sem)
+asmregparm struct rw_anon_semaphore __sched *
+rwsem_down_read_failed(struct rw_anon_semaphore *sem)
{
struct rwsem_waiter waiter;
@@ -201,8 +200,8 @@ rwsem_down_read_failed(struct rw_semaphore *sem)
/*
* wait for the write lock to be granted
*/
-asmregparm struct rw_semaphore __sched *
-rwsem_down_write_failed(struct rw_semaphore *sem)
+asmregparm struct rw_anon_semaphore __sched *
+rwsem_down_write_failed(struct rw_anon_semaphore *sem)
{
struct rwsem_waiter waiter;
@@ -216,7 +215,7 @@ rwsem_down_write_failed(struct rw_semaphore *sem)
* handle waking up a waiter on the semaphore
* - up_read/up_write has decremented the active part of count if we come here
*/
-asmregparm struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
+asmregparm struct rw_anon_semaphore *rwsem_wake(struct rw_anon_semaphore *sem)
{
unsigned long flags;
@@ -236,7 +235,8 @@ asmregparm struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
* - caller incremented waiting part of count and discovered it still negative
* - just wake up any readers at the front of the queue
*/
-asmregparm struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
+asmregparm struct rw_anon_semaphore *
+rwsem_downgrade_wake(struct rw_anon_semaphore *sem)
{
unsigned long flags;
--
1.7.1.1