From f39f0efddad61d3a13f0a4e9807d8c1c927963f7 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 24 Feb 2010 09:54:54 +0100
Subject: [PATCH] rwsem: Make inner lock raw
commit a359d62fa46b9c41621fec1c3b31730cfc436b40 in tip.
There is no reason to convert the lock protecting rwsems (the
ownerless variant) to a sleeping spinlock on -rt. Convert it to raw.
[PG: since tip, some instances were upgraded to irqsave/restore in mainline]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
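As a minimal illustration (not part of the patch), the sketch below mirrors the pattern the
diff converts to: the rwsem's inner wait_lock becomes a raw_spinlock_t, which stays a true
spinning lock on PREEMPT_RT, and it only guards short, bounded sections such as wait-list
manipulation. The names demo_anon_sem, demo_init and demo_add_waiter are invented for this
sketch and do not appear in the patch.

/*
 * Hypothetical sketch only -- mirrors the converted pattern, assuming
 * kernel headers. The inner lock guards a short, bounded critical
 * section, so it can remain a real spinning lock on -rt.
 */
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_anon_sem {
	__s32			activity;
	raw_spinlock_t		wait_lock;	/* inner lock: never sleeps */
	struct list_head	wait_list;
};

static void demo_init(struct demo_anon_sem *sem)
{
	sem->activity = 0;
	raw_spin_lock_init(&sem->wait_lock);	/* as in __init_anon_rwsem() */
	INIT_LIST_HEAD(&sem->wait_list);
}

static void demo_add_waiter(struct demo_anon_sem *sem, struct list_head *waiter)
{
	unsigned long flags;

	/* Short, IRQs-off section around the wait list, as in the diff below. */
	raw_spin_lock_irqsave(&sem->wait_lock, flags);
	list_add_tail(waiter, &sem->wait_list);
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}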
diff --git a/arch/powerpc/include/asm/rwsem.h b/arch/powerpc/include/asm/rwsem.h
index c2494d4..accf580 100644
--- a/arch/powerpc/include/asm/rwsem.h
+++ b/arch/powerpc/include/asm/rwsem.h
@@ -30,7 +30,7 @@ struct rw_anon_semaphore {
#define RWSEM_WAITING_BIAS (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
- spinlock_t wait_lock;
+ raw_spinlock_t wait_lock;
struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
@@ -44,7 +44,7 @@ struct rw_anon_semaphore {
#endif
#define __RWSEM_ANON_INITIALIZER(name) \
- { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
+ { RWSEM_UNLOCKED_VALUE, __RAW_SPIN_LOCK_UNLOCKED((name).wait_lock), \
LIST_HEAD_INIT((name).wait_list) __RWSEM_ANON_DEP_MAP_INIT(name) }
#define DECLARE_ANON_RWSEM(name) \
@@ -178,7 +178,7 @@ static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
struct rw_semaphore {
/* XXX this should be able to be an atomic_t -- paulus */
signed int count;
- spinlock_t wait_lock;
+ raw_spinlock_t wait_lock;
struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
@@ -192,7 +192,7 @@ struct rw_semaphore {
#endif
#define __RWSEM_INITIALIZER(name) \
- { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
+ { RWSEM_UNLOCKED_VALUE, __RAW_SPIN_LOCK_UNLOCKED((name).wait_lock), \
LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
#define DECLARE_RWSEM(name) \
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index 286cba2..42f8a37 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -78,7 +78,7 @@ typedef signed long rwsem_count_t;
struct rw_anon_semaphore {
rwsem_count_t count;
- spinlock_t wait_lock;
+ raw_spinlock_t wait_lock;
struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
@@ -93,7 +93,7 @@ struct rw_anon_semaphore {
#define __RWSEM_ANON_INITIALIZER(name) \
{ \
- RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
+ RWSEM_UNLOCKED_VALUE, __RAW_SPIN_LOCK_UNLOCKED((name).wait_lock), \
LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) \
}
@@ -282,7 +282,7 @@ static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
struct rw_semaphore {
signed long count;
- spinlock_t wait_lock;
+ raw_spinlock_t wait_lock;
struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
@@ -296,7 +296,7 @@ struct rw_semaphore {
#endif
#define __RWSEM_INITIALIZER(name) \
-{ 0, __SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \
+{ 0, __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \
__RWSEM_DEP_MAP_INIT(name) }
#define DECLARE_RWSEM(name) \
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index 17df0dc..6608521 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -33,7 +33,7 @@ struct rwsem_waiter;
*/
struct rw_anon_semaphore {
__s32 activity;
- spinlock_t wait_lock;
+ raw_spinlock_t wait_lock;
struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
@@ -47,7 +47,7 @@ struct rw_anon_semaphore {
#endif
#define __RWSEM_ANON_INITIALIZER(name) \
-{ 0, __SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \
+{ 0, __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \
__RWSEM_ANON_DEP_MAP_INIT(name) }
#define DECLARE_ANON_RWSEM(name) \
@@ -89,7 +89,7 @@ extern void __downgrade_write(struct rw_anon_semaphore *sem);
*/
struct rw_semaphore {
__s32 activity;
- spinlock_t wait_lock;
+ raw_spinlock_t wait_lock;
struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
@@ -103,7 +103,7 @@ struct rw_semaphore {
#endif
#define __RWSEM_INITIALIZER(name) \
-{ 0, __SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \
+{ 0, __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \
__RWSEM_DEP_MAP_INIT(name) }
#define DECLARE_RWSEM(name) \
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index dea1aea..26b7766 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -22,9 +22,9 @@ int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
int ret = 1;
unsigned long flags;
- if (spin_trylock_irqsave(&sem->wait_lock, flags)) {
+ if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
ret = (sem->activity != 0);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
return ret;
}
@@ -44,7 +44,7 @@ void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name,
lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
sem->activity = 0;
- spin_lock_init(&sem->wait_lock);
+ raw_spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
EXPORT_SYMBOL(__init_anon_rwsem);
@@ -145,12 +145,12 @@ void __sched __down_read(struct rw_anon_semaphore *sem)
struct task_struct *tsk;
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
/* granted */
sem->activity++;
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
goto out;
}
@@ -165,7 +165,7 @@ void __sched __down_read(struct rw_anon_semaphore *sem)
list_add_tail(&waiter.list, &sem->wait_list);
/* we don't need to touch the semaphore struct anymore */
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
/* wait to be given the lock */
for (;;) {
@@ -189,7 +189,7 @@ int __down_read_trylock(struct rw_anon_semaphore *sem)
int ret = 0;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
/* granted */
@@ -197,7 +197,7 @@ int __down_read_trylock(struct rw_anon_semaphore *sem)
ret = 1;
}
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
return ret;
}
@@ -212,12 +212,12 @@ void __sched __down_write_nested(struct rw_anon_semaphore *sem, int subclass)
struct task_struct *tsk;
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
if (sem->activity == 0 && list_empty(&sem->wait_list)) {
/* granted */
sem->activity = -1;
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
goto out;
}
@@ -232,7 +232,7 @@ void __sched __down_write_nested(struct rw_anon_semaphore *sem, int subclass)
list_add_tail(&waiter.list, &sem->wait_list);
/* we don't need to touch the semaphore struct anymore */
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
/* wait to be given the lock */
for (;;) {
@@ -260,7 +260,7 @@ int __down_write_trylock(struct rw_anon_semaphore *sem)
unsigned long flags;
int ret = 0;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
if (sem->activity == 0 && list_empty(&sem->wait_list)) {
/* granted */
@@ -268,7 +268,7 @@ int __down_write_trylock(struct rw_anon_semaphore *sem)
ret = 1;
}
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
return ret;
}
@@ -280,12 +280,12 @@ void __up_read(struct rw_anon_semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
if (--sem->activity == 0 && !list_empty(&sem->wait_list))
sem = __rwsem_wake_one_writer(sem);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
/*
@@ -295,13 +295,13 @@ void __up_write(struct rw_anon_semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
sem->activity = 0;
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, 1);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
/*
@@ -312,12 +312,12 @@ void __downgrade_write(struct rw_anon_semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
sem->activity = 1;
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, 0);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 72eaba5..47f5a75 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -22,7 +22,7 @@ void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name,
lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
sem->count = RWSEM_UNLOCKED_VALUE;
- spin_lock_init(&sem->wait_lock);
+ raw_spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
EXPORT_SYMBOL(__init_anon_rwsem);
@@ -155,7 +155,7 @@ rwsem_down_failed_common(struct rw_anon_semaphore *sem,
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
/* set up my own style of waitqueue */
- spin_lock_irq(&sem->wait_lock);
+ raw_spin_lock_irq(&sem->wait_lock);
waiter->task = tsk;
get_task_struct(tsk);
@@ -168,7 +168,7 @@ rwsem_down_failed_common(struct rw_anon_semaphore *sem,
if (!(count & RWSEM_ACTIVE_MASK))
sem = __rwsem_do_wake(sem, 0);
- spin_unlock_irq(&sem->wait_lock);
+ raw_spin_unlock_irq(&sem->wait_lock);
/* wait to be given the lock */
for (;;) {
@@ -219,13 +219,13 @@ asmregparm struct rw_anon_semaphore *rwsem_wake(struct rw_anon_semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
/* do nothing if list empty */
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, 0);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
return sem;
}
@@ -240,13 +240,13 @@ rwsem_downgrade_wake(struct rw_anon_semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
/* do nothing if list empty */
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, 1);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
return sem;
}
--
1.7.4