| From 0f9bf4a4ba679e35a7c9490a05e781b0b8481ae4 Mon Sep 17 00:00:00 2001 |
| From: Thomas Gleixner <tglx@linutronix.de> |
| Date: Wed, 24 Feb 2010 09:50:22 +0100 |
| Subject: [PATCH] semaphore: Make inner lock raw |
| |
| commit d0ce166dd4c76b1eb484aa68f1cddaafdeef9238 in tip. |
| |
| There is no reason for the spinlock protecting the semaphore to be |
| converted to a sleeping spinlock in -rt. Convert it to a |
| raw_spinlock. That also resolves the lockdep complaint about the |
| rt_mutex.wait_lock not being initialized. |
| |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com> |
| |
| diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h |
| index aaeced9..66d2591 100644 |
| --- a/include/linux/semaphore.h |
| +++ b/include/linux/semaphore.h |
| @@ -14,14 +14,14 @@ |
| |
| /* Please don't access any members of this structure directly */ |
| struct semaphore { |
| - spinlock_t lock; |
| + raw_spinlock_t lock; |
| unsigned int count; |
| struct list_head wait_list; |
| }; |
| |
| #define __SEMAPHORE_INITIALIZER(name, n) \ |
| { \ |
| - .lock = __SPIN_LOCK_UNLOCKED((name).lock), \ |
| + .lock = __RAW_SPIN_LOCK_UNLOCKED((name).lock), \ |
| .count = n, \ |
| .wait_list = LIST_HEAD_INIT((name).wait_list), \ |
| } |
| diff --git a/kernel/semaphore.c b/kernel/semaphore.c |
| index 94a62c0..d831841 100644 |
| --- a/kernel/semaphore.c |
| +++ b/kernel/semaphore.c |
| @@ -54,12 +54,12 @@ void down(struct semaphore *sem) |
| { |
| unsigned long flags; |
| |
| - spin_lock_irqsave(&sem->lock, flags); |
| + raw_spin_lock_irqsave(&sem->lock, flags); |
| if (likely(sem->count > 0)) |
| sem->count--; |
| else |
| __down(sem); |
| - spin_unlock_irqrestore(&sem->lock, flags); |
| + raw_spin_unlock_irqrestore(&sem->lock, flags); |
| } |
| EXPORT_SYMBOL(down); |
| |
| @@ -77,12 +77,12 @@ int down_interruptible(struct semaphore *sem) |
| unsigned long flags; |
| int result = 0; |
| |
| - spin_lock_irqsave(&sem->lock, flags); |
| + raw_spin_lock_irqsave(&sem->lock, flags); |
| if (likely(sem->count > 0)) |
| sem->count--; |
| else |
| result = __down_interruptible(sem); |
| - spin_unlock_irqrestore(&sem->lock, flags); |
| + raw_spin_unlock_irqrestore(&sem->lock, flags); |
| |
| return result; |
| } |
| @@ -103,12 +103,12 @@ int down_killable(struct semaphore *sem) |
| unsigned long flags; |
| int result = 0; |
| |
| - spin_lock_irqsave(&sem->lock, flags); |
| + raw_spin_lock_irqsave(&sem->lock, flags); |
| if (likely(sem->count > 0)) |
| sem->count--; |
| else |
| result = __down_killable(sem); |
| - spin_unlock_irqrestore(&sem->lock, flags); |
| + raw_spin_unlock_irqrestore(&sem->lock, flags); |
| |
| return result; |
| } |
| @@ -132,11 +132,11 @@ int down_trylock(struct semaphore *sem) |
| unsigned long flags; |
| int count; |
| |
| - spin_lock_irqsave(&sem->lock, flags); |
| + raw_spin_lock_irqsave(&sem->lock, flags); |
| count = sem->count - 1; |
| if (likely(count >= 0)) |
| sem->count = count; |
| - spin_unlock_irqrestore(&sem->lock, flags); |
| + raw_spin_unlock_irqrestore(&sem->lock, flags); |
| |
| return (count < 0); |
| } |
| @@ -157,12 +157,12 @@ int down_timeout(struct semaphore *sem, long jiffies) |
| unsigned long flags; |
| int result = 0; |
| |
| - spin_lock_irqsave(&sem->lock, flags); |
| + raw_spin_lock_irqsave(&sem->lock, flags); |
| if (likely(sem->count > 0)) |
| sem->count--; |
| else |
| result = __down_timeout(sem, jiffies); |
| - spin_unlock_irqrestore(&sem->lock, flags); |
| + raw_spin_unlock_irqrestore(&sem->lock, flags); |
| |
| return result; |
| } |
| @@ -179,12 +179,12 @@ void up(struct semaphore *sem) |
| { |
| unsigned long flags; |
| |
| - spin_lock_irqsave(&sem->lock, flags); |
| + raw_spin_lock_irqsave(&sem->lock, flags); |
| if (likely(list_empty(&sem->wait_list))) |
| sem->count++; |
| else |
| __up(sem); |
| - spin_unlock_irqrestore(&sem->lock, flags); |
| + raw_spin_unlock_irqrestore(&sem->lock, flags); |
| } |
| EXPORT_SYMBOL(up); |
| |
| @@ -217,9 +217,9 @@ static inline int __sched __down_common(struct semaphore *sem, long state, |
| if (timeout <= 0) |
| goto timed_out; |
| __set_task_state(task, state); |
| - spin_unlock_irq(&sem->lock); |
| + raw_spin_unlock_irq(&sem->lock); |
| timeout = schedule_timeout(timeout); |
| - spin_lock_irq(&sem->lock); |
| + raw_spin_lock_irq(&sem->lock); |
| if (waiter.up) |
| return 0; |
| } |
| -- |
| 1.7.1.1 |
| |