From stable-bounces@linux.kernel.org Fri Apr 1 23:42:31 2005
Date: Fri, 1 Apr 2005 23:42:06 -0800
From: Andrew Morton <akpm@osdl.org>
To: stable@kernel.org
Cc:
Subject: [PATCH] rwsem fix

We should merge this backport - it's needed to prevent deadlocks when
dio_complete() does up_read() from IRQ context. And perhaps other places.
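
For reference, a minimal sketch of that deadlock (the function names here
are illustrative, not from the kernel source):

	/* Process context: the old code took sem->wait_lock with IRQs
	 * still enabled. */
	void writer_slow_path(struct rw_semaphore *sem)
	{
		spin_lock(&sem->wait_lock);
		/* <-- an interrupt can fire here on this CPU ... */
		spin_unlock(&sem->wait_lock);
	}

	/* ... and if its handler ends the I/O the way dio_complete()
	 * does: */
	void io_completion_irq(struct rw_semaphore *sem)
	{
		up_read(sem);	/* also needs sem->wait_lock: spins
				 * forever, since the interrupted code
				 * can never run again to release it */
	}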

From: David Howells <dhowells@redhat.com>

[PATCH] rwsem: Make rwsems use interrupt disabling spinlocks

The attached patch makes read/write semaphores use interrupt-disabling
spinlocks in the slow path, thus rendering the up functions and trylock
functions available for use in interrupt context. This matches the
regular semaphore behaviour.
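
With the patch applied, completion paths like the following become safe
(an illustrative 2.6-era interrupt handler, not code from the tree;
struct my_request is hypothetical):

	static irqreturn_t my_io_done(int irq, void *dev_id,
				      struct pt_regs *regs)
	{
		struct my_request *rq = dev_id; /* hypothetical state */

		/* releasing a reader from IRQ context is now legal */
		up_read(rq->sem);
		return IRQ_HANDLED;
	}

Only the sleeping down_read()/down_write() paths still require process
context.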

I've assumed that the normal down functions must be called with interrupts
enabled (since they might schedule), and used the irq-disabling spinlock
variants that don't save the flags.
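
In other words, the pattern is (a sketch; see the diff below for the real
call sites):

	/* Sleeping down paths: the caller must have IRQs enabled on
	 * entry, so the flag-less _irq variants are sufficient. */
	spin_lock_irq(&sem->wait_lock);
	/* ... queue the waiter ... */
	spin_unlock_irq(&sem->wait_lock);

	/* Up/trylock paths: may run with IRQs already disabled (e.g.
	 * inside an interrupt handler), so the caller's flags must be
	 * saved and restored. */
	unsigned long flags;
	spin_lock_irqsave(&sem->wait_lock, flags);
	/* ... grant or release the lock ... */
	spin_unlock_irqrestore(&sem->wait_lock, flags);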

Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Badari Pulavarty <pbadari@us.ibm.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Signed-off-by: Chris Wright <chrisw@osdl.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

diff -Nru a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
--- a/lib/rwsem-spinlock.c	2005-04-01 23:22:40 -08:00
+++ b/lib/rwsem-spinlock.c	2005-04-01 23:22:40 -08:00
@@ -140,12 +140,12 @@

 	rwsemtrace(sem, "Entering __down_read");

-	spin_lock(&sem->wait_lock);
+	spin_lock_irq(&sem->wait_lock);

 	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity++;
-		spin_unlock(&sem->wait_lock);
+		spin_unlock_irq(&sem->wait_lock);
 		goto out;
 	}

@@ -160,7 +160,7 @@
 	list_add_tail(&waiter.list, &sem->wait_list);

 	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock(&sem->wait_lock);
+	spin_unlock_irq(&sem->wait_lock);

 	/* wait to be given the lock */
 	for (;;) {
@@ -181,10 +181,12 @@
  */
 int fastcall __down_read_trylock(struct rw_semaphore *sem)
 {
+	unsigned long flags;
 	int ret = 0;
+
 	rwsemtrace(sem, "Entering __down_read_trylock");

-	spin_lock(&sem->wait_lock);
+	spin_lock_irqsave(&sem->wait_lock, flags);

 	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
 		/* granted */
@@ -192,7 +194,7 @@
 		ret = 1;
 	}

-	spin_unlock(&sem->wait_lock);
+	spin_unlock_irqrestore(&sem->wait_lock, flags);

 	rwsemtrace(sem, "Leaving __down_read_trylock");
 	return ret;
@@ -209,12 +211,12 @@

 	rwsemtrace(sem, "Entering __down_write");

-	spin_lock(&sem->wait_lock);
+	spin_lock_irq(&sem->wait_lock);

 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity = -1;
-		spin_unlock(&sem->wait_lock);
+		spin_unlock_irq(&sem->wait_lock);
 		goto out;
 	}

@@ -229,7 +231,7 @@
 	list_add_tail(&waiter.list, &sem->wait_list);

 	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock(&sem->wait_lock);
+	spin_unlock_irq(&sem->wait_lock);

 	/* wait to be given the lock */
 	for (;;) {
@@ -250,10 +252,12 @@
  */
 int fastcall __down_write_trylock(struct rw_semaphore *sem)
 {
+	unsigned long flags;
 	int ret = 0;
+
 	rwsemtrace(sem, "Entering __down_write_trylock");

-	spin_lock(&sem->wait_lock);
+	spin_lock_irqsave(&sem->wait_lock, flags);

 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
 		/* granted */
@@ -261,7 +265,7 @@
 		ret = 1;
 	}

-	spin_unlock(&sem->wait_lock);
+	spin_unlock_irqrestore(&sem->wait_lock, flags);

 	rwsemtrace(sem, "Leaving __down_write_trylock");
 	return ret;
@@ -272,14 +276,16 @@
  */
 void fastcall __up_read(struct rw_semaphore *sem)
 {
+	unsigned long flags;
+
 	rwsemtrace(sem, "Entering __up_read");

-	spin_lock(&sem->wait_lock);
+	spin_lock_irqsave(&sem->wait_lock, flags);

 	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
 		sem = __rwsem_wake_one_writer(sem);

-	spin_unlock(&sem->wait_lock);
+	spin_unlock_irqrestore(&sem->wait_lock, flags);

 	rwsemtrace(sem, "Leaving __up_read");
 }
@@ -289,15 +295,17 @@
  */
 void fastcall __up_write(struct rw_semaphore *sem)
 {
+	unsigned long flags;
+
 	rwsemtrace(sem, "Entering __up_write");

-	spin_lock(&sem->wait_lock);
+	spin_lock_irqsave(&sem->wait_lock, flags);

 	sem->activity = 0;
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, 1);

-	spin_unlock(&sem->wait_lock);
+	spin_unlock_irqrestore(&sem->wait_lock, flags);

 	rwsemtrace(sem, "Leaving __up_write");
 }
@@ -308,15 +316,17 @@
  */
 void fastcall __downgrade_write(struct rw_semaphore *sem)
 {
+	unsigned long flags;
+
 	rwsemtrace(sem, "Entering __downgrade_write");

-	spin_lock(&sem->wait_lock);
+	spin_lock_irqsave(&sem->wait_lock, flags);

 	sem->activity = 1;
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, 0);

-	spin_unlock(&sem->wait_lock);
+	spin_unlock_irqrestore(&sem->wait_lock, flags);

 	rwsemtrace(sem, "Leaving __downgrade_write");
 }
diff -Nru a/lib/rwsem.c b/lib/rwsem.c
--- a/lib/rwsem.c	2005-04-01 23:22:40 -08:00
+++ b/lib/rwsem.c	2005-04-01 23:22:40 -08:00
@@ -150,7 +150,7 @@
 	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

 	/* set up my own style of waitqueue */
-	spin_lock(&sem->wait_lock);
+	spin_lock_irq(&sem->wait_lock);
 	waiter->task = tsk;
 	get_task_struct(tsk);

@@ -163,7 +163,7 @@
 	if (!(count & RWSEM_ACTIVE_MASK))
 		sem = __rwsem_do_wake(sem, 0);

-	spin_unlock(&sem->wait_lock);
+	spin_unlock_irq(&sem->wait_lock);

 	/* wait to be given the lock */
 	for (;;) {
@@ -219,15 +219,17 @@
  */
 struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
 {
+	unsigned long flags;
+
 	rwsemtrace(sem, "Entering rwsem_wake");

-	spin_lock(&sem->wait_lock);
+	spin_lock_irqsave(&sem->wait_lock, flags);

 	/* do nothing if list empty */
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, 0);

-	spin_unlock(&sem->wait_lock);
+	spin_unlock_irqrestore(&sem->wait_lock, flags);

 	rwsemtrace(sem, "Leaving rwsem_wake");

@@ -241,15 +243,17 @@
  */
 struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
 {
+	unsigned long flags;
+
 	rwsemtrace(sem, "Entering rwsem_downgrade_wake");

-	spin_lock(&sem->wait_lock);
+	spin_lock_irqsave(&sem->wait_lock, flags);

 	/* do nothing if list empty */
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, 1);

-	spin_unlock(&sem->wait_lock);
+	spin_unlock_irqrestore(&sem->wait_lock, flags);

 	rwsemtrace(sem, "Leaving rwsem_downgrade_wake");
 	return sem;