| From: Thomas Gleixner <tglx@linutronix.de> |
| Date: Fri, 18 Mar 2011 09:18:52 +0100 |
| Subject: buffer_head: Replace bh_uptodate_lock for -rt |
| |
| Wrap the bit_spin_lock calls into separate inline functions and add |
| the RT replacements with a real spinlock. |
| |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| --- |
| fs/buffer.c | 21 +++++++-------------- |
| fs/ntfs/aops.c | 10 +++------- |
| include/linux/buffer_head.h | 34 ++++++++++++++++++++++++++++++++++ |
| 3 files changed, 44 insertions(+), 21 deletions(-) |
| |
| --- a/fs/buffer.c |
| +++ b/fs/buffer.c |
| @@ -300,8 +300,7 @@ static void end_buffer_async_read(struct |
| * decide that the page is now completely done. |
| */ |
| first = page_buffers(page); |
| - local_irq_save(flags); |
| - bit_spin_lock(BH_Uptodate_Lock, &first->b_state); |
| + flags = bh_uptodate_lock_irqsave(first); |
| clear_buffer_async_read(bh); |
| unlock_buffer(bh); |
| tmp = bh; |
| @@ -314,8 +313,7 @@ static void end_buffer_async_read(struct |
| } |
| tmp = tmp->b_this_page; |
| } while (tmp != bh); |
| - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); |
| - local_irq_restore(flags); |
| + bh_uptodate_unlock_irqrestore(first, flags); |
| |
| /* |
| * If none of the buffers had errors and they are all |
| @@ -327,9 +325,7 @@ static void end_buffer_async_read(struct |
| return; |
| |
| still_busy: |
| - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); |
| - local_irq_restore(flags); |
| - return; |
| + bh_uptodate_unlock_irqrestore(first, flags); |
| } |
| |
| /* |
| @@ -357,8 +353,7 @@ void end_buffer_async_write(struct buffe |
| } |
| |
| first = page_buffers(page); |
| - local_irq_save(flags); |
| - bit_spin_lock(BH_Uptodate_Lock, &first->b_state); |
| + flags = bh_uptodate_lock_irqsave(first); |
| |
| clear_buffer_async_write(bh); |
| unlock_buffer(bh); |
| @@ -370,15 +365,12 @@ void end_buffer_async_write(struct buffe |
| } |
| tmp = tmp->b_this_page; |
| } |
| - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); |
| - local_irq_restore(flags); |
| + bh_uptodate_unlock_irqrestore(first, flags); |
| end_page_writeback(page); |
| return; |
| |
| still_busy: |
| - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); |
| - local_irq_restore(flags); |
| - return; |
| + bh_uptodate_unlock_irqrestore(first, flags); |
| } |
| EXPORT_SYMBOL(end_buffer_async_write); |
| |
| @@ -3314,6 +3306,7 @@ struct buffer_head *alloc_buffer_head(gf |
| struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); |
| if (ret) { |
| INIT_LIST_HEAD(&ret->b_assoc_buffers); |
| + buffer_head_init_locks(ret); |
| preempt_disable(); |
| __this_cpu_inc(bh_accounting.nr); |
| recalc_bh_state(); |
| --- a/fs/ntfs/aops.c |
| +++ b/fs/ntfs/aops.c |
| @@ -107,8 +107,7 @@ static void ntfs_end_buffer_async_read(s |
| "0x%llx.", (unsigned long long)bh->b_blocknr); |
| } |
| first = page_buffers(page); |
| - local_irq_save(flags); |
| - bit_spin_lock(BH_Uptodate_Lock, &first->b_state); |
| + flags = bh_uptodate_lock_irqsave(first); |
| clear_buffer_async_read(bh); |
| unlock_buffer(bh); |
| tmp = bh; |
| @@ -123,8 +122,7 @@ static void ntfs_end_buffer_async_read(s |
| } |
| tmp = tmp->b_this_page; |
| } while (tmp != bh); |
| - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); |
| - local_irq_restore(flags); |
| + bh_uptodate_unlock_irqrestore(first, flags); |
| /* |
| * If none of the buffers had errors then we can set the page uptodate, |
| * but we first have to perform the post read mst fixups, if the |
| @@ -159,9 +157,7 @@ static void ntfs_end_buffer_async_read(s |
| unlock_page(page); |
| return; |
| still_busy: |
| - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); |
| - local_irq_restore(flags); |
| - return; |
| + bh_uptodate_unlock_irqrestore(first, flags); |
| } |
| |
| /** |
| --- a/include/linux/buffer_head.h |
| +++ b/include/linux/buffer_head.h |
| @@ -75,8 +75,42 @@ struct buffer_head { |
| struct address_space *b_assoc_map; /* mapping this buffer is |
| associated with */ |
| atomic_t b_count; /* users using this buffer_head */ |
| +#ifdef CONFIG_PREEMPT_RT_BASE |
| + spinlock_t b_uptodate_lock; |
| +#endif |
| }; |
| |
| +static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh) |
| +{ |
| + unsigned long flags; |
| + |
| +#ifndef CONFIG_PREEMPT_RT_BASE |
| + local_irq_save(flags); |
| + bit_spin_lock(BH_Uptodate_Lock, &bh->b_state); |
| +#else |
| + spin_lock_irqsave(&bh->b_uptodate_lock, flags); |
| +#endif |
| + return flags; |
| +} |
| + |
| +static inline void |
| +bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags) |
| +{ |
| +#ifndef CONFIG_PREEMPT_RT_BASE |
| + bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state); |
| + local_irq_restore(flags); |
| +#else |
| + spin_unlock_irqrestore(&bh->b_uptodate_lock, flags); |
| +#endif |
| +} |
| + |
| +static inline void buffer_head_init_locks(struct buffer_head *bh) |
| +{ |
| +#ifdef CONFIG_PREEMPT_RT_BASE |
| + spin_lock_init(&bh->b_uptodate_lock); |
| +#endif |
| +} |
| + |
| /* |
| * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() |
| * and buffer_foo() functions. |