From a87b56c9af8f815fd739221b9f29c23812f3b3e3 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 26 Jul 2009 19:39:56 +0200
Subject: [PATCH] rt: Add the preempt-rt lock replacement APIs

commit 2f0c84571b89108c72ebdc3dcaccbe99f754c63a in tip.

Map spinlocks, rwlocks, rw_semaphores and semaphores to the
rt_mutex-based locking functions for preempt-rt.

[PG: drop include/linux/semaphore.h part; it gets tossed for rt >=33]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>

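To illustrate the effect of the mapping (illustration only, not part of
the patch; the lock and function names below are invented): caller code
does not change, only what the familiar API expands to does.

        #include <linux/spinlock.h>

        static DEFINE_SPINLOCK(my_lock);        /* hypothetical example lock */

        static void my_update(void)
        {
                unsigned long flags;

                /*
                 * With PREEMPT_RT=n this disables interrupts and spins.
                 * With PREEMPT_RT=y it takes a priority-inheriting
                 * rt_mutex, may sleep, and leaves flags set to 0.
                 */
                spin_lock_irqsave(&my_lock, flags);
                /* ... critical section ... */
                spin_unlock_irqrestore(&my_lock, flags);
        }
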
diff --git a/arch/powerpc/include/asm/rwsem.h b/arch/powerpc/include/asm/rwsem.h
index 564c373..c2494d4 100644
--- a/arch/powerpc/include/asm/rwsem.h
+++ b/arch/powerpc/include/asm/rwsem.h
@@ -173,6 +173,8 @@ static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
return (sem->count != 0);
}

+#ifndef CONFIG_PREEMPT_RT
+
struct rw_semaphore {
/* XXX this should be able to be an atomic_t -- paulus */
signed int count;
@@ -213,6 +215,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
return (sem->count != 0);
}
+#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_RWSEM_H */
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index 0d686f7..286cba2 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -278,6 +278,8 @@ static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
return (sem->count != 0);
}

+#ifndef CONFIG_PREEMPT_RT
+
struct rw_semaphore {
signed long count;
spinlock_t wait_lock;
@@ -318,6 +320,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
return (sem->count != 0);
}
+#endif

#endif /* __KERNEL__ */
#endif /* _ASM_X86_RWSEM_H */
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 878cab4..f98509b 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -12,11 +12,85 @@

#include <linux/list.h>
#include <linux/spinlock_types.h>
+#include <linux/rt_lock.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>

#include <asm/atomic.h>

+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+ , .dep_map = { .name = #lockname }
+#else
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+#endif
+
+#ifdef CONFIG_PREEMPT_RT
+
+#include <linux/rtmutex.h>
+
+struct mutex {
+ struct rt_mutex lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+
+#define __MUTEX_INITIALIZER(mutexname) \
+ { \
+ .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \
+ __DEP_MAP_MUTEX_INITIALIZER(mutexname) \
+ }
+
+#define DEFINE_MUTEX(mutexname) \
+ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+extern void
+__mutex_init(struct mutex *lock, char *name, struct lock_class_key *key);
+
+extern void __lockfunc _mutex_lock(struct mutex *lock);
+extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock);
+extern int __lockfunc _mutex_lock_killable(struct mutex *lock);
+extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass);
+extern int __lockfunc
+_mutex_lock_interruptible_nested(struct mutex *lock, int subclass);
+extern int __lockfunc
+_mutex_lock_killable_nested(struct mutex *lock, int subclass);
+extern int __lockfunc _mutex_trylock(struct mutex *lock);
+extern void __lockfunc _mutex_unlock(struct mutex *lock);
+
+#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock)
+#define mutex_lock(l) _mutex_lock(l)
+#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l)
+#define mutex_lock_killable(l) _mutex_lock_killable(l)
+#define mutex_trylock(l) _mutex_trylock(l)
+#define mutex_unlock(l) _mutex_unlock(l)
+#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s)
+# define mutex_lock_interruptible_nested(l, s) \
+ _mutex_lock_interruptible_nested(l, s)
+# define mutex_lock_killable_nested(l, s) \
+ _mutex_lock_killable_nested(l, s)
+#else
+# define mutex_lock_nested(l, s) _mutex_lock(l)
+# define mutex_lock_interruptible_nested(l, s) \
+ _mutex_lock_interruptible(l)
+# define mutex_lock_killable_nested(l, s) \
+ _mutex_lock_killable(l)
+#endif
+
+# define mutex_init(mutex) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __mutex_init((mutex), #mutex, &__key); \
+} while (0)
+
+#else /* PREEMPT_RT */
+
/*
* Simple, straightforward mutexes with strict semantics:
*
@@ -87,13 +161,6 @@ do { \
# define mutex_destroy(mutex) do { } while (0)
#endif

-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
- , .dep_map = { .name = #lockname }
-#else
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
-#endif
-
#define __MUTEX_INITIALIZER(lockname) \
{ .count = ATOMIC_INIT(1) \
, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
@@ -150,6 +217,8 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
*/
extern int mutex_trylock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);
+#endif /* !PREEMPT_RT */
+
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);

#endif
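
A hypothetical caller of the RT mutex API above (names invented for the
example): the out-of-line _mutex_* entry points keep the lockdep
annotations, so the usual patterns work unchanged.

        #include <linux/mutex.h>

        static DEFINE_MUTEX(my_mutex);          /* hypothetical example */

        static int my_op(void)
        {
                /* Expands to _mutex_lock_interruptible(), which takes the
                 * embedded rt_mutex and returns nonzero on a signal. */
                if (mutex_lock_interruptible(&my_mutex))
                        return -EINTR;
                /* ... */
                mutex_unlock(&my_mutex);
                return 0;
        }
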
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 6898985..2d7d3a5 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -75,14 +75,16 @@

#include <linux/kernel.h>
#include <linux/list.h>
-#include <linux/spinlock_types.h>
+
+struct spinlock;
+struct raw_spinlock;

struct plist_head {
struct list_head prio_list;
struct list_head node_list;
#ifdef CONFIG_DEBUG_PI_LIST
- raw_spinlock_t *rawlock;
- spinlock_t *spinlock;
+ struct raw_spinlock *rawlock;
+ struct spinlock *spinlock;
#endif
};

@@ -142,7 +144,7 @@ struct plist_node {
* @lock: spinlock protecting the list (debugging)
*/
static inline void
-plist_head_init(struct plist_head *head, spinlock_t *lock)
+plist_head_init(struct plist_head *head, struct spinlock *lock)
{
INIT_LIST_HEAD(&head->prio_list);
INIT_LIST_HEAD(&head->node_list);
@@ -158,7 +160,7 @@ plist_head_init(struct plist_head *head, spinlock_t *lock)
* @lock: raw_spinlock protecting the list (debugging)
*/
static inline void
-plist_head_init_raw(struct plist_head *head, struct raw_spinlock *lock)
{
INIT_LIST_HEAD(&head->prio_list);
INIT_LIST_HEAD(&head->node_list);
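
The forward declarations work because plist_head only stores pointers to
the lock types; C allows pointers to incomplete types, and only a
dereference needs the full definition. A minimal sketch of the pattern
(illustrative only, hypothetical struct):

        struct spinlock;                        /* incomplete type */

        struct example {
                struct spinlock *lock;          /* OK: pointer only */
        };

This is what lets rt_lock.h define the PREEMPT_RT spinlock_t without
creating a circular include between plist.h and the lock headers.
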
diff --git a/include/linux/rt_lock.h b/include/linux/rt_lock.h
new file mode 100644
index 0000000..07d98f5
--- /dev/null
+++ b/include/linux/rt_lock.h
@@ -0,0 +1,183 @@
+#ifndef __LINUX_RT_LOCK_H
+#define __LINUX_RT_LOCK_H
+
+/*
+ * Real-Time Preemption Support
+ *
+ * started by Ingo Molnar:
+ *
+ * Copyright (C) 2004, 2005 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * This file contains the main data structure definitions.
+ */
+#include <linux/rtmutex.h>
+#include <asm/atomic.h>
+#include <linux/spinlock_types.h>
+
+#ifdef CONFIG_PREEMPT_RT
+
+static inline int preempt_rt(void) { return 1; }
+
+/*
+ * spinlocks - an RT mutex plus lock-break field:
+ */
+typedef struct spinlock {
+ struct rt_mutex lock;
+ unsigned int break_lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} spinlock_t;
+
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+# define __RT_SPIN_INITIALIZER(name) \
+ { \
+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name), \
+ .save_state = 1, \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ }
+#else
+# define __RT_SPIN_INITIALIZER(name) \
+ { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name) }
+#endif
+
+#define __SPIN_LOCK_UNLOCKED(name) \
+ { .lock = __RT_SPIN_INITIALIZER(name), \
+ SPIN_DEP_MAP_INIT(name) }
+
+#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(spin_old_style)
+
+#define __DEFINE_SPINLOCK(name) \
+ spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
+
+#define DEFINE_SPINLOCK(name) \
+ spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name)
+
+extern void
+__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);
+
+#define spin_lock_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __rt_spin_lock_init(lock, #lock, &__key); \
+} while (0)
+
+extern void __lockfunc rt_spin_lock(spinlock_t *lock);
+extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
+extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
+extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
+extern int __lockfunc
+rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
+extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
+extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
+
+/*
+ * lockdep-less calls, for derived types like rwlock
+ * (for trylock they can use rt_mutex_trylock() directly):
+ */
+extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
+extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
+
+/*
+ * rwlocks - an RT mutex plus reader-depth and lock-break fields:
+ */
+typedef struct {
+ struct rt_mutex lock;
+ int read_depth;
+ unsigned int break_lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} rwlock_t;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
+#define __RW_LOCK_UNLOCKED(name) \
+ { .lock = __RT_SPIN_INITIALIZER(name), \
+ RW_DEP_MAP_INIT(name) }
+
+#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(rw_old_style)
+
+#define DEFINE_RWLOCK(name) \
+ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
+
+extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
+extern void __lockfunc rt_read_lock(rwlock_t *rwlock);
+extern int __lockfunc rt_write_trylock(rwlock_t *rwlock);
+extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock,
+ unsigned long *flags);
+extern int __lockfunc rt_read_trylock(rwlock_t *rwlock);
+extern void __lockfunc rt_write_unlock(rwlock_t *rwlock);
+extern void __lockfunc rt_read_unlock(rwlock_t *rwlock);
+extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock);
+extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock);
+extern void
+__rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key);
+
+#define rwlock_init(rwl) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __rt_rwlock_init(rwl, #rwl, &__key); \
+} while (0)
+
+/*
+ * RW-semaphores are an rt_mutex plus a reader-depth count.
+ *
+ * Note that the semantics are different from the usual
+ * Linux rw-sems: in PREEMPT_RT mode we do not allow
+ * multiple readers to hold the lock at once; we only allow
+ * a read-lock owner to read-lock recursively. This is
+ * better for latency, makes the implementation inherently
+ * fair and makes it simpler as well:
+ */
+struct rw_semaphore {
+ struct rt_mutex lock;
+ int read_depth;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#define __RWSEM_INITIALIZER(name) \
+ { .lock = __RT_MUTEX_INITIALIZER(name.lock), \
+ RW_DEP_MAP_INIT(name) }
+
+#define DECLARE_RWSEM(lockname) \
+ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
+
+extern void __rt_rwsem_init(struct rw_semaphore *rwsem, char *name,
+ struct lock_class_key *key);
+
+# define rt_init_rwsem(sem) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __rt_rwsem_init((sem), #sem, &__key); \
+} while (0)
+
+extern void rt_down_write(struct rw_semaphore *rwsem);
+extern void
+rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
+extern void
+rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
+extern void rt_down_read(struct rw_semaphore *rwsem);
+extern int rt_down_write_trylock(struct rw_semaphore *rwsem);
+extern int rt_down_read_trylock(struct rw_semaphore *rwsem);
+extern void rt_up_read(struct rw_semaphore *rwsem);
+extern void rt_up_write(struct rw_semaphore *rwsem);
+extern void rt_downgrade_write(struct rw_semaphore *rwsem);
+
+#else
+
+static inline int preempt_rt(void) { return 0; }
+
+#endif /* CONFIG_PREEMPT_RT */
+
+#endif
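
A hypothetical illustration of the rw_semaphore semantics declared above
(function and lock names invented): only recursive read-locking by the
current owner nests; two tasks can never both hold the read side.

        static DECLARE_RWSEM(my_sem);           /* hypothetical example */

        static void writer_path(void)
        {
                down_write(&my_sem);
                down_read(&my_sem);     /* same owner: bumps read_depth */
                up_read(&my_sem);       /* drops read_depth, keeps the lock */
                up_write(&my_sem);      /* releases the underlying rt_mutex */
        }
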
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index bc2994e..4e804db 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -5,6 +5,60 @@
# error "please don't include this file directly"
#endif

+#ifdef CONFIG_PREEMPT_RT
+
+#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
+#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
+
+#define write_trylock_irqsave(lock, flags) \
+ __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags))
+
+#define write_lock(lock) rt_write_lock(lock)
+#define read_lock(lock) rt_read_lock(lock)
+
+#define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = rt_read_lock_irqsave(lock); \
+ } while (0)
+
+#define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = rt_write_lock_irqsave(lock); \
+ } while (0)
+
+#define read_lock_irq(lock) rt_read_lock(lock)
+#define read_lock_bh(lock) rt_read_lock(lock)
+
+#define write_lock_irq(lock) rt_write_lock(lock)
+#define write_lock_bh(lock) rt_write_lock(lock)
+
+#define read_unlock(lock) rt_read_unlock(lock)
+#define write_unlock(lock) rt_write_unlock(lock)
+#define read_unlock_irq(lock) rt_read_unlock(lock)
+#define write_unlock_irq(lock) rt_write_unlock(lock)
+
+#define read_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ (void) flags; \
+ rt_read_unlock(lock); \
+ } while (0)
+
+#define read_unlock_bh(lock) rt_read_unlock(lock)
+
+#define write_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ (void) flags; \
+ rt_write_unlock(lock); \
+ } while (0)
+
+#define write_unlock_bh(lock) rt_write_unlock(lock)
+
+#else
+
/*
* rwlock related methods
*
@@ -121,5 +175,6 @@ do { \
write_trylock(lock) ? \
1 : ({ local_irq_restore(flags); 0; }); \
})
+#endif

#endif /* __LINUX_RWLOCK_H */
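
On PREEMPT_RT the *_irqsave variants above no longer disable interrupts;
flags stays typechecked for API compatibility but carries no state. A
hypothetical reader (names invented) still compiles unmodified:

        static DEFINE_RWLOCK(my_rwlock);        /* hypothetical example */

        static void reader(void)
        {
                unsigned long flags;

                read_lock_irqsave(&my_rwlock, flags);   /* flags becomes 0 */
                /* ... read side, preemptible on RT ... */
                read_unlock_irqrestore(&my_rwlock, flags); /* flags ignored */
        }
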
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index 38a8f95..17df0dc 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -71,12 +71,9 @@ extern int __down_write_trylock(struct rw_anon_semaphore *sem);
extern void __up_read(struct rw_anon_semaphore *sem);
extern void __up_write(struct rw_anon_semaphore *sem);
extern void __downgrade_write(struct rw_anon_semaphore *sem);
+extern int anon_rwsem_is_locked(struct rw_anon_semaphore *sem);

-static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
-{
- return (sem->activity != 0);
-}
-
+#ifndef CONFIG_PREEMPT_RT
/*
* Non preempt-rt implementation of rw_semaphore. Same as above, but
* restricted vs. ownership. i.e. ownerless locked state and non owner
@@ -125,7 +122,11 @@ do { \
__init_rwsem((sem), #sem, &__key); \
} while (0)

-extern int rwsem_is_locked(struct rw_semaphore *sem);
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return anon_rwsem_is_locked((struct rw_anon_semaphore *)sem);
+}
+#endif /* !PREEMPT_RT */

#endif /* __KERNEL__ */
#endif /* _LINUX_RWSEM_SPINLOCK_H */
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 70e2f4b..e516c81 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -11,6 +11,7 @@

#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/rt_lock.h>
#include <asm/system.h>
#include <asm/atomic.h>

@@ -89,6 +90,59 @@ extern void anon_up_read_non_owner(struct rw_anon_semaphore *sem);
# define anon_up_read_non_owner(sem) anon_up_read(sem)
#endif

+#ifdef CONFIG_PREEMPT_RT
+
+#include <linux/rt_lock.h>
+
+#define init_rwsem(sem) rt_init_rwsem(sem)
+#define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock)
+
+static inline void down_read(struct rw_semaphore *sem)
+{
+ rt_down_read(sem);
+}
+
+static inline int down_read_trylock(struct rw_semaphore *sem)
+{
+ return rt_down_read_trylock(sem);
+}
+
+static inline void down_write(struct rw_semaphore *sem)
+{
+ rt_down_write(sem);
+}
+
+static inline int down_write_trylock(struct rw_semaphore *sem)
+{
+ return rt_down_write_trylock(sem);
+}
+
+static inline void up_read(struct rw_semaphore *sem)
+{
+ rt_up_read(sem);
+}
+
+static inline void up_write(struct rw_semaphore *sem)
+{
+ rt_up_write(sem);
+}
+
+static inline void downgrade_write(struct rw_semaphore *sem)
+{
+ rt_downgrade_write(sem);
+}
+
+static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
+{
+ rt_down_read_nested(sem, subclass);
+}
+
+static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
+{
+ rt_down_write_nested(sem, subclass);
+}
+
+#else
/*
* Non preempt-rt implementations
*/
@@ -136,5 +190,6 @@ static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
{
anon_down_write_nested((struct rw_anon_semaphore *)sem, subclass);
}
+#endif

#endif /* _LINUX_RWSEM_H */
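
The nested variants just forward the lockdep subclass. A hypothetical
caller taking two rwsems of the same lock class in a fixed order (names
invented; SINGLE_DEPTH_NESTING comes from lockdep):

        static void lock_pair(struct rw_semaphore *a, struct rw_semaphore *b)
        {
                down_read(a);
                down_read_nested(b, SINGLE_DEPTH_NESTING);
                /* ... */
                up_read(b);
                up_read(a);
        }
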
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 89fac6a..f4a9e9d 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -58,23 +58,6 @@
#include <asm/system.h>

/*
- * Must define these before including other files, inline functions need them
- */
-#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME
-
-#define LOCK_SECTION_START(extra) \
- ".subsection 1\n\t" \
- extra \
- ".ifndef " LOCK_SECTION_NAME "\n\t" \
- LOCK_SECTION_NAME ":\n\t" \
- ".endif\n"
-
-#define LOCK_SECTION_END \
- ".previous\n\t"
-
-#define __lockfunc __attribute__((section(".spinlock.text")))
-
-/*
* Pull the arch_spinlock_t and arch_rwlock_t definitions:
*/
#include <linux/spinlock_types.h>
@@ -264,6 +247,98 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
# include <linux/spinlock_api_up.h>
#endif

+#ifdef CONFIG_PREEMPT_RT
+
+#include <linux/rt_lock.h>
+
+#define spin_lock(lock) rt_spin_lock(lock)
+#define spin_lock_bh(lock) rt_spin_lock(lock)
+
+#define spin_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock))
+
+#ifdef CONFIG_LOCKDEP
+# define spin_lock_nested(lock, subclass) \
+ rt_spin_lock_nested(lock, subclass)
+
+# define spin_lock_irqsave_nested(lock, flags, subclass) \
+do { \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ rt_spin_lock_nested(lock, subclass); \
+} while (0)
+#else
+# define spin_lock_nested(lock, subclass) \
+ rt_spin_lock(lock)
+
+# define spin_lock_irqsave_nested(lock, flags, subclass) \
+do { \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ rt_spin_lock(lock); \
+} while (0)
+#endif
+
+#define spin_lock_irq(lock) rt_spin_lock(lock)
+
+#define spin_lock_irqsave(lock, flags) \
+do { \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ rt_spin_lock(lock); \
+} while (0)
+
+/* FIXME: we need rt_spin_lock_nested */
+#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
+
+#define spin_unlock(lock) rt_spin_unlock(lock)
+#define spin_unlock_bh(lock) rt_spin_unlock(lock)
+#define spin_unlock_irq(lock) rt_spin_unlock(lock)
+
+#define spin_unlock_irqrestore(lock, flags) \
+do { \
+ typecheck(unsigned long, flags); \
+ (void) flags; \
+ rt_spin_unlock(lock); \
+} while (0)
+
+#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock(lock))
+#define spin_trylock_irq(lock) __cond_lock(lock, rt_spin_trylock(lock))
+
+#define spin_trylock_irqsave(lock, flags) \
+({ \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ __cond_lock(lock, rt_spin_trylock(lock)); \
+})
+
+#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock)
+
+#ifdef CONFIG_GENERIC_LOCKBREAK
+# define spin_is_contended(lock) ((lock)->break_lock)
+#else
+# define spin_is_contended(lock) (((void)(lock), 0))
+#endif
+
+static inline int spin_can_lock(spinlock_t *lock)
+{
+ return !rt_mutex_is_locked(&lock->lock);
+}
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+ return rt_mutex_is_locked(&lock->lock);
+}
+
+static inline void assert_spin_locked(spinlock_t *lock)
+{
+ BUG_ON(!spin_is_locked(lock));
+}
+
+#define atomic_dec_and_lock(atomic, lock) \
+ atomic_dec_and_spin_lock(atomic, lock)
+
+#else
+
/*
* Map the spin_lock functions to the raw variants for PREEMPT_RT=n
*/
@@ -396,4 +471,6 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

+#endif /* !PREEMPT_RT */
+
#endif /* __LINUX_SPINLOCK_H */
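
The atomic_dec_and_lock() mapping keeps the usual refcount-drop idiom
working on RT; it simply resolves to atomic_dec_and_spin_lock(). A
hypothetical sketch (struct and names invented for the example):

        struct my_obj {
                atomic_t refcount;
                struct list_head node;  /* on a list guarded by list_lock */
        };

        static void put_obj(struct my_obj *obj, spinlock_t *list_lock)
        {
                /* Takes list_lock only on the final reference drop. */
                if (atomic_dec_and_lock(&obj->refcount, list_lock)) {
                        list_del(&obj->node);
                        spin_unlock(list_lock);
                        kfree(obj);
                }
        }
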
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index aadd590..b875516 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -9,6 +9,23 @@
* Released under the General Public License (GPL).
*/

+/*
+ * Must define these before including other files, inline functions need them
+ */
+#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME
+
+#define LOCK_SECTION_START(extra) \
+ ".subsection 1\n\t" \
+ extra \
+ ".ifndef " LOCK_SECTION_NAME "\n\t" \
+ LOCK_SECTION_NAME ":\n\t" \
+ ".endif\n"
+
+#define LOCK_SECTION_END \
+ ".previous\n\t"
+
+#define __lockfunc __attribute__((section(".spinlock.text")))
+
#if defined(CONFIG_SMP)
# include <asm/spinlock_types.h>
#else
@@ -61,6 +78,8 @@ typedef struct raw_spinlock {

#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)

+#ifndef CONFIG_PREEMPT_RT
+
typedef struct spinlock {
union {
struct raw_spinlock rlock;
@@ -94,4 +113,6 @@ typedef struct spinlock {

#include <linux/rwlock_types.h>

+#endif /* !PREEMPT_RT */
+
#endif /* __LINUX_SPINLOCK_TYPES_H */
diff --git a/kernel/Makefile b/kernel/Makefile
index a987aa1..74491fc 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -7,7 +7,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
signal.o sys.o kmod.o workqueue.o pid.o \
rcupdate.o extable.o params.o posix-timers.o \
- kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
+ kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o \
hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
async.o range.o
@@ -30,7 +30,10 @@ obj-$(CONFIG_PROFILING) += profile.o
obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-y += time/
+ifneq ($(CONFIG_PREEMPT_RT),y)
+obj-y += mutex.o
obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
+endif
obj-$(CONFIG_LOCKDEP) += lockdep.o
ifeq ($(CONFIG_PROC_FS),y)
obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
@@ -42,6 +45,7 @@ endif
obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
+obj-$(CONFIG_PREEMPT_RT) += rt.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o
ifneq ($(CONFIG_SMP),y)
diff --git a/kernel/fork.c b/kernel/fork.c
index d356851..c67d376 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -85,7 +85,11 @@ int max_threads; /* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

+#ifdef CONFIG_PREEMPT_RT
+DEFINE_RWLOCK(tasklist_lock); /* outer */
+#else
__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
+#endif

#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
diff --git a/kernel/rt.c b/kernel/rt.c
new file mode 100644
index 0000000..97709a6
--- /dev/null
+++ b/kernel/rt.c
@@ -0,0 +1,580 @@
+/*
+ * kernel/rt.c
+ *
+ * Real-Time Preemption Support
+ *
+ * started by Ingo Molnar:
+ *
+ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ *
+ * historic credit for proving that Linux spinlocks can be implemented via
+ * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow
+ * and others) who prototyped it on 2.4 and did lots of comparative
+ * research and analysis; TimeSys, for proving that you can implement a
+ * fully preemptible kernel via the use of IRQ threading and mutexes;
+ * Bill Huey for persuasively arguing on lkml that the mutex model is the
+ * right one; and to MontaVista, who ported pmutexes to 2.6.
+ *
+ * This code is a from-scratch implementation and is not based on pmutexes,
+ * but the idea of converting spinlocks to mutexes is used here too.
+ *
+ * lock debugging, locking tree, deadlock detection:
+ *
+ * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
+ * Released under the General Public License (GPL).
+ *
+ * Includes portions of the generic R/W semaphore implementation from:
+ *
+ * Copyright (c) 2001 David Howells (dhowells@redhat.com).
+ * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
+ * - Derived also from comments by Linus
+ *
+ * Pending ownership of locks and ownership stealing:
+ *
+ * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt
+ *
+ * (also by Steven Rostedt)
+ * - Converted single pi_lock to individual task locks.
+ *
+ * By Esben Nielsen:
+ * Doing priority inheritance with help of the scheduler.
+ *
+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ * - major rework based on Esben Nielsen's initial patch
+ * - replaced thread_info references by task_struct refs
+ * - removed task->pending_owner dependency
+ * - BKL drop/reacquire for semaphore style locks to avoid deadlocks
+ * in the scheduler return path as discussed with Steven Rostedt
+ *
+ * Copyright (C) 2006, Kihon Technologies Inc.
+ * Steven Rostedt <rostedt@goodmis.org>
+ * - debugged and patched Thomas Gleixner's rework.
+ * - added back the cmpxchg to the rework.
+ * - turned the atomic requirement back on for SMP.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include <linux/syscalls.h>
+#include <linux/interrupt.h>
+#include <linux/plist.h>
+#include <linux/fs.h>
+#include <linux/futex.h>
+#include <linux/hrtimer.h>
+
+#include "rtmutex_common.h"
+
+#ifdef CONFIG_PREEMPT_RT
+/*
+ * Unlock these on crash:
+ */
+void zap_rt_locks(void)
+{
+ //trace_lock_init();
+}
+#endif
+
+/*
+ * struct mutex functions
+ */
+void __mutex_init(struct mutex *lock, char *name, struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /*
+ * Make sure we are not reinitializing a held lock:
+ */
+ debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+ lockdep_init_map(&lock->dep_map, name, key, 0);
+#endif
+ __rt_mutex_init(&lock->lock, name);
+}
+EXPORT_SYMBOL(__mutex_init);
+
+void __lockfunc _mutex_lock(struct mutex *lock)
+{
+ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ rt_mutex_lock(&lock->lock);
+}
+EXPORT_SYMBOL(_mutex_lock);
+
+int __lockfunc _mutex_lock_interruptible(struct mutex *lock)
+{
+ int ret;
+
+ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ ret = rt_mutex_lock_interruptible(&lock->lock, 0);
+ if (ret)
+ mutex_release(&lock->dep_map, 1, _RET_IP_);
+ return ret;
+}
+EXPORT_SYMBOL(_mutex_lock_interruptible);
+
+int __lockfunc _mutex_lock_killable(struct mutex *lock)
+{
+ int ret;
+
+ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ ret = rt_mutex_lock_killable(&lock->lock, 0);
+ if (ret)
+ mutex_release(&lock->dep_map, 1, _RET_IP_);
+ return ret;
+}
+EXPORT_SYMBOL(_mutex_lock_killable);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass)
+{
+ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+ rt_mutex_lock(&lock->lock);
+}
+EXPORT_SYMBOL(_mutex_lock_nested);
+
+int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass)
+{
+ int ret;
+
+ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+ ret = rt_mutex_lock_interruptible(&lock->lock, 0);
+ if (ret)
+ mutex_release(&lock->dep_map, 1, _RET_IP_);
+ return ret;
+}
+EXPORT_SYMBOL(_mutex_lock_interruptible_nested);
+
+int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass)
+{
+ int ret;
+
+ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+ ret = rt_mutex_lock_killable(&lock->lock, 0);
+ if (ret)
+ mutex_release(&lock->dep_map, 1, _RET_IP_);
+ return ret;
+}
+EXPORT_SYMBOL(_mutex_lock_killable_nested);
+#endif
+
+int __lockfunc _mutex_trylock(struct mutex *lock)
+{
+ int ret = rt_mutex_trylock(&lock->lock);
+
+ if (ret)
+ mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+
+ return ret;
+}
+EXPORT_SYMBOL(_mutex_trylock);
+
+void __lockfunc _mutex_unlock(struct mutex *lock)
+{
+ mutex_release(&lock->dep_map, 1, _RET_IP_);
+ rt_mutex_unlock(&lock->lock);
+}
+EXPORT_SYMBOL(_mutex_unlock);
+
+/*
+ * rwlock_t functions
+ */
+int __lockfunc rt_write_trylock(rwlock_t *rwlock)
+{
+ int ret = rt_mutex_trylock(&rwlock->lock);
+
+ if (ret)
+ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
+
+ return ret;
+}
+EXPORT_SYMBOL(rt_write_trylock);
+
+int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags)
+{
+ *flags = 0;
+ return rt_write_trylock(rwlock);
+}
+EXPORT_SYMBOL(rt_write_trylock_irqsave);
+
+int __lockfunc rt_read_trylock(rwlock_t *rwlock)
+{
+ struct rt_mutex *lock = &rwlock->lock;
+ unsigned long flags;
+ int ret;
+
+ /*
+ * Read locks within the self-held write lock succeed.
+ */
+ spin_lock_irqsave(&lock->wait_lock, flags);
+ if (rt_mutex_real_owner(lock) == current) {
+ spin_unlock_irqrestore(&lock->wait_lock, flags);
+ rwlock->read_depth++;
+ rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+ ret = rt_mutex_trylock(lock);
+ if (ret)
+ rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
+
+ return ret;
+}
+EXPORT_SYMBOL(rt_read_trylock);
+
+void __lockfunc rt_write_lock(rwlock_t *rwlock)
+{
+ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+ __rt_spin_lock(&rwlock->lock);
+}
+EXPORT_SYMBOL(rt_write_lock);
+
+void __lockfunc rt_read_lock(rwlock_t *rwlock)
+{
+ unsigned long flags;
+ struct rt_mutex *lock = &rwlock->lock;
+
+ rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
+ /*
+ * Read locks within the write lock succeed.
+ */
+ spin_lock_irqsave(&lock->wait_lock, flags);
+ if (rt_mutex_real_owner(lock) == current) {
+ spin_unlock_irqrestore(&lock->wait_lock, flags);
+ rwlock->read_depth++;
+ return;
+ }
+ spin_unlock_irqrestore(&lock->wait_lock, flags);
+ __rt_spin_lock(lock);
+}
+
+EXPORT_SYMBOL(rt_read_lock);
+
+void __lockfunc rt_write_unlock(rwlock_t *rwlock)
+{
+ /* NOTE: we always pass in '1' for nested, for simplicity */
+ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
+ __rt_spin_unlock(&rwlock->lock);
+}
+EXPORT_SYMBOL(rt_write_unlock);
+
+void __lockfunc rt_read_unlock(rwlock_t *rwlock)
+{
+ struct rt_mutex *lock = &rwlock->lock;
+ unsigned long flags;
+
+ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
+ // TRACE_WARN_ON(lock->save_state != 1);
+ /*
+ * Read locks within the self-held write lock succeed.
+ */
+ spin_lock_irqsave(&lock->wait_lock, flags);
+ if (rt_mutex_real_owner(lock) == current && rwlock->read_depth) {
+ spin_unlock_irqrestore(&lock->wait_lock, flags);
+ rwlock->read_depth--;
+ return;
+ }
+ spin_unlock_irqrestore(&lock->wait_lock, flags);
+ __rt_spin_unlock(&rwlock->lock);
+}
+EXPORT_SYMBOL(rt_read_unlock);
+
+unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock)
+{
+ rt_write_lock(rwlock);
+
+ return 0;
+}
+EXPORT_SYMBOL(rt_write_lock_irqsave);
+
+unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock)
+{
+ rt_read_lock(rwlock);
+
+ return 0;
+}
+EXPORT_SYMBOL(rt_read_lock_irqsave);
+
+void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /*
+ * Make sure we are not reinitializing a held lock:
+ */
+ debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
+ lockdep_init_map(&rwlock->dep_map, name, key, 0);
+#endif
+ __rt_mutex_init(&rwlock->lock, name);
+ rwlock->read_depth = 0;
+}
+EXPORT_SYMBOL(__rt_rwlock_init);
+
+/*
+ * rw_semaphores
+ */
+
+void rt_up_write(struct rw_semaphore *rwsem)
+{
+ rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
+ rt_mutex_unlock(&rwsem->lock);
+}
+EXPORT_SYMBOL(rt_up_write);
+
+void rt_up_read(struct rw_semaphore *rwsem)
+{
+ unsigned long flags;
+
+ rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
+ /*
+ * Read locks within the self-held write lock succeed.
+ */
+ spin_lock_irqsave(&rwsem->lock.wait_lock, flags);
+ if (rt_mutex_real_owner(&rwsem->lock) == current && rwsem->read_depth) {
+ spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
+ rwsem->read_depth--;
+ return;
+ }
+ spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
+ rt_mutex_unlock(&rwsem->lock);
+}
+EXPORT_SYMBOL(rt_up_read);
+
+/*
+ * downgrade a write lock into a read lock
+ * - not implemented on PREEMPT_RT, so trap any caller
+ */
+void rt_downgrade_write(struct rw_semaphore *rwsem)
+{
+ BUG();
+}
+EXPORT_SYMBOL(rt_downgrade_write);
+
+int rt_down_write_trylock(struct rw_semaphore *rwsem)
+{
+ int ret = rt_mutex_trylock(&rwsem->lock);
+
+ if (ret)
+ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
+ return ret;
+}
+EXPORT_SYMBOL(rt_down_write_trylock);
+
+void rt_down_write(struct rw_semaphore *rwsem)
+{
+ rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
+ rt_mutex_lock(&rwsem->lock);
+}
+EXPORT_SYMBOL(rt_down_write);
+
+void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass)
+{
+ rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
+ rt_mutex_lock(&rwsem->lock);
+}
+EXPORT_SYMBOL(rt_down_write_nested);
+
+int rt_down_read_trylock(struct rw_semaphore *rwsem)
+{
+ unsigned long flags;
+ int ret;
+
+ /*
+ * Read locks within the self-held write lock succeed.
+ */
+ spin_lock_irqsave(&rwsem->lock.wait_lock, flags);
+ if (rt_mutex_real_owner(&rwsem->lock) == current) {
+ spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
+ rwsem_acquire_read(&rwsem->dep_map, 0, 1, _RET_IP_);
+ rwsem->read_depth++;
+ return 1;
+ }
+ spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
+
+ ret = rt_mutex_trylock(&rwsem->lock);
+ if (ret)
+ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
+ return ret;
+}
+EXPORT_SYMBOL(rt_down_read_trylock);
+
+static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
+{
+ unsigned long flags;
+
+ rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);
+
+ /*
+ * Read locks within the write lock succeed.
+ */
+ spin_lock_irqsave(&rwsem->lock.wait_lock, flags);
+
+ if (rt_mutex_real_owner(&rwsem->lock) == current) {
+ spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
+ rwsem->read_depth++;
+ return;
+ }
+ spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
+ rt_mutex_lock(&rwsem->lock);
+}
+
+void rt_down_read(struct rw_semaphore *rwsem)
+{
+ __rt_down_read(rwsem, 0);
+}
+EXPORT_SYMBOL(rt_down_read);
+
+void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass)
+{
+ __rt_down_read(rwsem, subclass);
+}
+EXPORT_SYMBOL(rt_down_read_nested);
+
+void __rt_rwsem_init(struct rw_semaphore *rwsem, char *name,
+ struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /*
+ * Make sure we are not reinitializing a held lock:
+ */
+ debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem));
+ lockdep_init_map(&rwsem->dep_map, name, key, 0);
+#endif
+ __rt_mutex_init(&rwsem->lock, name);
+ rwsem->read_depth = 0;
+}
+EXPORT_SYMBOL(__rt_rwsem_init);
+
+/*
+ * Semaphores
+ */
+/*
+ * Linux Semaphores implemented via RT-mutexes.
+ *
+ * In the down() variants we use the mutex as the semaphore blocking
+ * object: we always acquire it, decrease the counter and keep the lock
+ * locked if we did the 1->0 transition. The next down() will then block.
+ *
+ * In the up() path we atomically increase the counter and do the
+ * unlock if we were the one doing the 0->1 transition.
+ */
+
+static inline void __down_complete(struct semaphore *sem)
+{
+ int count = atomic_dec_return(&sem->count);
+
+ if (unlikely(count > 0))
+ rt_mutex_unlock(&sem->lock);
+}
+
+void rt_down(struct semaphore *sem)
+{
+ rt_mutex_lock(&sem->lock);
+ __down_complete(sem);
+}
+EXPORT_SYMBOL(rt_down);
+
+int rt_down_interruptible(struct semaphore *sem)
+{
+ int ret;
+
+ ret = rt_mutex_lock_interruptible(&sem->lock, 0);
+ if (ret)
+ return ret;
+ __down_complete(sem);
+ return 0;
+}
+EXPORT_SYMBOL(rt_down_interruptible);
+
+int rt_down_timeout(struct semaphore *sem, long jiff)
+{
+ struct hrtimer_sleeper t;
+ struct timespec ts;
+ unsigned long expires = jiffies + jiff + 1;
+ int ret;
+
+ /*
+ * rt_mutex_slowlock() can wait interruptibly, but that means
+ * TASK_INTERRUPTIBLE, while down_timeout() must use
+ * TASK_UNINTERRUPTIBLE. To handle this we loop whenever a
+ * signal interrupts the wait and recalculate the remaining timeout.
+ * Yes Thomas, this is a hack! But we can fix it properly later.
+ */
+ do {
+ jiffies_to_timespec(jiff, &ts);
+ hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ t.timer._expires = timespec_to_ktime(ts);
+
+ ret = rt_mutex_timed_lock(&sem->lock, &t, 0);
+ if (ret != -EINTR)
+ break;
+
+ /* a signal occurred, but down_timeout() does not handle signals */
+ jiff = expires - jiffies;
+
+ } while (jiff > 0);
+
+ if (!ret)
+ __down_complete(sem);
+ else
+ ret = -ETIME;
+
+ return ret;
+}
+EXPORT_SYMBOL(rt_down_timeout);
+
+/*
+ * Try to down the semaphore: returns 0 on success, 1 on failure (inverted).
+ */
+int rt_down_trylock(struct semaphore *sem)
+{
+ /*
+ * Here we are a tiny bit different from ordinary Linux semaphores,
+ * because we can get 'transient' locking failures when, say, a
+ * process decreases the count from 9 to 8 and locks/releases the
+ * embedded mutex internally. It would be quite complex to remove
+ * these transient failures, so let's try it the simple way first:
+ */
+ if (rt_mutex_trylock(&sem->lock)) {
+ __down_complete(sem);
+ return 0;
+ }
+ return 1;
+}
+EXPORT_SYMBOL(rt_down_trylock);
| + |
| +void rt_up(struct semaphore *sem) |
| +{ |
| + int count; |
| + |
| + /* |
| + * Disable preemption to make sure a highprio trylock-er cannot |
| + * preempt us here and get into an infinite loop: |
| + */ |
| + preempt_disable(); |
| + count = atomic_inc_return(&sem->count); |
| + /* |
| + * If we did the 0 -> 1 transition then we are the ones to unlock it: |
| + */ |
| + if (likely(count == 1)) |
| + rt_mutex_unlock(&sem->lock); |
| + preempt_enable(); |
| +} |
| +EXPORT_SYMBOL(rt_up); |
| + |
| +void __sema_init(struct semaphore *sem, int val, |
| + char *name, char *file, int line) |
| +{ |
| + atomic_set(&sem->count, val); |
| + switch (val) { |
| + case 0: |
| + __rt_mutex_init(&sem->lock, name); |
| + rt_mutex_lock(&sem->lock); |
| + break; |
| + default: |
| + __rt_mutex_init(&sem->lock, name); |
| + break; |
| + } |
| +} |
| +EXPORT_SYMBOL(__sema_init); |
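
A worked trace of the counting scheme above, for a semaphore initialized
to 2 (hypothetical sequence, shown step by step):

        down(): rt_mutex_lock(), count 2 -> 1, count > 0  => unlock mutex
        down(): rt_mutex_lock(), count 1 -> 0, 1->0 hit   => keep mutex locked
        down(): rt_mutex_lock() now blocks until an up()
        up():   count 0 -> 1, the 0->1 transition         => unlock the mutex

The preempt_disable() in rt_up() keeps a higher-priority rt_down_trylock()
caller from spinning on the transient locked state between the increment
and the unlock.
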
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 4755b98..f65f7cd 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,

EXPORT_SYMBOL(__raw_spin_lock_init);

+#ifndef CONFIG_PREEMPT_RT
void __rwlock_init(rwlock_t *lock, const char *name,
struct lock_class_key *key)
{
@@ -46,8 +47,8 @@ void __rwlock_init(rwlock_t *lock, const char *name,
lock->owner = SPINLOCK_OWNER_INIT;
lock->owner_cpu = -1;
}
-
EXPORT_SYMBOL(__rwlock_init);
+#endif

static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
@@ -154,6 +155,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock)
arch_spin_unlock(&lock->raw_lock);
}

+#ifndef CONFIG_PREEMPT_RT
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
if (!debug_locks_off())
@@ -295,3 +297,5 @@ void do_raw_write_unlock(rwlock_t *lock)
debug_write_unlock(lock);
arch_write_unlock(&lock->raw_lock);
}
+
+#endif
--
1.7.0.4