Subject: rt/locking: Consolidate lock functions
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 28 Jul 2017 12:26:59 +0200

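The rt_spin_*__no_mg() variants and the spin_*_local() wrappers in
locallock.h exist only to avoid a nested migrate_disable() when the
caller has already disabled migration. migrate_disable() is a simple
nestable counter, so maintaining a parallel set of lock functions for
that buys nothing.

Consolidate the lot: handle migration state in the outermost lock
functions only, i.e. in the spinlock_t wrappers and in
rt_read_lock()/rt_write_lock(), and let the lockdep-less
__rt_spin_lock()/__rt_spin_unlock() helpers operate on the raw
rt_mutex without touching migration state. That allows removing the
__no_mg() variants and the spin_*_local() wrappers. Declare a
__rt_spin_trylock() helper for derived types as well.

While at it, move the lockdep annotation in rt_spin_lock_nested()
ahead of the actual lock acquisition, so lockdep can report a
deadlock before the task blocks on the lock.
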
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
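As an illustration, the rwlock read side call chain (reconstructed
from the hunks below) moves the migration handling up one level:

  Before:                            After:

    rt_read_lock()                     rt_read_lock()
      rwlock_acquire()                   migrate_disable()
      __rt_spin_lock()                   rwlock_acquire()
        migrate_disable()                __rt_spin_lock()
        rt_spin_lock_fastlock()            rt_spin_lock_fastlock()
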
 include/linux/locallock.h   |   22 +++-------------------
 include/linux/spinlock_rt.h |    7 ++-----
 kernel/locking/rt.c         |    2 ++
 kernel/locking/rtmutex.c    |   36 +-----------------------------------
 4 files changed, 8 insertions(+), 59 deletions(-)

--- a/include/linux/locallock.h
+++ b/include/linux/locallock.h
@@ -36,26 +36,10 @@ struct local_irq_lock {
 			spin_lock_init(&per_cpu(lvar, __cpu).lock);	\
 	} while (0)
 
-/*
- * spin_lock|trylock|unlock_local flavour that does not migrate disable
- * used for __local_lock|trylock|unlock where get_local_var/put_local_var
- * already takes care of the migrate_disable/enable
- * for CONFIG_PREEMPT_BASE map to the normal spin_* calls.
- */
-#ifdef CONFIG_PREEMPT_RT_FULL
-# define spin_lock_local(lock)			rt_spin_lock__no_mg(lock)
-# define spin_trylock_local(lock)		rt_spin_trylock__no_mg(lock)
-# define spin_unlock_local(lock)		rt_spin_unlock__no_mg(lock)
-#else
-# define spin_lock_local(lock)			spin_lock(lock)
-# define spin_trylock_local(lock)		spin_trylock(lock)
-# define spin_unlock_local(lock)		spin_unlock(lock)
-#endif
-
 static inline void __local_lock(struct local_irq_lock *lv)
 {
 	if (lv->owner != current) {
-		spin_lock_local(&lv->lock);
+		spin_lock(&lv->lock);
 		LL_WARN(lv->owner);
 		LL_WARN(lv->nestcnt);
 		lv->owner = current;
@@ -71,7 +55,7 @@ static inline void __local_lock(struct l
 
 static inline int __local_trylock(struct local_irq_lock *lv)
 {
-	if (lv->owner != current && spin_trylock_local(&lv->lock)) {
+	if (lv->owner != current && spin_trylock(&lv->lock)) {
 		LL_WARN(lv->owner);
 		LL_WARN(lv->nestcnt);
 		lv->owner = current;
@@ -101,7 +85,7 @@ static inline void __local_unlock(struct
 		return;
 
 	lv->owner = NULL;
-	spin_unlock_local(&lv->lock);
+	spin_unlock(&lv->lock);
 }
 
 #define local_unlock(lvar)					\
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -18,10 +18,6 @@ do {							\
 	__rt_spin_lock_init(slock, #slock, &__key);	\
 } while (0)
 
-void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock);
-void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock);
-int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock);
-
 extern void __lockfunc rt_spin_lock(spinlock_t *lock);
 extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
 extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
@@ -35,9 +31,10 @@ extern int atomic_dec_and_spin_lock(atom
 /*
  * lockdep-less calls, for derived types like rwlock:
  * (for trylock they can use rt_mutex_trylock() directly.
+ * Migrate disable handling must be done at the call site.
  */
-extern void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock);
 extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
+extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock);
 extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
 
 #define spin_lock(lock)			rt_spin_lock(lock)
--- a/kernel/locking/rt.c
+++ b/kernel/locking/rt.c
@@ -239,6 +239,7 @@ EXPORT_SYMBOL(rt_read_trylock);
 
 void __lockfunc rt_write_lock(rwlock_t *rwlock)
 {
+	migrate_disable();
 	rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
 	__rt_spin_lock(&rwlock->lock);
 }
@@ -248,9 +249,10 @@ void __lockfunc rt_read_lock(rwlock_t *r
 {
 	struct rt_mutex *lock = &rwlock->lock;
 
+	migrate_disable();
 	rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
 	__rt_spin_lock(lock);
 }
 EXPORT_SYMBOL(rt_read_lock);
 
 void __lockfunc rt_write_unlock(rwlock_t *rwlock)
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1146,13 +1146,6 @@ static void noinline __sched rt_spin_lo
 	rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
 }
 
-void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock)
-{
-	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
-	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-}
-EXPORT_SYMBOL(rt_spin_lock__no_mg);
-
 void __lockfunc rt_spin_lock(spinlock_t *lock)
 {
 	migrate_disable();
@@ -1163,35 +1156,19 @@ EXPORT_SYMBOL(rt_spin_lock);
 
 void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
 {
-	migrate_disable();
 	rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
 }
-EXPORT_SYMBOL(__rt_spin_lock);
-
-void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock)
-{
-	rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
-}
-EXPORT_SYMBOL(__rt_spin_lock__no_mg);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
 {
 	migrate_disable();
-	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
 	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
 }
 EXPORT_SYMBOL(rt_spin_lock_nested);
 #endif
 
-void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock)
-{
-	/* NOTE: we always pass in '1' for nested, for simplicity */
-	spin_release(&lock->dep_map, 1, _RET_IP_);
-	rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
-}
-EXPORT_SYMBOL(rt_spin_unlock__no_mg);
-
 void __lockfunc rt_spin_unlock(spinlock_t *lock)
 {
 	/* NOTE: we always pass in '1' for nested, for simplicity */
@@ -1219,17 +1196,6 @@ void __lockfunc rt_spin_unlock_wait(spin
 }
 EXPORT_SYMBOL(rt_spin_unlock_wait);
 
-int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock)
-{
-	int ret;
-
-	ret = rt_mutex_trylock(&lock->lock);
-	if (ret)
-		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-	return ret;
-}
-EXPORT_SYMBOL(rt_spin_trylock__no_mg);
-
 int __lockfunc rt_spin_trylock(spinlock_t *lock)
 {
 	int ret;