Subject: lglocks: Provide an RT-safe variant
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 15 Jun 2011 11:02:21 +0200

lglocks by themselves spin in order to acquire the lock. That ends up
badly when a high-priority task keeps spinning while a low-priority
task owns the lock: the spinner can keep the owner from ever running,
so the lock is never released, and a spinning waiter has no way to
boost the owner.

Let's replace them with rt_mutex based locks so they can sleep, track
their owner and boost the owner's priority if needed. Since a sleeping
lock must not be taken with preemption disabled, the preempt_disable()
calls become either migrate_disable() or the _nort() variants, which
keep their old meaning on !RT.
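
As an illustration, here is a minimal hypothetical user (example_lock,
example_list and the functions are made up for this sketch and are not
part of the patch). None of the call sites change; only the lock type
underneath differs between RT and !RT:

  #include <linux/init.h>
  #include <linux/lglock.h>
  #include <linux/list.h>
  #include <linux/percpu.h>

  static DEFINE_PER_CPU(struct list_head, example_list);
  DEFINE_STATIC_LGLOCK(example_lock);

  static int __init example_init(void)
  {
  	int cpu;

  	for_each_possible_cpu(cpu)
  		INIT_LIST_HEAD(per_cpu_ptr(&example_list, cpu));
  	lg_lock_init(&example_lock, "example_lock");
  	return 0;
  }
  core_initcall(example_init);

  /* Fast path: serialize against this CPU's list only. */
  static void example_add(struct list_head *entry)
  {
  	lg_local_lock(&example_lock);
  	list_add(entry, this_cpu_ptr(&example_list));
  	lg_local_unlock(&example_lock);
  }

  /* Slow path: take every CPU's lock, then walk all lists. */
  static int example_count_all(void)
  {
  	struct list_head *pos;
  	int cpu, n = 0;

  	lg_global_lock(&example_lock);
  	for_each_possible_cpu(cpu)
  		list_for_each(pos, per_cpu_ptr(&example_list, cpu))
  			n++;
  	lg_global_unlock(&example_lock);
  	return n;
  }

On RT, lg_local_lock() above boils down to migrate_disable() plus
__rt_spin_lock() on this CPU's rt_mutex, so a blocked high-priority
waiter boosts the current owner instead of spinning.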
| |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| --- |
| include/linux/lglock.h | 18 +++++++++++++ |
| kernel/locking/lglock.c | 62 ++++++++++++++++++++++++++++++------------------ |
| 2 files changed, 58 insertions(+), 22 deletions(-) |
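
For reference, a simplified sketch of what the RT tree helpers used
below are assumed to expand to (not part of this patch; details vary
between RT releases):

  #ifdef CONFIG_PREEMPT_RT_FULL
  /*
   * Preemption stays enabled; migrate_disable()/migrate_enable()
   * are real functions that pin the task to its current CPU while
   * leaving it preemptible.
   */
  # define preempt_disable_nort()	barrier()
  # define preempt_enable_nort()	barrier()
  #else
  /* On !RT everything maps back to plain preemption control. */
  # define preempt_disable_nort()	preempt_disable()
  # define preempt_enable_nort()	preempt_enable()
  # define migrate_disable()		preempt_disable()
  # define migrate_enable()		preempt_enable()
  #endif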

--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -34,13 +34,30 @@
 #endif
 
 struct lglock {
+#ifdef CONFIG_PREEMPT_RT_FULL
+	struct rt_mutex __percpu *lock;
+#else
 	arch_spinlock_t __percpu *lock;
+#endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lock_class_key lock_key;
 	struct lockdep_map lock_dep_map;
 #endif
 };
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define DEFINE_LGLOCK(name)						\
+	static DEFINE_PER_CPU(struct rt_mutex, name ## _lock)		\
+	= __RT_MUTEX_INITIALIZER(name ## _lock);			\
+	struct lglock name = { .lock = &name ## _lock }
+
+# define DEFINE_STATIC_LGLOCK(name)					\
+	static DEFINE_PER_CPU(struct rt_mutex, name ## _lock)		\
+	= __RT_MUTEX_INITIALIZER(name ## _lock);			\
+	static struct lglock name = { .lock = &name ## _lock }
+
+#else
+
 #define DEFINE_LGLOCK(name)						\
 	static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock)		\
 	= __ARCH_SPIN_LOCK_UNLOCKED;					\
@@ -50,6 +67,7 @@ struct lglock {
 	static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock)		\
 	= __ARCH_SPIN_LOCK_UNLOCKED;					\
 	static struct lglock name = { .lock = &name ## _lock }
+#endif
 
 void lg_lock_init(struct lglock *lg, char *name);

--- a/kernel/locking/lglock.c
+++ b/kernel/locking/lglock.c
@@ -4,6 +4,15 @@
 #include <linux/cpu.h>
 #include <linux/string.h>
 
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define lg_lock_ptr		arch_spinlock_t
+# define lg_do_lock(l)		arch_spin_lock(l)
+# define lg_do_unlock(l)	arch_spin_unlock(l)
+#else
+# define lg_lock_ptr		struct rt_mutex
+# define lg_do_lock(l)		__rt_spin_lock(l)
+# define lg_do_unlock(l)	__rt_spin_unlock(l)
+#endif
 /*
  * Note there is no uninit, so lglocks cannot be defined in
  * modules (but it's fine to use them from there)
@@ -12,51 +21,60 @@
 
 void lg_lock_init(struct lglock *lg, char *name)
 {
+#ifdef CONFIG_PREEMPT_RT_FULL
+	int i;
+
+	for_each_possible_cpu(i) {
+		struct rt_mutex *lock = per_cpu_ptr(lg->lock, i);
+
+		rt_mutex_init(lock);
+	}
+#endif
 	LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0);
 }
 EXPORT_SYMBOL(lg_lock_init);
 
 void lg_local_lock(struct lglock *lg)
 {
-	arch_spinlock_t *lock;
+	lg_lock_ptr *lock;
 
-	preempt_disable();
+	migrate_disable();
 	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
 	lock = this_cpu_ptr(lg->lock);
-	arch_spin_lock(lock);
+	lg_do_lock(lock);
 }
 EXPORT_SYMBOL(lg_local_lock);
 
 void lg_local_unlock(struct lglock *lg)
 {
-	arch_spinlock_t *lock;
+	lg_lock_ptr *lock;
 
 	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
 	lock = this_cpu_ptr(lg->lock);
-	arch_spin_unlock(lock);
-	preempt_enable();
+	lg_do_unlock(lock);
+	migrate_enable();
 }
 EXPORT_SYMBOL(lg_local_unlock);
 
 void lg_local_lock_cpu(struct lglock *lg, int cpu)
 {
-	arch_spinlock_t *lock;
+	lg_lock_ptr *lock;
 
-	preempt_disable();
+	preempt_disable_nort();
 	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
 	lock = per_cpu_ptr(lg->lock, cpu);
-	arch_spin_lock(lock);
+	lg_do_lock(lock);
 }
 EXPORT_SYMBOL(lg_local_lock_cpu);
 
 void lg_local_unlock_cpu(struct lglock *lg, int cpu)
 {
-	arch_spinlock_t *lock;
+	lg_lock_ptr *lock;
 
 	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
 	lock = per_cpu_ptr(lg->lock, cpu);
-	arch_spin_unlock(lock);
-	preempt_enable();
+	lg_do_unlock(lock);
+	preempt_enable_nort();
 }
 EXPORT_SYMBOL(lg_local_unlock_cpu);
 
@@ -70,15 +88,15 @@ void lg_double_lock(struct lglock *lg, i
 
-	preempt_disable();
+	preempt_disable_nort();
 	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
-	arch_spin_lock(per_cpu_ptr(lg->lock, cpu1));
-	arch_spin_lock(per_cpu_ptr(lg->lock, cpu2));
+	lg_do_lock(per_cpu_ptr(lg->lock, cpu1));
+	lg_do_lock(per_cpu_ptr(lg->lock, cpu2));
 }
 
 void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2)
 {
 	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
-	arch_spin_unlock(per_cpu_ptr(lg->lock, cpu1));
-	arch_spin_unlock(per_cpu_ptr(lg->lock, cpu2));
+	lg_do_unlock(per_cpu_ptr(lg->lock, cpu1));
+	lg_do_unlock(per_cpu_ptr(lg->lock, cpu2));
-	preempt_enable();
+	preempt_enable_nort();
 }
 
@@ -86,12 +104,12 @@ void lg_global_lock(struct lglock *lg)
 {
 	int i;
 
-	preempt_disable();
+	preempt_disable_nort();
 	lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
 	for_each_possible_cpu(i) {
-		arch_spinlock_t *lock;
+		lg_lock_ptr *lock;
 		lock = per_cpu_ptr(lg->lock, i);
-		arch_spin_lock(lock);
+		lg_do_lock(lock);
 	}
 }
 EXPORT_SYMBOL(lg_global_lock);
@@ -102,10 +120,10 @@ void lg_global_unlock(struct lglock *lg)
 
 	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
 	for_each_possible_cpu(i) {
-		arch_spinlock_t *lock;
+		lg_lock_ptr *lock;
 		lock = per_cpu_ptr(lg->lock, i);
-		arch_spin_unlock(lock);
+		lg_do_unlock(lock);
 	}
-	preempt_enable();
+	preempt_enable_nort();
 }
 EXPORT_SYMBOL(lg_global_unlock);