Subject: locking/rt-rwlock: Make reader biased rwlocks selectable
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 12 Jul 2017 17:04:09 +0200
Make the reader biased rwlock implementation selectable via a Kconfig
switch on PREEMPT_RT_FULL and implement the rwlock_t interface on top
of it, including lockdep annotations and migrate disable/enable
handling. The regular RT rwlock implementation remains the default.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/rwlock_rt.h | 14 ++++----
include/linux/rwlock_types_rt.h | 36 +++++++++++---------
kernel/Kconfig.locks | 17 +++++++++
kernel/locking/rt.c | 2 +
kernel/locking/rwlock-rt.c | 70 ++++++++++++++++++++++++++++++++++++++++
5 files changed, 116 insertions(+), 23 deletions(-)
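
Note: the switch is transparent to rwlock_t users; the locking API is
unchanged. As a rough illustration (hypothetical code, not part of this
patch), a typical reader/writer pair compiles identically with either
implementation:

#include <linux/spinlock.h>

/* Hypothetical example only: rwlock_t usage is the same whether
 * RWLOCK_RT_READER_BIASED is enabled or not. */
static DEFINE_RWLOCK(example_lock);
static int example_value;

int example_read(void)
{
	int val;

	read_lock(&example_lock);
	val = example_value;
	read_unlock(&example_lock);
	return val;
}

void example_write(int val)
{
	write_lock(&example_lock);
	example_value = val;
	write_unlock(&example_lock);
}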
--- a/include/linux/rwlock_rt.h
+++ b/include/linux/rwlock_rt.h
@@ -5,13 +5,6 @@
#error Do not include directly. Use spinlock.h
#endif
-#define rwlock_init(rwl) \
-do { \
- static struct lock_class_key __key; \
- \
- __rt_rwlock_init(rwl, #rwl, &__key); \
-} while (0)
-
extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
extern void __lockfunc rt_read_lock(rwlock_t *rwlock);
extern int __lockfunc rt_write_trylock(rwlock_t *rwlock);
@@ -101,6 +94,13 @@ static inline int __write_trylock_rt_irq
rt_write_unlock(lock); \
} while (0)
+#define rwlock_init(rwl) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __rt_rwlock_init(rwl, #rwl, &__key); \
+} while (0)
+
/*
* Internal functions made global for CPU pinning
*/
--- a/include/linux/rwlock_types_rt.h
+++ b/include/linux/rwlock_types_rt.h
@@ -5,6 +5,13 @@
#error "Do not include directly. Include spinlock_types.h instead"
#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
+#ifndef CONFIG_RWLOCK_RT_READER_BIASED
/*
* rwlocks - rtmutex which allows single reader recursion
*/
@@ -16,12 +23,6 @@ typedef struct {
#endif
} rwlock_t;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
-#else
-# define RW_DEP_MAP_INIT(lockname)
-#endif
-
#define __RW_LOCK_UNLOCKED(name) \
{ .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock), \
RW_DEP_MAP_INIT(name) }
@@ -29,8 +30,16 @@ typedef struct {
#define DEFINE_RWLOCK(name) \
rwlock_t name = __RW_LOCK_UNLOCKED(name)
-#define READER_BIAS (1U << 31)
-#define WRITER_BIAS (1U << 30)
+#else /* CONFIG_RWLOCK_RT_READER_BIASED */
+
+typedef struct rt_rw_lock rwlock_t;
+
+#define __RW_LOCK_UNLOCKED(name) __RWLOCK_RT_INITIALIZER(name)
+
+#define DEFINE_RWLOCK(name) \
+ rwlock_t name = __RW_LOCK_UNLOCKED(name)
+
+#endif /* !CONFIG_RWLOCK_RT_READER_BIASED */
/*
* A reader biased implementation primarily for CPU pinning.
@@ -46,6 +55,9 @@ struct rt_rw_lock {
#endif
};
+#define READER_BIAS (1U << 31)
+#define WRITER_BIAS (1U << 30)
+
#define __RWLOCK_RT_INITIALIZER(name) \
{ \
.readers = ATOMIC_INIT(READER_BIAS), \
@@ -63,12 +75,4 @@ void __rwlock_biased_rt_init(struct rt_r
__rwlock_biased_rt_init((rwlock), #rwlock, &__key); \
} while (0)
-int __read_rt_trylock(struct rt_rw_lock *rwlock);
-void __read_rt_lock(struct rt_rw_lock *rwlock);
-void __read_rt_unlock(struct rt_rw_lock *rwlock);
-
-void __write_rt_lock(struct rt_rw_lock *rwlock);
-int __write_rt_trylock(struct rt_rw_lock *rwlock);
-void __write_rt_unlock(struct rt_rw_lock *rwlock);
-
#endif
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -248,3 +248,20 @@ config ARCH_USE_QUEUED_RWLOCKS
config QUEUED_RWLOCKS
def_bool y if ARCH_USE_QUEUED_RWLOCKS
depends on SMP
+
+if PREEMPT_RT_FULL
+
+menu "RT Locking"
+
+config RWLOCK_RT_READER_BIASED
+ bool "Reader biased RWLOCK implementation for Preempt-RT"
+ default n
+ help
+ This option provides an alternative RWLOCK implementation for
+ PREEMPT-RT. The new implementation is not as writer friendly as
+ the regular RT implementation or mainline, but nothing RT related
+ should be affected. The switch is provided so that the regular
+ implementation can be restored in case something stalls.
+endmenu
+
+endif
--- a/kernel/locking/rt.c
+++ b/kernel/locking/rt.c
@@ -198,6 +198,7 @@ void __lockfunc _mutex_unlock(struct mut
}
EXPORT_SYMBOL(_mutex_unlock);
+#ifndef CONFIG_RWLOCK_RT_READER_BIASED
/*
* rwlock_t functions
*/
@@ -280,6 +281,7 @@ void __rt_rwlock_init(rwlock_t *rwlock,
rwlock->lock.save_state = 1;
}
EXPORT_SYMBOL(__rt_rwlock_init);
+#endif
/**
* atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
--- a/kernel/locking/rwlock-rt.c
+++ b/kernel/locking/rwlock-rt.c
@@ -244,3 +244,73 @@ void __write_rt_unlock(struct rt_rw_lock
raw_spin_lock_irqsave(&m->wait_lock, flags);
__write_unlock_common(lock, WRITER_BIAS, flags);
}
+
+#ifdef CONFIG_RWLOCK_RT_READER_BIASED
+
+int __lockfunc rt_read_trylock(rwlock_t *rwlock)
+{
+ int ret;
+
+ migrate_disable();
+ ret = __read_rt_trylock(rwlock);
+ if (ret)
+ rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
+ else
+ migrate_enable();
+ return ret;
+}
+EXPORT_SYMBOL(rt_read_trylock);
+
+int __lockfunc rt_write_trylock(rwlock_t *rwlock)
+{
+ int ret;
+
+ migrate_disable();
+ ret = __write_rt_trylock(rwlock);
+ if (ret)
+ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
+ else
+ migrate_enable();
+ return ret;
+}
+EXPORT_SYMBOL(rt_write_trylock);
+
+void __lockfunc rt_read_lock(rwlock_t *rwlock)
+{
+ migrate_disable();
+ rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
+ __read_rt_lock(rwlock);
+}
+EXPORT_SYMBOL(rt_read_lock);
+
+void __lockfunc rt_write_lock(rwlock_t *rwlock)
+{
+ migrate_disable();
+ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+ __write_rt_lock(rwlock);
+}
+EXPORT_SYMBOL(rt_write_lock);
+
+void __lockfunc rt_read_unlock(rwlock_t *rwlock)
+{
+ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
+ __read_rt_unlock(rwlock);
+ migrate_enable();
+}
+EXPORT_SYMBOL(rt_read_unlock);
+
+void __lockfunc rt_write_unlock(rwlock_t *rwlock)
+{
+ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
+ __write_rt_unlock(rwlock);
+ migrate_enable();
+}
+EXPORT_SYMBOL(rt_write_unlock);
+
+void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
+{
+ __rwlock_biased_rt_init(rwlock, name, key);
+}
+EXPORT_SYMBOL(__rt_rwlock_init);
+
+#endif
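
For reference, below is a minimal user-space sketch of the reader
biased counting scheme, assuming the same READER_BIAS/WRITER_BIAS
encoding as above; all names in it are made up for illustration. The
kernel implementation blocks writers on the embedded rtmutex and keeps
the reader fast path lock free; the sketch serializes writers with a
pthread mutex and spins while readers drain.

/* cc -std=c11 -pthread sketch.c */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>

#define READER_BIAS	(1U << 31)
#define WRITER_BIAS	(1U << 30)

struct sketch_rwlock {
	atomic_uint	readers;	/* top bit set: no writer present */
	pthread_mutex_t	wmutex;		/* stands in for the rtmutex */
};

#define SKETCH_RWLOCK_INIT \
	{ .readers = READER_BIAS, .wmutex = PTHREAD_MUTEX_INITIALIZER }

static int sketch_read_trylock(struct sketch_rwlock *l)
{
	unsigned int r = atomic_load(&l->readers);

	/* Readers may only enter while the bias is present. */
	while (r & READER_BIAS) {
		if (atomic_compare_exchange_weak(&l->readers, &r, r + 1))
			return 1;
	}
	return 0;
}

static void sketch_read_unlock(struct sketch_rwlock *l)
{
	atomic_fetch_sub(&l->readers, 1U);
}

static void sketch_write_lock(struct sketch_rwlock *l)
{
	unsigned int expected = 0;

	/* Writers serialize among themselves first. */
	pthread_mutex_lock(&l->wmutex);
	/* Dropping the bias makes new readers fail the trylock above. */
	atomic_fetch_sub(&l->readers, READER_BIAS);
	/* Wait for the active readers to drain, then mark the lock
	 * write locked. The kernel blocks here instead of yielding. */
	while (!atomic_compare_exchange_weak(&l->readers, &expected,
					     WRITER_BIAS)) {
		expected = 0;
		sched_yield();
	}
}

static void sketch_write_unlock(struct sketch_rwlock *l)
{
	/* Restore the bias; readers may enter again. */
	atomic_store(&l->readers, READER_BIAS);
	pthread_mutex_unlock(&l->wmutex);
}

The reader fast path is a single cmpxchg on the counter, which is what
makes the scheme reader biased: readers never touch the writer side
mutex at all.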