locking/local_lock: Introduce local_lock_is_locked().
Introduce local_lock_is_locked(), which returns true when the given
local_lock is locked by the current CPU (on !PREEMPT_RT) or by the
current task (on PREEMPT_RT).
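
For illustration, a minimal usage sketch. The foo_pcpu structure and
the foo_inc() helper below are hypothetical and not part of this patch;
they only show how a caller might use the new helper to detect that the
per CPU lock is already held underneath the current context before
taking it:

  #include <linux/local_lock.h>
  #include <linux/percpu.h>
  #include <linux/preempt.h>

  struct foo_pcpu {
          local_trylock_t llock;
          u64 counter;
  };
  static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu) = {
          .llock = INIT_LOCAL_TRYLOCK(llock),
  };

  /* May be reached re-entrantly, e.g. from a tracing hook. */
  static bool foo_inc(void)
  {
          bool done = false;

          /* disable migration so the check targets this CPU's lock */
          migrate_disable();
          if (!local_lock_is_locked(&foo_pcpu.llock)) {
                  local_lock(&foo_pcpu.llock);
                  this_cpu_inc(foo_pcpu.counter);
                  local_unlock(&foo_pcpu.llock);
                  done = true;
          }
          migrate_enable();
          return done;
  }

In this sketch a true result on !PREEMPT_RT means the per CPU
'acquired' flag is set, i.e. the lock is held underneath the current
context, while on PREEMPT_RT it means the current task already owns the
underlying rt_mutex, so acquiring the local_lock again would deadlock.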
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
index 16a2ee4..092ce89 100644
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
@@ -66,6 +66,8 @@
*/
#define local_trylock(lock) __local_trylock(lock)
+#define local_lock_is_locked(lock) __local_lock_is_locked(lock)
+
/**
* local_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
* interrupts if acquired
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 29df45f..263723a 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -165,6 +165,9 @@ do { \
!!tl; \
})
+/* preemption or migration must be disabled before calling __local_lock_is_locked */
+#define __local_lock_is_locked(lock) READ_ONCE(this_cpu_ptr(lock)->acquired)
+
#define __local_lock_release(lock) \
do { \
local_trylock_t *tl; \
@@ -285,4 +288,9 @@ do { \
__local_trylock(lock); \
})
+/* migration must be disabled before calling __local_lock_is_locked */
+#include "../../kernel/locking/rtmutex_common.h"
+#define __local_lock_is_locked(__lock) \
+ (rt_mutex_owner(&this_cpu_ptr(__lock)->lock) == current)
+
#endif /* CONFIG_PREEMPT_RT */