From 88e538bc095147a501654eb7c3b2161396e41ba3 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Tue, 18 Dec 2018 18:48:27 +0100
Subject: locking/qspinlock: Re-order code

commit 53bf57fab7321fb42b703056a4c80fc9d986d170 upstream.

Flip the branch condition after atomic_fetch_or_acquire(_Q_PENDING_VAL)
such that we lose the indent. This also results in a more natural code
flow IMO.
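
As an illustration, the control-flow change has the following shape (a
minimal standalone C sketch, not the kernel code; contended(),
undo_pending(), pending_path() and queue_path() are hypothetical
stand-ins for the real qspinlock logic in the hunk below):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the real qspinlock logic. */
static bool contended(unsigned int val) { return val & ~1u; }
static void undo_pending(void) { puts("undo pending"); }
static void pending_path(void) { puts("pending fast path"); }
static void queue_path(void) { puts("queue slow path"); }

/* Before: the common (pending) path sits inside the if, one indent deep. */
static void slowpath_before(unsigned int val)
{
	if (!contended(val)) {
		pending_path();
		return;
	}
	undo_pending();
	queue_path();
}

/* After: test the inverted (unlikely) condition and branch out early,
 * so the common path continues at the top indentation level.
 */
static void slowpath_after(unsigned int val)
{
	if (contended(val)) {	/* unlikely() upstream */
		undo_pending();
		goto queue;
	}
	pending_path();
	return;
queue:
	queue_path();
}

int main(void)
{
	slowpath_before(0);	/* pending fast path */
	slowpath_after(0);	/* same behaviour, less nesting */
	slowpath_before(2);	/* contended: queue slow path */
	slowpath_after(2);	/* same behaviour */
	return 0;
}

Behaviour is unchanged; only the nesting and the branch hint differ.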

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: andrea.parri@amarulasolutions.com
Cc: longman@redhat.com
Link: https://lkml.kernel.org/r/20181003130257.156322446@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 kernel/locking/qspinlock.c | 56 ++++++++++++++++++--------------------
 1 file changed, 27 insertions(+), 29 deletions(-)

diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index bfaeb05123ff..ec343276f975 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -330,39 +330,37 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	 * 0,0,1 -> 0,1,1 ; pending
 	 */
 	val = atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
-	if (!(val & ~_Q_LOCKED_MASK)) {
-		/*
-		 * We're pending, wait for the owner to go away.
-		 *
-		 * *,1,1 -> *,1,0
-		 *
-		 * this wait loop must be a load-acquire such that we match the
-		 * store-release that clears the locked bit and create lock
-		 * sequentiality; this is because not all
-		 * clear_pending_set_locked() implementations imply full
-		 * barriers.
-		 */
-		if (val & _Q_LOCKED_MASK) {
-			atomic_cond_read_acquire(&lock->val,
-						 !(VAL & _Q_LOCKED_MASK));
-		}
-
-		/*
-		 * take ownership and clear the pending bit.
-		 *
-		 * *,1,0 -> *,0,1
-		 */
-		clear_pending_set_locked(lock);
-		qstat_inc(qstat_lock_pending, true);
-		return;
+	/*
+	 * If we observe any contention; undo and queue.
+	 */
+	if (unlikely(val & ~_Q_LOCKED_MASK)) {
+		if (!(val & _Q_PENDING_MASK))
+			clear_pending(lock);
+		goto queue;
 	}
 
 	/*
-	 * If pending was clear but there are waiters in the queue, then
-	 * we need to undo our setting of pending before we queue ourselves.
+	 * We're pending, wait for the owner to go away.
+	 *
+	 * 0,1,1 -> 0,1,0
+	 *
+	 * this wait loop must be a load-acquire such that we match the
+	 * store-release that clears the locked bit and create lock
+	 * sequentiality; this is because not all
+	 * clear_pending_set_locked() implementations imply full
+	 * barriers.
+	 */
+	if (val & _Q_LOCKED_MASK)
+		atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_MASK));
+
+	/*
+	 * take ownership and clear the pending bit.
+	 *
+	 * 0,1,0 -> 0,0,1
 	 */
-	if (!(val & _Q_PENDING_MASK))
-		clear_pending(lock);
+	clear_pending_set_locked(lock);
+	qstat_inc(qstat_lock_pending, true);
+	return;
 
 	/*
 	 * End of pending bit optimistic spinning and beginning of MCS
-- 
2.19.1