author    | Luka Pivk <luka.pivk@toradex.com> | 2019-01-08 07:02:41 +0100
committer | Luka Pivk <luka.pivk@toradex.com> | 2019-01-08 07:02:41 +0100
commit    | 43f61e8df71c474cd56b5c971096b9cf8d36cd95 (patch)
tree      | 0031be65fcc2fd2fd3eceeee9558b5526a19e431 /kernel/locking/qspinlock.c
parent    | 5656ec50dc9950db865d53306326b1a732e3738e (diff)
parent    | c04c050f5bf98845bfe22164b8a1503d696a6e26 (diff)
Merge remote-tracking branch 'remotes/origin/linux-4.19.y' (toradex_4.19.y)
Signed-off-by: Luka Pivk <luka.pivk@toradex.com>
Diffstat (limited to 'kernel/locking/qspinlock.c')
-rw-r--r-- | kernel/locking/qspinlock.c | 71
1 file changed, 42 insertions(+), 29 deletions(-)
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index bfaeb05123ff..edd75e0c7d96 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -232,6 +232,20 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
 #endif /* _Q_PENDING_BITS == 8 */
 
 /**
+ * queued_fetch_set_pending_acquire - fetch the whole lock value and set pending
+ * @lock : Pointer to queued spinlock structure
+ * Return: The previous lock value
+ *
+ * *,*,* -> *,1,*
+ */
+#ifndef queued_fetch_set_pending_acquire
+static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
+{
+	return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
+}
+#endif
+
+/**
  * set_locked - Set the lock bit and own the lock
  * @lock: Pointer to queued spinlock structure
  *
@@ -329,40 +343,39 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	 * 0,0,0 -> 0,0,1 ; trylock
 	 * 0,0,1 -> 0,1,1 ; pending
 	 */
-	val = atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
-	if (!(val & ~_Q_LOCKED_MASK)) {
-		/*
-		 * We're pending, wait for the owner to go away.
-		 *
-		 * *,1,1 -> *,1,0
-		 *
-		 * this wait loop must be a load-acquire such that we match the
-		 * store-release that clears the locked bit and create lock
-		 * sequentiality; this is because not all
-		 * clear_pending_set_locked() implementations imply full
-		 * barriers.
-		 */
-		if (val & _Q_LOCKED_MASK) {
-			atomic_cond_read_acquire(&lock->val,
-						 !(VAL & _Q_LOCKED_MASK));
-		}
+	val = queued_fetch_set_pending_acquire(lock);
 
-		/*
-		 * take ownership and clear the pending bit.
-		 *
-		 * *,1,0 -> *,0,1
-		 */
-		clear_pending_set_locked(lock);
-		qstat_inc(qstat_lock_pending, true);
-		return;
+	/*
+	 * If we observe any contention; undo and queue.
+	 */
+	if (unlikely(val & ~_Q_LOCKED_MASK)) {
+		if (!(val & _Q_PENDING_MASK))
+			clear_pending(lock);
+		goto queue;
 	}
 
 	/*
-	 * If pending was clear but there are waiters in the queue, then
-	 * we need to undo our setting of pending before we queue ourselves.
+	 * We're pending, wait for the owner to go away.
+	 *
+	 * 0,1,1 -> 0,1,0
+	 *
+	 * this wait loop must be a load-acquire such that we match the
+	 * store-release that clears the locked bit and create lock
+	 * sequentiality; this is because not all
+	 * clear_pending_set_locked() implementations imply full
+	 * barriers.
+	 */
+	if (val & _Q_LOCKED_MASK)
+		atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_MASK));
+
+	/*
+	 * take ownership and clear the pending bit.
+	 *
+	 * 0,1,0 -> 0,0,1
 	 */
-	if (!(val & _Q_PENDING_MASK))
-		clear_pending(lock);
+	clear_pending_set_locked(lock);
+	qstat_inc(qstat_lock_pending, true);
+	return;
 
 	/*
 	 * End of pending bit optimistic spinning and beginning of MCS
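For readers following the slowpath change, here is a minimal user-space model of the new pending path; it is a sketch assuming C11 atomics in place of the kernel's atomic_t API. The Q_* constants mirror qspinlock's (tail, pending, locked) lock-word layout, while model_qspinlock, fetch_set_pending_acquire() and pending_slowpath() are illustrative names introduced here; the MCS queueing fallback is elided.

```c
#include <stdatomic.h>
#include <stdint.h>

/*
 * Lock-word layout, mirroring qspinlock's encoding: an 8-bit locked
 * byte, an 8-bit pending byte and a 16-bit MCS tail, written as the
 * (tail, pending, locked) triples in the comments above.
 */
#define Q_LOCKED_VAL	(1U << 0)
#define Q_PENDING_VAL	(1U << 8)
#define Q_LOCKED_MASK	0x000000ffU
#define Q_PENDING_MASK	0x0000ff00U

struct model_qspinlock {
	_Atomic uint32_t val;
};

/* Counterpart of queued_fetch_set_pending_acquire(): *,*,* -> *,1,* */
static uint32_t fetch_set_pending_acquire(struct model_qspinlock *lock)
{
	return atomic_fetch_or_explicit(&lock->val, Q_PENDING_VAL,
					memory_order_acquire);
}

static void pending_slowpath(struct model_qspinlock *lock)
{
	uint32_t val = fetch_set_pending_acquire(lock);

	/*
	 * Any tail or pending bit in the old value means contention:
	 * undo our pending bit (unless someone else already owned it)
	 * and fall back to the MCS queue, as the patched slowpath does
	 * with its "goto queue".
	 */
	if (val & ~Q_LOCKED_MASK) {
		if (!(val & Q_PENDING_MASK))
			atomic_fetch_and_explicit(&lock->val,
						  ~Q_PENDING_VAL,
						  memory_order_relaxed);
		return;	/* queueing is not modeled here */
	}

	/* 0,1,1 -> 0,1,0: spin until the owner drops the locked byte. */
	while (atomic_load_explicit(&lock->val,
				    memory_order_acquire) & Q_LOCKED_MASK)
		;

	/*
	 * 0,1,0 -> 0,0,1: pending is ours and locked is clear, so adding
	 * (LOCKED - PENDING) flips both bytes without touching the tail,
	 * like the generic clear_pending_set_locked().
	 */
	atomic_fetch_add_explicit(&lock->val,
				  Q_LOCKED_VAL - Q_PENDING_VAL,
				  memory_order_relaxed);
}
```

The restructuring the merge brings in is visible in the model: the old code decided between the pending and queue paths only after conditionally waiting inside one large if-block, whereas the new flow inspects the returned value once, immediately undoes a pending bit it should not keep, and jumps to the queue. The #ifndef guard around queued_fetch_set_pending_acquire() additionally lets an architecture supply its own implementation of the fetch-and-set-pending step.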