/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2013-2014 Red Hat, Inc.
 * (C) Copyright 2015 Intel Corp.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 *          Peter Zijlstra <peterz@infradead.org>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mutex.h>
#include <asm/byteorder.h>
#include <asm/qspinlock.h>
/*
 * The basic principle of a queue-based spinlock can best be understood
 * by studying a classic queue-based spinlock implementation called the
 * MCS lock. The paper below provides a good description of this kind
 * of lock:
 *
 * http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
 *
 * This queued spinlock implementation is based on the MCS lock, however to
 * make it fit the 4 bytes we assume spinlock_t to be, and to preserve its
 * existing API, we must modify it somehow.
 *
 * In particular; where the traditional MCS lock consists of a tail pointer
 * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to
 * unlock the next pending waiter (next->locked), we compress both of these:
 * {tail, next->locked} into a single u32 value.
 *
 * A spinlock disables recursion of its own context, and there is a limit to
 * the contexts that can nest; namely: task, softirq, hardirq, nmi. As there
 * are at most 4 nesting levels, the level can be encoded in a 2-bit number,
 * and we can encode the tail by combining that 2-bit nesting level with the
 * cpu number. With one byte for the lock value and 3 bytes for the tail,
 * only a 32-bit word is now needed. Even though we only need 1 bit for the
 * lock, we extend it to a full byte to achieve better performance for
 * architectures that support atomic byte writes.
 *
 * We also change the first spinner to spin on the lock bit instead of its
 * node; thereby avoiding the need to carry a node from lock to unlock, and
 * preserving the existing lock API. This also makes the unlock code simpler
 * and faster.
 *
 * N.B. The current implementation only supports architectures that allow
 *      atomic operations on smaller 8-bit and 16-bit data types.
 */
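
/*
 * For illustration, the resulting 32-bit word layout, assuming the
 * NR_CPUS < 16K configuration from asm-generic/qspinlock_types.h (where
 * the pending bit gets a whole byte to itself):
 *
 *	 0- 7: locked byte
 *	    8: pending bit (addressed as a full byte when _Q_PENDING_BITS == 8)
 *	 9-15: not used
 *	16-17: tail index (nesting level)
 *	18-31: tail cpu (+1)
 */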

#include "mcs_spinlock.h"

/*
 * Per-CPU queue node structures; we can never have more than 4 nested
 * contexts: task, softirq, hardirq, nmi.
 *
 * Exactly fits one 64-byte cacheline on a 64-bit architecture.
 */
static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[4]);
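
/*
 * The cacheline arithmetic, assuming the mcs_spinlock layout from
 * mcs_spinlock.h (a next pointer plus two ints): on a 64-bit machine each
 * node is 8 + 4 + 4 = 16 bytes, so the 4-entry array is 4 * 16 = 64 bytes,
 * i.e. exactly one cacheline.
 */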

/*
 * We must be able to distinguish between no-tail and the tail at 0:0,
 * therefore increment the cpu number by one.
 */
static inline u32 encode_tail(int cpu, int idx)
{
	u32 tail;

#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(idx > 3);
#endif
	tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
	tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */

	return tail;
}

static inline struct mcs_spinlock *decode_tail(u32 tail)
{
	int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
	int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;

	return per_cpu_ptr(&mcs_nodes[idx], cpu);
}
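
/*
 * A worked round-trip, assuming the default offsets when NR_CPUS < 16K
 * (_Q_TAIL_IDX_OFFSET == 16, _Q_TAIL_CPU_OFFSET == 18): cpu 5 at nesting
 * level 2 encodes as
 *
 *	tail = ((5 + 1) << 18) | (2 << 16);
 *
 * and decode_tail(tail) recovers cpu = (tail >> 18) - 1 = 5 and idx = 2,
 * returning per_cpu_ptr(&mcs_nodes[2], 5).
 */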

#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)

/*
 * By using the whole 2nd least significant byte for the pending bit, we
 * can allow better optimization of the lock acquisition for the pending
 * bit holder.
 *
 * This internal structure is also used by the set_locked function which
 * is not restricted to _Q_PENDING_BITS == 8.
 */
struct __qspinlock {
	union {
		atomic_t val;
#ifdef __LITTLE_ENDIAN
		struct {
			u8	locked;
			u8	pending;
		};
		struct {
			u16	locked_pending;
			u16	tail;
		};
#else
		struct {
			u16	tail;
			u16	locked_pending;
		};
		struct {
			u8	reserved[2];
			u8	pending;
			u8	locked;
		};
#endif
	};
};

#if _Q_PENDING_BITS == 8
/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 *
 * Lock stealing is not allowed if this function is used.
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	WRITE_ONCE(l->locked_pending, _Q_LOCKED_VAL);
}

/**
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail)
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	struct __qspinlock *l = (void *)lock;

	return (u32)xchg(&l->tail, tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
}
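
/*
 * This works because with _Q_PENDING_BITS == 8 the tail occupies exactly
 * the upper 16 bits of the word (_Q_TAIL_OFFSET == 16), so it can be
 * exchanged as the naturally aligned u16 l->tail: a tail code word t from
 * encode_tail() is stored as the 16-bit value t >> 16, and the previous
 * 16-bit value is shifted back up to rebuild the previous code word.
 */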

#else /* _Q_PENDING_BITS == 8 */

/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
}

/**
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail)
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	u32 old, new, val = atomic_read(&lock->val);

	for (;;) {
		new = (val & _Q_LOCKED_PENDING_MASK) | tail;
		old = atomic_cmpxchg(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}
	return old;
}
#endif /* _Q_PENDING_BITS == 8 */
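
/*
 * In the _Q_PENDING_BITS == 1 configuration above (used when
 * NR_CPUS >= 16K), the tail is not a naturally aligned 16-bit field, so a
 * plain partial-word xchg() is not possible. Instead the loop rebuilds the
 * whole word, keeping the locked+pending bits and splicing in the new
 * tail:
 *
 *	new = (val & _Q_LOCKED_PENDING_MASK) | tail;
 *
 * and retries the cmpxchg until no other CPU changed the word in between.
 */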

/**
 * set_locked - Set the lock bit and own the lock
 * @lock: Pointer to queued spinlock structure
 *
 * *,*,0 -> *,0,1
 */
static __always_inline void set_locked(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
}

/**
 * queued_spin_lock_slowpath - acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * @val: Current value of the queued spinlock 32-bit word
 *
 * (queue tail, pending bit, lock value)
 *
 *              fast     :    slow                                  :    unlock
 *                       :                                          :
 * uncontended  (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
 *                       :       | ^--------.------.             /  :
 *                       :       v           \      \            |  :
 * pending               :    (0,1,1) +--> (0,1,0)   \           |  :
 *                       :       | ^--'              |           |  :
 *                       :       v                   |           |  :
 * uncontended           :    (n,x,y) +--> (n,0,0) --'            |  :
 *   queue               :       | ^--'                           |  :
 *                       :       v                                |  :
 * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
 *   queue               :         ^--'                             :
 */
void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	struct mcs_spinlock *prev, *next, *node;
	u32 new, old, tail;
	int idx;

	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));

	/*
	 * wait for in-progress pending->locked hand-overs
	 *
	 * 0,1,0 -> 0,0,1
	 */
	if (val == _Q_PENDING_VAL) {
		while ((val = atomic_read(&lock->val)) == _Q_PENDING_VAL)
			cpu_relax();
	}

	/*
	 * trylock || pending
	 *
	 * 0,0,0 -> 0,0,1 ; trylock
	 * 0,0,1 -> 0,1,1 ; pending
	 */
	for (;;) {
		/*
		 * If we observe any contention; queue.
		 */
		if (val & ~_Q_LOCKED_MASK)
			goto queue;

		new = _Q_LOCKED_VAL;
		if (val == new)
			new |= _Q_PENDING_VAL;

		old = atomic_cmpxchg(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}

	/*
	 * we won the trylock
	 */
	if (new == _Q_LOCKED_VAL)
		return;

	/*
	 * we're pending, wait for the owner to go away.
	 *
	 * *,1,1 -> *,1,0
	 *
	 * this wait loop must be a load-acquire such that we match the
	 * store-release that clears the locked bit and create lock
	 * sequentiality; this is because not all clear_pending_set_locked()
	 * implementations imply full barriers.
	 */
	while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_MASK)
		cpu_relax();

	/*
	 * take ownership and clear the pending bit.
	 *
	 * *,1,0 -> *,0,1
	 */
	clear_pending_set_locked(lock);
	return;
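
	/*
	 * To summarize the pending fast path above: a CPU that finds the
	 * lock held but unqueued, (0,0,1), sets the pending bit, (0,1,1),
	 * spins until the owner releases, (0,1,0), and then takes the lock
	 * by clearing pending and setting locked, (0,0,1), all without ever
	 * touching its MCS node.
	 */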

	/*
	 * End of pending bit optimistic spinning and beginning of MCS
	 * queuing.
	 */
queue:
	node = this_cpu_ptr(&mcs_nodes[0]);
	idx = node->count++;
	tail = encode_tail(smp_processor_id(), idx);

	node += idx;
	node->locked = 0;
	node->next = NULL;
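
	/*
	 * The count/idx scheme is what makes nesting safe: if, say, an NMI
	 * fires while this task-context slowpath holds mcs_nodes[0] and the
	 * NMI handler contends on some qspinlock, its slowpath sees
	 * count == 1 and uses mcs_nodes[1] on the same CPU, so the two
	 * contexts never clobber each other's queue node.
	 */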

	/*
	 * We touched a (possibly) cold cacheline in the per-cpu queue node;
	 * attempt the trylock once more in the hope someone let go while we
	 * weren't watching.
	 */
	if (queued_spin_trylock(lock))
		goto release;

	/*
	 * We have already touched the queueing cacheline; don't bother with
	 * pending stuff.
	 *
	 * p,*,* -> n,*,*
	 */
	old = xchg_tail(lock, tail);

	/*
	 * if there was a previous node; link it and wait until reaching the
	 * head of the waitqueue.
	 */
	if (old & _Q_TAIL_MASK) {
		prev = decode_tail(old);
		WRITE_ONCE(prev->next, node);

		arch_mcs_spin_lock_contended(&node->locked);
	}
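
	/*
	 * arch_mcs_spin_lock_contended() spins on node->locked until the
	 * predecessor hands the queue-head role over via
	 * arch_mcs_spin_unlock_contended(&next->locked) in the contended
	 * release path below; this pairing is what gives the queue its
	 * FIFO order.
	 */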

	/*
	 * we're at the head of the waitqueue, wait for the owner & pending to
	 * go away.
	 *
	 * *,x,y -> *,0,0
	 *
	 * this wait loop must use a load-acquire such that we match the
	 * store-release that clears the locked bit and create lock
	 * sequentiality; this is because the set_locked() function below
	 * does not imply a full barrier.
	 */
	while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_PENDING_MASK)
		cpu_relax();

	/*
	 * claim the lock:
	 *
	 * n,0,0 -> 0,0,1 : lock, uncontended
	 * *,0,0 -> *,0,1 : lock, contended
	 *
	 * If the queue head is the only one in the queue (lock value == tail),
	 * clear the tail code and grab the lock. Otherwise, we only need
	 * to grab the lock.
	 */
	for (;;) {
		if (val != tail) {
			set_locked(lock);
			break;
		}
		old = atomic_cmpxchg(&lock->val, val, _Q_LOCKED_VAL);
		if (old == val)
			goto release;	/* No contention */

		val = old;
	}

	/*
	 * contended path; wait for next, release.
	 */
	while (!(next = READ_ONCE(node->next)))
		cpu_relax();

	arch_mcs_spin_unlock_contended(&next->locked);

release:
	/*
	 * release the node
	 */
	this_cpu_dec(mcs_nodes[0].count);
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);
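
/*
 * For context, a minimal sketch of how this slowpath is reached, assuming
 * the generic fast path from asm-generic/qspinlock.h: the lock word is
 * cmpxchg'ed from 0 to _Q_LOCKED_VAL, and only on failure do we fall
 * through to the code above.
 *
 *	static __always_inline void queued_spin_lock(struct qspinlock *lock)
 *	{
 *		u32 val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
 *
 *		if (likely(val == 0))
 *			return;
 *		queued_spin_lock_slowpath(lock, val);
 *	}
 */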