diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 38ece035039e35bf997e4161ec8e84cceaf4508f..d880296245c5954c432c539e2f14f3ade1c4719b 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -379,6 +379,14 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
        tail = encode_tail(smp_processor_id(), idx);
 
        node += idx;
+
+       /*
+        * Ensure that we increment the head node->count before initialising
+        * the actual node. If the compiler is kind enough to reorder these
+        * stores, then an IRQ could overwrite our assignments.
+        */
+       barrier();
+
        node->locked = 0;
        node->next = NULL;
        pv_init_node(node);
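
The hunk above closes a window between reserving a per-CPU qnode slot (the earlier node->count increment that produced idx) and initialising that slot. The sketch below is a minimal userspace illustration of why a compiler-only barrier is enough there; nodes[], grab_node() and compiler_barrier() are simplified stand-ins for the kernel's per-CPU mcs_spinlock machinery, not the real qspinlock code.

/*
 * Userspace sketch only: nodes[], grab_node() and compiler_barrier()
 * are illustrative stand-ins, not the kernel implementation.
 */
#include <stdio.h>

struct mcs_node {
	struct mcs_node *next;
	int locked;
	int count;		/* used only in nodes[0], as in the kernel */
};

static struct mcs_node nodes[4];	/* one slot per nesting level */

/* Compiler-only barrier, same spirit as the kernel's barrier(). */
#define compiler_barrier()	__asm__ __volatile__("" ::: "memory")

static struct mcs_node *grab_node(void)
{
	int idx = nodes[0].count++;	/* reserve a slot for this context */
	struct mcs_node *node = &nodes[idx];

	/*
	 * Without this, the compiler may sink the count++ store below the
	 * two initialising stores; an interrupt taken in that window would
	 * compute the same idx, and its own node->locked/node->next would
	 * later be overwritten by this context.
	 */
	compiler_barrier();

	node->locked = 0;
	node->next = NULL;
	return node;
}

int main(void)
{
	struct mcs_node *node = grab_node();

	printf("got slot %td\n", node - nodes);
	return 0;
}

Only the compiler needs to be constrained here because an IRQ runs on the same CPU as the code it interrupts, so no SMP memory barrier is required.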
@@ -408,14 +416,15 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
         */
        if (old & _Q_TAIL_MASK) {
                prev = decode_tail(old);
+
                /*
-                * The above xchg_tail() is also a load of @lock which
-                * generates, through decode_tail(), a pointer.  The address
-                * dependency matches the RELEASE of xchg_tail() such that
-                * the subsequent access to @prev happens after.
+                * We must ensure that the stores to @node are observed before
+                * the write to prev->next. The address dependency from
+                * xchg_tail is not sufficient to ensure this because the read
+                * component of xchg_tail is unordered with respect to the
+                * initialisation of @node.
                 */
-
-               WRITE_ONCE(prev->next, node);
+               smp_store_release(&prev->next, node);
 
                pv_wait_node(node, prev);
                arch_mcs_spin_lock_contended(&node->locked);
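
The second hunk replaces a plain WRITE_ONCE() with smp_store_release() so that the stores initialising @node are ordered before @node is published through prev->next. Below is a minimal userspace sketch of that publish/consume pattern, using C11 atomics as stand-ins for the kernel primitives; struct wait_node, publish() and successor() are illustrative names only.

/*
 * Userspace sketch only: C11 atomics stand in for the kernel's
 * smp_store_release()/acquire semantics, and struct wait_node is a
 * simplified stand-in for struct mcs_spinlock.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct wait_node {
	_Atomic(struct wait_node *) next;
	atomic_int locked;
};

/* New waiter: initialise the node, then publish it with release order. */
static void publish(struct wait_node *prev, struct wait_node *node)
{
	atomic_store_explicit(&node->locked, 0, memory_order_relaxed);
	atomic_store_explicit(&node->next, NULL, memory_order_relaxed);

	/*
	 * The release store orders the initialising stores above before
	 * @node becomes reachable via prev->next; a plain/relaxed store
	 * (the old WRITE_ONCE()) gives no such guarantee.
	 */
	atomic_store_explicit(&prev->next, node, memory_order_release);
}

/* Earlier waiter: once the acquire load sees @node, its fields are valid. */
static struct wait_node *successor(struct wait_node *prev)
{
	return atomic_load_explicit(&prev->next, memory_order_acquire);
}

int main(void)
{
	static struct wait_node a, b;

	publish(&a, &b);
	printf("successor initialised: locked=%d\n",
	       atomic_load_explicit(&successor(&a)->locked,
				    memory_order_relaxed));
	return 0;
}

As the new comment in the hunk explains, the address dependency coming out of xchg_tail() cannot be relied on here: it orders accesses after the load component of the exchange, but says nothing about the earlier, independent stores that initialise @node, so an explicit release on the publishing store is needed.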