bcache: fix a lost wake-up problem caused by mca_cannibalize_lock
author    Guoju Fang <fangguoju@gmail.com>
          Wed, 13 Nov 2019 08:03:16 +0000 (16:03 +0800)
committer Jens Axboe <axboe@kernel.dk>
          Wed, 13 Nov 2019 22:42:50 +0000 (15:42 -0700)
This patch fixes a lost wake-up problem caused by the race between
mca_cannibalize_lock and bch_cannibalize_unlock.

Consider two processes, A and B. Process A is executing
mca_cannibalize_lock, while process B holds c->btree_cache_alloc_lock
and is executing bch_cannibalize_unlock. Process A's cmpxchg observes
that the lock is already held, but before A reaches prepare_to_wait,
process B releases the lock and calls wake_up, which finds no waiters
on the queue. Process A then executes prepare_to_wait, sets its state
to TASK_UNINTERRUPTIBLE and goes to sleep, and nothing will ever wake
it up. This lost wake-up can leave the bcache device hung.

Signed-off-by: Guoju Fang <fangguoju@gmail.com>
Signed-off-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/md/bcache/bcache.h
drivers/md/bcache/btree.c
drivers/md/bcache/super.c

diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 013e35a9e317a44c09426af9b26ff894c5a94692..3653faf3bf48d16139069976d7952d8e05f0a37f 100644
@@ -582,6 +582,7 @@ struct cache_set {
         */
        wait_queue_head_t       btree_cache_wait;
        struct task_struct      *btree_cache_alloc_lock;
+       spinlock_t              btree_cannibalize_lock;
 
        /*
         * When we free a btree node, we increment the gen of the bucket the
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 00523cd1db80b728461c96b6f6c717a2fd715dc6..39d7fc1ef1eeb87ad6938eb30b6084e9c46cd6ec 100644
@@ -910,15 +910,17 @@ static struct btree *mca_find(struct cache_set *c, struct bkey *k)
 
 static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
 {
-       struct task_struct *old;
-
-       old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
-       if (old && old != current) {
+       spin_lock(&c->btree_cannibalize_lock);
+       if (likely(c->btree_cache_alloc_lock == NULL)) {
+               c->btree_cache_alloc_lock = current;
+       } else if (c->btree_cache_alloc_lock != current) {
                if (op)
                        prepare_to_wait(&c->btree_cache_wait, &op->wait,
                                        TASK_UNINTERRUPTIBLE);
+               spin_unlock(&c->btree_cannibalize_lock);
                return -EINTR;
        }
+       spin_unlock(&c->btree_cannibalize_lock);
 
        return 0;
 }
@@ -953,10 +955,12 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
  */
 static void bch_cannibalize_unlock(struct cache_set *c)
 {
+       spin_lock(&c->btree_cannibalize_lock);
        if (c->btree_cache_alloc_lock == current) {
                c->btree_cache_alloc_lock = NULL;
                wake_up(&c->btree_cache_wait);
        }
+       spin_unlock(&c->btree_cannibalize_lock);
 }
 
 static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 20ed838e9413bf532e47ee6e70f60c56b3499010..ebb854ed05a4bb85e5363a2f12c05826d21608c8 100644
@@ -1769,6 +1769,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
        sema_init(&c->sb_write_mutex, 1);
        mutex_init(&c->bucket_lock);
        init_waitqueue_head(&c->btree_cache_wait);
+       spin_lock_init(&c->btree_cannibalize_lock);
        init_waitqueue_head(&c->bucket_wait);
        init_waitqueue_head(&c->gc_wait);
        sema_init(&c->uuid_write_mutex, 1);