asedeno.scripts.mit.edu Git - linux.git/commitdiff
btrfs: scrub: convert scrub_workers_refcnt to refcount_t
authorAnand Jain <anand.jain@oracle.com>
Wed, 30 Jan 2019 06:45:02 +0000 (14:45 +0800)
committerDavid Sterba <dsterba@suse.com>
Mon, 25 Feb 2019 13:13:38 +0000 (14:13 +0100)
Use the refcount_t for fs_info::scrub_workers_refcnt instead of int so
we get the extra checks. All reference changes are still done under
scrub_lock.

Signed-off-by: Anand Jain <anand.jain@oracle.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/scrub.c

index 9306925b67904e18808a3bdde31225aad453a36f..7efa1edb30cdc31e52d7ece43af69ab0be44259a 100644 (file)
@@ -1075,7 +1075,7 @@ struct btrfs_fs_info {
        atomic_t scrubs_paused;
        atomic_t scrub_cancel_req;
        wait_queue_head_t scrub_pause_wait;
-       int scrub_workers_refcnt;
+       refcount_t scrub_workers_refcnt;
        struct btrfs_workqueue *scrub_workers;
        struct btrfs_workqueue *scrub_wr_completion_workers;
        struct btrfs_workqueue *scrub_nocow_workers;
index 8c0038de73ee2ebdabf3d890159088a5c0a3608e..5216e7b3f9ada29308a1c6fc5cf75b816032aded 100644 (file)
@@ -2109,7 +2109,7 @@ static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
        atomic_set(&fs_info->scrubs_paused, 0);
        atomic_set(&fs_info->scrub_cancel_req, 0);
        init_waitqueue_head(&fs_info->scrub_pause_wait);
-       fs_info->scrub_workers_refcnt = 0;
+       refcount_set(&fs_info->scrub_workers_refcnt, 0);
 }
 
 static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
index 17925af759ae4a1de74ad90fa61f9a90f26e3a55..d20150d68a904f68086988a0ff2d8802d0f2421d 100644 (file)
@@ -3743,7 +3743,7 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
 
        lockdep_assert_held(&fs_info->scrub_lock);
 
-       if (fs_info->scrub_workers_refcnt == 0) {
+       if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
                fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub",
                                flags, is_dev_replace ? 1 : max_active, 4);
                if (!fs_info->scrub_workers)
@@ -3760,8 +3760,11 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
                                              max_active, 2);
                if (!fs_info->scrub_parity_workers)
                        goto fail_scrub_parity_workers;
+
+               refcount_set(&fs_info->scrub_workers_refcnt, 1);
+       } else {
+               refcount_inc(&fs_info->scrub_workers_refcnt);
        }
-       ++fs_info->scrub_workers_refcnt;
        return 0;
 
 fail_scrub_parity_workers:
@@ -3927,7 +3930,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 
        mutex_lock(&fs_info->scrub_lock);
        dev->scrub_ctx = NULL;
-       if (--fs_info->scrub_workers_refcnt == 0) {
+       if (refcount_dec_and_test(&fs_info->scrub_workers_refcnt)) {
                scrub_workers = fs_info->scrub_workers;
                scrub_wr_comp = fs_info->scrub_wr_completion_workers;
                scrub_parity = fs_info->scrub_parity_workers;