md/raid1/10: reset bio allocated from mempool
author Shaohua Li <shli@fb.com>
Fri, 25 Aug 2017 00:50:40 +0000 (17:50 -0700)
committer Shaohua Li <shli@fb.com>
Fri, 25 Aug 2017 17:21:44 +0000 (10:21 -0700)
Data allocated from a mempool doesn't always get initialized: when an element
is recycled from the pool instead of freshly allocated, it still holds whatever
state its previous user left behind. In the raid1/10 case, we must reinitialize
the bios.

Reported-by: Jonathan G. Underwood <jonathan.underwood@gmail.com>
Fixes: f0250618361d ("md: raid10: don't use bio's vec table to manage resync pages")
Fixes: 98d30c5812c3 ("md: raid1: don't use bio's vec table to manage resync pages")
Cc: stable@vger.kernel.org (4.12+)
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Shaohua Li <shli@fb.com>
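
For context (an illustrative note, not part of the commit): an element recycled
from a mempool is handed back as-is, without the pool's alloc callback running
again, so it still carries whatever the previous resync/recovery pass left in
its bios. The plain-C userspace sketch below uses made-up stand-ins (fake_bio,
pool_alloc(), fake_bio_reset()) for bio, mempool_alloc() and bio_reset() to
show the stale state and the save/reset/restore handling of bi_private that
both new helpers in this patch apply.

/*
 * Hypothetical userspace sketch -- not kernel code.  fake_bio, pool_alloc()
 * and fake_bio_reset() are invented stand-ins for bio, mempool_alloc() and
 * bio_reset().
 */
#include <stdio.h>
#include <string.h>

struct fake_bio {
        unsigned int bi_flags;  /* stale state left by the previous user */
        void *bi_private;       /* must survive the reset */
};

static struct fake_bio pool_slot;       /* one-slot "pool" free list */

static struct fake_bio *pool_alloc(void)
{
        return &pool_slot;      /* recycled element, handed back as-is */
}

static void fake_bio_reset(struct fake_bio *bio)
{
        memset(bio, 0, sizeof(*bio));   /* wipes bi_private too, like bio_reset() */
}

int main(void)
{
        int pages = 42;                 /* stands in for the resync_pages */
        struct fake_bio *bio = pool_alloc();

        /* First user dirties the element; the next allocation sees the residue. */
        bio->bi_flags = 0xdead;
        bio->bi_private = &pages;
        bio = pool_alloc();
        printf("recycled: flags=%#x\n", bio->bi_flags);

        /* The pattern both new helpers apply: save, reset, restore. */
        void *rp = bio->bi_private;
        fake_bio_reset(bio);
        bio->bi_private = rp;
        printf("reset:    flags=%#x, private kept=%d\n",
               bio->bi_flags, bio->bi_private == &pages);
        return 0;
}

bio_reset() clears bi_private along with the rest of the bio, which is why both
helpers stash the resync_pages pointer first and restore it afterwards.
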
drivers/md/raid1.c
drivers/md/raid10.c

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f50958ded9f0c4dd9982c440b20d4e8854697b0d..79474f47eeefad3477d1009857022c574c7a2dc9 100644
@@ -2564,6 +2564,23 @@ static int init_resync(struct r1conf *conf)
        return 0;
 }
 
+static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
+{
+       struct r1bio *r1bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
+       struct resync_pages *rps;
+       struct bio *bio;
+       int i;
+
+       for (i = conf->poolinfo->raid_disks; i--; ) {
+               bio = r1bio->bios[i];
+               rps = bio->bi_private;
+               bio_reset(bio);
+               bio->bi_private = rps;
+       }
+       r1bio->master_bio = NULL;
+       return r1bio;
+}
+
 /*
  * perform a "sync" on one "block"
  *
@@ -2649,7 +2666,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 
        bitmap_cond_end_sync(mddev->bitmap, sector_nr,
                mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
-       r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
+       r1_bio = raid1_alloc_init_r1buf(conf);
 
        raise_barrier(conf, sector_nr);
 
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index f55d4cc085f60daa6b25c9f398e462420dbc6c18..d51ac02e98ef5d73a01c8b8598209668330c52df 100644
@@ -2798,6 +2798,35 @@ static int init_resync(struct r10conf *conf)
        return 0;
 }
 
+static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
+{
+       struct r10bio *r10bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+       struct resync_pages *rp;
+       struct bio *bio;
+       int nalloc;
+       int i;
+
+       if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
+           test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
+               nalloc = conf->copies; /* resync */
+       else
+               nalloc = 2; /* recovery */
+
+       for (i = 0; i < nalloc; i++) {
+               bio = r10bio->devs[i].bio;
+               rp = bio->bi_private;
+               bio_reset(bio);
+               bio->bi_private = rp;
+               bio = r10bio->devs[i].repl_bio;
+               if (bio) {
+                       rp = bio->bi_private;
+                       bio_reset(bio);
+                       bio->bi_private = rp;
+               }
+       }
+       return r10bio;
+}
+
 /*
  * perform a "sync" on one "block"
  *
@@ -3027,7 +3056,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
                                atomic_inc(&mreplace->nr_pending);
                        rcu_read_unlock();
 
-                       r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+                       r10_bio = raid10_alloc_init_r10buf(conf);
                        r10_bio->state = 0;
                        raise_barrier(conf, rb2 != NULL);
                        atomic_set(&r10_bio->remaining, 0);
@@ -3236,7 +3265,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
                }
                if (sync_blocks < max_sync)
                        max_sync = sync_blocks;
-               r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+               r10_bio = raid10_alloc_init_r10buf(conf);
                r10_bio->state = 0;
 
                r10_bio->mddev = mddev;
@@ -4360,7 +4389,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
 
 read_more:
        /* Now schedule reads for blocks from sector_nr to last */
-       r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+       r10_bio = raid10_alloc_init_r10buf(conf);
        r10_bio->state = 0;
        raise_barrier(conf, sectors_done != 0);
        atomic_set(&r10_bio->remaining, 0);