1 // SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 */
7 #include <linux/sched.h>
8 #include <linux/wait.h>
10 #include <linux/slab.h>
11 #include <linux/buffer_head.h>
12 #include <linux/blkdev.h>
13 #include <linux/random.h>
14 #include <linux/iocontext.h>
15 #include <linux/capability.h>
16 #include <linux/ratelimit.h>
17 #include <linux/kthread.h>
18 #include <linux/raid/pq.h>
19 #include <linux/hash.h>
20 #include <linux/list_sort.h>
21 #include <linux/raid/xor.h>
23 #include <asm/div64.h>
25 #include "extent_map.h"
27 #include "transaction.h"
28 #include "print-tree.h"
31 #include "async-thread.h"
32 #include "check-integrity.h"
33 #include "rcu-string.h"
35 /* set when additional merges to this rbio are not allowed */
36 #define RBIO_RMW_LOCKED_BIT 1
/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2
45 * set when it is safe to trust the stripe_pages for caching
47 #define RBIO_CACHE_READY_BIT 3
49 #define RBIO_CACHE_SIZE 1024
enum btrfs_rbio_ops {
	BTRFS_RBIO_WRITE,
	BTRFS_RBIO_READ_REBUILD,
	BTRFS_RBIO_PARITY_SCRUB,
	BTRFS_RBIO_REBUILD_MISSING,
};
58 struct btrfs_raid_bio {
59 struct btrfs_fs_info *fs_info;
60 struct btrfs_bio *bbio;
	/*
	 * while we're doing rmw on a stripe
	 * we put it into a hash table so we can
	 * lock the stripe and merge more rbios
	 * into it
	 */
	struct list_head hash_list;
70 * LRU list for the stripe cache
72 struct list_head stripe_cache;
75 * for scheduling work in the helper threads
77 struct btrfs_work work;
80 * bio list and bio_list_lock are used
81 * to add more bios into the stripe
82 * in hopes of avoiding the full rmw
84 struct bio_list bio_list;
85 spinlock_t bio_list_lock;
87 /* also protected by the bio_list_lock, the
88 * plug list is used by the plugging code
89 * to collect partial bios while plugged. The
90 * stripe locking code also uses it to hand off
91 * the stripe lock to the next pending IO
93 struct list_head plug_list;
96 * flags that tell us if it is safe to
101 /* size of each individual stripe on disk */
104 /* number of data stripes (no p/q) */
	/*
	 * set if we're doing a parity rebuild
	 * for a read from higher up, which is handled
	 * differently from a parity rebuild as part of
	 * rmw
	 */
	enum btrfs_rbio_ops operation;
118 /* first bad stripe */
121 /* second bad stripe (for raid6 use) */
	/*
	 * number of pages needed to represent the full
	 * stripe
	 */
	int nr_pages;

	/*
	 * size of all the bios in the bio_list.  This
	 * helps us decide if the rbio maps to a full
	 * stripe or not
	 */
	int bio_list_bytes;

	atomic_t stripes_pending;
146 * these are two arrays of pointers. We allocate the
147 * rbio big enough to hold them both and setup their
148 * locations when the rbio is allocated
151 /* pointers to pages that we allocated for
152 * reading/writing stripes directly from the disk (including P/Q)
154 struct page **stripe_pages;
157 * pointers to the pages in the bio_list. Stored
158 * here for faster lookup
160 struct page **bio_pages;
163 * bitmap to record which horizontal stripe has data
165 unsigned long *dbitmap;
167 /* allocated with real_stripes-many pointers for finish_*() calls */
168 void **finish_pointers;
170 /* allocated with stripe_npages-many bits for finish_*() calls */
171 unsigned long *finish_pbitmap;
174 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
175 static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
176 static void rmw_work(struct btrfs_work *work);
177 static void read_rebuild_work(struct btrfs_work *work);
178 static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
179 static void async_read_rebuild(struct btrfs_raid_bio *rbio);
180 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
181 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
182 static void __free_raid_bio(struct btrfs_raid_bio *rbio);
183 static void index_rbio_pages(struct btrfs_raid_bio *rbio);
184 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
186 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
188 static void async_scrub_parity(struct btrfs_raid_bio *rbio);
191 * the stripe hash table is used for locking, and to collect
192 * bios in hopes of making a full stripe
194 int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
196 struct btrfs_stripe_hash_table *table;
197 struct btrfs_stripe_hash_table *x;
198 struct btrfs_stripe_hash *cur;
199 struct btrfs_stripe_hash *h;
200 int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
204 if (info->stripe_hash_table)
208 * The table is large, starting with order 4 and can go as high as
209 * order 7 in case lock debugging is turned on.
211 * Try harder to allocate and fallback to vmalloc to lower the chance
212 * of a failing mount.
214 table_size = sizeof(*table) + sizeof(*h) * num_entries;
215 table = kvzalloc(table_size, GFP_KERNEL);
219 spin_lock_init(&table->cache_lock);
220 INIT_LIST_HEAD(&table->stripe_cache);
224 for (i = 0; i < num_entries; i++) {
226 INIT_LIST_HEAD(&cur->hash_list);
227 spin_lock_init(&cur->lock);
230 x = cmpxchg(&info->stripe_hash_table, NULL, table);
/*
 * caching an rbio means to copy anything from the
 * bio_pages array into the stripe_pages array.  We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
245 static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
252 ret = alloc_rbio_pages(rbio);
256 for (i = 0; i < rbio->nr_pages; i++) {
257 if (!rbio->bio_pages[i])
260 s = kmap(rbio->bio_pages[i]);
261 d = kmap(rbio->stripe_pages[i]);
263 memcpy(d, s, PAGE_SIZE);
265 kunmap(rbio->bio_pages[i]);
266 kunmap(rbio->stripe_pages[i]);
267 SetPageUptodate(rbio->stripe_pages[i]);
269 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
273 * we hash on the first logical address of the stripe
275 static int rbio_bucket(struct btrfs_raid_bio *rbio)
277 u64 num = rbio->bbio->raid_map[0];
280 * we shift down quite a bit. We're using byte
281 * addressing, and most of the lower bits are zeros.
282 * This tends to upset hash_64, and it consistently
283 * returns just one or two different values.
285 * shifting off the lower bits fixes things.
287 return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
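/*
 * Illustrative example (hypothetical numbers): a full stripe whose first
 * logical address is 0x40000000 (1GiB) hashes as
 * hash_64(0x40000000 >> 16, BTRFS_STRIPE_HASH_TABLE_BITS), i.e.
 * hash_64(0x4000, ...).  Without the shift, the low-order zero bits of the
 * stripe-aligned byte address would dominate and hash_64 would keep
 * landing in the same few buckets.
 */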
291 * stealing an rbio means taking all the uptodate pages from the stripe
292 * array in the source rbio and putting them into the destination rbio
294 static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
300 if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
303 for (i = 0; i < dest->nr_pages; i++) {
304 s = src->stripe_pages[i];
305 if (!s || !PageUptodate(s)) {
309 d = dest->stripe_pages[i];
313 dest->stripe_pages[i] = s;
314 src->stripe_pages[i] = NULL;
319 * merging means we take the bio_list from the victim and
320 * splice it into the destination. The victim should
321 * be discarded afterwards.
323 * must be called with dest->rbio_list_lock held
325 static void merge_rbio(struct btrfs_raid_bio *dest,
326 struct btrfs_raid_bio *victim)
328 bio_list_merge(&dest->bio_list, &victim->bio_list);
329 dest->bio_list_bytes += victim->bio_list_bytes;
330 dest->generic_bio_cnt += victim->generic_bio_cnt;
331 bio_list_init(&victim->bio_list);
335 * used to prune items that are in the cache. The caller
336 * must hold the hash table lock.
338 static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
340 int bucket = rbio_bucket(rbio);
341 struct btrfs_stripe_hash_table *table;
342 struct btrfs_stripe_hash *h;
346 * check the bit again under the hash table lock.
348 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
351 table = rbio->fs_info->stripe_hash_table;
352 h = table->table + bucket;
354 /* hold the lock for the bucket because we may be
355 * removing it from the hash table
360 * hold the lock for the bio list because we need
361 * to make sure the bio list is empty
363 spin_lock(&rbio->bio_list_lock);
365 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
366 list_del_init(&rbio->stripe_cache);
367 table->cache_size -= 1;
370 /* if the bio list isn't empty, this rbio is
371 * still involved in an IO. We take it out
372 * of the cache list, and drop the ref that
373 * was held for the list.
375 * If the bio_list was empty, we also remove
376 * the rbio from the hash_table, and drop
377 * the corresponding ref
379 if (bio_list_empty(&rbio->bio_list)) {
380 if (!list_empty(&rbio->hash_list)) {
381 list_del_init(&rbio->hash_list);
382 refcount_dec(&rbio->refs);
383 BUG_ON(!list_empty(&rbio->plug_list));
388 spin_unlock(&rbio->bio_list_lock);
389 spin_unlock(&h->lock);
392 __free_raid_bio(rbio);
396 * prune a given rbio from the cache
398 static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
400 struct btrfs_stripe_hash_table *table;
403 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
406 table = rbio->fs_info->stripe_hash_table;
408 spin_lock_irqsave(&table->cache_lock, flags);
409 __remove_rbio_from_cache(rbio);
410 spin_unlock_irqrestore(&table->cache_lock, flags);
414 * remove everything in the cache
416 static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
418 struct btrfs_stripe_hash_table *table;
420 struct btrfs_raid_bio *rbio;
422 table = info->stripe_hash_table;
424 spin_lock_irqsave(&table->cache_lock, flags);
425 while (!list_empty(&table->stripe_cache)) {
426 rbio = list_entry(table->stripe_cache.next,
427 struct btrfs_raid_bio,
429 __remove_rbio_from_cache(rbio);
431 spin_unlock_irqrestore(&table->cache_lock, flags);
435 * remove all cached entries and free the hash table
438 void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
440 if (!info->stripe_hash_table)
442 btrfs_clear_rbio_cache(info);
443 kvfree(info->stripe_hash_table);
444 info->stripe_hash_table = NULL;
/*
 * insert an rbio into the stripe cache.  It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
458 static void cache_rbio(struct btrfs_raid_bio *rbio)
460 struct btrfs_stripe_hash_table *table;
463 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
466 table = rbio->fs_info->stripe_hash_table;
468 spin_lock_irqsave(&table->cache_lock, flags);
469 spin_lock(&rbio->bio_list_lock);
471 /* bump our ref if we were not in the list before */
472 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
473 refcount_inc(&rbio->refs);
475 if (!list_empty(&rbio->stripe_cache)){
476 list_move(&rbio->stripe_cache, &table->stripe_cache);
478 list_add(&rbio->stripe_cache, &table->stripe_cache);
479 table->cache_size += 1;
482 spin_unlock(&rbio->bio_list_lock);
484 if (table->cache_size > RBIO_CACHE_SIZE) {
485 struct btrfs_raid_bio *found;
487 found = list_entry(table->stripe_cache.prev,
488 struct btrfs_raid_bio,
492 __remove_rbio_from_cache(found);
495 spin_unlock_irqrestore(&table->cache_lock, flags);
/*
 * helper function to run the xor_blocks api.  It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);
		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}
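/*
 * A hypothetical walk-through (illustrative numbers only): if MAX_XOR_BLOCKS
 * were 4 and run_xor() were called with src_cnt == 6, the loop above would
 * invoke xor_blocks() twice, first with sources pages[0..3] and then with
 * pages[4..5], xoring each batch into the same dest page pages[6].
 */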
519 * returns true if the bio list inside this rbio
520 * covers an entire stripe (no rmw required).
521 * Must be called with the bio list lock held, or
522 * at a time when you know it is impossible to add
523 * new bios into the list
525 static int __rbio_is_full(struct btrfs_raid_bio *rbio)
527 unsigned long size = rbio->bio_list_bytes;
	if (size != rbio->nr_data * rbio->stripe_len)
		return 0;

	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
	return 1;
}
537 static int rbio_is_full(struct btrfs_raid_bio *rbio)
542 spin_lock_irqsave(&rbio->bio_list_lock, flags);
543 ret = __rbio_is_full(rbio);
544 spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
549 * returns 1 if it is safe to merge two rbios together.
550 * The merging is safe if the two rbios correspond to
551 * the same stripe and if they are both going in the same
552 * direction (read vs write), and if neither one is
553 * locked for final IO
555 * The caller is responsible for locking such that
556 * rmw_locked is safe to test
558 static int rbio_can_merge(struct btrfs_raid_bio *last,
559 struct btrfs_raid_bio *cur)
561 if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
562 test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us.  We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;
576 if (last->bbio->raid_map[0] !=
577 cur->bbio->raid_map[0])
580 /* we can't merge with different operations */
581 if (last->operation != cur->operation)
	/*
	 * We have to read the full stripe from the drive, then
	 * check and repair the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;
594 if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
597 if (last->operation == BTRFS_RBIO_READ_REBUILD) {
598 int fa = last->faila;
599 int fb = last->failb;
600 int cur_fa = cur->faila;
601 int cur_fb = cur->failb;
603 if (last->faila >= last->failb) {
608 if (cur->faila >= cur->failb) {
613 if (fa != cur_fa || fb != cur_fb)
619 static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
622 return stripe * rbio->stripe_npages + index;
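/*
 * Layout reminder (numbers are illustrative): stripe_pages[] holds the pages
 * for data stripe 0 first, then data stripe 1, ..., then P and (for raid6) Q,
 * each stripe_npages long.  With a hypothetical stripe_npages of 16, page 3
 * of stripe 2 lives at index 2 * 16 + 3 = 35.
 */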
626 * these are just the pages from the rbio array, not from anything
627 * the FS sent down to us
629 static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
632 return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
636 * helper to index into the pstripe
638 static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
640 return rbio_stripe_page(rbio, rbio->nr_data, index);
644 * helper to index into the qstripe, returns null
645 * if there is no qstripe
647 static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
649 if (rbio->nr_data + 1 == rbio->real_stripes)
651 return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
655 * The first stripe in the table for a logical address
656 * has the lock. rbios are added in one of three ways:
 * 1) Nobody has the stripe locked yet.  The rbio is given
 *    the lock and 0 is returned.  The caller must start the IO
 *    themselves.
662 * 2) Someone has the stripe locked, but we're able to merge
663 * with the lock owner. The rbio is freed and the IO will
664 * start automatically along with the existing rbio. 1 is returned.
666 * 3) Someone has the stripe locked, but we're not able to merge.
667 * The rbio is added to the lock owner's plug list, or merged into
668 * an rbio already on the plug list. When the lock owner unlocks,
669 * the next rbio on the list is run and the IO is started automatically.
672 * If we return 0, the caller still owns the rbio and must continue with
673 * IO submission. If we return 1, the caller must assume the rbio has
674 * already been freed.
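 *
 * A minimal caller sketch (illustrative only, mirroring full_stripe_write()
 * further down): the caller drives the IO only while it still owns the rbio:
 *
 *	if (lock_stripe_add(rbio) == 0)
 *		finish_rmw(rbio);
 */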
676 static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
678 int bucket = rbio_bucket(rbio);
679 struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
680 struct btrfs_raid_bio *cur;
681 struct btrfs_raid_bio *pending;
683 struct btrfs_raid_bio *freeit = NULL;
684 struct btrfs_raid_bio *cache_drop = NULL;
687 spin_lock_irqsave(&h->lock, flags);
688 list_for_each_entry(cur, &h->hash_list, hash_list) {
689 if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
690 spin_lock(&cur->bio_list_lock);
692 /* can we steal this cached rbio's pages? */
693 if (bio_list_empty(&cur->bio_list) &&
694 list_empty(&cur->plug_list) &&
695 test_bit(RBIO_CACHE_BIT, &cur->flags) &&
696 !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
697 list_del_init(&cur->hash_list);
698 refcount_dec(&cur->refs);
700 steal_rbio(cur, rbio);
702 spin_unlock(&cur->bio_list_lock);
707 /* can we merge into the lock owner? */
708 if (rbio_can_merge(cur, rbio)) {
709 merge_rbio(cur, rbio);
710 spin_unlock(&cur->bio_list_lock);
718 * we couldn't merge with the running
719 * rbio, see if we can merge with the
720 * pending ones. We don't have to
721 * check for rmw_locked because there
722 * is no way they are inside finish_rmw
725 list_for_each_entry(pending, &cur->plug_list,
727 if (rbio_can_merge(pending, rbio)) {
728 merge_rbio(pending, rbio);
729 spin_unlock(&cur->bio_list_lock);
			/*
			 * no merging, put us on the tail of the plug list,
			 * our rbio will be started when the currently
			 * running rbio unlocks
			 */
740 list_add_tail(&rbio->plug_list, &cur->plug_list);
741 spin_unlock(&cur->bio_list_lock);
747 refcount_inc(&rbio->refs);
748 list_add(&rbio->hash_list, &h->hash_list);
750 spin_unlock_irqrestore(&h->lock, flags);
752 remove_rbio_from_cache(cache_drop);
754 __free_raid_bio(freeit);
759 * called as rmw or parity rebuild is completed. If the plug list has more
760 * rbios waiting for this stripe, the next one on the list will be started
762 static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
765 struct btrfs_stripe_hash *h;
769 bucket = rbio_bucket(rbio);
770 h = rbio->fs_info->stripe_hash_table->table + bucket;
772 if (list_empty(&rbio->plug_list))
775 spin_lock_irqsave(&h->lock, flags);
776 spin_lock(&rbio->bio_list_lock);
778 if (!list_empty(&rbio->hash_list)) {
780 * if we're still cached and there is no other IO
781 * to perform, just leave this rbio here for others
782 * to steal from later
784 if (list_empty(&rbio->plug_list) &&
785 test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
787 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
788 BUG_ON(!bio_list_empty(&rbio->bio_list));
792 list_del_init(&rbio->hash_list);
793 refcount_dec(&rbio->refs);
796 * we use the plug list to hold all the rbios
797 * waiting for the chance to lock this stripe.
798 * hand the lock over to one of them.
800 if (!list_empty(&rbio->plug_list)) {
801 struct btrfs_raid_bio *next;
802 struct list_head *head = rbio->plug_list.next;
804 next = list_entry(head, struct btrfs_raid_bio,
807 list_del_init(&rbio->plug_list);
809 list_add(&next->hash_list, &h->hash_list);
810 refcount_inc(&next->refs);
811 spin_unlock(&rbio->bio_list_lock);
812 spin_unlock_irqrestore(&h->lock, flags);
814 if (next->operation == BTRFS_RBIO_READ_REBUILD)
815 async_read_rebuild(next);
816 else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
817 steal_rbio(rbio, next);
818 async_read_rebuild(next);
819 } else if (next->operation == BTRFS_RBIO_WRITE) {
820 steal_rbio(rbio, next);
821 async_rmw_stripe(next);
822 } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
823 steal_rbio(rbio, next);
824 async_scrub_parity(next);
831 spin_unlock(&rbio->bio_list_lock);
832 spin_unlock_irqrestore(&h->lock, flags);
836 remove_rbio_from_cache(rbio);
839 static void __free_raid_bio(struct btrfs_raid_bio *rbio)
843 if (!refcount_dec_and_test(&rbio->refs))
846 WARN_ON(!list_empty(&rbio->stripe_cache));
847 WARN_ON(!list_empty(&rbio->hash_list));
848 WARN_ON(!bio_list_empty(&rbio->bio_list));
850 for (i = 0; i < rbio->nr_pages; i++) {
851 if (rbio->stripe_pages[i]) {
852 __free_page(rbio->stripe_pages[i]);
853 rbio->stripe_pages[i] = NULL;
857 btrfs_put_bbio(rbio->bbio);
861 static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
868 cur->bi_status = err;
875 * this frees the rbio and runs through all the bios in the
876 * bio_list and calls end_io on them
878 static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
880 struct bio *cur = bio_list_get(&rbio->bio_list);
883 if (rbio->generic_bio_cnt)
884 btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
	 * At this moment, rbio->bio_list is empty, however since rbio does not
	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
	 * hash list, rbio may be merged with others so that rbio->bio_list
	 * becomes non-empty.
891 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
892 * more and we can call bio_endio() on all queued bios.
895 extra = bio_list_get(&rbio->bio_list);
896 __free_raid_bio(rbio);
898 rbio_endio_bio_list(cur, err);
900 rbio_endio_bio_list(extra, err);
904 * end io function used by finish_rmw. When we finally
905 * get here, we've written a full stripe
907 static void raid_write_end_io(struct bio *bio)
909 struct btrfs_raid_bio *rbio = bio->bi_private;
910 blk_status_t err = bio->bi_status;
914 fail_bio_stripe(rbio, bio);
918 if (!atomic_dec_and_test(&rbio->stripes_pending))
	/* OK, we have written all the stripes we need to. */
924 max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
925 0 : rbio->bbio->max_errors;
926 if (atomic_read(&rbio->error) > max_errors)
929 rbio_orig_end_io(rbio, err);
933 * the read/modify/write code wants to use the original bio for
934 * any pages it included, and then use the rbio for everything
935 * else. This function decides if a given index (stripe number)
936 * and page number in that stripe fall inside the original bio
939 * if you set bio_list_only, you'll get a NULL back for any ranges
940 * that are outside the bio_list
942 * This doesn't take any refs on anything, you get a bare page pointer
943 * and the caller must bump refs as required.
945 * You must call index_rbio_pages once before you can trust
946 * the answers from this function.
948 static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
949 int index, int pagenr, int bio_list_only)
952 struct page *p = NULL;
954 chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
956 spin_lock_irq(&rbio->bio_list_lock);
957 p = rbio->bio_pages[chunk_page];
958 spin_unlock_irq(&rbio->bio_list_lock);
960 if (p || bio_list_only)
963 return rbio->stripe_pages[chunk_page];
/*
 * number of pages we need for the entire stripe across all the
 * devices
 */
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
972 return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
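/*
 * Worked example (hypothetical numbers): with a 64KiB stripe_len, 4KiB pages
 * and 3 stripes (2 data + P), this is DIV_ROUND_UP(65536, 4096) * 3 =
 * 16 * 3 = 48 pages.
 */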
/*
 * allocation and initial setup for the btrfs_raid_bio.  Note
 * that this does not allocate any pages for rbio->stripe_pages.
 */
979 static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
980 struct btrfs_bio *bbio,
983 struct btrfs_raid_bio *rbio;
985 int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
986 int num_pages = rbio_nr_pages(stripe_len, real_stripes);
987 int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
990 rbio = kzalloc(sizeof(*rbio) +
991 sizeof(*rbio->stripe_pages) * num_pages +
992 sizeof(*rbio->bio_pages) * num_pages +
993 sizeof(*rbio->finish_pointers) * real_stripes +
994 sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) +
995 sizeof(*rbio->finish_pbitmap) *
		       BITS_TO_LONGS(stripe_npages),
		       GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);
1001 bio_list_init(&rbio->bio_list);
1002 INIT_LIST_HEAD(&rbio->plug_list);
1003 spin_lock_init(&rbio->bio_list_lock);
1004 INIT_LIST_HEAD(&rbio->stripe_cache);
1005 INIT_LIST_HEAD(&rbio->hash_list);
1007 rbio->fs_info = fs_info;
1008 rbio->stripe_len = stripe_len;
1009 rbio->nr_pages = num_pages;
1010 rbio->real_stripes = real_stripes;
1011 rbio->stripe_npages = stripe_npages;
1014 refcount_set(&rbio->refs, 1);
1015 atomic_set(&rbio->error, 0);
1016 atomic_set(&rbio->stripes_pending, 0);
1019 * the stripe_pages, bio_pages, etc arrays point to the extra
1020 * memory we allocated past the end of the rbio
1023 #define CONSUME_ALLOC(ptr, count) do { \
1025 p = (unsigned char *)p + sizeof(*(ptr)) * (count); \
1027 CONSUME_ALLOC(rbio->stripe_pages, num_pages);
1028 CONSUME_ALLOC(rbio->bio_pages, num_pages);
1029 CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
1030 CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages));
1031 CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages));
1032 #undef CONSUME_ALLOC
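	/*
	 * After the CONSUME_ALLOC block above, the single kzalloc'd region is
	 * laid out as: the struct btrfs_raid_bio itself, then
	 * stripe_pages[num_pages], bio_pages[num_pages],
	 * finish_pointers[real_stripes], and finally dbitmap and
	 * finish_pbitmap (each BITS_TO_LONGS(stripe_npages) longs), in that
	 * order.
	 */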
1034 if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1035 nr_data = real_stripes - 1;
1036 else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1037 nr_data = real_stripes - 2;
1041 rbio->nr_data = nr_data;
1045 /* allocate pages for all the stripes in the bio, including parity */
1046 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
1051 for (i = 0; i < rbio->nr_pages; i++) {
1052 if (rbio->stripe_pages[i])
1054 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1057 rbio->stripe_pages[i] = page;
1062 /* only allocate pages for p/q stripes */
1063 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
1068 i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
1070 for (; i < rbio->nr_pages; i++) {
1071 if (rbio->stripe_pages[i])
1073 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1076 rbio->stripe_pages[i] = page;
1082 * add a single page from a specific stripe into our list of bios for IO
1083 * this will try to merge into existing bios if possible, and returns
1084 * zero if all went well.
1086 static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1087 struct bio_list *bio_list,
1090 unsigned long page_index,
1091 unsigned long bio_max_len)
1093 struct bio *last = bio_list->tail;
1097 struct btrfs_bio_stripe *stripe;
1100 stripe = &rbio->bbio->stripes[stripe_nr];
1101 disk_start = stripe->physical + (page_index << PAGE_SHIFT);
1103 /* if the device is missing, just fail this stripe */
1104 if (!stripe->dev->bdev)
1105 return fail_rbio_index(rbio, stripe_nr);
1107 /* see if we can add this page onto our existing bio */
1109 last_end = (u64)last->bi_iter.bi_sector << 9;
1110 last_end += last->bi_iter.bi_size;
1113 * we can't merge these if they are from different
1114 * devices or if they are not contiguous
1116 if (last_end == disk_start && stripe->dev->bdev &&
1118 last->bi_disk == stripe->dev->bdev->bd_disk &&
1119 last->bi_partno == stripe->dev->bdev->bd_partno) {
1120 ret = bio_add_page(last, page, PAGE_SIZE, 0);
1121 if (ret == PAGE_SIZE)
1126 /* put a new bio on the list */
1127 bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
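	/*
	 * The GCC "?:" above sizes the bio for bio_max_len >> PAGE_SHIFT
	 * page vectors, falling back to a single page vector when that
	 * works out to zero.
	 */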
1128 bio->bi_iter.bi_size = 0;
1129 bio_set_dev(bio, stripe->dev->bdev);
1130 bio->bi_iter.bi_sector = disk_start >> 9;
1132 bio_add_page(bio, page, PAGE_SIZE, 0);
1133 bio_list_add(bio_list, bio);
1138 * while we're doing the read/modify/write cycle, we could
1139 * have errors in reading pages off the disk. This checks
1140 * for errors and if we're not able to read the page it'll
1141 * trigger parity reconstruction. The rmw will be finished
1142 * after we've reconstructed the failed stripes
1144 static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
1146 if (rbio->faila >= 0 || rbio->failb >= 0) {
1147 BUG_ON(rbio->faila == rbio->real_stripes - 1);
1148 __raid56_parity_recover(rbio);
/*
 * helper function to walk our bio list and populate the bio_pages array with
 * the result.  This seems expensive, but it is faster than constantly
 * searching through the bio list as we setup the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from page_in_rbio
 */
1162 static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1166 unsigned long stripe_offset;
1167 unsigned long page_index;
1169 spin_lock_irq(&rbio->bio_list_lock);
1170 bio_list_for_each(bio, &rbio->bio_list) {
1171 struct bio_vec bvec;
1172 struct bvec_iter iter;
1175 start = (u64)bio->bi_iter.bi_sector << 9;
1176 stripe_offset = start - rbio->bbio->raid_map[0];
1177 page_index = stripe_offset >> PAGE_SHIFT;
1179 if (bio_flagged(bio, BIO_CLONED))
1180 bio->bi_iter = btrfs_io_bio(bio)->iter;
1182 bio_for_each_segment(bvec, bio, iter) {
1183 rbio->bio_pages[page_index + i] = bvec.bv_page;
1187 spin_unlock_irq(&rbio->bio_list_lock);
1191 * this is called from one of two situations. We either
1192 * have a full stripe from the higher layers, or we've read all
 * the missing bits off disk.
 *
 * This will calculate the parity and then send down any
 * writes that are required.
 */
1198 static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1200 struct btrfs_bio *bbio = rbio->bbio;
1201 void **pointers = rbio->finish_pointers;
1202 int nr_data = rbio->nr_data;
1207 struct bio_list bio_list;
1211 bio_list_init(&bio_list);
1213 if (rbio->real_stripes - rbio->nr_data == 1) {
1214 p_stripe = rbio->real_stripes - 1;
1215 } else if (rbio->real_stripes - rbio->nr_data == 2) {
1216 p_stripe = rbio->real_stripes - 2;
1217 q_stripe = rbio->real_stripes - 1;
1222 /* at this point we either have a full stripe,
1223 * or we've read the full stripe from the drive.
1224 * recalculate the parity and write the new results.
1226 * We're not allowed to add any new bios to the
1227 * bio list here, anyone else that wants to
1228 * change this stripe needs to do their own rmw.
1230 spin_lock_irq(&rbio->bio_list_lock);
1231 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1232 spin_unlock_irq(&rbio->bio_list_lock);
1234 atomic_set(&rbio->error, 0);
1237 * now that we've set rmw_locked, run through the
1238 * bio list one last time and map the page pointers
1240 * We don't cache full rbios because we're assuming
1241 * the higher layers are unlikely to use this area of
1242 * the disk again soon. If they do use it again,
1243 * hopefully they will send another full bio.
1245 index_rbio_pages(rbio);
1246 if (!rbio_is_full(rbio))
1247 cache_rbio_pages(rbio);
1249 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1251 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1253 /* first collect one page from each data stripe */
1254 for (stripe = 0; stripe < nr_data; stripe++) {
1255 p = page_in_rbio(rbio, stripe, pagenr, 0);
1256 pointers[stripe] = kmap(p);
1259 /* then add the parity stripe */
1260 p = rbio_pstripe_page(rbio, pagenr);
1262 pointers[stripe++] = kmap(p);
1264 if (q_stripe != -1) {
1267 * raid6, add the qstripe and call the
1268 * library function to fill in our p/q
1270 p = rbio_qstripe_page(rbio, pagenr);
1272 pointers[stripe++] = kmap(p);
1274 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
1278 memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
1279 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
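			/*
			 * The two lines above compute the raid5 parity
			 * P = D0 ^ D1 ^ ... ^ D(nr_data-1): the memcpy seeds
			 * the parity page with D0 and run_xor() folds the
			 * remaining nr_data - 1 data pages into it.
			 */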
1283 for (stripe = 0; stripe < rbio->real_stripes; stripe++)
1284 kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
1292 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1293 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1295 if (stripe < rbio->nr_data) {
1296 page = page_in_rbio(rbio, stripe, pagenr, 1);
1300 page = rbio_stripe_page(rbio, stripe, pagenr);
1303 ret = rbio_add_io_page(rbio, &bio_list,
1304 page, stripe, pagenr, rbio->stripe_len);
1310 if (likely(!bbio->num_tgtdevs))
1313 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1314 if (!bbio->tgtdev_map[stripe])
1317 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1319 if (stripe < rbio->nr_data) {
1320 page = page_in_rbio(rbio, stripe, pagenr, 1);
1324 page = rbio_stripe_page(rbio, stripe, pagenr);
1327 ret = rbio_add_io_page(rbio, &bio_list, page,
1328 rbio->bbio->tgtdev_map[stripe],
1329 pagenr, rbio->stripe_len);
1336 atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
1337 BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
1340 bio = bio_list_pop(&bio_list);
1344 bio->bi_private = rbio;
1345 bio->bi_end_io = raid_write_end_io;
1346 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
1353 rbio_orig_end_io(rbio, BLK_STS_IOERR);
1355 while ((bio = bio_list_pop(&bio_list)))
1360 * helper to find the stripe number for a given bio. Used to figure out which
1361 * stripe has failed. This expects the bio to correspond to a physical disk,
1362 * so it looks up based on physical sector numbers.
1364 static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1367 u64 physical = bio->bi_iter.bi_sector;
1370 struct btrfs_bio_stripe *stripe;
1374 for (i = 0; i < rbio->bbio->num_stripes; i++) {
1375 stripe = &rbio->bbio->stripes[i];
1376 stripe_start = stripe->physical;
1377 if (physical >= stripe_start &&
1378 physical < stripe_start + rbio->stripe_len &&
1379 stripe->dev->bdev &&
1380 bio->bi_disk == stripe->dev->bdev->bd_disk &&
1381 bio->bi_partno == stripe->dev->bdev->bd_partno) {
1389 * helper to find the stripe number for a given
1390 * bio (before mapping). Used to figure out which stripe has
1391 * failed. This looks up based on logical block numbers.
1393 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
1396 u64 logical = bio->bi_iter.bi_sector;
1402 for (i = 0; i < rbio->nr_data; i++) {
1403 stripe_start = rbio->bbio->raid_map[i];
1404 if (logical >= stripe_start &&
1405 logical < stripe_start + rbio->stripe_len) {
1413 * returns -EIO if we had too many failures
1415 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
1417 unsigned long flags;
1420 spin_lock_irqsave(&rbio->bio_list_lock, flags);
1422 /* we already know this stripe is bad, move on */
1423 if (rbio->faila == failed || rbio->failb == failed)
1426 if (rbio->faila == -1) {
1427 /* first failure on this rbio */
1428 rbio->faila = failed;
1429 atomic_inc(&rbio->error);
1430 } else if (rbio->failb == -1) {
1431 /* second failure on this rbio */
1432 rbio->failb = failed;
1433 atomic_inc(&rbio->error);
1438 spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
1444 * helper to fail a stripe based on a physical disk
1447 static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
1450 int failed = find_bio_stripe(rbio, bio);
1455 return fail_rbio_index(rbio, failed);
1459 * this sets each page in the bio uptodate. It should only be used on private
1460 * rbio pages, nothing that comes in from the higher layers
1462 static void set_bio_pages_uptodate(struct bio *bio)
1464 struct bio_vec *bvec;
1467 ASSERT(!bio_flagged(bio, BIO_CLONED));
1469 bio_for_each_segment_all(bvec, bio, i)
1470 SetPageUptodate(bvec->bv_page);
1474 * end io for the read phase of the rmw cycle. All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of the
 * stripe.
 *
 * This will usually kick off finish_rmw once all the bios are read in, but it
1479 * may trigger parity reconstruction if we had any errors along the way
1481 static void raid_rmw_end_io(struct bio *bio)
1483 struct btrfs_raid_bio *rbio = bio->bi_private;
1486 fail_bio_stripe(rbio, bio);
1488 set_bio_pages_uptodate(bio);
1492 if (!atomic_dec_and_test(&rbio->stripes_pending))
1495 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
1499 * this will normally call finish_rmw to start our write
1500 * but if there are any failed stripes we'll reconstruct
1503 validate_rbio_for_rmw(rbio);
1508 rbio_orig_end_io(rbio, BLK_STS_IOERR);
1511 static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
1513 btrfs_init_work(&rbio->work, btrfs_rmw_helper, rmw_work, NULL, NULL);
1514 btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
1517 static void async_read_rebuild(struct btrfs_raid_bio *rbio)
1519 btrfs_init_work(&rbio->work, btrfs_rmw_helper,
1520 read_rebuild_work, NULL, NULL);
1522 btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
1526 * the stripe must be locked by the caller. It will
1527 * unlock after all the writes are done
1529 static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1531 int bios_to_read = 0;
1532 struct bio_list bio_list;
1538 bio_list_init(&bio_list);
1540 ret = alloc_rbio_pages(rbio);
1544 index_rbio_pages(rbio);
1546 atomic_set(&rbio->error, 0);
1548 * build a list of bios to read all the missing parts of this
1551 for (stripe = 0; stripe < rbio->nr_data; stripe++) {
1552 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1555 * we want to find all the pages missing from
1556 * the rbio and read them from the disk. If
1557 * page_in_rbio finds a page in the bio list
1558 * we don't need to read it off the stripe.
1560 page = page_in_rbio(rbio, stripe, pagenr, 1);
1564 page = rbio_stripe_page(rbio, stripe, pagenr);
1566 * the bio cache may have handed us an uptodate
1567 * page. If so, be happy and use it
1569 if (PageUptodate(page))
1572 ret = rbio_add_io_page(rbio, &bio_list, page,
1573 stripe, pagenr, rbio->stripe_len);
1579 bios_to_read = bio_list_size(&bio_list);
1580 if (!bios_to_read) {
1582 * this can happen if others have merged with
1583 * us, it means there is nothing left to read.
1584 * But if there are missing devices it may not be
1585 * safe to do the full stripe write yet.
1591 * the bbio may be freed once we submit the last bio. Make sure
1592 * not to touch it after that
1594 atomic_set(&rbio->stripes_pending, bios_to_read);
1596 bio = bio_list_pop(&bio_list);
1600 bio->bi_private = rbio;
1601 bio->bi_end_io = raid_rmw_end_io;
1602 bio_set_op_attrs(bio, REQ_OP_READ, 0);
1604 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
1608 /* the actual write will happen once the reads are done */
1612 rbio_orig_end_io(rbio, BLK_STS_IOERR);
1614 while ((bio = bio_list_pop(&bio_list)))
1620 validate_rbio_for_rmw(rbio);
1625 * if the upper layers pass in a full stripe, we thank them by only allocating
1626 * enough pages to hold the parity, and sending it all down quickly.
1628 static int full_stripe_write(struct btrfs_raid_bio *rbio)
1632 ret = alloc_rbio_parity_pages(rbio);
1634 __free_raid_bio(rbio);
1638 ret = lock_stripe_add(rbio);
1645 * partial stripe writes get handed over to async helpers.
1646 * We're really hoping to merge a few more writes into this
1647 * rbio before calculating new parity
1649 static int partial_stripe_write(struct btrfs_raid_bio *rbio)
1653 ret = lock_stripe_add(rbio);
1655 async_rmw_stripe(rbio);
1660 * sometimes while we were reading from the drive to
 * recalculate parity, enough new bios come in to create
1662 * a full stripe. So we do a check here to see if we can
1663 * go directly to finish_rmw
1665 static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
1667 /* head off into rmw land if we don't have a full stripe */
1668 if (!rbio_is_full(rbio))
1669 return partial_stripe_write(rbio);
1670 return full_stripe_write(rbio);
1674 * We use plugging call backs to collect full stripes.
1675 * Any time we get a partial stripe write while plugged
1676 * we collect it into a list. When the unplug comes down,
1677 * we sort the list by logical block number and merge
1678 * everything we can into the same rbios
1680 struct btrfs_plug_cb {
1681 struct blk_plug_cb cb;
1682 struct btrfs_fs_info *info;
1683 struct list_head rbio_list;
1684 struct btrfs_work work;
1688 * rbios on the plug list are sorted for easier merging.
1690 static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
1692 struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
1694 struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1696 u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1697 u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}
1706 static void run_plug(struct btrfs_plug_cb *plug)
1708 struct btrfs_raid_bio *cur;
1709 struct btrfs_raid_bio *last = NULL;
	/*
	 * sort our plug list then try to merge
	 * everything we can in hopes of creating full
	 * stripes
	 */
	list_sort(NULL, &plug->rbio_list, plug_cmp);
1717 while (!list_empty(&plug->rbio_list)) {
1718 cur = list_entry(plug->rbio_list.next,
1719 struct btrfs_raid_bio, plug_list);
1720 list_del_init(&cur->plug_list);
1722 if (rbio_is_full(cur)) {
1723 /* we have a full stripe, send it down */
1724 full_stripe_write(cur);
1728 if (rbio_can_merge(last, cur)) {
1729 merge_rbio(last, cur);
1730 __free_raid_bio(cur);
1734 __raid56_parity_write(last);
1739 __raid56_parity_write(last);
1745 * if the unplug comes from schedule, we have to push the
1746 * work off to a helper thread
1748 static void unplug_work(struct btrfs_work *work)
1750 struct btrfs_plug_cb *plug;
1751 plug = container_of(work, struct btrfs_plug_cb, work);
1755 static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1757 struct btrfs_plug_cb *plug;
1758 plug = container_of(cb, struct btrfs_plug_cb, cb);
1760 if (from_schedule) {
1761 btrfs_init_work(&plug->work, btrfs_rmw_helper,
1762 unplug_work, NULL, NULL);
1763 btrfs_queue_work(plug->info->rmw_workers,
1771 * our main entry point for writes from the rest of the FS.
1773 int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
1774 struct btrfs_bio *bbio, u64 stripe_len)
1776 struct btrfs_raid_bio *rbio;
1777 struct btrfs_plug_cb *plug = NULL;
1778 struct blk_plug_cb *cb;
1781 rbio = alloc_rbio(fs_info, bbio, stripe_len);
1783 btrfs_put_bbio(bbio);
1784 return PTR_ERR(rbio);
1786 bio_list_add(&rbio->bio_list, bio);
1787 rbio->bio_list_bytes = bio->bi_iter.bi_size;
1788 rbio->operation = BTRFS_RBIO_WRITE;
1790 btrfs_bio_counter_inc_noblocked(fs_info);
1791 rbio->generic_bio_cnt = 1;
1794 * don't plug on full rbios, just get them out the door
1795 * as quickly as we can
1797 if (rbio_is_full(rbio)) {
1798 ret = full_stripe_write(rbio);
1800 btrfs_bio_counter_dec(fs_info);
1804 cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
1806 plug = container_of(cb, struct btrfs_plug_cb, cb);
1808 plug->info = fs_info;
1809 INIT_LIST_HEAD(&plug->rbio_list);
1811 list_add_tail(&rbio->plug_list, &plug->rbio_list);
1814 ret = __raid56_parity_write(rbio);
1816 btrfs_bio_counter_dec(fs_info);
1822 * all parity reconstruction happens here. We've read in everything
1823 * we can find from the drives and this does the heavy lifting of
1824 * sorting the good from the bad.
1826 static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1830 int faila = -1, failb = -1;
1835 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1837 err = BLK_STS_RESOURCE;
1841 faila = rbio->faila;
1842 failb = rbio->failb;
1844 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1845 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1846 spin_lock_irq(&rbio->bio_list_lock);
1847 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1848 spin_unlock_irq(&rbio->bio_list_lock);
1851 index_rbio_pages(rbio);
1853 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		 * Now we just use the bitmap to mark the horizontal stripes in
		 * which we have data when doing parity scrub.
1858 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1859 !test_bit(pagenr, rbio->dbitmap))
		/*
		 * setup our array of pointers with pages
		 * from each stripe
		 */
1865 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1867 * if we're rebuilding a read, we have to use
1868 * pages from the bio list
1870 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1871 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1872 (stripe == faila || stripe == failb)) {
1873 page = page_in_rbio(rbio, stripe, pagenr, 0);
1875 page = rbio_stripe_page(rbio, stripe, pagenr);
1877 pointers[stripe] = kmap(page);
1880 /* all raid6 handling here */
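		/*
		 * Summary of the branches below: a single failed data stripe
		 * falls through to the pstripe xor path; a lone P failure is
		 * returned as an error (see the TODO); data + Q failed is
		 * treated like raid5 and also uses the pstripe path; data + P
		 * failed uses raid6_datap_recov(); two failed data stripes
		 * use raid6_2data_recov(); P + Q both failed cannot be
		 * repaired here.
		 */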
1881 if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1883 * single failure, rebuild from parity raid5
1887 if (faila == rbio->nr_data) {
1889 * Just the P stripe has failed, without
1890 * a bad data or Q stripe.
1891 * TODO, we should redo the xor here.
1893 err = BLK_STS_IOERR;
1897 * a single failure in raid6 is rebuilt
1898 * in the pstripe code below
1903 /* make sure our ps and qs are in order */
1904 if (faila > failb) {
			/*
			 * if the q stripe is failed, do a pstripe reconstruction
			 * from the xors.
			 * If both the q stripe and the P stripe are failed, we're
			 * here due to a crc mismatch and we can't give them the
			 * data they want
			 */
1916 if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
1917 if (rbio->bbio->raid_map[faila] ==
1919 err = BLK_STS_IOERR;
1923 * otherwise we have one bad data stripe and
1924 * a good P stripe. raid5!
1929 if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
1930 raid6_datap_recov(rbio->real_stripes,
1931 PAGE_SIZE, faila, pointers);
1933 raid6_2data_recov(rbio->real_stripes,
1934 PAGE_SIZE, faila, failb,
1940 /* rebuild from P stripe here (raid5 or raid6) */
1941 BUG_ON(failb != -1);
1943 /* Copy parity block into failed block to start with */
1944 memcpy(pointers[faila],
1945 pointers[rbio->nr_data],
1948 /* rearrange the pointer array */
1949 p = pointers[faila];
1950 for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
1951 pointers[stripe] = pointers[stripe + 1];
1952 pointers[rbio->nr_data - 1] = p;
1954 /* xor in the rest */
1955 run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
1957 /* if we're doing this rebuild as part of an rmw, go through
1958 * and set all of our private rbio pages in the
1959 * failed stripes as uptodate. This way finish_rmw will
1960 * know they can be trusted. If this was a read reconstruction,
1961 * other endio functions will fiddle the uptodate bits
1963 if (rbio->operation == BTRFS_RBIO_WRITE) {
1964 for (i = 0; i < rbio->stripe_npages; i++) {
1966 page = rbio_stripe_page(rbio, faila, i);
1967 SetPageUptodate(page);
1970 page = rbio_stripe_page(rbio, failb, i);
1971 SetPageUptodate(page);
1975 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1977 * if we're rebuilding a read, we have to use
1978 * pages from the bio list
1980 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1981 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1982 (stripe == faila || stripe == failb)) {
1983 page = page_in_rbio(rbio, stripe, pagenr, 0);
1985 page = rbio_stripe_page(rbio, stripe, pagenr);
1997 * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a
1998 * valid rbio which is consistent with ondisk content, thus such a
1999 * valid rbio can be cached to avoid further disk reads.
2001 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2002 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
2004 * - In case of two failures, where rbio->failb != -1:
2006 * Do not cache this rbio since the above read reconstruction
2007 * (raid6_datap_recov() or raid6_2data_recov()) may have
2008 * changed some content of stripes which are not identical to
2009 * on-disk content any more, otherwise, a later write/recover
2010 * may steal stripe_pages from this rbio and end up with
2011 * corruptions or rebuild failures.
2013 * - In case of single failure, where rbio->failb == -1:
		 * Cache this rbio iff the above read reconstruction is
		 * executed without problems.
		 */
2018 if (err == BLK_STS_OK && rbio->failb < 0)
2019 cache_rbio_pages(rbio);
2021 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2023 rbio_orig_end_io(rbio, err);
2024 } else if (err == BLK_STS_OK) {
2028 if (rbio->operation == BTRFS_RBIO_WRITE)
2030 else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
2031 finish_parity_scrub(rbio, 0);
2035 rbio_orig_end_io(rbio, err);
2040 * This is called only for stripes we've read from disk to
2041 * reconstruct the parity.
2043 static void raid_recover_end_io(struct bio *bio)
2045 struct btrfs_raid_bio *rbio = bio->bi_private;
2048 * we only read stripe pages off the disk, set them
2049 * up to date if there were no errors
2052 fail_bio_stripe(rbio, bio);
2054 set_bio_pages_uptodate(bio);
2057 if (!atomic_dec_and_test(&rbio->stripes_pending))
2060 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2061 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2063 __raid_recover_end_io(rbio);
2067 * reads everything we need off the disk to reconstruct
2068 * the parity. endio handlers trigger final reconstruction
2069 * when the IO is done.
2071 * This is used both for reads from the higher layers and for
2072 * parity construction required to finish a rmw cycle.
2074 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
2076 int bios_to_read = 0;
2077 struct bio_list bio_list;
2083 bio_list_init(&bio_list);
2085 ret = alloc_rbio_pages(rbio);
2089 atomic_set(&rbio->error, 0);
2092 * read everything that hasn't failed. Thanks to the
2093 * stripe cache, it is possible that some or all of these
2094 * pages are going to be uptodate.
2096 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2097 if (rbio->faila == stripe || rbio->failb == stripe) {
2098 atomic_inc(&rbio->error);
2102 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
2106 * the rmw code may have already read this
2109 p = rbio_stripe_page(rbio, stripe, pagenr);
2110 if (PageUptodate(p))
2113 ret = rbio_add_io_page(rbio, &bio_list,
2114 rbio_stripe_page(rbio, stripe, pagenr),
2115 stripe, pagenr, rbio->stripe_len);
2121 bios_to_read = bio_list_size(&bio_list);
2122 if (!bios_to_read) {
2124 * we might have no bios to read just because the pages
2125 * were up to date, or we might have no bios to read because
2126 * the devices were gone.
2128 if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
2129 __raid_recover_end_io(rbio);
2137 * the bbio may be freed once we submit the last bio. Make sure
2138 * not to touch it after that
2140 atomic_set(&rbio->stripes_pending, bios_to_read);
2142 bio = bio_list_pop(&bio_list);
2146 bio->bi_private = rbio;
2147 bio->bi_end_io = raid_recover_end_io;
2148 bio_set_op_attrs(bio, REQ_OP_READ, 0);
2150 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2158 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2159 rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
2160 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2162 while ((bio = bio_list_pop(&bio_list)))
2169 * the main entry point for reads from the higher layers. This
2170 * is really only called when the normal read path had a failure,
2171 * so we assume the bio they send down corresponds to a failed part
2174 int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
2175 struct btrfs_bio *bbio, u64 stripe_len,
2176 int mirror_num, int generic_io)
2178 struct btrfs_raid_bio *rbio;
2182 ASSERT(bbio->mirror_num == mirror_num);
2183 btrfs_io_bio(bio)->mirror_num = mirror_num;
2186 rbio = alloc_rbio(fs_info, bbio, stripe_len);
2189 btrfs_put_bbio(bbio);
2190 return PTR_ERR(rbio);
2193 rbio->operation = BTRFS_RBIO_READ_REBUILD;
2194 bio_list_add(&rbio->bio_list, bio);
2195 rbio->bio_list_bytes = bio->bi_iter.bi_size;
2197 rbio->faila = find_logical_bio_stripe(rbio, bio);
2198 if (rbio->faila == -1) {
2200 "%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
2201 __func__, (u64)bio->bi_iter.bi_sector << 9,
2202 (u64)bio->bi_iter.bi_size, bbio->map_type);
2204 btrfs_put_bbio(bbio);
2210 btrfs_bio_counter_inc_noblocked(fs_info);
2211 rbio->generic_bio_cnt = 1;
2213 btrfs_get_bbio(bbio);
2218 * for 'mirror == 2', reconstruct from all other stripes.
2219 * for 'mirror_num > 2', select a stripe to fail on every retry.
2221 if (mirror_num > 2) {
2223 * 'mirror == 3' is to fail the p stripe and
2224 * reconstruct from the q stripe. 'mirror > 3' is to
2225 * fail a data stripe and reconstruct from p+q stripe.
2227 rbio->failb = rbio->real_stripes - (mirror_num - 1);
2228 ASSERT(rbio->failb > 0);
2229 if (rbio->failb <= rbio->faila)
2233 ret = lock_stripe_add(rbio);
2236 * __raid56_parity_recover will end the bio with
2237 * any errors it hits. We don't want to return
2238 * its error value up the stack because our caller
2239 * will end up calling bio_endio with any nonzero
2243 __raid56_parity_recover(rbio);
	 * our rbio has been added to the list of
	 * rbios that will be handled after the
	 * current lock owner is done
	 */
2253 static void rmw_work(struct btrfs_work *work)
2255 struct btrfs_raid_bio *rbio;
2257 rbio = container_of(work, struct btrfs_raid_bio, work);
2258 raid56_rmw_stripe(rbio);
2261 static void read_rebuild_work(struct btrfs_work *work)
2263 struct btrfs_raid_bio *rbio;
2265 rbio = container_of(work, struct btrfs_raid_bio, work);
2266 __raid56_parity_recover(rbio);
2270 * The following code is used to scrub/replace the parity stripe
2272 * Caller must have already increased bio_counter for getting @bbio.
 * Note: We need to make sure that all the pages added into the scrub/replace
 * raid bio are correct and will not be changed during the scrub/replace.  That
 * is, those pages just hold metadata or file data with checksum.
 */
2279 struct btrfs_raid_bio *
2280 raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2281 struct btrfs_bio *bbio, u64 stripe_len,
2282 struct btrfs_device *scrub_dev,
2283 unsigned long *dbitmap, int stripe_nsectors)
2285 struct btrfs_raid_bio *rbio;
2288 rbio = alloc_rbio(fs_info, bbio, stripe_len);
2291 bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types
	 */
2296 ASSERT(!bio->bi_iter.bi_size);
2297 rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
	/*
	 * After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
	 * to the end position, so this search can start from the first parity
	 * stripe.
	 */
	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
2305 if (bbio->stripes[i].dev == scrub_dev) {
2310 ASSERT(i < rbio->real_stripes);
	/* For now we only support the case where sectorsize equals page size */
2313 ASSERT(fs_info->sectorsize == PAGE_SIZE);
2314 ASSERT(rbio->stripe_npages == stripe_nsectors);
2315 bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
2318 * We have already increased bio_counter when getting bbio, record it
2319 * so we can free it at rbio_orig_end_io().
2321 rbio->generic_bio_cnt = 1;
2326 /* Used for both parity scrub and missing. */
2327 void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2333 ASSERT(logical >= rbio->bbio->raid_map[0]);
2334 ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
2335 rbio->stripe_len * rbio->nr_data);
2336 stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
2337 index = stripe_offset >> PAGE_SHIFT;
2338 rbio->bio_pages[index] = page;
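	/*
	 * Illustrative example (hypothetical numbers): a page whose logical
	 * address is 12KiB past raid_map[0] lands at bio_pages[3] with 4KiB
	 * pages.
	 */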
/*
 * We only scrub the parity for the horizontal stripes where we have correct
 * data, so we don't need to allocate pages for all the stripes.
 */
2345 static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2352 for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
2353 for (i = 0; i < rbio->real_stripes; i++) {
2354 index = i * rbio->stripe_npages + bit;
2355 if (rbio->stripe_pages[index])
2358 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2361 rbio->stripe_pages[index] = page;
2367 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2370 struct btrfs_bio *bbio = rbio->bbio;
2371 void **pointers = rbio->finish_pointers;
2372 unsigned long *pbitmap = rbio->finish_pbitmap;
2373 int nr_data = rbio->nr_data;
2378 struct page *p_page = NULL;
2379 struct page *q_page = NULL;
2380 struct bio_list bio_list;
2385 bio_list_init(&bio_list);
2387 if (rbio->real_stripes - rbio->nr_data == 1) {
2388 p_stripe = rbio->real_stripes - 1;
2389 } else if (rbio->real_stripes - rbio->nr_data == 2) {
2390 p_stripe = rbio->real_stripes - 2;
2391 q_stripe = rbio->real_stripes - 1;
2396 if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
2398 bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
	 * The higher layers (scrubber) are unlikely to
	 * use this area of the disk again soon, so don't cache
	 * it.
	 */
2406 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2411 p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2414 SetPageUptodate(p_page);
2416 if (q_stripe != -1) {
2417 q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2419 __free_page(p_page);
2422 SetPageUptodate(q_page);
2425 atomic_set(&rbio->error, 0);
2427 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2430 /* first collect one page from each data stripe */
2431 for (stripe = 0; stripe < nr_data; stripe++) {
2432 p = page_in_rbio(rbio, stripe, pagenr, 0);
2433 pointers[stripe] = kmap(p);
2436 /* then add the parity stripe */
2437 pointers[stripe++] = kmap(p_page);
2439 if (q_stripe != -1) {
2442 * raid6, add the qstripe and call the
2443 * library function to fill in our p/q
2445 pointers[stripe++] = kmap(q_page);
2447 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
2451 memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
2452 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
2455 /* Check scrubbing parity and repair it */
2456 p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2458 if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
2459 memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
			/* Parity is right, no need to write it back */
2462 bitmap_clear(rbio->dbitmap, pagenr, 1);
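		/*
		 * In short: the freshly computed parity in
		 * pointers[rbio->scrubp] is compared against the parity page
		 * read from disk; on a mismatch the correct data is copied
		 * over and the dbitmap bit stays set so the page is written
		 * back, otherwise the bit is cleared and the page is skipped.
		 */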
2465 for (stripe = 0; stripe < rbio->real_stripes; stripe++)
2466 kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
2469 __free_page(p_page);
2471 __free_page(q_page);
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
2479 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2482 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2483 ret = rbio_add_io_page(rbio, &bio_list,
2484 page, rbio->scrubp, pagenr, rbio->stripe_len);
2492 for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
2495 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2496 ret = rbio_add_io_page(rbio, &bio_list, page,
2497 bbio->tgtdev_map[rbio->scrubp],
2498 pagenr, rbio->stripe_len);
2504 nr_data = bio_list_size(&bio_list);
2506 /* Every parity is right */
2507 rbio_orig_end_io(rbio, BLK_STS_OK);
2511 atomic_set(&rbio->stripes_pending, nr_data);
2514 bio = bio_list_pop(&bio_list);
2518 bio->bi_private = rbio;
2519 bio->bi_end_io = raid_write_end_io;
2520 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2527 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2529 while ((bio = bio_list_pop(&bio_list)))
2533 static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2535 if (stripe >= 0 && stripe < rbio->nr_data)
2541 * While we're doing the parity check and repair, we could have errors
2542 * in reading pages off the disk. This checks for errors and if we're
2543 * not able to read the page it'll trigger parity reconstruction. The
 * parity scrub will be finished after we've reconstructed the failed
 * stripes
 */
2547 static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
2549 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2552 if (rbio->faila >= 0 || rbio->failb >= 0) {
2553 int dfail = 0, failp = -1;
2555 if (is_data_stripe(rbio, rbio->faila))
2557 else if (is_parity_stripe(rbio->faila))
2558 failp = rbio->faila;
2560 if (is_data_stripe(rbio, rbio->failb))
2562 else if (is_parity_stripe(rbio->failb))
2563 failp = rbio->failb;
		/*
		 * Because we can not use the parity that is under scrub to
		 * repair the data, our repair capability is reduced.
		 * (In the case of RAID5, we can not repair anything.)
		 */
		if (dfail > rbio->bbio->max_errors - 1)
			goto cleanup;

		/*
		 * If all the data is good and only the parity is wrong, just
		 * repair the parity.
		 */
2578 finish_parity_scrub(rbio, 0);
		/*
		 * Here we have one corrupted data stripe and one corrupted
		 * parity on RAID6.  If the corrupted parity is the one being
		 * scrubbed, we can luckily use the other parity to repair the
		 * data; otherwise we can not repair the data stripe.
		 */
2588 if (failp != rbio->scrubp)
2591 __raid_recover_end_io(rbio);
2593 finish_parity_scrub(rbio, 1);
2598 rbio_orig_end_io(rbio, BLK_STS_IOERR);
/*
 * end io for the read phase of the parity scrub.  All the bios here are
 * physical stripe bios we've read from the disk so we can recalculate the
 * parity of the stripe.
 *
 * This will usually kick off finish_parity_scrub once all the bios are read
 * in, but it may trigger parity reconstruction if we had any errors along
 * the way
 */
2609 static void raid56_parity_scrub_end_io(struct bio *bio)
2611 struct btrfs_raid_bio *rbio = bio->bi_private;
2614 fail_bio_stripe(rbio, bio);
2616 set_bio_pages_uptodate(bio);
2620 if (!atomic_dec_and_test(&rbio->stripes_pending))
2624 * this will normally call finish_rmw to start our write
2625 * but if there are any failed stripes we'll reconstruct
2628 validate_rbio_for_parity_scrub(rbio);
2631 static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
2633 int bios_to_read = 0;
2634 struct bio_list bio_list;
2640 bio_list_init(&bio_list);
2642 ret = alloc_rbio_essential_pages(rbio);
2646 atomic_set(&rbio->error, 0);
2648 * build a list of bios to read all the missing parts of this
2651 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2652 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2655 * we want to find all the pages missing from
2656 * the rbio and read them from the disk. If
2657 * page_in_rbio finds a page in the bio list
2658 * we don't need to read it off the stripe.
2660 page = page_in_rbio(rbio, stripe, pagenr, 1);
2664 page = rbio_stripe_page(rbio, stripe, pagenr);
2666 * the bio cache may have handed us an uptodate
2667 * page. If so, be happy and use it
2669 if (PageUptodate(page))
2672 ret = rbio_add_io_page(rbio, &bio_list, page,
2673 stripe, pagenr, rbio->stripe_len);
2679 bios_to_read = bio_list_size(&bio_list);
2680 if (!bios_to_read) {
2682 * this can happen if others have merged with
2683 * us, it means there is nothing left to read.
2684 * But if there are missing devices it may not be
2685 * safe to do the full stripe write yet.
2691 * the bbio may be freed once we submit the last bio. Make sure
2692 * not to touch it after that
2694 atomic_set(&rbio->stripes_pending, bios_to_read);
2696 bio = bio_list_pop(&bio_list);
2700 bio->bi_private = rbio;
2701 bio->bi_end_io = raid56_parity_scrub_end_io;
2702 bio_set_op_attrs(bio, REQ_OP_READ, 0);
2704 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2708 /* the actual write will happen once the reads are done */
2712 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2714 while ((bio = bio_list_pop(&bio_list)))
2720 validate_rbio_for_parity_scrub(rbio);
2723 static void scrub_parity_work(struct btrfs_work *work)
2725 struct btrfs_raid_bio *rbio;
2727 rbio = container_of(work, struct btrfs_raid_bio, work);
2728 raid56_parity_scrub_stripe(rbio);
2731 static void async_scrub_parity(struct btrfs_raid_bio *rbio)
2733 btrfs_init_work(&rbio->work, btrfs_rmw_helper,
2734 scrub_parity_work, NULL, NULL);
2736 btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
2739 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2741 if (!lock_stripe_add(rbio))
2742 async_scrub_parity(rbio);
2745 /* The following code is used for dev replace of a missing RAID 5/6 device. */
2747 struct btrfs_raid_bio *
2748 raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2749 struct btrfs_bio *bbio, u64 length)
2751 struct btrfs_raid_bio *rbio;
2753 rbio = alloc_rbio(fs_info, bbio, length);
2757 rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2758 bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types
	 */
2763 ASSERT(!bio->bi_iter.bi_size);
2765 rbio->faila = find_logical_bio_stripe(rbio, bio);
2766 if (rbio->faila == -1) {
2773 * When we get bbio, we have already increased bio_counter, record it
2774 * so we can free it at rbio_orig_end_io()
2776 rbio->generic_bio_cnt = 1;
2781 void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2783 if (!lock_stripe_add(rbio))
2784 async_read_rebuild(rbio);