fs/btrfs/scrub.c (linux.git)
1 /*
2  * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18
19 #include <linux/blkdev.h>
20 #include <linux/ratelimit.h>
21 #include "ctree.h"
22 #include "volumes.h"
23 #include "disk-io.h"
24 #include "ordered-data.h"
25 #include "transaction.h"
26 #include "backref.h"
27 #include "extent_io.h"
28 #include "dev-replace.h"
29 #include "check-integrity.h"
30 #include "rcu-string.h"
31 #include "raid56.h"
32
33 /*
34  * This is only the first step towards a full-featured scrub. It reads all
35  * extents and super blocks and verifies the checksums. In case a bad checksum
36  * is found or the extent cannot be read, good data will be written back if
37  * any can be found.
38  *
39  * Future enhancements:
40  *  - In case an unrepairable extent is encountered, track which files are
41  *    affected and report them
42  *  - track and record media errors, throw out bad devices
43  *  - add a mode to also read unallocated space
44  */
45
46 struct scrub_block;
47 struct scrub_ctx;
48
49 /*
50  * the following three values only influence performance.
51  * The last one configures the number of parallel and outstanding I/O
52  * operations. The first two values configure an upper limit for the number
53  * of (dynamically allocated) pages that are added to a bio.
54  */
55 #define SCRUB_PAGES_PER_RD_BIO  32      /* 128k per bio */
56 #define SCRUB_PAGES_PER_WR_BIO  32      /* 128k per bio */
57 #define SCRUB_BIOS_PER_SCTX     64      /* 8MB per device in flight */
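/*
 * Worked example, assuming 4K pages: 32 pages * 4K = 128K per bio, and
 * 64 bios * 128K = 8M of outstanding I/O per device, which is where the
 * figures in the comments above come from.
 */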
58
59 /*
60  * the following value times PAGE_SIZE needs to be large enough to match the
61  * largest node/leaf/sector size that shall be supported.
62  * Values larger than BTRFS_STRIPE_LEN are not supported.
63  */
64 #define SCRUB_MAX_PAGES_PER_BLOCK       16      /* 64k per node/leaf/sector */
65
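/*
 * Refcounted holder for the block mapping (struct btrfs_bio) that is
 * looked up when a block is rechecked; it is shared by the scrub_pages
 * that point to it and released through scrub_put_recover() below.
 */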
66 struct scrub_recover {
67         atomic_t                refs;
68         struct btrfs_bio        *bbio;
69         u64                     map_length;
70 };
71
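/*
 * State for one PAGE_SIZE unit of a scrub_block: the page itself, the
 * device it was read from, its logical and physical location (plus the
 * target location for dev-replace), the mirror it came from, and
 * whether a checksum is known for it.
 */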
72 struct scrub_page {
73         struct scrub_block      *sblock;
74         struct page             *page;
75         struct btrfs_device     *dev;
76         struct list_head        list;
77         u64                     flags;  /* extent flags */
78         u64                     generation;
79         u64                     logical;
80         u64                     physical;
81         u64                     physical_for_dev_replace;
82         atomic_t                refs;
83         struct {
84                 unsigned int    mirror_num:8;
85                 unsigned int    have_csum:1;
86                 unsigned int    io_error:1;
87         };
88         u8                      csum[BTRFS_CSUM_SIZE];
89
90         struct scrub_recover    *recover;
91 };
92
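/*
 * A scrub_bio gathers scrub_pages that target one device so they can
 * be read (or, for dev-replace, written) with a single bio; completion
 * is deferred to a worker via the embedded btrfs_work, and next_free
 * links the bio into the context's free pool.
 */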
93 struct scrub_bio {
94         int                     index;
95         struct scrub_ctx        *sctx;
96         struct btrfs_device     *dev;
97         struct bio              *bio;
98         int                     err;
99         u64                     logical;
100         u64                     physical;
101 #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
102         struct scrub_page       *pagev[SCRUB_PAGES_PER_WR_BIO];
103 #else
104         struct scrub_page       *pagev[SCRUB_PAGES_PER_RD_BIO];
105 #endif
106         int                     page_count;
107         int                     next_free;
108         struct btrfs_work       work;
109 };
110
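/*
 * A scrub_block groups the pages (up to SCRUB_MAX_PAGES_PER_BLOCK) of
 * one checksumming unit - a data sector, tree node/leaf or super block -
 * together with the error state that the recheck and repair code
 * derives for it.
 */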
111 struct scrub_block {
112         struct scrub_page       *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
113         int                     page_count;
114         atomic_t                outstanding_pages;
115         atomic_t                refs; /* free mem on transition to zero */
116         struct scrub_ctx        *sctx;
117         struct scrub_parity     *sparity;
118         struct {
119                 unsigned int    header_error:1;
120                 unsigned int    checksum_error:1;
121                 unsigned int    no_io_error_seen:1;
122                 unsigned int    generation_error:1; /* also sets header_error */
123
124                 /* The following is for the data used to check parity. */
125                 /* It applies to data that has a checksum. */
126                 unsigned int    data_corrected:1;
127         };
128 };
129
130 /* Used for the chunks with parity stripes, such as RAID5/6 */
131 struct scrub_parity {
132         struct scrub_ctx        *sctx;
133
134         struct btrfs_device     *scrub_dev;
135
136         u64                     logic_start;
137
138         u64                     logic_end;
139
140         int                     nsectors;
141
142         int                     stripe_len;
143
144         atomic_t                refs;
145
146         struct list_head        spages;
147
148         /* Work item for parity check and repair */
149         struct btrfs_work       work;
150
151         /* Mark the parity blocks which have data */
152         unsigned long           *dbitmap;
153
154         /*
155          * Mark the parity blocks which have data, but where errors
156          * happened when reading or checking that data
157          */
158         unsigned long           *ebitmap;
159
160         unsigned long           bitmap[0];
161 };
162
163 struct scrub_wr_ctx {
164         struct scrub_bio *wr_curr_bio;
165         struct btrfs_device *tgtdev;
166         int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
167         atomic_t flush_all_writes;
168         struct mutex wr_lock;
169 };
170
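/*
 * Per-device scrub state: a fixed pool of SCRUB_BIOS_PER_SCTX bios
 * chained through first_free/next_free, counters used to wait for
 * in-flight bios and worker items, the checksum list for the range
 * currently being scrubbed, statistics, and the write context used in
 * dev-replace mode.
 */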
171 struct scrub_ctx {
172         struct scrub_bio        *bios[SCRUB_BIOS_PER_SCTX];
173         struct btrfs_root       *dev_root;
174         int                     first_free;
175         int                     curr;
176         atomic_t                bios_in_flight;
177         atomic_t                workers_pending;
178         spinlock_t              list_lock;
179         wait_queue_head_t       list_wait;
180         u16                     csum_size;
181         struct list_head        csum_list;
182         atomic_t                cancel_req;
183         int                     readonly;
184         int                     pages_per_rd_bio;
185         u32                     sectorsize;
186         u32                     nodesize;
187
188         int                     is_dev_replace;
189         struct scrub_wr_ctx     wr_ctx;
190
191         /*
192          * statistics
193          */
194         struct btrfs_scrub_progress stat;
195         spinlock_t              stat_lock;
196
197         /*
198          * Use a ref counter to avoid use-after-free issues. Scrub workers
199          * decrement bios_in_flight and workers_pending and then do a wakeup
200          * on the list_wait wait queue. We must ensure the main scrub task
201          * doesn't free the scrub context before or while the workers are
202          * doing the wakeup() call.
203          */
204         atomic_t                refs;
205 };
206
207 struct scrub_fixup_nodatasum {
208         struct scrub_ctx        *sctx;
209         struct btrfs_device     *dev;
210         u64                     logical;
211         struct btrfs_root       *root;
212         struct btrfs_work       work;
213         int                     mirror_num;
214 };
215
216 struct scrub_nocow_inode {
217         u64                     inum;
218         u64                     offset;
219         u64                     root;
220         struct list_head        list;
221 };
222
223 struct scrub_copy_nocow_ctx {
224         struct scrub_ctx        *sctx;
225         u64                     logical;
226         u64                     len;
227         int                     mirror_num;
228         u64                     physical_for_dev_replace;
229         struct list_head        inodes;
230         struct btrfs_work       work;
231 };
232
233 struct scrub_warning {
234         struct btrfs_path       *path;
235         u64                     extent_item_size;
236         const char              *errstr;
237         sector_t                sector;
238         u64                     logical;
239         struct btrfs_device     *dev;
240 };
241
242 static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
243 static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
244 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
245 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
246 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
247 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
248                                      struct scrub_block *sblocks_for_recheck);
249 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
250                                 struct scrub_block *sblock, int is_metadata,
251                                 int have_csum, u8 *csum, u64 generation,
252                                 u16 csum_size, int retry_failed_mirror);
253 static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
254                                          struct scrub_block *sblock,
255                                          int is_metadata, int have_csum,
256                                          const u8 *csum, u64 generation,
257                                          u16 csum_size);
258 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
259                                              struct scrub_block *sblock_good);
260 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
261                                             struct scrub_block *sblock_good,
262                                             int page_num, int force_write);
263 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
264 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
265                                            int page_num);
266 static int scrub_checksum_data(struct scrub_block *sblock);
267 static int scrub_checksum_tree_block(struct scrub_block *sblock);
268 static int scrub_checksum_super(struct scrub_block *sblock);
269 static void scrub_block_get(struct scrub_block *sblock);
270 static void scrub_block_put(struct scrub_block *sblock);
271 static void scrub_page_get(struct scrub_page *spage);
272 static void scrub_page_put(struct scrub_page *spage);
273 static void scrub_parity_get(struct scrub_parity *sparity);
274 static void scrub_parity_put(struct scrub_parity *sparity);
275 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
276                                     struct scrub_page *spage);
277 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
278                        u64 physical, struct btrfs_device *dev, u64 flags,
279                        u64 gen, int mirror_num, u8 *csum, int force,
280                        u64 physical_for_dev_replace);
281 static void scrub_bio_end_io(struct bio *bio, int err);
282 static void scrub_bio_end_io_worker(struct btrfs_work *work);
283 static void scrub_block_complete(struct scrub_block *sblock);
284 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
285                                u64 extent_logical, u64 extent_len,
286                                u64 *extent_physical,
287                                struct btrfs_device **extent_dev,
288                                int *extent_mirror_num);
289 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
290                               struct scrub_wr_ctx *wr_ctx,
291                               struct btrfs_fs_info *fs_info,
292                               struct btrfs_device *dev,
293                               int is_dev_replace);
294 static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
295 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
296                                     struct scrub_page *spage);
297 static void scrub_wr_submit(struct scrub_ctx *sctx);
298 static void scrub_wr_bio_end_io(struct bio *bio, int err);
299 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
300 static int write_page_nocow(struct scrub_ctx *sctx,
301                             u64 physical_for_dev_replace, struct page *page);
302 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
303                                       struct scrub_copy_nocow_ctx *ctx);
304 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
305                             int mirror_num, u64 physical_for_dev_replace);
306 static void copy_nocow_pages_worker(struct btrfs_work *work);
307 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
308 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
309 static void scrub_put_ctx(struct scrub_ctx *sctx);
310
311
312 static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
313 {
314         atomic_inc(&sctx->refs);
315         atomic_inc(&sctx->bios_in_flight);
316 }
317
318 static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
319 {
320         atomic_dec(&sctx->bios_in_flight);
321         wake_up(&sctx->list_wait);
322         scrub_put_ctx(sctx);
323 }
324
325 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
326 {
327         while (atomic_read(&fs_info->scrub_pause_req)) {
328                 mutex_unlock(&fs_info->scrub_lock);
329                 wait_event(fs_info->scrub_pause_wait,
330                    atomic_read(&fs_info->scrub_pause_req) == 0);
331                 mutex_lock(&fs_info->scrub_lock);
332         }
333 }
334
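/*
 * scrub_pause_on() announces this scrub as paused; scrub_pause_off()
 * waits for a pending pause request to clear before removing it from
 * the paused count again. scrub_blocked_if_needed() simply pairs the
 * two so that a scrub yields to a requested pause.
 */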
335 static void scrub_pause_on(struct btrfs_fs_info *fs_info)
336 {
337         atomic_inc(&fs_info->scrubs_paused);
338         wake_up(&fs_info->scrub_pause_wait);
339 }
340
341 static void scrub_pause_off(struct btrfs_fs_info *fs_info)
342 {
343         mutex_lock(&fs_info->scrub_lock);
344         __scrub_blocked_if_needed(fs_info);
345         atomic_dec(&fs_info->scrubs_paused);
346         mutex_unlock(&fs_info->scrub_lock);
347
348         wake_up(&fs_info->scrub_pause_wait);
349 }
350
351 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
352 {
353         scrub_pause_on(fs_info);
354         scrub_pause_off(fs_info);
355 }
356
357 /*
358  * used for workers that require transaction commits (i.e., for the
359  * NOCOW case)
360  */
361 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
362 {
363         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
364
365         atomic_inc(&sctx->refs);
366         /*
367          * increment scrubs_running to prevent cancel requests from
368          * completing as long as a worker is running. we must also
369          * increment scrubs_paused to prevent deadlocking on pause
370          * requests used for transaction commits (as the worker uses a
371          * transaction context). it is safe to regard the worker
372          * as paused for all practical matters. effectively, we only
373          * avoid cancellation requests from completing.
374          */
375         mutex_lock(&fs_info->scrub_lock);
376         atomic_inc(&fs_info->scrubs_running);
377         atomic_inc(&fs_info->scrubs_paused);
378         mutex_unlock(&fs_info->scrub_lock);
379
380         /*
381          * the check of the @scrubs_running == @scrubs_paused condition
382          * inside wait_event() is not an atomic operation, which means
383          * we may inc/dec @scrubs_running/paused at any time. Wake up
384          * @scrub_pause_wait as often as we can so that a transaction
385          * commit is blocked for as short a time as possible.
386          */
387         wake_up(&fs_info->scrub_pause_wait);
388
389         atomic_inc(&sctx->workers_pending);
390 }
391
392 /* used for workers that require transaction commits */
393 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
394 {
395         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
396
397         /*
398          * see scrub_pending_trans_workers_inc() why we're pretending
399          * to be paused in the scrub counters
400          */
401         mutex_lock(&fs_info->scrub_lock);
402         atomic_dec(&fs_info->scrubs_running);
403         atomic_dec(&fs_info->scrubs_paused);
404         mutex_unlock(&fs_info->scrub_lock);
405         atomic_dec(&sctx->workers_pending);
406         wake_up(&fs_info->scrub_pause_wait);
407         wake_up(&sctx->list_wait);
408         scrub_put_ctx(sctx);
409 }
410
411 static void scrub_free_csums(struct scrub_ctx *sctx)
412 {
413         while (!list_empty(&sctx->csum_list)) {
414                 struct btrfs_ordered_sum *sum;
415                 sum = list_first_entry(&sctx->csum_list,
416                                        struct btrfs_ordered_sum, list);
417                 list_del(&sum->list);
418                 kfree(sum);
419         }
420 }
421
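/*
 * Releases everything hanging off a scrub context: the blocks still
 * attached to the bio that was current when the scrub was cancelled,
 * the pre-allocated bio pool, any queued checksums, the write context
 * and finally the context itself.
 */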
422 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
423 {
424         int i;
425
426         if (!sctx)
427                 return;
428
429         scrub_free_wr_ctx(&sctx->wr_ctx);
430
431         /* this can happen when scrub is cancelled */
432         if (sctx->curr != -1) {
433                 struct scrub_bio *sbio = sctx->bios[sctx->curr];
434
435                 for (i = 0; i < sbio->page_count; i++) {
436                         WARN_ON(!sbio->pagev[i]->page);
437                         scrub_block_put(sbio->pagev[i]->sblock);
438                 }
439                 bio_put(sbio->bio);
440         }
441
442         for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
443                 struct scrub_bio *sbio = sctx->bios[i];
444
445                 if (!sbio)
446                         break;
447                 kfree(sbio);
448         }
449
450         scrub_free_csums(sctx);
451         kfree(sctx);
452 }
453
454 static void scrub_put_ctx(struct scrub_ctx *sctx)
455 {
456         if (atomic_dec_and_test(&sctx->refs))
457                 scrub_free_ctx(sctx);
458 }
459
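/*
 * Allocates and initializes the per-device scrub context. The read bio
 * page limit is capped by what the underlying block device accepts,
 * the SCRUB_BIOS_PER_SCTX scrub_bios are pre-allocated and chained
 * through next_free, and the write context is initialized against the
 * dev-replace target device via scrub_setup_wr_ctx().
 */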
460 static noinline_for_stack
461 struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
462 {
463         struct scrub_ctx *sctx;
464         int             i;
465         struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
466         int pages_per_rd_bio;
467         int ret;
468
469         /*
470          * the setting of pages_per_rd_bio is correct for scrub but might
471          * be wrong for the dev_replace code where we might read from
472          * different devices in the initial huge bios. However, that
473          * code is able to correctly handle the case when adding a page
474          * to a bio fails.
475          */
476         if (dev->bdev)
477                 pages_per_rd_bio = min_t(int, SCRUB_PAGES_PER_RD_BIO,
478                                          bio_get_nr_vecs(dev->bdev));
479         else
480                 pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
481         sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
482         if (!sctx)
483                 goto nomem;
484         atomic_set(&sctx->refs, 1);
485         sctx->is_dev_replace = is_dev_replace;
486         sctx->pages_per_rd_bio = pages_per_rd_bio;
487         sctx->curr = -1;
488         sctx->dev_root = dev->dev_root;
489         for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
490                 struct scrub_bio *sbio;
491
492                 sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
493                 if (!sbio)
494                         goto nomem;
495                 sctx->bios[i] = sbio;
496
497                 sbio->index = i;
498                 sbio->sctx = sctx;
499                 sbio->page_count = 0;
500                 btrfs_init_work(&sbio->work, btrfs_scrub_helper,
501                                 scrub_bio_end_io_worker, NULL, NULL);
502
503                 if (i != SCRUB_BIOS_PER_SCTX - 1)
504                         sctx->bios[i]->next_free = i + 1;
505                 else
506                         sctx->bios[i]->next_free = -1;
507         }
508         sctx->first_free = 0;
509         sctx->nodesize = dev->dev_root->nodesize;
510         sctx->sectorsize = dev->dev_root->sectorsize;
511         atomic_set(&sctx->bios_in_flight, 0);
512         atomic_set(&sctx->workers_pending, 0);
513         atomic_set(&sctx->cancel_req, 0);
514         sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
515         INIT_LIST_HEAD(&sctx->csum_list);
516
517         spin_lock_init(&sctx->list_lock);
518         spin_lock_init(&sctx->stat_lock);
519         init_waitqueue_head(&sctx->list_wait);
520
521         ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
522                                  fs_info->dev_replace.tgtdev, is_dev_replace);
523         if (ret) {
524                 scrub_free_ctx(sctx);
525                 return ERR_PTR(ret);
526         }
527         return sctx;
528
529 nomem:
530         scrub_free_ctx(sctx);
531         return ERR_PTR(-ENOMEM);
532 }
533
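/*
 * Callback for iterate_extent_inodes(): for one (inode, offset, root)
 * triple that references the bad extent, look up the inode item to get
 * its size and link count, resolve the inode to file system paths and
 * print one warning line per resolved path.
 */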
534 static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
535                                      void *warn_ctx)
536 {
537         u64 isize;
538         u32 nlink;
539         int ret;
540         int i;
541         struct extent_buffer *eb;
542         struct btrfs_inode_item *inode_item;
543         struct scrub_warning *swarn = warn_ctx;
544         struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
545         struct inode_fs_paths *ipath = NULL;
546         struct btrfs_root *local_root;
547         struct btrfs_key root_key;
548         struct btrfs_key key;
549
550         root_key.objectid = root;
551         root_key.type = BTRFS_ROOT_ITEM_KEY;
552         root_key.offset = (u64)-1;
553         local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
554         if (IS_ERR(local_root)) {
555                 ret = PTR_ERR(local_root);
556                 goto err;
557         }
558
559         /*
560          * this makes the path point to (inum INODE_ITEM ioff)
561          */
562         key.objectid = inum;
563         key.type = BTRFS_INODE_ITEM_KEY;
564         key.offset = 0;
565
566         ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
567         if (ret) {
568                 btrfs_release_path(swarn->path);
569                 goto err;
570         }
571
572         eb = swarn->path->nodes[0];
573         inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
574                                         struct btrfs_inode_item);
575         isize = btrfs_inode_size(eb, inode_item);
576         nlink = btrfs_inode_nlink(eb, inode_item);
577         btrfs_release_path(swarn->path);
578
579         ipath = init_ipath(4096, local_root, swarn->path);
580         if (IS_ERR(ipath)) {
581                 ret = PTR_ERR(ipath);
582                 ipath = NULL;
583                 goto err;
584         }
585         ret = paths_from_inode(inum, ipath);
586
587         if (ret < 0)
588                 goto err;
589
590         /*
591          * we deliberately ignore the fact that ipath might have been too
592          * small to hold all of the paths here
593          */
594         for (i = 0; i < ipath->fspath->elem_cnt; ++i)
595                 printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
596                         "%s, sector %llu, root %llu, inode %llu, offset %llu, "
597                         "length %llu, links %u (path: %s)\n", swarn->errstr,
598                         swarn->logical, rcu_str_deref(swarn->dev->name),
599                         (unsigned long long)swarn->sector, root, inum, offset,
600                         min(isize - offset, (u64)PAGE_SIZE), nlink,
601                         (char *)(unsigned long)ipath->fspath->val[i]);
602
603         free_ipath(ipath);
604         return 0;
605
606 err:
607         printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
608                 "%s, sector %llu, root %llu, inode %llu, offset %llu: path "
609                 "resolving failed with ret=%d\n", swarn->errstr,
610                 swarn->logical, rcu_str_deref(swarn->dev->name),
611                 (unsigned long long)swarn->sector, root, inum, offset, ret);
612
613         free_ipath(ipath);
614         return 0;
615 }
616
617 static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
618 {
619         struct btrfs_device *dev;
620         struct btrfs_fs_info *fs_info;
621         struct btrfs_path *path;
622         struct btrfs_key found_key;
623         struct extent_buffer *eb;
624         struct btrfs_extent_item *ei;
625         struct scrub_warning swarn;
626         unsigned long ptr = 0;
627         u64 extent_item_pos;
628         u64 flags = 0;
629         u64 ref_root;
630         u32 item_size;
631         u8 ref_level;
632         int ret;
633
634         WARN_ON(sblock->page_count < 1);
635         dev = sblock->pagev[0]->dev;
636         fs_info = sblock->sctx->dev_root->fs_info;
637
638         path = btrfs_alloc_path();
639         if (!path)
640                 return;
641
642         swarn.sector = (sblock->pagev[0]->physical) >> 9;
643         swarn.logical = sblock->pagev[0]->logical;
644         swarn.errstr = errstr;
645         swarn.dev = NULL;
646
647         ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
648                                   &flags);
649         if (ret < 0)
650                 goto out;
651
652         extent_item_pos = swarn.logical - found_key.objectid;
653         swarn.extent_item_size = found_key.offset;
654
655         eb = path->nodes[0];
656         ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
657         item_size = btrfs_item_size_nr(eb, path->slots[0]);
658
659         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
660                 do {
661                         ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
662                                                       item_size, &ref_root,
663                                                       &ref_level);
664                         printk_in_rcu(KERN_WARNING
665                                 "BTRFS: %s at logical %llu on dev %s, "
666                                 "sector %llu: metadata %s (level %d) in tree "
667                                 "%llu\n", errstr, swarn.logical,
668                                 rcu_str_deref(dev->name),
669                                 (unsigned long long)swarn.sector,
670                                 ref_level ? "node" : "leaf",
671                                 ret < 0 ? -1 : ref_level,
672                                 ret < 0 ? -1 : ref_root);
673                 } while (ret != 1);
674                 btrfs_release_path(path);
675         } else {
676                 btrfs_release_path(path);
677                 swarn.path = path;
678                 swarn.dev = dev;
679                 iterate_extent_inodes(fs_info, found_key.objectid,
680                                         extent_item_pos, 1,
681                                         scrub_print_warning_inode, &swarn);
682         }
683
684 out:
685         btrfs_free_path(path);
686 }
687
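/*
 * Callback used from scrub_fixup_nodatasum() below. For one inode that
 * references the bad sector it either rewrites a clean, up-to-date
 * page in place via repair_io_failure(), or marks the range
 * EXTENT_DAMAGED and reads the bad mirror so that the generic
 * read-repair path gets a chance to fix the sector.
 */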
688 static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
689 {
690         struct page *page = NULL;
691         unsigned long index;
692         struct scrub_fixup_nodatasum *fixup = fixup_ctx;
693         int ret;
694         int corrected = 0;
695         struct btrfs_key key;
696         struct inode *inode = NULL;
697         struct btrfs_fs_info *fs_info;
698         u64 end = offset + PAGE_SIZE - 1;
699         struct btrfs_root *local_root;
700         int srcu_index;
701
702         key.objectid = root;
703         key.type = BTRFS_ROOT_ITEM_KEY;
704         key.offset = (u64)-1;
705
706         fs_info = fixup->root->fs_info;
707         srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
708
709         local_root = btrfs_read_fs_root_no_name(fs_info, &key);
710         if (IS_ERR(local_root)) {
711                 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
712                 return PTR_ERR(local_root);
713         }
714
715         key.type = BTRFS_INODE_ITEM_KEY;
716         key.objectid = inum;
717         key.offset = 0;
718         inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
719         srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
720         if (IS_ERR(inode))
721                 return PTR_ERR(inode);
722
723         index = offset >> PAGE_CACHE_SHIFT;
724
725         page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
726         if (!page) {
727                 ret = -ENOMEM;
728                 goto out;
729         }
730
731         if (PageUptodate(page)) {
732                 if (PageDirty(page)) {
733                         /*
734                          * we need to write the data to the defective sector. the
735                          * data that was in that sector is not in memory,
736                          * because the page was modified. we must not write the
737                          * modified page to that sector.
738                          *
739                          * TODO: what could be done here: wait for the delalloc
740                          *       runner to write out that page (might involve
741                          *       COW) and see whether the sector is still
742                          *       referenced afterwards.
743                          *
744                          * For the time being, we'll treat this error as
745                          * uncorrectable, although there is a chance that a
746                          * later scrub will find the bad sector again and
747                          * that, by then, there is no dirty page in memory.
748                          */
749                         ret = -EIO;
750                         goto out;
751                 }
752                 ret = repair_io_failure(inode, offset, PAGE_SIZE,
753                                         fixup->logical, page,
754                                         offset - page_offset(page),
755                                         fixup->mirror_num);
756                 unlock_page(page);
757                 corrected = !ret;
758         } else {
759                 /*
760                  * we need to get good data first. the general readpage path
761                  * will call repair_io_failure for us; we just have to make
762                  * sure we read the bad mirror.
763                  */
764                 ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
765                                         EXTENT_DAMAGED, GFP_NOFS);
766                 if (ret) {
767                         /* set_extent_bits should give proper error */
768                         WARN_ON(ret > 0);
769                         if (ret > 0)
770                                 ret = -EFAULT;
771                         goto out;
772                 }
773
774                 ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
775                                                 btrfs_get_extent,
776                                                 fixup->mirror_num);
777                 wait_on_page_locked(page);
778
779                 corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
780                                                 end, EXTENT_DAMAGED, 0, NULL);
781                 if (!corrected)
782                         clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
783                                                 EXTENT_DAMAGED, GFP_NOFS);
784         }
785
786 out:
787         if (page)
788                 put_page(page);
789
790         iput(inode);
791
792         if (ret < 0)
793                 return ret;
794
795         if (ret == 0 && corrected) {
796                 /*
797                  * we only need to call readpage for one of the inodes belonging
798                  * to this extent. so make iterate_extent_inodes stop
799                  */
800                 return 1;
801         }
802
803         return -EIO;
804 }
805
806 static void scrub_fixup_nodatasum(struct btrfs_work *work)
807 {
808         int ret;
809         struct scrub_fixup_nodatasum *fixup;
810         struct scrub_ctx *sctx;
811         struct btrfs_trans_handle *trans = NULL;
812         struct btrfs_path *path;
813         int uncorrectable = 0;
814
815         fixup = container_of(work, struct scrub_fixup_nodatasum, work);
816         sctx = fixup->sctx;
817
818         path = btrfs_alloc_path();
819         if (!path) {
820                 spin_lock(&sctx->stat_lock);
821                 ++sctx->stat.malloc_errors;
822                 spin_unlock(&sctx->stat_lock);
823                 uncorrectable = 1;
824                 goto out;
825         }
826
827         trans = btrfs_join_transaction(fixup->root);
828         if (IS_ERR(trans)) {
829                 uncorrectable = 1;
830                 goto out;
831         }
832
833         /*
834          * the idea is to trigger a regular read through the standard path. we
835          * read a page from the (failed) logical address by specifying the
836          * corresponding copynum of the failed sector. thus, that readpage is
837          * expected to fail.
838          * that is the point where on-the-fly error correction will kick in
839          * (once the read has finished) and rewrite the failed sector if a
840          * good copy can be found.
841          */
842         ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
843                                                 path, scrub_fixup_readpage,
844                                                 fixup);
845         if (ret < 0) {
846                 uncorrectable = 1;
847                 goto out;
848         }
849         WARN_ON(ret != 1);
850
851         spin_lock(&sctx->stat_lock);
852         ++sctx->stat.corrected_errors;
853         spin_unlock(&sctx->stat_lock);
854
855 out:
856         if (trans && !IS_ERR(trans))
857                 btrfs_end_transaction(trans, fixup->root);
858         if (uncorrectable) {
859                 spin_lock(&sctx->stat_lock);
860                 ++sctx->stat.uncorrectable_errors;
861                 spin_unlock(&sctx->stat_lock);
862                 btrfs_dev_replace_stats_inc(
863                         &sctx->dev_root->fs_info->dev_replace.
864                         num_uncorrectable_read_errors);
865                 printk_ratelimited_in_rcu(KERN_ERR "BTRFS: "
866                     "unable to fixup (nodatasum) error at logical %llu on dev %s\n",
867                         fixup->logical, rcu_str_deref(fixup->dev->name));
868         }
869
870         btrfs_free_path(path);
871         kfree(fixup);
872
873         scrub_pending_trans_workers_dec(sctx);
874 }
875
876 static inline void scrub_get_recover(struct scrub_recover *recover)
877 {
878         atomic_inc(&recover->refs);
879 }
880
881 static inline void scrub_put_recover(struct scrub_recover *recover)
882 {
883         if (atomic_dec_and_test(&recover->refs)) {
884                 btrfs_put_bbio(recover->bbio);
885                 kfree(recover);
886         }
887 }
888
889 /*
890  * scrub_handle_errored_block gets called when either verification of the
891  * pages failed or the bio failed to read, e.g. with EIO. In the latter
892  * case, this function handles all pages in the bio, even though only one
893  * may be bad.
894  * The goal of this function is to repair the errored block by using the
895  * contents of one of the mirrors.
896  */
897 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
898 {
899         struct scrub_ctx *sctx = sblock_to_check->sctx;
900         struct btrfs_device *dev;
901         struct btrfs_fs_info *fs_info;
902         u64 length;
903         u64 logical;
904         u64 generation;
905         unsigned int failed_mirror_index;
906         unsigned int is_metadata;
907         unsigned int have_csum;
908         u8 *csum;
909         struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
910         struct scrub_block *sblock_bad;
911         int ret;
912         int mirror_index;
913         int page_num;
914         int success;
915         static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
916                                       DEFAULT_RATELIMIT_BURST);
917
918         BUG_ON(sblock_to_check->page_count < 1);
919         fs_info = sctx->dev_root->fs_info;
920         if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
921                 /*
922                  * if we find an error in a super block, we just report it.
923                  * Super blocks get rewritten with the next transaction
924                  * commit anyway
925                  */
926                 spin_lock(&sctx->stat_lock);
927                 ++sctx->stat.super_errors;
928                 spin_unlock(&sctx->stat_lock);
929                 return 0;
930         }
931         length = sblock_to_check->page_count * PAGE_SIZE;
932         logical = sblock_to_check->pagev[0]->logical;
933         generation = sblock_to_check->pagev[0]->generation;
934         BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
935         failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
936         is_metadata = !(sblock_to_check->pagev[0]->flags &
937                         BTRFS_EXTENT_FLAG_DATA);
938         have_csum = sblock_to_check->pagev[0]->have_csum;
939         csum = sblock_to_check->pagev[0]->csum;
940         dev = sblock_to_check->pagev[0]->dev;
941
942         if (sctx->is_dev_replace && !is_metadata && !have_csum) {
943                 sblocks_for_recheck = NULL;
944                 goto nodatasum_case;
945         }
946
947          * read all mirrors one after the other. This includes re-reading
948          * the extent or metadata block that failed (which was the reason
949          * this fixup code was called), this time page by page, in order
950          * to know which pages caused I/O errors and which ones are good
951          * (for all mirrors).
952          * The goal is to handle the situation when more than one
953          * It is the goal to handle the situation when more than one
954          * mirror contains I/O errors, but the errors do not
955          * overlap, i.e. the data can be repaired by selecting the
956          * pages from those mirrors without I/O error on the
957          * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
958          * would be that mirror #1 has an I/O error on the first page,
959          * the second page is good, and mirror #2 has an I/O error on
960          * the second page, but the first page is good.
961          * Then the first page of the first mirror can be repaired by
962          * taking the first page of the second mirror, and the
963          * second page of the second mirror can be repaired by
964          * copying the contents of the 2nd page of the 1st mirror.
965          * One more note: if the pages of one mirror contain I/O
966          * errors, the checksum cannot be verified. In order to get
967          * the best data for repairing, the first attempt is to find
968          * a mirror without I/O errors and with a validated checksum.
969          * Only if this is not possible are the pages picked from
970          * mirrors with I/O errors without considering the checksum.
971          * If the latter is the case, at the end, the checksum of the
972          * repaired area is verified in order to correctly maintain
973          * the statistics.
974          */
975
976         sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
977                                       sizeof(*sblocks_for_recheck), GFP_NOFS);
978         if (!sblocks_for_recheck) {
979                 spin_lock(&sctx->stat_lock);
980                 sctx->stat.malloc_errors++;
981                 sctx->stat.read_errors++;
982                 sctx->stat.uncorrectable_errors++;
983                 spin_unlock(&sctx->stat_lock);
984                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
985                 goto out;
986         }
987
988         /* setup the context, map the logical blocks and alloc the pages */
989         ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
990         if (ret) {
991                 spin_lock(&sctx->stat_lock);
992                 sctx->stat.read_errors++;
993                 sctx->stat.uncorrectable_errors++;
994                 spin_unlock(&sctx->stat_lock);
995                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
996                 goto out;
997         }
998         BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
999         sblock_bad = sblocks_for_recheck + failed_mirror_index;
1000
1001         /* build and submit the bios for the failed mirror, check checksums */
1002         scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
1003                             csum, generation, sctx->csum_size, 1);
1004
1005         if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
1006             sblock_bad->no_io_error_seen) {
1007                 /*
1008                  * the error disappeared after reading page by page, or
1009                  * the area was part of a huge bio and other parts of the
1010                  * bio caused I/O errors, or the block layer merged several
1011                  * read requests into one and the error is caused by a
1012                  * different bio (usually one of the two latter cases is
1013                  * the cause)
1014                  */
1015                 spin_lock(&sctx->stat_lock);
1016                 sctx->stat.unverified_errors++;
1017                 sblock_to_check->data_corrected = 1;
1018                 spin_unlock(&sctx->stat_lock);
1019
1020                 if (sctx->is_dev_replace)
1021                         scrub_write_block_to_dev_replace(sblock_bad);
1022                 goto out;
1023         }
1024
1025         if (!sblock_bad->no_io_error_seen) {
1026                 spin_lock(&sctx->stat_lock);
1027                 sctx->stat.read_errors++;
1028                 spin_unlock(&sctx->stat_lock);
1029                 if (__ratelimit(&_rs))
1030                         scrub_print_warning("i/o error", sblock_to_check);
1031                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
1032         } else if (sblock_bad->checksum_error) {
1033                 spin_lock(&sctx->stat_lock);
1034                 sctx->stat.csum_errors++;
1035                 spin_unlock(&sctx->stat_lock);
1036                 if (__ratelimit(&_rs))
1037                         scrub_print_warning("checksum error", sblock_to_check);
1038                 btrfs_dev_stat_inc_and_print(dev,
1039                                              BTRFS_DEV_STAT_CORRUPTION_ERRS);
1040         } else if (sblock_bad->header_error) {
1041                 spin_lock(&sctx->stat_lock);
1042                 sctx->stat.verify_errors++;
1043                 spin_unlock(&sctx->stat_lock);
1044                 if (__ratelimit(&_rs))
1045                         scrub_print_warning("checksum/header error",
1046                                             sblock_to_check);
1047                 if (sblock_bad->generation_error)
1048                         btrfs_dev_stat_inc_and_print(dev,
1049                                 BTRFS_DEV_STAT_GENERATION_ERRS);
1050                 else
1051                         btrfs_dev_stat_inc_and_print(dev,
1052                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1053         }
1054
1055         if (sctx->readonly) {
1056                 ASSERT(!sctx->is_dev_replace);
1057                 goto out;
1058         }
1059
1060         if (!is_metadata && !have_csum) {
1061                 struct scrub_fixup_nodatasum *fixup_nodatasum;
1062
1063                 WARN_ON(sctx->is_dev_replace);
1064
1065 nodatasum_case:
1066
1067                 /*
1068                  * !is_metadata and !have_csum, this means that the data
1069                  * might not be COW'ed, that it might be modified
1070                  * concurrently. The general strategy of working on the
1071                  * commit root does not help in the case when COW is not
1072                  * used.
1073                  */
1074                 fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
1075                 if (!fixup_nodatasum)
1076                         goto did_not_correct_error;
1077                 fixup_nodatasum->sctx = sctx;
1078                 fixup_nodatasum->dev = dev;
1079                 fixup_nodatasum->logical = logical;
1080                 fixup_nodatasum->root = fs_info->extent_root;
1081                 fixup_nodatasum->mirror_num = failed_mirror_index + 1;
1082                 scrub_pending_trans_workers_inc(sctx);
1083                 btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
1084                                 scrub_fixup_nodatasum, NULL, NULL);
1085                 btrfs_queue_work(fs_info->scrub_workers,
1086                                  &fixup_nodatasum->work);
1087                 goto out;
1088         }
1089
1090         /*
1091          * now build and submit the bios for the other mirrors, check
1092          * checksums.
1093          * First try to pick the mirror which is completely without I/O
1094          * errors and also does not have a checksum error.
1095          * If one is found, and if a checksum is present, the full block
1096          * that is known to contain an error is rewritten. Afterwards
1097          * the block is known to be corrected.
1098          * If a mirror is found which is completely correct, and no
1099          * checksum is present, only those pages are rewritten that had
1100          * an I/O error in the block to be repaired, since it cannot be
1101          * determined which copy of the other pages is better (and it
1102          * could happen otherwise that a correct page would be
1103          * overwritten by a bad one).
1104          */
1105         for (mirror_index = 0;
1106              mirror_index < BTRFS_MAX_MIRRORS &&
1107              sblocks_for_recheck[mirror_index].page_count > 0;
1108              mirror_index++) {
1109                 struct scrub_block *sblock_other;
1110
1111                 if (mirror_index == failed_mirror_index)
1112                         continue;
1113                 sblock_other = sblocks_for_recheck + mirror_index;
1114
1115                 /* build and submit the bios, check checksums */
1116                 scrub_recheck_block(fs_info, sblock_other, is_metadata,
1117                                     have_csum, csum, generation,
1118                                     sctx->csum_size, 0);
1119
1120                 if (!sblock_other->header_error &&
1121                     !sblock_other->checksum_error &&
1122                     sblock_other->no_io_error_seen) {
1123                         if (sctx->is_dev_replace) {
1124                                 scrub_write_block_to_dev_replace(sblock_other);
1125                                 goto corrected_error;
1126                         } else {
1127                                 ret = scrub_repair_block_from_good_copy(
1128                                                 sblock_bad, sblock_other);
1129                                 if (!ret)
1130                                         goto corrected_error;
1131                         }
1132                 }
1133         }
1134
1135         if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
1136                 goto did_not_correct_error;
1137
1138         /*
1139          * In case of I/O errors in the area that is supposed to be
1140          * repaired, continue by picking good copies of those pages.
1141          * Select the good pages from mirrors to rewrite bad pages from
1142          * the area to fix. Afterwards verify the checksum of the block
1143          * that is supposed to be repaired. This verification step is
1144          * only done for the purpose of statistics counting and for the
1145          * final scrub report on whether errors remain.
1146          * A perfect algorithm could make use of the checksum and try
1147          * all possible combinations of pages from the different mirrors
1148          * until the checksum verification succeeds. For example, when
1149          * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
1150          * of mirror #2 is readable but the final checksum test fails,
1151          * then the 2nd page of mirror #3 could be tried to see whether
1152          * the final checksum now succeeds. But this would be a rare
1153          * exception and is therefore not implemented. At least we avoid
1154          * overwriting the good copy.
1155          * A more useful improvement would be to pick the sectors
1156          * without I/O error based on sector sizes (512 bytes on legacy
1157          * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
1158          * mirror could be repaired by taking 512 bytes of a different
1159          * mirror, even if other 512-byte sectors in the same PAGE_SIZE
1160          * area are unreadable.
1161          */
1162         success = 1;
1163         for (page_num = 0; page_num < sblock_bad->page_count;
1164              page_num++) {
1165                 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1166                 struct scrub_block *sblock_other = NULL;
1167
1168                 /* skip no-io-error page in scrub */
1169                 if (!page_bad->io_error && !sctx->is_dev_replace)
1170                         continue;
1171
1172                 /* try to find no-io-error page in mirrors */
1173                 if (page_bad->io_error) {
1174                         for (mirror_index = 0;
1175                              mirror_index < BTRFS_MAX_MIRRORS &&
1176                              sblocks_for_recheck[mirror_index].page_count > 0;
1177                              mirror_index++) {
1178                                 if (!sblocks_for_recheck[mirror_index].
1179                                     pagev[page_num]->io_error) {
1180                                         sblock_other = sblocks_for_recheck +
1181                                                        mirror_index;
1182                                         break;
1183                                 }
1184                         }
1185                         if (!sblock_other)
1186                                 success = 0;
1187                 }
1188
1189                 if (sctx->is_dev_replace) {
1190                         /*
1191                          * did not find a mirror to fetch the page
1192                          * from. scrub_write_page_to_dev_replace()
1193                          * handles this case (page->io_error), by
1194                          * filling the block with zeros before
1195                          * submitting the write request
1196                          */
1197                         if (!sblock_other)
1198                                 sblock_other = sblock_bad;
1199
1200                         if (scrub_write_page_to_dev_replace(sblock_other,
1201                                                             page_num) != 0) {
1202                                 btrfs_dev_replace_stats_inc(
1203                                         &sctx->dev_root->
1204                                         fs_info->dev_replace.
1205                                         num_write_errors);
1206                                 success = 0;
1207                         }
1208                 } else if (sblock_other) {
1209                         ret = scrub_repair_page_from_good_copy(sblock_bad,
1210                                                                sblock_other,
1211                                                                page_num, 0);
1212                         if (0 == ret)
1213                                 page_bad->io_error = 0;
1214                         else
1215                                 success = 0;
1216                 }
1217         }
1218
1219         if (success && !sctx->is_dev_replace) {
1220                 if (is_metadata || have_csum) {
1221                         /*
1222                          * need to verify the checksum now that all
1223                          * sectors on disk are repaired (the write
1224                          * request for data to be repaired is on its way).
1225                          * Just be lazy and use scrub_recheck_block()
1226                          * which re-reads the data before the checksum
1227                          * is verified, but most likely the data comes out
1228                          * of the page cache.
1229                          */
1230                         scrub_recheck_block(fs_info, sblock_bad,
1231                                             is_metadata, have_csum, csum,
1232                                             generation, sctx->csum_size, 1);
1233                         if (!sblock_bad->header_error &&
1234                             !sblock_bad->checksum_error &&
1235                             sblock_bad->no_io_error_seen)
1236                                 goto corrected_error;
1237                         else
1238                                 goto did_not_correct_error;
1239                 } else {
1240 corrected_error:
1241                         spin_lock(&sctx->stat_lock);
1242                         sctx->stat.corrected_errors++;
1243                         sblock_to_check->data_corrected = 1;
1244                         spin_unlock(&sctx->stat_lock);
1245                         printk_ratelimited_in_rcu(KERN_ERR
1246                                 "BTRFS: fixed up error at logical %llu on dev %s\n",
1247                                 logical, rcu_str_deref(dev->name));
1248                 }
1249         } else {
1250 did_not_correct_error:
1251                 spin_lock(&sctx->stat_lock);
1252                 sctx->stat.uncorrectable_errors++;
1253                 spin_unlock(&sctx->stat_lock);
1254                 printk_ratelimited_in_rcu(KERN_ERR
1255                         "BTRFS: unable to fixup (regular) error at logical %llu on dev %s\n",
1256                         logical, rcu_str_deref(dev->name));
1257         }
1258
1259 out:
1260         if (sblocks_for_recheck) {
1261                 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
1262                      mirror_index++) {
1263                         struct scrub_block *sblock = sblocks_for_recheck +
1264                                                      mirror_index;
1265                         struct scrub_recover *recover;
1266                         int page_index;
1267
1268                         for (page_index = 0; page_index < sblock->page_count;
1269                              page_index++) {
1270                                 sblock->pagev[page_index]->sblock = NULL;
1271                                 recover = sblock->pagev[page_index]->recover;
1272                                 if (recover) {
1273                                         scrub_put_recover(recover);
1274                                         sblock->pagev[page_index]->recover =
1275                                                                         NULL;
1276                                 }
1277                                 scrub_page_put(sblock->pagev[page_index]);
1278                         }
1279                 }
1280                 kfree(sblocks_for_recheck);
1281         }
1282
1283         return 0;
1284 }
1285
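/*
 * Number of ways a block can be obtained: a RAID5 stripe can be read
 * directly or rebuilt from the remaining data plus parity, so it
 * counts as 2 "mirrors"; RAID6 has a second parity and therefore 3.
 * All other profiles expose one copy per stripe.
 */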
1286 static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
1287 {
1288         if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1289                 return 2;
1290         else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1291                 return 3;
1292         else
1293                 return (int)bbio->num_stripes;
1294 }
1295
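/*
 * Translate a logical address and mirror number into the stripe that
 * backs it: for RAID5/6, walk the raid_map, skip the P/Q slots and pick
 * the data stripe whose logical range contains @logical, returning the
 * offset into that stripe. For the other profiles each mapped stripe is
 * one mirror, so the mirror number indexes the stripe directly and the
 * offset is zero.
 */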
1296 static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
1297                                                  u64 *raid_map,
1298                                                  u64 mapped_length,
1299                                                  int nstripes, int mirror,
1300                                                  int *stripe_index,
1301                                                  u64 *stripe_offset)
1302 {
1303         int i;
1304
1305         if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
1306                 /* RAID5/6 */
1307                 for (i = 0; i < nstripes; i++) {
1308                         if (raid_map[i] == RAID6_Q_STRIPE ||
1309                             raid_map[i] == RAID5_P_STRIPE)
1310                                 continue;
1311
1312                         if (logical >= raid_map[i] &&
1313                             logical < raid_map[i] + mapped_length)
1314                                 break;
1315                 }
1316
1317                 *stripe_index = i;
1318                 *stripe_offset = logical - raid_map[i];
1319         } else {
1320                 /* The other RAID types */
1321                 *stripe_index = mirror;
1322                 *stripe_offset = 0;
1323         }
1324 }
1325
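/*
 * Build one scrub_block per mirror for the recheck of a bad block: the
 * original block is mapped page by page (REQ_GET_READ_MIRRORS), and for
 * every mirror a page describing where that copy lives (device, physical
 * address, mirror number) is added to the corresponding recheck block.
 */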
1326 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
1327                                      struct scrub_block *sblocks_for_recheck)
1328 {
1329         struct scrub_ctx *sctx = original_sblock->sctx;
1330         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
1331         u64 length = original_sblock->page_count * PAGE_SIZE;
1332         u64 logical = original_sblock->pagev[0]->logical;
1333         struct scrub_recover *recover;
1334         struct btrfs_bio *bbio;
1335         u64 sublen;
1336         u64 mapped_length;
1337         u64 stripe_offset;
1338         int stripe_index;
1339         int page_index = 0;
1340         int mirror_index;
1341         int nmirrors;
1342         int ret;
1343
1344         /*
1345          * note: the two members refs and outstanding_pages
1346          * are not used (and not set) in the blocks that are used for
1347          * the recheck procedure
1348          */
1349
1350         while (length > 0) {
1351                 sublen = min_t(u64, length, PAGE_SIZE);
1352                 mapped_length = sublen;
1353                 bbio = NULL;
1354
1355                 /*
1356                  * with a length of PAGE_SIZE, each returned stripe
1357                  * represents one mirror
1358                  */
1359                 ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
1360                                        &mapped_length, &bbio, 0, 1);
1361                 if (ret || !bbio || mapped_length < sublen) {
1362                         btrfs_put_bbio(bbio);
1363                         return -EIO;
1364                 }
1365
1366                 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1367                 if (!recover) {
1368                         btrfs_put_bbio(bbio);
1369                         return -ENOMEM;
1370                 }
1371
1372                 atomic_set(&recover->refs, 1);
1373                 recover->bbio = bbio;
1374                 recover->map_length = mapped_length;
1375
1376                 BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
1377
1378                 nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
1379
1380                 for (mirror_index = 0; mirror_index < nmirrors;
1381                      mirror_index++) {
1382                         struct scrub_block *sblock;
1383                         struct scrub_page *page;
1384
1385                         sblock = sblocks_for_recheck + mirror_index;
1386                         sblock->sctx = sctx;
1387                         page = kzalloc(sizeof(*page), GFP_NOFS);
1388                         if (!page) {
1389 leave_nomem:
1390                                 spin_lock(&sctx->stat_lock);
1391                                 sctx->stat.malloc_errors++;
1392                                 spin_unlock(&sctx->stat_lock);
1393                                 scrub_put_recover(recover);
1394                                 return -ENOMEM;
1395                         }
1396                         scrub_page_get(page);
1397                         sblock->pagev[page_index] = page;
1398                         page->logical = logical;
1399
1400                         scrub_stripe_index_and_offset(logical,
1401                                                       bbio->map_type,
1402                                                       bbio->raid_map,
1403                                                       mapped_length,
1404                                                       bbio->num_stripes -
1405                                                       bbio->num_tgtdevs,
1406                                                       mirror_index,
1407                                                       &stripe_index,
1408                                                       &stripe_offset);
1409                         page->physical = bbio->stripes[stripe_index].physical +
1410                                          stripe_offset;
1411                         page->dev = bbio->stripes[stripe_index].dev;
1412
1413                         BUG_ON(page_index >= original_sblock->page_count);
1414                         page->physical_for_dev_replace =
1415                                 original_sblock->pagev[page_index]->
1416                                 physical_for_dev_replace;
1417                         /* for missing devices, dev->bdev is NULL */
1418                         page->mirror_num = mirror_index + 1;
1419                         sblock->page_count++;
1420                         page->page = alloc_page(GFP_NOFS);
1421                         if (!page->page)
1422                                 goto leave_nomem;
1423
1424                         scrub_get_recover(recover);
1425                         page->recover = recover;
1426                 }
1427                 scrub_put_recover(recover);
1428                 length -= sublen;
1429                 logical += sublen;
1430                 page_index++;
1431         }
1432
1433         return 0;
1434 }
1435
1436 struct scrub_bio_ret {
1437         struct completion event;
1438         int error;
1439 };
1440
1441 static void scrub_bio_wait_endio(struct bio *bio, int error)
1442 {
1443         struct scrub_bio_ret *ret = bio->bi_private;
1444
1445         ret->error = error;
1446         complete(&ret->event);
1447 }
1448
1449 static inline int scrub_is_page_on_raid56(struct scrub_page *page)
1450 {
1451         return page->recover &&
1452                (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
1453 }
1454
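/*
 * Read one page of a RAID5/6 block by running the parity recovery path
 * synchronously: the bio is handed to raid56_parity_recover() and this
 * function sleeps until the end_io callback signals completion.
 */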
1455 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1456                                         struct bio *bio,
1457                                         struct scrub_page *page)
1458 {
1459         struct scrub_bio_ret done;
1460         int ret;
1461
1462         init_completion(&done.event);
1463         done.error = 0;
1464         bio->bi_iter.bi_sector = page->logical >> 9;
1465         bio->bi_private = &done;
1466         bio->bi_end_io = scrub_bio_wait_endio;
1467
1468         ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
1469                                     page->recover->map_length,
1470                                     page->mirror_num, 0);
1471         if (ret)
1472                 return ret;
1473
1474         wait_for_completion(&done.event);
1475         if (done.error)
1476                 return -EIO;
1477
1478         return 0;
1479 }
1480
1481 /*
1482  * this function checks the on-disk data for checksum errors, header
1483  * errors and read I/O errors. If any I/O error happens, the exact pages
1484  * that failed are marked as bad. The goal is to enable scrub to take
1485  * the good pages from all the mirrors, so that the bad pages in the
1486  * mirror that was just handled can be repaired.
1487  */
1488 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1489                                 struct scrub_block *sblock, int is_metadata,
1490                                 int have_csum, u8 *csum, u64 generation,
1491                                 u16 csum_size, int retry_failed_mirror)
1492 {
1493         int page_num;
1494
1495         sblock->no_io_error_seen = 1;
1496         sblock->header_error = 0;
1497         sblock->checksum_error = 0;
1498
1499         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1500                 struct bio *bio;
1501                 struct scrub_page *page = sblock->pagev[page_num];
1502
1503                 if (page->dev->bdev == NULL) {
1504                         page->io_error = 1;
1505                         sblock->no_io_error_seen = 0;
1506                         continue;
1507                 }
1508
1509                 WARN_ON(!page->page);
1510                 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1511                 if (!bio) {
1512                         page->io_error = 1;
1513                         sblock->no_io_error_seen = 0;
1514                         continue;
1515                 }
1516                 bio->bi_bdev = page->dev->bdev;
1517
1518                 bio_add_page(bio, page->page, PAGE_SIZE, 0);
1519                 if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
1520                         if (scrub_submit_raid56_bio_wait(fs_info, bio, page))
1521                                 sblock->no_io_error_seen = 0;
1522                 } else {
1523                         bio->bi_iter.bi_sector = page->physical >> 9;
1524
1525                         if (btrfsic_submit_bio_wait(READ, bio))
1526                                 sblock->no_io_error_seen = 0;
1527                 }
1528
1529                 bio_put(bio);
1530         }
1531
1532         if (sblock->no_io_error_seen)
1533                 scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
1534                                              have_csum, csum, generation,
1535                                              csum_size);
1536
1537         return;
1538 }
1539
1540 static inline int scrub_check_fsid(u8 fsid[],
1541                                    struct scrub_page *spage)
1542 {
1543         struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
1544         int ret;
1545
1546         ret = memcmp(fsid, fs_devices->fsid, BTRFS_UUID_SIZE);
1547         return !ret;
1548 }
1549
1550 static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
1551                                          struct scrub_block *sblock,
1552                                          int is_metadata, int have_csum,
1553                                          const u8 *csum, u64 generation,
1554                                          u16 csum_size)
1555 {
1556         int page_num;
1557         u8 calculated_csum[BTRFS_CSUM_SIZE];
1558         u32 crc = ~(u32)0;
1559         void *mapped_buffer;
1560
1561         WARN_ON(!sblock->pagev[0]->page);
1562         if (is_metadata) {
1563                 struct btrfs_header *h;
1564
1565                 mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
1566                 h = (struct btrfs_header *)mapped_buffer;
1567
1568                 if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h) ||
1569                     !scrub_check_fsid(h->fsid, sblock->pagev[0]) ||
1570                     memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1571                            BTRFS_UUID_SIZE)) {
1572                         sblock->header_error = 1;
1573                 } else if (generation != btrfs_stack_header_generation(h)) {
1574                         sblock->header_error = 1;
1575                         sblock->generation_error = 1;
1576                 }
1577                 csum = h->csum;
1578         } else {
1579                 if (!have_csum)
1580                         return;
1581
1582                 mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
1583         }
1584
1585         for (page_num = 0;;) {
1586                 if (page_num == 0 && is_metadata)
1587                         crc = btrfs_csum_data(
1588                                 ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
1589                                 crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
1590                 else
1591                         crc = btrfs_csum_data(mapped_buffer, crc, PAGE_SIZE);
1592
1593                 kunmap_atomic(mapped_buffer);
1594                 page_num++;
1595                 if (page_num >= sblock->page_count)
1596                         break;
1597                 WARN_ON(!sblock->pagev[page_num]->page);
1598
1599                 mapped_buffer = kmap_atomic(sblock->pagev[page_num]->page);
1600         }
1601
1602         btrfs_csum_final(crc, calculated_csum);
1603         if (memcmp(calculated_csum, csum, csum_size))
1604                 sblock->checksum_error = 1;
1605 }
1606
1607 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1608                                              struct scrub_block *sblock_good)
1609 {
1610         int page_num;
1611         int ret = 0;
1612
1613         for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1614                 int ret_sub;
1615
1616                 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1617                                                            sblock_good,
1618                                                            page_num, 1);
1619                 if (ret_sub)
1620                         ret = ret_sub;
1621         }
1622
1623         return ret;
1624 }
1625
1626 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1627                                             struct scrub_block *sblock_good,
1628                                             int page_num, int force_write)
1629 {
1630         struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1631         struct scrub_page *page_good = sblock_good->pagev[page_num];
1632
1633         BUG_ON(page_bad->page == NULL);
1634         BUG_ON(page_good->page == NULL);
1635         if (force_write || sblock_bad->header_error ||
1636             sblock_bad->checksum_error || page_bad->io_error) {
1637                 struct bio *bio;
1638                 int ret;
1639
1640                 if (!page_bad->dev->bdev) {
1641                         printk_ratelimited(KERN_WARNING "BTRFS: "
1642                                 "scrub_repair_page_from_good_copy(bdev == NULL) "
1643                                 "is unexpected!\n");
1644                         return -EIO;
1645                 }
1646
1647                 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1648                 if (!bio)
1649                         return -EIO;
1650                 bio->bi_bdev = page_bad->dev->bdev;
1651                 bio->bi_iter.bi_sector = page_bad->physical >> 9;
1652
1653                 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1654                 if (PAGE_SIZE != ret) {
1655                         bio_put(bio);
1656                         return -EIO;
1657                 }
1658
1659                 if (btrfsic_submit_bio_wait(WRITE, bio)) {
1660                         btrfs_dev_stat_inc_and_print(page_bad->dev,
1661                                 BTRFS_DEV_STAT_WRITE_ERRS);
1662                         btrfs_dev_replace_stats_inc(
1663                                 &sblock_bad->sctx->dev_root->fs_info->
1664                                 dev_replace.num_write_errors);
1665                         bio_put(bio);
1666                         return -EIO;
1667                 }
1668                 bio_put(bio);
1669         }
1670
1671         return 0;
1672 }
1673
1674 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1675 {
1676         int page_num;
1677
1678         /*
1679          * This block is used for the check of the parity on the source device,
1680          * so the data needn't be written into the destination device.
1681          */
1682         if (sblock->sparity)
1683                 return;
1684
1685         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1686                 int ret;
1687
1688                 ret = scrub_write_page_to_dev_replace(sblock, page_num);
1689                 if (ret)
1690                         btrfs_dev_replace_stats_inc(
1691                                 &sblock->sctx->dev_root->fs_info->dev_replace.
1692                                 num_write_errors);
1693         }
1694 }
1695
1696 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1697                                            int page_num)
1698 {
1699         struct scrub_page *spage = sblock->pagev[page_num];
1700
1701         BUG_ON(spage->page == NULL);
1702         if (spage->io_error) {
1703                 void *mapped_buffer = kmap_atomic(spage->page);
1704
1705                 memset(mapped_buffer, 0, PAGE_CACHE_SIZE);
1706                 flush_dcache_page(spage->page);
1707                 kunmap_atomic(mapped_buffer);
1708         }
1709         return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1710 }
1711
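/*
 * Queue one page for writing to the dev-replace target. Pages are packed
 * into the current write bio as long as they are physically and logically
 * contiguous with it; otherwise the bio is submitted and a new one is
 * started. Once the bio holds pages_per_wr_bio pages it is submitted.
 */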
1712 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1713                                     struct scrub_page *spage)
1714 {
1715         struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1716         struct scrub_bio *sbio;
1717         int ret;
1718
1719         mutex_lock(&wr_ctx->wr_lock);
1720 again:
1721         if (!wr_ctx->wr_curr_bio) {
1722                 wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
1723                                               GFP_NOFS);
1724                 if (!wr_ctx->wr_curr_bio) {
1725                         mutex_unlock(&wr_ctx->wr_lock);
1726                         return -ENOMEM;
1727                 }
1728                 wr_ctx->wr_curr_bio->sctx = sctx;
1729                 wr_ctx->wr_curr_bio->page_count = 0;
1730         }
1731         sbio = wr_ctx->wr_curr_bio;
1732         if (sbio->page_count == 0) {
1733                 struct bio *bio;
1734
1735                 sbio->physical = spage->physical_for_dev_replace;
1736                 sbio->logical = spage->logical;
1737                 sbio->dev = wr_ctx->tgtdev;
1738                 bio = sbio->bio;
1739                 if (!bio) {
1740                         bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
1741                         if (!bio) {
1742                                 mutex_unlock(&wr_ctx->wr_lock);
1743                                 return -ENOMEM;
1744                         }
1745                         sbio->bio = bio;
1746                 }
1747
1748                 bio->bi_private = sbio;
1749                 bio->bi_end_io = scrub_wr_bio_end_io;
1750                 bio->bi_bdev = sbio->dev->bdev;
1751                 bio->bi_iter.bi_sector = sbio->physical >> 9;
1752                 sbio->err = 0;
1753         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1754                    spage->physical_for_dev_replace ||
1755                    sbio->logical + sbio->page_count * PAGE_SIZE !=
1756                    spage->logical) {
1757                 scrub_wr_submit(sctx);
1758                 goto again;
1759         }
1760
1761         ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1762         if (ret != PAGE_SIZE) {
1763                 if (sbio->page_count < 1) {
1764                         bio_put(sbio->bio);
1765                         sbio->bio = NULL;
1766                         mutex_unlock(&wr_ctx->wr_lock);
1767                         return -EIO;
1768                 }
1769                 scrub_wr_submit(sctx);
1770                 goto again;
1771         }
1772
1773         sbio->pagev[sbio->page_count] = spage;
1774         scrub_page_get(spage);
1775         sbio->page_count++;
1776         if (sbio->page_count == wr_ctx->pages_per_wr_bio)
1777                 scrub_wr_submit(sctx);
1778         mutex_unlock(&wr_ctx->wr_lock);
1779
1780         return 0;
1781 }
1782
1783 static void scrub_wr_submit(struct scrub_ctx *sctx)
1784 {
1785         struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1786         struct scrub_bio *sbio;
1787
1788         if (!wr_ctx->wr_curr_bio)
1789                 return;
1790
1791         sbio = wr_ctx->wr_curr_bio;
1792         wr_ctx->wr_curr_bio = NULL;
1793         WARN_ON(!sbio->bio->bi_bdev);
1794         scrub_pending_bio_inc(sctx);
1795         /* process all writes in a single worker thread, so that the block
1796          * layer can order the requests before sending them to the driver;
1797          * this doubled the write performance on spinning disks when
1798          * measured with Linux 3.5 */
1799         btrfsic_submit_bio(WRITE, sbio->bio);
1800 }
1801
1802 static void scrub_wr_bio_end_io(struct bio *bio, int err)
1803 {
1804         struct scrub_bio *sbio = bio->bi_private;
1805         struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
1806
1807         sbio->err = err;
1808         sbio->bio = bio;
1809
1810         btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
1811                          scrub_wr_bio_end_io_worker, NULL, NULL);
1812         btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
1813 }
1814
1815 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
1816 {
1817         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1818         struct scrub_ctx *sctx = sbio->sctx;
1819         int i;
1820
1821         WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
1822         if (sbio->err) {
1823                 struct btrfs_dev_replace *dev_replace =
1824                         &sbio->sctx->dev_root->fs_info->dev_replace;
1825
1826                 for (i = 0; i < sbio->page_count; i++) {
1827                         struct scrub_page *spage = sbio->pagev[i];
1828
1829                         spage->io_error = 1;
1830                         btrfs_dev_replace_stats_inc(&dev_replace->
1831                                                     num_write_errors);
1832                 }
1833         }
1834
1835         for (i = 0; i < sbio->page_count; i++)
1836                 scrub_page_put(sbio->pagev[i]);
1837
1838         bio_put(sbio->bio);
1839         kfree(sbio);
1840         scrub_pending_bio_dec(sctx);
1841 }
1842
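/*
 * Verify the checksum of a block that was read without I/O errors and
 * dispatch to the data, tree block or super block variant. A non-zero
 * result means the block is bad and scrub_handle_errored_block() is
 * invoked; super block errors are only counted (note the discarded
 * return value) because the supers are rewritten on the next commit.
 */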
1843 static int scrub_checksum(struct scrub_block *sblock)
1844 {
1845         u64 flags;
1846         int ret;
1847
1848         WARN_ON(sblock->page_count < 1);
1849         flags = sblock->pagev[0]->flags;
1850         ret = 0;
1851         if (flags & BTRFS_EXTENT_FLAG_DATA)
1852                 ret = scrub_checksum_data(sblock);
1853         else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1854                 ret = scrub_checksum_tree_block(sblock);
1855         else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1856                 (void)scrub_checksum_super(sblock);
1857         else
1858                 WARN_ON(1);
1859         if (ret)
1860                 scrub_handle_errored_block(sblock);
1861
1862         return ret;
1863 }
1864
1865 static int scrub_checksum_data(struct scrub_block *sblock)
1866 {
1867         struct scrub_ctx *sctx = sblock->sctx;
1868         u8 csum[BTRFS_CSUM_SIZE];
1869         u8 *on_disk_csum;
1870         struct page *page;
1871         void *buffer;
1872         u32 crc = ~(u32)0;
1873         int fail = 0;
1874         u64 len;
1875         int index;
1876
1877         BUG_ON(sblock->page_count < 1);
1878         if (!sblock->pagev[0]->have_csum)
1879                 return 0;
1880
1881         on_disk_csum = sblock->pagev[0]->csum;
1882         page = sblock->pagev[0]->page;
1883         buffer = kmap_atomic(page);
1884
1885         len = sctx->sectorsize;
1886         index = 0;
1887         for (;;) {
1888                 u64 l = min_t(u64, len, PAGE_SIZE);
1889
1890                 crc = btrfs_csum_data(buffer, crc, l);
1891                 kunmap_atomic(buffer);
1892                 len -= l;
1893                 if (len == 0)
1894                         break;
1895                 index++;
1896                 BUG_ON(index >= sblock->page_count);
1897                 BUG_ON(!sblock->pagev[index]->page);
1898                 page = sblock->pagev[index]->page;
1899                 buffer = kmap_atomic(page);
1900         }
1901
1902         btrfs_csum_final(crc, csum);
1903         if (memcmp(csum, on_disk_csum, sctx->csum_size))
1904                 fail = 1;
1905
1906         return fail;
1907 }
1908
1909 static int scrub_checksum_tree_block(struct scrub_block *sblock)
1910 {
1911         struct scrub_ctx *sctx = sblock->sctx;
1912         struct btrfs_header *h;
1913         struct btrfs_root *root = sctx->dev_root;
1914         struct btrfs_fs_info *fs_info = root->fs_info;
1915         u8 calculated_csum[BTRFS_CSUM_SIZE];
1916         u8 on_disk_csum[BTRFS_CSUM_SIZE];
1917         struct page *page;
1918         void *mapped_buffer;
1919         u64 mapped_size;
1920         void *p;
1921         u32 crc = ~(u32)0;
1922         int fail = 0;
1923         int crc_fail = 0;
1924         u64 len;
1925         int index;
1926
1927         BUG_ON(sblock->page_count < 1);
1928         page = sblock->pagev[0]->page;
1929         mapped_buffer = kmap_atomic(page);
1930         h = (struct btrfs_header *)mapped_buffer;
1931         memcpy(on_disk_csum, h->csum, sctx->csum_size);
1932
1933         /*
1934          * we don't use the getter functions here, as we
1935          * a) don't have an extent buffer and
1936          * b) the page is already kmapped
1937          */
1938
1939         if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
1940                 ++fail;
1941
1942         if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h))
1943                 ++fail;
1944
1945         if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
1946                 ++fail;
1947
1948         if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1949                    BTRFS_UUID_SIZE))
1950                 ++fail;
1951
1952         len = sctx->nodesize - BTRFS_CSUM_SIZE;
1953         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1954         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1955         index = 0;
1956         for (;;) {
1957                 u64 l = min_t(u64, len, mapped_size);
1958
1959                 crc = btrfs_csum_data(p, crc, l);
1960                 kunmap_atomic(mapped_buffer);
1961                 len -= l;
1962                 if (len == 0)
1963                         break;
1964                 index++;
1965                 BUG_ON(index >= sblock->page_count);
1966                 BUG_ON(!sblock->pagev[index]->page);
1967                 page = sblock->pagev[index]->page;
1968                 mapped_buffer = kmap_atomic(page);
1969                 mapped_size = PAGE_SIZE;
1970                 p = mapped_buffer;
1971         }
1972
1973         btrfs_csum_final(crc, calculated_csum);
1974         if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1975                 ++crc_fail;
1976
1977         return fail || crc_fail;
1978 }
1979
1980 static int scrub_checksum_super(struct scrub_block *sblock)
1981 {
1982         struct btrfs_super_block *s;
1983         struct scrub_ctx *sctx = sblock->sctx;
1984         u8 calculated_csum[BTRFS_CSUM_SIZE];
1985         u8 on_disk_csum[BTRFS_CSUM_SIZE];
1986         struct page *page;
1987         void *mapped_buffer;
1988         u64 mapped_size;
1989         void *p;
1990         u32 crc = ~(u32)0;
1991         int fail_gen = 0;
1992         int fail_cor = 0;
1993         u64 len;
1994         int index;
1995
1996         BUG_ON(sblock->page_count < 1);
1997         page = sblock->pagev[0]->page;
1998         mapped_buffer = kmap_atomic(page);
1999         s = (struct btrfs_super_block *)mapped_buffer;
2000         memcpy(on_disk_csum, s->csum, sctx->csum_size);
2001
2002         if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
2003                 ++fail_cor;
2004
2005         if (sblock->pagev[0]->generation != btrfs_super_generation(s))
2006                 ++fail_gen;
2007
2008         if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
2009                 ++fail_cor;
2010
2011         len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
2012         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
2013         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
2014         index = 0;
2015         for (;;) {
2016                 u64 l = min_t(u64, len, mapped_size);
2017
2018                 crc = btrfs_csum_data(p, crc, l);
2019                 kunmap_atomic(mapped_buffer);
2020                 len -= l;
2021                 if (len == 0)
2022                         break;
2023                 index++;
2024                 BUG_ON(index >= sblock->page_count);
2025                 BUG_ON(!sblock->pagev[index]->page);
2026                 page = sblock->pagev[index]->page;
2027                 mapped_buffer = kmap_atomic(page);
2028                 mapped_size = PAGE_SIZE;
2029                 p = mapped_buffer;
2030         }
2031
2032         btrfs_csum_final(crc, calculated_csum);
2033         if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
2034                 ++fail_cor;
2035
2036         if (fail_cor + fail_gen) {
2037                 /*
2038                  * if we find an error in a super block, we just report it;
2039                  * the super blocks get rewritten with the next transaction
2040                  * commit anyway
2041                  */
2042                 spin_lock(&sctx->stat_lock);
2043                 ++sctx->stat.super_errors;
2044                 spin_unlock(&sctx->stat_lock);
2045                 if (fail_cor)
2046                         btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
2047                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
2048                 else
2049                         btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
2050                                 BTRFS_DEV_STAT_GENERATION_ERRS);
2051         }
2052
2053         return fail_cor + fail_gen;
2054 }
2055
2056 static void scrub_block_get(struct scrub_block *sblock)
2057 {
2058         atomic_inc(&sblock->refs);
2059 }
2060
2061 static void scrub_block_put(struct scrub_block *sblock)
2062 {
2063         if (atomic_dec_and_test(&sblock->refs)) {
2064                 int i;
2065
2066                 if (sblock->sparity)
2067                         scrub_parity_put(sblock->sparity);
2068
2069                 for (i = 0; i < sblock->page_count; i++)
2070                         scrub_page_put(sblock->pagev[i]);
2071                 kfree(sblock);
2072         }
2073 }
2074
2075 static void scrub_page_get(struct scrub_page *spage)
2076 {
2077         atomic_inc(&spage->refs);
2078 }
2079
2080 static void scrub_page_put(struct scrub_page *spage)
2081 {
2082         if (atomic_dec_and_test(&spage->refs)) {
2083                 if (spage->page)
2084                         __free_page(spage->page);
2085                 kfree(spage);
2086         }
2087 }
2088
2089 static void scrub_submit(struct scrub_ctx *sctx)
2090 {
2091         struct scrub_bio *sbio;
2092
2093         if (sctx->curr == -1)
2094                 return;
2095
2096         sbio = sctx->bios[sctx->curr];
2097         sctx->curr = -1;
2098         scrub_pending_bio_inc(sctx);
2099         btrfsic_submit_bio(READ, sbio->bio);
2100 }
2101
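/*
 * Add one page to the current read bio. Pages are batched as long as
 * they are physically and logically contiguous on the same device;
 * otherwise the current bio is submitted and the page goes into a fresh
 * one. Once a bio holds pages_per_rd_bio pages it is submitted as well.
 */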
2102 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
2103                                     struct scrub_page *spage)
2104 {
2105         struct scrub_block *sblock = spage->sblock;
2106         struct scrub_bio *sbio;
2107         int ret;
2108
2109 again:
2110         /*
2111          * grab a fresh bio or wait for one to become available
2112          */
2113         while (sctx->curr == -1) {
2114                 spin_lock(&sctx->list_lock);
2115                 sctx->curr = sctx->first_free;
2116                 if (sctx->curr != -1) {
2117                         sctx->first_free = sctx->bios[sctx->curr]->next_free;
2118                         sctx->bios[sctx->curr]->next_free = -1;
2119                         sctx->bios[sctx->curr]->page_count = 0;
2120                         spin_unlock(&sctx->list_lock);
2121                 } else {
2122                         spin_unlock(&sctx->list_lock);
2123                         wait_event(sctx->list_wait, sctx->first_free != -1);
2124                 }
2125         }
2126         sbio = sctx->bios[sctx->curr];
2127         if (sbio->page_count == 0) {
2128                 struct bio *bio;
2129
2130                 sbio->physical = spage->physical;
2131                 sbio->logical = spage->logical;
2132                 sbio->dev = spage->dev;
2133                 bio = sbio->bio;
2134                 if (!bio) {
2135                         bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
2136                         if (!bio)
2137                                 return -ENOMEM;
2138                         sbio->bio = bio;
2139                 }
2140
2141                 bio->bi_private = sbio;
2142                 bio->bi_end_io = scrub_bio_end_io;
2143                 bio->bi_bdev = sbio->dev->bdev;
2144                 bio->bi_iter.bi_sector = sbio->physical >> 9;
2145                 sbio->err = 0;
2146         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
2147                    spage->physical ||
2148                    sbio->logical + sbio->page_count * PAGE_SIZE !=
2149                    spage->logical ||
2150                    sbio->dev != spage->dev) {
2151                 scrub_submit(sctx);
2152                 goto again;
2153         }
2154
2155         sbio->pagev[sbio->page_count] = spage;
2156         ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
2157         if (ret != PAGE_SIZE) {
2158                 if (sbio->page_count < 1) {
2159                         bio_put(sbio->bio);
2160                         sbio->bio = NULL;
2161                         return -EIO;
2162                 }
2163                 scrub_submit(sctx);
2164                 goto again;
2165         }
2166
2167         scrub_block_get(sblock); /* one for the page added to the bio */
2168         atomic_inc(&sblock->outstanding_pages);
2169         sbio->page_count++;
2170         if (sbio->page_count == sctx->pages_per_rd_bio)
2171                 scrub_submit(sctx);
2172
2173         return 0;
2174 }
2175
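/*
 * Split the range [logical, logical + len) into PAGE_SIZE scrub_pages,
 * collect them in one scrub_block and queue every page into the read
 * bios; the checksum, if any, is attached to each page. With @force the
 * current read bio is submitted right away.
 */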
2176 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
2177                        u64 physical, struct btrfs_device *dev, u64 flags,
2178                        u64 gen, int mirror_num, u8 *csum, int force,
2179                        u64 physical_for_dev_replace)
2180 {
2181         struct scrub_block *sblock;
2182         int index;
2183
2184         sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
2185         if (!sblock) {
2186                 spin_lock(&sctx->stat_lock);
2187                 sctx->stat.malloc_errors++;
2188                 spin_unlock(&sctx->stat_lock);
2189                 return -ENOMEM;
2190         }
2191
2192         /* one ref inside this function, plus one for each page added to
2193          * a bio later on */
2194         atomic_set(&sblock->refs, 1);
2195         sblock->sctx = sctx;
2196         sblock->no_io_error_seen = 1;
2197
2198         for (index = 0; len > 0; index++) {
2199                 struct scrub_page *spage;
2200                 u64 l = min_t(u64, len, PAGE_SIZE);
2201
2202                 spage = kzalloc(sizeof(*spage), GFP_NOFS);
2203                 if (!spage) {
2204 leave_nomem:
2205                         spin_lock(&sctx->stat_lock);
2206                         sctx->stat.malloc_errors++;
2207                         spin_unlock(&sctx->stat_lock);
2208                         scrub_block_put(sblock);
2209                         return -ENOMEM;
2210                 }
2211                 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2212                 scrub_page_get(spage);
2213                 sblock->pagev[index] = spage;
2214                 spage->sblock = sblock;
2215                 spage->dev = dev;
2216                 spage->flags = flags;
2217                 spage->generation = gen;
2218                 spage->logical = logical;
2219                 spage->physical = physical;
2220                 spage->physical_for_dev_replace = physical_for_dev_replace;
2221                 spage->mirror_num = mirror_num;
2222                 if (csum) {
2223                         spage->have_csum = 1;
2224                         memcpy(spage->csum, csum, sctx->csum_size);
2225                 } else {
2226                         spage->have_csum = 0;
2227                 }
2228                 sblock->page_count++;
2229                 spage->page = alloc_page(GFP_NOFS);
2230                 if (!spage->page)
2231                         goto leave_nomem;
2232                 len -= l;
2233                 logical += l;
2234                 physical += l;
2235                 physical_for_dev_replace += l;
2236         }
2237
2238         WARN_ON(sblock->page_count == 0);
2239         for (index = 0; index < sblock->page_count; index++) {
2240                 struct scrub_page *spage = sblock->pagev[index];
2241                 int ret;
2242
2243                 ret = scrub_add_page_to_rd_bio(sctx, spage);
2244                 if (ret) {
2245                         scrub_block_put(sblock);
2246                         return ret;
2247                 }
2248         }
2249
2250         if (force)
2251                 scrub_submit(sctx);
2252
2253         /* last one frees, either here or in bio completion for last page */
2254         scrub_block_put(sblock);
2255         return 0;
2256 }
2257
2258 static void scrub_bio_end_io(struct bio *bio, int err)
2259 {
2260         struct scrub_bio *sbio = bio->bi_private;
2261         struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
2262
2263         sbio->err = err;
2264         sbio->bio = bio;
2265
2266         btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
2267 }
2268
2269 static void scrub_bio_end_io_worker(struct btrfs_work *work)
2270 {
2271         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2272         struct scrub_ctx *sctx = sbio->sctx;
2273         int i;
2274
2275         BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
2276         if (sbio->err) {
2277                 for (i = 0; i < sbio->page_count; i++) {
2278                         struct scrub_page *spage = sbio->pagev[i];
2279
2280                         spage->io_error = 1;
2281                         spage->sblock->no_io_error_seen = 0;
2282                 }
2283         }
2284
2285         /* now complete the scrub_block items that have all pages completed */
2286         for (i = 0; i < sbio->page_count; i++) {
2287                 struct scrub_page *spage = sbio->pagev[i];
2288                 struct scrub_block *sblock = spage->sblock;
2289
2290                 if (atomic_dec_and_test(&sblock->outstanding_pages))
2291                         scrub_block_complete(sblock);
2292                 scrub_block_put(sblock);
2293         }
2294
2295         bio_put(sbio->bio);
2296         sbio->bio = NULL;
2297         spin_lock(&sctx->list_lock);
2298         sbio->next_free = sctx->first_free;
2299         sctx->first_free = sbio->index;
2300         spin_unlock(&sctx->list_lock);
2301
2302         if (sctx->is_dev_replace &&
2303             atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2304                 mutex_lock(&sctx->wr_ctx.wr_lock);
2305                 scrub_wr_submit(sctx);
2306                 mutex_unlock(&sctx->wr_ctx.wr_lock);
2307         }
2308
2309         scrub_pending_bio_dec(sctx);
2310 }
2311
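/*
 * Mark the sectors covered by [start, start + len) in the given parity
 * bitmap; positions are relative to logic_start and wrap around at
 * stripe_len, and a len of a full stripe or more marks every sector.
 * Illustrative example (assuming stripe_len = 64K and sectorsize = 4K,
 * i.e. 16 sectors): start - logic_start = 120K and len = 16K gives
 * offset 14, so bits 14-15 and, wrapping, bits 0-1 are set.
 */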
2312 static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2313                                        unsigned long *bitmap,
2314                                        u64 start, u64 len)
2315 {
2316         u32 offset;
2317         int nsectors;
2318         int sectorsize = sparity->sctx->dev_root->sectorsize;
2319
2320         if (len >= sparity->stripe_len) {
2321                 bitmap_set(bitmap, 0, sparity->nsectors);
2322                 return;
2323         }
2324
2325         start -= sparity->logic_start;
2326         start = div_u64_rem(start, sparity->stripe_len, &offset);
2327         offset /= sectorsize;
2328         nsectors = (int)len / sectorsize;
2329
2330         if (offset + nsectors <= sparity->nsectors) {
2331                 bitmap_set(bitmap, offset, nsectors);
2332                 return;
2333         }
2334
2335         bitmap_set(bitmap, offset, sparity->nsectors - offset);
2336         bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2337 }
2338
2339 static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2340                                                    u64 start, u64 len)
2341 {
2342         __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
2343 }
2344
2345 static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2346                                                   u64 start, u64 len)
2347 {
2348         __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
2349 }
2350
2351 static void scrub_block_complete(struct scrub_block *sblock)
2352 {
2353         int corrupted = 0;
2354
2355         if (!sblock->no_io_error_seen) {
2356                 corrupted = 1;
2357                 scrub_handle_errored_block(sblock);
2358         } else {
2359                 /*
2360                  * In the dev-replace case: if the block has a checksum
2361                  * error, it is written via the repair mechanism;
2362                  * otherwise it is written to the target device here.
2363                  */
2364                 corrupted = scrub_checksum(sblock);
2365                 if (!corrupted && sblock->sctx->is_dev_replace)
2366                         scrub_write_block_to_dev_replace(sblock);
2367         }
2368
2369         if (sblock->sparity && corrupted && !sblock->data_corrected) {
2370                 u64 start = sblock->pagev[0]->logical;
2371                 u64 end = sblock->pagev[sblock->page_count - 1]->logical +
2372                           PAGE_SIZE;
2373
2374                 scrub_parity_mark_sectors_error(sblock->sparity,
2375                                                 start, end - start);
2376         }
2377 }
2378
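/*
 * Look up the checksum for @logical in the ordered csum list that was
 * filled in while walking the csum tree. Entries entirely below @logical
 * are dropped (counted as csum_discards); on a hit the csum is copied
 * into @csum and 1 is returned, and the entry is freed once its last
 * sector has been consumed.
 */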
2379 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
2380                            u8 *csum)
2381 {
2382         struct btrfs_ordered_sum *sum = NULL;
2383         unsigned long index;
2384         unsigned long num_sectors;
2385
2386         while (!list_empty(&sctx->csum_list)) {
2387                 sum = list_first_entry(&sctx->csum_list,
2388                                        struct btrfs_ordered_sum, list);
2389                 if (sum->bytenr > logical)
2390                         return 0;
2391                 if (sum->bytenr + sum->len > logical)
2392                         break;
2393
2394                 ++sctx->stat.csum_discards;
2395                 list_del(&sum->list);
2396                 kfree(sum);
2397                 sum = NULL;
2398         }
2399         if (!sum)
2400                 return 0;
2401
2402         index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
2403         num_sectors = sum->len / sctx->sectorsize;
2404         memcpy(csum, sum->sums + index, sctx->csum_size);
2405         if (index == num_sectors - 1) {
2406                 list_del(&sum->list);
2407                 kfree(sum);
2408         }
2409         return 1;
2410 }
2411
2412 /* scrub_extent() tries to collect up to 64 kB for each bio */
2413 static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
2414                         u64 physical, struct btrfs_device *dev, u64 flags,
2415                         u64 gen, int mirror_num, u64 physical_for_dev_replace)
2416 {
2417         int ret;
2418         u8 csum[BTRFS_CSUM_SIZE];
2419         u32 blocksize;
2420
2421         if (flags & BTRFS_EXTENT_FLAG_DATA) {
2422                 blocksize = sctx->sectorsize;
2423                 spin_lock(&sctx->stat_lock);
2424                 sctx->stat.data_extents_scrubbed++;
2425                 sctx->stat.data_bytes_scrubbed += len;
2426                 spin_unlock(&sctx->stat_lock);
2427         } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2428                 blocksize = sctx->nodesize;
2429                 spin_lock(&sctx->stat_lock);
2430                 sctx->stat.tree_extents_scrubbed++;
2431                 sctx->stat.tree_bytes_scrubbed += len;
2432                 spin_unlock(&sctx->stat_lock);
2433         } else {
2434                 blocksize = sctx->sectorsize;
2435                 WARN_ON(1);
2436         }
2437
2438         while (len) {
2439                 u64 l = min_t(u64, len, blocksize);
2440                 int have_csum = 0;
2441
2442                 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2443                         /* push csums to sbio */
2444                         have_csum = scrub_find_csum(sctx, logical, l, csum);
2445                         if (have_csum == 0)
2446                                 ++sctx->stat.no_csum;
2447                         if (sctx->is_dev_replace && !have_csum) {
2448                                 ret = copy_nocow_pages(sctx, logical, l,
2449                                                        mirror_num,
2450                                                       physical_for_dev_replace);
2451                                 goto behind_scrub_pages;
2452                         }
2453                 }
2454                 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
2455                                   mirror_num, have_csum ? csum : NULL, 0,
2456                                   physical_for_dev_replace);
2457 behind_scrub_pages:
2458                 if (ret)
2459                         return ret;
2460                 len -= l;
2461                 logical += l;
2462                 physical += l;
2463                 physical_for_dev_replace += l;
2464         }
2465         return 0;
2466 }
2467
2468 static int scrub_pages_for_parity(struct scrub_parity *sparity,
2469                                   u64 logical, u64 len,
2470                                   u64 physical, struct btrfs_device *dev,
2471                                   u64 flags, u64 gen, int mirror_num, u8 *csum)
2472 {
2473         struct scrub_ctx *sctx = sparity->sctx;
2474         struct scrub_block *sblock;
2475         int index;
2476
2477         sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
2478         if (!sblock) {
2479                 spin_lock(&sctx->stat_lock);
2480                 sctx->stat.malloc_errors++;
2481                 spin_unlock(&sctx->stat_lock);
2482                 return -ENOMEM;
2483         }
2484
2485         /* one ref inside this function, plus one for each page added to
2486          * a bio later on */
2487         atomic_set(&sblock->refs, 1);
2488         sblock->sctx = sctx;
2489         sblock->no_io_error_seen = 1;
2490         sblock->sparity = sparity;
2491         scrub_parity_get(sparity);
2492
2493         for (index = 0; len > 0; index++) {
2494                 struct scrub_page *spage;
2495                 u64 l = min_t(u64, len, PAGE_SIZE);
2496
2497                 spage = kzalloc(sizeof(*spage), GFP_NOFS);
2498                 if (!spage) {
2499 leave_nomem:
2500                         spin_lock(&sctx->stat_lock);
2501                         sctx->stat.malloc_errors++;
2502                         spin_unlock(&sctx->stat_lock);
2503                         scrub_block_put(sblock);
2504                         return -ENOMEM;
2505                 }
2506                 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2507                 /* For scrub block */
2508                 scrub_page_get(spage);
2509                 sblock->pagev[index] = spage;
2510                 /* For scrub parity */
2511                 scrub_page_get(spage);
2512                 list_add_tail(&spage->list, &sparity->spages);
2513                 spage->sblock = sblock;
2514                 spage->dev = dev;
2515                 spage->flags = flags;
2516                 spage->generation = gen;
2517                 spage->logical = logical;
2518                 spage->physical = physical;
2519                 spage->mirror_num = mirror_num;
2520                 if (csum) {
2521                         spage->have_csum = 1;
2522                         memcpy(spage->csum, csum, sctx->csum_size);
2523                 } else {
2524                         spage->have_csum = 0;
2525                 }
2526                 sblock->page_count++;
2527                 spage->page = alloc_page(GFP_NOFS);
2528                 if (!spage->page)
2529                         goto leave_nomem;
2530                 len -= l;
2531                 logical += l;
2532                 physical += l;
2533         }
2534
2535         WARN_ON(sblock->page_count == 0);
2536         for (index = 0; index < sblock->page_count; index++) {
2537                 struct scrub_page *spage = sblock->pagev[index];
2538                 int ret;
2539
2540                 ret = scrub_add_page_to_rd_bio(sctx, spage);
2541                 if (ret) {
2542                         scrub_block_put(sblock);
2543                         return ret;
2544                 }
2545         }
2546
2547         /* last one frees, either here or in bio completion for last page */
2548         scrub_block_put(sblock);
2549         return 0;
2550 }
2551
2552 static int scrub_extent_for_parity(struct scrub_parity *sparity,
2553                                    u64 logical, u64 len,
2554                                    u64 physical, struct btrfs_device *dev,
2555                                    u64 flags, u64 gen, int mirror_num)
2556 {
2557         struct scrub_ctx *sctx = sparity->sctx;
2558         int ret;
2559         u8 csum[BTRFS_CSUM_SIZE];
2560         u32 blocksize;
2561
2562         if (flags & BTRFS_EXTENT_FLAG_DATA) {
2563                 blocksize = sctx->sectorsize;
2564         } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2565                 blocksize = sctx->nodesize;
2566         } else {
2567                 blocksize = sctx->sectorsize;
2568                 WARN_ON(1);
2569         }
2570
2571         while (len) {
2572                 u64 l = min_t(u64, len, blocksize);
2573                 int have_csum = 0;
2574
2575                 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2576                         /* push csums to sbio */
2577                         have_csum = scrub_find_csum(sctx, logical, l, csum);
2578                         if (have_csum == 0)
2579                                 goto skip;
2580                 }
2581                 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2582                                              flags, gen, mirror_num,
2583                                              have_csum ? csum : NULL);
2584                 if (ret)
2585                         return ret;
2586 skip:
2587                 len -= l;
2588                 logical += l;
2589                 physical += l;
2590         }
2591         return 0;
2592 }
2593
2594 /*
2595  * Given a physical address, this will calculate its logical offset.
2596  * If this is a parity stripe, it will return the logical offset of
2597  * the leftmost data stripe.
2598  *
2599  * Returns 0 if it is a data stripe, 1 if it is a parity stripe.
2600  */
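/*
 * Worked example, assuming a 3-device RAID5 chunk (2 data stripes plus
 * parity) with stripe_len = 64K: for num = 1 and a physical address at
 * that device's stripe start, last_offset = 0; the i = 0 rotation lands
 * on device 0 and i = 1 lands on device 1, so the function returns 0
 * with *offset = 64K, the second data stripe of the first full stripe.
 */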
2601 static int get_raid56_logic_offset(u64 physical, int num,
2602                                    struct map_lookup *map, u64 *offset,
2603                                    u64 *stripe_start)
2604 {
2605         int i;
2606         int j = 0;
2607         u64 stripe_nr;
2608         u64 last_offset;
2609         u32 stripe_index;
2610         u32 rot;
2611
2612         last_offset = (physical - map->stripes[num].physical) *
2613                       nr_data_stripes(map);
2614         if (stripe_start)
2615                 *stripe_start = last_offset;
2616
2617         *offset = last_offset;
2618         for (i = 0; i < nr_data_stripes(map); i++) {
2619                 *offset = last_offset + i * map->stripe_len;
2620
2621                 stripe_nr = div_u64(*offset, map->stripe_len);
2622                 stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));
2623
2624                 /* Work out the disk rotation on this stripe-set */
2625                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
2626                 /* calculate which stripe this data is located on */
2627                 rot += i;
2628                 stripe_index = rot % map->num_stripes;
2629                 if (stripe_index == num)
2630                         return 0;
2631                 if (stripe_index < num)
2632                         j++;
2633         }
2634         *offset = last_offset + j * map->stripe_len;
2635         return 1;
2636 }
2637
2638 static void scrub_free_parity(struct scrub_parity *sparity)
2639 {
2640         struct scrub_ctx *sctx = sparity->sctx;
2641         struct scrub_page *curr, *next;
2642         int nbits;
2643
2644         nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
2645         if (nbits) {
2646                 spin_lock(&sctx->stat_lock);
2647                 sctx->stat.read_errors += nbits;
2648                 sctx->stat.uncorrectable_errors += nbits;
2649                 spin_unlock(&sctx->stat_lock);
2650         }
2651
2652         list_for_each_entry_safe(curr, next, &sparity->spages, list) {
2653                 list_del_init(&curr->list);
2654                 scrub_page_put(curr);
2655         }
2656
2657         kfree(sparity);
2658 }
2659
2660 static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
2661 {
2662         struct scrub_parity *sparity = container_of(work, struct scrub_parity,
2663                                                     work);
2664         struct scrub_ctx *sctx = sparity->sctx;
2665
2666         scrub_free_parity(sparity);
2667         scrub_pending_bio_dec(sctx);
2668 }
2669
2670 static void scrub_parity_bio_endio(struct bio *bio, int error)
2671 {
2672         struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
2673
2674         if (error)
2675                 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2676                           sparity->nsectors);
2677
2678         bio_put(bio);
2679
2680         btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
2681                         scrub_parity_bio_endio_worker, NULL, NULL);
2682         btrfs_queue_work(sparity->sctx->dev_root->fs_info->scrub_parity_workers,
2683                          &sparity->work);
2684 }
2685
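/*
 * Re-check and rewrite the parity of the sectors that were scrubbed
 * without errors: the error bitmap is subtracted from the data bitmap,
 * the remaining sectors are handed to a scrub rbio and submitted; on
 * any setup failure all sectors are marked as errored instead.
 */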
2686 static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2687 {
2688         struct scrub_ctx *sctx = sparity->sctx;
2689         struct bio *bio;
2690         struct btrfs_raid_bio *rbio;
2691         struct scrub_page *spage;
2692         struct btrfs_bio *bbio = NULL;
2693         u64 length;
2694         int ret;
2695
2696         if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
2697                            sparity->nsectors))
2698                 goto out;
2699
2700         length = sparity->logic_end - sparity->logic_start;
2701         ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
2702                                sparity->logic_start,
2703                                &length, &bbio, 0, 1);
2704         if (ret || !bbio || !bbio->raid_map)
2705                 goto bbio_out;
2706
2707         bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
2708         if (!bio)
2709                 goto bbio_out;
2710
2711         bio->bi_iter.bi_sector = sparity->logic_start >> 9;
2712         bio->bi_private = sparity;
2713         bio->bi_end_io = scrub_parity_bio_endio;
2714
2715         rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio,
2716                                               length, sparity->scrub_dev,
2717                                               sparity->dbitmap,
2718                                               sparity->nsectors);
2719         if (!rbio)
2720                 goto rbio_out;
2721
2722         list_for_each_entry(spage, &sparity->spages, list)
2723                 raid56_parity_add_scrub_pages(rbio, spage->page,
2724                                               spage->logical);
2725
2726         scrub_pending_bio_inc(sctx);
2727         raid56_parity_submit_scrub_rbio(rbio);
2728         return;
2729
2730 rbio_out:
2731         bio_put(bio);
2732 bbio_out:
2733         btrfs_put_bbio(bbio);
2734         bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2735                   sparity->nsectors);
2736         spin_lock(&sctx->stat_lock);
2737         sctx->stat.malloc_errors++;
2738         spin_unlock(&sctx->stat_lock);
2739 out:
2740         scrub_free_parity(sparity);
2741 }
2742
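/*
 * Number of bytes needed for one per-sector bitmap, rounded up to whole
 * longs.  As an illustrative example, a 64K stripe_len with 4K sectors
 * gives nsectors = 16, which needs 8 bytes (one long) on a 64-bit machine
 * and 4 bytes on a 32-bit machine.
 */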
2743 static inline int scrub_calc_parity_bitmap_len(int nsectors)
2744 {
2745         return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * (BITS_PER_LONG / 8);
2746 }
2747
2748 static void scrub_parity_get(struct scrub_parity *sparity)
2749 {
2750         atomic_inc(&sparity->refs);
2751 }
2752
2753 static void scrub_parity_put(struct scrub_parity *sparity)
2754 {
2755         if (!atomic_dec_and_test(&sparity->refs))
2756                 return;
2757
2758         scrub_parity_check_and_repair(sparity);
2759 }
2760
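/*
 * Scrub the full stripes in [logic_start, logic_end): walk the extent tree,
 * mark every data sector found in the range in dbitmap, read and verify
 * those sectors, and once the last reference to the scrub_parity is dropped
 * hand the bitmap to the raid56 code to check and repair the parity (see
 * scrub_parity_check_and_repair()).
 */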
2761 static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
2762                                                   struct map_lookup *map,
2763                                                   struct btrfs_device *sdev,
2764                                                   struct btrfs_path *path,
2765                                                   u64 logic_start,
2766                                                   u64 logic_end)
2767 {
2768         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2769         struct btrfs_root *root = fs_info->extent_root;
2770         struct btrfs_root *csum_root = fs_info->csum_root;
2771         struct btrfs_extent_item *extent;
2772         u64 flags;
2773         int ret;
2774         int slot;
2775         struct extent_buffer *l;
2776         struct btrfs_key key;
2777         u64 generation;
2778         u64 extent_logical;
2779         u64 extent_physical;
2780         u64 extent_len;
2781         struct btrfs_device *extent_dev;
2782         struct scrub_parity *sparity;
2783         int nsectors;
2784         int bitmap_len;
2785         int extent_mirror_num;
2786         int stop_loop = 0;
2787
2788         nsectors = map->stripe_len / root->sectorsize;
2789         bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
2790         sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
2791                           GFP_NOFS);
2792         if (!sparity) {
2793                 spin_lock(&sctx->stat_lock);
2794                 sctx->stat.malloc_errors++;
2795                 spin_unlock(&sctx->stat_lock);
2796                 return -ENOMEM;
2797         }
2798
2799         sparity->stripe_len = map->stripe_len;
2800         sparity->nsectors = nsectors;
2801         sparity->sctx = sctx;
2802         sparity->scrub_dev = sdev;
2803         sparity->logic_start = logic_start;
2804         sparity->logic_end = logic_end;
2805         atomic_set(&sparity->refs, 1);
2806         INIT_LIST_HEAD(&sparity->spages);
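        /*
         * Both bitmaps live in the bitmap[] area allocated at the end of
         * the struct: dbitmap marks the sectors that carry data to be
         * scrubbed, ebitmap marks the sectors that hit errors (accounted
         * in scrub_free_parity()).
         */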
2807         sparity->dbitmap = sparity->bitmap;
2808         sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
2809
2810         ret = 0;
2811         while (logic_start < logic_end) {
2812                 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2813                         key.type = BTRFS_METADATA_ITEM_KEY;
2814                 else
2815                         key.type = BTRFS_EXTENT_ITEM_KEY;
2816                 key.objectid = logic_start;
2817                 key.offset = (u64)-1;
2818
2819                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2820                 if (ret < 0)
2821                         goto out;
2822
2823                 if (ret > 0) {
2824                         ret = btrfs_previous_extent_item(root, path, 0);
2825                         if (ret < 0)
2826                                 goto out;
2827                         if (ret > 0) {
2828                                 btrfs_release_path(path);
2829                                 ret = btrfs_search_slot(NULL, root, &key,
2830                                                         path, 0, 0);
2831                                 if (ret < 0)
2832                                         goto out;
2833                         }
2834                 }
2835
2836                 stop_loop = 0;
2837                 while (1) {
2838                         u64 bytes;
2839
2840                         l = path->nodes[0];
2841                         slot = path->slots[0];
2842                         if (slot >= btrfs_header_nritems(l)) {
2843                                 ret = btrfs_next_leaf(root, path);
2844                                 if (ret == 0)
2845                                         continue;
2846                                 if (ret < 0)
2847                                         goto out;
2848
2849                                 stop_loop = 1;
2850                                 break;
2851                         }
2852                         btrfs_item_key_to_cpu(l, &key, slot);
2853
2854                         if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2855                             key.type != BTRFS_METADATA_ITEM_KEY)
2856                                 goto next;
2857
2858                         if (key.type == BTRFS_METADATA_ITEM_KEY)
2859                                 bytes = root->nodesize;
2860                         else
2861                                 bytes = key.offset;
2862
2863                         if (key.objectid + bytes <= logic_start)
2864                                 goto next;
2865
2866                         if (key.objectid >= logic_end) {
2867                                 stop_loop = 1;
2868                                 break;
2869                         }
2870
2871                         while (key.objectid >= logic_start + map->stripe_len)
2872                                 logic_start += map->stripe_len;
2873
2874                         extent = btrfs_item_ptr(l, slot,
2875                                                 struct btrfs_extent_item);
2876                         flags = btrfs_extent_flags(l, extent);
2877                         generation = btrfs_extent_generation(l, extent);
2878
2879                         if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
2880                             (key.objectid < logic_start ||
2881                              key.objectid + bytes >
2882                              logic_start + map->stripe_len)) {
2883                                 btrfs_err(fs_info, "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
2884                                           key.objectid, logic_start);
2885                                 goto next;
2886                         }
2887 again:
2888                         extent_logical = key.objectid;
2889                         extent_len = bytes;
2890
2891                         if (extent_logical < logic_start) {
2892                                 extent_len -= logic_start - extent_logical;
2893                                 extent_logical = logic_start;
2894                         }
2895
2896                         if (extent_logical + extent_len >
2897                             logic_start + map->stripe_len)
2898                                 extent_len = logic_start + map->stripe_len -
2899                                              extent_logical;
2900
2901                         scrub_parity_mark_sectors_data(sparity, extent_logical,
2902                                                        extent_len);
2903
2904                         scrub_remap_extent(fs_info, extent_logical,
2905                                            extent_len, &extent_physical,
2906                                            &extent_dev,
2907                                            &extent_mirror_num);
2908
2909                         ret = btrfs_lookup_csums_range(csum_root,
2910                                                 extent_logical,
2911                                                 extent_logical + extent_len - 1,
2912                                                 &sctx->csum_list, 1);
2913                         if (ret)
2914                                 goto out;
2915
2916                         ret = scrub_extent_for_parity(sparity, extent_logical,
2917                                                       extent_len,
2918                                                       extent_physical,
2919                                                       extent_dev, flags,
2920                                                       generation,
2921                                                       extent_mirror_num);
2922
2923                         scrub_free_csums(sctx);
2924
2925                         if (ret)
2926                                 goto out;
2927
2928                         if (extent_logical + extent_len <
2929                             key.objectid + bytes) {
2930                                 logic_start += map->stripe_len;
2931
2932                                 if (logic_start >= logic_end) {
2933                                         stop_loop = 1;
2934                                         break;
2935                                 }
2936
2937                                 if (logic_start < key.objectid + bytes) {
2938                                         cond_resched();
2939                                         goto again;
2940                                 }
2941                         }
2942 next:
2943                         path->slots[0]++;
2944                 }
2945
2946                 btrfs_release_path(path);
2947
2948                 if (stop_loop)
2949                         break;
2950
2951                 logic_start += map->stripe_len;
2952         }
2953 out:
2954         if (ret < 0)
2955                 scrub_parity_mark_sectors_error(sparity, logic_start,
2956                                                 logic_end - logic_start);
2957         scrub_parity_put(sparity);
2958         scrub_submit(sctx);
2959         mutex_lock(&sctx->wr_ctx.wr_lock);
2960         scrub_wr_submit(sctx);
2961         mutex_unlock(&sctx->wr_ctx.wr_lock);
2962
2963         btrfs_release_path(path);
2964         return ret < 0 ? ret : 0;
2965 }
2966
2967 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2968                                            struct map_lookup *map,
2969                                            struct btrfs_device *scrub_dev,
2970                                            int num, u64 base, u64 length,
2971                                            int is_dev_replace)
2972 {
2973         struct btrfs_path *path, *ppath;
2974         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2975         struct btrfs_root *root = fs_info->extent_root;
2976         struct btrfs_root *csum_root = fs_info->csum_root;
2977         struct btrfs_extent_item *extent;
2978         struct blk_plug plug;
2979         u64 flags;
2980         int ret;
2981         int slot;
2982         u64 nstripes;
2983         struct extent_buffer *l;
2984         struct btrfs_key key;
2985         u64 physical;
2986         u64 logical;
2987         u64 logic_end;
2988         u64 physical_end;
2989         u64 generation;
2990         int mirror_num;
2991         struct reada_control *reada1;
2992         struct reada_control *reada2;
2993         struct btrfs_key key_start;
2994         struct btrfs_key key_end;
2995         u64 increment = map->stripe_len;
2996         u64 offset;
2997         u64 extent_logical;
2998         u64 extent_physical;
2999         u64 extent_len;
3000         u64 stripe_logical;
3001         u64 stripe_end;
3002         struct btrfs_device *extent_dev;
3003         int extent_mirror_num;
3004         int stop_loop = 0;
3005
3006         physical = map->stripes[num].physical;
3007         offset = 0;
3008         nstripes = div_u64(length, map->stripe_len);
3009         if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3010                 offset = map->stripe_len * num;
3011                 increment = map->stripe_len * map->num_stripes;
3012                 mirror_num = 1;
3013         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3014                 int factor = map->num_stripes / map->sub_stripes;
3015                 offset = map->stripe_len * (num / map->sub_stripes);
3016                 increment = map->stripe_len * factor;
3017                 mirror_num = num % map->sub_stripes + 1;
3018         } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3019                 increment = map->stripe_len;
3020                 mirror_num = num % map->num_stripes + 1;
3021         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3022                 increment = map->stripe_len;
3023                 mirror_num = num % map->num_stripes + 1;
3024         } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3025                 get_raid56_logic_offset(physical, num, map, &offset, NULL);
3026                 increment = map->stripe_len * nr_data_stripes(map);
3027                 mirror_num = 1;
3028         } else {
3029                 increment = map->stripe_len;
3030                 mirror_num = 1;
3031         }
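        /*
         * Summary of the per-profile setup above: RAID0 visits every
         * num_stripes-th stripe of the chunk, RAID10 every
         * (num_stripes / sub_stripes)-th, RAID1 and DUP walk the complete
         * copy on this device, and RAID5/6 advance by one full data stripe
         * (stripe_len * nr_data_stripes()) with the parity stripes handled
         * separately by scrub_raid56_parity().
         */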
3032
3033         path = btrfs_alloc_path();
3034         if (!path)
3035                 return -ENOMEM;
3036
3037         ppath = btrfs_alloc_path();
3038         if (!ppath) {
3039                 btrfs_free_path(path);
3040                 return -ENOMEM;
3041         }
3042
3043         /*
3044          * work on commit root. The related disk blocks are static as
3045          * long as COW is applied. This means it is safe to rewrite
3046          * them to repair disk errors without any race conditions
3047          */
3048         path->search_commit_root = 1;
3049         path->skip_locking = 1;
3050
3051         ppath->search_commit_root = 1;
3052         ppath->skip_locking = 1;
3053         /*
3054          * trigger the readahead for the extent tree and csum tree and wait for
3055          * completion. During readahead, the scrub is officially paused
3056          * to not hold off transaction commits
3057          */
3058         logical = base + offset;
3059         physical_end = physical + nstripes * map->stripe_len;
3060         if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3061                 get_raid56_logic_offset(physical_end, num,
3062                                         map, &logic_end, NULL);
3063                 logic_end += base;
3064         } else {
3065                 logic_end = logical + increment * nstripes;
3066         }
3067         wait_event(sctx->list_wait,
3068                    atomic_read(&sctx->bios_in_flight) == 0);
3069         scrub_blocked_if_needed(fs_info);
3070
3071         /* FIXME it might be better to start readahead at commit root */
3072         key_start.objectid = logical;
3073         key_start.type = BTRFS_EXTENT_ITEM_KEY;
3074         key_start.offset = (u64)0;
3075         key_end.objectid = logic_end;
3076         key_end.type = BTRFS_METADATA_ITEM_KEY;
3077         key_end.offset = (u64)-1;
3078         reada1 = btrfs_reada_add(root, &key_start, &key_end);
3079
3080         key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3081         key_start.type = BTRFS_EXTENT_CSUM_KEY;
3082         key_start.offset = logical;
3083         key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3084         key_end.type = BTRFS_EXTENT_CSUM_KEY;
3085         key_end.offset = logic_end;
3086         reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
3087
3088         if (!IS_ERR(reada1))
3089                 btrfs_reada_wait(reada1);
3090         if (!IS_ERR(reada2))
3091                 btrfs_reada_wait(reada2);
3092
3093
3094         /*
3095          * collect all data csums for the stripe to avoid seeking during
3096          * the scrub. This might currently (with crc32) add up to about 1MB
3097          */
3098         blk_start_plug(&plug);
3099
3100         /*
3101          * now find all extents for each stripe and scrub them
3102          */
3103         ret = 0;
3104         while (physical < physical_end) {
3105                 /*
3106                  * canceled?
3107                  */
3108                 if (atomic_read(&fs_info->scrub_cancel_req) ||
3109                     atomic_read(&sctx->cancel_req)) {
3110                         ret = -ECANCELED;
3111                         goto out;
3112                 }
3113                 /*
3114                  * check to see if we have to pause
3115                  */
3116                 if (atomic_read(&fs_info->scrub_pause_req)) {
3117                         /* push queued extents */
3118                         atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
3119                         scrub_submit(sctx);
3120                         mutex_lock(&sctx->wr_ctx.wr_lock);
3121                         scrub_wr_submit(sctx);
3122                         mutex_unlock(&sctx->wr_ctx.wr_lock);
3123                         wait_event(sctx->list_wait,
3124                                    atomic_read(&sctx->bios_in_flight) == 0);
3125                         atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3126                         scrub_blocked_if_needed(fs_info);
3127                 }
3128
3129                 /* for raid56, we skip the parity stripes */
3130                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3131                         ret = get_raid56_logic_offset(physical, num, map,
3132                                                       &logical,
3133                                                       &stripe_logical);
3134                         logical += base;
3135                         if (ret) {
3136                                 stripe_logical += base;
3137                                 stripe_end = stripe_logical + increment;
3138                                 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3139                                                           ppath, stripe_logical,
3140                                                           stripe_end);
3141                                 if (ret)
3142                                         goto out;
3143                                 goto skip;
3144                         }
3145                 }
3146
3147                 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3148                         key.type = BTRFS_METADATA_ITEM_KEY;
3149                 else
3150                         key.type = BTRFS_EXTENT_ITEM_KEY;
3151                 key.objectid = logical;
3152                 key.offset = (u64)-1;
3153
3154                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3155                 if (ret < 0)
3156                         goto out;
3157
3158                 if (ret > 0) {
3159                         ret = btrfs_previous_extent_item(root, path, 0);
3160                         if (ret < 0)
3161                                 goto out;
3162                         if (ret > 0) {
3163                                 /* there's no smaller item, so stick with the
3164                                  * larger one */
3165                                 btrfs_release_path(path);
3166                                 ret = btrfs_search_slot(NULL, root, &key,
3167                                                         path, 0, 0);
3168                                 if (ret < 0)
3169                                         goto out;
3170                         }
3171                 }
3172
3173                 stop_loop = 0;
3174                 while (1) {
3175                         u64 bytes;
3176
3177                         l = path->nodes[0];
3178                         slot = path->slots[0];
3179                         if (slot >= btrfs_header_nritems(l)) {
3180                                 ret = btrfs_next_leaf(root, path);
3181                                 if (ret == 0)
3182                                         continue;
3183                                 if (ret < 0)
3184                                         goto out;
3185
3186                                 stop_loop = 1;
3187                                 break;
3188                         }
3189                         btrfs_item_key_to_cpu(l, &key, slot);
3190
3191                         if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3192                             key.type != BTRFS_METADATA_ITEM_KEY)
3193                                 goto next;
3194
3195                         if (key.type == BTRFS_METADATA_ITEM_KEY)
3196                                 bytes = root->nodesize;
3197                         else
3198                                 bytes = key.offset;
3199
3200                         if (key.objectid + bytes <= logical)
3201                                 goto next;
3202
3203                         if (key.objectid >= logical + map->stripe_len) {
3204                                 /* out of this device extent */
3205                                 if (key.objectid >= logic_end)
3206                                         stop_loop = 1;
3207                                 break;
3208                         }
3209
3210                         extent = btrfs_item_ptr(l, slot,
3211                                                 struct btrfs_extent_item);
3212                         flags = btrfs_extent_flags(l, extent);
3213                         generation = btrfs_extent_generation(l, extent);
3214
3215                         if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3216                             (key.objectid < logical ||
3217                              key.objectid + bytes >
3218                              logical + map->stripe_len)) {
3219                                 btrfs_err(fs_info,
3220                                            "scrub: tree block %llu spanning "
3221                                            "stripes, ignored. logical=%llu",
3222                                        key.objectid, logical);
3223                                 goto next;
3224                         }
3225
3226 again:
3227                         extent_logical = key.objectid;
3228                         extent_len = bytes;
3229
3230                         /*
3231                          * trim extent to this stripe
3232                          */
3233                         if (extent_logical < logical) {
3234                                 extent_len -= logical - extent_logical;
3235                                 extent_logical = logical;
3236                         }
3237                         if (extent_logical + extent_len >
3238                             logical + map->stripe_len) {
3239                                 extent_len = logical + map->stripe_len -
3240                                              extent_logical;
3241                         }
3242
3243                         extent_physical = extent_logical - logical + physical;
3244                         extent_dev = scrub_dev;
3245                         extent_mirror_num = mirror_num;
3246                         if (is_dev_replace)
3247                                 scrub_remap_extent(fs_info, extent_logical,
3248                                                    extent_len, &extent_physical,
3249                                                    &extent_dev,
3250                                                    &extent_mirror_num);
3251
3252                         ret = btrfs_lookup_csums_range(csum_root,
3253                                                        extent_logical,
3254                                                        extent_logical +
3255                                                        extent_len - 1,
3256                                                        &sctx->csum_list, 1);
3257                         if (ret)
3258                                 goto out;
3259
3260                         ret = scrub_extent(sctx, extent_logical, extent_len,
3261                                            extent_physical, extent_dev, flags,
3262                                            generation, extent_mirror_num,
3263                                            extent_logical - logical + physical);
3264
3265                         scrub_free_csums(sctx);
3266
3267                         if (ret)
3268                                 goto out;
3269
3270                         if (extent_logical + extent_len <
3271                             key.objectid + bytes) {
3272                                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3273                                         /*
3274                                          * loop until we find next data stripe
3275                                          * or we have finished all stripes.
3276                                          */
3277 loop:
3278                                         physical += map->stripe_len;
3279                                         ret = get_raid56_logic_offset(physical,
3280                                                         num, map, &logical,
3281                                                         &stripe_logical);
3282                                         logical += base;
3283
3284                                         if (ret && physical < physical_end) {
3285                                                 stripe_logical += base;
3286                                                 stripe_end = stripe_logical +
3287                                                                 increment;
3288                                                 ret = scrub_raid56_parity(sctx,
3289                                                         map, scrub_dev, ppath,
3290                                                         stripe_logical,
3291                                                         stripe_end);
3292                                                 if (ret)
3293                                                         goto out;
3294                                                 goto loop;
3295                                         }
3296                                 } else {
3297                                         physical += map->stripe_len;
3298                                         logical += increment;
3299                                 }
3300                                 if (logical < key.objectid + bytes) {
3301                                         cond_resched();
3302                                         goto again;
3303                                 }
3304
3305                                 if (physical >= physical_end) {
3306                                         stop_loop = 1;
3307                                         break;
3308                                 }
3309                         }
3310 next:
3311                         path->slots[0]++;
3312                 }
3313                 btrfs_release_path(path);
3314 skip:
3315                 logical += increment;
3316                 physical += map->stripe_len;
3317                 spin_lock(&sctx->stat_lock);
3318                 if (stop_loop)
3319                         sctx->stat.last_physical = map->stripes[num].physical +
3320                                                    length;
3321                 else
3322                         sctx->stat.last_physical = physical;
3323                 spin_unlock(&sctx->stat_lock);
3324                 if (stop_loop)
3325                         break;
3326         }
3327 out:
3328         /* push queued extents */
3329         scrub_submit(sctx);
3330         mutex_lock(&sctx->wr_ctx.wr_lock);
3331         scrub_wr_submit(sctx);
3332         mutex_unlock(&sctx->wr_ctx.wr_lock);
3333
3334         blk_finish_plug(&plug);
3335         btrfs_free_path(path);
3336         btrfs_free_path(ppath);
3337         return ret < 0 ? ret : 0;
3338 }
3339
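/*
 * Scrub a single chunk: look up its mapping and scrub every stripe of the
 * chunk that is stored on @scrub_dev at @dev_offset.
 */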
3340 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
3341                                           struct btrfs_device *scrub_dev,
3342                                           u64 chunk_tree, u64 chunk_objectid,
3343                                           u64 chunk_offset, u64 length,
3344                                           u64 dev_offset, int is_dev_replace)
3345 {
3346         struct btrfs_mapping_tree *map_tree =
3347                 &sctx->dev_root->fs_info->mapping_tree;
3348         struct map_lookup *map;
3349         struct extent_map *em;
3350         int i;
3351         int ret = 0;
3352
3353         read_lock(&map_tree->map_tree.lock);
3354         em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3355         read_unlock(&map_tree->map_tree.lock);
3356
3357         if (!em)
3358                 return -EINVAL;
3359
3360         map = (struct map_lookup *)em->bdev;
3361         if (em->start != chunk_offset)
3362                 goto out;
3363
3364         if (em->len < length)
3365                 goto out;
3366
3367         for (i = 0; i < map->num_stripes; ++i) {
3368                 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
3369                     map->stripes[i].physical == dev_offset) {
3370                         ret = scrub_stripe(sctx, map, scrub_dev, i,
3371                                            chunk_offset, length,
3372                                            is_dev_replace);
3373                         if (ret)
3374                                 goto out;
3375                 }
3376         }
3377 out:
3378         free_extent_map(em);
3379
3380         return ret;
3381 }
3382
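/*
 * Walk all dev extents of @scrub_dev within [start, end): for each one,
 * take a reference on the corresponding block group, mark it read-only,
 * scrub the chunk, flush and wait for all outstanding bios, then make the
 * block group writable again and continue with the next dev extent.
 */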
3383 static noinline_for_stack
3384 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3385                            struct btrfs_device *scrub_dev, u64 start, u64 end,
3386                            int is_dev_replace)
3387 {
3388         struct btrfs_dev_extent *dev_extent = NULL;
3389         struct btrfs_path *path;
3390         struct btrfs_root *root = sctx->dev_root;
3391         struct btrfs_fs_info *fs_info = root->fs_info;
3392         u64 length;
3393         u64 chunk_tree;
3394         u64 chunk_objectid;
3395         u64 chunk_offset;
3396         int ret = 0;
3397         int slot;
3398         struct extent_buffer *l;
3399         struct btrfs_key key;
3400         struct btrfs_key found_key;
3401         struct btrfs_block_group_cache *cache;
3402         struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
3403
3404         path = btrfs_alloc_path();
3405         if (!path)
3406                 return -ENOMEM;
3407
3408         path->reada = 2;
3409         path->search_commit_root = 1;
3410         path->skip_locking = 1;
3411
3412         key.objectid = scrub_dev->devid;
3413         key.offset = 0ull;
3414         key.type = BTRFS_DEV_EXTENT_KEY;
3415
3416         while (1) {
3417                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3418                 if (ret < 0)
3419                         break;
3420                 if (ret > 0) {
3421                         if (path->slots[0] >=
3422                             btrfs_header_nritems(path->nodes[0])) {
3423                                 ret = btrfs_next_leaf(root, path);
3424                                 if (ret < 0)
3425                                         break;
3426                                 if (ret > 0) {
3427                                         ret = 0;
3428                                         break;
3429                                 }
3430                         } else {
3431                                 ret = 0;
3432                         }
3433                 }
3434
3435                 l = path->nodes[0];
3436                 slot = path->slots[0];
3437
3438                 btrfs_item_key_to_cpu(l, &found_key, slot);
3439
3440                 if (found_key.objectid != scrub_dev->devid)
3441                         break;
3442
3443                 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
3444                         break;
3445
3446                 if (found_key.offset >= end)
3447                         break;
3448
3449                 if (found_key.offset < key.offset)
3450                         break;
3451
3452                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3453                 length = btrfs_dev_extent_length(l, dev_extent);
3454
3455                 if (found_key.offset + length <= start)
3456                         goto skip;
3457
3458                 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3459                 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3460                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3461
3462                 /*
3463                  * get a reference on the corresponding block group to prevent
3464                  * the chunk from going away while we scrub it
3465                  */
3466                 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3467
3468                 /* some chunks are removed but not yet committed to disk,
3469                  * so keep scrubbing the rest */
3470                 if (!cache)
3471                         goto skip;
3472
3473                 /*
3474                  * we need to call btrfs_inc_block_group_ro() with scrubs_paused,
3475                  * to avoid deadlock caused by:
3476                  * btrfs_inc_block_group_ro()
3477                  * -> btrfs_wait_for_commit()
3478                  * -> btrfs_commit_transaction()
3479                  * -> btrfs_scrub_pause()
3480                  */
3481                 scrub_pause_on(fs_info);
3482                 ret = btrfs_inc_block_group_ro(root, cache);
3483                 scrub_pause_off(fs_info);
3484                 if (ret) {
3485                         btrfs_put_block_group(cache);
3486                         break;
3487                 }
3488
3489                 dev_replace->cursor_right = found_key.offset + length;
3490                 dev_replace->cursor_left = found_key.offset;
3491                 dev_replace->item_needs_writeback = 1;
3492                 ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
3493                                   chunk_offset, length, found_key.offset,
3494                                   is_dev_replace);
3495
3496                 /*
3497                  * flush and submit all pending read and write bios, and afterwards
3498                  * wait for them.
3499                  * Note that in the dev replace case, a read request causes
3500                  * write requests that are submitted in the read completion
3501                  * worker. Therefore in the current situation, it is required
3502                  * that all write requests are flushed, so that all read and
3503                  * write requests are really completed when bios_in_flight
3504                  * changes to 0.
3505                  */
3506                 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
3507                 scrub_submit(sctx);
3508                 mutex_lock(&sctx->wr_ctx.wr_lock);
3509                 scrub_wr_submit(sctx);
3510                 mutex_unlock(&sctx->wr_ctx.wr_lock);
3511
3512                 wait_event(sctx->list_wait,
3513                            atomic_read(&sctx->bios_in_flight) == 0);
3514
3515                 scrub_pause_on(fs_info);
3516
3517                 /*
3518                  * must be called before we decrease @scrub_paused.
3519                  * make sure we don't block transaction commit while
3520                  * we are waiting for pending workers to finish.
3521                  */
3522                 wait_event(sctx->list_wait,
3523                            atomic_read(&sctx->workers_pending) == 0);
3524                 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3525
3526                 scrub_pause_off(fs_info);
3527
3528                 btrfs_dec_block_group_ro(root, cache);
3529
3530                 btrfs_put_block_group(cache);
3531                 if (ret)
3532                         break;
3533                 if (is_dev_replace &&
3534                     atomic64_read(&dev_replace->num_write_errors) > 0) {
3535                         ret = -EIO;
3536                         break;
3537                 }
3538                 if (sctx->stat.malloc_errors > 0) {
3539                         ret = -ENOMEM;
3540                         break;
3541                 }
3542
3543                 dev_replace->cursor_left = dev_replace->cursor_right;
3544                 dev_replace->item_needs_writeback = 1;
3545 skip:
3546                 key.offset = found_key.offset + length;
3547                 btrfs_release_path(path);
3548         }
3549
3550         btrfs_free_path(path);
3551
3552         return ret;
3553 }
3554
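/*
 * Scrub all superblock mirrors that fit on the device, i.e. the btrfs
 * superblock copies at 64KiB, 64MiB and 256GiB as returned by
 * btrfs_sb_offset().
 */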
3555 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3556                                            struct btrfs_device *scrub_dev)
3557 {
3558         int     i;
3559         u64     bytenr;
3560         u64     gen;
3561         int     ret;
3562         struct btrfs_root *root = sctx->dev_root;
3563
3564         if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
3565                 return -EIO;
3566
3567         /* Seed devices of a new filesystem have their own generation. */
3568         if (scrub_dev->fs_devices != root->fs_info->fs_devices)
3569                 gen = scrub_dev->generation;
3570         else
3571                 gen = root->fs_info->last_trans_committed;
3572
3573         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
3574                 bytenr = btrfs_sb_offset(i);
3575                 if (bytenr + BTRFS_SUPER_INFO_SIZE >
3576                     scrub_dev->commit_total_bytes)
3577                         break;
3578
3579                 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
3580                                   scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
3581                                   NULL, 1, bytenr);
3582                 if (ret)
3583                         return ret;
3584         }
3585         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3586
3587         return 0;
3588 }
3589
3590 /*
3591  * get a reference on fs_info->scrub_workers; start the workers if necessary
3592  */
3593 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
3594                                                 int is_dev_replace)
3595 {
3596         unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
3597         int max_active = fs_info->thread_pool_size;
3598
3599         if (fs_info->scrub_workers_refcnt == 0) {
3600                 if (is_dev_replace)
3601                         fs_info->scrub_workers =
3602                                 btrfs_alloc_workqueue("btrfs-scrub", flags,
3603                                                       1, 4);
3604                 else
3605                         fs_info->scrub_workers =
3606                                 btrfs_alloc_workqueue("btrfs-scrub", flags,
3607                                                       max_active, 4);
3608                 if (!fs_info->scrub_workers)
3609                         goto fail_scrub_workers;
3610
3611                 fs_info->scrub_wr_completion_workers =
3612                         btrfs_alloc_workqueue("btrfs-scrubwrc", flags,
3613                                               max_active, 2);
3614                 if (!fs_info->scrub_wr_completion_workers)
3615                         goto fail_scrub_wr_completion_workers;
3616
3617                 fs_info->scrub_nocow_workers =
3618                         btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);
3619                 if (!fs_info->scrub_nocow_workers)
3620                         goto fail_scrub_nocow_workers;
3621                 fs_info->scrub_parity_workers =
3622                         btrfs_alloc_workqueue("btrfs-scrubparity", flags,
3623                                               max_active, 2);
3624                 if (!fs_info->scrub_parity_workers)
3625                         goto fail_scrub_parity_workers;
3626         }
3627         ++fs_info->scrub_workers_refcnt;
3628         return 0;
3629
3630 fail_scrub_parity_workers:
3631         btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
3632 fail_scrub_nocow_workers:
3633         btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3634 fail_scrub_wr_completion_workers:
3635         btrfs_destroy_workqueue(fs_info->scrub_workers);
3636 fail_scrub_workers:
3637         return -ENOMEM;
3638 }
3639
3640 static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
3641 {
3642         if (--fs_info->scrub_workers_refcnt == 0) {
3643                 btrfs_destroy_workqueue(fs_info->scrub_workers);
3644                 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3645                 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
3646                 btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
3647         }
3648         WARN_ON(fs_info->scrub_workers_refcnt < 0);
3649 }
3650
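/*
 * Entry point for scrub and dev-replace: validate the nodesize/sectorsize
 * assumptions, look up the device, set up the scrub context and worker
 * queues, scrub the superblocks and then all chunks on the device, and
 * finally tear everything down again and report progress.
 */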
3651 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3652                     u64 end, struct btrfs_scrub_progress *progress,
3653                     int readonly, int is_dev_replace)
3654 {
3655         struct scrub_ctx *sctx;
3656         int ret;
3657         struct btrfs_device *dev;
3658         struct rcu_string *name;
3659
3660         if (btrfs_fs_closing(fs_info))
3661                 return -EINVAL;
3662
3663         if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
3664                 /*
3665                  * the way scrub is implemented, it is unable to calculate the
3666                  * checksum in this case. Do not handle this situation at all
3667                  * because it should never happen.
3668                  */
3669                 btrfs_err(fs_info,
3670                            "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
3671                        fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
3672                 return -EINVAL;
3673         }
3674
3675         if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
3676                 /* not supported for data w/o checksums */
3677                 btrfs_err(fs_info,
3678                            "scrub: size assumption sectorsize != PAGE_SIZE "
3679                            "(%d != %lu) fails",
3680                        fs_info->chunk_root->sectorsize, PAGE_SIZE);
3681                 return -EINVAL;
3682         }
3683
3684         if (fs_info->chunk_root->nodesize >
3685             PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
3686             fs_info->chunk_root->sectorsize >
3687             PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
3688                 /*
3689                  * would exhaust the array bounds of pagev member in
3690                  * struct scrub_block
3691                  */
3692                 btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize "
3693                            "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
3694                        fs_info->chunk_root->nodesize,
3695                        SCRUB_MAX_PAGES_PER_BLOCK,
3696                        fs_info->chunk_root->sectorsize,
3697                        SCRUB_MAX_PAGES_PER_BLOCK);
3698                 return -EINVAL;
3699         }
3700
3701
3702         mutex_lock(&fs_info->fs_devices->device_list_mutex);
3703         dev = btrfs_find_device(fs_info, devid, NULL, NULL);
3704         if (!dev || (dev->missing && !is_dev_replace)) {
3705                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3706                 return -ENODEV;
3707         }
3708
3709         if (!is_dev_replace && !readonly && !dev->writeable) {
3710                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3711                 rcu_read_lock();
3712                 name = rcu_dereference(dev->name);
3713                 btrfs_err(fs_info, "scrub: device %s is not writable",
3714                           name->str);
3715                 rcu_read_unlock();
3716                 return -EROFS;
3717         }
3718
3719         mutex_lock(&fs_info->scrub_lock);
3720         if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
3721                 mutex_unlock(&fs_info->scrub_lock);
3722                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3723                 return -EIO;
3724         }
3725
3726         btrfs_dev_replace_lock(&fs_info->dev_replace);
3727         if (dev->scrub_device ||
3728             (!is_dev_replace &&
3729              btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
3730                 btrfs_dev_replace_unlock(&fs_info->dev_replace);
3731                 mutex_unlock(&fs_info->scrub_lock);
3732                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3733                 return -EINPROGRESS;
3734         }
3735         btrfs_dev_replace_unlock(&fs_info->dev_replace);
3736
3737         ret = scrub_workers_get(fs_info, is_dev_replace);
3738         if (ret) {
3739                 mutex_unlock(&fs_info->scrub_lock);
3740                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3741                 return ret;
3742         }
3743
3744         sctx = scrub_setup_ctx(dev, is_dev_replace);
3745         if (IS_ERR(sctx)) {
3746                 mutex_unlock(&fs_info->scrub_lock);
3747                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3748                 scrub_workers_put(fs_info);
3749                 return PTR_ERR(sctx);
3750         }
3751         sctx->readonly = readonly;
3752         dev->scrub_device = sctx;
3753         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3754
3755         /*
3756          * by checking @scrub_pause_req here, we can avoid a
3757          * race between committing a transaction and scrubbing.
3758          */
3759         __scrub_blocked_if_needed(fs_info);
3760         atomic_inc(&fs_info->scrubs_running);
3761         mutex_unlock(&fs_info->scrub_lock);
3762
3763         if (!is_dev_replace) {
3764                  * while we hold the device list mutex, super block
3765                  * writes kicked off by a log tree sync cannot race with us.
3766                  * kick off writing super in log tree sync.
3767                  */
3768                 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3769                 ret = scrub_supers(sctx, dev);
3770                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3771         }
3772
3773         if (!ret)
3774                 ret = scrub_enumerate_chunks(sctx, dev, start, end,
3775                                              is_dev_replace);
3776
3777         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3778         atomic_dec(&fs_info->scrubs_running);
3779         wake_up(&fs_info->scrub_pause_wait);
3780
3781         wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
3782
3783         if (progress)
3784                 memcpy(progress, &sctx->stat, sizeof(*progress));
3785
3786         mutex_lock(&fs_info->scrub_lock);
3787         dev->scrub_device = NULL;
3788         scrub_workers_put(fs_info);
3789         mutex_unlock(&fs_info->scrub_lock);
3790
3791         scrub_put_ctx(sctx);
3792
3793         return ret;
3794 }
3795
3796 void btrfs_scrub_pause(struct btrfs_root *root)
3797 {
3798         struct btrfs_fs_info *fs_info = root->fs_info;
3799
3800         mutex_lock(&fs_info->scrub_lock);
3801         atomic_inc(&fs_info->scrub_pause_req);
3802         while (atomic_read(&fs_info->scrubs_paused) !=
3803                atomic_read(&fs_info->scrubs_running)) {
3804                 mutex_unlock(&fs_info->scrub_lock);
3805                 wait_event(fs_info->scrub_pause_wait,
3806                            atomic_read(&fs_info->scrubs_paused) ==
3807                            atomic_read(&fs_info->scrubs_running));
3808                 mutex_lock(&fs_info->scrub_lock);
3809         }
3810         mutex_unlock(&fs_info->scrub_lock);
3811 }
3812
3813 void btrfs_scrub_continue(struct btrfs_root *root)
3814 {
3815         struct btrfs_fs_info *fs_info = root->fs_info;
3816
3817         atomic_dec(&fs_info->scrub_pause_req);
3818         wake_up(&fs_info->scrub_pause_wait);
3819 }
3820
3821 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
3822 {
3823         mutex_lock(&fs_info->scrub_lock);
3824         if (!atomic_read(&fs_info->scrubs_running)) {
3825                 mutex_unlock(&fs_info->scrub_lock);
3826                 return -ENOTCONN;
3827         }
3828
3829         atomic_inc(&fs_info->scrub_cancel_req);
3830         while (atomic_read(&fs_info->scrubs_running)) {
3831                 mutex_unlock(&fs_info->scrub_lock);
3832                 wait_event(fs_info->scrub_pause_wait,
3833                            atomic_read(&fs_info->scrubs_running) == 0);
3834                 mutex_lock(&fs_info->scrub_lock);
3835         }
3836         atomic_dec(&fs_info->scrub_cancel_req);
3837         mutex_unlock(&fs_info->scrub_lock);
3838
3839         return 0;
3840 }
3841
3842 int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
3843                            struct btrfs_device *dev)
3844 {
3845         struct scrub_ctx *sctx;
3846
3847         mutex_lock(&fs_info->scrub_lock);
3848         sctx = dev->scrub_device;
3849         if (!sctx) {
3850                 mutex_unlock(&fs_info->scrub_lock);
3851                 return -ENOTCONN;
3852         }
3853         atomic_inc(&sctx->cancel_req);
3854         while (dev->scrub_device) {
3855                 mutex_unlock(&fs_info->scrub_lock);
3856                 wait_event(fs_info->scrub_pause_wait,
3857                            dev->scrub_device == NULL);
3858                 mutex_lock(&fs_info->scrub_lock);
3859         }
3860         mutex_unlock(&fs_info->scrub_lock);
3861
3862         return 0;
3863 }
3864
3865 int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
3866                          struct btrfs_scrub_progress *progress)
3867 {
3868         struct btrfs_device *dev;
3869         struct scrub_ctx *sctx = NULL;
3870
3871         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
3872         dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
3873         if (dev)
3874                 sctx = dev->scrub_device;
3875         if (sctx)
3876                 memcpy(progress, &sctx->stat, sizeof(*progress));
3877         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3878
3879         return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
3880 }
3881
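/*
 * Map a logical extent to the physical offset, device and mirror number of
 * the first stripe that btrfs_map_block() returns for it.  If the mapping
 * fails, the output parameters are left untouched.
 */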
3882 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
3883                                u64 extent_logical, u64 extent_len,
3884                                u64 *extent_physical,
3885                                struct btrfs_device **extent_dev,
3886                                int *extent_mirror_num)
3887 {
3888         u64 mapped_length;
3889         struct btrfs_bio *bbio = NULL;
3890         int ret;
3891
3892         mapped_length = extent_len;
3893         ret = btrfs_map_block(fs_info, READ, extent_logical,
3894                               &mapped_length, &bbio, 0);
3895         if (ret || !bbio || mapped_length < extent_len ||
3896             !bbio->stripes[0].dev->bdev) {
3897                 btrfs_put_bbio(bbio);
3898                 return;
3899         }
3900
3901         *extent_physical = bbio->stripes[0].physical;
3902         *extent_mirror_num = bbio->mirror_num;
3903         *extent_dev = bbio->stripes[0].dev;
3904         btrfs_put_bbio(bbio);
3905 }
3906
3907 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
3908                               struct scrub_wr_ctx *wr_ctx,
3909                               struct btrfs_fs_info *fs_info,
3910                               struct btrfs_device *dev,
3911                               int is_dev_replace)
3912 {
3913         WARN_ON(wr_ctx->wr_curr_bio != NULL);
3914
3915         mutex_init(&wr_ctx->wr_lock);
3916         wr_ctx->wr_curr_bio = NULL;
3917         if (!is_dev_replace)
3918                 return 0;
3919
3920         WARN_ON(!dev->bdev);
3921         wr_ctx->pages_per_wr_bio = min_t(int, SCRUB_PAGES_PER_WR_BIO,
3922                                          bio_get_nr_vecs(dev->bdev));
3923         wr_ctx->tgtdev = dev;
3924         atomic_set(&wr_ctx->flush_all_writes, 0);
3925         return 0;
3926 }
3927
3928 static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
3929 {
3930         mutex_lock(&wr_ctx->wr_lock);
3931         kfree(wr_ctx->wr_curr_bio);
3932         wr_ctx->wr_curr_bio = NULL;
3933         mutex_unlock(&wr_ctx->wr_lock);
3934 }
3935
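/*
 * Dev-replace path for data blocks that carry no checksum (typically
 * nodatacow data): instead of scrubbing them, queue a worker that looks up
 * every inode referencing the logical address and copies the range to the
 * target device page by page (see copy_nocow_pages_worker() below).
 */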
3936 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
3937                             int mirror_num, u64 physical_for_dev_replace)
3938 {
3939         struct scrub_copy_nocow_ctx *nocow_ctx;
3940         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
3941
3942         nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
3943         if (!nocow_ctx) {
3944                 spin_lock(&sctx->stat_lock);
3945                 sctx->stat.malloc_errors++;
3946                 spin_unlock(&sctx->stat_lock);
3947                 return -ENOMEM;
3948         }
3949
3950         scrub_pending_trans_workers_inc(sctx);
3951
3952         nocow_ctx->sctx = sctx;
3953         nocow_ctx->logical = logical;
3954         nocow_ctx->len = len;
3955         nocow_ctx->mirror_num = mirror_num;
3956         nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
3957         btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
3958                         copy_nocow_pages_worker, NULL, NULL);
3959         INIT_LIST_HEAD(&nocow_ctx->inodes);
3960         btrfs_queue_work(fs_info->scrub_nocow_workers,
3961                          &nocow_ctx->work);
3962
3963         return 0;
3964 }
3965
3966 static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
3967 {
3968         struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
3969         struct scrub_nocow_inode *nocow_inode;
3970
3971         nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
3972         if (!nocow_inode)
3973                 return -ENOMEM;
3974         nocow_inode->inum = inum;
3975         nocow_inode->offset = offset;
3976         nocow_inode->root = root;
3977         list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
3978         return 0;
3979 }
3980
3981 #define COPY_COMPLETE 1
3982
3983 static void copy_nocow_pages_worker(struct btrfs_work *work)
3984 {
3985         struct scrub_copy_nocow_ctx *nocow_ctx =
3986                 container_of(work, struct scrub_copy_nocow_ctx, work);
3987         struct scrub_ctx *sctx = nocow_ctx->sctx;
3988         u64 logical = nocow_ctx->logical;
3989         u64 len = nocow_ctx->len;
3990         int mirror_num = nocow_ctx->mirror_num;
3991         u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
3992         int ret;
3993         struct btrfs_trans_handle *trans = NULL;
3994         struct btrfs_fs_info *fs_info;
3995         struct btrfs_path *path;
3996         struct btrfs_root *root;
3997         int not_written = 0;
3998
3999         fs_info = sctx->dev_root->fs_info;
4000         root = fs_info->extent_root;
4001
4002         path = btrfs_alloc_path();
4003         if (!path) {
4004                 spin_lock(&sctx->stat_lock);
4005                 sctx->stat.malloc_errors++;
4006                 spin_unlock(&sctx->stat_lock);
4007                 not_written = 1;
4008                 goto out;
4009         }
4010
4011         trans = btrfs_join_transaction(root);
4012         if (IS_ERR(trans)) {
4013                 not_written = 1;
4014                 goto out;
4015         }
4016
4017         ret = iterate_inodes_from_logical(logical, fs_info, path,
4018                                           record_inode_for_nocow, nocow_ctx);
4019         if (ret != 0 && ret != -ENOENT) {
4020                 btrfs_warn(fs_info,
4021                         "iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d",
4022                         logical, physical_for_dev_replace, len, mirror_num,
4023                         ret);
4024                 not_written = 1;
4025                 goto out;
4026         }
4027
4028         btrfs_end_transaction(trans, root);
4029         trans = NULL;
4030         while (!list_empty(&nocow_ctx->inodes)) {
4031                 struct scrub_nocow_inode *entry;
4032                 entry = list_first_entry(&nocow_ctx->inodes,
4033                                          struct scrub_nocow_inode,
4034                                          list);
4035                 list_del_init(&entry->list);
4036                 ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
4037                                                  entry->root, nocow_ctx);
4038                 kfree(entry);
4039                 if (ret == COPY_COMPLETE) {
4040                         ret = 0;
4041                         break;
4042                 } else if (ret) {
4043                         break;
4044                 }
4045         }
4046 out:
4047         while (!list_empty(&nocow_ctx->inodes)) {
4048                 struct scrub_nocow_inode *entry;
4049                 entry = list_first_entry(&nocow_ctx->inodes,
4050                                          struct scrub_nocow_inode,
4051                                          list);
4052                 list_del_init(&entry->list);
4053                 kfree(entry);
4054         }
4055         if (trans && !IS_ERR(trans))
4056                 btrfs_end_transaction(trans, root);
4057         if (not_written)
4058                 btrfs_dev_replace_stats_inc(
4059                         &fs_info->dev_replace.num_uncorrectable_read_errors);
4060
4061         btrfs_free_path(path);
4062         kfree(nocow_ctx);
4063
4064         scrub_pending_trans_workers_dec(sctx);
4065 }
4066
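/*
 * Check that the file range [start, start + len) still maps to the blocks
 * at @logical and has no ordered (in-flight) I/O against it.  Returns 0 if
 * the range is safe to copy, 1 if the caller should skip it, or a negative
 * errno on failure.
 */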
4067 static int check_extent_to_block(struct inode *inode, u64 start, u64 len,
4068                                  u64 logical)
4069 {
4070         struct extent_state *cached_state = NULL;
4071         struct btrfs_ordered_extent *ordered;
4072         struct extent_io_tree *io_tree;
4073         struct extent_map *em;
4074         u64 lockstart = start, lockend = start + len - 1;
4075         int ret = 0;
4076
4077         io_tree = &BTRFS_I(inode)->io_tree;
4078
4079         lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
4080         ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
4081         if (ordered) {
4082                 btrfs_put_ordered_extent(ordered);
4083                 ret = 1;
4084                 goto out_unlock;
4085         }
4086
4087         em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
4088         if (IS_ERR(em)) {
4089                 ret = PTR_ERR(em);
4090                 goto out_unlock;
4091         }
4092
4093         /*
4094          * This extent does not cover the logical extent anymore;
4095          * move on to the next inode.
4096          */
4097         if (em->block_start > logical ||
4098             em->block_start + em->block_len < logical + len) {
4099                 free_extent_map(em);
4100                 ret = 1;
4101                 goto out_unlock;
4102         }
4103         free_extent_map(em);
4104
4105 out_unlock:
4106         unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
4107                              GFP_NOFS);
4108         return ret;
4109 }
4110
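/*
 * Copy @len bytes starting at file offset @offset of inode @inum (in
 * subvolume @root) from the page cache to the dev-replace target, one page
 * at a time.  Pages that are not uptodate are read in first, and the
 * extent is re-checked with check_extent_to_block() before each write so
 * that a concurrent write makes us stop instead of copying stale data.
 */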
4111 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
4112                                       struct scrub_copy_nocow_ctx *nocow_ctx)
4113 {
4114         struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
4115         struct btrfs_key key;
4116         struct inode *inode;
4117         struct page *page;
4118         struct btrfs_root *local_root;
4119         struct extent_io_tree *io_tree;
4120         u64 physical_for_dev_replace;
4121         u64 nocow_ctx_logical;
4122         u64 len = nocow_ctx->len;
4123         unsigned long index;
4124         int srcu_index;
4125         int ret = 0;
4126         int err = 0;
4127
4128         key.objectid = root;
4129         key.type = BTRFS_ROOT_ITEM_KEY;
4130         key.offset = (u64)-1;
4131
4132         srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
4133
4134         local_root = btrfs_read_fs_root_no_name(fs_info, &key);
4135         if (IS_ERR(local_root)) {
4136                 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
4137                 return PTR_ERR(local_root);
4138         }
4139
4140         key.type = BTRFS_INODE_ITEM_KEY;
4141         key.objectid = inum;
4142         key.offset = 0;
4143         inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
4144         srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
4145         if (IS_ERR(inode))
4146                 return PTR_ERR(inode);
4147
4148         /* Avoid racing with truncate, direct I/O and hole punching */
4149         mutex_lock(&inode->i_mutex);
4150         inode_dio_wait(inode);
4151
4152         physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
4153         io_tree = &BTRFS_I(inode)->io_tree;
4154         nocow_ctx_logical = nocow_ctx->logical;
4155
4156         ret = check_extent_to_block(inode, offset, len, nocow_ctx_logical);
4157         if (ret) {
4158                 ret = ret > 0 ? 0 : ret;
4159                 goto out;
4160         }
4161
4162         while (len >= PAGE_CACHE_SIZE) {
4163                 index = offset >> PAGE_CACHE_SHIFT;
4164 again:
4165                 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
4166                 if (!page) {
4167                         btrfs_err(fs_info, "find_or_create_page() failed");
4168                         ret = -ENOMEM;
4169                         goto out;
4170                 }
4171
4172                 if (PageUptodate(page)) {
4173                         if (PageDirty(page))
4174                                 goto next_page;
4175                 } else {
4176                         ClearPageError(page);
4177                         err = extent_read_full_page(io_tree, page,
4178                                                            btrfs_get_extent,
4179                                                            nocow_ctx->mirror_num);
4180                         if (err) {
4181                                 ret = err;
4182                                 goto next_page;
4183                         }
4184
4185                         lock_page(page);
4186                         /*
4187                          * If the page has been removed from the page cache,
4188                          * the data on it is meaningless: it may be a stale
4189                          * copy, and the new data may have been written to a
4190                          * new page in the page cache.
4191                          */
4192                         if (page->mapping != inode->i_mapping) {
4193                                 unlock_page(page);
4194                                 page_cache_release(page);
4195                                 goto again;
4196                         }
4197                         if (!PageUptodate(page)) {
4198                                 ret = -EIO;
4199                                 goto next_page;
4200                         }
4201                 }
4202
4203                 ret = check_extent_to_block(inode, offset, len,
4204                                             nocow_ctx_logical);
4205                 if (ret) {
4206                         ret = ret > 0 ? 0 : ret;
4207                         goto next_page;
4208                 }
4209
4210                 err = write_page_nocow(nocow_ctx->sctx,
4211                                        physical_for_dev_replace, page);
4212                 if (err)
4213                         ret = err;
4214 next_page:
4215                 unlock_page(page);
4216                 page_cache_release(page);
4217
4218                 if (ret)
4219                         break;
4220
4221                 offset += PAGE_CACHE_SIZE;
4222                 physical_for_dev_replace += PAGE_CACHE_SIZE;
4223                 nocow_ctx_logical += PAGE_CACHE_SIZE;
4224                 len -= PAGE_CACHE_SIZE;
4225         }
4226         ret = COPY_COMPLETE;
4227 out:
4228         mutex_unlock(&inode->i_mutex);
4229         iput(inode);
4230         return ret;
4231 }
4232
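/*
 * Synchronously write a single page to the dev-replace target device at
 * @physical_for_dev_replace.  If the bio cannot be filled or the write
 * fails, a write error is recorded in the device statistics and -EIO is
 * returned.
 */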
4233 static int write_page_nocow(struct scrub_ctx *sctx,
4234                             u64 physical_for_dev_replace, struct page *page)
4235 {
4236         struct bio *bio;
4237         struct btrfs_device *dev;
4238         int ret;
4239
4240         dev = sctx->wr_ctx.tgtdev;
4241         if (!dev)
4242                 return -EIO;
4243         if (!dev->bdev) {
4244                 printk_ratelimited(KERN_WARNING
4245                         "BTRFS: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
4246                 return -EIO;
4247         }
4248         bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
4249         if (!bio) {
4250                 spin_lock(&sctx->stat_lock);
4251                 sctx->stat.malloc_errors++;
4252                 spin_unlock(&sctx->stat_lock);
4253                 return -ENOMEM;
4254         }
4255         bio->bi_iter.bi_size = 0;
4256         bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
4257         bio->bi_bdev = dev->bdev;
4258         ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
4259         if (ret != PAGE_CACHE_SIZE) {
4260 leave_with_eio:
4261                 bio_put(bio);
4262                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
4263                 return -EIO;
4264         }
4265
4266         if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
4267                 goto leave_with_eio;
4268
4269         bio_put(bio);
4270         return 0;
4271 }