Btrfs: update scrub_parity to use u64 stripe_len
fs/btrfs/scrub.c
/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * the following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO  32      /* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO  32      /* 128k per bio */
#define SCRUB_BIOS_PER_SCTX     64      /* 8MB per device in flight */
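/*
 * Note: the "128k per bio" and "8MB per device" figures above assume
 * PAGE_SIZE == 4K; with larger page sizes the amount of data per bio and
 * per scrub context scales up accordingly.
 */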

/*
 * the following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK       16      /* 64k per node/leaf/sector */
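/*
 * With 4K pages this allows blocks of up to 16 * 4K = 64K, which covers the
 * largest supported nodesize and matches BTRFS_STRIPE_LEN (64K).
 */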

struct scrub_recover {
        refcount_t              refs;
        struct btrfs_bio        *bbio;
        u64                     map_length;
};

struct scrub_page {
        struct scrub_block      *sblock;
        struct page             *page;
        struct btrfs_device     *dev;
        struct list_head        list;
        u64                     flags;  /* extent flags */
        u64                     generation;
        u64                     logical;
        u64                     physical;
        u64                     physical_for_dev_replace;
        atomic_t                refs;
        struct {
                unsigned int    mirror_num:8;
                unsigned int    have_csum:1;
                unsigned int    io_error:1;
        };
        u8                      csum[BTRFS_CSUM_SIZE];

        struct scrub_recover    *recover;
};

struct scrub_bio {
        int                     index;
        struct scrub_ctx        *sctx;
        struct btrfs_device     *dev;
        struct bio              *bio;
        int                     err;
        u64                     logical;
        u64                     physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
        struct scrub_page       *pagev[SCRUB_PAGES_PER_WR_BIO];
#else
        struct scrub_page       *pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
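        /*
         * pagev[] above is sized for the larger of the read and write page
         * limits, so the same scrub_bio can be used for both read and
         * write bios.
         */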
        int                     page_count;
        int                     next_free;
        struct btrfs_work       work;
};

struct scrub_block {
        struct scrub_page       *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
        int                     page_count;
        atomic_t                outstanding_pages;
        refcount_t              refs; /* free mem on transition to zero */
        struct scrub_ctx        *sctx;
        struct scrub_parity     *sparity;
        struct {
                unsigned int    header_error:1;
                unsigned int    checksum_error:1;
                unsigned int    no_io_error_seen:1;
                unsigned int    generation_error:1; /* also sets header_error */

                /*
                 * The following flag is for data used to check parity,
                 * i.e. data that has a checksum.
                 */
                unsigned int    data_corrected:1;
        };
        struct btrfs_work       work;
};

/* Used for chunks with parity stripes, such as RAID5/6 */
struct scrub_parity {
        struct scrub_ctx        *sctx;

        struct btrfs_device     *scrub_dev;

        u64                     logic_start;

        u64                     logic_end;

        int                     nsectors;

        u64                     stripe_len;

        refcount_t              refs;

        struct list_head        spages;

        /* Work of parity check and repair */
        struct btrfs_work       work;

        /* Mark the parity blocks which have data */
        unsigned long           *dbitmap;

        /*
         * Mark the parity blocks which have data, but for which an error
         * was encountered when reading or checking that data
         */
        unsigned long           *ebitmap;

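        /*
         * Note: dbitmap and ebitmap point into this trailing array, which is
         * allocated together with the struct by the parity scrub setup code;
         * each bitmap is nsectors bits long.
         */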
        unsigned long           bitmap[0];
};

struct scrub_wr_ctx {
        struct scrub_bio *wr_curr_bio;
        struct btrfs_device *tgtdev;
        int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
        atomic_t flush_all_writes;
        struct mutex wr_lock;
};

struct scrub_ctx {
        struct scrub_bio        *bios[SCRUB_BIOS_PER_SCTX];
        struct btrfs_fs_info    *fs_info;
        int                     first_free;
        int                     curr;
        atomic_t                bios_in_flight;
        atomic_t                workers_pending;
        spinlock_t              list_lock;
        wait_queue_head_t       list_wait;
        u16                     csum_size;
        struct list_head        csum_list;
        atomic_t                cancel_req;
        int                     readonly;
        int                     pages_per_rd_bio;
        u32                     sectorsize;
        u32                     nodesize;

        int                     is_dev_replace;
        struct scrub_wr_ctx     wr_ctx;

        /*
         * statistics
         */
        struct btrfs_scrub_progress stat;
        spinlock_t              stat_lock;

        /*
         * Use a ref counter to avoid use-after-free issues. Scrub workers
         * decrement bios_in_flight and workers_pending and then do a wakeup
         * on the list_wait wait queue. We must ensure the main scrub task
         * doesn't free the scrub context before or while the workers are
         * doing the wakeup() call.
         */
        refcount_t              refs;
};

struct scrub_fixup_nodatasum {
        struct scrub_ctx        *sctx;
        struct btrfs_device     *dev;
        u64                     logical;
        struct btrfs_root       *root;
        struct btrfs_work       work;
        int                     mirror_num;
};

struct scrub_nocow_inode {
        u64                     inum;
        u64                     offset;
        u64                     root;
        struct list_head        list;
};

struct scrub_copy_nocow_ctx {
        struct scrub_ctx        *sctx;
        u64                     logical;
        u64                     len;
        int                     mirror_num;
        u64                     physical_for_dev_replace;
        struct list_head        inodes;
        struct btrfs_work       work;
};

struct scrub_warning {
        struct btrfs_path       *path;
        u64                     extent_item_size;
        const char              *errstr;
        sector_t                sector;
        u64                     logical;
        struct btrfs_device     *dev;
};

static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
                                     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
                                struct scrub_block *sblock,
                                int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct scrub_block *sblock);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
                                             struct scrub_block *sblock_good);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
                                            struct scrub_block *sblock_good,
                                            int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
                                           int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
                                    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
                       u64 physical, struct btrfs_device *dev, u64 flags,
                       u64 gen, int mirror_num, u8 *csum, int force,
                       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
                               u64 extent_logical, u64 extent_len,
                               u64 *extent_physical,
                               struct btrfs_device **extent_dev,
                               int *extent_mirror_num);
static int scrub_setup_wr_ctx(struct scrub_wr_ctx *wr_ctx,
                              struct btrfs_device *dev,
                              int is_dev_replace);
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
                                    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
                            u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
                                      struct scrub_copy_nocow_ctx *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
                            int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_put_ctx(struct scrub_ctx *sctx);


static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
        refcount_inc(&sctx->refs);
        atomic_inc(&sctx->bios_in_flight);
}

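/*
 * Drops the ctx reference and the bios_in_flight count taken in
 * scrub_pending_bio_inc() and wakes up waiters on list_wait so they can
 * re-check the in-flight counts.
 */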
static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
        atomic_dec(&sctx->bios_in_flight);
        wake_up(&sctx->list_wait);
        scrub_put_ctx(sctx);
}

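/*
 * Must be called with scrub_lock held; the lock is dropped while waiting
 * for a pending pause request to be lifted and re-taken before returning.
 */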
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
        while (atomic_read(&fs_info->scrub_pause_req)) {
                mutex_unlock(&fs_info->scrub_lock);
                wait_event(fs_info->scrub_pause_wait,
                   atomic_read(&fs_info->scrub_pause_req) == 0);
                mutex_lock(&fs_info->scrub_lock);
        }
}

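/*
 * scrub_pause_on() and scrub_pause_off() bracket sections during which the
 * scrub counts itself as paused, so that pause requesters (e.g. transaction
 * commits) are not blocked on this scrub.
 */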
static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
        atomic_inc(&fs_info->scrubs_paused);
        wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
        mutex_lock(&fs_info->scrub_lock);
        __scrub_blocked_if_needed(fs_info);
        atomic_dec(&fs_info->scrubs_paused);
        mutex_unlock(&fs_info->scrub_lock);

        wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
        scrub_pause_on(fs_info);
        scrub_pause_off(fs_info);
}

/*
 * used for workers that require transaction commits (i.e., for the
 * NOCOW case)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
        struct btrfs_fs_info *fs_info = sctx->fs_info;

        refcount_inc(&sctx->refs);
        /*
         * increment scrubs_running to prevent cancel requests from
         * completing as long as a worker is running. we must also
         * increment scrubs_paused to prevent deadlocking on pause
         * requests used for transaction commits (as the worker uses a
         * transaction context). it is safe to regard the worker
         * as paused for all practical matters. effectively, we only
         * prevent cancellation requests from completing.
         */
        mutex_lock(&fs_info->scrub_lock);
        atomic_inc(&fs_info->scrubs_running);
        atomic_inc(&fs_info->scrubs_paused);
        mutex_unlock(&fs_info->scrub_lock);

        /*
         * The check of the @scrubs_running == @scrubs_paused condition
         * inside wait_event() is not an atomic operation, which means we
         * may inc/dec @scrubs_running/@scrubs_paused at any time. Wake up
         * @scrub_pause_wait as often as we can so that a committing
         * transaction is blocked as briefly as possible.
         */
        wake_up(&fs_info->scrub_pause_wait);

        atomic_inc(&sctx->workers_pending);
}

/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
        struct btrfs_fs_info *fs_info = sctx->fs_info;

        /*
         * see scrub_pending_trans_workers_inc() for why we're pretending
         * to be paused in the scrub counters
         */
        mutex_lock(&fs_info->scrub_lock);
        atomic_dec(&fs_info->scrubs_running);
        atomic_dec(&fs_info->scrubs_paused);
        mutex_unlock(&fs_info->scrub_lock);
        atomic_dec(&sctx->workers_pending);
        wake_up(&fs_info->scrub_pause_wait);
        wake_up(&sctx->list_wait);
        scrub_put_ctx(sctx);
}

static void scrub_free_csums(struct scrub_ctx *sctx)
{
        while (!list_empty(&sctx->csum_list)) {
                struct btrfs_ordered_sum *sum;
                sum = list_first_entry(&sctx->csum_list,
                                       struct btrfs_ordered_sum, list);
                list_del(&sum->list);
                kfree(sum);
        }
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
        int i;

        if (!sctx)
                return;

        scrub_free_wr_ctx(&sctx->wr_ctx);

        /* this can happen when scrub is cancelled */
        if (sctx->curr != -1) {
                struct scrub_bio *sbio = sctx->bios[sctx->curr];

                for (i = 0; i < sbio->page_count; i++) {
                        WARN_ON(!sbio->pagev[i]->page);
                        scrub_block_put(sbio->pagev[i]->sblock);
                }
                bio_put(sbio->bio);
        }

        for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
                struct scrub_bio *sbio = sctx->bios[i];

                if (!sbio)
                        break;
                kfree(sbio);
        }

        scrub_free_csums(sctx);
        kfree(sctx);
}

static void scrub_put_ctx(struct scrub_ctx *sctx)
{
        if (refcount_dec_and_test(&sctx->refs))
                scrub_free_ctx(sctx);
}

static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
        struct scrub_ctx *sctx;
        int             i;
        struct btrfs_fs_info *fs_info = dev->fs_info;
        int ret;

        sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
        if (!sctx)
                goto nomem;
        refcount_set(&sctx->refs, 1);
        sctx->is_dev_replace = is_dev_replace;
        sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
        sctx->curr = -1;
        sctx->fs_info = dev->fs_info;
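        /*
         * Pre-allocate all scrub bios up front and chain them into a free
         * list via next_free/first_free; -1 marks the end of the list.
         */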
        for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
                struct scrub_bio *sbio;

                sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
                if (!sbio)
                        goto nomem;
                sctx->bios[i] = sbio;

                sbio->index = i;
                sbio->sctx = sctx;
                sbio->page_count = 0;
                btrfs_init_work(&sbio->work, btrfs_scrub_helper,
                                scrub_bio_end_io_worker, NULL, NULL);

                if (i != SCRUB_BIOS_PER_SCTX - 1)
                        sctx->bios[i]->next_free = i + 1;
                else
                        sctx->bios[i]->next_free = -1;
        }
        sctx->first_free = 0;
        sctx->nodesize = fs_info->nodesize;
        sctx->sectorsize = fs_info->sectorsize;
        atomic_set(&sctx->bios_in_flight, 0);
        atomic_set(&sctx->workers_pending, 0);
        atomic_set(&sctx->cancel_req, 0);
        sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
        INIT_LIST_HEAD(&sctx->csum_list);

        spin_lock_init(&sctx->list_lock);
        spin_lock_init(&sctx->stat_lock);
        init_waitqueue_head(&sctx->list_wait);

        ret = scrub_setup_wr_ctx(&sctx->wr_ctx,
                                 fs_info->dev_replace.tgtdev, is_dev_replace);
        if (ret) {
                scrub_free_ctx(sctx);
                return ERR_PTR(ret);
        }
        return sctx;

nomem:
        scrub_free_ctx(sctx);
        return ERR_PTR(-ENOMEM);
}

static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
                                     void *warn_ctx)
{
        u64 isize;
        u32 nlink;
        int ret;
        int i;
        struct extent_buffer *eb;
        struct btrfs_inode_item *inode_item;
        struct scrub_warning *swarn = warn_ctx;
        struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
        struct inode_fs_paths *ipath = NULL;
        struct btrfs_root *local_root;
        struct btrfs_key root_key;
        struct btrfs_key key;

        root_key.objectid = root;
        root_key.type = BTRFS_ROOT_ITEM_KEY;
        root_key.offset = (u64)-1;
        local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
        if (IS_ERR(local_root)) {
                ret = PTR_ERR(local_root);
                goto err;
        }

        /*
         * this makes the path point to (inum INODE_ITEM ioff)
         */
        key.objectid = inum;
        key.type = BTRFS_INODE_ITEM_KEY;
        key.offset = 0;

        ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
        if (ret) {
                btrfs_release_path(swarn->path);
                goto err;
        }

        eb = swarn->path->nodes[0];
        inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
                                        struct btrfs_inode_item);
        isize = btrfs_inode_size(eb, inode_item);
        nlink = btrfs_inode_nlink(eb, inode_item);
        btrfs_release_path(swarn->path);

        ipath = init_ipath(4096, local_root, swarn->path);
        if (IS_ERR(ipath)) {
                ret = PTR_ERR(ipath);
                ipath = NULL;
                goto err;
        }
        ret = paths_from_inode(inum, ipath);

        if (ret < 0)
                goto err;

        /*
         * we deliberately ignore the fact that ipath might have been too
         * small to hold all of the paths here
         */
        for (i = 0; i < ipath->fspath->elem_cnt; ++i)
                btrfs_warn_in_rcu(fs_info,
                                  "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
                                  swarn->errstr, swarn->logical,
                                  rcu_str_deref(swarn->dev->name),
                                  (unsigned long long)swarn->sector,
                                  root, inum, offset,
                                  min(isize - offset, (u64)PAGE_SIZE), nlink,
                                  (char *)(unsigned long)ipath->fspath->val[i]);

        free_ipath(ipath);
        return 0;

err:
        btrfs_warn_in_rcu(fs_info,
                          "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
                          swarn->errstr, swarn->logical,
                          rcu_str_deref(swarn->dev->name),
                          (unsigned long long)swarn->sector,
                          root, inum, offset, ret);

        free_ipath(ipath);
        return 0;
}

static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
        struct btrfs_device *dev;
        struct btrfs_fs_info *fs_info;
        struct btrfs_path *path;
        struct btrfs_key found_key;
        struct extent_buffer *eb;
        struct btrfs_extent_item *ei;
        struct scrub_warning swarn;
        unsigned long ptr = 0;
        u64 extent_item_pos;
        u64 flags = 0;
        u64 ref_root;
        u32 item_size;
        u8 ref_level = 0;
        int ret;

        WARN_ON(sblock->page_count < 1);
        dev = sblock->pagev[0]->dev;
        fs_info = sblock->sctx->fs_info;

        path = btrfs_alloc_path();
        if (!path)
                return;

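        /* Report the physical position in 512-byte sectors, hence the >> 9. */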
        swarn.sector = (sblock->pagev[0]->physical) >> 9;
        swarn.logical = sblock->pagev[0]->logical;
        swarn.errstr = errstr;
        swarn.dev = NULL;

        ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
                                  &flags);
        if (ret < 0)
                goto out;

        extent_item_pos = swarn.logical - found_key.objectid;
        swarn.extent_item_size = found_key.offset;

        eb = path->nodes[0];
        ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
        item_size = btrfs_item_size_nr(eb, path->slots[0]);

        if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                do {
                        ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
                                                      item_size, &ref_root,
                                                      &ref_level);
                        btrfs_warn_in_rcu(fs_info,
                                "%s at logical %llu on dev %s, sector %llu: metadata %s (level %d) in tree %llu",
                                errstr, swarn.logical,
                                rcu_str_deref(dev->name),
                                (unsigned long long)swarn.sector,
                                ref_level ? "node" : "leaf",
                                ret < 0 ? -1 : ref_level,
                                ret < 0 ? -1 : ref_root);
                } while (ret != 1);
                btrfs_release_path(path);
        } else {
                btrfs_release_path(path);
                swarn.path = path;
                swarn.dev = dev;
                iterate_extent_inodes(fs_info, found_key.objectid,
                                        extent_item_pos, 1,
                                        scrub_print_warning_inode, &swarn);
        }

out:
        btrfs_free_path(path);
}

static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
{
        struct page *page = NULL;
        unsigned long index;
        struct scrub_fixup_nodatasum *fixup = fixup_ctx;
        int ret;
        int corrected = 0;
        struct btrfs_key key;
        struct inode *inode = NULL;
        struct btrfs_fs_info *fs_info;
        u64 end = offset + PAGE_SIZE - 1;
        struct btrfs_root *local_root;
        int srcu_index;

        key.objectid = root;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = (u64)-1;

        fs_info = fixup->root->fs_info;
        srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

        local_root = btrfs_read_fs_root_no_name(fs_info, &key);
        if (IS_ERR(local_root)) {
                srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
                return PTR_ERR(local_root);
        }

        key.type = BTRFS_INODE_ITEM_KEY;
        key.objectid = inum;
        key.offset = 0;
        inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
        srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

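        /* offset is a file offset; convert it to a page index in the inode's mapping */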
        index = offset >> PAGE_SHIFT;

        page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
        if (!page) {
                ret = -ENOMEM;
                goto out;
        }

        if (PageUptodate(page)) {
                if (PageDirty(page)) {
                        /*
                         * we need to write the data to the defect sector. the
                         * data that was in that sector is not in memory,
                         * because the page was modified. we must not write the
                         * modified page to that sector.
                         *
                         * TODO: what could be done here: wait for the delalloc
                         *       runner to write out that page (might involve
                         *       COW) and see whether the sector is still
                         *       referenced afterwards.
                         *
                         * For the meantime, we'll treat this error as
                         * uncorrectable, although there is a chance that a
                         * later scrub will find the bad sector again and that
                         * there's no dirty page in memory then.
                         */
                        ret = -EIO;
                        goto out;
                }
                ret = repair_io_failure(BTRFS_I(inode), offset, PAGE_SIZE,
                                        fixup->logical, page,
                                        offset - page_offset(page),
                                        fixup->mirror_num);
                unlock_page(page);
                corrected = !ret;
        } else {
                /*
                 * we need to get good data first. the general readpage path
                 * will call repair_io_failure for us, we just have to make
                 * sure we read the bad mirror.
                 */
                ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
                                        EXTENT_DAMAGED);
                if (ret) {
                        /* set_extent_bits should give proper error */
                        WARN_ON(ret > 0);
                        if (ret > 0)
                                ret = -EFAULT;
                        goto out;
                }

                ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
                                                btrfs_get_extent,
                                                fixup->mirror_num);
                wait_on_page_locked(page);

                corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
                                                end, EXTENT_DAMAGED, 0, NULL);
                if (!corrected)
                        clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
                                                EXTENT_DAMAGED);
        }

out:
        if (page)
                put_page(page);

        iput(inode);

        if (ret < 0)
                return ret;

        if (ret == 0 && corrected) {
                /*
                 * we only need to call readpage for one of the inodes belonging
                 * to this extent. so make iterate_extent_inodes stop
                 */
                return 1;
        }

        return -EIO;
}

static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
        struct btrfs_fs_info *fs_info;
        int ret;
        struct scrub_fixup_nodatasum *fixup;
        struct scrub_ctx *sctx;
        struct btrfs_trans_handle *trans = NULL;
        struct btrfs_path *path;
        int uncorrectable = 0;

        fixup = container_of(work, struct scrub_fixup_nodatasum, work);
        sctx = fixup->sctx;
        fs_info = fixup->root->fs_info;

        path = btrfs_alloc_path();
        if (!path) {
                spin_lock(&sctx->stat_lock);
                ++sctx->stat.malloc_errors;
                spin_unlock(&sctx->stat_lock);
                uncorrectable = 1;
                goto out;
        }

        trans = btrfs_join_transaction(fixup->root);
        if (IS_ERR(trans)) {
                uncorrectable = 1;
                goto out;
        }

        /*
         * the idea is to trigger a regular read through the standard path. we
         * read a page from the (failed) logical address by specifying the
         * corresponding copynum of the failed sector. thus, that readpage is
         * expected to fail.
         * that is the point where on-the-fly error correction will kick in
         * (once it's finished) and rewrite the failed sector if a good copy
         * can be found.
         */
        ret = iterate_inodes_from_logical(fixup->logical, fs_info, path,
                                          scrub_fixup_readpage, fixup);
        if (ret < 0) {
                uncorrectable = 1;
                goto out;
        }
        WARN_ON(ret != 1);

        spin_lock(&sctx->stat_lock);
        ++sctx->stat.corrected_errors;
        spin_unlock(&sctx->stat_lock);

out:
        if (trans && !IS_ERR(trans))
                btrfs_end_transaction(trans);
        if (uncorrectable) {
                spin_lock(&sctx->stat_lock);
                ++sctx->stat.uncorrectable_errors;
                spin_unlock(&sctx->stat_lock);
                btrfs_dev_replace_stats_inc(
                        &fs_info->dev_replace.num_uncorrectable_read_errors);
                btrfs_err_rl_in_rcu(fs_info,
                    "unable to fixup (nodatasum) error at logical %llu on dev %s",
                        fixup->logical, rcu_str_deref(fixup->dev->name));
        }

        btrfs_free_path(path);
        kfree(fixup);

        scrub_pending_trans_workers_dec(sctx);
}

static inline void scrub_get_recover(struct scrub_recover *recover)
{
        refcount_inc(&recover->refs);
}

static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
                                     struct scrub_recover *recover)
{
        if (refcount_dec_and_test(&recover->refs)) {
                btrfs_bio_counter_dec(fs_info);
                btrfs_put_bbio(recover->bbio);
                kfree(recover);
        }
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
        struct scrub_ctx *sctx = sblock_to_check->sctx;
        struct btrfs_device *dev;
        struct btrfs_fs_info *fs_info;
        u64 length;
        u64 logical;
        unsigned int failed_mirror_index;
        unsigned int is_metadata;
        unsigned int have_csum;
        struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
        struct scrub_block *sblock_bad;
        int ret;
        int mirror_index;
        int page_num;
        int success;
        static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);

        BUG_ON(sblock_to_check->page_count < 1);
        fs_info = sctx->fs_info;
        if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
                /*
                 * if we find an error in a super block, we just report it.
                 * Super blocks get rewritten with the next transaction
                 * commit anyway
                 */
                spin_lock(&sctx->stat_lock);
                ++sctx->stat.super_errors;
                spin_unlock(&sctx->stat_lock);
                return 0;
        }
        length = sblock_to_check->page_count * PAGE_SIZE;
        logical = sblock_to_check->pagev[0]->logical;
        BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
        failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
        is_metadata = !(sblock_to_check->pagev[0]->flags &
                        BTRFS_EXTENT_FLAG_DATA);
        have_csum = sblock_to_check->pagev[0]->have_csum;
        dev = sblock_to_check->pagev[0]->dev;

        if (sctx->is_dev_replace && !is_metadata && !have_csum) {
                sblocks_for_recheck = NULL;
                goto nodatasum_case;
        }

        /*
         * read all mirrors one after the other. This includes re-reading
         * the extent or metadata block that failed (that was
         * the cause that this fixup code is called) another time,
         * page by page this time in order to know which pages
         * caused I/O errors and which ones are good (for all mirrors).
         * It is the goal to handle the situation when more than one
         * mirror contains I/O errors, but the errors do not
         * overlap, i.e. the data can be repaired by selecting the
         * pages from those mirrors without I/O error on the
         * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
         * would be that mirror #1 has an I/O error on the first page,
         * the second page is good, and mirror #2 has an I/O error on
         * the second page, but the first page is good.
         * Then the first page of the first mirror can be repaired by
         * taking the first page of the second mirror, and the
         * second page of the second mirror can be repaired by
         * copying the contents of the 2nd page of the 1st mirror.
         * One more note: if the pages of one mirror contain I/O
         * errors, the checksum cannot be verified. In order to get
         * the best data for repairing, the first attempt is to find
         * a mirror without I/O errors and with a validated checksum.
         * Only if this is not possible, the pages are picked from
         * mirrors with I/O errors without considering the checksum.
         * If the latter is the case, at the end, the checksum of the
         * repaired area is verified in order to correctly maintain
         * the statistics.
         */

        sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
                                      sizeof(*sblocks_for_recheck), GFP_NOFS);
        if (!sblocks_for_recheck) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.malloc_errors++;
                sctx->stat.read_errors++;
                sctx->stat.uncorrectable_errors++;
                spin_unlock(&sctx->stat_lock);
                btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
                goto out;
        }

        /* setup the context, map the logical blocks and alloc the pages */
        ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
        if (ret) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.read_errors++;
                sctx->stat.uncorrectable_errors++;
                spin_unlock(&sctx->stat_lock);
                btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
                goto out;
        }
        BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
        sblock_bad = sblocks_for_recheck + failed_mirror_index;

        /* build and submit the bios for the failed mirror, check checksums */
        scrub_recheck_block(fs_info, sblock_bad, 1);

        if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
            sblock_bad->no_io_error_seen) {
                /*
                 * the error disappeared after reading page by page, or
                 * the area was part of a huge bio and other parts of the
                 * bio caused I/O errors, or the block layer merged several
                 * read requests into one and the error is caused by a
                 * different bio (usually one of the two latter cases is
                 * the cause)
                 */
                spin_lock(&sctx->stat_lock);
                sctx->stat.unverified_errors++;
                sblock_to_check->data_corrected = 1;
                spin_unlock(&sctx->stat_lock);

                if (sctx->is_dev_replace)
                        scrub_write_block_to_dev_replace(sblock_bad);
                goto out;
        }

        if (!sblock_bad->no_io_error_seen) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.read_errors++;
                spin_unlock(&sctx->stat_lock);
                if (__ratelimit(&_rs))
                        scrub_print_warning("i/o error", sblock_to_check);
                btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
        } else if (sblock_bad->checksum_error) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.csum_errors++;
                spin_unlock(&sctx->stat_lock);
                if (__ratelimit(&_rs))
                        scrub_print_warning("checksum error", sblock_to_check);
                btrfs_dev_stat_inc_and_print(dev,
                                             BTRFS_DEV_STAT_CORRUPTION_ERRS);
        } else if (sblock_bad->header_error) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.verify_errors++;
                spin_unlock(&sctx->stat_lock);
                if (__ratelimit(&_rs))
                        scrub_print_warning("checksum/header error",
                                            sblock_to_check);
                if (sblock_bad->generation_error)
                        btrfs_dev_stat_inc_and_print(dev,
                                BTRFS_DEV_STAT_GENERATION_ERRS);
                else
                        btrfs_dev_stat_inc_and_print(dev,
                                BTRFS_DEV_STAT_CORRUPTION_ERRS);
        }

        if (sctx->readonly) {
                ASSERT(!sctx->is_dev_replace);
                goto out;
        }

        if (!is_metadata && !have_csum) {
                struct scrub_fixup_nodatasum *fixup_nodatasum;

                WARN_ON(sctx->is_dev_replace);

nodatasum_case:

                /*
                 * !is_metadata and !have_csum, this means that the data
                 * might not be COWed, that it might be modified
                 * concurrently. The general strategy to work on the
                 * commit root does not help in the case when COW is not
                 * used.
                 */
                fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
                if (!fixup_nodatasum)
                        goto did_not_correct_error;
                fixup_nodatasum->sctx = sctx;
                fixup_nodatasum->dev = dev;
                fixup_nodatasum->logical = logical;
                fixup_nodatasum->root = fs_info->extent_root;
                fixup_nodatasum->mirror_num = failed_mirror_index + 1;
                scrub_pending_trans_workers_inc(sctx);
                btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
                                scrub_fixup_nodatasum, NULL, NULL);
                btrfs_queue_work(fs_info->scrub_workers,
                                 &fixup_nodatasum->work);
                goto out;
        }

        /*
         * now build and submit the bios for the other mirrors, check
         * checksums.
         * First try to pick the mirror which is completely without I/O
         * errors and also does not have a checksum error.
         * If one is found, and if a checksum is present, the full block
         * that is known to contain an error is rewritten. Afterwards
         * the block is known to be corrected.
         * If a mirror is found which is completely correct, and no
         * checksum is present, only those pages are rewritten that had
         * an I/O error in the block to be repaired, since it cannot be
         * determined which copy of the other pages is better (and it
         * could happen otherwise that a correct page would be
         * overwritten by a bad one).
         */
        for (mirror_index = 0;
             mirror_index < BTRFS_MAX_MIRRORS &&
             sblocks_for_recheck[mirror_index].page_count > 0;
             mirror_index++) {
                struct scrub_block *sblock_other;

                if (mirror_index == failed_mirror_index)
                        continue;
                sblock_other = sblocks_for_recheck + mirror_index;

                /* build and submit the bios, check checksums */
                scrub_recheck_block(fs_info, sblock_other, 0);

                if (!sblock_other->header_error &&
                    !sblock_other->checksum_error &&
                    sblock_other->no_io_error_seen) {
                        if (sctx->is_dev_replace) {
                                scrub_write_block_to_dev_replace(sblock_other);
                                goto corrected_error;
                        } else {
                                ret = scrub_repair_block_from_good_copy(
                                                sblock_bad, sblock_other);
                                if (!ret)
                                        goto corrected_error;
                        }
                }
        }

        if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
                goto did_not_correct_error;

        /*
         * In case of I/O errors in the area that is supposed to be
         * repaired, continue by picking good copies of those pages.
         * Select the good pages from mirrors to rewrite bad pages from
         * the area to fix. Afterwards verify the checksum of the block
         * that is supposed to be repaired. This verification step is
         * only done for the purpose of statistic counting and for the
         * final scrub report, whether errors remain.
         * A perfect algorithm could make use of the checksum and try
         * all possible combinations of pages from the different mirrors
         * until the checksum verification succeeds. For example, when
         * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
         * of mirror #2 is readable but the final checksum test fails,
         * then the 2nd page of mirror #3 could be tried, to see whether
         * the final checksum then succeeds. But this would be a rare
         * exception and is therefore not implemented. At least it is
         * avoided that the good copy is overwritten.
         * A more useful improvement would be to pick the sectors
         * without I/O error based on sector sizes (512 bytes on legacy
         * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
         * mirror could be repaired by taking 512 byte of a different
         * mirror, even if other 512 byte sectors in the same PAGE_SIZE
         * area are unreadable.
         */
        success = 1;
        for (page_num = 0; page_num < sblock_bad->page_count;
             page_num++) {
                struct scrub_page *page_bad = sblock_bad->pagev[page_num];
                struct scrub_block *sblock_other = NULL;

                /* skip no-io-error page in scrub */
                if (!page_bad->io_error && !sctx->is_dev_replace)
                        continue;

                /* try to find no-io-error page in mirrors */
                if (page_bad->io_error) {
                        for (mirror_index = 0;
                             mirror_index < BTRFS_MAX_MIRRORS &&
                             sblocks_for_recheck[mirror_index].page_count > 0;
                             mirror_index++) {
                                if (!sblocks_for_recheck[mirror_index].
                                    pagev[page_num]->io_error) {
                                        sblock_other = sblocks_for_recheck +
                                                       mirror_index;
                                        break;
                                }
                        }
                        if (!sblock_other)
                                success = 0;
                }

                if (sctx->is_dev_replace) {
                        /*
                         * did not find a mirror to fetch the page
                         * from. scrub_write_page_to_dev_replace()
                         * handles this case (page->io_error), by
                         * filling the block with zeros before
                         * submitting the write request
                         */
                        if (!sblock_other)
                                sblock_other = sblock_bad;

                        if (scrub_write_page_to_dev_replace(sblock_other,
                                                            page_num) != 0) {
                                btrfs_dev_replace_stats_inc(
                                        &fs_info->dev_replace.num_write_errors);
                                success = 0;
                        }
                } else if (sblock_other) {
                        ret = scrub_repair_page_from_good_copy(sblock_bad,
                                                               sblock_other,
                                                               page_num, 0);
                        if (0 == ret)
                                page_bad->io_error = 0;
                        else
                                success = 0;
                }
        }

        if (success && !sctx->is_dev_replace) {
                if (is_metadata || have_csum) {
                        /*
                         * need to verify the checksum now that all
                         * sectors on disk are repaired (the write
                         * request for data to be repaired is on its way).
                         * Just be lazy and use scrub_recheck_block()
                         * which re-reads the data before the checksum
                         * is verified, but most likely the data comes out
                         * of the page cache.
                         */
                        scrub_recheck_block(fs_info, sblock_bad, 1);
                        if (!sblock_bad->header_error &&
                            !sblock_bad->checksum_error &&
                            sblock_bad->no_io_error_seen)
                                goto corrected_error;
                        else
                                goto did_not_correct_error;
                } else {
corrected_error:
                        spin_lock(&sctx->stat_lock);
                        sctx->stat.corrected_errors++;
                        sblock_to_check->data_corrected = 1;
                        spin_unlock(&sctx->stat_lock);
                        btrfs_err_rl_in_rcu(fs_info,
                                "fixed up error at logical %llu on dev %s",
                                logical, rcu_str_deref(dev->name));
                }
        } else {
did_not_correct_error:
                spin_lock(&sctx->stat_lock);
                sctx->stat.uncorrectable_errors++;
                spin_unlock(&sctx->stat_lock);
                btrfs_err_rl_in_rcu(fs_info,
                        "unable to fixup (regular) error at logical %llu on dev %s",
                        logical, rcu_str_deref(dev->name));
        }

out:
        if (sblocks_for_recheck) {
                for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
                     mirror_index++) {
                        struct scrub_block *sblock = sblocks_for_recheck +
                                                     mirror_index;
                        struct scrub_recover *recover;
                        int page_index;

                        for (page_index = 0; page_index < sblock->page_count;
                             page_index++) {
                                sblock->pagev[page_index]->sblock = NULL;
                                recover = sblock->pagev[page_index]->recover;
                                if (recover) {
                                        scrub_put_recover(fs_info, recover);
                                        sblock->pagev[page_index]->recover =
                                                                        NULL;
                                }
                                scrub_page_put(sblock->pagev[page_index]);
                        }
                }
                kfree(sblocks_for_recheck);
        }

        return 0;
}

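/*
 * Note: for RAID5 a block can be read in two ways (the copy on its data
 * stripe, or a rebuild from the remaining stripes via parity); RAID6 adds a
 * second parity stripe and therefore a third way. All other profiles simply
 * have one mirror per returned stripe.
 */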
static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
{
        if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
                return 2;
        else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
                return 3;
        else
                return (int)bbio->num_stripes;
}

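/*
 * For RAID5/6, raid_map[] holds the logical address covered by each stripe,
 * with the P and Q stripes marked by the RAID5_P_STRIPE/RAID6_Q_STRIPE
 * sentinel values, so the data stripe containing @logical can be found with
 * a simple range check.
 */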
static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
                                                 u64 *raid_map,
                                                 u64 mapped_length,
                                                 int nstripes, int mirror,
                                                 int *stripe_index,
                                                 u64 *stripe_offset)
{
        int i;

        if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
                /* RAID5/6 */
                for (i = 0; i < nstripes; i++) {
                        if (raid_map[i] == RAID6_Q_STRIPE ||
                            raid_map[i] == RAID5_P_STRIPE)
                                continue;

                        if (logical >= raid_map[i] &&
                            logical < raid_map[i] + mapped_length)
                                break;
                }

                *stripe_index = i;
                *stripe_offset = logical - raid_map[i];
        } else {
                /* The other RAID type */
                *stripe_index = mirror;
                *stripe_offset = 0;
        }
}
1298
1299 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
1300                                      struct scrub_block *sblocks_for_recheck)
1301 {
1302         struct scrub_ctx *sctx = original_sblock->sctx;
1303         struct btrfs_fs_info *fs_info = sctx->fs_info;
1304         u64 length = original_sblock->page_count * PAGE_SIZE;
1305         u64 logical = original_sblock->pagev[0]->logical;
1306         u64 generation = original_sblock->pagev[0]->generation;
1307         u64 flags = original_sblock->pagev[0]->flags;
1308         u64 have_csum = original_sblock->pagev[0]->have_csum;
1309         struct scrub_recover *recover;
1310         struct btrfs_bio *bbio;
1311         u64 sublen;
1312         u64 mapped_length;
1313         u64 stripe_offset;
1314         int stripe_index;
1315         int page_index = 0;
1316         int mirror_index;
1317         int nmirrors;
1318         int ret;
1319
1320         /*
1321          * note: the two members refs and outstanding_pages
1322          * are neither set nor used in the blocks that are built for
1323          * the recheck procedure
1324          */
1325
1326         while (length > 0) {
1327                 sublen = min_t(u64, length, PAGE_SIZE);
1328                 mapped_length = sublen;
1329                 bbio = NULL;
1330
1331                 /*
1332                  * with a length of PAGE_SIZE, each returned stripe
1333                  * represents one mirror
1334                  */
1335                 btrfs_bio_counter_inc_blocked(fs_info);
1336                 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
1337                                 logical, &mapped_length, &bbio);
1338                 if (ret || !bbio || mapped_length < sublen) {
1339                         btrfs_put_bbio(bbio);
1340                         btrfs_bio_counter_dec(fs_info);
1341                         return -EIO;
1342                 }
1343
1344                 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1345                 if (!recover) {
1346                         btrfs_put_bbio(bbio);
1347                         btrfs_bio_counter_dec(fs_info);
1348                         return -ENOMEM;
1349                 }
1350
1351                 refcount_set(&recover->refs, 1);
1352                 recover->bbio = bbio;
1353                 recover->map_length = mapped_length;
1354
1355                 BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK);
1356
1357                 nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
1358
1359                 for (mirror_index = 0; mirror_index < nmirrors;
1360                      mirror_index++) {
1361                         struct scrub_block *sblock;
1362                         struct scrub_page *page;
1363
1364                         sblock = sblocks_for_recheck + mirror_index;
1365                         sblock->sctx = sctx;
1366
1367                         page = kzalloc(sizeof(*page), GFP_NOFS);
1368                         if (!page) {
1369 leave_nomem:
1370                                 spin_lock(&sctx->stat_lock);
1371                                 sctx->stat.malloc_errors++;
1372                                 spin_unlock(&sctx->stat_lock);
1373                                 scrub_put_recover(fs_info, recover);
1374                                 return -ENOMEM;
1375                         }
1376                         scrub_page_get(page);
1377                         sblock->pagev[page_index] = page;
1378                         page->sblock = sblock;
1379                         page->flags = flags;
1380                         page->generation = generation;
1381                         page->logical = logical;
1382                         page->have_csum = have_csum;
1383                         if (have_csum)
1384                                 memcpy(page->csum,
1385                                        original_sblock->pagev[0]->csum,
1386                                        sctx->csum_size);
1387
1388                         scrub_stripe_index_and_offset(logical,
1389                                                       bbio->map_type,
1390                                                       bbio->raid_map,
1391                                                       mapped_length,
1392                                                       bbio->num_stripes -
1393                                                       bbio->num_tgtdevs,
1394                                                       mirror_index,
1395                                                       &stripe_index,
1396                                                       &stripe_offset);
1397                         page->physical = bbio->stripes[stripe_index].physical +
1398                                          stripe_offset;
1399                         page->dev = bbio->stripes[stripe_index].dev;
1400
1401                         BUG_ON(page_index >= original_sblock->page_count);
1402                         page->physical_for_dev_replace =
1403                                 original_sblock->pagev[page_index]->
1404                                 physical_for_dev_replace;
1405                         /* for missing devices, dev->bdev is NULL */
1406                         page->mirror_num = mirror_index + 1;
1407                         sblock->page_count++;
1408                         page->page = alloc_page(GFP_NOFS);
1409                         if (!page->page)
1410                                 goto leave_nomem;
1411
1412                         scrub_get_recover(recover);
1413                         page->recover = recover;
1414                 }
1415                 scrub_put_recover(fs_info, recover);
1416                 length -= sublen;
1417                 logical += sublen;
1418                 page_index++;
1419         }
1420
1421         return 0;
1422 }
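/*
 * After the loop above, sblocks_for_recheck[m] describes mirror m of the
 * whole block: every PAGE_SIZE chunk of the original block contributed
 * one scrub_page to each of the nmirrors blocks, all with the same
 * logical address but with the device and physical offset of that mirror
 * as resolved by scrub_stripe_index_and_offset().
 */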
1423
1424 struct scrub_bio_ret {
1425         struct completion event;
1426         int error;
1427 };
1428
1429 static void scrub_bio_wait_endio(struct bio *bio)
1430 {
1431         struct scrub_bio_ret *ret = bio->bi_private;
1432
1433         ret->error = bio->bi_error;
1434         complete(&ret->event);
1435 }
1436
1437 static inline int scrub_is_page_on_raid56(struct scrub_page *page)
1438 {
1439         return page->recover &&
1440                (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
1441 }
1442
1443 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1444                                         struct bio *bio,
1445                                         struct scrub_page *page)
1446 {
1447         struct scrub_bio_ret done;
1448         int ret;
1449
1450         init_completion(&done.event);
1451         done.error = 0;
1452         bio->bi_iter.bi_sector = page->logical >> 9;
1453         bio->bi_private = &done;
1454         bio->bi_end_io = scrub_bio_wait_endio;
1455
1456         ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
1457                                     page->recover->map_length,
1458                                     page->mirror_num, 0);
1459         if (ret)
1460                 return ret;
1461
1462         wait_for_completion(&done.event);
1463         if (done.error)
1464                 return -EIO;
1465
1466         return 0;
1467 }
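/*
 * Note on the ">> 9" above and in the bio setup code below: bio sector
 * numbers are in 512-byte units, so byte addresses are converted with a
 * shift by 9 (e.g. a 1 MiB offset becomes sector 2048). The completion
 * in scrub_bio_wait_endio() turns the asynchronous RAID5/6 recovery into
 * a plain blocking read for the recheck path.
 */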
1468
1469 /*
1470  * This function checks the on-disk data for checksum errors, header
1471  * errors and read I/O errors. If any I/O error happens, the exact pages
1472  * that failed are marked as bad. The goal is to enable scrub to take
1473  * the pages that are good on any of the mirrors, so that the bad pages
1474  * of the mirror just handled can be repaired.
1475  */
1476 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1477                                 struct scrub_block *sblock,
1478                                 int retry_failed_mirror)
1479 {
1480         int page_num;
1481
1482         sblock->no_io_error_seen = 1;
1483
1484         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1485                 struct bio *bio;
1486                 struct scrub_page *page = sblock->pagev[page_num];
1487
1488                 if (page->dev->bdev == NULL) {
1489                         page->io_error = 1;
1490                         sblock->no_io_error_seen = 0;
1491                         continue;
1492                 }
1493
1494                 WARN_ON(!page->page);
1495                 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1496                 if (!bio) {
1497                         page->io_error = 1;
1498                         sblock->no_io_error_seen = 0;
1499                         continue;
1500                 }
1501                 bio->bi_bdev = page->dev->bdev;
1502
1503                 bio_add_page(bio, page->page, PAGE_SIZE, 0);
1504                 if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
1505                         if (scrub_submit_raid56_bio_wait(fs_info, bio, page)) {
1506                                 page->io_error = 1;
1507                                 sblock->no_io_error_seen = 0;
1508                         }
1509                 } else {
1510                         bio->bi_iter.bi_sector = page->physical >> 9;
1511                         bio_set_op_attrs(bio, REQ_OP_READ, 0);
1512
1513                         if (btrfsic_submit_bio_wait(bio)) {
1514                                 page->io_error = 1;
1515                                 sblock->no_io_error_seen = 0;
1516                         }
1517                 }
1518
1519                 bio_put(bio);
1520         }
1521
1522         if (sblock->no_io_error_seen)
1523                 scrub_recheck_block_checksum(sblock);
1524 }
1525
1526 static inline int scrub_check_fsid(u8 fsid[],
1527                                    struct scrub_page *spage)
1528 {
1529         struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
1530         int ret;
1531
1532         ret = memcmp(fsid, fs_devices->fsid, BTRFS_UUID_SIZE);
1533         return !ret;
1534 }
1535
1536 static void scrub_recheck_block_checksum(struct scrub_block *sblock)
1537 {
1538         sblock->header_error = 0;
1539         sblock->checksum_error = 0;
1540         sblock->generation_error = 0;
1541
1542         if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA)
1543                 scrub_checksum_data(sblock);
1544         else
1545                 scrub_checksum_tree_block(sblock);
1546 }
1547
1548 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1549                                              struct scrub_block *sblock_good)
1550 {
1551         int page_num;
1552         int ret = 0;
1553
1554         for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1555                 int ret_sub;
1556
1557                 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1558                                                            sblock_good,
1559                                                            page_num, 1);
1560                 if (ret_sub)
1561                         ret = ret_sub;
1562         }
1563
1564         return ret;
1565 }
1566
1567 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1568                                             struct scrub_block *sblock_good,
1569                                             int page_num, int force_write)
1570 {
1571         struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1572         struct scrub_page *page_good = sblock_good->pagev[page_num];
1573         struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
1574
1575         BUG_ON(page_bad->page == NULL);
1576         BUG_ON(page_good->page == NULL);
1577         if (force_write || sblock_bad->header_error ||
1578             sblock_bad->checksum_error || page_bad->io_error) {
1579                 struct bio *bio;
1580                 int ret;
1581
1582                 if (!page_bad->dev->bdev) {
1583                         btrfs_warn_rl(fs_info,
1584                                 "scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
1585                         return -EIO;
1586                 }
1587
1588                 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1589                 if (!bio)
1590                         return -EIO;
1591                 bio->bi_bdev = page_bad->dev->bdev;
1592                 bio->bi_iter.bi_sector = page_bad->physical >> 9;
1593                 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
1594
1595                 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1596                 if (PAGE_SIZE != ret) {
1597                         bio_put(bio);
1598                         return -EIO;
1599                 }
1600
1601                 if (btrfsic_submit_bio_wait(bio)) {
1602                         btrfs_dev_stat_inc_and_print(page_bad->dev,
1603                                 BTRFS_DEV_STAT_WRITE_ERRS);
1604                         btrfs_dev_replace_stats_inc(
1605                                 &fs_info->dev_replace.num_write_errors);
1606                         bio_put(bio);
1607                         return -EIO;
1608                 }
1609                 bio_put(bio);
1610         }
1611
1612         return 0;
1613 }
1614
1615 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1616 {
1617         struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
1618         int page_num;
1619
1620         /*
1621          * This block is used to check the parity on the source device,
1622          * so the data need not be written to the destination device.
1623          */
1624         if (sblock->sparity)
1625                 return;
1626
1627         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1628                 int ret;
1629
1630                 ret = scrub_write_page_to_dev_replace(sblock, page_num);
1631                 if (ret)
1632                         btrfs_dev_replace_stats_inc(
1633                                 &fs_info->dev_replace.num_write_errors);
1634         }
1635 }
1636
1637 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1638                                            int page_num)
1639 {
1640         struct scrub_page *spage = sblock->pagev[page_num];
1641
1642         BUG_ON(spage->page == NULL);
1643         if (spage->io_error) {
1644                 void *mapped_buffer = kmap_atomic(spage->page);
1645
1646                 clear_page(mapped_buffer);
1647                 flush_dcache_page(spage->page);
1648                 kunmap_atomic(mapped_buffer);
1649         }
1650         return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1651 }
1652
1653 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1654                                     struct scrub_page *spage)
1655 {
1656         struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1657         struct scrub_bio *sbio;
1658         int ret;
1659
1660         mutex_lock(&wr_ctx->wr_lock);
1661 again:
1662         if (!wr_ctx->wr_curr_bio) {
1663                 wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
1664                                               GFP_KERNEL);
1665                 if (!wr_ctx->wr_curr_bio) {
1666                         mutex_unlock(&wr_ctx->wr_lock);
1667                         return -ENOMEM;
1668                 }
1669                 wr_ctx->wr_curr_bio->sctx = sctx;
1670                 wr_ctx->wr_curr_bio->page_count = 0;
1671         }
1672         sbio = wr_ctx->wr_curr_bio;
1673         if (sbio->page_count == 0) {
1674                 struct bio *bio;
1675
1676                 sbio->physical = spage->physical_for_dev_replace;
1677                 sbio->logical = spage->logical;
1678                 sbio->dev = wr_ctx->tgtdev;
1679                 bio = sbio->bio;
1680                 if (!bio) {
1681                         bio = btrfs_io_bio_alloc(GFP_KERNEL,
1682                                         wr_ctx->pages_per_wr_bio);
1683                         if (!bio) {
1684                                 mutex_unlock(&wr_ctx->wr_lock);
1685                                 return -ENOMEM;
1686                         }
1687                         sbio->bio = bio;
1688                 }
1689
1690                 bio->bi_private = sbio;
1691                 bio->bi_end_io = scrub_wr_bio_end_io;
1692                 bio->bi_bdev = sbio->dev->bdev;
1693                 bio->bi_iter.bi_sector = sbio->physical >> 9;
1694                 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
1695                 sbio->err = 0;
1696         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1697                    spage->physical_for_dev_replace ||
1698                    sbio->logical + sbio->page_count * PAGE_SIZE !=
1699                    spage->logical) {
1700                 scrub_wr_submit(sctx);
1701                 goto again;
1702         }
1703
1704         ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1705         if (ret != PAGE_SIZE) {
1706                 if (sbio->page_count < 1) {
1707                         bio_put(sbio->bio);
1708                         sbio->bio = NULL;
1709                         mutex_unlock(&wr_ctx->wr_lock);
1710                         return -EIO;
1711                 }
1712                 scrub_wr_submit(sctx);
1713                 goto again;
1714         }
1715
1716         sbio->pagev[sbio->page_count] = spage;
1717         scrub_page_get(spage);
1718         sbio->page_count++;
1719         if (sbio->page_count == wr_ctx->pages_per_wr_bio)
1720                 scrub_wr_submit(sctx);
1721         mutex_unlock(&wr_ctx->wr_lock);
1722
1723         return 0;
1724 }
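/*
 * The "else if" contiguity check above is what batches pages into a
 * single write bio: a page is only appended while it directly follows
 * what is already queued, both physically and logically. Worked example
 * with 4 KiB pages: a bio started at physical 0x10000 that already holds
 * three pages only accepts a page whose physical_for_dev_replace is
 * 0x10000 + 3 * 0x1000 = 0x13000; anything else triggers
 * scrub_wr_submit() and a fresh bio.
 */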
1725
1726 static void scrub_wr_submit(struct scrub_ctx *sctx)
1727 {
1728         struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1729         struct scrub_bio *sbio;
1730
1731         if (!wr_ctx->wr_curr_bio)
1732                 return;
1733
1734         sbio = wr_ctx->wr_curr_bio;
1735         wr_ctx->wr_curr_bio = NULL;
1736         WARN_ON(!sbio->bio->bi_bdev);
1737         scrub_pending_bio_inc(sctx);
1738         /* Process all writes in a single worker thread. Then the block layer
1739          * orders the requests before sending them to the driver, which
1740          * doubled the write performance on spinning disks when measured
1741          * with Linux 3.5. */
1742         btrfsic_submit_bio(sbio->bio);
1743 }
1744
1745 static void scrub_wr_bio_end_io(struct bio *bio)
1746 {
1747         struct scrub_bio *sbio = bio->bi_private;
1748         struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
1749
1750         sbio->err = bio->bi_error;
1751         sbio->bio = bio;
1752
1753         btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
1754                          scrub_wr_bio_end_io_worker, NULL, NULL);
1755         btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
1756 }
1757
1758 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
1759 {
1760         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1761         struct scrub_ctx *sctx = sbio->sctx;
1762         int i;
1763
1764         WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
1765         if (sbio->err) {
1766                 struct btrfs_dev_replace *dev_replace =
1767                         &sbio->sctx->fs_info->dev_replace;
1768
1769                 for (i = 0; i < sbio->page_count; i++) {
1770                         struct scrub_page *spage = sbio->pagev[i];
1771
1772                         spage->io_error = 1;
1773                         btrfs_dev_replace_stats_inc(&dev_replace->
1774                                                     num_write_errors);
1775                 }
1776         }
1777
1778         for (i = 0; i < sbio->page_count; i++)
1779                 scrub_page_put(sbio->pagev[i]);
1780
1781         bio_put(sbio->bio);
1782         kfree(sbio);
1783         scrub_pending_bio_dec(sctx);
1784 }
1785
1786 static int scrub_checksum(struct scrub_block *sblock)
1787 {
1788         u64 flags;
1789         int ret;
1790
1791         /*
1792          * No need to initialize these stats currently,
1793          * because this function only uses the return value
1794          * instead of these stats values.
1795          *
1796          * Todo:
1797          * always use stats
1798          */
1799         sblock->header_error = 0;
1800         sblock->generation_error = 0;
1801         sblock->checksum_error = 0;
1802
1803         WARN_ON(sblock->page_count < 1);
1804         flags = sblock->pagev[0]->flags;
1805         ret = 0;
1806         if (flags & BTRFS_EXTENT_FLAG_DATA)
1807                 ret = scrub_checksum_data(sblock);
1808         else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1809                 ret = scrub_checksum_tree_block(sblock);
1810         else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1811                 (void)scrub_checksum_super(sblock);
1812         else
1813                 WARN_ON(1);
1814         if (ret)
1815                 scrub_handle_errored_block(sblock);
1816
1817         return ret;
1818 }
1819
1820 static int scrub_checksum_data(struct scrub_block *sblock)
1821 {
1822         struct scrub_ctx *sctx = sblock->sctx;
1823         u8 csum[BTRFS_CSUM_SIZE];
1824         u8 *on_disk_csum;
1825         struct page *page;
1826         void *buffer;
1827         u32 crc = ~(u32)0;
1828         u64 len;
1829         int index;
1830
1831         BUG_ON(sblock->page_count < 1);
1832         if (!sblock->pagev[0]->have_csum)
1833                 return 0;
1834
1835         on_disk_csum = sblock->pagev[0]->csum;
1836         page = sblock->pagev[0]->page;
1837         buffer = kmap_atomic(page);
1838
1839         len = sctx->sectorsize;
1840         index = 0;
1841         for (;;) {
1842                 u64 l = min_t(u64, len, PAGE_SIZE);
1843
1844                 crc = btrfs_csum_data(buffer, crc, l);
1845                 kunmap_atomic(buffer);
1846                 len -= l;
1847                 if (len == 0)
1848                         break;
1849                 index++;
1850                 BUG_ON(index >= sblock->page_count);
1851                 BUG_ON(!sblock->pagev[index]->page);
1852                 page = sblock->pagev[index]->page;
1853                 buffer = kmap_atomic(page);
1854         }
1855
1856         btrfs_csum_final(crc, csum);
1857         if (memcmp(csum, on_disk_csum, sctx->csum_size))
1858                 sblock->checksum_error = 1;
1859
1860         return sblock->checksum_error;
1861 }
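/*
 * The loop above accumulates the checksum incrementally: the crc is
 * seeded with ~0 and btrfs_csum_data() is fed one kmapped page at a time
 * until sectorsize bytes have been hashed; btrfs_csum_final() then turns
 * the running crc into the byte form that is compared against the csum
 * looked up from the csum tree. With the common sectorsize <= PAGE_SIZE
 * this is a single iteration; the loop only matters when a sector spans
 * several pages.
 */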
1862
1863 static int scrub_checksum_tree_block(struct scrub_block *sblock)
1864 {
1865         struct scrub_ctx *sctx = sblock->sctx;
1866         struct btrfs_header *h;
1867         struct btrfs_fs_info *fs_info = sctx->fs_info;
1868         u8 calculated_csum[BTRFS_CSUM_SIZE];
1869         u8 on_disk_csum[BTRFS_CSUM_SIZE];
1870         struct page *page;
1871         void *mapped_buffer;
1872         u64 mapped_size;
1873         void *p;
1874         u32 crc = ~(u32)0;
1875         u64 len;
1876         int index;
1877
1878         BUG_ON(sblock->page_count < 1);
1879         page = sblock->pagev[0]->page;
1880         mapped_buffer = kmap_atomic(page);
1881         h = (struct btrfs_header *)mapped_buffer;
1882         memcpy(on_disk_csum, h->csum, sctx->csum_size);
1883
1884         /*
1885          * we don't use the getter functions here, as we
1886          * a) don't have an extent buffer and
1887          * b) the page is already kmapped
1888          */
1889         if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
1890                 sblock->header_error = 1;
1891
1892         if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h)) {
1893                 sblock->header_error = 1;
1894                 sblock->generation_error = 1;
1895         }
1896
1897         if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
1898                 sblock->header_error = 1;
1899
1900         if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1901                    BTRFS_UUID_SIZE))
1902                 sblock->header_error = 1;
1903
1904         len = sctx->nodesize - BTRFS_CSUM_SIZE;
1905         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1906         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1907         index = 0;
1908         for (;;) {
1909                 u64 l = min_t(u64, len, mapped_size);
1910
1911                 crc = btrfs_csum_data(p, crc, l);
1912                 kunmap_atomic(mapped_buffer);
1913                 len -= l;
1914                 if (len == 0)
1915                         break;
1916                 index++;
1917                 BUG_ON(index >= sblock->page_count);
1918                 BUG_ON(!sblock->pagev[index]->page);
1919                 page = sblock->pagev[index]->page;
1920                 mapped_buffer = kmap_atomic(page);
1921                 mapped_size = PAGE_SIZE;
1922                 p = mapped_buffer;
1923         }
1924
1925         btrfs_csum_final(crc, calculated_csum);
1926         if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1927                 sblock->checksum_error = 1;
1928
1929         return sblock->header_error || sblock->checksum_error;
1930 }
1931
1932 static int scrub_checksum_super(struct scrub_block *sblock)
1933 {
1934         struct btrfs_super_block *s;
1935         struct scrub_ctx *sctx = sblock->sctx;
1936         u8 calculated_csum[BTRFS_CSUM_SIZE];
1937         u8 on_disk_csum[BTRFS_CSUM_SIZE];
1938         struct page *page;
1939         void *mapped_buffer;
1940         u64 mapped_size;
1941         void *p;
1942         u32 crc = ~(u32)0;
1943         int fail_gen = 0;
1944         int fail_cor = 0;
1945         u64 len;
1946         int index;
1947
1948         BUG_ON(sblock->page_count < 1);
1949         page = sblock->pagev[0]->page;
1950         mapped_buffer = kmap_atomic(page);
1951         s = (struct btrfs_super_block *)mapped_buffer;
1952         memcpy(on_disk_csum, s->csum, sctx->csum_size);
1953
1954         if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
1955                 ++fail_cor;
1956
1957         if (sblock->pagev[0]->generation != btrfs_super_generation(s))
1958                 ++fail_gen;
1959
1960         if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
1961                 ++fail_cor;
1962
1963         len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
1964         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1965         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1966         index = 0;
1967         for (;;) {
1968                 u64 l = min_t(u64, len, mapped_size);
1969
1970                 crc = btrfs_csum_data(p, crc, l);
1971                 kunmap_atomic(mapped_buffer);
1972                 len -= l;
1973                 if (len == 0)
1974                         break;
1975                 index++;
1976                 BUG_ON(index >= sblock->page_count);
1977                 BUG_ON(!sblock->pagev[index]->page);
1978                 page = sblock->pagev[index]->page;
1979                 mapped_buffer = kmap_atomic(page);
1980                 mapped_size = PAGE_SIZE;
1981                 p = mapped_buffer;
1982         }
1983
1984         btrfs_csum_final(crc, calculated_csum);
1985         if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1986                 ++fail_cor;
1987
1988         if (fail_cor + fail_gen) {
1989                 /*
1990                  * if we find an error in a super block, we just report it;
1991                  * the super blocks get rewritten with the next transaction
1992                  * commit anyway.
1993                  */
1994                 spin_lock(&sctx->stat_lock);
1995                 ++sctx->stat.super_errors;
1996                 spin_unlock(&sctx->stat_lock);
1997                 if (fail_cor)
1998                         btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
1999                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
2000                 else
2001                         btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
2002                                 BTRFS_DEV_STAT_GENERATION_ERRS);
2003         }
2004
2005         return fail_cor + fail_gen;
2006 }
2007
2008 static void scrub_block_get(struct scrub_block *sblock)
2009 {
2010         refcount_inc(&sblock->refs);
2011 }
2012
2013 static void scrub_block_put(struct scrub_block *sblock)
2014 {
2015         if (refcount_dec_and_test(&sblock->refs)) {
2016                 int i;
2017
2018                 if (sblock->sparity)
2019                         scrub_parity_put(sblock->sparity);
2020
2021                 for (i = 0; i < sblock->page_count; i++)
2022                         scrub_page_put(sblock->pagev[i]);
2023                 kfree(sblock);
2024         }
2025 }
2026
2027 static void scrub_page_get(struct scrub_page *spage)
2028 {
2029         atomic_inc(&spage->refs);
2030 }
2031
2032 static void scrub_page_put(struct scrub_page *spage)
2033 {
2034         if (atomic_dec_and_test(&spage->refs)) {
2035                 if (spage->page)
2036                         __free_page(spage->page);
2037                 kfree(spage);
2038         }
2039 }
2040
2041 static void scrub_submit(struct scrub_ctx *sctx)
2042 {
2043         struct scrub_bio *sbio;
2044
2045         if (sctx->curr == -1)
2046                 return;
2047
2048         sbio = sctx->bios[sctx->curr];
2049         sctx->curr = -1;
2050         scrub_pending_bio_inc(sctx);
2051         btrfsic_submit_bio(sbio->bio);
2052 }
2053
2054 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
2055                                     struct scrub_page *spage)
2056 {
2057         struct scrub_block *sblock = spage->sblock;
2058         struct scrub_bio *sbio;
2059         int ret;
2060
2061 again:
2062         /*
2063          * grab a fresh bio or wait for one to become available
2064          */
2065         while (sctx->curr == -1) {
2066                 spin_lock(&sctx->list_lock);
2067                 sctx->curr = sctx->first_free;
2068                 if (sctx->curr != -1) {
2069                         sctx->first_free = sctx->bios[sctx->curr]->next_free;
2070                         sctx->bios[sctx->curr]->next_free = -1;
2071                         sctx->bios[sctx->curr]->page_count = 0;
2072                         spin_unlock(&sctx->list_lock);
2073                 } else {
2074                         spin_unlock(&sctx->list_lock);
2075                         wait_event(sctx->list_wait, sctx->first_free != -1);
2076                 }
2077         }
2078         sbio = sctx->bios[sctx->curr];
2079         if (sbio->page_count == 0) {
2080                 struct bio *bio;
2081
2082                 sbio->physical = spage->physical;
2083                 sbio->logical = spage->logical;
2084                 sbio->dev = spage->dev;
2085                 bio = sbio->bio;
2086                 if (!bio) {
2087                         bio = btrfs_io_bio_alloc(GFP_KERNEL,
2088                                         sctx->pages_per_rd_bio);
2089                         if (!bio)
2090                                 return -ENOMEM;
2091                         sbio->bio = bio;
2092                 }
2093
2094                 bio->bi_private = sbio;
2095                 bio->bi_end_io = scrub_bio_end_io;
2096                 bio->bi_bdev = sbio->dev->bdev;
2097                 bio->bi_iter.bi_sector = sbio->physical >> 9;
2098                 bio_set_op_attrs(bio, REQ_OP_READ, 0);
2099                 sbio->err = 0;
2100         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
2101                    spage->physical ||
2102                    sbio->logical + sbio->page_count * PAGE_SIZE !=
2103                    spage->logical ||
2104                    sbio->dev != spage->dev) {
2105                 scrub_submit(sctx);
2106                 goto again;
2107         }
2108
2109         sbio->pagev[sbio->page_count] = spage;
2110         ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
2111         if (ret != PAGE_SIZE) {
2112                 if (sbio->page_count < 1) {
2113                         bio_put(sbio->bio);
2114                         sbio->bio = NULL;
2115                         return -EIO;
2116                 }
2117                 scrub_submit(sctx);
2118                 goto again;
2119         }
2120
2121         scrub_block_get(sblock); /* one for the page added to the bio */
2122         atomic_inc(&sblock->outstanding_pages);
2123         sbio->page_count++;
2124         if (sbio->page_count == sctx->pages_per_rd_bio)
2125                 scrub_submit(sctx);
2126
2127         return 0;
2128 }
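/*
 * Read bios come from a fixed pool: the sctx->bios[] entries are chained
 * through next_free, sctx->first_free is the head of that free list and
 * sctx->curr is the bio currently being filled (-1 if none). When the
 * pool is empty the caller sleeps on list_wait until
 * scrub_bio_end_io_worker() returns an index to the free list, which
 * bounds the number of scrub bios in flight per device.
 */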
2129
2130 static void scrub_missing_raid56_end_io(struct bio *bio)
2131 {
2132         struct scrub_block *sblock = bio->bi_private;
2133         struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
2134
2135         if (bio->bi_error)
2136                 sblock->no_io_error_seen = 0;
2137
2138         bio_put(bio);
2139
2140         btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
2141 }
2142
2143 static void scrub_missing_raid56_worker(struct btrfs_work *work)
2144 {
2145         struct scrub_block *sblock = container_of(work, struct scrub_block, work);
2146         struct scrub_ctx *sctx = sblock->sctx;
2147         struct btrfs_fs_info *fs_info = sctx->fs_info;
2148         u64 logical;
2149         struct btrfs_device *dev;
2150
2151         logical = sblock->pagev[0]->logical;
2152         dev = sblock->pagev[0]->dev;
2153
2154         if (sblock->no_io_error_seen)
2155                 scrub_recheck_block_checksum(sblock);
2156
2157         if (!sblock->no_io_error_seen) {
2158                 spin_lock(&sctx->stat_lock);
2159                 sctx->stat.read_errors++;
2160                 spin_unlock(&sctx->stat_lock);
2161                 btrfs_err_rl_in_rcu(fs_info,
2162                         "IO error rebuilding logical %llu for dev %s",
2163                         logical, rcu_str_deref(dev->name));
2164         } else if (sblock->header_error || sblock->checksum_error) {
2165                 spin_lock(&sctx->stat_lock);
2166                 sctx->stat.uncorrectable_errors++;
2167                 spin_unlock(&sctx->stat_lock);
2168                 btrfs_err_rl_in_rcu(fs_info,
2169                         "failed to rebuild valid logical %llu for dev %s",
2170                         logical, rcu_str_deref(dev->name));
2171         } else {
2172                 scrub_write_block_to_dev_replace(sblock);
2173         }
2174
2175         scrub_block_put(sblock);
2176
2177         if (sctx->is_dev_replace &&
2178             atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2179                 mutex_lock(&sctx->wr_ctx.wr_lock);
2180                 scrub_wr_submit(sctx);
2181                 mutex_unlock(&sctx->wr_ctx.wr_lock);
2182         }
2183
2184         scrub_pending_bio_dec(sctx);
2185 }
2186
2187 static void scrub_missing_raid56_pages(struct scrub_block *sblock)
2188 {
2189         struct scrub_ctx *sctx = sblock->sctx;
2190         struct btrfs_fs_info *fs_info = sctx->fs_info;
2191         u64 length = sblock->page_count * PAGE_SIZE;
2192         u64 logical = sblock->pagev[0]->logical;
2193         struct btrfs_bio *bbio = NULL;
2194         struct bio *bio;
2195         struct btrfs_raid_bio *rbio;
2196         int ret;
2197         int i;
2198
2199         btrfs_bio_counter_inc_blocked(fs_info);
2200         ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
2201                         &length, &bbio);
2202         if (ret || !bbio || !bbio->raid_map)
2203                 goto bbio_out;
2204
2205         if (WARN_ON(!sctx->is_dev_replace ||
2206                     !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2207                 /*
2208                  * We shouldn't be scrubbing a missing device. Even for dev
2209                  * replace, we should only get here for RAID 5/6. We either
2210                  * managed to mount something with no mirrors remaining or
2211                  * there's a bug in scrub_remap_extent()/btrfs_map_block().
2212                  */
2213                 goto bbio_out;
2214         }
2215
2216         bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
2217         if (!bio)
2218                 goto bbio_out;
2219
2220         bio->bi_iter.bi_sector = logical >> 9;
2221         bio->bi_private = sblock;
2222         bio->bi_end_io = scrub_missing_raid56_end_io;
2223
2224         rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
2225         if (!rbio)
2226                 goto rbio_out;
2227
2228         for (i = 0; i < sblock->page_count; i++) {
2229                 struct scrub_page *spage = sblock->pagev[i];
2230
2231                 raid56_add_scrub_pages(rbio, spage->page, spage->logical);
2232         }
2233
2234         btrfs_init_work(&sblock->work, btrfs_scrub_helper,
2235                         scrub_missing_raid56_worker, NULL, NULL);
2236         scrub_block_get(sblock);
2237         scrub_pending_bio_inc(sctx);
2238         raid56_submit_missing_rbio(rbio);
2239         return;
2240
2241 rbio_out:
2242         bio_put(bio);
2243 bbio_out:
2244         btrfs_bio_counter_dec(fs_info);
2245         btrfs_put_bbio(bbio);
2246         spin_lock(&sctx->stat_lock);
2247         sctx->stat.malloc_errors++;
2248         spin_unlock(&sctx->stat_lock);
2249 }
2250
2251 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
2252                        u64 physical, struct btrfs_device *dev, u64 flags,
2253                        u64 gen, int mirror_num, u8 *csum, int force,
2254                        u64 physical_for_dev_replace)
2255 {
2256         struct scrub_block *sblock;
2257         int index;
2258
2259         sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
2260         if (!sblock) {
2261                 spin_lock(&sctx->stat_lock);
2262                 sctx->stat.malloc_errors++;
2263                 spin_unlock(&sctx->stat_lock);
2264                 return -ENOMEM;
2265         }
2266
2267         /* one ref inside this function, plus one for each page added to
2268          * a bio later on */
2269         refcount_set(&sblock->refs, 1);
2270         sblock->sctx = sctx;
2271         sblock->no_io_error_seen = 1;
2272
2273         for (index = 0; len > 0; index++) {
2274                 struct scrub_page *spage;
2275                 u64 l = min_t(u64, len, PAGE_SIZE);
2276
2277                 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
2278                 if (!spage) {
2279 leave_nomem:
2280                         spin_lock(&sctx->stat_lock);
2281                         sctx->stat.malloc_errors++;
2282                         spin_unlock(&sctx->stat_lock);
2283                         scrub_block_put(sblock);
2284                         return -ENOMEM;
2285                 }
2286                 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2287                 scrub_page_get(spage);
2288                 sblock->pagev[index] = spage;
2289                 spage->sblock = sblock;
2290                 spage->dev = dev;
2291                 spage->flags = flags;
2292                 spage->generation = gen;
2293                 spage->logical = logical;
2294                 spage->physical = physical;
2295                 spage->physical_for_dev_replace = physical_for_dev_replace;
2296                 spage->mirror_num = mirror_num;
2297                 if (csum) {
2298                         spage->have_csum = 1;
2299                         memcpy(spage->csum, csum, sctx->csum_size);
2300                 } else {
2301                         spage->have_csum = 0;
2302                 }
2303                 sblock->page_count++;
2304                 spage->page = alloc_page(GFP_KERNEL);
2305                 if (!spage->page)
2306                         goto leave_nomem;
2307                 len -= l;
2308                 logical += l;
2309                 physical += l;
2310                 physical_for_dev_replace += l;
2311         }
2312
2313         WARN_ON(sblock->page_count == 0);
2314         if (dev->missing) {
2315                 /*
2316                  * This case should only be hit for RAID 5/6 device replace. See
2317                  * the comment in scrub_missing_raid56_pages() for details.
2318                  */
2319                 scrub_missing_raid56_pages(sblock);
2320         } else {
2321                 for (index = 0; index < sblock->page_count; index++) {
2322                         struct scrub_page *spage = sblock->pagev[index];
2323                         int ret;
2324
2325                         ret = scrub_add_page_to_rd_bio(sctx, spage);
2326                         if (ret) {
2327                                 scrub_block_put(sblock);
2328                                 return ret;
2329                         }
2330                 }
2331
2332                 if (force)
2333                         scrub_submit(sctx);
2334         }
2335
2336         /* last one frees, either here or in bio completion for last page */
2337         scrub_block_put(sblock);
2338         return 0;
2339 }
2340
2341 static void scrub_bio_end_io(struct bio *bio)
2342 {
2343         struct scrub_bio *sbio = bio->bi_private;
2344         struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
2345
2346         sbio->err = bio->bi_error;
2347         sbio->bio = bio;
2348
2349         btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
2350 }
2351
2352 static void scrub_bio_end_io_worker(struct btrfs_work *work)
2353 {
2354         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2355         struct scrub_ctx *sctx = sbio->sctx;
2356         int i;
2357
2358         BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
2359         if (sbio->err) {
2360                 for (i = 0; i < sbio->page_count; i++) {
2361                         struct scrub_page *spage = sbio->pagev[i];
2362
2363                         spage->io_error = 1;
2364                         spage->sblock->no_io_error_seen = 0;
2365                 }
2366         }
2367
2368         /* now complete the scrub_block items that have all pages completed */
2369         for (i = 0; i < sbio->page_count; i++) {
2370                 struct scrub_page *spage = sbio->pagev[i];
2371                 struct scrub_block *sblock = spage->sblock;
2372
2373                 if (atomic_dec_and_test(&sblock->outstanding_pages))
2374                         scrub_block_complete(sblock);
2375                 scrub_block_put(sblock);
2376         }
2377
2378         bio_put(sbio->bio);
2379         sbio->bio = NULL;
2380         spin_lock(&sctx->list_lock);
2381         sbio->next_free = sctx->first_free;
2382         sctx->first_free = sbio->index;
2383         spin_unlock(&sctx->list_lock);
2384
2385         if (sctx->is_dev_replace &&
2386             atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2387                 mutex_lock(&sctx->wr_ctx.wr_lock);
2388                 scrub_wr_submit(sctx);
2389                 mutex_unlock(&sctx->wr_ctx.wr_lock);
2390         }
2391
2392         scrub_pending_bio_dec(sctx);
2393 }
2394
2395 static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2396                                        unsigned long *bitmap,
2397                                        u64 start, u64 len)
2398 {
2399         u64 offset;
2400         int nsectors;
2401         int sectorsize = sparity->sctx->fs_info->sectorsize;
2402
2403         if (len >= sparity->stripe_len) {
2404                 bitmap_set(bitmap, 0, sparity->nsectors);
2405                 return;
2406         }
2407
2408         start -= sparity->logic_start;
2409         start = div64_u64_rem(start, sparity->stripe_len, &offset);
2410         offset = div_u64(offset, sectorsize);
2411         nsectors = (int)len / sectorsize;
2412
2413         if (offset + nsectors <= sparity->nsectors) {
2414                 bitmap_set(bitmap, offset, nsectors);
2415                 return;
2416         }
2417
2418         bitmap_set(bitmap, offset, sparity->nsectors - offset);
2419         bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2420 }
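/*
 * Worked example for the wrap-around case above (hypothetical numbers):
 * with a 64 KiB stripe_len and 4 KiB sectors, sparity->nsectors is 16.
 * Marking a 16 KiB range that starts at sector offset 14 inside the
 * stripe sets bits 14 and 15 first and then wraps around to set bits 0
 * and 1, because the bitmap describes sector positions within a single
 * stripe of the parity stripe set.
 */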
2421
2422 static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2423                                                    u64 start, u64 len)
2424 {
2425         __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
2426 }
2427
2428 static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2429                                                   u64 start, u64 len)
2430 {
2431         __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
2432 }
2433
2434 static void scrub_block_complete(struct scrub_block *sblock)
2435 {
2436         int corrupted = 0;
2437
2438         if (!sblock->no_io_error_seen) {
2439                 corrupted = 1;
2440                 scrub_handle_errored_block(sblock);
2441         } else {
2442                 /*
2443                  * If the block has a checksum error, it is written via the
2444                  * repair mechanism in the dev-replace case; otherwise it is
2445                  * written to the replacement device here.
2446                  */
2447                 corrupted = scrub_checksum(sblock);
2448                 if (!corrupted && sblock->sctx->is_dev_replace)
2449                         scrub_write_block_to_dev_replace(sblock);
2450         }
2451
2452         if (sblock->sparity && corrupted && !sblock->data_corrected) {
2453                 u64 start = sblock->pagev[0]->logical;
2454                 u64 end = sblock->pagev[sblock->page_count - 1]->logical +
2455                           PAGE_SIZE;
2456
2457                 scrub_parity_mark_sectors_error(sblock->sparity,
2458                                                 start, end - start);
2459         }
2460 }
2461
2462 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
2463 {
2464         struct btrfs_ordered_sum *sum = NULL;
2465         unsigned long index;
2466         unsigned long num_sectors;
2467
2468         while (!list_empty(&sctx->csum_list)) {
2469                 sum = list_first_entry(&sctx->csum_list,
2470                                        struct btrfs_ordered_sum, list);
2471                 if (sum->bytenr > logical)
2472                         return 0;
2473                 if (sum->bytenr + sum->len > logical)
2474                         break;
2475
2476                 ++sctx->stat.csum_discards;
2477                 list_del(&sum->list);
2478                 kfree(sum);
2479                 sum = NULL;
2480         }
2481         if (!sum)
2482                 return 0;
2483
2484         index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
2485         num_sectors = sum->len / sctx->sectorsize;
2486         memcpy(csum, sum->sums + index, sctx->csum_size);
2487         if (index == num_sectors - 1) {
2488                 list_del(&sum->list);
2489                 kfree(sum);
2490         }
2491         return 1;
2492 }
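/*
 * Worked example for the index math above (hypothetical numbers): with a
 * 4 KiB sectorsize, a csum item starting at bytenr B and covering 32 KiB
 * holds 8 per-sector checksums. A lookup for logical B + 12 KiB selects
 * index 3; only when the last index (7) is handed out is the item
 * removed from csum_list and freed, so consecutive lookups keep hitting
 * the same cached item.
 */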
2493
2494 /* scrub extent tries to collect up to 64 kB for each bio */
2495 static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
2496                         u64 physical, struct btrfs_device *dev, u64 flags,
2497                         u64 gen, int mirror_num, u64 physical_for_dev_replace)
2498 {
2499         int ret;
2500         u8 csum[BTRFS_CSUM_SIZE];
2501         u32 blocksize;
2502
2503         if (flags & BTRFS_EXTENT_FLAG_DATA) {
2504                 blocksize = sctx->sectorsize;
2505                 spin_lock(&sctx->stat_lock);
2506                 sctx->stat.data_extents_scrubbed++;
2507                 sctx->stat.data_bytes_scrubbed += len;
2508                 spin_unlock(&sctx->stat_lock);
2509         } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2510                 blocksize = sctx->nodesize;
2511                 spin_lock(&sctx->stat_lock);
2512                 sctx->stat.tree_extents_scrubbed++;
2513                 sctx->stat.tree_bytes_scrubbed += len;
2514                 spin_unlock(&sctx->stat_lock);
2515         } else {
2516                 blocksize = sctx->sectorsize;
2517                 WARN_ON(1);
2518         }
2519
2520         while (len) {
2521                 u64 l = min_t(u64, len, blocksize);
2522                 int have_csum = 0;
2523
2524                 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2525                         /* push csums to sbio */
2526                         have_csum = scrub_find_csum(sctx, logical, csum);
2527                         if (have_csum == 0)
2528                                 ++sctx->stat.no_csum;
2529                         if (sctx->is_dev_replace && !have_csum) {
2530                                 ret = copy_nocow_pages(sctx, logical, l,
2531                                                        mirror_num,
2532                                                       physical_for_dev_replace);
2533                                 goto behind_scrub_pages;
2534                         }
2535                 }
2536                 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
2537                                   mirror_num, have_csum ? csum : NULL, 0,
2538                                   physical_for_dev_replace);
2539 behind_scrub_pages:
2540                 if (ret)
2541                         return ret;
2542                 len -= l;
2543                 logical += l;
2544                 physical += l;
2545                 physical_for_dev_replace += l;
2546         }
2547         return 0;
2548 }
2549
2550 static int scrub_pages_for_parity(struct scrub_parity *sparity,
2551                                   u64 logical, u64 len,
2552                                   u64 physical, struct btrfs_device *dev,
2553                                   u64 flags, u64 gen, int mirror_num, u8 *csum)
2554 {
2555         struct scrub_ctx *sctx = sparity->sctx;
2556         struct scrub_block *sblock;
2557         int index;
2558
2559         sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
2560         if (!sblock) {
2561                 spin_lock(&sctx->stat_lock);
2562                 sctx->stat.malloc_errors++;
2563                 spin_unlock(&sctx->stat_lock);
2564                 return -ENOMEM;
2565         }
2566
2567         /* one ref inside this function, plus one for each page added to
2568          * a bio later on */
2569         refcount_set(&sblock->refs, 1);
2570         sblock->sctx = sctx;
2571         sblock->no_io_error_seen = 1;
2572         sblock->sparity = sparity;
2573         scrub_parity_get(sparity);
2574
2575         for (index = 0; len > 0; index++) {
2576                 struct scrub_page *spage;
2577                 u64 l = min_t(u64, len, PAGE_SIZE);
2578
2579                 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
2580                 if (!spage) {
2581 leave_nomem:
2582                         spin_lock(&sctx->stat_lock);
2583                         sctx->stat.malloc_errors++;
2584                         spin_unlock(&sctx->stat_lock);
2585                         scrub_block_put(sblock);
2586                         return -ENOMEM;
2587                 }
2588                 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2589                 /* For scrub block */
2590                 scrub_page_get(spage);
2591                 sblock->pagev[index] = spage;
2592                 /* For scrub parity */
2593                 scrub_page_get(spage);
2594                 list_add_tail(&spage->list, &sparity->spages);
2595                 spage->sblock = sblock;
2596                 spage->dev = dev;
2597                 spage->flags = flags;
2598                 spage->generation = gen;
2599                 spage->logical = logical;
2600                 spage->physical = physical;
2601                 spage->mirror_num = mirror_num;
2602                 if (csum) {
2603                         spage->have_csum = 1;
2604                         memcpy(spage->csum, csum, sctx->csum_size);
2605                 } else {
2606                         spage->have_csum = 0;
2607                 }
2608                 sblock->page_count++;
2609                 spage->page = alloc_page(GFP_KERNEL);
2610                 if (!spage->page)
2611                         goto leave_nomem;
2612                 len -= l;
2613                 logical += l;
2614                 physical += l;
2615         }
2616
2617         WARN_ON(sblock->page_count == 0);
2618         for (index = 0; index < sblock->page_count; index++) {
2619                 struct scrub_page *spage = sblock->pagev[index];
2620                 int ret;
2621
2622                 ret = scrub_add_page_to_rd_bio(sctx, spage);
2623                 if (ret) {
2624                         scrub_block_put(sblock);
2625                         return ret;
2626                 }
2627         }
2628
2629         /* last one frees, either here or in bio completion for last page */
2630         scrub_block_put(sblock);
2631         return 0;
2632 }
2633
2634 static int scrub_extent_for_parity(struct scrub_parity *sparity,
2635                                    u64 logical, u64 len,
2636                                    u64 physical, struct btrfs_device *dev,
2637                                    u64 flags, u64 gen, int mirror_num)
2638 {
2639         struct scrub_ctx *sctx = sparity->sctx;
2640         int ret;
2641         u8 csum[BTRFS_CSUM_SIZE];
2642         u32 blocksize;
2643
2644         if (dev->missing) {
2645                 scrub_parity_mark_sectors_error(sparity, logical, len);
2646                 return 0;
2647         }
2648
2649         if (flags & BTRFS_EXTENT_FLAG_DATA) {
2650                 blocksize = sctx->sectorsize;
2651         } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2652                 blocksize = sctx->nodesize;
2653         } else {
2654                 blocksize = sctx->sectorsize;
2655                 WARN_ON(1);
2656         }
2657
2658         while (len) {
2659                 u64 l = min_t(u64, len, blocksize);
2660                 int have_csum = 0;
2661
2662                 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2663                         /* push csums to sbio */
2664                         have_csum = scrub_find_csum(sctx, logical, csum);
2665                         if (have_csum == 0)
2666                                 goto skip;
2667                 }
2668                 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2669                                              flags, gen, mirror_num,
2670                                              have_csum ? csum : NULL);
2671                 if (ret)
2672                         return ret;
2673 skip:
2674                 len -= l;
2675                 logical += l;
2676                 physical += l;
2677         }
2678         return 0;
2679 }
2680
2681 /*
2682  * Given a physical address, this will calculate its
2683  * logical offset. If this is a parity stripe, it will return
2684  * the leftmost data stripe's logical offset.
2685  *
2686  * Return 0 if it is a data stripe, 1 if it is a parity stripe.
2687  */
2688 static int get_raid56_logic_offset(u64 physical, int num,
2689                                    struct map_lookup *map, u64 *offset,
2690                                    u64 *stripe_start)
2691 {
2692         int i;
2693         int j = 0;
2694         u64 stripe_nr;
2695         u64 last_offset;
2696         u32 stripe_index;
2697         u32 rot;
2698
2699         last_offset = (physical - map->stripes[num].physical) *
2700                       nr_data_stripes(map);
2701         if (stripe_start)
2702                 *stripe_start = last_offset;
2703
2704         *offset = last_offset;
2705         for (i = 0; i < nr_data_stripes(map); i++) {
2706                 *offset = last_offset + i * map->stripe_len;
2707
2708                 stripe_nr = div_u64(*offset, map->stripe_len);
2709                 stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));
2710
2711                 /* Work out the disk rotation on this stripe-set */
2712                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
2713                 /* calculate which stripe this data is located on */
2714                 rot += i;
2715                 stripe_index = rot % map->num_stripes;
2716                 if (stripe_index == num)
2717                         return 0;
2718                 if (stripe_index < num)
2719                         j++;
2720         }
2721         *offset = last_offset + j * map->stripe_len;
2722         return 1;
2723 }
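/*
 * Worked example (hypothetical 3-device RAID5, 64 KiB stripe_len, so
 * nr_data_stripes(map) = 2): for num = 0 and
 * physical = map->stripes[0].physical + 64 KiB, last_offset becomes
 * 64 KiB * 2 = 128 KiB. The full stripe number 128K / 64K / 2 = 1
 * rotates the layout by one device, so the data slots i = 0 and i = 1
 * land on devices 1 and 2; device 0 holds the parity of that full
 * stripe, and the function returns 1 with *offset = 128 KiB, the logical
 * start of the leftmost data stripe.
 */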
2724
2725 static void scrub_free_parity(struct scrub_parity *sparity)
2726 {
2727         struct scrub_ctx *sctx = sparity->sctx;
2728         struct scrub_page *curr, *next;
2729         int nbits;
2730
2731         nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
2732         if (nbits) {
2733                 spin_lock(&sctx->stat_lock);
2734                 sctx->stat.read_errors += nbits;
2735                 sctx->stat.uncorrectable_errors += nbits;
2736                 spin_unlock(&sctx->stat_lock);
2737         }
2738
2739         list_for_each_entry_safe(curr, next, &sparity->spages, list) {
2740                 list_del_init(&curr->list);
2741                 scrub_page_put(curr);
2742         }
2743
2744         kfree(sparity);
2745 }
2746
2747 static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
2748 {
2749         struct scrub_parity *sparity = container_of(work, struct scrub_parity,
2750                                                     work);
2751         struct scrub_ctx *sctx = sparity->sctx;
2752
2753         scrub_free_parity(sparity);
2754         scrub_pending_bio_dec(sctx);
2755 }
2756
2757 static void scrub_parity_bio_endio(struct bio *bio)
2758 {
2759         struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
2760         struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
2761
2762         if (bio->bi_error)
2763                 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2764                           sparity->nsectors);
2765
2766         bio_put(bio);
2767
2768         btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
2769                         scrub_parity_bio_endio_worker, NULL, NULL);
2770         btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
2771 }
2772
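/*
 * Called when the last reference on the scrub_parity is dropped: hand the
 * data sectors that could be read (dbitmap with ebitmap cleared) to the
 * raid56 layer so the parity of this full stripe can be checked and, if
 * necessary, rewritten.
 */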
2773 static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2774 {
2775         struct scrub_ctx *sctx = sparity->sctx;
2776         struct btrfs_fs_info *fs_info = sctx->fs_info;
2777         struct bio *bio;
2778         struct btrfs_raid_bio *rbio;
2779         struct btrfs_bio *bbio = NULL;
2780         u64 length;
2781         int ret;
2782
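        /*
         * Drop the sectors that already failed reading from the set that
         * still needs a parity check; if nothing is left, there is no
         * parity work to submit and the context is only freed.
         */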
2783         if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
2784                            sparity->nsectors))
2785                 goto out;
2786
2787         length = sparity->logic_end - sparity->logic_start;
2788
2789         btrfs_bio_counter_inc_blocked(fs_info);
2790         ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
2791                                &length, &bbio);
2792         if (ret || !bbio || !bbio->raid_map)
2793                 goto bbio_out;
2794
2795         bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
2796         if (!bio)
2797                 goto bbio_out;
2798
2799         bio->bi_iter.bi_sector = sparity->logic_start >> 9;
2800         bio->bi_private = sparity;
2801         bio->bi_end_io = scrub_parity_bio_endio;
2802
2803         rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
2804                                               length, sparity->scrub_dev,
2805                                               sparity->dbitmap,
2806                                               sparity->nsectors);
2807         if (!rbio)
2808                 goto rbio_out;
2809
2810         scrub_pending_bio_inc(sctx);
2811         raid56_parity_submit_scrub_rbio(rbio);
2812         return;
2813
2814 rbio_out:
2815         bio_put(bio);
2816 bbio_out:
2817         btrfs_bio_counter_dec(fs_info);
2818         btrfs_put_bbio(bbio);
2819         bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2820                   sparity->nsectors);
2821         spin_lock(&sctx->stat_lock);
2822         sctx->stat.malloc_errors++;
2823         spin_unlock(&sctx->stat_lock);
2824 out:
2825         scrub_free_parity(sparity);
2826 }
2827
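/*
 * Bytes needed for one per-sector bitmap, rounded up to whole longs.
 * Illustration: with 64-bit longs, a 64K stripe of 4K sectors gives
 * nsectors = 16 and DIV_ROUND_UP(16, 64) * 8 = 8 bytes per bitmap.
 */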
2828 static inline int scrub_calc_parity_bitmap_len(int nsectors)
2829 {
2830         return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
2831 }
2832
2833 static void scrub_parity_get(struct scrub_parity *sparity)
2834 {
2835         refcount_inc(&sparity->refs);
2836 }
2837
2838 static void scrub_parity_put(struct scrub_parity *sparity)
2839 {
2840         if (!refcount_dec_and_test(&sparity->refs))
2841                 return;
2842
2843         scrub_parity_check_and_repair(sparity);
2844 }
2845
2846 static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
2847                                                   struct map_lookup *map,
2848                                                   struct btrfs_device *sdev,
2849                                                   struct btrfs_path *path,
2850                                                   u64 logic_start,
2851                                                   u64 logic_end)
2852 {
2853         struct btrfs_fs_info *fs_info = sctx->fs_info;
2854         struct btrfs_root *root = fs_info->extent_root;
2855         struct btrfs_root *csum_root = fs_info->csum_root;
2856         struct btrfs_extent_item *extent;
2857         struct btrfs_bio *bbio = NULL;
2858         u64 flags;
2859         int ret;
2860         int slot;
2861         struct extent_buffer *l;
2862         struct btrfs_key key;
2863         u64 generation;
2864         u64 extent_logical;
2865         u64 extent_physical;
2866         u64 extent_len;
2867         u64 mapped_length;
2868         struct btrfs_device *extent_dev;
2869         struct scrub_parity *sparity;
2870         int nsectors;
2871         int bitmap_len;
2872         int extent_mirror_num;
2873         int stop_loop = 0;
2874
2875         nsectors = div_u64(map->stripe_len, fs_info->sectorsize);
2876         bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
2877         sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
2878                           GFP_NOFS);
2879         if (!sparity) {
2880                 spin_lock(&sctx->stat_lock);
2881                 sctx->stat.malloc_errors++;
2882                 spin_unlock(&sctx->stat_lock);
2883                 return -ENOMEM;
2884         }
2885
2886         sparity->stripe_len = map->stripe_len;
2887         sparity->nsectors = nsectors;
2888         sparity->sctx = sctx;
2889         sparity->scrub_dev = sdev;
2890         sparity->logic_start = logic_start;
2891         sparity->logic_end = logic_end;
2892         refcount_set(&sparity->refs, 1);
2893         INIT_LIST_HEAD(&sparity->spages);
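        /*
         * Both bitmaps live in the single trailing allocation made above:
         * the first bitmap_len bytes (dbitmap) track data sectors that need
         * a parity check, the next bitmap_len bytes (ebitmap) track sectors
         * that hit errors.
         */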
2894         sparity->dbitmap = sparity->bitmap;
2895         sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
2896
2897         ret = 0;
2898         while (logic_start < logic_end) {
2899                 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2900                         key.type = BTRFS_METADATA_ITEM_KEY;
2901                 else
2902                         key.type = BTRFS_EXTENT_ITEM_KEY;
2903                 key.objectid = logic_start;
2904                 key.offset = (u64)-1;
2905
2906                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2907                 if (ret < 0)
2908                         goto out;
2909
2910                 if (ret > 0) {
2911                         ret = btrfs_previous_extent_item(root, path, 0);
2912                         if (ret < 0)
2913                                 goto out;
2914                         if (ret > 0) {
2915                                 btrfs_release_path(path);
2916                                 ret = btrfs_search_slot(NULL, root, &key,
2917                                                         path, 0, 0);
2918                                 if (ret < 0)
2919                                         goto out;
2920                         }
2921                 }
2922
2923                 stop_loop = 0;
2924                 while (1) {
2925                         u64 bytes;
2926
2927                         l = path->nodes[0];
2928                         slot = path->slots[0];
2929                         if (slot >= btrfs_header_nritems(l)) {
2930                                 ret = btrfs_next_leaf(root, path);
2931                                 if (ret == 0)
2932                                         continue;
2933                                 if (ret < 0)
2934                                         goto out;
2935
2936                                 stop_loop = 1;
2937                                 break;
2938                         }
2939                         btrfs_item_key_to_cpu(l, &key, slot);
2940
2941                         if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2942                             key.type != BTRFS_METADATA_ITEM_KEY)
2943                                 goto next;
2944
2945                         if (key.type == BTRFS_METADATA_ITEM_KEY)
2946                                 bytes = fs_info->nodesize;
2947                         else
2948                                 bytes = key.offset;
2949
2950                         if (key.objectid + bytes <= logic_start)
2951                                 goto next;
2952
2953                         if (key.objectid >= logic_end) {
2954                                 stop_loop = 1;
2955                                 break;
2956                         }
2957
2958                         while (key.objectid >= logic_start + map->stripe_len)
2959                                 logic_start += map->stripe_len;
2960
2961                         extent = btrfs_item_ptr(l, slot,
2962                                                 struct btrfs_extent_item);
2963                         flags = btrfs_extent_flags(l, extent);
2964                         generation = btrfs_extent_generation(l, extent);
2965
2966                         if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
2967                             (key.objectid < logic_start ||
2968                              key.objectid + bytes >
2969                              logic_start + map->stripe_len)) {
2970                                 btrfs_err(fs_info,
2971                                           "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
2972                                           key.objectid, logic_start);
2973                                 spin_lock(&sctx->stat_lock);
2974                                 sctx->stat.uncorrectable_errors++;
2975                                 spin_unlock(&sctx->stat_lock);
2976                                 goto next;
2977                         }
2978 again:
2979                         extent_logical = key.objectid;
2980                         extent_len = bytes;
2981
2982                         if (extent_logical < logic_start) {
2983                                 extent_len -= logic_start - extent_logical;
2984                                 extent_logical = logic_start;
2985                         }
2986
2987                         if (extent_logical + extent_len >
2988                             logic_start + map->stripe_len)
2989                                 extent_len = logic_start + map->stripe_len -
2990                                              extent_logical;
2991
2992                         scrub_parity_mark_sectors_data(sparity, extent_logical,
2993                                                        extent_len);
2994
2995                         mapped_length = extent_len;
2996                         bbio = NULL;
2997                         ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
2998                                         extent_logical, &mapped_length, &bbio,
2999                                         0);
3000                         if (!ret) {
3001                                 if (!bbio || mapped_length < extent_len)
3002                                         ret = -EIO;
3003                         }
3004                         if (ret) {
3005                                 btrfs_put_bbio(bbio);
3006                                 goto out;
3007                         }
3008                         extent_physical = bbio->stripes[0].physical;
3009                         extent_mirror_num = bbio->mirror_num;
3010                         extent_dev = bbio->stripes[0].dev;
3011                         btrfs_put_bbio(bbio);
3012
3013                         ret = btrfs_lookup_csums_range(csum_root,
3014                                                 extent_logical,
3015                                                 extent_logical + extent_len - 1,
3016                                                 &sctx->csum_list, 1);
3017                         if (ret)
3018                                 goto out;
3019
3020                         ret = scrub_extent_for_parity(sparity, extent_logical,
3021                                                       extent_len,
3022                                                       extent_physical,
3023                                                       extent_dev, flags,
3024                                                       generation,
3025                                                       extent_mirror_num);
3026
3027                         scrub_free_csums(sctx);
3028
3029                         if (ret)
3030                                 goto out;
3031
3032                         if (extent_logical + extent_len <
3033                             key.objectid + bytes) {
3034                                 logic_start += map->stripe_len;
3035
3036                                 if (logic_start >= logic_end) {
3037                                         stop_loop = 1;
3038                                         break;
3039                                 }
3040
3041                                 if (logic_start < key.objectid + bytes) {
3042                                         cond_resched();
3043                                         goto again;
3044                                 }
3045                         }
3046 next:
3047                         path->slots[0]++;
3048                 }
3049
3050                 btrfs_release_path(path);
3051
3052                 if (stop_loop)
3053                         break;
3054
3055                 logic_start += map->stripe_len;
3056         }
3057 out:
3058         if (ret < 0)
3059                 scrub_parity_mark_sectors_error(sparity, logic_start,
3060                                                 logic_end - logic_start);
3061         scrub_parity_put(sparity);
3062         scrub_submit(sctx);
3063         mutex_lock(&sctx->wr_ctx.wr_lock);
3064         scrub_wr_submit(sctx);
3065         mutex_unlock(&sctx->wr_ctx.wr_lock);
3066
3067         btrfs_release_path(path);
3068         return ret < 0 ? ret : 0;
3069 }
3070
3071 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
3072                                            struct map_lookup *map,
3073                                            struct btrfs_device *scrub_dev,
3074                                            int num, u64 base, u64 length,
3075                                            int is_dev_replace)
3076 {
3077         struct btrfs_path *path, *ppath;
3078         struct btrfs_fs_info *fs_info = sctx->fs_info;
3079         struct btrfs_root *root = fs_info->extent_root;
3080         struct btrfs_root *csum_root = fs_info->csum_root;
3081         struct btrfs_extent_item *extent;
3082         struct blk_plug plug;
3083         u64 flags;
3084         int ret;
3085         int slot;
3086         u64 nstripes;
3087         struct extent_buffer *l;
3088         u64 physical;
3089         u64 logical;
3090         u64 logic_end;
3091         u64 physical_end;
3092         u64 generation;
3093         int mirror_num;
3094         struct reada_control *reada1;
3095         struct reada_control *reada2;
3096         struct btrfs_key key;
3097         struct btrfs_key key_end;
3098         u64 increment = map->stripe_len;
3099         u64 offset;
3100         u64 extent_logical;
3101         u64 extent_physical;
3102         u64 extent_len;
3103         u64 stripe_logical;
3104         u64 stripe_end;
3105         struct btrfs_device *extent_dev;
3106         int extent_mirror_num;
3107         int stop_loop = 0;
3108
3109         physical = map->stripes[num].physical;
3110         offset = 0;
3111         nstripes = div_u64(length, map->stripe_len);
3112         if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3113                 offset = map->stripe_len * num;
3114                 increment = map->stripe_len * map->num_stripes;
3115                 mirror_num = 1;
3116         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3117                 int factor = map->num_stripes / map->sub_stripes;
3118                 offset = map->stripe_len * (num / map->sub_stripes);
3119                 increment = map->stripe_len * factor;
3120                 mirror_num = num % map->sub_stripes + 1;
3121         } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3122                 increment = map->stripe_len;
3123                 mirror_num = num % map->num_stripes + 1;
3124         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3125                 increment = map->stripe_len;
3126                 mirror_num = num % map->num_stripes + 1;
3127         } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3128                 get_raid56_logic_offset(physical, num, map, &offset, NULL);
3129                 increment = map->stripe_len * nr_data_stripes(map);
3130                 mirror_num = 1;
3131         } else {
3132                 increment = map->stripe_len;
3133                 mirror_num = 1;
3134         }
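        /*
         * Illustrative numbers for the RAID10 case above (hypothetical
         * 4-stripe chunk with sub_stripes == 2): for num == 3,
         * factor = 4 / 2 = 2, offset = stripe_len * (3 / 2) = stripe_len,
         * increment = 2 * stripe_len and mirror_num = 3 % 2 + 1 = 2.
         */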
3135
3136         path = btrfs_alloc_path();
3137         if (!path)
3138                 return -ENOMEM;
3139
3140         ppath = btrfs_alloc_path();
3141         if (!ppath) {
3142                 btrfs_free_path(path);
3143                 return -ENOMEM;
3144         }
3145
3146         /*
3147          * Work on the commit root. The related disk blocks are static as
3148          * long as COW is applied. This means it is safe to rewrite
3149          * them to repair disk errors without any race conditions.
3150          */
3151         path->search_commit_root = 1;
3152         path->skip_locking = 1;
3153
3154         ppath->search_commit_root = 1;
3155         ppath->skip_locking = 1;
3156         /*
3157          * Trigger the readahead for the extent tree and the csum tree and
3158          * wait for completion. During readahead, the scrub is officially
3159          * paused so that it does not hold off transaction commits.
3160          */
3161         logical = base + offset;
3162         physical_end = physical + nstripes * map->stripe_len;
3163         if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3164                 get_raid56_logic_offset(physical_end, num,
3165                                         map, &logic_end, NULL);
3166                 logic_end += base;
3167         } else {
3168                 logic_end = logical + increment * nstripes;
3169         }
3170         wait_event(sctx->list_wait,
3171                    atomic_read(&sctx->bios_in_flight) == 0);
3172         scrub_blocked_if_needed(fs_info);
3173
3174         /* FIXME it might be better to start readahead at commit root */
3175         key.objectid = logical;
3176         key.type = BTRFS_EXTENT_ITEM_KEY;
3177         key.offset = (u64)0;
3178         key_end.objectid = logic_end;
3179         key_end.type = BTRFS_METADATA_ITEM_KEY;
3180         key_end.offset = (u64)-1;
3181         reada1 = btrfs_reada_add(root, &key, &key_end);
3182
3183         key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3184         key.type = BTRFS_EXTENT_CSUM_KEY;
3185         key.offset = logical;
3186         key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3187         key_end.type = BTRFS_EXTENT_CSUM_KEY;
3188         key_end.offset = logic_end;
3189         reada2 = btrfs_reada_add(csum_root, &key, &key_end);
3190
3191         if (!IS_ERR(reada1))
3192                 btrfs_reada_wait(reada1);
3193         if (!IS_ERR(reada2))
3194                 btrfs_reada_wait(reada2);
3195
3196
3197         /*
3198          * Collect all data csums for the stripe to avoid seeking during
3199          * the scrub. With crc32 this currently ends up being about 1MB.
3200          */
3201         blk_start_plug(&plug);
3202
3203         /*
3204          * now find all extents for each stripe and scrub them
3205          */
3206         ret = 0;
3207         while (physical < physical_end) {
3208                 /*
3209                  * canceled?
3210                  */
3211                 if (atomic_read(&fs_info->scrub_cancel_req) ||
3212                     atomic_read(&sctx->cancel_req)) {
3213                         ret = -ECANCELED;
3214                         goto out;
3215                 }
3216                 /*
3217                  * check to see if we have to pause
3218                  */
3219                 if (atomic_read(&fs_info->scrub_pause_req)) {
3220                         /* push queued extents */
3221                         atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
3222                         scrub_submit(sctx);
3223                         mutex_lock(&sctx->wr_ctx.wr_lock);
3224                         scrub_wr_submit(sctx);
3225                         mutex_unlock(&sctx->wr_ctx.wr_lock);
3226                         wait_event(sctx->list_wait,
3227                                    atomic_read(&sctx->bios_in_flight) == 0);
3228                         atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3229                         scrub_blocked_if_needed(fs_info);
3230                 }
3231
3232                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3233                         ret = get_raid56_logic_offset(physical, num, map,
3234                                                       &logical,
3235                                                       &stripe_logical);
3236                         logical += base;
3237                         if (ret) {
3238                                 /* it is a parity stripe */
3239                                 stripe_logical += base;
3240                                 stripe_end = stripe_logical + increment;
3241                                 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3242                                                           ppath, stripe_logical,
3243                                                           stripe_end);
3244                                 if (ret)
3245                                         goto out;
3246                                 goto skip;
3247                         }
3248                 }
3249
3250                 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3251                         key.type = BTRFS_METADATA_ITEM_KEY;
3252                 else
3253                         key.type = BTRFS_EXTENT_ITEM_KEY;
3254                 key.objectid = logical;
3255                 key.offset = (u64)-1;
3256
3257                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3258                 if (ret < 0)
3259                         goto out;
3260
3261                 if (ret > 0) {
3262                         ret = btrfs_previous_extent_item(root, path, 0);
3263                         if (ret < 0)
3264                                 goto out;
3265                         if (ret > 0) {
3266                                 /* there's no smaller item, so stick with the
3267                                  * larger one */
3268                                 btrfs_release_path(path);
3269                                 ret = btrfs_search_slot(NULL, root, &key,
3270                                                         path, 0, 0);
3271                                 if (ret < 0)
3272                                         goto out;
3273                         }
3274                 }
3275
3276                 stop_loop = 0;
3277                 while (1) {
3278                         u64 bytes;
3279
3280                         l = path->nodes[0];
3281                         slot = path->slots[0];
3282                         if (slot >= btrfs_header_nritems(l)) {
3283                                 ret = btrfs_next_leaf(root, path);
3284                                 if (ret == 0)
3285                                         continue;
3286                                 if (ret < 0)
3287                                         goto out;
3288
3289                                 stop_loop = 1;
3290                                 break;
3291                         }
3292                         btrfs_item_key_to_cpu(l, &key, slot);
3293
3294                         if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3295                             key.type != BTRFS_METADATA_ITEM_KEY)
3296                                 goto next;
3297
3298                         if (key.type == BTRFS_METADATA_ITEM_KEY)
3299                                 bytes = fs_info->nodesize;
3300                         else
3301                                 bytes = key.offset;
3302
3303                         if (key.objectid + bytes <= logical)
3304                                 goto next;
3305
3306                         if (key.objectid >= logical + map->stripe_len) {
3307                                 /* out of this device extent */
3308                                 if (key.objectid >= logic_end)
3309                                         stop_loop = 1;
3310                                 break;
3311                         }
3312
3313                         extent = btrfs_item_ptr(l, slot,
3314                                                 struct btrfs_extent_item);
3315                         flags = btrfs_extent_flags(l, extent);
3316                         generation = btrfs_extent_generation(l, extent);
3317
3318                         if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3319                             (key.objectid < logical ||
3320                              key.objectid + bytes >
3321                              logical + map->stripe_len)) {
3322                                 btrfs_err(fs_info,
3323                                            "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
3324                                        key.objectid, logical);
3325                                 spin_lock(&sctx->stat_lock);
3326                                 sctx->stat.uncorrectable_errors++;
3327                                 spin_unlock(&sctx->stat_lock);
3328                                 goto next;
3329                         }
3330
3331 again:
3332                         extent_logical = key.objectid;
3333                         extent_len = bytes;
3334
3335                         /*
3336                          * trim extent to this stripe
3337                          */
3338                         if (extent_logical < logical) {
3339                                 extent_len -= logical - extent_logical;
3340                                 extent_logical = logical;
3341                         }
3342                         if (extent_logical + extent_len >
3343                             logical + map->stripe_len) {
3344                                 extent_len = logical + map->stripe_len -
3345                                              extent_logical;
3346                         }
3347
3348                         extent_physical = extent_logical - logical + physical;
3349                         extent_dev = scrub_dev;
3350                         extent_mirror_num = mirror_num;
3351                         if (is_dev_replace)
3352                                 scrub_remap_extent(fs_info, extent_logical,
3353                                                    extent_len, &extent_physical,
3354                                                    &extent_dev,
3355                                                    &extent_mirror_num);
3356
3357                         ret = btrfs_lookup_csums_range(csum_root,
3358                                                        extent_logical,
3359                                                        extent_logical +
3360                                                        extent_len - 1,
3361                                                        &sctx->csum_list, 1);
3362                         if (ret)
3363                                 goto out;
3364
3365                         ret = scrub_extent(sctx, extent_logical, extent_len,
3366                                            extent_physical, extent_dev, flags,
3367                                            generation, extent_mirror_num,
3368                                            extent_logical - logical + physical);
3369
3370                         scrub_free_csums(sctx);
3371
3372                         if (ret)
3373                                 goto out;
3374
3375                         if (extent_logical + extent_len <
3376                             key.objectid + bytes) {
3377                                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3378                                         /*
3379                                          * loop until we find the next data stripe
3380                                          * or we have finished all stripes.
3381                                          */
3382 loop:
3383                                         physical += map->stripe_len;
3384                                         ret = get_raid56_logic_offset(physical,
3385                                                         num, map, &logical,
3386                                                         &stripe_logical);
3387                                         logical += base;
3388
3389                                         if (ret && physical < physical_end) {
3390                                                 stripe_logical += base;
3391                                                 stripe_end = stripe_logical +
3392                                                                 increment;
3393                                                 ret = scrub_raid56_parity(sctx,
3394                                                         map, scrub_dev, ppath,
3395                                                         stripe_logical,
3396                                                         stripe_end);
3397                                                 if (ret)
3398                                                         goto out;
3399                                                 goto loop;
3400                                         }
3401                                 } else {
3402                                         physical += map->stripe_len;
3403                                         logical += increment;
3404                                 }
3405                                 if (logical < key.objectid + bytes) {
3406                                         cond_resched();
3407                                         goto again;
3408                                 }
3409
3410                                 if (physical >= physical_end) {
3411                                         stop_loop = 1;
3412                                         break;
3413                                 }
3414                         }
3415 next:
3416                         path->slots[0]++;
3417                 }
3418                 btrfs_release_path(path);
3419 skip:
3420                 logical += increment;
3421                 physical += map->stripe_len;
3422                 spin_lock(&sctx->stat_lock);
3423                 if (stop_loop)
3424                         sctx->stat.last_physical = map->stripes[num].physical +
3425                                                    length;
3426                 else
3427                         sctx->stat.last_physical = physical;
3428                 spin_unlock(&sctx->stat_lock);
3429                 if (stop_loop)
3430                         break;
3431         }
3432 out:
3433         /* push queued extents */
3434         scrub_submit(sctx);
3435         mutex_lock(&sctx->wr_ctx.wr_lock);
3436         scrub_wr_submit(sctx);
3437         mutex_unlock(&sctx->wr_ctx.wr_lock);
3438
3439         blk_finish_plug(&plug);
3440         btrfs_free_path(path);
3441         btrfs_free_path(ppath);
3442         return ret < 0 ? ret : 0;
3443 }
3444
3445 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
3446                                           struct btrfs_device *scrub_dev,
3447                                           u64 chunk_offset, u64 length,
3448                                           u64 dev_offset,
3449                                           struct btrfs_block_group_cache *cache,
3450                                           int is_dev_replace)
3451 {
3452         struct btrfs_fs_info *fs_info = sctx->fs_info;
3453         struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
3454         struct map_lookup *map;
3455         struct extent_map *em;
3456         int i;
3457         int ret = 0;
3458
3459         read_lock(&map_tree->map_tree.lock);
3460         em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3461         read_unlock(&map_tree->map_tree.lock);
3462
3463         if (!em) {
3464                 /*
3465                  * Might have been an unused block group deleted by the cleaner
3466                  * kthread or relocation.
3467                  */
3468                 spin_lock(&cache->lock);
3469                 if (!cache->removed)
3470                         ret = -EINVAL;
3471                 spin_unlock(&cache->lock);
3472
3473                 return ret;
3474         }
3475
3476         map = em->map_lookup;
3477         if (em->start != chunk_offset)
3478                 goto out;
3479
3480         if (em->len < length)
3481                 goto out;
3482
3483         for (i = 0; i < map->num_stripes; ++i) {
3484                 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
3485                     map->stripes[i].physical == dev_offset) {
3486                         ret = scrub_stripe(sctx, map, scrub_dev, i,
3487                                            chunk_offset, length,
3488                                            is_dev_replace);
3489                         if (ret)
3490                                 goto out;
3491                 }
3492         }
3493 out:
3494         free_extent_map(em);
3495
3496         return ret;
3497 }
3498
3499 static noinline_for_stack
3500 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3501                            struct btrfs_device *scrub_dev, u64 start, u64 end,
3502                            int is_dev_replace)
3503 {
3504         struct btrfs_dev_extent *dev_extent = NULL;
3505         struct btrfs_path *path;
3506         struct btrfs_fs_info *fs_info = sctx->fs_info;
3507         struct btrfs_root *root = fs_info->dev_root;
3508         u64 length;
3509         u64 chunk_offset;
3510         int ret = 0;
3511         int ro_set;
3512         int slot;
3513         struct extent_buffer *l;
3514         struct btrfs_key key;
3515         struct btrfs_key found_key;
3516         struct btrfs_block_group_cache *cache;
3517         struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
3518
3519         path = btrfs_alloc_path();
3520         if (!path)
3521                 return -ENOMEM;
3522
3523         path->reada = READA_FORWARD;
3524         path->search_commit_root = 1;
3525         path->skip_locking = 1;
3526
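        /*
         * Dev extent items are keyed as (devid, BTRFS_DEV_EXTENT_KEY,
         * physical offset), so starting at offset 0 walks all device
         * extents of this device in physical order.
         */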
3527         key.objectid = scrub_dev->devid;
3528         key.offset = 0ull;
3529         key.type = BTRFS_DEV_EXTENT_KEY;
3530
3531         while (1) {
3532                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3533                 if (ret < 0)
3534                         break;
3535                 if (ret > 0) {
3536                         if (path->slots[0] >=
3537                             btrfs_header_nritems(path->nodes[0])) {
3538                                 ret = btrfs_next_leaf(root, path);
3539                                 if (ret < 0)
3540                                         break;
3541                                 if (ret > 0) {
3542                                         ret = 0;
3543                                         break;
3544                                 }
3545                         } else {
3546                                 ret = 0;
3547                         }
3548                 }
3549
3550                 l = path->nodes[0];
3551                 slot = path->slots[0];
3552
3553                 btrfs_item_key_to_cpu(l, &found_key, slot);
3554
3555                 if (found_key.objectid != scrub_dev->devid)
3556                         break;
3557
3558                 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
3559                         break;
3560
3561                 if (found_key.offset >= end)
3562                         break;
3563
3564                 if (found_key.offset < key.offset)
3565                         break;
3566
3567                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3568                 length = btrfs_dev_extent_length(l, dev_extent);
3569
3570                 if (found_key.offset + length <= start)
3571                         goto skip;
3572
3573                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3574
3575                 /*
3576                  * get a reference on the corresponding block group to prevent
3577                  * the chunk from going away while we scrub it
3578                  */
3579                 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3580
3581                 /* some chunks are removed but not committed to disk yet,
3582                  * continue scrubbing */
3583                 if (!cache)
3584                         goto skip;
3585
3586                 /*
3587                  * We need to call btrfs_inc_block_group_ro() with scrubs_paused,
3588                  * to avoid a deadlock caused by:
3589                  * btrfs_inc_block_group_ro()
3590                  * -> btrfs_wait_for_commit()
3591                  * -> btrfs_commit_transaction()
3592                  * -> btrfs_scrub_pause()
3593                  */
3594                 scrub_pause_on(fs_info);
3595                 ret = btrfs_inc_block_group_ro(fs_info, cache);
3596                 if (!ret && is_dev_replace) {
3597                         /*
3598                          * If we are doing a device replace wait for any tasks
3599                          * that started delalloc right before we set the block
3600                          * group to RO mode, as they might have just allocated
3601                          * an extent from it or decided they could do a nocow
3602                          * write. And if any such tasks did that, wait for their
3603                          * ordered extents to complete and then commit the
3604                          * current transaction, so that we can later see the new
3605                          * extent items in the extent tree - the ordered extents
3606                          * create delayed data references (for cow writes) when
3607                          * they complete, which will be run and insert the
3608                          * corresponding extent items into the extent tree when
3609                          * we commit the transaction they used when running
3610                          * inode.c:btrfs_finish_ordered_io(). We later use
3611                          * the commit root of the extent tree to find extents
3612                          * to copy from the srcdev into the tgtdev, and we don't
3613                          * want to miss any new extents.
3614                          */
3615                         btrfs_wait_block_group_reservations(cache);
3616                         btrfs_wait_nocow_writers(cache);
3617                         ret = btrfs_wait_ordered_roots(fs_info, -1,
3618                                                        cache->key.objectid,
3619                                                        cache->key.offset);
3620                         if (ret > 0) {
3621                                 struct btrfs_trans_handle *trans;
3622
3623                                 trans = btrfs_join_transaction(root);
3624                                 if (IS_ERR(trans))
3625                                         ret = PTR_ERR(trans);
3626                                 else
3627                                         ret = btrfs_commit_transaction(trans);
3628                                 if (ret) {
3629                                         scrub_pause_off(fs_info);
3630                                         btrfs_put_block_group(cache);
3631                                         break;
3632                                 }
3633                         }
3634                 }
3635                 scrub_pause_off(fs_info);
3636
3637                 if (ret == 0) {
3638                         ro_set = 1;
3639                 } else if (ret == -ENOSPC) {
3640                         /*
3641                          * btrfs_inc_block_group_ro() returns -ENOSPC when it
3642                          * fails to create a new chunk for metadata.
3643                          * It is not a problem for scrub/replace, because
3644                          * metadata is always COWed, and our scrub has paused
3645                          * transaction commits.
3646                          */
3647                         ro_set = 0;
3648                 } else {
3649                         btrfs_warn(fs_info,
3650                                    "failed setting block group ro, ret=%d",
3651                                    ret);
3652                         btrfs_put_block_group(cache);
3653                         break;
3654                 }
3655
3656                 btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
3657                 dev_replace->cursor_right = found_key.offset + length;
3658                 dev_replace->cursor_left = found_key.offset;
3659                 dev_replace->item_needs_writeback = 1;
3660                 btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
3661                 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
3662                                   found_key.offset, cache, is_dev_replace);
3663
3664                 /*
3665                  * Flush and submit all pending read and write bios, and
3666                  * afterwards wait for them.
3667                  * Note that in the dev replace case, a read request causes
3668                  * write requests that are submitted in the read completion
3669                  * worker. Therefore in the current situation, it is required
3670                  * that all write requests are flushed, so that all read and
3671                  * write requests are really completed when bios_in_flight
3672                  * changes to 0.
3673                  */
3674                 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
3675                 scrub_submit(sctx);
3676                 mutex_lock(&sctx->wr_ctx.wr_lock);
3677                 scrub_wr_submit(sctx);
3678                 mutex_unlock(&sctx->wr_ctx.wr_lock);
3679
3680                 wait_event(sctx->list_wait,
3681                            atomic_read(&sctx->bios_in_flight) == 0);
3682
3683                 scrub_pause_on(fs_info);
3684
3685                 /*
3686                  * This must be called before we decrease @scrub_paused.
3687                  * It makes sure we don't block transaction commit while
3688                  * we are waiting for pending workers to finish.
3689                  */
3690                 wait_event(sctx->list_wait,
3691                            atomic_read(&sctx->workers_pending) == 0);
3692                 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3693
3694                 scrub_pause_off(fs_info);
3695
3696                 btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
3697                 dev_replace->cursor_left = dev_replace->cursor_right;
3698                 dev_replace->item_needs_writeback = 1;
3699                 btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
3700
3701                 if (ro_set)
3702                         btrfs_dec_block_group_ro(cache);
3703
3704                 /*
3705                  * We might have prevented the cleaner kthread from deleting
3706                  * this block group if it was already unused because we raced
3707                  * and set it to RO mode first. So add it back to the unused
3708                  * list, otherwise it might not ever be deleted unless a manual
3709                  * balance is triggered or it becomes used and unused again.
3710                  */
3711                 spin_lock(&cache->lock);
3712                 if (!cache->removed && !cache->ro && cache->reserved == 0 &&
3713                     btrfs_block_group_used(&cache->item) == 0) {
3714                         spin_unlock(&cache->lock);
3715                         spin_lock(&fs_info->unused_bgs_lock);
3716                         if (list_empty(&cache->bg_list)) {
3717                                 btrfs_get_block_group(cache);
3718                                 list_add_tail(&cache->bg_list,
3719                                               &fs_info->unused_bgs);
3720                         }
3721                         spin_unlock(&fs_info->unused_bgs_lock);
3722                 } else {
3723                         spin_unlock(&cache->lock);
3724                 }
3725
3726                 btrfs_put_block_group(cache);
3727                 if (ret)
3728                         break;
3729                 if (is_dev_replace &&
3730                     atomic64_read(&dev_replace->num_write_errors) > 0) {
3731                         ret = -EIO;
3732                         break;
3733                 }
3734                 if (sctx->stat.malloc_errors > 0) {
3735                         ret = -ENOMEM;
3736                         break;
3737                 }
3738 skip:
3739                 key.offset = found_key.offset + length;
3740                 btrfs_release_path(path);
3741         }
3742
3743         btrfs_free_path(path);
3744
3745         return ret;
3746 }
3747
3748 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3749                                            struct btrfs_device *scrub_dev)
3750 {
3751         int     i;
3752         u64     bytenr;
3753         u64     gen;
3754         int     ret;
3755         struct btrfs_fs_info *fs_info = sctx->fs_info;
3756
3757         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3758                 return -EIO;
3759
3760         /* Seed devices of a new filesystem have their own generation. */
3761         if (scrub_dev->fs_devices != fs_info->fs_devices)
3762                 gen = scrub_dev->generation;
3763         else
3764                 gen = fs_info->last_trans_committed;
3765
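        /*
         * btrfs_sb_offset() returns the fixed superblock copy locations
         * (64KiB, 64MiB, 256GiB); copies that would lie beyond the end of
         * the device are skipped by the size check below.
         */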
3766         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
3767                 bytenr = btrfs_sb_offset(i);
3768                 if (bytenr + BTRFS_SUPER_INFO_SIZE >
3769                     scrub_dev->commit_total_bytes)
3770                         break;
3771
3772                 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
3773                                   scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
3774                                   NULL, 1, bytenr);
3775                 if (ret)
3776                         return ret;
3777         }
3778         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3779
3780         return 0;
3781 }
3782
3783 /*
3784  * Get a reference count on fs_info->scrub_workers. Start workers if necessary.
3785  */
3786 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
3787                                                 int is_dev_replace)
3788 {
3789         unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
3790         int max_active = fs_info->thread_pool_size;
3791
3792         if (fs_info->scrub_workers_refcnt == 0) {
3793                 if (is_dev_replace)
3794                         fs_info->scrub_workers =
3795                                 btrfs_alloc_workqueue(fs_info, "scrub", flags,
3796                                                       1, 4);
3797                 else
3798                         fs_info->scrub_workers =
3799                                 btrfs_alloc_workqueue(fs_info, "scrub", flags,
3800                                                       max_active, 4);
3801                 if (!fs_info->scrub_workers)
3802                         goto fail_scrub_workers;
3803
3804                 fs_info->scrub_wr_completion_workers =
3805                         btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
3806                                               max_active, 2);
3807                 if (!fs_info->scrub_wr_completion_workers)
3808                         goto fail_scrub_wr_completion_workers;
3809
3810                 fs_info->scrub_nocow_workers =
3811                         btrfs_alloc_workqueue(fs_info, "scrubnc", flags, 1, 0);
3812                 if (!fs_info->scrub_nocow_workers)
3813                         goto fail_scrub_nocow_workers;
3814                 fs_info->scrub_parity_workers =
3815                         btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
3816                                               max_active, 2);
3817                 if (!fs_info->scrub_parity_workers)
3818                         goto fail_scrub_parity_workers;
3819         }
3820         ++fs_info->scrub_workers_refcnt;
3821         return 0;
3822
3823 fail_scrub_parity_workers:
3824         btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
3825 fail_scrub_nocow_workers:
3826         btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3827 fail_scrub_wr_completion_workers:
3828         btrfs_destroy_workqueue(fs_info->scrub_workers);
3829 fail_scrub_workers:
3830         return -ENOMEM;
3831 }
3832
3833 static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
3834 {
3835         if (--fs_info->scrub_workers_refcnt == 0) {
3836                 btrfs_destroy_workqueue(fs_info->scrub_workers);
3837                 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3838                 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
3839                 btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
3840         }
3841         WARN_ON(fs_info->scrub_workers_refcnt < 0);
3842 }
3843
3844 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3845                     u64 end, struct btrfs_scrub_progress *progress,
3846                     int readonly, int is_dev_replace)
3847 {
3848         struct scrub_ctx *sctx;
3849         int ret;
3850         struct btrfs_device *dev;
3851         struct rcu_string *name;
3852
3853         if (btrfs_fs_closing(fs_info))
3854                 return -EINVAL;
3855
3856         if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
3857                 /*
3858                  * The way scrub is implemented, it is unable to calculate
3859                  * the checksum in this case. Do not handle this situation
3860                  * at all because it won't ever happen.
3861                  */
3862                 btrfs_err(fs_info,
3863                            "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
3864                        fs_info->nodesize,
3865                        BTRFS_STRIPE_LEN);
3866                 return -EINVAL;
3867         }
3868
3869         if (fs_info->sectorsize != PAGE_SIZE) {
3870                 /* not supported for data w/o checksums */
3871                 btrfs_err_rl(fs_info,
3872                            "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
3873                        fs_info->sectorsize, PAGE_SIZE);
3874                 return -EINVAL;
3875         }
3876
3877         if (fs_info->nodesize >
3878             PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
3879             fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
3880                 /*
3881                  * would exhaust the array bounds of pagev member in
3882                  * struct scrub_block
3883                  */
3884                 btrfs_err(fs_info,
3885                           "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
3886                        fs_info->nodesize,
3887                        SCRUB_MAX_PAGES_PER_BLOCK,
3888                        fs_info->sectorsize,
3889                        SCRUB_MAX_PAGES_PER_BLOCK);
3890                 return -EINVAL;
3891         }
3892
3893
3894         mutex_lock(&fs_info->fs_devices->device_list_mutex);
3895         dev = btrfs_find_device(fs_info, devid, NULL, NULL);
3896         if (!dev || (dev->missing && !is_dev_replace)) {
3897                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3898                 return -ENODEV;
3899         }
3900
3901         if (!is_dev_replace && !readonly && !dev->writeable) {
3902                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3903                 rcu_read_lock();
3904                 name = rcu_dereference(dev->name);
3905                 btrfs_err(fs_info, "scrub: device %s is not writable",
3906                           name->str);
3907                 rcu_read_unlock();
3908                 return -EROFS;
3909         }
3910
3911         mutex_lock(&fs_info->scrub_lock);
3912         if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
3913                 mutex_unlock(&fs_info->scrub_lock);
3914                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3915                 return -EIO;
3916         }
3917
3918         btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
3919         if (dev->scrub_device ||
3920             (!is_dev_replace &&
3921              btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
3922                 btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
3923                 mutex_unlock(&fs_info->scrub_lock);
3924                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3925                 return -EINPROGRESS;
3926         }
3927         btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
3928
3929         ret = scrub_workers_get(fs_info, is_dev_replace);
3930         if (ret) {
3931                 mutex_unlock(&fs_info->scrub_lock);
3932                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3933                 return ret;
3934         }
3935
3936         sctx = scrub_setup_ctx(dev, is_dev_replace);
3937         if (IS_ERR(sctx)) {
3938                 mutex_unlock(&fs_info->scrub_lock);
3939                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3940                 scrub_workers_put(fs_info);
3941                 return PTR_ERR(sctx);
3942         }
3943         sctx->readonly = readonly;
3944         dev->scrub_device = sctx;
3945         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3946
3947         /*
3948          * By checking @scrub_pause_req here, we can avoid
3949          * a race between transaction commit and scrubbing.
3950          */
3951         __scrub_blocked_if_needed(fs_info);
3952         atomic_inc(&fs_info->scrubs_running);
3953         mutex_unlock(&fs_info->scrub_lock);
3954
3955         if (!is_dev_replace) {
3956                 /*
3957                  * By holding the device list mutex, we can
3958                  * kick off writing the super in log tree sync.
3959                  */
3960                 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3961                 ret = scrub_supers(sctx, dev);
3962                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3963         }
3964
3965         if (!ret)
3966                 ret = scrub_enumerate_chunks(sctx, dev, start, end,
3967                                              is_dev_replace);
3968
3969         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3970         atomic_dec(&fs_info->scrubs_running);
3971         wake_up(&fs_info->scrub_pause_wait);
3972
3973         wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
3974
3975         if (progress)
3976                 memcpy(progress, &sctx->stat, sizeof(*progress));
3977
3978         mutex_lock(&fs_info->scrub_lock);
3979         dev->scrub_device = NULL;
3980         scrub_workers_put(fs_info);
3981         mutex_unlock(&fs_info->scrub_lock);
3982
3983         scrub_put_ctx(sctx);
3984
3985         return ret;
3986 }
3987
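     /*
      * Ask every running scrub to pause and wait until all of them have
      * acknowledged the request, i.e. scrubs_paused has caught up with
      * scrubs_running.
      */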
3988 void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
3989 {
3990         mutex_lock(&fs_info->scrub_lock);
3991         atomic_inc(&fs_info->scrub_pause_req);
3992         while (atomic_read(&fs_info->scrubs_paused) !=
3993                atomic_read(&fs_info->scrubs_running)) {
3994                 mutex_unlock(&fs_info->scrub_lock);
3995                 wait_event(fs_info->scrub_pause_wait,
3996                            atomic_read(&fs_info->scrubs_paused) ==
3997                            atomic_read(&fs_info->scrubs_running));
3998                 mutex_lock(&fs_info->scrub_lock);
3999         }
4000         mutex_unlock(&fs_info->scrub_lock);
4001 }
4002
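     /* Drop the pause request and wake up scrubs waiting to resume. */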
4003 void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
4004 {
4005         atomic_dec(&fs_info->scrub_pause_req);
4006         wake_up(&fs_info->scrub_pause_wait);
4007 }
4008
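     /*
      * Cancel all scrubs running on this filesystem and wait for them to
      * finish. Returns -ENOTCONN if no scrub is running.
      */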
4009 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
4010 {
4011         mutex_lock(&fs_info->scrub_lock);
4012         if (!atomic_read(&fs_info->scrubs_running)) {
4013                 mutex_unlock(&fs_info->scrub_lock);
4014                 return -ENOTCONN;
4015         }
4016
4017         atomic_inc(&fs_info->scrub_cancel_req);
4018         while (atomic_read(&fs_info->scrubs_running)) {
4019                 mutex_unlock(&fs_info->scrub_lock);
4020                 wait_event(fs_info->scrub_pause_wait,
4021                            atomic_read(&fs_info->scrubs_running) == 0);
4022                 mutex_lock(&fs_info->scrub_lock);
4023         }
4024         atomic_dec(&fs_info->scrub_cancel_req);
4025         mutex_unlock(&fs_info->scrub_lock);
4026
4027         return 0;
4028 }
4029
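     /*
      * Cancel the scrub of a single device and wait until its scrub_ctx
      * has been torn down. Returns -ENOTCONN if the device is not being
      * scrubbed.
      */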
4030 int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
4031                            struct btrfs_device *dev)
4032 {
4033         struct scrub_ctx *sctx;
4034
4035         mutex_lock(&fs_info->scrub_lock);
4036         sctx = dev->scrub_device;
4037         if (!sctx) {
4038                 mutex_unlock(&fs_info->scrub_lock);
4039                 return -ENOTCONN;
4040         }
4041         atomic_inc(&sctx->cancel_req);
4042         while (dev->scrub_device) {
4043                 mutex_unlock(&fs_info->scrub_lock);
4044                 wait_event(fs_info->scrub_pause_wait,
4045                            dev->scrub_device == NULL);
4046                 mutex_lock(&fs_info->scrub_lock);
4047         }
4048         mutex_unlock(&fs_info->scrub_lock);
4049
4050         return 0;
4051 }
4052
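     /*
      * Copy the current scrub statistics for @devid into @progress.
      * Returns -ENODEV if the device does not exist, -ENOTCONN if it is
      * not being scrubbed.
      */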
4053 int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
4054                          struct btrfs_scrub_progress *progress)
4055 {
4056         struct btrfs_device *dev;
4057         struct scrub_ctx *sctx = NULL;
4058
4059         mutex_lock(&fs_info->fs_devices->device_list_mutex);
4060         dev = btrfs_find_device(fs_info, devid, NULL, NULL);
4061         if (dev)
4062                 sctx = dev->scrub_device;
4063         if (sctx)
4064                 memcpy(progress, &sctx->stat, sizeof(*progress));
4065         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4066
4067         return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
4068 }
4069
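     /*
      * Map a logical extent to the physical address, device and mirror
      * number of its first stripe. If the mapping fails or is shorter
      * than the extent, the output parameters are left untouched.
      */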
4070 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
4071                                u64 extent_logical, u64 extent_len,
4072                                u64 *extent_physical,
4073                                struct btrfs_device **extent_dev,
4074                                int *extent_mirror_num)
4075 {
4076         u64 mapped_length;
4077         struct btrfs_bio *bbio = NULL;
4078         int ret;
4079
4080         mapped_length = extent_len;
4081         ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
4082                               &mapped_length, &bbio, 0);
4083         if (ret || !bbio || mapped_length < extent_len ||
4084             !bbio->stripes[0].dev->bdev) {
4085                 btrfs_put_bbio(bbio);
4086                 return;
4087         }
4088
4089         *extent_physical = bbio->stripes[0].physical;
4090         *extent_mirror_num = bbio->mirror_num;
4091         *extent_dev = bbio->stripes[0].dev;
4092         btrfs_put_bbio(bbio);
4093 }
4094
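     /*
      * Initialize the write context. Only dev-replace gets a write
      * target; for a plain scrub just the mutex is set up.
      */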
4095 static int scrub_setup_wr_ctx(struct scrub_wr_ctx *wr_ctx,
4096                               struct btrfs_device *dev,
4097                               int is_dev_replace)
4098 {
4099         WARN_ON(wr_ctx->wr_curr_bio != NULL);
4100
4101         mutex_init(&wr_ctx->wr_lock);
4102         wr_ctx->wr_curr_bio = NULL;
4103         if (!is_dev_replace)
4104                 return 0;
4105
4106         WARN_ON(!dev->bdev);
4107         wr_ctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
4108         wr_ctx->tgtdev = dev;
4109         atomic_set(&wr_ctx->flush_all_writes, 0);
4110         return 0;
4111 }
4112
4113 static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
4114 {
4115         mutex_lock(&wr_ctx->wr_lock);
4116         kfree(wr_ctx->wr_curr_bio);
4117         wr_ctx->wr_curr_bio = NULL;
4118         mutex_unlock(&wr_ctx->wr_lock);
4119 }
4120
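     /*
      * Nodatacow extents can be rewritten in place, so during dev-replace
      * they are copied to the target through the page cache of the inodes
      * that reference them rather than block by block from the source
      * device. This queues the worker that performs the copy.
      */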
4121 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
4122                             int mirror_num, u64 physical_for_dev_replace)
4123 {
4124         struct scrub_copy_nocow_ctx *nocow_ctx;
4125         struct btrfs_fs_info *fs_info = sctx->fs_info;
4126
4127         nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
4128         if (!nocow_ctx) {
4129                 spin_lock(&sctx->stat_lock);
4130                 sctx->stat.malloc_errors++;
4131                 spin_unlock(&sctx->stat_lock);
4132                 return -ENOMEM;
4133         }
4134
4135         scrub_pending_trans_workers_inc(sctx);
4136
4137         nocow_ctx->sctx = sctx;
4138         nocow_ctx->logical = logical;
4139         nocow_ctx->len = len;
4140         nocow_ctx->mirror_num = mirror_num;
4141         nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
4142         btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
4143                         copy_nocow_pages_worker, NULL, NULL);
4144         INIT_LIST_HEAD(&nocow_ctx->inodes);
4145         btrfs_queue_work(fs_info->scrub_nocow_workers,
4146                          &nocow_ctx->work);
4147
4148         return 0;
4149 }
4150
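     /*
      * Backref walk callback: remember each (root, inode, offset) that
      * references the extent so the worker can copy its pages later.
      */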
4151 static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
4152 {
4153         struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
4154         struct scrub_nocow_inode *nocow_inode;
4155
4156         nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
4157         if (!nocow_inode)
4158                 return -ENOMEM;
4159         nocow_inode->inum = inum;
4160         nocow_inode->offset = offset;
4161         nocow_inode->root = root;
4162         list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
4163         return 0;
4164 }
4165
4166 #define COPY_COMPLETE 1
4167
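     /*
      * Worker: find all inodes that reference the extent and copy their
      * pages to the replace target. If the setup or the inode walk fails,
      * the dev-replace uncorrectable read error counter is bumped.
      */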
4168 static void copy_nocow_pages_worker(struct btrfs_work *work)
4169 {
4170         struct scrub_copy_nocow_ctx *nocow_ctx =
4171                 container_of(work, struct scrub_copy_nocow_ctx, work);
4172         struct scrub_ctx *sctx = nocow_ctx->sctx;
4173         struct btrfs_fs_info *fs_info = sctx->fs_info;
4174         struct btrfs_root *root = fs_info->extent_root;
4175         u64 logical = nocow_ctx->logical;
4176         u64 len = nocow_ctx->len;
4177         int mirror_num = nocow_ctx->mirror_num;
4178         u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
4179         int ret;
4180         struct btrfs_trans_handle *trans = NULL;
4181         struct btrfs_path *path;
4182         int not_written = 0;
4183
4184         path = btrfs_alloc_path();
4185         if (!path) {
4186                 spin_lock(&sctx->stat_lock);
4187                 sctx->stat.malloc_errors++;
4188                 spin_unlock(&sctx->stat_lock);
4189                 not_written = 1;
4190                 goto out;
4191         }
4192
4193         trans = btrfs_join_transaction(root);
4194         if (IS_ERR(trans)) {
4195                 not_written = 1;
4196                 goto out;
4197         }
4198
4199         ret = iterate_inodes_from_logical(logical, fs_info, path,
4200                                           record_inode_for_nocow, nocow_ctx);
4201         if (ret != 0 && ret != -ENOENT) {
4202                 btrfs_warn(fs_info,
4203                            "iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d",
4204                            logical, physical_for_dev_replace, len, mirror_num,
4205                            ret);
4206                 not_written = 1;
4207                 goto out;
4208         }
4209
4210         btrfs_end_transaction(trans);
4211         trans = NULL;
4212         while (!list_empty(&nocow_ctx->inodes)) {
4213                 struct scrub_nocow_inode *entry;
4214                 entry = list_first_entry(&nocow_ctx->inodes,
4215                                          struct scrub_nocow_inode,
4216                                          list);
4217                 list_del_init(&entry->list);
4218                 ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
4219                                                  entry->root, nocow_ctx);
4220                 kfree(entry);
4221                 if (ret == COPY_COMPLETE) {
4222                         ret = 0;
4223                         break;
4224                 } else if (ret) {
4225                         break;
4226                 }
4227         }
4228 out:
4229         while (!list_empty(&nocow_ctx->inodes)) {
4230                 struct scrub_nocow_inode *entry;
4231                 entry = list_first_entry(&nocow_ctx->inodes,
4232                                          struct scrub_nocow_inode,
4233                                          list);
4234                 list_del_init(&entry->list);
4235                 kfree(entry);
4236         }
4237         if (trans && !IS_ERR(trans))
4238                 btrfs_end_transaction(trans);
4239         if (not_written)
4240                 btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
4241                                             num_uncorrectable_read_errors);
4242
4243         btrfs_free_path(path);
4244         kfree(nocow_ctx);
4245
4246         scrub_pending_trans_workers_dec(sctx);
4247 }
4248
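     /*
      * With the range locked, check that it is not covered by an ordered
      * extent and that its extent map still maps to @logical. Returns 0
      * if the copy can proceed, 1 if the caller should skip this inode,
      * or a negative error.
      */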
4249 static int check_extent_to_block(struct btrfs_inode *inode, u64 start, u64 len,
4250                                  u64 logical)
4251 {
4252         struct extent_state *cached_state = NULL;
4253         struct btrfs_ordered_extent *ordered;
4254         struct extent_io_tree *io_tree;
4255         struct extent_map *em;
4256         u64 lockstart = start, lockend = start + len - 1;
4257         int ret = 0;
4258
4259         io_tree = &inode->io_tree;
4260
4261         lock_extent_bits(io_tree, lockstart, lockend, &cached_state);
4262         ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
4263         if (ordered) {
4264                 btrfs_put_ordered_extent(ordered);
4265                 ret = 1;
4266                 goto out_unlock;
4267         }
4268
4269         em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
4270         if (IS_ERR(em)) {
4271                 ret = PTR_ERR(em);
4272                 goto out_unlock;
4273         }
4274
4275         /*
4276          * This extent no longer covers the logical extent; move on
4277          * to the next inode.
4278          */
4279         if (em->block_start > logical ||
4280             em->block_start + em->block_len < logical + len) {
4281                 free_extent_map(em);
4282                 ret = 1;
4283                 goto out_unlock;
4284         }
4285         free_extent_map(em);
4286
4287 out_unlock:
4288         unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
4289                              GFP_NOFS);
4290         return ret;
4291 }
4292
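     /*
      * Copy the pages of one inode that back the nocow extent: read each
      * page through the page cache, re-checking the extent before and
      * after the read, and write it to the replace target.
      */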
4293 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
4294                                       struct scrub_copy_nocow_ctx *nocow_ctx)
4295 {
4296         struct btrfs_fs_info *fs_info = nocow_ctx->sctx->fs_info;
4297         struct btrfs_key key;
4298         struct inode *inode;
4299         struct page *page;
4300         struct btrfs_root *local_root;
4301         struct extent_io_tree *io_tree;
4302         u64 physical_for_dev_replace;
4303         u64 nocow_ctx_logical;
4304         u64 len = nocow_ctx->len;
4305         unsigned long index;
4306         int srcu_index;
4307         int ret = 0;
4308         int err = 0;
4309
4310         key.objectid = root;
4311         key.type = BTRFS_ROOT_ITEM_KEY;
4312         key.offset = (u64)-1;
4313
4314         srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
4315
4316         local_root = btrfs_read_fs_root_no_name(fs_info, &key);
4317         if (IS_ERR(local_root)) {
4318                 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
4319                 return PTR_ERR(local_root);
4320         }
4321
4322         key.type = BTRFS_INODE_ITEM_KEY;
4323         key.objectid = inum;
4324         key.offset = 0;
4325         inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
4326         srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
4327         if (IS_ERR(inode))
4328                 return PTR_ERR(inode);
4329
4330         /* Avoid racing with truncate/dio/punch hole. */
4331         inode_lock(inode);
4332         inode_dio_wait(inode);
4333
4334         physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
4335         io_tree = &BTRFS_I(inode)->io_tree;
4336         nocow_ctx_logical = nocow_ctx->logical;
4337
4338         ret = check_extent_to_block(BTRFS_I(inode), offset, len,
4339                         nocow_ctx_logical);
4340         if (ret) {
4341                 ret = ret > 0 ? 0 : ret;
4342                 goto out;
4343         }
4344
4345         while (len >= PAGE_SIZE) {
4346                 index = offset >> PAGE_SHIFT;
4347 again:
4348                 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
4349                 if (!page) {
4350                         btrfs_err(fs_info, "find_or_create_page() failed");
4351                         ret = -ENOMEM;
4352                         goto out;
4353                 }
4354
4355                 if (PageUptodate(page)) {
4356                         if (PageDirty(page))
4357                                 goto next_page;
4358                 } else {
4359                         ClearPageError(page);
4360                         err = extent_read_full_page(io_tree, page,
4361                                                            btrfs_get_extent,
4362                                                            nocow_ctx->mirror_num);
4363                         if (err) {
4364                                 ret = err;
4365                                 goto next_page;
4366                         }
4367
4368                         lock_page(page);
4369                         /*
4370                          * If the page has been removed from the page cache,
4371                          * its data is meaningless: it may be stale, and the
4372                          * new data may have been written into a new page in
4373                          * the page cache.
4374                          */
4375                         if (page->mapping != inode->i_mapping) {
4376                                 unlock_page(page);
4377                                 put_page(page);
4378                                 goto again;
4379                         }
4380                         if (!PageUptodate(page)) {
4381                                 ret = -EIO;
4382                                 goto next_page;
4383                         }
4384                 }
4385
4386                 ret = check_extent_to_block(BTRFS_I(inode), offset, len,
4387                                             nocow_ctx_logical);
4388                 if (ret) {
4389                         ret = ret > 0 ? 0 : ret;
4390                         goto next_page;
4391                 }
4392
4393                 err = write_page_nocow(nocow_ctx->sctx,
4394                                        physical_for_dev_replace, page);
4395                 if (err)
4396                         ret = err;
4397 next_page:
4398                 unlock_page(page);
4399                 put_page(page);
4400
4401                 if (ret)
4402                         break;
4403
4404                 offset += PAGE_SIZE;
4405                 physical_for_dev_replace += PAGE_SIZE;
4406                 nocow_ctx_logical += PAGE_SIZE;
4407                 len -= PAGE_SIZE;
4408         }
4409         ret = COPY_COMPLETE;
4410 out:
4411         inode_unlock(inode);
4412         iput(inode);
4413         return ret;
4414 }
4415
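     /*
      * Synchronously write a single page to the dev-replace target at
      * @physical_for_dev_replace.
      */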
4416 static int write_page_nocow(struct scrub_ctx *sctx,
4417                             u64 physical_for_dev_replace, struct page *page)
4418 {
4419         struct bio *bio;
4420         struct btrfs_device *dev;
4421         int ret;
4422
4423         dev = sctx->wr_ctx.tgtdev;
4424         if (!dev)
4425                 return -EIO;
4426         if (!dev->bdev) {
4427                 btrfs_warn_rl(dev->fs_info,
4428                         "scrub write_page_nocow(bdev == NULL) is unexpected");
4429                 return -EIO;
4430         }
4431         bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
4432         if (!bio) {
4433                 spin_lock(&sctx->stat_lock);
4434                 sctx->stat.malloc_errors++;
4435                 spin_unlock(&sctx->stat_lock);
4436                 return -ENOMEM;
4437         }
4438         bio->bi_iter.bi_size = 0;
4439         bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
4440         bio->bi_bdev = dev->bdev;
4441         bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
4442         ret = bio_add_page(bio, page, PAGE_SIZE, 0);
4443         if (ret != PAGE_SIZE) {
4444 leave_with_eio:
4445                 bio_put(bio);
4446                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
4447                 return -EIO;
4448         }
4449
4450         if (btrfsic_submit_bio_wait(bio))
4451                 goto leave_with_eio;
4452
4453         bio_put(bio);
4454         return 0;
4455 }