2 * Copyright (C) 2011, 2012 STRATO. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/blkdev.h>
20 #include <linux/ratelimit.h>
24 #include "ordered-data.h"
25 #include "transaction.h"
27 #include "extent_io.h"
28 #include "dev-replace.h"
29 #include "check-integrity.h"
30 #include "rcu-string.h"
34 * This is only the first step towards a full-featured scrub. It reads all
35 * extents and super blocks and verifies the checksums. In case a bad checksum
36 * is found or the extent cannot be read, good data will be written back if any can be found.
39 * Future enhancements:
40 * - In case an unrepairable extent is encountered, track which files are
41 * affected and report them
42 * - track and record media errors, throw out bad devices
43 * - add a mode to also read unallocated space
50 * the following three values only influence the performance.
51 * The last one configures the number of parallel and outstanding I/O
52 * operations. The first two values configure an upper limit for the number
53 * of (dynamically allocated) pages that are added to a bio.
55 #define SCRUB_PAGES_PER_RD_BIO 32 /* 128k per bio */
56 #define SCRUB_PAGES_PER_WR_BIO 32 /* 128k per bio */
57 #define SCRUB_BIOS_PER_SCTX 64 /* 8MB per device in flight */
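/*
 * For illustration (assuming the common 4 KiB PAGE_SIZE): 32 pages per bio
 * gives the 128 KiB per bio noted above, and 64 bios * 128 KiB gives the
 * 8 MiB per device that may be in flight at once.
 */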
60 * the following value times PAGE_SIZE needs to be large enough to match the
61 * largest node/leaf/sector size that shall be supported.
62 * Values larger than BTRFS_STRIPE_LEN are not supported.
64 #define SCRUB_MAX_PAGES_PER_BLOCK 16 /* 64k per node/leaf/sector */
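/*
 * Example (again assuming 4 KiB pages): 16 pages cover the 64 KiB maximum
 * supported nodesize; architectures with larger pages need fewer pages per
 * block.
 */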
66 struct scrub_recover {
68 struct btrfs_bio *bbio;
73 struct scrub_block *sblock;
75 struct btrfs_device *dev;
76 struct list_head list;
77 u64 flags; /* extent flags */
81 u64 physical_for_dev_replace;
84 unsigned int mirror_num:8;
85 unsigned int have_csum:1;
86 unsigned int io_error:1;
88 u8 csum[BTRFS_CSUM_SIZE];
90 struct scrub_recover *recover;
95 struct scrub_ctx *sctx;
96 struct btrfs_device *dev;
101 #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
102 struct scrub_page *pagev[SCRUB_PAGES_PER_WR_BIO];
104 struct scrub_page *pagev[SCRUB_PAGES_PER_RD_BIO];
108 struct btrfs_work work;
112 struct scrub_page *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
114 atomic_t outstanding_pages;
115 refcount_t refs; /* free mem on transition to zero */
116 struct scrub_ctx *sctx;
117 struct scrub_parity *sparity;
119 unsigned int header_error:1;
120 unsigned int checksum_error:1;
121 unsigned int no_io_error_seen:1;
122 unsigned int generation_error:1; /* also sets header_error */
124 /* The following is for the data used to check parity */
125 /* It is for data with a checksum */
126 unsigned int data_corrected:1;
128 struct btrfs_work work;
131 /* Used for chunks with a parity stripe, such as RAID5/6 */
132 struct scrub_parity {
133 struct scrub_ctx *sctx;
135 struct btrfs_device *scrub_dev;
147 struct list_head spages;
149 /* Work of parity check and repair */
150 struct btrfs_work work;
152 /* Mark the parity blocks which have data */
153 unsigned long *dbitmap;
156 * Mark the parity blocks which have data, but where errors happened
157 * while reading or checking that data
159 unsigned long *ebitmap;
161 unsigned long bitmap[0];
164 struct scrub_wr_ctx {
165 struct scrub_bio *wr_curr_bio;
166 struct btrfs_device *tgtdev;
167 int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
168 atomic_t flush_all_writes;
169 struct mutex wr_lock;
173 struct scrub_bio *bios[SCRUB_BIOS_PER_SCTX];
174 struct btrfs_fs_info *fs_info;
177 atomic_t bios_in_flight;
178 atomic_t workers_pending;
179 spinlock_t list_lock;
180 wait_queue_head_t list_wait;
182 struct list_head csum_list;
185 int pages_per_rd_bio;
190 struct scrub_wr_ctx wr_ctx;
195 struct btrfs_scrub_progress stat;
196 spinlock_t stat_lock;
199 * Use a ref counter to avoid use-after-free issues. Scrub workers
200 * decrement bios_in_flight and workers_pending and then do a wakeup
201 * on the list_wait wait queue. We must ensure the main scrub task
202 * doesn't free the scrub context before or while the workers are
203 * doing the wakeup() call.
208 struct scrub_fixup_nodatasum {
209 struct scrub_ctx *sctx;
210 struct btrfs_device *dev;
212 struct btrfs_root *root;
213 struct btrfs_work work;
217 struct scrub_nocow_inode {
221 struct list_head list;
224 struct scrub_copy_nocow_ctx {
225 struct scrub_ctx *sctx;
229 u64 physical_for_dev_replace;
230 struct list_head inodes;
231 struct btrfs_work work;
234 struct scrub_warning {
235 struct btrfs_path *path;
236 u64 extent_item_size;
240 struct btrfs_device *dev;
243 static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
244 static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
245 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
246 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
247 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
248 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
249 struct scrub_block *sblocks_for_recheck);
250 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
251 struct scrub_block *sblock,
252 int retry_failed_mirror);
253 static void scrub_recheck_block_checksum(struct scrub_block *sblock);
254 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
255 struct scrub_block *sblock_good);
256 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
257 struct scrub_block *sblock_good,
258 int page_num, int force_write);
259 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
260 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
262 static int scrub_checksum_data(struct scrub_block *sblock);
263 static int scrub_checksum_tree_block(struct scrub_block *sblock);
264 static int scrub_checksum_super(struct scrub_block *sblock);
265 static void scrub_block_get(struct scrub_block *sblock);
266 static void scrub_block_put(struct scrub_block *sblock);
267 static void scrub_page_get(struct scrub_page *spage);
268 static void scrub_page_put(struct scrub_page *spage);
269 static void scrub_parity_get(struct scrub_parity *sparity);
270 static void scrub_parity_put(struct scrub_parity *sparity);
271 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
272 struct scrub_page *spage);
273 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
274 u64 physical, struct btrfs_device *dev, u64 flags,
275 u64 gen, int mirror_num, u8 *csum, int force,
276 u64 physical_for_dev_replace);
277 static void scrub_bio_end_io(struct bio *bio);
278 static void scrub_bio_end_io_worker(struct btrfs_work *work);
279 static void scrub_block_complete(struct scrub_block *sblock);
280 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
281 u64 extent_logical, u64 extent_len,
282 u64 *extent_physical,
283 struct btrfs_device **extent_dev,
284 int *extent_mirror_num);
285 static int scrub_setup_wr_ctx(struct scrub_wr_ctx *wr_ctx,
286 struct btrfs_device *dev,
288 static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
289 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
290 struct scrub_page *spage);
291 static void scrub_wr_submit(struct scrub_ctx *sctx);
292 static void scrub_wr_bio_end_io(struct bio *bio);
293 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
294 static int write_page_nocow(struct scrub_ctx *sctx,
295 u64 physical_for_dev_replace, struct page *page);
296 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
297 struct scrub_copy_nocow_ctx *ctx);
298 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
299 int mirror_num, u64 physical_for_dev_replace);
300 static void copy_nocow_pages_worker(struct btrfs_work *work);
301 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
302 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
303 static void scrub_put_ctx(struct scrub_ctx *sctx);
306 static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
308 refcount_inc(&sctx->refs);
309 atomic_inc(&sctx->bios_in_flight);
312 static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
314 atomic_dec(&sctx->bios_in_flight);
315 wake_up(&sctx->list_wait);
319 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
321 while (atomic_read(&fs_info->scrub_pause_req)) {
322 mutex_unlock(&fs_info->scrub_lock);
323 wait_event(fs_info->scrub_pause_wait,
324 atomic_read(&fs_info->scrub_pause_req) == 0);
325 mutex_lock(&fs_info->scrub_lock);
329 static void scrub_pause_on(struct btrfs_fs_info *fs_info)
331 atomic_inc(&fs_info->scrubs_paused);
332 wake_up(&fs_info->scrub_pause_wait);
335 static void scrub_pause_off(struct btrfs_fs_info *fs_info)
337 mutex_lock(&fs_info->scrub_lock);
338 __scrub_blocked_if_needed(fs_info);
339 atomic_dec(&fs_info->scrubs_paused);
340 mutex_unlock(&fs_info->scrub_lock);
342 wake_up(&fs_info->scrub_pause_wait);
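/*
 * Briefly mark this scrub as paused and resume it again: any pending pause
 * request (scrub_pause_req) is honoured at this point before scrubbing
 * continues.
 */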
345 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
347 scrub_pause_on(fs_info);
348 scrub_pause_off(fs_info);
352 * used for workers that require transaction commits (i.e., for the NOCOW case)
355 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
357 struct btrfs_fs_info *fs_info = sctx->fs_info;
359 refcount_inc(&sctx->refs);
361 * increment scrubs_running to prevent cancel requests from
362 * completing as long as a worker is running. we must also
363 * increment scrubs_paused to prevent deadlocking on pause
364 * requests used for transaction commits (as the worker uses a
365 * transaction context). it is safe to regard the worker
366 * as paused for all practical matters. effectively, we only
367 * prevent cancellation requests from completing.
369 mutex_lock(&fs_info->scrub_lock);
370 atomic_inc(&fs_info->scrubs_running);
371 atomic_inc(&fs_info->scrubs_paused);
372 mutex_unlock(&fs_info->scrub_lock);
375 * the check of the @scrubs_running == @scrubs_paused condition
376 * inside wait_event() is not an atomic operation,
377 * which means we may inc/dec @scrubs_running/@scrubs_paused
378 * at any time. Wake up @scrub_pause_wait as often as
379 * we can so that a blocked transaction commit waits as little as possible.
381 wake_up(&fs_info->scrub_pause_wait);
383 atomic_inc(&sctx->workers_pending);
386 /* used for workers that require transaction commits */
387 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
389 struct btrfs_fs_info *fs_info = sctx->fs_info;
392 * see scrub_pending_trans_workers_inc() for why we pretend
393 * to be paused in the scrub counters
395 mutex_lock(&fs_info->scrub_lock);
396 atomic_dec(&fs_info->scrubs_running);
397 atomic_dec(&fs_info->scrubs_paused);
398 mutex_unlock(&fs_info->scrub_lock);
399 atomic_dec(&sctx->workers_pending);
400 wake_up(&fs_info->scrub_pause_wait);
401 wake_up(&sctx->list_wait);
405 static void scrub_free_csums(struct scrub_ctx *sctx)
407 while (!list_empty(&sctx->csum_list)) {
408 struct btrfs_ordered_sum *sum;
409 sum = list_first_entry(&sctx->csum_list,
410 struct btrfs_ordered_sum, list);
411 list_del(&sum->list);
416 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
423 scrub_free_wr_ctx(&sctx->wr_ctx);
425 /* this can happen when scrub is cancelled */
426 if (sctx->curr != -1) {
427 struct scrub_bio *sbio = sctx->bios[sctx->curr];
429 for (i = 0; i < sbio->page_count; i++) {
430 WARN_ON(!sbio->pagev[i]->page);
431 scrub_block_put(sbio->pagev[i]->sblock);
436 for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
437 struct scrub_bio *sbio = sctx->bios[i];
444 scrub_free_csums(sctx);
448 static void scrub_put_ctx(struct scrub_ctx *sctx)
450 if (refcount_dec_and_test(&sctx->refs))
451 scrub_free_ctx(sctx);
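/*
 * Allocate and initialize a scrub context for @dev: the fixed array of
 * SCRUB_BIOS_PER_SCTX scrub bios, the statistics and wait queue, and (for
 * device replace) the write context that targets the replacement device.
 */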
454 static noinline_for_stack
455 struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
457 struct scrub_ctx *sctx;
459 struct btrfs_fs_info *fs_info = dev->fs_info;
462 sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
465 refcount_set(&sctx->refs, 1);
466 sctx->is_dev_replace = is_dev_replace;
467 sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
469 sctx->fs_info = dev->fs_info;
470 for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
471 struct scrub_bio *sbio;
473 sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
476 sctx->bios[i] = sbio;
480 sbio->page_count = 0;
481 btrfs_init_work(&sbio->work, btrfs_scrub_helper,
482 scrub_bio_end_io_worker, NULL, NULL);
484 if (i != SCRUB_BIOS_PER_SCTX - 1)
485 sctx->bios[i]->next_free = i + 1;
487 sctx->bios[i]->next_free = -1;
489 sctx->first_free = 0;
490 sctx->nodesize = fs_info->nodesize;
491 sctx->sectorsize = fs_info->sectorsize;
492 atomic_set(&sctx->bios_in_flight, 0);
493 atomic_set(&sctx->workers_pending, 0);
494 atomic_set(&sctx->cancel_req, 0);
495 sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
496 INIT_LIST_HEAD(&sctx->csum_list);
498 spin_lock_init(&sctx->list_lock);
499 spin_lock_init(&sctx->stat_lock);
500 init_waitqueue_head(&sctx->list_wait);
502 ret = scrub_setup_wr_ctx(&sctx->wr_ctx,
503 fs_info->dev_replace.tgtdev, is_dev_replace);
505 scrub_free_ctx(sctx);
511 scrub_free_ctx(sctx);
512 return ERR_PTR(-ENOMEM);
515 static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
522 struct extent_buffer *eb;
523 struct btrfs_inode_item *inode_item;
524 struct scrub_warning *swarn = warn_ctx;
525 struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
526 struct inode_fs_paths *ipath = NULL;
527 struct btrfs_root *local_root;
528 struct btrfs_key root_key;
529 struct btrfs_key key;
531 root_key.objectid = root;
532 root_key.type = BTRFS_ROOT_ITEM_KEY;
533 root_key.offset = (u64)-1;
534 local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
535 if (IS_ERR(local_root)) {
536 ret = PTR_ERR(local_root);
541 * this makes the path point to (inum INODE_ITEM ioff)
544 key.type = BTRFS_INODE_ITEM_KEY;
547 ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
549 btrfs_release_path(swarn->path);
553 eb = swarn->path->nodes[0];
554 inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
555 struct btrfs_inode_item);
556 isize = btrfs_inode_size(eb, inode_item);
557 nlink = btrfs_inode_nlink(eb, inode_item);
558 btrfs_release_path(swarn->path);
560 ipath = init_ipath(4096, local_root, swarn->path);
562 ret = PTR_ERR(ipath);
566 ret = paths_from_inode(inum, ipath);
572 * we deliberately ignore the fact that ipath might have been too small
573 * to hold all of the paths here
575 for (i = 0; i < ipath->fspath->elem_cnt; ++i)
576 btrfs_warn_in_rcu(fs_info,
577 "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
578 swarn->errstr, swarn->logical,
579 rcu_str_deref(swarn->dev->name),
580 (unsigned long long)swarn->sector,
582 min(isize - offset, (u64)PAGE_SIZE), nlink,
583 (char *)(unsigned long)ipath->fspath->val[i]);
589 btrfs_warn_in_rcu(fs_info,
590 "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
591 swarn->errstr, swarn->logical,
592 rcu_str_deref(swarn->dev->name),
593 (unsigned long long)swarn->sector,
594 root, inum, offset, ret);
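/*
 * Report a corrupted block in human readable form: resolve the logical
 * address either to tree backrefs (for metadata) or, via
 * scrub_print_warning_inode(), to the inodes and file paths that reference
 * the damaged data.
 */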
600 static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
602 struct btrfs_device *dev;
603 struct btrfs_fs_info *fs_info;
604 struct btrfs_path *path;
605 struct btrfs_key found_key;
606 struct extent_buffer *eb;
607 struct btrfs_extent_item *ei;
608 struct scrub_warning swarn;
609 unsigned long ptr = 0;
617 WARN_ON(sblock->page_count < 1);
618 dev = sblock->pagev[0]->dev;
619 fs_info = sblock->sctx->fs_info;
621 path = btrfs_alloc_path();
625 swarn.sector = (sblock->pagev[0]->physical) >> 9;
626 swarn.logical = sblock->pagev[0]->logical;
627 swarn.errstr = errstr;
630 ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
635 extent_item_pos = swarn.logical - found_key.objectid;
636 swarn.extent_item_size = found_key.offset;
639 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
640 item_size = btrfs_item_size_nr(eb, path->slots[0]);
642 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
644 ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
645 item_size, &ref_root,
647 btrfs_warn_in_rcu(fs_info,
648 "%s at logical %llu on dev %s, sector %llu: metadata %s (level %d) in tree %llu",
649 errstr, swarn.logical,
650 rcu_str_deref(dev->name),
651 (unsigned long long)swarn.sector,
652 ref_level ? "node" : "leaf",
653 ret < 0 ? -1 : ref_level,
654 ret < 0 ? -1 : ref_root);
656 btrfs_release_path(path);
658 btrfs_release_path(path);
661 iterate_extent_inodes(fs_info, found_key.objectid,
663 scrub_print_warning_inode, &swarn);
667 btrfs_free_path(path);
670 static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
672 struct page *page = NULL;
674 struct scrub_fixup_nodatasum *fixup = fixup_ctx;
677 struct btrfs_key key;
678 struct inode *inode = NULL;
679 struct btrfs_fs_info *fs_info;
680 u64 end = offset + PAGE_SIZE - 1;
681 struct btrfs_root *local_root;
685 key.type = BTRFS_ROOT_ITEM_KEY;
686 key.offset = (u64)-1;
688 fs_info = fixup->root->fs_info;
689 srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
691 local_root = btrfs_read_fs_root_no_name(fs_info, &key);
692 if (IS_ERR(local_root)) {
693 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
694 return PTR_ERR(local_root);
697 key.type = BTRFS_INODE_ITEM_KEY;
700 inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
701 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
703 return PTR_ERR(inode);
705 index = offset >> PAGE_SHIFT;
707 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
713 if (PageUptodate(page)) {
714 if (PageDirty(page)) {
716 * we need to write the data to the defective sector. the
717 * data that was in that sector is not in memory,
718 * because the page was modified. we must not write the
719 * modified page to that sector.
721 * TODO: what could be done here: wait for the delalloc
722 * runner to write out that page (might involve
723 * COW) and see whether the sector is still
724 * referenced afterwards.
726 * For the time being, we'll treat this error as
727 * uncorrectable, although there is a chance that a
728 * later scrub will find the bad sector again and that
729 * no dirty page will be in memory by then.
734 ret = repair_io_failure(BTRFS_I(inode), offset, PAGE_SIZE,
735 fixup->logical, page,
736 offset - page_offset(page),
742 * we need to get good data first. the general readpage path
743 * will call repair_io_failure for us, we just have to make
744 * sure we read the bad mirror.
746 ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
749 /* set_extent_bits should give proper error */
756 ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
759 wait_on_page_locked(page);
761 corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
762 end, EXTENT_DAMAGED, 0, NULL);
764 clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
777 if (ret == 0 && corrected) {
779 * we only need to call readpage for one of the inodes belonging
780 * to this extent. so make iterate_extent_inodes stop
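/*
 * Worker for data blocks that carry no checksum: trigger a regular buffered
 * read of the affected file range (scrub_fixup_readpage) so that the normal
 * read-repair path rewrites the bad copy, then account the block as
 * corrected or uncorrectable.
 */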
788 static void scrub_fixup_nodatasum(struct btrfs_work *work)
790 struct btrfs_fs_info *fs_info;
792 struct scrub_fixup_nodatasum *fixup;
793 struct scrub_ctx *sctx;
794 struct btrfs_trans_handle *trans = NULL;
795 struct btrfs_path *path;
796 int uncorrectable = 0;
798 fixup = container_of(work, struct scrub_fixup_nodatasum, work);
800 fs_info = fixup->root->fs_info;
802 path = btrfs_alloc_path();
804 spin_lock(&sctx->stat_lock);
805 ++sctx->stat.malloc_errors;
806 spin_unlock(&sctx->stat_lock);
811 trans = btrfs_join_transaction(fixup->root);
818 * the idea is to trigger a regular read through the standard path. we
819 * read a page from the (failed) logical address by specifying the
820 * corresponding copynum of the failed sector. thus, that readpage is expected to fail.
822 * that is the point where on-the-fly error correction will kick in
823 * (once it's finished) and rewrite the failed sector if a good copy can be found.
826 ret = iterate_inodes_from_logical(fixup->logical, fs_info, path,
827 scrub_fixup_readpage, fixup);
834 spin_lock(&sctx->stat_lock);
835 ++sctx->stat.corrected_errors;
836 spin_unlock(&sctx->stat_lock);
839 if (trans && !IS_ERR(trans))
840 btrfs_end_transaction(trans);
842 spin_lock(&sctx->stat_lock);
843 ++sctx->stat.uncorrectable_errors;
844 spin_unlock(&sctx->stat_lock);
845 btrfs_dev_replace_stats_inc(
846 &fs_info->dev_replace.num_uncorrectable_read_errors);
847 btrfs_err_rl_in_rcu(fs_info,
848 "unable to fixup (nodatasum) error at logical %llu on dev %s",
849 fixup->logical, rcu_str_deref(fixup->dev->name));
852 btrfs_free_path(path);
855 scrub_pending_trans_workers_dec(sctx);
858 static inline void scrub_get_recover(struct scrub_recover *recover)
860 refcount_inc(&recover->refs);
863 static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
864 struct scrub_recover *recover)
866 if (refcount_dec_and_test(&recover->refs)) {
867 btrfs_bio_counter_dec(fs_info);
868 btrfs_put_bbio(recover->bbio);
874 * scrub_handle_errored_block gets called when either verification of the
875 * pages failed or the bio failed to read, e.g. with EIO. In the latter
876 * case, this function handles all pages in the bio, even though only one may be bad.
878 * The goal of this function is to repair the errored block by using the
879 * contents of one of the mirrors.
881 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
883 struct scrub_ctx *sctx = sblock_to_check->sctx;
884 struct btrfs_device *dev;
885 struct btrfs_fs_info *fs_info;
888 unsigned int failed_mirror_index;
889 unsigned int is_metadata;
890 unsigned int have_csum;
891 struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
892 struct scrub_block *sblock_bad;
897 static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
898 DEFAULT_RATELIMIT_BURST);
900 BUG_ON(sblock_to_check->page_count < 1);
901 fs_info = sctx->fs_info;
902 if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
904 * if we find an error in a super block, we just report it;
905 * super blocks get rewritten with the next transaction commit anyway
908 spin_lock(&sctx->stat_lock);
909 ++sctx->stat.super_errors;
910 spin_unlock(&sctx->stat_lock);
913 length = sblock_to_check->page_count * PAGE_SIZE;
914 logical = sblock_to_check->pagev[0]->logical;
915 BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
916 failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
917 is_metadata = !(sblock_to_check->pagev[0]->flags &
918 BTRFS_EXTENT_FLAG_DATA);
919 have_csum = sblock_to_check->pagev[0]->have_csum;
920 dev = sblock_to_check->pagev[0]->dev;
922 if (sctx->is_dev_replace && !is_metadata && !have_csum) {
923 sblocks_for_recheck = NULL;
928 * read all mirrors one after the other. This includes
929 * re-reading the extent or metadata block that failed (which is
930 * the reason this fixup code was called) another time,
931 * page by page this time, in order to know which pages
932 * caused I/O errors and which ones are good (for all mirrors).
933 * It is the goal to handle the situation when more than one
934 * mirror contains I/O errors, but the errors do not
935 * overlap, i.e. the data can be repaired by selecting the
936 * pages from those mirrors without I/O error on the
937 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
938 * would be that mirror #1 has an I/O error on the first page,
939 * the second page is good, and mirror #2 has an I/O error on
940 * the second page, but the first page is good.
941 * Then the first page of the first mirror can be repaired by
942 * taking the first page of the second mirror, and the
943 * second page of the second mirror can be repaired by
944 * copying the contents of the 2nd page of the 1st mirror.
945 * One more note: if the pages of one mirror contain I/O
946 * errors, the checksum cannot be verified. In order to get
947 * the best data for repairing, the first attempt is to find
948 * a mirror without I/O errors and with a validated checksum.
949 * Only if this is not possible, the pages are picked from
950 * mirrors with I/O errors without considering the checksum.
951 * If the latter is the case, at the end, the checksum of the
952 * repaired area is verified in order to correctly maintain the statistics.
956 sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
957 sizeof(*sblocks_for_recheck), GFP_NOFS);
958 if (!sblocks_for_recheck) {
959 spin_lock(&sctx->stat_lock);
960 sctx->stat.malloc_errors++;
961 sctx->stat.read_errors++;
962 sctx->stat.uncorrectable_errors++;
963 spin_unlock(&sctx->stat_lock);
964 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
968 /* setup the context, map the logical blocks and alloc the pages */
969 ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
971 spin_lock(&sctx->stat_lock);
972 sctx->stat.read_errors++;
973 sctx->stat.uncorrectable_errors++;
974 spin_unlock(&sctx->stat_lock);
975 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
978 BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
979 sblock_bad = sblocks_for_recheck + failed_mirror_index;
981 /* build and submit the bios for the failed mirror, check checksums */
982 scrub_recheck_block(fs_info, sblock_bad, 1);
984 if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
985 sblock_bad->no_io_error_seen) {
987 * the error disappeared after reading page by page, or
988 * the area was part of a huge bio and other parts of the
989 * bio caused I/O errors, or the block layer merged several
990 * read requests into one and the error is caused by a
991 * different bio (usually it is one of the two latter cases)
994 spin_lock(&sctx->stat_lock);
995 sctx->stat.unverified_errors++;
996 sblock_to_check->data_corrected = 1;
997 spin_unlock(&sctx->stat_lock);
999 if (sctx->is_dev_replace)
1000 scrub_write_block_to_dev_replace(sblock_bad);
1004 if (!sblock_bad->no_io_error_seen) {
1005 spin_lock(&sctx->stat_lock);
1006 sctx->stat.read_errors++;
1007 spin_unlock(&sctx->stat_lock);
1008 if (__ratelimit(&_rs))
1009 scrub_print_warning("i/o error", sblock_to_check);
1010 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
1011 } else if (sblock_bad->checksum_error) {
1012 spin_lock(&sctx->stat_lock);
1013 sctx->stat.csum_errors++;
1014 spin_unlock(&sctx->stat_lock);
1015 if (__ratelimit(&_rs))
1016 scrub_print_warning("checksum error", sblock_to_check);
1017 btrfs_dev_stat_inc_and_print(dev,
1018 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1019 } else if (sblock_bad->header_error) {
1020 spin_lock(&sctx->stat_lock);
1021 sctx->stat.verify_errors++;
1022 spin_unlock(&sctx->stat_lock);
1023 if (__ratelimit(&_rs))
1024 scrub_print_warning("checksum/header error",
1026 if (sblock_bad->generation_error)
1027 btrfs_dev_stat_inc_and_print(dev,
1028 BTRFS_DEV_STAT_GENERATION_ERRS);
1030 btrfs_dev_stat_inc_and_print(dev,
1031 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1034 if (sctx->readonly) {
1035 ASSERT(!sctx->is_dev_replace);
1039 if (!is_metadata && !have_csum) {
1040 struct scrub_fixup_nodatasum *fixup_nodatasum;
1042 WARN_ON(sctx->is_dev_replace);
1047 * !is_metadata and !have_csum, this means that the data
1048 * might not be COWed, that it might be modified
1049 * concurrently. The general strategy to work on the
1050 * commit root does not help in the case when COW is not used.
1053 fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
1054 if (!fixup_nodatasum)
1055 goto did_not_correct_error;
1056 fixup_nodatasum->sctx = sctx;
1057 fixup_nodatasum->dev = dev;
1058 fixup_nodatasum->logical = logical;
1059 fixup_nodatasum->root = fs_info->extent_root;
1060 fixup_nodatasum->mirror_num = failed_mirror_index + 1;
1061 scrub_pending_trans_workers_inc(sctx);
1062 btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
1063 scrub_fixup_nodatasum, NULL, NULL);
1064 btrfs_queue_work(fs_info->scrub_workers,
1065 &fixup_nodatasum->work);
1070 * now build and submit the bios for the other mirrors, check their checksums.
1072 * First try to pick the mirror which is completely without I/O
1073 * errors and also does not have a checksum error.
1074 * If one is found, and if a checksum is present, the full block
1075 * that is known to contain an error is rewritten. Afterwards
1076 * the block is known to be corrected.
1077 * If a mirror is found which is completely correct, and no
1078 * checksum is present, only those pages are rewritten that had
1079 * an I/O error in the block to be repaired, since it cannot be
1080 * determined, which copy of the other pages is better (and it
1081 * could happen otherwise that a correct page would be
1082 * overwritten by a bad one).
1084 for (mirror_index = 0;
1085 mirror_index < BTRFS_MAX_MIRRORS &&
1086 sblocks_for_recheck[mirror_index].page_count > 0;
1088 struct scrub_block *sblock_other;
1090 if (mirror_index == failed_mirror_index)
1092 sblock_other = sblocks_for_recheck + mirror_index;
1094 /* build and submit the bios, check checksums */
1095 scrub_recheck_block(fs_info, sblock_other, 0);
1097 if (!sblock_other->header_error &&
1098 !sblock_other->checksum_error &&
1099 sblock_other->no_io_error_seen) {
1100 if (sctx->is_dev_replace) {
1101 scrub_write_block_to_dev_replace(sblock_other);
1102 goto corrected_error;
1104 ret = scrub_repair_block_from_good_copy(
1105 sblock_bad, sblock_other);
1107 goto corrected_error;
1112 if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
1113 goto did_not_correct_error;
1116 * In case of I/O errors in the area that is supposed to be
1117 * repaired, continue by picking good copies of those pages.
1118 * Select the good pages from mirrors to rewrite bad pages from
1119 * the area to fix. Afterwards verify the checksum of the block
1120 * that is supposed to be repaired. This verification step is
1121 * only done for the purpose of statistics counting and for the
1122 * final scrub report on whether errors remain.
1123 * A perfect algorithm could make use of the checksum and try
1124 * all possible combinations of pages from the different mirrors
1125 * until the checksum verification succeeds. For example, when
1126 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
1127 * of mirror #2 is readable but the final checksum test fails,
1128 * then the 2nd page of mirror #3 could be tried, whether now
1129 * the final checksum succeeds. But this would be a rare
1130 * exception and is therefore not implemented. At least it is
1131 * avoided that the good copy is overwritten.
1132 * A more useful improvement would be to pick the sectors
1133 * without I/O error based on sector sizes (512 bytes on legacy
1134 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
1135 * mirror could be repaired by taking 512 bytes of a different
1136 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
1137 * area are unreadable.
1140 for (page_num = 0; page_num < sblock_bad->page_count;
1142 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1143 struct scrub_block *sblock_other = NULL;
1145 /* in plain scrub mode, skip pages that did not have an I/O error */
1146 if (!page_bad->io_error && !sctx->is_dev_replace)
1149 /* try to find no-io-error page in mirrors */
1150 if (page_bad->io_error) {
1151 for (mirror_index = 0;
1152 mirror_index < BTRFS_MAX_MIRRORS &&
1153 sblocks_for_recheck[mirror_index].page_count > 0;
1155 if (!sblocks_for_recheck[mirror_index].
1156 pagev[page_num]->io_error) {
1157 sblock_other = sblocks_for_recheck +
1166 if (sctx->is_dev_replace) {
1168 * did not find a mirror to fetch the page
1169 * from. scrub_write_page_to_dev_replace()
1170 * handles this case (page->io_error), by
1171 * filling the block with zeros before
1172 * submitting the write request
1175 sblock_other = sblock_bad;
1177 if (scrub_write_page_to_dev_replace(sblock_other,
1179 btrfs_dev_replace_stats_inc(
1180 &fs_info->dev_replace.num_write_errors);
1183 } else if (sblock_other) {
1184 ret = scrub_repair_page_from_good_copy(sblock_bad,
1188 page_bad->io_error = 0;
1194 if (success && !sctx->is_dev_replace) {
1195 if (is_metadata || have_csum) {
1197 * need to verify the checksum now that all
1198 * sectors on disk are repaired (the write
1199 * request for data to be repaired is on its way).
1200 * Just be lazy and use scrub_recheck_block()
1201 * which re-reads the data before the checksum
1202 * is verified, but most likely the data comes out
1203 * of the page cache.
1205 scrub_recheck_block(fs_info, sblock_bad, 1);
1206 if (!sblock_bad->header_error &&
1207 !sblock_bad->checksum_error &&
1208 sblock_bad->no_io_error_seen)
1209 goto corrected_error;
1211 goto did_not_correct_error;
1214 spin_lock(&sctx->stat_lock);
1215 sctx->stat.corrected_errors++;
1216 sblock_to_check->data_corrected = 1;
1217 spin_unlock(&sctx->stat_lock);
1218 btrfs_err_rl_in_rcu(fs_info,
1219 "fixed up error at logical %llu on dev %s",
1220 logical, rcu_str_deref(dev->name));
1223 did_not_correct_error:
1224 spin_lock(&sctx->stat_lock);
1225 sctx->stat.uncorrectable_errors++;
1226 spin_unlock(&sctx->stat_lock);
1227 btrfs_err_rl_in_rcu(fs_info,
1228 "unable to fixup (regular) error at logical %llu on dev %s",
1229 logical, rcu_str_deref(dev->name));
1233 if (sblocks_for_recheck) {
1234 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
1236 struct scrub_block *sblock = sblocks_for_recheck +
1238 struct scrub_recover *recover;
1241 for (page_index = 0; page_index < sblock->page_count;
1243 sblock->pagev[page_index]->sblock = NULL;
1244 recover = sblock->pagev[page_index]->recover;
1246 scrub_put_recover(fs_info, recover);
1247 sblock->pagev[page_index]->recover =
1250 scrub_page_put(sblock->pagev[page_index]);
1253 kfree(sblocks_for_recheck);
1259 static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
1261 if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1263 else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1266 return (int)bbio->num_stripes;
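/*
 * Translate @logical into a stripe index and an offset inside that stripe.
 * For RAID5/6, raid_map[] holds the logical start of each data stripe
 * (P/Q entries are skipped); purely as an illustration, if raid_map[1] were
 * 64K and @logical 70K (and inside the mapped range), the result would be
 * stripe_index 1 and stripe_offset 6K. For all other profiles every mapped
 * stripe is a full mirror, so @mirror selects the stripe directly.
 */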
1269 static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
1272 int nstripes, int mirror,
1278 if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
1280 for (i = 0; i < nstripes; i++) {
1281 if (raid_map[i] == RAID6_Q_STRIPE ||
1282 raid_map[i] == RAID5_P_STRIPE)
1285 if (logical >= raid_map[i] &&
1286 logical < raid_map[i] + mapped_length)
1291 *stripe_offset = logical - raid_map[i];
1293 /* The other RAID type */
1294 *stripe_index = mirror;
1299 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
1300 struct scrub_block *sblocks_for_recheck)
1302 struct scrub_ctx *sctx = original_sblock->sctx;
1303 struct btrfs_fs_info *fs_info = sctx->fs_info;
1304 u64 length = original_sblock->page_count * PAGE_SIZE;
1305 u64 logical = original_sblock->pagev[0]->logical;
1306 u64 generation = original_sblock->pagev[0]->generation;
1307 u64 flags = original_sblock->pagev[0]->flags;
1308 u64 have_csum = original_sblock->pagev[0]->have_csum;
1309 struct scrub_recover *recover;
1310 struct btrfs_bio *bbio;
1321 * note: the two members refs and outstanding_pages
1322 * are not used (and not set) in the blocks that are used for
1323 * the recheck procedure
1326 while (length > 0) {
1327 sublen = min_t(u64, length, PAGE_SIZE);
1328 mapped_length = sublen;
1332 * with a length of PAGE_SIZE, each returned stripe
1333 * represents one mirror
1335 btrfs_bio_counter_inc_blocked(fs_info);
1336 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
1337 logical, &mapped_length, &bbio);
1338 if (ret || !bbio || mapped_length < sublen) {
1339 btrfs_put_bbio(bbio);
1340 btrfs_bio_counter_dec(fs_info);
1344 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1346 btrfs_put_bbio(bbio);
1347 btrfs_bio_counter_dec(fs_info);
1351 refcount_set(&recover->refs, 1);
1352 recover->bbio = bbio;
1353 recover->map_length = mapped_length;
1355 BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK);
1357 nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
1359 for (mirror_index = 0; mirror_index < nmirrors;
1361 struct scrub_block *sblock;
1362 struct scrub_page *page;
1364 sblock = sblocks_for_recheck + mirror_index;
1365 sblock->sctx = sctx;
1367 page = kzalloc(sizeof(*page), GFP_NOFS);
1370 spin_lock(&sctx->stat_lock);
1371 sctx->stat.malloc_errors++;
1372 spin_unlock(&sctx->stat_lock);
1373 scrub_put_recover(fs_info, recover);
1376 scrub_page_get(page);
1377 sblock->pagev[page_index] = page;
1378 page->sblock = sblock;
1379 page->flags = flags;
1380 page->generation = generation;
1381 page->logical = logical;
1382 page->have_csum = have_csum;
1385 original_sblock->pagev[0]->csum,
1388 scrub_stripe_index_and_offset(logical,
1397 page->physical = bbio->stripes[stripe_index].physical +
1399 page->dev = bbio->stripes[stripe_index].dev;
1401 BUG_ON(page_index >= original_sblock->page_count);
1402 page->physical_for_dev_replace =
1403 original_sblock->pagev[page_index]->
1404 physical_for_dev_replace;
1405 /* for missing devices, dev->bdev is NULL */
1406 page->mirror_num = mirror_index + 1;
1407 sblock->page_count++;
1408 page->page = alloc_page(GFP_NOFS);
1412 scrub_get_recover(recover);
1413 page->recover = recover;
1415 scrub_put_recover(fs_info, recover);
1424 struct scrub_bio_ret {
1425 struct completion event;
1429 static void scrub_bio_wait_endio(struct bio *bio)
1431 struct scrub_bio_ret *ret = bio->bi_private;
1433 ret->error = bio->bi_error;
1434 complete(&ret->event);
1437 static inline int scrub_is_page_on_raid56(struct scrub_page *page)
1439 return page->recover &&
1440 (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
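/*
 * Synchronously read one page through the RAID5/6 recovery code: the bio
 * completion handler signals a local completion, so the caller just waits
 * and then inspects the recorded error.
 */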
1443 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1445 struct scrub_page *page)
1447 struct scrub_bio_ret done;
1450 init_completion(&done.event);
1452 bio->bi_iter.bi_sector = page->logical >> 9;
1453 bio->bi_private = &done;
1454 bio->bi_end_io = scrub_bio_wait_endio;
1456 ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
1457 page->recover->map_length,
1458 page->mirror_num, 0);
1462 wait_for_completion(&done.event);
1470 * this function will check the on disk data for checksum errors, header
1471 * errors and read I/O errors. If any I/O errors happen, the exact pages
1472 * which are errored are marked as being bad. The goal is to enable scrub
1473 * to take those pages that are not errored from all the mirrors so that
1474 * the pages that are errored in the just handled mirror can be repaired.
1476 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1477 struct scrub_block *sblock,
1478 int retry_failed_mirror)
1482 sblock->no_io_error_seen = 1;
1484 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1486 struct scrub_page *page = sblock->pagev[page_num];
1488 if (page->dev->bdev == NULL) {
1490 sblock->no_io_error_seen = 0;
1494 WARN_ON(!page->page);
1495 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1498 sblock->no_io_error_seen = 0;
1501 bio->bi_bdev = page->dev->bdev;
1503 bio_add_page(bio, page->page, PAGE_SIZE, 0);
1504 if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
1505 if (scrub_submit_raid56_bio_wait(fs_info, bio, page)) {
1507 sblock->no_io_error_seen = 0;
1510 bio->bi_iter.bi_sector = page->physical >> 9;
1511 bio_set_op_attrs(bio, REQ_OP_READ, 0);
1513 if (btrfsic_submit_bio_wait(bio)) {
1515 sblock->no_io_error_seen = 0;
1522 if (sblock->no_io_error_seen)
1523 scrub_recheck_block_checksum(sblock);
1526 static inline int scrub_check_fsid(u8 fsid[],
1527 struct scrub_page *spage)
1529 struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
1532 ret = memcmp(fsid, fs_devices->fsid, BTRFS_UUID_SIZE);
1536 static void scrub_recheck_block_checksum(struct scrub_block *sblock)
1538 sblock->header_error = 0;
1539 sblock->checksum_error = 0;
1540 sblock->generation_error = 0;
1542 if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA)
1543 scrub_checksum_data(sblock);
1545 scrub_checksum_tree_block(sblock);
1548 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1549 struct scrub_block *sblock_good)
1554 for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1557 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1567 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1568 struct scrub_block *sblock_good,
1569 int page_num, int force_write)
1571 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1572 struct scrub_page *page_good = sblock_good->pagev[page_num];
1573 struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
1575 BUG_ON(page_bad->page == NULL);
1576 BUG_ON(page_good->page == NULL);
1577 if (force_write || sblock_bad->header_error ||
1578 sblock_bad->checksum_error || page_bad->io_error) {
1582 if (!page_bad->dev->bdev) {
1583 btrfs_warn_rl(fs_info,
1584 "scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
1588 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1591 bio->bi_bdev = page_bad->dev->bdev;
1592 bio->bi_iter.bi_sector = page_bad->physical >> 9;
1593 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
1595 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1596 if (PAGE_SIZE != ret) {
1601 if (btrfsic_submit_bio_wait(bio)) {
1602 btrfs_dev_stat_inc_and_print(page_bad->dev,
1603 BTRFS_DEV_STAT_WRITE_ERRS);
1604 btrfs_dev_replace_stats_inc(
1605 &fs_info->dev_replace.num_write_errors);
1615 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1617 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
1621 * This block is used for the parity check on the source device,
1622 * so the data need not be written to the destination device.
1624 if (sblock->sparity)
1627 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1630 ret = scrub_write_page_to_dev_replace(sblock, page_num);
1632 btrfs_dev_replace_stats_inc(
1633 &fs_info->dev_replace.num_write_errors);
1637 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1640 struct scrub_page *spage = sblock->pagev[page_num];
1642 BUG_ON(spage->page == NULL);
1643 if (spage->io_error) {
1644 void *mapped_buffer = kmap_atomic(spage->page);
1646 clear_page(mapped_buffer);
1647 flush_dcache_page(spage->page);
1648 kunmap_atomic(mapped_buffer);
1650 return scrub_add_page_to_wr_bio(sblock->sctx, spage);
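/*
 * Queue @spage for writing to the dev-replace target. Pages are collected
 * in wr_curr_bio as long as they are physically and logically contiguous
 * with it; a non-contiguous page or a full bio causes the current bio to be
 * submitted first via scrub_wr_submit().
 */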
1653 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1654 struct scrub_page *spage)
1656 struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1657 struct scrub_bio *sbio;
1660 mutex_lock(&wr_ctx->wr_lock);
1662 if (!wr_ctx->wr_curr_bio) {
1663 wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
1665 if (!wr_ctx->wr_curr_bio) {
1666 mutex_unlock(&wr_ctx->wr_lock);
1669 wr_ctx->wr_curr_bio->sctx = sctx;
1670 wr_ctx->wr_curr_bio->page_count = 0;
1672 sbio = wr_ctx->wr_curr_bio;
1673 if (sbio->page_count == 0) {
1676 sbio->physical = spage->physical_for_dev_replace;
1677 sbio->logical = spage->logical;
1678 sbio->dev = wr_ctx->tgtdev;
1681 bio = btrfs_io_bio_alloc(GFP_KERNEL,
1682 wr_ctx->pages_per_wr_bio);
1684 mutex_unlock(&wr_ctx->wr_lock);
1690 bio->bi_private = sbio;
1691 bio->bi_end_io = scrub_wr_bio_end_io;
1692 bio->bi_bdev = sbio->dev->bdev;
1693 bio->bi_iter.bi_sector = sbio->physical >> 9;
1694 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
1696 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1697 spage->physical_for_dev_replace ||
1698 sbio->logical + sbio->page_count * PAGE_SIZE !=
1700 scrub_wr_submit(sctx);
1704 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1705 if (ret != PAGE_SIZE) {
1706 if (sbio->page_count < 1) {
1709 mutex_unlock(&wr_ctx->wr_lock);
1712 scrub_wr_submit(sctx);
1716 sbio->pagev[sbio->page_count] = spage;
1717 scrub_page_get(spage);
1719 if (sbio->page_count == wr_ctx->pages_per_wr_bio)
1720 scrub_wr_submit(sctx);
1721 mutex_unlock(&wr_ctx->wr_lock);
1726 static void scrub_wr_submit(struct scrub_ctx *sctx)
1728 struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1729 struct scrub_bio *sbio;
1731 if (!wr_ctx->wr_curr_bio)
1734 sbio = wr_ctx->wr_curr_bio;
1735 wr_ctx->wr_curr_bio = NULL;
1736 WARN_ON(!sbio->bio->bi_bdev);
1737 scrub_pending_bio_inc(sctx);
1738 /* process all writes in a single worker thread. Then the block layer
1739 * orders the requests before sending them to the driver which
1740 * doubled the write performance on spinning disks when measured with Linux 3.5 */
1742 btrfsic_submit_bio(sbio->bio);
1745 static void scrub_wr_bio_end_io(struct bio *bio)
1747 struct scrub_bio *sbio = bio->bi_private;
1748 struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
1750 sbio->err = bio->bi_error;
1753 btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
1754 scrub_wr_bio_end_io_worker, NULL, NULL);
1755 btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
1758 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
1760 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1761 struct scrub_ctx *sctx = sbio->sctx;
1764 WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
1766 struct btrfs_dev_replace *dev_replace =
1767 &sbio->sctx->fs_info->dev_replace;
1769 for (i = 0; i < sbio->page_count; i++) {
1770 struct scrub_page *spage = sbio->pagev[i];
1772 spage->io_error = 1;
1773 btrfs_dev_replace_stats_inc(&dev_replace->
1778 for (i = 0; i < sbio->page_count; i++)
1779 scrub_page_put(sbio->pagev[i]);
1783 scrub_pending_bio_dec(sctx);
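/*
 * Verify a fully read block: dispatch to the data, tree block or super
 * block checksum routine depending on the extent flags and, if an error is
 * detected, hand the block over to scrub_handle_errored_block() for repair.
 */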
1786 static int scrub_checksum(struct scrub_block *sblock)
1792 * No need to initialize these stats currently,
1793 * because this function only uses the return value
1794 * instead of these stats values.
1799 sblock->header_error = 0;
1800 sblock->generation_error = 0;
1801 sblock->checksum_error = 0;
1803 WARN_ON(sblock->page_count < 1);
1804 flags = sblock->pagev[0]->flags;
1806 if (flags & BTRFS_EXTENT_FLAG_DATA)
1807 ret = scrub_checksum_data(sblock);
1808 else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1809 ret = scrub_checksum_tree_block(sblock);
1810 else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1811 (void)scrub_checksum_super(sblock);
1815 scrub_handle_errored_block(sblock);
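/*
 * Data checksum verification: feed the block page by page into
 * btrfs_csum_data() (crc32c), finalize the sum and compare it against the
 * csum that was looked up when the extent was queued for scrubbing.
 */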
1820 static int scrub_checksum_data(struct scrub_block *sblock)
1822 struct scrub_ctx *sctx = sblock->sctx;
1823 u8 csum[BTRFS_CSUM_SIZE];
1831 BUG_ON(sblock->page_count < 1);
1832 if (!sblock->pagev[0]->have_csum)
1835 on_disk_csum = sblock->pagev[0]->csum;
1836 page = sblock->pagev[0]->page;
1837 buffer = kmap_atomic(page);
1839 len = sctx->sectorsize;
1842 u64 l = min_t(u64, len, PAGE_SIZE);
1844 crc = btrfs_csum_data(buffer, crc, l);
1845 kunmap_atomic(buffer);
1850 BUG_ON(index >= sblock->page_count);
1851 BUG_ON(!sblock->pagev[index]->page);
1852 page = sblock->pagev[index]->page;
1853 buffer = kmap_atomic(page);
1856 btrfs_csum_final(crc, csum);
1857 if (memcmp(csum, on_disk_csum, sctx->csum_size))
1858 sblock->checksum_error = 1;
1860 return sblock->checksum_error;
1863 static int scrub_checksum_tree_block(struct scrub_block *sblock)
1865 struct scrub_ctx *sctx = sblock->sctx;
1866 struct btrfs_header *h;
1867 struct btrfs_fs_info *fs_info = sctx->fs_info;
1868 u8 calculated_csum[BTRFS_CSUM_SIZE];
1869 u8 on_disk_csum[BTRFS_CSUM_SIZE];
1871 void *mapped_buffer;
1878 BUG_ON(sblock->page_count < 1);
1879 page = sblock->pagev[0]->page;
1880 mapped_buffer = kmap_atomic(page);
1881 h = (struct btrfs_header *)mapped_buffer;
1882 memcpy(on_disk_csum, h->csum, sctx->csum_size);
1885 * we don't use the getter functions here, as we
1886 * a) don't have an extent buffer and
1887 * b) the page is already kmapped
1889 if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
1890 sblock->header_error = 1;
1892 if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h)) {
1893 sblock->header_error = 1;
1894 sblock->generation_error = 1;
1897 if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
1898 sblock->header_error = 1;
1900 if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1902 sblock->header_error = 1;
1904 len = sctx->nodesize - BTRFS_CSUM_SIZE;
1905 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1906 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1909 u64 l = min_t(u64, len, mapped_size);
1911 crc = btrfs_csum_data(p, crc, l);
1912 kunmap_atomic(mapped_buffer);
1917 BUG_ON(index >= sblock->page_count);
1918 BUG_ON(!sblock->pagev[index]->page);
1919 page = sblock->pagev[index]->page;
1920 mapped_buffer = kmap_atomic(page);
1921 mapped_size = PAGE_SIZE;
1925 btrfs_csum_final(crc, calculated_csum);
1926 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1927 sblock->checksum_error = 1;
1929 return sblock->header_error || sblock->checksum_error;
1932 static int scrub_checksum_super(struct scrub_block *sblock)
1934 struct btrfs_super_block *s;
1935 struct scrub_ctx *sctx = sblock->sctx;
1936 u8 calculated_csum[BTRFS_CSUM_SIZE];
1937 u8 on_disk_csum[BTRFS_CSUM_SIZE];
1939 void *mapped_buffer;
1948 BUG_ON(sblock->page_count < 1);
1949 page = sblock->pagev[0]->page;
1950 mapped_buffer = kmap_atomic(page);
1951 s = (struct btrfs_super_block *)mapped_buffer;
1952 memcpy(on_disk_csum, s->csum, sctx->csum_size);
1954 if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
1957 if (sblock->pagev[0]->generation != btrfs_super_generation(s))
1960 if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
1963 len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
1964 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1965 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1968 u64 l = min_t(u64, len, mapped_size);
1970 crc = btrfs_csum_data(p, crc, l);
1971 kunmap_atomic(mapped_buffer);
1976 BUG_ON(index >= sblock->page_count);
1977 BUG_ON(!sblock->pagev[index]->page);
1978 page = sblock->pagev[index]->page;
1979 mapped_buffer = kmap_atomic(page);
1980 mapped_size = PAGE_SIZE;
1984 btrfs_csum_final(crc, calculated_csum);
1985 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1988 if (fail_cor + fail_gen) {
1990 * if we find an error in a super block, we just report it;
1991 * super blocks get rewritten with the next transaction commit anyway
1994 spin_lock(&sctx->stat_lock);
1995 ++sctx->stat.super_errors;
1996 spin_unlock(&sctx->stat_lock);
1998 btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
1999 BTRFS_DEV_STAT_CORRUPTION_ERRS);
2001 btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
2002 BTRFS_DEV_STAT_GENERATION_ERRS);
2005 return fail_cor + fail_gen;
2008 static void scrub_block_get(struct scrub_block *sblock)
2010 refcount_inc(&sblock->refs);
2013 static void scrub_block_put(struct scrub_block *sblock)
2015 if (refcount_dec_and_test(&sblock->refs)) {
2018 if (sblock->sparity)
2019 scrub_parity_put(sblock->sparity);
2021 for (i = 0; i < sblock->page_count; i++)
2022 scrub_page_put(sblock->pagev[i]);
2027 static void scrub_page_get(struct scrub_page *spage)
2029 atomic_inc(&spage->refs);
2032 static void scrub_page_put(struct scrub_page *spage)
2034 if (atomic_dec_and_test(&spage->refs)) {
2036 __free_page(spage->page);
2041 static void scrub_submit(struct scrub_ctx *sctx)
2043 struct scrub_bio *sbio;
2045 if (sctx->curr == -1)
2048 sbio = sctx->bios[sctx->curr];
2050 scrub_pending_bio_inc(sctx);
2051 btrfsic_submit_bio(sbio->bio);
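/*
 * Queue @spage for reading. If no scrub bio is currently open, grab one
 * from the free list, waiting if all SCRUB_BIOS_PER_SCTX bios are in
 * flight; a page that is not contiguous with the open bio, or a full bio,
 * causes the bio to be submitted before the page is added to a fresh one.
 */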
2054 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
2055 struct scrub_page *spage)
2057 struct scrub_block *sblock = spage->sblock;
2058 struct scrub_bio *sbio;
2063 * grab a fresh bio or wait for one to become available
2065 while (sctx->curr == -1) {
2066 spin_lock(&sctx->list_lock);
2067 sctx->curr = sctx->first_free;
2068 if (sctx->curr != -1) {
2069 sctx->first_free = sctx->bios[sctx->curr]->next_free;
2070 sctx->bios[sctx->curr]->next_free = -1;
2071 sctx->bios[sctx->curr]->page_count = 0;
2072 spin_unlock(&sctx->list_lock);
2074 spin_unlock(&sctx->list_lock);
2075 wait_event(sctx->list_wait, sctx->first_free != -1);
2078 sbio = sctx->bios[sctx->curr];
2079 if (sbio->page_count == 0) {
2082 sbio->physical = spage->physical;
2083 sbio->logical = spage->logical;
2084 sbio->dev = spage->dev;
2087 bio = btrfs_io_bio_alloc(GFP_KERNEL,
2088 sctx->pages_per_rd_bio);
2094 bio->bi_private = sbio;
2095 bio->bi_end_io = scrub_bio_end_io;
2096 bio->bi_bdev = sbio->dev->bdev;
2097 bio->bi_iter.bi_sector = sbio->physical >> 9;
2098 bio_set_op_attrs(bio, REQ_OP_READ, 0);
2100 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
2102 sbio->logical + sbio->page_count * PAGE_SIZE !=
2104 sbio->dev != spage->dev) {
2109 sbio->pagev[sbio->page_count] = spage;
2110 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
2111 if (ret != PAGE_SIZE) {
2112 if (sbio->page_count < 1) {
2121 scrub_block_get(sblock); /* one for the page added to the bio */
2122 atomic_inc(&sblock->outstanding_pages);
2124 if (sbio->page_count == sctx->pages_per_rd_bio)
2130 static void scrub_missing_raid56_end_io(struct bio *bio)
2132 struct scrub_block *sblock = bio->bi_private;
2133 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
2136 sblock->no_io_error_seen = 0;
2140 btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
2143 static void scrub_missing_raid56_worker(struct btrfs_work *work)
2145 struct scrub_block *sblock = container_of(work, struct scrub_block, work);
2146 struct scrub_ctx *sctx = sblock->sctx;
2147 struct btrfs_fs_info *fs_info = sctx->fs_info;
2149 struct btrfs_device *dev;
2151 logical = sblock->pagev[0]->logical;
2152 dev = sblock->pagev[0]->dev;
2154 if (sblock->no_io_error_seen)
2155 scrub_recheck_block_checksum(sblock);
2157 if (!sblock->no_io_error_seen) {
2158 spin_lock(&sctx->stat_lock);
2159 sctx->stat.read_errors++;
2160 spin_unlock(&sctx->stat_lock);
2161 btrfs_err_rl_in_rcu(fs_info,
2162 "IO error rebuilding logical %llu for dev %s",
2163 logical, rcu_str_deref(dev->name));
2164 } else if (sblock->header_error || sblock->checksum_error) {
2165 spin_lock(&sctx->stat_lock);
2166 sctx->stat.uncorrectable_errors++;
2167 spin_unlock(&sctx->stat_lock);
2168 btrfs_err_rl_in_rcu(fs_info,
2169 "failed to rebuild valid logical %llu for dev %s",
2170 logical, rcu_str_deref(dev->name));
2172 scrub_write_block_to_dev_replace(sblock);
2175 scrub_block_put(sblock);
2177 if (sctx->is_dev_replace &&
2178 atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2179 mutex_lock(&sctx->wr_ctx.wr_lock);
2180 scrub_wr_submit(sctx);
2181 mutex_unlock(&sctx->wr_ctx.wr_lock);
2184 scrub_pending_bio_dec(sctx);
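/*
 * Handle blocks that live on a device whose bdev is missing: for RAID5/6
 * in dev-replace mode the data is rebuilt from the remaining stripes by a
 * missing-device rbio, and scrub_missing_raid56_worker() then checks the
 * result and writes it to the replacement target.
 */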
2187 static void scrub_missing_raid56_pages(struct scrub_block *sblock)
2189 struct scrub_ctx *sctx = sblock->sctx;
2190 struct btrfs_fs_info *fs_info = sctx->fs_info;
2191 u64 length = sblock->page_count * PAGE_SIZE;
2192 u64 logical = sblock->pagev[0]->logical;
2193 struct btrfs_bio *bbio = NULL;
2195 struct btrfs_raid_bio *rbio;
2199 btrfs_bio_counter_inc_blocked(fs_info);
2200 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
2202 if (ret || !bbio || !bbio->raid_map)
2205 if (WARN_ON(!sctx->is_dev_replace ||
2206 !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2208 * We shouldn't be scrubbing a missing device. Even for dev
2209 * replace, we should only get here for RAID 5/6. We either
2210 * managed to mount something with no mirrors remaining or
2211 * there's a bug in scrub_remap_extent()/btrfs_map_block().
2216 bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
2220 bio->bi_iter.bi_sector = logical >> 9;
2221 bio->bi_private = sblock;
2222 bio->bi_end_io = scrub_missing_raid56_end_io;
2224 rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
2228 for (i = 0; i < sblock->page_count; i++) {
2229 struct scrub_page *spage = sblock->pagev[i];
2231 raid56_add_scrub_pages(rbio, spage->page, spage->logical);
2234 btrfs_init_work(&sblock->work, btrfs_scrub_helper,
2235 scrub_missing_raid56_worker, NULL, NULL);
2236 scrub_block_get(sblock);
2237 scrub_pending_bio_inc(sctx);
2238 raid56_submit_missing_rbio(rbio);
2244 btrfs_bio_counter_dec(fs_info);
2245 btrfs_put_bbio(bbio);
2246 spin_lock(&sctx->stat_lock);
2247 sctx->stat.malloc_errors++;
2248 spin_unlock(&sctx->stat_lock);
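/*
 * Split the range [@logical, @logical + @len) into PAGE_SIZE sized
 * scrub_pages that share one scrub_block, allocate their backing pages and
 * queue them for reading (or for rebuilding, when the device is missing).
 */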
2251 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
2252 u64 physical, struct btrfs_device *dev, u64 flags,
2253 u64 gen, int mirror_num, u8 *csum, int force,
2254 u64 physical_for_dev_replace)
2256 struct scrub_block *sblock;
2259 sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
2261 spin_lock(&sctx->stat_lock);
2262 sctx->stat.malloc_errors++;
2263 spin_unlock(&sctx->stat_lock);
2267 /* one ref inside this function, plus one for each page added to a bio later on */
2269 refcount_set(&sblock->refs, 1);
2270 sblock->sctx = sctx;
2271 sblock->no_io_error_seen = 1;
2273 for (index = 0; len > 0; index++) {
2274 struct scrub_page *spage;
2275 u64 l = min_t(u64, len, PAGE_SIZE);
2277 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
2280 spin_lock(&sctx->stat_lock);
2281 sctx->stat.malloc_errors++;
2282 spin_unlock(&sctx->stat_lock);
2283 scrub_block_put(sblock);
2286 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2287 scrub_page_get(spage);
2288 sblock->pagev[index] = spage;
2289 spage->sblock = sblock;
2291 spage->flags = flags;
2292 spage->generation = gen;
2293 spage->logical = logical;
2294 spage->physical = physical;
2295 spage->physical_for_dev_replace = physical_for_dev_replace;
2296 spage->mirror_num = mirror_num;
2298 spage->have_csum = 1;
2299 memcpy(spage->csum, csum, sctx->csum_size);
2301 spage->have_csum = 0;
2303 sblock->page_count++;
2304 spage->page = alloc_page(GFP_KERNEL);
2310 physical_for_dev_replace += l;
2313 WARN_ON(sblock->page_count == 0);
2316 * This case should only be hit for RAID 5/6 device replace. See
2317 * the comment in scrub_missing_raid56_pages() for details.
2319 scrub_missing_raid56_pages(sblock);
2321 for (index = 0; index < sblock->page_count; index++) {
2322 struct scrub_page *spage = sblock->pagev[index];
2325 ret = scrub_add_page_to_rd_bio(sctx, spage);
2327 scrub_block_put(sblock);
2336 /* last one frees, either here or in bio completion for last page */
2337 scrub_block_put(sblock);
2341 static void scrub_bio_end_io(struct bio *bio)
2343 struct scrub_bio *sbio = bio->bi_private;
2344 struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
2346 sbio->err = bio->bi_error;
2349 btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
2352 static void scrub_bio_end_io_worker(struct btrfs_work *work)
2354 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2355 struct scrub_ctx *sctx = sbio->sctx;
2358 BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
2360 for (i = 0; i < sbio->page_count; i++) {
2361 struct scrub_page *spage = sbio->pagev[i];
2363 spage->io_error = 1;
2364 spage->sblock->no_io_error_seen = 0;
2368 /* now complete the scrub_block items that have all pages completed */
2369 for (i = 0; i < sbio->page_count; i++) {
2370 struct scrub_page *spage = sbio->pagev[i];
2371 struct scrub_block *sblock = spage->sblock;
2373 if (atomic_dec_and_test(&sblock->outstanding_pages))
2374 scrub_block_complete(sblock);
2375 scrub_block_put(sblock);
2380 spin_lock(&sctx->list_lock);
2381 sbio->next_free = sctx->first_free;
2382 sctx->first_free = sbio->index;
2383 spin_unlock(&sctx->list_lock);
2385 if (sctx->is_dev_replace &&
2386 atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2387 mutex_lock(&sctx->wr_ctx.wr_lock);
2388 scrub_wr_submit(sctx);
2389 mutex_unlock(&sctx->wr_ctx.wr_lock);
2392 scrub_pending_bio_dec(sctx);
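/*
 * __scrub_mark_bitmap() marks the sectors of [start, start + len) in one of
 * the per-stripe bitmaps (dbitmap or ebitmap).  The offset is taken modulo
 * the stripe length, so a range that runs past the end of the stripe wraps
 * around to bit 0.
 *
 * Worked example: with a 64K stripe and 4K sectors (nsectors = 16), a start
 * 56K into the stripe and len = 16K give offset = 14 and 4 sectors to mark;
 * since 14 + 4 > 16, the code sets bits 14-15 and then bits 0-1.
 */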
2395 static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2396 unsigned long *bitmap,
2401 int sectorsize = sparity->sctx->fs_info->sectorsize;
2403 if (len >= sparity->stripe_len) {
2404 bitmap_set(bitmap, 0, sparity->nsectors);
2408 start -= sparity->logic_start;
2409 start = div64_u64_rem(start, sparity->stripe_len, &offset);
2410 offset = div_u64(offset, sectorsize);
2411 nsectors = (int)len / sectorsize;
2413 if (offset + nsectors <= sparity->nsectors) {
2414 bitmap_set(bitmap, offset, nsectors);
2418 bitmap_set(bitmap, offset, sparity->nsectors - offset);
2419 bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2422 static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2425 __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
2428 static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2431 __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
2434 static void scrub_block_complete(struct scrub_block *sblock)
2438 if (!sblock->no_io_error_seen) {
2440 scrub_handle_errored_block(sblock);
2443 * If there is a checksum error, write via the repair mechanism in the dev
2444 * replace case; otherwise write the block here directly in the dev replace case.
2447 corrupted = scrub_checksum(sblock);
2448 if (!corrupted && sblock->sctx->is_dev_replace)
2449 scrub_write_block_to_dev_replace(sblock);
2452 if (sblock->sparity && corrupted && !sblock->data_corrected) {
2453 u64 start = sblock->pagev[0]->logical;
2454 u64 end = sblock->pagev[sblock->page_count - 1]->logical +
2457 scrub_parity_mark_sectors_error(sblock->sparity,
2458 start, end - start);
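/*
 * scrub_find_csum() consumes sctx->csum_list, which was filled by
 * btrfs_lookup_csums_range() for the stripe currently being scrubbed.
 * Entries that lie entirely before @logical are dropped (counted as
 * csum_discards); when a matching btrfs_ordered_sum is found, the checksum
 * of the sector containing @logical is copied into @csum, and the entry is
 * removed from the list once its last sector has been handed out.
 */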
2462 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
2464 struct btrfs_ordered_sum *sum = NULL;
2465 unsigned long index;
2466 unsigned long num_sectors;
2468 while (!list_empty(&sctx->csum_list)) {
2469 sum = list_first_entry(&sctx->csum_list,
2470 struct btrfs_ordered_sum, list);
2471 if (sum->bytenr > logical)
2473 if (sum->bytenr + sum->len > logical)
2476 ++sctx->stat.csum_discards;
2477 list_del(&sum->list);
2484 index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
2485 num_sectors = sum->len / sctx->sectorsize;
2486 memcpy(csum, sum->sums + index, sctx->csum_size);
2487 if (index == num_sectors - 1) {
2488 list_del(&sum->list);
2494 /* scrub extent tries to collect up to 64 kB for each bio */
2495 static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
2496 u64 physical, struct btrfs_device *dev, u64 flags,
2497 u64 gen, int mirror_num, u64 physical_for_dev_replace)
2500 u8 csum[BTRFS_CSUM_SIZE];
2503 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2504 blocksize = sctx->sectorsize;
2505 spin_lock(&sctx->stat_lock);
2506 sctx->stat.data_extents_scrubbed++;
2507 sctx->stat.data_bytes_scrubbed += len;
2508 spin_unlock(&sctx->stat_lock);
2509 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2510 blocksize = sctx->nodesize;
2511 spin_lock(&sctx->stat_lock);
2512 sctx->stat.tree_extents_scrubbed++;
2513 sctx->stat.tree_bytes_scrubbed += len;
2514 spin_unlock(&sctx->stat_lock);
2516 blocksize = sctx->sectorsize;
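/*
 * The extent is scrubbed below in blocksize-sized pieces: sector by sector
 * for data (each data sector has its own checksum) and node by node for
 * metadata.  For example, a 128K data extent with 4K sectors is submitted
 * as 32 scrub_blocks of one page each.
 */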
2521 u64 l = min_t(u64, len, blocksize);
2524 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2525 /* push csums to sbio */
2526 have_csum = scrub_find_csum(sctx, logical, csum);
2528 ++sctx->stat.no_csum;
2529 if (sctx->is_dev_replace && !have_csum) {
2530 ret = copy_nocow_pages(sctx, logical, l,
2532 physical_for_dev_replace);
2533 goto behind_scrub_pages;
2536 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
2537 mirror_num, have_csum ? csum : NULL, 0,
2538 physical_for_dev_replace);
2545 physical_for_dev_replace += l;
2550 static int scrub_pages_for_parity(struct scrub_parity *sparity,
2551 u64 logical, u64 len,
2552 u64 physical, struct btrfs_device *dev,
2553 u64 flags, u64 gen, int mirror_num, u8 *csum)
2555 struct scrub_ctx *sctx = sparity->sctx;
2556 struct scrub_block *sblock;
2559 sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
2561 spin_lock(&sctx->stat_lock);
2562 sctx->stat.malloc_errors++;
2563 spin_unlock(&sctx->stat_lock);
2567 /* one ref inside this function, plus one for each page added to a bio later on */
2569 refcount_set(&sblock->refs, 1);
2570 sblock->sctx = sctx;
2571 sblock->no_io_error_seen = 1;
2572 sblock->sparity = sparity;
2573 scrub_parity_get(sparity);
2575 for (index = 0; len > 0; index++) {
2576 struct scrub_page *spage;
2577 u64 l = min_t(u64, len, PAGE_SIZE);
2579 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
2582 spin_lock(&sctx->stat_lock);
2583 sctx->stat.malloc_errors++;
2584 spin_unlock(&sctx->stat_lock);
2585 scrub_block_put(sblock);
2588 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2589 /* For scrub block */
2590 scrub_page_get(spage);
2591 sblock->pagev[index] = spage;
2592 /* For scrub parity */
2593 scrub_page_get(spage);
2594 list_add_tail(&spage->list, &sparity->spages);
2595 spage->sblock = sblock;
2597 spage->flags = flags;
2598 spage->generation = gen;
2599 spage->logical = logical;
2600 spage->physical = physical;
2601 spage->mirror_num = mirror_num;
2603 spage->have_csum = 1;
2604 memcpy(spage->csum, csum, sctx->csum_size);
2606 spage->have_csum = 0;
2608 sblock->page_count++;
2609 spage->page = alloc_page(GFP_KERNEL);
2617 WARN_ON(sblock->page_count == 0);
2618 for (index = 0; index < sblock->page_count; index++) {
2619 struct scrub_page *spage = sblock->pagev[index];
2622 ret = scrub_add_page_to_rd_bio(sctx, spage);
2624 scrub_block_put(sblock);
2629 /* last one frees, either here or in bio completion for last page */
2630 scrub_block_put(sblock);
2634 static int scrub_extent_for_parity(struct scrub_parity *sparity,
2635 u64 logical, u64 len,
2636 u64 physical, struct btrfs_device *dev,
2637 u64 flags, u64 gen, int mirror_num)
2639 struct scrub_ctx *sctx = sparity->sctx;
2641 u8 csum[BTRFS_CSUM_SIZE];
2645 scrub_parity_mark_sectors_error(sparity, logical, len);
2649 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2650 blocksize = sctx->sectorsize;
2651 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2652 blocksize = sctx->nodesize;
2654 blocksize = sctx->sectorsize;
2659 u64 l = min_t(u64, len, blocksize);
2662 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2663 /* push csums to sbio */
2664 have_csum = scrub_find_csum(sctx, logical, csum);
2668 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2669 flags, gen, mirror_num,
2670 have_csum ? csum : NULL);
2682 * Given a physical address, this will calculate its
2683 * logical offset. If this is a parity stripe, it will return
2684 * the leftmost data stripe's logical offset.
2686 * Returns 0 if it is a data stripe, 1 if it is a parity stripe.
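 * Worked example: RAID5 over 3 devices with a 64K stripe_len has 2 data
 * stripes per full stripe.  For physical offset 0 on device 0, the loop
 * hits stripe_index == num at i = 0, so *offset = 0 and 0 is returned
 * (data).  For the same offset on device 2, no data stripe of rotation 0
 * maps there, so that position holds parity and 1 is returned.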
2688 static int get_raid56_logic_offset(u64 physical, int num,
2689 struct map_lookup *map, u64 *offset,
2699 last_offset = (physical - map->stripes[num].physical) *
2700 nr_data_stripes(map);
2702 *stripe_start = last_offset;
2704 *offset = last_offset;
2705 for (i = 0; i < nr_data_stripes(map); i++) {
2706 *offset = last_offset + i * map->stripe_len;
2708 stripe_nr = div_u64(*offset, map->stripe_len);
2709 stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));
2711 /* Work out the disk rotation on this stripe-set */
2712 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
2713 /* calculate which stripe this data is located on */
2715 stripe_index = rot % map->num_stripes;
2716 if (stripe_index == num)
2718 if (stripe_index < num)
2721 *offset = last_offset + j * map->stripe_len;
2725 static void scrub_free_parity(struct scrub_parity *sparity)
2727 struct scrub_ctx *sctx = sparity->sctx;
2728 struct scrub_page *curr, *next;
2731 nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
2733 spin_lock(&sctx->stat_lock);
2734 sctx->stat.read_errors += nbits;
2735 sctx->stat.uncorrectable_errors += nbits;
2736 spin_unlock(&sctx->stat_lock);
2739 list_for_each_entry_safe(curr, next, &sparity->spages, list) {
2740 list_del_init(&curr->list);
2741 scrub_page_put(curr);
2747 static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
2749 struct scrub_parity *sparity = container_of(work, struct scrub_parity,
2751 struct scrub_ctx *sctx = sparity->sctx;
2753 scrub_free_parity(sparity);
2754 scrub_pending_bio_dec(sctx);
2757 static void scrub_parity_bio_endio(struct bio *bio)
2759 struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
2760 struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
2763 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2768 btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
2769 scrub_parity_bio_endio_worker, NULL, NULL);
2770 btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
2773 static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2775 struct scrub_ctx *sctx = sparity->sctx;
2776 struct btrfs_fs_info *fs_info = sctx->fs_info;
2778 struct btrfs_raid_bio *rbio;
2779 struct btrfs_bio *bbio = NULL;
2783 if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
2787 length = sparity->logic_end - sparity->logic_start;
2789 btrfs_bio_counter_inc_blocked(fs_info);
2790 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
2792 if (ret || !bbio || !bbio->raid_map)
2795 bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
2799 bio->bi_iter.bi_sector = sparity->logic_start >> 9;
2800 bio->bi_private = sparity;
2801 bio->bi_end_io = scrub_parity_bio_endio;
2803 rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
2804 length, sparity->scrub_dev,
2810 scrub_pending_bio_inc(sctx);
2811 raid56_parity_submit_scrub_rbio(rbio);
2817 btrfs_bio_counter_dec(fs_info);
2818 btrfs_put_bbio(bbio);
2819 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2821 spin_lock(&sctx->stat_lock);
2822 sctx->stat.malloc_errors++;
2823 spin_unlock(&sctx->stat_lock);
2825 scrub_free_parity(sparity);
2828 static inline int scrub_calc_parity_bitmap_len(int nsectors)
2830 return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
2833 static void scrub_parity_get(struct scrub_parity *sparity)
2835 refcount_inc(&sparity->refs);
2838 static void scrub_parity_put(struct scrub_parity *sparity)
2840 if (!refcount_dec_and_test(&sparity->refs))
2843 scrub_parity_check_and_repair(sparity);
2846 static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
2847 struct map_lookup *map,
2848 struct btrfs_device *sdev,
2849 struct btrfs_path *path,
2853 struct btrfs_fs_info *fs_info = sctx->fs_info;
2854 struct btrfs_root *root = fs_info->extent_root;
2855 struct btrfs_root *csum_root = fs_info->csum_root;
2856 struct btrfs_extent_item *extent;
2857 struct btrfs_bio *bbio = NULL;
2861 struct extent_buffer *l;
2862 struct btrfs_key key;
2865 u64 extent_physical;
2868 struct btrfs_device *extent_dev;
2869 struct scrub_parity *sparity;
2872 int extent_mirror_num;
2875 nsectors = div_u64(map->stripe_len, fs_info->sectorsize);
2876 bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
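/*
 * A single allocation holds the scrub_parity followed by two bitmaps of
 * bitmap_len bytes each: dbitmap marks sectors that contain data, ebitmap
 * marks sectors with errors.  For example, with a 64K stripe_len and 4K
 * sectors, nsectors = 16 and each bitmap needs one long (8 bytes on 64-bit).
 */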
2877 sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
2880 spin_lock(&sctx->stat_lock);
2881 sctx->stat.malloc_errors++;
2882 spin_unlock(&sctx->stat_lock);
2886 sparity->stripe_len = map->stripe_len;
2887 sparity->nsectors = nsectors;
2888 sparity->sctx = sctx;
2889 sparity->scrub_dev = sdev;
2890 sparity->logic_start = logic_start;
2891 sparity->logic_end = logic_end;
2892 refcount_set(&sparity->refs, 1);
2893 INIT_LIST_HEAD(&sparity->spages);
2894 sparity->dbitmap = sparity->bitmap;
2895 sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
2898 while (logic_start < logic_end) {
2899 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2900 key.type = BTRFS_METADATA_ITEM_KEY;
2902 key.type = BTRFS_EXTENT_ITEM_KEY;
2903 key.objectid = logic_start;
2904 key.offset = (u64)-1;
2906 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2911 ret = btrfs_previous_extent_item(root, path, 0);
2915 btrfs_release_path(path);
2916 ret = btrfs_search_slot(NULL, root, &key,
2928 slot = path->slots[0];
2929 if (slot >= btrfs_header_nritems(l)) {
2930 ret = btrfs_next_leaf(root, path);
2939 btrfs_item_key_to_cpu(l, &key, slot);
2941 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2942 key.type != BTRFS_METADATA_ITEM_KEY)
2945 if (key.type == BTRFS_METADATA_ITEM_KEY)
2946 bytes = fs_info->nodesize;
2950 if (key.objectid + bytes <= logic_start)
2953 if (key.objectid >= logic_end) {
2958 while (key.objectid >= logic_start + map->stripe_len)
2959 logic_start += map->stripe_len;
2961 extent = btrfs_item_ptr(l, slot,
2962 struct btrfs_extent_item);
2963 flags = btrfs_extent_flags(l, extent);
2964 generation = btrfs_extent_generation(l, extent);
2966 if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
2967 (key.objectid < logic_start ||
2968 key.objectid + bytes >
2969 logic_start + map->stripe_len)) {
2971 "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
2972 key.objectid, logic_start);
2973 spin_lock(&sctx->stat_lock);
2974 sctx->stat.uncorrectable_errors++;
2975 spin_unlock(&sctx->stat_lock);
2979 extent_logical = key.objectid;
2982 if (extent_logical < logic_start) {
2983 extent_len -= logic_start - extent_logical;
2984 extent_logical = logic_start;
2987 if (extent_logical + extent_len >
2988 logic_start + map->stripe_len)
2989 extent_len = logic_start + map->stripe_len -
2992 scrub_parity_mark_sectors_data(sparity, extent_logical,
2995 mapped_length = extent_len;
2997 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
2998 extent_logical, &mapped_length, &bbio,
3001 if (!bbio || mapped_length < extent_len)
3005 btrfs_put_bbio(bbio);
3008 extent_physical = bbio->stripes[0].physical;
3009 extent_mirror_num = bbio->mirror_num;
3010 extent_dev = bbio->stripes[0].dev;
3011 btrfs_put_bbio(bbio);
3013 ret = btrfs_lookup_csums_range(csum_root,
3015 extent_logical + extent_len - 1,
3016 &sctx->csum_list, 1);
3020 ret = scrub_extent_for_parity(sparity, extent_logical,
3027 scrub_free_csums(sctx);
3032 if (extent_logical + extent_len <
3033 key.objectid + bytes) {
3034 logic_start += map->stripe_len;
3036 if (logic_start >= logic_end) {
3041 if (logic_start < key.objectid + bytes) {
3050 btrfs_release_path(path);
3055 logic_start += map->stripe_len;
3059 scrub_parity_mark_sectors_error(sparity, logic_start,
3060 logic_end - logic_start);
3061 scrub_parity_put(sparity);
3063 mutex_lock(&sctx->wr_ctx.wr_lock);
3064 scrub_wr_submit(sctx);
3065 mutex_unlock(&sctx->wr_ctx.wr_lock);
3067 btrfs_release_path(path);
3068 return ret < 0 ? ret : 0;
3071 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
3072 struct map_lookup *map,
3073 struct btrfs_device *scrub_dev,
3074 int num, u64 base, u64 length,
3077 struct btrfs_path *path, *ppath;
3078 struct btrfs_fs_info *fs_info = sctx->fs_info;
3079 struct btrfs_root *root = fs_info->extent_root;
3080 struct btrfs_root *csum_root = fs_info->csum_root;
3081 struct btrfs_extent_item *extent;
3082 struct blk_plug plug;
3087 struct extent_buffer *l;
3094 struct reada_control *reada1;
3095 struct reada_control *reada2;
3096 struct btrfs_key key;
3097 struct btrfs_key key_end;
3098 u64 increment = map->stripe_len;
3101 u64 extent_physical;
3105 struct btrfs_device *extent_dev;
3106 int extent_mirror_num;
3109 physical = map->stripes[num].physical;
3111 nstripes = div_u64(length, map->stripe_len);
3112 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3113 offset = map->stripe_len * num;
3114 increment = map->stripe_len * map->num_stripes;
3116 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3117 int factor = map->num_stripes / map->sub_stripes;
3118 offset = map->stripe_len * (num / map->sub_stripes);
3119 increment = map->stripe_len * factor;
3120 mirror_num = num % map->sub_stripes + 1;
3121 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3122 increment = map->stripe_len;
3123 mirror_num = num % map->num_stripes + 1;
3124 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3125 increment = map->stripe_len;
3126 mirror_num = num % map->num_stripes + 1;
3127 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3128 get_raid56_logic_offset(physical, num, map, &offset, NULL);
3129 increment = map->stripe_len * nr_data_stripes(map);
3132 increment = map->stripe_len;
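/*
 * Example of the mapping above: RAID0 over 4 devices scrubs every 4th
 * stripe of a device, so increment = 4 * stripe_len and device num starts
 * at offset num * stripe_len.  RAID10 with 4 stripes and 2 sub_stripes
 * gives factor = 2, so device 3 starts at offset 1 * stripe_len, advances
 * by 2 * stripe_len and scrubs mirror 2.
 */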
3136 path = btrfs_alloc_path();
3140 ppath = btrfs_alloc_path();
3142 btrfs_free_path(path);
3147 * work on commit root. The related disk blocks are static as
3148 * long as COW is applied. This means it is safe to rewrite
3149 * them to repair disk errors without any race conditions
3151 path->search_commit_root = 1;
3152 path->skip_locking = 1;
3154 ppath->search_commit_root = 1;
3155 ppath->skip_locking = 1;
3157 * trigger the readahead for the extent tree and csum tree and wait for
3158 * completion. During readahead, the scrub is officially paused
3159 * to not hold off transaction commits
3161 logical = base + offset;
3162 physical_end = physical + nstripes * map->stripe_len;
3163 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3164 get_raid56_logic_offset(physical_end, num,
3165 map, &logic_end, NULL);
3168 logic_end = logical + increment * nstripes;
3170 wait_event(sctx->list_wait,
3171 atomic_read(&sctx->bios_in_flight) == 0);
3172 scrub_blocked_if_needed(fs_info);
3174 /* FIXME it might be better to start readahead at commit root */
3175 key.objectid = logical;
3176 key.type = BTRFS_EXTENT_ITEM_KEY;
3177 key.offset = (u64)0;
3178 key_end.objectid = logic_end;
3179 key_end.type = BTRFS_METADATA_ITEM_KEY;
3180 key_end.offset = (u64)-1;
3181 reada1 = btrfs_reada_add(root, &key, &key_end);
3183 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3184 key.type = BTRFS_EXTENT_CSUM_KEY;
3185 key.offset = logical;
3186 key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3187 key_end.type = BTRFS_EXTENT_CSUM_KEY;
3188 key_end.offset = logic_end;
3189 reada2 = btrfs_reada_add(csum_root, &key, &key_end);
3191 if (!IS_ERR(reada1))
3192 btrfs_reada_wait(reada1);
3193 if (!IS_ERR(reada2))
3194 btrfs_reada_wait(reada2);
3198 * collect all data csums for the stripe to avoid seeking during
3199 * the scrub. This might currently (crc32) end up being about 1MB
3201 blk_start_plug(&plug);
3204 * now find all extents for each stripe and scrub them
3207 while (physical < physical_end) {
3211 if (atomic_read(&fs_info->scrub_cancel_req) ||
3212 atomic_read(&sctx->cancel_req)) {
3217 * check to see if we have to pause
3219 if (atomic_read(&fs_info->scrub_pause_req)) {
3220 /* push queued extents */
3221 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
3223 mutex_lock(&sctx->wr_ctx.wr_lock);
3224 scrub_wr_submit(sctx);
3225 mutex_unlock(&sctx->wr_ctx.wr_lock);
3226 wait_event(sctx->list_wait,
3227 atomic_read(&sctx->bios_in_flight) == 0);
3228 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3229 scrub_blocked_if_needed(fs_info);
3232 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3233 ret = get_raid56_logic_offset(physical, num, map,
3238 /* it is a parity stripe */
3239 stripe_logical += base;
3240 stripe_end = stripe_logical + increment;
3241 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3242 ppath, stripe_logical,
3250 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3251 key.type = BTRFS_METADATA_ITEM_KEY;
3253 key.type = BTRFS_EXTENT_ITEM_KEY;
3254 key.objectid = logical;
3255 key.offset = (u64)-1;
3257 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3262 ret = btrfs_previous_extent_item(root, path, 0);
3266 /* there's no smaller item, so stick with the larger one */
3268 btrfs_release_path(path);
3269 ret = btrfs_search_slot(NULL, root, &key,
3281 slot = path->slots[0];
3282 if (slot >= btrfs_header_nritems(l)) {
3283 ret = btrfs_next_leaf(root, path);
3292 btrfs_item_key_to_cpu(l, &key, slot);
3294 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3295 key.type != BTRFS_METADATA_ITEM_KEY)
3298 if (key.type == BTRFS_METADATA_ITEM_KEY)
3299 bytes = fs_info->nodesize;
3303 if (key.objectid + bytes <= logical)
3306 if (key.objectid >= logical + map->stripe_len) {
3307 /* out of this device extent */
3308 if (key.objectid >= logic_end)
3313 extent = btrfs_item_ptr(l, slot,
3314 struct btrfs_extent_item);
3315 flags = btrfs_extent_flags(l, extent);
3316 generation = btrfs_extent_generation(l, extent);
3318 if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3319 (key.objectid < logical ||
3320 key.objectid + bytes >
3321 logical + map->stripe_len)) {
3323 "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
3324 key.objectid, logical);
3325 spin_lock(&sctx->stat_lock);
3326 sctx->stat.uncorrectable_errors++;
3327 spin_unlock(&sctx->stat_lock);
3332 extent_logical = key.objectid;
3336 * trim extent to this stripe
3338 if (extent_logical < logical) {
3339 extent_len -= logical - extent_logical;
3340 extent_logical = logical;
3342 if (extent_logical + extent_len >
3343 logical + map->stripe_len) {
3344 extent_len = logical + map->stripe_len -
3348 extent_physical = extent_logical - logical + physical;
3349 extent_dev = scrub_dev;
3350 extent_mirror_num = mirror_num;
3352 scrub_remap_extent(fs_info, extent_logical,
3353 extent_len, &extent_physical,
3355 &extent_mirror_num);
3357 ret = btrfs_lookup_csums_range(csum_root,
3361 &sctx->csum_list, 1);
3365 ret = scrub_extent(sctx, extent_logical, extent_len,
3366 extent_physical, extent_dev, flags,
3367 generation, extent_mirror_num,
3368 extent_logical - logical + physical);
3370 scrub_free_csums(sctx);
3375 if (extent_logical + extent_len <
3376 key.objectid + bytes) {
3377 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3379 * loop until we find next data stripe
3380 * or we have finished all stripes.
3383 physical += map->stripe_len;
3384 ret = get_raid56_logic_offset(physical,
3389 if (ret && physical < physical_end) {
3390 stripe_logical += base;
3391 stripe_end = stripe_logical +
3393 ret = scrub_raid56_parity(sctx,
3394 map, scrub_dev, ppath,
3402 physical += map->stripe_len;
3403 logical += increment;
3405 if (logical < key.objectid + bytes) {
3410 if (physical >= physical_end) {
3418 btrfs_release_path(path);
3420 logical += increment;
3421 physical += map->stripe_len;
3422 spin_lock(&sctx->stat_lock);
3424 sctx->stat.last_physical = map->stripes[num].physical +
3427 sctx->stat.last_physical = physical;
3428 spin_unlock(&sctx->stat_lock);
3433 /* push queued extents */
3435 mutex_lock(&sctx->wr_ctx.wr_lock);
3436 scrub_wr_submit(sctx);
3437 mutex_unlock(&sctx->wr_ctx.wr_lock);
3439 blk_finish_plug(&plug);
3440 btrfs_free_path(path);
3441 btrfs_free_path(ppath);
3442 return ret < 0 ? ret : 0;
3445 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
3446 struct btrfs_device *scrub_dev,
3447 u64 chunk_offset, u64 length,
3449 struct btrfs_block_group_cache *cache,
3452 struct btrfs_fs_info *fs_info = sctx->fs_info;
3453 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
3454 struct map_lookup *map;
3455 struct extent_map *em;
3459 read_lock(&map_tree->map_tree.lock);
3460 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3461 read_unlock(&map_tree->map_tree.lock);
3465 * Might have been an unused block group deleted by the cleaner
3466 * kthread or relocation.
3468 spin_lock(&cache->lock);
3469 if (!cache->removed)
3471 spin_unlock(&cache->lock);
3476 map = em->map_lookup;
3477 if (em->start != chunk_offset)
3480 if (em->len < length)
3483 for (i = 0; i < map->num_stripes; ++i) {
3484 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
3485 map->stripes[i].physical == dev_offset) {
3486 ret = scrub_stripe(sctx, map, scrub_dev, i,
3487 chunk_offset, length,
3494 free_extent_map(em);
3499 static noinline_for_stack
3500 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3501 struct btrfs_device *scrub_dev, u64 start, u64 end,
3504 struct btrfs_dev_extent *dev_extent = NULL;
3505 struct btrfs_path *path;
3506 struct btrfs_fs_info *fs_info = sctx->fs_info;
3507 struct btrfs_root *root = fs_info->dev_root;
3513 struct extent_buffer *l;
3514 struct btrfs_key key;
3515 struct btrfs_key found_key;
3516 struct btrfs_block_group_cache *cache;
3517 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
3519 path = btrfs_alloc_path();
3523 path->reada = READA_FORWARD;
3524 path->search_commit_root = 1;
3525 path->skip_locking = 1;
3527 key.objectid = scrub_dev->devid;
3529 key.type = BTRFS_DEV_EXTENT_KEY;
3532 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3536 if (path->slots[0] >=
3537 btrfs_header_nritems(path->nodes[0])) {
3538 ret = btrfs_next_leaf(root, path);
3551 slot = path->slots[0];
3553 btrfs_item_key_to_cpu(l, &found_key, slot);
3555 if (found_key.objectid != scrub_dev->devid)
3558 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
3561 if (found_key.offset >= end)
3564 if (found_key.offset < key.offset)
3567 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3568 length = btrfs_dev_extent_length(l, dev_extent);
3570 if (found_key.offset + length <= start)
3573 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3576 * get a reference on the corresponding block group to prevent
3577 * the chunk from going away while we scrub it
3579 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3581 /* some chunks are removed but not committed to disk yet,
3582 * continue scrubbing */
3587 * we need to call btrfs_inc_block_group_ro() with scrubs_paused,
3588 * to avoid deadlock caused by:
3589 * btrfs_inc_block_group_ro()
3590 * -> btrfs_wait_for_commit()
3591 * -> btrfs_commit_transaction()
3592 * -> btrfs_scrub_pause()
3594 scrub_pause_on(fs_info);
3595 ret = btrfs_inc_block_group_ro(fs_info, cache);
3596 if (!ret && is_dev_replace) {
3598 * If we are doing a device replace, wait for any tasks
3599 * that started delalloc right before we set the block
3600 * group to RO mode, as they might have just allocated
3601 * an extent from it or decided they could do a nocow
3602 * write. And if any such tasks did that, wait for their
3603 * ordered extents to complete and then commit the
3604 * current transaction, so that we can later see the new
3605 * extent items in the extent tree - the ordered extents
3606 * create delayed data references (for cow writes) when
3607 * they complete, which will be run and insert the
3608 * corresponding extent items into the extent tree when
3609 * we commit the transaction they used when running
3610 * inode.c:btrfs_finish_ordered_io(). We later use
3611 * the commit root of the extent tree to find extents
3612 * to copy from the srcdev into the tgtdev, and we don't
3613 * want to miss any new extents.
3615 btrfs_wait_block_group_reservations(cache);
3616 btrfs_wait_nocow_writers(cache);
3617 ret = btrfs_wait_ordered_roots(fs_info, -1,
3618 cache->key.objectid,
3621 struct btrfs_trans_handle *trans;
3623 trans = btrfs_join_transaction(root);
3625 ret = PTR_ERR(trans);
3627 ret = btrfs_commit_transaction(trans);
3629 scrub_pause_off(fs_info);
3630 btrfs_put_block_group(cache);
3635 scrub_pause_off(fs_info);
3639 } else if (ret == -ENOSPC) {
3641 * btrfs_inc_block_group_ro return -ENOSPC when it
3642 * failed in creating new chunk for metadata.
3643 * It is not a problem for scrub/replace, because
3644 * metadata are always cowed, and our scrub paused
3645 * commit_transactions.
3650 "failed setting block group ro, ret=%d\n",
3652 btrfs_put_block_group(cache);
3656 btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
3657 dev_replace->cursor_right = found_key.offset + length;
3658 dev_replace->cursor_left = found_key.offset;
3659 dev_replace->item_needs_writeback = 1;
3660 btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
3661 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
3662 found_key.offset, cache, is_dev_replace);
3665 * flush, submit all pending read and write bios, and afterwards wait for them.
3667 * Note that in the dev replace case, a read request causes
3668 * write requests that are submitted in the read completion
3669 * worker. Therefore in the current situation, it is required
3670 * that all write requests are flushed, so that all read and
3671 * write requests are really completed when bios_in_flight changes to 0.
3674 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
3676 mutex_lock(&sctx->wr_ctx.wr_lock);
3677 scrub_wr_submit(sctx);
3678 mutex_unlock(&sctx->wr_ctx.wr_lock);
3680 wait_event(sctx->list_wait,
3681 atomic_read(&sctx->bios_in_flight) == 0);
3683 scrub_pause_on(fs_info);
3686 * must be called before we decrease @scrub_paused.
3687 * make sure we don't block transaction commit while
3688 * we are waiting for pending workers to finish.
3690 wait_event(sctx->list_wait,
3691 atomic_read(&sctx->workers_pending) == 0);
3692 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3694 scrub_pause_off(fs_info);
3696 btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
3697 dev_replace->cursor_left = dev_replace->cursor_right;
3698 dev_replace->item_needs_writeback = 1;
3699 btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
3702 btrfs_dec_block_group_ro(cache);
3705 * We might have prevented the cleaner kthread from deleting
3706 * this block group if it was already unused because we raced
3707 * and set it to RO mode first. So add it back to the unused
3708 * list, otherwise it might not ever be deleted unless a manual
3709 * balance is triggered or it becomes used and unused again.
3711 spin_lock(&cache->lock);
3712 if (!cache->removed && !cache->ro && cache->reserved == 0 &&
3713 btrfs_block_group_used(&cache->item) == 0) {
3714 spin_unlock(&cache->lock);
3715 spin_lock(&fs_info->unused_bgs_lock);
3716 if (list_empty(&cache->bg_list)) {
3717 btrfs_get_block_group(cache);
3718 list_add_tail(&cache->bg_list,
3719 &fs_info->unused_bgs);
3721 spin_unlock(&fs_info->unused_bgs_lock);
3723 spin_unlock(&cache->lock);
3726 btrfs_put_block_group(cache);
3729 if (is_dev_replace &&
3730 atomic64_read(&dev_replace->num_write_errors) > 0) {
3734 if (sctx->stat.malloc_errors > 0) {
3739 key.offset = found_key.offset + length;
3740 btrfs_release_path(path);
3743 btrfs_free_path(path);
3748 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3749 struct btrfs_device *scrub_dev)
3755 struct btrfs_fs_info *fs_info = sctx->fs_info;
3757 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3760 /* Seed devices of a new filesystem have their own generation. */
3761 if (scrub_dev->fs_devices != fs_info->fs_devices)
3762 gen = scrub_dev->generation;
3764 gen = fs_info->last_trans_committed;
3766 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
3767 bytenr = btrfs_sb_offset(i);
3768 if (bytenr + BTRFS_SUPER_INFO_SIZE >
3769 scrub_dev->commit_total_bytes)
3772 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
3773 scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
3778 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3784 * get a reference count on fs_info->scrub_workers. start worker if necessary
3786 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
3789 unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
3790 int max_active = fs_info->thread_pool_size;
3792 if (fs_info->scrub_workers_refcnt == 0) {
3794 fs_info->scrub_workers =
3795 btrfs_alloc_workqueue(fs_info, "scrub", flags,
3798 fs_info->scrub_workers =
3799 btrfs_alloc_workqueue(fs_info, "scrub", flags,
3801 if (!fs_info->scrub_workers)
3802 goto fail_scrub_workers;
3804 fs_info->scrub_wr_completion_workers =
3805 btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
3807 if (!fs_info->scrub_wr_completion_workers)
3808 goto fail_scrub_wr_completion_workers;
3810 fs_info->scrub_nocow_workers =
3811 btrfs_alloc_workqueue(fs_info, "scrubnc", flags, 1, 0);
3812 if (!fs_info->scrub_nocow_workers)
3813 goto fail_scrub_nocow_workers;
3814 fs_info->scrub_parity_workers =
3815 btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
3817 if (!fs_info->scrub_parity_workers)
3818 goto fail_scrub_parity_workers;
3820 ++fs_info->scrub_workers_refcnt;
3823 fail_scrub_parity_workers:
3824 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
3825 fail_scrub_nocow_workers:
3826 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3827 fail_scrub_wr_completion_workers:
3828 btrfs_destroy_workqueue(fs_info->scrub_workers);
3833 static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
3835 if (--fs_info->scrub_workers_refcnt == 0) {
3836 btrfs_destroy_workqueue(fs_info->scrub_workers);
3837 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3838 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
3839 btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
3841 WARN_ON(fs_info->scrub_workers_refcnt < 0);
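/*
 * btrfs_scrub_dev() is the common entry point for both scrub and dev
 * replace.  It validates the size assumptions (nodesize and sectorsize vs.
 * PAGE_SIZE), looks up the device, sets up the scrub context and worker
 * queues, scrubs the super blocks (scrub only) and then walks the device
 * extents via scrub_enumerate_chunks(), finally waiting for all in-flight
 * bios and pending workers before reporting progress and tearing down.
 */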
3844 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3845 u64 end, struct btrfs_scrub_progress *progress,
3846 int readonly, int is_dev_replace)
3848 struct scrub_ctx *sctx;
3850 struct btrfs_device *dev;
3851 struct rcu_string *name;
3853 if (btrfs_fs_closing(fs_info))
3856 if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
3858 * In this case scrub is unable to calculate the checksum,
3859 * given the way scrub is implemented. Do not handle this
3860 * situation at all because it won't ever happen.
3863 "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
3869 if (fs_info->sectorsize != PAGE_SIZE) {
3870 /* not supported for data w/o checksums */
3871 btrfs_err_rl(fs_info,
3872 "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
3873 fs_info->sectorsize, PAGE_SIZE);
3877 if (fs_info->nodesize >
3878 PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
3879 fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
3881 * would exhaust the array bounds of pagev member in
3882 * struct scrub_block
3885 "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
3887 SCRUB_MAX_PAGES_PER_BLOCK,
3888 fs_info->sectorsize,
3889 SCRUB_MAX_PAGES_PER_BLOCK);
3894 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3895 dev = btrfs_find_device(fs_info, devid, NULL, NULL);
3896 if (!dev || (dev->missing && !is_dev_replace)) {
3897 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3901 if (!is_dev_replace && !readonly && !dev->writeable) {
3902 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3904 name = rcu_dereference(dev->name);
3905 btrfs_err(fs_info, "scrub: device %s is not writable",
3911 mutex_lock(&fs_info->scrub_lock);
3912 if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
3913 mutex_unlock(&fs_info->scrub_lock);
3914 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3918 btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
3919 if (dev->scrub_device ||
3921 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
3922 btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
3923 mutex_unlock(&fs_info->scrub_lock);
3924 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3925 return -EINPROGRESS;
3927 btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
3929 ret = scrub_workers_get(fs_info, is_dev_replace);
3931 mutex_unlock(&fs_info->scrub_lock);
3932 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3936 sctx = scrub_setup_ctx(dev, is_dev_replace);
3938 mutex_unlock(&fs_info->scrub_lock);
3939 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3940 scrub_workers_put(fs_info);
3941 return PTR_ERR(sctx);
3943 sctx->readonly = readonly;
3944 dev->scrub_device = sctx;
3945 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3948 * By checking @scrub_pause_req here, we can avoid
3949 * a race between committing a transaction and scrubbing.
3951 __scrub_blocked_if_needed(fs_info);
3952 atomic_inc(&fs_info->scrubs_running);
3953 mutex_unlock(&fs_info->scrub_lock);
3955 if (!is_dev_replace) {
3957 * by holding the device list mutex, we can
3958 * kick off writing super in log tree sync.
3960 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3961 ret = scrub_supers(sctx, dev);
3962 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3966 ret = scrub_enumerate_chunks(sctx, dev, start, end,
3969 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3970 atomic_dec(&fs_info->scrubs_running);
3971 wake_up(&fs_info->scrub_pause_wait);
3973 wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
3976 memcpy(progress, &sctx->stat, sizeof(*progress));
3978 mutex_lock(&fs_info->scrub_lock);
3979 dev->scrub_device = NULL;
3980 scrub_workers_put(fs_info);
3981 mutex_unlock(&fs_info->scrub_lock);
3983 scrub_put_ctx(sctx);
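/*
 * Pause/continue handshake used by the transaction commit path: pause bumps
 * scrub_pause_req and waits until every running scrub has parked itself
 * (scrubs_paused == scrubs_running); continue drops the request again and
 * wakes the waiters on scrub_pause_wait.
 */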
3988 void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
3990 mutex_lock(&fs_info->scrub_lock);
3991 atomic_inc(&fs_info->scrub_pause_req);
3992 while (atomic_read(&fs_info->scrubs_paused) !=
3993 atomic_read(&fs_info->scrubs_running)) {
3994 mutex_unlock(&fs_info->scrub_lock);
3995 wait_event(fs_info->scrub_pause_wait,
3996 atomic_read(&fs_info->scrubs_paused) ==
3997 atomic_read(&fs_info->scrubs_running));
3998 mutex_lock(&fs_info->scrub_lock);
4000 mutex_unlock(&fs_info->scrub_lock);
4003 void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
4005 atomic_dec(&fs_info->scrub_pause_req);
4006 wake_up(&fs_info->scrub_pause_wait);
4009 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
4011 mutex_lock(&fs_info->scrub_lock);
4012 if (!atomic_read(&fs_info->scrubs_running)) {
4013 mutex_unlock(&fs_info->scrub_lock);
4017 atomic_inc(&fs_info->scrub_cancel_req);
4018 while (atomic_read(&fs_info->scrubs_running)) {
4019 mutex_unlock(&fs_info->scrub_lock);
4020 wait_event(fs_info->scrub_pause_wait,
4021 atomic_read(&fs_info->scrubs_running) == 0);
4022 mutex_lock(&fs_info->scrub_lock);
4024 atomic_dec(&fs_info->scrub_cancel_req);
4025 mutex_unlock(&fs_info->scrub_lock);
4030 int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
4031 struct btrfs_device *dev)
4033 struct scrub_ctx *sctx;
4035 mutex_lock(&fs_info->scrub_lock);
4036 sctx = dev->scrub_device;
4038 mutex_unlock(&fs_info->scrub_lock);
4041 atomic_inc(&sctx->cancel_req);
4042 while (dev->scrub_device) {
4043 mutex_unlock(&fs_info->scrub_lock);
4044 wait_event(fs_info->scrub_pause_wait,
4045 dev->scrub_device == NULL);
4046 mutex_lock(&fs_info->scrub_lock);
4048 mutex_unlock(&fs_info->scrub_lock);
4053 int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
4054 struct btrfs_scrub_progress *progress)
4056 struct btrfs_device *dev;
4057 struct scrub_ctx *sctx = NULL;
4059 mutex_lock(&fs_info->fs_devices->device_list_mutex);
4060 dev = btrfs_find_device(fs_info, devid, NULL, NULL);
4062 sctx = dev->scrub_device;
4064 memcpy(progress, &sctx->stat, sizeof(*progress));
4065 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4067 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
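/*
 * scrub_remap_extent() is used in the dev replace case: it maps the logical
 * extent with btrfs_map_block(READ) and redirects the read to the physical
 * location, device and mirror number of the first returned stripe, so the
 * data can be read from whichever mirror the mapping returns rather than
 * strictly from the source device.
 */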
4070 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
4071 u64 extent_logical, u64 extent_len,
4072 u64 *extent_physical,
4073 struct btrfs_device **extent_dev,
4074 int *extent_mirror_num)
4077 struct btrfs_bio *bbio = NULL;
4080 mapped_length = extent_len;
4081 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
4082 &mapped_length, &bbio, 0);
4083 if (ret || !bbio || mapped_length < extent_len ||
4084 !bbio->stripes[0].dev->bdev) {
4085 btrfs_put_bbio(bbio);
4089 *extent_physical = bbio->stripes[0].physical;
4090 *extent_mirror_num = bbio->mirror_num;
4091 *extent_dev = bbio->stripes[0].dev;
4092 btrfs_put_bbio(bbio);
4095 static int scrub_setup_wr_ctx(struct scrub_wr_ctx *wr_ctx,
4096 struct btrfs_device *dev,
4099 WARN_ON(wr_ctx->wr_curr_bio != NULL);
4101 mutex_init(&wr_ctx->wr_lock);
4102 wr_ctx->wr_curr_bio = NULL;
4103 if (!is_dev_replace)
4106 WARN_ON(!dev->bdev);
4107 wr_ctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
4108 wr_ctx->tgtdev = dev;
4109 atomic_set(&wr_ctx->flush_all_writes, 0);
4113 static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
4115 mutex_lock(&wr_ctx->wr_lock);
4116 kfree(wr_ctx->wr_curr_bio);
4117 wr_ctx->wr_curr_bio = NULL;
4118 mutex_unlock(&wr_ctx->wr_lock);
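/*
 * Nocow path for dev replace: data extents without checksums have nothing
 * to verify against, so scrub_extent() hands them to copy_nocow_pages()
 * instead of the normal read path.  A worker then looks up all inodes
 * referencing the extent, reads the pages through the page cache and writes
 * them to the replace target with write_page_nocow(), re-checking with
 * check_extent_to_block() that the range still maps to the same logical
 * block.
 */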
4121 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
4122 int mirror_num, u64 physical_for_dev_replace)
4124 struct scrub_copy_nocow_ctx *nocow_ctx;
4125 struct btrfs_fs_info *fs_info = sctx->fs_info;
4127 nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
4129 spin_lock(&sctx->stat_lock);
4130 sctx->stat.malloc_errors++;
4131 spin_unlock(&sctx->stat_lock);
4135 scrub_pending_trans_workers_inc(sctx);
4137 nocow_ctx->sctx = sctx;
4138 nocow_ctx->logical = logical;
4139 nocow_ctx->len = len;
4140 nocow_ctx->mirror_num = mirror_num;
4141 nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
4142 btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
4143 copy_nocow_pages_worker, NULL, NULL);
4144 INIT_LIST_HEAD(&nocow_ctx->inodes);
4145 btrfs_queue_work(fs_info->scrub_nocow_workers,
4151 static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
4153 struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
4154 struct scrub_nocow_inode *nocow_inode;
4156 nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
4159 nocow_inode->inum = inum;
4160 nocow_inode->offset = offset;
4161 nocow_inode->root = root;
4162 list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
4166 #define COPY_COMPLETE 1
4168 static void copy_nocow_pages_worker(struct btrfs_work *work)
4170 struct scrub_copy_nocow_ctx *nocow_ctx =
4171 container_of(work, struct scrub_copy_nocow_ctx, work);
4172 struct scrub_ctx *sctx = nocow_ctx->sctx;
4173 struct btrfs_fs_info *fs_info = sctx->fs_info;
4174 struct btrfs_root *root = fs_info->extent_root;
4175 u64 logical = nocow_ctx->logical;
4176 u64 len = nocow_ctx->len;
4177 int mirror_num = nocow_ctx->mirror_num;
4178 u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
4180 struct btrfs_trans_handle *trans = NULL;
4181 struct btrfs_path *path;
4182 int not_written = 0;
4184 path = btrfs_alloc_path();
4186 spin_lock(&sctx->stat_lock);
4187 sctx->stat.malloc_errors++;
4188 spin_unlock(&sctx->stat_lock);
4193 trans = btrfs_join_transaction(root);
4194 if (IS_ERR(trans)) {
4199 ret = iterate_inodes_from_logical(logical, fs_info, path,
4200 record_inode_for_nocow, nocow_ctx);
4201 if (ret != 0 && ret != -ENOENT) {
4203 "iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d",
4204 logical, physical_for_dev_replace, len, mirror_num,
4210 btrfs_end_transaction(trans);
4212 while (!list_empty(&nocow_ctx->inodes)) {
4213 struct scrub_nocow_inode *entry;
4214 entry = list_first_entry(&nocow_ctx->inodes,
4215 struct scrub_nocow_inode,
4217 list_del_init(&entry->list);
4218 ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
4219 entry->root, nocow_ctx);
4221 if (ret == COPY_COMPLETE) {
4229 while (!list_empty(&nocow_ctx->inodes)) {
4230 struct scrub_nocow_inode *entry;
4231 entry = list_first_entry(&nocow_ctx->inodes,
4232 struct scrub_nocow_inode,
4234 list_del_init(&entry->list);
4237 if (trans && !IS_ERR(trans))
4238 btrfs_end_transaction(trans);
4240 btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
4241 num_uncorrectable_read_errors);
4243 btrfs_free_path(path);
4246 scrub_pending_trans_workers_dec(sctx);
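/*
 * check_extent_to_block() locks the file range and verifies that it is
 * still backed by the logical block being copied: if an ordered extent is
 * pending or the extent map no longer covers @logical, it returns a
 * positive value and the caller treats it as "skip this inode" rather than
 * as an error.
 */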
4249 static int check_extent_to_block(struct btrfs_inode *inode, u64 start, u64 len,
4252 struct extent_state *cached_state = NULL;
4253 struct btrfs_ordered_extent *ordered;
4254 struct extent_io_tree *io_tree;
4255 struct extent_map *em;
4256 u64 lockstart = start, lockend = start + len - 1;
4259 io_tree = &inode->io_tree;
4261 lock_extent_bits(io_tree, lockstart, lockend, &cached_state);
4262 ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
4264 btrfs_put_ordered_extent(ordered);
4269 em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
4276 * This extent does not actually cover the logical extent anymore,
4277 * move on to the next inode.
4279 if (em->block_start > logical ||
4280 em->block_start + em->block_len < logical + len) {
4281 free_extent_map(em);
4285 free_extent_map(em);
4288 unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
4293 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
4294 struct scrub_copy_nocow_ctx *nocow_ctx)
4296 struct btrfs_fs_info *fs_info = nocow_ctx->sctx->fs_info;
4297 struct btrfs_key key;
4298 struct inode *inode;
4300 struct btrfs_root *local_root;
4301 struct extent_io_tree *io_tree;
4302 u64 physical_for_dev_replace;
4303 u64 nocow_ctx_logical;
4304 u64 len = nocow_ctx->len;
4305 unsigned long index;
4310 key.objectid = root;
4311 key.type = BTRFS_ROOT_ITEM_KEY;
4312 key.offset = (u64)-1;
4314 srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
4316 local_root = btrfs_read_fs_root_no_name(fs_info, &key);
4317 if (IS_ERR(local_root)) {
4318 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
4319 return PTR_ERR(local_root);
4322 key.type = BTRFS_INODE_ITEM_KEY;
4323 key.objectid = inum;
4325 inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
4326 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
4328 return PTR_ERR(inode);
4330 /* Avoid truncate/dio/punch hole.. */
4332 inode_dio_wait(inode);
4334 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
4335 io_tree = &BTRFS_I(inode)->io_tree;
4336 nocow_ctx_logical = nocow_ctx->logical;
4338 ret = check_extent_to_block(BTRFS_I(inode), offset, len,
4341 ret = ret > 0 ? 0 : ret;
4345 while (len >= PAGE_SIZE) {
4346 index = offset >> PAGE_SHIFT;
4348 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
4350 btrfs_err(fs_info, "find_or_create_page() failed");
4355 if (PageUptodate(page)) {
4356 if (PageDirty(page))
4359 ClearPageError(page);
4360 err = extent_read_full_page(io_tree, page,
4362 nocow_ctx->mirror_num);
4370 * If the page has been removed from the page cache,
4371 * the data on it is meaningless, because it may be
4372 * the old one; the new data may have been written into a new
4373 * page in the page cache.
4375 if (page->mapping != inode->i_mapping) {
4380 if (!PageUptodate(page)) {
4386 ret = check_extent_to_block(BTRFS_I(inode), offset, len,
4389 ret = ret > 0 ? 0 : ret;
4393 err = write_page_nocow(nocow_ctx->sctx,
4394 physical_for_dev_replace, page);
4404 offset += PAGE_SIZE;
4405 physical_for_dev_replace += PAGE_SIZE;
4406 nocow_ctx_logical += PAGE_SIZE;
4409 ret = COPY_COMPLETE;
4411 inode_unlock(inode);
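/*
 * write_page_nocow() writes a single page synchronously to the replace
 * target device: it builds a one-page bio at physical_for_dev_replace,
 * submits it with REQ_OP_WRITE | REQ_SYNC and records a write error in the
 * device statistics if the page cannot be added or the bio fails.
 */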
4416 static int write_page_nocow(struct scrub_ctx *sctx,
4417 u64 physical_for_dev_replace, struct page *page)
4420 struct btrfs_device *dev;
4423 dev = sctx->wr_ctx.tgtdev;
4427 btrfs_warn_rl(dev->fs_info,
4428 "scrub write_page_nocow(bdev == NULL) is unexpected");
4431 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
4433 spin_lock(&sctx->stat_lock);
4434 sctx->stat.malloc_errors++;
4435 spin_unlock(&sctx->stat_lock);
4438 bio->bi_iter.bi_size = 0;
4439 bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
4440 bio->bi_bdev = dev->bdev;
4441 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
4442 ret = bio_add_page(bio, page, PAGE_SIZE, 0);
4443 if (ret != PAGE_SIZE) {
4446 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
4450 if (btrfsic_submit_bio_wait(bio))
4451 goto leave_with_eio;