static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
int need_check)
{
+ struct btrfs_bio *bbio = rbio->bbio;
void *pointers[rbio->real_stripes];
+ DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
int nr_data = rbio->nr_data;
int stripe;
int pagenr;
struct page *q_page = NULL;
struct bio_list bio_list;
struct bio *bio;
+ int is_replace = 0;
int ret;
bio_list_init(&bio_list);
BUG();
}
+ if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
+ is_replace = 1;
+ bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
+ }
+
/*
 * Because the higher layers(scrubber) are unlikely to
 * use this area of the disk again soon, so don't cache
 * it.
 */
goto cleanup;
}
+ if (!is_replace)
+ goto submit_write;
+
+ for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
+ struct page *page;
+
+ page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
+ ret = rbio_add_io_page(rbio, &bio_list, page,
+ bbio->tgtdev_map[rbio->scrubp],
+ pagenr, rbio->stripe_len);
+ if (ret)
+ goto cleanup;
+ }
+
+submit_write:
nr_data = bio_list_size(&bio_list);
if (!nr_data) {
/* Every parity is right */
goto out;
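The additions above queue one extra write per dirty parity page when a device replace is running: is_replace is latched from bbio->num_tgtdevs and tgtdev_map[rbio->scrubp], pbitmap snapshots dbitmap up front (the parity check may clear dbitmap bits for pages that are already correct on the source, but a fresh replace target still needs them), and the second for_each_set_bit() loop resubmits the same stripe pages against the target index. Below is a minimal userspace sketch of that queueing pattern, not kernel code; the names (queue_page, NPAGES, the sample bitmap and map) are illustrative and not part of the patch.

#include <stdio.h>

#define NPAGES   8
#define NSTRIPES 3

static void queue_page(int dev_slot, int pagenr)
{
	printf("queue page %d for device slot %d\n", pagenr, dev_slot);
}

int main(void)
{
	unsigned long dbitmap = 0x2d;		/* dirty parity pages 0, 2, 3, 5 */
	int tgtdev_map[NSTRIPES] = { 0, 0, 3 };	/* stripe 2 mirrors to slot 3 */
	int scrubp = 2;				/* stripe holding the checked parity */
	int pagenr;

	for (pagenr = 0; pagenr < NPAGES; pagenr++) {
		if (!(dbitmap & (1UL << pagenr)))
			continue;
		queue_page(scrubp, pagenr);	/* rewrite on the source device */
		if (tgtdev_map[scrubp])		/* duplicate to the replace target */
			queue_page(tgtdev_map[scrubp], pagenr);
	}
	return 0;
}

The hunk that follows is from the scrub side (presumably scrub_parity_check_and_repair() in fs/btrfs/scrub.c), where the parity range is mapped before the repair I/O is built.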
length = sparity->logic_end - sparity->logic_start + 1;
- ret = btrfs_map_sblock(sctx->dev_root->fs_info, REQ_GET_READ_MIRRORS,
+ ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
sparity->logic_start,
&length, &bbio, 0, &raid_map);
if (ret || !bbio || !raid_map)
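The point of switching the mapping flag from REQ_GET_READ_MIRRORS to WRITE appears to be that only a write-type mapping taken while a device replace is running populates num_tgtdevs and tgtdev_map in the returned btrfs_bio, which is exactly what the finish_parity_scrub() path above tests. A small userspace sketch of that assumed behaviour; fake_bbio, map_block and the stripe counts are illustrative stand-ins, not btrfs's API.

#include <stdio.h>
#include <string.h>

#define MAX_STRIPES 8

struct fake_bbio {
	int num_stripes;
	int num_tgtdevs;
	int tgtdev_map[MAX_STRIPES];	/* 0 = no replace target for that stripe */
};

enum { MAP_READ, MAP_WRITE };

/* Toy stand-in for the mapping call: only WRITE mappings taken during a
 * running replace gain a duplicate entry for the target device. */
static void map_block(int rw, int replace_src, struct fake_bbio *bbio)
{
	memset(bbio, 0, sizeof(*bbio));
	bbio->num_stripes = 3;			/* e.g. data, data, parity */

	if (rw == MAP_WRITE && replace_src >= 0) {
		bbio->tgtdev_map[replace_src] = bbio->num_stripes;
		bbio->num_stripes++;
		bbio->num_tgtdevs = 1;
	}
}

int main(void)
{
	struct fake_bbio bbio;

	map_block(MAP_WRITE, 2, &bbio);
	printf("WRITE map: num_tgtdevs=%d, stripe 2 -> slot %d\n",
	       bbio.num_tgtdevs, bbio.tgtdev_map[2]);

	map_block(MAP_READ, 2, &bbio);
	printf("READ map:  num_tgtdevs=%d\n", bbio.num_tgtdevs);
	return 0;
}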