git.karo-electronics.de Git - linux-beck.git/blobdiff - drivers/md/raid10.c
md/raid10: fix conversion from RAID0 to RAID10
[linux-beck.git] / drivers / md / raid10.c
index 32e282f4c83c3aa2bfe7e911327b4172cf8dc22d..b8d76b1fba6457dd722410d8d82d752c67f84bba 100644 (file)
@@ -674,7 +674,7 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
 
 /**
  *     raid10_mergeable_bvec -- tell bio layer if a two requests can be merged
- *     @q: request queue
+ *     @mddev: the md device
  *     @bvm: properties of new bio
  *     @biovec: the request that could be merged to it.
  *
@@ -682,11 +682,10 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
  *     This requires checking for end-of-chunk if near_copies != raid_disks,
  *     and for subordinate merge_bvec_fns if merge_check_needed.
  */
-static int raid10_mergeable_bvec(struct request_queue *q,
+static int raid10_mergeable_bvec(struct mddev *mddev,
                                 struct bvec_merge_data *bvm,
                                 struct bio_vec *biovec)
 {
-       struct mddev *mddev = q->queuedata;
        struct r10conf *conf = mddev->private;
        sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
        int max;
@@ -910,7 +909,7 @@ retry:
        return rdev;
 }
 
-int md_raid10_congested(struct mddev *mddev, int bits)
+static int raid10_congested(struct mddev *mddev, int bits)
 {
        struct r10conf *conf = mddev->private;
        int i, ret = 0;
@@ -934,15 +933,6 @@ int md_raid10_congested(struct mddev *mddev, int bits)
        rcu_read_unlock();
        return ret;
 }
-EXPORT_SYMBOL_GPL(md_raid10_congested);
-
-static int raid10_congested(void *data, int bits)
-{
-       struct mddev *mddev = data;
-
-       return mddev_congested(mddev, bits) ||
-               md_raid10_congested(mddev, bits);
-}
 
 static void flush_pending_writes(struct r10conf *conf)
 {
@@ -3757,8 +3747,6 @@ static int run(struct mddev *mddev)
        if (mddev->queue) {
                int stripe = conf->geo.raid_disks *
                        ((mddev->chunk_sectors << 9) / PAGE_SIZE);
-               mddev->queue->backing_dev_info.congested_fn = raid10_congested;
-               mddev->queue->backing_dev_info.congested_data = mddev;
 
                /* Calculate max read-ahead size.
                 * We need to readahead at least twice a whole stripe....
@@ -3767,7 +3755,6 @@ static int run(struct mddev *mddev)
                stripe /= conf->geo.near_copies;
                if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
                        mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
-               blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
        }
 
        if (md_integrity_register(mddev))
@@ -3811,17 +3798,9 @@ out:
        return -EIO;
 }
 
-static int stop(struct mddev *mddev)
+static void raid10_free(struct mddev *mddev, void *priv)
 {
-       struct r10conf *conf = mddev->private;
-
-       raise_barrier(conf, 0);
-       lower_barrier(conf);
-
-       md_unregister_thread(&mddev->thread);
-       if (mddev->queue)
-               /* the unplug fn references 'conf'*/
-               blk_sync_queue(mddev->queue);
+       struct r10conf *conf = priv;
 
        if (conf->r10bio_pool)
                mempool_destroy(conf->r10bio_pool);
@@ -3830,8 +3809,6 @@ static int stop(struct mddev *mddev)
        kfree(conf->mirrors_old);
        kfree(conf->mirrors_new);
        kfree(conf);
-       mddev->private = NULL;
-       return 0;
 }
 
 static void raid10_quiesce(struct mddev *mddev, int state)
@@ -3895,7 +3872,7 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
        return 0;
 }
 
-static void *raid10_takeover_raid0(struct mddev *mddev)
+static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
 {
        struct md_rdev *rdev;
        struct r10conf *conf;
@@ -3905,6 +3882,7 @@ static void *raid10_takeover_raid0(struct mddev *mddev)
                       mdname(mddev));
                return ERR_PTR(-EINVAL);
        }
+       sector_div(size, devs);
 
        /* Set new parameters */
        mddev->new_level = 10;
@@ -3915,12 +3893,15 @@ static void *raid10_takeover_raid0(struct mddev *mddev)
        mddev->raid_disks *= 2;
        /* make sure it will be not marked as dirty */
        mddev->recovery_cp = MaxSector;
+       mddev->dev_sectors = size;
 
        conf = setup_conf(mddev);
        if (!IS_ERR(conf)) {
                rdev_for_each(rdev, mddev)
-                       if (rdev->raid_disk >= 0)
+                       if (rdev->raid_disk >= 0) {
                                rdev->new_raid_disk = rdev->raid_disk * 2;
+                               rdev->sectors = size;
+                       }
                conf->barrier = 1;
        }
 
@@ -3943,7 +3924,9 @@ static void *raid10_takeover(struct mddev *mddev)
                               mdname(mddev));
                        return ERR_PTR(-EINVAL);
                }
-               return raid10_takeover_raid0(mddev);
+               return raid10_takeover_raid0(mddev,
+                       raid0_conf->strip_zone->zone_end,
+                       raid0_conf->strip_zone->nb_dev);
        }
        return ERR_PTR(-EINVAL);
 }
@@ -4713,7 +4696,7 @@ static struct md_personality raid10_personality =
        .owner          = THIS_MODULE,
        .make_request   = make_request,
        .run            = run,
-       .stop           = stop,
+       .free           = raid10_free,
        .status         = status,
        .error_handler  = error,
        .hot_add_disk   = raid10_add_disk,
@@ -4727,6 +4710,8 @@ static struct md_personality raid10_personality =
        .check_reshape  = raid10_check_reshape,
        .start_reshape  = raid10_start_reshape,
        .finish_reshape = raid10_finish_reshape,
+       .congested      = raid10_congested,
+       .mergeable_bvec = raid10_mergeable_bvec,
 };
 
 static int __init raid_init(void)