md: fix some bugs with growing raid5/raid6 arrays.
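
The hunks below mix API updates (request_queue_t becomes struct request_queue,
kmem_cache_create() drops its trailing destructor argument, and the
ASYNC_TX_KMAP_SRC/ASYNC_TX_KMAP_DST flags are no longer passed to async_memcpy())
with genuine expansion fixes in handle_stripe_expansion() and handle_stripe5().
The most visible fix is the per-disk skip test: the old
"i != sh->pd_idx && (r6s && i != r6s->qd_idx)" is false for every disk on RAID5
(where r6s is NULL), so growing a RAID5 array copied no data; the corrected
"i != sh->pd_idx && (!r6s || i != r6s->qd_idx)" skips only the parity disk, and
the Q disk only when one exists.  The sketch below is not part of the patch; it
is a stand-alone user-space illustration of that predicate change, and everything
in it (struct r6_state, the copy_disk_* helpers, the example layout) is invented
for the demonstration.

	#include <stdio.h>
	#include <stddef.h>

	/* Illustrative stand-in for the RAID6 state the test consults. */
	struct r6_state { int qd_idx; };

	/* Old test: for RAID5 (r6s == NULL) the && makes this always false,
	 * so no disk would ever be selected for the expansion copy. */
	static int copy_disk_old(int i, int pd_idx, const struct r6_state *r6s)
	{
		return i != pd_idx && (r6s && i != r6s->qd_idx);
	}

	/* Fixed test: always skip the parity disk, and skip the Q disk only
	 * when a Q disk actually exists (RAID6). */
	static int copy_disk_new(int i, int pd_idx, const struct r6_state *r6s)
	{
		return i != pd_idx && (!r6s || i != r6s->qd_idx);
	}

	int main(void)
	{
		const int disks = 4, pd_idx = 3;	/* example RAID5 layout */
		int i;

		for (i = 0; i < disks; i++)
			printf("disk %d: old=%d new=%d\n", i,
			       copy_disk_old(i, pd_idx, NULL),
			       copy_disk_new(i, pd_idx, NULL));
		return 0;
	}

The patch also waits for the expansion copies once, after the loop, and only when
a descriptor was actually issued (async_tx_ack()/dma_wait_for_async_tx() used to
run inside the loop, on the last iteration only), and it changes the second
argument of handle_write_operations5() from 0 to 1 when writing out the expanded
stripe.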
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 0b66afef2d82009be4f54c34880df588a8a22b61..4d63773ee73a634fe31d72eb04edad4d8942f8e2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -289,7 +289,7 @@ static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, in
 }
 
 static void unplug_slaves(mddev_t *mddev);
-static void raid5_unplug_device(request_queue_t *q);
+static void raid5_unplug_device(struct request_queue *q);
 
 static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks,
                                             int pd_idx, int noblock)
@@ -493,12 +493,12 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
                        if (frombio)
                                tx = async_memcpy(page, bio_page, page_offset,
                                        b_offset, clen,
-                                       ASYNC_TX_DEP_ACK | ASYNC_TX_KMAP_SRC,
+                                       ASYNC_TX_DEP_ACK,
                                        tx, NULL, NULL);
                        else
                                tx = async_memcpy(bio_page, page, b_offset,
                                        page_offset, clen,
-                                       ASYNC_TX_DEP_ACK | ASYNC_TX_KMAP_DST,
+                                       ASYNC_TX_DEP_ACK,
                                        tx, NULL, NULL);
                }
                if (clen < len) /* hit end of page */
@@ -951,7 +951,7 @@ static int grow_stripes(raid5_conf_t *conf, int num)
        conf->active_name = 0;
        sc = kmem_cache_create(conf->cache_name[conf->active_name],
                               sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
-                              0, 0, NULL, NULL);
+                              0, 0, NULL);
        if (!sc)
                return 1;
        conf->slab_cache = sc;
@@ -1003,7 +1003,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
        /* Step 1 */
        sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
                               sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
-                              0, 0, NULL, NULL);
+                              0, 0, NULL);
        if (!sc)
                return -ENOMEM;
 
@@ -2541,7 +2541,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
        struct dma_async_tx_descriptor *tx = NULL;
        clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
        for (i = 0; i < sh->disks; i++)
-               if (i != sh->pd_idx && (r6s && i != r6s->qd_idx)) {
+               if (i != sh->pd_idx && (!r6s || i != r6s->qd_idx)) {
                        int dd_idx, pd_idx, j;
                        struct stripe_head *sh2;
 
@@ -2574,7 +2574,8 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
                        set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
                        for (j = 0; j < conf->raid_disks; j++)
                                if (j != sh2->pd_idx &&
-                                   (r6s && j != r6s->qd_idx) &&
+                                   (!r6s || j != raid6_next_disk(sh2->pd_idx,
+                                                                sh2->disks)) &&
                                    !test_bit(R5_Expanded, &sh2->dev[j].flags))
                                        break;
                        if (j == conf->raid_disks) {
@@ -2583,12 +2584,12 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
                        }
                        release_stripe(sh2);
 
-                       /* done submitting copies, wait for them to complete */
-                       if (i + 1 >= sh->disks) {
-                               async_tx_ack(tx);
-                               dma_wait_for_async_tx(tx);
-                       }
                }
+       /* done submitting copies, wait for them to complete */
+       if (tx) {
+               async_tx_ack(tx);
+               dma_wait_for_async_tx(tx);
+       }
 }
 
 /*
@@ -2855,7 +2856,7 @@ static void handle_stripe5(struct stripe_head *sh)
                sh->disks = conf->raid_disks;
                sh->pd_idx = stripe_to_pdidx(sh->sector, conf,
                        conf->raid_disks);
-               s.locked += handle_write_operations5(sh, 0, 1);
+               s.locked += handle_write_operations5(sh, 1, 1);
        } else if (s.expanded &&
                !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
                clear_bit(STRIPE_EXPAND_READY, &sh->state);
@@ -3182,7 +3183,7 @@ static void unplug_slaves(mddev_t *mddev)
        for (i=0; i<mddev->raid_disks; i++) {
                mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
                if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-                       request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+                       struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
 
                        atomic_inc(&rdev->nr_pending);
                        rcu_read_unlock();
@@ -3197,7 +3198,7 @@ static void unplug_slaves(mddev_t *mddev)
        rcu_read_unlock();
 }
 
-static void raid5_unplug_device(request_queue_t *q)
+static void raid5_unplug_device(struct request_queue *q)
 {
        mddev_t *mddev = q->queuedata;
        raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -3216,7 +3217,7 @@ static void raid5_unplug_device(request_queue_t *q)
        unplug_slaves(mddev);
 }
 
-static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid5_issue_flush(struct request_queue *q, struct gendisk *disk,
                             sector_t *error_sector)
 {
        mddev_t *mddev = q->queuedata;
@@ -3228,7 +3229,7 @@ static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
                mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
                if (rdev && !test_bit(Faulty, &rdev->flags)) {
                        struct block_device *bdev = rdev->bdev;
-                       request_queue_t *r_queue = bdev_get_queue(bdev);
+                       struct request_queue *r_queue = bdev_get_queue(bdev);
 
                        if (!r_queue->issue_flush_fn)
                                ret = -EOPNOTSUPP;
@@ -3267,7 +3268,7 @@ static int raid5_congested(void *data, int bits)
 /* We want read requests to align with chunks where possible,
  * but write requests don't need to.
  */
-static int raid5_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int raid5_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
 {
        mddev_t *mddev = q->queuedata;
        sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
@@ -3377,7 +3378,7 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
 
 static int bio_fits_rdev(struct bio *bi)
 {
-       request_queue_t *q = bdev_get_queue(bi->bi_bdev);
+       struct request_queue *q = bdev_get_queue(bi->bi_bdev);
 
        if ((bi->bi_size>>9) > q->max_sectors)
                return 0;
@@ -3396,7 +3397,7 @@ static int bio_fits_rdev(struct bio *bi)
 }
 
 
-static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
+static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
 {
        mddev_t *mddev = q->queuedata;
        raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -3466,7 +3467,7 @@ static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
 }
 
 
-static int make_request(request_queue_t *q, struct bio * bi)
+static int make_request(struct request_queue *q, struct bio * bi)
 {
        mddev_t *mddev = q->queuedata;
        raid5_conf_t *conf = mddev_to_conf(mddev);