md/raid5: add blktrace calls
author     NeilBrown <neilb@suse.de>
           Mon, 15 Oct 2012 22:18:55 +0000 (09:18 +1100)
committer  NeilBrown <neilb@suse.de>
           Mon, 15 Oct 2012 22:18:55 +0000 (09:18 +1100)
This makes it easier to trace what raid5 is doing.

Signed-off-by: NeilBrown <neilb@suse.de>
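
The patch applies two recurring patterns throughout raid5.c, sketched below in
simplified form. The helper names raid5_forward_bio() and raid5_finish_bio()
are hypothetical; the patch open-codes these calls at each site. A
block_bio_remap event is emitted just before a bio is forwarded to a member
disk, and a block_bio_complete event is emitted against the queue the bio
arrived on just before the original bio is ended.

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/genhd.h>
    #include <trace/events/block.h>

    /*
     * Illustrative sketch only; not part of the patch.
     * Pattern 1: announce the remap, then submit the bio to the member device.
     */
    static void raid5_forward_bio(struct bio *bi, struct gendisk *array_disk,
                                  sector_t dev_sector)
    {
            trace_block_bio_remap(bdev_get_queue(bi->bi_bdev), bi,
                                  disk_devt(array_disk), dev_sector);
            generic_make_request(bi);
    }

    /*
     * Pattern 2: announce completion on the queue the bio was submitted to,
     * then complete the bio.
     */
    static void raid5_finish_bio(struct bio *bi)
    {
            trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), bi, 0);
            bio_endio(bi, 0);
    }

Together with the block_unplug event and the "raid5 rmw"/"raid5 rcw" messages
added via blk_add_trace_msg(), this makes the remap and completion of each bio,
as well as the read-modify-write vs. reconstruct-write decisions, visible in a
blktrace of the array.
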
drivers/md/raid5.c

index fb775a1cd18985237f00baa7a4aee00559b7f1b0..8f2a2ffd470285268ed3c121781e82e899ed9781 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -53,6 +53,8 @@
 #include <linux/cpu.h>
 #include <linux/slab.h>
 #include <linux/ratelimit.h>
+#include <trace/events/block.h>
+
 #include "md.h"
 #include "raid5.h"
 #include "raid0.h"
@@ -182,6 +184,8 @@ static void return_io(struct bio *return_bi)
                return_bi = bi->bi_next;
                bi->bi_next = NULL;
                bi->bi_size = 0;
+               trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
+                                        bi, 0);
                bio_endio(bi, 0);
                bi = return_bi;
        }
@@ -697,6 +701,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                        bi->bi_next = NULL;
                        if (rrdev)
                                set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
+                       trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
+                                             bi, disk_devt(conf->mddev->gendisk),
+                                             sh->dev[i].sector);
                        generic_make_request(bi);
                }
                if (rrdev) {
@@ -724,6 +731,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                        rbi->bi_io_vec[0].bv_offset = 0;
                        rbi->bi_size = STRIPE_SIZE;
                        rbi->bi_next = NULL;
+                       trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
+                                             rbi, disk_devt(conf->mddev->gendisk),
+                                             sh->dev[i].sector);
                        generic_make_request(rbi);
                }
                if (!rdev && !rrdev) {
@@ -2878,8 +2888,10 @@ static void handle_stripe_dirtying(struct r5conf *conf,
        pr_debug("for sector %llu, rmw=%d rcw=%d\n",
                (unsigned long long)sh->sector, rmw, rcw);
        set_bit(STRIPE_HANDLE, &sh->state);
-       if (rmw < rcw && rmw > 0)
+       if (rmw < rcw && rmw > 0) {
                /* prefer read-modify-write, but need to get some data */
+               blk_add_trace_msg(conf->mddev->queue, "raid5 rmw %llu %d",
+                                 (unsigned long long)sh->sector, rmw);
                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];
                        if ((dev->towrite || i == sh->pd_idx) &&
@@ -2890,7 +2902,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
                                if (
                                  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
                                        pr_debug("Read_old block "
-                                               "%d for r-m-w\n", i);
+                                                "%d for r-m-w\n", i);
                                        set_bit(R5_LOCKED, &dev->flags);
                                        set_bit(R5_Wantread, &dev->flags);
                                        s->locked++;
@@ -2900,8 +2912,10 @@ static void handle_stripe_dirtying(struct r5conf *conf,
                                }
                        }
                }
+       }
        if (rcw <= rmw && rcw > 0) {
                /* want reconstruct write, but need to get some data */
+               int qread =0;
                rcw = 0;
                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];
@@ -2920,12 +2934,17 @@ static void handle_stripe_dirtying(struct r5conf *conf,
                                        set_bit(R5_LOCKED, &dev->flags);
                                        set_bit(R5_Wantread, &dev->flags);
                                        s->locked++;
+                                       qread++;
                                } else {
                                        set_bit(STRIPE_DELAYED, &sh->state);
                                        set_bit(STRIPE_HANDLE, &sh->state);
                                }
                        }
                }
+               if (rcw)
+                       blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
+                                         (unsigned long long)sh->sector,
+                                         rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
        }
        /* now if nothing is locked, and if we have enough data,
         * we can start a write request
@@ -3933,6 +3952,8 @@ static void raid5_align_endio(struct bio *bi, int error)
        rdev_dec_pending(rdev, conf->mddev);
 
        if (!error && uptodate) {
+               trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
+                                        raid_bi, 0);
                bio_endio(raid_bi, 0);
                if (atomic_dec_and_test(&conf->active_aligned_reads))
                        wake_up(&conf->wait_for_stripe);
@@ -4037,6 +4058,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                atomic_inc(&conf->active_aligned_reads);
                spin_unlock_irq(&conf->device_lock);
 
+               trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
+                                     align_bi, disk_devt(mddev->gendisk),
+                                     raid_bio->bi_sector);
                generic_make_request(align_bi);
                return 1;
        } else {
@@ -4133,6 +4157,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
        struct stripe_head *sh;
        struct mddev *mddev = cb->cb.data;
        struct r5conf *conf = mddev->private;
+       int cnt = 0;
 
        if (cb->list.next && !list_empty(&cb->list)) {
                spin_lock_irq(&conf->device_lock);
@@ -4147,9 +4172,11 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
                        smp_mb__before_clear_bit();
                        clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
                        __release_stripe(conf, sh);
+                       cnt++;
                }
                spin_unlock_irq(&conf->device_lock);
        }
+       trace_block_unplug(mddev->queue, cnt, !from_schedule);
        kfree(cb);
 }
 
@@ -4407,6 +4434,8 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                if ( rw == WRITE )
                        md_write_end(mddev);
 
+               trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
+                                        bi, 0);
                bio_endio(bi, 0);
        }
 }
@@ -4783,8 +4812,11 @@ static int  retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
                handled++;
        }
        remaining = raid5_dec_bi_active_stripes(raid_bio);
-       if (remaining == 0)
+       if (remaining == 0) {
+               trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
+                                        raid_bio, 0);
                bio_endio(raid_bio, 0);
+       }
        if (atomic_dec_and_test(&conf->active_aligned_reads))
                wake_up(&conf->wait_for_stripe);
        return handled;