xen/blkback: Move the plugging/unplugging to a higher level.
author	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
	Tue, 26 Apr 2011 16:57:59 +0000 (12:57 -0400)
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
	Tue, 26 Apr 2011 17:01:32 +0000 (13:01 -0400)
We used to do the plug/unplug at submit_bio time. But that means
that if within a stream of WRITE, WRITE, WRITE, ..., WRITE we have
one READ, it could stall the pipeline (as 'submit_bio' could
trigger the unplug function to be called and stall/sync when doing
the READ). Instead we want to move the unplugging to when the whole
ring buffer (or as much of it as possible) has been processed.
This also eliminates doing a plug/unplug for each request.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
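
For context, a minimal sketch (not part of this patch) of the pattern the change adopts: one blk_start_plug()/blk_finish_plug() pair around an entire pass over the ring rather than around each request. drain_ring_once() and process_ring() are hypothetical stand-ins for the loop body of xen_blkif_schedule() and for do_block_io_op().

#include <linux/blkdev.h>

/* Hypothetical stand-in for do_block_io_op(): would walk the ring
 * and call submit_bio() for every segment of every request. */
static void process_ring(void)
{
	/* ... submit_bio(operation, bio) per segment ... */
}

static void drain_ring_once(void)
{
	struct blk_plug plug;

	/* One plug spans the whole pass, so a READ mixed into a run of
	 * WRITEs no longer forces an early unplug for that request. */
	blk_start_plug(&plug);
	process_ring();
	blk_finish_plug(&plug);	/* now let the batched bios go */
}

With the plug held across the pass, the block layer can batch the queued bios and only kicks the queue when blk_finish_plug() runs (or the task schedules).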
drivers/block/xen-blkback/blkback.c

index c4bc85e69d33c48e316ca6bd2495439b4fcdd7ac..ed85ba94b2e0a85a5962d4621c2ef55b035e3562 100644
@@ -276,6 +276,8 @@ int xen_blkif_schedule(void *arg)
                printk(KERN_DEBUG "%s: started\n", current->comm);
 
        while (!kthread_should_stop()) {
+               struct blk_plug plug;
+
                if (try_to_freeze())
                        continue;
                if (unlikely(vbd->size != vbd_sz(vbd)))
@@ -292,9 +294,13 @@ int xen_blkif_schedule(void *arg)
                blkif->waiting_reqs = 0;
                smp_mb(); /* clear flag *before* checking for work */
 
+               blk_start_plug(&plug);
+
                if (do_block_io_op(blkif))
                        blkif->waiting_reqs = 1;
 
+               blk_finish_plug(&plug);
+
                if (log_stats && time_after(jiffies, blkif->st_print))
                        print_stats(blkif);
        }
@@ -547,7 +553,6 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
        struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        int i, nbio = 0;
        int operation;
-       struct blk_plug plug;
 
        switch (req->operation) {
        case BLKIF_OP_READ:
@@ -660,15 +665,9 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
         */
        atomic_set(&pending_req->pendcnt, nbio);
 
-       /* Get a reference count for the disk queue and start sending I/O */
-       blk_start_plug(&plug);
-
        for (i = 0; i < nbio; i++)
                submit_bio(operation, biolist[i]);
 
-       blk_finish_plug(&plug);
-       /* Let the I/Os go.. */
-
        if (operation == READ)
                blkif->st_rd_sect += preq.nr_sects;
        else if (operation == WRITE || operation == WRITE_BARRIER)