From: Jens Axboe
Date: Wed, 19 Oct 2011 12:30:42 +0000 (+0200)
Subject: Merge branch 'v3.1-rc10' into for-3.2/core
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=5c04b426f2e8b46cfc7969a35b2631063a3c646c;p=linux-beck.git

Merge branch 'v3.1-rc10' into for-3.2/core

Conflicts:
	block/blk-core.c
	include/linux/blkdev.h

Signed-off-by: Jens Axboe
---

5c04b426f2e8b46cfc7969a35b2631063a3c646c
diff --cc block/blk-core.c
index 97e9e5405b83,d34433ae7917..79e41a76d96a
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@@ -1235,8 -1240,8 +1238,8 @@@ void blk_queue_bio(struct request_queu
  	 * Check if we can merge with the plugged list before grabbing
  	 * any locks.
  	 */
- 	if (attempt_plug_merge(current, q, bio))
+ 	if (attempt_plug_merge(current, q, bio, &request_count))
 -		goto out;
 +		return;
  
  	spin_lock_irq(q->queue_lock);
diff --cc drivers/md/raid1.c
index 97f2a5f977b1,d9587dffe533..d4ddfa627301
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@@ -1117,8 -1116,15 +1116,13 @@@ read_again
  		goto retry_write;
  	}
  
+ 	r1_bio_write_done(r1_bio);
+ 
+ 	/* In case raid1d snuck in to freeze_array */
+ 	wake_up(&conf->wait_barrier);
+ 
  	if (do_sync || !bitmap || !plugged)
  		md_wakeup_thread(mddev->thread);
 -
 -	return 0;
  }
  
  static void status(struct seq_file *seq, mddev_t *mddev)
diff --cc include/linux/blkdev.h
index 1978655faa3b,7fbaa9103344..0b68044e7abb
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@@ -860,23 -863,16 +865,22 @@@ struct request_queue *blk_alloc_queue_n
  extern void blk_put_queue(struct request_queue *);
  
  /*
 - * Note: Code in between changing the blk_plug list/cb_list or element of such
 - * lists is preemptable, but such code can't do sleep (or be very careful),
 - * otherwise data is corrupted. For details, please check schedule() where
 - * blk_schedule_flush_plug() is called.
 + * blk_plug permits building a queue of related requests by holding the I/O
 + * fragments for a short period. This allows merging of sequential requests
 + * into single larger request. As the requests are moved from a per-task list to
 + * the device's request_queue in a batch, this results in improved scalability
 + * as the lock contention for request_queue lock is reduced.
 + *
 + * It is ok not to disable preemption when adding the request to the plug list
 + * or when attempting a merge, because blk_schedule_flush_list() will only flush
 + * the plug list when the task sleeps by itself. For details, please see
 + * schedule() where blk_schedule_flush_plug() is called.
  */
  struct blk_plug {
 -	unsigned long magic;
 -	struct list_head list;
 -	struct list_head cb_list;
 -	unsigned int should_sort;
 +	unsigned long magic; /* detect uninitialized use-cases */
 +	struct list_head list; /* requests */
 +	struct list_head cb_list; /* md requires an unplug callback */
 +	unsigned int should_sort; /* list to be sorted before flushing? */
- 	unsigned int count; /* number of queued requests */
  };
  
  #define BLK_MAX_REQUEST_COUNT 16
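
The blkdev.h comment above summarizes the plugging model this merge
reconciles: a task opens a plug, the requests it submits collect on a
per-task list (where attempt_plug_merge() can merge a new bio before any
queue_lock is taken), and the whole batch moves to the device's
request_queue when the plug is finished or the task sleeps. As a minimal
sketch of a caller in this era's API, assuming only blk_start_plug(),
blk_finish_plug(), and submit_bio() as declared in the kernel headers
(submit_batch() and its parameters are hypothetical names for illustration,
not kernel code):

	#include <linux/bio.h>
	#include <linux/blkdev.h>

	/* Hypothetical helper: batch nr bios behind one on-stack plug. */
	static void submit_batch(struct bio **bios, int nr)
	{
		struct blk_plug plug;
		int i;

		blk_start_plug(&plug);	/* current->plug now points at plug */
		for (i = 0; i < nr; i++)
			/*
			 * blk_queue_bio() merges each bio into an already
			 * plugged request when it can, or queues a new
			 * request on the per-task plug list (flushed early
			 * once it grows past BLK_MAX_REQUEST_COUNT).
			 */
			submit_bio(READ, bios[i]);
		blk_finish_plug(&plug);	/* move the batch to the request_queue */
	}

blk_schedule_flush_plug(), called from schedule(), is the safety net the
comment refers to: if the task blocks while still plugged, the held requests
are flushed so they cannot stall behind a sleeping submitter.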