block: remove per-queue plugging
author     Jens Axboe <jaxboe@fusionio.com>
           Thu, 10 Mar 2011 07:52:07 +0000 (08:52 +0100)
committer  Jens Axboe <jaxboe@fusionio.com>
           Thu, 10 Mar 2011 07:52:07 +0000 (08:52 +0100)
Code has been converted over to the new explicit on-stack plugging,
and delay users have been converted to use the new API for that.
So let's kill off the old plugging along with aops->sync_page().

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
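
[Editor's note, not part of the patch: the "new explicit on-stack plugging" the message refers to is visible in the dm-kcopyd hunk below, where do_work() wraps its submissions in blk_start_plug()/blk_finish_plug(). A rough sketch of the pattern follows; submit_batch() and its bios array are hypothetical and only illustrate how a caller plugs on its own stack instead of on the request queue. The plug is flushed either explicitly by blk_finish_plug() or implicitly if the task blocks (see blk_flush_plug() in the blk-core.c hunk).]

    #include <linux/blkdev.h>

    static void submit_batch(struct bio **bios, int nr)
    {
            struct blk_plug plug;
            int i;

            blk_start_plug(&plug);          /* start collecting I/O on this task's stack */
            for (i = 0; i < nr; i++)
                    generic_make_request(bios[i]);  /* held behind the plug, merged where possible */
            blk_finish_plug(&plug);         /* flush the plugged requests to the driver(s) */
    }
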
119 files changed:
Documentation/block/biodoc.txt
block/blk-core.c
block/blk-exec.c
block/blk-flush.c
block/blk-settings.c
block/blk-throttle.c
block/blk.h
block/cfq-iosched.c
block/deadline-iosched.c
block/elevator.c
block/noop-iosched.c
drivers/block/cciss.c
drivers/block/cpqarray.c
drivers/block/drbd/drbd_actlog.c
drivers/block/drbd/drbd_bitmap.c
drivers/block/drbd/drbd_int.h
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_receiver.c
drivers/block/drbd/drbd_req.c
drivers/block/drbd/drbd_worker.c
drivers/block/drbd/drbd_wrappers.h
drivers/block/floppy.c
drivers/block/loop.c
drivers/block/pktcdvd.c
drivers/block/umem.c
drivers/ide/ide-atapi.c
drivers/ide/ide-io.c
drivers/ide/ide-park.c
drivers/md/bitmap.c
drivers/md/dm-crypt.c
drivers/md/dm-kcopyd.c
drivers/md/dm-raid.c
drivers/md/dm-raid1.c
drivers/md/dm-table.c
drivers/md/dm.c
drivers/md/linear.c
drivers/md/md.c
drivers/md/multipath.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/md/raid5.h
drivers/message/i2o/i2o_block.c
drivers/mmc/card/queue.c
drivers/s390/block/dasd.c
drivers/s390/char/tape_block.c
drivers/scsi/scsi_transport_fc.c
drivers/scsi/scsi_transport_sas.c
drivers/target/target_core_iblock.c
fs/adfs/inode.c
fs/affs/file.c
fs/aio.c
fs/befs/linuxvfs.c
fs/bfs/file.c
fs/block_dev.c
fs/btrfs/disk-io.c
fs/btrfs/inode.c
fs/btrfs/volumes.c
fs/buffer.c
fs/cifs/file.c
fs/direct-io.c
fs/efs/inode.c
fs/exofs/inode.c
fs/ext2/inode.c
fs/ext3/inode.c
fs/ext4/inode.c
fs/fat/inode.c
fs/freevxfs/vxfs_subr.c
fs/fuse/inode.c
fs/gfs2/aops.c
fs/gfs2/meta_io.c
fs/hfs/inode.c
fs/hfsplus/inode.c
fs/hpfs/file.c
fs/isofs/inode.c
fs/jfs/inode.c
fs/jfs/jfs_metapage.c
fs/logfs/dev_bdev.c
fs/minix/inode.c
fs/nilfs2/btnode.c
fs/nilfs2/gcinode.c
fs/nilfs2/inode.c
fs/nilfs2/mdt.c
fs/nilfs2/page.c
fs/nilfs2/page.h
fs/ntfs/aops.c
fs/ntfs/compress.c
fs/ocfs2/aops.c
fs/ocfs2/cluster/heartbeat.c
fs/omfs/file.c
fs/qnx4/inode.c
fs/reiserfs/inode.c
fs/sysv/itree.c
fs/ubifs/super.c
fs/udf/file.c
fs/udf/inode.c
fs/ufs/inode.c
fs/ufs/truncate.c
fs/xfs/linux-2.6/xfs_aops.c
fs/xfs/linux-2.6/xfs_buf.c
include/linux/backing-dev.h
include/linux/blkdev.h
include/linux/buffer_head.h
include/linux/device-mapper.h
include/linux/elevator.h
include/linux/fs.h
include/linux/pagemap.h
include/linux/swap.h
mm/backing-dev.c
mm/filemap.c
mm/memory-failure.c
mm/nommu.c
mm/page-writeback.c
mm/readahead.c
mm/shmem.c
mm/swap_state.c
mm/swapfile.c
mm/vmscan.c

index b9a83dd24732486965f41225d8184405e4766c4a..2a7b38c832c72e705bd9136ace4d9ece77c16628 100644 (file)
@@ -963,11 +963,6 @@ elevator_dispatch_fn*              fills the dispatch queue with ready requests.
 
 elevator_add_req_fn*           called to add a new request into the scheduler
 
-elevator_queue_empty_fn                returns true if the merge queue is empty.
-                               Drivers shouldn't use this, but rather check
-                               if elv_next_request is NULL (without losing the
-                               request if one exists!)
-
 elevator_former_req_fn
 elevator_latter_req_fn         These return the request before or after the
                                one specified in disk sort order. Used by the
index 6efb55cc5af069e273e7ca8e011e8f1d8aa4f641..82a45898ba76ffee794c8c2d819bb5c132e51c03 100644 (file)
@@ -198,6 +198,19 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 }
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
+/*
+ * Make sure that plugs that were pending when this function was entered,
+ * are now complete and requests pushed to the queue.
+*/
+static inline void queue_sync_plugs(struct request_queue *q)
+{
+       /*
+        * If the current process is plugged and has barriers submitted,
+        * we will livelock if we don't unplug first.
+        */
+       blk_flush_plug(current);
+}
+
 static void blk_delay_work(struct work_struct *work)
 {
        struct request_queue *q;
@@ -224,137 +237,6 @@ void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 }
 EXPORT_SYMBOL(blk_delay_queue);
 
-/*
- * "plug" the device if there are no outstanding requests: this will
- * force the transfer to start only after we have put all the requests
- * on the list.
- *
- * This is called with interrupts off and no requests on the queue and
- * with the queue lock held.
- */
-void blk_plug_device(struct request_queue *q)
-{
-       WARN_ON(!irqs_disabled());
-
-       /*
-        * don't plug a stopped queue, it must be paired with blk_start_queue()
-        * which will restart the queueing
-        */
-       if (blk_queue_stopped(q))
-               return;
-
-       if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
-               mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
-               trace_block_plug(q);
-       }
-}
-EXPORT_SYMBOL(blk_plug_device);
-
-/**
- * blk_plug_device_unlocked - plug a device without queue lock held
- * @q:    The &struct request_queue to plug
- *
- * Description:
- *   Like @blk_plug_device(), but grabs the queue lock and disables
- *   interrupts.
- **/
-void blk_plug_device_unlocked(struct request_queue *q)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-       blk_plug_device(q);
-       spin_unlock_irqrestore(q->queue_lock, flags);
-}
-EXPORT_SYMBOL(blk_plug_device_unlocked);
-
-/*
- * remove the queue from the plugged list, if present. called with
- * queue lock held and interrupts disabled.
- */
-int blk_remove_plug(struct request_queue *q)
-{
-       WARN_ON(!irqs_disabled());
-
-       if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
-               return 0;
-
-       del_timer(&q->unplug_timer);
-       return 1;
-}
-EXPORT_SYMBOL(blk_remove_plug);
-
-/*
- * remove the plug and let it rip..
- */
-void __generic_unplug_device(struct request_queue *q)
-{
-       if (unlikely(blk_queue_stopped(q)))
-               return;
-       if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
-               return;
-
-       q->request_fn(q);
-}
-
-/**
- * generic_unplug_device - fire a request queue
- * @q:    The &struct request_queue in question
- *
- * Description:
- *   Linux uses plugging to build bigger requests queues before letting
- *   the device have at them. If a queue is plugged, the I/O scheduler
- *   is still adding and merging requests on the queue. Once the queue
- *   gets unplugged, the request_fn defined for the queue is invoked and
- *   transfers started.
- **/
-void generic_unplug_device(struct request_queue *q)
-{
-       if (blk_queue_plugged(q)) {
-               spin_lock_irq(q->queue_lock);
-               __generic_unplug_device(q);
-               spin_unlock_irq(q->queue_lock);
-       }
-}
-EXPORT_SYMBOL(generic_unplug_device);
-
-static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
-                                  struct page *page)
-{
-       struct request_queue *q = bdi->unplug_io_data;
-
-       blk_unplug(q);
-}
-
-void blk_unplug_work(struct work_struct *work)
-{
-       struct request_queue *q =
-               container_of(work, struct request_queue, unplug_work);
-
-       trace_block_unplug_io(q);
-       q->unplug_fn(q);
-}
-
-void blk_unplug_timeout(unsigned long data)
-{
-       struct request_queue *q = (struct request_queue *)data;
-
-       trace_block_unplug_timer(q);
-       kblockd_schedule_work(q, &q->unplug_work);
-}
-
-void blk_unplug(struct request_queue *q)
-{
-       /*
-        * devices don't necessarily have an ->unplug_fn defined
-        */
-       if (q->unplug_fn) {
-               trace_block_unplug_io(q);
-               q->unplug_fn(q);
-       }
-}
-EXPORT_SYMBOL(blk_unplug);
-
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q:    The &struct request_queue in question
@@ -389,7 +271,6 @@ EXPORT_SYMBOL(blk_start_queue);
  **/
 void blk_stop_queue(struct request_queue *q)
 {
-       blk_remove_plug(q);
        cancel_delayed_work(&q->delay_work);
        queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
@@ -411,11 +292,10 @@ EXPORT_SYMBOL(blk_stop_queue);
  */
 void blk_sync_queue(struct request_queue *q)
 {
-       del_timer_sync(&q->unplug_timer);
        del_timer_sync(&q->timeout);
-       cancel_work_sync(&q->unplug_work);
        throtl_shutdown_timer_wq(q);
        cancel_delayed_work_sync(&q->delay_work);
+       queue_sync_plugs(q);
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
@@ -430,14 +310,9 @@ EXPORT_SYMBOL(blk_sync_queue);
  */
 void __blk_run_queue(struct request_queue *q)
 {
-       blk_remove_plug(q);
-
        if (unlikely(blk_queue_stopped(q)))
                return;
 
-       if (elv_queue_empty(q))
-               return;
-
        /*
         * Only recurse once to avoid overrunning the stack, let the unplug
         * handling reinvoke the handler shortly if we already got there.
@@ -445,10 +320,8 @@ void __blk_run_queue(struct request_queue *q)
        if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
                q->request_fn(q);
                queue_flag_clear(QUEUE_FLAG_REENTER, q);
-       } else {
-               queue_flag_set(QUEUE_FLAG_PLUGGED, q);
-               kblockd_schedule_work(q, &q->unplug_work);
-       }
+       } else
+               queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
@@ -535,8 +408,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        if (!q)
                return NULL;
 
-       q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
-       q->backing_dev_info.unplug_io_data = q;
        q->backing_dev_info.ra_pages =
                        (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
        q->backing_dev_info.state = 0;
@@ -556,13 +427,11 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
        setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
                    laptop_mode_timer_fn, (unsigned long) q);
-       init_timer(&q->unplug_timer);
        setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
        INIT_LIST_HEAD(&q->timeout_list);
        INIT_LIST_HEAD(&q->flush_queue[0]);
        INIT_LIST_HEAD(&q->flush_queue[1]);
        INIT_LIST_HEAD(&q->flush_data_in_flight);
-       INIT_WORK(&q->unplug_work, blk_unplug_work);
        INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
 
        kobject_init(&q->kobj, &blk_queue_ktype);
@@ -652,7 +521,6 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
        q->request_fn           = rfn;
        q->prep_rq_fn           = NULL;
        q->unprep_rq_fn         = NULL;
-       q->unplug_fn            = generic_unplug_device;
        q->queue_flags          = QUEUE_FLAG_DEFAULT;
        q->queue_lock           = lock;
 
@@ -910,8 +778,8 @@ out:
 }
 
 /*
- * No available requests for this queue, unplug the device and wait for some
- * requests to become available.
+ * No available requests for this queue, wait for some requests to become
+ * available.
  *
  * Called with q->queue_lock held, and returns with it unlocked.
  */
@@ -932,7 +800,6 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 
                trace_block_sleeprq(q, bio, rw_flags & 1);
 
-               __generic_unplug_device(q);
                spin_unlock_irq(q->queue_lock);
                io_schedule();
 
@@ -1058,7 +925,7 @@ static void add_acct_request(struct request_queue *q, struct request *rq,
                             int where)
 {
        drive_stat_acct(rq, 1);
-       __elv_add_request(q, rq, where, 0);
+       __elv_add_request(q, rq, where);
 }
 
 /**
@@ -2798,7 +2665,7 @@ static void flush_plug_list(struct blk_plug *plug)
                /*
                 * rq is already accounted, so use raw insert
                 */
-               __elv_add_request(q, rq, ELEVATOR_INSERT_SORT, 0);
+               __elv_add_request(q, rq, ELEVATOR_INSERT_SORT);
        }
 
        if (q) {
index cf1456a02acdf7f4fc7fb924ff1153dcb4620c06..81e31819a597bb0c6ed6dd4f781eb037c986a243 100644 (file)
@@ -54,8 +54,8 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
        rq->end_io = done;
        WARN_ON(irqs_disabled());
        spin_lock_irq(q->queue_lock);
-       __elv_add_request(q, rq, where, 1);
-       __generic_unplug_device(q);
+       __elv_add_request(q, rq, where);
+       __blk_run_queue(q);
        /* the queue is stopped so it won't be plugged+unplugged */
        if (rq->cmd_type == REQ_TYPE_PM_RESUME)
                q->request_fn(q);
index 1e2aa8a8908c3ebe11cef1e5c4e58c435cc1a3ca..671fa9da75600e4d719d494614903da2fab8e7cd 100644 (file)
@@ -194,7 +194,6 @@ static void flush_end_io(struct request *flush_rq, int error)
 {
        struct request_queue *q = flush_rq->q;
        struct list_head *running = &q->flush_queue[q->flush_running_idx];
-       bool was_empty = elv_queue_empty(q);
        bool queued = false;
        struct request *rq, *n;
 
@@ -213,7 +212,7 @@ static void flush_end_io(struct request *flush_rq, int error)
        }
 
        /* after populating an empty queue, kick it to avoid stall */
-       if (queued && was_empty)
+       if (queued)
                __blk_run_queue(q);
 }
 
index 36c8c1f2af18088fb5fa34f4889d2fead2556600..c8d68921dddb276b9469bb2b0332a07bcc967ded 100644 (file)
@@ -164,14 +164,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
        blk_queue_congestion_threshold(q);
        q->nr_batching = BLK_BATCH_REQ;
 
-       q->unplug_thresh = 4;           /* hmm */
-       q->unplug_delay = msecs_to_jiffies(3);  /* 3 milliseconds */
-       if (q->unplug_delay == 0)
-               q->unplug_delay = 1;
-
-       q->unplug_timer.function = blk_unplug_timeout;
-       q->unplug_timer.data = (unsigned long)q;
-
        blk_set_default_limits(&q->limits);
        blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
 
index a89043a3caa416bd59f9a24486698e8d5ce30e1c..b8dcdc2663a11c1622ab17f9fda14bd0945bca45 100644 (file)
@@ -800,7 +800,6 @@ out:
        if (nr_disp) {
                while((bio = bio_list_pop(&bio_list_on_stack)))
                        generic_make_request(bio);
-               blk_unplug(q);
        }
        return nr_disp;
 }
index 284b500852bd5fdfff707755b4b22ab27b6a9eb1..49d21af81d07faf36924deb463400dce2f97d6cb 100644 (file)
@@ -18,8 +18,6 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
 
-void blk_unplug_work(struct work_struct *work);
-void blk_unplug_timeout(unsigned long data);
 void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
index 3202c7e87fb3d3d5a8d434fed535b410512f82b7..ef631539dd2ac4c1678f45b172828d97b9d9ce7f 100644 (file)
@@ -499,13 +499,6 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
        }
 }
 
-static int cfq_queue_empty(struct request_queue *q)
-{
-       struct cfq_data *cfqd = q->elevator->elevator_data;
-
-       return !cfqd->rq_queued;
-}
-
 /*
  * Scale schedule slice based on io priority. Use the sync time slice only
  * if a queue is marked sync and has sync io queued. A sync queue with async
@@ -4061,7 +4054,6 @@ static struct elevator_type iosched_cfq = {
                .elevator_add_req_fn =          cfq_insert_request,
                .elevator_activate_req_fn =     cfq_activate_request,
                .elevator_deactivate_req_fn =   cfq_deactivate_request,
-               .elevator_queue_empty_fn =      cfq_queue_empty,
                .elevator_completed_req_fn =    cfq_completed_request,
                .elevator_former_req_fn =       elv_rb_former_request,
                .elevator_latter_req_fn =       elv_rb_latter_request,
index b547cbca7b23a55dd9d1f0b444bed07d297fc646..5139c0ea1864a858b6072febcf636a71e9bc6f13 100644 (file)
@@ -326,14 +326,6 @@ dispatch_request:
        return 1;
 }
 
-static int deadline_queue_empty(struct request_queue *q)
-{
-       struct deadline_data *dd = q->elevator->elevator_data;
-
-       return list_empty(&dd->fifo_list[WRITE])
-               && list_empty(&dd->fifo_list[READ]);
-}
-
 static void deadline_exit_queue(struct elevator_queue *e)
 {
        struct deadline_data *dd = e->elevator_data;
@@ -445,7 +437,6 @@ static struct elevator_type iosched_deadline = {
                .elevator_merge_req_fn =        deadline_merged_requests,
                .elevator_dispatch_fn =         deadline_dispatch_requests,
                .elevator_add_req_fn =          deadline_add_request,
-               .elevator_queue_empty_fn =      deadline_queue_empty,
                .elevator_former_req_fn =       elv_rb_former_request,
                .elevator_latter_req_fn =       elv_rb_latter_request,
                .elevator_init_fn =             deadline_init_queue,
index 25713927c0d3657a59807c5ceabb258a888cd565..3ea208256e7832d691e7fe5a28e0770131069bfb 100644 (file)
@@ -619,21 +619,12 @@ void elv_quiesce_end(struct request_queue *q)
 
 void elv_insert(struct request_queue *q, struct request *rq, int where)
 {
-       int unplug_it = 1;
-
        trace_block_rq_insert(q, rq);
 
        rq->q = q;
 
        switch (where) {
        case ELEVATOR_INSERT_REQUEUE:
-               /*
-                * Most requeues happen because of a busy condition,
-                * don't force unplug of the queue for that case.
-                * Clear unplug_it and fall through.
-                */
-               unplug_it = 0;
-
        case ELEVATOR_INSERT_FRONT:
                rq->cmd_flags |= REQ_SOFTBARRIER;
                list_add(&rq->queuelist, &q->queue_head);
@@ -679,24 +670,14 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
                rq->cmd_flags |= REQ_SOFTBARRIER;
                blk_insert_flush(rq);
                break;
-
        default:
                printk(KERN_ERR "%s: bad insertion point %d\n",
                       __func__, where);
                BUG();
        }
-
-       if (unplug_it && blk_queue_plugged(q)) {
-               int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
-                               - queue_in_flight(q);
-
-               if (nrq >= q->unplug_thresh)
-                       __generic_unplug_device(q);
-       }
 }
 
-void __elv_add_request(struct request_queue *q, struct request *rq, int where,
-                      int plug)
+void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 {
        BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
 
@@ -711,38 +692,20 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
                    where == ELEVATOR_INSERT_SORT)
                where = ELEVATOR_INSERT_BACK;
 
-       if (plug)
-               blk_plug_device(q);
-
        elv_insert(q, rq, where);
 }
 EXPORT_SYMBOL(__elv_add_request);
 
-void elv_add_request(struct request_queue *q, struct request *rq, int where,
-                    int plug)
+void elv_add_request(struct request_queue *q, struct request *rq, int where)
 {
        unsigned long flags;
 
        spin_lock_irqsave(q->queue_lock, flags);
-       __elv_add_request(q, rq, where, plug);
+       __elv_add_request(q, rq, where);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(elv_add_request);
 
-int elv_queue_empty(struct request_queue *q)
-{
-       struct elevator_queue *e = q->elevator;
-
-       if (!list_empty(&q->queue_head))
-               return 0;
-
-       if (e->ops->elevator_queue_empty_fn)
-               return e->ops->elevator_queue_empty_fn(q);
-
-       return 1;
-}
-EXPORT_SYMBOL(elv_queue_empty);
-
 struct request *elv_latter_request(struct request_queue *q, struct request *rq)
 {
        struct elevator_queue *e = q->elevator;
index 232c4b38cd3769d31e0c79700a49f42915c683a6..06389e9ef96d552836a1509ddf86745d82c09762 100644 (file)
@@ -39,13 +39,6 @@ static void noop_add_request(struct request_queue *q, struct request *rq)
        list_add_tail(&rq->queuelist, &nd->queue);
 }
 
-static int noop_queue_empty(struct request_queue *q)
-{
-       struct noop_data *nd = q->elevator->elevator_data;
-
-       return list_empty(&nd->queue);
-}
-
 static struct request *
 noop_former_request(struct request_queue *q, struct request *rq)
 {
@@ -90,7 +83,6 @@ static struct elevator_type elevator_noop = {
                .elevator_merge_req_fn          = noop_merged_requests,
                .elevator_dispatch_fn           = noop_dispatch,
                .elevator_add_req_fn            = noop_add_request,
-               .elevator_queue_empty_fn        = noop_queue_empty,
                .elevator_former_req_fn         = noop_former_request,
                .elevator_latter_req_fn         = noop_latter_request,
                .elevator_init_fn               = noop_init_queue,
index 9279272b3732719c9b660e6abc6146dd6dfee960..35658f445fca4a494071438ad0f4788bab936231 100644 (file)
@@ -3170,12 +3170,6 @@ static void do_cciss_request(struct request_queue *q)
        int sg_index = 0;
        int chained = 0;
 
-       /* We call start_io here in case there is a command waiting on the
-        * queue that has not been sent.
-        */
-       if (blk_queue_plugged(q))
-               goto startio;
-
       queue:
        creq = blk_peek_request(q);
        if (!creq)
index 946dad4caef37c63615d151fecabc8a7ac5e0d4e..b2fceb53e8092bd28355f7678ea4e92351952299 100644 (file)
@@ -911,9 +911,6 @@ static void do_ida_request(struct request_queue *q)
        struct scatterlist tmp_sg[SG_MAX];
        int i, dir, seg;
 
-       if (blk_queue_plugged(q))
-               goto startio;
-
 queue_next:
        creq = blk_peek_request(q);
        if (!creq)
index ba95cba192be8d1d00f7b7e9b47f1cb05d460470..2096628d6e65502936c26be63f3706ab1b0209b1 100644 (file)
@@ -689,8 +689,6 @@ void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
                }
        }
 
-       drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));
-
        /* always (try to) flush bitmap to stable storage */
        drbd_md_flush(mdev);
 
index fd42832f785b86a6056e3c4efceccaeab37f04bd..0645ca829a94163c57f4ee5ac43e96aaa344870d 100644 (file)
@@ -840,7 +840,6 @@ static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local)
        for (i = 0; i < num_pages; i++)
                bm_page_io_async(mdev, b, i, rw);
 
-       drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));
        wait_event(b->bm_io_wait, atomic_read(&b->bm_async_io) == 0);
 
        if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) {
index 3803a03489372911290cd539988da5e779ac85d2..0b5718e19586d59ea90bcb34471f70db0e546d4c 100644 (file)
@@ -2382,20 +2382,6 @@ static inline int drbd_queue_order_type(struct drbd_conf *mdev)
        return QUEUE_ORDERED_NONE;
 }
 
-static inline void drbd_blk_run_queue(struct request_queue *q)
-{
-       if (q && q->unplug_fn)
-               q->unplug_fn(q);
-}
-
-static inline void drbd_kick_lo(struct drbd_conf *mdev)
-{
-       if (get_ldev(mdev)) {
-               drbd_blk_run_queue(bdev_get_queue(mdev->ldev->backing_bdev));
-               put_ldev(mdev);
-       }
-}
-
 static inline void drbd_md_flush(struct drbd_conf *mdev)
 {
        int r;
index 29cd0dc9fe4f8c9fe6731aaaf6f85220b7bf5db0..6049cb85310dc332c1827dd41cbf0d413cccdc02 100644 (file)
@@ -2719,35 +2719,6 @@ static int drbd_release(struct gendisk *gd, fmode_t mode)
        return 0;
 }
 
-static void drbd_unplug_fn(struct request_queue *q)
-{
-       struct drbd_conf *mdev = q->queuedata;
-
-       /* unplug FIRST */
-       spin_lock_irq(q->queue_lock);
-       blk_remove_plug(q);
-       spin_unlock_irq(q->queue_lock);
-
-       /* only if connected */
-       spin_lock_irq(&mdev->req_lock);
-       if (mdev->state.pdsk >= D_INCONSISTENT && mdev->state.conn >= C_CONNECTED) {
-               D_ASSERT(mdev->state.role == R_PRIMARY);
-               if (test_and_clear_bit(UNPLUG_REMOTE, &mdev->flags)) {
-                       /* add to the data.work queue,
-                        * unless already queued.
-                        * XXX this might be a good addition to drbd_queue_work
-                        * anyways, to detect "double queuing" ... */
-                       if (list_empty(&mdev->unplug_work.list))
-                               drbd_queue_work(&mdev->data.work,
-                                               &mdev->unplug_work);
-               }
-       }
-       spin_unlock_irq(&mdev->req_lock);
-
-       if (mdev->state.disk >= D_INCONSISTENT)
-               drbd_kick_lo(mdev);
-}
-
 static void drbd_set_defaults(struct drbd_conf *mdev)
 {
        /* This way we get a compile error when sync_conf grows,
@@ -3222,9 +3193,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
        blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
        blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
        blk_queue_merge_bvec(q, drbd_merge_bvec);
-       q->queue_lock = &mdev->req_lock; /* needed since we use */
-               /* plugging on a queue, that actually has no requests! */
-       q->unplug_fn = drbd_unplug_fn;
+       q->queue_lock = &mdev->req_lock;
 
        mdev->md_io_page = alloc_page(GFP_KERNEL);
        if (!mdev->md_io_page)
index 24487d4fb20297e6676a91e60c9525f26092e315..84132f8bf8a41aac1307478ec5a89fd060764a56 100644 (file)
@@ -187,15 +187,6 @@ static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int
        return NULL;
 }
 
-/* kick lower level device, if we have more than (arbitrary number)
- * reference counts on it, which typically are locally submitted io
- * requests.  don't use unacked_cnt, so we speed up proto A and B, too. */
-static void maybe_kick_lo(struct drbd_conf *mdev)
-{
-       if (atomic_read(&mdev->local_cnt) >= mdev->net_conf->unplug_watermark)
-               drbd_kick_lo(mdev);
-}
-
 static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
 {
        struct drbd_epoch_entry *e;
@@ -219,7 +210,6 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
        LIST_HEAD(reclaimed);
        struct drbd_epoch_entry *e, *t;
 
-       maybe_kick_lo(mdev);
        spin_lock_irq(&mdev->req_lock);
        reclaim_net_ee(mdev, &reclaimed);
        spin_unlock_irq(&mdev->req_lock);
@@ -436,8 +426,7 @@ void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
        while (!list_empty(head)) {
                prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock_irq(&mdev->req_lock);
-               drbd_kick_lo(mdev);
-               schedule();
+               io_schedule();
                finish_wait(&mdev->ee_wait, &wait);
                spin_lock_irq(&mdev->req_lock);
        }
@@ -1147,7 +1136,6 @@ next_bio:
 
                drbd_generic_make_request(mdev, fault_type, bio);
        } while (bios);
-       maybe_kick_lo(mdev);
        return 0;
 
 fail:
@@ -1167,9 +1155,6 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
 
        inc_unacked(mdev);
 
-       if (mdev->net_conf->wire_protocol != DRBD_PROT_C)
-               drbd_kick_lo(mdev);
-
        mdev->current_epoch->barrier_nr = p->barrier;
        rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
 
@@ -3556,9 +3541,6 @@ static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 
 static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
 {
-       if (mdev->state.disk >= D_INCONSISTENT)
-               drbd_kick_lo(mdev);
-
        /* Make sure we've acked all the TCP data associated
         * with the data requests being unplugged */
        drbd_tcp_quickack(mdev->data.socket);
index 11a75d32a2e27f0d78c26b511921780f4d336264..ad3fc6228f27924d3523976fd3a5a4486c3a5b4a 100644 (file)
@@ -960,10 +960,6 @@ allocate_barrier:
                        bio_endio(req->private_bio, -EIO);
        }
 
-       /* we need to plug ALWAYS since we possibly need to kick lo_dev.
-        * we plug after submit, so we won't miss an unplug event */
-       drbd_plug_device(mdev);
-
        return 0;
 
 fail_conflicting:
index 34f224b018b37b1e1781134fedf67179799ab20e..e027446590d3752d63d2301c7d65e17d749f7b13 100644 (file)
@@ -792,7 +792,6 @@ int drbd_resync_finished(struct drbd_conf *mdev)
                 * queue (or even the read operations for those packets
                 * is not finished by now).   Retry in 100ms. */
 
-               drbd_kick_lo(mdev);
                __set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ / 10);
                w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
index defdb5013ea3444f272e83084d378964eca621ba..53586fa5ae1b098686e6171ea821fa21dc1bdb1f 100644 (file)
@@ -45,24 +45,6 @@ static inline void drbd_generic_make_request(struct drbd_conf *mdev,
                generic_make_request(bio);
 }
 
-static inline void drbd_plug_device(struct drbd_conf *mdev)
-{
-       struct request_queue *q;
-       q = bdev_get_queue(mdev->this_bdev);
-
-       spin_lock_irq(q->queue_lock);
-
-/* XXX the check on !blk_queue_plugged is redundant,
- * implicitly checked in blk_plug_device */
-
-       if (!blk_queue_plugged(q)) {
-               blk_plug_device(q);
-               del_timer(&q->unplug_timer);
-               /* unplugging should not happen automatically... */
-       }
-       spin_unlock_irq(q->queue_lock);
-}
-
 static inline int drbd_crypto_is_hash(struct crypto_tfm *tfm)
 {
         return (crypto_tfm_alg_type(tfm) & CRYPTO_ALG_TYPE_HASH_MASK)
index b9ba04fc2b34eb0845cf0c724b64151e53b05c65..271142b9e2cd6e38bf3bbc080045055bd214aa8c 100644 (file)
@@ -3837,7 +3837,6 @@ static int __floppy_read_block_0(struct block_device *bdev)
        bio.bi_end_io = floppy_rb0_complete;
 
        submit_bio(READ, &bio);
-       generic_unplug_device(bdev_get_queue(bdev));
        process_fd_request();
        wait_for_completion(&complete);
 
index 49e6a545eb63fde25bc1984e3b2c165432f984b9..01b8e4a87c9f0861afc5cffbe139a931bcf3b80c 100644 (file)
@@ -541,17 +541,6 @@ out:
        return 0;
 }
 
-/*
- * kick off io on the underlying address space
- */
-static void loop_unplug(struct request_queue *q)
-{
-       struct loop_device *lo = q->queuedata;
-
-       queue_flag_clear_unlocked(QUEUE_FLAG_PLUGGED, q);
-       blk_run_address_space(lo->lo_backing_file->f_mapping);
-}
-
 struct switch_request {
        struct file *file;
        struct completion wait;
@@ -918,7 +907,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
         */
        blk_queue_make_request(lo->lo_queue, loop_make_request);
        lo->lo_queue->queuedata = lo;
-       lo->lo_queue->unplug_fn = loop_unplug;
 
        if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
                blk_queue_flush(lo->lo_queue, REQ_FLUSH);
@@ -1020,7 +1008,6 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
 
        kthread_stop(lo->lo_thread);
 
-       lo->lo_queue->unplug_fn = NULL;
        lo->lo_backing_file = NULL;
 
        loop_release_xfer(lo);
index 77d70eebb6b2121e49b94b7626383b31552a2a35..d20e13f8000179f5329aed9a4a758feeded00dd5 100644 (file)
@@ -1606,8 +1606,6 @@ static int kcdrwd(void *foobar)
                                        min_sleep_time = pkt->sleep_time;
                        }
 
-                       generic_unplug_device(bdev_get_queue(pd->bdev));
-
                        VPRINTK("kcdrwd: sleeping\n");
                        residue = schedule_timeout(min_sleep_time);
                        VPRINTK("kcdrwd: wake up\n");
index 8be57151f5d6570cd9b9c9a629618bc23ae3466b..653439faa729325298a20d367b2927b45acc5676 100644 (file)
@@ -241,8 +241,7 @@ static void dump_dmastat(struct cardinfo *card, unsigned int dmastat)
  *
  * Whenever IO on the active page completes, the Ready page is activated
  * and the ex-Active page is clean out and made Ready.
- * Otherwise the Ready page is only activated when it becomes full, or
- * when mm_unplug_device is called via the unplug_io_fn.
+ * Otherwise the Ready page is only activated when it becomes full.
  *
  * If a request arrives while both pages a full, it is queued, and b_rdev is
  * overloaded to record whether it was a read or a write.
@@ -333,17 +332,6 @@ static inline void reset_page(struct mm_page *page)
        page->biotail = &page->bio;
 }
 
-static void mm_unplug_device(struct request_queue *q)
-{
-       struct cardinfo *card = q->queuedata;
-       unsigned long flags;
-
-       spin_lock_irqsave(&card->lock, flags);
-       if (blk_remove_plug(q))
-               activate(card);
-       spin_unlock_irqrestore(&card->lock, flags);
-}
-
 /*
  * If there is room on Ready page, take
  * one bh off list and add it.
@@ -535,7 +523,6 @@ static int mm_make_request(struct request_queue *q, struct bio *bio)
        *card->biotail = bio;
        bio->bi_next = NULL;
        card->biotail = &bio->bi_next;
-       blk_plug_device(q);
        spin_unlock_irq(&card->lock);
 
        return 0;
@@ -907,7 +894,6 @@ static int __devinit mm_pci_probe(struct pci_dev *dev,
        blk_queue_make_request(card->queue, mm_make_request);
        card->queue->queue_lock = &card->lock;
        card->queue->queuedata = card;
-       card->queue->unplug_fn = mm_unplug_device;
 
        tasklet_init(&card->tasklet, process_page, (unsigned long)card);
 
index e88a2cf177110f37fdd6bfc6d1801184ec2b94f4..6f218e014e9940a4021533159dd72e9b64fac5f5 100644 (file)
@@ -233,8 +233,7 @@ int ide_queue_sense_rq(ide_drive_t *drive, void *special)
 
        drive->hwif->rq = NULL;
 
-       elv_add_request(drive->queue, &drive->sense_rq,
-                       ELEVATOR_INSERT_FRONT, 0);
+       elv_add_request(drive->queue, &drive->sense_rq, ELEVATOR_INSERT_FRONT);
        return 0;
 }
 EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
index 999dac054bccc708dee92756b2ec4eb361e590bd..f4077840d3abdbc027585617b8379304865dae81 100644 (file)
@@ -549,8 +549,6 @@ plug_device_2:
 
        if (rq)
                blk_requeue_request(q, rq);
-       if (!elv_queue_empty(q))
-               blk_plug_device(q);
 }
 
 void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
@@ -562,8 +560,6 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
 
        if (rq)
                blk_requeue_request(q, rq);
-       if (!elv_queue_empty(q))
-               blk_plug_device(q);
 
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
index 88a380c5a4708bd9f396a52da5dd47128137f74f..6ab9ab2a5081eba538b43abf738f549562cb166b 100644 (file)
@@ -52,7 +52,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
        rq->cmd[0] = REQ_UNPARK_HEADS;
        rq->cmd_len = 1;
        rq->cmd_type = REQ_TYPE_SPECIAL;
-       elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1);
+       elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
 
 out:
        return;
index 9a35320fb59f774b0e840d934bd2a16422e99f22..54bfc274b39abe9fab10f0f1501e1d346b43138f 100644 (file)
@@ -1339,8 +1339,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
                        prepare_to_wait(&bitmap->overflow_wait, &__wait,
                                        TASK_UNINTERRUPTIBLE);
                        spin_unlock_irq(&bitmap->lock);
-                       md_unplug(bitmap->mddev);
-                       schedule();
+                       io_schedule();
                        finish_wait(&bitmap->overflow_wait, &__wait);
                        continue;
                }
index 4e054bd91664a8047968d51ec313958013b4a8af..2c62c1169f78afa27109532d1970ec201d082cae 100644 (file)
@@ -991,11 +991,6 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
        clone->bi_destructor = dm_crypt_bio_destructor;
 }
 
-static void kcryptd_unplug(struct crypt_config *cc)
-{
-       blk_unplug(bdev_get_queue(cc->dev->bdev));
-}
-
 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 {
        struct crypt_config *cc = io->target->private;
@@ -1008,10 +1003,8 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
         * one in order to decrypt the whole bio data *afterwards*.
         */
        clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
-       if (!clone) {
-               kcryptd_unplug(cc);
+       if (!clone)
                return 1;
-       }
 
        crypt_inc_pending(io);
 
index 924f5f0084c27191604907eaa9983a2bf687b6c1..400cf35094a4ef752a7fafbb9d9a9305818997af 100644 (file)
@@ -37,13 +37,6 @@ struct dm_kcopyd_client {
        unsigned int nr_pages;
        unsigned int nr_free_pages;
 
-       /*
-        * Block devices to unplug.
-        * Non-NULL pointer means that a block device has some pending requests
-        * and needs to be unplugged.
-        */
-       struct block_device *unplug[2];
-
        struct dm_io_client *io_client;
 
        wait_queue_head_t destroyq;
@@ -315,31 +308,6 @@ static int run_complete_job(struct kcopyd_job *job)
        return 0;
 }
 
-/*
- * Unplug the block device at the specified index.
- */
-static void unplug(struct dm_kcopyd_client *kc, int rw)
-{
-       if (kc->unplug[rw] != NULL) {
-               blk_unplug(bdev_get_queue(kc->unplug[rw]));
-               kc->unplug[rw] = NULL;
-       }
-}
-
-/*
- * Prepare block device unplug. If there's another device
- * to be unplugged at the same array index, we unplug that
- * device first.
- */
-static void prepare_unplug(struct dm_kcopyd_client *kc, int rw,
-                          struct block_device *bdev)
-{
-       if (likely(kc->unplug[rw] == bdev))
-               return;
-       unplug(kc, rw);
-       kc->unplug[rw] = bdev;
-}
-
 static void complete_io(unsigned long error, void *context)
 {
        struct kcopyd_job *job = (struct kcopyd_job *) context;
@@ -386,15 +354,12 @@ static int run_io_job(struct kcopyd_job *job)
                .client = job->kc->io_client,
        };
 
-       if (job->rw == READ) {
+       if (job->rw == READ)
                r = dm_io(&io_req, 1, &job->source, NULL);
-               prepare_unplug(job->kc, READ, job->source.bdev);
-       } else {
+       else {
                if (job->num_dests > 1)
                        io_req.bi_rw |= REQ_UNPLUG;
                r = dm_io(&io_req, job->num_dests, job->dests, NULL);
-               if (!(io_req.bi_rw & REQ_UNPLUG))
-                       prepare_unplug(job->kc, WRITE, job->dests[0].bdev);
        }
 
        return r;
@@ -466,6 +431,7 @@ static void do_work(struct work_struct *work)
 {
        struct dm_kcopyd_client *kc = container_of(work,
                                        struct dm_kcopyd_client, kcopyd_work);
+       struct blk_plug plug;
 
        /*
         * The order that these are called is *very* important.
@@ -473,18 +439,12 @@ static void do_work(struct work_struct *work)
         * Pages jobs when successful will jump onto the io jobs
         * list.  io jobs call wake when they complete and it all
         * starts again.
-        *
-        * Note that io_jobs add block devices to the unplug array,
-        * this array is cleared with "unplug" calls. It is thus
-        * forbidden to run complete_jobs after io_jobs and before
-        * unplug because the block device could be destroyed in
-        * job completion callback.
         */
+       blk_start_plug(&plug);
        process_jobs(&kc->complete_jobs, kc, run_complete_job);
        process_jobs(&kc->pages_jobs, kc, run_pages_job);
        process_jobs(&kc->io_jobs, kc, run_io_job);
-       unplug(kc, READ);
-       unplug(kc, WRITE);
+       blk_finish_plug(&plug);
 }
 
 /*
@@ -665,8 +625,6 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
        INIT_LIST_HEAD(&kc->io_jobs);
        INIT_LIST_HEAD(&kc->pages_jobs);
 
-       memset(kc->unplug, 0, sizeof(kc->unplug));
-
        kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
        if (!kc->job_pool)
                goto bad_slab;
index b9e1e15ef11cb3d45f53b8c06a0dd6b709fa1164..5ef136cdba91dd4a891f00920029962a54185d63 100644 (file)
@@ -394,7 +394,7 @@ static void raid_unplug(struct dm_target_callbacks *cb)
 {
        struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
 
-       md_raid5_unplug_device(rs->md.private);
+       md_raid5_kick_device(rs->md.private);
 }
 
 /*
index dee326775c6064b045c8cf523a25a528d9882caf..976ad4688afc2ee03189e208663429a74f759109 100644 (file)
@@ -842,8 +842,6 @@ static void do_mirror(struct work_struct *work)
        do_reads(ms, &reads);
        do_writes(ms, &writes);
        do_failures(ms, &failures);
-
-       dm_table_unplug_all(ms->ti->table);
 }
 
 /*-----------------------------------------------------------------
index 38e4eb1bb9656ba565150a48d68594836f51c4f9..f50a7b95225794272c81abdf2f66a396a7a69159 100644 (file)
@@ -1275,29 +1275,6 @@ int dm_table_any_busy_target(struct dm_table *t)
        return 0;
 }
 
-void dm_table_unplug_all(struct dm_table *t)
-{
-       struct dm_dev_internal *dd;
-       struct list_head *devices = dm_table_get_devices(t);
-       struct dm_target_callbacks *cb;
-
-       list_for_each_entry(dd, devices, list) {
-               struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
-               char b[BDEVNAME_SIZE];
-
-               if (likely(q))
-                       blk_unplug(q);
-               else
-                       DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
-                                    dm_device_name(t->md),
-                                    bdevname(dd->dm_dev.bdev, b));
-       }
-
-       list_for_each_entry(cb, &t->target_callbacks, list)
-               if (cb->unplug_fn)
-                       cb->unplug_fn(cb);
-}
-
 struct mapped_device *dm_table_get_md(struct dm_table *t)
 {
        return t->md;
@@ -1345,4 +1322,3 @@ EXPORT_SYMBOL(dm_table_get_mode);
 EXPORT_SYMBOL(dm_table_get_md);
 EXPORT_SYMBOL(dm_table_put);
 EXPORT_SYMBOL(dm_table_get);
-EXPORT_SYMBOL(dm_table_unplug_all);
index eaa3af0e0632af0c96c5c17566024265578e2303..d22b9905c1682e79e6f998dc6e638f9915d57355 100644 (file)
@@ -807,8 +807,6 @@ void dm_requeue_unmapped_request(struct request *clone)
        dm_unprep_request(rq);
 
        spin_lock_irqsave(q->queue_lock, flags);
-       if (elv_queue_empty(q))
-               blk_plug_device(q);
        blk_requeue_request(q, rq);
        spin_unlock_irqrestore(q->queue_lock, flags);
 
@@ -1613,10 +1611,10 @@ static void dm_request_fn(struct request_queue *q)
         * number of in-flight I/Os after the queue is stopped in
         * dm_suspend().
         */
-       while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
+       while (!blk_queue_stopped(q)) {
                rq = blk_peek_request(q);
                if (!rq)
-                       goto plug_and_out;
+                       goto delay_and_out;
 
                /* always use block 0 to find the target for flushes for now */
                pos = 0;
@@ -1627,7 +1625,7 @@ static void dm_request_fn(struct request_queue *q)
                BUG_ON(!dm_target_is_valid(ti));
 
                if (ti->type->busy && ti->type->busy(ti))
-                       goto plug_and_out;
+                       goto delay_and_out;
 
                blk_start_request(rq);
                clone = rq->special;
@@ -1647,11 +1645,8 @@ requeued:
        BUG_ON(!irqs_disabled());
        spin_lock(q->queue_lock);
 
-plug_and_out:
-       if (!elv_queue_empty(q))
-               /* Some requests still remain, retry later */
-               blk_plug_device(q);
-
+delay_and_out:
+       blk_delay_queue(q, HZ / 10);
 out:
        dm_table_put(map);
 
@@ -1680,20 +1675,6 @@ static int dm_lld_busy(struct request_queue *q)
        return r;
 }
 
-static void dm_unplug_all(struct request_queue *q)
-{
-       struct mapped_device *md = q->queuedata;
-       struct dm_table *map = dm_get_live_table(md);
-
-       if (map) {
-               if (dm_request_based(md))
-                       generic_unplug_device(q);
-
-               dm_table_unplug_all(map);
-               dm_table_put(map);
-       }
-}
-
 static int dm_any_congested(void *congested_data, int bdi_bits)
 {
        int r = bdi_bits;
@@ -1817,7 +1798,6 @@ static void dm_init_md_queue(struct mapped_device *md)
        md->queue->backing_dev_info.congested_data = md;
        blk_queue_make_request(md->queue, dm_request);
        blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
-       md->queue->unplug_fn = dm_unplug_all;
        blk_queue_merge_bvec(md->queue, dm_merge_bvec);
        blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA);
 }
@@ -2263,8 +2243,6 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
        int r = 0;
        DECLARE_WAITQUEUE(wait, current);
 
-       dm_unplug_all(md->queue);
-
        add_wait_queue(&md->wait, &wait);
 
        while (1) {
@@ -2539,7 +2517,6 @@ int dm_resume(struct mapped_device *md)
 
        clear_bit(DMF_SUSPENDED, &md->flags);
 
-       dm_table_unplug_all(map);
        r = 0;
 out:
        dm_table_put(map);
index 8a2f767f26d80c2dc3c949914049cf14ef591ff0..38861b5b9d90eab3a3bbe3e916ce33b5e43328f7 100644 (file)
@@ -87,22 +87,6 @@ static int linear_mergeable_bvec(struct request_queue *q,
        return maxsectors << 9;
 }
 
-static void linear_unplug(struct request_queue *q)
-{
-       mddev_t *mddev = q->queuedata;
-       linear_conf_t *conf;
-       int i;
-
-       rcu_read_lock();
-       conf = rcu_dereference(mddev->private);
-
-       for (i=0; i < mddev->raid_disks; i++) {
-               struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
-               blk_unplug(r_queue);
-       }
-       rcu_read_unlock();
-}
-
 static int linear_congested(void *data, int bits)
 {
        mddev_t *mddev = data;
@@ -225,7 +209,6 @@ static int linear_run (mddev_t *mddev)
        md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
 
        blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
-       mddev->queue->unplug_fn = linear_unplug;
        mddev->queue->backing_dev_info.congested_fn = linear_congested;
        mddev->queue->backing_dev_info.congested_data = mddev;
        md_integrity_register(mddev);
index 0cc30ecda4c128196cd147fc06f8cf9ddbb1085b..ca0d79c264b98ad66c32231e819ff1e0ecfab060 100644 (file)
@@ -4812,7 +4812,6 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
                __md_stop_writes(mddev);
                md_stop(mddev);
                mddev->queue->merge_bvec_fn = NULL;
-               mddev->queue->unplug_fn = NULL;
                mddev->queue->backing_dev_info.congested_fn = NULL;
 
                /* tell userspace to handle 'inactive' */
@@ -6669,8 +6668,6 @@ EXPORT_SYMBOL_GPL(md_allow_write);
 
 void md_unplug(mddev_t *mddev)
 {
-       if (mddev->queue)
-               blk_unplug(mddev->queue);
        if (mddev->plug)
                mddev->plug->unplug_fn(mddev->plug);
 }
@@ -6853,7 +6850,6 @@ void md_do_sync(mddev_t *mddev)
                     >= mddev->resync_max - mddev->curr_resync_completed
                            )) {
                        /* time to update curr_resync_completed */
-                       md_unplug(mddev);
                        wait_event(mddev->recovery_wait,
                                   atomic_read(&mddev->recovery_active) == 0);
                        mddev->curr_resync_completed = j;
@@ -6929,7 +6925,6 @@ void md_do_sync(mddev_t *mddev)
                 * about not overloading the IO subsystem. (things like an
                 * e2fsck being done on the RAID array should execute fast)
                 */
-               md_unplug(mddev);
                cond_resched();
 
                currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
@@ -6948,8 +6943,6 @@ void md_do_sync(mddev_t *mddev)
         * this also signals 'finished resyncing' to md_stop
         */
  out:
-       md_unplug(mddev);
-
        wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
 
        /* tell personality that we are finished */
index 6d7ddf32ef2ec932040d0611cbf109b82a7a656c..1cc8ed44e4ad053fb400ba64a4d40c9c1c172be9 100644 (file)
@@ -106,36 +106,6 @@ static void multipath_end_request(struct bio *bio, int error)
        rdev_dec_pending(rdev, conf->mddev);
 }
 
-static void unplug_slaves(mddev_t *mddev)
-{
-       multipath_conf_t *conf = mddev->private;
-       int i;
-
-       rcu_read_lock();
-       for (i=0; i<mddev->raid_disks; i++) {
-               mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
-               if (rdev && !test_bit(Faulty, &rdev->flags)
-                   && atomic_read(&rdev->nr_pending)) {
-                       struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
-                       atomic_inc(&rdev->nr_pending);
-                       rcu_read_unlock();
-
-                       blk_unplug(r_queue);
-
-                       rdev_dec_pending(rdev, mddev);
-                       rcu_read_lock();
-               }
-       }
-       rcu_read_unlock();
-}
-
-static void multipath_unplug(struct request_queue *q)
-{
-       unplug_slaves(q->queuedata);
-}
-
-
 static int multipath_make_request(mddev_t *mddev, struct bio * bio)
 {
        multipath_conf_t *conf = mddev->private;
@@ -518,7 +488,6 @@ static int multipath_run (mddev_t *mddev)
         */
        md_set_array_sectors(mddev, multipath_size(mddev, 0, 0));
 
-       mddev->queue->unplug_fn = multipath_unplug;
        mddev->queue->backing_dev_info.congested_fn = multipath_congested;
        mddev->queue->backing_dev_info.congested_data = mddev;
        md_integrity_register(mddev);
index 637a96855edb2b6c9c99e988fcdb78b696e703b2..6338c0fe6208f96981f13554784b9c8e9f0cd2fd 100644 (file)
 #include "raid0.h"
 #include "raid5.h"
 
-static void raid0_unplug(struct request_queue *q)
-{
-       mddev_t *mddev = q->queuedata;
-       raid0_conf_t *conf = mddev->private;
-       mdk_rdev_t **devlist = conf->devlist;
-       int raid_disks = conf->strip_zone[0].nb_dev;
-       int i;
-
-       for (i=0; i < raid_disks; i++) {
-               struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
-
-               blk_unplug(r_queue);
-       }
-}
-
 static int raid0_congested(void *data, int bits)
 {
        mddev_t *mddev = data;
@@ -272,7 +257,6 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
                       mdname(mddev),
                       (unsigned long long)smallest->sectors);
        }
-       mddev->queue->unplug_fn = raid0_unplug;
        mddev->queue->backing_dev_info.congested_fn = raid0_congested;
        mddev->queue->backing_dev_info.congested_data = mddev;
 
index a23ffa397ba91b0cd4dc2bb148f50122d72d6beb..b67d822d57ae20ef384a68f046cc74cd50278209 100644 (file)
 #define        NR_RAID1_BIOS 256
 
 
-static void unplug_slaves(mddev_t *mddev);
-
 static void allow_barrier(conf_t *conf);
 static void lower_barrier(conf_t *conf);
 
 static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
 {
        struct pool_info *pi = data;
-       r1bio_t *r1_bio;
        int size = offsetof(r1bio_t, bios[pi->raid_disks]);
 
        /* allocate a r1bio with room for raid_disks entries in the bios array */
-       r1_bio = kzalloc(size, gfp_flags);
-       if (!r1_bio && pi->mddev)
-               unplug_slaves(pi->mddev);
-
-       return r1_bio;
+       return kzalloc(size, gfp_flags);
 }
 
 static void r1bio_pool_free(void *r1_bio, void *data)
@@ -91,10 +84,8 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
        int i, j;
 
        r1_bio = r1bio_pool_alloc(gfp_flags, pi);
-       if (!r1_bio) {
-               unplug_slaves(pi->mddev);
+       if (!r1_bio)
                return NULL;
-       }
 
        /*
         * Allocate bios : 1 for reading, n-1 for writing
@@ -520,37 +511,6 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
        return new_disk;
 }
 
-static void unplug_slaves(mddev_t *mddev)
-{
-       conf_t *conf = mddev->private;
-       int i;
-
-       rcu_read_lock();
-       for (i=0; i<mddev->raid_disks; i++) {
-               mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
-               if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-                       struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
-                       atomic_inc(&rdev->nr_pending);
-                       rcu_read_unlock();
-
-                       blk_unplug(r_queue);
-
-                       rdev_dec_pending(rdev, mddev);
-                       rcu_read_lock();
-               }
-       }
-       rcu_read_unlock();
-}
-
-static void raid1_unplug(struct request_queue *q)
-{
-       mddev_t *mddev = q->queuedata;
-
-       unplug_slaves(mddev);
-       md_wakeup_thread(mddev->thread);
-}
-
 static int raid1_congested(void *data, int bits)
 {
        mddev_t *mddev = data;
@@ -580,20 +540,16 @@ static int raid1_congested(void *data, int bits)
 }
 
 
-static int flush_pending_writes(conf_t *conf)
+static void flush_pending_writes(conf_t *conf)
 {
        /* Any writes that have been queued but are awaiting
         * bitmap updates get flushed here.
-        * We return 1 if any requests were actually submitted.
         */
-       int rv = 0;
-
        spin_lock_irq(&conf->device_lock);
 
        if (conf->pending_bio_list.head) {
                struct bio *bio;
                bio = bio_list_get(&conf->pending_bio_list);
-               blk_remove_plug(conf->mddev->queue);
                spin_unlock_irq(&conf->device_lock);
                /* flush any pending bitmap writes to
                 * disk before proceeding w/ I/O */
@@ -605,10 +561,14 @@ static int flush_pending_writes(conf_t *conf)
                        generic_make_request(bio);
                        bio = next;
                }
-               rv = 1;
        } else
                spin_unlock_irq(&conf->device_lock);
-       return rv;
+}
+
+static void md_kick_device(mddev_t *mddev)
+{
+       blk_flush_plug(current);
+       md_wakeup_thread(mddev->thread);
 }
 
 /* Barriers....
@@ -640,8 +600,7 @@ static void raise_barrier(conf_t *conf)
 
        /* Wait until no block IO is waiting */
        wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
-                           conf->resync_lock,
-                           raid1_unplug(conf->mddev->queue));
+                           conf->resync_lock, md_kick_device(conf->mddev));
 
        /* block any new IO from starting */
        conf->barrier++;
@@ -649,8 +608,7 @@ static void raise_barrier(conf_t *conf)
        /* Now wait for all pending IO to complete */
        wait_event_lock_irq(conf->wait_barrier,
                            !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
-                           conf->resync_lock,
-                           raid1_unplug(conf->mddev->queue));
+                           conf->resync_lock, md_kick_device(conf->mddev));
 
        spin_unlock_irq(&conf->resync_lock);
 }
@@ -672,7 +630,7 @@ static void wait_barrier(conf_t *conf)
                conf->nr_waiting++;
                wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
                                    conf->resync_lock,
-                                   raid1_unplug(conf->mddev->queue));
+                                   md_kick_device(conf->mddev));
                conf->nr_waiting--;
        }
        conf->nr_pending++;
@@ -709,7 +667,7 @@ static void freeze_array(conf_t *conf)
                            conf->nr_pending == conf->nr_queued+1,
                            conf->resync_lock,
                            ({ flush_pending_writes(conf);
-                              raid1_unplug(conf->mddev->queue); }));
+                              md_kick_device(conf->mddev); }));
        spin_unlock_irq(&conf->resync_lock);
 }
 static void unfreeze_array(conf_t *conf)
@@ -959,7 +917,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                atomic_inc(&r1_bio->remaining);
                spin_lock_irqsave(&conf->device_lock, flags);
                bio_list_add(&conf->pending_bio_list, mbio);
-               blk_plug_device(mddev->queue);
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }
        r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@@ -968,7 +925,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        /* In case raid1d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
 
-       if (do_sync)
+       if (do_sync || !bitmap)
                md_wakeup_thread(mddev->thread);
 
        return 0;
@@ -1558,7 +1515,6 @@ static void raid1d(mddev_t *mddev)
        unsigned long flags;
        conf_t *conf = mddev->private;
        struct list_head *head = &conf->retry_list;
-       int unplug=0;
        mdk_rdev_t *rdev;
 
        md_check_recovery(mddev);
@@ -1566,7 +1522,7 @@ static void raid1d(mddev_t *mddev)
        for (;;) {
                char b[BDEVNAME_SIZE];
 
-               unplug += flush_pending_writes(conf);
+               flush_pending_writes(conf);
 
                spin_lock_irqsave(&conf->device_lock, flags);
                if (list_empty(head)) {
@@ -1580,10 +1536,9 @@ static void raid1d(mddev_t *mddev)
 
                mddev = r1_bio->mddev;
                conf = mddev->private;
-               if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
+               if (test_bit(R1BIO_IsSync, &r1_bio->state))
                        sync_request_write(mddev, r1_bio);
-                       unplug = 1;
-               } else {
+               else {
                        int disk;
 
                        /* we got a read error. Maybe the drive is bad.  Maybe just
@@ -1633,14 +1588,11 @@ static void raid1d(mddev_t *mddev)
                                bio->bi_end_io = raid1_end_read_request;
                                bio->bi_rw = READ | do_sync;
                                bio->bi_private = r1_bio;
-                               unplug = 1;
                                generic_make_request(bio);
                        }
                }
                cond_resched();
        }
-       if (unplug)
-               unplug_slaves(mddev);
 }
 
 
@@ -2064,7 +2016,6 @@ static int run(mddev_t *mddev)
 
        md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
 
-       mddev->queue->unplug_fn = raid1_unplug;
        mddev->queue->backing_dev_info.congested_fn = raid1_congested;
        mddev->queue->backing_dev_info.congested_data = mddev;
        md_integrity_register(mddev);
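
With blk_plug_device() gone from make_request() above, batching now lives in the submitting task rather than in the md queue. Below is a minimal sketch of what a caller sitting above md can do instead, using the on-stack plugging API this series introduces; the helper name and the bios array are hypothetical and not part of this patch:

    #include <linux/blkdev.h>
    #include <linux/fs.h>

    /* Hypothetical helper, for illustration only: submit a batch of write
     * bios under one on-stack plug.  The plug takes over the batching role
     * that blk_plug_device()/blk_remove_plug() used to play per queue.
     */
    static void submit_write_batch(struct bio **bios, int nr)
    {
            struct blk_plug plug;
            int i;

            blk_start_plug(&plug);          /* start batching for this task */
            for (i = 0; i < nr; i++)
                    submit_bio(WRITE, bios[i]);
            blk_finish_plug(&plug);         /* push the whole batch downward */
    }
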
index 3b607b28741b8e666c0a19d43477203c475e7e42..e79f1c5bf71b52be6705d4acc144b3a2028a6b8a 100644 (file)
  */
 #define        NR_RAID10_BIOS 256
 
-static void unplug_slaves(mddev_t *mddev);
-
 static void allow_barrier(conf_t *conf);
 static void lower_barrier(conf_t *conf);
 
 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 {
        conf_t *conf = data;
-       r10bio_t *r10_bio;
        int size = offsetof(struct r10bio_s, devs[conf->copies]);
 
        /* allocate a r10bio with room for raid_disks entries in the bios array */
-       r10_bio = kzalloc(size, gfp_flags);
-       if (!r10_bio && conf->mddev)
-               unplug_slaves(conf->mddev);
-
-       return r10_bio;
+       return kzalloc(size, gfp_flags);
 }
 
 static void r10bio_pool_free(void *r10_bio, void *data)
@@ -106,10 +99,8 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
        int nalloc;
 
        r10_bio = r10bio_pool_alloc(gfp_flags, conf);
-       if (!r10_bio) {
-               unplug_slaves(conf->mddev);
+       if (!r10_bio)
                return NULL;
-       }
 
        if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
                nalloc = conf->copies; /* resync */
@@ -597,37 +588,6 @@ rb_out:
        return disk;
 }
 
-static void unplug_slaves(mddev_t *mddev)
-{
-       conf_t *conf = mddev->private;
-       int i;
-
-       rcu_read_lock();
-       for (i=0; i < conf->raid_disks; i++) {
-               mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
-               if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-                       struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
-                       atomic_inc(&rdev->nr_pending);
-                       rcu_read_unlock();
-
-                       blk_unplug(r_queue);
-
-                       rdev_dec_pending(rdev, mddev);
-                       rcu_read_lock();
-               }
-       }
-       rcu_read_unlock();
-}
-
-static void raid10_unplug(struct request_queue *q)
-{
-       mddev_t *mddev = q->queuedata;
-
-       unplug_slaves(q->queuedata);
-       md_wakeup_thread(mddev->thread);
-}
-
 static int raid10_congested(void *data, int bits)
 {
        mddev_t *mddev = data;
@@ -649,20 +609,16 @@ static int raid10_congested(void *data, int bits)
        return ret;
 }
 
-static int flush_pending_writes(conf_t *conf)
+static void flush_pending_writes(conf_t *conf)
 {
        /* Any writes that have been queued but are awaiting
         * bitmap updates get flushed here.
-        * We return 1 if any requests were actually submitted.
         */
-       int rv = 0;
-
        spin_lock_irq(&conf->device_lock);
 
        if (conf->pending_bio_list.head) {
                struct bio *bio;
                bio = bio_list_get(&conf->pending_bio_list);
-               blk_remove_plug(conf->mddev->queue);
                spin_unlock_irq(&conf->device_lock);
                /* flush any pending bitmap writes to disk
                 * before proceeding w/ I/O */
@@ -674,11 +630,16 @@ static int flush_pending_writes(conf_t *conf)
                        generic_make_request(bio);
                        bio = next;
                }
-               rv = 1;
        } else
                spin_unlock_irq(&conf->device_lock);
-       return rv;
 }
+
+static void md_kick_device(mddev_t *mddev)
+{
+       blk_flush_plug(current);
+       md_wakeup_thread(mddev->thread);
+}
+
 /* Barriers....
  * Sometimes we need to suspend IO while we do something else,
  * either some resync/recovery, or reconfigure the array.
@@ -708,8 +669,7 @@ static void raise_barrier(conf_t *conf, int force)
 
        /* Wait until no block IO is waiting (unless 'force') */
        wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
-                           conf->resync_lock,
-                           raid10_unplug(conf->mddev->queue));
+                           conf->resync_lock, md_kick_device(conf->mddev));
 
        /* block any new IO from starting */
        conf->barrier++;
@@ -717,8 +677,7 @@ static void raise_barrier(conf_t *conf, int force)
        /* Now wait for all pending IO to complete */
        wait_event_lock_irq(conf->wait_barrier,
                            !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
-                           conf->resync_lock,
-                           raid10_unplug(conf->mddev->queue));
+                           conf->resync_lock, md_kick_device(conf->mddev));
 
        spin_unlock_irq(&conf->resync_lock);
 }
@@ -739,7 +698,7 @@ static void wait_barrier(conf_t *conf)
                conf->nr_waiting++;
                wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
                                    conf->resync_lock,
-                                   raid10_unplug(conf->mddev->queue));
+                                   md_kick_device(conf->mddev));
                conf->nr_waiting--;
        }
        conf->nr_pending++;
@@ -776,7 +735,7 @@ static void freeze_array(conf_t *conf)
                            conf->nr_pending == conf->nr_queued+1,
                            conf->resync_lock,
                            ({ flush_pending_writes(conf);
-                              raid10_unplug(conf->mddev->queue); }));
+                              md_kick_device(conf->mddev); }));
        spin_unlock_irq(&conf->resync_lock);
 }
 
@@ -971,7 +930,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                atomic_inc(&r10_bio->remaining);
                spin_lock_irqsave(&conf->device_lock, flags);
                bio_list_add(&conf->pending_bio_list, mbio);
-               blk_plug_device(mddev->queue);
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }
 
@@ -988,7 +946,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        /* In case raid10d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
 
-       if (do_sync)
+       if (do_sync || !mddev->bitmap)
                md_wakeup_thread(mddev->thread);
 
        return 0;
@@ -1681,7 +1639,6 @@ static void raid10d(mddev_t *mddev)
        unsigned long flags;
        conf_t *conf = mddev->private;
        struct list_head *head = &conf->retry_list;
-       int unplug=0;
        mdk_rdev_t *rdev;
 
        md_check_recovery(mddev);
@@ -1689,7 +1646,7 @@ static void raid10d(mddev_t *mddev)
        for (;;) {
                char b[BDEVNAME_SIZE];
 
-               unplug += flush_pending_writes(conf);
+               flush_pending_writes(conf);
 
                spin_lock_irqsave(&conf->device_lock, flags);
                if (list_empty(head)) {
@@ -1703,13 +1660,11 @@ static void raid10d(mddev_t *mddev)
 
                mddev = r10_bio->mddev;
                conf = mddev->private;
-               if (test_bit(R10BIO_IsSync, &r10_bio->state)) {
+               if (test_bit(R10BIO_IsSync, &r10_bio->state))
                        sync_request_write(mddev, r10_bio);
-                       unplug = 1;
-               } else  if (test_bit(R10BIO_IsRecover, &r10_bio->state)) {
+               else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
                        recovery_request_write(mddev, r10_bio);
-                       unplug = 1;
-               } else {
+               else {
                        int mirror;
                        /* we got a read error. Maybe the drive is bad.  Maybe just
                         * the block and we can fix it.
@@ -1756,14 +1711,11 @@ static void raid10d(mddev_t *mddev)
                                bio->bi_rw = READ | do_sync;
                                bio->bi_private = r10_bio;
                                bio->bi_end_io = raid10_end_read_request;
-                               unplug = 1;
                                generic_make_request(bio);
                        }
                }
                cond_resched();
        }
-       if (unplug)
-               unplug_slaves(mddev);
 }
 
 
@@ -2376,7 +2328,6 @@ static int run(mddev_t *mddev)
        md_set_array_sectors(mddev, size);
        mddev->resync_max_sectors = size;
 
-       mddev->queue->unplug_fn = raid10_unplug;
        mddev->queue->backing_dev_info.congested_fn = raid10_congested;
        mddev->queue->backing_dev_info.congested_data = mddev;
 
index 702812824195ae7c0a6333381659575f552a8f62..e867ee42b15239707c0dfede4be71d2bc9a72e20 100644 (file)
@@ -433,8 +433,6 @@ static int has_failed(raid5_conf_t *conf)
        return 0;
 }
 
-static void unplug_slaves(mddev_t *mddev);
-
 static struct stripe_head *
 get_active_stripe(raid5_conf_t *conf, sector_t sector,
                  int previous, int noblock, int noquiesce)
@@ -463,8 +461,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
                                                     < (conf->max_nr_stripes *3/4)
                                                     || !conf->inactive_blocked),
                                                    conf->device_lock,
-                                                   md_raid5_unplug_device(conf)
-                                       );
+                                                   md_raid5_kick_device(conf));
                                conf->inactive_blocked = 0;
                        } else
                                init_stripe(sh, sector, previous);
@@ -1473,8 +1470,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
                wait_event_lock_irq(conf->wait_for_stripe,
                                    !list_empty(&conf->inactive_list),
                                    conf->device_lock,
-                                   unplug_slaves(conf->mddev)
-                       );
+                                   blk_flush_plug(current));
                osh = get_free_stripe(conf);
                spin_unlock_irq(&conf->device_lock);
                atomic_set(&nsh->count, 1);
@@ -3645,58 +3641,19 @@ static void activate_bit_delay(raid5_conf_t *conf)
        }
 }
 
-static void unplug_slaves(mddev_t *mddev)
+void md_raid5_kick_device(raid5_conf_t *conf)
 {
-       raid5_conf_t *conf = mddev->private;
-       int i;
-       int devs = max(conf->raid_disks, conf->previous_raid_disks);
-
-       rcu_read_lock();
-       for (i = 0; i < devs; i++) {
-               mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
-               if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-                       struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
-                       atomic_inc(&rdev->nr_pending);
-                       rcu_read_unlock();
-
-                       blk_unplug(r_queue);
-
-                       rdev_dec_pending(rdev, mddev);
-                       rcu_read_lock();
-               }
-       }
-       rcu_read_unlock();
-}
-
-void md_raid5_unplug_device(raid5_conf_t *conf)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&conf->device_lock, flags);
-
-       if (plugger_remove_plug(&conf->plug)) {
-               conf->seq_flush++;
-               raid5_activate_delayed(conf);
-       }
+       blk_flush_plug(current);
+       raid5_activate_delayed(conf);
        md_wakeup_thread(conf->mddev->thread);
-
-       spin_unlock_irqrestore(&conf->device_lock, flags);
-
-       unplug_slaves(conf->mddev);
 }
-EXPORT_SYMBOL_GPL(md_raid5_unplug_device);
+EXPORT_SYMBOL_GPL(md_raid5_kick_device);
 
 static void raid5_unplug(struct plug_handle *plug)
 {
        raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug);
-       md_raid5_unplug_device(conf);
-}
 
-static void raid5_unplug_queue(struct request_queue *q)
-{
-       mddev_t *mddev = q->queuedata;
-       md_raid5_unplug_device(mddev->private);
+       md_raid5_kick_device(conf);
 }
 
 int md_raid5_congested(mddev_t *mddev, int bits)
@@ -4100,7 +4057,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
                                 * add failed due to overlap.  Flush everything
                                 * and wait a while
                                 */
-                               md_raid5_unplug_device(conf);
+                               md_raid5_kick_device(conf);
                                release_stripe(sh);
                                schedule();
                                goto retry;
@@ -4365,7 +4322,6 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
 
        if (sector_nr >= max_sector) {
                /* just being told to finish up .. nothing much to do */
-               unplug_slaves(mddev);
 
                if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
                        end_reshape(conf);
@@ -4569,7 +4525,6 @@ static void raid5d(mddev_t *mddev)
        spin_unlock_irq(&conf->device_lock);
 
        async_tx_issue_pending_all();
-       unplug_slaves(mddev);
 
        pr_debug("--- raid5d inactive\n");
 }
@@ -5205,7 +5160,6 @@ static int run(mddev_t *mddev)
                mddev->queue->backing_dev_info.congested_data = mddev;
                mddev->queue->backing_dev_info.congested_fn = raid5_congested;
                mddev->queue->queue_lock = &conf->device_lock;
-               mddev->queue->unplug_fn = raid5_unplug_queue;
 
                chunk_size = mddev->chunk_sectors << 9;
                blk_queue_io_min(mddev->queue, chunk_size);
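
The raid5 wait sites above now run md_raid5_kick_device() or a bare blk_flush_plug(current) before sleeping. A hedged sketch of that wait-side idiom in isolation; wait_for_event() and the done flag are hypothetical names used only for illustration:

    #include <linux/blkdev.h>
    #include <linux/sched.h>
    #include <linux/wait.h>

    /* Hypothetical wait helper, illustration only: before each sleep the
     * task flushes its own on-stack plug, so it can never end up waiting
     * on I/O that it has batched but not yet dispatched.
     */
    static void wait_for_event(wait_queue_head_t *wq, int *done)
    {
            DEFINE_WAIT(wait);

            for (;;) {
                    prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
                    if (*done)
                            break;
                    blk_flush_plug(current);  /* flushes only this task's plug */
                    io_schedule();
            }
            finish_wait(wq, &wait);
    }
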
index 2ace0582b4098f102cb294aac4355c025ad4723e..8d563a4f022a778a6d9354a3c1bf514c6f5e10b6 100644 (file)
@@ -503,6 +503,6 @@ static inline int algorithm_is_DDF(int layout)
 }
 
 extern int md_raid5_congested(mddev_t *mddev, int bits);
-extern void md_raid5_unplug_device(raid5_conf_t *conf);
+extern void md_raid5_kick_device(raid5_conf_t *conf);
 extern int raid5_set_cache_size(mddev_t *mddev, int size);
 #endif
index ae7cad1858987f489b3b0b24a3dcaff64d143dd6..b29eb4eaa86e1342fc3c484c69e627ee66f247db 100644 (file)
@@ -895,11 +895,7 @@ static void i2o_block_request_fn(struct request_queue *q)
 {
        struct request *req;
 
-       while (!blk_queue_plugged(q)) {
-               req = blk_peek_request(q);
-               if (!req)
-                       break;
-
+       while ((req = blk_peek_request(q)) != NULL) {
                if (req->cmd_type == REQ_TYPE_FS) {
                        struct i2o_block_delayed_request *dreq;
                        struct i2o_block_request *ireq = req->special;
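
The i2o loop above and several drivers that follow get the same treatment: with no plugged state left to test, the dispatch loop simply drains whatever blk_peek_request() returns. A skeletal sketch of the resulting shape; the function name is hypothetical and the hardware hand-off is elided:

    #include <linux/blkdev.h>
    #include <linux/errno.h>

    /* Hypothetical request_fn skeleton, illustration only.  Runs with the
     * queue lock held; the old !blk_queue_plugged(q) guard is gone.
     */
    static void example_request_fn(struct request_queue *q)
    {
            struct request *req;

            while ((req = blk_peek_request(q)) != NULL) {
                    blk_start_request(req);          /* dequeue the request */
                    if (req->cmd_type != REQ_TYPE_FS) {
                            __blk_end_request_all(req, -EIO);
                            continue;
                    }
                    /* hand the request to the hardware here */
                    __blk_end_request_all(req, 0);   /* complete it for the sketch */
            }
    }
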
index 4e42d030e09724324e9eb6ec475103dff83a86af..2ae727568df92b9edeaf330ea46751d6e0493474 100644 (file)
@@ -55,8 +55,7 @@ static int mmc_queue_thread(void *d)
 
                spin_lock_irq(q->queue_lock);
                set_current_state(TASK_INTERRUPTIBLE);
-               if (!blk_queue_plugged(q))
-                       req = blk_fetch_request(q);
+               req = blk_fetch_request(q);
                mq->req = req;
                spin_unlock_irq(q->queue_lock);
 
index 794bfd962266f3c55c95e24c60c768a9f5b261c0..4d2df2f76ea0daa718215c179e88fe2b45249516 100644 (file)
@@ -1917,7 +1917,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
                return;
        }
        /* Now we try to fetch requests from the request queue */
-       while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
+       while ((req = blk_peek_request(queue))) {
                if (basedev->features & DASD_FEATURE_READONLY &&
                    rq_data_dir(req) == WRITE) {
                        DBF_DEV_EVENT(DBF_ERR, basedev,
index 55d2d0f4eabc9cbd67a4a6fe1638305f883beb00..f061b2527b44175320a5bad26af65515d54ea83e 100644 (file)
@@ -161,7 +161,6 @@ tapeblock_requeue(struct work_struct *work) {
 
        spin_lock_irq(&device->blk_data.request_queue_lock);
        while (
-               !blk_queue_plugged(queue) &&
                blk_peek_request(queue) &&
                nr_queued < TAPEBLOCK_MIN_REQUEUE
        ) {
index 998c01be3234f7dfe7ff87ffe3d73bcb4ae63b67..2cefabd5bdb58048db0d61c879573af2b36bf9bd 100644 (file)
@@ -3913,7 +3913,7 @@ fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
        if (!get_device(dev))
                return;
 
-       while (!blk_queue_plugged(q)) {
+       while (1) {
                if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) &&
                    !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
                        break;
index 927e99cb72250c61154639ea8ca1eae2dd32b198..c6fcf76cade549f2fb880d976c839b915e0b9dc3 100644 (file)
@@ -173,11 +173,7 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
        int ret;
        int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
 
-       while (!blk_queue_plugged(q)) {
-               req = blk_fetch_request(q);
-               if (!req)
-                       break;
-
+       while ((req = blk_fetch_request(q)) != NULL) {
                spin_unlock_irq(q->queue_lock);
 
                handler = to_sas_internal(shost->transportt)->f->smp_handler;
index 67f0c09983c87ef9c367b986527328e0a03e10c6..c1b539d7b0d37ac86a454cc13d4a14e629e20f84 100644 (file)
@@ -392,9 +392,8 @@ static int iblock_do_task(struct se_task *task)
 {
        struct se_device *dev = task->task_se_cmd->se_dev;
        struct iblock_req *req = IBLOCK_REQ(task);
-       struct iblock_dev *ibd = (struct iblock_dev *)req->ib_dev;
-       struct request_queue *q = bdev_get_queue(ibd->ibd_bd);
        struct bio *bio = req->ib_bio, *nbio = NULL;
+       struct blk_plug plug;
        int rw;
 
        if (task->task_data_direction == DMA_TO_DEVICE) {
@@ -412,6 +411,7 @@ static int iblock_do_task(struct se_task *task)
                rw = READ;
        }
 
+       blk_start_plug(&plug);
        while (bio) {
                nbio = bio->bi_next;
                bio->bi_next = NULL;
@@ -421,9 +421,8 @@ static int iblock_do_task(struct se_task *task)
                submit_bio(rw, bio);
                bio = nbio;
        }
+       blk_finish_plug(&plug);
 
-       if (q->unplug_fn)
-               q->unplug_fn(q);
        return PYX_TRANSPORT_SENT_TO_TRANSPORT;
 }
 
index 65794b8fe79ebe81d760bd1da6a4e8b5dd461680..1cc84b2761312ed82206a1a12d8b7d283bfed357 100644 (file)
@@ -73,7 +73,6 @@ static sector_t _adfs_bmap(struct address_space *mapping, sector_t block)
 static const struct address_space_operations adfs_aops = {
        .readpage       = adfs_readpage,
        .writepage      = adfs_writepage,
-       .sync_page      = block_sync_page,
        .write_begin    = adfs_write_begin,
        .write_end      = generic_write_end,
        .bmap           = _adfs_bmap
index 0a90dcd46de28d33f2768fde829ac7800abdc9e4..acf321b70fcd1a8522da3658449ebcaed3a9e733 100644 (file)
@@ -429,7 +429,6 @@ static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
 const struct address_space_operations affs_aops = {
        .readpage = affs_readpage,
        .writepage = affs_writepage,
-       .sync_page = block_sync_page,
        .write_begin = affs_write_begin,
        .write_end = generic_write_end,
        .bmap = _affs_bmap
@@ -786,7 +785,6 @@ out:
 const struct address_space_operations affs_aops_ofs = {
        .readpage = affs_readpage_ofs,
        //.writepage = affs_writepage_ofs,
-       //.sync_page = affs_sync_page_ofs,
        .write_begin = affs_write_begin_ofs,
        .write_end = affs_write_end_ofs
 };
index fc557a3be0a9af055a9a9505d3c161a77a7f2129..c5ea494ea9e248a7c82b9bf98a967df9f430221f 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1550,9 +1550,11 @@ static void aio_batch_free(struct hlist_head *batch_hash)
        struct hlist_node *pos, *n;
        int i;
 
+       /*
+        * TODO: kill this
+        */
        for (i = 0; i < AIO_BATCH_HASH_SIZE; i++) {
                hlist_for_each_entry_safe(abe, pos, n, &batch_hash[i], list) {
-                       blk_run_address_space(abe->mapping);
                        iput(abe->mapping->host);
                        hlist_del(&abe->list);
                        mempool_free(abe, abe_pool);
index b1d0c794747b7537dbade4895eccae70aa78cc5e..06457ed8f3e7c5fc258fc8cdc7f0f3bf88eeddf4 100644 (file)
@@ -75,7 +75,6 @@ static const struct inode_operations befs_dir_inode_operations = {
 
 static const struct address_space_operations befs_aops = {
        .readpage       = befs_readpage,
-       .sync_page      = block_sync_page,
        .bmap           = befs_bmap,
 };
 
index eb67edd0f8ea3f39c112faebfed1a877b37f686d..f20e8a71062f4d2bc15e1daacf44398b8cab4906 100644 (file)
@@ -186,7 +186,6 @@ static sector_t bfs_bmap(struct address_space *mapping, sector_t block)
 const struct address_space_operations bfs_aops = {
        .readpage       = bfs_readpage,
        .writepage      = bfs_writepage,
-       .sync_page      = block_sync_page,
        .write_begin    = bfs_write_begin,
        .write_end      = generic_write_end,
        .bmap           = bfs_bmap,
index 4fb8a34315310ae43c158670426a9699a7c0f7b0..fffc2c67239644bd8a81cdc5d16ac9388a5583cb 100644 (file)
@@ -1520,7 +1520,6 @@ static int blkdev_releasepage(struct page *page, gfp_t wait)
 static const struct address_space_operations def_blk_aops = {
        .readpage       = blkdev_readpage,
        .writepage      = blkdev_writepage,
-       .sync_page      = block_sync_page,
        .write_begin    = blkdev_write_begin,
        .write_end      = blkdev_write_end,
        .writepages     = generic_writepages,
index e1aa8d607bc7d1a85734d9d28f26c8cd6efff585..ada1f6bd0a571d4de0fe2d36848ebd4cb1ae74fa 100644 (file)
@@ -847,7 +847,6 @@ static const struct address_space_operations btree_aops = {
        .writepages     = btree_writepages,
        .releasepage    = btree_releasepage,
        .invalidatepage = btree_invalidatepage,
-       .sync_page      = block_sync_page,
 #ifdef CONFIG_MIGRATION
        .migratepage    = btree_migratepage,
 #endif
@@ -1330,82 +1329,6 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
        return ret;
 }
 
-/*
- * this unplugs every device on the box, and it is only used when page
- * is null
- */
-static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
-       struct btrfs_device *device;
-       struct btrfs_fs_info *info;
-
-       info = (struct btrfs_fs_info *)bdi->unplug_io_data;
-       list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
-               if (!device->bdev)
-                       continue;
-
-               bdi = blk_get_backing_dev_info(device->bdev);
-               if (bdi->unplug_io_fn)
-                       bdi->unplug_io_fn(bdi, page);
-       }
-}
-
-static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
-       struct inode *inode;
-       struct extent_map_tree *em_tree;
-       struct extent_map *em;
-       struct address_space *mapping;
-       u64 offset;
-
-       /* the generic O_DIRECT read code does this */
-       if (1 || !page) {
-               __unplug_io_fn(bdi, page);
-               return;
-       }
-
-       /*
-        * page->mapping may change at any time.  Get a consistent copy
-        * and use that for everything below
-        */
-       smp_mb();
-       mapping = page->mapping;
-       if (!mapping)
-               return;
-
-       inode = mapping->host;
-
-       /*
-        * don't do the expensive searching for a small number of
-        * devices
-        */
-       if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) {
-               __unplug_io_fn(bdi, page);
-               return;
-       }
-
-       offset = page_offset(page);
-
-       em_tree = &BTRFS_I(inode)->extent_tree;
-       read_lock(&em_tree->lock);
-       em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
-       read_unlock(&em_tree->lock);
-       if (!em) {
-               __unplug_io_fn(bdi, page);
-               return;
-       }
-
-       if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
-               free_extent_map(em);
-               __unplug_io_fn(bdi, page);
-               return;
-       }
-       offset = offset - em->start;
-       btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
-                         em->block_start + offset, page);
-       free_extent_map(em);
-}
-
 /*
  * If this fails, caller must call bdi_destroy() to get rid of the
  * bdi again.
@@ -1420,8 +1343,6 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
                return err;
 
        bdi->ra_pages   = default_backing_dev_info.ra_pages;
-       bdi->unplug_io_fn       = btrfs_unplug_io_fn;
-       bdi->unplug_io_data     = info;
        bdi->congested_fn       = btrfs_congested_fn;
        bdi->congested_data     = info;
        return 0;
index fb9bd7832b6db20fb5331d6008ec6a49db394a0a..462e08e724b08edc1f5b94d1690488be9d039b78 100644 (file)
@@ -7218,7 +7218,6 @@ static const struct address_space_operations btrfs_aops = {
        .writepage      = btrfs_writepage,
        .writepages     = btrfs_writepages,
        .readpages      = btrfs_readpages,
-       .sync_page      = block_sync_page,
        .direct_IO      = btrfs_direct_IO,
        .invalidatepage = btrfs_invalidatepage,
        .releasepage    = btrfs_releasepage,
index af7dbca1527629417ed14a67aff93fc86fe4d8a9..6e0e82a1b188aa4b8f24d4770b91261816e1065f 100644 (file)
@@ -162,7 +162,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
        struct bio *cur;
        int again = 0;
        unsigned long num_run;
-       unsigned long num_sync_run;
        unsigned long batch_run = 0;
        unsigned long limit;
        unsigned long last_waited = 0;
@@ -173,11 +172,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
        limit = btrfs_async_submit_limit(fs_info);
        limit = limit * 2 / 3;
 
-       /* we want to make sure that every time we switch from the sync
-        * list to the normal list, we unplug
-        */
-       num_sync_run = 0;
-
 loop:
        spin_lock(&device->io_lock);
 
@@ -223,15 +217,6 @@ loop_lock:
 
        spin_unlock(&device->io_lock);
 
-       /*
-        * if we're doing the regular priority list, make sure we unplug
-        * for any high prio bios we've sent down
-        */
-       if (pending_bios == &device->pending_bios && num_sync_run > 0) {
-               num_sync_run = 0;
-               blk_run_backing_dev(bdi, NULL);
-       }
-
        while (pending) {
 
                rmb();
@@ -259,19 +244,11 @@ loop_lock:
 
                BUG_ON(atomic_read(&cur->bi_cnt) == 0);
 
-               if (cur->bi_rw & REQ_SYNC)
-                       num_sync_run++;
-
                submit_bio(cur->bi_rw, cur);
                num_run++;
                batch_run++;
-               if (need_resched()) {
-                       if (num_sync_run) {
-                               blk_run_backing_dev(bdi, NULL);
-                               num_sync_run = 0;
-                       }
+               if (need_resched())
                        cond_resched();
-               }
 
                /*
                 * we made progress, there is more work to do and the bdi
@@ -304,13 +281,8 @@ loop_lock:
                                 * against it before looping
                                 */
                                last_waited = ioc->last_waited;
-                               if (need_resched()) {
-                                       if (num_sync_run) {
-                                               blk_run_backing_dev(bdi, NULL);
-                                               num_sync_run = 0;
-                                       }
+                               if (need_resched())
                                        cond_resched();
-                               }
                                continue;
                        }
                        spin_lock(&device->io_lock);
@@ -323,22 +295,6 @@ loop_lock:
                }
        }
 
-       if (num_sync_run) {
-               num_sync_run = 0;
-               blk_run_backing_dev(bdi, NULL);
-       }
-       /*
-        * IO has already been through a long path to get here.  Checksumming,
-        * async helper threads, perhaps compression.  We've done a pretty
-        * good job of collecting a batch of IO and should just unplug
-        * the device right away.
-        *
-        * This will help anyone who is waiting on the IO, they might have
-        * already unplugged, but managed to do so before the bio they
-        * cared about found its way down here.
-        */
-       blk_run_backing_dev(bdi, NULL);
-
        cond_resched();
        if (again)
                goto loop;
@@ -2948,7 +2904,7 @@ static int find_live_mirror(struct map_lookup *map, int first, int num,
 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
                             u64 logical, u64 *length,
                             struct btrfs_multi_bio **multi_ret,
-                            int mirror_num, struct page *unplug_page)
+                            int mirror_num)
 {
        struct extent_map *em;
        struct map_lookup *map;
@@ -2980,11 +2936,6 @@ again:
        em = lookup_extent_mapping(em_tree, logical, *length);
        read_unlock(&em_tree->lock);
 
-       if (!em && unplug_page) {
-               kfree(multi);
-               return 0;
-       }
-
        if (!em) {
                printk(KERN_CRIT "unable to find logical %llu len %llu\n",
                       (unsigned long long)logical,
@@ -3040,13 +2991,13 @@ again:
                *length = em->len - offset;
        }
 
-       if (!multi_ret && !unplug_page)
+       if (!multi_ret)
                goto out;
 
        num_stripes = 1;
        stripe_index = 0;
        if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
-               if (unplug_page || (rw & REQ_WRITE))
+               if (rw & REQ_WRITE)
                        num_stripes = map->num_stripes;
                else if (mirror_num)
                        stripe_index = mirror_num - 1;
@@ -3068,7 +3019,7 @@ again:
                stripe_index = do_div(stripe_nr, factor);
                stripe_index *= map->sub_stripes;
 
-               if (unplug_page || (rw & REQ_WRITE))
+               if (rw & REQ_WRITE)
                        num_stripes = map->sub_stripes;
                else if (mirror_num)
                        stripe_index += mirror_num - 1;
@@ -3088,22 +3039,10 @@ again:
        BUG_ON(stripe_index >= map->num_stripes);
 
        for (i = 0; i < num_stripes; i++) {
-               if (unplug_page) {
-                       struct btrfs_device *device;
-                       struct backing_dev_info *bdi;
-
-                       device = map->stripes[stripe_index].dev;
-                       if (device->bdev) {
-                               bdi = blk_get_backing_dev_info(device->bdev);
-                               if (bdi->unplug_io_fn)
-                                       bdi->unplug_io_fn(bdi, unplug_page);
-                       }
-               } else {
-                       multi->stripes[i].physical =
-                               map->stripes[stripe_index].physical +
-                               stripe_offset + stripe_nr * map->stripe_len;
-                       multi->stripes[i].dev = map->stripes[stripe_index].dev;
-               }
+               multi->stripes[i].physical =
+                       map->stripes[stripe_index].physical +
+                       stripe_offset + stripe_nr * map->stripe_len;
+               multi->stripes[i].dev = map->stripes[stripe_index].dev;
                stripe_index++;
        }
        if (multi_ret) {
@@ -3121,7 +3060,7 @@ int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
                      struct btrfs_multi_bio **multi_ret, int mirror_num)
 {
        return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
-                                mirror_num, NULL);
+                                mirror_num);
 }
 
 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
@@ -3189,14 +3128,6 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
        return 0;
 }
 
-int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
-                     u64 logical, struct page *page)
-{
-       u64 length = PAGE_CACHE_SIZE;
-       return __btrfs_map_block(map_tree, READ, logical, &length,
-                                NULL, 0, page);
-}
-
 static void end_bio_multi_stripe(struct bio *bio, int err)
 {
        struct btrfs_multi_bio *multi = bio->bi_private;
index 2219a76e2caf08415b2e207bc23466d4154d35e0..f903f2e5b4fe649647ae8884f6f09e01977ba926 100644 (file)
@@ -54,23 +54,15 @@ init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
 }
 EXPORT_SYMBOL(init_buffer);
 
-static int sync_buffer(void *word)
+static int sleep_on_buffer(void *word)
 {
-       struct block_device *bd;
-       struct buffer_head *bh
-               = container_of(word, struct buffer_head, b_state);
-
-       smp_mb();
-       bd = bh->b_bdev;
-       if (bd)
-               blk_run_address_space(bd->bd_inode->i_mapping);
        io_schedule();
        return 0;
 }
 
 void __lock_buffer(struct buffer_head *bh)
 {
-       wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
+       wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
                                                        TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_buffer);
@@ -90,7 +82,7 @@ EXPORT_SYMBOL(unlock_buffer);
  */
 void __wait_on_buffer(struct buffer_head * bh)
 {
-       wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
+       wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__wait_on_buffer);
 
@@ -749,7 +741,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
        struct buffer_head *bh;
        struct list_head tmp;
-       struct address_space *mapping, *prev_mapping = NULL;
+       struct address_space *mapping;
        int err = 0, err2;
 
        INIT_LIST_HEAD(&tmp);
@@ -783,10 +775,6 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
                                 * wait_on_buffer() will do that for us
                                 * through sync_buffer().
                                 */
-                               if (prev_mapping && prev_mapping != mapping)
-                                       blk_run_address_space(prev_mapping);
-                               prev_mapping = mapping;
-
                                brelse(bh);
                                spin_lock(lock);
                        }
@@ -3138,17 +3126,6 @@ out:
 }
 EXPORT_SYMBOL(try_to_free_buffers);
 
-void block_sync_page(struct page *page)
-{
-       struct address_space *mapping;
-
-       smp_mb();
-       mapping = page_mapping(page);
-       if (mapping)
-               blk_run_backing_dev(mapping->backing_dev_info, page);
-}
-EXPORT_SYMBOL(block_sync_page);
-
 /*
  * There are no bdflush tunables left.  But distributions are
  * still running obsolete flush daemons, so we terminate them here.
index e964b1cd5dd092bda83d274da53248a50dfd0ba3..c27d236738fc08ff80d32eceabc8f412140afbb2 100644 (file)
@@ -1569,34 +1569,6 @@ int cifs_fsync(struct file *file, int datasync)
        return rc;
 }
 
-/* static void cifs_sync_page(struct page *page)
-{
-       struct address_space *mapping;
-       struct inode *inode;
-       unsigned long index = page->index;
-       unsigned int rpages = 0;
-       int rc = 0;
-
-       cFYI(1, "sync page %p", page);
-       mapping = page->mapping;
-       if (!mapping)
-               return 0;
-       inode = mapping->host;
-       if (!inode)
-               return; */
-
-/*     fill in rpages then
-       result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
-
-/*     cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index);
-
-#if 0
-       if (rc < 0)
-               return rc;
-       return 0;
-#endif
-} */
-
 /*
  * As file closes, flush all cached write data for this inode checking
  * for write behind errors.
@@ -2510,7 +2482,6 @@ const struct address_space_operations cifs_addr_ops = {
        .set_page_dirty = __set_page_dirty_nobuffers,
        .releasepage = cifs_release_page,
        .invalidatepage = cifs_invalidate_page,
-       /* .sync_page = cifs_sync_page, */
        /* .direct_IO = */
 };
 
@@ -2528,6 +2499,5 @@ const struct address_space_operations cifs_addr_ops_smallbuf = {
        .set_page_dirty = __set_page_dirty_nobuffers,
        .releasepage = cifs_release_page,
        .invalidatepage = cifs_invalidate_page,
-       /* .sync_page = cifs_sync_page, */
        /* .direct_IO = */
 };
index b044705eedd4f0836cbcbe5d0f92bf6fb6e39cf7..df709b3b860a4c7969c47a3c1a6e32e64f543a47 100644 (file)
@@ -1110,11 +1110,8 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
            ((rw & READ) || (dio->result == dio->size)))
                ret = -EIOCBQUEUED;
 
-       if (ret != -EIOCBQUEUED) {
-               /* All IO is now issued, send it on its way */
-               blk_run_address_space(inode->i_mapping);
+       if (ret != -EIOCBQUEUED)
                dio_await_completion(dio);
-       }
 
        /*
         * Sync will always be dropping the final ref and completing the
index a8e7797b947795f2ef70a05c82a204fd493ba472..9c13412e6c99c78d11c3a89a42e007c82fb1b58b 100644 (file)
@@ -23,7 +23,6 @@ static sector_t _efs_bmap(struct address_space *mapping, sector_t block)
 }
 static const struct address_space_operations efs_aops = {
        .readpage = efs_readpage,
-       .sync_page = block_sync_page,
        .bmap = _efs_bmap
 };
 
index a7555238c41aaf942237b2aafb780c8605244812..82b94c8f5d22070c2fe9f78a0e89a24695e70baa 100644 (file)
@@ -795,7 +795,6 @@ const struct address_space_operations exofs_aops = {
        .direct_IO      = NULL, /* TODO: Should be trivial to do */
 
        /* With these NULL has special meaning or default is not exported */
-       .sync_page      = NULL,
        .get_xip_mem    = NULL,
        .migratepage    = NULL,
        .launder_page   = NULL,
index 40ad210a5049a6eed9b184b3c30750ce6951fa46..c47f706878b5f1c8befcd65e5dc9c68b46382200 100644 (file)
@@ -860,7 +860,6 @@ const struct address_space_operations ext2_aops = {
        .readpage               = ext2_readpage,
        .readpages              = ext2_readpages,
        .writepage              = ext2_writepage,
-       .sync_page              = block_sync_page,
        .write_begin            = ext2_write_begin,
        .write_end              = ext2_write_end,
        .bmap                   = ext2_bmap,
@@ -880,7 +879,6 @@ const struct address_space_operations ext2_nobh_aops = {
        .readpage               = ext2_readpage,
        .readpages              = ext2_readpages,
        .writepage              = ext2_nobh_writepage,
-       .sync_page              = block_sync_page,
        .write_begin            = ext2_nobh_write_begin,
        .write_end              = nobh_write_end,
        .bmap                   = ext2_bmap,
index ae94f6d949f526d04fc0e2dcb8983622eeab8fa0..fe2541d250e44d0f9fe8a79f7ecbe39f5fbca34c 100644 (file)
@@ -1894,7 +1894,6 @@ static const struct address_space_operations ext3_ordered_aops = {
        .readpage               = ext3_readpage,
        .readpages              = ext3_readpages,
        .writepage              = ext3_ordered_writepage,
-       .sync_page              = block_sync_page,
        .write_begin            = ext3_write_begin,
        .write_end              = ext3_ordered_write_end,
        .bmap                   = ext3_bmap,
@@ -1910,7 +1909,6 @@ static const struct address_space_operations ext3_writeback_aops = {
        .readpage               = ext3_readpage,
        .readpages              = ext3_readpages,
        .writepage              = ext3_writeback_writepage,
-       .sync_page              = block_sync_page,
        .write_begin            = ext3_write_begin,
        .write_end              = ext3_writeback_write_end,
        .bmap                   = ext3_bmap,
@@ -1926,7 +1924,6 @@ static const struct address_space_operations ext3_journalled_aops = {
        .readpage               = ext3_readpage,
        .readpages              = ext3_readpages,
        .writepage              = ext3_journalled_writepage,
-       .sync_page              = block_sync_page,
        .write_begin            = ext3_write_begin,
        .write_end              = ext3_journalled_write_end,
        .set_page_dirty         = ext3_journalled_set_page_dirty,
index 9f7f9e49914fa775709d5c99e805440fd4ff0f9f..9297ad46c4658ee3d7e05198754dc14789db8c2e 100644 (file)
@@ -3903,7 +3903,6 @@ static const struct address_space_operations ext4_ordered_aops = {
        .readpage               = ext4_readpage,
        .readpages              = ext4_readpages,
        .writepage              = ext4_writepage,
-       .sync_page              = block_sync_page,
        .write_begin            = ext4_write_begin,
        .write_end              = ext4_ordered_write_end,
        .bmap                   = ext4_bmap,
@@ -3919,7 +3918,6 @@ static const struct address_space_operations ext4_writeback_aops = {
        .readpage               = ext4_readpage,
        .readpages              = ext4_readpages,
        .writepage              = ext4_writepage,
-       .sync_page              = block_sync_page,
        .write_begin            = ext4_write_begin,
        .write_end              = ext4_writeback_write_end,
        .bmap                   = ext4_bmap,
@@ -3935,7 +3933,6 @@ static const struct address_space_operations ext4_journalled_aops = {
        .readpage               = ext4_readpage,
        .readpages              = ext4_readpages,
        .writepage              = ext4_writepage,
-       .sync_page              = block_sync_page,
        .write_begin            = ext4_write_begin,
        .write_end              = ext4_journalled_write_end,
        .set_page_dirty         = ext4_journalled_set_page_dirty,
@@ -3951,7 +3948,6 @@ static const struct address_space_operations ext4_da_aops = {
        .readpages              = ext4_readpages,
        .writepage              = ext4_writepage,
        .writepages             = ext4_da_writepages,
-       .sync_page              = block_sync_page,
        .write_begin            = ext4_da_write_begin,
        .write_end              = ext4_da_write_end,
        .bmap                   = ext4_bmap,
index 86753fe10bd1d7a47d551dba030ee45013e5fbbe..f4ff09fb79b1a879dbc702c426788633721f7a0f 100644 (file)
@@ -236,7 +236,6 @@ static const struct address_space_operations fat_aops = {
        .readpages      = fat_readpages,
        .writepage      = fat_writepage,
        .writepages     = fat_writepages,
-       .sync_page      = block_sync_page,
        .write_begin    = fat_write_begin,
        .write_end      = fat_write_end,
        .direct_IO      = fat_direct_IO,
index 1429f3ae1e868f2cb6f066542c3675d1d09134cd..5d318c44f8554bdbf5304a707557e413089b2913 100644 (file)
@@ -44,7 +44,6 @@ static sector_t               vxfs_bmap(struct address_space *, sector_t);
 const struct address_space_operations vxfs_aops = {
        .readpage =             vxfs_readpage,
        .bmap =                 vxfs_bmap,
-       .sync_page =            block_sync_page,
 };
 
 inline void
index 9e3f68cc1bd1338e2c8a6f73de6646fface53be7..09e8d51eeb64fabee17b253a424cc1b9c38f2502 100644 (file)
@@ -868,7 +868,6 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
 
        fc->bdi.name = "fuse";
        fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
-       fc->bdi.unplug_io_fn = default_unplug_io_fn;
        /* fuse does it's own writeback accounting */
        fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB;
 
index 4f36f8832b9b1b167321e119260da82d89b4cdf0..2f87ad27efd0fa08a051f9f15f25006252afba35 100644 (file)
@@ -1116,7 +1116,6 @@ static const struct address_space_operations gfs2_writeback_aops = {
        .writepages = gfs2_writeback_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
-       .sync_page = block_sync_page,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .bmap = gfs2_bmap,
@@ -1132,7 +1131,6 @@ static const struct address_space_operations gfs2_ordered_aops = {
        .writepage = gfs2_ordered_writepage,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
-       .sync_page = block_sync_page,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .set_page_dirty = gfs2_set_page_dirty,
@@ -1150,7 +1148,6 @@ static const struct address_space_operations gfs2_jdata_aops = {
        .writepages = gfs2_jdata_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
-       .sync_page = block_sync_page,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .set_page_dirty = gfs2_set_page_dirty,
index 939739c7b3f9deb30cf2a2df4ce3c4ba8290b697..a566331db4e1a760233f0be8a16c0b48d8730c88 100644 (file)
@@ -94,7 +94,6 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
 const struct address_space_operations gfs2_meta_aops = {
        .writepage = gfs2_aspace_writepage,
        .releasepage = gfs2_releasepage,
-       .sync_page = block_sync_page,
 };
 
 /**
index dffb4e996643557f04ae1c83292bd729ef5adada..fff16c968e67705a4d82e0bfcda6fca54aa005b0 100644 (file)
@@ -150,7 +150,6 @@ static int hfs_writepages(struct address_space *mapping,
 const struct address_space_operations hfs_btree_aops = {
        .readpage       = hfs_readpage,
        .writepage      = hfs_writepage,
-       .sync_page      = block_sync_page,
        .write_begin    = hfs_write_begin,
        .write_end      = generic_write_end,
        .bmap           = hfs_bmap,
@@ -160,7 +159,6 @@ const struct address_space_operations hfs_btree_aops = {
 const struct address_space_operations hfs_aops = {
        .readpage       = hfs_readpage,
        .writepage      = hfs_writepage,
-       .sync_page      = block_sync_page,
        .write_begin    = hfs_write_begin,
        .write_end      = generic_write_end,
        .bmap           = hfs_bmap,
index a8df651747f0eadaa8af44dffda657f2d64aea3d..b248a6cfcad93bb76631f7756b07b4ee6aff3d7e 100644 (file)
@@ -146,7 +146,6 @@ static int hfsplus_writepages(struct address_space *mapping,
 const struct address_space_operations hfsplus_btree_aops = {
        .readpage       = hfsplus_readpage,
        .writepage      = hfsplus_writepage,
-       .sync_page      = block_sync_page,
        .write_begin    = hfsplus_write_begin,
        .write_end      = generic_write_end,
        .bmap           = hfsplus_bmap,
@@ -156,7 +155,6 @@ const struct address_space_operations hfsplus_btree_aops = {
 const struct address_space_operations hfsplus_aops = {
        .readpage       = hfsplus_readpage,
        .writepage      = hfsplus_writepage,
-       .sync_page      = block_sync_page,
        .write_begin    = hfsplus_write_begin,
        .write_end      = generic_write_end,
        .bmap           = hfsplus_bmap,
index c0340887c7ea619efceba79852159003f3286620..9e84257b3ad5ea51b1c4f9beb696ddd6b9c600dc 100644 (file)
@@ -120,7 +120,6 @@ static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block)
 const struct address_space_operations hpfs_aops = {
        .readpage = hpfs_readpage,
        .writepage = hpfs_writepage,
-       .sync_page = block_sync_page,
        .write_begin = hpfs_write_begin,
        .write_end = generic_write_end,
        .bmap = _hpfs_bmap
index a0f3833c0dbf578ae0f6477007ae443eae9b728b..3db5ba4568fc8efd30025a9e9906eb01a47f9c45 100644 (file)
@@ -1158,7 +1158,6 @@ static sector_t _isofs_bmap(struct address_space *mapping, sector_t block)
 
 static const struct address_space_operations isofs_aops = {
        .readpage = isofs_readpage,
-       .sync_page = block_sync_page,
        .bmap = _isofs_bmap
 };
 
index 9978803ceedc519ea90a84bbe04f7a77b8bc5b4e..eddbb373209e9c2dc013657bc041d44badb0aa29 100644 (file)
@@ -352,7 +352,6 @@ const struct address_space_operations jfs_aops = {
        .readpages      = jfs_readpages,
        .writepage      = jfs_writepage,
        .writepages     = jfs_writepages,
-       .sync_page      = block_sync_page,
        .write_begin    = jfs_write_begin,
        .write_end      = nobh_write_end,
        .bmap           = jfs_bmap,
index 48b44bd8267b960e7e5bd330521f9da93d6d1a6f..6740d34cd82b802e948b8760fcf13954a8a12ad2 100644 (file)
@@ -583,7 +583,6 @@ static void metapage_invalidatepage(struct page *page, unsigned long offset)
 const struct address_space_operations jfs_metapage_aops = {
        .readpage       = metapage_readpage,
        .writepage      = metapage_writepage,
-       .sync_page      = block_sync_page,
        .releasepage    = metapage_releasepage,
        .invalidatepage = metapage_invalidatepage,
        .set_page_dirty = __set_page_dirty_nobuffers,
index 723bc5bca09ae3837448c3a286e50ff38e427576..1adc8d455f0ea2d66237436184766c5ed120688f 100644 (file)
@@ -39,7 +39,6 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
        bio.bi_end_io = request_complete;
 
        submit_bio(rw, &bio);
-       generic_unplug_device(bdev_get_queue(bdev));
        wait_for_completion(&complete);
        return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
 }
@@ -168,7 +167,6 @@ static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
        }
        len = PAGE_ALIGN(len);
        __bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
-       generic_unplug_device(bdev_get_queue(logfs_super(sb)->s_bdev));
 }
 
 
index ae0b83f476a63be6a51bdef77e10504b29bd9f99..adcdc0a4e182673ef953e5bafd8e040987df6948 100644 (file)
@@ -399,7 +399,6 @@ static sector_t minix_bmap(struct address_space *mapping, sector_t block)
 static const struct address_space_operations minix_aops = {
        .readpage = minix_readpage,
        .writepage = minix_writepage,
-       .sync_page = block_sync_page,
        .write_begin = minix_write_begin,
        .write_end = generic_write_end,
        .bmap = minix_bmap
index 388e9e8f5286f737970ccefa36e919556235e4bf..f4f1c08807eddfd7b30a43df58d6384475c2728c 100644 (file)
@@ -40,14 +40,10 @@ void nilfs_btnode_cache_init_once(struct address_space *btnc)
        nilfs_mapping_init_once(btnc);
 }
 
-static const struct address_space_operations def_btnode_aops = {
-       .sync_page              = block_sync_page,
-};
-
 void nilfs_btnode_cache_init(struct address_space *btnc,
                             struct backing_dev_info *bdi)
 {
-       nilfs_mapping_init(btnc, bdi, &def_btnode_aops);
+       nilfs_mapping_init(btnc, bdi);
 }
 
 void nilfs_btnode_cache_clear(struct address_space *btnc)
index caf9a6a3fb54f0e0dd4cd63ccf103fd6aef20a70..1c2a3e23f8b2dec6b1f895098239864d7f5e9946 100644 (file)
@@ -49,7 +49,6 @@
 #include "ifile.h"
 
 static const struct address_space_operations def_gcinode_aops = {
-       .sync_page              = block_sync_page,
 };
 
 /*
index 2fd440d8d6b8d9b2b469c2aaec757bca05a9ac58..c89d5d1ea7c7acff6016987cb55deaf48d96a51d 100644 (file)
@@ -262,7 +262,6 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 const struct address_space_operations nilfs_aops = {
        .writepage              = nilfs_writepage,
        .readpage               = nilfs_readpage,
-       .sync_page              = block_sync_page,
        .writepages             = nilfs_writepages,
        .set_page_dirty         = nilfs_set_page_dirty,
        .readpages              = nilfs_readpages,
index 6a0e2a189f60650b3399f4b9d3732ac6037f791f..3fdb61d79c9a4ec1c92b9aa36364592a669a7910 100644 (file)
@@ -399,7 +399,6 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
 
 static const struct address_space_operations def_mdt_aops = {
        .writepage              = nilfs_mdt_write_page,
-       .sync_page              = block_sync_page,
 };
 
 static const struct inode_operations def_mdt_iops;
@@ -438,10 +437,6 @@ void nilfs_mdt_set_entry_size(struct inode *inode, unsigned entry_size,
        mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size);
 }
 
-static const struct address_space_operations shadow_map_aops = {
-       .sync_page              = block_sync_page,
-};
-
 /**
  * nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file
  * @inode: inode of the metadata file
@@ -455,9 +450,9 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
 
        INIT_LIST_HEAD(&shadow->frozen_buffers);
        nilfs_mapping_init_once(&shadow->frozen_data);
-       nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops);
+       nilfs_mapping_init(&shadow->frozen_data, bdi);
        nilfs_mapping_init_once(&shadow->frozen_btnodes);
-       nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops);
+       nilfs_mapping_init(&shadow->frozen_btnodes, bdi);
        mi->mi_shadow = shadow;
        return 0;
 }
index 0c432416cfefc608383dc8f7ee58d88140a26f59..3da37cc5de3479985999639cbc08cbfd4dfa2884 100644 (file)
@@ -506,15 +506,14 @@ void nilfs_mapping_init_once(struct address_space *mapping)
 }
 
 void nilfs_mapping_init(struct address_space *mapping,
-                       struct backing_dev_info *bdi,
-                       const struct address_space_operations *aops)
+                       struct backing_dev_info *bdi)
 {
        mapping->host = NULL;
        mapping->flags = 0;
        mapping_set_gfp_mask(mapping, GFP_NOFS);
        mapping->assoc_mapping = NULL;
        mapping->backing_dev_info = bdi;
-       mapping->a_ops = aops;
+       mapping->a_ops = NULL;
 }
 
 /*
index 622df27cd89155d74a94ef786910715f9f3a11ad..ba4d6fd40b04dd5a906903db5493cc4fff9888ae 100644 (file)
@@ -63,8 +63,7 @@ void nilfs_copy_back_pages(struct address_space *, struct address_space *);
 void nilfs_clear_dirty_pages(struct address_space *);
 void nilfs_mapping_init_once(struct address_space *mapping);
 void nilfs_mapping_init(struct address_space *mapping,
-                       struct backing_dev_info *bdi,
-                       const struct address_space_operations *aops);
+                       struct backing_dev_info *bdi);
 unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned);
 unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
                                            sector_t start_blk,
index c3c2c7ac9020402d28dd0d2357ae2828c6e639a5..0b1e885b8cf8f8f72325c5a1c09d17fef64acd6f 100644 (file)
@@ -1543,8 +1543,6 @@ err_out:
  */
 const struct address_space_operations ntfs_aops = {
        .readpage       = ntfs_readpage,        /* Fill page with data. */
-       .sync_page      = block_sync_page,      /* Currently, just unplugs the
-                                                  disk request queue. */
 #ifdef NTFS_RW
        .writepage      = ntfs_writepage,       /* Write dirty page to disk. */
 #endif /* NTFS_RW */
@@ -1560,8 +1558,6 @@ const struct address_space_operations ntfs_aops = {
  */
 const struct address_space_operations ntfs_mst_aops = {
        .readpage       = ntfs_readpage,        /* Fill page with data. */
-       .sync_page      = block_sync_page,      /* Currently, just unplugs the
-                                                  disk request queue. */
 #ifdef NTFS_RW
        .writepage      = ntfs_writepage,       /* Write dirty page to disk. */
        .set_page_dirty = __set_page_dirty_nobuffers,   /* Set the page dirty
index 6551c7cbad92954202258d8715a3d44e06bc7d4b..ef9ed854255c8d2c8b15bdaf0e99545004f05aed 100644 (file)
@@ -698,8 +698,7 @@ lock_retry_remap:
                                        "uptodate! Unplugging the disk queue "
                                        "and rescheduling.");
                        get_bh(tbh);
-                       blk_run_address_space(mapping);
-                       schedule();
+                       io_schedule();
                        put_bh(tbh);
                        if (unlikely(!buffer_uptodate(tbh)))
                                goto read_err;
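
The pattern above recurs throughout the series: code that previously unplugged the queue by hand and then slept now just sleeps in I/O wait. A minimal before/after sketch (illustrative only, not taken verbatim from this patch):

	/* Old style, removed by this patch: kick the request queue, then sleep. */
	while (!buffer_uptodate(tbh)) {
		blk_run_address_space(mapping);
		schedule();
	}

	/* New style: sleeping in I/O wait is enough.  Any I/O this task
	 * submitted was already dispatched when its on-stack plug was
	 * flushed, so no explicit unplug call is needed before sleeping.
	 */
	while (!buffer_uptodate(tbh))
		io_schedule();
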
index 1fbb0e20131bf39e82f1822e0de172b7889497de..daea0359e9740263a525cec5c7f645e456d9a909 100644 (file)
@@ -2043,7 +2043,6 @@ const struct address_space_operations ocfs2_aops = {
        .write_begin            = ocfs2_write_begin,
        .write_end              = ocfs2_write_end,
        .bmap                   = ocfs2_bmap,
-       .sync_page              = block_sync_page,
        .direct_IO              = ocfs2_direct_IO,
        .invalidatepage         = ocfs2_invalidatepage,
        .releasepage            = ocfs2_releasepage,
index b108e863d8f65ab23ec5921fd425a9273f78365e..1adab287bd24c814ceb042be381744b3c3fc3e7c 100644 (file)
@@ -367,11 +367,7 @@ static inline void o2hb_bio_wait_dec(struct o2hb_bio_wait_ctxt *wc,
 static void o2hb_wait_on_io(struct o2hb_region *reg,
                            struct o2hb_bio_wait_ctxt *wc)
 {
-       struct address_space *mapping = reg->hr_bdev->bd_inode->i_mapping;
-
-       blk_run_address_space(mapping);
        o2hb_bio_wait_dec(wc, 1);
-
        wait_for_completion(&wc->wc_io_complete);
 }
 
index 8a6d34fa668a0715e349000f0706b7d6526bfe61..d738a7e493ddc07ed1b4b1fb7a30198b3d477c5f 100644 (file)
@@ -372,7 +372,6 @@ const struct address_space_operations omfs_aops = {
        .readpages = omfs_readpages,
        .writepage = omfs_writepage,
        .writepages = omfs_writepages,
-       .sync_page = block_sync_page,
        .write_begin = omfs_write_begin,
        .write_end = generic_write_end,
        .bmap = omfs_bmap,
index e63b4171d583bca9f5f32bc106eacac52ba89fde..2b0646613f5a1f86da0637e7c131dc36c28e9005 100644 (file)
@@ -335,7 +335,6 @@ static sector_t qnx4_bmap(struct address_space *mapping, sector_t block)
 static const struct address_space_operations qnx4_aops = {
        .readpage       = qnx4_readpage,
        .writepage      = qnx4_writepage,
-       .sync_page      = block_sync_page,
        .write_begin    = qnx4_write_begin,
        .write_end      = generic_write_end,
        .bmap           = qnx4_bmap
index 0bae036831e2ca2aeeae94eb78d74f2aa769b54d..03674675f886a0f43442f881db0f33543e65a33c 100644 (file)
@@ -3212,7 +3212,6 @@ const struct address_space_operations reiserfs_address_space_operations = {
        .readpages = reiserfs_readpages,
        .releasepage = reiserfs_releasepage,
        .invalidatepage = reiserfs_invalidatepage,
-       .sync_page = block_sync_page,
        .write_begin = reiserfs_write_begin,
        .write_end = reiserfs_write_end,
        .bmap = reiserfs_aop_bmap,
index 9ca66276315e08828b4b4708b82060013f416320..fa8d43c92bb81a7a03d3eda50f35dccec42bd44b 100644 (file)
@@ -488,7 +488,6 @@ static sector_t sysv_bmap(struct address_space *mapping, sector_t block)
 const struct address_space_operations sysv_aops = {
        .readpage = sysv_readpage,
        .writepage = sysv_writepage,
-       .sync_page = block_sync_page,
        .write_begin = sysv_write_begin,
        .write_end = generic_write_end,
        .bmap = sysv_bmap
index 6e11c2975dcf504ae447d0132bb58297d24da428..81368d4d4a2c459853a9910d9bc628b371493bda 100644 (file)
@@ -1979,7 +1979,6 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
         */
        c->bdi.name = "ubifs",
        c->bdi.capabilities = BDI_CAP_MAP_COPY;
-       c->bdi.unplug_io_fn = default_unplug_io_fn;
        err  = bdi_init(&c->bdi);
        if (err)
                goto out_close;
index 89c78486cbbe589ff899350eba218e41334f1719..94e4553491c90f7d497bbcec825574bc22cf2dc8 100644 (file)
@@ -98,7 +98,6 @@ static int udf_adinicb_write_end(struct file *file,
 const struct address_space_operations udf_adinicb_aops = {
        .readpage       = udf_adinicb_readpage,
        .writepage      = udf_adinicb_writepage,
-       .sync_page      = block_sync_page,
        .write_begin = simple_write_begin,
        .write_end = udf_adinicb_write_end,
 };
index c6a2e782b97b17d881fb7db61d798c51adf738d3..fa96fc0fe12b42d1fe9d1da5d384d84ec8070418 100644 (file)
@@ -133,7 +133,6 @@ static sector_t udf_bmap(struct address_space *mapping, sector_t block)
 const struct address_space_operations udf_aops = {
        .readpage       = udf_readpage,
        .writepage      = udf_writepage,
-       .sync_page      = block_sync_page,
        .write_begin            = udf_write_begin,
        .write_end              = generic_write_end,
        .bmap           = udf_bmap,
index 2b251f2093afc2976f23f971813bb6cc06b885fd..83b28444eb17c6b87cbb37a721faaa98071a5372 100644 (file)
@@ -588,7 +588,6 @@ static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
 const struct address_space_operations ufs_aops = {
        .readpage = ufs_readpage,
        .writepage = ufs_writepage,
-       .sync_page = block_sync_page,
        .write_begin = ufs_write_begin,
        .write_end = generic_write_end,
        .bmap = ufs_bmap
index a58f9155fc9a7baea4169efec7529434035d8458..ff0e79276f2dd84efb92fd912f02da5450872a91 100644 (file)
@@ -481,7 +481,7 @@ int ufs_truncate(struct inode *inode, loff_t old_i_size)
                        break;
                if (IS_SYNC(inode) && (inode->i_state & I_DIRTY))
                        ufs_sync_inode (inode);
-               blk_run_address_space(inode->i_mapping);
+               blk_flush_plug(current);
                yield();
        }
 
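
blk_run_address_space() has no direct equivalent any more; a task that wants its queued I/O pushed out flushes its own on-stack plug instead. A minimal sketch of the plugging API this conversion assumes (helper names as introduced earlier in the series, surrounding code illustrative):

	struct blk_plug plug;

	blk_start_plug(&plug);		/* collect requests on the stack */
	submit_bio(WRITE, bio);		/* queued on the plug, not yet dispatched */
	blk_finish_plug(&plug);		/* dispatch everything that was plugged */

	/* A task that may still hold plugged I/O while it waits can flush
	 * its own plug explicitly, as ufs_truncate() now does:
	 */
	blk_flush_plug(current);
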
index ec7bbb5645b63d85811e2ee731637139881b370c..83c1c20d145ad5483ea20d2ab6329c6af2b5043a 100644 (file)
@@ -1495,7 +1495,6 @@ const struct address_space_operations xfs_address_space_operations = {
        .readpages              = xfs_vm_readpages,
        .writepage              = xfs_vm_writepage,
        .writepages             = xfs_vm_writepages,
-       .sync_page              = block_sync_page,
        .releasepage            = xfs_vm_releasepage,
        .invalidatepage         = xfs_vm_invalidatepage,
        .write_begin            = xfs_vm_write_begin,
index ac1c7e8378ddd0a63cd33c5dfbcecd2fcfe0e74d..4f8f53c4d42cd442c3d134dced9143495657b295 100644 (file)
@@ -991,7 +991,7 @@ xfs_buf_lock(
        if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
                xfs_log_force(bp->b_target->bt_mount, 0);
        if (atomic_read(&bp->b_io_remaining))
-               blk_run_address_space(bp->b_target->bt_mapping);
+               blk_flush_plug(current);
        down(&bp->b_sema);
        XB_SET_OWNER(bp);
 
@@ -1035,9 +1035,7 @@ xfs_buf_wait_unpin(
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (atomic_read(&bp->b_pin_count) == 0)
                        break;
-               if (atomic_read(&bp->b_io_remaining))
-                       blk_run_address_space(bp->b_target->bt_mapping);
-               schedule();
+               io_schedule();
        }
        remove_wait_queue(&bp->b_waiters, &wait);
        set_current_state(TASK_RUNNING);
@@ -1443,7 +1441,7 @@ xfs_buf_iowait(
        trace_xfs_buf_iowait(bp, _RET_IP_);
 
        if (atomic_read(&bp->b_io_remaining))
-               blk_run_address_space(bp->b_target->bt_mapping);
+               blk_flush_plug(current);
        wait_for_completion(&bp->b_iowait);
 
        trace_xfs_buf_iowait_done(bp, _RET_IP_);
@@ -1667,7 +1665,6 @@ xfs_mapping_buftarg(
        struct inode            *inode;
        struct address_space    *mapping;
        static const struct address_space_operations mapping_aops = {
-               .sync_page = block_sync_page,
                .migratepage = fail_migrate_page,
        };
 
@@ -1948,7 +1945,7 @@ xfsbufd(
                        count++;
                }
                if (count)
-                       blk_run_address_space(target->bt_mapping);
+                       blk_flush_plug(current);
 
        } while (!kthread_should_stop());
 
@@ -1996,7 +1993,7 @@ xfs_flush_buftarg(
 
        if (wait) {
                /* Expedite and wait for IO to complete. */
-               blk_run_address_space(target->bt_mapping);
+               blk_flush_plug(current);
                while (!list_empty(&wait_list)) {
                        bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
 
index 4ce34fa937d4910cf8d7546e54de1e888fb0abb2..96f4094b706d9ddd8f8e694655d36c8648326a8a 100644 (file)
@@ -66,8 +66,6 @@ struct backing_dev_info {
        unsigned int capabilities; /* Device capabilities */
        congested_fn *congested_fn; /* Function pointer if device is md/dm */
        void *congested_data;   /* Pointer to aux data for congested func */
-       void (*unplug_io_fn)(struct backing_dev_info *, struct page *);
-       void *unplug_io_data;
 
        char *name;
 
@@ -251,7 +249,6 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
 
 extern struct backing_dev_info default_backing_dev_info;
 extern struct backing_dev_info noop_backing_dev_info;
-void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page);
 
 int writeback_in_progress(struct backing_dev_info *bdi);
 
@@ -336,17 +333,4 @@ static inline int bdi_sched_wait(void *word)
        return 0;
 }
 
-static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
-                                      struct page *page)
-{
-       if (bdi && bdi->unplug_io_fn)
-               bdi->unplug_io_fn(bdi, page);
-}
-
-static inline void blk_run_address_space(struct address_space *mapping)
-{
-       if (mapping)
-               blk_run_backing_dev(mapping->backing_dev_info, NULL);
-}
-
 #endif         /* _LINUX_BACKING_DEV_H */
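
With unplug_io_fn and unplug_io_data gone, a backing_dev_info is declared without any unplug hook. Illustrative declaration (the instance name is made up; the remaining fields match the ones this header still defines):

	static struct backing_dev_info example_bdi = {
		.name		= "example",
		.ra_pages	= VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
		.capabilities	= BDI_CAP_MAP_COPY,
		/* .unplug_io_fn / .unplug_io_data no longer exist */
	};
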
index 5873037eeb9153d77e22d46aa215d29c4d15e8ff..64ab2a1bb1679b1583f00a44a2b1af0ec9e3f168 100644 (file)
@@ -196,7 +196,6 @@ typedef void (request_fn_proc) (struct request_queue *q);
 typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
-typedef void (unplug_fn) (struct request_queue *);
 
 struct bio_vec;
 struct bvec_merge_data {
@@ -279,7 +278,6 @@ struct request_queue
        make_request_fn         *make_request_fn;
        prep_rq_fn              *prep_rq_fn;
        unprep_rq_fn            *unprep_rq_fn;
-       unplug_fn               *unplug_fn;
        merge_bvec_fn           *merge_bvec_fn;
        softirq_done_fn         *softirq_done_fn;
        rq_timed_out_fn         *rq_timed_out_fn;
@@ -292,14 +290,6 @@ struct request_queue
        sector_t                end_sector;
        struct request          *boundary_rq;
 
-       /*
-        * Auto-unplugging state
-        */
-       struct timer_list       unplug_timer;
-       int                     unplug_thresh;  /* After this many requests */
-       unsigned long           unplug_delay;   /* After this many jiffies */
-       struct work_struct      unplug_work;
-
        /*
         * Delayed queue handling
         */
@@ -399,14 +389,13 @@ struct request_queue
 #define QUEUE_FLAG_ASYNCFULL   4       /* write queue has been filled */
 #define QUEUE_FLAG_DEAD                5       /* queue being torn down */
 #define QUEUE_FLAG_REENTER     6       /* Re-entrancy avoidance */
-#define QUEUE_FLAG_PLUGGED     7       /* queue is plugged */
-#define QUEUE_FLAG_ELVSWITCH   8       /* don't use elevator, just do FIFO */
-#define QUEUE_FLAG_BIDI                9       /* queue supports bidi requests */
-#define QUEUE_FLAG_NOMERGES    10      /* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP   11      /* force complete on same CPU */
-#define QUEUE_FLAG_FAIL_IO     12      /* fake timeout */
-#define QUEUE_FLAG_STACKABLE   13      /* supports request stacking */
-#define QUEUE_FLAG_NONROT      14      /* non-rotational device (SSD) */
+#define QUEUE_FLAG_ELVSWITCH   7       /* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BIDI                8       /* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES     9      /* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP   10      /* force complete on same CPU */
+#define QUEUE_FLAG_FAIL_IO     11      /* fake timeout */
+#define QUEUE_FLAG_STACKABLE   12      /* supports request stacking */
+#define QUEUE_FLAG_NONROT      13      /* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
 #define QUEUE_FLAG_IO_STAT     15      /* do IO stats */
 #define QUEUE_FLAG_DISCARD     16      /* supports DISCARD */
@@ -484,7 +473,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
        __clear_bit(flag, &q->queue_flags);
 }
 
-#define blk_queue_plugged(q)   test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)    test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)   test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_nomerges(q)  test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
@@ -679,9 +667,6 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 extern void blk_rq_unprep_clone(struct request *rq);
 extern int blk_insert_cloned_request(struct request_queue *q,
                                     struct request *rq);
-extern void blk_plug_device(struct request_queue *);
-extern void blk_plug_device_unlocked(struct request_queue *);
-extern int blk_remove_plug(struct request_queue *);
 extern void blk_delay_queue(struct request_queue *, unsigned long);
 extern void blk_recount_segments(struct request_queue *, struct bio *);
 extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
@@ -726,7 +711,6 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
                          struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
                                  struct request *, int, rq_end_io_fn *);
-extern void blk_unplug(struct request_queue *q);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
@@ -863,7 +847,6 @@ extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bd
 
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
-extern void generic_unplug_device(struct request_queue *);
 extern long nr_blockdev_pages(void);
 
 int blk_get_queue(struct request_queue *);
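
For drivers, the removal of blk_plug_device()/blk_remove_plug()/blk_unplug() means a request_fn that wants to postpone dispatch asks the block layer to re-run the queue after a delay instead of plugging it. Rough sketch (the driver function and the device_busy() check are hypothetical):

	static void example_request_fn(struct request_queue *q)
	{
		struct request *rq;

		while ((rq = blk_fetch_request(q)) != NULL) {
			if (device_busy()) {		/* hypothetical busy check */
				blk_requeue_request(q, rq);
				/* was: blk_plug_device(q); */
				blk_delay_queue(q, 3);	/* re-run the queue in ~3 ms */
				return;
			}
			/* ... hand rq to the hardware ... */
		}
	}
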
index 68d1fe7b877c82e2d302fb31f3702836bc6d6843..f5df23561b96d0428cfed1d26fdaa11127e46b0d 100644 (file)
@@ -219,7 +219,6 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size);
 int block_commit_write(struct page *page, unsigned from, unsigned to);
 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
                                get_block_t get_block);
-void block_sync_page(struct page *);
 sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
 int block_truncate_page(struct address_space *, loff_t, get_block_t *);
 int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
index 272496d1fae41bbb7de1db89b1131ab87283972b..e2768834f39775f4baeae6e5e9316b64184a823c 100644 (file)
@@ -285,11 +285,6 @@ void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callback
  */
 int dm_table_complete(struct dm_table *t);
 
-/*
- * Unplug all devices in a table.
- */
-void dm_table_unplug_all(struct dm_table *t);
-
 /*
  * Table reference counting.
  */
index 8857cf9adbb711b3e5791175a2d46ea50e3547cb..ec6f72b844774b7414f69e99a48243ddbbc72af1 100644 (file)
@@ -20,7 +20,6 @@ typedef void (elevator_bio_merged_fn) (struct request_queue *,
 typedef int (elevator_dispatch_fn) (struct request_queue *, int);
 
 typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
-typedef int (elevator_queue_empty_fn) (struct request_queue *);
 typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
 typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
 typedef int (elevator_may_queue_fn) (struct request_queue *, int);
@@ -46,7 +45,6 @@ struct elevator_ops
        elevator_activate_req_fn *elevator_activate_req_fn;
        elevator_deactivate_req_fn *elevator_deactivate_req_fn;
 
-       elevator_queue_empty_fn *elevator_queue_empty_fn;
        elevator_completed_req_fn *elevator_completed_req_fn;
 
        elevator_request_list_fn *elevator_former_req_fn;
@@ -101,8 +99,8 @@ struct elevator_queue
  */
 extern void elv_dispatch_sort(struct request_queue *, struct request *);
 extern void elv_dispatch_add_tail(struct request_queue *, struct request *);
-extern void elv_add_request(struct request_queue *, struct request *, int, int);
-extern void __elv_add_request(struct request_queue *, struct request *, int, int);
+extern void elv_add_request(struct request_queue *, struct request *, int);
+extern void __elv_add_request(struct request_queue *, struct request *, int);
 extern void elv_insert(struct request_queue *, struct request *, int);
 extern int elv_merge(struct request_queue *, struct request **, struct bio *);
 extern int elv_try_merge(struct request *, struct bio *);
@@ -112,7 +110,6 @@ extern void elv_merged_request(struct request_queue *, struct request *, int);
 extern void elv_bio_merged(struct request_queue *q, struct request *,
                                struct bio *);
 extern void elv_requeue_request(struct request_queue *, struct request *);
-extern int elv_queue_empty(struct request_queue *);
 extern struct request *elv_former_request(struct request_queue *, struct request *);
 extern struct request *elv_latter_request(struct request_queue *, struct request *);
 extern int elv_register_queue(struct request_queue *q);
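
Callers of elv_add_request()/__elv_add_request() correspondingly drop the old plug argument and pass just the queue, the request and the insertion point, for example:

	/* illustrative call site; 'where' is one of the ELEVATOR_INSERT_* codes */
	__elv_add_request(q, rq, ELEVATOR_INSERT_BACK);
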
index bd3215940c3746ec22d8351854649f9b5b485f67..9f2cf69911b810db127c57b819f89445d015a69f 100644 (file)
@@ -583,7 +583,6 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
 struct address_space_operations {
        int (*writepage)(struct page *page, struct writeback_control *wbc);
        int (*readpage)(struct file *, struct page *);
-       void (*sync_page)(struct page *);
 
        /* Write back some dirty pages from this mapping. */
        int (*writepages)(struct address_space *, struct writeback_control *);
index 9c66e994540f6d91750e6d78b30201c0156e0de9..e112b8db2f3c095a997d9901fe72b9deb770d3df 100644 (file)
@@ -298,7 +298,6 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 
 extern void __lock_page(struct page *page);
 extern int __lock_page_killable(struct page *page);
-extern void __lock_page_nosync(struct page *page);
 extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                unsigned int flags);
 extern void unlock_page(struct page *page);
@@ -341,17 +340,6 @@ static inline int lock_page_killable(struct page *page)
        return 0;
 }
 
-/*
- * lock_page_nosync should only be used if we can't pin the page's inode.
- * Doesn't play quite so well with block device plugging.
- */
-static inline void lock_page_nosync(struct page *page)
-{
-       might_sleep();
-       if (!trylock_page(page))
-               __lock_page_nosync(page);
-}
-       
 /*
  * lock_page_or_retry - Lock the page, unless this would block and the
  * caller indicated that it can handle a retry.
index 4d559325d919fb5fa194d6a7e61711ae52e01192..9ee321833b21d0dc465f4e8e658f11c13c4f2b07 100644 (file)
@@ -299,8 +299,6 @@ extern void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
                                        struct page **pagep, swp_entry_t *ent);
 #endif
 
-extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *);
-
 #ifdef CONFIG_SWAP
 /* linux/mm/page_io.c */
 extern int swap_readpage(struct page *);
index 027100d30227fead0a4010d1feb0e2791a98fcaa..c91e139a652e713e7d15a053498dc9da8cac9e2d 100644 (file)
 
 static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
 
-void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
-}
-EXPORT_SYMBOL(default_unplug_io_fn);
-
 struct backing_dev_info default_backing_dev_info = {
        .name           = "default",
        .ra_pages       = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
        .state          = 0,
        .capabilities   = BDI_CAP_MAP_COPY,
-       .unplug_io_fn   = default_unplug_io_fn,
 };
 EXPORT_SYMBOL_GPL(default_backing_dev_info);
 
index 83a45d35468b961340fb19a958eee77d0e2e2297..380776c2a9acc3378879d5d05ab8fe3842c1cc03 100644 (file)
@@ -155,45 +155,15 @@ void remove_from_page_cache(struct page *page)
 }
 EXPORT_SYMBOL(remove_from_page_cache);
 
-static int sync_page(void *word)
+static int sleep_on_page(void *word)
 {
-       struct address_space *mapping;
-       struct page *page;
-
-       page = container_of((unsigned long *)word, struct page, flags);
-
-       /*
-        * page_mapping() is being called without PG_locked held.
-        * Some knowledge of the state and use of the page is used to
-        * reduce the requirements down to a memory barrier.
-        * The danger here is of a stale page_mapping() return value
-        * indicating a struct address_space different from the one it's
-        * associated with when it is associated with one.
-        * After smp_mb(), it's either the correct page_mapping() for
-        * the page, or an old page_mapping() and the page's own
-        * page_mapping() has gone NULL.
-        * The ->sync_page() address_space operation must tolerate
-        * page_mapping() going NULL. By an amazing coincidence,
-        * this comes about because none of the users of the page
-        * in the ->sync_page() methods make essential use of the
-        * page_mapping(), merely passing the page down to the backing
-        * device's unplug functions when it's non-NULL, which in turn
-        * ignore it for all cases but swap, where only page_private(page) is
-        * of interest. When page_mapping() does go NULL, the entire
-        * call stack gracefully ignores the page and returns.
-        * -- wli
-        */
-       smp_mb();
-       mapping = page_mapping(page);
-       if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
-               mapping->a_ops->sync_page(page);
        io_schedule();
        return 0;
 }
 
-static int sync_page_killable(void *word)
+static int sleep_on_page_killable(void *word)
 {
-       sync_page(word);
+       sleep_on_page(word);
        return fatal_signal_pending(current) ? -EINTR : 0;
 }
 
@@ -479,12 +449,6 @@ struct page *__page_cache_alloc(gfp_t gfp)
 EXPORT_SYMBOL(__page_cache_alloc);
 #endif
 
-static int __sleep_on_page_lock(void *word)
-{
-       io_schedule();
-       return 0;
-}
-
 /*
  * In order to wait for pages to become available there must be
  * waitqueues associated with pages. By using a hash table of
@@ -512,7 +476,7 @@ void wait_on_page_bit(struct page *page, int bit_nr)
        DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
 
        if (test_bit(bit_nr, &page->flags))
-               __wait_on_bit(page_waitqueue(page), &wait, sync_page,
+               __wait_on_bit(page_waitqueue(page), &wait, sleep_on_page,
                                                        TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(wait_on_page_bit);
@@ -576,17 +540,12 @@ EXPORT_SYMBOL(end_page_writeback);
 /**
  * __lock_page - get a lock on the page, assuming we need to sleep to get it
  * @page: the page to lock
- *
- * Ugly. Running sync_page() in state TASK_UNINTERRUPTIBLE is scary.  If some
- * random driver's requestfn sets TASK_RUNNING, we could busywait.  However
- * chances are that on the second loop, the block layer's plug list is empty,
- * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
  */
 void __lock_page(struct page *page)
 {
        DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
-       __wait_on_bit_lock(page_waitqueue(page), &wait, sync_page,
+       __wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page,
                                                        TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_page);
@@ -596,24 +555,10 @@ int __lock_page_killable(struct page *page)
        DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
        return __wait_on_bit_lock(page_waitqueue(page), &wait,
-                                       sync_page_killable, TASK_KILLABLE);
+                                       sleep_on_page_killable, TASK_KILLABLE);
 }
 EXPORT_SYMBOL_GPL(__lock_page_killable);
 
-/**
- * __lock_page_nosync - get a lock on the page, without calling sync_page()
- * @page: the page to lock
- *
- * Variant of lock_page that does not require the caller to hold a reference
- * on the page's mapping.
- */
-void __lock_page_nosync(struct page *page)
-{
-       DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
-       __wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
-                                                       TASK_UNINTERRUPTIBLE);
-}
-
 int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                         unsigned int flags)
 {
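
The page-wait path therefore no longer calls back into the filesystem at all: waiters simply io_schedule(), and dispatch is the submitter's responsibility. A simplified sketch of a read path under the new scheme (illustrative only; real callers vary):

	struct blk_plug plug;

	blk_start_plug(&plug);
	/* ... submit read bios / call ->readpage() for the wanted pages ... */
	blk_finish_plug(&plug);		/* reads are dispatched here */

	lock_page(page);		/* sleeps in sleep_on_page() -> io_schedule() */
	if (!PageUptodate(page))
		/* handle read error */;
	unlock_page(page);
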
index 0207c2f6f8bd73466bff24c2b6d167406d6042e5..bfba796d374d403ed28e2916c65ddfa3fab845f6 100644 (file)
@@ -945,7 +945,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
                collect_procs(ppage, &tokill);
 
        if (hpage != ppage)
-               lock_page_nosync(ppage);
+               lock_page(ppage);
 
        ret = try_to_unmap(ppage, ttu);
        if (ret != SWAP_SUCCESS)
@@ -1038,7 +1038,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
                         * Check "just unpoisoned", "filter hit", and
                         * "race with other subpage."
                         */
-                       lock_page_nosync(hpage);
+                       lock_page(hpage);
                        if (!PageHWPoison(hpage)
                            || (hwpoison_filter(p) && TestClearPageHWPoison(p))
                            || (p != hpage && TestSetPageHWPoison(hpage))) {
@@ -1088,7 +1088,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
         * It's very difficult to mess with pages currently under IO
         * and in many cases impossible, so we just avoid it here.
         */
-       lock_page_nosync(hpage);
+       lock_page(hpage);
 
        /*
         * unpoison always clear PG_hwpoison inside page lock
@@ -1231,7 +1231,7 @@ int unpoison_memory(unsigned long pfn)
                return 0;
        }
 
-       lock_page_nosync(page);
+       lock_page(page);
        /*
         * This test is racy because PG_hwpoison is set outside of page lock.
         * That's acceptable because that won't trigger kernel panic. Instead,
index f59e1424d3db650fc23617f60d060d6b68ce271b..fb6cbd6abe1688928f768176da128bb864da3a17 100644 (file)
@@ -1842,10 +1842,6 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 }
 EXPORT_SYMBOL(remap_vmalloc_range);
 
-void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
-}
-
 unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
 {
index 2cb01f6ec5d019e7e431d9f18cdba6f3bde573e4..cc0ede169e41d3f3d9be3f78641ed1459fb5335a 100644 (file)
@@ -1239,7 +1239,7 @@ int set_page_dirty_lock(struct page *page)
 {
        int ret;
 
-       lock_page_nosync(page);
+       lock_page(page);
        ret = set_page_dirty(page);
        unlock_page(page);
        return ret;
index 77506a291a2d4caa90c64839896df7b9176bbaa1..cbddc3e1724671bdf7bc2d9b3d95d54af987538f 100644 (file)
@@ -554,17 +554,5 @@ page_cache_async_readahead(struct address_space *mapping,
 
        /* do read-ahead */
        ondemand_readahead(mapping, ra, filp, true, offset, req_size);
-
-#ifdef CONFIG_BLOCK
-       /*
-        * Normally the current page is !uptodate and lock_page() will be
-        * immediately called to implicitly unplug the device. However this
-        * is not always true for RAID conifgurations, where data arrives
-        * not strictly in their submission order. In this case we need to
-        * explicitly kick off the IO.
-        */
-       if (PageUptodate(page))
-               blk_run_backing_dev(mapping->backing_dev_info, NULL);
-#endif
 }
 EXPORT_SYMBOL_GPL(page_cache_async_readahead);
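
The special-case kick for already-uptodate pages becomes unnecessary once readahead I/O is batched on the submitting task's plug: everything goes out when the plug is finished (or when the task blocks). A sketch of how such a batch submitter is expected to look (the function below is illustrative, not the actual mm/readahead.c code):

	static void example_submit_readahead(struct address_space *mapping,
					     struct list_head *pages,
					     unsigned nr_pages)
	{
		struct blk_plug plug;

		blk_start_plug(&plug);
		/* ... add each page to the page cache and call
		 * mapping->a_ops->readpage() on it ...
		 */
		blk_finish_plug(&plug);	/* all queued reads go out together */
	}
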
index 5ee67c9906022a15b566711da17b8c78e65fa16d..24d23f5bedf14619c763ded89276c651003d3060 100644 (file)
@@ -224,7 +224,6 @@ static const struct vm_operations_struct shmem_vm_ops;
 static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
        .ra_pages       = 0,    /* No readahead */
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
-       .unplug_io_fn   = default_unplug_io_fn,
 };
 
 static LIST_HEAD(shmem_swaplist);
index 5c8cfabbc9bc3abdbf7f342656ea8c58b727aae8..46680461785bef647c3618bc7d493d1909aea80f 100644 (file)
 
 /*
  * swapper_space is a fiction, retained to simplify the path through
- * vmscan's shrink_page_list, to make sync_page look nicer, and to allow
- * future use of radix_tree tags in the swap cache.
+ * vmscan's shrink_page_list.
  */
 static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
-       .sync_page      = block_sync_page,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .migratepage    = migrate_page,
 };
@@ -37,7 +35,6 @@ static const struct address_space_operations swap_aops = {
 static struct backing_dev_info swap_backing_dev_info = {
        .name           = "swap",
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
-       .unplug_io_fn   = swap_unplug_io_fn,
 };
 
 struct address_space swapper_space = {
index 07a458d72fa880f5adc366b8acf03e610841880e..7ceea78ceb2001297cb8c63f20eca082c060a8b2 100644 (file)
@@ -94,39 +94,6 @@ __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
        return ret;
 }
 
-/*
- * We need this because the bdev->unplug_fn can sleep and we cannot
- * hold swap_lock while calling the unplug_fn. And swap_lock
- * cannot be turned into a mutex.
- */
-static DECLARE_RWSEM(swap_unplug_sem);
-
-void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
-{
-       swp_entry_t entry;
-
-       down_read(&swap_unplug_sem);
-       entry.val = page_private(page);
-       if (PageSwapCache(page)) {
-               struct block_device *bdev = swap_info[swp_type(entry)]->bdev;
-               struct backing_dev_info *bdi;
-
-               /*
-                * If the page is removed from swapcache from under us (with a
-                * racy try_to_unuse/swapoff) we need an additional reference
-                * count to avoid reading garbage from page_private(page) above.
-                * If the WARN_ON triggers during a swapoff it maybe the race
-                * condition and it's harmless. However if it triggers without
-                * swapoff it signals a problem.
-                */
-               WARN_ON(page_count(page) <= 1);
-
-               bdi = bdev->bd_inode->i_mapping->backing_dev_info;
-               blk_run_backing_dev(bdi, page);
-       }
-       up_read(&swap_unplug_sem);
-}
-
 /*
  * swapon tell device that all the old swap contents can be discarded,
  * to allow the swap device to optimize its wear-levelling.
@@ -1643,10 +1610,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
                goto out_dput;
        }
 
-       /* wait for any unplug function to finish */
-       down_write(&swap_unplug_sem);
-       up_write(&swap_unplug_sem);
-
        destroy_swap_extents(p);
        if (p->flags & SWP_CONTINUED)
                free_swap_count_continuations(p);
index 17497d0cd8b9e4c95bf5ce39abf1f59d2eb15f2f..251bed73ac034d3a98e2c1ab1986ae73e15b0992 100644 (file)
@@ -358,7 +358,7 @@ static int may_write_to_queue(struct backing_dev_info *bdi,
 static void handle_write_error(struct address_space *mapping,
                                struct page *page, int error)
 {
-       lock_page_nosync(page);
+       lock_page(page);
        if (page_mapping(page) == mapping)
                mapping_set_error(mapping, error);
        unlock_page(page);