block: strip out locking optimization in put_io_context()
author     Tejun Heo <tj@kernel.org>
           Tue, 7 Feb 2012 06:51:30 +0000 (07:51 +0100)
committer  Jens Axboe <axboe@kernel.dk>
           Tue, 7 Feb 2012 06:51:30 +0000 (07:51 +0100)
put_io_context() performed a complex trylock dance to avoid
deferring ioc release to workqueue.  It was also broken on UP because
trylock was always assumed to succeed, which resulted in an
unbalanced preemption count.
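
For readers unfamiliar with the UP spinlock stubs: with CONFIG_SMP=n
there is nothing to spin on, so spin_trylock() unconditionally
succeeds after disabling preemption.  A simplified sketch of the UP
behavior (illustrative only, not the verbatim macros from
include/linux/spinlock_api_up.h):

	/*
	 * Illustrative UP sketch -- on UP the lock word does nothing;
	 * only the preempt count is manipulated.
	 */
	#define up_spin_trylock(lock)	({ preempt_disable(); 1; })	/* never fails */
	#define up_spin_unlock(lock)	preempt_enable()

Every "successful" trylock therefore needs a matching unlock; a path
written for SMP that can exit after a trylock without the pairing
unlock leaves preempt_count() elevated on UP -- the unbalance
described above.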

While there are ways to fix the UP breakage, even the most
pathological microbenchmark (forced ioc allocation and a tight
fork/exit loop) fails to show any appreciable performance benefit
from the optimization.  Strip it out.  If workloads affected by this
change turn up, the simpler optimization from the discussion thread
can be applied later.
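
The replacement, visible in the blk-ioc.c hunk below, reduces the
release path to "drop the last reference, punt icq teardown to a
workqueue", where the reverse-order double locking can run with no
locks already held.  A minimal standalone sketch of that pattern
(struct and function names here are illustrative, not the kernel's):

	#include <linux/atomic.h>
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct ref_obj {
		atomic_long_t refcount;
		spinlock_t lock;
		struct hlist_head items;	/* teardown needs other locks */
		struct work_struct release_work;
	};

	/* Workqueue context: no locks held, free to lock in any order. */
	static void ref_obj_release_fn(struct work_struct *work)
	{
		struct ref_obj *obj = container_of(work, struct ref_obj,
						   release_work);

		/* ...walk obj->items, taking each queue's lock safely... */
		kfree(obj);
	}

	static void ref_obj_put(struct ref_obj *obj)
	{
		if (atomic_long_dec_and_test(&obj->refcount))
			schedule_work(&obj->release_work);	/* defer, don't dance */
	}

The actual patch additionally takes ioc->lock and only schedules the
work when icq_list is non-empty.  The cost is a deferred context
switch on the final put, which the microbenchmark above could not
distinguish from the optimized path.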

Signed-off-by: Tejun Heo <tj@kernel.org>
LKML-Reference: <1328514611.21268.66.camel@sli10-conroe>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-cgroup.c
block/blk-core.c
block/blk-ioc.c
block/cfq-iosched.c
fs/ioprio.c
include/linux/blkdev.h
include/linux/iocontext.h
kernel/fork.c

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index fa8f26309444d2cdda41cae813cf6f5a70f1de06..75642a352a8f40595069f0833a8e7cef131eca02 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1659,7 +1659,7 @@ static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
                ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
                if (ioc) {
                        ioc_cgroup_changed(ioc);
-                       put_io_context(ioc, NULL);
+                       put_io_context(ioc);
                }
        }
 }
diff --git a/block/blk-core.c b/block/blk-core.c
index 6367025751184904e571fa791ee8259266282301..532b3a21b38306125f6f1ef87724d1698e9a4035 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -642,7 +642,7 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
        if (rq->cmd_flags & REQ_ELVPRIV) {
                elv_put_request(q, rq);
                if (rq->elv.icq)
-                       put_io_context(rq->elv.icq->ioc, q);
+                       put_io_context(rq->elv.icq->ioc);
        }
 
        mempool_free(rq, q->rq.rq_pool);
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 7490b6da24539d84c646901d7ff9b4e905bbbec2..9884fd7427fef7658f01b0d5cb6af129d9ecb645 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -29,21 +29,6 @@ void get_io_context(struct io_context *ioc)
 }
 EXPORT_SYMBOL(get_io_context);
 
-/*
- * Releasing ioc may nest into another put_io_context() leading to nested
- * fast path release.  As the ioc's can't be the same, this is okay but
- * makes lockdep whine.  Keep track of nesting and use it as subclass.
- */
-#ifdef CONFIG_LOCKDEP
-#define ioc_release_depth(q)           ((q) ? (q)->ioc_release_depth : 0)
-#define ioc_release_depth_inc(q)       (q)->ioc_release_depth++
-#define ioc_release_depth_dec(q)       (q)->ioc_release_depth--
-#else
-#define ioc_release_depth(q)           0
-#define ioc_release_depth_inc(q)       do { } while (0)
-#define ioc_release_depth_dec(q)       do { } while (0)
-#endif
-
 static void icq_free_icq_rcu(struct rcu_head *head)
 {
        struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
@@ -75,11 +60,8 @@ static void ioc_exit_icq(struct io_cq *icq)
        if (rcu_dereference_raw(ioc->icq_hint) == icq)
                rcu_assign_pointer(ioc->icq_hint, NULL);
 
-       if (et->ops.elevator_exit_icq_fn) {
-               ioc_release_depth_inc(q);
+       if (et->ops.elevator_exit_icq_fn)
                et->ops.elevator_exit_icq_fn(icq);
-               ioc_release_depth_dec(q);
-       }
 
        /*
         * @icq->q might have gone away by the time RCU callback runs
@@ -149,81 +131,29 @@ static void ioc_release_fn(struct work_struct *work)
 /**
  * put_io_context - put a reference of io_context
  * @ioc: io_context to put
- * @locked_q: request_queue the caller is holding queue_lock of (hint)
  *
  * Decrement reference count of @ioc and release it if the count reaches
- * zero.  If the caller is holding queue_lock of a queue, it can indicate
- * that with @locked_q.  This is an optimization hint and the caller is
- * allowed to pass in %NULL even when it's holding a queue_lock.
+ * zero.
  */
-void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
+void put_io_context(struct io_context *ioc)
 {
-       struct request_queue *last_q = locked_q;
        unsigned long flags;
 
        if (ioc == NULL)
                return;
 
        BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
-       if (locked_q)
-               lockdep_assert_held(locked_q->queue_lock);
-
-       if (!atomic_long_dec_and_test(&ioc->refcount))
-               return;
 
        /*
-        * Destroy @ioc.  This is a bit messy because icq's are chained
-        * from both ioc and queue, and ioc->lock nests inside queue_lock.
-        * The inner ioc->lock should be held to walk our icq_list and then
-        * for each icq the outer matching queue_lock should be grabbed.
-        * ie. We need to do reverse-order double lock dancing.
-        *
-        * Another twist is that we are often called with one of the
-        * matching queue_locks held as indicated by @locked_q, which
-        * prevents performing double-lock dance for other queues.
-        *
-        * So, we do it in two stages.  The fast path uses the queue_lock
-        * the caller is holding and, if other queues need to be accessed,
-        * uses trylock to avoid introducing locking dependency.  This can
-        * handle most cases, especially if @ioc was performing IO on only
-        * single device.
-        *
-        * If trylock doesn't cut it, we defer to @ioc->release_work which
-        * can do all the double-locking dancing.
+        * Releasing ioc requires reverse order double locking and we may
+        * already be holding a queue_lock.  Do it asynchronously from wq.
         */
-       spin_lock_irqsave_nested(&ioc->lock, flags,
-                                ioc_release_depth(locked_q));
-
-       while (!hlist_empty(&ioc->icq_list)) {
-               struct io_cq *icq = hlist_entry(ioc->icq_list.first,
-                                               struct io_cq, ioc_node);
-               struct request_queue *this_q = icq->q;
-
-               if (this_q != last_q) {
-                       if (last_q && last_q != locked_q)
-                               spin_unlock(last_q->queue_lock);
-                       last_q = NULL;
-
-                       /* spin_trylock() always successes in UP case */
-                       if (this_q != locked_q &&
-                           !spin_trylock(this_q->queue_lock))
-                               break;
-                       last_q = this_q;
-                       continue;
-               }
-               ioc_exit_icq(icq);
+       if (atomic_long_dec_and_test(&ioc->refcount)) {
+               spin_lock_irqsave(&ioc->lock, flags);
+               if (!hlist_empty(&ioc->icq_list))
+                       schedule_work(&ioc->release_work);
+               spin_unlock_irqrestore(&ioc->lock, flags);
        }
-
-       if (last_q && last_q != locked_q)
-               spin_unlock(last_q->queue_lock);
-
-       spin_unlock_irqrestore(&ioc->lock, flags);
-
-       /* if no icq is left, we're done; otherwise, kick release_work */
-       if (hlist_empty(&ioc->icq_list))
-               kmem_cache_free(iocontext_cachep, ioc);
-       else
-               schedule_work(&ioc->release_work);
 }
 EXPORT_SYMBOL(put_io_context);
 
@@ -238,7 +168,7 @@ void exit_io_context(struct task_struct *task)
        task_unlock(task);
 
        atomic_dec(&ioc->nr_tasks);
-       put_io_context(ioc, NULL);
+       put_io_context(ioc);
 }
 
 /**
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index da21c24dbed37f12e53195ea62fef3f6f13c33f9..5684df6848bc75a2017d572cc15978349d65dd8c 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1794,7 +1794,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                cfqd->active_queue = NULL;
 
        if (cfqd->active_cic) {
-               put_io_context(cfqd->active_cic->icq.ioc, cfqd->queue);
+               put_io_context(cfqd->active_cic->icq.ioc);
                cfqd->active_cic = NULL;
        }
 }
diff --git a/fs/ioprio.c b/fs/ioprio.c
index f84b380d65e5d1a1898248bdb7a77dd3bc8b5aa4..0f1b9515213b14e5f4c2e8606efea4ff1a8c2fc4 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -51,7 +51,7 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
        ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
        if (ioc) {
                ioc_ioprio_changed(ioc, ioprio);
-               put_io_context(ioc, NULL);
+               put_io_context(ioc);
        }
 
        return err;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 6c6a1f008065984821435a50f12fd4c08d17fe42..606cf339bb561f6b526c2a58ddcf1378c8376bf1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -399,9 +399,6 @@ struct request_queue {
        /* Throttle data */
        struct throtl_data *td;
 #endif
-#ifdef CONFIG_LOCKDEP
-       int                     ioc_release_depth;
-#endif
 };
 
 #define QUEUE_FLAG_QUEUED      1       /* uses generic tag queueing */
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 7e1371c4bccf93143e6234e15b11924b4375274f..119773eebe3144a46925c8f616a70049b745d8f9 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -133,7 +133,7 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
 
 struct task_struct;
 #ifdef CONFIG_BLOCK
-void put_io_context(struct io_context *ioc, struct request_queue *locked_q);
+void put_io_context(struct io_context *ioc);
 void exit_io_context(struct task_struct *task);
 struct io_context *get_task_io_context(struct task_struct *task,
                                       gfp_t gfp_flags, int node);
@@ -141,8 +141,7 @@ void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
 void ioc_cgroup_changed(struct io_context *ioc);
 #else
 struct io_context;
-static inline void put_io_context(struct io_context *ioc,
-                                 struct request_queue *locked_q) { }
+static inline void put_io_context(struct io_context *ioc) { }
 static inline void exit_io_context(struct task_struct *task) { }
 #endif
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 051f090d40c1230462d4b4d53c05133879805f08..c574aefa8d1ba68fe96181b0a4c2afd00efc9004 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -890,7 +890,7 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
                        return -ENOMEM;
 
                new_ioc->ioprio = ioc->ioprio;
-               put_io_context(new_ioc, NULL);
+               put_io_context(new_ioc);
        }
 #endif
        return 0;
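
The caller-side change is purely mechanical -- the queue hint
argument disappears.  The fs/ioprio.c hunk above, for instance, now
reads:

	ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
	if (ioc) {
		ioc_ioprio_changed(ioc, ioprio);
		put_io_context(ioc);	/* no request_queue hint to pass */
	}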