block/blk-ioc.c
index 27a06e00eaec4312c5596ad58ebaa7fcc8c438ef..fb95dd2f889a6071d85e334d7d5d9342ca56ab75 100644
@@ -29,21 +29,6 @@ void get_io_context(struct io_context *ioc)
 }
 EXPORT_SYMBOL(get_io_context);
 
-/*
- * Releasing ioc may nest into another put_io_context() leading to nested
- * fast path release.  As the ioc's can't be the same, this is okay but
- * makes lockdep whine.  Keep track of nesting and use it as subclass.
- */
-#ifdef CONFIG_LOCKDEP
-#define ioc_release_depth(q)           ((q) ? (q)->ioc_release_depth : 0)
-#define ioc_release_depth_inc(q)       (q)->ioc_release_depth++
-#define ioc_release_depth_dec(q)       (q)->ioc_release_depth--
-#else
-#define ioc_release_depth(q)           0
-#define ioc_release_depth_inc(q)       do { } while (0)
-#define ioc_release_depth_dec(q)       do { } while (0)
-#endif
-
 static void icq_free_icq_rcu(struct rcu_head *head)
 {
        struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
@@ -51,10 +36,22 @@ static void icq_free_icq_rcu(struct rcu_head *head)
        kmem_cache_free(icq->__rcu_icq_cache, icq);
 }
 
-/*
- * Exit and free an icq.  Called with both ioc and q locked.
- */
+/* Exit an icq. Called with both ioc and q locked. */
 static void ioc_exit_icq(struct io_cq *icq)
+{
+       struct elevator_type *et = icq->q->elevator->type;
+
+       if (icq->flags & ICQ_EXITED)
+               return;
+
+       if (et->ops.elevator_exit_icq_fn)
+               et->ops.elevator_exit_icq_fn(icq);
+
+       icq->flags |= ICQ_EXITED;
+}
+
+/* Release an icq.  Called with both ioc and q locked. */
+static void ioc_destroy_icq(struct io_cq *icq)
 {
        struct io_context *ioc = icq->ioc;
        struct request_queue *q = icq->q;
@@ -75,11 +72,7 @@ static void ioc_exit_icq(struct io_cq *icq)
        if (rcu_dereference_raw(ioc->icq_hint) == icq)
                rcu_assign_pointer(ioc->icq_hint, NULL);
 
-       if (et->ops.elevator_exit_icq_fn) {
-               ioc_release_depth_inc(q);
-               et->ops.elevator_exit_icq_fn(icq);
-               ioc_release_depth_dec(q);
-       }
+       ioc_exit_icq(icq);
 
        /*
         * @icq->q might have gone away by the time RCU callback runs
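The hunks above split the old ioc_exit_icq() into two halves: ioc_exit_icq() now only notifies the elevator and is made idempotent by the ICQ_EXITED flag, while ioc_destroy_icq() additionally unlinks and frees the icq, calling ioc_exit_icq() on the way. A minimal userspace sketch of that idempotent exit/destroy split follows; obj_exit(), obj_destroy() and OBJ_EXITED are illustrative names, not the kernel API.

#include <stdlib.h>

#define OBJ_EXITED	(1 << 0)

struct obj {
	unsigned int flags;
	void (*exit_hook)(struct obj *);	/* plays elevator_exit_icq_fn */
};

/* Notification half: safe to call more than once thanks to the flag. */
static void obj_exit(struct obj *o)
{
	if (o->flags & OBJ_EXITED)
		return;
	if (o->exit_hook)
		o->exit_hook(o);
	o->flags |= OBJ_EXITED;
}

/* Teardown half: make sure the notification ran exactly once, then free. */
static void obj_destroy(struct obj *o)
{
	obj_exit(o);
	free(o);
}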
@@ -97,51 +90,32 @@ static void ioc_release_fn(struct work_struct *work)
 {
        struct io_context *ioc = container_of(work, struct io_context,
                                              release_work);
-       struct request_queue *last_q = NULL;
+       unsigned long flags;
 
-       spin_lock_irq(&ioc->lock);
+       /*
+        * Exiting icq may call into put_io_context() through elevator
+        * which will trigger lockdep warning.  The ioc's are guaranteed to
+        * be different, use a different locking subclass here.  Use
+        * irqsave variant as there's no spin_lock_irq_nested().
+        */
+       spin_lock_irqsave_nested(&ioc->lock, flags, 1);
 
        while (!hlist_empty(&ioc->icq_list)) {
                struct io_cq *icq = hlist_entry(ioc->icq_list.first,
                                                struct io_cq, ioc_node);
-               struct request_queue *this_q = icq->q;
-
-               if (this_q != last_q) {
-                       /*
-                        * Need to switch to @this_q.  Once we release
-                        * @ioc->lock, it can go away along with @cic.
-                        * Hold on to it.
-                        */
-                       __blk_get_queue(this_q);
-
-                       /*
-                        * blk_put_queue() might sleep thanks to kobject
-                        * idiocy.  Always release both locks, put and
-                        * restart.
-                        */
-                       if (last_q) {
-                               spin_unlock(last_q->queue_lock);
-                               spin_unlock_irq(&ioc->lock);
-                               blk_put_queue(last_q);
-                       } else {
-                               spin_unlock_irq(&ioc->lock);
-                       }
-
-                       last_q = this_q;
-                       spin_lock_irq(this_q->queue_lock);
-                       spin_lock(&ioc->lock);
-                       continue;
+               struct request_queue *q = icq->q;
+
+               if (spin_trylock(q->queue_lock)) {
+                       ioc_destroy_icq(icq);
+                       spin_unlock(q->queue_lock);
+               } else {
+                       spin_unlock_irqrestore(&ioc->lock, flags);
+                       cpu_relax();
+                       spin_lock_irqsave_nested(&ioc->lock, flags, 1);
                }
-               ioc_exit_icq(icq);
        }
 
-       if (last_q) {
-               spin_unlock(last_q->queue_lock);
-               spin_unlock_irq(&ioc->lock);
-               blk_put_queue(last_q);
-       } else {
-               spin_unlock_irq(&ioc->lock);
-       }
+       spin_unlock_irqrestore(&ioc->lock, flags);
 
        kmem_cache_free(iocontext_cachep, ioc);
 }
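The rewritten ioc_release_fn() resolves the lock-ordering problem (queue_lock normally nests outside ioc->lock, but the worker has to take ioc->lock first to walk icq_list) by only ever trylock-ing the queue_lock and backing off completely when it is contended, while the subclass-1 annotation tells lockdep that any ioc->lock reached via put_io_context() from the elevator is a different lock. A minimal userspace analogy of that trylock-and-back-off loop, assuming pthread mutexes and sched_yield() in place of spinlocks and cpu_relax():

#include <pthread.h>
#include <sched.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;	/* plays ioc->lock */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;	/* plays q->queue_lock */
static int items_left = 8;					/* plays icq_list */

static void release_all(void)
{
	pthread_mutex_lock(&outer);
	while (items_left > 0) {
		/*
		 * The documented order is inner before outer, so never block
		 * on the inner lock while holding the outer one: just try it.
		 */
		if (pthread_mutex_trylock(&inner) == 0) {
			items_left--;			/* "destroy" one item */
			pthread_mutex_unlock(&inner);
		} else {
			/* Contended: drop everything, let the other side run, retry. */
			pthread_mutex_unlock(&outer);
			sched_yield();
			pthread_mutex_lock(&outer);
		}
	}
	pthread_mutex_unlock(&outer);
}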
@@ -149,79 +123,35 @@ static void ioc_release_fn(struct work_struct *work)
 /**
  * put_io_context - put a reference of io_context
  * @ioc: io_context to put
- * @locked_q: request_queue the caller is holding queue_lock of (hint)
  *
  * Decrement reference count of @ioc and release it if the count reaches
- * zero.  If the caller is holding queue_lock of a queue, it can indicate
- * that with @locked_q.  This is an optimization hint and the caller is
- * allowed to pass in %NULL even when it's holding a queue_lock.
+ * zero.
  */
-void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
+void put_io_context(struct io_context *ioc)
 {
-       struct request_queue *last_q = locked_q;
        unsigned long flags;
+       bool free_ioc = false;
 
        if (ioc == NULL)
                return;
 
        BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
-       if (locked_q)
-               lockdep_assert_held(locked_q->queue_lock);
-
-       if (!atomic_long_dec_and_test(&ioc->refcount))
-               return;
 
        /*
-        * Destroy @ioc.  This is a bit messy because icq's are chained
-        * from both ioc and queue, and ioc->lock nests inside queue_lock.
-        * The inner ioc->lock should be held to walk our icq_list and then
-        * for each icq the outer matching queue_lock should be grabbed.
-        * ie. We need to do reverse-order double lock dancing.
-        *
-        * Another twist is that we are often called with one of the
-        * matching queue_locks held as indicated by @locked_q, which
-        * prevents performing double-lock dance for other queues.
-        *
-        * So, we do it in two stages.  The fast path uses the queue_lock
-        * the caller is holding and, if other queues need to be accessed,
-        * uses trylock to avoid introducing locking dependency.  This can
-        * handle most cases, especially if @ioc was performing IO on only
-        * single device.
-        *
-        * If trylock doesn't cut it, we defer to @ioc->release_work which
-        * can do all the double-locking dancing.
+        * Releasing ioc requires reverse order double locking and we may
+        * already be holding a queue_lock.  Do it asynchronously from wq.
         */
-       spin_lock_irqsave_nested(&ioc->lock, flags,
-                                ioc_release_depth(locked_q));
-
-       while (!hlist_empty(&ioc->icq_list)) {
-               struct io_cq *icq = hlist_entry(ioc->icq_list.first,
-                                               struct io_cq, ioc_node);
-               struct request_queue *this_q = icq->q;
-
-               if (this_q != last_q) {
-                       if (last_q && last_q != locked_q)
-                               spin_unlock(last_q->queue_lock);
-                       last_q = NULL;
-
-                       if (!spin_trylock(this_q->queue_lock))
-                               break;
-                       last_q = this_q;
-                       continue;
-               }
-               ioc_exit_icq(icq);
+       if (atomic_long_dec_and_test(&ioc->refcount)) {
+               spin_lock_irqsave(&ioc->lock, flags);
+               if (!hlist_empty(&ioc->icq_list))
+                       schedule_work(&ioc->release_work);
+               else
+                       free_ioc = true;
+               spin_unlock_irqrestore(&ioc->lock, flags);
        }
 
-       if (last_q && last_q != locked_q)
-               spin_unlock(last_q->queue_lock);
-
-       spin_unlock_irqrestore(&ioc->lock, flags);
-
-       /* if no icq is left, we're done; otherwise, kick release_work */
-       if (hlist_empty(&ioc->icq_list))
+       if (free_ioc)
                kmem_cache_free(iocontext_cachep, ioc);
-       else
-               schedule_work(&ioc->release_work);
 }
 EXPORT_SYMBOL(put_io_context);
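put_io_context() itself no longer attempts the double-lock dance at all: the last reference either frees the ioc directly (no icqs left) or punts the whole teardown to release_work, which is why callers may now hold any queue_lock when dropping their reference. A rough userspace sketch of that "last put defers heavy cleanup" shape, assuming C11 atomics and a detached pthread standing in for schedule_work():

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct ctx {
	atomic_long refcount;
	bool has_queue_state;	/* plays !hlist_empty(&ioc->icq_list) */
};

static void *cleanup_worker(void *p)
{
	/* ...the reverse-order double locking happens here, not in put()... */
	free(p);
	return NULL;
}

static void put_ctx(struct ctx *c)
{
	pthread_t tid;

	if (!c)
		return;
	if (atomic_fetch_sub(&c->refcount, 1) != 1)
		return;				/* not the last reference */

	if (c->has_queue_state) {
		/* Teardown may need locks the caller already holds: defer it. */
		if (pthread_create(&tid, NULL, cleanup_worker, c) == 0)
			pthread_detach(tid);
	} else {
		free(c);			/* nothing per-queue left */
	}
}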
 
@@ -229,14 +159,42 @@ EXPORT_SYMBOL(put_io_context);
 void exit_io_context(struct task_struct *task)
 {
        struct io_context *ioc;
+       struct io_cq *icq;
+       struct hlist_node *n;
+       unsigned long flags;
 
        task_lock(task);
        ioc = task->io_context;
        task->io_context = NULL;
        task_unlock(task);
 
-       atomic_dec(&ioc->nr_tasks);
-       put_io_context(ioc, NULL);
+       if (!atomic_dec_and_test(&ioc->nr_tasks)) {
+               put_io_context(ioc);
+               return;
+       }
+
+       /*
+        * Need ioc lock to walk icq_list and q lock to exit icq.  Perform
+        * reverse double locking.  Read comment in ioc_release_fn() for
+        * explanation on the nested locking annotation.
+        */
+retry:
+       spin_lock_irqsave_nested(&ioc->lock, flags, 1);
+       hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) {
+               if (icq->flags & ICQ_EXITED)
+                       continue;
+               if (spin_trylock(icq->q->queue_lock)) {
+                       ioc_exit_icq(icq);
+                       spin_unlock(icq->q->queue_lock);
+               } else {
+                       spin_unlock_irqrestore(&ioc->lock, flags);
+                       cpu_relax();
+                       goto retry;
+               }
+       }
+       spin_unlock_irqrestore(&ioc->lock, flags);
+
+       put_io_context(ioc);
 }
 
 /**
@@ -255,7 +213,7 @@ void ioc_clear_queue(struct request_queue *q)
                struct io_context *ioc = icq->ioc;
 
                spin_lock(&ioc->lock);
-               ioc_exit_icq(icq);
+               ioc_destroy_icq(icq);
                spin_unlock(&ioc->lock);
        }
 }
@@ -424,13 +382,13 @@ struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
        return icq;
 }
 
-void ioc_set_changed(struct io_context *ioc, int which)
+void ioc_set_icq_flags(struct io_context *ioc, unsigned int flags)
 {
        struct io_cq *icq;
        struct hlist_node *n;
 
        hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
-               set_bit(which, &icq->changed);
+               icq->flags |= flags;
 }
 
 /**
@@ -448,7 +406,7 @@ void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
 
        spin_lock_irqsave(&ioc->lock, flags);
        ioc->ioprio = ioprio;
-       ioc_set_changed(ioc, ICQ_IOPRIO_CHANGED);
+       ioc_set_icq_flags(ioc, ICQ_IOPRIO_CHANGED);
        spin_unlock_irqrestore(&ioc->lock, flags);
 }
 
@@ -465,11 +423,33 @@ void ioc_cgroup_changed(struct io_context *ioc)
        unsigned long flags;
 
        spin_lock_irqsave(&ioc->lock, flags);
-       ioc_set_changed(ioc, ICQ_CGROUP_CHANGED);
+       ioc_set_icq_flags(ioc, ICQ_CGROUP_CHANGED);
        spin_unlock_irqrestore(&ioc->lock, flags);
 }
 EXPORT_SYMBOL(ioc_cgroup_changed);
 
+/**
+ * icq_get_changed - fetch and clear icq changed mask
+ * @icq: icq of interest
+ *
+ * Fetch and clear ICQ_*_CHANGED bits from @icq.  Grabs and releases
+ * @icq->ioc->lock.
+ */
+unsigned icq_get_changed(struct io_cq *icq)
+{
+       unsigned int changed = 0;
+       unsigned long flags;
+
+       if (unlikely(icq->flags & ICQ_CHANGED_MASK)) {
+               spin_lock_irqsave(&icq->ioc->lock, flags);
+               changed = icq->flags & ICQ_CHANGED_MASK;
+               icq->flags &= ~ICQ_CHANGED_MASK;
+               spin_unlock_irqrestore(&icq->ioc->lock, flags);
+       }
+       return changed;
+}
+EXPORT_SYMBOL(icq_get_changed);
+
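icq_get_changed() pairs with ioc_set_icq_flags() above: setters OR the ICQ_*_CHANGED bits into icq->flags under ioc->lock, and the consumer peeks at the mask without the lock, only taking it when there is something to fetch and clear. A small userspace sketch of that fetch-and-clear pattern; item_get_changed() and CHANGED_MASK are illustrative names, not the kernel API.

#include <pthread.h>

#define CHANGED_MASK	0x3u

struct item {
	pthread_mutex_t lock;
	unsigned int flags;
};

static unsigned int item_get_changed(struct item *it)
{
	unsigned int changed = 0;

	/* Cheap unlocked peek; a racy miss is simply picked up on the next call. */
	if (it->flags & CHANGED_MASK) {
		pthread_mutex_lock(&it->lock);
		changed = it->flags & CHANGED_MASK;	/* re-read under the lock */
		it->flags &= ~CHANGED_MASK;
		pthread_mutex_unlock(&it->lock);
	}
	return changed;
}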
 static int __init blk_ioc_init(void)
 {
        iocontext_cachep = kmem_cache_create("blkdev_ioc",