]> git.karo-electronics.de Git - linux-beck.git/blobdiff - block/blk-mq.c
blk-mq: allocate flush_rq in blk_mq_init_flush()
[linux-beck.git] / block / blk-mq.c
index 1713686f5c2f1a1b6aee6d01acfa18f718ae51d3..78bcf8bfb22a71072de9625b60628efc087c516e 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/cache.h>
 #include <linux/sched/sysctl.h>
 #include <linux/delay.h>
+#include <linux/crash_dump.h>
 
 #include <trace/events/block.h>
 
@@ -439,7 +440,6 @@ static void __blk_mq_requeue_request(struct request *rq)
 void blk_mq_requeue_request(struct request *rq)
 {
        __blk_mq_requeue_request(rq);
-       blk_clear_rq_complete(rq);
 
        BUG_ON(blk_queued_rq(rq));
        blk_mq_add_to_requeue_list(rq, true);
@@ -526,60 +526,14 @@ struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
 EXPORT_SYMBOL(blk_mq_tag_to_rq);
 
 struct blk_mq_timeout_data {
-       struct blk_mq_hw_ctx *hctx;
-       unsigned long *next;
-       unsigned int *next_set;
+       unsigned long next;
+       unsigned int next_set;
 };
 
-static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
+void blk_mq_rq_timed_out(struct request *req, bool reserved)
 {
-       struct blk_mq_timeout_data *data = __data;
-       struct blk_mq_hw_ctx *hctx = data->hctx;
-       unsigned int tag;
-
-        /* It may not be in flight yet (this is where
-        * the REQ_ATOMIC_STARTED flag comes in). The requests are
-        * statically allocated, so we know it's always safe to access the
-        * memory associated with a bit offset into ->rqs[].
-        */
-       tag = 0;
-       do {
-               struct request *rq;
-
-               tag = find_next_zero_bit(free_tags, hctx->tags->nr_tags, tag);
-               if (tag >= hctx->tags->nr_tags)
-                       break;
-
-               rq = blk_mq_tag_to_rq(hctx->tags, tag++);
-               if (rq->q != hctx->queue)
-                       continue;
-               if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
-                       continue;
-
-               blk_rq_check_expired(rq, data->next, data->next_set);
-       } while (1);
-}
-
-static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx,
-                                       unsigned long *next,
-                                       unsigned int *next_set)
-{
-       struct blk_mq_timeout_data data = {
-               .hctx           = hctx,
-               .next           = next,
-               .next_set       = next_set,
-       };
-
-       /*
-        * Ask the tagging code to iterate busy requests, so we can
-        * check them for timeout.
-        */
-       blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data);
-}
-
-static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
-{
-       struct request_queue *q = rq->q;
+       struct blk_mq_ops *ops = req->q->mq_ops;
+       enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
 
        /*
         * We know that complete is set at this point. If STARTED isn't set
@@ -590,21 +544,54 @@ static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
         * we both flags will get cleared. So check here again, and ignore
         * a timeout event with a request that isn't active.
         */
-       if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
-               return BLK_EH_NOT_HANDLED;
+       if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
+               return;
 
-       if (!q->mq_ops->timeout)
-               return BLK_EH_RESET_TIMER;
+       if (ops->timeout)
+               ret = ops->timeout(req, reserved);
+
+       switch (ret) {
+       case BLK_EH_HANDLED:
+               __blk_mq_complete_request(req);
+               break;
+       case BLK_EH_RESET_TIMER:
+               blk_add_timer(req);
+               blk_clear_rq_complete(req);
+               break;
+       case BLK_EH_NOT_HANDLED:
+               break;
+       default:
+               printk(KERN_ERR "block: bad eh return: %d\n", ret);
+               break;
+       }
+}
+
+static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
+               struct request *rq, void *priv, bool reserved)
+{
+       struct blk_mq_timeout_data *data = priv;
+
+       if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+               return;
 
-       return q->mq_ops->timeout(rq);
+       if (time_after_eq(jiffies, rq->deadline)) {
+               if (!blk_mark_rq_complete(rq))
+                       blk_mq_rq_timed_out(rq, reserved);
+       } else if (!data->next_set || time_after(data->next, rq->deadline)) {
+               data->next = rq->deadline;
+               data->next_set = 1;
+       }
 }
 
-static void blk_mq_rq_timer(unsigned long data)
+static void blk_mq_rq_timer(unsigned long priv)
 {
-       struct request_queue *q = (struct request_queue *) data;
+       struct request_queue *q = (struct request_queue *)priv;
+       struct blk_mq_timeout_data data = {
+               .next           = 0,
+               .next_set       = 0,
+       };
        struct blk_mq_hw_ctx *hctx;
-       unsigned long next = 0;
-       int i, next_set = 0;
+       int i;
 
        queue_for_each_hw_ctx(q, hctx, i) {
                /*
@@ -614,12 +601,12 @@ static void blk_mq_rq_timer(unsigned long data)
                if (!hctx->nr_ctx || !hctx->tags)
                        continue;
 
-               blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set);
+               blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
        }
 
-       if (next_set) {
-               next = blk_rq_timeout(round_jiffies_up(next));
-               mod_timer(&q->timeout, next);
+       if (data.next_set) {
+               data.next = blk_rq_timeout(round_jiffies_up(data.next));
+               mod_timer(&q->timeout, data.next);
        } else {
                queue_for_each_hw_ctx(q, hctx, i)
                        blk_mq_tag_idle(hctx);
@@ -1522,6 +1509,20 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
        return NOTIFY_OK;
 }
 
+static void blk_mq_exit_hctx(struct request_queue *q,
+               struct blk_mq_tag_set *set,
+               struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+{
+       blk_mq_tag_idle(hctx);
+
+       if (set->ops->exit_hctx)
+               set->ops->exit_hctx(hctx, hctx_idx);
+
+       blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
+       kfree(hctx->ctxs);
+       blk_mq_free_bitmap(&hctx->ctx_map);
+}
+
 static void blk_mq_exit_hw_queues(struct request_queue *q,
                struct blk_mq_tag_set *set, int nr_queue)
 {
@@ -1531,17 +1532,8 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
        queue_for_each_hw_ctx(q, hctx, i) {
                if (i == nr_queue)
                        break;
-
-               blk_mq_tag_idle(hctx);
-
-               if (set->ops->exit_hctx)
-                       set->ops->exit_hctx(hctx, i);
-
-               blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
-               kfree(hctx->ctxs);
-               blk_mq_free_bitmap(&hctx->ctx_map);
+               blk_mq_exit_hctx(q, set, hctx, i);
        }
-
 }
 
 static void blk_mq_free_hw_queues(struct request_queue *q,
@@ -1556,53 +1548,72 @@ static void blk_mq_free_hw_queues(struct request_queue *q,
        }
 }
 
-static int blk_mq_init_hw_queues(struct request_queue *q,
-               struct blk_mq_tag_set *set)
+static int blk_mq_init_hctx(struct request_queue *q,
+               struct blk_mq_tag_set *set,
+               struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
 {
-       struct blk_mq_hw_ctx *hctx;
-       unsigned int i;
+       int node;
+
+       node = hctx->numa_node;
+       if (node == NUMA_NO_NODE)
+               node = hctx->numa_node = set->numa_node;
+
+       INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
+       INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
+       spin_lock_init(&hctx->lock);
+       INIT_LIST_HEAD(&hctx->dispatch);
+       hctx->queue = q;
+       hctx->queue_num = hctx_idx;
+       hctx->flags = set->flags;
+       hctx->cmd_size = set->cmd_size;
+
+       blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
+                                       blk_mq_hctx_notify, hctx);
+       blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
+
+       hctx->tags = set->tags[hctx_idx];
 
        /*
-        * Initialize hardware queues
+        * Allocate space for all possible cpus to avoid allocation at
+        * runtime
         */
-       queue_for_each_hw_ctx(q, hctx, i) {
-               int node;
+       hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
+                                       GFP_KERNEL, node);
+       if (!hctx->ctxs)
+               goto unregister_cpu_notifier;
 
-               node = hctx->numa_node;
-               if (node == NUMA_NO_NODE)
-                       node = hctx->numa_node = set->numa_node;
+       if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
+               goto free_ctxs;
 
-               INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
-               INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
-               spin_lock_init(&hctx->lock);
-               INIT_LIST_HEAD(&hctx->dispatch);
-               hctx->queue = q;
-               hctx->queue_num = i;
-               hctx->flags = set->flags;
-               hctx->cmd_size = set->cmd_size;
+       hctx->nr_ctx = 0;
 
-               blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
-                                               blk_mq_hctx_notify, hctx);
-               blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
+       if (set->ops->init_hctx &&
+           set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
+               goto free_bitmap;
 
-               hctx->tags = set->tags[i];
+       return 0;
 
-               /*
-                * Allocate space for all possible cpus to avoid allocation at
-                * runtime
-                */
-               hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
-                                               GFP_KERNEL, node);
-               if (!hctx->ctxs)
-                       break;
+ free_bitmap:
+       blk_mq_free_bitmap(&hctx->ctx_map);
+ free_ctxs:
+       kfree(hctx->ctxs);
+ unregister_cpu_notifier:
+       blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
 
-               if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
-                       break;
+       return -1;
+}
 
-               hctx->nr_ctx = 0;
+static int blk_mq_init_hw_queues(struct request_queue *q,
+               struct blk_mq_tag_set *set)
+{
+       struct blk_mq_hw_ctx *hctx;
+       unsigned int i;
 
-               if (set->ops->init_hctx &&
-                   set->ops->init_hctx(hctx, set->driver_data, i))
+       /*
+        * Initialize hardware queues
+        */
+       queue_for_each_hw_ctx(q, hctx, i) {
+               if (blk_mq_init_hctx(q, set, hctx, i))
                        break;
        }
 
@@ -1756,6 +1767,16 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
        if (!ctx)
                return ERR_PTR(-ENOMEM);
 
+       /*
+        * If a crashdump is active, then we are potentially in a very
+        * memory constrained environment. Limit us to 1 queue and
+        * 64 tags to prevent using too much memory.
+        */
+       if (is_kdump_kernel()) {
+               set->nr_hw_queues = 1;
+               set->queue_depth = min(64U, set->queue_depth);
+       }
+
        hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
                        set->numa_node);
 
@@ -1816,7 +1837,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
        else
                blk_queue_make_request(q, blk_sq_make_request);
 
-       blk_queue_rq_timed_out(q, blk_mq_rq_timed_out);
        if (set->timeout)
                blk_queue_rq_timeout(q, set->timeout);
 
@@ -1828,17 +1848,10 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
        if (set->ops->complete)
                blk_queue_softirq_done(q, set->ops->complete);
 
-       blk_mq_init_flush(q);
        blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
-       q->flush_rq = kzalloc(round_up(sizeof(struct request) +
-                               set->cmd_size, cache_line_size()),
-                               GFP_KERNEL);
-       if (!q->flush_rq)
-               goto err_hw;
-
        if (blk_mq_init_hw_queues(q, set))
-               goto err_flush_rq;
+               goto err_hw;
 
        mutex_lock(&all_q_mutex);
        list_add_tail(&q->all_q_node, &all_q_list);
@@ -1846,12 +1859,15 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
        blk_mq_add_queue_tag_set(set, q);
 
+       if (blk_mq_init_flush(q))
+               goto err_hw_queues;
+
        blk_mq_map_swqueue(q);
 
        return q;
 
-err_flush_rq:
-       kfree(q->flush_rq);
+err_hw_queues:
+       blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
 err_hw:
        blk_cleanup_queue(q);
 err_hctxs: