blk-mq: allocate flush_rq in blk_mq_init_flush()
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4aac82615a46fd2363d03d28007fc5cf6be53929..78bcf8bfb22a71072de9625b60628efc087c516e 100644
@@ -20,6 +20,7 @@
 #include <linux/cache.h>
 #include <linux/sched/sysctl.h>
 #include <linux/delay.h>
+#include <linux/crash_dump.h>
 
 #include <trace/events/block.h>
 
@@ -203,7 +204,6 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
        if (tag != BLK_MQ_TAG_FAIL) {
                rq = data->hctx->tags->rqs[tag];
 
-               rq->cmd_flags = 0;
                if (blk_mq_tag_busy(data->hctx)) {
                        rq->cmd_flags = REQ_MQ_INFLIGHT;
                        atomic_inc(&data->hctx->nr_active);
@@ -224,9 +224,11 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
        struct blk_mq_hw_ctx *hctx;
        struct request *rq;
        struct blk_mq_alloc_data alloc_data;
+       int ret;
 
-       if (blk_mq_queue_enter(q))
-               return NULL;
+       ret = blk_mq_queue_enter(q);
+       if (ret)
+               return ERR_PTR(ret);
 
        ctx = blk_mq_get_ctx(q);
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
@@ -246,6 +248,8 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
                ctx = alloc_data.ctx;
        }
        blk_mq_put_ctx(ctx);
+       if (!rq)
+               return ERR_PTR(-EWOULDBLOCK);
        return rq;
 }
 EXPORT_SYMBOL(blk_mq_alloc_request);
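
Since blk_mq_alloc_request() now propagates errors as ERR_PTR() values instead of returning NULL, callers have to switch from a NULL check to IS_ERR()/PTR_ERR(). A minimal caller-side sketch, not part of this patch; the arguments follow the blk_mq_alloc_request() prototype of this series and the surrounding setup is elided:

	struct request *rq;

	rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL, false);
	if (IS_ERR(rq))
		return PTR_ERR(rq);	/* e.g. -EWOULDBLOCK or a queue-enter error */

	/* ... fill in and issue rq, later release it with blk_mq_free_request(rq) ... */
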
@@ -258,6 +262,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 
        if (rq->cmd_flags & REQ_MQ_INFLIGHT)
                atomic_dec(&hctx->nr_active);
+       rq->cmd_flags = 0;
 
        clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
        blk_mq_put_tag(hctx, tag, &ctx->last_tag);
@@ -296,7 +301,7 @@ void blk_mq_clone_flush_request(struct request *flush_rq,
                hctx->cmd_size);
 }
 
-inline void __blk_mq_end_io(struct request *rq, int error)
+inline void __blk_mq_end_request(struct request *rq, int error)
 {
        blk_account_io_done(rq);
 
@@ -308,15 +313,15 @@ inline void __blk_mq_end_io(struct request *rq, int error)
                blk_mq_free_request(rq);
        }
 }
-EXPORT_SYMBOL(__blk_mq_end_io);
+EXPORT_SYMBOL(__blk_mq_end_request);
 
-void blk_mq_end_io(struct request *rq, int error)
+void blk_mq_end_request(struct request *rq, int error)
 {
        if (blk_update_request(rq, error, blk_rq_bytes(rq)))
                BUG();
-       __blk_mq_end_io(rq, error);
+       __blk_mq_end_request(rq, error);
 }
-EXPORT_SYMBOL(blk_mq_end_io);
+EXPORT_SYMBOL(blk_mq_end_request);
 
 static void __blk_mq_complete_request_remote(void *data)
 {
@@ -356,7 +361,7 @@ void __blk_mq_complete_request(struct request *rq)
        struct request_queue *q = rq->q;
 
        if (!q->softirq_done_fn)
-               blk_mq_end_io(rq, rq->errors);
+               blk_mq_end_request(rq, rq->errors);
        else
                blk_mq_ipi_complete_request(rq);
 }
@@ -380,7 +385,7 @@ void blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
-static void blk_mq_start_request(struct request *rq, bool last)
+void blk_mq_start_request(struct request *rq)
 {
        struct request_queue *q = rq->q;
 
@@ -392,6 +397,12 @@ static void blk_mq_start_request(struct request *rq, bool last)
 
        blk_add_timer(rq);
 
+       /*
+        * Ensure that ->deadline is visible before we set the started
+        * flag and clear the completed flag.
+        */
+       smp_mb__before_atomic();
+
        /*
         * Mark us as started and clear complete. Complete might have been
         * set if requeue raced with timeout, which then marked it as
@@ -411,35 +422,24 @@ static void blk_mq_start_request(struct request *rq, bool last)
                 */
                rq->nr_phys_segments++;
        }
-
-       /*
-        * Flag the last request in the series so that drivers know when IO
-        * should be kicked off, if they don't do it on a per-request basis.
-        *
-        * Note: the flag isn't the only condition drivers should do kick off.
-        * If drive is busy, the last request might not have the bit set.
-        */
-       if (last)
-               rq->cmd_flags |= REQ_END;
 }
+EXPORT_SYMBOL(blk_mq_start_request);
 
 static void __blk_mq_requeue_request(struct request *rq)
 {
        struct request_queue *q = rq->q;
 
        trace_block_rq_requeue(q, rq);
-       clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
 
-       rq->cmd_flags &= ~REQ_END;
-
-       if (q->dma_drain_size && blk_rq_bytes(rq))
-               rq->nr_phys_segments--;
+       if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
+               if (q->dma_drain_size && blk_rq_bytes(rq))
+                       rq->nr_phys_segments--;
+       }
 }
 
 void blk_mq_requeue_request(struct request *rq)
 {
        __blk_mq_requeue_request(rq);
-       blk_clear_rq_complete(rq);
 
        BUG_ON(blk_queued_rq(rq));
        blk_mq_add_to_requeue_list(rq, true);
@@ -473,7 +473,11 @@ static void blk_mq_requeue_work(struct work_struct *work)
                blk_mq_insert_request(rq, false, false, false);
        }
 
-       blk_mq_run_queues(q, false);
+       /*
+        * Use the start variant of queue running here, so that running
+        * the requeue work will kick stopped queues.
+        */
+       blk_mq_start_hw_queues(q);
 }
 
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
@@ -522,60 +526,14 @@ struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
 EXPORT_SYMBOL(blk_mq_tag_to_rq);
 
 struct blk_mq_timeout_data {
-       struct blk_mq_hw_ctx *hctx;
-       unsigned long *next;
-       unsigned int *next_set;
+       unsigned long next;
+       unsigned int next_set;
 };
 
-static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
-{
-       struct blk_mq_timeout_data *data = __data;
-       struct blk_mq_hw_ctx *hctx = data->hctx;
-       unsigned int tag;
-
-        /* It may not be in flight yet (this is where
-        * the REQ_ATOMIC_STARTED flag comes in). The requests are
-        * statically allocated, so we know it's always safe to access the
-        * memory associated with a bit offset into ->rqs[].
-        */
-       tag = 0;
-       do {
-               struct request *rq;
-
-               tag = find_next_zero_bit(free_tags, hctx->tags->nr_tags, tag);
-               if (tag >= hctx->tags->nr_tags)
-                       break;
-
-               rq = blk_mq_tag_to_rq(hctx->tags, tag++);
-               if (rq->q != hctx->queue)
-                       continue;
-               if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
-                       continue;
-
-               blk_rq_check_expired(rq, data->next, data->next_set);
-       } while (1);
-}
-
-static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx,
-                                       unsigned long *next,
-                                       unsigned int *next_set)
+void blk_mq_rq_timed_out(struct request *req, bool reserved)
 {
-       struct blk_mq_timeout_data data = {
-               .hctx           = hctx,
-               .next           = next,
-               .next_set       = next_set,
-       };
-
-       /*
-        * Ask the tagging code to iterate busy requests, so we can
-        * check them for timeout.
-        */
-       blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data);
-}
-
-static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
-{
-       struct request_queue *q = rq->q;
+       struct blk_mq_ops *ops = req->q->mq_ops;
+       enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
 
        /*
         * We know that complete is set at this point. If STARTED isn't set
@@ -586,21 +544,54 @@ static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
         * both flags will get cleared. So check here again, and ignore
         * a timeout event with a request that isn't active.
         */
-       if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
-               return BLK_EH_NOT_HANDLED;
+       if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
+               return;
 
-       if (!q->mq_ops->timeout)
-               return BLK_EH_RESET_TIMER;
+       if (ops->timeout)
+               ret = ops->timeout(req, reserved);
+
+       switch (ret) {
+       case BLK_EH_HANDLED:
+               __blk_mq_complete_request(req);
+               break;
+       case BLK_EH_RESET_TIMER:
+               blk_add_timer(req);
+               blk_clear_rq_complete(req);
+               break;
+       case BLK_EH_NOT_HANDLED:
+               break;
+       default:
+               printk(KERN_ERR "block: bad eh return: %d\n", ret);
+               break;
+       }
+}
+
+static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
+               struct request *rq, void *priv, bool reserved)
+{
+       struct blk_mq_timeout_data *data = priv;
+
+       if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+               return;
 
-       return q->mq_ops->timeout(rq);
+       if (time_after_eq(jiffies, rq->deadline)) {
+               if (!blk_mark_rq_complete(rq))
+                       blk_mq_rq_timed_out(rq, reserved);
+       } else if (!data->next_set || time_after(data->next, rq->deadline)) {
+               data->next = rq->deadline;
+               data->next_set = 1;
+       }
 }
 
-static void blk_mq_rq_timer(unsigned long data)
+static void blk_mq_rq_timer(unsigned long priv)
 {
-       struct request_queue *q = (struct request_queue *) data;
+       struct request_queue *q = (struct request_queue *)priv;
+       struct blk_mq_timeout_data data = {
+               .next           = 0,
+               .next_set       = 0,
+       };
        struct blk_mq_hw_ctx *hctx;
-       unsigned long next = 0;
-       int i, next_set = 0;
+       int i;
 
        queue_for_each_hw_ctx(q, hctx, i) {
                /*
@@ -610,12 +601,12 @@ static void blk_mq_rq_timer(unsigned long data)
                if (!hctx->nr_ctx || !hctx->tags)
                        continue;
 
-               blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set);
+               blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
        }
 
-       if (next_set) {
-               next = blk_rq_timeout(round_jiffies_up(next));
-               mod_timer(&q->timeout, next);
+       if (data.next_set) {
+               data.next = blk_rq_timeout(round_jiffies_up(data.next));
+               mod_timer(&q->timeout, data.next);
        } else {
                queue_for_each_hw_ctx(q, hctx, i)
                        blk_mq_tag_idle(hctx);
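
With the timeout path reworked above, a driver's ->timeout callback now receives the request plus the reserved flag and reports back one of the BLK_EH_* codes that blk_mq_rq_timed_out() switches on. A hedged driver-side sketch; example_abort_cmd() and the decision logic are hypothetical:

static enum blk_eh_timer_return example_timeout(struct request *rq, bool reserved)
{
	/*
	 * BLK_EH_HANDLED:     blk-mq completes the request on our behalf
	 * BLK_EH_RESET_TIMER: blk-mq re-arms the deadline via blk_add_timer()
	 * BLK_EH_NOT_HANDLED: the driver will complete the request itself later
	 */
	if (example_abort_cmd(rq))	/* hypothetical hardware abort */
		return BLK_EH_HANDLED;

	return BLK_EH_RESET_TIMER;
}
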
@@ -741,9 +732,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
                rq = list_first_entry(&rq_list, struct request, queuelist);
                list_del_init(&rq->queuelist);
 
-               blk_mq_start_request(rq, list_empty(&rq_list));
-
-               ret = q->mq_ops->queue_rq(hctx, rq);
+               ret = q->mq_ops->queue_rq(hctx, rq, list_empty(&rq_list));
                switch (ret) {
                case BLK_MQ_RQ_QUEUE_OK:
                        queued++;
@@ -756,7 +745,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
                        pr_err("blk-mq: bad return on queue: %d\n", ret);
                case BLK_MQ_RQ_QUEUE_ERROR:
                        rq->errors = -EIO;
-                       blk_mq_end_io(rq, rq->errors);
+                       blk_mq_end_request(rq, rq->errors);
                        break;
                }
 
@@ -957,14 +946,9 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
 
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-       if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
-           !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
-               blk_insert_flush(rq);
-       } else {
-               spin_lock(&ctx->lock);
-               __blk_mq_insert_request(hctx, rq, at_head);
-               spin_unlock(&ctx->lock);
-       }
+       spin_lock(&ctx->lock);
+       __blk_mq_insert_request(hctx, rq, at_head);
+       spin_unlock(&ctx->lock);
 
        if (run_queue)
                blk_mq_run_hw_queue(hctx, async);
@@ -1189,14 +1173,13 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
                int ret;
 
                blk_mq_bio_to_request(rq, bio);
-               blk_mq_start_request(rq, true);
 
                /*
                 * For OK queue, we are done. For error, kill it. Any other
                 * error (busy), just add it to our list as we previously
                 * would have done
                 */
-               ret = q->mq_ops->queue_rq(data.hctx, rq);
+               ret = q->mq_ops->queue_rq(data.hctx, rq, true);
                if (ret == BLK_MQ_RQ_QUEUE_OK)
                        goto done;
                else {
@@ -1204,7 +1187,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
                        if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
                                rq->errors = -EIO;
-                               blk_mq_end_io(rq, rq->errors);
+                               blk_mq_end_request(rq, rq->errors);
                                goto done;
                        }
                }
@@ -1321,6 +1304,7 @@ static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
                                continue;
                        set->ops->exit_request(set->driver_data, tags->rqs[i],
                                                hctx_idx, i);
+                       tags->rqs[i] = NULL;
                }
        }
 
@@ -1354,8 +1338,9 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 
        INIT_LIST_HEAD(&tags->page_list);
 
-       tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *),
-                                       GFP_KERNEL, set->numa_node);
+       tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
+                                GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+                                set->numa_node);
        if (!tags->rqs) {
                blk_mq_free_tags(tags);
                return NULL;
@@ -1379,8 +1364,9 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
                        this_order--;
 
                do {
-                       page = alloc_pages_node(set->numa_node, GFP_KERNEL,
-                                               this_order);
+                       page = alloc_pages_node(set->numa_node,
+                               GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+                               this_order);
                        if (page)
                                break;
                        if (!this_order--)
@@ -1401,11 +1387,15 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
                left -= to_do * rq_size;
                for (j = 0; j < to_do; j++) {
                        tags->rqs[i] = p;
+                       tags->rqs[i]->atomic_flags = 0;
+                       tags->rqs[i]->cmd_flags = 0;
                        if (set->ops->init_request) {
                                if (set->ops->init_request(set->driver_data,
                                                tags->rqs[i], hctx_idx, i,
-                                               set->numa_node))
+                                               set->numa_node)) {
+                                       tags->rqs[i] = NULL;
                                        goto fail;
+                               }
                        }
 
                        p += rq_size;
@@ -1416,7 +1406,6 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
        return tags;
 
 fail:
-       pr_warn("%s: failed to allocate requests\n", __func__);
        blk_mq_free_rq_map(set, tags, hctx_idx);
        return NULL;
 }
@@ -1520,6 +1509,20 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
        return NOTIFY_OK;
 }
 
+static void blk_mq_exit_hctx(struct request_queue *q,
+               struct blk_mq_tag_set *set,
+               struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+{
+       blk_mq_tag_idle(hctx);
+
+       if (set->ops->exit_hctx)
+               set->ops->exit_hctx(hctx, hctx_idx);
+
+       blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
+       kfree(hctx->ctxs);
+       blk_mq_free_bitmap(&hctx->ctx_map);
+}
+
 static void blk_mq_exit_hw_queues(struct request_queue *q,
                struct blk_mq_tag_set *set, int nr_queue)
 {
@@ -1529,17 +1532,8 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
        queue_for_each_hw_ctx(q, hctx, i) {
                if (i == nr_queue)
                        break;
-
-               blk_mq_tag_idle(hctx);
-
-               if (set->ops->exit_hctx)
-                       set->ops->exit_hctx(hctx, i);
-
-               blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
-               kfree(hctx->ctxs);
-               blk_mq_free_bitmap(&hctx->ctx_map);
+               blk_mq_exit_hctx(q, set, hctx, i);
        }
-
 }
 
 static void blk_mq_free_hw_queues(struct request_queue *q,
@@ -1554,53 +1548,72 @@ static void blk_mq_free_hw_queues(struct request_queue *q,
        }
 }
 
-static int blk_mq_init_hw_queues(struct request_queue *q,
-               struct blk_mq_tag_set *set)
+static int blk_mq_init_hctx(struct request_queue *q,
+               struct blk_mq_tag_set *set,
+               struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
 {
-       struct blk_mq_hw_ctx *hctx;
-       unsigned int i;
+       int node;
+
+       node = hctx->numa_node;
+       if (node == NUMA_NO_NODE)
+               node = hctx->numa_node = set->numa_node;
+
+       INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
+       INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
+       spin_lock_init(&hctx->lock);
+       INIT_LIST_HEAD(&hctx->dispatch);
+       hctx->queue = q;
+       hctx->queue_num = hctx_idx;
+       hctx->flags = set->flags;
+       hctx->cmd_size = set->cmd_size;
+
+       blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
+                                       blk_mq_hctx_notify, hctx);
+       blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
+
+       hctx->tags = set->tags[hctx_idx];
 
        /*
-        * Initialize hardware queues
+        * Allocate space for all possible cpus to avoid allocation at
+        * runtime
         */
-       queue_for_each_hw_ctx(q, hctx, i) {
-               int node;
+       hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
+                                       GFP_KERNEL, node);
+       if (!hctx->ctxs)
+               goto unregister_cpu_notifier;
 
-               node = hctx->numa_node;
-               if (node == NUMA_NO_NODE)
-                       node = hctx->numa_node = set->numa_node;
+       if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
+               goto free_ctxs;
 
-               INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
-               INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
-               spin_lock_init(&hctx->lock);
-               INIT_LIST_HEAD(&hctx->dispatch);
-               hctx->queue = q;
-               hctx->queue_num = i;
-               hctx->flags = set->flags;
-               hctx->cmd_size = set->cmd_size;
+       hctx->nr_ctx = 0;
 
-               blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
-                                               blk_mq_hctx_notify, hctx);
-               blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
+       if (set->ops->init_hctx &&
+           set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
+               goto free_bitmap;
 
-               hctx->tags = set->tags[i];
+       return 0;
 
-               /*
-                * Allocate space for all possible cpus to avoid allocation at
-                * runtime
-                */
-               hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
-                                               GFP_KERNEL, node);
-               if (!hctx->ctxs)
-                       break;
+ free_bitmap:
+       blk_mq_free_bitmap(&hctx->ctx_map);
+ free_ctxs:
+       kfree(hctx->ctxs);
+ unregister_cpu_notifier:
+       blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
 
-               if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
-                       break;
+       return -1;
+}
 
-               hctx->nr_ctx = 0;
+static int blk_mq_init_hw_queues(struct request_queue *q,
+               struct blk_mq_tag_set *set)
+{
+       struct blk_mq_hw_ctx *hctx;
+       unsigned int i;
 
-               if (set->ops->init_hctx &&
-                   set->ops->init_hctx(hctx, set->driver_data, i))
+       /*
+        * Initialize hardware queues
+        */
+       queue_for_each_hw_ctx(q, hctx, i) {
+               if (blk_mq_init_hctx(q, set, hctx, i))
                        break;
        }
 
@@ -1754,6 +1767,16 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
        if (!ctx)
                return ERR_PTR(-ENOMEM);
 
+       /*
+        * If a crashdump is active, then we are potentially in a very
+        * memory constrained environment. Limit us to 1 queue and
+        * 64 tags to prevent using too much memory.
+        */
+       if (is_kdump_kernel()) {
+               set->nr_hw_queues = 1;
+               set->queue_depth = min(64U, set->queue_depth);
+       }
+
        hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
                        set->numa_node);
 
@@ -1814,7 +1837,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
        else
                blk_queue_make_request(q, blk_sq_make_request);
 
-       blk_queue_rq_timed_out(q, blk_mq_rq_timed_out);
        if (set->timeout)
                blk_queue_rq_timeout(q, set->timeout);
 
@@ -1826,17 +1848,10 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
        if (set->ops->complete)
                blk_queue_softirq_done(q, set->ops->complete);
 
-       blk_mq_init_flush(q);
        blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
-       q->flush_rq = kzalloc(round_up(sizeof(struct request) +
-                               set->cmd_size, cache_line_size()),
-                               GFP_KERNEL);
-       if (!q->flush_rq)
-               goto err_hw;
-
        if (blk_mq_init_hw_queues(q, set))
-               goto err_flush_rq;
+               goto err_hw;
 
        mutex_lock(&all_q_mutex);
        list_add_tail(&q->all_q_node, &all_q_list);
@@ -1844,12 +1859,15 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
        blk_mq_add_queue_tag_set(set, q);
 
+       if (blk_mq_init_flush(q))
+               goto err_hw_queues;
+
        blk_mq_map_swqueue(q);
 
        return q;
 
-err_flush_rq:
-       kfree(q->flush_rq);
+err_hw_queues:
+       blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
 err_hw:
        blk_cleanup_queue(q);
 err_hctxs:
@@ -1936,6 +1954,60 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
        return NOTIFY_OK;
 }
 
+static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+{
+       int i;
+
+       for (i = 0; i < set->nr_hw_queues; i++) {
+               set->tags[i] = blk_mq_init_rq_map(set, i);
+               if (!set->tags[i])
+                       goto out_unwind;
+       }
+
+       return 0;
+
+out_unwind:
+       while (--i >= 0)
+               blk_mq_free_rq_map(set, set->tags[i], i);
+
+       return -ENOMEM;
+}
+
+/*
+ * Allocate the request maps associated with this tag_set. Note that this
+ * may reduce the depth asked for, if memory is tight. set->queue_depth
+ * will be updated to reflect the allocated depth.
+ */
+static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+{
+       unsigned int depth;
+       int err;
+
+       depth = set->queue_depth;
+       do {
+               err = __blk_mq_alloc_rq_maps(set);
+               if (!err)
+                       break;
+
+               set->queue_depth >>= 1;
+               if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
+                       err = -ENOMEM;
+                       break;
+               }
+       } while (set->queue_depth);
+
+       if (!set->queue_depth || err) {
+               pr_err("blk-mq: failed to allocate request map\n");
+               return -ENOMEM;
+       }
+
+       if (depth != set->queue_depth)
+               pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
+                                               depth, set->queue_depth);
+
+       return 0;
+}
+
 /*
  * Alloc a tag set to be associated with one or more request queues.
  * May fail with EINVAL for various error conditions. May adjust the
@@ -1944,8 +2016,6 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
  */
 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 {
-       int i;
-
        if (!set->nr_hw_queues)
                return -EINVAL;
        if (!set->queue_depth)
@@ -1966,23 +2036,18 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
                                 sizeof(struct blk_mq_tags *),
                                 GFP_KERNEL, set->numa_node);
        if (!set->tags)
-               goto out;
+               return -ENOMEM;
 
-       for (i = 0; i < set->nr_hw_queues; i++) {
-               set->tags[i] = blk_mq_init_rq_map(set, i);
-               if (!set->tags[i])
-                       goto out_unwind;
-       }
+       if (blk_mq_alloc_rq_maps(set))
+               goto enomem;
 
        mutex_init(&set->tag_list_lock);
        INIT_LIST_HEAD(&set->tag_list);
 
        return 0;
-
-out_unwind:
-       while (--i >= 0)
-               blk_mq_free_rq_map(set, set->tags[i], i);
-out:
+enomem:
+       kfree(set->tags);
+       set->tags = NULL;
        return -ENOMEM;
 }
 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
@@ -1997,6 +2062,7 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
        }
 
        kfree(set->tags);
+       set->tags = NULL;
 }
 EXPORT_SYMBOL(blk_mq_free_tag_set);
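
Taken together, the interface changes in this diff (the "last" argument moving into ->queue_rq(), blk_mq_start_request() becoming an exported driver responsibility, and the blk_mq_end_io() -> blk_mq_end_request() rename) shift the start/end bookkeeping into the driver. A hedged sketch of the resulting driver shape; all example_* names are hypothetical:

static int example_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq,
			    bool last)
{
	struct example_dev *dev = hctx->driver_data;	/* hypothetical */

	/* The driver, not the blk-mq core, now marks the request started. */
	blk_mq_start_request(rq);

	if (!example_hw_submit(dev, rq, last))	/* hypothetical submit helper */
		return BLK_MQ_RQ_QUEUE_BUSY;	/* blk-mq will requeue and retry */

	return BLK_MQ_RQ_QUEUE_OK;
}

/* Completion, e.g. from the interrupt handler or the softirq ->complete path. */
static void example_complete_rq(struct request *rq)
{
	blk_mq_end_request(rq, rq->errors);	/* was blk_mq_end_io() */
}

static struct blk_mq_ops example_mq_ops = {
	.queue_rq	= example_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.timeout	= example_timeout,	/* see the sketch above */
};
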