/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"
#include "blk-wbt.h"

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
        return sbitmap_any_bit_set(&hctx->ctx_map);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
                                     struct blk_mq_ctx *ctx)
{
        if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
                sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
                                      struct blk_mq_ctx *ctx)
{
        sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
}

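/*
 * Begin freezing the queue: the first caller to increment the freeze
 * depth kills q_usage_counter and runs the hardware queues so that
 * in-flight requests can drain.
 */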
void blk_mq_freeze_queue_start(struct request_queue *q)
{
        int freeze_depth;

        freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
        if (freeze_depth == 1) {
                percpu_ref_kill(&q->q_usage_counter);
                blk_mq_run_hw_queues(q, false);
        }
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);

static void blk_mq_freeze_queue_wait(struct request_queue *q)
{
        wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
        /*
         * In the !blk_mq case we are only calling this to kill the
         * q_usage_counter, otherwise this increases the freeze depth
         * and waits for it to return to zero.  For this reason there is
         * no blk_unfreeze_queue(), and blk_freeze_queue() is not
         * exported to drivers as the only user for unfreeze is blk_mq.
         */
        blk_mq_freeze_queue_start(q);
        blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
        /*
         * ...just an alias to keep freeze and unfreeze actions balanced
         * in the blk_mq_* namespace
         */
        blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

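/*
 * Drop one freeze reference; once the freeze depth reaches zero, revive
 * q_usage_counter and wake up everyone waiting to enter the queue.
 */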
void blk_mq_unfreeze_queue(struct request_queue *q)
{
        int freeze_depth;

        freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
        WARN_ON_ONCE(freeze_depth < 0);
        if (!freeze_depth) {
                percpu_ref_reinit(&q->q_usage_counter);
                wake_up_all(&q->mq_freeze_wq);
        }
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing queue_rq calls have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked. Additionally, new queue_rq() calls may
 * still occur unless the queue has been stopped first.
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;
        bool rcu = false;

        blk_mq_stop_hw_queues(q);

        queue_for_each_hw_ctx(q, hctx, i) {
                if (hctx->flags & BLK_MQ_F_BLOCKING)
                        synchronize_srcu(&hctx->queue_rq_srcu);
                else
                        rcu = true;
        }
        if (rcu)
                synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

void blk_mq_wake_waiters(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;

        queue_for_each_hw_ctx(q, hctx, i)
                if (blk_mq_hw_queue_mapped(hctx))
                        blk_mq_tag_wakeup_all(hctx->tags, true);

        /*
         * If we are called because the queue has now been marked as
         * dying, we need to ensure that processes currently waiting on
         * the queue are notified as well.
         */
        wake_up_all(&q->mq_freeze_wq);
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
        return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

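/*
 * Initialize a request freshly allocated from the tag set: reset all
 * fields to their defaults and associate the request with the software
 * queue (ctx) and operation it was allocated for.
 */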
static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
                               struct request *rq, unsigned int op)
{
        INIT_LIST_HEAD(&rq->queuelist);
        /* csd/requeue_work/fifo_time is initialized before use */
        rq->q = q;
        rq->mq_ctx = ctx;
        rq->cmd_flags = op;
        if (blk_queue_io_stat(q))
                rq->rq_flags |= RQF_IO_STAT;
        /* do not touch atomic flags, it needs atomic ops against the timer */
        rq->cpu = -1;
        INIT_HLIST_NODE(&rq->hash);
        RB_CLEAR_NODE(&rq->rb_node);
        rq->rq_disk = NULL;
        rq->part = NULL;
        rq->start_time = jiffies;
#ifdef CONFIG_BLK_CGROUP
        rq->rl = NULL;
        set_start_time_ns(rq);
        rq->io_start_time_ns = 0;
#endif
        rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        rq->nr_integrity_segments = 0;
#endif
        rq->special = NULL;
        /* tag was already set */
        rq->errors = 0;

        rq->cmd = rq->__cmd;

        rq->extra_len = 0;
        rq->sense_len = 0;
        rq->resid_len = 0;
        rq->sense = NULL;

        INIT_LIST_HEAD(&rq->timeout_list);
        rq->timeout = 0;

        rq->end_io = NULL;
        rq->end_io_data = NULL;
        rq->next_rq = NULL;

        ctx->rq_dispatched[op_is_sync(op)]++;
}

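/*
 * Grab a tag from the hardware context in @data and initialize the
 * request it maps to. Returns NULL if no tag could be obtained.
 */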
static struct request *
__blk_mq_alloc_request(struct blk_mq_alloc_data *data, unsigned int op)
{
        struct request *rq;
        unsigned int tag;

        tag = blk_mq_get_tag(data);
        if (tag != BLK_MQ_TAG_FAIL) {
                rq = data->hctx->tags->rqs[tag];

                if (blk_mq_tag_busy(data->hctx)) {
                        rq->rq_flags = RQF_MQ_INFLIGHT;
                        atomic_inc(&data->hctx->nr_active);
                }

                rq->tag = tag;
                blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
                return rq;
        }

        return NULL;
}

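/*
 * Allocate and initialize a request for @q. Unless BLK_MQ_REQ_NOWAIT is
 * set in @flags, this may block waiting for the queue to unfreeze and
 * for a free tag.
 */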
struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
                unsigned int flags)
{
        struct blk_mq_ctx *ctx;
        struct blk_mq_hw_ctx *hctx;
        struct request *rq;
        struct blk_mq_alloc_data alloc_data;
        int ret;

        ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
        if (ret)
                return ERR_PTR(ret);

        ctx = blk_mq_get_ctx(q);
        hctx = blk_mq_map_queue(q, ctx->cpu);
        blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
        rq = __blk_mq_alloc_request(&alloc_data, rw);
        blk_mq_put_ctx(ctx);

        if (!rq) {
                blk_queue_exit(q);
                return ERR_PTR(-EWOULDBLOCK);
        }

        rq->__data_len = 0;
        rq->__sector = (sector_t) -1;
        rq->bio = rq->biotail = NULL;
        return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);

struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
                unsigned int flags, unsigned int hctx_idx)
{
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        struct request *rq;
        struct blk_mq_alloc_data alloc_data;
        int ret;

        /*
         * If the tag allocator sleeps we could get an allocation for a
         * different hardware context.  No need to complicate the low level
         * allocator for this for the rare use case of a command tied to
         * a specific queue.
         */
        if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
                return ERR_PTR(-EINVAL);

        if (hctx_idx >= q->nr_hw_queues)
                return ERR_PTR(-EIO);

        ret = blk_queue_enter(q, true);
        if (ret)
                return ERR_PTR(ret);

        /*
         * Check if the hardware context is actually mapped to anything.
         * If not tell the caller that it should skip this queue.
         */
        hctx = q->queue_hw_ctx[hctx_idx];
        if (!blk_mq_hw_queue_mapped(hctx)) {
                ret = -EXDEV;
                goto out_queue_exit;
        }
        ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));

        blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
        rq = __blk_mq_alloc_request(&alloc_data, rw);
        if (!rq) {
                ret = -EWOULDBLOCK;
                goto out_queue_exit;
        }

        return rq;

out_queue_exit:
        blk_queue_exit(q);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

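/*
 * Release a request back to the tag set: drop the active count if the
 * request was accounted as in-flight, clear its atomic flags, return
 * the tag, and drop the queue reference taken at allocation time.
 */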
static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
                                  struct blk_mq_ctx *ctx, struct request *rq)
{
        const int tag = rq->tag;
        struct request_queue *q = rq->q;

        if (rq->rq_flags & RQF_MQ_INFLIGHT)
                atomic_dec(&hctx->nr_active);

        wbt_done(q->rq_wb, &rq->issue_stat);
        rq->rq_flags = 0;

        clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
        clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
        blk_mq_put_tag(hctx, ctx, tag);
        blk_queue_exit(q);
}

void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;

        ctx->rq_completed[rq_is_sync(rq)]++;
        __blk_mq_free_request(hctx, ctx, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);

void blk_mq_free_request(struct request *rq)
{
        blk_mq_free_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

inline void __blk_mq_end_request(struct request *rq, int error)
{
        blk_account_io_done(rq);

        if (rq->end_io) {
                wbt_done(rq->q->rq_wb, &rq->issue_stat);
                rq->end_io(rq, error);
        } else {
                if (unlikely(blk_bidi_rq(rq)))
                        blk_mq_free_request(rq->next_rq);
                blk_mq_free_request(rq);
        }
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, int error)
{
        if (blk_update_request(rq, error, blk_rq_bytes(rq)))
                BUG();
        __blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

static void __blk_mq_complete_request_remote(void *data)
{
        struct request *rq = data;

        rq->q->softirq_done_fn(rq);
}

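/*
 * Run the softirq completion handler on the CPU that submitted the
 * request, using an IPI when that CPU does not share a cache with the
 * completing CPU (or always, if QUEUE_FLAG_SAME_FORCE is set).
 */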
static void blk_mq_ipi_complete_request(struct request *rq)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        bool shared = false;
        int cpu;

        if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
                rq->q->softirq_done_fn(rq);
                return;
        }

        cpu = get_cpu();
        if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
                shared = cpus_share_cache(cpu, ctx->cpu);

        if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
                rq->csd.func = __blk_mq_complete_request_remote;
                rq->csd.info = rq;
                rq->csd.flags = 0;
                smp_call_function_single_async(ctx->cpu, &rq->csd);
        } else {
                rq->q->softirq_done_fn(rq);
        }
        put_cpu();
}

static void blk_mq_stat_add(struct request *rq)
{
        if (rq->rq_flags & RQF_STATS) {
                /*
                 * We could use rq->mq_ctx here, but there's less of a risk
                 * of races if we have the completion event add the stats
                 * to the local software queue.
                 */
                struct blk_mq_ctx *ctx;

                ctx = __blk_mq_get_ctx(rq->q, raw_smp_processor_id());
                blk_stat_add(&ctx->stat[rq_data_dir(rq)], rq);
        }
}

static void __blk_mq_complete_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        blk_mq_stat_add(rq);

        if (!q->softirq_done_fn)
                blk_mq_end_request(rq, rq->errors);
        else
                blk_mq_ipi_complete_request(rq);
}

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:         the request being processed
 * @error:      completion error code (0 for success)
 *
 * Description:
 *      Ends all I/O on a request. It does not handle partial completions.
 *      The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq, int error)
{
        struct request_queue *q = rq->q;

        if (unlikely(blk_should_fake_timeout(q)))
                return;
        if (!blk_mark_rq_complete(rq)) {
                rq->errors = error;
                __blk_mq_complete_request(rq);
        }
}
EXPORT_SYMBOL(blk_mq_complete_request);

int blk_mq_request_started(struct request *rq)
{
        return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
}
EXPORT_SYMBOL_GPL(blk_mq_request_started);

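/*
 * Mark a request as started before it is handed to the driver: set up
 * residual byte counts, arm the timeout timer, and update the STARTED
 * and COMPLETE atomic flags.
 */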
void blk_mq_start_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        trace_block_rq_issue(q, rq);

        rq->resid_len = blk_rq_bytes(rq);
        if (unlikely(blk_bidi_rq(rq)))
                rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);

        if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
                blk_stat_set_issue_time(&rq->issue_stat);
                rq->rq_flags |= RQF_STATS;
                wbt_issue(q->rq_wb, &rq->issue_stat);
        }

        blk_add_timer(rq);

        /*
         * Ensure that ->deadline is visible before we set the started
         * flag and clear the completed flag.
         */
        smp_mb__before_atomic();

        /*
         * Mark us as started and clear complete. Complete might have been
         * set if requeue raced with timeout, which then marked it as
         * complete. So be sure to clear complete again when we start
         * the request, otherwise we'll ignore the completion event.
         */
        if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
                set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
        if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
                clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);

        if (q->dma_drain_size && blk_rq_bytes(rq)) {
                /*
                 * Make sure space for the drain appears.  We know we can do
                 * this because max_hw_segments has been adjusted to be one
                 * fewer than the device can handle.
                 */
                rq->nr_phys_segments++;
        }
}
EXPORT_SYMBOL(blk_mq_start_request);

static void __blk_mq_requeue_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        trace_block_rq_requeue(q, rq);
        wbt_requeue(q->rq_wb, &rq->issue_stat);

        if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
                if (q->dma_drain_size && blk_rq_bytes(rq))
                        rq->nr_phys_segments--;
        }
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
        __blk_mq_requeue_request(rq);

        BUG_ON(blk_queued_rq(rq));
        blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

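/*
 * Work handler that drains the requeue list: requests marked with
 * RQF_SOFTBARRIER are re-inserted at the head, the rest at the tail,
 * and the hardware queues are then re-run.
 */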
static void blk_mq_requeue_work(struct work_struct *work)
{
        struct request_queue *q =
                container_of(work, struct request_queue, requeue_work.work);
        LIST_HEAD(rq_list);
        struct request *rq, *next;
        unsigned long flags;

        spin_lock_irqsave(&q->requeue_lock, flags);
        list_splice_init(&q->requeue_list, &rq_list);
        spin_unlock_irqrestore(&q->requeue_lock, flags);

        list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
                if (!(rq->rq_flags & RQF_SOFTBARRIER))
                        continue;

                rq->rq_flags &= ~RQF_SOFTBARRIER;
                list_del_init(&rq->queuelist);
                blk_mq_insert_request(rq, true, false, false);
        }

        while (!list_empty(&rq_list)) {
                rq = list_entry(rq_list.next, struct request, queuelist);
                list_del_init(&rq->queuelist);
                blk_mq_insert_request(rq, false, false, false);
        }

        blk_mq_run_hw_queues(q, false);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
                                bool kick_requeue_list)
{
        struct request_queue *q = rq->q;
        unsigned long flags;

        /*
         * We abuse this flag that is otherwise used by the I/O scheduler to
         * request head insertion from the workqueue.
         */
        BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

        spin_lock_irqsave(&q->requeue_lock, flags);
        if (at_head) {
                rq->rq_flags |= RQF_SOFTBARRIER;
                list_add(&rq->queuelist, &q->requeue_list);
        } else {
                list_add_tail(&rq->queuelist, &q->requeue_list);
        }
        spin_unlock_irqrestore(&q->requeue_lock, flags);

        if (kick_requeue_list)
                blk_mq_kick_requeue_list(q);
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);

void blk_mq_kick_requeue_list(struct request_queue *q)
{
        kblockd_schedule_delayed_work(&q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
                                    unsigned long msecs)
{
        kblockd_schedule_delayed_work(&q->requeue_work,
                                      msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

void blk_mq_abort_requeue_list(struct request_queue *q)
{
        unsigned long flags;
        LIST_HEAD(rq_list);

        spin_lock_irqsave(&q->requeue_lock, flags);
        list_splice_init(&q->requeue_list, &rq_list);
        spin_unlock_irqrestore(&q->requeue_lock, flags);

        while (!list_empty(&rq_list)) {
                struct request *rq;

                rq = list_first_entry(&rq_list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                rq->errors = -EIO;
                blk_mq_end_request(rq, rq->errors);
        }
}
EXPORT_SYMBOL(blk_mq_abort_requeue_list);

struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
        if (tag < tags->nr_tags) {
                prefetch(tags->rqs[tag]);
                return tags->rqs[tag];
        }

        return NULL;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

struct blk_mq_timeout_data {
        unsigned long next;
        unsigned int next_set;
};

void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
        struct blk_mq_ops *ops = req->q->mq_ops;
        enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;

        /*
         * We know that complete is set at this point. If STARTED isn't set
         * anymore, then the request isn't active and the "timeout" should
         * just be ignored. This can happen due to the bitflag ordering.
         * Timeout first checks if STARTED is set, and if it is, assumes
         * the request is active. But if we race with completion, then
         * both flags will get cleared. So check here again, and ignore
         * a timeout event with a request that isn't active.
         */
        if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
                return;

        if (ops->timeout)
                ret = ops->timeout(req, reserved);

        switch (ret) {
        case BLK_EH_HANDLED:
                __blk_mq_complete_request(req);
                break;
        case BLK_EH_RESET_TIMER:
                blk_add_timer(req);
                blk_clear_rq_complete(req);
                break;
        case BLK_EH_NOT_HANDLED:
                break;
        default:
                printk(KERN_ERR "block: bad eh return: %d\n", ret);
                break;
        }
}

static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
                struct request *rq, void *priv, bool reserved)
{
        struct blk_mq_timeout_data *data = priv;

        if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
                /*
                 * If a request wasn't started before the queue was
                 * marked dying, kill it here or it'll go unnoticed.
                 */
                if (unlikely(blk_queue_dying(rq->q))) {
                        rq->errors = -EIO;
                        blk_mq_end_request(rq, rq->errors);
                }
                return;
        }

        if (time_after_eq(jiffies, rq->deadline)) {
                if (!blk_mark_rq_complete(rq))
                        blk_mq_rq_timed_out(rq, reserved);
        } else if (!data->next_set || time_after(data->next, rq->deadline)) {
                data->next = rq->deadline;
                data->next_set = 1;
        }
}

static void blk_mq_timeout_work(struct work_struct *work)
{
        struct request_queue *q =
                container_of(work, struct request_queue, timeout_work);
        struct blk_mq_timeout_data data = {
                .next           = 0,
                .next_set       = 0,
        };
        int i;

        /*
         * A deadlock might occur if a request is stuck requiring a
         * timeout at the same time a queue freeze is waiting for
         * completion, since the timeout code would not be able to
         * acquire the queue reference here.
         *
         * That's why we don't use blk_queue_enter here; instead, we use
         * percpu_ref_tryget directly, because we need to be able to
         * obtain a reference even in the short window between the queue
         * starting to freeze, by dropping the first reference in
         * blk_mq_freeze_queue_start, and the moment the last request is
         * consumed, marked by the instant q_usage_counter reaches
         * zero.
         */
        if (!percpu_ref_tryget(&q->q_usage_counter))
                return;

        blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);

        if (data.next_set) {
                data.next = blk_rq_timeout(round_jiffies_up(data.next));
                mod_timer(&q->timeout, data.next);
        } else {
                struct blk_mq_hw_ctx *hctx;

                queue_for_each_hw_ctx(q, hctx, i) {
                        /* the hctx may be unmapped, so check it here */
                        if (blk_mq_hw_queue_mapped(hctx))
                                blk_mq_tag_idle(hctx);
                }
        }
        blk_queue_exit(q);
}

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, so as not
 * to spend too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
                                 struct blk_mq_ctx *ctx, struct bio *bio)
{
        struct request *rq;
        int checked = 8;

        list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
                int el_ret;

                if (!checked--)
                        break;

                if (!blk_rq_merge_ok(rq, bio))
                        continue;

                el_ret = blk_try_merge(rq, bio);
                if (el_ret == ELEVATOR_BACK_MERGE) {
                        if (bio_attempt_back_merge(q, rq, bio)) {
                                ctx->rq_merged++;
                                return true;
                        }
                        break;
                } else if (el_ret == ELEVATOR_FRONT_MERGE) {
                        if (bio_attempt_front_merge(q, rq, bio)) {
                                ctx->rq_merged++;
                                return true;
                        }
                        break;
                }
        }

        return false;
}

struct flush_busy_ctx_data {
        struct blk_mq_hw_ctx *hctx;
        struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
        struct flush_busy_ctx_data *flush_data = data;
        struct blk_mq_hw_ctx *hctx = flush_data->hctx;
        struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];

        sbitmap_clear_bit(sb, bitnr);
        spin_lock(&ctx->lock);
        list_splice_tail_init(&ctx->rq_list, flush_data->list);
        spin_unlock(&ctx->lock);
        return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
        struct flush_busy_ctx_data data = {
                .hctx = hctx,
                .list = list,
        };

        sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}

static inline unsigned int queued_to_index(unsigned int queued)
{
        if (!queued)
                return 0;

        return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}

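/*
 * Pass the requests on @list to the driver one by one. Requests the
 * driver reports as busy are put back and spliced onto hctx->dispatch
 * for the next queue run. Returns false if the driver returned busy.
 */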
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
        struct request_queue *q = hctx->queue;
        struct request *rq;
        LIST_HEAD(driver_list);
        struct list_head *dptr;
        int queued, ret = BLK_MQ_RQ_QUEUE_OK;

        /*
         * Start off with dptr being NULL, so we start the first request
         * immediately, even if we have more pending.
         */
        dptr = NULL;

        /*
         * Now process all the entries, sending them to the driver.
         */
        queued = 0;
        while (!list_empty(list)) {
                struct blk_mq_queue_data bd;

                rq = list_first_entry(list, struct request, queuelist);
                list_del_init(&rq->queuelist);

                bd.rq = rq;
                bd.list = dptr;
                bd.last = list_empty(list);

                ret = q->mq_ops->queue_rq(hctx, &bd);
                switch (ret) {
                case BLK_MQ_RQ_QUEUE_OK:
                        queued++;
                        break;
                case BLK_MQ_RQ_QUEUE_BUSY:
                        list_add(&rq->queuelist, list);
                        __blk_mq_requeue_request(rq);
                        break;
                default:
                        pr_err("blk-mq: bad return on queue: %d\n", ret);
                case BLK_MQ_RQ_QUEUE_ERROR:
                        rq->errors = -EIO;
                        blk_mq_end_request(rq, rq->errors);
                        break;
                }

                if (ret == BLK_MQ_RQ_QUEUE_BUSY)
                        break;

                /*
                 * We've done the first request. If we have more than 1
                 * left in the list, set dptr to defer issue.
                 */
                if (!dptr && list->next != list->prev)
                        dptr = &driver_list;
        }

        hctx->dispatched[queued_to_index(queued)]++;

        /*
         * Any items that need requeuing? Stuff them into hctx->dispatch,
         * that is where we will continue on next queue run.
         */
        if (!list_empty(list)) {
                spin_lock(&hctx->lock);
                list_splice(list, &hctx->dispatch);
                spin_unlock(&hctx->lock);

                /*
                 * The queue is expected to be stopped with BLK_MQ_RQ_QUEUE_BUSY,
                 * but it's possible the queue is stopped and restarted again
                 * before this. Queue restart will dispatch requests. And since
                 * requests in rq_list aren't added into hctx->dispatch yet,
                 * the requests in rq_list might get lost.
                 *
                 * blk_mq_run_hw_queue() already checks the STOPPED bit.
                 */
                blk_mq_run_hw_queue(hctx, true);
        }

        return ret != BLK_MQ_RQ_QUEUE_BUSY;
}

/*
 * Run this hardware queue, pulling any software queues mapped to it in.
 * Note that this function currently has various problems around ordering
 * of IO. In particular, we'd like FIFO behaviour on handling existing
 * items on the hctx->dispatch list. Ignore that for now.
 */
static void blk_mq_process_rq_list(struct blk_mq_hw_ctx *hctx)
{
        LIST_HEAD(rq_list);

        if (unlikely(blk_mq_hctx_stopped(hctx)))
                return;

        hctx->run++;

        /*
         * Touch any software queue that has pending entries.
         */
        flush_busy_ctxs(hctx, &rq_list);

        /*
         * If we have previous entries on our dispatch list, grab them
         * and stuff them at the front for more fair dispatch.
         */
        if (!list_empty_careful(&hctx->dispatch)) {
                spin_lock(&hctx->lock);
                if (!list_empty(&hctx->dispatch))
                        list_splice_init(&hctx->dispatch, &rq_list);
                spin_unlock(&hctx->lock);
        }

        blk_mq_dispatch_rq_list(hctx, &rq_list);
}

static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
        int srcu_idx;

        WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
                cpu_online(hctx->next_cpu));

        if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
                rcu_read_lock();
                blk_mq_process_rq_list(hctx);
                rcu_read_unlock();
        } else {
                srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
                blk_mq_process_rq_list(hctx);
                srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
        }
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
        if (hctx->queue->nr_hw_queues == 1)
                return WORK_CPU_UNBOUND;

        if (--hctx->next_cpu_batch <= 0) {
                int next_cpu;

                next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
                if (next_cpu >= nr_cpu_ids)
                        next_cpu = cpumask_first(hctx->cpumask);

                hctx->next_cpu = next_cpu;
                hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
        }

        return hctx->next_cpu;
}

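/*
 * Run a hardware queue: synchronously on the current CPU when it maps
 * to this hctx and the caller allows it, otherwise asynchronously via
 * kblockd on one of the mapped CPUs.
 */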
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
        if (unlikely(blk_mq_hctx_stopped(hctx) ||
                     !blk_mq_hw_queue_mapped(hctx)))
                return;

        if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
                int cpu = get_cpu();
                if (cpumask_test_cpu(cpu, hctx->cpumask)) {
                        __blk_mq_run_hw_queue(hctx);
                        put_cpu();
                        return;
                }

                put_cpu();
        }

        kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
}

void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                if ((!blk_mq_hctx_has_pending(hctx) &&
                    list_empty_careful(&hctx->dispatch)) ||
                    blk_mq_hctx_stopped(hctx))
                        continue;

                blk_mq_run_hw_queue(hctx, async);
        }
}
EXPORT_SYMBOL(blk_mq_run_hw_queues);

/**
 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
 * @q: request queue.
 *
 * The caller is responsible for serializing this function against
 * blk_mq_{start,stop}_hw_queue().
 */
bool blk_mq_queue_stopped(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                if (blk_mq_hctx_stopped(hctx))
                        return true;

        return false;
}
EXPORT_SYMBOL(blk_mq_queue_stopped);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
        cancel_work(&hctx->run_work);
        cancel_delayed_work(&hctx->delay_work);
        set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);

void blk_mq_stop_hw_queues(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_stop_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);

void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
        clear_bit(BLK_MQ_S_STOPPED, &hctx->state);

        blk_mq_run_hw_queue(hctx, false);
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);

void blk_mq_start_hw_queues(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_start_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);

void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
        if (!blk_mq_hctx_stopped(hctx))
                return;

        clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
        blk_mq_run_hw_queue(hctx, async);
}
EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);

void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_start_stopped_hw_queue(hctx, async);
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);

static void blk_mq_run_work_fn(struct work_struct *work)
{
        struct blk_mq_hw_ctx *hctx;

        hctx = container_of(work, struct blk_mq_hw_ctx, run_work);

        __blk_mq_run_hw_queue(hctx);
}

static void blk_mq_delay_work_fn(struct work_struct *work)
{
        struct blk_mq_hw_ctx *hctx;

        hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);

        if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
                __blk_mq_run_hw_queue(hctx);
}

void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
        if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
                return;

        kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
                        &hctx->delay_work, msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_queue);

static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
                                            struct request *rq,
                                            bool at_head)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;

        trace_block_rq_insert(hctx->queue, rq);

        if (at_head)
                list_add(&rq->queuelist, &ctx->rq_list);
        else
                list_add_tail(&rq->queuelist, &ctx->rq_list);
}

static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
                                    struct request *rq, bool at_head)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;

        __blk_mq_insert_req_list(hctx, rq, at_head);
        blk_mq_hctx_mark_pending(hctx, ctx);
}

void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
                           bool async)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

        spin_lock(&ctx->lock);
        __blk_mq_insert_request(hctx, rq, at_head);
        spin_unlock(&ctx->lock);

        if (run_queue)
                blk_mq_run_hw_queue(hctx, async);
}

static void blk_mq_insert_requests(struct request_queue *q,
                                     struct blk_mq_ctx *ctx,
                                     struct list_head *list,
                                     int depth,
                                     bool from_schedule)
{
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

        trace_block_unplug(q, depth, !from_schedule);

        /*
         * preemption doesn't flush plug list, so it's possible ctx->cpu is
         * offline now
         */
        spin_lock(&ctx->lock);
        while (!list_empty(list)) {
                struct request *rq;

                rq = list_first_entry(list, struct request, queuelist);
                BUG_ON(rq->mq_ctx != ctx);
                list_del_init(&rq->queuelist);
                __blk_mq_insert_req_list(hctx, rq, false);
        }
        blk_mq_hctx_mark_pending(hctx, ctx);
        spin_unlock(&ctx->lock);

        blk_mq_run_hw_queue(hctx, from_schedule);
}

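/*
 * Sort plugged requests by software queue first and sector second, so
 * that requests for the same ctx can be batched together below.
 */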
static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct request *rqa = container_of(a, struct request, queuelist);
        struct request *rqb = container_of(b, struct request, queuelist);

        return !(rqa->mq_ctx < rqb->mq_ctx ||
                 (rqa->mq_ctx == rqb->mq_ctx &&
                  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
        struct blk_mq_ctx *this_ctx;
        struct request_queue *this_q;
        struct request *rq;
        LIST_HEAD(list);
        LIST_HEAD(ctx_list);
        unsigned int depth;

        list_splice_init(&plug->mq_list, &list);

        list_sort(NULL, &list, plug_ctx_cmp);

        this_q = NULL;
        this_ctx = NULL;
        depth = 0;

        while (!list_empty(&list)) {
                rq = list_entry_rq(list.next);
                list_del_init(&rq->queuelist);
                BUG_ON(!rq->q);
                if (rq->mq_ctx != this_ctx) {
                        if (this_ctx) {
                                blk_mq_insert_requests(this_q, this_ctx,
                                                        &ctx_list, depth,
                                                        from_schedule);
                        }

                        this_ctx = rq->mq_ctx;
                        this_q = rq->q;
                        depth = 0;
                }

                depth++;
                list_add_tail(&rq->queuelist, &ctx_list);
        }

        /*
         * If 'this_ctx' is set, we know we have entries to complete
         * on 'ctx_list'. Do those.
         */
        if (this_ctx) {
                blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
                                       from_schedule);
        }
}

static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
        init_request_from_bio(rq, bio);

        blk_account_io_start(rq, true);
}

static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
{
        return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
                !blk_queue_nomerges(hctx->queue);
}

static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
                                         struct blk_mq_ctx *ctx,
                                         struct request *rq, struct bio *bio)
{
        if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
                blk_mq_bio_to_request(rq, bio);
                spin_lock(&ctx->lock);
insert_rq:
                __blk_mq_insert_request(hctx, rq, false);
                spin_unlock(&ctx->lock);
                return false;
        } else {
                struct request_queue *q = hctx->queue;

                spin_lock(&ctx->lock);
                if (!blk_mq_attempt_merge(q, ctx, bio)) {
                        blk_mq_bio_to_request(rq, bio);
                        goto insert_rq;
                }

                spin_unlock(&ctx->lock);
                __blk_mq_free_request(hctx, ctx, rq);
                return true;
        }
}

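/*
 * Pick the software queue for the current CPU, map it to its hardware
 * context, and allocate a request for @bio from that context.
 */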
static struct request *blk_mq_map_request(struct request_queue *q,
                                          struct bio *bio,
                                          struct blk_mq_alloc_data *data)
{
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        struct request *rq;

        blk_queue_enter_live(q);
        ctx = blk_mq_get_ctx(q);
        hctx = blk_mq_map_queue(q, ctx->cpu);

        trace_block_getrq(q, bio, bio->bi_opf);
        blk_mq_set_alloc_data(data, q, 0, ctx, hctx);
        rq = __blk_mq_alloc_request(data, bio->bi_opf);

        data->hctx->queued++;
        return rq;
}

static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
{
        int ret;
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
        struct blk_mq_queue_data bd = {
                .rq = rq,
                .list = NULL,
                .last = 1
        };
        blk_qc_t new_cookie = blk_tag_to_qc_t(rq->tag, hctx->queue_num);

        if (blk_mq_hctx_stopped(hctx))
                goto insert;

        /*
         * If the queue accepts the request we are done. On error, kill it.
         * For any other result (busy), insert the request on the list as
         * we previously would have done.
         */
        ret = q->mq_ops->queue_rq(hctx, &bd);
        if (ret == BLK_MQ_RQ_QUEUE_OK) {
                *cookie = new_cookie;
                return;
        }

        __blk_mq_requeue_request(rq);

        if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
                *cookie = BLK_QC_T_NONE;
                rq->errors = -EIO;
                blk_mq_end_request(rq, rq->errors);
                return;
        }

insert:
        blk_mq_insert_request(rq, false, true, true);
}

/*
 * Multiple hardware queue variant. This will not use per-process plugs,
 * but will attempt to bypass the hctx queueing if we can go straight to
 * hardware for SYNC IO.
 */
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
        const int is_sync = op_is_sync(bio->bi_opf);
        const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
        struct blk_mq_alloc_data data;
        struct request *rq;
        unsigned int request_count = 0, srcu_idx;
        struct blk_plug *plug;
        struct request *same_queue_rq = NULL;
        blk_qc_t cookie;
        unsigned int wb_acct;

        blk_queue_bounce(q, &bio);

        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
                bio_io_error(bio);
                return BLK_QC_T_NONE;
        }

        blk_queue_split(q, &bio, q->bio_split);

        if (!is_flush_fua && !blk_queue_nomerges(q) &&
            blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
                return BLK_QC_T_NONE;

        wb_acct = wbt_wait(q->rq_wb, bio, NULL);

        rq = blk_mq_map_request(q, bio, &data);
        if (unlikely(!rq)) {
                __wbt_done(q->rq_wb, wb_acct);
                return BLK_QC_T_NONE;
        }

        wbt_track(&rq->issue_stat, wb_acct);

        cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);

        if (unlikely(is_flush_fua)) {
                blk_mq_bio_to_request(rq, bio);
                blk_insert_flush(rq);
                goto run_queue;
        }

        plug = current->plug;
        /*
         * If the driver supports deferred issue based on 'last', then
         * queue it up like normal since we can potentially save some
         * CPU this way.
         */
        if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
            !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
                struct request *old_rq = NULL;

                blk_mq_bio_to_request(rq, bio);

                /*
                 * We do limited plugging. If the bio can be merged, do that.
                 * Otherwise the existing request in the plug list will be
                 * issued. So the plug list will have one request at most.
                 */
                if (plug) {
                        /*
                         * The plug list might get flushed before this. If that
                         * happens, same_queue_rq is invalid and the plug list
                         * is empty.
                         */
                        if (same_queue_rq && !list_empty(&plug->mq_list)) {
                                old_rq = same_queue_rq;
                                list_del_init(&old_rq->queuelist);
                        }
                        list_add_tail(&rq->queuelist, &plug->mq_list);
                } else /* is_sync */
                        old_rq = rq;
                blk_mq_put_ctx(data.ctx);
                if (!old_rq)
                        goto done;

                if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
                        rcu_read_lock();
                        blk_mq_try_issue_directly(old_rq, &cookie);
                        rcu_read_unlock();
                } else {
                        srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
                        blk_mq_try_issue_directly(old_rq, &cookie);
                        srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
                }
                goto done;
        }

        if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
                /*
                 * For a SYNC request, send it to the hardware immediately. For
                 * an ASYNC request, just ensure that we run it later on. The
                 * latter allows for merging opportunities and more efficient
                 * dispatching.
                 */
run_queue:
                blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
        }
        blk_mq_put_ctx(data.ctx);
done:
        return cookie;
}

/*
 * Single hardware queue variant. This will attempt to use any per-process
 * plug for merging and IO deferral.
 */
static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
{
        const int is_sync = op_is_sync(bio->bi_opf);
        const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
        struct blk_plug *plug;
        unsigned int request_count = 0;
        struct blk_mq_alloc_data data;
        struct request *rq;
        blk_qc_t cookie;
        unsigned int wb_acct;

        blk_queue_bounce(q, &bio);

        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
                bio_io_error(bio);
                return BLK_QC_T_NONE;
        }

        blk_queue_split(q, &bio, q->bio_split);

        if (!is_flush_fua && !blk_queue_nomerges(q)) {
                if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
                        return BLK_QC_T_NONE;
        } else
                request_count = blk_plug_queued_count(q);

        wb_acct = wbt_wait(q->rq_wb, bio, NULL);

        rq = blk_mq_map_request(q, bio, &data);
        if (unlikely(!rq)) {
                __wbt_done(q->rq_wb, wb_acct);
                return BLK_QC_T_NONE;
        }

        wbt_track(&rq->issue_stat, wb_acct);

        cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);

        if (unlikely(is_flush_fua)) {
                blk_mq_bio_to_request(rq, bio);
                blk_insert_flush(rq);
                goto run_queue;
        }

        /*
         * If a task plug exists, use it to temporarily store requests
         * until the task is either done or scheduled away. Since this is
         * completely lockless, no queue locking is needed here.
         */
        plug = current->plug;
        if (plug) {
                struct request *last = NULL;

                blk_mq_bio_to_request(rq, bio);

                /*
                 * @request_count may become stale because of schedule
                 * out, so check the list again.
                 */
                if (list_empty(&plug->mq_list))
                        request_count = 0;
                if (!request_count)
                        trace_block_plug(q);
                else
                        last = list_entry_rq(plug->mq_list.prev);

                blk_mq_put_ctx(data.ctx);

                if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
                    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
                        blk_flush_plug_list(plug, false);
                        trace_block_plug(q);
                }

                list_add_tail(&rq->queuelist, &plug->mq_list);
                return cookie;
        }

        if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
                /*
                 * For a SYNC request, send it to the hardware immediately. For
                 * an ASYNC request, just ensure that we run it later on. The
                 * latter allows for merging opportunities and more efficient
                 * dispatching.
                 */
run_queue:
                blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
        }

        blk_mq_put_ctx(data.ctx);
        return cookie;
}

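/*
 * Tear down the request map for one hardware queue: give the driver a
 * chance to exit each request, free the backing pages, and free the
 * tags themselves.
 */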
static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
                struct blk_mq_tags *tags, unsigned int hctx_idx)
{
        struct page *page;

        if (tags->rqs && set->ops->exit_request) {
                int i;

                for (i = 0; i < tags->nr_tags; i++) {
                        if (!tags->rqs[i])
                                continue;
                        set->ops->exit_request(set->driver_data, tags->rqs[i],
                                                hctx_idx, i);
                        tags->rqs[i] = NULL;
                }
        }

        while (!list_empty(&tags->page_list)) {
                page = list_first_entry(&tags->page_list, struct page, lru);
                list_del_init(&page->lru);
                /*
                 * Remove kmemleak object previously allocated in
                 * blk_mq_init_rq_map().
                 */
                kmemleak_free(page_address(page));
                __free_pages(page, page->private);
        }

        kfree(tags->rqs);

        blk_mq_free_tags(tags);
}

static size_t order_to_size(unsigned int order)
{
        return (size_t)PAGE_SIZE << order;
}

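/*
 * Build the request map for one hardware queue: allocate tags, carve
 * requests out of page-sized chunks (falling back to smaller allocation
 * orders as needed), and let the driver initialize each request.
 */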
1591 static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
1592                 unsigned int hctx_idx)
1593 {
1594         struct blk_mq_tags *tags;
1595         unsigned int i, j, entries_per_page, max_order = 4;
1596         size_t rq_size, left;
1597
1598         tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
1599                                 set->numa_node,
1600                                 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
1601         if (!tags)
1602                 return NULL;
1603
1604         INIT_LIST_HEAD(&tags->page_list);
1605
1606         tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
1607                                  GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1608                                  set->numa_node);
1609         if (!tags->rqs) {
1610                 blk_mq_free_tags(tags);
1611                 return NULL;
1612         }
1613
1614         /*
1615          * rq_size is the size of the request plus driver payload, rounded
1616          * to the cacheline size
1617          */
1618         rq_size = round_up(sizeof(struct request) + set->cmd_size,
1619                                 cache_line_size());
1620         left = rq_size * set->queue_depth;
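
        /*
         * Worked example (hypothetical numbers): with a 376-byte struct
         * request, cmd_size == 64 and 64-byte cache lines, rq_size rounds
         * up to 448, so a queue_depth of 128 needs left == 57344 bytes
         * spread across the pages allocated below.
         */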
1621
1622         for (i = 0; i < set->queue_depth; ) {
1623                 int this_order = max_order;
1624                 struct page *page;
1625                 int to_do;
1626                 void *p;
1627
1628                 while (this_order && left < order_to_size(this_order - 1))
1629                         this_order--;
1630
1631                 do {
1632                         page = alloc_pages_node(set->numa_node,
1633                                 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
1634                                 this_order);
1635                         if (page)
1636                                 break;
1637                         if (!this_order--)
1638                                 break;
1639                         if (order_to_size(this_order) < rq_size)
1640                                 break;
1641                 } while (1);
1642
1643                 if (!page)
1644                         goto fail;
1645
1646                 page->private = this_order;
1647                 list_add_tail(&page->lru, &tags->page_list);
1648
1649                 p = page_address(page);
1650                 /*
1651                  * Allow kmemleak to scan these pages as they contain pointers
1652          * to additional allocations made via ops->init_request().
1653                  */
1654                 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
1655                 entries_per_page = order_to_size(this_order) / rq_size;
1656                 to_do = min(entries_per_page, set->queue_depth - i);
1657                 left -= to_do * rq_size;
1658                 for (j = 0; j < to_do; j++) {
1659                         tags->rqs[i] = p;
1660                         if (set->ops->init_request) {
1661                                 if (set->ops->init_request(set->driver_data,
1662                                                 tags->rqs[i], hctx_idx, i,
1663                                                 set->numa_node)) {
1664                                         tags->rqs[i] = NULL;
1665                                         goto fail;
1666                                 }
1667                         }
1668
1669                         p += rq_size;
1670                         i++;
1671                 }
1672         }
1673         return tags;
1674
1675 fail:
1676         blk_mq_free_rq_map(set, tags, hctx_idx);
1677         return NULL;
1678 }
1679
1680 /*
1681  * 'cpu' is going away. Splice any existing rq_list entries from this
1682  * software queue to the hw queue dispatch list, and ensure that it
1683  * gets run.
1684  */
1685 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
1686 {
1687         struct blk_mq_hw_ctx *hctx;
1688         struct blk_mq_ctx *ctx;
1689         LIST_HEAD(tmp);
1690
1691         hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
1692         ctx = __blk_mq_get_ctx(hctx->queue, cpu);
1693
1694         spin_lock(&ctx->lock);
1695         if (!list_empty(&ctx->rq_list)) {
1696                 list_splice_init(&ctx->rq_list, &tmp);
1697                 blk_mq_hctx_clear_pending(hctx, ctx);
1698         }
1699         spin_unlock(&ctx->lock);
1700
1701         if (list_empty(&tmp))
1702                 return 0;
1703
1704         spin_lock(&hctx->lock);
1705         list_splice_tail_init(&tmp, &hctx->dispatch);
1706         spin_unlock(&hctx->lock);
1707
1708         blk_mq_run_hw_queue(hctx, true);
1709         return 0;
1710 }
1711
1712 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
1713 {
1714         cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
1715                                             &hctx->cpuhp_dead);
1716 }
1717
1718 /* hctx->ctxs will be freed in queue's release handler */
1719 static void blk_mq_exit_hctx(struct request_queue *q,
1720                 struct blk_mq_tag_set *set,
1721                 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1722 {
1723         unsigned flush_start_tag = set->queue_depth;
1724
1725         blk_mq_tag_idle(hctx);
1726
1727         if (set->ops->exit_request)
1728                 set->ops->exit_request(set->driver_data,
1729                                        hctx->fq->flush_rq, hctx_idx,
1730                                        flush_start_tag + hctx_idx);
1731
1732         if (set->ops->exit_hctx)
1733                 set->ops->exit_hctx(hctx, hctx_idx);
1734
1735         if (hctx->flags & BLK_MQ_F_BLOCKING)
1736                 cleanup_srcu_struct(&hctx->queue_rq_srcu);
1737
1738         blk_mq_remove_cpuhp(hctx);
1739         blk_free_flush_queue(hctx->fq);
1740         sbitmap_free(&hctx->ctx_map);
1741 }
1742
1743 static void blk_mq_exit_hw_queues(struct request_queue *q,
1744                 struct blk_mq_tag_set *set, int nr_queue)
1745 {
1746         struct blk_mq_hw_ctx *hctx;
1747         unsigned int i;
1748
1749         queue_for_each_hw_ctx(q, hctx, i) {
1750                 if (i == nr_queue)
1751                         break;
1752                 blk_mq_exit_hctx(q, set, hctx, i);
1753         }
1754 }
1755
1756 static void blk_mq_free_hw_queues(struct request_queue *q,
1757                 struct blk_mq_tag_set *set)
1758 {
1759         struct blk_mq_hw_ctx *hctx;
1760         unsigned int i;
1761
1762         queue_for_each_hw_ctx(q, hctx, i)
1763                 free_cpumask_var(hctx->cpumask);
1764 }
1765
1766 static int blk_mq_init_hctx(struct request_queue *q,
1767                 struct blk_mq_tag_set *set,
1768                 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1769 {
1770         int node;
1771         unsigned flush_start_tag = set->queue_depth;
1772
1773         node = hctx->numa_node;
1774         if (node == NUMA_NO_NODE)
1775                 node = hctx->numa_node = set->numa_node;
1776
1777         INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
1778         INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1779         spin_lock_init(&hctx->lock);
1780         INIT_LIST_HEAD(&hctx->dispatch);
1781         hctx->queue = q;
1782         hctx->queue_num = hctx_idx;
1783         hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
1784
1785         cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
1786
1787         hctx->tags = set->tags[hctx_idx];
1788
1789         /*
1790          * Allocate space for all possible cpus to avoid allocation at
1791          * runtime
1792          */
1793         hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1794                                         GFP_KERNEL, node);
1795         if (!hctx->ctxs)
1796                 goto unregister_cpu_notifier;
1797
1798         if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
1799                               node))
1800                 goto free_ctxs;
1801
1802         hctx->nr_ctx = 0;
1803
1804         if (set->ops->init_hctx &&
1805             set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1806                 goto free_bitmap;
1807
1808         hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
1809         if (!hctx->fq)
1810                 goto exit_hctx;
1811
1812         if (set->ops->init_request &&
1813             set->ops->init_request(set->driver_data,
1814                                    hctx->fq->flush_rq, hctx_idx,
1815                                    flush_start_tag + hctx_idx, node))
1816                 goto free_fq;
1817
1818         if (hctx->flags & BLK_MQ_F_BLOCKING)
1819                 init_srcu_struct(&hctx->queue_rq_srcu);
1820
1821         return 0;
1822
1823  free_fq:
1824         kfree(hctx->fq);
1825  exit_hctx:
1826         if (set->ops->exit_hctx)
1827                 set->ops->exit_hctx(hctx, hctx_idx);
1828  free_bitmap:
1829         sbitmap_free(&hctx->ctx_map);
1830  free_ctxs:
1831         kfree(hctx->ctxs);
1832  unregister_cpu_notifier:
1833         blk_mq_remove_cpuhp(hctx);
1834         return -1;
1835 }
1836
1837 static void blk_mq_init_cpu_queues(struct request_queue *q,
1838                                    unsigned int nr_hw_queues)
1839 {
1840         unsigned int i;
1841
1842         for_each_possible_cpu(i) {
1843                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1844                 struct blk_mq_hw_ctx *hctx;
1845
1846                 memset(__ctx, 0, sizeof(*__ctx));
1847                 __ctx->cpu = i;
1848                 spin_lock_init(&__ctx->lock);
1849                 INIT_LIST_HEAD(&__ctx->rq_list);
1850                 __ctx->queue = q;
1851                 blk_stat_init(&__ctx->stat[BLK_STAT_READ]);
1852                 blk_stat_init(&__ctx->stat[BLK_STAT_WRITE]);
1853
1854                 /* If the cpu isn't online, it is mapped to the first hctx */
1855                 if (!cpu_online(i))
1856                         continue;
1857
1858                 hctx = blk_mq_map_queue(q, i);
1859
1860                 /*
1861                  * Set local node, IFF we have more than one hw queue. If
1862                  * not, we remain on the home node of the device
1863                  */
1864                 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1865                         hctx->numa_node = local_memory_node(cpu_to_node(i));
1866         }
1867 }
1868
1869 static void blk_mq_map_swqueue(struct request_queue *q,
1870                                const struct cpumask *online_mask)
1871 {
1872         unsigned int i, hctx_idx;
1873         struct blk_mq_hw_ctx *hctx;
1874         struct blk_mq_ctx *ctx;
1875         struct blk_mq_tag_set *set = q->tag_set;
1876
1877         /*
1878          * Avoid others reading incomplete hctx->cpumask through sysfs
1879          */
1880         mutex_lock(&q->sysfs_lock);
1881
1882         queue_for_each_hw_ctx(q, hctx, i) {
1883                 cpumask_clear(hctx->cpumask);
1884                 hctx->nr_ctx = 0;
1885         }
1886
1887         /*
1888          * Map software to hardware queues
1889          */
1890         for_each_possible_cpu(i) {
1891                 /* If the cpu isn't online, it is mapped to the first hctx */
1892                 if (!cpumask_test_cpu(i, online_mask))
1893                         continue;
1894
1895                 hctx_idx = q->mq_map[i];
1896                 /* an unmapped hw queue can be remapped after the CPU topology changes */
1897                 if (!set->tags[hctx_idx]) {
1898                         set->tags[hctx_idx] = blk_mq_init_rq_map(set, hctx_idx);
1899
1900                         /*
1901                          * If tags initialization fails for some hctx,
1902                          * that hctx won't be brought online.  In this
1903                          * case, remap the current ctx to hctx[0] which
1904                          * is guaranteed to always have tags allocated
1905                          */
1906                         if (!set->tags[hctx_idx])
1907                                 q->mq_map[i] = 0;
1908                 }
1909
1910                 ctx = per_cpu_ptr(q->queue_ctx, i);
1911                 hctx = blk_mq_map_queue(q, i);
1912
1913                 cpumask_set_cpu(i, hctx->cpumask);
1914                 ctx->index_hw = hctx->nr_ctx;
1915                 hctx->ctxs[hctx->nr_ctx++] = ctx;
1916         }
1917
1918         mutex_unlock(&q->sysfs_lock);
1919
1920         queue_for_each_hw_ctx(q, hctx, i) {
1921                 /*
1922                  * If no software queues are mapped to this hardware queue,
1923                  * disable it and free the request entries.
1924                  */
1925                 if (!hctx->nr_ctx) {
1926                         /* Never unmap queue 0.  We need it as a
1927                          * fallback in case a new remap fails
1928                          * allocation
1929                          */
1930                         if (i && set->tags[i]) {
1931                                 blk_mq_free_rq_map(set, set->tags[i], i);
1932                                 set->tags[i] = NULL;
1933                         }
1934                         hctx->tags = NULL;
1935                         continue;
1936                 }
1937
1938                 hctx->tags = set->tags[i];
1939                 WARN_ON(!hctx->tags);
1940
1941                 /*
1942                  * Set the map size to the number of mapped software queues.
1943                  * This is more accurate and more efficient than looping
1944                  * over all possibly mapped software queues.
1945                  */
1946                 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
1947
1948                 /*
1949          * Initialize batch round-robin counts
1950                  */
1951                 hctx->next_cpu = cpumask_first(hctx->cpumask);
1952                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1953         }
1954 }
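
/*
 * Worked example (hypothetical topology): with 4 online CPUs and 2 hw
 * queues, blk_mq_map_queues() typically yields mq_map = {0, 0, 1, 1};
 * CPUs 0-1 become ctxs 0 and 1 of hctx0, CPUs 2-3 become ctxs 0 and 1
 * of hctx1, and each hctx's ctx_map is resized to nr_ctx == 2.
 */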
1955
1956 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
1957 {
1958         struct blk_mq_hw_ctx *hctx;
1959         int i;
1960
1961         queue_for_each_hw_ctx(q, hctx, i) {
1962                 if (shared)
1963                         hctx->flags |= BLK_MQ_F_TAG_SHARED;
1964                 else
1965                         hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1966         }
1967 }
1968
1969 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
1970 {
1971         struct request_queue *q;
1972
1973         list_for_each_entry(q, &set->tag_list, tag_set_list) {
1974                 blk_mq_freeze_queue(q);
1975                 queue_set_hctx_shared(q, shared);
1976                 blk_mq_unfreeze_queue(q);
1977         }
1978 }
1979
1980 static void blk_mq_del_queue_tag_set(struct request_queue *q)
1981 {
1982         struct blk_mq_tag_set *set = q->tag_set;
1983
1984         mutex_lock(&set->tag_list_lock);
1985         list_del_init(&q->tag_set_list);
1986         if (list_is_singular(&set->tag_list)) {
1987                 /* just transitioned to unshared */
1988                 set->flags &= ~BLK_MQ_F_TAG_SHARED;
1989                 /* update existing queue */
1990                 blk_mq_update_tag_set_depth(set, false);
1991         }
1992         mutex_unlock(&set->tag_list_lock);
1993 }
1994
1995 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1996                                      struct request_queue *q)
1997 {
1998         q->tag_set = set;
1999
2000         mutex_lock(&set->tag_list_lock);
2001
2002         /* Check to see if we're transitioning to shared (from 1 to 2 queues). */
2003         if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
2004                 set->flags |= BLK_MQ_F_TAG_SHARED;
2005                 /* update existing queue */
2006                 blk_mq_update_tag_set_depth(set, true);
2007         }
2008         if (set->flags & BLK_MQ_F_TAG_SHARED)
2009                 queue_set_hctx_shared(q, true);
2010         list_add_tail(&q->tag_set_list, &set->tag_list);
2011
2012         mutex_unlock(&set->tag_list_lock);
2013 }
2014
2015 /*
2016  * This is the actual release handler for mq, but we do it from the
2017  * request queue's release handler to avoid a use-after-free. It is a
2018  * headache because q->mq_kobj shouldn't have been introduced, but we
2019  * can't group the ctx/hctx kobjects without it.
2020  */
2021 void blk_mq_release(struct request_queue *q)
2022 {
2023         struct blk_mq_hw_ctx *hctx;
2024         unsigned int i;
2025
2026         /* hctx kobj stays in hctx */
2027         queue_for_each_hw_ctx(q, hctx, i) {
2028                 if (!hctx)
2029                         continue;
2030                 kfree(hctx->ctxs);
2031                 kfree(hctx);
2032         }
2033
2034         q->mq_map = NULL;
2035
2036         kfree(q->queue_hw_ctx);
2037
2038         /* ctx kobj stays in queue_ctx */
2039         free_percpu(q->queue_ctx);
2040 }
2041
2042 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
2043 {
2044         struct request_queue *uninit_q, *q;
2045
2046         uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
2047         if (!uninit_q)
2048                 return ERR_PTR(-ENOMEM);
2049
2050         q = blk_mq_init_allocated_queue(set, uninit_q);
2051         if (IS_ERR(q))
2052                 blk_cleanup_queue(uninit_q);
2053
2054         return q;
2055 }
2056 EXPORT_SYMBOL(blk_mq_init_queue);
2057
2058 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2059                                                 struct request_queue *q)
2060 {
2061         int i, j;
2062         struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
2063
2064         blk_mq_sysfs_unregister(q);
2065         for (i = 0; i < set->nr_hw_queues; i++) {
2066                 int node;
2067
2068                 if (hctxs[i])
2069                         continue;
2070
2071                 node = blk_mq_hw_queue_to_node(q->mq_map, i);
2072                 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
2073                                         GFP_KERNEL, node);
2074                 if (!hctxs[i])
2075                         break;
2076
2077                 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
2078                                                 node)) {
2079                         kfree(hctxs[i]);
2080                         hctxs[i] = NULL;
2081                         break;
2082                 }
2083
2084                 atomic_set(&hctxs[i]->nr_active, 0);
2085                 hctxs[i]->numa_node = node;
2086                 hctxs[i]->queue_num = i;
2087
2088                 if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
2089                         free_cpumask_var(hctxs[i]->cpumask);
2090                         kfree(hctxs[i]);
2091                         hctxs[i] = NULL;
2092                         break;
2093                 }
2094                 blk_mq_hctx_kobj_init(hctxs[i]);
2095         }
2096         for (j = i; j < q->nr_hw_queues; j++) {
2097                 struct blk_mq_hw_ctx *hctx = hctxs[j];
2098
2099                 if (hctx) {
2100                         if (hctx->tags) {
2101                                 blk_mq_free_rq_map(set, hctx->tags, j);
2102                                 set->tags[j] = NULL;
2103                         }
2104                         blk_mq_exit_hctx(q, set, hctx, j);
2105                         free_cpumask_var(hctx->cpumask);
2106                         kobject_put(&hctx->kobj);
2107                         kfree(hctx->ctxs);
2108                         kfree(hctx);
2109                         hctxs[j] = NULL;
2110
2111                 }
2112         }
2113         q->nr_hw_queues = i;
2114         blk_mq_sysfs_register(q);
2115 }
2116
2117 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2118                                                   struct request_queue *q)
2119 {
2120         /* mark the queue as mq asap */
2121         q->mq_ops = set->ops;
2122
2123         q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2124         if (!q->queue_ctx)
2125                 goto err_exit;
2126
2127         q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
2128                                                 GFP_KERNEL, set->numa_node);
2129         if (!q->queue_hw_ctx)
2130                 goto err_percpu;
2131
2132         q->mq_map = set->mq_map;
2133
2134         blk_mq_realloc_hw_ctxs(set, q);
2135         if (!q->nr_hw_queues)
2136                 goto err_hctxs;
2137
2138         INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
2139         blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
2140
2141         q->nr_queues = nr_cpu_ids;
2142
2143         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
2144
2145         if (!(set->flags & BLK_MQ_F_SG_MERGE))
2146                 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
2147
2148         q->sg_reserved_size = INT_MAX;
2149
2150         INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
2151         INIT_LIST_HEAD(&q->requeue_list);
2152         spin_lock_init(&q->requeue_lock);
2153
2154         if (q->nr_hw_queues > 1)
2155                 blk_queue_make_request(q, blk_mq_make_request);
2156         else
2157                 blk_queue_make_request(q, blk_sq_make_request);
2158
2159         /*
2160          * Do this after blk_queue_make_request() overrides it...
2161          */
2162         q->nr_requests = set->queue_depth;
2163
2164         /*
2165          * Default to classic polling
2166          */
2167         q->poll_nsec = -1;
2168
2169         if (set->ops->complete)
2170                 blk_queue_softirq_done(q, set->ops->complete);
2171
2172         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
2173
2174         get_online_cpus();
2175         mutex_lock(&all_q_mutex);
2176
2177         list_add_tail(&q->all_q_node, &all_q_list);
2178         blk_mq_add_queue_tag_set(set, q);
2179         blk_mq_map_swqueue(q, cpu_online_mask);
2180
2181         mutex_unlock(&all_q_mutex);
2182         put_online_cpus();
2183
2184         return q;
2185
2186 err_hctxs:
2187         kfree(q->queue_hw_ctx);
2188 err_percpu:
2189         free_percpu(q->queue_ctx);
2190 err_exit:
2191         q->mq_ops = NULL;
2192         return ERR_PTR(-ENOMEM);
2193 }
2194 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
2195
2196 void blk_mq_free_queue(struct request_queue *q)
2197 {
2198         struct blk_mq_tag_set   *set = q->tag_set;
2199
2200         mutex_lock(&all_q_mutex);
2201         list_del_init(&q->all_q_node);
2202         mutex_unlock(&all_q_mutex);
2203
2204         wbt_exit(q);
2205
2206         blk_mq_del_queue_tag_set(q);
2207
2208         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2209         blk_mq_free_hw_queues(q, set);
2210 }
2211
2212 /* Basically redo blk_mq_init_queue with queue frozen */
2213 static void blk_mq_queue_reinit(struct request_queue *q,
2214                                 const struct cpumask *online_mask)
2215 {
2216         WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
2217
2218         blk_mq_sysfs_unregister(q);
2219
2220         /*
2221          * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
2222          * we should change hctx numa_node according to new topology (this
2223          * involves freeing and re-allocating memory; worth doing?)
2224          */
2225
2226         blk_mq_map_swqueue(q, online_mask);
2227
2228         blk_mq_sysfs_register(q);
2229 }
2230
2231 /*
2232  * New online cpumask which is going to be set in this hotplug event.
2233  * Declare this cpumask as global, as cpu-hotplug operations are invoked
2234  * one-by-one and dynamically allocating it could fail.
2235  */
2236 static struct cpumask cpuhp_online_new;
2237
2238 static void blk_mq_queue_reinit_work(void)
2239 {
2240         struct request_queue *q;
2241
2242         mutex_lock(&all_q_mutex);
2243         /*
2244          * We need to freeze and reinit all existing queues.  Freezing
2245          * involves synchronous wait for an RCU grace period and doing it
2246          * one by one may take a long time.  Start freezing all queues in
2247          * one swoop and then wait for the completions so that freezing can
2248          * take place in parallel.
2249          */
2250         list_for_each_entry(q, &all_q_list, all_q_node)
2251                 blk_mq_freeze_queue_start(q);
2252         list_for_each_entry(q, &all_q_list, all_q_node)
2253                 blk_mq_freeze_queue_wait(q);
2254
2255         list_for_each_entry(q, &all_q_list, all_q_node)
2256                 blk_mq_queue_reinit(q, &cpuhp_online_new);
2257
2258         list_for_each_entry(q, &all_q_list, all_q_node)
2259                 blk_mq_unfreeze_queue(q);
2260
2261         mutex_unlock(&all_q_mutex);
2262 }
2263
2264 static int blk_mq_queue_reinit_dead(unsigned int cpu)
2265 {
2266         cpumask_copy(&cpuhp_online_new, cpu_online_mask);
2267         blk_mq_queue_reinit_work();
2268         return 0;
2269 }
2270
2271 /*
2272  * Before a hotadded cpu starts handling requests, new mappings must be
2273  * established.  Otherwise, requests in the hw queue might never be
2274  * dispatched.
2275  *
2276  * For example, there is a single hw queue (hctx) and two CPU queues (ctx0
2277  * for CPU0, and ctx1 for CPU1).
2278  *
2279  * Now CPU1 is just onlined and a request is inserted into ctx1->rq_list,
2280  * setting bit0 in the pending bitmap, as ctx1->index_hw is still zero.
2281  *
2282  * Then, while running the hw queue, flush_busy_ctxs() finds bit0 set in
2283  * the pending bitmap and tries to retrieve requests from
2284  * hctx->ctxs[0]->rq_list. But hctx->ctxs[0] is a pointer to ctx0, so the
2285  * request in ctx1->rq_list is ignored.
2286  */
2287 static int blk_mq_queue_reinit_prepare(unsigned int cpu)
2288 {
2289         cpumask_copy(&cpuhp_online_new, cpu_online_mask);
2290         cpumask_set_cpu(cpu, &cpuhp_online_new);
2291         blk_mq_queue_reinit_work();
2292         return 0;
2293 }
2294
2295 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2296 {
2297         int i;
2298
2299         for (i = 0; i < set->nr_hw_queues; i++) {
2300                 set->tags[i] = blk_mq_init_rq_map(set, i);
2301                 if (!set->tags[i])
2302                         goto out_unwind;
2303         }
2304
2305         return 0;
2306
2307 out_unwind:
2308         while (--i >= 0)
2309                 blk_mq_free_rq_map(set, set->tags[i], i);
2310
2311         return -ENOMEM;
2312 }
2313
2314 /*
2315  * Allocate the request maps associated with this tag_set. Note that this
2316  * may reduce the depth asked for, if memory is tight. set->queue_depth
2317  * will be updated to reflect the allocated depth.
2318  */
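/*
 * Example (hypothetical numbers): a request for queue_depth 1024 whose
 * allocation fails is retried at 512, 256, ..., giving up once the depth
 * would fall below set->reserved_tags + BLK_MQ_TAG_MIN.
 */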
2319 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2320 {
2321         unsigned int depth;
2322         int err;
2323
2324         depth = set->queue_depth;
2325         do {
2326                 err = __blk_mq_alloc_rq_maps(set);
2327                 if (!err)
2328                         break;
2329
2330                 set->queue_depth >>= 1;
2331                 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2332                         err = -ENOMEM;
2333                         break;
2334                 }
2335         } while (set->queue_depth);
2336
2337         if (!set->queue_depth || err) {
2338                 pr_err("blk-mq: failed to allocate request map\n");
2339                 return -ENOMEM;
2340         }
2341
2342         if (depth != set->queue_depth)
2343                 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2344                                                 depth, set->queue_depth);
2345
2346         return 0;
2347 }
2348
2349 /*
2350  * Alloc a tag set to be associated with one or more request queues.
2351  * May fail with EINVAL for various error conditions. May adjust the
2352  * requested depth down if it is too large. In that case, the set
2353  * value will be stored in set->queue_depth.
2354  */
2355 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2356 {
2357         int ret;
2358
2359         BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2360
2361         if (!set->nr_hw_queues)
2362                 return -EINVAL;
2363         if (!set->queue_depth)
2364                 return -EINVAL;
2365         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2366                 return -EINVAL;
2367
2368         if (!set->ops->queue_rq)
2369                 return -EINVAL;
2370
2371         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2372                 pr_info("blk-mq: reduced tag depth to %u\n",
2373                         BLK_MQ_MAX_DEPTH);
2374                 set->queue_depth = BLK_MQ_MAX_DEPTH;
2375         }
2376
2377         /*
2378          * If a crashdump is active, then we are potentially in a very
2379          * memory constrained environment. Limit us to 1 queue and
2380          * 64 tags to prevent using too much memory.
2381          */
2382         if (is_kdump_kernel()) {
2383                 set->nr_hw_queues = 1;
2384                 set->queue_depth = min(64U, set->queue_depth);
2385         }
2386         /*
2387          * There is no use for more h/w queues than cpus.
2388          */
2389         if (set->nr_hw_queues > nr_cpu_ids)
2390                 set->nr_hw_queues = nr_cpu_ids;
2391
2392         set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
2393                                  GFP_KERNEL, set->numa_node);
2394         if (!set->tags)
2395                 return -ENOMEM;
2396
2397         ret = -ENOMEM;
2398         set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
2399                         GFP_KERNEL, set->numa_node);
2400         if (!set->mq_map)
2401                 goto out_free_tags;
2402
2403         if (set->ops->map_queues)
2404                 ret = set->ops->map_queues(set);
2405         else
2406                 ret = blk_mq_map_queues(set);
2407         if (ret)
2408                 goto out_free_mq_map;
2409
2410         ret = blk_mq_alloc_rq_maps(set);
2411         if (ret)
2412                 goto out_free_mq_map;
2413
2414         mutex_init(&set->tag_list_lock);
2415         INIT_LIST_HEAD(&set->tag_list);
2416
2417         return 0;
2418
2419 out_free_mq_map:
2420         kfree(set->mq_map);
2421         set->mq_map = NULL;
2422 out_free_tags:
2423         kfree(set->tags);
2424         set->tags = NULL;
2425         return ret;
2426 }
2427 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
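
/*
 * Illustrative sketch, not part of this file: minimal driver-side usage
 * of the tag-set API above. The field values are hypothetical; a real
 * driver must at least provide ->queue_rq in its blk_mq_ops.
 */
static struct request_queue *example_setup_queue(struct blk_mq_tag_set *set,
						 struct blk_mq_ops *ops,
						 void *driver_data)
{
	struct request_queue *q;

	memset(set, 0, sizeof(*set));
	set->ops = ops;				/* ->queue_rq is mandatory */
	set->nr_hw_queues = 1;			/* clamped to nr_cpu_ids above */
	set->queue_depth = 128;			/* may be reduced under memory pressure */
	set->numa_node = NUMA_NO_NODE;
	set->cmd_size = 0;			/* per-request driver payload */
	set->flags = BLK_MQ_F_SHOULD_MERGE;
	set->driver_data = driver_data;

	if (blk_mq_alloc_tag_set(set))
		return ERR_PTR(-ENOMEM);

	q = blk_mq_init_queue(set);
	if (IS_ERR(q))
		blk_mq_free_tag_set(set);
	return q;
}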
2428
2429 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2430 {
2431         int i;
2432
2433         for (i = 0; i < nr_cpu_ids; i++) {
2434                 if (set->tags[i])
2435                         blk_mq_free_rq_map(set, set->tags[i], i);
2436         }
2437
2438         kfree(set->mq_map);
2439         set->mq_map = NULL;
2440
2441         kfree(set->tags);
2442         set->tags = NULL;
2443 }
2444 EXPORT_SYMBOL(blk_mq_free_tag_set);
2445
2446 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2447 {
2448         struct blk_mq_tag_set *set = q->tag_set;
2449         struct blk_mq_hw_ctx *hctx;
2450         int i, ret;
2451
2452         if (!set || nr > set->queue_depth)
2453                 return -EINVAL;
2454
2455         ret = 0;
2456         queue_for_each_hw_ctx(q, hctx, i) {
2457                 if (!hctx->tags)
2458                         continue;
2459                 ret = blk_mq_tag_update_depth(hctx->tags, nr);
2460                 if (ret)
2461                         break;
2462         }
2463
2464         if (!ret)
2465                 q->nr_requests = nr;
2466
2467         return ret;
2468 }
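
/*
 * Reached from sysfs, e.g. "echo 64 > /sys/block/<dev>/queue/nr_requests";
 * note that the new depth cannot exceed the tag set's original
 * set->queue_depth.
 */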
2469
2470 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
2471 {
2472         struct request_queue *q;
2473
2474         if (nr_hw_queues > nr_cpu_ids)
2475                 nr_hw_queues = nr_cpu_ids;
2476         if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
2477                 return;
2478
2479         list_for_each_entry(q, &set->tag_list, tag_set_list)
2480                 blk_mq_freeze_queue(q);
2481
2482         set->nr_hw_queues = nr_hw_queues;
2483         list_for_each_entry(q, &set->tag_list, tag_set_list) {
2484                 blk_mq_realloc_hw_ctxs(set, q);
2485
2486                 if (q->nr_hw_queues > 1)
2487                         blk_queue_make_request(q, blk_mq_make_request);
2488                 else
2489                         blk_queue_make_request(q, blk_sq_make_request);
2490
2491                 blk_mq_queue_reinit(q, cpu_online_mask);
2492         }
2493
2494         list_for_each_entry(q, &set->tag_list, tag_set_list)
2495                 blk_mq_unfreeze_queue(q);
2496 }
2497 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
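
/*
 * Typical caller sketch (hypothetical driver context): after a controller
 * reset renegotiates its interrupt vectors, a driver calls
 *
 *	blk_mq_update_nr_hw_queues(&dev->tagset, new_nr_vecs);
 *
 * and every queue in the set is frozen, reallocated and remapped above.
 */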
2498
2499 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
2500                                        struct blk_mq_hw_ctx *hctx,
2501                                        struct request *rq)
2502 {
2503         struct blk_rq_stat stat[2];
2504         unsigned long ret = 0;
2505
2506         /*
2507          * If stats collection isn't on, don't sleep but turn it on for
2508          * future users
2509          */
2510         if (!blk_stat_enable(q))
2511                 return 0;
2512
2513         /*
2514          * We don't have to do this once per IO, should optimize this
2515          * to just use the current window of stats until it changes
2516          */
2517         memset(&stat, 0, sizeof(stat));
2518         blk_hctx_stat_get(hctx, stat);
2519
2520         /*
2521          * As an optimistic guess, use half of the mean service time
2522          * for this type of request. We can (and should) make this smarter.
2523          * For instance, if the completion latencies are tight, we can
2524          * get closer than just half the mean. This is especially
2525          * important on devices where the completion latencies are longer
2526          * than ~10 usec.
2527          */
2528         if (req_op(rq) == REQ_OP_READ && stat[BLK_STAT_READ].nr_samples)
2529                 ret = (stat[BLK_STAT_READ].mean + 1) / 2;
2530         else if (req_op(rq) == REQ_OP_WRITE && stat[BLK_STAT_WRITE].nr_samples)
2531                 ret = (stat[BLK_STAT_WRITE].mean + 1) / 2;
2532
2533         return ret;
2534 }
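
/*
 * Worked example (hypothetical numbers): a mean read completion time of
 * 8000ns above yields a pre-sleep target of (8000 + 1) / 2 = 4000ns.
 */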
2535
2536 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
2537                                      struct blk_mq_hw_ctx *hctx,
2538                                      struct request *rq)
2539 {
2540         struct hrtimer_sleeper hs;
2541         enum hrtimer_mode mode;
2542         unsigned int nsecs;
2543         ktime_t kt;
2544
2545         if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags))
2546                 return false;
2547
2548         /*
2549          * poll_nsec can be:
2550          *
2551          * -1:  don't ever hybrid sleep
2552          *  0:  use half of prev avg
2553          * >0:  use this specific value
2554          */
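        /*
         * Illustration (sysfs knob name assumed for this kernel era):
         * "echo 0 > /sys/block/<dev>/queue/io_poll_delay" selects the
         * adaptive estimate computed by blk_mq_poll_nsecs() above.
         */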
2555         if (q->poll_nsec == -1)
2556                 return false;
2557         else if (q->poll_nsec > 0)
2558                 nsecs = q->poll_nsec;
2559         else
2560                 nsecs = blk_mq_poll_nsecs(q, hctx, rq);
2561
2562         if (!nsecs)
2563                 return false;
2564
2565         set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
2566
2567         /*
2568          * This will be replaced with the stats tracking code, using
2569          * 'avg_completion_time / 2' as the pre-sleep target.
2570          */
2571         kt = nsecs;
2572
2573         mode = HRTIMER_MODE_REL;
2574         hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
2575         hrtimer_set_expires(&hs.timer, kt);
2576
2577         hrtimer_init_sleeper(&hs, current);
2578         do {
2579                 if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
2580                         break;
2581                 set_current_state(TASK_UNINTERRUPTIBLE);
2582                 hrtimer_start_expires(&hs.timer, mode);
2583                 if (hs.task)
2584                         io_schedule();
2585                 hrtimer_cancel(&hs.timer);
2586                 mode = HRTIMER_MODE_ABS;
2587         } while (hs.task && !signal_pending(current));
2588
2589         __set_current_state(TASK_RUNNING);
2590         destroy_hrtimer_on_stack(&hs.timer);
2591         return true;
2592 }
2593
2594 static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
2595 {
2596         struct request_queue *q = hctx->queue;
2597         long state;
2598
2599         /*
2600          * If we sleep, have the caller restart the poll loop to reset
2601          * the state. Like for the other success return cases, the
2602          * caller is responsible for checking if the IO completed. If
2603          * the IO isn't complete, we'll get called again and will go
2604          * straight to the busy poll loop.
2605          */
2606         if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
2607                 return true;
2608
2609         hctx->poll_considered++;
2610
2611         state = current->state;
2612         while (!need_resched()) {
2613                 int ret;
2614
2615                 hctx->poll_invoked++;
2616
2617                 ret = q->mq_ops->poll(hctx, rq->tag);
2618                 if (ret > 0) {
2619                         hctx->poll_success++;
2620                         set_current_state(TASK_RUNNING);
2621                         return true;
2622                 }
2623
2624                 if (signal_pending_state(state, current))
2625                         set_current_state(TASK_RUNNING);
2626
2627                 if (current->state == TASK_RUNNING)
2628                         return true;
2629                 if (ret < 0)
2630                         break;
2631                 cpu_relax();
2632         }
2633
2634         return false;
2635 }
2636
2637 bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
2638 {
2639         struct blk_mq_hw_ctx *hctx;
2640         struct blk_plug *plug;
2641         struct request *rq;
2642
2643         if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
2644             !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
2645                 return false;
2646
2647         plug = current->plug;
2648         if (plug)
2649                 blk_flush_plug_list(plug, false);
2650
2651         hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
2652         rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
2653
2654         return __blk_mq_poll(hctx, rq);
2655 }
2656 EXPORT_SYMBOL_GPL(blk_mq_poll);
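
/*
 * Illustrative caller sketch, not part of this file: spin on the cookie
 * returned at submission until the I/O is observed complete, the way a
 * direct-I/O poll loop does. "is_done" and "arg" are hypothetical.
 */
static bool example_poll_for_completion(struct request_queue *q,
					blk_qc_t cookie,
					bool (*is_done)(void *), void *arg)
{
	while (!is_done(arg)) {
		if (!blk_mq_poll(q, cookie))
			return false;	/* give up; wait for the interrupt */
	}
	return true;
}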
2657
2658 void blk_mq_disable_hotplug(void)
2659 {
2660         mutex_lock(&all_q_mutex);
2661 }
2662
2663 void blk_mq_enable_hotplug(void)
2664 {
2665         mutex_unlock(&all_q_mutex);
2666 }
2667
2668 static int __init blk_mq_init(void)
2669 {
2670         cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
2671                                 blk_mq_hctx_notify_dead);
2672
2673         cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare",
2674                                   blk_mq_queue_reinit_prepare,
2675                                   blk_mq_queue_reinit_dead);
2676         return 0;
2677 }
2678 subsys_initcall(blk_mq_init);