block/blk-mq.c
1 /*
2  * Block multiqueue core code
3  *
4  * Copyright (C) 2013-2014 Jens Axboe
5  * Copyright (C) 2013-2014 Christoph Hellwig
6  */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/backing-dev.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/kmemleak.h>
13 #include <linux/mm.h>
14 #include <linux/init.h>
15 #include <linux/slab.h>
16 #include <linux/workqueue.h>
17 #include <linux/smp.h>
18 #include <linux/llist.h>
19 #include <linux/list_sort.h>
20 #include <linux/cpu.h>
21 #include <linux/cache.h>
22 #include <linux/sched/sysctl.h>
23 #include <linux/delay.h>
24 #include <linux/crash_dump.h>
25 #include <linux/prefetch.h>
26
27 #include <trace/events/block.h>
28
29 #include <linux/blk-mq.h>
30 #include "blk.h"
31 #include "blk-mq.h"
32 #include "blk-mq-tag.h"
33 #include "blk-stat.h"
34 #include "blk-wbt.h"
35 #include "blk-mq-sched.h"
36
37 static DEFINE_MUTEX(all_q_mutex);
38 static LIST_HEAD(all_q_list);
39
40 /*
41  * Check if any of the ctx's have pending work in this hardware queue
42  */
43 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
44 {
45         return sbitmap_any_bit_set(&hctx->ctx_map) ||
46                         !list_empty_careful(&hctx->dispatch) ||
47                         blk_mq_sched_has_work(hctx);
48 }
49
50 /*
51  * Mark this ctx as having pending work in this hardware queue
52  */
53 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
54                                      struct blk_mq_ctx *ctx)
55 {
56         if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
57                 sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
58 }
59
60 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
61                                       struct blk_mq_ctx *ctx)
62 {
63         sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
64 }
65
66 void blk_mq_freeze_queue_start(struct request_queue *q)
67 {
68         int freeze_depth;
69
70         freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
71         if (freeze_depth == 1) {
72                 percpu_ref_kill(&q->q_usage_counter);
73                 blk_mq_run_hw_queues(q, false);
74         }
75 }
76 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
77
78 static void blk_mq_freeze_queue_wait(struct request_queue *q)
79 {
80         wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
81 }
82
83 /*
84  * Guarantee no request is in use, so we can change any data structure of
85  * the queue afterward.
86  */
87 void blk_freeze_queue(struct request_queue *q)
88 {
89         /*
90          * In the !blk_mq case we are only calling this to kill the
91          * q_usage_counter, otherwise this increases the freeze depth
92          * and waits for it to return to zero.  For this reason there is
93          * no blk_unfreeze_queue(), and blk_freeze_queue() is not
94          * exported to drivers as the only user for unfreeze is blk_mq.
95          */
96         blk_mq_freeze_queue_start(q);
97         blk_mq_freeze_queue_wait(q);
98 }
99
100 void blk_mq_freeze_queue(struct request_queue *q)
101 {
102         /*
103          * ...just an alias to keep freeze and unfreeze actions balanced
104          * in the blk_mq_* namespace
105          */
106         blk_freeze_queue(q);
107 }
108 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
109
110 void blk_mq_unfreeze_queue(struct request_queue *q)
111 {
112         int freeze_depth;
113
114         freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
115         WARN_ON_ONCE(freeze_depth < 0);
116         if (!freeze_depth) {
117                 percpu_ref_reinit(&q->q_usage_counter);
118                 wake_up_all(&q->mq_freeze_wq);
119         }
120 }
121 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
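/*
 * Illustrative sketch (not part of the original file): a driver that must
 * change queue state without racing against in-flight requests typically
 * brackets the change with a freeze/unfreeze pair.  The helper name and the
 * per-device field below are hypothetical:
 *
 *	static void mydrv_set_write_cache(struct mydrv_dev *dev, bool enable)
 *	{
 *		blk_mq_freeze_queue(dev->queue);
 *		dev->write_cache_enabled = enable;
 *		blk_mq_unfreeze_queue(dev->queue);
 *	}
 *
 * While frozen, q_usage_counter is dead, so no new request can enter the
 * queue and all previously submitted requests have completed.
 */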
122
123 /**
124  * blk_mq_quiesce_queue() - wait until all ongoing queue_rq calls have finished
125  * @q: request queue.
126  *
127  * Note: this function does not prevent the struct request end_io()
128  * callback from being invoked. Additionally, it does not prevent new
129  * queue_rq() calls from occurring unless the queue has been stopped first.
130  */
131 void blk_mq_quiesce_queue(struct request_queue *q)
132 {
133         struct blk_mq_hw_ctx *hctx;
134         unsigned int i;
135         bool rcu = false;
136
137         blk_mq_stop_hw_queues(q);
138
139         queue_for_each_hw_ctx(q, hctx, i) {
140                 if (hctx->flags & BLK_MQ_F_BLOCKING)
141                         synchronize_srcu(&hctx->queue_rq_srcu);
142                 else
143                         rcu = true;
144         }
145         if (rcu)
146                 synchronize_rcu();
147 }
148 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
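/*
 * Illustrative sketch (not part of the original file): quiescing is useful
 * when a driver wants to guarantee that no ->queue_rq() call is running
 * while it swaps a resource the dispatch path looks at.  The names below
 * are hypothetical:
 *
 *	static void mydrv_swap_channel(struct mydrv_dev *dev,
 *				       struct mydrv_chan *new_chan)
 *	{
 *		blk_mq_quiesce_queue(dev->queue);
 *		rcu_assign_pointer(dev->chan, new_chan);
 *		blk_mq_start_stopped_hw_queues(dev->queue, true);
 *	}
 *
 * Note that in this version quiescing stops the hardware queues, so the
 * driver has to restart them afterwards.
 */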
149
150 void blk_mq_wake_waiters(struct request_queue *q)
151 {
152         struct blk_mq_hw_ctx *hctx;
153         unsigned int i;
154
155         queue_for_each_hw_ctx(q, hctx, i)
156                 if (blk_mq_hw_queue_mapped(hctx))
157                         blk_mq_tag_wakeup_all(hctx->tags, true);
158
159         /*
160          * If we are called because the queue has now been marked as
161          * dying, we need to ensure that processes currently waiting on
162          * the queue are notified as well.
163          */
164         wake_up_all(&q->mq_freeze_wq);
165 }
166
167 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
168 {
169         return blk_mq_has_free_tags(hctx->tags);
170 }
171 EXPORT_SYMBOL(blk_mq_can_queue);
172
173 void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
174                         struct request *rq, unsigned int op)
175 {
176         INIT_LIST_HEAD(&rq->queuelist);
177         /* csd/requeue_work/fifo_time is initialized before use */
178         rq->q = q;
179         rq->mq_ctx = ctx;
180         rq->cmd_flags = op;
181         if (blk_queue_io_stat(q))
182                 rq->rq_flags |= RQF_IO_STAT;
183         /* do not touch the atomic flags; they need atomic ops against the timer */
184         rq->cpu = -1;
185         INIT_HLIST_NODE(&rq->hash);
186         RB_CLEAR_NODE(&rq->rb_node);
187         rq->rq_disk = NULL;
188         rq->part = NULL;
189         rq->start_time = jiffies;
190 #ifdef CONFIG_BLK_CGROUP
191         rq->rl = NULL;
192         set_start_time_ns(rq);
193         rq->io_start_time_ns = 0;
194 #endif
195         rq->nr_phys_segments = 0;
196 #if defined(CONFIG_BLK_DEV_INTEGRITY)
197         rq->nr_integrity_segments = 0;
198 #endif
199         rq->special = NULL;
200         /* tag was already set */
201         rq->errors = 0;
202
203         rq->cmd = rq->__cmd;
204
205         rq->extra_len = 0;
206         rq->sense_len = 0;
207         rq->resid_len = 0;
208         rq->sense = NULL;
209
210         INIT_LIST_HEAD(&rq->timeout_list);
211         rq->timeout = 0;
212
213         rq->end_io = NULL;
214         rq->end_io_data = NULL;
215         rq->next_rq = NULL;
216
217         ctx->rq_dispatched[op_is_sync(op)]++;
218 }
219 EXPORT_SYMBOL_GPL(blk_mq_rq_ctx_init);
220
221 struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
222                                        unsigned int op)
223 {
224         struct request *rq;
225         unsigned int tag;
226
227         tag = blk_mq_get_tag(data);
228         if (tag != BLK_MQ_TAG_FAIL) {
229                 struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
230
231                 rq = tags->static_rqs[tag];
232
233                 if (blk_mq_tag_busy(data->hctx)) {
234                         rq->rq_flags = RQF_MQ_INFLIGHT;
235                         atomic_inc(&data->hctx->nr_active);
236                 }
237
238                 if (data->flags & BLK_MQ_REQ_INTERNAL) {
239                         rq->tag = -1;
240                         rq->internal_tag = tag;
241                 } else {
242                         rq->tag = tag;
243                         rq->internal_tag = -1;
244                 }
245
246                 blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
247                 return rq;
248         }
249
250         return NULL;
251 }
252 EXPORT_SYMBOL_GPL(__blk_mq_alloc_request);
253
254 struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
255                 unsigned int flags)
256 {
257         struct blk_mq_alloc_data alloc_data;
258         struct request *rq;
259         int ret;
260
261         ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
262         if (ret)
263                 return ERR_PTR(ret);
264
265         rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
266
267         blk_mq_put_ctx(alloc_data.ctx);
268         blk_queue_exit(q);
269
270         if (!rq)
271                 return ERR_PTR(-EWOULDBLOCK);
272
273         rq->__data_len = 0;
274         rq->__sector = (sector_t) -1;
275         rq->bio = rq->biotail = NULL;
276         return rq;
277 }
278 EXPORT_SYMBOL(blk_mq_alloc_request);
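/*
 * Illustrative sketch (not part of the original file): drivers use
 * blk_mq_alloc_request() for internal or passthrough commands that never
 * came from a bio.  The command setup helper below is hypothetical:
 *
 *	static int mydrv_send_internal_cmd(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		rq = blk_mq_alloc_request(q, REQ_OP_READ, BLK_MQ_REQ_NOWAIT);
 *		if (IS_ERR(rq))
 *			return PTR_ERR(rq);
 *
 *		mydrv_init_internal_cmd(blk_mq_rq_to_pdu(rq));
 *		blk_execute_rq(q, NULL, rq, 0);
 *		blk_mq_free_request(rq);
 *		return 0;
 *	}
 */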
279
280 struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
281                 unsigned int flags, unsigned int hctx_idx)
282 {
283         struct blk_mq_hw_ctx *hctx;
284         struct blk_mq_ctx *ctx;
285         struct request *rq;
286         struct blk_mq_alloc_data alloc_data;
287         int ret;
288
289         /*
290          * If the tag allocator sleeps we could get an allocation for a
291          * different hardware context.  No need to complicate the low level
292          * allocator for this for the rare use case of a command tied to
293          * a specific queue.
294          */
295         if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
296                 return ERR_PTR(-EINVAL);
297
298         if (hctx_idx >= q->nr_hw_queues)
299                 return ERR_PTR(-EIO);
300
301         ret = blk_queue_enter(q, true);
302         if (ret)
303                 return ERR_PTR(ret);
304
305         /*
306          * Check if the hardware context is actually mapped to anything.
307          * If not tell the caller that it should skip this queue.
308          */
309         hctx = q->queue_hw_ctx[hctx_idx];
310         if (!blk_mq_hw_queue_mapped(hctx)) {
311                 ret = -EXDEV;
312                 goto out_queue_exit;
313         }
314         ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));
315
316         blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
317         rq = __blk_mq_alloc_request(&alloc_data, rw);
318         if (!rq) {
319                 ret = -EWOULDBLOCK;
320                 goto out_queue_exit;
321         }
322
323         return rq;
324
325 out_queue_exit:
326         blk_queue_exit(q);
327         return ERR_PTR(ret);
328 }
329 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
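/*
 * Illustrative sketch (not part of the original file): the _hctx variant is
 * for the rare case where a command must be issued on one specific hardware
 * queue; the caller has to pass BLK_MQ_REQ_NOWAIT, as enforced above.  The
 * qid variable below is hypothetical:
 *
 *	rq = blk_mq_alloc_request_hctx(q, REQ_OP_READ,
 *				       BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED,
 *				       qid);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 */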
330
331 void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
332                              struct request *rq)
333 {
334         const int sched_tag = rq->internal_tag;
335         struct request_queue *q = rq->q;
336
337         if (rq->rq_flags & RQF_MQ_INFLIGHT)
338                 atomic_dec(&hctx->nr_active);
339
340         wbt_done(q->rq_wb, &rq->issue_stat);
341         rq->rq_flags = 0;
342
343         clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
344         clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
345         if (rq->tag != -1)
346                 blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
347         if (sched_tag != -1)
348                 blk_mq_sched_completed_request(hctx, rq);
349         blk_queue_exit(q);
350 }
351
352 static void blk_mq_finish_hctx_request(struct blk_mq_hw_ctx *hctx,
353                                      struct request *rq)
354 {
355         struct blk_mq_ctx *ctx = rq->mq_ctx;
356
357         ctx->rq_completed[rq_is_sync(rq)]++;
358         __blk_mq_finish_request(hctx, ctx, rq);
359 }
360
361 void blk_mq_finish_request(struct request *rq)
362 {
363         blk_mq_finish_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
364 }
365
366 void blk_mq_free_request(struct request *rq)
367 {
368         blk_mq_sched_put_request(rq);
369 }
370 EXPORT_SYMBOL_GPL(blk_mq_free_request);
371
372 inline void __blk_mq_end_request(struct request *rq, int error)
373 {
374         blk_account_io_done(rq);
375
376         if (rq->end_io) {
377                 wbt_done(rq->q->rq_wb, &rq->issue_stat);
378                 rq->end_io(rq, error);
379         } else {
380                 if (unlikely(blk_bidi_rq(rq)))
381                         blk_mq_free_request(rq->next_rq);
382                 blk_mq_free_request(rq);
383         }
384 }
385 EXPORT_SYMBOL(__blk_mq_end_request);
386
387 void blk_mq_end_request(struct request *rq, int error)
388 {
389         if (blk_update_request(rq, error, blk_rq_bytes(rq)))
390                 BUG();
391         __blk_mq_end_request(rq, error);
392 }
393 EXPORT_SYMBOL(blk_mq_end_request);
394
395 static void __blk_mq_complete_request_remote(void *data)
396 {
397         struct request *rq = data;
398
399         rq->q->softirq_done_fn(rq);
400 }
401
402 static void blk_mq_ipi_complete_request(struct request *rq)
403 {
404         struct blk_mq_ctx *ctx = rq->mq_ctx;
405         bool shared = false;
406         int cpu;
407
408         if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
409                 rq->q->softirq_done_fn(rq);
410                 return;
411         }
412
413         cpu = get_cpu();
414         if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
415                 shared = cpus_share_cache(cpu, ctx->cpu);
416
417         if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
418                 rq->csd.func = __blk_mq_complete_request_remote;
419                 rq->csd.info = rq;
420                 rq->csd.flags = 0;
421                 smp_call_function_single_async(ctx->cpu, &rq->csd);
422         } else {
423                 rq->q->softirq_done_fn(rq);
424         }
425         put_cpu();
426 }
427
428 static void blk_mq_stat_add(struct request *rq)
429 {
430         if (rq->rq_flags & RQF_STATS) {
431                 /*
432                  * We could use rq->mq_ctx here, but there's less of a risk
433                  * of races if we have the completion event add the stats
434                  * to the local software queue.
435                  */
436                 struct blk_mq_ctx *ctx;
437
438                 ctx = __blk_mq_get_ctx(rq->q, raw_smp_processor_id());
439                 blk_stat_add(&ctx->stat[rq_data_dir(rq)], rq);
440         }
441 }
442
443 static void __blk_mq_complete_request(struct request *rq)
444 {
445         struct request_queue *q = rq->q;
446
447         blk_mq_stat_add(rq);
448
449         if (!q->softirq_done_fn)
450                 blk_mq_end_request(rq, rq->errors);
451         else
452                 blk_mq_ipi_complete_request(rq);
453 }
454
455 /**
456  * blk_mq_complete_request - end I/O on a request
457  * @rq:         the request being processed
458  *
459  * Description:
460  *      Ends all I/O on a request. It does not handle partial completions.
461  *      The actual completion happens out-of-order, through an IPI handler.
462  **/
463 void blk_mq_complete_request(struct request *rq, int error)
464 {
465         struct request_queue *q = rq->q;
466
467         if (unlikely(blk_should_fake_timeout(q)))
468                 return;
469         if (!blk_mark_rq_complete(rq)) {
470                 rq->errors = error;
471                 __blk_mq_complete_request(rq);
472         }
473 }
474 EXPORT_SYMBOL(blk_mq_complete_request);
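/*
 * Illustrative sketch (not part of the original file): a driver's interrupt
 * handler typically maps the hardware completion back to a request and hands
 * it to blk_mq_complete_request(), which bounces the final work to the
 * submitting CPU (or a cache-sharing one) via IPI when so configured.  The
 * names below are hypothetical:
 *
 *	static irqreturn_t mydrv_irq(int irq, void *data)
 *	{
 *		struct mydrv_queue *mq = data;
 *		u32 tag, status;
 *
 *		while (mydrv_read_completion(mq, &tag, &status)) {
 *			struct request *rq = blk_mq_tag_to_rq(mq->tags, tag);
 *
 *			blk_mq_complete_request(rq, status ? -EIO : 0);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */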
475
476 int blk_mq_request_started(struct request *rq)
477 {
478         return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
479 }
480 EXPORT_SYMBOL_GPL(blk_mq_request_started);
481
482 void blk_mq_start_request(struct request *rq)
483 {
484         struct request_queue *q = rq->q;
485
486         blk_mq_sched_started_request(rq);
487
488         trace_block_rq_issue(q, rq);
489
490         rq->resid_len = blk_rq_bytes(rq);
491         if (unlikely(blk_bidi_rq(rq)))
492                 rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
493
494         if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
495                 blk_stat_set_issue_time(&rq->issue_stat);
496                 rq->rq_flags |= RQF_STATS;
497                 wbt_issue(q->rq_wb, &rq->issue_stat);
498         }
499
500         blk_add_timer(rq);
501
502         /*
503          * Ensure that ->deadline is visible before we set the started
504          * flag and clear the completed flag.
505          */
506         smp_mb__before_atomic();
507
508         /*
509          * Mark us as started and clear complete. Complete might have been
510          * set if requeue raced with timeout, which then marked it as
511          * complete. So be sure to clear complete again when we start
512          * the request, otherwise we'll ignore the completion event.
513          */
514         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
515                 set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
516         if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
517                 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
518
519         if (q->dma_drain_size && blk_rq_bytes(rq)) {
520                 /*
521                  * Make sure space for the drain appears.  We know we can do
522                  * this because max_hw_segments has been adjusted to be one
523                  * fewer than the device can handle.
524                  */
525                 rq->nr_phys_segments++;
526         }
527 }
528 EXPORT_SYMBOL(blk_mq_start_request);
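/*
 * Illustrative sketch (not part of the original file): ->queue_rq() is
 * expected to call blk_mq_start_request() before handing the request to
 * hardware, so that the timeout machinery sees it as active.  A minimal
 * hypothetical implementation:
 *
 *	static int mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
 *				  const struct blk_mq_queue_data *bd)
 *	{
 *		struct mydrv_queue *mq = hctx->driver_data;
 *		struct request *rq = bd->rq;
 *
 *		if (!mydrv_have_slot(mq))
 *			return BLK_MQ_RQ_QUEUE_BUSY;
 *
 *		blk_mq_start_request(rq);
 *		mydrv_submit(mq, rq, bd->last);
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 */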
529
530 static void __blk_mq_requeue_request(struct request *rq)
531 {
532         struct request_queue *q = rq->q;
533
534         trace_block_rq_requeue(q, rq);
535         wbt_requeue(q->rq_wb, &rq->issue_stat);
536         blk_mq_sched_requeue_request(rq);
537
538         if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
539                 if (q->dma_drain_size && blk_rq_bytes(rq))
540                         rq->nr_phys_segments--;
541         }
542 }
543
544 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
545 {
546         __blk_mq_requeue_request(rq);
547
548         BUG_ON(blk_queued_rq(rq));
549         blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
550 }
551 EXPORT_SYMBOL(blk_mq_requeue_request);
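/*
 * Illustrative sketch (not part of the original file): when a request was
 * already started but the hardware cannot take it after all (for example a
 * path just went away), the driver can bounce it back with
 * blk_mq_requeue_request().  Passing kick_requeue_list = true schedules the
 * requeue work immediately; a driver batching several requeues can pass
 * false and kick the list once at the end:
 *
 *	blk_mq_requeue_request(rq, false);
 *	blk_mq_kick_requeue_list(q);
 */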
552
553 static void blk_mq_requeue_work(struct work_struct *work)
554 {
555         struct request_queue *q =
556                 container_of(work, struct request_queue, requeue_work.work);
557         LIST_HEAD(rq_list);
558         struct request *rq, *next;
559         unsigned long flags;
560
561         spin_lock_irqsave(&q->requeue_lock, flags);
562         list_splice_init(&q->requeue_list, &rq_list);
563         spin_unlock_irqrestore(&q->requeue_lock, flags);
564
565         list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
566                 if (!(rq->rq_flags & RQF_SOFTBARRIER))
567                         continue;
568
569                 rq->rq_flags &= ~RQF_SOFTBARRIER;
570                 list_del_init(&rq->queuelist);
571                 blk_mq_sched_insert_request(rq, true, false, false);
572         }
573
574         while (!list_empty(&rq_list)) {
575                 rq = list_entry(rq_list.next, struct request, queuelist);
576                 list_del_init(&rq->queuelist);
577                 blk_mq_sched_insert_request(rq, false, false, false);
578         }
579
580         blk_mq_run_hw_queues(q, false);
581 }
582
583 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
584                                 bool kick_requeue_list)
585 {
586         struct request_queue *q = rq->q;
587         unsigned long flags;
588
589         /*
590          * We abuse this flag that is otherwise used by the I/O scheduler to
591          * request head insertion from the workqueue.
592          */
593         BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
594
595         spin_lock_irqsave(&q->requeue_lock, flags);
596         if (at_head) {
597                 rq->rq_flags |= RQF_SOFTBARRIER;
598                 list_add(&rq->queuelist, &q->requeue_list);
599         } else {
600                 list_add_tail(&rq->queuelist, &q->requeue_list);
601         }
602         spin_unlock_irqrestore(&q->requeue_lock, flags);
603
604         if (kick_requeue_list)
605                 blk_mq_kick_requeue_list(q);
606 }
607 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
608
609 void blk_mq_kick_requeue_list(struct request_queue *q)
610 {
611         kblockd_schedule_delayed_work(&q->requeue_work, 0);
612 }
613 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
614
615 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
616                                     unsigned long msecs)
617 {
618         kblockd_schedule_delayed_work(&q->requeue_work,
619                                       msecs_to_jiffies(msecs));
620 }
621 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
622
623 void blk_mq_abort_requeue_list(struct request_queue *q)
624 {
625         unsigned long flags;
626         LIST_HEAD(rq_list);
627
628         spin_lock_irqsave(&q->requeue_lock, flags);
629         list_splice_init(&q->requeue_list, &rq_list);
630         spin_unlock_irqrestore(&q->requeue_lock, flags);
631
632         while (!list_empty(&rq_list)) {
633                 struct request *rq;
634
635                 rq = list_first_entry(&rq_list, struct request, queuelist);
636                 list_del_init(&rq->queuelist);
637                 rq->errors = -EIO;
638                 blk_mq_end_request(rq, rq->errors);
639         }
640 }
641 EXPORT_SYMBOL(blk_mq_abort_requeue_list);
642
643 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
644 {
645         if (tag < tags->nr_tags) {
646                 prefetch(tags->rqs[tag]);
647                 return tags->rqs[tag];
648         }
649
650         return NULL;
651 }
652 EXPORT_SYMBOL(blk_mq_tag_to_rq);
653
654 struct blk_mq_timeout_data {
655         unsigned long next;
656         unsigned int next_set;
657 };
658
659 void blk_mq_rq_timed_out(struct request *req, bool reserved)
660 {
661         const struct blk_mq_ops *ops = req->q->mq_ops;
662         enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
663
664         /*
665          * We know that complete is set at this point. If STARTED isn't set
666          * anymore, then the request isn't active and the "timeout" should
667          * just be ignored. This can happen due to the bitflag ordering.
668          * Timeout first checks if STARTED is set, and if it is, assumes
669          * the request is active. But if we race with completion, then
670                  * both flags will get cleared. So check here again, and ignore
671          * a timeout event with a request that isn't active.
672          */
673         if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
674                 return;
675
676         if (ops->timeout)
677                 ret = ops->timeout(req, reserved);
678
679         switch (ret) {
680         case BLK_EH_HANDLED:
681                 __blk_mq_complete_request(req);
682                 break;
683         case BLK_EH_RESET_TIMER:
684                 blk_add_timer(req);
685                 blk_clear_rq_complete(req);
686                 break;
687         case BLK_EH_NOT_HANDLED:
688                 break;
689         default:
690                 printk(KERN_ERR "block: bad eh return: %d\n", ret);
691                 break;
692         }
693 }
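/*
 * Illustrative sketch (not part of the original file): the ->timeout
 * callback decides what happens above.  Returning BLK_EH_HANDLED makes the
 * core complete the request; BLK_EH_RESET_TIMER re-arms the timer.  The
 * abort helper below is hypothetical:
 *
 *	static enum blk_eh_timer_return mydrv_timeout(struct request *rq,
 *						      bool reserved)
 *	{
 *		if (mydrv_try_abort(blk_mq_rq_to_pdu(rq)))
 *			return BLK_EH_HANDLED;
 *		return BLK_EH_RESET_TIMER;
 *	}
 */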
694
695 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
696                 struct request *rq, void *priv, bool reserved)
697 {
698         struct blk_mq_timeout_data *data = priv;
699
700         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
701                 /*
702                  * If a request wasn't started before the queue was
703                  * marked dying, kill it here or it'll go unnoticed.
704                  */
705                 if (unlikely(blk_queue_dying(rq->q))) {
706                         rq->errors = -EIO;
707                         blk_mq_end_request(rq, rq->errors);
708                 }
709                 return;
710         }
711
712         if (time_after_eq(jiffies, rq->deadline)) {
713                 if (!blk_mark_rq_complete(rq))
714                         blk_mq_rq_timed_out(rq, reserved);
715         } else if (!data->next_set || time_after(data->next, rq->deadline)) {
716                 data->next = rq->deadline;
717                 data->next_set = 1;
718         }
719 }
720
721 static void blk_mq_timeout_work(struct work_struct *work)
722 {
723         struct request_queue *q =
724                 container_of(work, struct request_queue, timeout_work);
725         struct blk_mq_timeout_data data = {
726                 .next           = 0,
727                 .next_set       = 0,
728         };
729         int i;
730
731         /* A deadlock might occur if a request is stuck requiring a
732          * timeout at the same time a queue freeze is waiting for
733          * completion, since the timeout code would not be able to
734          * acquire the queue reference here.
735          *
736          * That's why we don't use blk_queue_enter here; instead, we use
737          * percpu_ref_tryget directly, because we need to be able to
738          * obtain a reference even in the short window between the queue
739          * starting to freeze, by dropping the first reference in
740          * blk_mq_freeze_queue_start, and the moment the last request is
741          * consumed, marked by the instant q_usage_counter reaches
742          * zero.
743          */
744         if (!percpu_ref_tryget(&q->q_usage_counter))
745                 return;
746
747         blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
748
749         if (data.next_set) {
750                 data.next = blk_rq_timeout(round_jiffies_up(data.next));
751                 mod_timer(&q->timeout, data.next);
752         } else {
753                 struct blk_mq_hw_ctx *hctx;
754
755                 queue_for_each_hw_ctx(q, hctx, i) {
756                         /* the hctx may be unmapped, so check it here */
757                         if (blk_mq_hw_queue_mapped(hctx))
758                                 blk_mq_tag_idle(hctx);
759                 }
760         }
761         blk_queue_exit(q);
762 }
763
764 /*
765  * Check our software queue, in reverse, for entries that we could potentially
766  * merge with. Currently includes a hand-wavy stop count of 8, to not spend
767  * too much time checking for merges.
768  */
769 static bool blk_mq_attempt_merge(struct request_queue *q,
770                                  struct blk_mq_ctx *ctx, struct bio *bio)
771 {
772         struct request *rq;
773         int checked = 8;
774
775         list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
776                 int el_ret;
777
778                 if (!checked--)
779                         break;
780
781                 if (!blk_rq_merge_ok(rq, bio))
782                         continue;
783
784                 el_ret = blk_try_merge(rq, bio);
785                 if (el_ret == ELEVATOR_NO_MERGE)
786                         continue;
787
788                 if (!blk_mq_sched_allow_merge(q, rq, bio))
789                         break;
790
791                 if (el_ret == ELEVATOR_BACK_MERGE) {
792                         if (bio_attempt_back_merge(q, rq, bio)) {
793                                 ctx->rq_merged++;
794                                 return true;
795                         }
796                         break;
797                 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
798                         if (bio_attempt_front_merge(q, rq, bio)) {
799                                 ctx->rq_merged++;
800                                 return true;
801                         }
802                         break;
803                 }
804         }
805
806         return false;
807 }
808
809 struct flush_busy_ctx_data {
810         struct blk_mq_hw_ctx *hctx;
811         struct list_head *list;
812 };
813
814 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
815 {
816         struct flush_busy_ctx_data *flush_data = data;
817         struct blk_mq_hw_ctx *hctx = flush_data->hctx;
818         struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
819
820         sbitmap_clear_bit(sb, bitnr);
821         spin_lock(&ctx->lock);
822         list_splice_tail_init(&ctx->rq_list, flush_data->list);
823         spin_unlock(&ctx->lock);
824         return true;
825 }
826
827 /*
828  * Process software queues that have been marked busy, splicing them
829  * to the for-dispatch list.
830  */
831 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
832 {
833         struct flush_busy_ctx_data data = {
834                 .hctx = hctx,
835                 .list = list,
836         };
837
838         sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
839 }
840 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
841
842 static inline unsigned int queued_to_index(unsigned int queued)
843 {
844         if (!queued)
845                 return 0;
846
847         return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
848 }
849
850 static bool blk_mq_get_driver_tag(struct request *rq,
851                                   struct blk_mq_hw_ctx **hctx, bool wait)
852 {
853         struct blk_mq_alloc_data data = {
854                 .q = rq->q,
855                 .ctx = rq->mq_ctx,
856                 .hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
857                 .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
858         };
859
860         if (blk_mq_hctx_stopped(data.hctx))
861                 return false;
862
863         if (rq->tag != -1) {
864 done:
865                 if (hctx)
866                         *hctx = data.hctx;
867                 return true;
868         }
869
870         rq->tag = blk_mq_get_tag(&data);
871         if (rq->tag >= 0) {
872                 data.hctx->tags->rqs[rq->tag] = rq;
873                 goto done;
874         }
875
876         return false;
877 }
878
879 /*
880  * If we fail getting a driver tag because all the driver tags are already
881  * assigned and on the dispatch list, BUT the first entry does not have a
882  * tag, then we could deadlock. For that case, move entries with assigned
883  * driver tags to the front, leaving the set of tagged requests in the
884  * same order, and the untagged set in the same order.
885  */
886 static bool reorder_tags_to_front(struct list_head *list)
887 {
888         struct request *rq, *tmp, *first = NULL;
889
890         list_for_each_entry_safe_reverse(rq, tmp, list, queuelist) {
891                 if (rq == first)
892                         break;
893                 if (rq->tag != -1) {
894                         list_move(&rq->queuelist, list);
895                         if (!first)
896                                 first = rq;
897                 }
898         }
899
900         return first != NULL;
901 }
902
903 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
904 {
905         struct request_queue *q = hctx->queue;
906         struct request *rq;
907         LIST_HEAD(driver_list);
908         struct list_head *dptr;
909         int queued, ret = BLK_MQ_RQ_QUEUE_OK;
910
911         /*
912          * Start off with dptr being NULL, so we start the first request
913          * immediately, even if we have more pending.
914          */
915         dptr = NULL;
916
917         /*
918          * Now process all the entries, sending them to the driver.
919          */
920         queued = 0;
921         while (!list_empty(list)) {
922                 struct blk_mq_queue_data bd;
923
924                 rq = list_first_entry(list, struct request, queuelist);
925                 if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
926                         if (!queued && reorder_tags_to_front(list))
927                                 continue;
928                         blk_mq_sched_mark_restart(hctx);
929                         break;
930                 }
931                 list_del_init(&rq->queuelist);
932
933                 bd.rq = rq;
934                 bd.list = dptr;
935                 bd.last = list_empty(list);
936
937                 ret = q->mq_ops->queue_rq(hctx, &bd);
938                 switch (ret) {
939                 case BLK_MQ_RQ_QUEUE_OK:
940                         queued++;
941                         break;
942                 case BLK_MQ_RQ_QUEUE_BUSY:
943                         list_add(&rq->queuelist, list);
944                         __blk_mq_requeue_request(rq);
945                         break;
946                 default:
947                         pr_err("blk-mq: bad return on queue: %d\n", ret);
948                 case BLK_MQ_RQ_QUEUE_ERROR:
949                         rq->errors = -EIO;
950                         blk_mq_end_request(rq, rq->errors);
951                         break;
952                 }
953
954                 if (ret == BLK_MQ_RQ_QUEUE_BUSY)
955                         break;
956
957                 /*
958                  * We've done the first request. If we have more than 1
959                  * left in the list, set dptr to defer issue.
960                  */
961                 if (!dptr && list->next != list->prev)
962                         dptr = &driver_list;
963         }
964
965         hctx->dispatched[queued_to_index(queued)]++;
966
967         /*
968          * Any items that need requeuing? Stuff them into hctx->dispatch,
969          * that is where we will continue on next queue run.
970          */
971         if (!list_empty(list)) {
972                 spin_lock(&hctx->lock);
973                 list_splice(list, &hctx->dispatch);
974                 spin_unlock(&hctx->lock);
975
976                 /*
977                  * the queue is expected to be stopped with BLK_MQ_RQ_QUEUE_BUSY,
978                  * but it's possible the queue is stopped and restarted again
979                  * before this. Queue restart will dispatch requests. And since
980                  * requests in rq_list aren't added into hctx->dispatch yet,
981                  * the requests in rq_list might get lost.
982                  *
983                  * blk_mq_run_hw_queue() already checks the STOPPED bit
984                  *
985                  * If RESTART is set, then let completion restart the queue
986                  * instead of potentially looping here.
987                  */
988                 if (!blk_mq_sched_needs_restart(hctx))
989                         blk_mq_run_hw_queue(hctx, true);
990         }
991
992         return ret != BLK_MQ_RQ_QUEUE_BUSY;
993 }
994
995 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
996 {
997         int srcu_idx;
998
999         WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
1000                 cpu_online(hctx->next_cpu));
1001
1002         if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
1003                 rcu_read_lock();
1004                 blk_mq_sched_dispatch_requests(hctx);
1005                 rcu_read_unlock();
1006         } else {
1007                 srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
1008                 blk_mq_sched_dispatch_requests(hctx);
1009                 srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
1010         }
1011 }
1012
1013 /*
1014  * It'd be great if the workqueue API had a way to pass
1015  * in a mask and had some smarts for more clever placement.
1016  * For now we just round-robin here, switching for every
1017  * BLK_MQ_CPU_WORK_BATCH queued items.
1018  */
1019 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1020 {
1021         if (hctx->queue->nr_hw_queues == 1)
1022                 return WORK_CPU_UNBOUND;
1023
1024         if (--hctx->next_cpu_batch <= 0) {
1025                 int next_cpu;
1026
1027                 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
1028                 if (next_cpu >= nr_cpu_ids)
1029                         next_cpu = cpumask_first(hctx->cpumask);
1030
1031                 hctx->next_cpu = next_cpu;
1032                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1033         }
1034
1035         return hctx->next_cpu;
1036 }
1037
1038 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1039 {
1040         if (unlikely(blk_mq_hctx_stopped(hctx) ||
1041                      !blk_mq_hw_queue_mapped(hctx)))
1042                 return;
1043
1044         if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
1045                 int cpu = get_cpu();
1046                 if (cpumask_test_cpu(cpu, hctx->cpumask)) {
1047                         __blk_mq_run_hw_queue(hctx);
1048                         put_cpu();
1049                         return;
1050                 }
1051
1052                 put_cpu();
1053         }
1054
1055         kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
1056 }
1057
1058 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
1059 {
1060         struct blk_mq_hw_ctx *hctx;
1061         int i;
1062
1063         queue_for_each_hw_ctx(q, hctx, i) {
1064                 if (!blk_mq_hctx_has_pending(hctx) ||
1065                     blk_mq_hctx_stopped(hctx))
1066                         continue;
1067
1068                 blk_mq_run_hw_queue(hctx, async);
1069         }
1070 }
1071 EXPORT_SYMBOL(blk_mq_run_hw_queues);
1072
1073 /**
1074  * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
1075  * @q: request queue.
1076  *
1077  * The caller is responsible for serializing this function against
1078  * blk_mq_{start,stop}_hw_queue().
1079  */
1080 bool blk_mq_queue_stopped(struct request_queue *q)
1081 {
1082         struct blk_mq_hw_ctx *hctx;
1083         int i;
1084
1085         queue_for_each_hw_ctx(q, hctx, i)
1086                 if (blk_mq_hctx_stopped(hctx))
1087                         return true;
1088
1089         return false;
1090 }
1091 EXPORT_SYMBOL(blk_mq_queue_stopped);
1092
1093 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
1094 {
1095         cancel_work(&hctx->run_work);
1096         cancel_delayed_work(&hctx->delay_work);
1097         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
1098 }
1099 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
1100
1101 void blk_mq_stop_hw_queues(struct request_queue *q)
1102 {
1103         struct blk_mq_hw_ctx *hctx;
1104         int i;
1105
1106         queue_for_each_hw_ctx(q, hctx, i)
1107                 blk_mq_stop_hw_queue(hctx);
1108 }
1109 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
1110
1111 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1112 {
1113         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1114
1115         blk_mq_run_hw_queue(hctx, false);
1116 }
1117 EXPORT_SYMBOL(blk_mq_start_hw_queue);
1118
1119 void blk_mq_start_hw_queues(struct request_queue *q)
1120 {
1121         struct blk_mq_hw_ctx *hctx;
1122         int i;
1123
1124         queue_for_each_hw_ctx(q, hctx, i)
1125                 blk_mq_start_hw_queue(hctx);
1126 }
1127 EXPORT_SYMBOL(blk_mq_start_hw_queues);
1128
1129 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1130 {
1131         if (!blk_mq_hctx_stopped(hctx))
1132                 return;
1133
1134         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1135         blk_mq_run_hw_queue(hctx, async);
1136 }
1137 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1138
1139 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
1140 {
1141         struct blk_mq_hw_ctx *hctx;
1142         int i;
1143
1144         queue_for_each_hw_ctx(q, hctx, i)
1145                 blk_mq_start_stopped_hw_queue(hctx, async);
1146 }
1147 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
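/*
 * Illustrative sketch (not part of the original file): the stop/start
 * helpers are what drivers use around error recovery, where dispatching
 * must pause but queued requests must not be lost.  Names are hypothetical:
 *
 *	static void mydrv_reset(struct mydrv_dev *dev)
 *	{
 *		blk_mq_stop_hw_queues(dev->queue);
 *		mydrv_reset_hardware(dev);
 *		blk_mq_start_stopped_hw_queues(dev->queue, true);
 *	}
 *
 * Stopping only sets BLK_MQ_S_STOPPED and cancels pending run work; it does
 * not wait for ->queue_rq() calls already in progress (that is what
 * blk_mq_quiesce_queue() above is for).
 */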
1148
1149 static void blk_mq_run_work_fn(struct work_struct *work)
1150 {
1151         struct blk_mq_hw_ctx *hctx;
1152
1153         hctx = container_of(work, struct blk_mq_hw_ctx, run_work);
1154
1155         __blk_mq_run_hw_queue(hctx);
1156 }
1157
1158 static void blk_mq_delay_work_fn(struct work_struct *work)
1159 {
1160         struct blk_mq_hw_ctx *hctx;
1161
1162         hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
1163
1164         if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
1165                 __blk_mq_run_hw_queue(hctx);
1166 }
1167
1168 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1169 {
1170         if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
1171                 return;
1172
1173         kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1174                         &hctx->delay_work, msecs_to_jiffies(msecs));
1175 }
1176 EXPORT_SYMBOL(blk_mq_delay_queue);
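/*
 * Illustrative sketch (not part of the original file): a driver that has to
 * return BLK_MQ_RQ_QUEUE_BUSY because a resource is temporarily exhausted
 * can stop the queue and ask for a delayed re-run instead of relying on a
 * completion to restart it.  MYDRV_RETRY_MS is hypothetical:
 *
 *	if (!mydrv_have_slot(mq)) {
 *		blk_mq_stop_hw_queue(hctx);
 *		blk_mq_delay_queue(hctx, MYDRV_RETRY_MS);
 *		return BLK_MQ_RQ_QUEUE_BUSY;
 *	}
 */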
1177
1178 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
1179                                             struct request *rq,
1180                                             bool at_head)
1181 {
1182         struct blk_mq_ctx *ctx = rq->mq_ctx;
1183
1184         trace_block_rq_insert(hctx->queue, rq);
1185
1186         if (at_head)
1187                 list_add(&rq->queuelist, &ctx->rq_list);
1188         else
1189                 list_add_tail(&rq->queuelist, &ctx->rq_list);
1190 }
1191
1192 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1193                              bool at_head)
1194 {
1195         struct blk_mq_ctx *ctx = rq->mq_ctx;
1196
1197         __blk_mq_insert_req_list(hctx, rq, at_head);
1198         blk_mq_hctx_mark_pending(hctx, ctx);
1199 }
1200
1201 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1202                             struct list_head *list)
1203
1204 {
1205         /*
1206          * Preemption doesn't flush the plug list, so it's possible that
1207          * ctx->cpu is offline by now.
1208          */
1209         spin_lock(&ctx->lock);
1210         while (!list_empty(list)) {
1211                 struct request *rq;
1212
1213                 rq = list_first_entry(list, struct request, queuelist);
1214                 BUG_ON(rq->mq_ctx != ctx);
1215                 list_del_init(&rq->queuelist);
1216                 __blk_mq_insert_req_list(hctx, rq, false);
1217         }
1218         blk_mq_hctx_mark_pending(hctx, ctx);
1219         spin_unlock(&ctx->lock);
1220 }
1221
1222 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1223 {
1224         struct request *rqa = container_of(a, struct request, queuelist);
1225         struct request *rqb = container_of(b, struct request, queuelist);
1226
1227         return !(rqa->mq_ctx < rqb->mq_ctx ||
1228                  (rqa->mq_ctx == rqb->mq_ctx &&
1229                   blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1230 }
1231
1232 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1233 {
1234         struct blk_mq_ctx *this_ctx;
1235         struct request_queue *this_q;
1236         struct request *rq;
1237         LIST_HEAD(list);
1238         LIST_HEAD(ctx_list);
1239         unsigned int depth;
1240
1241         list_splice_init(&plug->mq_list, &list);
1242
1243         list_sort(NULL, &list, plug_ctx_cmp);
1244
1245         this_q = NULL;
1246         this_ctx = NULL;
1247         depth = 0;
1248
1249         while (!list_empty(&list)) {
1250                 rq = list_entry_rq(list.next);
1251                 list_del_init(&rq->queuelist);
1252                 BUG_ON(!rq->q);
1253                 if (rq->mq_ctx != this_ctx) {
1254                         if (this_ctx) {
1255                                 trace_block_unplug(this_q, depth, from_schedule);
1256                                 blk_mq_sched_insert_requests(this_q, this_ctx,
1257                                                                 &ctx_list,
1258                                                                 from_schedule);
1259                         }
1260
1261                         this_ctx = rq->mq_ctx;
1262                         this_q = rq->q;
1263                         depth = 0;
1264                 }
1265
1266                 depth++;
1267                 list_add_tail(&rq->queuelist, &ctx_list);
1268         }
1269
1270         /*
1271          * If 'this_ctx' is set, we know we have entries to complete
1272          * on 'ctx_list'. Do those.
1273          */
1274         if (this_ctx) {
1275                 trace_block_unplug(this_q, depth, from_schedule);
1276                 blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
1277                                                 from_schedule);
1278         }
1279 }
1280
1281 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1282 {
1283         init_request_from_bio(rq, bio);
1284
1285         blk_account_io_start(rq, true);
1286 }
1287
1288 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
1289 {
1290         return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
1291                 !blk_queue_nomerges(hctx->queue);
1292 }
1293
1294 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1295                                          struct blk_mq_ctx *ctx,
1296                                          struct request *rq, struct bio *bio)
1297 {
1298         if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
1299                 blk_mq_bio_to_request(rq, bio);
1300                 spin_lock(&ctx->lock);
1301 insert_rq:
1302                 __blk_mq_insert_request(hctx, rq, false);
1303                 spin_unlock(&ctx->lock);
1304                 return false;
1305         } else {
1306                 struct request_queue *q = hctx->queue;
1307
1308                 spin_lock(&ctx->lock);
1309                 if (!blk_mq_attempt_merge(q, ctx, bio)) {
1310                         blk_mq_bio_to_request(rq, bio);
1311                         goto insert_rq;
1312                 }
1313
1314                 spin_unlock(&ctx->lock);
1315                 __blk_mq_finish_request(hctx, ctx, rq);
1316                 return true;
1317         }
1318 }
1319
1320 static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
1321 {
1322         if (rq->tag != -1)
1323                 return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);
1324
1325         return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
1326 }
1327
1328 static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
1329 {
1330         struct request_queue *q = rq->q;
1331         struct blk_mq_queue_data bd = {
1332                 .rq = rq,
1333                 .list = NULL,
1334                 .last = 1
1335         };
1336         struct blk_mq_hw_ctx *hctx;
1337         blk_qc_t new_cookie;
1338         int ret;
1339
1340         if (q->elevator)
1341                 goto insert;
1342
1343         if (!blk_mq_get_driver_tag(rq, &hctx, false))
1344                 goto insert;
1345
1346         new_cookie = request_to_qc_t(hctx, rq);
1347
1348         /*
1349          * If queueing succeeded, we are done. On a hard error, kill the
1350          * request. For any other outcome (busy), just add it to our list
1351          * as we previously would have done.
1352          */
1353         ret = q->mq_ops->queue_rq(hctx, &bd);
1354         if (ret == BLK_MQ_RQ_QUEUE_OK) {
1355                 *cookie = new_cookie;
1356                 return;
1357         }
1358
1359         __blk_mq_requeue_request(rq);
1360
1361         if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1362                 *cookie = BLK_QC_T_NONE;
1363                 rq->errors = -EIO;
1364                 blk_mq_end_request(rq, rq->errors);
1365                 return;
1366         }
1367
1368 insert:
1369         blk_mq_sched_insert_request(rq, false, true, true);
1370 }
1371
1372 /*
1373  * Multiple hardware queue variant. This will not use per-process plugs,
1374  * but will attempt to bypass the hctx queueing if we can go straight to
1375  * hardware for SYNC IO.
1376  */
1377 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1378 {
1379         const int is_sync = op_is_sync(bio->bi_opf);
1380         const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
1381         struct blk_mq_alloc_data data;
1382         struct request *rq;
1383         unsigned int request_count = 0, srcu_idx;
1384         struct blk_plug *plug;
1385         struct request *same_queue_rq = NULL;
1386         blk_qc_t cookie;
1387         unsigned int wb_acct;
1388
1389         blk_queue_bounce(q, &bio);
1390
1391         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1392                 bio_io_error(bio);
1393                 return BLK_QC_T_NONE;
1394         }
1395
1396         blk_queue_split(q, &bio, q->bio_split);
1397
1398         if (!is_flush_fua && !blk_queue_nomerges(q) &&
1399             blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1400                 return BLK_QC_T_NONE;
1401
1402         if (blk_mq_sched_bio_merge(q, bio))
1403                 return BLK_QC_T_NONE;
1404
1405         wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1406
1407         trace_block_getrq(q, bio, bio->bi_opf);
1408
1409         rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
1410         if (unlikely(!rq)) {
1411                 __wbt_done(q->rq_wb, wb_acct);
1412                 return BLK_QC_T_NONE;
1413         }
1414
1415         wbt_track(&rq->issue_stat, wb_acct);
1416
1417         cookie = request_to_qc_t(data.hctx, rq);
1418
1419         if (unlikely(is_flush_fua)) {
1420                 blk_mq_bio_to_request(rq, bio);
1421                 blk_mq_get_driver_tag(rq, NULL, true);
1422                 blk_insert_flush(rq);
1423                 goto run_queue;
1424         }
1425
1426         plug = current->plug;
1427         /*
1428          * If the driver supports deferred issue based on 'last', then
1429          * queue it up like normal since we can potentially save some
1430          * CPU this way.
1431          */
1432         if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
1433             !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
1434                 struct request *old_rq = NULL;
1435
1436                 blk_mq_bio_to_request(rq, bio);
1437
1438                 /*
1439                  * We do limited plugging. If the bio can be merged, do that.
1440                  * Otherwise the existing request in the plug list will be
1441                  * issued. So the plug list will have one request at most.
1442                  */
1443                 if (plug) {
1444                         /*
1445                          * The plug list might get flushed before this. If that
1446                          * happens, same_queue_rq is invalid and plug list is
1447                          * empty
1448                          */
1449                         if (same_queue_rq && !list_empty(&plug->mq_list)) {
1450                                 old_rq = same_queue_rq;
1451                                 list_del_init(&old_rq->queuelist);
1452                         }
1453                         list_add_tail(&rq->queuelist, &plug->mq_list);
1454                 } else /* is_sync */
1455                         old_rq = rq;
1456                 blk_mq_put_ctx(data.ctx);
1457                 if (!old_rq)
1458                         goto done;
1459
1460                 if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
1461                         rcu_read_lock();
1462                         blk_mq_try_issue_directly(old_rq, &cookie);
1463                         rcu_read_unlock();
1464                 } else {
1465                         srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
1466                         blk_mq_try_issue_directly(old_rq, &cookie);
1467                         srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
1468                 }
1469                 goto done;
1470         }
1471
1472         if (q->elevator) {
1473                 blk_mq_put_ctx(data.ctx);
1474                 blk_mq_bio_to_request(rq, bio);
1475                 blk_mq_sched_insert_request(rq, false, true, true);
1476                 goto done;
1477         }
1478         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1479                 /*
1480                  * For a SYNC request, send it to the hardware immediately. For
1481                  * an ASYNC request, just ensure that we run it later on. The
1482                  * latter allows for merging opportunities and more efficient
1483                  * dispatching.
1484                  */
1485 run_queue:
1486                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1487         }
1488         blk_mq_put_ctx(data.ctx);
1489 done:
1490         return cookie;
1491 }
1492
1493 /*
1494  * Single hardware queue variant. This will attempt to use any per-process
1495  * plug for merging and IO deferral.
1496  */
1497 static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
1498 {
1499         const int is_sync = op_is_sync(bio->bi_opf);
1500         const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
1501         struct blk_plug *plug;
1502         unsigned int request_count = 0;
1503         struct blk_mq_alloc_data data;
1504         struct request *rq;
1505         blk_qc_t cookie;
1506         unsigned int wb_acct;
1507
1508         blk_queue_bounce(q, &bio);
1509
1510         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1511                 bio_io_error(bio);
1512                 return BLK_QC_T_NONE;
1513         }
1514
1515         blk_queue_split(q, &bio, q->bio_split);
1516
1517         if (!is_flush_fua && !blk_queue_nomerges(q)) {
1518                 if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
1519                         return BLK_QC_T_NONE;
1520         } else
1521                 request_count = blk_plug_queued_count(q);
1522
1523         if (blk_mq_sched_bio_merge(q, bio))
1524                 return BLK_QC_T_NONE;
1525
1526         wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1527
1528         trace_block_getrq(q, bio, bio->bi_opf);
1529
1530         rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
1531         if (unlikely(!rq)) {
1532                 __wbt_done(q->rq_wb, wb_acct);
1533                 return BLK_QC_T_NONE;
1534         }
1535
1536         wbt_track(&rq->issue_stat, wb_acct);
1537
1538         cookie = request_to_qc_t(data.hctx, rq);
1539
1540         if (unlikely(is_flush_fua)) {
1541                 blk_mq_bio_to_request(rq, bio);
1542                 blk_mq_get_driver_tag(rq, NULL, true);
1543                 blk_insert_flush(rq);
1544                 goto run_queue;
1545         }
1546
1547         /*
1548          * If a task plug exists, use it: since it is completely lockless,
1549          * we can stash requests there until the task is either done or
1550          * scheduled away.
1551          */
1552         plug = current->plug;
1553         if (plug) {
1554                 struct request *last = NULL;
1555
1556                 blk_mq_bio_to_request(rq, bio);
1557
1558                 /*
1559                  * @request_count may become stale if the task was scheduled
1560                  * out, so check the list again.
1561                  */
1562                 if (list_empty(&plug->mq_list))
1563                         request_count = 0;
1564                 if (!request_count)
1565                         trace_block_plug(q);
1566                 else
1567                         last = list_entry_rq(plug->mq_list.prev);
1568
1569                 blk_mq_put_ctx(data.ctx);
1570
1571                 if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
1572                     blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
1573                         blk_flush_plug_list(plug, false);
1574                         trace_block_plug(q);
1575                 }
1576
1577                 list_add_tail(&rq->queuelist, &plug->mq_list);
1578                 return cookie;
1579         }
1580
1581         if (q->elevator) {
1582                 blk_mq_put_ctx(data.ctx);
1583                 blk_mq_bio_to_request(rq, bio);
1584                 blk_mq_sched_insert_request(rq, false, true, true);
1585                 goto done;
1586         }
1587         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1588                 /*
1589                  * For a SYNC request, send it to the hardware immediately. For
1590                  * an ASYNC request, just ensure that we run it later on. The
1591                  * latter allows for merging opportunities and more efficient
1592                  * dispatching.
1593                  */
1594 run_queue:
1595                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1596         }
1597
1598         blk_mq_put_ctx(data.ctx);
1599 done:
1600         return cookie;
1601 }
1602
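/*
 * Release the requests backing a tag map: let the driver tear down any
 * per-request data via ->exit_request() for each preallocated request in
 * tags->static_rqs[], then free the backing pages that were allocated in
 * blk_mq_alloc_rqs().
 */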
1603 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1604                      unsigned int hctx_idx)
1605 {
1606         struct page *page;
1607
1608         if (tags->rqs && set->ops->exit_request) {
1609                 int i;
1610
1611                 for (i = 0; i < tags->nr_tags; i++) {
1612                         struct request *rq = tags->static_rqs[i];
1613
1614                         if (!rq)
1615                                 continue;
1616                         set->ops->exit_request(set->driver_data, rq,
1617                                                 hctx_idx, i);
1618                         tags->static_rqs[i] = NULL;
1619                 }
1620         }
1621
1622         while (!list_empty(&tags->page_list)) {
1623                 page = list_first_entry(&tags->page_list, struct page, lru);
1624                 list_del_init(&page->lru);
1625                 /*
1626                  * Remove kmemleak object previously allocated in
1627                  * blk_mq_alloc_rqs().
1628                  */
1629                 kmemleak_free(page_address(page));
1630                 __free_pages(page, page->private);
1631         }
1632 }
1633
1634 void blk_mq_free_rq_map(struct blk_mq_tags *tags)
1635 {
1636         kfree(tags->rqs);
1637         tags->rqs = NULL;
1638         kfree(tags->static_rqs);
1639         tags->static_rqs = NULL;
1640
1641         blk_mq_free_tags(tags);
1642 }
1643
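/*
 * Allocate the tag map for one hardware queue together with two request
 * pointer arrays: tags->static_rqs[] will hold the preallocated requests
 * (filled in by blk_mq_alloc_rqs()), while tags->rqs[] is indexed by
 * driver tag and is used to look up in-flight requests (see
 * blk_mq_tag_to_rq()).
 */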
1644 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
1645                                         unsigned int hctx_idx,
1646                                         unsigned int nr_tags,
1647                                         unsigned int reserved_tags)
1648 {
1649         struct blk_mq_tags *tags;
1650
1651         tags = blk_mq_init_tags(nr_tags, reserved_tags,
1652                                 set->numa_node,
1653                                 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
1654         if (!tags)
1655                 return NULL;
1656
1657         tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
1658                                  GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1659                                  set->numa_node);
1660         if (!tags->rqs) {
1661                 blk_mq_free_tags(tags);
1662                 return NULL;
1663         }
1664
1665         tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
1666                                  GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1667                                  set->numa_node);
1668         if (!tags->static_rqs) {
1669                 kfree(tags->rqs);
1670                 blk_mq_free_tags(tags);
1671                 return NULL;
1672         }
1673
1674         return tags;
1675 }
1676
1677 static size_t order_to_size(unsigned int order)
1678 {
1679         return (size_t)PAGE_SIZE << order;
1680 }
1681
1682 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1683                      unsigned int hctx_idx, unsigned int depth)
1684 {
1685         unsigned int i, j, entries_per_page, max_order = 4;
1686         size_t rq_size, left;
1687
1688         INIT_LIST_HEAD(&tags->page_list);
1689
1690         /*
1691          * rq_size is the size of the request plus driver payload, rounded
1692          * to the cacheline size
1693          */
1694         rq_size = round_up(sizeof(struct request) + set->cmd_size,
1695                                 cache_line_size());
1696         left = rq_size * depth;
1697
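        /*
         * Rough example with made-up numbers: if sizeof(struct request) +
         * set->cmd_size came to 400 bytes and the cacheline size were 64
         * bytes, rq_size would round up to 448.  A max_order-4 chunk (16
         * pages, 64KB with 4KB pages) then holds 146 requests, so a depth
         * of 256 needs two chunks.
         */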
1698         for (i = 0; i < depth; ) {
1699                 int this_order = max_order;
1700                 struct page *page;
1701                 int to_do;
1702                 void *p;
1703
1704                 while (this_order && left < order_to_size(this_order - 1))
1705                         this_order--;
1706
1707                 do {
1708                         page = alloc_pages_node(set->numa_node,
1709                                 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
1710                                 this_order);
1711                         if (page)
1712                                 break;
1713                         if (!this_order--)
1714                                 break;
1715                         if (order_to_size(this_order) < rq_size)
1716                                 break;
1717                 } while (1);
1718
1719                 if (!page)
1720                         goto fail;
1721
1722                 page->private = this_order;
1723                 list_add_tail(&page->lru, &tags->page_list);
1724
1725                 p = page_address(page);
1726                 /*
1727                  * Allow kmemleak to scan these pages as they contain pointers
1728                  * to additional allocations, such as those made via ops->init_request().
1729                  */
1730                 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
1731                 entries_per_page = order_to_size(this_order) / rq_size;
1732                 to_do = min(entries_per_page, depth - i);
1733                 left -= to_do * rq_size;
1734                 for (j = 0; j < to_do; j++) {
1735                         struct request *rq = p;
1736
1737                         tags->static_rqs[i] = rq;
1738                         if (set->ops->init_request) {
1739                                 if (set->ops->init_request(set->driver_data,
1740                                                 rq, hctx_idx, i,
1741                                                 set->numa_node)) {
1742                                         tags->static_rqs[i] = NULL;
1743                                         goto fail;
1744                                 }
1745                         }
1746
1747                         p += rq_size;
1748                         i++;
1749                 }
1750         }
1751         return 0;
1752
1753 fail:
1754         blk_mq_free_rqs(set, tags, hctx_idx);
1755         return -ENOMEM;
1756 }
1757
1758 /*
1759  * 'cpu' is going away. Splice any existing rq_list entries from this
1760  * software queue to the hw queue dispatch list, and ensure that it
1761  * gets run.
1762  */
1763 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
1764 {
1765         struct blk_mq_hw_ctx *hctx;
1766         struct blk_mq_ctx *ctx;
1767         LIST_HEAD(tmp);
1768
1769         hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
1770         ctx = __blk_mq_get_ctx(hctx->queue, cpu);
1771
1772         spin_lock(&ctx->lock);
1773         if (!list_empty(&ctx->rq_list)) {
1774                 list_splice_init(&ctx->rq_list, &tmp);
1775                 blk_mq_hctx_clear_pending(hctx, ctx);
1776         }
1777         spin_unlock(&ctx->lock);
1778
1779         if (list_empty(&tmp))
1780                 return 0;
1781
1782         spin_lock(&hctx->lock);
1783         list_splice_tail_init(&tmp, &hctx->dispatch);
1784         spin_unlock(&hctx->lock);
1785
1786         blk_mq_run_hw_queue(hctx, true);
1787         return 0;
1788 }
1789
1790 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
1791 {
1792         cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
1793                                             &hctx->cpuhp_dead);
1794 }
1795
1796 /* hctx->ctxs will be freed in queue's release handler */
1797 static void blk_mq_exit_hctx(struct request_queue *q,
1798                 struct blk_mq_tag_set *set,
1799                 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1800 {
1801         unsigned flush_start_tag = set->queue_depth;
1802
1803         blk_mq_tag_idle(hctx);
1804
1805         if (set->ops->exit_request)
1806                 set->ops->exit_request(set->driver_data,
1807                                        hctx->fq->flush_rq, hctx_idx,
1808                                        flush_start_tag + hctx_idx);
1809
1810         if (set->ops->exit_hctx)
1811                 set->ops->exit_hctx(hctx, hctx_idx);
1812
1813         if (hctx->flags & BLK_MQ_F_BLOCKING)
1814                 cleanup_srcu_struct(&hctx->queue_rq_srcu);
1815
1816         blk_mq_remove_cpuhp(hctx);
1817         blk_free_flush_queue(hctx->fq);
1818         sbitmap_free(&hctx->ctx_map);
1819 }
1820
1821 static void blk_mq_exit_hw_queues(struct request_queue *q,
1822                 struct blk_mq_tag_set *set, int nr_queue)
1823 {
1824         struct blk_mq_hw_ctx *hctx;
1825         unsigned int i;
1826
1827         queue_for_each_hw_ctx(q, hctx, i) {
1828                 if (i == nr_queue)
1829                         break;
1830                 blk_mq_exit_hctx(q, set, hctx, i);
1831         }
1832 }
1833
1834 static void blk_mq_free_hw_queues(struct request_queue *q,
1835                 struct blk_mq_tag_set *set)
1836 {
1837         struct blk_mq_hw_ctx *hctx;
1838         unsigned int i;
1839
1840         queue_for_each_hw_ctx(q, hctx, i)
1841                 free_cpumask_var(hctx->cpumask);
1842 }
1843
1844 static int blk_mq_init_hctx(struct request_queue *q,
1845                 struct blk_mq_tag_set *set,
1846                 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1847 {
1848         int node;
1849         unsigned flush_start_tag = set->queue_depth;
1850
1851         node = hctx->numa_node;
1852         if (node == NUMA_NO_NODE)
1853                 node = hctx->numa_node = set->numa_node;
1854
1855         INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
1856         INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1857         spin_lock_init(&hctx->lock);
1858         INIT_LIST_HEAD(&hctx->dispatch);
1859         hctx->queue = q;
1860         hctx->queue_num = hctx_idx;
1861         hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
1862
1863         cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
1864
1865         hctx->tags = set->tags[hctx_idx];
1866
1867         /*
1868          * Allocate space for all possible cpus to avoid allocation at
1869          * runtime
1870          */
1871         hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1872                                         GFP_KERNEL, node);
1873         if (!hctx->ctxs)
1874                 goto unregister_cpu_notifier;
1875
1876         if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
1877                               node))
1878                 goto free_ctxs;
1879
1880         hctx->nr_ctx = 0;
1881
1882         if (set->ops->init_hctx &&
1883             set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1884                 goto free_bitmap;
1885
1886         hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
1887         if (!hctx->fq)
1888                 goto exit_hctx;
1889
1890         if (set->ops->init_request &&
1891             set->ops->init_request(set->driver_data,
1892                                    hctx->fq->flush_rq, hctx_idx,
1893                                    flush_start_tag + hctx_idx, node))
1894                 goto free_fq;
1895
1896         if (hctx->flags & BLK_MQ_F_BLOCKING)
1897                 init_srcu_struct(&hctx->queue_rq_srcu);
1898
1899         return 0;
1900
1901  free_fq:
1902         blk_free_flush_queue(hctx->fq);
1903  exit_hctx:
1904         if (set->ops->exit_hctx)
1905                 set->ops->exit_hctx(hctx, hctx_idx);
1906  free_bitmap:
1907         sbitmap_free(&hctx->ctx_map);
1908  free_ctxs:
1909         kfree(hctx->ctxs);
1910  unregister_cpu_notifier:
1911         blk_mq_remove_cpuhp(hctx);
1912         return -1;
1913 }
1914
1915 static void blk_mq_init_cpu_queues(struct request_queue *q,
1916                                    unsigned int nr_hw_queues)
1917 {
1918         unsigned int i;
1919
1920         for_each_possible_cpu(i) {
1921                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1922                 struct blk_mq_hw_ctx *hctx;
1923
1924                 memset(__ctx, 0, sizeof(*__ctx));
1925                 __ctx->cpu = i;
1926                 spin_lock_init(&__ctx->lock);
1927                 INIT_LIST_HEAD(&__ctx->rq_list);
1928                 __ctx->queue = q;
1929                 blk_stat_init(&__ctx->stat[BLK_STAT_READ]);
1930                 blk_stat_init(&__ctx->stat[BLK_STAT_WRITE]);
1931
1932                 /* If the cpu isn't online, the cpu is mapped to the first hctx */
1933                 if (!cpu_online(i))
1934                         continue;
1935
1936                 hctx = blk_mq_map_queue(q, i);
1937
1938                 /*
1939                  * Set local node, IFF we have more than one hw queue. If
1940                  * not, we remain on the home node of the device
1941                  */
1942                 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1943                         hctx->numa_node = local_memory_node(cpu_to_node(i));
1944         }
1945 }
1946
1947 static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
1948 {
1949         int ret = 0;
1950
1951         set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
1952                                         set->queue_depth, set->reserved_tags);
1953         if (!set->tags[hctx_idx])
1954                 return false;
1955
1956         ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
1957                                 set->queue_depth);
1958         if (!ret)
1959                 return true;
1960
1961         blk_mq_free_rq_map(set->tags[hctx_idx]);
1962         set->tags[hctx_idx] = NULL;
1963         return false;
1964 }
1965
1966 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
1967                                          unsigned int hctx_idx)
1968 {
1969         if (set->tags[hctx_idx]) {
1970                 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
1971                 blk_mq_free_rq_map(set->tags[hctx_idx]);
1972                 set->tags[hctx_idx] = NULL;
1973         }
1974 }
1975
1976 static void blk_mq_map_swqueue(struct request_queue *q,
1977                                const struct cpumask *online_mask)
1978 {
1979         unsigned int i, hctx_idx;
1980         struct blk_mq_hw_ctx *hctx;
1981         struct blk_mq_ctx *ctx;
1982         struct blk_mq_tag_set *set = q->tag_set;
1983
1984         /*
1985          * Avoid others reading an incomplete hctx->cpumask through sysfs
1986          */
1987         mutex_lock(&q->sysfs_lock);
1988
1989         queue_for_each_hw_ctx(q, hctx, i) {
1990                 cpumask_clear(hctx->cpumask);
1991                 hctx->nr_ctx = 0;
1992         }
1993
1994         /*
1995          * Map software to hardware queues
1996          */
1997         for_each_possible_cpu(i) {
1998                 /* If the cpu isn't online, the cpu is mapped to the first hctx */
1999                 if (!cpumask_test_cpu(i, online_mask))
2000                         continue;
2001
2002                 hctx_idx = q->mq_map[i];
2003                 /* unmapped hw queue can be remapped after CPU topo changed */
2004                 if (!set->tags[hctx_idx] &&
2005                     !__blk_mq_alloc_rq_map(set, hctx_idx)) {
2006                         /*
2007                          * If tags initialization fails for some hctx,
2008                          * that hctx won't be brought online.  In this
2009                          * case, remap the current ctx to hctx[0] which
2010                          * is guaranteed to always have tags allocated
2011                          */
2012                         q->mq_map[i] = 0;
2013                 }
2014
2015                 ctx = per_cpu_ptr(q->queue_ctx, i);
2016                 hctx = blk_mq_map_queue(q, i);
2017
2018                 cpumask_set_cpu(i, hctx->cpumask);
2019                 ctx->index_hw = hctx->nr_ctx;
2020                 hctx->ctxs[hctx->nr_ctx++] = ctx;
2021         }
2022
2023         mutex_unlock(&q->sysfs_lock);
2024
2025         queue_for_each_hw_ctx(q, hctx, i) {
2026                 /*
2027                  * If no software queues are mapped to this hardware queue,
2028                  * disable it and free the request entries.
2029                  */
2030                 if (!hctx->nr_ctx) {
2031                         /* Never unmap queue 0.  We need it as a
2032                          * fallback in case a new remap fails
2033                          * allocation
2034                          */
2035                         if (i && set->tags[i])
2036                                 blk_mq_free_map_and_requests(set, i);
2037
2038                         hctx->tags = NULL;
2039                         continue;
2040                 }
2041
2042                 hctx->tags = set->tags[i];
2043                 WARN_ON(!hctx->tags);
2044
2045                 /*
2046                  * Set the map size to the number of mapped software queues.
2047                  * This is more accurate and more efficient than looping
2048                  * over all possibly mapped software queues.
2049                  */
2050                 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
2051
2052                 /*
2053                  * Initialize batch roundrobin counts
2054                  */
2055                 hctx->next_cpu = cpumask_first(hctx->cpumask);
2056                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2057         }
2058 }
2059
2060 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
2061 {
2062         struct blk_mq_hw_ctx *hctx;
2063         int i;
2064
2065         queue_for_each_hw_ctx(q, hctx, i) {
2066                 if (shared)
2067                         hctx->flags |= BLK_MQ_F_TAG_SHARED;
2068                 else
2069                         hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
2070         }
2071 }
2072
2073 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
2074 {
2075         struct request_queue *q;
2076
2077         list_for_each_entry(q, &set->tag_list, tag_set_list) {
2078                 blk_mq_freeze_queue(q);
2079                 queue_set_hctx_shared(q, shared);
2080                 blk_mq_unfreeze_queue(q);
2081         }
2082 }
2083
2084 static void blk_mq_del_queue_tag_set(struct request_queue *q)
2085 {
2086         struct blk_mq_tag_set *set = q->tag_set;
2087
2088         mutex_lock(&set->tag_list_lock);
2089         list_del_init(&q->tag_set_list);
2090         if (list_is_singular(&set->tag_list)) {
2091                 /* just transitioned to unshared */
2092                 set->flags &= ~BLK_MQ_F_TAG_SHARED;
2093                 /* update existing queue */
2094                 blk_mq_update_tag_set_depth(set, false);
2095         }
2096         mutex_unlock(&set->tag_list_lock);
2097 }
2098
2099 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
2100                                      struct request_queue *q)
2101 {
2102         q->tag_set = set;
2103
2104         mutex_lock(&set->tag_list_lock);
2105
2106         /* Check to see if we're transitioning to shared (from 1 to 2 queues). */
2107         if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
2108                 set->flags |= BLK_MQ_F_TAG_SHARED;
2109                 /* update existing queue */
2110                 blk_mq_update_tag_set_depth(set, true);
2111         }
2112         if (set->flags & BLK_MQ_F_TAG_SHARED)
2113                 queue_set_hctx_shared(q, true);
2114         list_add_tail(&q->tag_set_list, &set->tag_list);
2115
2116         mutex_unlock(&set->tag_list_lock);
2117 }
2118
2119 /*
2120  * This is the actual release handler for mq, but we call it from the
2121  * request queue's release handler to avoid use-after-free problems;
2122  * q->mq_kobj should not have been introduced, but we cannot group the
2123  * ctx/hctx kobjects without it.
2124  */
2125 void blk_mq_release(struct request_queue *q)
2126 {
2127         struct blk_mq_hw_ctx *hctx;
2128         unsigned int i;
2129
2130         blk_mq_sched_teardown(q);
2131
2132         /* hctx kobj stays in hctx */
2133         queue_for_each_hw_ctx(q, hctx, i) {
2134                 if (!hctx)
2135                         continue;
2136                 kfree(hctx->ctxs);
2137                 kfree(hctx);
2138         }
2139
2140         q->mq_map = NULL;
2141
2142         kfree(q->queue_hw_ctx);
2143
2144         /* ctx kobj stays in queue_ctx */
2145         free_percpu(q->queue_ctx);
2146 }
2147
2148 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
2149 {
2150         struct request_queue *uninit_q, *q;
2151
2152         uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
2153         if (!uninit_q)
2154                 return ERR_PTR(-ENOMEM);
2155
2156         q = blk_mq_init_allocated_queue(set, uninit_q);
2157         if (IS_ERR(q))
2158                 blk_cleanup_queue(uninit_q);
2159
2160         return q;
2161 }
2162 EXPORT_SYMBOL(blk_mq_init_queue);
2163
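/*
 * (Re)allocate and initialize hardware contexts for set->nr_hw_queues
 * queues, reusing any that already exist; any hctx beyond the new count
 * is torn down and freed.  On partial failure, q->nr_hw_queues ends up as
 * the number of contexts that were initialized successfully.  Called from
 * blk_mq_init_allocated_queue() and, with the queues frozen, from
 * blk_mq_update_nr_hw_queues().
 */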
2164 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2165                                                 struct request_queue *q)
2166 {
2167         int i, j;
2168         struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
2169
2170         blk_mq_sysfs_unregister(q);
2171         for (i = 0; i < set->nr_hw_queues; i++) {
2172                 int node;
2173
2174                 if (hctxs[i])
2175                         continue;
2176
2177                 node = blk_mq_hw_queue_to_node(q->mq_map, i);
2178                 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
2179                                         GFP_KERNEL, node);
2180                 if (!hctxs[i])
2181                         break;
2182
2183                 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
2184                                                 node)) {
2185                         kfree(hctxs[i]);
2186                         hctxs[i] = NULL;
2187                         break;
2188                 }
2189
2190                 atomic_set(&hctxs[i]->nr_active, 0);
2191                 hctxs[i]->numa_node = node;
2192                 hctxs[i]->queue_num = i;
2193
2194                 if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
2195                         free_cpumask_var(hctxs[i]->cpumask);
2196                         kfree(hctxs[i]);
2197                         hctxs[i] = NULL;
2198                         break;
2199                 }
2200                 blk_mq_hctx_kobj_init(hctxs[i]);
2201         }
2202         for (j = i; j < q->nr_hw_queues; j++) {
2203                 struct blk_mq_hw_ctx *hctx = hctxs[j];
2204
2205                 if (hctx) {
2206                         if (hctx->tags)
2207                                 blk_mq_free_map_and_requests(set, j);
2208                         blk_mq_exit_hctx(q, set, hctx, j);
2209                         free_cpumask_var(hctx->cpumask);
2210                         kobject_put(&hctx->kobj);
2211                         kfree(hctx->ctxs);
2212                         kfree(hctx);
2213                         hctxs[j] = NULL;
2214
2215                 }
2216         }
2217         q->nr_hw_queues = i;
2218         blk_mq_sysfs_register(q);
2219 }
2220
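/*
 * Set up a previously allocated request queue for blk-mq use: allocate
 * the per-cpu software queues and the hctx array, initialize the hardware
 * contexts, pick the single- or multi-queue make_request path, add the
 * queue to the tag set, and map software to hardware queues based on the
 * CPUs that are currently online.
 */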
2221 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2222                                                   struct request_queue *q)
2223 {
2224         /* mark the queue as mq asap */
2225         q->mq_ops = set->ops;
2226
2227         q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2228         if (!q->queue_ctx)
2229                 goto err_exit;
2230
2231         q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
2232                                                 GFP_KERNEL, set->numa_node);
2233         if (!q->queue_hw_ctx)
2234                 goto err_percpu;
2235
2236         q->mq_map = set->mq_map;
2237
2238         blk_mq_realloc_hw_ctxs(set, q);
2239         if (!q->nr_hw_queues)
2240                 goto err_hctxs;
2241
2242         INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
2243         blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
2244
2245         q->nr_queues = nr_cpu_ids;
2246
2247         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
2248
2249         if (!(set->flags & BLK_MQ_F_SG_MERGE))
2250                 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
2251
2252         q->sg_reserved_size = INT_MAX;
2253
2254         INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
2255         INIT_LIST_HEAD(&q->requeue_list);
2256         spin_lock_init(&q->requeue_lock);
2257
2258         if (q->nr_hw_queues > 1)
2259                 blk_queue_make_request(q, blk_mq_make_request);
2260         else
2261                 blk_queue_make_request(q, blk_sq_make_request);
2262
2263         /*
2264          * Do this after blk_queue_make_request() overrides it...
2265          */
2266         q->nr_requests = set->queue_depth;
2267
2268         /*
2269          * Default to classic polling
2270          */
2271         q->poll_nsec = -1;
2272
2273         if (set->ops->complete)
2274                 blk_queue_softirq_done(q, set->ops->complete);
2275
2276         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
2277
2278         get_online_cpus();
2279         mutex_lock(&all_q_mutex);
2280
2281         list_add_tail(&q->all_q_node, &all_q_list);
2282         blk_mq_add_queue_tag_set(set, q);
2283         blk_mq_map_swqueue(q, cpu_online_mask);
2284
2285         mutex_unlock(&all_q_mutex);
2286         put_online_cpus();
2287
2288         return q;
2289
2290 err_hctxs:
2291         kfree(q->queue_hw_ctx);
2292 err_percpu:
2293         free_percpu(q->queue_ctx);
2294 err_exit:
2295         q->mq_ops = NULL;
2296         return ERR_PTR(-ENOMEM);
2297 }
2298 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
2299
2300 void blk_mq_free_queue(struct request_queue *q)
2301 {
2302         struct blk_mq_tag_set   *set = q->tag_set;
2303
2304         mutex_lock(&all_q_mutex);
2305         list_del_init(&q->all_q_node);
2306         mutex_unlock(&all_q_mutex);
2307
2308         wbt_exit(q);
2309
2310         blk_mq_del_queue_tag_set(q);
2311
2312         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2313         blk_mq_free_hw_queues(q, set);
2314 }
2315
2316 /* Basically redo blk_mq_init_queue with queue frozen */
2317 static void blk_mq_queue_reinit(struct request_queue *q,
2318                                 const struct cpumask *online_mask)
2319 {
2320         WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
2321
2322         blk_mq_sysfs_unregister(q);
2323
2324         /*
2325          * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
2326          * we should change hctx numa_node according to the new topology (this
2327          * involves freeing and re-allocating memory; worth doing?)
2328          */
2329
2330         blk_mq_map_swqueue(q, online_mask);
2331
2332         blk_mq_sysfs_register(q);
2333 }
2334
2335 /*
2336  * New online cpumask which is going to be set in this hotplug event.
2337  * Declare this cpumask as global, as cpu-hotplug operations are invoked
2338  * one by one and dynamically allocating it could result in a failure.
2339  */
2340 static struct cpumask cpuhp_online_new;
2341
2342 static void blk_mq_queue_reinit_work(void)
2343 {
2344         struct request_queue *q;
2345
2346         mutex_lock(&all_q_mutex);
2347         /*
2348          * We need to freeze and reinit all existing queues.  Freezing
2349          * involves a synchronous wait for an RCU grace period, and doing it
2350          * one by one may take a long time.  Start freezing all queues in
2351          * one swoop and then wait for the completions so that freezing can
2352          * take place in parallel.
2353          */
2354         list_for_each_entry(q, &all_q_list, all_q_node)
2355                 blk_mq_freeze_queue_start(q);
2356         list_for_each_entry(q, &all_q_list, all_q_node)
2357                 blk_mq_freeze_queue_wait(q);
2358
2359         list_for_each_entry(q, &all_q_list, all_q_node)
2360                 blk_mq_queue_reinit(q, &cpuhp_online_new);
2361
2362         list_for_each_entry(q, &all_q_list, all_q_node)
2363                 blk_mq_unfreeze_queue(q);
2364
2365         mutex_unlock(&all_q_mutex);
2366 }
2367
2368 static int blk_mq_queue_reinit_dead(unsigned int cpu)
2369 {
2370         cpumask_copy(&cpuhp_online_new, cpu_online_mask);
2371         blk_mq_queue_reinit_work();
2372         return 0;
2373 }
2374
2375 /*
2376  * Before a hot-added cpu starts handling requests, new mappings must be
2377  * established.  Otherwise, requests queued on that cpu might never be
2378  * dispatched.
2379  *
2380  * For example, there is a single hw queue (hctx) and two CPU queues (ctx0
2381  * for CPU0, and ctx1 for CPU1).
2382  *
2383  * Now CPU1 has just been onlined and a request is inserted into ctx1->rq_list,
2384  * setting bit0 in the pending bitmap, as ctx1->index_hw is still zero.
2385  *
2386  * And then while running hw queue, blk_mq_flush_busy_ctxs() finds bit0 is set
2387  * in pending bitmap and tries to retrieve requests in hctx->ctxs[0]->rq_list.
2388  * But hctx->ctxs[0] is a pointer to ctx0, so the request in ctx1->rq_list is
2389  * ignored.
2390  */
2391 static int blk_mq_queue_reinit_prepare(unsigned int cpu)
2392 {
2393         cpumask_copy(&cpuhp_online_new, cpu_online_mask);
2394         cpumask_set_cpu(cpu, &cpuhp_online_new);
2395         blk_mq_queue_reinit_work();
2396         return 0;
2397 }
2398
2399 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2400 {
2401         int i;
2402
2403         for (i = 0; i < set->nr_hw_queues; i++)
2404                 if (!__blk_mq_alloc_rq_map(set, i))
2405                         goto out_unwind;
2406
2407         return 0;
2408
2409 out_unwind:
2410         while (--i >= 0)
2411                 blk_mq_free_rq_map(set->tags[i]);
2412
2413         return -ENOMEM;
2414 }
2415
2416 /*
2417  * Allocate the request maps associated with this tag_set. Note that this
2418  * may reduce the depth asked for, if memory is tight. set->queue_depth
2419  * will be updated to reflect the allocated depth.
2420  */
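/*
 * For example, if allocation fails at a queue_depth of 256, the depth is
 * retried at 128, 64, ... until it either succeeds or drops below
 * set->reserved_tags + BLK_MQ_TAG_MIN, in which case -ENOMEM is returned.
 */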
2421 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2422 {
2423         unsigned int depth;
2424         int err;
2425
2426         depth = set->queue_depth;
2427         do {
2428                 err = __blk_mq_alloc_rq_maps(set);
2429                 if (!err)
2430                         break;
2431
2432                 set->queue_depth >>= 1;
2433                 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2434                         err = -ENOMEM;
2435                         break;
2436                 }
2437         } while (set->queue_depth);
2438
2439         if (!set->queue_depth || err) {
2440                 pr_err("blk-mq: failed to allocate request map\n");
2441                 return -ENOMEM;
2442         }
2443
2444         if (depth != set->queue_depth)
2445                 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2446                                                 depth, set->queue_depth);
2447
2448         return 0;
2449 }
2450
2451 /*
2452  * Alloc a tag set to be associated with one or more request queues.
2453  * May fail with EINVAL for various error conditions. May adjust the
2454  * requested depth down, if it is too large. In that case, the adjusted
2455  * value will be stored in set->queue_depth.
2456  */
2457 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2458 {
2459         int ret;
2460
2461         BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2462
2463         if (!set->nr_hw_queues)
2464                 return -EINVAL;
2465         if (!set->queue_depth)
2466                 return -EINVAL;
2467         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2468                 return -EINVAL;
2469
2470         if (!set->ops->queue_rq)
2471                 return -EINVAL;
2472
2473         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2474                 pr_info("blk-mq: reduced tag depth to %u\n",
2475                         BLK_MQ_MAX_DEPTH);
2476                 set->queue_depth = BLK_MQ_MAX_DEPTH;
2477         }
2478
2479         /*
2480          * If a crashdump is active, then we are potentially in a very
2481          * memory constrained environment. Limit us to 1 queue and
2482          * 64 tags to prevent using too much memory.
2483          */
2484         if (is_kdump_kernel()) {
2485                 set->nr_hw_queues = 1;
2486                 set->queue_depth = min(64U, set->queue_depth);
2487         }
2488         /*
2489          * There is no use for more h/w queues than cpus.
2490          */
2491         if (set->nr_hw_queues > nr_cpu_ids)
2492                 set->nr_hw_queues = nr_cpu_ids;
2493
2494         set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
2495                                  GFP_KERNEL, set->numa_node);
2496         if (!set->tags)
2497                 return -ENOMEM;
2498
2499         ret = -ENOMEM;
2500         set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
2501                         GFP_KERNEL, set->numa_node);
2502         if (!set->mq_map)
2503                 goto out_free_tags;
2504
2505         if (set->ops->map_queues)
2506                 ret = set->ops->map_queues(set);
2507         else
2508                 ret = blk_mq_map_queues(set);
2509         if (ret)
2510                 goto out_free_mq_map;
2511
2512         ret = blk_mq_alloc_rq_maps(set);
2513         if (ret)
2514                 goto out_free_mq_map;
2515
2516         mutex_init(&set->tag_list_lock);
2517         INIT_LIST_HEAD(&set->tag_list);
2518
2519         return 0;
2520
2521 out_free_mq_map:
2522         kfree(set->mq_map);
2523         set->mq_map = NULL;
2524 out_free_tags:
2525         kfree(set->tags);
2526         set->tags = NULL;
2527         return ret;
2528 }
2529 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
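
/*
 * Illustrative driver-side usage, not taken from this file ("my_mq_ops"
 * and "struct my_cmd" are hypothetical, and the tag_set must outlive any
 * queue created from it):
 *
 *	static struct blk_mq_tag_set set = {
 *		.ops		= &my_mq_ops,
 *		.nr_hw_queues	= 1,
 *		.queue_depth	= 64,
 *		.numa_node	= NUMA_NO_NODE,
 *		.cmd_size	= sizeof(struct my_cmd),
 *		.flags		= BLK_MQ_F_SHOULD_MERGE,
 *	};
 *	struct request_queue *q;
 *	int ret;
 *
 *	ret = blk_mq_alloc_tag_set(&set);
 *	if (ret)
 *		return ret;
 *	q = blk_mq_init_queue(&set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(&set);
 *		return PTR_ERR(q);
 *	}
 */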
2530
2531 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2532 {
2533         int i;
2534
2535         for (i = 0; i < nr_cpu_ids; i++)
2536                 blk_mq_free_map_and_requests(set, i);
2537
2538         kfree(set->mq_map);
2539         set->mq_map = NULL;
2540
2541         kfree(set->tags);
2542         set->tags = NULL;
2543 }
2544 EXPORT_SYMBOL(blk_mq_free_tag_set);
2545
2546 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2547 {
2548         struct blk_mq_tag_set *set = q->tag_set;
2549         struct blk_mq_hw_ctx *hctx;
2550         int i, ret;
2551
2552         if (!set)
2553                 return -EINVAL;
2554
2555         ret = 0;
2556         queue_for_each_hw_ctx(q, hctx, i) {
2557                 if (!hctx->tags)
2558                         continue;
2559                 /*
2560                  * If we're using an MQ scheduler, just update the scheduler
2561                  * queue depth. This is similar to what the old code would do.
2562                  */
2563                 if (!hctx->sched_tags)
2564                         ret = blk_mq_tag_update_depth(hctx->tags,
2565                                                         min(nr, set->queue_depth));
2566                 else
2567                         ret = blk_mq_tag_update_depth(hctx->sched_tags, nr);
2568                 if (ret)
2569                         break;
2570         }
2571
2572         if (!ret)
2573                 q->nr_requests = nr;
2574
2575         return ret;
2576 }
2577
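/*
 * Change the number of hardware queues for every request queue sharing
 * this tag set: freeze all queues, reallocate the hardware contexts,
 * switch between the single- and multi-queue make_request paths as
 * needed, redo the software to hardware queue mapping, and unfreeze.
 * Values above nr_cpu_ids are capped; a value below 1 or equal to the
 * current count is ignored.
 */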
2578 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
2579 {
2580         struct request_queue *q;
2581
2582         if (nr_hw_queues > nr_cpu_ids)
2583                 nr_hw_queues = nr_cpu_ids;
2584         if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
2585                 return;
2586
2587         list_for_each_entry(q, &set->tag_list, tag_set_list)
2588                 blk_mq_freeze_queue(q);
2589
2590         set->nr_hw_queues = nr_hw_queues;
2591         list_for_each_entry(q, &set->tag_list, tag_set_list) {
2592                 blk_mq_realloc_hw_ctxs(set, q);
2593
2594                 if (q->nr_hw_queues > 1)
2595                         blk_queue_make_request(q, blk_mq_make_request);
2596                 else
2597                         blk_queue_make_request(q, blk_sq_make_request);
2598
2599                 blk_mq_queue_reinit(q, cpu_online_mask);
2600         }
2601
2602         list_for_each_entry(q, &set->tag_list, tag_set_list)
2603                 blk_mq_unfreeze_queue(q);
2604 }
2605 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
2606
2607 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
2608                                        struct blk_mq_hw_ctx *hctx,
2609                                        struct request *rq)
2610 {
2611         struct blk_rq_stat stat[2];
2612         unsigned long ret = 0;
2613
2614         /*
2615          * If stats collection isn't on, don't sleep but turn it on for
2616          * future users
2617          */
2618         if (!blk_stat_enable(q))
2619                 return 0;
2620
2621         /*
2622          * We don't have to do this once per IO; we should optimize this
2623          * to just use the current window of stats until it changes
2624          */
2625         memset(&stat, 0, sizeof(stat));
2626         blk_hctx_stat_get(hctx, stat);
2627
2628         /*
2629          * As an optimistic guess, use half of the mean service time
2630          * for this type of request. We can (and should) make this smarter.
2631          * For instance, if the completion latencies are tight, we can
2632          * get closer than just half the mean. This is especially
2633          * important on devices where the completion latencies are longer
2634          * than ~10 usec.
2635          */
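        /*
         * Worked example with made-up numbers: if the mean read completion
         * time were 8 usec, we would sleep for about half of that (~4 usec)
         * before falling back to polling.
         */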
2636         if (req_op(rq) == REQ_OP_READ && stat[BLK_STAT_READ].nr_samples)
2637                 ret = (stat[BLK_STAT_READ].mean + 1) / 2;
2638         else if (req_op(rq) == REQ_OP_WRITE && stat[BLK_STAT_WRITE].nr_samples)
2639                 ret = (stat[BLK_STAT_WRITE].mean + 1) / 2;
2640
2641         return ret;
2642 }
2643
2644 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
2645                                      struct blk_mq_hw_ctx *hctx,
2646                                      struct request *rq)
2647 {
2648         struct hrtimer_sleeper hs;
2649         enum hrtimer_mode mode;
2650         unsigned int nsecs;
2651         ktime_t kt;
2652
2653         if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags))
2654                 return false;
2655
2656         /*
2657          * poll_nsec can be:
2658          *
2659          * -1:  don't ever hybrid sleep
2660          *  0:  use half of prev avg
2661          * >0:  use this specific value
2662          */
2663         if (q->poll_nsec == -1)
2664                 return false;
2665         else if (q->poll_nsec > 0)
2666                 nsecs = q->poll_nsec;
2667         else
2668                 nsecs = blk_mq_poll_nsecs(q, hctx, rq);
2669
2670         if (!nsecs)
2671                 return false;
2672
2673         set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
2674
2675         /*
2676          * This will be replaced with the stats tracking code, using
2677          * 'avg_completion_time / 2' as the pre-sleep target.
2678          */
2679         kt = nsecs;
2680
2681         mode = HRTIMER_MODE_REL;
2682         hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
2683         hrtimer_set_expires(&hs.timer, kt);
2684
2685         hrtimer_init_sleeper(&hs, current);
2686         do {
2687                 if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
2688                         break;
2689                 set_current_state(TASK_UNINTERRUPTIBLE);
2690                 hrtimer_start_expires(&hs.timer, mode);
2691                 if (hs.task)
2692                         io_schedule();
2693                 hrtimer_cancel(&hs.timer);
2694                 mode = HRTIMER_MODE_ABS;
2695         } while (hs.task && !signal_pending(current));
2696
2697         __set_current_state(TASK_RUNNING);
2698         destroy_hrtimer_on_stack(&hs.timer);
2699         return true;
2700 }
2701
2702 static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
2703 {
2704         struct request_queue *q = hctx->queue;
2705         long state;
2706
2707         /*
2708          * If we sleep, have the caller restart the poll loop to reset
2709          * the state. Like for the other success return cases, the
2710          * caller is responsible for checking if the IO completed. If
2711          * the IO isn't complete, we'll get called again and will go
2712          * straight to the busy poll loop.
2713          */
2714         if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
2715                 return true;
2716
2717         hctx->poll_considered++;
2718
2719         state = current->state;
2720         while (!need_resched()) {
2721                 int ret;
2722
2723                 hctx->poll_invoked++;
2724
2725                 ret = q->mq_ops->poll(hctx, rq->tag);
2726                 if (ret > 0) {
2727                         hctx->poll_success++;
2728                         set_current_state(TASK_RUNNING);
2729                         return true;
2730                 }
2731
2732                 if (signal_pending_state(state, current))
2733                         set_current_state(TASK_RUNNING);
2734
2735                 if (current->state == TASK_RUNNING)
2736                         return true;
2737                 if (ret < 0)
2738                         break;
2739                 cpu_relax();
2740         }
2741
2742         return false;
2743 }
2744
2745 bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
2746 {
2747         struct blk_mq_hw_ctx *hctx;
2748         struct blk_plug *plug;
2749         struct request *rq;
2750
2751         if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
2752             !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
2753                 return false;
2754
2755         plug = current->plug;
2756         if (plug)
2757                 blk_flush_plug_list(plug, false);
2758
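        /*
         * The cookie handed back at submission time packs the hardware
         * queue index together with a tag; blk_qc_t_is_internal() tells us
         * whether that tag lives in the scheduler tag space (sched_tags) or
         * in the driver tag space (tags).
         */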
2759         hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
2760         if (!blk_qc_t_is_internal(cookie))
2761                 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
2762         else
2763                 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
2764
2765         return __blk_mq_poll(hctx, rq);
2766 }
2767 EXPORT_SYMBOL_GPL(blk_mq_poll);
2768
2769 void blk_mq_disable_hotplug(void)
2770 {
2771         mutex_lock(&all_q_mutex);
2772 }
2773
2774 void blk_mq_enable_hotplug(void)
2775 {
2776         mutex_unlock(&all_q_mutex);
2777 }
2778
2779 static int __init blk_mq_init(void)
2780 {
2781         cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
2782                                 blk_mq_hctx_notify_dead);
2783
2784         cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare",
2785                                   blk_mq_queue_reinit_prepare,
2786                                   blk_mq_queue_reinit_dead);
2787         return 0;
2788 }
2789 subsys_initcall(blk_mq_init);