/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash: requests are hashed by the sector they end at, so a bio
 * that starts at a request's hash key is a back merge candidate.
 */
#define rq_hash_key(rq)         (blk_rq_pos(rq) + blk_rq_sectors(rq))

/*
 * Query the io scheduler to see if the bio being issued by the current
 * process may be merged with rq.
 */
static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (e->uses_mq && e->type->ops.mq.allow_merge)
                return e->type->ops.mq.allow_merge(q, rq, bio);
        else if (!e->uses_mq && e->type->ops.sq.elevator_allow_bio_merge_fn)
                return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio);

        return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
        if (!blk_rq_merge_ok(rq, bio))
                return false;

        if (!elv_iosched_allow_bio_merge(rq, bio))
                return false;

        return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);

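/*
 * Look up a registered elevator type by name.  The caller must hold
 * elv_list_lock so the result cannot be unregistered underneath it.
 */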
static struct elevator_type *elevator_find(const char *name)
{
        struct elevator_type *e;

        list_for_each_entry(e, &elv_list, list) {
                if (!strcmp(e->elevator_name, name))
                        return e;
        }

        return NULL;
}

static void elevator_put(struct elevator_type *e)
{
        module_put(e->elevator_owner);
}

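/*
 * Find an elevator type and take a reference on the module implementing
 * it.  If it isn't registered yet and @try_loading is set, try to load
 * "<name>-iosched" and look it up again.
 */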
static struct elevator_type *elevator_get(const char *name, bool try_loading)
{
        struct elevator_type *e;

        spin_lock(&elv_list_lock);

        e = elevator_find(name);
        if (!e && try_loading) {
                spin_unlock(&elv_list_lock);
                request_module("%s-iosched", name);
                spin_lock(&elv_list_lock);
                e = elevator_find(name);
        }

        if (e && !try_module_get(e->elevator_owner))
                e = NULL;

        spin_unlock(&elv_list_lock);

        return e;
}

static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
        /*
         * Be backwards-compatible with previous kernels, so users
         * won't get the wrong elevator.
         */
        strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
        return 1;
}

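/* e.g. booting with "elevator=deadline" selects deadline for legacy (non-mq) queues */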
__setup("elevator=", elevator_setup);

/* called during boot to load the elevator chosen by the elevator param */
void __init load_default_elevator_module(void)
{
        struct elevator_type *e;

        if (!chosen_elevator[0])
                return;

        spin_lock(&elv_list_lock);
        e = elevator_find(chosen_elevator);
        spin_unlock(&elv_list_lock);

        if (!e)
                request_module("%s-iosched", chosen_elevator);
}

static struct kobj_type elv_ktype;

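/*
 * Allocate and minimally initialize an elevator_queue on the queue's home
 * node.  The caller is expected to hold a reference on @e; that reference
 * is dropped by elevator_release() when the kobject goes away.
 */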
struct elevator_queue *elevator_alloc(struct request_queue *q,
                                  struct elevator_type *e)
{
        struct elevator_queue *eq;

        eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
        if (unlikely(!eq))
                return NULL;

        eq->type = e;
        kobject_init(&eq->kobj, &elv_ktype);
        mutex_init(&eq->sysfs_lock);
        hash_init(eq->hash);
        eq->uses_mq = e->uses_mq;

        return eq;
}
EXPORT_SYMBOL(elevator_alloc);

static void elevator_release(struct kobject *kobj)
{
        struct elevator_queue *e;

        e = container_of(kobj, struct elevator_queue, kobj);
        elevator_put(e->type);
        kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
        struct elevator_type *e = NULL;
        int err;

        /*
         * q->sysfs_lock must be held to provide mutual exclusion between
         * elevator_switch() and here.
         */
        lockdep_assert_held(&q->sysfs_lock);

        if (unlikely(q->elevator))
                return 0;

        INIT_LIST_HEAD(&q->queue_head);
        q->last_merge = NULL;
        q->end_sector = 0;
        q->boundary_rq = NULL;

        if (name) {
                e = elevator_get(name, true);
                if (!e)
                        return -EINVAL;
        }

        /*
         * Use the default elevator specified by the boot param for
         * non-mq devices, or by config option.  Don't try to load modules
         * as we could be running off async and request_module() isn't
         * allowed from async.
         */
        if (!e && !q->mq_ops && *chosen_elevator) {
                e = elevator_get(chosen_elevator, false);
                if (!e)
                        printk(KERN_ERR "I/O scheduler %s not found\n",
                                                        chosen_elevator);
        }

        if (!e) {
                if (q->mq_ops && q->nr_hw_queues == 1)
                        e = elevator_get(CONFIG_DEFAULT_SQ_IOSCHED, false);
                else if (q->mq_ops)
                        e = elevator_get(CONFIG_DEFAULT_MQ_IOSCHED, false);
                else
                        e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);

                if (!e) {
                        printk(KERN_ERR
                                "Default I/O scheduler not found. "
                                "Using noop/none.\n");
                        e = elevator_get("noop", false);
                }
        }

        if (e->uses_mq) {
                err = blk_mq_sched_setup(q);
                if (!err)
                        err = e->ops.mq.init_sched(q, e);
        } else
                err = e->ops.sq.elevator_init_fn(q, e);
        if (err) {
                if (e->uses_mq)
                        blk_mq_sched_teardown(q);
                elevator_put(e);
        }
        return err;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct elevator_queue *e)
{
        mutex_lock(&e->sysfs_lock);
        if (e->uses_mq && e->type->ops.mq.exit_sched)
                e->type->ops.mq.exit_sched(e);
        else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
                e->type->ops.sq.elevator_exit_fn(e);
        mutex_unlock(&e->sysfs_lock);

        kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
{
        hash_del(&rq->hash);
        rq->rq_flags &= ~RQF_HASHED;
}

void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
        if (ELV_ON_HASH(rq))
                __elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);

void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        BUG_ON(ELV_ON_HASH(rq));
        hash_add(e->hash, &rq->hash, rq_hash_key(rq));
        rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);

void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
        __elv_rqhash_del(rq);
        elv_rqhash_add(q, rq);
}

struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
        struct elevator_queue *e = q->elevator;
        struct hlist_node *next;
        struct request *rq;

        hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
                BUG_ON(!ELV_ON_HASH(rq));

                if (unlikely(!rq_mergeable(rq))) {
                        __elv_rqhash_del(rq);
                        continue;
                }

                if (rq_hash_key(rq) == offset)
                        return rq;
        }

        return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct request *__rq;

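        /*
         * Walk down to the insertion point.  Requests with equal sector
         * keys go to the right, so ties keep their insertion order on an
         * in-order walk.
         */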
        while (*p) {
                parent = *p;
                __rq = rb_entry(parent, struct request, rb_node);

                if (blk_rq_pos(rq) < blk_rq_pos(__rq))
                        p = &(*p)->rb_left;
                else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
                        p = &(*p)->rb_right;
        }

        rb_link_node(&rq->rb_node, parent, p);
        rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
        BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
        rb_erase(&rq->rb_node, root);
        RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
        struct rb_node *n = root->rb_node;
        struct request *rq;

        while (n) {
                rq = rb_entry(n, struct request, rb_node);

                if (sector < blk_rq_pos(rq))
                        n = n->rb_left;
                else if (sector > blk_rq_pos(rq))
                        n = n->rb_right;
                else
                        return rq;
        }

        return NULL;
}
EXPORT_SYMBOL(elv_rb_find);

/*
 * Insert rq into the dispatch queue of q.  The queue lock must be held on
 * entry.  rq is sorted into the dispatch queue rather than appended.  To be
 * used by specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
        sector_t boundary;
        struct list_head *entry;

        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->nr_sorted--;

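        /*
         * Scan backwards from the tail for the first request that rq may
         * follow: same op and data direction, not yet started, and on the
         * same side of the one-way elevator boundary (q->end_sector).
         */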
        boundary = q->end_sector;
        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);

                if (req_op(rq) != req_op(pos))
                        break;
                if (rq_data_dir(rq) != rq_data_dir(pos))
                        break;
                if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER))
                        break;
                if (blk_rq_pos(rq) >= boundary) {
                        if (blk_rq_pos(pos) < boundary)
                                continue;
                } else {
                        if (blk_rq_pos(pos) >= boundary)
                                break;
                }
                if (blk_rq_pos(rq) >= blk_rq_pos(pos))
                        break;
        }

        list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into the dispatch queue of q.  The queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->nr_sorted--;

        q->end_sector = rq_end_sector(rq);
        q->boundary_rq = rq;
        list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

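/*
 * Try to find a request that bio can be merged into.  On a hit, *req is
 * set to that request and the merge type (front/back) is returned;
 * otherwise ELEVATOR_NO_MERGE is returned.
 */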
enum elv_merge elv_merge(struct request_queue *q, struct request **req,
                struct bio *bio)
{
        struct elevator_queue *e = q->elevator;
        struct request *__rq;

        /*
         * Levels of merges:
         *      nomerges:  No merges at all attempted
         *      noxmerges: Only simple one-hit cache try
         *      merges:    All merge tries attempted
         */
        if (blk_queue_nomerges(q) || !bio_mergeable(bio))
                return ELEVATOR_NO_MERGE;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
                enum elv_merge ret = blk_try_merge(q->last_merge, bio);

                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;
                        return ret;
                }
        }

        if (blk_queue_noxmerges(q))
                return ELEVATOR_NO_MERGE;

        /*
         * See if our hash lookup can find a potential backmerge.
         */
        __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
        if (__rq && elv_bio_merge_ok(__rq, bio)) {
                *req = __rq;
                return ELEVATOR_BACK_MERGE;
        }

        if (e->uses_mq && e->type->ops.mq.request_merge)
                return e->type->ops.mq.request_merge(q, req, bio);
        else if (!e->uses_mq && e->type->ops.sq.elevator_merge_fn)
                return e->type->ops.sq.elevator_merge_fn(q, req, bio);

        return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
{
        struct request *__rq;
        bool ret;

        if (blk_queue_nomerges(q))
                return false;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
                return true;

        if (blk_queue_noxmerges(q))
                return false;

        ret = false;
        /*
         * See if our hash lookup can find a potential backmerge.
         */
        while (1) {
                __rq = elv_rqhash_find(q, blk_rq_pos(rq));
                if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
                        break;

                /* The merged request could be merged with others, try again */
                ret = true;
                rq = __rq;
        }

        return ret;
}

void elv_merged_request(struct request_queue *q, struct request *rq,
                enum elv_merge type)
{
        struct elevator_queue *e = q->elevator;

        if (e->uses_mq && e->type->ops.mq.request_merged)
                e->type->ops.mq.request_merged(q, rq, type);
        else if (!e->uses_mq && e->type->ops.sq.elevator_merged_fn)
                e->type->ops.sq.elevator_merged_fn(q, rq, type);

        if (type == ELEVATOR_BACK_MERGE)
                elv_rqhash_reposition(q, rq);

        q->last_merge = rq;
}

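/*
 * Called when 'next' has been merged into 'rq'; 'next' is about to be
 * freed, so drop it from the elevator's bookkeeping.
 */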
void elv_merge_requests(struct request_queue *q, struct request *rq,
                             struct request *next)
{
        struct elevator_queue *e = q->elevator;
        bool next_sorted = false;

        if (e->uses_mq && e->type->ops.mq.requests_merged)
                e->type->ops.mq.requests_merged(q, rq, next);
        else if (!e->uses_mq && e->type->ops.sq.elevator_merge_req_fn) {
                next_sorted = (__force bool)(next->rq_flags & RQF_SORTED);
                if (next_sorted)
                        e->type->ops.sq.elevator_merge_req_fn(q, rq, next);
        }

        elv_rqhash_reposition(q, rq);

        if (next_sorted) {
                elv_rqhash_del(q, next);
                q->nr_sorted--;
        }

        q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
                        struct bio *bio)
{
        struct elevator_queue *e = q->elevator;

        if (WARN_ON_ONCE(e->uses_mq))
                return;

        if (e->type->ops.sq.elevator_bio_merged_fn)
                e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio);
}

#ifdef CONFIG_PM
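/*
 * Track the number of non-PM requests pending on a runtime-PM capable
 * queue, so the runtime PM helpers can tell when the queue is idle, and
 * resume a suspended device when new work arrives.
 */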
static void blk_pm_requeue_request(struct request *rq)
{
        if (rq->q->dev && !(rq->rq_flags & RQF_PM))
                rq->q->nr_pending--;
}

static void blk_pm_add_request(struct request_queue *q, struct request *rq)
{
        if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
            (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
                pm_request_resume(q->dev);
}
#else
static inline void blk_pm_requeue_request(struct request *rq) {}
static inline void blk_pm_add_request(struct request_queue *q,
                                      struct request *rq)
{
}
#endif

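/*
 * Put rq back at the front of the dispatch queue, undoing the in-flight
 * accounting that was done when it was dispatched.
 */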
void elv_requeue_request(struct request_queue *q, struct request *rq)
{
        /*
         * it already went through dequeue, we need to decrement the
         * in_flight count again
         */
        if (blk_account_rq(rq)) {
                q->in_flight[rq_is_sync(rq)]--;
                if (rq->rq_flags & RQF_SORTED)
                        elv_deactivate_rq(q, rq);
        }

        rq->rq_flags &= ~RQF_STARTED;

        blk_pm_requeue_request(rq);

        __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
{
        struct elevator_queue *e = q->elevator;
        static int printed;

        if (WARN_ON_ONCE(e->uses_mq))
                return;

        lockdep_assert_held(q->queue_lock);

        while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
                ;
        if (q->nr_sorted && printed++ < 10) {
                printk(KERN_ERR "%s: forced dispatching is broken "
                       "(nr_sorted=%u), please report this\n",
                       q->elevator->type->elevator_name, q->nr_sorted);
        }
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
        trace_block_rq_insert(q, rq);

        blk_pm_add_request(q, rq);

        rq->q = q;

        if (rq->rq_flags & RQF_SOFTBARRIER) {
                /* barriers are scheduling boundaries, update end_sector */
                if (!blk_rq_is_passthrough(rq)) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = rq;
                }
        } else if (!(rq->rq_flags & RQF_ELVPRIV) &&
                    (where == ELEVATOR_INSERT_SORT ||
                     where == ELEVATOR_INSERT_SORT_MERGE))
                where = ELEVATOR_INSERT_BACK;

        switch (where) {
        case ELEVATOR_INSERT_REQUEUE:
        case ELEVATOR_INSERT_FRONT:
                rq->rq_flags |= RQF_SOFTBARRIER;
                list_add(&rq->queuelist, &q->queue_head);
                break;

        case ELEVATOR_INSERT_BACK:
                rq->rq_flags |= RQF_SOFTBARRIER;
                elv_drain_elevator(q);
                list_add_tail(&rq->queuelist, &q->queue_head);
                /*
                 * We kick the queue here for the following reasons.
                 * - The elevator might have returned NULL previously
                 *   to delay requests and returned them now.  As the
                 *   queue wasn't empty before this request, ll_rw_blk
                 *   won't run the queue on return, resulting in a hang.
                 * - Usually, back inserted requests won't be merged
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
                __blk_run_queue(q);
                break;

        case ELEVATOR_INSERT_SORT_MERGE:
                /*
                 * If we succeed in merging this request with one in the
                 * queue already, we are done - rq has now been freed,
                 * so no need to do anything further.
                 */
                if (elv_attempt_insert_merge(q, rq))
                        break;
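                /* merge failed, fall through and sort-insert the request */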
        case ELEVATOR_INSERT_SORT:
                BUG_ON(blk_rq_is_passthrough(rq));
                rq->rq_flags |= RQF_SORTED;
                q->nr_sorted++;
                if (rq_mergeable(rq)) {
                        elv_rqhash_add(q, rq);
                        if (!q->last_merge)
                                q->last_merge = rq;
                }

                /*
                 * Some ioscheds (cfq) run q->request_fn directly, so
                 * rq cannot be accessed after calling
                 * elevator_add_req_fn.
                 */
                q->elevator->type->ops.sq.elevator_add_req_fn(q, rq);
                break;

        case ELEVATOR_INSERT_FLUSH:
                rq->rq_flags |= RQF_SOFTBARRIER;
                blk_insert_flush(rq);
                break;
        default:
                printk(KERN_ERR "%s: bad insertion point %d\n",
                       __func__, where);
                BUG();
        }
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __elv_add_request(q, rq, where);
        spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->uses_mq && e->type->ops.mq.next_request)
                return e->type->ops.mq.next_request(q, rq);
        else if (!e->uses_mq && e->type->ops.sq.elevator_latter_req_fn)
                return e->type->ops.sq.elevator_latter_req_fn(q, rq);

        return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->uses_mq && e->type->ops.mq.former_request)
                return e->type->ops.mq.former_request(q, rq);
        if (!e->uses_mq && e->type->ops.sq.elevator_former_req_fn)
                return e->type->ops.sq.elevator_former_req_fn(q, rq);
        return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq,
                    struct bio *bio, gfp_t gfp_mask)
{
        struct elevator_queue *e = q->elevator;

        if (WARN_ON_ONCE(e->uses_mq))
                return 0;

        if (e->type->ops.sq.elevator_set_req_fn)
                return e->type->ops.sq.elevator_set_req_fn(q, rq, bio, gfp_mask);
        return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (WARN_ON_ONCE(e->uses_mq))
                return;

        if (e->type->ops.sq.elevator_put_req_fn)
                e->type->ops.sq.elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, unsigned int op)
{
        struct elevator_queue *e = q->elevator;

        if (WARN_ON_ONCE(e->uses_mq))
                return 0;

        if (e->type->ops.sq.elevator_may_queue_fn)
                return e->type->ops.sq.elevator_may_queue_fn(q, op);

        return ELV_MQUEUE_MAY;
}

void elv_completed_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (WARN_ON_ONCE(e->uses_mq))
                return;

        /*
         * request is released from the driver, io must be done
         */
        if (blk_account_rq(rq)) {
                q->in_flight[rq_is_sync(rq)]--;
                if ((rq->rq_flags & RQF_SORTED) &&
                    e->type->ops.sq.elevator_completed_req_fn)
                        e->type->ops.sq.elevator_completed_req_fn(q, rq);
        }
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->show)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->type ? entry->show(e, page) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
               const char *page, size_t length)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->store)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->type ? entry->store(e, page, length) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
        .show   = elv_attr_show,
        .store  = elv_attr_store,
};

static struct kobj_type elv_ktype = {
        .sysfs_ops      = &elv_sysfs_ops,
        .release        = elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
        struct elevator_queue *e = q->elevator;
        int error;

        error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
        if (!error) {
                struct elv_fs_entry *attr = e->type->elevator_attrs;
                if (attr) {
                        while (attr->attr.name) {
                                if (sysfs_create_file(&e->kobj, &attr->attr))
                                        break;
                                attr++;
                        }
                }
                kobject_uevent(&e->kobj, KOBJ_ADD);
                e->registered = 1;
                if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn)
                        e->type->ops.sq.elevator_registered_fn(q);
        }
        return error;
}
EXPORT_SYMBOL(elv_register_queue);

void elv_unregister_queue(struct request_queue *q)
{
        if (q) {
                struct elevator_queue *e = q->elevator;

                kobject_uevent(&e->kobj, KOBJ_REMOVE);
                kobject_del(&e->kobj);
                e->registered = 0;
        }
}
EXPORT_SYMBOL(elv_unregister_queue);

int elv_register(struct elevator_type *e)
{
        char *def = "";

        /* create icq_cache if requested */
        if (e->icq_size) {
                if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
                    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
                        return -EINVAL;

                snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
                         "%s_io_cq", e->elevator_name);
                e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
                                                 e->icq_align, 0, NULL);
                if (!e->icq_cache)
                        return -ENOMEM;
        }

        /* register, don't allow duplicate names */
        spin_lock(&elv_list_lock);
        if (elevator_find(e->elevator_name)) {
                spin_unlock(&elv_list_lock);
                if (e->icq_cache)
                        kmem_cache_destroy(e->icq_cache);
                return -EBUSY;
        }
        list_add_tail(&e->list, &elv_list);
        spin_unlock(&elv_list_lock);

        /* print pretty message */
        if (!strcmp(e->elevator_name, chosen_elevator) ||
            (!*chosen_elevator &&
             !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
                def = " (default)";

        printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
                                                                def);
        return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
        /* unregister */
        spin_lock(&elv_list_lock);
        list_del_init(&e->list);
        spin_unlock(&elv_list_lock);

        /*
         * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
         * sure all RCU operations are complete before proceeding.
         */
        if (e->icq_cache) {
                rcu_barrier();
                kmem_cache_destroy(e->icq_cache);
                e->icq_cache = NULL;
        }
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * Switch to new_e io scheduler.  Be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one.  This way we have a chance of going back to the
 * old one if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
        struct elevator_queue *old = q->elevator;
        bool old_registered = false;
        int err;

        if (q->mq_ops) {
                blk_mq_freeze_queue(q);
                blk_mq_quiesce_queue(q);
        }

        /*
         * Turn on BYPASS and drain all requests w/ elevator private data.
         * Block layer doesn't call into a quiesced elevator - all requests
         * are directly put on the dispatch list without elevator data
         * using INSERT_BACK.  All requests have SOFTBARRIER set and no
         * merge happens either.
         */
        if (old) {
                old_registered = old->registered;

                if (old->uses_mq)
                        blk_mq_sched_teardown(q);

                if (!q->mq_ops)
                        blk_queue_bypass_start(q);

                /* unregister and clear all auxiliary data of the old elevator */
                if (old_registered)
                        elv_unregister_queue(q);

                spin_lock_irq(q->queue_lock);
                ioc_clear_queue(q);
                spin_unlock_irq(q->queue_lock);
        }

        /* allocate, init and register new elevator */
        if (new_e) {
                if (new_e->uses_mq) {
                        err = blk_mq_sched_setup(q);
                        if (!err)
                                err = new_e->ops.mq.init_sched(q, new_e);
                } else
                        err = new_e->ops.sq.elevator_init_fn(q, new_e);
                if (err)
                        goto fail_init;

                err = elv_register_queue(q);
                if (err)
                        goto fail_register;
        } else
                q->elevator = NULL;

        /* done, kill the old one and finish */
        if (old) {
                elevator_exit(old);
                if (!q->mq_ops)
                        blk_queue_bypass_end(q);
        }

        if (q->mq_ops) {
                blk_mq_unfreeze_queue(q);
                blk_mq_start_stopped_hw_queues(q, true);
        }

        if (new_e)
                blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
        else
                blk_add_trace_msg(q, "elv switch: none");

        return 0;

fail_register:
        if (q->mq_ops)
                blk_mq_sched_teardown(q);
        elevator_exit(q->elevator);
fail_init:
        /* switch failed, restore and re-register old elevator */
        if (old) {
                q->elevator = old;
                elv_register_queue(q);
                if (!q->mq_ops)
                        blk_queue_bypass_end(q);
        }
        if (q->mq_ops) {
                blk_mq_unfreeze_queue(q);
                blk_mq_start_stopped_hw_queues(q, true);
        }

        return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
static int __elevator_change(struct request_queue *q, const char *name)
{
        char elevator_name[ELV_NAME_MAX];
        struct elevator_type *e;

        /*
         * Special case for mq, turn off scheduling
         */
        if (q->mq_ops && !strncmp(name, "none", 4))
                return elevator_switch(q, NULL);

        strlcpy(elevator_name, name, sizeof(elevator_name));
        e = elevator_get(strstrip(elevator_name), true);
        if (!e) {
                printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
                return -EINVAL;
        }

        if (q->elevator &&
            !strcmp(elevator_name, q->elevator->type->elevator_name)) {
                elevator_put(e);
                return 0;
        }

        if (!e->uses_mq && q->mq_ops) {
                elevator_put(e);
                return -EINVAL;
        }
        if (e->uses_mq && !q->mq_ops) {
                elevator_put(e);
                return -EINVAL;
        }

        return elevator_switch(q, e);
}

int elevator_change(struct request_queue *q, const char *name)
{
        int ret;

        /* Protect q->elevator from elevator_init() */
        mutex_lock(&q->sysfs_lock);
        ret = __elevator_change(q, name);
        mutex_unlock(&q->sysfs_lock);

        return ret;
}
EXPORT_SYMBOL(elevator_change);

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
                          size_t count)
{
        int ret;

        if (!(q->mq_ops || q->request_fn))
                return count;

        ret = __elevator_change(q, name);
        if (!ret)
                return count;

        printk(KERN_ERR "elevator: switch to %s failed\n", name);
        return ret;
}

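/*
 * Show the schedulers available for this queue, with the active one in
 * square brackets, e.g. "noop deadline [cfq]".
 */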
ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
        struct elevator_queue *e = q->elevator;
        struct elevator_type *elv = NULL;
        struct elevator_type *__e;
        int len = 0;

        if (!blk_queue_stackable(q))
                return sprintf(name, "none\n");

        if (!q->elevator)
                len += sprintf(name+len, "[none] ");
        else
                elv = e->type;

        spin_lock(&elv_list_lock);
        list_for_each_entry(__e, &elv_list, list) {
                if (elv && !strcmp(elv->elevator_name, __e->elevator_name)) {
                        len += sprintf(name+len, "[%s] ", elv->elevator_name);
                        continue;
                }
                if (__e->uses_mq && q->mq_ops)
                        len += sprintf(name+len, "%s ", __e->elevator_name);
                else if (!__e->uses_mq && !q->mq_ops)
                        len += sprintf(name+len, "%s ", __e->elevator_name);
        }
        spin_unlock(&elv_list_lock);

        if (q->mq_ops && q->elevator)
                len += sprintf(name+len, "none");

        len += sprintf(name+len, "\n");
        return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbprev = rb_prev(&rq->rb_node);

        if (rbprev)
                return rb_entry_rq(rbprev);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbnext = rb_next(&rq->rb_node);

        if (rbnext)
                return rb_entry_rq(rbnext);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);