 * Block device elevator/IO-scheduler.
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 * - elevator_dequeue_fn, called when a request is taken off the active list
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 * when run without -bN
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
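 *
 * Illustrative sketch (not part of the original file): a single-queue
 * scheduler plugs into this framework by filling in an elevator_type and
 * registering it with elv_register(). The example_* names below are
 * hypothetical; the fields follow the sq ops used throughout this file.
 *
 *	static struct elevator_type elevator_example = {
 *		.ops.sq = {
 *			.elevator_merge_req_fn	= example_merged_requests,
 *			.elevator_dispatch_fn	= example_dispatch,
 *			.elevator_add_req_fn	= example_add_request,
 *			.elevator_init_fn	= example_init_queue,
 *			.elevator_exit_fn	= example_exit_queue,
 *		},
 *		.elevator_name	= "example",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return elv_register(&elevator_example);
 *	}
 *	module_init(example_init);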
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>

#include "blk-mq-sched.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
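
/*
 * The merge hash is keyed by the sector at which a request ends, so
 * elv_rqhash_find(), given the start sector of a new bio or request,
 * returns a hashed request ending exactly there - a back merge candidate.
 */
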
 * Query io scheduler to see if the current process issuing bio may be
static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (e->uses_mq && e->type->ops.mq.allow_merge)
                return e->type->ops.mq.allow_merge(q, rq, bio);
        else if (!e->uses_mq && e->type->ops.sq.elevator_allow_bio_merge_fn)
                return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio);

 * can we safely merge with this request?
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
        if (!blk_rq_merge_ok(rq, bio))

        if (!elv_iosched_allow_bio_merge(rq, bio))

EXPORT_SYMBOL(elv_bio_merge_ok);
static struct elevator_type *elevator_find(const char *name)
        struct elevator_type *e;

        list_for_each_entry(e, &elv_list, list) {
                if (!strcmp(e->elevator_name, name))

static void elevator_put(struct elevator_type *e)
        module_put(e->elevator_owner);

static struct elevator_type *elevator_get(const char *name, bool try_loading)
        struct elevator_type *e;

        spin_lock(&elv_list_lock);

        e = elevator_find(name);
        if (!e && try_loading) {
                spin_unlock(&elv_list_lock);
                request_module("%s-iosched", name);
                spin_lock(&elv_list_lock);
                e = elevator_find(name);

        if (e && !try_module_get(e->elevator_owner))

        spin_unlock(&elv_list_lock);
static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
         * Be backwards-compatible with previous kernels, so users
         * won't get the wrong elevator.
        strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);

__setup("elevator=", elevator_setup);
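
/*
 * Example: booting with "elevator=deadline" on the kernel command line
 * fills chosen_elevator, which elevator_init() consults below when no
 * scheduler is requested explicitly and which makes elv_register() report
 * that scheduler as the default.
 */
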
/* called during boot to load the elevator chosen by the elevator param */
void __init load_default_elevator_module(void)
        struct elevator_type *e;

        if (!chosen_elevator[0])

        spin_lock(&elv_list_lock);
        e = elevator_find(chosen_elevator);
        spin_unlock(&elv_list_lock);

        request_module("%s-iosched", chosen_elevator);

static struct kobj_type elv_ktype;

struct elevator_queue *elevator_alloc(struct request_queue *q,
                                  struct elevator_type *e)
        struct elevator_queue *eq;

        eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);

        kobject_init(&eq->kobj, &elv_ktype);
        mutex_init(&eq->sysfs_lock);

        eq->uses_mq = e->uses_mq;

EXPORT_SYMBOL(elevator_alloc);
static void elevator_release(struct kobject *kobj)
        struct elevator_queue *e;

        e = container_of(kobj, struct elevator_queue, kobj);
        elevator_put(e->type);

int elevator_init(struct request_queue *q, char *name)
        struct elevator_type *e = NULL;

         * q->sysfs_lock must be held to provide mutual exclusion between
         * elevator_switch() and here.
        lockdep_assert_held(&q->sysfs_lock);

        if (unlikely(q->elevator))

        INIT_LIST_HEAD(&q->queue_head);
        q->last_merge = NULL;
        q->boundary_rq = NULL;

        e = elevator_get(name, true);

         * Use the default elevator specified by config boot param or
         * config option. Don't try to load modules as we could be running
         * off async and request_module() isn't allowed from async.
        if (!e && *chosen_elevator) {
                e = elevator_get(chosen_elevator, false);
                printk(KERN_ERR "I/O scheduler %s not found\n",

        e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);
                printk(KERN_ERR
                        "Default I/O scheduler not found. " \
                        "Using noop/none.\n");
        e = elevator_get("noop", false);

        err = blk_mq_sched_setup(q);
        err = e->ops.mq.init_sched(q, e);
        err = e->ops.sq.elevator_init_fn(q, e);
        blk_mq_sched_teardown(q);

EXPORT_SYMBOL(elevator_init);
void elevator_exit(struct elevator_queue *e)
        mutex_lock(&e->sysfs_lock);
        if (e->uses_mq && e->type->ops.mq.exit_sched)
                e->type->ops.mq.exit_sched(e);
        else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
                e->type->ops.sq.elevator_exit_fn(e);
        mutex_unlock(&e->sysfs_lock);

        kobject_put(&e->kobj);

EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
        rq->rq_flags &= ~RQF_HASHED;

void elv_rqhash_del(struct request_queue *q, struct request *rq)
        __elv_rqhash_del(rq);

EXPORT_SYMBOL_GPL(elv_rqhash_del);

void elv_rqhash_add(struct request_queue *q, struct request *rq)
        struct elevator_queue *e = q->elevator;

        BUG_ON(ELV_ON_HASH(rq));
        hash_add(e->hash, &rq->hash, rq_hash_key(rq));
        rq->rq_flags |= RQF_HASHED;

EXPORT_SYMBOL_GPL(elv_rqhash_add);

void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
        __elv_rqhash_del(rq);
        elv_rqhash_add(q, rq);

struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
        struct elevator_queue *e = q->elevator;
        struct hlist_node *next;

        hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
                BUG_ON(!ELV_ON_HASH(rq));

                if (unlikely(!rq_mergeable(rq))) {
                        __elv_rqhash_del(rq);

                if (rq_hash_key(rq) == offset)
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
void elv_rb_add(struct rb_root *root, struct request *rq)
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct request *__rq;

        __rq = rb_entry(parent, struct request, rb_node);

        if (blk_rq_pos(rq) < blk_rq_pos(__rq))
        else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))

        rb_link_node(&rq->rb_node, parent, p);
        rb_insert_color(&rq->rb_node, root);

EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
        BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
        rb_erase(&rq->rb_node, root);
        RB_CLEAR_NODE(&rq->rb_node);

EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
        struct rb_node *n = root->rb_node;

        rq = rb_entry(n, struct request, rb_node);

        if (sector < blk_rq_pos(rq))
        else if (sector > blk_rq_pos(rq))

EXPORT_SYMBOL(elv_rb_find);
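
/*
 * Illustrative use (not part of the original file): sector-sorting
 * schedulers such as deadline keep one rbtree per data direction and use
 * the helpers above roughly as follows; dd->sort_list is that scheduler's
 * own field and is shown purely as an example.
 *
 *	elv_rb_add(&dd->sort_list[rq_data_dir(rq)], rq);
 *	...
 *	__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)],
 *			   bio_end_sector(bio));
 *	if (__rq && elv_bio_merge_ok(__rq, bio))
 *		... __rq starts where the bio ends: a front merge candidate
 */
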
 * Insert rq into dispatch queue of q. Queue lock must be held on
 * entry. rq is sorted into the dispatch queue. To be used by
 * specific elevators.
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
        struct list_head *entry;

        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        boundary = q->end_sector;
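        /*
         * Scan the dispatch list backwards for the insertion point: sectors
         * at or beyond the boundary (the end of the last dispatched request)
         * sort ahead of sectors below it, and the scan stops on a change of
         * operation or data direction, at software barriers, and at requests
         * the driver has already started.
         */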
        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);

                if (req_op(rq) != req_op(pos))
                if (rq_data_dir(rq) != rq_data_dir(pos))
                if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER))
                if (blk_rq_pos(rq) >= boundary) {
                        if (blk_rq_pos(pos) < boundary)
                        if (blk_rq_pos(pos) >= boundary)
                if (blk_rq_pos(rq) >= blk_rq_pos(pos))

        list_add(&rq->queuelist, entry);

EXPORT_SYMBOL(elv_dispatch_sort);

 * Insert rq into dispatch queue of q. Queue lock must be held on
 * entry. rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->end_sector = rq_end_sector(rq);

        list_add_tail(&rq->queuelist, &q->queue_head);

EXPORT_SYMBOL(elv_dispatch_add_tail);
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
        struct elevator_queue *e = q->elevator;
        struct request *__rq;

         * nomerges:  No merges at all attempted
         * noxmerges: Only simple one-hit cache try
         * merges:    All merge tries attempted
        if (blk_queue_nomerges(q) || !bio_mergeable(bio))
                return ELEVATOR_NO_MERGE;

         * First try one-hit cache.
        if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
                ret = blk_try_merge(q->last_merge, bio);
                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;

        if (blk_queue_noxmerges(q))
                return ELEVATOR_NO_MERGE;

         * See if our hash lookup can find a potential backmerge.
        __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
        if (__rq && elv_bio_merge_ok(__rq, bio)) {
                return ELEVATOR_BACK_MERGE;
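        /*
         * A hash hit can only be a back merge, since the hash is keyed by
         * the sector at which a request ends; front merges are left to the
         * scheduler callback below.
         */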
        if (e->uses_mq && e->type->ops.mq.request_merge)
                return e->type->ops.mq.request_merge(q, req, bio);
        else if (!e->uses_mq && e->type->ops.sq.elevator_merge_fn)
                return e->type->ops.sq.elevator_merge_fn(q, req, bio);

        return ELEVATOR_NO_MERGE;

 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * Returns true if we merged, false otherwise
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
        struct request *__rq;

        if (blk_queue_nomerges(q))

         * First try one-hit cache.
        if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))

        if (blk_queue_noxmerges(q))

         * See if our hash lookup can find a potential backmerge.
        __rq = elv_rqhash_find(q, blk_rq_pos(rq));
        if (!__rq || !blk_attempt_req_merge(q, __rq, rq))

        /* The merged request could be merged with others, try again */

void elv_merged_request(struct request_queue *q, struct request *rq, int type)
        struct elevator_queue *e = q->elevator;

        if (e->uses_mq && e->type->ops.mq.request_merged)
                e->type->ops.mq.request_merged(q, rq, type);
        else if (!e->uses_mq && e->type->ops.sq.elevator_merged_fn)
                e->type->ops.sq.elevator_merged_fn(q, rq, type);

        if (type == ELEVATOR_BACK_MERGE)
                elv_rqhash_reposition(q, rq);

void elv_merge_requests(struct request_queue *q, struct request *rq,
                        struct request *next)
        struct elevator_queue *e = q->elevator;
        bool next_sorted = false;

        if (e->uses_mq && e->type->ops.mq.requests_merged)
                e->type->ops.mq.requests_merged(q, rq, next);
        else if (!e->uses_mq && e->type->ops.sq.elevator_merge_req_fn) {
                next_sorted = next->rq_flags & RQF_SORTED;
                        e->type->ops.sq.elevator_merge_req_fn(q, rq, next);

        elv_rqhash_reposition(q, rq);

                elv_rqhash_del(q, next);

void elv_bio_merged(struct request_queue *q, struct request *rq,
        struct elevator_queue *e = q->elevator;

        if (WARN_ON_ONCE(e->uses_mq))

        if (e->type->ops.sq.elevator_bio_merged_fn)
                e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio);
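
/*
 * Runtime PM bookkeeping: q->nr_pending counts queued requests that are not
 * themselves power-management requests. The first such request queued
 * against a suspended or suspending device kicks off an asynchronous
 * resume, and a requeue undoes the accounting. Without CONFIG_PM the
 * helpers are empty stubs.
 */
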
static void blk_pm_requeue_request(struct request *rq)
        if (rq->q->dev && !(rq->rq_flags & RQF_PM))

static void blk_pm_add_request(struct request_queue *q, struct request *rq)
        if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
            (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
                pm_request_resume(q->dev);

static inline void blk_pm_requeue_request(struct request *rq) {}
static inline void blk_pm_add_request(struct request_queue *q,

void elv_requeue_request(struct request_queue *q, struct request *rq)
         * it already went through dequeue, we need to decrement the
         * in_flight count again
        if (blk_account_rq(rq)) {
                q->in_flight[rq_is_sync(rq)]--;
                if (rq->rq_flags & RQF_SORTED)
                        elv_deactivate_rq(q, rq);

        rq->rq_flags &= ~RQF_STARTED;

        blk_pm_requeue_request(rq);

        __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);

void elv_drain_elevator(struct request_queue *q)
        struct elevator_queue *e = q->elevator;

        if (WARN_ON_ONCE(e->uses_mq))

        lockdep_assert_held(q->queue_lock);

        while (e->type->ops.sq.elevator_dispatch_fn(q, 1))

        if (q->nr_sorted && printed++ < 10) {
                printk(KERN_ERR "%s: forced dispatching is broken "
                       "(nr_sorted=%u), please report this\n",
                       q->elevator->type->elevator_name, q->nr_sorted);
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
        trace_block_rq_insert(q, rq);

        blk_pm_add_request(q, rq);

        if (rq->rq_flags & RQF_SOFTBARRIER) {
                /* barriers are scheduling boundary, update end_sector */
                if (rq->cmd_type == REQ_TYPE_FS) {
                        q->end_sector = rq_end_sector(rq);
        } else if (!(rq->rq_flags & RQF_ELVPRIV) &&
                   (where == ELEVATOR_INSERT_SORT ||
                    where == ELEVATOR_INSERT_SORT_MERGE))
                where = ELEVATOR_INSERT_BACK;

        case ELEVATOR_INSERT_REQUEUE:
        case ELEVATOR_INSERT_FRONT:
                rq->rq_flags |= RQF_SOFTBARRIER;
                list_add(&rq->queuelist, &q->queue_head);

        case ELEVATOR_INSERT_BACK:
                rq->rq_flags |= RQF_SOFTBARRIER;
                elv_drain_elevator(q);
                list_add_tail(&rq->queuelist, &q->queue_head);
                 * We kick the queue here for the following reasons.
                 * - The elevator might have returned NULL previously
                 *   to delay requests and returned them now. As the
                 *   queue wasn't empty before this request, ll_rw_blk
                 *   won't run the queue on return, resulting in hang.
                 * - Usually, back inserted requests won't be merged
                 *   with anything. There's no point in delaying queue

        case ELEVATOR_INSERT_SORT_MERGE:
                 * If we succeed in merging this request with one in the
                 * queue already, we are done - rq has now been freed,
                 * so no need to do anything further.
                if (elv_attempt_insert_merge(q, rq))
        case ELEVATOR_INSERT_SORT:
                BUG_ON(rq->cmd_type != REQ_TYPE_FS);
                rq->rq_flags |= RQF_SORTED;
                if (rq_mergeable(rq)) {
                        elv_rqhash_add(q, rq);
                 * Some ioscheds (cfq) run q->request_fn directly, so
                 * rq cannot be accessed after calling
                 * elevator_add_req_fn.
                q->elevator->type->ops.sq.elevator_add_req_fn(q, rq);

        case ELEVATOR_INSERT_FLUSH:
                rq->rq_flags |= RQF_SOFTBARRIER;
                blk_insert_flush(rq);

                printk(KERN_ERR "%s: bad insertion point %d\n",

EXPORT_SYMBOL(__elv_add_request);
void elv_add_request(struct request_queue *q, struct request *rq, int where)
        spin_lock_irqsave(q->queue_lock, flags);
        __elv_add_request(q, rq, where);
        spin_unlock_irqrestore(q->queue_lock, flags);

EXPORT_SYMBOL(elv_add_request);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
        struct elevator_queue *e = q->elevator;

        if (e->uses_mq && e->type->ops.mq.next_request)
                return e->type->ops.mq.next_request(q, rq);
        else if (!e->uses_mq && e->type->ops.sq.elevator_latter_req_fn)
                return e->type->ops.sq.elevator_latter_req_fn(q, rq);

struct request *elv_former_request(struct request_queue *q, struct request *rq)
        struct elevator_queue *e = q->elevator;

        if (e->uses_mq && e->type->ops.mq.former_request)
                return e->type->ops.mq.former_request(q, rq);
        if (!e->uses_mq && e->type->ops.sq.elevator_former_req_fn)
                return e->type->ops.sq.elevator_former_req_fn(q, rq);

int elv_set_request(struct request_queue *q, struct request *rq,
                    struct bio *bio, gfp_t gfp_mask)
        struct elevator_queue *e = q->elevator;

        if (WARN_ON_ONCE(e->uses_mq))

        if (e->type->ops.sq.elevator_set_req_fn)
                return e->type->ops.sq.elevator_set_req_fn(q, rq, bio, gfp_mask);

void elv_put_request(struct request_queue *q, struct request *rq)
        struct elevator_queue *e = q->elevator;

        if (WARN_ON_ONCE(e->uses_mq))

        if (e->type->ops.sq.elevator_put_req_fn)
                e->type->ops.sq.elevator_put_req_fn(rq);

int elv_may_queue(struct request_queue *q, unsigned int op)
        struct elevator_queue *e = q->elevator;

        if (WARN_ON_ONCE(e->uses_mq))

        if (e->type->ops.sq.elevator_may_queue_fn)
                return e->type->ops.sq.elevator_may_queue_fn(q, op);

        return ELV_MQUEUE_MAY;

void elv_completed_request(struct request_queue *q, struct request *rq)
        struct elevator_queue *e = q->elevator;

        if (WARN_ON_ONCE(e->uses_mq))

         * request is released from the driver, io must be done
        if (blk_account_rq(rq)) {
                q->in_flight[rq_is_sync(rq)]--;
                if ((rq->rq_flags & RQF_SORTED) &&
                    e->type->ops.sq.elevator_completed_req_fn)
                        e->type->ops.sq.elevator_completed_req_fn(q, rq);
#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->type ? entry->show(e, page) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);

elv_attr_store(struct kobject *kobj, struct attribute *attr,
               const char *page, size_t length)
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->type ? entry->store(e, page, length) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);

static const struct sysfs_ops elv_sysfs_ops = {
        .show	= elv_attr_show,
        .store	= elv_attr_store,

static struct kobj_type elv_ktype = {
        .sysfs_ops	= &elv_sysfs_ops,
        .release	= elevator_release,

int elv_register_queue(struct request_queue *q)
        struct elevator_queue *e = q->elevator;

        error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
                struct elv_fs_entry *attr = e->type->elevator_attrs;

                while (attr->attr.name) {
                        if (sysfs_create_file(&e->kobj, &attr->attr))

                kobject_uevent(&e->kobj, KOBJ_ADD);

                if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn)
                        e->type->ops.sq.elevator_registered_fn(q);

EXPORT_SYMBOL(elv_register_queue);

void elv_unregister_queue(struct request_queue *q)
        struct elevator_queue *e = q->elevator;

        kobject_uevent(&e->kobj, KOBJ_REMOVE);
        kobject_del(&e->kobj);

EXPORT_SYMBOL(elv_unregister_queue);
int elv_register(struct elevator_type *e)
        /* create icq_cache if requested */
        if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
            WARN_ON(e->icq_align < __alignof__(struct io_cq)))

        snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
                 "%s_io_cq", e->elevator_name);
        e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
                                         e->icq_align, 0, NULL);

        /* register, don't allow duplicate names */
        spin_lock(&elv_list_lock);
        if (elevator_find(e->elevator_name)) {
                spin_unlock(&elv_list_lock);
                kmem_cache_destroy(e->icq_cache);

        list_add_tail(&e->list, &elv_list);
        spin_unlock(&elv_list_lock);

        /* print pretty message */
        if (!strcmp(e->elevator_name, chosen_elevator) ||
            (!*chosen_elevator &&
             !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))

        printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,

EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
        spin_lock(&elv_list_lock);
        list_del_init(&e->list);
        spin_unlock(&elv_list_lock);

         * Destroy icq_cache if it exists. icq's are RCU managed. Make
         * sure all RCU operations are complete before proceeding.
        kmem_cache_destroy(e->icq_cache);

EXPORT_SYMBOL_GPL(elv_unregister);
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
        struct elevator_queue *old = q->elevator;
        bool old_registered = false;

        blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);
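        /*
         * For blk-mq the queue is frozen and quiesced here, so no requests
         * are in flight or being issued while the schedulers are swapped.
         */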
         * Turn on BYPASS and drain all requests w/ elevator private data.
         * Block layer doesn't call into a quiesced elevator - all requests
         * are directly put on the dispatch list without elevator data
         * using INSERT_BACK. All requests have SOFTBARRIER set and no
         * merge happens either.
        old_registered = old->registered;

        blk_mq_sched_teardown(q);

        blk_queue_bypass_start(q);

        /* unregister and clear all auxiliary data of the old elevator */
        elv_unregister_queue(q);

        spin_lock_irq(q->queue_lock);
        spin_unlock_irq(q->queue_lock);

        /* allocate, init and register new elevator */
        if (new_e->uses_mq) {
                err = blk_mq_sched_setup(q);
                err = new_e->ops.mq.init_sched(q, new_e);
        err = new_e->ops.sq.elevator_init_fn(q, new_e);

        err = elv_register_queue(q);

        /* done, kill the old one and finish */
        blk_queue_bypass_end(q);

        blk_mq_unfreeze_queue(q);
        blk_mq_start_stopped_hw_queues(q, true);

        blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
        blk_add_trace_msg(q, "elv switch: none");

        blk_mq_sched_teardown(q);
        elevator_exit(q->elevator);

        /* switch failed, restore and re-register old elevator */
        elv_register_queue(q);

        blk_queue_bypass_end(q);

        blk_mq_unfreeze_queue(q);
        blk_mq_start_stopped_hw_queues(q, true);
 * Switch this queue to the given IO scheduler.
static int __elevator_change(struct request_queue *q, const char *name)
        char elevator_name[ELV_NAME_MAX];
        struct elevator_type *e;

         * Special case for mq, turn off scheduling
        if (q->mq_ops && !strncmp(name, "none", 4))
                return elevator_switch(q, NULL);

        strlcpy(elevator_name, name, sizeof(elevator_name));
        e = elevator_get(strstrip(elevator_name), true);
                printk(KERN_ERR "elevator: type %s not found\n", elevator_name);

            !strcmp(elevator_name, q->elevator->type->elevator_name)) {

        if (!e->uses_mq && q->mq_ops) {
        if (e->uses_mq && !q->mq_ops) {

        return elevator_switch(q, e);

int elevator_change(struct request_queue *q, const char *name)
        /* Protect q->elevator from elevator_init() */
        mutex_lock(&q->sysfs_lock);
        ret = __elevator_change(q, name);
        mutex_unlock(&q->sysfs_lock);

EXPORT_SYMBOL(elevator_change);

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
        if (!(q->mq_ops || q->request_fn))

        ret = __elevator_change(q, name);

        printk(KERN_ERR "elevator: switch to %s failed\n", name);
ssize_t elv_iosched_show(struct request_queue *q, char *name)
        struct elevator_queue *e = q->elevator;
        struct elevator_type *elv = NULL;
        struct elevator_type *__e;

        if (!blk_queue_stackable(q))
                return sprintf(name, "none\n");

        len += sprintf(name+len, "[none] ");

        spin_lock(&elv_list_lock);
        list_for_each_entry(__e, &elv_list, list) {
                if (elv && !strcmp(elv->elevator_name, __e->elevator_name)) {
                        len += sprintf(name+len, "[%s] ", elv->elevator_name);

                if (__e->uses_mq && q->mq_ops)
                        len += sprintf(name+len, "%s ", __e->elevator_name);
                else if (!__e->uses_mq && !q->mq_ops)
                        len += sprintf(name+len, "%s ", __e->elevator_name);

        spin_unlock(&elv_list_lock);

        if (q->mq_ops && q->elevator)
                len += sprintf(name+len, "none");

        len += sprintf(len+name, "\n");
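
/*
 * Example (not part of the original file): the store/show handlers above
 * back the per-queue "scheduler" sysfs attribute, so from userspace:
 *
 *	# cat /sys/block/sda/queue/scheduler
 *	noop deadline [cfq]
 *	# echo deadline > /sys/block/sda/queue/scheduler
 */
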
struct request *elv_rb_former_request(struct request_queue *q,
        struct rb_node *rbprev = rb_prev(&rq->rb_node);

        return rb_entry_rq(rbprev);

EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
        struct rb_node *rbnext = rb_next(&rq->rb_node);

        return rb_entry_rq(rbnext);

EXPORT_SYMBOL(elv_rb_latter_request);