block/cfq-iosched.c
1 /*
2  *  CFQ, or complete fairness queueing, disk scheduler.
3  *
4  *  Based on ideas from a previously unfinished io
5  *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
6  *
7  *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8  */
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/blkdev.h>
12 #include <linux/elevator.h>
13 #include <linux/jiffies.h>
14 #include <linux/rbtree.h>
15 #include <linux/ioprio.h>
16 #include <linux/blktrace_api.h>
17 #include "blk.h"
18 #include "cfq.h"
19
20 static struct blkio_policy_type blkio_policy_cfq;
21
22 /*
23  * tunables
24  */
25 /* max queue in one round of service */
26 static const int cfq_quantum = 8;
27 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
28 /* maximum backwards seek, in KiB */
29 static const int cfq_back_max = 16 * 1024;
30 /* penalty of a backwards seek */
31 static const int cfq_back_penalty = 2;
32 static const int cfq_slice_sync = HZ / 10;
33 static int cfq_slice_async = HZ / 25;
34 static const int cfq_slice_async_rq = 2;
35 static int cfq_slice_idle = HZ / 125;
36 static int cfq_group_idle = HZ / 125;
37 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
38 static const int cfq_hist_divisor = 4;
39
40 /*
41  * offset from end of service tree
42  */
43 #define CFQ_IDLE_DELAY          (HZ / 5)
44
45 /*
46  * below this threshold, we consider thinktime immediate
47  */
48 #define CFQ_MIN_TT              (2)
49
50 #define CFQ_SLICE_SCALE         (5)
51 #define CFQ_HW_QUEUE_MIN        (5)
52 #define CFQ_SERVICE_SHIFT       12
53
54 #define CFQQ_SEEK_THR           (sector_t)(8 * 100)
55 #define CFQQ_CLOSE_THR          (sector_t)(8 * 1024)
56 #define CFQQ_SECT_THR_NONROT    (sector_t)(2 * 32)
57 #define CFQQ_SEEKY(cfqq)        (hweight32(cfqq->seek_history) > 32/8)
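/*
 * seek_history is a 32-bit sliding window with one bit per recent request;
 * CFQQ_SEEKY() marks a queue as seeky once more than 32/8 = 4 of those 32
 * samples were classified as seeks.
 */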
58
59 #define RQ_CIC(rq)              icq_to_cic((rq)->elv.icq)
60 #define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elv.priv[0])
61 #define RQ_CFQG(rq)             (struct cfq_group *) ((rq)->elv.priv[1])
62
63 static struct kmem_cache *cfq_pool;
64
65 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
66 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
67 #define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
68
69 #define sample_valid(samples)   ((samples) > 80)
70 #define rb_entry_cfqg(node)     rb_entry((node), struct cfq_group, rb_node)
71
72 struct cfq_ttime {
73         unsigned long last_end_request;
74
75         unsigned long ttime_total;
76         unsigned long ttime_samples;
77         unsigned long ttime_mean;
78 };
79
80 /*
81  * Most of our rbtree usage is for sorting with min extraction, so
82  * if we cache the leftmost node we don't have to walk down the tree
83  * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
84  * move this into the elevator for the rq sorting as well.
85  */
86 struct cfq_rb_root {
87         struct rb_root rb;
88         struct rb_node *left;
89         unsigned count;
90         unsigned total_weight;
91         u64 min_vdisktime;
92         struct cfq_ttime ttime;
93 };
94 #define CFQ_RB_ROOT     (struct cfq_rb_root) { .rb = RB_ROOT, \
95                         .ttime = {.last_end_request = jiffies,},}
96
97 /*
98  * Per process-grouping structure
99  */
100 struct cfq_queue {
101         /* reference count */
102         int ref;
103         /* various state flags, see below */
104         unsigned int flags;
105         /* parent cfq_data */
106         struct cfq_data *cfqd;
107         /* service_tree member */
108         struct rb_node rb_node;
109         /* service_tree key */
110         unsigned long rb_key;
111         /* prio tree member */
112         struct rb_node p_node;
113         /* prio tree root we belong to, if any */
114         struct rb_root *p_root;
115         /* sorted list of pending requests */
116         struct rb_root sort_list;
117         /* if fifo isn't expired, next request to serve */
118         struct request *next_rq;
119         /* requests queued in sort_list */
120         int queued[2];
121         /* currently allocated requests */
122         int allocated[2];
123         /* fifo list of requests in sort_list */
124         struct list_head fifo;
125
126         /* time when queue got scheduled in to dispatch first request. */
127         unsigned long dispatch_start;
128         unsigned int allocated_slice;
129         unsigned int slice_dispatch;
130         /* time when first request from queue completed and slice started. */
131         unsigned long slice_start;
132         unsigned long slice_end;
133         long slice_resid;
134
135         /* pending priority requests */
136         int prio_pending;
137         /* number of requests that are on the dispatch list or inside driver */
138         int dispatched;
139
140         /* io prio of this group */
141         unsigned short ioprio, org_ioprio;
142         unsigned short ioprio_class;
143
144         pid_t pid;
145
146         u32 seek_history;
147         sector_t last_request_pos;
148
149         struct cfq_rb_root *service_tree;
150         struct cfq_queue *new_cfqq;
151         struct cfq_group *cfqg;
152         /* Number of sectors dispatched from queue in single dispatch round */
153         unsigned long nr_sectors;
154 };
155
156 /*
157  * First index in the service_trees.
158  * IDLE is handled separately, it is served from its own service_tree_idle
159  */
160 enum wl_prio_t {
161         BE_WORKLOAD = 0,
162         RT_WORKLOAD = 1,
163         IDLE_WORKLOAD = 2,
164         CFQ_PRIO_NR,
165 };
166
167 /*
168  * Second index in the service_trees.
169  */
170 enum wl_type_t {
171         ASYNC_WORKLOAD = 0,
172         SYNC_NOIDLE_WORKLOAD = 1,
173         SYNC_WORKLOAD = 2
174 };
175
176 /* This is per cgroup per device grouping structure */
177 struct cfq_group {
178         /* group service_tree member */
179         struct rb_node rb_node;
180
181         /* group service_tree key */
182         u64 vdisktime;
183         unsigned int weight;
184         unsigned int new_weight;
185         bool needs_update;
186
187         /* number of cfqq currently on this group */
188         int nr_cfqq;
189
190         /*
191          * Per group busy queues average. Useful for workload slice calc. We
192          * create the array for each prio class but at run time it is used
193          * only for RT and BE class and slot for IDLE class remains unused.
194          * This is primarily done to avoid confusion and a gcc warning.
195          */
196         unsigned int busy_queues_avg[CFQ_PRIO_NR];
197         /*
198          * rr lists of queues with requests. We maintain service trees for
199          * RT and BE classes. These trees are subdivided into subclasses
200          * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
201          * class there is no subclassification and all the cfq queues go on
202          * a single tree service_tree_idle.
203          * Counts are embedded in the cfq_rb_root
204          */
205         struct cfq_rb_root service_trees[2][3];
206         struct cfq_rb_root service_tree_idle;
207
208         unsigned long saved_workload_slice;
209         enum wl_type_t saved_workload;
210         enum wl_prio_t saved_serving_prio;
211 #ifdef CONFIG_CFQ_GROUP_IOSCHED
212         struct hlist_node cfqd_node;
213 #endif
214         /* number of requests that are on the dispatch list or inside driver */
215         int dispatched;
216         struct cfq_ttime ttime;
217 };
218
219 struct cfq_io_cq {
220         struct io_cq            icq;            /* must be the first member */
221         struct cfq_queue        *cfqq[2];
222         struct cfq_ttime        ttime;
223 };
224
225 /*
226  * Per block device queue structure
227  */
228 struct cfq_data {
229         struct request_queue *queue;
230         /* Root service tree for cfq_groups */
231         struct cfq_rb_root grp_service_tree;
232         struct cfq_group *root_group;
233
234         /*
235          * The priority currently being served
236          */
237         enum wl_prio_t serving_prio;
238         enum wl_type_t serving_type;
239         unsigned long workload_expires;
240         struct cfq_group *serving_group;
241
242         /*
243          * Each priority tree is sorted by next_request position.  These
244          * trees are used when determining if two or more queues are
245          * interleaving requests (see cfq_close_cooperator).
246          */
247         struct rb_root prio_trees[CFQ_PRIO_LISTS];
248
249         unsigned int busy_queues;
250         unsigned int busy_sync_queues;
251
252         int rq_in_driver;
253         int rq_in_flight[2];
254
255         /*
256          * queue-depth detection
257          */
258         int rq_queued;
259         int hw_tag;
260         /*
261          * hw_tag can be
262          * -1 => indeterminate (cfq will behave as if NCQ is present, to allow better detection)
263          *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
264          *  0 => no NCQ
265          */
266         int hw_tag_est_depth;
267         unsigned int hw_tag_samples;
268
269         /*
270          * idle window management
271          */
272         struct timer_list idle_slice_timer;
273         struct work_struct unplug_work;
274
275         struct cfq_queue *active_queue;
276         struct cfq_io_cq *active_cic;
277
278         /*
279          * async queue for each priority case
280          */
281         struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
282         struct cfq_queue *async_idle_cfqq;
283
284         sector_t last_position;
285
286         /*
287          * tunables, see top of file
288          */
289         unsigned int cfq_quantum;
290         unsigned int cfq_fifo_expire[2];
291         unsigned int cfq_back_penalty;
292         unsigned int cfq_back_max;
293         unsigned int cfq_slice[2];
294         unsigned int cfq_slice_async_rq;
295         unsigned int cfq_slice_idle;
296         unsigned int cfq_group_idle;
297         unsigned int cfq_latency;
298
299         /*
300          * Fallback dummy cfqq for extreme OOM conditions
301          */
302         struct cfq_queue oom_cfqq;
303
304         unsigned long last_delayed_sync;
305
306         /* List of cfq groups being managed on this device*/
307         struct hlist_head cfqg_list;
308
309         /* Number of groups which are on blkcg->blkg_list */
310         unsigned int nr_blkcg_linked_grps;
311 };
312
313 static inline struct cfq_group *blkg_to_cfqg(struct blkio_group *blkg)
314 {
315         return blkg_to_pdata(blkg, &blkio_policy_cfq);
316 }
317
318 static inline struct blkio_group *cfqg_to_blkg(struct cfq_group *cfqg)
319 {
320         return pdata_to_blkg(cfqg, &blkio_policy_cfq);
321 }
322
323 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
324
325 static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
326                                             enum wl_prio_t prio,
327                                             enum wl_type_t type)
328 {
329         if (!cfqg)
330                 return NULL;
331
332         if (prio == IDLE_WORKLOAD)
333                 return &cfqg->service_tree_idle;
334
335         return &cfqg->service_trees[prio][type];
336 }
337
338 enum cfqq_state_flags {
339         CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
340         CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
341         CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
342         CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
343         CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
344         CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
345         CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
346         CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
347         CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
348         CFQ_CFQQ_FLAG_coop,             /* cfqq is shared */
349         CFQ_CFQQ_FLAG_split_coop,       /* shared cfqq will be split */
350         CFQ_CFQQ_FLAG_deep,             /* sync cfqq experienced large depth */
351         CFQ_CFQQ_FLAG_wait_busy,        /* Waiting for next request */
352 };
353
354 #define CFQ_CFQQ_FNS(name)                                              \
355 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
356 {                                                                       \
357         (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);                   \
358 }                                                                       \
359 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
360 {                                                                       \
361         (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                  \
362 }                                                                       \
363 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
364 {                                                                       \
365         return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;      \
366 }
367
368 CFQ_CFQQ_FNS(on_rr);
369 CFQ_CFQQ_FNS(wait_request);
370 CFQ_CFQQ_FNS(must_dispatch);
371 CFQ_CFQQ_FNS(must_alloc_slice);
372 CFQ_CFQQ_FNS(fifo_expire);
373 CFQ_CFQQ_FNS(idle_window);
374 CFQ_CFQQ_FNS(prio_changed);
375 CFQ_CFQQ_FNS(slice_new);
376 CFQ_CFQQ_FNS(sync);
377 CFQ_CFQQ_FNS(coop);
378 CFQ_CFQQ_FNS(split_coop);
379 CFQ_CFQQ_FNS(deep);
380 CFQ_CFQQ_FNS(wait_busy);
381 #undef CFQ_CFQQ_FNS
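/*
 * Each CFQ_CFQQ_FNS(name) invocation above expands to three helpers, e.g.
 * CFQ_CFQQ_FNS(on_rr) generates cfq_mark_cfqq_on_rr(), cfq_clear_cfqq_on_rr()
 * and cfq_cfqq_on_rr(), which set, clear and test the CFQ_CFQQ_FLAG_on_rr bit
 * in cfqq->flags.
 */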
382
383 #ifdef CONFIG_CFQ_GROUP_IOSCHED
384 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
385         blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
386                         cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
387                         blkg_path(cfqg_to_blkg((cfqq)->cfqg)), ##args)
388
389 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)                          \
390         blk_add_trace_msg((cfqd)->queue, "%s " fmt,                     \
391                         blkg_path(cfqg_to_blkg((cfqg))), ##args)        \
392
393 #else
394 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
395         blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
396 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)          do {} while (0)
397 #endif
398 #define cfq_log(cfqd, fmt, args...)     \
399         blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
400
401 /* Traverses through cfq group service trees */
402 #define for_each_cfqg_st(cfqg, i, j, st) \
403         for (i = 0; i <= IDLE_WORKLOAD; i++) \
404                 for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
405                         : &cfqg->service_tree_idle; \
406                         (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
407                         (i == IDLE_WORKLOAD && j == 0); \
408                         j++, st = i < IDLE_WORKLOAD ? \
409                         &cfqg->service_trees[i][j]: NULL) \
410
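/*
 * for_each_cfqg_st() walks the BE and RT service trees for all three
 * workload types (ASYNC, SYNC_NOIDLE, SYNC) and then the single
 * service_tree_idle, so every service tree of the group is visited once.
 */
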
411 static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
412         struct cfq_ttime *ttime, bool group_idle)
413 {
414         unsigned long slice;
415         if (!sample_valid(ttime->ttime_samples))
416                 return false;
417         if (group_idle)
418                 slice = cfqd->cfq_group_idle;
419         else
420                 slice = cfqd->cfq_slice_idle;
421         return ttime->ttime_mean > slice;
422 }
423
424 static inline bool iops_mode(struct cfq_data *cfqd)
425 {
426         /*
427          * If we are not idling on queues and the drive supports NCQ,
428          * requests execute in parallel and measuring service time is not
429          * meaningful in most cases, unless we drive shallow queue depths,
430          * which itself becomes a performance bottleneck. In such cases
431          * switch to providing fairness in terms of number of IOs.
432          */
433         if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
434                 return true;
435         else
436                 return false;
437 }
438
439 static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
440 {
441         if (cfq_class_idle(cfqq))
442                 return IDLE_WORKLOAD;
443         if (cfq_class_rt(cfqq))
444                 return RT_WORKLOAD;
445         return BE_WORKLOAD;
446 }
447
448
449 static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
450 {
451         if (!cfq_cfqq_sync(cfqq))
452                 return ASYNC_WORKLOAD;
453         if (!cfq_cfqq_idle_window(cfqq))
454                 return SYNC_NOIDLE_WORKLOAD;
455         return SYNC_WORKLOAD;
456 }
457
458 static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
459                                         struct cfq_data *cfqd,
460                                         struct cfq_group *cfqg)
461 {
462         if (wl == IDLE_WORKLOAD)
463                 return cfqg->service_tree_idle.count;
464
465         return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
466                 + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
467                 + cfqg->service_trees[wl][SYNC_WORKLOAD].count;
468 }
469
470 static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
471                                         struct cfq_group *cfqg)
472 {
473         return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
474                 + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
475 }
476
477 static void cfq_dispatch_insert(struct request_queue *, struct request *);
478 static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
479                                        struct io_context *, gfp_t);
480
481 static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
482 {
483         /* cic->icq is the first member, %NULL will convert to %NULL */
484         return container_of(icq, struct cfq_io_cq, icq);
485 }
486
487 static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
488                                                struct io_context *ioc)
489 {
490         if (ioc)
491                 return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
492         return NULL;
493 }
494
495 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
496 {
497         return cic->cfqq[is_sync];
498 }
499
500 static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
501                                 bool is_sync)
502 {
503         cic->cfqq[is_sync] = cfqq;
504 }
505
506 static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
507 {
508         return cic->icq.q->elevator->elevator_data;
509 }
510
511 /*
512  * We regard a request as SYNC if it is either a read or has the SYNC bit
513  * set (in which case it could also be a direct WRITE).
514  */
515 static inline bool cfq_bio_sync(struct bio *bio)
516 {
517         return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
518 }
519
520 /*
521  * Schedule a run of the queue if there are requests pending and nothing in
522  * the driver that will restart queueing.
523  */
524 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
525 {
526         if (cfqd->busy_queues) {
527                 cfq_log(cfqd, "schedule dispatch");
528                 kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
529         }
530 }
531
532 /*
533  * Scale schedule slice based on io priority. Use the sync time slice only
534  * if a queue is marked sync and has sync io queued. A sync queue with async
535  * io only should not get the full sync slice length.
536  */
537 static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
538                                  unsigned short prio)
539 {
540         const int base_slice = cfqd->cfq_slice[sync];
541
542         WARN_ON(prio >= IOPRIO_BE_NR);
543
544         return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
545 }
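/*
 * Worked example, assuming HZ=1000 so that cfq_slice[1] (cfq_slice_sync)
 * is 100 jiffies: base_slice/CFQ_SLICE_SCALE = 20, so a sync queue at the
 * default ioprio 4 gets 100ms per slice, ioprio 0 gets 100 + 20*4 = 180ms
 * and ioprio 7 gets 100 - 20*3 = 40ms.
 */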
546
547 static inline int
548 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
549 {
550         return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
551 }
552
553 static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
554 {
555         u64 d = delta << CFQ_SERVICE_SHIFT;
556
557         d = d * BLKIO_WEIGHT_DEFAULT;
558         do_div(d, cfqg->weight);
559         return d;
560 }
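/*
 * The vdisktime charge is inversely proportional to the group's weight: a
 * group with twice the weight of another advances its vdisktime half as fast
 * for the same service, and therefore ends up with roughly twice the share.
 */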
561
562 static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
563 {
564         s64 delta = (s64)(vdisktime - min_vdisktime);
565         if (delta > 0)
566                 min_vdisktime = vdisktime;
567
568         return min_vdisktime;
569 }
570
571 static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
572 {
573         s64 delta = (s64)(vdisktime - min_vdisktime);
574         if (delta < 0)
575                 min_vdisktime = vdisktime;
576
577         return min_vdisktime;
578 }
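/*
 * Both helpers compare through a signed 64-bit delta, so they keep working
 * even if the unsigned vdisktime values eventually wrap around.
 */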
579
580 static void update_min_vdisktime(struct cfq_rb_root *st)
581 {
582         struct cfq_group *cfqg;
583
584         if (st->left) {
585                 cfqg = rb_entry_cfqg(st->left);
586                 st->min_vdisktime = max_vdisktime(st->min_vdisktime,
587                                                   cfqg->vdisktime);
588         }
589 }
590
591 /*
592  * Get the averaged number of queues of RT/BE priority. The average is
593  * updated with a formula that gives more weight to higher numbers, so it
594  * quickly follows sudden increases and decreases slowly.
595  */
596
597 static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
598                                         struct cfq_group *cfqg, bool rt)
599 {
600         unsigned min_q, max_q;
601         unsigned mult  = cfq_hist_divisor - 1;
602         unsigned round = cfq_hist_divisor / 2;
603         unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
604
605         min_q = min(cfqg->busy_queues_avg[rt], busy);
606         max_q = max(cfqg->busy_queues_avg[rt], busy);
607         cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
608                 cfq_hist_divisor;
609         return cfqg->busy_queues_avg[rt];
610 }
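/*
 * With cfq_hist_divisor = 4 this computes avg = (3*max + min + 2) / 4 of the
 * stored average and the current busy count. For example, if the average was
 * 1 and 5 queues become busy, the new average is (3*5 + 1 + 2)/4 = 4, so the
 * estimate tracks a sudden increase almost immediately while decaying slowly.
 */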
611
612 static inline unsigned
613 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
614 {
615         struct cfq_rb_root *st = &cfqd->grp_service_tree;
616
617         return cfq_target_latency * cfqg->weight / st->total_weight;
618 }
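/*
 * The group slice is the group's proportional share of cfq_target_latency
 * (300ms): for example, a group holding one third of the total weight on the
 * service tree gets a roughly 100ms group slice.
 */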
619
620 static inline unsigned
621 cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
622 {
623         unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
624         if (cfqd->cfq_latency) {
625                 /*
626                  * interested queues (we consider only the ones with the same
627                  * priority class in the cfq group)
628                  */
629                 unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
630                                                 cfq_class_rt(cfqq));
631                 unsigned sync_slice = cfqd->cfq_slice[1];
632                 unsigned expect_latency = sync_slice * iq;
633                 unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
634
635                 if (expect_latency > group_slice) {
636                         unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
637                         /* scale low_slice according to IO priority
638                          * and sync vs async */
639                         unsigned low_slice =
640                                 min(slice, base_low_slice * slice / sync_slice);
641                         /* the adapted slice value is scaled to fit all iqs
642                          * into the target latency */
643                         slice = max(slice * group_slice / expect_latency,
644                                     low_slice);
645                 }
646         }
647         return slice;
648 }
649
650 static inline void
651 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
652 {
653         unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
654
655         cfqq->slice_start = jiffies;
656         cfqq->slice_end = jiffies + slice;
657         cfqq->allocated_slice = slice;
658         cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
659 }
660
661 /*
662  * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
663  * isn't valid until the first request from the dispatch is activated
664  * and the slice time set.
665  */
666 static inline bool cfq_slice_used(struct cfq_queue *cfqq)
667 {
668         if (cfq_cfqq_slice_new(cfqq))
669                 return false;
670         if (time_before(jiffies, cfqq->slice_end))
671                 return false;
672
673         return true;
674 }
675
676 /*
677  * Lifted from AS - choose which of rq1 and rq2 is best served now.
678  * We choose the request that is closest to the head right now. Distance
679  * behind the head is penalized and only allowed to a certain extent.
680  */
681 static struct request *
682 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
683 {
684         sector_t s1, s2, d1 = 0, d2 = 0;
685         unsigned long back_max;
686 #define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
687 #define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
688         unsigned wrap = 0; /* bit mask: requests behind the disk head? */
689
690         if (rq1 == NULL || rq1 == rq2)
691                 return rq2;
692         if (rq2 == NULL)
693                 return rq1;
694
695         if (rq_is_sync(rq1) != rq_is_sync(rq2))
696                 return rq_is_sync(rq1) ? rq1 : rq2;
697
698         if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
699                 return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
700
701         s1 = blk_rq_pos(rq1);
702         s2 = blk_rq_pos(rq2);
703
704         /*
705          * by definition, 1KiB is 2 sectors
706          */
707         back_max = cfqd->cfq_back_max * 2;
708
709         /*
710          * Strict one way elevator _except_ in the case where we allow
711          * short backward seeks which are biased as twice the cost of a
712          * similar forward seek.
713          */
714         if (s1 >= last)
715                 d1 = s1 - last;
716         else if (s1 + back_max >= last)
717                 d1 = (last - s1) * cfqd->cfq_back_penalty;
718         else
719                 wrap |= CFQ_RQ1_WRAP;
720
721         if (s2 >= last)
722                 d2 = s2 - last;
723         else if (s2 + back_max >= last)
724                 d2 = (last - s2) * cfqd->cfq_back_penalty;
725         else
726                 wrap |= CFQ_RQ2_WRAP;
727
728         /* Found required data */
729
730         /*
731          * By doing switch() on the bit mask "wrap" we avoid having to
732          * check two variables for all permutations: --> faster!
733          */
734         switch (wrap) {
735         case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
736                 if (d1 < d2)
737                         return rq1;
738                 else if (d2 < d1)
739                         return rq2;
740                 else {
741                         if (s1 >= s2)
742                                 return rq1;
743                         else
744                                 return rq2;
745                 }
746
747         case CFQ_RQ2_WRAP:
748                 return rq1;
749         case CFQ_RQ1_WRAP:
750                 return rq2;
751         case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
752         default:
753                 /*
754                  * Since both rqs are wrapped,
755                  * start with the one that's further behind head
756                  * (--> only *one* back seek required),
757                  * since back seek takes more time than forward.
758                  */
759                 if (s1 <= s2)
760                         return rq1;
761                 else
762                         return rq2;
763         }
764 }
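/*
 * Example with the default tunables: with the head at sector 1000, a request
 * at sector 1100 gets d1 = 100 while a request at sector 900 (a short
 * backward seek within back_max) gets d2 = 100 * cfq_back_penalty = 200, so
 * the forward request wins even though both are 100 sectors from the head.
 */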
765
766 /*
767  * Below are the leftmost-node cache rbtree helpers.
768  */
769 static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
770 {
771         /* Service tree is empty */
772         if (!root->count)
773                 return NULL;
774
775         if (!root->left)
776                 root->left = rb_first(&root->rb);
777
778         if (root->left)
779                 return rb_entry(root->left, struct cfq_queue, rb_node);
780
781         return NULL;
782 }
783
784 static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
785 {
786         if (!root->left)
787                 root->left = rb_first(&root->rb);
788
789         if (root->left)
790                 return rb_entry_cfqg(root->left);
791
792         return NULL;
793 }
794
795 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
796 {
797         rb_erase(n, root);
798         RB_CLEAR_NODE(n);
799 }
800
801 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
802 {
803         if (root->left == n)
804                 root->left = NULL;
805         rb_erase_init(n, &root->rb);
806         --root->count;
807 }
808
809 /*
810  * would be nice to take fifo expire time into account as well
811  */
812 static struct request *
813 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
814                   struct request *last)
815 {
816         struct rb_node *rbnext = rb_next(&last->rb_node);
817         struct rb_node *rbprev = rb_prev(&last->rb_node);
818         struct request *next = NULL, *prev = NULL;
819
820         BUG_ON(RB_EMPTY_NODE(&last->rb_node));
821
822         if (rbprev)
823                 prev = rb_entry_rq(rbprev);
824
825         if (rbnext)
826                 next = rb_entry_rq(rbnext);
827         else {
828                 rbnext = rb_first(&cfqq->sort_list);
829                 if (rbnext && rbnext != &last->rb_node)
830                         next = rb_entry_rq(rbnext);
831         }
832
833         return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
834 }
835
836 static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
837                                       struct cfq_queue *cfqq)
838 {
839         /*
840          * just an approximation, should be ok.
841          */
842         return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
843                        cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
844 }
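/*
 * The offset grows with the number of sibling queues in the group and
 * shrinks as the queue's own slice approaches the largest (sync, ioprio 0)
 * slice, so higher priority queues end up closer to the front of the
 * service tree.
 */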
845
846 static inline s64
847 cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
848 {
849         return cfqg->vdisktime - st->min_vdisktime;
850 }
851
852 static void
853 __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
854 {
855         struct rb_node **node = &st->rb.rb_node;
856         struct rb_node *parent = NULL;
857         struct cfq_group *__cfqg;
858         s64 key = cfqg_key(st, cfqg);
859         int left = 1;
860
861         while (*node != NULL) {
862                 parent = *node;
863                 __cfqg = rb_entry_cfqg(parent);
864
865                 if (key < cfqg_key(st, __cfqg))
866                         node = &parent->rb_left;
867                 else {
868                         node = &parent->rb_right;
869                         left = 0;
870                 }
871         }
872
873         if (left)
874                 st->left = &cfqg->rb_node;
875
876         rb_link_node(&cfqg->rb_node, parent, node);
877         rb_insert_color(&cfqg->rb_node, &st->rb);
878 }
879
880 static void
881 cfq_update_group_weight(struct cfq_group *cfqg)
882 {
883         BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
884         if (cfqg->needs_update) {
885                 cfqg->weight = cfqg->new_weight;
886                 cfqg->needs_update = false;
887         }
888 }
889
890 static void
891 cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
892 {
893         BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
894
895         cfq_update_group_weight(cfqg);
896         __cfq_group_service_tree_add(st, cfqg);
897         st->total_weight += cfqg->weight;
898 }
899
900 static void
901 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
902 {
903         struct cfq_rb_root *st = &cfqd->grp_service_tree;
904         struct cfq_group *__cfqg;
905         struct rb_node *n;
906
907         cfqg->nr_cfqq++;
908         if (!RB_EMPTY_NODE(&cfqg->rb_node))
909                 return;
910
911         /*
912          * Currently put the group at the end. Later implement something
913          * so that groups get lesser vtime based on their weights, so that
914          * a group does not lose everything if it was not continuously backlogged.
915          */
916         n = rb_last(&st->rb);
917         if (n) {
918                 __cfqg = rb_entry_cfqg(n);
919                 cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
920         } else
921                 cfqg->vdisktime = st->min_vdisktime;
922         cfq_group_service_tree_add(st, cfqg);
923 }
924
925 static void
926 cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
927 {
928         st->total_weight -= cfqg->weight;
929         if (!RB_EMPTY_NODE(&cfqg->rb_node))
930                 cfq_rb_erase(&cfqg->rb_node, st);
931 }
932
933 static void
934 cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
935 {
936         struct cfq_rb_root *st = &cfqd->grp_service_tree;
937
938         BUG_ON(cfqg->nr_cfqq < 1);
939         cfqg->nr_cfqq--;
940
941         /* If there are other cfq queues under this group, don't delete it */
942         if (cfqg->nr_cfqq)
943                 return;
944
945         cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
946         cfq_group_service_tree_del(st, cfqg);
947         cfqg->saved_workload_slice = 0;
948         cfq_blkiocg_update_dequeue_stats(cfqg_to_blkg(cfqg), 1);
949 }
950
951 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
952                                                 unsigned int *unaccounted_time)
953 {
954         unsigned int slice_used;
955
956         /*
957          * Queue got expired before even a single request completed or
958          * got expired immediately after first request completion.
959          */
960         if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
961                 /*
962                  * Also charge the seek time incurred to the group, otherwise
963                  * if there are multiple queues in the group, each can dispatch
964                  * a single request on seeky media and cause lots of seek time
965                  * and the group will never know it.
966                  */
967                 slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
968                                         1);
969         } else {
970                 slice_used = jiffies - cfqq->slice_start;
971                 if (slice_used > cfqq->allocated_slice) {
972                         *unaccounted_time = slice_used - cfqq->allocated_slice;
973                         slice_used = cfqq->allocated_slice;
974                 }
975                 if (time_after(cfqq->slice_start, cfqq->dispatch_start))
976                         *unaccounted_time += cfqq->slice_start -
977                                         cfqq->dispatch_start;
978         }
979
980         return slice_used;
981 }
982
983 static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
984                                 struct cfq_queue *cfqq)
985 {
986         struct cfq_rb_root *st = &cfqd->grp_service_tree;
987         unsigned int used_sl, charge, unaccounted_sl = 0;
988         int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
989                         - cfqg->service_tree_idle.count;
990
991         BUG_ON(nr_sync < 0);
992         used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
993
994         if (iops_mode(cfqd))
995                 charge = cfqq->slice_dispatch;
996         else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
997                 charge = cfqq->allocated_slice;
998
999         /* Can't update vdisktime while group is on service tree */
1000         cfq_group_service_tree_del(st, cfqg);
1001         cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
1002         /* If a new weight was requested, update now, off tree */
1003         cfq_group_service_tree_add(st, cfqg);
1004
1005         /* This group is being expired. Save the context */
1006         if (time_after(cfqd->workload_expires, jiffies)) {
1007                 cfqg->saved_workload_slice = cfqd->workload_expires
1008                                                 - jiffies;
1009                 cfqg->saved_workload = cfqd->serving_type;
1010                 cfqg->saved_serving_prio = cfqd->serving_prio;
1011         } else
1012                 cfqg->saved_workload_slice = 0;
1013
1014         cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
1015                                         st->min_vdisktime);
1016         cfq_log_cfqq(cfqq->cfqd, cfqq,
1017                      "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
1018                      used_sl, cfqq->slice_dispatch, charge,
1019                      iops_mode(cfqd), cfqq->nr_sectors);
1020         cfq_blkiocg_update_timeslice_used(cfqg_to_blkg(cfqg), used_sl,
1021                                           unaccounted_sl);
1022         cfq_blkiocg_set_start_empty_time(cfqg_to_blkg(cfqg));
1023 }
1024
1025 /**
1026  * cfq_init_cfqg_base - initialize base part of a cfq_group
1027  * @cfqg: cfq_group to initialize
1028  *
1029  * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
1030  * is enabled or not.
1031  */
1032 static void cfq_init_cfqg_base(struct cfq_group *cfqg)
1033 {
1034         struct cfq_rb_root *st;
1035         int i, j;
1036
1037         for_each_cfqg_st(cfqg, i, j, st)
1038                 *st = CFQ_RB_ROOT;
1039         RB_CLEAR_NODE(&cfqg->rb_node);
1040
1041         cfqg->ttime.last_end_request = jiffies;
1042 }
1043
1044 #ifdef CONFIG_CFQ_GROUP_IOSCHED
1045 static void cfq_update_blkio_group_weight(struct request_queue *q,
1046                                           struct blkio_group *blkg,
1047                                           unsigned int weight)
1048 {
1049         struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1050
1051         cfqg->new_weight = weight;
1052         cfqg->needs_update = true;
1053 }
1054
1055 static void cfq_link_blkio_group(struct request_queue *q,
1056                                  struct blkio_group *blkg)
1057 {
1058         struct cfq_data *cfqd = q->elevator->elevator_data;
1059         struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1060
1061         cfqd->nr_blkcg_linked_grps++;
1062
1063         /* Add group on cfqd list */
1064         hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
1065 }
1066
1067 static void cfq_init_blkio_group(struct blkio_group *blkg)
1068 {
1069         struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1070
1071         cfq_init_cfqg_base(cfqg);
1072         cfqg->weight = blkg->blkcg->weight;
1073 }
1074
1075 /*
1076  * Search for the cfq group the current task belongs to. request_queue lock must
1077  * be held.
1078  */
1079 static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
1080                                                 struct blkio_cgroup *blkcg)
1081 {
1082         struct request_queue *q = cfqd->queue;
1083         struct cfq_group *cfqg = NULL;
1084
1085         /* avoid lookup for the common case where there's no blkio cgroup */
1086         if (blkcg == &blkio_root_cgroup) {
1087                 cfqg = cfqd->root_group;
1088         } else {
1089                 struct blkio_group *blkg;
1090
1091                 blkg = blkg_lookup_create(blkcg, q, BLKIO_POLICY_PROP, false);
1092                 if (!IS_ERR(blkg))
1093                         cfqg = blkg_to_cfqg(blkg);
1094         }
1095
1096         return cfqg;
1097 }
1098
1099 static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1100 {
1101         /* Currently, all async queues are mapped to root group */
1102         if (!cfq_cfqq_sync(cfqq))
1103                 cfqg = cfqq->cfqd->root_group;
1104
1105         cfqq->cfqg = cfqg;
1106         /* cfqq reference on cfqg */
1107         blkg_get(cfqg_to_blkg(cfqg));
1108 }
1109
1110 static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
1111 {
1112         /* Something is wrong if we are trying to remove the same group twice */
1113         BUG_ON(hlist_unhashed(&cfqg->cfqd_node));
1114
1115         hlist_del_init(&cfqg->cfqd_node);
1116
1117         BUG_ON(cfqd->nr_blkcg_linked_grps <= 0);
1118         cfqd->nr_blkcg_linked_grps--;
1119
1120         /*
1121          * Put the reference taken at the time of creation so that when all
1122          * queues are gone, group can be destroyed.
1123          */
1124         blkg_put(cfqg_to_blkg(cfqg));
1125 }
1126
1127 static bool cfq_release_cfq_groups(struct cfq_data *cfqd)
1128 {
1129         struct hlist_node *pos, *n;
1130         struct cfq_group *cfqg;
1131         bool empty = true;
1132
1133         hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
1134                 /*
1135                  * If the cgroup removal path got to the blkio_group first
1136                  * and removed it from the cgroup list, then it will take
1137                  * care of destroying the cfqg as well.
1138                  */
1139                 if (!cfq_blkiocg_del_blkio_group(cfqg_to_blkg(cfqg)))
1140                         cfq_destroy_cfqg(cfqd, cfqg);
1141                 else
1142                         empty = false;
1143         }
1144         return empty;
1145 }
1146
1147 /*
1148  * Blk cgroup controller notification saying that the blkio_group object is
1149  * being delinked as the associated cgroup object is going away. That also
1150  * means that no new IO will come into this group. So get rid of this group
1151  * as soon as any pending IO in the group is finished.
1152  *
1153  * This function is called under rcu_read_lock(). key is the rcu protected
1154  * pointer. That means @q is a valid request_queue pointer as long as we
1155  * hold the rcu read lock.
1156  *
1157  * @q was fetched from blkio_group under blkio_cgroup->lock. That means
1158  * it should not be NULL as even if the elevator was exiting, the cgroup
1159  * deletion path got to it first.
1160  */
1161 static void cfq_unlink_blkio_group(struct request_queue *q,
1162                                    struct blkio_group *blkg)
1163 {
1164         struct cfq_data *cfqd = q->elevator->elevator_data;
1165         unsigned long flags;
1166
1167         spin_lock_irqsave(q->queue_lock, flags);
1168         cfq_destroy_cfqg(cfqd, blkg_to_cfqg(blkg));
1169         spin_unlock_irqrestore(q->queue_lock, flags);
1170 }
1171
1172 static struct elevator_type iosched_cfq;
1173
1174 static bool cfq_clear_queue(struct request_queue *q)
1175 {
1176         lockdep_assert_held(q->queue_lock);
1177
1178         /* shoot down blkgs iff the current elevator is cfq */
1179         if (!q->elevator || q->elevator->type != &iosched_cfq)
1180                 return true;
1181
1182         return cfq_release_cfq_groups(q->elevator->elevator_data);
1183 }
1184
1185 #else /* GROUP_IOSCHED */
1186 static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
1187                                                 struct blkio_cgroup *blkcg)
1188 {
1189         return cfqd->root_group;
1190 }
1191
1192 static inline void
1193 cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
1194         cfqq->cfqg = cfqg;
1195 }
1196
1197 static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
1198
1199 #endif /* GROUP_IOSCHED */
1200
1201 /*
1202  * The service trees hold all pending cfq_queues that have requests
1203  * waiting to be processed. They are sorted in the order in which we
1204  * will service the queues.
1205  */
1206 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1207                                  bool add_front)
1208 {
1209         struct rb_node **p, *parent;
1210         struct cfq_queue *__cfqq;
1211         unsigned long rb_key;
1212         struct cfq_rb_root *service_tree;
1213         int left;
1214         int new_cfqq = 1;
1215
1216         service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
1217                                                 cfqq_type(cfqq));
1218         if (cfq_class_idle(cfqq)) {
1219                 rb_key = CFQ_IDLE_DELAY;
1220                 parent = rb_last(&service_tree->rb);
1221                 if (parent && parent != &cfqq->rb_node) {
1222                         __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1223                         rb_key += __cfqq->rb_key;
1224                 } else
1225                         rb_key += jiffies;
1226         } else if (!add_front) {
1227                 /*
1228                  * Get our rb key offset. Subtract any residual slice
1229                  * value carried from last service. A negative resid
1230                  * count indicates slice overrun, and this should position
1231                  * the next service time further away in the tree.
1232                  */
1233                 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
1234                 rb_key -= cfqq->slice_resid;
1235                 cfqq->slice_resid = 0;
1236         } else {
1237                 rb_key = -HZ;
1238                 __cfqq = cfq_rb_first(service_tree);
1239                 rb_key += __cfqq ? __cfqq->rb_key : jiffies;
1240         }
1241
1242         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1243                 new_cfqq = 0;
1244                 /*
1245                  * same position, nothing more to do
1246                  */
1247                 if (rb_key == cfqq->rb_key &&
1248                     cfqq->service_tree == service_tree)
1249                         return;
1250
1251                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1252                 cfqq->service_tree = NULL;
1253         }
1254
1255         left = 1;
1256         parent = NULL;
1257         cfqq->service_tree = service_tree;
1258         p = &service_tree->rb.rb_node;
1259         while (*p) {
1260                 struct rb_node **n;
1261
1262                 parent = *p;
1263                 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1264
1265                 /*
1266                  * sort by key, which represents service time.
1267                  */
1268                 if (time_before(rb_key, __cfqq->rb_key))
1269                         n = &(*p)->rb_left;
1270                 else {
1271                         n = &(*p)->rb_right;
1272                         left = 0;
1273                 }
1274
1275                 p = n;
1276         }
1277
1278         if (left)
1279                 service_tree->left = &cfqq->rb_node;
1280
1281         cfqq->rb_key = rb_key;
1282         rb_link_node(&cfqq->rb_node, parent, p);
1283         rb_insert_color(&cfqq->rb_node, &service_tree->rb);
1284         service_tree->count++;
1285         if (add_front || !new_cfqq)
1286                 return;
1287         cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
1288 }
1289
1290 static struct cfq_queue *
1291 cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
1292                      sector_t sector, struct rb_node **ret_parent,
1293                      struct rb_node ***rb_link)
1294 {
1295         struct rb_node **p, *parent;
1296         struct cfq_queue *cfqq = NULL;
1297
1298         parent = NULL;
1299         p = &root->rb_node;
1300         while (*p) {
1301                 struct rb_node **n;
1302
1303                 parent = *p;
1304                 cfqq = rb_entry(parent, struct cfq_queue, p_node);
1305
1306                 /*
1307                  * Sort strictly based on sector.  Smallest to the left,
1308                  * largest to the right.
1309                  */
1310                 if (sector > blk_rq_pos(cfqq->next_rq))
1311                         n = &(*p)->rb_right;
1312                 else if (sector < blk_rq_pos(cfqq->next_rq))
1313                         n = &(*p)->rb_left;
1314                 else
1315                         break;
1316                 p = n;
1317                 cfqq = NULL;
1318         }
1319
1320         *ret_parent = parent;
1321         if (rb_link)
1322                 *rb_link = p;
1323         return cfqq;
1324 }
1325
1326 static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1327 {
1328         struct rb_node **p, *parent;
1329         struct cfq_queue *__cfqq;
1330
1331         if (cfqq->p_root) {
1332                 rb_erase(&cfqq->p_node, cfqq->p_root);
1333                 cfqq->p_root = NULL;
1334         }
1335
1336         if (cfq_class_idle(cfqq))
1337                 return;
1338         if (!cfqq->next_rq)
1339                 return;
1340
1341         cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
1342         __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
1343                                       blk_rq_pos(cfqq->next_rq), &parent, &p);
1344         if (!__cfqq) {
1345                 rb_link_node(&cfqq->p_node, parent, p);
1346                 rb_insert_color(&cfqq->p_node, cfqq->p_root);
1347         } else
1348                 cfqq->p_root = NULL;
1349 }
1350
1351 /*
1352  * Update cfqq's position in the service tree.
1353  */
1354 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1355 {
1356         /*
1357          * Resorting requires the cfqq to be on the RR list already.
1358          */
1359         if (cfq_cfqq_on_rr(cfqq)) {
1360                 cfq_service_tree_add(cfqd, cfqq, 0);
1361                 cfq_prio_tree_add(cfqd, cfqq);
1362         }
1363 }
1364
1365 /*
1366  * add to busy list of queues for service, trying to be fair in ordering
1367  * the pending list according to last request service
1368  */
1369 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1370 {
1371         cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
1372         BUG_ON(cfq_cfqq_on_rr(cfqq));
1373         cfq_mark_cfqq_on_rr(cfqq);
1374         cfqd->busy_queues++;
1375         if (cfq_cfqq_sync(cfqq))
1376                 cfqd->busy_sync_queues++;
1377
1378         cfq_resort_rr_list(cfqd, cfqq);
1379 }
1380
1381 /*
1382  * Called when the cfqq no longer has requests pending, remove it from
1383  * the service tree.
1384  */
1385 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1386 {
1387         cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
1388         BUG_ON(!cfq_cfqq_on_rr(cfqq));
1389         cfq_clear_cfqq_on_rr(cfqq);
1390
1391         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1392                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1393                 cfqq->service_tree = NULL;
1394         }
1395         if (cfqq->p_root) {
1396                 rb_erase(&cfqq->p_node, cfqq->p_root);
1397                 cfqq->p_root = NULL;
1398         }
1399
1400         cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
1401         BUG_ON(!cfqd->busy_queues);
1402         cfqd->busy_queues--;
1403         if (cfq_cfqq_sync(cfqq))
1404                 cfqd->busy_sync_queues--;
1405 }
1406
1407 /*
1408  * rb tree support functions
1409  */
1410 static void cfq_del_rq_rb(struct request *rq)
1411 {
1412         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1413         const int sync = rq_is_sync(rq);
1414
1415         BUG_ON(!cfqq->queued[sync]);
1416         cfqq->queued[sync]--;
1417
1418         elv_rb_del(&cfqq->sort_list, rq);
1419
1420         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
1421                 /*
1422                  * Queue will be deleted from service tree when we actually
1423                  * expire it later. Right now just remove it from prio tree
1424                  * as it is empty.
1425                  */
1426                 if (cfqq->p_root) {
1427                         rb_erase(&cfqq->p_node, cfqq->p_root);
1428                         cfqq->p_root = NULL;
1429                 }
1430         }
1431 }
1432
1433 static void cfq_add_rq_rb(struct request *rq)
1434 {
1435         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1436         struct cfq_data *cfqd = cfqq->cfqd;
1437         struct request *prev;
1438
1439         cfqq->queued[rq_is_sync(rq)]++;
1440
1441         elv_rb_add(&cfqq->sort_list, rq);
1442
1443         if (!cfq_cfqq_on_rr(cfqq))
1444                 cfq_add_cfqq_rr(cfqd, cfqq);
1445
1446         /*
1447          * check if this request is a better next-serve candidate
1448          */
1449         prev = cfqq->next_rq;
1450         cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
1451
1452         /*
1453          * adjust priority tree position, if ->next_rq changes
1454          */
1455         if (prev != cfqq->next_rq)
1456                 cfq_prio_tree_add(cfqd, cfqq);
1457
1458         BUG_ON(!cfqq->next_rq);
1459 }
1460
1461 static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
1462 {
1463         elv_rb_del(&cfqq->sort_list, rq);
1464         cfqq->queued[rq_is_sync(rq)]--;
1465         cfq_blkiocg_update_io_remove_stats(cfqg_to_blkg(RQ_CFQG(rq)),
1466                                         rq_data_dir(rq), rq_is_sync(rq));
1467         cfq_add_rq_rb(rq);
1468         cfq_blkiocg_update_io_add_stats(cfqg_to_blkg(RQ_CFQG(rq)),
1469                                         cfqg_to_blkg(cfqq->cfqd->serving_group),
1470                                         rq_data_dir(rq), rq_is_sync(rq));
1471 }
1472
1473 static struct request *
1474 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
1475 {
1476         struct task_struct *tsk = current;
1477         struct cfq_io_cq *cic;
1478         struct cfq_queue *cfqq;
1479
1480         cic = cfq_cic_lookup(cfqd, tsk->io_context);
1481         if (!cic)
1482                 return NULL;
1483
1484         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1485         if (cfqq) {
1486                 sector_t sector = bio->bi_sector + bio_sectors(bio);
1487
1488                 return elv_rb_find(&cfqq->sort_list, sector);
1489         }
1490
1491         return NULL;
1492 }
1493
1494 static void cfq_activate_request(struct request_queue *q, struct request *rq)
1495 {
1496         struct cfq_data *cfqd = q->elevator->elevator_data;
1497
1498         cfqd->rq_in_driver++;
1499         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
1500                                                 cfqd->rq_in_driver);
1501
1502         cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
1503 }
1504
1505 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
1506 {
1507         struct cfq_data *cfqd = q->elevator->elevator_data;
1508
1509         WARN_ON(!cfqd->rq_in_driver);
1510         cfqd->rq_in_driver--;
1511         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
1512                                                 cfqd->rq_in_driver);
1513 }
1514
1515 static void cfq_remove_request(struct request *rq)
1516 {
1517         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1518
1519         if (cfqq->next_rq == rq)
1520                 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
1521
1522         list_del_init(&rq->queuelist);
1523         cfq_del_rq_rb(rq);
1524
1525         cfqq->cfqd->rq_queued--;
1526         cfq_blkiocg_update_io_remove_stats(cfqg_to_blkg(RQ_CFQG(rq)),
1527                                         rq_data_dir(rq), rq_is_sync(rq));
1528         if (rq->cmd_flags & REQ_PRIO) {
1529                 WARN_ON(!cfqq->prio_pending);
1530                 cfqq->prio_pending--;
1531         }
1532 }
1533
1534 static int cfq_merge(struct request_queue *q, struct request **req,
1535                      struct bio *bio)
1536 {
1537         struct cfq_data *cfqd = q->elevator->elevator_data;
1538         struct request *__rq;
1539
1540         __rq = cfq_find_rq_fmerge(cfqd, bio);
1541         if (__rq && elv_rq_merge_ok(__rq, bio)) {
1542                 *req = __rq;
1543                 return ELEVATOR_FRONT_MERGE;
1544         }
1545
1546         return ELEVATOR_NO_MERGE;
1547 }
1548
1549 static void cfq_merged_request(struct request_queue *q, struct request *req,
1550                                int type)
1551 {
1552         if (type == ELEVATOR_FRONT_MERGE) {
1553                 struct cfq_queue *cfqq = RQ_CFQQ(req);
1554
1555                 cfq_reposition_rq_rb(cfqq, req);
1556         }
1557 }
1558
1559 static void cfq_bio_merged(struct request_queue *q, struct request *req,
1560                                 struct bio *bio)
1561 {
1562         cfq_blkiocg_update_io_merged_stats(cfqg_to_blkg(RQ_CFQG(req)),
1563                                         bio_data_dir(bio), cfq_bio_sync(bio));
1564 }
1565
1566 static void
1567 cfq_merged_requests(struct request_queue *q, struct request *rq,
1568                     struct request *next)
1569 {
1570         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1571         struct cfq_data *cfqd = q->elevator->elevator_data;
1572
1573         /*
1574          * reposition in fifo if next is older than rq
1575          */
1576         if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
1577             time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
1578                 list_move(&rq->queuelist, &next->queuelist);
1579                 rq_set_fifo_time(rq, rq_fifo_time(next));
1580         }
1581
1582         if (cfqq->next_rq == next)
1583                 cfqq->next_rq = rq;
1584         cfq_remove_request(next);
1585         cfq_blkiocg_update_io_merged_stats(cfqg_to_blkg(RQ_CFQG(rq)),
1586                                         rq_data_dir(next), rq_is_sync(next));
1587
1588         cfqq = RQ_CFQQ(next);
1589         /*
1590          * All requests of this queue have been merged into other queues;
1591          * delete it from the service tree. If it's the active_queue,
1592          * cfq_dispatch_requests() will choose to expire it or idle.
1593          */
1594         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
1595             cfqq != cfqd->active_queue)
1596                 cfq_del_cfqq_rr(cfqd, cfqq);
1597 }
1598
1599 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
1600                            struct bio *bio)
1601 {
1602         struct cfq_data *cfqd = q->elevator->elevator_data;
1603         struct cfq_io_cq *cic;
1604         struct cfq_queue *cfqq;
1605
1606         /*
1607          * Disallow merge of a sync bio into an async request.
1608          */
1609         if (cfq_bio_sync(bio) && !rq_is_sync(rq))
1610                 return false;
1611
1612         /*
1613          * Look up the cfqq that this bio will be queued with and allow the
1614          * merge only if rq is queued there.
1615          */
1616         cic = cfq_cic_lookup(cfqd, current->io_context);
1617         if (!cic)
1618                 return false;
1619
1620         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1621         return cfqq == RQ_CFQQ(rq);
1622 }
1623
1624 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1625 {
1626         del_timer(&cfqd->idle_slice_timer);
1627         cfq_blkiocg_update_idle_time_stats(cfqg_to_blkg(cfqq->cfqg));
1628 }
1629
1630 static void __cfq_set_active_queue(struct cfq_data *cfqd,
1631                                    struct cfq_queue *cfqq)
1632 {
1633         if (cfqq) {
1634                 cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
1635                                 cfqd->serving_prio, cfqd->serving_type);
1636                 cfq_blkiocg_update_avg_queue_size_stats(cfqg_to_blkg(cfqq->cfqg));
1637                 cfqq->slice_start = 0;
1638                 cfqq->dispatch_start = jiffies;
1639                 cfqq->allocated_slice = 0;
1640                 cfqq->slice_end = 0;
1641                 cfqq->slice_dispatch = 0;
1642                 cfqq->nr_sectors = 0;
1643
1644                 cfq_clear_cfqq_wait_request(cfqq);
1645                 cfq_clear_cfqq_must_dispatch(cfqq);
1646                 cfq_clear_cfqq_must_alloc_slice(cfqq);
1647                 cfq_clear_cfqq_fifo_expire(cfqq);
1648                 cfq_mark_cfqq_slice_new(cfqq);
1649
1650                 cfq_del_timer(cfqd, cfqq);
1651         }
1652
1653         cfqd->active_queue = cfqq;
1654 }
1655
1656 /*
1657  * current cfqq expired its slice (or was too idle), select new one
1658  */
1659 static void
1660 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1661                     bool timed_out)
1662 {
1663         cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
1664
1665         if (cfq_cfqq_wait_request(cfqq))
1666                 cfq_del_timer(cfqd, cfqq);
1667
1668         cfq_clear_cfqq_wait_request(cfqq);
1669         cfq_clear_cfqq_wait_busy(cfqq);
1670
1671         /*
1672          * If this cfqq is shared between multiple processes, check to
1673          * make sure that those processes are still issuing I/Os within
1674          * the mean seek distance.  If not, it may be time to break the
1675          * queues apart again.
1676          */
1677         if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
1678                 cfq_mark_cfqq_split_coop(cfqq);
1679
1680         /*
1681          * store what was left of this slice, if the queue idled/timed out
1682          */
1683         if (timed_out) {
1684                 if (cfq_cfqq_slice_new(cfqq))
1685                         cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
1686                 else
1687                         cfqq->slice_resid = cfqq->slice_end - jiffies;
1688                 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
1689         }
1690
1691         cfq_group_served(cfqd, cfqq->cfqg, cfqq);
1692
1693         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
1694                 cfq_del_cfqq_rr(cfqd, cfqq);
1695
1696         cfq_resort_rr_list(cfqd, cfqq);
1697
1698         if (cfqq == cfqd->active_queue)
1699                 cfqd->active_queue = NULL;
1700
1701         if (cfqd->active_cic) {
1702                 put_io_context(cfqd->active_cic->icq.ioc);
1703                 cfqd->active_cic = NULL;
1704         }
1705 }
1706
1707 static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
1708 {
1709         struct cfq_queue *cfqq = cfqd->active_queue;
1710
1711         if (cfqq)
1712                 __cfq_slice_expired(cfqd, cfqq, timed_out);
1713 }
1714
1715 /*
1716  * Get next queue for service. Unless we have a queue preemption,
1717  * we'll simply select the first cfqq in the service tree.
1718  */
1719 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
1720 {
1721         struct cfq_rb_root *service_tree =
1722                 service_tree_for(cfqd->serving_group, cfqd->serving_prio,
1723                                         cfqd->serving_type);
1724
1725         if (!cfqd->rq_queued)
1726                 return NULL;
1727
1728         /* There is nothing to dispatch */
1729         if (!service_tree)
1730                 return NULL;
1731         if (RB_EMPTY_ROOT(&service_tree->rb))
1732                 return NULL;
1733         return cfq_rb_first(service_tree);
1734 }
1735
1736 static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
1737 {
1738         struct cfq_group *cfqg;
1739         struct cfq_queue *cfqq;
1740         int i, j;
1741         struct cfq_rb_root *st;
1742
1743         if (!cfqd->rq_queued)
1744                 return NULL;
1745
1746         cfqg = cfq_get_next_cfqg(cfqd);
1747         if (!cfqg)
1748                 return NULL;
1749
1750         for_each_cfqg_st(cfqg, i, j, st)
1751                 if ((cfqq = cfq_rb_first(st)) != NULL)
1752                         return cfqq;
1753         return NULL;
1754 }
1755
1756 /*
1757  * Get and set a new active queue for service.
1758  */
1759 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
1760                                               struct cfq_queue *cfqq)
1761 {
1762         if (!cfqq)
1763                 cfqq = cfq_get_next_queue(cfqd);
1764
1765         __cfq_set_active_queue(cfqd, cfqq);
1766         return cfqq;
1767 }
1768
1769 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
1770                                           struct request *rq)
1771 {
1772         if (blk_rq_pos(rq) >= cfqd->last_position)
1773                 return blk_rq_pos(rq) - cfqd->last_position;
1774         else
1775                 return cfqd->last_position - blk_rq_pos(rq);
1776 }
1777
1778 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1779                                struct request *rq)
1780 {
1781         return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
1782 }
1783
1784 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1785                                     struct cfq_queue *cur_cfqq)
1786 {
1787         struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
1788         struct rb_node *parent, *node;
1789         struct cfq_queue *__cfqq;
1790         sector_t sector = cfqd->last_position;
1791
1792         if (RB_EMPTY_ROOT(root))
1793                 return NULL;
1794
1795         /*
1796          * First, if we find a request starting at the end of the last
1797          * request, choose it.
1798          */
1799         __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
1800         if (__cfqq)
1801                 return __cfqq;
1802
1803         /*
1804          * If the exact sector wasn't found, the parent of the NULL leaf
1805          * will contain the closest sector.
1806          */
1807         __cfqq = rb_entry(parent, struct cfq_queue, p_node);
1808         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1809                 return __cfqq;
1810
1811         if (blk_rq_pos(__cfqq->next_rq) < sector)
1812                 node = rb_next(&__cfqq->p_node);
1813         else
1814                 node = rb_prev(&__cfqq->p_node);
1815         if (!node)
1816                 return NULL;
1817
1818         __cfqq = rb_entry(node, struct cfq_queue, p_node);
1819         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1820                 return __cfqq;
1821
1822         return NULL;
1823 }
1824
1825 /*
1826  * cfqd - obvious
1827  * cur_cfqq - passed in so that we don't decide that the current queue is
1828  *            closely cooperating with itself.
1829  *
1830  * So, basically we're assuming that cur_cfqq has dispatched at least
1831  * one request, and that cfqd->last_position reflects a position on the disk
1832  * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
1833  * assumption.
1834  */
1835 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1836                                               struct cfq_queue *cur_cfqq)
1837 {
1838         struct cfq_queue *cfqq;
1839
1840         if (cfq_class_idle(cur_cfqq))
1841                 return NULL;
1842         if (!cfq_cfqq_sync(cur_cfqq))
1843                 return NULL;
1844         if (CFQQ_SEEKY(cur_cfqq))
1845                 return NULL;
1846
1847         /*
1848          * Don't search priority tree if it's the only queue in the group.
1849          */
1850         if (cur_cfqq->cfqg->nr_cfqq == 1)
1851                 return NULL;
1852
1853         /*
1854          * We should notice if some of the queues are cooperating, e.g.
1855          * working closely on the same area of the disk. In that case,
1856          * we can group them together and not waste time idling.
1857          */
1858         cfqq = cfqq_close(cfqd, cur_cfqq);
1859         if (!cfqq)
1860                 return NULL;
1861
1862         /* If new queue belongs to different cfq_group, don't choose it */
1863         if (cur_cfqq->cfqg != cfqq->cfqg)
1864                 return NULL;
1865
1866         /*
1867          * It only makes sense to merge sync queues.
1868          */
1869         if (!cfq_cfqq_sync(cfqq))
1870                 return NULL;
1871         if (CFQQ_SEEKY(cfqq))
1872                 return NULL;
1873
1874         /*
1875          * Do not merge queues of different priority classes
1876          */
1877         if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
1878                 return NULL;
1879
1880         return cfqq;
1881 }
1882
1883 /*
1884  * Determine whether we should enforce idle window for this queue.
1885  */
1886
1887 static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1888 {
1889         enum wl_prio_t prio = cfqq_prio(cfqq);
1890         struct cfq_rb_root *service_tree = cfqq->service_tree;
1891
1892         BUG_ON(!service_tree);
1893         BUG_ON(!service_tree->count);
1894
1895         if (!cfqd->cfq_slice_idle)
1896                 return false;
1897
1898         /* We never idle for idle class queues. */
1899         if (prio == IDLE_WORKLOAD)
1900                 return false;
1901
1902         /* We do idle for queues that were marked with the idle window flag. */
1903         if (cfq_cfqq_idle_window(cfqq) &&
1904            !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
1905                 return true;
1906
1907         /*
1908          * Otherwise, we idle only if this queue is the last one
1909          * in its service tree.
1910          */
1911         if (service_tree->count == 1 && cfq_cfqq_sync(cfqq) &&
1912            !cfq_io_thinktime_big(cfqd, &service_tree->ttime, false))
1913                 return true;
1914         cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
1915                         service_tree->count);
1916         return false;
1917 }
1918
1919 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1920 {
1921         struct cfq_queue *cfqq = cfqd->active_queue;
1922         struct cfq_io_cq *cic;
1923         unsigned long sl, group_idle = 0;
1924
1925         /*
1926          * On an SSD device with no seek penalty, disable idling. But only do
1927          * so for devices that support queuing, otherwise we still have a
1928          * problem with sync vs async workloads.
1929          */
1930         if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
1931                 return;
1932
1933         WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
1934         WARN_ON(cfq_cfqq_slice_new(cfqq));
1935
1936         /*
1937          * idle is disabled, either manually or by past process history
1938          */
1939         if (!cfq_should_idle(cfqd, cfqq)) {
1940                 /* no queue idling. Check for group idling */
1941                 if (cfqd->cfq_group_idle)
1942                         group_idle = cfqd->cfq_group_idle;
1943                 else
1944                         return;
1945         }
1946
1947         /*
1948          * still active requests from this queue, don't idle
1949          */
1950         if (cfqq->dispatched)
1951                 return;
1952
1953         /*
1954          * task has exited, don't wait
1955          */
1956         cic = cfqd->active_cic;
1957         if (!cic || !atomic_read(&cic->icq.ioc->nr_tasks))
1958                 return;
1959
1960         /*
1961          * If our average think time is larger than the remaining time
1962          * slice, then don't idle. This avoids overrunning the allotted
1963          * time slice.
1964          */
1965         if (sample_valid(cic->ttime.ttime_samples) &&
1966             (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
1967                 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
1968                              cic->ttime.ttime_mean);
1969                 return;
1970         }
1971
1972         /* There are other queues in the group, don't do group idle */
1973         if (group_idle && cfqq->cfqg->nr_cfqq > 1)
1974                 return;
1975
1976         cfq_mark_cfqq_wait_request(cfqq);
1977
1978         if (group_idle)
1979                 sl = cfqd->cfq_group_idle;
1980         else
1981                 sl = cfqd->cfq_slice_idle;
1982
1983         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
1984         cfq_blkiocg_update_set_idle_time_stats(cfqg_to_blkg(cfqq->cfqg));
1985         cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
1986                         group_idle ? 1 : 0);
1987 }
1988
1989 /*
1990  * Move request from internal lists to the request queue dispatch list.
1991  */
1992 static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
1993 {
1994         struct cfq_data *cfqd = q->elevator->elevator_data;
1995         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1996
1997         cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
1998
1999         cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
2000         cfq_remove_request(rq);
2001         cfqq->dispatched++;
2002         (RQ_CFQG(rq))->dispatched++;
2003         elv_dispatch_sort(q, rq);
2004
2005         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
2006         cfqq->nr_sectors += blk_rq_sectors(rq);
2007         cfq_blkiocg_update_dispatch_stats(cfqg_to_blkg(cfqq->cfqg),
2008                                           blk_rq_bytes(rq), rq_data_dir(rq),
2009                                           rq_is_sync(rq));
2010 }
2011
2012 /*
2013  * return expired entry, or NULL to just start from scratch in rbtree
2014  */
2015 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
2016 {
2017         struct request *rq = NULL;
2018
2019         if (cfq_cfqq_fifo_expire(cfqq))
2020                 return NULL;
2021
2022         cfq_mark_cfqq_fifo_expire(cfqq);
2023
2024         if (list_empty(&cfqq->fifo))
2025                 return NULL;
2026
2027         rq = rq_entry_fifo(cfqq->fifo.next);
2028         if (time_before(jiffies, rq_fifo_time(rq)))
2029                 rq = NULL;
2030
2031         cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
2032         return rq;
2033 }
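/*
 * The fifo time is assigned in cfq_insert_request() as
 * jiffies + cfq_fifo_expire[rq_is_sync(rq)].  With the default tunables
 * (and HZ=1000) that is roughly 125ms for sync and 250ms for async
 * requests, so only a request that has waited at least that long is
 * returned here and dispatched ahead of the sector-sorted cfqq->next_rq.
 */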
2034
2035 static inline int
2036 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2037 {
2038         const int base_rq = cfqd->cfq_slice_async_rq;
2039
2040         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
2041
2042         return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
2043 }
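/*
 * Example with the default cfq_slice_async_rq of 2: a best-effort queue
 * at ioprio 4 may dispatch up to 2 * 2 * (IOPRIO_BE_NR - 4) = 16 requests
 * per slice round, ioprio 0 gets 32 and ioprio 7 gets 4 (IOPRIO_BE_NR
 * being 8).
 */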
2044
2045 /*
2046  * Must be called with the queue_lock held.
2047  */
2048 static int cfqq_process_refs(struct cfq_queue *cfqq)
2049 {
2050         int process_refs, io_refs;
2051
2052         io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
2053         process_refs = cfqq->ref - io_refs;
2054         BUG_ON(process_refs < 0);
2055         return process_refs;
2056 }
2057
2058 static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
2059 {
2060         int process_refs, new_process_refs;
2061         struct cfq_queue *__cfqq;
2062
2063         /*
2064          * If there are no process references on the new_cfqq, then it is
2065          * unsafe to follow the ->new_cfqq chain as other cfqq's in the
2066          * chain may have dropped their last reference (not just their
2067          * last process reference).
2068          */
2069         if (!cfqq_process_refs(new_cfqq))
2070                 return;
2071
2072         /* Avoid a circular list and skip interim queue merges */
2073         while ((__cfqq = new_cfqq->new_cfqq)) {
2074                 if (__cfqq == cfqq)
2075                         return;
2076                 new_cfqq = __cfqq;
2077         }
2078
2079         process_refs = cfqq_process_refs(cfqq);
2080         new_process_refs = cfqq_process_refs(new_cfqq);
2081         /*
2082          * If the process for the cfqq has gone away, there is no
2083          * sense in merging the queues.
2084          */
2085         if (process_refs == 0 || new_process_refs == 0)
2086                 return;
2087
2088         /*
2089          * Merge in the direction of the lesser amount of work.
2090          */
2091         if (new_process_refs >= process_refs) {
2092                 cfqq->new_cfqq = new_cfqq;
2093                 new_cfqq->ref += process_refs;
2094         } else {
2095                 new_cfqq->new_cfqq = cfqq;
2096                 cfqq->ref += new_process_refs;
2097         }
2098 }
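/*
 * Example: if cfqq has one remaining process reference and new_cfqq has
 * three, the merge is set up in new_cfqq's direction: cfqq->new_cfqq is
 * pointed at new_cfqq and new_cfqq->ref is bumped by that one reference,
 * so the target cannot go away before the merge actually takes place.
 */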
2099
2100 static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
2101                                 struct cfq_group *cfqg, enum wl_prio_t prio)
2102 {
2103         struct cfq_queue *queue;
2104         int i;
2105         bool key_valid = false;
2106         unsigned long lowest_key = 0;
2107         enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
2108
2109         for (i = 0; i <= SYNC_WORKLOAD; ++i) {
2110                 /* select the one with lowest rb_key */
2111                 queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
2112                 if (queue &&
2113                     (!key_valid || time_before(queue->rb_key, lowest_key))) {
2114                         lowest_key = queue->rb_key;
2115                         cur_best = i;
2116                         key_valid = true;
2117                 }
2118         }
2119
2120         return cur_best;
2121 }
2122
2123 static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
2124 {
2125         unsigned slice;
2126         unsigned count;
2127         struct cfq_rb_root *st;
2128         unsigned group_slice;
2129         enum wl_prio_t original_prio = cfqd->serving_prio;
2130
2131         /* Choose next priority. RT > BE > IDLE */
2132         if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
2133                 cfqd->serving_prio = RT_WORKLOAD;
2134         else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
2135                 cfqd->serving_prio = BE_WORKLOAD;
2136         else {
2137                 cfqd->serving_prio = IDLE_WORKLOAD;
2138                 cfqd->workload_expires = jiffies + 1;
2139                 return;
2140         }
2141
2142         if (original_prio != cfqd->serving_prio)
2143                 goto new_workload;
2144
2145         /*
2146          * For RT and BE, we also have to choose the type
2147          * (SYNC, SYNC_NOIDLE, ASYNC) and compute a workload
2148          * expiration time.
2149          */
2150         st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2151         count = st->count;
2152
2153         /*
2154          * check workload expiration, and that we still have other queues ready
2155          */
2156         if (count && !time_after(jiffies, cfqd->workload_expires))
2157                 return;
2158
2159 new_workload:
2160         /* otherwise select new workload type */
2161         cfqd->serving_type =
2162                 cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
2163         st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2164         count = st->count;
2165
2166         /*
2167          * the workload slice is computed as a fraction of target latency
2168          * proportional to the number of queues in that workload, over
2169          * all the queues in the same priority class
2170          */
2171         group_slice = cfq_group_slice(cfqd, cfqg);
2172
2173         slice = group_slice * count /
2174                 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
2175                       cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
2176
2177         if (cfqd->serving_type == ASYNC_WORKLOAD) {
2178                 unsigned int tmp;
2179
2180                 /*
2181                  * Async queues are currently system wide. Just taking the
2182                  * proportion of queues within the same group will lead to a
2183                  * higher async ratio system wide, as the root group generally
2184                  * has a higher weight. A more accurate thing would be to
2185                  * calculate the system wide async/sync ratio.
2186                  */
2187                 tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
2188                 tmp = tmp/cfqd->busy_queues;
2189                 slice = min_t(unsigned, slice, tmp);
2190
2191                 /* async workload slice is scaled down according to
2192                  * the sync/async slice ratio. */
2193                 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
2194         } else
2195                 /* sync workload slice is at least 2 * cfq_slice_idle */
2196                 slice = max(slice, 2 * cfqd->cfq_slice_idle);
2197
2198         slice = max_t(unsigned, slice, CFQ_MIN_TT);
2199         cfq_log(cfqd, "workload slice:%d", slice);
2200         cfqd->workload_expires = jiffies + slice;
2201 }
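/*
 * Rough example of the slice computation above: if this group's share of
 * cfq_target_latency (300ms) comes to 300ms and 3 of its 6 busy
 * best-effort queues sit on the chosen service tree, the workload is
 * given about 300 * 3 / 6 = 150ms before a new type is chosen.  An ASYNC
 * workload is further capped by the async share of busy queues and scaled
 * by the cfq_slice_async / cfq_slice_sync ratio.
 */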
2202
2203 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2204 {
2205         struct cfq_rb_root *st = &cfqd->grp_service_tree;
2206         struct cfq_group *cfqg;
2207
2208         if (RB_EMPTY_ROOT(&st->rb))
2209                 return NULL;
2210         cfqg = cfq_rb_first_group(st);
2211         update_min_vdisktime(st);
2212         return cfqg;
2213 }
2214
2215 static void cfq_choose_cfqg(struct cfq_data *cfqd)
2216 {
2217         struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
2218
2219         cfqd->serving_group = cfqg;
2220
2221         /* Restore the workload type data */
2222         if (cfqg->saved_workload_slice) {
2223                 cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
2224                 cfqd->serving_type = cfqg->saved_workload;
2225                 cfqd->serving_prio = cfqg->saved_serving_prio;
2226         } else
2227                 cfqd->workload_expires = jiffies - 1;
2228
2229         choose_service_tree(cfqd, cfqg);
2230 }
2231
2232 /*
2233  * Select a queue for service. If we have a current active queue,
2234  * check whether to continue servicing it, or retrieve and set a new one.
2235  */
2236 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
2237 {
2238         struct cfq_queue *cfqq, *new_cfqq = NULL;
2239
2240         cfqq = cfqd->active_queue;
2241         if (!cfqq)
2242                 goto new_queue;
2243
2244         if (!cfqd->rq_queued)
2245                 return NULL;
2246
2247         /*
2248          * We were waiting for group to get backlogged. Expire the queue
2249          */
2250         if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
2251                 goto expire;
2252
2253         /*
2254          * The active queue has run out of time, expire it and select new.
2255          */
2256         if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
2257                 /*
2258                  * If slice had not expired at the completion of last request
2259                  * we might not have turned on wait_busy flag. Don't expire
2260                  * the queue yet. Allow the group to get backlogged.
2261                  *
2262                  * The very fact that we have used up the slice means we
2263                  * have been idling all along on this queue, so it should be
2264                  * ok to wait for this request to complete.
2265                  */
2266                 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
2267                     && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2268                         cfqq = NULL;
2269                         goto keep_queue;
2270                 } else
2271                         goto check_group_idle;
2272         }
2273
2274         /*
2275          * The active queue has requests and isn't expired, allow it to
2276          * dispatch.
2277          */
2278         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
2279                 goto keep_queue;
2280
2281         /*
2282          * If another queue has a request waiting within our mean seek
2283          * distance, let it run.  The expire code will check for close
2284          * cooperators and put the close queue at the front of the service
2285          * tree.  If possible, merge the expiring queue with the new cfqq.
2286          */
2287         new_cfqq = cfq_close_cooperator(cfqd, cfqq);
2288         if (new_cfqq) {
2289                 if (!cfqq->new_cfqq)
2290                         cfq_setup_merge(cfqq, new_cfqq);
2291                 goto expire;
2292         }
2293
2294         /*
2295          * No requests pending. If the active queue still has requests in
2296          * flight or is idling for a new request, allow either of these
2297          * conditions to happen (or time out) before selecting a new queue.
2298          */
2299         if (timer_pending(&cfqd->idle_slice_timer)) {
2300                 cfqq = NULL;
2301                 goto keep_queue;
2302         }
2303
2304         /*
2305          * This is a deep seek queue, but the device is much faster than
2306          * the queue can deliver; don't idle.
2307          */
2308         if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
2309             (cfq_cfqq_slice_new(cfqq) ||
2310             (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
2311                 cfq_clear_cfqq_deep(cfqq);
2312                 cfq_clear_cfqq_idle_window(cfqq);
2313         }
2314
2315         if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2316                 cfqq = NULL;
2317                 goto keep_queue;
2318         }
2319
2320         /*
2321          * If group idle is enabled and there are requests dispatched from
2322          * this group, wait for requests to complete.
2323          */
2324 check_group_idle:
2325         if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
2326             cfqq->cfqg->dispatched &&
2327             !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
2328                 cfqq = NULL;
2329                 goto keep_queue;
2330         }
2331
2332 expire:
2333         cfq_slice_expired(cfqd, 0);
2334 new_queue:
2335         /*
2336          * Current queue expired. Check if we have to switch to a new
2337          * service tree
2338          */
2339         if (!new_cfqq)
2340                 cfq_choose_cfqg(cfqd);
2341
2342         cfqq = cfq_set_active_queue(cfqd, new_cfqq);
2343 keep_queue:
2344         return cfqq;
2345 }
2346
2347 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
2348 {
2349         int dispatched = 0;
2350
2351         while (cfqq->next_rq) {
2352                 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
2353                 dispatched++;
2354         }
2355
2356         BUG_ON(!list_empty(&cfqq->fifo));
2357
2358         /* By default cfqq is not expired if it is empty. Do it explicitly */
2359         __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
2360         return dispatched;
2361 }
2362
2363 /*
2364  * Drain our current requests. Used for barriers and when switching
2365  * io schedulers on-the-fly.
2366  */
2367 static int cfq_forced_dispatch(struct cfq_data *cfqd)
2368 {
2369         struct cfq_queue *cfqq;
2370         int dispatched = 0;
2371
2372         /* Expire the timeslice of the current active queue first */
2373         cfq_slice_expired(cfqd, 0);
2374         while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
2375                 __cfq_set_active_queue(cfqd, cfqq);
2376                 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
2377         }
2378
2379         BUG_ON(cfqd->busy_queues);
2380
2381         cfq_log(cfqd, "forced_dispatch=%d", dispatched);
2382         return dispatched;
2383 }
2384
2385 static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
2386         struct cfq_queue *cfqq)
2387 {
2388         /* the queue hasn't finished any request, can't estimate */
2389         if (cfq_cfqq_slice_new(cfqq))
2390                 return true;
2391         if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
2392                 cfqq->slice_end))
2393                 return true;
2394
2395         return false;
2396 }
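/*
 * In other words, each already-dispatched request is assumed to cost
 * roughly one cfq_slice_idle period.  With the default 8ms cfq_slice_idle
 * (HZ/125 at HZ=1000) and 3 requests in flight, the slice counts as
 * "used soon" unless about 24ms of it remain.
 */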
2397
2398 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2399 {
2400         unsigned int max_dispatch;
2401
2402         /*
2403          * Drain async requests before we start sync IO
2404          */
2405         if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
2406                 return false;
2407
2408         /*
2409          * If this is an async queue and we have sync IO in flight, let it wait
2410          */
2411         if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
2412                 return false;
2413
2414         max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
2415         if (cfq_class_idle(cfqq))
2416                 max_dispatch = 1;
2417
2418         /*
2419          * Does this cfqq already have too much IO in flight?
2420          */
2421         if (cfqq->dispatched >= max_dispatch) {
2422                 bool promote_sync = false;
2423                 /*
2424                  * idle queue must always only have a single IO in flight
2425                  */
2426                 if (cfq_class_idle(cfqq))
2427                         return false;
2428
2429                 /*
2430                  * If there is only one sync queue,
2431                  * we can ignore the async queues here and give the sync
2432                  * queue no dispatch limit, since a sync queue can
2433                  * preempt an async queue anyway; limiting the sync queue
2434                  * doesn't make sense. This is useful for the aiostress test.
2435                  */
2436                 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
2437                         promote_sync = true;
2438
2439                 /*
2440                  * We have other queues, don't allow more IO from this one
2441                  */
2442                 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
2443                                 !promote_sync)
2444                         return false;
2445
2446                 /*
2447                  * Sole queue user, no limit
2448                  */
2449                 if (cfqd->busy_queues == 1 || promote_sync)
2450                         max_dispatch = -1;
2451                 else
2452                         /*
2453                          * Normally we start throttling cfqq when cfq_quantum/2
2454                          * requests have been dispatched. But we can drive
2455                          * deeper queue depths at the beginning of the slice,
2456                          * subject to the upper limit of cfq_quantum.
2457                          */
2458                         max_dispatch = cfqd->cfq_quantum;
2459         }
2460
2461         /*
2462          * Async queues must wait a bit before being allowed dispatch.
2463          * We also ramp up the dispatch depth gradually for async IO,
2464          * based on the last sync IO we serviced
2465          */
2466         if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
2467                 unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
2468                 unsigned int depth;
2469
2470                 depth = last_sync / cfqd->cfq_slice[1];
2471                 if (!depth && !cfqq->dispatched)
2472                         depth = 1;
2473                 if (depth < max_dispatch)
2474                         max_dispatch = depth;
2475         }
2476
2477         /*
2478          * If we're below the current max, allow a dispatch
2479          */
2480         return cfqq->dispatched < max_dispatch;
2481 }
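/*
 * The async ramp-up above works in units of the sync slice: with the
 * default cfq_slice_sync of 100ms (HZ/10 at HZ=1000), an async queue that
 * already has requests in flight is allowed depth 1 only once 100ms have
 * passed since cfqd->last_delayed_sync, depth 2 after 200ms, and so on,
 * never exceeding the max_dispatch computed earlier.
 */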
2482
2483 /*
2484  * Dispatch a request from cfqq, moving them to the request queue
2485  * dispatch list.
2486  */
2487 static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2488 {
2489         struct request *rq;
2490
2491         BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
2492
2493         if (!cfq_may_dispatch(cfqd, cfqq))
2494                 return false;
2495
2496         /*
2497          * follow expired path, else get first next available
2498          */
2499         rq = cfq_check_fifo(cfqq);
2500         if (!rq)
2501                 rq = cfqq->next_rq;
2502
2503         /*
2504          * insert request into driver dispatch list
2505          */
2506         cfq_dispatch_insert(cfqd->queue, rq);
2507
2508         if (!cfqd->active_cic) {
2509                 struct cfq_io_cq *cic = RQ_CIC(rq);
2510
2511                 atomic_long_inc(&cic->icq.ioc->refcount);
2512                 cfqd->active_cic = cic;
2513         }
2514
2515         return true;
2516 }
2517
2518 /*
2519  * Find the cfqq that we need to service and move a request from that to the
2520  * dispatch list
2521  */
2522 static int cfq_dispatch_requests(struct request_queue *q, int force)
2523 {
2524         struct cfq_data *cfqd = q->elevator->elevator_data;
2525         struct cfq_queue *cfqq;
2526
2527         if (!cfqd->busy_queues)
2528                 return 0;
2529
2530         if (unlikely(force))
2531                 return cfq_forced_dispatch(cfqd);
2532
2533         cfqq = cfq_select_queue(cfqd);
2534         if (!cfqq)
2535                 return 0;
2536
2537         /*
2538          * Dispatch a request from this cfqq, if it is allowed
2539          */
2540         if (!cfq_dispatch_request(cfqd, cfqq))
2541                 return 0;
2542
2543         cfqq->slice_dispatch++;
2544         cfq_clear_cfqq_must_dispatch(cfqq);
2545
2546         /*
2547          * expire an async queue immediately if it has used up its slice. An
2548          * idle queue always expires after one dispatch round.
2549          */
2550         if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
2551             cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
2552             cfq_class_idle(cfqq))) {
2553                 cfqq->slice_end = jiffies + 1;
2554                 cfq_slice_expired(cfqd, 0);
2555         }
2556
2557         cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2558         return 1;
2559 }
2560
2561 /*
2562  * task holds one reference to the queue, dropped when task exits. each rq
2563  * in-flight on this queue also holds a reference, dropped when rq is freed.
2564  *
2565  * Each cfq queue took a reference on the parent group. Drop it now.
2566  * queue lock must be held here.
2567  */
2568 static void cfq_put_queue(struct cfq_queue *cfqq)
2569 {
2570         struct cfq_data *cfqd = cfqq->cfqd;
2571         struct cfq_group *cfqg;
2572
2573         BUG_ON(cfqq->ref <= 0);
2574
2575         cfqq->ref--;
2576         if (cfqq->ref)
2577                 return;
2578
2579         cfq_log_cfqq(cfqd, cfqq, "put_queue");
2580         BUG_ON(rb_first(&cfqq->sort_list));
2581         BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
2582         cfqg = cfqq->cfqg;
2583
2584         if (unlikely(cfqd->active_queue == cfqq)) {
2585                 __cfq_slice_expired(cfqd, cfqq, 0);
2586                 cfq_schedule_dispatch(cfqd);
2587         }
2588
2589         BUG_ON(cfq_cfqq_on_rr(cfqq));
2590         kmem_cache_free(cfq_pool, cfqq);
2591         blkg_put(cfqg_to_blkg(cfqg));
2592 }
2593
2594 static void cfq_put_cooperator(struct cfq_queue *cfqq)
2595 {
2596         struct cfq_queue *__cfqq, *next;
2597
2598         /*
2599          * If this queue was scheduled to merge with another queue, be
2600          * sure to drop the reference taken on that queue (and others in
2601          * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
2602          */
2603         __cfqq = cfqq->new_cfqq;
2604         while (__cfqq) {
2605                 if (__cfqq == cfqq) {
2606                         WARN(1, "cfqq->new_cfqq loop detected\n");
2607                         break;
2608                 }
2609                 next = __cfqq->new_cfqq;
2610                 cfq_put_queue(__cfqq);
2611                 __cfqq = next;
2612         }
2613 }
2614
2615 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2616 {
2617         if (unlikely(cfqq == cfqd->active_queue)) {
2618                 __cfq_slice_expired(cfqd, cfqq, 0);
2619                 cfq_schedule_dispatch(cfqd);
2620         }
2621
2622         cfq_put_cooperator(cfqq);
2623
2624         cfq_put_queue(cfqq);
2625 }
2626
2627 static void cfq_init_icq(struct io_cq *icq)
2628 {
2629         struct cfq_io_cq *cic = icq_to_cic(icq);
2630
2631         cic->ttime.last_end_request = jiffies;
2632 }
2633
2634 static void cfq_exit_icq(struct io_cq *icq)
2635 {
2636         struct cfq_io_cq *cic = icq_to_cic(icq);
2637         struct cfq_data *cfqd = cic_to_cfqd(cic);
2638
2639         if (cic->cfqq[BLK_RW_ASYNC]) {
2640                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
2641                 cic->cfqq[BLK_RW_ASYNC] = NULL;
2642         }
2643
2644         if (cic->cfqq[BLK_RW_SYNC]) {
2645                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
2646                 cic->cfqq[BLK_RW_SYNC] = NULL;
2647         }
2648 }
2649
2650 static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
2651 {
2652         struct task_struct *tsk = current;
2653         int ioprio_class;
2654
2655         if (!cfq_cfqq_prio_changed(cfqq))
2656                 return;
2657
2658         ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
2659         switch (ioprio_class) {
2660         default:
2661                 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
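                /* fall through: treat an unknown class like IOPRIO_CLASS_NONE */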
2662         case IOPRIO_CLASS_NONE:
2663                 /*
2664                  * no prio set, inherit CPU scheduling settings
2665                  */
2666                 cfqq->ioprio = task_nice_ioprio(tsk);
2667                 cfqq->ioprio_class = task_nice_ioclass(tsk);
2668                 break;
2669         case IOPRIO_CLASS_RT:
2670                 cfqq->ioprio = task_ioprio(ioc);
2671                 cfqq->ioprio_class = IOPRIO_CLASS_RT;
2672                 break;
2673         case IOPRIO_CLASS_BE:
2674                 cfqq->ioprio = task_ioprio(ioc);
2675                 cfqq->ioprio_class = IOPRIO_CLASS_BE;
2676                 break;
2677         case IOPRIO_CLASS_IDLE:
2678                 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
2679                 cfqq->ioprio = 7;
2680                 cfq_clear_cfqq_idle_window(cfqq);
2681                 break;
2682         }
2683
2684         /*
2685          * keep track of original prio settings in case we have to temporarily
2686          * elevate the priority of this queue
2687          */
2688         cfqq->org_ioprio = cfqq->ioprio;
2689         cfq_clear_cfqq_prio_changed(cfqq);
2690 }
2691
2692 static void changed_ioprio(struct cfq_io_cq *cic)
2693 {
2694         struct cfq_data *cfqd = cic_to_cfqd(cic);
2695         struct cfq_queue *cfqq;
2696
2697         if (unlikely(!cfqd))
2698                 return;
2699
2700         cfqq = cic->cfqq[BLK_RW_ASYNC];
2701         if (cfqq) {
2702                 struct cfq_queue *new_cfqq;
2703                 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->icq.ioc,
2704                                                 GFP_ATOMIC);
2705                 if (new_cfqq) {
2706                         cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
2707                         cfq_put_queue(cfqq);
2708                 }
2709         }
2710
2711         cfqq = cic->cfqq[BLK_RW_SYNC];
2712         if (cfqq)
2713                 cfq_mark_cfqq_prio_changed(cfqq);
2714 }
2715
2716 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2717                           pid_t pid, bool is_sync)
2718 {
2719         RB_CLEAR_NODE(&cfqq->rb_node);
2720         RB_CLEAR_NODE(&cfqq->p_node);
2721         INIT_LIST_HEAD(&cfqq->fifo);
2722
2723         cfqq->ref = 0;
2724         cfqq->cfqd = cfqd;
2725
2726         cfq_mark_cfqq_prio_changed(cfqq);
2727
2728         if (is_sync) {
2729                 if (!cfq_class_idle(cfqq))
2730                         cfq_mark_cfqq_idle_window(cfqq);
2731                 cfq_mark_cfqq_sync(cfqq);
2732         }
2733         cfqq->pid = pid;
2734 }
2735
2736 #ifdef CONFIG_CFQ_GROUP_IOSCHED
2737 static void changed_cgroup(struct cfq_io_cq *cic)
2738 {
2739         struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
2740         struct cfq_data *cfqd = cic_to_cfqd(cic);
2741         struct request_queue *q;
2742
2743         if (unlikely(!cfqd))
2744                 return;
2745
2746         q = cfqd->queue;
2747
2748         if (sync_cfqq) {
2749                 /*
2750                  * Drop reference to sync queue. A new sync queue will be
2751                  * assigned in new group upon arrival of a fresh request.
2752                  */
2753                 cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
2754                 cic_set_cfqq(cic, NULL, 1);
2755                 cfq_put_queue(sync_cfqq);
2756         }
2757 }
2758 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
2759
2760 static struct cfq_queue *
2761 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
2762                      struct io_context *ioc, gfp_t gfp_mask)
2763 {
2764         struct blkio_cgroup *blkcg;
2765         struct cfq_queue *cfqq, *new_cfqq = NULL;
2766         struct cfq_io_cq *cic;
2767         struct cfq_group *cfqg;
2768
2769 retry:
2770         rcu_read_lock();
2771
2772         blkcg = task_blkio_cgroup(current);
2773
2774         cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
2775
2776         cic = cfq_cic_lookup(cfqd, ioc);
2777         /* cic always exists here */
2778         cfqq = cic_to_cfqq(cic, is_sync);
2779
2780         /*
2781          * Always try a new alloc if we fell back to the OOM cfqq
2782          * originally, since it should just be a temporary situation.
2783          */
2784         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2785                 cfqq = NULL;
2786                 if (new_cfqq) {
2787                         cfqq = new_cfqq;
2788                         new_cfqq = NULL;
2789                 } else if (gfp_mask & __GFP_WAIT) {
2790                         rcu_read_unlock();
2791                         spin_unlock_irq(cfqd->queue->queue_lock);
2792                         new_cfqq = kmem_cache_alloc_node(cfq_pool,
2793                                         gfp_mask | __GFP_ZERO,
2794                                         cfqd->queue->node);
2795                         spin_lock_irq(cfqd->queue->queue_lock);
2796                         if (new_cfqq)
2797                                 goto retry;
2798                 } else {
2799                         cfqq = kmem_cache_alloc_node(cfq_pool,
2800                                         gfp_mask | __GFP_ZERO,
2801                                         cfqd->queue->node);
2802                 }
2803
2804                 if (cfqq) {
2805                         cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
2806                         cfq_init_prio_data(cfqq, ioc);
2807                         cfq_link_cfqq_cfqg(cfqq, cfqg);
2808                         cfq_log_cfqq(cfqd, cfqq, "alloced");
2809                 } else
2810                         cfqq = &cfqd->oom_cfqq;
2811         }
2812
2813         if (new_cfqq)
2814                 kmem_cache_free(cfq_pool, new_cfqq);
2815
2816         rcu_read_unlock();
2817         return cfqq;
2818 }
2819
2820 static struct cfq_queue **
2821 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
2822 {
2823         switch (ioprio_class) {
2824         case IOPRIO_CLASS_RT:
2825                 return &cfqd->async_cfqq[0][ioprio];
2826         case IOPRIO_CLASS_BE:
2827                 return &cfqd->async_cfqq[1][ioprio];
2828         case IOPRIO_CLASS_IDLE:
2829                 return &cfqd->async_idle_cfqq;
2830         default:
2831                 BUG();
2832         }
2833 }
2834
2835 static struct cfq_queue *
2836 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
2837               gfp_t gfp_mask)
2838 {
2839         const int ioprio = task_ioprio(ioc);
2840         const int ioprio_class = task_ioprio_class(ioc);
2841         struct cfq_queue **async_cfqq = NULL;
2842         struct cfq_queue *cfqq = NULL;
2843
2844         if (!is_sync) {
2845                 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
2846                 cfqq = *async_cfqq;
2847         }
2848
2849         if (!cfqq)
2850                 cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
2851
2852         /*
2853          * pin the queue now that it's allocated, scheduler exit will prune it
2854          */
2855         if (!is_sync && !(*async_cfqq)) {
2856                 cfqq->ref++;
2857                 *async_cfqq = cfqq;
2858         }
2859
2860         cfqq->ref++;
2861         return cfqq;
2862 }
2863
2864 static void
2865 __cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
2866 {
2867         unsigned long elapsed = jiffies - ttime->last_end_request;
2868         elapsed = min(elapsed, 2UL * slice_idle);
2869
2870         ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
2871         ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
2872         ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
2873 }
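/*
 * This is a fixed-point exponential moving average with weight 7/8,
 * scaled by 256: ttime_samples converges towards 256 (the fixed point of
 * s = (7*s + 256) / 8) and ttime_total towards 256 times the average
 * elapsed time, so ttime_mean approximates the mean number of jiffies
 * between a request completing and the next one arriving, with each
 * sample clamped to 2 * slice_idle.
 */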
2874
2875 static void
2876 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2877                         struct cfq_io_cq *cic)
2878 {
2879         if (cfq_cfqq_sync(cfqq)) {
2880                 __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
2881                 __cfq_update_io_thinktime(&cfqq->service_tree->ttime,
2882                         cfqd->cfq_slice_idle);
2883         }
2884 #ifdef CONFIG_CFQ_GROUP_IOSCHED
2885         __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
2886 #endif
2887 }
2888
2889 static void
2890 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2891                        struct request *rq)
2892 {
2893         sector_t sdist = 0;
2894         sector_t n_sec = blk_rq_sectors(rq);
2895         if (cfqq->last_request_pos) {
2896                 if (cfqq->last_request_pos < blk_rq_pos(rq))
2897                         sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
2898                 else
2899                         sdist = cfqq->last_request_pos - blk_rq_pos(rq);
2900         }
2901
2902         cfqq->seek_history <<= 1;
2903         if (blk_queue_nonrot(cfqd->queue))
2904                 cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
2905         else
2906                 cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
2907 }
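/*
 * seek_history acts as a 32-bit shift register: each new request shifts
 * in a 1 when it looks seeky (a seek distance above CFQQ_SEEK_THR on
 * rotational storage, or a transfer smaller than CFQQ_SECT_THR_NONROT on
 * non-rotational storage).  CFQQ_SEEKY() then flags the queue once more
 * than 32/8 = 4 of the last 32 requests were seeky.
 */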
2908
2909 /*
2910  * Disable idle window if the process thinks too long or seeks so much that
2911  * it doesn't matter
2912  */
2913 static void
2914 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2915                        struct cfq_io_cq *cic)
2916 {
2917         int old_idle, enable_idle;
2918
2919         /*
2920          * Don't idle for async or idle io prio class
2921          */
2922         if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
2923                 return;
2924
2925         enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
2926
2927         if (cfqq->queued[0] + cfqq->queued[1] >= 4)
2928                 cfq_mark_cfqq_deep(cfqq);
2929
2930         if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
2931                 enable_idle = 0;
2932         else if (!atomic_read(&cic->icq.ioc->nr_tasks) ||
2933                  !cfqd->cfq_slice_idle ||
2934                  (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
2935                 enable_idle = 0;
2936         else if (sample_valid(cic->ttime.ttime_samples)) {
2937                 if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
2938                         enable_idle = 0;
2939                 else
2940                         enable_idle = 1;
2941         }
2942
2943         if (old_idle != enable_idle) {
2944                 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
2945                 if (enable_idle)
2946                         cfq_mark_cfqq_idle_window(cfqq);
2947                 else
2948                         cfq_clear_cfqq_idle_window(cfqq);
2949         }
2950 }
2951
2952 /*
2953  * Check if new_cfqq should preempt the currently active queue. Returns false
2954  * for no (or if we aren't sure); true will cause a preempt.
2955  */
2956 static bool
2957 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
2958                    struct request *rq)
2959 {
2960         struct cfq_queue *cfqq;
2961
2962         cfqq = cfqd->active_queue;
2963         if (!cfqq)
2964                 return false;
2965
2966         if (cfq_class_idle(new_cfqq))
2967                 return false;
2968
2969         if (cfq_class_idle(cfqq))
2970                 return true;
2971
2972         /*
2973          * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
2974          */
2975         if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
2976                 return false;
2977
2978         /*
2979          * if the new request is sync, but the currently running queue is
2980          * not, let the sync request have priority.
2981          */
2982         if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
2983                 return true;
2984
2985         if (new_cfqq->cfqg != cfqq->cfqg)
2986                 return false;
2987
2988         if (cfq_slice_used(cfqq))
2989                 return true;
2990
2991         /* Allow preemption only if we are idling on sync-noidle tree */
2992         if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
2993             cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
2994             new_cfqq->service_tree->count == 2 &&
2995             RB_EMPTY_ROOT(&cfqq->sort_list))
2996                 return true;
2997
2998         /*
2999          * So both queues are sync. Let the new request get disk time if
3000          * it's a metadata request and the current queue is doing regular IO.
3001          */
3002         if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
3003                 return true;
3004
3005         /*
3006          * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
3007          */
3008         if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3009                 return true;
3010
3011         /* The active queue is empty and should not be idling; let the new queue preempt it */
3012         if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
3013                 return true;
3014
3015         if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
3016                 return false;
3017
3018         /*
3019          * if this request is as-good as one we would expect from the
3020          * current cfqq, let it preempt
3021          */
3022         if (cfq_rq_close(cfqd, cfqq, rq))
3023                 return true;
3024
3025         return false;
3026 }
3027
3028 /*
3029  * cfqq preempts the active queue. if we allowed preempt with no slice left,
3030  * let it have half of its nominal slice.
3031  */
3032 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3033 {
3034         enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
3035
3036         cfq_log_cfqq(cfqd, cfqq, "preempt");
3037         cfq_slice_expired(cfqd, 1);
3038
3039         /*
3040          * If the workload type changed, don't save the slice; otherwise the
3041          * preempt won't take effect.
3042          */
3043         if (old_type != cfqq_type(cfqq))
3044                 cfqq->cfqg->saved_workload_slice = 0;
3045
3046         /*
3047          * Put the new queue at the front of the current list,
3048          * so we know that it will be selected next.
3049          */
3050         BUG_ON(!cfq_cfqq_on_rr(cfqq));
3051
3052         cfq_service_tree_add(cfqd, cfqq, 1);
3053
3054         cfqq->slice_end = 0;
3055         cfq_mark_cfqq_slice_new(cfqq);
3056 }
3057
3058 /*
3059  * Called when a new fs request (rq) is added (to cfqq). Check if there's
3060  * something we should do about it
3061  */
3062 static void
3063 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3064                 struct request *rq)
3065 {
3066         struct cfq_io_cq *cic = RQ_CIC(rq);
3067
3068         cfqd->rq_queued++;
3069         if (rq->cmd_flags & REQ_PRIO)
3070                 cfqq->prio_pending++;
3071
3072         cfq_update_io_thinktime(cfqd, cfqq, cic);
3073         cfq_update_io_seektime(cfqd, cfqq, rq);
3074         cfq_update_idle_window(cfqd, cfqq, cic);
3075
3076         cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
3077
3078         if (cfqq == cfqd->active_queue) {
3079                 /*
3080                  * Remember that we saw a request from this process, but
3081                  * don't start queuing just yet. Otherwise we risk seeing lots
3082                  * of tiny requests, because we disrupt the normal plugging
3083                  * and merging. If the request is already larger than a single
3084                  * page, let it rip immediately. For that case we assume that
3085                  * merging is already done. Ditto for a busy system that
3086                  * has other work pending: don't risk delaying until the
3087                  * idle timer unplugs to continue working.
3088                  */
3089                 if (cfq_cfqq_wait_request(cfqq)) {
3090                         if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3091                             cfqd->busy_queues > 1) {
3092                                 cfq_del_timer(cfqd, cfqq);
3093                                 cfq_clear_cfqq_wait_request(cfqq);
3094                                 __blk_run_queue(cfqd->queue);
3095                         } else {
3096                                 cfq_blkiocg_update_idle_time_stats(
3097                                                 cfqg_to_blkg(cfqq->cfqg));
3098                                 cfq_mark_cfqq_must_dispatch(cfqq);
3099                         }
3100                 }
3101         } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
3102                 /*
3103                  * not the active queue - expire current slice if it is
3104                  * idle and has expired its mean thinktime, or this new queue
3105                  * has some old slice time left and is of higher priority or
3106                  * this new queue is RT and the current one is BE
3107                  */
3108                 cfq_preempt_queue(cfqd, cfqq);
3109                 __blk_run_queue(cfqd->queue);
3110         }
3111 }
3112
3113 static void cfq_insert_request(struct request_queue *q, struct request *rq)
3114 {
3115         struct cfq_data *cfqd = q->elevator->elevator_data;
3116         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3117
3118         cfq_log_cfqq(cfqd, cfqq, "insert_request");
3119         cfq_init_prio_data(cfqq, RQ_CIC(rq)->icq.ioc);
3120
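             /* stamp the request's fifo expiry, then add it to both the fifo list and the sort tree */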
3121         rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
3122         list_add_tail(&rq->queuelist, &cfqq->fifo);
3123         cfq_add_rq_rb(rq);
3124         cfq_blkiocg_update_io_add_stats(cfqg_to_blkg(RQ_CFQG(rq)),
3125                                         cfqg_to_blkg(cfqd->serving_group),
3126                                         rq_data_dir(rq), rq_is_sync(rq));
3127         cfq_rq_enqueued(cfqd, cfqq, rq);
3128 }
3129
3130 /*
3131  * Update hw_tag based on peak queue depth over 50 samples under
3132  * sufficient load.
3133  */
3134 static void cfq_update_hw_tag(struct cfq_data *cfqd)
3135 {
3136         struct cfq_queue *cfqq = cfqd->active_queue;
3137
3138         if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3139                 cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
3140
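             /* once hardware queueing has been confirmed, no further sampling is needed */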
3141         if (cfqd->hw_tag == 1)
3142                 return;
3143
3144         if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
3145             cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
3146                 return;
3147
3148         /*
3149          * If the active queue doesn't have enough requests and can idle, cfq might not
3150          * dispatch sufficient requests to hardware. Don't zero hw_tag in this
3151          * case
3152          */
3153         if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3154             cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
3155             CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
3156                 return;
3157
3158         if (cfqd->hw_tag_samples++ < 50)
3159                 return;
3160
3161         if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
3162                 cfqd->hw_tag = 1;
3163         else
3164                 cfqd->hw_tag = 0;
3165 }
3166
3167 static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3168 {
3169         struct cfq_io_cq *cic = cfqd->active_cic;
3170
3171         /* If the queue already has requests, don't wait */
3172         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3173                 return false;
3174
3175         /* If there are other queues in the group, don't wait */
3176         if (cfqq->cfqg->nr_cfqq > 1)
3177                 return false;
3178
3179         /* the only queue in the group, but think time is big */
3180         if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
3181                 return false;
3182
3183         if (cfq_slice_used(cfqq))
3184                 return true;
3185
3186         /* if slice left is less than think time, wait busy */
3187         if (cic && sample_valid(cic->ttime.ttime_samples)
3188             && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
3189                 return true;
3190
3191         /*
3192          * If the think time is less than a jiffy, then ttime_mean=0 and the
3193          * check above will not be true. It might happen that the slice has not expired yet
3194          * but will expire soon (4-5 ns) during select_queue(). To cover the
3195          * case where think time is less than a jiffy, mark the queue wait
3196          * busy if only 1 jiffy is left in the slice.
3197          */
3198         if (cfqq->slice_end - jiffies == 1)
3199                 return true;
3200
3201         return false;
3202 }
3203
3204 static void cfq_completed_request(struct request_queue *q, struct request *rq)
3205 {
3206         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3207         struct cfq_data *cfqd = cfqq->cfqd;
3208         const int sync = rq_is_sync(rq);
3209         unsigned long now;
3210
3211         now = jiffies;
3212         cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
3213                      !!(rq->cmd_flags & REQ_NOIDLE));
3214
3215         cfq_update_hw_tag(cfqd);
3216
3217         WARN_ON(!cfqd->rq_in_driver);
3218         WARN_ON(!cfqq->dispatched);
3219         cfqd->rq_in_driver--;
3220         cfqq->dispatched--;
3221         (RQ_CFQG(rq))->dispatched--;
3222         cfq_blkiocg_update_completion_stats(cfqg_to_blkg(cfqq->cfqg),
3223                         rq_start_time_ns(rq), rq_io_start_time_ns(rq),
3224                         rq_data_dir(rq), rq_is_sync(rq));
3225
3226         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
3227
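             /* a sync completion refreshes the think-time bookkeeping used by the idling logic */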
3228         if (sync) {
3229                 struct cfq_rb_root *service_tree;
3230
3231                 RQ_CIC(rq)->ttime.last_end_request = now;
3232
3233                 if (cfq_cfqq_on_rr(cfqq))
3234                         service_tree = cfqq->service_tree;
3235                 else
3236                         service_tree = service_tree_for(cfqq->cfqg,
3237                                 cfqq_prio(cfqq), cfqq_type(cfqq));
3238                 service_tree->ttime.last_end_request = now;
3239                 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
3240                         cfqd->last_delayed_sync = now;
3241         }
3242
3243 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3244         cfqq->cfqg->ttime.last_end_request = now;
3245 #endif
3246
3247         /*
3248          * If this is the active queue, check if it needs to be expired,
3249          * or if we want to idle in case it has no pending requests.
3250          */
3251         if (cfqd->active_queue == cfqq) {
3252                 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
3253
3254                 if (cfq_cfqq_slice_new(cfqq)) {
3255                         cfq_set_prio_slice(cfqd, cfqq);
3256                         cfq_clear_cfqq_slice_new(cfqq);
3257                 }
3258
3259                 /*
3260                  * Should we wait for the next request to come in before we expire
3261                  * the queue?
3262                  */
3263                 if (cfq_should_wait_busy(cfqd, cfqq)) {
3264                         unsigned long extend_sl = cfqd->cfq_slice_idle;
3265                         if (!cfqd->cfq_slice_idle)
3266                                 extend_sl = cfqd->cfq_group_idle;
3267                         cfqq->slice_end = jiffies + extend_sl;
3268                         cfq_mark_cfqq_wait_busy(cfqq);
3269                         cfq_log_cfqq(cfqd, cfqq, "will busy wait");
3270                 }
3271
3272                 /*
3273                  * Idling is not enabled on:
3274                  * - expired queues
3275                  * - idle-priority queues
3276                  * - async queues
3277                  * - queues with still some requests queued
3278                  * - when there is a close cooperator
3279                  */
3280                 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
3281                         cfq_slice_expired(cfqd, 1);
3282                 else if (sync && cfqq_empty &&
3283                          !cfq_close_cooperator(cfqd, cfqq)) {
3284                         cfq_arm_slice_timer(cfqd);
3285                 }
3286         }
3287
3288         if (!cfqd->rq_in_driver)
3289                 cfq_schedule_dispatch(cfqd);
3290 }
3291
3292 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
3293 {
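             /* if CFQ is idling in anticipation of this queue, let the allocation through even under congestion */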
3294         if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3295                 cfq_mark_cfqq_must_alloc_slice(cfqq);
3296                 return ELV_MQUEUE_MUST;
3297         }
3298
3299         return ELV_MQUEUE_MAY;
3300 }
3301
3302 static int cfq_may_queue(struct request_queue *q, int rw)
3303 {
3304         struct cfq_data *cfqd = q->elevator->elevator_data;
3305         struct task_struct *tsk = current;
3306         struct cfq_io_cq *cic;
3307         struct cfq_queue *cfqq;
3308
3309         /*
3310          * don't force setup of a queue from here, as a call to may_queue
3311          * does not necessarily imply that a request will actually be queued.
3312          * So just look up a possibly existing queue, or return 'may queue'
3313          * if that fails.
3314          */
3315         cic = cfq_cic_lookup(cfqd, tsk->io_context);
3316         if (!cic)
3317                 return ELV_MQUEUE_MAY;
3318
3319         cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
3320         if (cfqq) {
3321                 cfq_init_prio_data(cfqq, cic->icq.ioc);
3322
3323                 return __cfq_may_queue(cfqq);
3324         }
3325
3326         return ELV_MQUEUE_MAY;
3327 }
3328
3329 /*
3330  * queue lock held here
3331  */
3332 static void cfq_put_request(struct request *rq)
3333 {
3334         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3335
3336         if (cfqq) {
3337                 const int rw = rq_data_dir(rq);
3338
3339                 BUG_ON(!cfqq->allocated[rw]);
3340                 cfqq->allocated[rw]--;
3341
3342                 /* Put down rq reference on cfqg */
3343                 blkg_put(cfqg_to_blkg(RQ_CFQG(rq)));
3344                 rq->elv.priv[0] = NULL;
3345                 rq->elv.priv[1] = NULL;
3346
3347                 cfq_put_queue(cfqq);
3348         }
3349 }
3350
3351 static struct cfq_queue *
3352 cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
3353                 struct cfq_queue *cfqq)
3354 {
3355         cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
3356         cic_set_cfqq(cic, cfqq->new_cfqq, 1);
3357         cfq_mark_cfqq_coop(cfqq->new_cfqq);
3358         cfq_put_queue(cfqq);
3359         return cic_to_cfqq(cic, 1);
3360 }
3361
3362 /*
3363  * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
3364  * was the last process referring to said cfqq.
3365  */
3366 static struct cfq_queue *
3367 split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
3368 {
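             /* if this is the last process using the cfqq, reset its coop state and keep it */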
3369         if (cfqq_process_refs(cfqq) == 1) {
3370                 cfqq->pid = current->pid;
3371                 cfq_clear_cfqq_coop(cfqq);
3372                 cfq_clear_cfqq_split_coop(cfqq);
3373                 return cfqq;
3374         }
3375
3376         cic_set_cfqq(cic, NULL, 1);
3377
3378         cfq_put_cooperator(cfqq);
3379
3380         cfq_put_queue(cfqq);
3381         return NULL;
3382 }
3383 /*
3384  * Allocate cfq data structures associated with this request.
3385  */
3386 static int
3387 cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
3388 {
3389         struct cfq_data *cfqd = q->elevator->elevator_data;
3390         struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
3391         const int rw = rq_data_dir(rq);
3392         const bool is_sync = rq_is_sync(rq);
3393         struct cfq_queue *cfqq;
3394         unsigned int changed;
3395
3396         might_sleep_if(gfp_mask & __GFP_WAIT);
3397
3398         spin_lock_irq(q->queue_lock);
3399
3400         /* handle changed notifications */
3401         changed = icq_get_changed(&cic->icq);
3402         if (unlikely(changed & ICQ_IOPRIO_CHANGED))
3403                 changed_ioprio(cic);
3404 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3405         if (unlikely(changed & ICQ_CGROUP_CHANGED))
3406                 changed_cgroup(cic);
3407 #endif
3408
3409 new_queue:
3410         cfqq = cic_to_cfqq(cic, is_sync);
3411         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3412                 cfqq = cfq_get_queue(cfqd, is_sync, cic->icq.ioc, gfp_mask);
3413                 cic_set_cfqq(cic, cfqq, is_sync);
3414         } else {
3415                 /*
3416                  * If the queue was seeky for too long, break it apart.
3417                  */
3418                 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
3419                         cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
3420                         cfqq = split_cfqq(cic, cfqq);
3421                         if (!cfqq)
3422                                 goto new_queue;
3423                 }
3424
3425                 /*
3426                  * Check to see if this queue is scheduled to merge with
3427                  * another, closely cooperating queue.  The merging of
3428                  * queues happens here as it must be done in process context.
3429                  * The reference on new_cfqq was taken in merge_cfqqs.
3430                  */
3431                 if (cfqq->new_cfqq)
3432                         cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
3433         }
3434
3435         cfqq->allocated[rw]++;
3436
3437         cfqq->ref++;
3438         blkg_get(cfqg_to_blkg(cfqq->cfqg));
3439         rq->elv.priv[0] = cfqq;
3440         rq->elv.priv[1] = cfqq->cfqg;
3441         spin_unlock_irq(q->queue_lock);
3442         return 0;
3443 }
3444
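     /*
      * Work handler for the deferred dispatch scheduled via cfqd->unplug_work.
      */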
3445 static void cfq_kick_queue(struct work_struct *work)
3446 {
3447         struct cfq_data *cfqd =
3448                 container_of(work, struct cfq_data, unplug_work);
3449         struct request_queue *q = cfqd->queue;
3450
3451         spin_lock_irq(q->queue_lock);
3452         __blk_run_queue(cfqd->queue);
3453         spin_unlock_irq(q->queue_lock);
3454 }
3455
3456 /*
3457  * Timer running if the active_queue is currently idling inside its time slice
3458  */
3459 static void cfq_idle_slice_timer(unsigned long data)
3460 {
3461         struct cfq_data *cfqd = (struct cfq_data *) data;
3462         struct cfq_queue *cfqq;
3463         unsigned long flags;
3464         int timed_out = 1;
3465
3466         cfq_log(cfqd, "idle timer fired");
3467
3468         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3469
3470         cfqq = cfqd->active_queue;
3471         if (cfqq) {
3472                 timed_out = 0;
3473
3474                 /*
3475                  * We saw a request before the queue expired, let it through
3476                  */
3477                 if (cfq_cfqq_must_dispatch(cfqq))
3478                         goto out_kick;
3479
3480                 /*
3481                  * expired
3482                  */
3483                 if (cfq_slice_used(cfqq))
3484                         goto expire;
3485
3486                 /*
3487                  * only expire and reinvoke request handler, if there are
3488                  * other queues with pending requests
3489                  */
3490                 if (!cfqd->busy_queues)
3491                         goto out_cont;
3492
3493                 /*
3494                  * not expired and it has a request pending, let it dispatch
3495                  */
3496                 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3497                         goto out_kick;
3498
3499                 /*
3500                  * The queue-depth (deep) flag is reset only when idling didn't succeed
3501                  */
3502                 cfq_clear_cfqq_deep(cfqq);
3503         }
3504 expire:
3505         cfq_slice_expired(cfqd, timed_out);
3506 out_kick:
3507         cfq_schedule_dispatch(cfqd);
3508 out_cont:
3509         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3510 }
3511
3512 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
3513 {
3514         del_timer_sync(&cfqd->idle_slice_timer);
3515         cancel_work_sync(&cfqd->unplug_work);
3516 }
3517
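     /*
      * Drop the elevator's references to the shared per-priority async queues.
      */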
3518 static void cfq_put_async_queues(struct cfq_data *cfqd)
3519 {
3520         int i;
3521
3522         for (i = 0; i < IOPRIO_BE_NR; i++) {
3523                 if (cfqd->async_cfqq[0][i])
3524                         cfq_put_queue(cfqd->async_cfqq[0][i]);
3525                 if (cfqd->async_cfqq[1][i])
3526                         cfq_put_queue(cfqd->async_cfqq[1][i]);
3527         }
3528
3529         if (cfqd->async_idle_cfqq)
3530                 cfq_put_queue(cfqd->async_idle_cfqq);
3531 }
3532
3533 static void cfq_exit_queue(struct elevator_queue *e)
3534 {
3535         struct cfq_data *cfqd = e->elevator_data;
3536         struct request_queue *q = cfqd->queue;
3537         bool wait = false;
3538
3539         cfq_shutdown_timer_wq(cfqd);
3540
3541         spin_lock_irq(q->queue_lock);
3542
3543         if (cfqd->active_queue)
3544                 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
3545
3546         cfq_put_async_queues(cfqd);
3547         cfq_release_cfq_groups(cfqd);
3548
3549         /*
3550          * If there are groups which we could not unlink from blkcg list,
3551          * wait for an RCU grace period for them to be freed.
3552          */
3553         if (cfqd->nr_blkcg_linked_grps)
3554                 wait = true;
3555
3556         spin_unlock_irq(q->queue_lock);
3557
3558         cfq_shutdown_timer_wq(cfqd);
3559
3560         /*
3561          * Wait for cfqg->blkg->key accessors to exit their grace periods.
3562          * Do this wait only if there are other unlinked groups out
3563          * there. This can happen if the cgroup deletion path claimed
3564          * responsibility for cleaning up a group before the queue cleanup code
3565          * got to the group.
3566          *
3567          * Do not call synchronize_rcu() unconditionally as there are drivers
3568          * which create/delete request queue hundreds of times during scan/boot
3569          * and synchronize_rcu() can take significant time and slow down boot.
3570          */
3571         if (wait)
3572                 synchronize_rcu();
3573
3574 #ifndef CONFIG_CFQ_GROUP_IOSCHED
3575         kfree(cfqd->root_group);
3576 #endif
3577         kfree(cfqd);
3578 }
3579
3580 static int cfq_init_queue(struct request_queue *q)
3581 {
3582         struct cfq_data *cfqd;
3583         struct blkio_group *blkg __maybe_unused;
3584         int i;
3585
3586         cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
3587         if (!cfqd)
3588                 return -ENOMEM;
3589
3590         cfqd->queue = q;
3591         q->elevator->elevator_data = cfqd;
3592
3593         /* Init root service tree */
3594         cfqd->grp_service_tree = CFQ_RB_ROOT;
3595
3596         /* Init root group and prefer root group over other groups by default */
3597 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3598         rcu_read_lock();
3599         spin_lock_irq(q->queue_lock);
3600
3601         blkg = blkg_lookup_create(&blkio_root_cgroup, q, BLKIO_POLICY_PROP,
3602                                   true);
3603         if (!IS_ERR(blkg))
3604                 cfqd->root_group = blkg_to_cfqg(blkg);
3605
3606         spin_unlock_irq(q->queue_lock);
3607         rcu_read_unlock();
3608 #else
3609         cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
3610                                         GFP_KERNEL, cfqd->queue->node);
3611         if (cfqd->root_group)
3612                 cfq_init_cfqg_base(cfqd->root_group);
3613 #endif
3614         if (!cfqd->root_group) {
3615                 kfree(cfqd);
3616                 return -ENOMEM;
3617         }
3618
3619         cfqd->root_group->weight = 2*BLKIO_WEIGHT_DEFAULT;
3620
3621         /*
3622          * Not strictly needed (since RB_ROOT just clears the node and we
3623          * zeroed cfqd on alloc), but better to be safe in case someone decides
3624          * to add magic to the rb code
3625          */
3626         for (i = 0; i < CFQ_PRIO_LISTS; i++)
3627                 cfqd->prio_trees[i] = RB_ROOT;
3628
3629         /*
3630          * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
3631          * Grab a permanent reference to it, so that the normal code flow
3632          * will not attempt to free it.  oom_cfqq is linked to root_group
3633          * but shouldn't hold a reference as it'll never be unlinked.  Lose
3634          * the reference from linking right away.
3635          */
3636         cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
3637         cfqd->oom_cfqq.ref++;
3638
3639         spin_lock_irq(q->queue_lock);
3640         cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
3641         blkg_put(cfqg_to_blkg(cfqd->root_group));
3642         spin_unlock_irq(q->queue_lock);
3643
3644         init_timer(&cfqd->idle_slice_timer);
3645         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
3646         cfqd->idle_slice_timer.data = (unsigned long) cfqd;
3647
3648         INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
3649
3650         cfqd->cfq_quantum = cfq_quantum;
3651         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
3652         cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
3653         cfqd->cfq_back_max = cfq_back_max;
3654         cfqd->cfq_back_penalty = cfq_back_penalty;
3655         cfqd->cfq_slice[0] = cfq_slice_async;
3656         cfqd->cfq_slice[1] = cfq_slice_sync;
3657         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
3658         cfqd->cfq_slice_idle = cfq_slice_idle;
3659         cfqd->cfq_group_idle = cfq_group_idle;
3660         cfqd->cfq_latency = 1;
3661         cfqd->hw_tag = -1;
3662         /*
3663          * We optimistically start by assuming sync ops weren't delayed in the last
3664          * second, in order to allow a larger depth for async operations.
3665          */
3666         cfqd->last_delayed_sync = jiffies - HZ;
3667         return 0;
3668 }
3669
3670 /*
3671  * sysfs parts below -->
3672  */
3673 static ssize_t
3674 cfq_var_show(unsigned int var, char *page)
3675 {
3676         return sprintf(page, "%d\n", var);
3677 }
3678
3679 static ssize_t
3680 cfq_var_store(unsigned int *var, const char *page, size_t count)
3681 {
3682         char *p = (char *) page;
3683
3684         *var = simple_strtoul(p, &p, 10);
3685         return count;
3686 }
3687
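     /* generate the sysfs 'show' helpers; __CONV converts jiffies back to milliseconds */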
3688 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
3689 static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
3690 {                                                                       \
3691         struct cfq_data *cfqd = e->elevator_data;                       \
3692         unsigned int __data = __VAR;                                    \
3693         if (__CONV)                                                     \
3694                 __data = jiffies_to_msecs(__data);                      \
3695         return cfq_var_show(__data, (page));                            \
3696 }
3697 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
3698 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
3699 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
3700 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
3701 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
3702 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
3703 SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
3704 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
3705 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
3706 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
3707 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
3708 #undef SHOW_FUNCTION
3709
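     /* generate the sysfs 'store' helpers; input is clamped to [MIN, MAX] and converted from milliseconds to jiffies when __CONV is set */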
3710 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
3711 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
3712 {                                                                       \
3713         struct cfq_data *cfqd = e->elevator_data;                       \
3714         unsigned int __data;                                            \
3715         int ret = cfq_var_store(&__data, (page), count);                \
3716         if (__data < (MIN))                                             \
3717                 __data = (MIN);                                         \
3718         else if (__data > (MAX))                                        \
3719                 __data = (MAX);                                         \
3720         if (__CONV)                                                     \
3721                 *(__PTR) = msecs_to_jiffies(__data);                    \
3722         else                                                            \
3723                 *(__PTR) = __data;                                      \
3724         return ret;                                                     \
3725 }
3726 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
3727 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
3728                 UINT_MAX, 1);
3729 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
3730                 UINT_MAX, 1);
3731 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
3732 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
3733                 UINT_MAX, 0);
3734 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
3735 STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
3736 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
3737 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
3738 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
3739                 UINT_MAX, 0);
3740 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
3741 #undef STORE_FUNCTION
3742
3743 #define CFQ_ATTR(name) \
3744         __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
3745
3746 static struct elv_fs_entry cfq_attrs[] = {
3747         CFQ_ATTR(quantum),
3748         CFQ_ATTR(fifo_expire_sync),
3749         CFQ_ATTR(fifo_expire_async),
3750         CFQ_ATTR(back_seek_max),
3751         CFQ_ATTR(back_seek_penalty),
3752         CFQ_ATTR(slice_sync),
3753         CFQ_ATTR(slice_async),
3754         CFQ_ATTR(slice_async_rq),
3755         CFQ_ATTR(slice_idle),
3756         CFQ_ATTR(group_idle),
3757         CFQ_ATTR(low_latency),
3758         __ATTR_NULL
3759 };
3760
3761 static struct elevator_type iosched_cfq = {
3762         .ops = {
3763                 .elevator_merge_fn =            cfq_merge,
3764                 .elevator_merged_fn =           cfq_merged_request,
3765                 .elevator_merge_req_fn =        cfq_merged_requests,
3766                 .elevator_allow_merge_fn =      cfq_allow_merge,
3767                 .elevator_bio_merged_fn =       cfq_bio_merged,
3768                 .elevator_dispatch_fn =         cfq_dispatch_requests,
3769                 .elevator_add_req_fn =          cfq_insert_request,
3770                 .elevator_activate_req_fn =     cfq_activate_request,
3771                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
3772                 .elevator_completed_req_fn =    cfq_completed_request,
3773                 .elevator_former_req_fn =       elv_rb_former_request,
3774                 .elevator_latter_req_fn =       elv_rb_latter_request,
3775                 .elevator_init_icq_fn =         cfq_init_icq,
3776                 .elevator_exit_icq_fn =         cfq_exit_icq,
3777                 .elevator_set_req_fn =          cfq_set_request,
3778                 .elevator_put_req_fn =          cfq_put_request,
3779                 .elevator_may_queue_fn =        cfq_may_queue,
3780                 .elevator_init_fn =             cfq_init_queue,
3781                 .elevator_exit_fn =             cfq_exit_queue,
3782         },
3783         .icq_size       =       sizeof(struct cfq_io_cq),
3784         .icq_align      =       __alignof__(struct cfq_io_cq),
3785         .elevator_attrs =       cfq_attrs,
3786         .elevator_name  =       "cfq",
3787         .elevator_owner =       THIS_MODULE,
3788 };
3789
3790 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3791 static struct blkio_policy_type blkio_policy_cfq = {
3792         .ops = {
3793                 .blkio_init_group_fn =          cfq_init_blkio_group,
3794                 .blkio_link_group_fn =          cfq_link_blkio_group,
3795                 .blkio_unlink_group_fn =        cfq_unlink_blkio_group,
3796                 .blkio_clear_queue_fn =         cfq_clear_queue,
3797                 .blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
3798         },
3799         .plid = BLKIO_POLICY_PROP,
3800         .pdata_size = sizeof(struct cfq_group),
3801 };
3802 #endif
3803
3804 static int __init cfq_init(void)
3805 {
3806         int ret;
3807
3808         /*
3809          * could be 0 on HZ < 1000 setups
3810          */
3811         if (!cfq_slice_async)
3812                 cfq_slice_async = 1;
3813         if (!cfq_slice_idle)
3814                 cfq_slice_idle = 1;
3815
3816 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3817         if (!cfq_group_idle)
3818                 cfq_group_idle = 1;
3819 #else
3820         cfq_group_idle = 0;
3821 #endif
3822         cfq_pool = KMEM_CACHE(cfq_queue, 0);
3823         if (!cfq_pool)
3824                 return -ENOMEM;
3825
3826         ret = elv_register(&iosched_cfq);
3827         if (ret) {
3828                 kmem_cache_destroy(cfq_pool);
3829                 return ret;
3830         }
3831
3832 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3833         blkio_policy_register(&blkio_policy_cfq);
3834 #endif
3835         return 0;
3836 }
3837
3838 static void __exit cfq_exit(void)
3839 {
3840 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3841         blkio_policy_unregister(&blkio_policy_cfq);
3842 #endif
3843         elv_unregister(&iosched_cfq);
3844         kmem_cache_destroy(cfq_pool);
3845 }
3846
3847 module_init(cfq_init);
3848 module_exit(cfq_exit);
3849
3850 MODULE_AUTHOR("Jens Axboe");
3851 MODULE_LICENSE("GPL");
3852 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");