1 /*
2  *  CFQ, or complete fairness queueing, disk scheduler.
3  *
4  *  Based on ideas from a previously unfinished io
5  *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
6  *
7  *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8  */
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/blkdev.h>
12 #include <linux/elevator.h>
13 #include <linux/jiffies.h>
14 #include <linux/rbtree.h>
15 #include <linux/ioprio.h>
16 #include <linux/blktrace_api.h>
17 #include "blk.h"
18 #include "cfq.h"
19
20 /*
21  * tunables
22  */
23 /* max queue in one round of service */
24 static const int cfq_quantum = 8;
25 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
26 /* maximum backwards seek, in KiB */
27 static const int cfq_back_max = 16 * 1024;
28 /* penalty of a backwards seek */
29 static const int cfq_back_penalty = 2;
30 static const int cfq_slice_sync = HZ / 10;
31 static int cfq_slice_async = HZ / 25;
32 static const int cfq_slice_async_rq = 2;
33 static int cfq_slice_idle = HZ / 125;
34 static int cfq_group_idle = HZ / 125;
35 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
36 static const int cfq_hist_divisor = 4;
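/*
 * For example, at HZ == 1000 the defaults above work out to: fifo
 * expiry of 250ms (sync) / 125ms (async), slice_sync of 100ms,
 * slice_async of 40ms, slice_idle and group_idle of 8ms, and a
 * target latency of 300ms.
 */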
37
38 /*
39  * offset from end of service tree
40  */
41 #define CFQ_IDLE_DELAY          (HZ / 5)
42
43 /*
44  * below this threshold, we consider thinktime immediate
45  */
46 #define CFQ_MIN_TT              (2)
47
48 #define CFQ_SLICE_SCALE         (5)
49 #define CFQ_HW_QUEUE_MIN        (5)
50 #define CFQ_SERVICE_SHIFT       12
51
52 #define CFQQ_SEEK_THR           (sector_t)(8 * 100)
53 #define CFQQ_CLOSE_THR          (sector_t)(8 * 1024)
54 #define CFQQ_SECT_THR_NONROT    (sector_t)(2 * 32)
55 #define CFQQ_SEEKY(cfqq)        (hweight32(cfqq->seek_history) > 32/8)
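/*
 * ->seek_history is a 32-bit sliding window with one bit per sampled
 * request; CFQQ_SEEKY() flags a queue once more than 32/8 == 4 of the
 * last 32 samples were seeky (hweight32() counts the set bits).
 */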
56
57 #define RQ_CIC(rq)              icq_to_cic((rq)->elv.icq)
58 #define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elv.priv[0])
59 #define RQ_CFQG(rq)             (struct cfq_group *) ((rq)->elv.priv[1])
60
61 static struct kmem_cache *cfq_pool;
62
63 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
64 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
65 #define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
66
67 #define sample_valid(samples)   ((samples) > 80)
68 #define rb_entry_cfqg(node)     rb_entry((node), struct cfq_group, rb_node)
69
70 struct cfq_ttime {
71         unsigned long last_end_request;
72
73         unsigned long ttime_total;
74         unsigned long ttime_samples;
75         unsigned long ttime_mean;
76 };
77
78 /*
79  * Most of our rbtree usage is for sorting with min extraction, so
80  * if we cache the leftmost node we don't have to walk down the tree
81  * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
82  * move this into the elevator for the rq sorting as well.
83  */
84 struct cfq_rb_root {
85         struct rb_root rb;
86         struct rb_node *left;
87         unsigned count;
88         unsigned total_weight;
89         u64 min_vdisktime;
90         struct cfq_ttime ttime;
91 };
92 #define CFQ_RB_ROOT     (struct cfq_rb_root) { .rb = RB_ROOT, \
93                         .ttime = {.last_end_request = jiffies,},}
94
95 /*
96  * Per process-grouping structure
97  */
98 struct cfq_queue {
99         /* reference count */
100         int ref;
101         /* various state flags, see below */
102         unsigned int flags;
103         /* parent cfq_data */
104         struct cfq_data *cfqd;
105         /* service_tree member */
106         struct rb_node rb_node;
107         /* service_tree key */
108         unsigned long rb_key;
109         /* prio tree member */
110         struct rb_node p_node;
111         /* prio tree root we belong to, if any */
112         struct rb_root *p_root;
113         /* sorted list of pending requests */
114         struct rb_root sort_list;
115         /* if fifo isn't expired, next request to serve */
116         struct request *next_rq;
117         /* requests queued in sort_list */
118         int queued[2];
119         /* currently allocated requests */
120         int allocated[2];
121         /* fifo list of requests in sort_list */
122         struct list_head fifo;
123
124         /* time when queue got scheduled in to dispatch first request. */
125         unsigned long dispatch_start;
126         unsigned int allocated_slice;
127         unsigned int slice_dispatch;
128         /* time when first request from queue completed and slice started. */
129         unsigned long slice_start;
130         unsigned long slice_end;
131         long slice_resid;
132
133         /* pending priority requests */
134         int prio_pending;
135         /* number of requests that are on the dispatch list or inside driver */
136         int dispatched;
137
138         /* io prio of this queue */
139         unsigned short ioprio, org_ioprio;
140         unsigned short ioprio_class;
141
142         pid_t pid;
143
144         u32 seek_history;
145         sector_t last_request_pos;
146
147         struct cfq_rb_root *service_tree;
148         struct cfq_queue *new_cfqq;
149         struct cfq_group *cfqg;
150         /* Number of sectors dispatched from queue in single dispatch round */
151         unsigned long nr_sectors;
152 };
153
154 /*
155  * First index in the service_trees.
156  * IDLE is handled separately, on its own service_tree_idle tree.
157  */
158 enum wl_prio_t {
159         BE_WORKLOAD = 0,
160         RT_WORKLOAD = 1,
161         IDLE_WORKLOAD = 2,
162         CFQ_PRIO_NR,
163 };
164
165 /*
166  * Second index in the service_trees.
167  */
168 enum wl_type_t {
169         ASYNC_WORKLOAD = 0,
170         SYNC_NOIDLE_WORKLOAD = 1,
171         SYNC_WORKLOAD = 2
172 };
173
174 /* This is per cgroup per device grouping structure */
175 struct cfq_group {
176         /* group service_tree member */
177         struct rb_node rb_node;
178
179         /* group service_tree key */
180         u64 vdisktime;
181         unsigned int weight;
182         unsigned int new_weight;
183         bool needs_update;
184
185         /* number of cfqq currently on this group */
186         int nr_cfqq;
187
188         /*
189          * Per group busy queues average. Useful for workload slice calc. We
190          * create the array for each prio class but at run time it is used
191          * only for the RT and BE classes; the slot for the IDLE class stays unused.
192          * This is primarily done to avoid confusion and a gcc warning.
193          */
194         unsigned int busy_queues_avg[CFQ_PRIO_NR];
195         /*
196          * rr lists of queues with requests. We maintain service trees for
197          * RT and BE classes. These trees are subdivided into subclasses
198          * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
199          * class there is no subclassification and all the cfq queues go on
200          * a single tree service_tree_idle.
201          * Counts are embedded in the cfq_rb_root
202          */
203         struct cfq_rb_root service_trees[2][3];
204         struct cfq_rb_root service_tree_idle;
205
206         unsigned long saved_workload_slice;
207         enum wl_type_t saved_workload;
208         enum wl_prio_t saved_serving_prio;
209         struct blkio_group blkg;
210 #ifdef CONFIG_CFQ_GROUP_IOSCHED
211         struct hlist_node cfqd_node;
212         int ref;
213 #endif
214         /* number of requests that are on the dispatch list or inside driver */
215         int dispatched;
216         struct cfq_ttime ttime;
217 };
218
219 struct cfq_io_cq {
220         struct io_cq            icq;            /* must be the first member */
221         struct cfq_queue        *cfqq[2];
222         struct cfq_ttime        ttime;
223 };
224
225 /*
226  * Per block device queue structure
227  */
228 struct cfq_data {
229         struct request_queue *queue;
230         /* Root service tree for cfq_groups */
231         struct cfq_rb_root grp_service_tree;
232         struct cfq_group *root_group;
233
234         /*
235          * The priority currently being served
236          */
237         enum wl_prio_t serving_prio;
238         enum wl_type_t serving_type;
239         unsigned long workload_expires;
240         struct cfq_group *serving_group;
241
242         /*
243          * Each priority tree is sorted by next_request position.  These
244          * trees are used when determining if two or more queues are
245          * interleaving requests (see cfq_close_cooperator).
246          */
247         struct rb_root prio_trees[CFQ_PRIO_LISTS];
248
249         unsigned int busy_queues;
250         unsigned int busy_sync_queues;
251
252         int rq_in_driver;
253         int rq_in_flight[2];
254
255         /*
256          * queue-depth detection
257          */
258         int rq_queued;
259         int hw_tag;
260         /*
261          * hw_tag can be
262          * -1 => indeterminate (cfq will behave as if NCQ is present, to allow better detection)
263          *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
264          *  0 => no NCQ
265          */
266         int hw_tag_est_depth;
267         unsigned int hw_tag_samples;
268
269         /*
270          * idle window management
271          */
272         struct timer_list idle_slice_timer;
273         struct work_struct unplug_work;
274
275         struct cfq_queue *active_queue;
276         struct cfq_io_cq *active_cic;
277
278         /*
279          * async queue for each priority case
280          */
281         struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
282         struct cfq_queue *async_idle_cfqq;
283
284         sector_t last_position;
285
286         /*
287          * tunables, see top of file
288          */
289         unsigned int cfq_quantum;
290         unsigned int cfq_fifo_expire[2];
291         unsigned int cfq_back_penalty;
292         unsigned int cfq_back_max;
293         unsigned int cfq_slice[2];
294         unsigned int cfq_slice_async_rq;
295         unsigned int cfq_slice_idle;
296         unsigned int cfq_group_idle;
297         unsigned int cfq_latency;
298
299         /*
300          * Fallback dummy cfqq for extreme OOM conditions
301          */
302         struct cfq_queue oom_cfqq;
303
304         unsigned long last_delayed_sync;
305
306         /* List of cfq groups being managed on this device */
307         struct hlist_head cfqg_list;
308
309         /* Number of groups which are on blkcg->blkg_list */
310         unsigned int nr_blkcg_linked_grps;
311 };
312
313 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
314
315 static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
316                                             enum wl_prio_t prio,
317                                             enum wl_type_t type)
318 {
319         if (!cfqg)
320                 return NULL;
321
322         if (prio == IDLE_WORKLOAD)
323                 return &cfqg->service_tree_idle;
324
325         return &cfqg->service_trees[prio][type];
326 }
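/*
 * E.g. service_tree_for(cfqg, BE_WORKLOAD, SYNC_WORKLOAD) maps to
 * &cfqg->service_trees[0][2], while an IDLE_WORKLOAD lookup ignores
 * @type and returns the single service_tree_idle.
 */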
327
328 enum cfqq_state_flags {
329         CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
330         CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
331         CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
332         CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
333         CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
334         CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
335         CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
336         CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
337         CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
338         CFQ_CFQQ_FLAG_coop,             /* cfqq is shared */
339         CFQ_CFQQ_FLAG_split_coop,       /* shared cfqq will be split */
340         CFQ_CFQQ_FLAG_deep,             /* sync cfqq experienced large depth */
341         CFQ_CFQQ_FLAG_wait_busy,        /* Waiting for next request */
342 };
343
344 #define CFQ_CFQQ_FNS(name)                                              \
345 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
346 {                                                                       \
347         (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);                   \
348 }                                                                       \
349 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
350 {                                                                       \
351         (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                  \
352 }                                                                       \
353 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
354 {                                                                       \
355         return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;      \
356 }
357
358 CFQ_CFQQ_FNS(on_rr);
359 CFQ_CFQQ_FNS(wait_request);
360 CFQ_CFQQ_FNS(must_dispatch);
361 CFQ_CFQQ_FNS(must_alloc_slice);
362 CFQ_CFQQ_FNS(fifo_expire);
363 CFQ_CFQQ_FNS(idle_window);
364 CFQ_CFQQ_FNS(prio_changed);
365 CFQ_CFQQ_FNS(slice_new);
366 CFQ_CFQQ_FNS(sync);
367 CFQ_CFQQ_FNS(coop);
368 CFQ_CFQQ_FNS(split_coop);
369 CFQ_CFQQ_FNS(deep);
370 CFQ_CFQQ_FNS(wait_busy);
371 #undef CFQ_CFQQ_FNS
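/*
 * Each CFQ_CFQQ_FNS(name) invocation above expands to a trio of
 * helpers; e.g. CFQ_CFQQ_FNS(on_rr) yields cfq_mark_cfqq_on_rr(),
 * cfq_clear_cfqq_on_rr() and cfq_cfqq_on_rr(), all operating on the
 * CFQ_CFQQ_FLAG_on_rr bit in cfqq->flags.
 */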
372
373 #ifdef CONFIG_CFQ_GROUP_IOSCHED
374 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
375         blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
376                         cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
377                         blkg_path(&(cfqq)->cfqg->blkg), ##args)
378
379 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)                          \
380         blk_add_trace_msg((cfqd)->queue, "%s " fmt,                     \
381                                 blkg_path(&(cfqg)->blkg), ##args)       \
382
383 #else
384 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
385         blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
386 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)          do {} while (0)
387 #endif
388 #define cfq_log(cfqd, fmt, args...)     \
389         blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
390
391 /* Traverses through cfq group service trees */
392 #define for_each_cfqg_st(cfqg, i, j, st) \
393         for (i = 0; i <= IDLE_WORKLOAD; i++) \
394                 for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
395                         : &cfqg->service_tree_idle; \
396                         (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
397                         (i == IDLE_WORKLOAD && j == 0); \
398                         j++, st = i < IDLE_WORKLOAD ? \
399                         &cfqg->service_trees[i][j]: NULL) \
400
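/*
 * for_each_cfqg_st() thus visits all seven trees of a group: the
 * 2 (RT/BE) * 3 (workload type) entries of service_trees, followed
 * by service_tree_idle once (i == IDLE_WORKLOAD, j == 0).
 */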
401 static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
402         struct cfq_ttime *ttime, bool group_idle)
403 {
404         unsigned long slice;
405         if (!sample_valid(ttime->ttime_samples))
406                 return false;
407         if (group_idle)
408                 slice = cfqd->cfq_group_idle;
409         else
410                 slice = cfqd->cfq_slice_idle;
411         return ttime->ttime_mean > slice;
412 }
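/*
 * E.g. with the defaults at HZ == 1000, a think time only counts as
 * big once more than 80 samples were taken (sample_valid()) and the
 * mean exceeds 8ms (cfq_group_idle or cfq_slice_idle respectively).
 */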
413
414 static inline bool iops_mode(struct cfq_data *cfqd)
415 {
416         /*
417          * If we are not idling on queues and the drive supports NCQ, requests
418          * execute in parallel and measuring time is not meaningful in most
419          * cases, unless we drive shallower queue depths, which itself becomes
420          * a performance bottleneck. In such cases, switch to providing
421          * fairness in terms of number of IOs.
422          */
423         if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
424                 return true;
425         else
426                 return false;
427 }
428
429 static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
430 {
431         if (cfq_class_idle(cfqq))
432                 return IDLE_WORKLOAD;
433         if (cfq_class_rt(cfqq))
434                 return RT_WORKLOAD;
435         return BE_WORKLOAD;
436 }
437
438
439 static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
440 {
441         if (!cfq_cfqq_sync(cfqq))
442                 return ASYNC_WORKLOAD;
443         if (!cfq_cfqq_idle_window(cfqq))
444                 return SYNC_NOIDLE_WORKLOAD;
445         return SYNC_WORKLOAD;
446 }
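/*
 * E.g. a sync queue whose idle window was disabled (say, seeky IO on
 * NCQ storage) is classed as SYNC_NOIDLE; CFQ then idles on that
 * service tree as a whole rather than on the individual queue.
 */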
447
448 static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
449                                         struct cfq_data *cfqd,
450                                         struct cfq_group *cfqg)
451 {
452         if (wl == IDLE_WORKLOAD)
453                 return cfqg->service_tree_idle.count;
454
455         return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
456                 + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
457                 + cfqg->service_trees[wl][SYNC_WORKLOAD].count;
458 }
459
460 static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
461                                         struct cfq_group *cfqg)
462 {
463         return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
464                 + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
465 }
466
467 static void cfq_dispatch_insert(struct request_queue *, struct request *);
468 static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
469                                        struct io_context *, gfp_t);
470
471 static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
472 {
473         /* cic->icq is the first member, %NULL will convert to %NULL */
474         return container_of(icq, struct cfq_io_cq, icq);
475 }
476
477 static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
478                                                struct io_context *ioc)
479 {
480         if (ioc)
481                 return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
482         return NULL;
483 }
484
485 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
486 {
487         return cic->cfqq[is_sync];
488 }
489
490 static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
491                                 bool is_sync)
492 {
493         cic->cfqq[is_sync] = cfqq;
494 }
495
496 static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
497 {
498         return cic->icq.q->elevator->elevator_data;
499 }
500
501 /*
502  * We regard a request as SYNC if it's either a read or has the SYNC bit
503  * set (in which case it could also be a direct WRITE).
504  */
505 static inline bool cfq_bio_sync(struct bio *bio)
506 {
507         return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
508 }
509
510 /*
511  * Schedule a run of the queue if there are requests pending and nothing
512  * in the driver that will restart queueing.
513  */
514 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
515 {
516         if (cfqd->busy_queues) {
517                 cfq_log(cfqd, "schedule dispatch");
518                 kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
519         }
520 }
521
522 /*
523  * Scale schedule slice based on io priority. Use the sync time slice only
524  * if a queue is marked sync and has sync io queued. A sync queue with async
525  * io only should not get the full sync slice length.
526  */
527 static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
528                                  unsigned short prio)
529 {
530         const int base_slice = cfqd->cfq_slice[sync];
531
532         WARN_ON(prio >= IOPRIO_BE_NR);
533
534         return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
535 }
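/*
 * Worked example (HZ == 1000, sync): base_slice is 100ms and each
 * priority step is worth base_slice / CFQ_SLICE_SCALE == 20ms, so
 * prio 0 gets 180ms, prio 4 the base 100ms, and prio 7 only 40ms.
 */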
536
537 static inline int
538 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
539 {
540         return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
541 }
542
543 static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
544 {
545         u64 d = delta << CFQ_SERVICE_SHIFT;
546
547         d = d * BLKIO_WEIGHT_DEFAULT;
548         do_div(d, cfqg->weight);
549         return d;
550 }
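/*
 * I.e. the service charge is normalized by group weight: a group at
 * the default weight advances its vdisktime by exactly the (shifted)
 * slice, one at twice the default weight by half of it.
 */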
551
552 static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
553 {
554         s64 delta = (s64)(vdisktime - min_vdisktime);
555         if (delta > 0)
556                 min_vdisktime = vdisktime;
557
558         return min_vdisktime;
559 }
560
561 static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
562 {
563         s64 delta = (s64)(vdisktime - min_vdisktime);
564         if (delta < 0)
565                 min_vdisktime = vdisktime;
566
567         return min_vdisktime;
568 }
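/*
 * The (s64) cast in the two helpers above makes the comparison safe
 * against u64 wrap-around: a vdisktime of 1 just past a wrap yields a
 * small positive delta against a huge min_vdisktime and so still
 * counts as "later". Same trick as CFS's min_vruntime tracking.
 */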
569
570 static void update_min_vdisktime(struct cfq_rb_root *st)
571 {
572         struct cfq_group *cfqg;
573
574         if (st->left) {
575                 cfqg = rb_entry_cfqg(st->left);
576                 st->min_vdisktime = max_vdisktime(st->min_vdisktime,
577                                                   cfqg->vdisktime);
578         }
579 }
580
581 /*
582  * Get the averaged number of queues of RT/BE priority. The average is
583  * updated with a formula that gives more weight to higher numbers, so it
584  * follows sudden increases quickly and decreases slowly.
585  */
586
587 static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
588                                         struct cfq_group *cfqg, bool rt)
589 {
590         unsigned min_q, max_q;
591         unsigned mult  = cfq_hist_divisor - 1;
592         unsigned round = cfq_hist_divisor / 2;
593         unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
594
595         min_q = min(cfqg->busy_queues_avg[rt], busy);
596         max_q = max(cfqg->busy_queues_avg[rt], busy);
597         cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
598                 cfq_hist_divisor;
599         return cfqg->busy_queues_avg[rt];
600 }
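/*
 * Worked example with cfq_hist_divisor == 4, i.e.
 * avg = (3 * max + min + 2) / 4: if the average was 1 and busy jumps
 * to 5, the new average is (15 + 1 + 2) / 4 == 4 right away; once
 * busy drops back to 1, the next update only decays it to
 * (12 + 1 + 2) / 4 == 3.
 */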
601
602 static inline unsigned
603 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
604 {
605         struct cfq_rb_root *st = &cfqd->grp_service_tree;
606
607         return cfq_target_latency * cfqg->weight / st->total_weight;
608 }
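/*
 * I.e. cfq_target_latency (300ms at HZ == 1000) is split between the
 * groups by weight: a group holding half of the total weight on the
 * service tree gets a 150ms group slice.
 */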
609
610 static inline unsigned
611 cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
612 {
613         unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
614         if (cfqd->cfq_latency) {
615                 /*
616                  * interested queues (we consider only the ones with the same
617                  * priority class in the cfq group)
618                  */
619                 unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
620                                                 cfq_class_rt(cfqq));
621                 unsigned sync_slice = cfqd->cfq_slice[1];
622                 unsigned expect_latency = sync_slice * iq;
623                 unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
624
625                 if (expect_latency > group_slice) {
626                         unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
627                         /* scale low_slice according to IO priority
628                          * and sync vs async */
629                         unsigned low_slice =
630                                 min(slice, base_low_slice * slice / sync_slice);
631                         /* the adapted slice value is scaled to fit all iqs
632                          * into the target latency */
633                         slice = max(slice * group_slice / expect_latency,
634                                     low_slice);
635                 }
636         }
637         return slice;
638 }
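/*
 * Example with defaults at HZ == 1000 and a single group averaging 4
 * busy sync queues: expect_latency = 4 * 100ms = 400ms exceeds the
 * 300ms group slice, so each queue's 100ms slice is scaled down to
 * 100 * 300 / 400 = 75ms, bounded from below by low_slice.
 */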
639
640 static inline void
641 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
642 {
643         unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
644
645         cfqq->slice_start = jiffies;
646         cfqq->slice_end = jiffies + slice;
647         cfqq->allocated_slice = slice;
648         cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
649 }
650
651 /*
652  * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
653  * isn't valid until the first request from the dispatch list is activated
654  * and the slice time set.
655  */
656 static inline bool cfq_slice_used(struct cfq_queue *cfqq)
657 {
658         if (cfq_cfqq_slice_new(cfqq))
659                 return false;
660         if (time_before(jiffies, cfqq->slice_end))
661                 return false;
662
663         return true;
664 }
665
666 /*
667  * Lifted from AS - choose which of rq1 and rq2 is best served now.
668  * We choose the request that is closest to the head right now. Distance
669  * behind the head is penalized and only allowed to a certain extent.
670  */
671 static struct request *
672 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
673 {
674         sector_t s1, s2, d1 = 0, d2 = 0;
675         unsigned long back_max;
676 #define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
677 #define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
678         unsigned wrap = 0; /* bit mask: requests behind the disk head? */
679
680         if (rq1 == NULL || rq1 == rq2)
681                 return rq2;
682         if (rq2 == NULL)
683                 return rq1;
684
685         if (rq_is_sync(rq1) != rq_is_sync(rq2))
686                 return rq_is_sync(rq1) ? rq1 : rq2;
687
688         if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
689                 return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
690
691         s1 = blk_rq_pos(rq1);
692         s2 = blk_rq_pos(rq2);
693
694         /*
695          * by definition, 1KiB is 2 sectors
696          */
697         back_max = cfqd->cfq_back_max * 2;
698
699         /*
700          * Strict one way elevator _except_ in the case where we allow
701          * short backward seeks which are biased as twice the cost of a
702          * similar forward seek.
703          */
704         if (s1 >= last)
705                 d1 = s1 - last;
706         else if (s1 + back_max >= last)
707                 d1 = (last - s1) * cfqd->cfq_back_penalty;
708         else
709                 wrap |= CFQ_RQ1_WRAP;
710
711         if (s2 >= last)
712                 d2 = s2 - last;
713         else if (s2 + back_max >= last)
714                 d2 = (last - s2) * cfqd->cfq_back_penalty;
715         else
716                 wrap |= CFQ_RQ2_WRAP;
717
718         /* Found required data */
719
720         /*
721          * By doing switch() on the bit mask "wrap" we avoid having to
722          * check two variables for all permutations: --> faster!
723          */
724         switch (wrap) {
725         case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
726                 if (d1 < d2)
727                         return rq1;
728                 else if (d2 < d1)
729                         return rq2;
730                 else {
731                         if (s1 >= s2)
732                                 return rq1;
733                         else
734                                 return rq2;
735                 }
736
737         case CFQ_RQ2_WRAP:
738                 return rq1;
739         case CFQ_RQ1_WRAP:
740                 return rq2;
741         case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
742         default:
743                 /*
744                  * Since both rqs are wrapped,
745                  * start with the one that's further behind head
746                  * (--> only *one* back seek required),
747                  * since back seek takes more time than forward.
748                  */
749                 if (s1 <= s2)
750                         return rq1;
751                 else
752                         return rq2;
753         }
754 }
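/*
 * E.g. with the head at sector 1000 and the default back penalty of
 * 2, a request at sector 1100 has d1 = 100 while one at sector 960
 * has d2 = 40 * 2 = 80, so the backward request still wins. Anything
 * more than back_max behind the head counts as wrapped and loses to
 * any non-wrapped request.
 */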
755
756 /*
757  * The below is the leftmost-node cache addon for the rbtrees
758  */
759 static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
760 {
761         /* Service tree is empty */
762         if (!root->count)
763                 return NULL;
764
765         if (!root->left)
766                 root->left = rb_first(&root->rb);
767
768         if (root->left)
769                 return rb_entry(root->left, struct cfq_queue, rb_node);
770
771         return NULL;
772 }
773
774 static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
775 {
776         if (!root->left)
777                 root->left = rb_first(&root->rb);
778
779         if (root->left)
780                 return rb_entry_cfqg(root->left);
781
782         return NULL;
783 }
784
785 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
786 {
787         rb_erase(n, root);
788         RB_CLEAR_NODE(n);
789 }
790
791 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
792 {
793         if (root->left == n)
794                 root->left = NULL;
795         rb_erase_init(n, &root->rb);
796         --root->count;
797 }
798
799 /*
800  * would be nice to take fifo expire time into account as well
801  */
802 static struct request *
803 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
804                   struct request *last)
805 {
806         struct rb_node *rbnext = rb_next(&last->rb_node);
807         struct rb_node *rbprev = rb_prev(&last->rb_node);
808         struct request *next = NULL, *prev = NULL;
809
810         BUG_ON(RB_EMPTY_NODE(&last->rb_node));
811
812         if (rbprev)
813                 prev = rb_entry_rq(rbprev);
814
815         if (rbnext)
816                 next = rb_entry_rq(rbnext);
817         else {
818                 rbnext = rb_first(&cfqq->sort_list);
819                 if (rbnext && rbnext != &last->rb_node)
820                         next = rb_entry_rq(rbnext);
821         }
822
823         return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
824 }
825
826 static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
827                                       struct cfq_queue *cfqq)
828 {
829         /*
830          * just an approximation, should be ok.
831          */
832         return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
833                        cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
834 }
835
836 static inline s64
837 cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
838 {
839         return cfqg->vdisktime - st->min_vdisktime;
840 }
841
842 static void
843 __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
844 {
845         struct rb_node **node = &st->rb.rb_node;
846         struct rb_node *parent = NULL;
847         struct cfq_group *__cfqg;
848         s64 key = cfqg_key(st, cfqg);
849         int left = 1;
850
851         while (*node != NULL) {
852                 parent = *node;
853                 __cfqg = rb_entry_cfqg(parent);
854
855                 if (key < cfqg_key(st, __cfqg))
856                         node = &parent->rb_left;
857                 else {
858                         node = &parent->rb_right;
859                         left = 0;
860                 }
861         }
862
863         if (left)
864                 st->left = &cfqg->rb_node;
865
866         rb_link_node(&cfqg->rb_node, parent, node);
867         rb_insert_color(&cfqg->rb_node, &st->rb);
868 }
869
870 static void
871 cfq_update_group_weight(struct cfq_group *cfqg)
872 {
873         BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
874         if (cfqg->needs_update) {
875                 cfqg->weight = cfqg->new_weight;
876                 cfqg->needs_update = false;
877         }
878 }
879
880 static void
881 cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
882 {
883         BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
884
885         cfq_update_group_weight(cfqg);
886         __cfq_group_service_tree_add(st, cfqg);
887         st->total_weight += cfqg->weight;
888 }
889
890 static void
891 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
892 {
893         struct cfq_rb_root *st = &cfqd->grp_service_tree;
894         struct cfq_group *__cfqg;
895         struct rb_node *n;
896
897         cfqg->nr_cfqq++;
898         if (!RB_EMPTY_NODE(&cfqg->rb_node))
899                 return;
900
901         /*
902          * Currently put the group at the end. Later implement something
903          * so that groups get lesser vtime based on their weights, so that
904          * a group does not lose everything if it was not continuously backlogged.
905          */
906         n = rb_last(&st->rb);
907         if (n) {
908                 __cfqg = rb_entry_cfqg(n);
909                 cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
910         } else
911                 cfqg->vdisktime = st->min_vdisktime;
912         cfq_group_service_tree_add(st, cfqg);
913 }
914
915 static void
916 cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
917 {
918         st->total_weight -= cfqg->weight;
919         if (!RB_EMPTY_NODE(&cfqg->rb_node))
920                 cfq_rb_erase(&cfqg->rb_node, st);
921 }
922
923 static void
924 cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
925 {
926         struct cfq_rb_root *st = &cfqd->grp_service_tree;
927
928         BUG_ON(cfqg->nr_cfqq < 1);
929         cfqg->nr_cfqq--;
930
931         /* If there are other cfq queues under this group, don't delete it */
932         if (cfqg->nr_cfqq)
933                 return;
934
935         cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
936         cfq_group_service_tree_del(st, cfqg);
937         cfqg->saved_workload_slice = 0;
938         cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
939 }
940
941 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
942                                                 unsigned int *unaccounted_time)
943 {
944         unsigned int slice_used;
945
946         /*
947          * Queue got expired before even a single request completed or
948          * got expired immediately after first request completion.
949          */
950         if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
951                 /*
952                  * Also charge the seek time incurred to the group, otherwise
953          * if there are multiple queues in the group, each can dispatch
954          * a single request on seeky media and cause lots of seek time
955          * and the group will never know it.
956                  */
957                 slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
958                                         1);
959         } else {
960                 slice_used = jiffies - cfqq->slice_start;
961                 if (slice_used > cfqq->allocated_slice) {
962                         *unaccounted_time = slice_used - cfqq->allocated_slice;
963                         slice_used = cfqq->allocated_slice;
964                 }
965                 if (time_after(cfqq->slice_start, cfqq->dispatch_start))
966                         *unaccounted_time += cfqq->slice_start -
967                                         cfqq->dispatch_start;
968         }
969
970         return slice_used;
971 }
972
973 static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
974                                 struct cfq_queue *cfqq)
975 {
976         struct cfq_rb_root *st = &cfqd->grp_service_tree;
977         unsigned int used_sl, charge, unaccounted_sl = 0;
978         int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
979                         - cfqg->service_tree_idle.count;
980
981         BUG_ON(nr_sync < 0);
982         used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
983
984         if (iops_mode(cfqd))
985                 charge = cfqq->slice_dispatch;
986         else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
987                 charge = cfqq->allocated_slice;
988
989         /* Can't update vdisktime while group is on service tree */
990         cfq_group_service_tree_del(st, cfqg);
991         cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
992         /* If a new weight was requested, update now, off tree */
993         cfq_group_service_tree_add(st, cfqg);
994
995         /* This group is being expired. Save the context */
996         if (time_after(cfqd->workload_expires, jiffies)) {
997                 cfqg->saved_workload_slice = cfqd->workload_expires
998                                                 - jiffies;
999                 cfqg->saved_workload = cfqd->serving_type;
1000                 cfqg->saved_serving_prio = cfqd->serving_prio;
1001         } else
1002                 cfqg->saved_workload_slice = 0;
1003
1004         cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
1005                                         st->min_vdisktime);
1006         cfq_log_cfqq(cfqq->cfqd, cfqq,
1007                      "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
1008                      used_sl, cfqq->slice_dispatch, charge,
1009                      iops_mode(cfqd), cfqq->nr_sectors);
1010         cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
1011                                           unaccounted_sl);
1012         cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
1013 }
1014
1015 /**
1016  * cfq_init_cfqg_base - initialize base part of a cfq_group
1017  * @cfqg: cfq_group to initialize
1018  *
1019  * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
1020  * is enabled or not.
1021  */
1022 static void cfq_init_cfqg_base(struct cfq_group *cfqg)
1023 {
1024         struct cfq_rb_root *st;
1025         int i, j;
1026
1027         for_each_cfqg_st(cfqg, i, j, st)
1028                 *st = CFQ_RB_ROOT;
1029         RB_CLEAR_NODE(&cfqg->rb_node);
1030
1031         cfqg->ttime.last_end_request = jiffies;
1032 }
1033
1034 #ifdef CONFIG_CFQ_GROUP_IOSCHED
1035 static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
1036 {
1037         if (blkg)
1038                 return container_of(blkg, struct cfq_group, blkg);
1039         return NULL;
1040 }
1041
1042 static void cfq_update_blkio_group_weight(struct request_queue *q,
1043                                           struct blkio_group *blkg,
1044                                           unsigned int weight)
1045 {
1046         struct cfq_group *cfqg = cfqg_of_blkg(blkg);
1047         cfqg->new_weight = weight;
1048         cfqg->needs_update = true;
1049 }
1050
1051 static void cfq_link_blkio_group(struct request_queue *q,
1052                                  struct blkio_group *blkg)
1053 {
1054         struct cfq_data *cfqd = q->elevator->elevator_data;
1055         struct backing_dev_info *bdi = &q->backing_dev_info;
1056         struct cfq_group *cfqg = cfqg_of_blkg(blkg);
1057         unsigned int major, minor;
1058
1059         /*
1060          * Add group onto cgroup list. It might happen that bdi->dev is
1061          * not initialized yet. Initialize this new group without major
1062          * and minor info; the info will be filled in once a new thread
1063          * comes in for IO.
1064          */
1065         if (bdi->dev) {
1066                 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
1067                 blkg->dev = MKDEV(major, minor);
1068         }
1069
1070         cfqd->nr_blkcg_linked_grps++;
1071
1072         /* Add group on cfqd list */
1073         hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
1074 }
1075
1076 static struct blkio_group *cfq_alloc_blkio_group(struct request_queue *q,
1077                                                  struct blkio_cgroup *blkcg)
1078 {
1079         struct cfq_group *cfqg;
1080
1081         cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, q->node);
1082         if (!cfqg)
1083                 return NULL;
1084
1085         cfq_init_cfqg_base(cfqg);
1086         cfqg->weight = blkcg->weight;
1087
1088         /*
1089          * Take the initial reference that will be released on destroy.
1090          * This can be thought of as a joint reference by cgroup and
1091          * elevator which will be dropped by either elevator exit
1092          * or cgroup deletion path depending on who is exiting first.
1093          */
1094         cfqg->ref = 1;
1095
1096         return &cfqg->blkg;
1097 }
1098
1099 /*
1100  * Search for the cfq group current task belongs to. request_queue lock must
1101  * be held.
1102  */
1103 static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
1104                                                 struct blkio_cgroup *blkcg)
1105 {
1106         struct request_queue *q = cfqd->queue;
1107         struct backing_dev_info *bdi = &q->backing_dev_info;
1108         struct cfq_group *cfqg = NULL;
1109
1110         /* avoid lookup for the common case where there's no blkio cgroup */
1111         if (blkcg == &blkio_root_cgroup) {
1112                 cfqg = cfqd->root_group;
1113         } else {
1114                 struct blkio_group *blkg;
1115
1116                 blkg = blkg_lookup_create(blkcg, q, BLKIO_POLICY_PROP, false);
1117                 if (!IS_ERR(blkg))
1118                         cfqg = cfqg_of_blkg(blkg);
1119         }
1120
1121         if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
1122                 unsigned int major, minor;
1123
1124                 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
1125                 cfqg->blkg.dev = MKDEV(major, minor);
1126         }
1127
1128         return cfqg;
1129 }
1130
1131 static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
1132 {
1133         cfqg->ref++;
1134         return cfqg;
1135 }
1136
1137 static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1138 {
1139         /* Currently, all async queues are mapped to root group */
1140         if (!cfq_cfqq_sync(cfqq))
1141                 cfqg = cfqq->cfqd->root_group;
1142
1143         cfqq->cfqg = cfqg;
1144         /* cfqq reference on cfqg */
1145         cfqq->cfqg->ref++;
1146 }
1147
1148 static void cfq_put_cfqg(struct cfq_group *cfqg)
1149 {
1150         struct cfq_rb_root *st;
1151         int i, j;
1152
1153         BUG_ON(cfqg->ref <= 0);
1154         cfqg->ref--;
1155         if (cfqg->ref)
1156                 return;
1157         for_each_cfqg_st(cfqg, i, j, st)
1158                 BUG_ON(!RB_EMPTY_ROOT(&st->rb));
1159         free_percpu(cfqg->blkg.stats_cpu);
1160         kfree(cfqg);
1161 }
1162
1163 static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
1164 {
1165         /* Something wrong if we are trying to remove same group twice */
1166         BUG_ON(hlist_unhashed(&cfqg->cfqd_node));
1167
1168         hlist_del_init(&cfqg->cfqd_node);
1169
1170         BUG_ON(cfqd->nr_blkcg_linked_grps <= 0);
1171         cfqd->nr_blkcg_linked_grps--;
1172
1173         /*
1174          * Put the reference taken at the time of creation so that when all
1175          * queues are gone, group can be destroyed.
1176          */
1177         cfq_put_cfqg(cfqg);
1178 }
1179
1180 static bool cfq_release_cfq_groups(struct cfq_data *cfqd)
1181 {
1182         struct hlist_node *pos, *n;
1183         struct cfq_group *cfqg;
1184         bool empty = true;
1185
1186         hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
1187                 /*
1188                  * If cgroup removal path got to blk_group first and removed
1189                  * it from cgroup list, then it will take care of destroying
1190                  * cfqg also.
1191                  */
1192                 if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
1193                         cfq_destroy_cfqg(cfqd, cfqg);
1194                 else
1195                         empty = false;
1196         }
1197         return empty;
1198 }
1199
1200 /*
1201  * Blk cgroup controller notification saying that blkio_group object is being
1202  * delinked as associated cgroup object is going away. That also means that
1203  * no new IO will come in this group. So get rid of this group as soon as
1204  * any pending IO in the group is finished.
1205  *
1206  * This function is called under rcu_read_lock(). key is the rcu protected
1207  * pointer. That means @q is a valid request_queue pointer as long as we
1208  * hold the rcu read lock.
1209  *
1210  * @q was fetched from blkio_group under blkio_cgroup->lock. That means
1211  * it should not be NULL as even if the elevator was exiting, the cgroup
1212  * deletion path got to it first.
1213  */
1214 static void cfq_unlink_blkio_group(struct request_queue *q,
1215                                    struct blkio_group *blkg)
1216 {
1217         struct cfq_data *cfqd = q->elevator->elevator_data;
1218         unsigned long flags;
1219
1220         spin_lock_irqsave(q->queue_lock, flags);
1221         cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
1222         spin_unlock_irqrestore(q->queue_lock, flags);
1223 }
1224
1225 static struct elevator_type iosched_cfq;
1226
1227 static bool cfq_clear_queue(struct request_queue *q)
1228 {
1229         lockdep_assert_held(q->queue_lock);
1230
1231         /* shoot down blkgs iff the current elevator is cfq */
1232         if (!q->elevator || q->elevator->type != &iosched_cfq)
1233                 return true;
1234
1235         return cfq_release_cfq_groups(q->elevator->elevator_data);
1236 }
1237
1238 #else /* GROUP_IOSCHED */
1239 static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
1240                                                 struct blkio_cgroup *blkcg)
1241 {
1242         return cfqd->root_group;
1243 }
1244
1245 static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
1246 {
1247         return cfqg;
1248 }
1249
1250 static inline void
1251 cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
1252         cfqq->cfqg = cfqg;
1253 }
1254
1255 static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
1256 static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}
1257
1258 #endif /* GROUP_IOSCHED */
1259
1260 /*
1261  * The cfqg->service_trees hold all pending cfq_queues that have
1262  * requests waiting to be processed. It is sorted in the order that
1263  * we will service the queues.
1264  */
1265 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1266                                  bool add_front)
1267 {
1268         struct rb_node **p, *parent;
1269         struct cfq_queue *__cfqq;
1270         unsigned long rb_key;
1271         struct cfq_rb_root *service_tree;
1272         int left;
1273         int new_cfqq = 1;
1274
1275         service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
1276                                                 cfqq_type(cfqq));
1277         if (cfq_class_idle(cfqq)) {
1278                 rb_key = CFQ_IDLE_DELAY;
1279                 parent = rb_last(&service_tree->rb);
1280                 if (parent && parent != &cfqq->rb_node) {
1281                         __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1282                         rb_key += __cfqq->rb_key;
1283                 } else
1284                         rb_key += jiffies;
1285         } else if (!add_front) {
1286                 /*
1287                  * Get our rb key offset. Subtract any residual slice
1288                  * value carried from last service. A negative resid
1289                  * count indicates slice overrun, and this should position
1290                  * the next service time further away in the tree.
1291                  */
1292                 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
1293                 rb_key -= cfqq->slice_resid;
1294                 cfqq->slice_resid = 0;
1295         } else {
1296                 rb_key = -HZ;
1297                 __cfqq = cfq_rb_first(service_tree);
1298                 rb_key += __cfqq ? __cfqq->rb_key : jiffies;
1299         }
1300
1301         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1302                 new_cfqq = 0;
1303                 /*
1304                  * same position, nothing more to do
1305                  */
1306                 if (rb_key == cfqq->rb_key &&
1307                     cfqq->service_tree == service_tree)
1308                         return;
1309
1310                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1311                 cfqq->service_tree = NULL;
1312         }
1313
1314         left = 1;
1315         parent = NULL;
1316         cfqq->service_tree = service_tree;
1317         p = &service_tree->rb.rb_node;
1318         while (*p) {
1319                 struct rb_node **n;
1320
1321                 parent = *p;
1322                 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1323
1324                 /*
1325                  * sort by key, that represents service time.
1326                  */
1327                 if (time_before(rb_key, __cfqq->rb_key))
1328                         n = &(*p)->rb_left;
1329                 else {
1330                         n = &(*p)->rb_right;
1331                         left = 0;
1332                 }
1333
1334                 p = n;
1335         }
1336
1337         if (left)
1338                 service_tree->left = &cfqq->rb_node;
1339
1340         cfqq->rb_key = rb_key;
1341         rb_link_node(&cfqq->rb_node, parent, p);
1342         rb_insert_color(&cfqq->rb_node, &service_tree->rb);
1343         service_tree->count++;
1344         if (add_front || !new_cfqq)
1345                 return;
1346         cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
1347 }
1348
1349 static struct cfq_queue *
1350 cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
1351                      sector_t sector, struct rb_node **ret_parent,
1352                      struct rb_node ***rb_link)
1353 {
1354         struct rb_node **p, *parent;
1355         struct cfq_queue *cfqq = NULL;
1356
1357         parent = NULL;
1358         p = &root->rb_node;
1359         while (*p) {
1360                 struct rb_node **n;
1361
1362                 parent = *p;
1363                 cfqq = rb_entry(parent, struct cfq_queue, p_node);
1364
1365                 /*
1366                  * Sort strictly based on sector.  Smallest to the left,
1367                  * largest to the right.
1368                  */
1369                 if (sector > blk_rq_pos(cfqq->next_rq))
1370                         n = &(*p)->rb_right;
1371                 else if (sector < blk_rq_pos(cfqq->next_rq))
1372                         n = &(*p)->rb_left;
1373                 else
1374                         break;
1375                 p = n;
1376                 cfqq = NULL;
1377         }
1378
1379         *ret_parent = parent;
1380         if (rb_link)
1381                 *rb_link = p;
1382         return cfqq;
1383 }
1384
1385 static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1386 {
1387         struct rb_node **p, *parent;
1388         struct cfq_queue *__cfqq;
1389
1390         if (cfqq->p_root) {
1391                 rb_erase(&cfqq->p_node, cfqq->p_root);
1392                 cfqq->p_root = NULL;
1393         }
1394
1395         if (cfq_class_idle(cfqq))
1396                 return;
1397         if (!cfqq->next_rq)
1398                 return;
1399
1400         cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
1401         __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
1402                                       blk_rq_pos(cfqq->next_rq), &parent, &p);
1403         if (!__cfqq) {
1404                 rb_link_node(&cfqq->p_node, parent, p);
1405                 rb_insert_color(&cfqq->p_node, cfqq->p_root);
1406         } else
1407                 cfqq->p_root = NULL;
1408 }
1409
1410 /*
1411  * Update cfqq's position in the service tree.
1412  */
1413 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1414 {
1415         /*
1416          * Resorting requires the cfqq to be on the RR list already.
1417          */
1418         if (cfq_cfqq_on_rr(cfqq)) {
1419                 cfq_service_tree_add(cfqd, cfqq, 0);
1420                 cfq_prio_tree_add(cfqd, cfqq);
1421         }
1422 }
1423
1424 /*
1425  * add to busy list of queues for service, trying to be fair in ordering
1426  * the pending list according to last request service
1427  */
1428 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1429 {
1430         cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
1431         BUG_ON(cfq_cfqq_on_rr(cfqq));
1432         cfq_mark_cfqq_on_rr(cfqq);
1433         cfqd->busy_queues++;
1434         if (cfq_cfqq_sync(cfqq))
1435                 cfqd->busy_sync_queues++;
1436
1437         cfq_resort_rr_list(cfqd, cfqq);
1438 }
1439
1440 /*
1441  * Called when the cfqq no longer has requests pending, remove it from
1442  * the service tree.
1443  */
1444 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1445 {
1446         cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
1447         BUG_ON(!cfq_cfqq_on_rr(cfqq));
1448         cfq_clear_cfqq_on_rr(cfqq);
1449
1450         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1451                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1452                 cfqq->service_tree = NULL;
1453         }
1454         if (cfqq->p_root) {
1455                 rb_erase(&cfqq->p_node, cfqq->p_root);
1456                 cfqq->p_root = NULL;
1457         }
1458
1459         cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
1460         BUG_ON(!cfqd->busy_queues);
1461         cfqd->busy_queues--;
1462         if (cfq_cfqq_sync(cfqq))
1463                 cfqd->busy_sync_queues--;
1464 }
1465
1466 /*
1467  * rb tree support functions
1468  */
1469 static void cfq_del_rq_rb(struct request *rq)
1470 {
1471         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1472         const int sync = rq_is_sync(rq);
1473
1474         BUG_ON(!cfqq->queued[sync]);
1475         cfqq->queued[sync]--;
1476
1477         elv_rb_del(&cfqq->sort_list, rq);
1478
1479         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
1480                 /*
1481                  * Queue will be deleted from service tree when we actually
1482                  * expire it later. Right now just remove it from prio tree
1483                  * as it is empty.
1484                  */
1485                 if (cfqq->p_root) {
1486                         rb_erase(&cfqq->p_node, cfqq->p_root);
1487                         cfqq->p_root = NULL;
1488                 }
1489         }
1490 }
1491
1492 static void cfq_add_rq_rb(struct request *rq)
1493 {
1494         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1495         struct cfq_data *cfqd = cfqq->cfqd;
1496         struct request *prev;
1497
1498         cfqq->queued[rq_is_sync(rq)]++;
1499
1500         elv_rb_add(&cfqq->sort_list, rq);
1501
1502         if (!cfq_cfqq_on_rr(cfqq))
1503                 cfq_add_cfqq_rr(cfqd, cfqq);
1504
1505         /*
1506          * check if this request is a better next-serve candidate
1507          */
1508         prev = cfqq->next_rq;
1509         cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
1510
1511         /*
1512          * adjust priority tree position, if ->next_rq changes
1513          */
1514         if (prev != cfqq->next_rq)
1515                 cfq_prio_tree_add(cfqd, cfqq);
1516
1517         BUG_ON(!cfqq->next_rq);
1518 }
1519
1520 static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
1521 {
1522         elv_rb_del(&cfqq->sort_list, rq);
1523         cfqq->queued[rq_is_sync(rq)]--;
1524         cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
1525                                         rq_data_dir(rq), rq_is_sync(rq));
1526         cfq_add_rq_rb(rq);
1527         cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
1528                         &cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
1529                         rq_is_sync(rq));
1530 }
1531
1532 static struct request *
1533 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
1534 {
1535         struct task_struct *tsk = current;
1536         struct cfq_io_cq *cic;
1537         struct cfq_queue *cfqq;
1538
1539         cic = cfq_cic_lookup(cfqd, tsk->io_context);
1540         if (!cic)
1541                 return NULL;
1542
1543         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1544         if (cfqq) {
1545                 sector_t sector = bio->bi_sector + bio_sectors(bio);
1546
1547                 return elv_rb_find(&cfqq->sort_list, sector);
1548         }
1549
1550         return NULL;
1551 }
1552
1553 static void cfq_activate_request(struct request_queue *q, struct request *rq)
1554 {
1555         struct cfq_data *cfqd = q->elevator->elevator_data;
1556
1557         cfqd->rq_in_driver++;
1558         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
1559                                                 cfqd->rq_in_driver);
1560
1561         cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
1562 }
1563
1564 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
1565 {
1566         struct cfq_data *cfqd = q->elevator->elevator_data;
1567
1568         WARN_ON(!cfqd->rq_in_driver);
1569         cfqd->rq_in_driver--;
1570         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
1571                                                 cfqd->rq_in_driver);
1572 }
1573
1574 static void cfq_remove_request(struct request *rq)
1575 {
1576         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1577
1578         if (cfqq->next_rq == rq)
1579                 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
1580
1581         list_del_init(&rq->queuelist);
1582         cfq_del_rq_rb(rq);
1583
1584         cfqq->cfqd->rq_queued--;
1585         cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
1586                                         rq_data_dir(rq), rq_is_sync(rq));
1587         if (rq->cmd_flags & REQ_PRIO) {
1588                 WARN_ON(!cfqq->prio_pending);
1589                 cfqq->prio_pending--;
1590         }
1591 }
1592
1593 static int cfq_merge(struct request_queue *q, struct request **req,
1594                      struct bio *bio)
1595 {
1596         struct cfq_data *cfqd = q->elevator->elevator_data;
1597         struct request *__rq;
1598
1599         __rq = cfq_find_rq_fmerge(cfqd, bio);
1600         if (__rq && elv_rq_merge_ok(__rq, bio)) {
1601                 *req = __rq;
1602                 return ELEVATOR_FRONT_MERGE;
1603         }
1604
1605         return ELEVATOR_NO_MERGE;
1606 }
1607
1608 static void cfq_merged_request(struct request_queue *q, struct request *req,
1609                                int type)
1610 {
1611         if (type == ELEVATOR_FRONT_MERGE) {
1612                 struct cfq_queue *cfqq = RQ_CFQQ(req);
1613
1614                 cfq_reposition_rq_rb(cfqq, req);
1615         }
1616 }
1617
1618 static void cfq_bio_merged(struct request_queue *q, struct request *req,
1619                                 struct bio *bio)
1620 {
1621         cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
1622                                         bio_data_dir(bio), cfq_bio_sync(bio));
1623 }
1624
1625 static void
1626 cfq_merged_requests(struct request_queue *q, struct request *rq,
1627                     struct request *next)
1628 {
1629         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1630         struct cfq_data *cfqd = q->elevator->elevator_data;
1631
1632         /*
1633          * reposition in fifo if next is older than rq
1634          */
1635         if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
1636             time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
1637                 list_move(&rq->queuelist, &next->queuelist);
1638                 rq_set_fifo_time(rq, rq_fifo_time(next));
1639         }
1640
1641         if (cfqq->next_rq == next)
1642                 cfqq->next_rq = rq;
1643         cfq_remove_request(next);
1644         cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
1645                                         rq_data_dir(next), rq_is_sync(next));
1646
1647         cfqq = RQ_CFQQ(next);
1648         /*
1649          * All requests of this queue have been merged into other queues, so
1650          * delete it from the service tree. If it's the active_queue,
1651          * cfq_dispatch_requests() will choose to expire it or idle on it.
1652          */
1653         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
1654             cfqq != cfqd->active_queue)
1655                 cfq_del_cfqq_rr(cfqd, cfqq);
1656 }
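
/*
 * Note on the FIFO fixup above: @next is merged into @rq, so the
 * surviving request must inherit @next's list position and FIFO
 * deadline whenever @next was queued first; otherwise the merged
 * request could wait longer than either original would have.
 * time_before() keeps the jiffy comparison safe across wraparound.
 */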
1657
1658 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
1659                            struct bio *bio)
1660 {
1661         struct cfq_data *cfqd = q->elevator->elevator_data;
1662         struct cfq_io_cq *cic;
1663         struct cfq_queue *cfqq;
1664
1665         /*
1666          * Disallow merge of a sync bio into an async request.
1667          */
1668         if (cfq_bio_sync(bio) && !rq_is_sync(rq))
1669                 return false;
1670
1671         /*
1672          * Lookup the cfqq that this bio will be queued with and allow
1673          * merge only if rq is queued there.
1674          */
1675         cic = cfq_cic_lookup(cfqd, current->io_context);
1676         if (!cic)
1677                 return false;
1678
1679         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1680         return cfqq == RQ_CFQQ(rq);
1681 }
1682
1683 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1684 {
1685         del_timer(&cfqd->idle_slice_timer);
1686         cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
1687 }
1688
1689 static void __cfq_set_active_queue(struct cfq_data *cfqd,
1690                                    struct cfq_queue *cfqq)
1691 {
1692         if (cfqq) {
1693                 cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
1694                                 cfqd->serving_prio, cfqd->serving_type);
1695                 cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
1696                 cfqq->slice_start = 0;
1697                 cfqq->dispatch_start = jiffies;
1698                 cfqq->allocated_slice = 0;
1699                 cfqq->slice_end = 0;
1700                 cfqq->slice_dispatch = 0;
1701                 cfqq->nr_sectors = 0;
1702
1703                 cfq_clear_cfqq_wait_request(cfqq);
1704                 cfq_clear_cfqq_must_dispatch(cfqq);
1705                 cfq_clear_cfqq_must_alloc_slice(cfqq);
1706                 cfq_clear_cfqq_fifo_expire(cfqq);
1707                 cfq_mark_cfqq_slice_new(cfqq);
1708
1709                 cfq_del_timer(cfqd, cfqq);
1710         }
1711
1712         cfqd->active_queue = cfqq;
1713 }
1714
1715 /*
1716  * current cfqq expired its slice (or was too idle), select new one
1717  */
1718 static void
1719 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1720                     bool timed_out)
1721 {
1722         cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
1723
1724         if (cfq_cfqq_wait_request(cfqq))
1725                 cfq_del_timer(cfqd, cfqq);
1726
1727         cfq_clear_cfqq_wait_request(cfqq);
1728         cfq_clear_cfqq_wait_busy(cfqq);
1729
1730         /*
1731          * If this cfqq is shared between multiple processes, check to
1732          * make sure that those processes are still issuing I/Os within
1733          * the mean seek distance.  If not, it may be time to break the
1734          * queues apart again.
1735          */
1736         if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
1737                 cfq_mark_cfqq_split_coop(cfqq);
1738
1739         /*
1740          * store what was left of this slice, if the queue idled/timed out
1741          */
1742         if (timed_out) {
1743                 if (cfq_cfqq_slice_new(cfqq))
1744                         cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
1745                 else
1746                         cfqq->slice_resid = cfqq->slice_end - jiffies;
1747                 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
1748         }
1749
1750         cfq_group_served(cfqd, cfqq->cfqg, cfqq);
1751
1752         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
1753                 cfq_del_cfqq_rr(cfqd, cfqq);
1754
1755         cfq_resort_rr_list(cfqd, cfqq);
1756
1757         if (cfqq == cfqd->active_queue)
1758                 cfqd->active_queue = NULL;
1759
1760         if (cfqd->active_cic) {
1761                 put_io_context(cfqd->active_cic->icq.ioc);
1762                 cfqd->active_cic = NULL;
1763         }
1764 }
1765
1766 static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
1767 {
1768         struct cfq_queue *cfqq = cfqd->active_queue;
1769
1770         if (cfqq)
1771                 __cfq_slice_expired(cfqd, cfqq, timed_out);
1772 }
1773
1774 /*
1775  * Get next queue for service. Unless we have a queue preemption,
1776  * we'll simply select the first cfqq in the service tree.
1777  */
1778 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
1779 {
1780         struct cfq_rb_root *service_tree =
1781                 service_tree_for(cfqd->serving_group, cfqd->serving_prio,
1782                                         cfqd->serving_type);
1783
1784         if (!cfqd->rq_queued)
1785                 return NULL;
1786
1787         /* There is nothing to dispatch */
1788         if (!service_tree)
1789                 return NULL;
1790         if (RB_EMPTY_ROOT(&service_tree->rb))
1791                 return NULL;
1792         return cfq_rb_first(service_tree);
1793 }
1794
1795 static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
1796 {
1797         struct cfq_group *cfqg;
1798         struct cfq_queue *cfqq;
1799         int i, j;
1800         struct cfq_rb_root *st;
1801
1802         if (!cfqd->rq_queued)
1803                 return NULL;
1804
1805         cfqg = cfq_get_next_cfqg(cfqd);
1806         if (!cfqg)
1807                 return NULL;
1808
1809         for_each_cfqg_st(cfqg, i, j, st)
1810                 if ((cfqq = cfq_rb_first(st)) != NULL)
1811                         return cfqq;
1812         return NULL;
1813 }
1814
1815 /*
1816  * Get and set a new active queue for service.
1817  */
1818 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
1819                                               struct cfq_queue *cfqq)
1820 {
1821         if (!cfqq)
1822                 cfqq = cfq_get_next_queue(cfqd);
1823
1824         __cfq_set_active_queue(cfqd, cfqq);
1825         return cfqq;
1826 }
1827
1828 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
1829                                           struct request *rq)
1830 {
1831         if (blk_rq_pos(rq) >= cfqd->last_position)
1832                 return blk_rq_pos(rq) - cfqd->last_position;
1833         else
1834                 return cfqd->last_position - blk_rq_pos(rq);
1835 }
1836
1837 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1838                                struct request *rq)
1839 {
1840         return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
1841 }
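
/*
 * Illustrative arithmetic: CFQQ_CLOSE_THR is 8 * 1024 sectors, so with
 * 512-byte sectors any request starting within 4MiB of
 * cfqd->last_position, in either direction, counts as "close" for the
 * cooperator search below.
 */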
1842
1843 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1844                                     struct cfq_queue *cur_cfqq)
1845 {
1846         struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
1847         struct rb_node *parent, *node;
1848         struct cfq_queue *__cfqq;
1849         sector_t sector = cfqd->last_position;
1850
1851         if (RB_EMPTY_ROOT(root))
1852                 return NULL;
1853
1854         /*
1855          * First, if we find a request starting at the end of the last
1856          * request, choose it.
1857          */
1858         __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
1859         if (__cfqq)
1860                 return __cfqq;
1861
1862         /*
1863          * If the exact sector wasn't found, the parent of the NULL leaf
1864          * will contain the closest sector.
1865          */
1866         __cfqq = rb_entry(parent, struct cfq_queue, p_node);
1867         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1868                 return __cfqq;
1869
1870         if (blk_rq_pos(__cfqq->next_rq) < sector)
1871                 node = rb_next(&__cfqq->p_node);
1872         else
1873                 node = rb_prev(&__cfqq->p_node);
1874         if (!node)
1875                 return NULL;
1876
1877         __cfqq = rb_entry(node, struct cfq_queue, p_node);
1878         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1879                 return __cfqq;
1880
1881         return NULL;
1882 }
1883
1884 /*
1885  * cfqd - obvious
1886  * cur_cfqq - passed in so that we don't decide that the current queue is
1887  *            closely cooperating with itself.
1888  *
1889  * So, basically we're assuming that cur_cfqq has dispatched at least
1890  * one request, and that cfqd->last_position reflects a position on the disk
1891  * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
1892  * assumption.
1893  */
1894 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1895                                               struct cfq_queue *cur_cfqq)
1896 {
1897         struct cfq_queue *cfqq;
1898
1899         if (cfq_class_idle(cur_cfqq))
1900                 return NULL;
1901         if (!cfq_cfqq_sync(cur_cfqq))
1902                 return NULL;
1903         if (CFQQ_SEEKY(cur_cfqq))
1904                 return NULL;
1905
1906         /*
1907          * Don't search priority tree if it's the only queue in the group.
1908          */
1909         if (cur_cfqq->cfqg->nr_cfqq == 1)
1910                 return NULL;
1911
1912         /*
1913          * We should notice if some of the queues are cooperating, e.g.
1914          * working closely on the same area of the disk. In that case,
1915          * we can group them together and not waste time idling.
1916          */
1917         cfqq = cfqq_close(cfqd, cur_cfqq);
1918         if (!cfqq)
1919                 return NULL;
1920
1921         /* If new queue belongs to different cfq_group, don't choose it */
1922         if (cur_cfqq->cfqg != cfqq->cfqg)
1923                 return NULL;
1924
1925         /*
1926          * It only makes sense to merge sync queues.
1927          */
1928         if (!cfq_cfqq_sync(cfqq))
1929                 return NULL;
1930         if (CFQQ_SEEKY(cfqq))
1931                 return NULL;
1932
1933         /*
1934          * Do not merge queues of different priority classes
1935          */
1936         if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
1937                 return NULL;
1938
1939         return cfqq;
1940 }
1941
1942 /*
1943  * Determine whether we should enforce idle window for this queue.
1944  */
1945
1946 static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1947 {
1948         enum wl_prio_t prio = cfqq_prio(cfqq);
1949         struct cfq_rb_root *service_tree = cfqq->service_tree;
1950
1951         BUG_ON(!service_tree);
1952         BUG_ON(!service_tree->count);
1953
1954         if (!cfqd->cfq_slice_idle)
1955                 return false;
1956
1957         /* We never do for idle class queues. */
1958         if (prio == IDLE_WORKLOAD)
1959                 return false;
1960
1961         /* We do for queues that were marked with idle window flag. */
1962         if (cfq_cfqq_idle_window(cfqq) &&
1963            !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
1964                 return true;
1965
1966         /*
1967          * Otherwise, we idle only if this is the last queue
1968          * in its service tree.
1969          */
1970         if (service_tree->count == 1 && cfq_cfqq_sync(cfqq) &&
1971            !cfq_io_thinktime_big(cfqd, &service_tree->ttime, false))
1972                 return true;
1973         cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
1974                         service_tree->count);
1975         return false;
1976 }
1977
1978 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1979 {
1980         struct cfq_queue *cfqq = cfqd->active_queue;
1981         struct cfq_io_cq *cic;
1982         unsigned long sl, group_idle = 0;
1983
1984         /*
1985          * SSD device without seek penalty, disable idling. But only do so
1986          * for devices that support queuing, otherwise we still have a problem
1987          * with sync vs async workloads.
1988          */
1989         if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
1990                 return;
1991
1992         WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
1993         WARN_ON(cfq_cfqq_slice_new(cfqq));
1994
1995         /*
1996          * idle is disabled, either manually or by past process history
1997          */
1998         if (!cfq_should_idle(cfqd, cfqq)) {
1999                 /* no queue idling. Check for group idling */
2000                 if (cfqd->cfq_group_idle)
2001                         group_idle = cfqd->cfq_group_idle;
2002                 else
2003                         return;
2004         }
2005
2006         /*
2007          * still active requests from this queue, don't idle
2008          */
2009         if (cfqq->dispatched)
2010                 return;
2011
2012         /*
2013          * task has exited, don't wait
2014          */
2015         cic = cfqd->active_cic;
2016         if (!cic || !atomic_read(&cic->icq.ioc->nr_tasks))
2017                 return;
2018
2019         /*
2020          * If our average think time is larger than the remaining time
2021          * slice, then don't idle. This avoids overrunning the allotted
2022          * time slice.
2023          */
2024         if (sample_valid(cic->ttime.ttime_samples) &&
2025             (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
2026                 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
2027                              cic->ttime.ttime_mean);
2028                 return;
2029         }
2030
2031         /* There are other queues in the group, don't do group idle */
2032         if (group_idle && cfqq->cfqg->nr_cfqq > 1)
2033                 return;
2034
2035         cfq_mark_cfqq_wait_request(cfqq);
2036
2037         if (group_idle)
2038                 sl = cfqd->cfq_group_idle;
2039         else
2040                 sl = cfqd->cfq_slice_idle;
2041
2042         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
2043         cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
2044         cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
2045                         group_idle ? 1 : 0);
2046 }
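
/*
 * Worked example for the timer above (assuming HZ == 1000): the
 * default cfq_slice_idle of HZ/125 arms an 8ms idle window, and
 * cfq_group_idle defaults to the same value. Note that if the cic's
 * mean think time already exceeds what remains of the slice, we bail
 * out before arming the timer, since the task is unlikely to submit
 * another request in time.
 */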
2047
2048 /*
2049  * Move request from internal lists to the request queue dispatch list.
2050  */
2051 static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
2052 {
2053         struct cfq_data *cfqd = q->elevator->elevator_data;
2054         struct cfq_queue *cfqq = RQ_CFQQ(rq);
2055
2056         cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
2057
2058         cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
2059         cfq_remove_request(rq);
2060         cfqq->dispatched++;
2061         (RQ_CFQG(rq))->dispatched++;
2062         elv_dispatch_sort(q, rq);
2063
2064         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
2065         cfqq->nr_sectors += blk_rq_sectors(rq);
2066         cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
2067                                         rq_data_dir(rq), rq_is_sync(rq));
2068 }
2069
2070 /*
2071  * return expired entry, or NULL to just start from scratch in rbtree
2072  */
2073 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
2074 {
2075         struct request *rq = NULL;
2076
2077         if (cfq_cfqq_fifo_expire(cfqq))
2078                 return NULL;
2079
2080         cfq_mark_cfqq_fifo_expire(cfqq);
2081
2082         if (list_empty(&cfqq->fifo))
2083                 return NULL;
2084
2085         rq = rq_entry_fifo(cfqq->fifo.next);
2086         if (time_before(jiffies, rq_fifo_time(rq)))
2087                 rq = NULL;
2088
2089         cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
2090         return rq;
2091 }
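
/*
 * Note: rq_fifo_time() holds the absolute jiffy at which the request's
 * FIFO deadline expires (stamped at insert time from
 * cfqd->cfq_fifo_expire[rq_is_sync(rq)]), so the FIFO head is returned
 * here only once that deadline has passed; otherwise dispatch falls
 * back to the sector-sorted cfqq->next_rq.
 */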
2092
2093 static inline int
2094 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2095 {
2096         const int base_rq = cfqd->cfq_slice_async_rq;
2097
2098         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
2099
2100         return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
2101 }
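
/*
 * Illustrative arithmetic (assuming the default cfq_slice_async_rq of
 * 2 and IOPRIO_BE_NR of 8): a best-effort queue at priority 4 may
 * dispatch up to 2 * 2 * (8 - 4) = 16 requests in one slice, while a
 * priority-7 queue is capped at 2 * 2 * 1 = 4. Lower ioprio values
 * (higher priority) thus earn deeper dispatch allowances.
 */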
2102
2103 /*
2104  * Must be called with the queue_lock held.
2105  */
2106 static int cfqq_process_refs(struct cfq_queue *cfqq)
2107 {
2108         int process_refs, io_refs;
2109
2110         io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
2111         process_refs = cfqq->ref - io_refs;
2112         BUG_ON(process_refs < 0);
2113         return process_refs;
2114 }
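
/*
 * Example of the accounting above: a cfqq referenced by one task that
 * also has three requests allocated against it has ref == 4 and
 * io_refs == 3, so cfqq_process_refs() reports 1. Only these process
 * references matter when deciding whether a merge target is alive.
 */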
2115
2116 static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
2117 {
2118         int process_refs, new_process_refs;
2119         struct cfq_queue *__cfqq;
2120
2121         /*
2122          * If there are no process references on the new_cfqq, then it is
2123          * unsafe to follow the ->new_cfqq chain as other cfqq's in the
2124          * chain may have dropped their last reference (not just their
2125          * last process reference).
2126          */
2127         if (!cfqq_process_refs(new_cfqq))
2128                 return;
2129
2130         /* Avoid a circular list and skip interim queue merges */
2131         while ((__cfqq = new_cfqq->new_cfqq)) {
2132                 if (__cfqq == cfqq)
2133                         return;
2134                 new_cfqq = __cfqq;
2135         }
2136
2137         process_refs = cfqq_process_refs(cfqq);
2138         new_process_refs = cfqq_process_refs(new_cfqq);
2139         /*
2140          * If the process for the cfqq has gone away, there is no
2141          * sense in merging the queues.
2142          */
2143         if (process_refs == 0 || new_process_refs == 0)
2144                 return;
2145
2146         /*
2147          * Merge in the direction of the lesser amount of work.
2148          */
2149         if (new_process_refs >= process_refs) {
2150                 cfqq->new_cfqq = new_cfqq;
2151                 new_cfqq->ref += process_refs;
2152         } else {
2153                 new_cfqq->new_cfqq = cfqq;
2154                 cfqq->ref += new_process_refs;
2155         }
2156 }
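
/*
 * Sketch of the reference accounting above: if cfqq has process_refs
 * == 1 and new_cfqq has new_process_refs == 3, the merge points the
 * lighter queue at the heavier one (cfqq->new_cfqq = new_cfqq) and
 * bumps new_cfqq->ref by 1, one reference per process that may now
 * follow the ->new_cfqq link. Those references are dropped again in
 * cfq_put_cooperator() when the chain is torn down.
 */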
2157
2158 static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
2159                                 struct cfq_group *cfqg, enum wl_prio_t prio)
2160 {
2161         struct cfq_queue *queue;
2162         int i;
2163         bool key_valid = false;
2164         unsigned long lowest_key = 0;
2165         enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
2166
2167         for (i = 0; i <= SYNC_WORKLOAD; ++i) {
2168                 /* select the one with lowest rb_key */
2169                 queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
2170                 if (queue &&
2171                     (!key_valid || time_before(queue->rb_key, lowest_key))) {
2172                         lowest_key = queue->rb_key;
2173                         cur_best = i;
2174                         key_valid = true;
2175                 }
2176         }
2177
2178         return cur_best;
2179 }
2180
2181 static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
2182 {
2183         unsigned slice;
2184         unsigned count;
2185         struct cfq_rb_root *st;
2186         unsigned group_slice;
2187         enum wl_prio_t original_prio = cfqd->serving_prio;
2188
2189         /* Choose next priority. RT > BE > IDLE */
2190         if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
2191                 cfqd->serving_prio = RT_WORKLOAD;
2192         else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
2193                 cfqd->serving_prio = BE_WORKLOAD;
2194         else {
2195                 cfqd->serving_prio = IDLE_WORKLOAD;
2196                 cfqd->workload_expires = jiffies + 1;
2197                 return;
2198         }
2199
2200         if (original_prio != cfqd->serving_prio)
2201                 goto new_workload;
2202
2203         /*
2204          * For RT and BE, we have to choose also the type
2205          * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
2206          * expiration time
2207          */
2208         st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2209         count = st->count;
2210
2211         /*
2212          * check workload expiration, and that we still have other queues ready
2213          */
2214         if (count && !time_after(jiffies, cfqd->workload_expires))
2215                 return;
2216
2217 new_workload:
2218         /* otherwise select new workload type */
2219         cfqd->serving_type =
2220                 cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
2221         st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2222         count = st->count;
2223
2224         /*
2225          * the workload slice is computed as a fraction of target latency
2226          * proportional to the number of queues in that workload, over
2227          * all the queues in the same priority class
2228          */
2229         group_slice = cfq_group_slice(cfqd, cfqg);
2230
2231         slice = group_slice * count /
2232                 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
2233                       cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
2234
2235         if (cfqd->serving_type == ASYNC_WORKLOAD) {
2236                 unsigned int tmp;
2237
2238                 /*
2239                  * Async queues are currently system wide. Just taking
2240                  * the proportion of queues within the same group will lead to
2241                  * a higher async ratio system wide, as the root group generally
2242                  * has higher weight. A more accurate approach would be to
2243                  * calculate the system wide async/sync ratio.
2244                  */
2245                 tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
2246                 tmp = tmp/cfqd->busy_queues;
2247                 slice = min_t(unsigned, slice, tmp);
2248
2249                 /* async workload slice is scaled down according to
2250                  * the sync/async slice ratio. */
2251                 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
2252         } else
2253                 /* sync workload slice is at least 2 * cfq_slice_idle */
2254                 slice = max(slice, 2 * cfqd->cfq_slice_idle);
2255
2256         slice = max_t(unsigned, slice, CFQ_MIN_TT);
2257         cfq_log(cfqd, "workload slice:%d", slice);
2258         cfqd->workload_expires = jiffies + slice;
2259 }
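
/*
 * Worked example for the slice formula above (assuming HZ == 1000): if
 * the group's slice works out to 300 jiffies and the chosen sync tree
 * holds 2 of the group's 6 busy queues at that priority, then
 *
 *      slice = 300 * 2 / 6 = 100 jiffies (100ms)
 *
 * before the lower bounds (2 * cfq_slice_idle for sync workloads,
 * CFQ_MIN_TT always) kick in. The ASYNC_WORKLOAD branch additionally
 * clamps by the system wide share of async queues and scales by the
 * cfq_slice[0]/cfq_slice[1] (async/sync) ratio.
 */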
2260
2261 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2262 {
2263         struct cfq_rb_root *st = &cfqd->grp_service_tree;
2264         struct cfq_group *cfqg;
2265
2266         if (RB_EMPTY_ROOT(&st->rb))
2267                 return NULL;
2268         cfqg = cfq_rb_first_group(st);
2269         update_min_vdisktime(st);
2270         return cfqg;
2271 }
2272
2273 static void cfq_choose_cfqg(struct cfq_data *cfqd)
2274 {
2275         struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
2276
2277         cfqd->serving_group = cfqg;
2278
2279         /* Restore the workload type data */
2280         if (cfqg->saved_workload_slice) {
2281                 cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
2282                 cfqd->serving_type = cfqg->saved_workload;
2283                 cfqd->serving_prio = cfqg->saved_serving_prio;
2284         } else
2285                 cfqd->workload_expires = jiffies - 1;
2286
2287         choose_service_tree(cfqd, cfqg);
2288 }
2289
2290 /*
2291  * Select a queue for service. If we have a current active queue,
2292  * check whether to continue servicing it, or retrieve and set a new one.
2293  */
2294 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
2295 {
2296         struct cfq_queue *cfqq, *new_cfqq = NULL;
2297
2298         cfqq = cfqd->active_queue;
2299         if (!cfqq)
2300                 goto new_queue;
2301
2302         if (!cfqd->rq_queued)
2303                 return NULL;
2304
2305         /*
2306          * We were waiting for group to get backlogged. Expire the queue
2307          */
2308         if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
2309                 goto expire;
2310
2311         /*
2312          * The active queue has run out of time, expire it and select new.
2313          */
2314         if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
2315                 /*
2316                  * If slice had not expired at the completion of last request
2317                  * we might not have turned on wait_busy flag. Don't expire
2318                  * the queue yet. Allow the group to get backlogged.
2319                  *
2320                  * The very fact that we have used the slice, that means we
2321                  * have been idling all along on this queue and it should be
2322                  * ok to wait for this request to complete.
2323                  */
2324                 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
2325                     && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2326                         cfqq = NULL;
2327                         goto keep_queue;
2328                 } else
2329                         goto check_group_idle;
2330         }
2331
2332         /*
2333          * The active queue has requests and isn't expired, allow it to
2334          * dispatch.
2335          */
2336         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
2337                 goto keep_queue;
2338
2339         /*
2340          * If another queue has a request waiting within our mean seek
2341          * distance, let it run.  The expire code will check for close
2342          * cooperators and put the close queue at the front of the service
2343          * tree.  If possible, merge the expiring queue with the new cfqq.
2344          */
2345         new_cfqq = cfq_close_cooperator(cfqd, cfqq);
2346         if (new_cfqq) {
2347                 if (!cfqq->new_cfqq)
2348                         cfq_setup_merge(cfqq, new_cfqq);
2349                 goto expire;
2350         }
2351
2352         /*
2353          * No requests pending. If the active queue still has requests in
2354          * flight or is idling for a new request, allow either of these
2355          * conditions to happen (or time out) before selecting a new queue.
2356          */
2357         if (timer_pending(&cfqd->idle_slice_timer)) {
2358                 cfqq = NULL;
2359                 goto keep_queue;
2360         }
2361
2362         /*
2363          * This is a deep seek queue, but the device is much faster than
2364          * the queue can deliver; don't idle.
2365          */
2366         if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
2367             (cfq_cfqq_slice_new(cfqq) ||
2368             (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
2369                 cfq_clear_cfqq_deep(cfqq);
2370                 cfq_clear_cfqq_idle_window(cfqq);
2371         }
2372
2373         if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2374                 cfqq = NULL;
2375                 goto keep_queue;
2376         }
2377
2378         /*
2379          * If group idle is enabled and there are requests dispatched from
2380          * this group, wait for requests to complete.
2381          */
2382 check_group_idle:
2383         if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
2384             cfqq->cfqg->dispatched &&
2385             !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
2386                 cfqq = NULL;
2387                 goto keep_queue;
2388         }
2389
2390 expire:
2391         cfq_slice_expired(cfqd, 0);
2392 new_queue:
2393         /*
2394          * Current queue expired. Check if we have to switch to a new
2395          * service tree
2396          */
2397         if (!new_cfqq)
2398                 cfq_choose_cfqg(cfqd);
2399
2400         cfqq = cfq_set_active_queue(cfqd, new_cfqq);
2401 keep_queue:
2402         return cfqq;
2403 }
2404
2405 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
2406 {
2407         int dispatched = 0;
2408
2409         while (cfqq->next_rq) {
2410                 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
2411                 dispatched++;
2412         }
2413
2414         BUG_ON(!list_empty(&cfqq->fifo));
2415
2416         /* By default cfqq is not expired if it is empty. Do it explicitly */
2417         __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
2418         return dispatched;
2419 }
2420
2421 /*
2422  * Drain our current requests. Used for barriers and when switching
2423  * io schedulers on-the-fly.
2424  */
2425 static int cfq_forced_dispatch(struct cfq_data *cfqd)
2426 {
2427         struct cfq_queue *cfqq;
2428         int dispatched = 0;
2429
2430         /* Expire the timeslice of the current active queue first */
2431         cfq_slice_expired(cfqd, 0);
2432         while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
2433                 __cfq_set_active_queue(cfqd, cfqq);
2434                 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
2435         }
2436
2437         BUG_ON(cfqd->busy_queues);
2438
2439         cfq_log(cfqd, "forced_dispatch=%d", dispatched);
2440         return dispatched;
2441 }
2442
2443 static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
2444         struct cfq_queue *cfqq)
2445 {
2446         /* the queue hasn't finished any request, can't estimate */
2447         if (cfq_cfqq_slice_new(cfqq))
2448                 return true;
2449         if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
2450                 cfqq->slice_end))
2451                 return true;
2452
2453         return false;
2454 }
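
/*
 * Worked example: with cfq_slice_idle == 8 jiffies and 3 requests
 * already dispatched, the slice counts as "used up soon" once fewer
 * than 8 * 3 == 24 jiffies remain before slice_end; each in-flight
 * request is assumed to cost roughly one idle window to retire.
 */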
2455
2456 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2457 {
2458         unsigned int max_dispatch;
2459
2460         /*
2461          * Drain async requests before we start sync IO
2462          */
2463         if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
2464                 return false;
2465
2466         /*
2467          * If this is an async queue and we have sync IO in flight, let it wait
2468          */
2469         if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
2470                 return false;
2471
2472         max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
2473         if (cfq_class_idle(cfqq))
2474                 max_dispatch = 1;
2475
2476         /*
2477          * Does this cfqq already have too much IO in flight?
2478          */
2479         if (cfqq->dispatched >= max_dispatch) {
2480                 bool promote_sync = false;
2481                 /*
2482                  * idle queue must always only have a single IO in flight
2483                  * an idle-class queue must never have more than one IO in flight
2484                 if (cfq_class_idle(cfqq))
2485                         return false;
2486
2487                 /*
2488                  * If there is only one sync queue, we can ignore the async
2489                  * queues and give the sync queue no dispatch limit, since a
2490                  * sync queue can always preempt an async queue anyway;
2491                  * limiting the sync queue makes no sense. This is useful for
2492                  * the aiostress test.
2493                  */
2494                 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
2495                         promote_sync = true;
2496
2497                 /*
2498                  * We have other queues, don't allow more IO from this one
2499                  */
2500                 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
2501                                 !promote_sync)
2502                         return false;
2503
2504                 /*
2505                  * Sole queue user, no limit
2506                  */
2507                 if (cfqd->busy_queues == 1 || promote_sync)
2508                         max_dispatch = -1;
2509                 else
2510                         /*
2511                          * Normally we start throttling cfqq when cfq_quantum/2
2512                          * requests have been dispatched. But we can drive
2513                          * deeper queue depths at the beginning of the slice,
2514                          * subject to the upper limit of cfq_quantum.
2515                          */
2516                         max_dispatch = cfqd->cfq_quantum;
2517         }
2518
2519         /*
2520          * Async queues must wait a bit before being allowed dispatch.
2521          * We also ramp up the dispatch depth gradually for async IO,
2522          * based on the last sync IO we serviced
2523          */
2524         if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
2525                 unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
2526                 unsigned int depth;
2527
2528                 depth = last_sync / cfqd->cfq_slice[1];
2529                 if (!depth && !cfqq->dispatched)
2530                         depth = 1;
2531                 if (depth < max_dispatch)
2532                         max_dispatch = depth;
2533         }
2534
2535         /*
2536          * If we're below the current max, allow a dispatch
2537          */
2538         return cfqq->dispatched < max_dispatch;
2539 }
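
/*
 * Sketch of the async ramp above (assuming HZ == 1000 and the default
 * sync slice cfq_slice[1] == HZ/10): an async queue for which 250ms
 * have passed since cfqd->last_delayed_sync gets depth = 250 / 100 = 2,
 * so at most two async requests are dispatched until more sync-free
 * time has accumulated. A queue with nothing dispatched is always
 * allowed a depth of one.
 */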
2540
2541 /*
2542  * Dispatch a request from cfqq, moving them to the request queue
2543  * dispatch list.
2544  */
2545 static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2546 {
2547         struct request *rq;
2548
2549         BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
2550
2551         if (!cfq_may_dispatch(cfqd, cfqq))
2552                 return false;
2553
2554         /*
2555          * follow expired path, else get first next available
2556          */
2557         rq = cfq_check_fifo(cfqq);
2558         if (!rq)
2559                 rq = cfqq->next_rq;
2560
2561         /*
2562          * insert request into driver dispatch list
2563          */
2564         cfq_dispatch_insert(cfqd->queue, rq);
2565
2566         if (!cfqd->active_cic) {
2567                 struct cfq_io_cq *cic = RQ_CIC(rq);
2568
2569                 atomic_long_inc(&cic->icq.ioc->refcount);
2570                 cfqd->active_cic = cic;
2571         }
2572
2573         return true;
2574 }
2575
2576 /*
2577  * Find the cfqq that we need to service and move a request from that to the
2578  * dispatch list
2579  */
2580 static int cfq_dispatch_requests(struct request_queue *q, int force)
2581 {
2582         struct cfq_data *cfqd = q->elevator->elevator_data;
2583         struct cfq_queue *cfqq;
2584
2585         if (!cfqd->busy_queues)
2586                 return 0;
2587
2588         if (unlikely(force))
2589                 return cfq_forced_dispatch(cfqd);
2590
2591         cfqq = cfq_select_queue(cfqd);
2592         if (!cfqq)
2593                 return 0;
2594
2595         /*
2596          * Dispatch a request from this cfqq, if it is allowed
2597          */
2598         if (!cfq_dispatch_request(cfqd, cfqq))
2599                 return 0;
2600
2601         cfqq->slice_dispatch++;
2602         cfq_clear_cfqq_must_dispatch(cfqq);
2603
2604         /*
2605          * expire an async queue immediately if it has used up its slice. An
2606          * idle-class queue always expires after one dispatch round.
2607          */
2608         if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
2609             cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
2610             cfq_class_idle(cfqq))) {
2611                 cfqq->slice_end = jiffies + 1;
2612                 cfq_slice_expired(cfqd, 0);
2613         }
2614
2615         cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2616         return 1;
2617 }
2618
2619 /*
2620  * task holds one reference to the queue, dropped when task exits. each rq
2621  * in-flight on this queue also holds a reference, dropped when rq is freed.
2622  *
2623  * Each cfq queue took a reference on the parent group. Drop it now.
2624  * queue lock must be held here.
2625  */
2626 static void cfq_put_queue(struct cfq_queue *cfqq)
2627 {
2628         struct cfq_data *cfqd = cfqq->cfqd;
2629         struct cfq_group *cfqg;
2630
2631         BUG_ON(cfqq->ref <= 0);
2632
2633         cfqq->ref--;
2634         if (cfqq->ref)
2635                 return;
2636
2637         cfq_log_cfqq(cfqd, cfqq, "put_queue");
2638         BUG_ON(rb_first(&cfqq->sort_list));
2639         BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
2640         cfqg = cfqq->cfqg;
2641
2642         if (unlikely(cfqd->active_queue == cfqq)) {
2643                 __cfq_slice_expired(cfqd, cfqq, 0);
2644                 cfq_schedule_dispatch(cfqd);
2645         }
2646
2647         BUG_ON(cfq_cfqq_on_rr(cfqq));
2648         kmem_cache_free(cfq_pool, cfqq);
2649         cfq_put_cfqg(cfqg);
2650 }
2651
2652 static void cfq_put_cooperator(struct cfq_queue *cfqq)
2653 {
2654         struct cfq_queue *__cfqq, *next;
2655
2656         /*
2657          * If this queue was scheduled to merge with another queue, be
2658          * sure to drop the reference taken on that queue (and others in
2659          * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
2660          */
2661         __cfqq = cfqq->new_cfqq;
2662         while (__cfqq) {
2663                 if (__cfqq == cfqq) {
2664                         WARN(1, "cfqq->new_cfqq loop detected\n");
2665                         break;
2666                 }
2667                 next = __cfqq->new_cfqq;
2668                 cfq_put_queue(__cfqq);
2669                 __cfqq = next;
2670         }
2671 }
2672
2673 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2674 {
2675         if (unlikely(cfqq == cfqd->active_queue)) {
2676                 __cfq_slice_expired(cfqd, cfqq, 0);
2677                 cfq_schedule_dispatch(cfqd);
2678         }
2679
2680         cfq_put_cooperator(cfqq);
2681
2682         cfq_put_queue(cfqq);
2683 }
2684
2685 static void cfq_init_icq(struct io_cq *icq)
2686 {
2687         struct cfq_io_cq *cic = icq_to_cic(icq);
2688
2689         cic->ttime.last_end_request = jiffies;
2690 }
2691
2692 static void cfq_exit_icq(struct io_cq *icq)
2693 {
2694         struct cfq_io_cq *cic = icq_to_cic(icq);
2695         struct cfq_data *cfqd = cic_to_cfqd(cic);
2696
2697         if (cic->cfqq[BLK_RW_ASYNC]) {
2698                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
2699                 cic->cfqq[BLK_RW_ASYNC] = NULL;
2700         }
2701
2702         if (cic->cfqq[BLK_RW_SYNC]) {
2703                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
2704                 cic->cfqq[BLK_RW_SYNC] = NULL;
2705         }
2706 }
2707
2708 static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
2709 {
2710         struct task_struct *tsk = current;
2711         int ioprio_class;
2712
2713         if (!cfq_cfqq_prio_changed(cfqq))
2714                 return;
2715
2716         ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
2717         switch (ioprio_class) {
2718         default:
2719                 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
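                /* fall through: treat an unknown class as IOPRIO_CLASS_NONE */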
2720         case IOPRIO_CLASS_NONE:
2721                 /*
2722                  * no prio set, inherit CPU scheduling settings
2723                  */
2724                 cfqq->ioprio = task_nice_ioprio(tsk);
2725                 cfqq->ioprio_class = task_nice_ioclass(tsk);
2726                 break;
2727         case IOPRIO_CLASS_RT:
2728                 cfqq->ioprio = task_ioprio(ioc);
2729                 cfqq->ioprio_class = IOPRIO_CLASS_RT;
2730                 break;
2731         case IOPRIO_CLASS_BE:
2732                 cfqq->ioprio = task_ioprio(ioc);
2733                 cfqq->ioprio_class = IOPRIO_CLASS_BE;
2734                 break;
2735         case IOPRIO_CLASS_IDLE:
2736                 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
2737                 cfqq->ioprio = 7;
2738                 cfq_clear_cfqq_idle_window(cfqq);
2739                 break;
2740         }
2741
2742         /*
2743          * keep track of original prio settings in case we have to temporarily
2744          * elevate the priority of this queue
2745          */
2746         cfqq->org_ioprio = cfqq->ioprio;
2747         cfq_clear_cfqq_prio_changed(cfqq);
2748 }
2749
2750 static void changed_ioprio(struct cfq_io_cq *cic)
2751 {
2752         struct cfq_data *cfqd = cic_to_cfqd(cic);
2753         struct cfq_queue *cfqq;
2754
2755         if (unlikely(!cfqd))
2756                 return;
2757
2758         cfqq = cic->cfqq[BLK_RW_ASYNC];
2759         if (cfqq) {
2760                 struct cfq_queue *new_cfqq;
2761                 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->icq.ioc,
2762                                                 GFP_ATOMIC);
2763                 if (new_cfqq) {
2764                         cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
2765                         cfq_put_queue(cfqq);
2766                 }
2767         }
2768
2769         cfqq = cic->cfqq[BLK_RW_SYNC];
2770         if (cfqq)
2771                 cfq_mark_cfqq_prio_changed(cfqq);
2772 }
2773
2774 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2775                           pid_t pid, bool is_sync)
2776 {
2777         RB_CLEAR_NODE(&cfqq->rb_node);
2778         RB_CLEAR_NODE(&cfqq->p_node);
2779         INIT_LIST_HEAD(&cfqq->fifo);
2780
2781         cfqq->ref = 0;
2782         cfqq->cfqd = cfqd;
2783
2784         cfq_mark_cfqq_prio_changed(cfqq);
2785
2786         if (is_sync) {
2787                 if (!cfq_class_idle(cfqq))
2788                         cfq_mark_cfqq_idle_window(cfqq);
2789                 cfq_mark_cfqq_sync(cfqq);
2790         }
2791         cfqq->pid = pid;
2792 }
2793
2794 #ifdef CONFIG_CFQ_GROUP_IOSCHED
2795 static void changed_cgroup(struct cfq_io_cq *cic)
2796 {
2797         struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
2798         struct cfq_data *cfqd = cic_to_cfqd(cic);
2799         struct request_queue *q;
2800
2801         if (unlikely(!cfqd))
2802                 return;
2803
2804         q = cfqd->queue;
2805
2806         if (sync_cfqq) {
2807                 /*
2808                  * Drop reference to sync queue. A new sync queue will be
2809                  * assigned in new group upon arrival of a fresh request.
2810                  */
2811                 cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
2812                 cic_set_cfqq(cic, NULL, 1);
2813                 cfq_put_queue(sync_cfqq);
2814         }
2815 }
2816 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
2817
2818 static struct cfq_queue *
2819 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
2820                      struct io_context *ioc, gfp_t gfp_mask)
2821 {
2822         struct blkio_cgroup *blkcg;
2823         struct cfq_queue *cfqq, *new_cfqq = NULL;
2824         struct cfq_io_cq *cic;
2825         struct cfq_group *cfqg;
2826
2827 retry:
2828         rcu_read_lock();
2829
2830         blkcg = task_blkio_cgroup(current);
2831
2832         cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
2833
2834         cic = cfq_cic_lookup(cfqd, ioc);
2835         /* cic always exists here */
2836         cfqq = cic_to_cfqq(cic, is_sync);
2837
2838         /*
2839          * Always try a new alloc if we fell back to the OOM cfqq
2840          * originally, since it should just be a temporary situation.
2841          */
2842         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2843                 cfqq = NULL;
2844                 if (new_cfqq) {
2845                         cfqq = new_cfqq;
2846                         new_cfqq = NULL;
2847                 } else if (gfp_mask & __GFP_WAIT) {
2848                         rcu_read_unlock();
2849                         spin_unlock_irq(cfqd->queue->queue_lock);
2850                         new_cfqq = kmem_cache_alloc_node(cfq_pool,
2851                                         gfp_mask | __GFP_ZERO,
2852                                         cfqd->queue->node);
2853                         spin_lock_irq(cfqd->queue->queue_lock);
2854                         if (new_cfqq)
2855                                 goto retry;
2856                 } else {
2857                         cfqq = kmem_cache_alloc_node(cfq_pool,
2858                                         gfp_mask | __GFP_ZERO,
2859                                         cfqd->queue->node);
2860                 }
2861
2862                 if (cfqq) {
2863                         cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
2864                         cfq_init_prio_data(cfqq, ioc);
2865                         cfq_link_cfqq_cfqg(cfqq, cfqg);
2866                         cfq_log_cfqq(cfqd, cfqq, "alloced");
2867                 } else
2868                         cfqq = &cfqd->oom_cfqq;
2869         }
2870
2871         if (new_cfqq)
2872                 kmem_cache_free(cfq_pool, new_cfqq);
2873
2874         rcu_read_unlock();
2875         return cfqq;
2876 }
2877
2878 static struct cfq_queue **
2879 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
2880 {
2881         switch (ioprio_class) {
2882         case IOPRIO_CLASS_RT:
2883                 return &cfqd->async_cfqq[0][ioprio];
2884         case IOPRIO_CLASS_BE:
2885                 return &cfqd->async_cfqq[1][ioprio];
2886         case IOPRIO_CLASS_IDLE:
2887                 return &cfqd->async_idle_cfqq;
2888         default:
2889                 BUG();
2890         }
2891 }
2892
2893 static struct cfq_queue *
2894 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
2895               gfp_t gfp_mask)
2896 {
2897         const int ioprio = task_ioprio(ioc);
2898         const int ioprio_class = task_ioprio_class(ioc);
2899         struct cfq_queue **async_cfqq = NULL;
2900         struct cfq_queue *cfqq = NULL;
2901
2902         if (!is_sync) {
2903                 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
2904                 cfqq = *async_cfqq;
2905         }
2906
2907         if (!cfqq)
2908                 cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
2909
2910         /*
2911          * pin the queue now that it's allocated, scheduler exit will prune it
2912          */
2913         if (!is_sync && !(*async_cfqq)) {
2914                 cfqq->ref++;
2915                 *async_cfqq = cfqq;
2916         }
2917
2918         cfqq->ref++;
2919         return cfqq;
2920 }
2921
2922 static void
2923 __cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
2924 {
2925         unsigned long elapsed = jiffies - ttime->last_end_request;
2926         elapsed = min(elapsed, 2UL * slice_idle);
2927
2928         ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
2929         ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
2930         ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
2931 }
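
/*
 * Sketch of the fixed-point EWMA above: ttime_samples decays as
 * s = (7*s + 256) / 8 and converges to 256, so 256 stands for one
 * full sample of weight. A steady 4-jiffy think time then drives
 * ttime_total toward 256 * 4 and ttime_mean toward 4. The
 * min(elapsed, 2 * slice_idle) clamp keeps a single long pause from
 * dominating the average.
 */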
2932
2933 static void
2934 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2935                         struct cfq_io_cq *cic)
2936 {
2937         if (cfq_cfqq_sync(cfqq)) {
2938                 __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
2939                 __cfq_update_io_thinktime(&cfqq->service_tree->ttime,
2940                         cfqd->cfq_slice_idle);
2941         }
2942 #ifdef CONFIG_CFQ_GROUP_IOSCHED
2943         __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
2944 #endif
2945 }
2946
2947 static void
2948 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2949                        struct request *rq)
2950 {
2951         sector_t sdist = 0;
2952         sector_t n_sec = blk_rq_sectors(rq);
2953         if (cfqq->last_request_pos) {
2954                 if (cfqq->last_request_pos < blk_rq_pos(rq))
2955                         sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
2956                 else
2957                         sdist = cfqq->last_request_pos - blk_rq_pos(rq);
2958         }
2959
2960         cfqq->seek_history <<= 1;
2961         if (blk_queue_nonrot(cfqd->queue))
2962                 cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
2963         else
2964                 cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
2965 }
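
/*
 * Note: seek_history is a 32-bit sliding window with one bit per
 * sampled request; a set bit records a "seeky" step (a distance beyond
 * CFQQ_SEEK_THR, or a small request on non-rotational storage).
 * CFQQ_SEEKY() fires once more than 32/8 == 4 of the last 32 samples
 * are seeky: a history of 0x1f (five set bits) marks the queue seeky,
 * while 0x0f (four) does not.
 */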
2966
2967 /*
2968  * Disable idle window if the process thinks too long or seeks so much that
2969  * it doesn't matter
2970  */
2971 static void
2972 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2973                        struct cfq_io_cq *cic)
2974 {
2975         int old_idle, enable_idle;
2976
2977         /*
2978          * Don't idle for async or idle io prio class
2979          */
2980         if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
2981                 return;
2982
2983         enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
2984
2985         if (cfqq->queued[0] + cfqq->queued[1] >= 4)
2986                 cfq_mark_cfqq_deep(cfqq);
2987
2988         if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
2989                 enable_idle = 0;
2990         else if (!atomic_read(&cic->icq.ioc->nr_tasks) ||
2991                  !cfqd->cfq_slice_idle ||
2992                  (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
2993                 enable_idle = 0;
2994         else if (sample_valid(cic->ttime.ttime_samples)) {
2995                 if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
2996                         enable_idle = 0;
2997                 else
2998                         enable_idle = 1;
2999         }
3000
3001         if (old_idle != enable_idle) {
3002                 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3003                 if (enable_idle)
3004                         cfq_mark_cfqq_idle_window(cfqq);
3005                 else
3006                         cfq_clear_cfqq_idle_window(cfqq);
3007         }
3008 }
3009
3010 /*
3011  * Check if new_cfqq should preempt the currently active queue. Returns false
3012  * for no (or if we aren't sure); true will cause a preempt.
3013  */
3014 static bool
3015 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3016                    struct request *rq)
3017 {
3018         struct cfq_queue *cfqq;
3019
3020         cfqq = cfqd->active_queue;
3021         if (!cfqq)
3022                 return false;
3023
3024         if (cfq_class_idle(new_cfqq))
3025                 return false;
3026
3027         if (cfq_class_idle(cfqq))
3028                 return true;
3029
3030         /*
3031          * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3032          */
3033         if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3034                 return false;
3035
3036         /*
3037          * if the new request is sync, but the currently running queue is
3038          * not, let the sync request have priority.
3039          */
3040         if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
3041                 return true;
3042
3043         if (new_cfqq->cfqg != cfqq->cfqg)
3044                 return false;
3045
3046         if (cfq_slice_used(cfqq))
3047                 return true;
3048
3049         /* Allow preemption only if we are idling on sync-noidle tree */
3050         if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
3051             cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3052             new_cfqq->service_tree->count == 2 &&
3053             RB_EMPTY_ROOT(&cfqq->sort_list))
3054                 return true;
3055
3056         /*
3057          * So both queues are sync. Let the new request get disk time if
3058          * it's a metadata request and the current queue is doing regular IO.
3059          */
3060         if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
3061                 return true;
3062
3063         /*
3064          * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
3065          */
3066         if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3067                 return true;
3068
3069         /* The active queue is empty but should not be idling; allow preemption */
3070         if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
3071                 return true;
3072
3073         if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
3074                 return false;
3075
3076         /*
3077          * if this request is as-good as one we would expect from the
3078          * current cfqq, let it preempt
3079          */
3080         if (cfq_rq_close(cfqd, cfqq, rq))
3081                 return true;
3082
3083         return false;
3084 }
3085
3086 /*
3087  * cfqq preempts the active queue. if we allowed preempt with no slice left,
3088  * let it have half of its nominal slice.
3089  */
3090 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3091 {
3092         enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
3093
3094         cfq_log_cfqq(cfqd, cfqq, "preempt");
3095         cfq_slice_expired(cfqd, 1);
3096
3097         /*
3098          * If the workload type changed, don't save the slice; otherwise the
3099          * saved workload would be restored and the preempt wouldn't happen
3100          */
3101         if (old_type != cfqq_type(cfqq))
3102                 cfqq->cfqg->saved_workload_slice = 0;
3103
3104         /*
3105          * Put the new queue at the front of the current list,
3106          * so we know that it will be selected next.
3107          */
3108         BUG_ON(!cfq_cfqq_on_rr(cfqq));
3109
3110         cfq_service_tree_add(cfqd, cfqq, 1);
3111
3112         cfqq->slice_end = 0;
3113         cfq_mark_cfqq_slice_new(cfqq);
3114 }
3115
3116 /*
3117  * Called when a new fs request (rq) is added (to cfqq). Check if there's
3118  * something we should do about it
3119  */
3120 static void
3121 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3122                 struct request *rq)
3123 {
3124         struct cfq_io_cq *cic = RQ_CIC(rq);
3125
3126         cfqd->rq_queued++;
3127         if (rq->cmd_flags & REQ_PRIO)
3128                 cfqq->prio_pending++;
3129
3130         cfq_update_io_thinktime(cfqd, cfqq, cic);
3131         cfq_update_io_seektime(cfqd, cfqq, rq);
3132         cfq_update_idle_window(cfqd, cfqq, cic);
3133
3134         cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
3135
3136         if (cfqq == cfqd->active_queue) {
3137                 /*
3138                  * Remember that we saw a request from this process, but
3139                  * don't start queuing just yet. Otherwise we risk seeing lots
3140                  * of tiny requests, because we disrupt the normal plugging
3141                  * and merging. If the request is already larger than a single
3142                  * page, let it rip immediately. For that case we assume that
3143                  * merging is already done. Ditto for a busy system that
3144                  * has other work pending: don't risk waiting for the idle
3145                  * timer unplug before continuing to work.
3146                  */
3147                 if (cfq_cfqq_wait_request(cfqq)) {
3148                         if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3149                             cfqd->busy_queues > 1) {
3150                                 cfq_del_timer(cfqd, cfqq);
3151                                 cfq_clear_cfqq_wait_request(cfqq);
3152                                 __blk_run_queue(cfqd->queue);
3153                         } else {
3154                                 cfq_blkiocg_update_idle_time_stats(
3155                                                 &cfqq->cfqg->blkg);
3156                                 cfq_mark_cfqq_must_dispatch(cfqq);
3157                         }
3158                 }
3159         } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
3160                 /*
3161                  * not the active queue - expire the current slice if it is
3162                  * idle and has expired its mean thinktime, or if this new queue
3163                  * has some old slice time left and is of higher priority, or if
3164                  * this new queue is RT and the current one is BE
3165                  */
3166                 cfq_preempt_queue(cfqd, cfqq);
3167                 __blk_run_queue(cfqd->queue);
3168         }
3169 }
3170
3171 static void cfq_insert_request(struct request_queue *q, struct request *rq)
3172 {
3173         struct cfq_data *cfqd = q->elevator->elevator_data;
3174         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3175
3176         cfq_log_cfqq(cfqd, cfqq, "insert_request");
3177         cfq_init_prio_data(cfqq, RQ_CIC(rq)->icq.ioc);
3178
3179         rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
3180         list_add_tail(&rq->queuelist, &cfqq->fifo);
3181         cfq_add_rq_rb(rq);
3182         cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
3183                         &cfqd->serving_group->blkg, rq_data_dir(rq),
3184                         rq_is_sync(rq));
3185         cfq_rq_enqueued(cfqd, cfqq, rq);
3186 }
3187
3188 /*
3189  * Update hw_tag based on peak queue depth over 50 samples under
3190  * sufficient load.
3191  */
3192 static void cfq_update_hw_tag(struct cfq_data *cfqd)
3193 {
3194         struct cfq_queue *cfqq = cfqd->active_queue;
3195
3196         if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3197                 cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
3198
3199         if (cfqd->hw_tag == 1)
3200                 return;
3201
3202         if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
3203             cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
3204                 return;
3205
3206         /*
3207          * If the active queue doesn't have enough requests and can idle, cfq
3208          * might not dispatch sufficient requests to hardware. Don't zero hw_tag
3209          * in this case.
3210          */
3211         if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3212             cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
3213             CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
3214                 return;
3215
3216         if (cfqd->hw_tag_samples++ < 50)
3217                 return;
3218
3219         if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
3220                 cfqd->hw_tag = 1;
3221         else
3222                 cfqd->hw_tag = 0;
3223 }
3224
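/*
 * Decide whether to keep idling ("wait busy") on a queue whose last request
 * just completed.  A sketch of the intent, inferred from the checks below and
 * the group-idle logic: when this is the only queue in its group and another
 * request is expected within the remaining slice, holding the slice keeps the
 * group on the service tree so it does not lose its share of disk time merely
 * because it is momentarily empty.
 */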
3225 static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3226 {
3227         struct cfq_io_cq *cic = cfqd->active_cic;
3228
3229         /* If the queue already has requests, don't wait */
3230         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3231                 return false;
3232
3233         /* If there are other queues in the group, don't wait */
3234         if (cfqq->cfqg->nr_cfqq > 1)
3235                 return false;
3236
3237         /* the only queue in the group, but think time is big */
3238         if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
3239                 return false;
3240
3241         if (cfq_slice_used(cfqq))
3242                 return true;
3243
3244         /* if slice left is less than think time, wait busy */
3245         if (cic && sample_valid(cic->ttime.ttime_samples)
3246             && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
3247                 return true;
3248
3249         /*
3250          * If the think time is less than a jiffy, then ttime_mean=0 and the check
3251          * above will not be true. It might happen that the slice has not expired
3252          * yet but will expire soon (4-5 ns) during select_queue(). To cover the
3253          * case where think time is less than a jiffy, mark the queue wait
3254          * busy if only 1 jiffy is left in the slice.
3255          */
3256         if (cfqq->slice_end - jiffies == 1)
3257                 return true;
3258
3259         return false;
3260 }
3261
3262 static void cfq_completed_request(struct request_queue *q, struct request *rq)
3263 {
3264         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3265         struct cfq_data *cfqd = cfqq->cfqd;
3266         const int sync = rq_is_sync(rq);
3267         unsigned long now;
3268
3269         now = jiffies;
3270         cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
3271                      !!(rq->cmd_flags & REQ_NOIDLE));
3272
3273         cfq_update_hw_tag(cfqd);
3274
3275         WARN_ON(!cfqd->rq_in_driver);
3276         WARN_ON(!cfqq->dispatched);
3277         cfqd->rq_in_driver--;
3278         cfqq->dispatched--;
3279         (RQ_CFQG(rq))->dispatched--;
3280         cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
3281                         rq_start_time_ns(rq), rq_io_start_time_ns(rq),
3282                         rq_data_dir(rq), rq_is_sync(rq));
3283
3284         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
3285
3286         if (sync) {
3287                 struct cfq_rb_root *service_tree;
3288
3289                 RQ_CIC(rq)->ttime.last_end_request = now;
3290
3291                 if (cfq_cfqq_on_rr(cfqq))
3292                         service_tree = cfqq->service_tree;
3293                 else
3294                         service_tree = service_tree_for(cfqq->cfqg,
3295                                 cfqq_prio(cfqq), cfqq_type(cfqq));
3296                 service_tree->ttime.last_end_request = now;
3297                 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
3298                         cfqd->last_delayed_sync = now;
3299         }
3300
3301 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3302         cfqq->cfqg->ttime.last_end_request = now;
3303 #endif
3304
3305         /*
3306          * If this is the active queue, check if it needs to be expired,
3307          * or if we want to idle in case it has no pending requests.
3308          */
3309         if (cfqd->active_queue == cfqq) {
3310                 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
3311
3312                 if (cfq_cfqq_slice_new(cfqq)) {
3313                         cfq_set_prio_slice(cfqd, cfqq);
3314                         cfq_clear_cfqq_slice_new(cfqq);
3315                 }
3316
3317                 /*
3318                  * Should we wait for the next request to come in before we expire
3319                  * the queue?
3320                  */
3321                 if (cfq_should_wait_busy(cfqd, cfqq)) {
3322                         unsigned long extend_sl = cfqd->cfq_slice_idle;
3323                         if (!cfqd->cfq_slice_idle)
3324                                 extend_sl = cfqd->cfq_group_idle;
3325                         cfqq->slice_end = jiffies + extend_sl;
3326                         cfq_mark_cfqq_wait_busy(cfqq);
3327                         cfq_log_cfqq(cfqd, cfqq, "will busy wait");
3328                 }
3329
3330                 /*
3331                  * Idling is not enabled on:
3332                  * - expired queues
3333                  * - idle-priority queues
3334                  * - async queues
3335                  * - queues with still some requests queued
3336                  * - when there is a close cooperator
3337                  */
3338                 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
3339                         cfq_slice_expired(cfqd, 1);
3340                 else if (sync && cfqq_empty &&
3341                          !cfq_close_cooperator(cfqd, cfqq)) {
3342                         cfq_arm_slice_timer(cfqd);
3343                 }
3344         }
3345
3346         if (!cfqd->rq_in_driver)
3347                 cfq_schedule_dispatch(cfqd);
3348 }
3349
3350 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
3351 {
3352         if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3353                 cfq_mark_cfqq_must_alloc_slice(cfqq);
3354                 return ELV_MQUEUE_MUST;
3355         }
3356
3357         return ELV_MQUEUE_MAY;
3358 }
3359
3360 static int cfq_may_queue(struct request_queue *q, int rw)
3361 {
3362         struct cfq_data *cfqd = q->elevator->elevator_data;
3363         struct task_struct *tsk = current;
3364         struct cfq_io_cq *cic;
3365         struct cfq_queue *cfqq;
3366
3367         /*
3368          * don't force setup of a queue from here, as a call to may_queue
3369          * does not necessarily imply that a request actually will be queued.
3370          * So just look up a possibly existing queue, or return 'may queue'
3371          * if that fails.
3372          */
3373         cic = cfq_cic_lookup(cfqd, tsk->io_context);
3374         if (!cic)
3375                 return ELV_MQUEUE_MAY;
3376
3377         cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
3378         if (cfqq) {
3379                 cfq_init_prio_data(cfqq, cic->icq.ioc);
3380
3381                 return __cfq_may_queue(cfqq);
3382         }
3383
3384         return ELV_MQUEUE_MAY;
3385 }
3386
3387 /*
3388  * queue lock held here
3389  */
3390 static void cfq_put_request(struct request *rq)
3391 {
3392         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3393
3394         if (cfqq) {
3395                 const int rw = rq_data_dir(rq);
3396
3397                 BUG_ON(!cfqq->allocated[rw]);
3398                 cfqq->allocated[rw]--;
3399
3400                 /* Put down rq reference on cfqg */
3401                 cfq_put_cfqg(RQ_CFQG(rq));
3402                 rq->elv.priv[0] = NULL;
3403                 rq->elv.priv[1] = NULL;
3404
3405                 cfq_put_queue(cfqq);
3406         }
3407 }
3408
3409 static struct cfq_queue *
3410 cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
3411                 struct cfq_queue *cfqq)
3412 {
3413         cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
3414         cic_set_cfqq(cic, cfqq->new_cfqq, 1);
3415         cfq_mark_cfqq_coop(cfqq->new_cfqq);
3416         cfq_put_queue(cfqq);
3417         return cic_to_cfqq(cic, 1);
3418 }
3419
3420 /*
3421  * Returns the old cfqq if this was the last process referring to it, or
3422  * NULL if a new cfqq should be allocated instead.
3423  */
3424 static struct cfq_queue *
3425 split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
3426 {
3427         if (cfqq_process_refs(cfqq) == 1) {
3428                 cfqq->pid = current->pid;
3429                 cfq_clear_cfqq_coop(cfqq);
3430                 cfq_clear_cfqq_split_coop(cfqq);
3431                 return cfqq;
3432         }
3433
3434         cic_set_cfqq(cic, NULL, 1);
3435
3436         cfq_put_cooperator(cfqq);
3437
3438         cfq_put_queue(cfqq);
3439         return NULL;
3440 }
3441 /*
3442  * Allocate cfq data structures associated with this request.
3443  */
3444 static int
3445 cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
3446 {
3447         struct cfq_data *cfqd = q->elevator->elevator_data;
3448         struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
3449         const int rw = rq_data_dir(rq);
3450         const bool is_sync = rq_is_sync(rq);
3451         struct cfq_queue *cfqq;
3452         unsigned int changed;
3453
3454         might_sleep_if(gfp_mask & __GFP_WAIT);
3455
3456         spin_lock_irq(q->queue_lock);
3457
3458         /* handle changed notifications */
3459         changed = icq_get_changed(&cic->icq);
3460         if (unlikely(changed & ICQ_IOPRIO_CHANGED))
3461                 changed_ioprio(cic);
3462 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3463         if (unlikely(changed & ICQ_CGROUP_CHANGED))
3464                 changed_cgroup(cic);
3465 #endif
3466
3467 new_queue:
3468         cfqq = cic_to_cfqq(cic, is_sync);
3469         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3470                 cfqq = cfq_get_queue(cfqd, is_sync, cic->icq.ioc, gfp_mask);
3471                 cic_set_cfqq(cic, cfqq, is_sync);
3472         } else {
3473                 /*
3474                  * If the queue was seeky for too long, break it apart.
3475                  */
3476                 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
3477                         cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
3478                         cfqq = split_cfqq(cic, cfqq);
3479                         if (!cfqq)
3480                                 goto new_queue;
3481                 }
3482
3483                 /*
3484                  * Check to see if this queue is scheduled to merge with
3485                  * another, closely cooperating queue.  The merging of
3486                  * queues happens here as it must be done in process context.
3487                  * The reference on new_cfqq was taken in merge_cfqqs.
3488                  */
3489                 if (cfqq->new_cfqq)
3490                         cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
3491         }
3492
3493         cfqq->allocated[rw]++;
3494
3495         cfqq->ref++;
3496         rq->elv.priv[0] = cfqq;
3497         rq->elv.priv[1] = cfq_ref_get_cfqg(cfqq->cfqg);
3498         spin_unlock_irq(q->queue_lock);
3499         return 0;
3500 }
3501
3502 static void cfq_kick_queue(struct work_struct *work)
3503 {
3504         struct cfq_data *cfqd =
3505                 container_of(work, struct cfq_data, unplug_work);
3506         struct request_queue *q = cfqd->queue;
3507
3508         spin_lock_irq(q->queue_lock);
3509         __blk_run_queue(cfqd->queue);
3510         spin_unlock_irq(q->queue_lock);
3511 }
3512
3513 /*
3514  * Timer running if the active_queue is currently idling inside its time slice
3515  */
3516 static void cfq_idle_slice_timer(unsigned long data)
3517 {
3518         struct cfq_data *cfqd = (struct cfq_data *) data;
3519         struct cfq_queue *cfqq;
3520         unsigned long flags;
3521         int timed_out = 1;
3522
3523         cfq_log(cfqd, "idle timer fired");
3524
3525         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3526
3527         cfqq = cfqd->active_queue;
3528         if (cfqq) {
3529                 timed_out = 0;
3530
3531                 /*
3532                  * We saw a request before the queue expired, let it through
3533                  */
3534                 if (cfq_cfqq_must_dispatch(cfqq))
3535                         goto out_kick;
3536
3537                 /*
3538                  * expired
3539                  */
3540                 if (cfq_slice_used(cfqq))
3541                         goto expire;
3542
3543                 /*
3544                  * only expire and reinvoke the request handler if there are
3545                  * other queues with pending requests
3546                  */
3547                 if (!cfqd->busy_queues)
3548                         goto out_cont;
3549
3550                 /*
3551                  * not expired and it has a request pending, let it dispatch
3552                  */
3553                 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3554                         goto out_kick;
3555
3556                 /*
3557                  * The queue-depth ("deep") flag is only cleared when idling didn't succeed
3558                  */
3559                 cfq_clear_cfqq_deep(cfqq);
3560         }
3561 expire:
3562         cfq_slice_expired(cfqd, timed_out);
3563 out_kick:
3564         cfq_schedule_dispatch(cfqd);
3565 out_cont:
3566         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3567 }
3568
3569 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
3570 {
3571         del_timer_sync(&cfqd->idle_slice_timer);
3572         cancel_work_sync(&cfqd->unplug_work);
3573 }
3574
3575 static void cfq_put_async_queues(struct cfq_data *cfqd)
3576 {
3577         int i;
3578
3579         for (i = 0; i < IOPRIO_BE_NR; i++) {
3580                 if (cfqd->async_cfqq[0][i])
3581                         cfq_put_queue(cfqd->async_cfqq[0][i]);
3582                 if (cfqd->async_cfqq[1][i])
3583                         cfq_put_queue(cfqd->async_cfqq[1][i]);
3584         }
3585
3586         if (cfqd->async_idle_cfqq)
3587                 cfq_put_queue(cfqd->async_idle_cfqq);
3588 }
3589
3590 static void cfq_exit_queue(struct elevator_queue *e)
3591 {
3592         struct cfq_data *cfqd = e->elevator_data;
3593         struct request_queue *q = cfqd->queue;
3594         bool wait = false;
3595
3596         cfq_shutdown_timer_wq(cfqd);
3597
3598         spin_lock_irq(q->queue_lock);
3599
3600         if (cfqd->active_queue)
3601                 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
3602
3603         cfq_put_async_queues(cfqd);
3604         cfq_release_cfq_groups(cfqd);
3605
3606         /*
3607          * If there are groups which we could not unlink from the blkcg list,
3608          * wait for an RCU grace period for them to be freed.
3609          */
3610         if (cfqd->nr_blkcg_linked_grps)
3611                 wait = true;
3612
3613         spin_unlock_irq(q->queue_lock);
3614
3615         cfq_shutdown_timer_wq(cfqd);
3616
3617         /*
3618          * Wait for cfqg->blkg->key accessors to exit their grace periods.
3619          * Do this wait only if there are other unlinked groups out
3620          * there. This can happen if the cgroup deletion path claimed
3621          * responsibility for cleaning up a group before the queue cleanup code
3622          * got to the group.
3623          *
3624          * Do not call synchronize_rcu() unconditionally as there are drivers
3625          * which create/delete request queues hundreds of times during scan/boot
3626          * and synchronize_rcu() can take significant time and slow down boot.
3627          */
3628         if (wait)
3629                 synchronize_rcu();
3630
3631 #ifndef CONFIG_CFQ_GROUP_IOSCHED
3632         kfree(cfqd->root_group);
3633 #endif
3634         kfree(cfqd);
3635 }
3636
3637 static int cfq_init_queue(struct request_queue *q)
3638 {
3639         struct cfq_data *cfqd;
3640         struct blkio_group *blkg __maybe_unused;
3641         int i;
3642
3643         cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
3644         if (!cfqd)
3645                 return -ENOMEM;
3646
3647         cfqd->queue = q;
3648         q->elevator->elevator_data = cfqd;
3649
3650         /* Init root service tree */
3651         cfqd->grp_service_tree = CFQ_RB_ROOT;
3652
3653         /* Init root group and prefer root group over other groups by default */
3654 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3655         rcu_read_lock();
3656         spin_lock_irq(q->queue_lock);
3657
3658         blkg = blkg_lookup_create(&blkio_root_cgroup, q, BLKIO_POLICY_PROP,
3659                                   true);
3660         if (!IS_ERR(blkg))
3661                 cfqd->root_group = cfqg_of_blkg(blkg);
3662
3663         spin_unlock_irq(q->queue_lock);
3664         rcu_read_unlock();
3665 #else
3666         cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
3667                                         GFP_KERNEL, cfqd->queue->node);
3668         if (cfqd->root_group)
3669                 cfq_init_cfqg_base(cfqd->root_group);
3670 #endif
3671         if (!cfqd->root_group) {
3672                 kfree(cfqd);
3673                 return -ENOMEM;
3674         }
3675
3676         cfqd->root_group->weight = 2*BLKIO_WEIGHT_DEFAULT;
3677
3678         /*
3679          * Not strictly needed (since RB_ROOT just clears the node and we
3680          * zeroed cfqd on alloc), but better be safe in case someone decides
3681          * to add magic to the rb code
3682          */
3683         for (i = 0; i < CFQ_PRIO_LISTS; i++)
3684                 cfqd->prio_trees[i] = RB_ROOT;
3685
3686         /*
3687          * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
3688          * Grab a permanent reference to it, so that the normal code flow
3689          * will not attempt to free it.  oom_cfqq is linked to root_group
3690          * but shouldn't hold a reference as it'll never be unlinked.  Lose
3691          * the reference from linking right away.
3692          */
3693         cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
3694         cfqd->oom_cfqq.ref++;
3695         cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
3696         cfq_put_cfqg(cfqd->root_group);
3697
3698         init_timer(&cfqd->idle_slice_timer);
3699         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
3700         cfqd->idle_slice_timer.data = (unsigned long) cfqd;
3701
3702         INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
3703
3704         cfqd->cfq_quantum = cfq_quantum;
3705         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
3706         cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
3707         cfqd->cfq_back_max = cfq_back_max;
3708         cfqd->cfq_back_penalty = cfq_back_penalty;
3709         cfqd->cfq_slice[0] = cfq_slice_async;
3710         cfqd->cfq_slice[1] = cfq_slice_sync;
3711         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
3712         cfqd->cfq_slice_idle = cfq_slice_idle;
3713         cfqd->cfq_group_idle = cfq_group_idle;
3714         cfqd->cfq_latency = 1;
3715         cfqd->hw_tag = -1;
3716         /*
3717          * we optimistically start assuming sync ops weren't delayed in the last
3718          * second, in order to have a larger depth for async operations.
3719          */
3720         cfqd->last_delayed_sync = jiffies - HZ;
3721         return 0;
3722 }
3723
3724 /*
3725  * sysfs parts below -->
3726  */
3727 static ssize_t
3728 cfq_var_show(unsigned int var, char *page)
3729 {
3730         return sprintf(page, "%d\n", var);
3731 }
3732
3733 static ssize_t
3734 cfq_var_store(unsigned int *var, const char *page, size_t count)
3735 {
3736         char *p = (char *) page;
3737
3738         *var = simple_strtoul(p, &p, 10);
3739         return count;
3740 }
3741
3742 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
3743 static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
3744 {                                                                       \
3745         struct cfq_data *cfqd = e->elevator_data;                       \
3746         unsigned int __data = __VAR;                                    \
3747         if (__CONV)                                                     \
3748                 __data = jiffies_to_msecs(__data);                      \
3749         return cfq_var_show(__data, (page));                            \
3750 }
3751 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
3752 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
3753 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
3754 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
3755 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
3756 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
3757 SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
3758 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
3759 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
3760 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
3761 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
3762 #undef SHOW_FUNCTION
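/*
 * Illustration (not part of the build): the SHOW_FUNCTION() uses above expand
 * mechanically.  For example, SHOW_FUNCTION(cfq_slice_idle_show,
 * cfqd->cfq_slice_idle, 1) becomes roughly:
 *
 *	static ssize_t cfq_slice_idle_show(struct elevator_queue *e, char *page)
 *	{
 *		struct cfq_data *cfqd = e->elevator_data;
 *		unsigned int __data = cfqd->cfq_slice_idle;
 *		if (1)
 *			__data = jiffies_to_msecs(__data);
 *		return cfq_var_show(__data, (page));
 *	}
 *
 * i.e. jiffies-based tunables are reported to userspace in milliseconds.
 */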
3763
3764 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
3765 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
3766 {                                                                       \
3767         struct cfq_data *cfqd = e->elevator_data;                       \
3768         unsigned int __data;                                            \
3769         int ret = cfq_var_store(&__data, (page), count);                \
3770         if (__data < (MIN))                                             \
3771                 __data = (MIN);                                         \
3772         else if (__data > (MAX))                                        \
3773                 __data = (MAX);                                         \
3774         if (__CONV)                                                     \
3775                 *(__PTR) = msecs_to_jiffies(__data);                    \
3776         else                                                            \
3777                 *(__PTR) = __data;                                      \
3778         return ret;                                                     \
3779 }
3780 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
3781 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
3782                 UINT_MAX, 1);
3783 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
3784                 UINT_MAX, 1);
3785 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
3786 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
3787                 UINT_MAX, 0);
3788 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
3789 STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
3790 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
3791 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
3792 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
3793                 UINT_MAX, 0);
3794 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
3795 #undef STORE_FUNCTION
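/*
 * Likewise for STORE_FUNCTION() (illustration only): STORE_FUNCTION(
 * cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1) expands to
 * roughly:
 *
 *	static ssize_t cfq_slice_idle_store(struct elevator_queue *e,
 *					    const char *page, size_t count)
 *	{
 *		struct cfq_data *cfqd = e->elevator_data;
 *		unsigned int __data;
 *		int ret = cfq_var_store(&__data, (page), count);
 *		if (__data < (0))
 *			__data = (0);
 *		else if (__data > (UINT_MAX))
 *			__data = (UINT_MAX);
 *		if (1)
 *			*(&cfqd->cfq_slice_idle) = msecs_to_jiffies(__data);
 *		else
 *			*(&cfqd->cfq_slice_idle) = __data;
 *		return ret;
 *	}
 *
 * so values written in milliseconds are clamped to [MIN, MAX] and converted
 * back to jiffies before being stored.
 */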
3796
3797 #define CFQ_ATTR(name) \
3798         __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
3799
3800 static struct elv_fs_entry cfq_attrs[] = {
3801         CFQ_ATTR(quantum),
3802         CFQ_ATTR(fifo_expire_sync),
3803         CFQ_ATTR(fifo_expire_async),
3804         CFQ_ATTR(back_seek_max),
3805         CFQ_ATTR(back_seek_penalty),
3806         CFQ_ATTR(slice_sync),
3807         CFQ_ATTR(slice_async),
3808         CFQ_ATTR(slice_async_rq),
3809         CFQ_ATTR(slice_idle),
3810         CFQ_ATTR(group_idle),
3811         CFQ_ATTR(low_latency),
3812         __ATTR_NULL
3813 };
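/*
 * These attributes appear under sysfs once cfq is the active scheduler for a
 * queue, e.g. (the device name "sda" is illustrative):
 *
 *	/sys/block/sda/queue/iosched/slice_idle
 *	/sys/block/sda/queue/iosched/quantum
 *
 * Reads go through the _show helpers above, writes through the _store helpers.
 */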
3814
3815 static struct elevator_type iosched_cfq = {
3816         .ops = {
3817                 .elevator_merge_fn =            cfq_merge,
3818                 .elevator_merged_fn =           cfq_merged_request,
3819                 .elevator_merge_req_fn =        cfq_merged_requests,
3820                 .elevator_allow_merge_fn =      cfq_allow_merge,
3821                 .elevator_bio_merged_fn =       cfq_bio_merged,
3822                 .elevator_dispatch_fn =         cfq_dispatch_requests,
3823                 .elevator_add_req_fn =          cfq_insert_request,
3824                 .elevator_activate_req_fn =     cfq_activate_request,
3825                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
3826                 .elevator_completed_req_fn =    cfq_completed_request,
3827                 .elevator_former_req_fn =       elv_rb_former_request,
3828                 .elevator_latter_req_fn =       elv_rb_latter_request,
3829                 .elevator_init_icq_fn =         cfq_init_icq,
3830                 .elevator_exit_icq_fn =         cfq_exit_icq,
3831                 .elevator_set_req_fn =          cfq_set_request,
3832                 .elevator_put_req_fn =          cfq_put_request,
3833                 .elevator_may_queue_fn =        cfq_may_queue,
3834                 .elevator_init_fn =             cfq_init_queue,
3835                 .elevator_exit_fn =             cfq_exit_queue,
3836         },
3837         .icq_size       =       sizeof(struct cfq_io_cq),
3838         .icq_align      =       __alignof__(struct cfq_io_cq),
3839         .elevator_attrs =       cfq_attrs,
3840         .elevator_name  =       "cfq",
3841         .elevator_owner =       THIS_MODULE,
3842 };
3843
3844 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3845 static struct blkio_policy_type blkio_policy_cfq = {
3846         .ops = {
3847                 .blkio_alloc_group_fn =         cfq_alloc_blkio_group,
3848                 .blkio_link_group_fn =          cfq_link_blkio_group,
3849                 .blkio_unlink_group_fn =        cfq_unlink_blkio_group,
3850                 .blkio_clear_queue_fn =         cfq_clear_queue,
3851                 .blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
3852         },
3853         .plid = BLKIO_POLICY_PROP,
3854 };
3855 #endif
3856
3857 static int __init cfq_init(void)
3858 {
3859         int ret;
3860
3861         /*
3862          * could be 0 on HZ < 1000 setups (e.g. with HZ=100, HZ/125 == 0), so clamp to 1
3863          */
3864         if (!cfq_slice_async)
3865                 cfq_slice_async = 1;
3866         if (!cfq_slice_idle)
3867                 cfq_slice_idle = 1;
3868
3869 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3870         if (!cfq_group_idle)
3871                 cfq_group_idle = 1;
3872 #else
3873         cfq_group_idle = 0;
3874 #endif
3875         cfq_pool = KMEM_CACHE(cfq_queue, 0);
3876         if (!cfq_pool)
3877                 return -ENOMEM;
3878
3879         ret = elv_register(&iosched_cfq);
3880         if (ret) {
3881                 kmem_cache_destroy(cfq_pool);
3882                 return ret;
3883         }
3884
3885 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3886         blkio_policy_register(&blkio_policy_cfq);
3887 #endif
3888         return 0;
3889 }
3890
3891 static void __exit cfq_exit(void)
3892 {
3893 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3894         blkio_policy_unregister(&blkio_policy_cfq);
3895 #endif
3896         elv_unregister(&iosched_cfq);
3897         kmem_cache_destroy(cfq_pool);
3898 }
3899
3900 module_init(cfq_init);
3901 module_exit(cfq_exit);
3902
3903 MODULE_AUTHOR("Jens Axboe");
3904 MODULE_LICENSE("GPL");
3905 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");