block/cfq-iosched.c
1 /*
2  *  CFQ, or complete fairness queueing, disk scheduler.
3  *
4  *  Based on ideas from a previously unfinished io
5  *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
6  *
7  *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8  */
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/blkdev.h>
12 #include <linux/elevator.h>
13 #include <linux/jiffies.h>
14 #include <linux/rbtree.h>
15 #include <linux/ioprio.h>
16 #include <linux/blktrace_api.h>
17 #include "blk.h"
18 #include "blk-cgroup.h"
19
20 static struct blkio_policy_type blkio_policy_cfq;
21
22 /*
23  * tunables
24  */
25 /* max requests dispatched to the device in one round of service */
26 static const int cfq_quantum = 8;
27 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
28 /* maximum backwards seek, in KiB */
29 static const int cfq_back_max = 16 * 1024;
30 /* penalty of a backwards seek */
31 static const int cfq_back_penalty = 2;
32 static const int cfq_slice_sync = HZ / 10;
33 static int cfq_slice_async = HZ / 25;
34 static const int cfq_slice_async_rq = 2;
35 static int cfq_slice_idle = HZ / 125;
36 static int cfq_group_idle = HZ / 125;
37 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
38 static const int cfq_hist_divisor = 4;
39
40 /*
41  * offset from end of service tree
42  */
43 #define CFQ_IDLE_DELAY          (HZ / 5)
44
45 /*
46  * below this threshold, we consider thinktime immediate
47  */
48 #define CFQ_MIN_TT              (2)
49
50 #define CFQ_SLICE_SCALE         (5)
51 #define CFQ_HW_QUEUE_MIN        (5)
52 #define CFQ_SERVICE_SHIFT       12
53
54 #define CFQQ_SEEK_THR           (sector_t)(8 * 100)
55 #define CFQQ_CLOSE_THR          (sector_t)(8 * 1024)
56 #define CFQQ_SECT_THR_NONROT    (sector_t)(2 * 32)
57 #define CFQQ_SEEKY(cfqq)        (hweight32(cfqq->seek_history) > 32/8)
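/*
 * As a rough guide to these thresholds (assuming 512-byte sectors, and that
 * seek_history is maintained elsewhere in this file as a 32-bit shift
 * register with one bit per recent request): CFQQ_SEEK_THR is 800 sectors
 * (~400 KiB), CFQQ_CLOSE_THR is 8192 sectors (~4 MiB), and
 * CFQQ_SECT_THR_NONROT is 64 sectors (~32 KiB). CFQQ_SEEKY() then flags a
 * queue as seeky once more than 32/8 = 4 of its last 32 samples were marked.
 */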
58
59 #define RQ_CIC(rq)              icq_to_cic((rq)->elv.icq)
60 #define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elv.priv[0])
61 #define RQ_CFQG(rq)             (struct cfq_group *) ((rq)->elv.priv[1])
62
63 static struct kmem_cache *cfq_pool;
64
65 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
66 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
67 #define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
68
69 #define sample_valid(samples)   ((samples) > 80)
70 #define rb_entry_cfqg(node)     rb_entry((node), struct cfq_group, rb_node)
71
72 struct cfq_ttime {
73         unsigned long last_end_request;
74
75         unsigned long ttime_total;
76         unsigned long ttime_samples;
77         unsigned long ttime_mean;
78 };
79
80 /*
81  * Most of our rbtree usage is for sorting with min extraction, so
82  * if we cache the leftmost node we don't have to walk down the tree
83  * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
84  * move this into the elevator for the rq sorting as well.
85  */
86 struct cfq_rb_root {
87         struct rb_root rb;
88         struct rb_node *left;
89         unsigned count;
90         unsigned total_weight;
91         u64 min_vdisktime;
92         struct cfq_ttime ttime;
93 };
94 #define CFQ_RB_ROOT     (struct cfq_rb_root) { .rb = RB_ROOT, \
95                         .ttime = {.last_end_request = jiffies,},}
96
97 /*
98  * Per process-grouping structure
99  */
100 struct cfq_queue {
101         /* reference count */
102         int ref;
103         /* various state flags, see below */
104         unsigned int flags;
105         /* parent cfq_data */
106         struct cfq_data *cfqd;
107         /* service_tree member */
108         struct rb_node rb_node;
109         /* service_tree key */
110         unsigned long rb_key;
111         /* prio tree member */
112         struct rb_node p_node;
113         /* prio tree root we belong to, if any */
114         struct rb_root *p_root;
115         /* sorted list of pending requests */
116         struct rb_root sort_list;
117         /* if fifo isn't expired, next request to serve */
118         struct request *next_rq;
119         /* requests queued in sort_list */
120         int queued[2];
121         /* currently allocated requests */
122         int allocated[2];
123         /* fifo list of requests in sort_list */
124         struct list_head fifo;
125
126         /* time when queue got scheduled in to dispatch first request. */
127         unsigned long dispatch_start;
128         unsigned int allocated_slice;
129         unsigned int slice_dispatch;
130         /* time when first request from queue completed and slice started. */
131         unsigned long slice_start;
132         unsigned long slice_end;
133         long slice_resid;
134
135         /* pending priority requests */
136         int prio_pending;
137         /* number of requests that are on the dispatch list or inside driver */
138         int dispatched;
139
140         /* io prio of this group */
141         unsigned short ioprio, org_ioprio;
142         unsigned short ioprio_class;
143
144         pid_t pid;
145
146         u32 seek_history;
147         sector_t last_request_pos;
148
149         struct cfq_rb_root *service_tree;
150         struct cfq_queue *new_cfqq;
151         struct cfq_group *cfqg;
152         /* Number of sectors dispatched from queue in single dispatch round */
153         unsigned long nr_sectors;
154 };
155
156 /*
157  * First index in the service_trees.
158  * IDLE is handled separately, so it has negative index
159  */
160 enum wl_prio_t {
161         BE_WORKLOAD = 0,
162         RT_WORKLOAD = 1,
163         IDLE_WORKLOAD = 2,
164         CFQ_PRIO_NR,
165 };
166
167 /*
168  * Second index in the service_trees.
169  */
170 enum wl_type_t {
171         ASYNC_WORKLOAD = 0,
172         SYNC_NOIDLE_WORKLOAD = 1,
173         SYNC_WORKLOAD = 2
174 };
175
176 struct cfqg_stats {
177 #ifdef CONFIG_CFQ_GROUP_IOSCHED
178         /* total bytes transferred */
179         struct blkg_rwstat              service_bytes;
180         /* total IOs serviced, post merge */
181         struct blkg_rwstat              serviced;
182         /* number of ios merged */
183         struct blkg_rwstat              merged;
184         /* total time spent on device in ns, may not be accurate w/ queueing */
185         struct blkg_rwstat              service_time;
186         /* total time spent waiting in scheduler queue in ns */
187         struct blkg_rwstat              wait_time;
188         /* number of IOs queued up */
189         struct blkg_rwstat              queued;
190         /* total sectors transferred */
191         struct blkg_stat                sectors;
192         /* total disk time and nr sectors dispatched by this group */
193         struct blkg_stat                time;
194 #ifdef CONFIG_DEBUG_BLK_CGROUP
195         /* time not charged to this cgroup */
196         struct blkg_stat                unaccounted_time;
197         /* sum of number of ios queued across all samples */
198         struct blkg_stat                avg_queue_size_sum;
199         /* count of samples taken for average */
200         struct blkg_stat                avg_queue_size_samples;
201         /* how many times this group has been removed from service tree */
202         struct blkg_stat                dequeue;
203         /* total time spent waiting for it to be assigned a timeslice. */
204         struct blkg_stat                group_wait_time;
205         /* time spent idling for this blkio_group */
206         struct blkg_stat                idle_time;
207         /* total time with empty current active q with other requests queued */
208         struct blkg_stat                empty_time;
209         /* fields after this shouldn't be cleared on stat reset */
210         uint64_t                        start_group_wait_time;
211         uint64_t                        start_idle_time;
212         uint64_t                        start_empty_time;
213         uint16_t                        flags;
214 #endif  /* CONFIG_DEBUG_BLK_CGROUP */
215 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
216 };
217
218 /* This is per cgroup per device grouping structure */
219 struct cfq_group {
220         /* group service_tree member */
221         struct rb_node rb_node;
222
223         /* group service_tree key */
224         u64 vdisktime;
225         unsigned int weight;
226         unsigned int new_weight;
227         unsigned int dev_weight;
228
229         /* number of cfqq currently on this group */
230         int nr_cfqq;
231
232         /*
233          * Per group busy queues average. Useful for workload slice calc. We
234          * create the array for each prio class, but at run time only the RT
235          * and BE classes use it; the slot for the IDLE class remains unused.
236          * This is primarily done to avoid confusion and a gcc warning.
237          */
238         unsigned int busy_queues_avg[CFQ_PRIO_NR];
239         /*
240          * rr lists of queues with requests. We maintain service trees for
241          * RT and BE classes. These trees are subdivided into subclasses
242          * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
243          * class there is no subclassification and all the cfq queues go on
244          * a single tree service_tree_idle.
245          * Counts are embedded in the cfq_rb_root
246          */
247         struct cfq_rb_root service_trees[2][3];
248         struct cfq_rb_root service_tree_idle;
249
250         unsigned long saved_workload_slice;
251         enum wl_type_t saved_workload;
252         enum wl_prio_t saved_serving_prio;
253
254         /* number of requests that are on the dispatch list or inside driver */
255         int dispatched;
256         struct cfq_ttime ttime;
257         struct cfqg_stats stats;
258 };
259
260 struct cfq_io_cq {
261         struct io_cq            icq;            /* must be the first member */
262         struct cfq_queue        *cfqq[2];
263         struct cfq_ttime        ttime;
264         int                     ioprio;         /* the current ioprio */
265 #ifdef CONFIG_CFQ_GROUP_IOSCHED
266         uint64_t                blkcg_id;       /* the current blkcg ID */
267 #endif
268 };
269
270 /*
271  * Per block device queue structure
272  */
273 struct cfq_data {
274         struct request_queue *queue;
275         /* Root service tree for cfq_groups */
276         struct cfq_rb_root grp_service_tree;
277         struct cfq_group *root_group;
278
279         /*
280          * The priority currently being served
281          */
282         enum wl_prio_t serving_prio;
283         enum wl_type_t serving_type;
284         unsigned long workload_expires;
285         struct cfq_group *serving_group;
286
287         /*
288          * Each priority tree is sorted by next_request position.  These
289          * trees are used when determining if two or more queues are
290          * interleaving requests (see cfq_close_cooperator).
291          */
292         struct rb_root prio_trees[CFQ_PRIO_LISTS];
293
294         unsigned int busy_queues;
295         unsigned int busy_sync_queues;
296
297         int rq_in_driver;
298         int rq_in_flight[2];
299
300         /*
301          * queue-depth detection
302          */
303         int rq_queued;
304         int hw_tag;
305         /*
306          * hw_tag can be
307          * -1 => indeterminate (cfq will behave as if NCQ is present, to allow better detection)
308          *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
309          *  0 => no NCQ
310          */
311         int hw_tag_est_depth;
312         unsigned int hw_tag_samples;
313
314         /*
315          * idle window management
316          */
317         struct timer_list idle_slice_timer;
318         struct work_struct unplug_work;
319
320         struct cfq_queue *active_queue;
321         struct cfq_io_cq *active_cic;
322
323         /*
324          * async queue for each priority case
325          */
326         struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
327         struct cfq_queue *async_idle_cfqq;
328
329         sector_t last_position;
330
331         /*
332          * tunables, see top of file
333          */
334         unsigned int cfq_quantum;
335         unsigned int cfq_fifo_expire[2];
336         unsigned int cfq_back_penalty;
337         unsigned int cfq_back_max;
338         unsigned int cfq_slice[2];
339         unsigned int cfq_slice_async_rq;
340         unsigned int cfq_slice_idle;
341         unsigned int cfq_group_idle;
342         unsigned int cfq_latency;
343
344         /*
345          * Fallback dummy cfqq for extreme OOM conditions
346          */
347         struct cfq_queue oom_cfqq;
348
349         unsigned long last_delayed_sync;
350 };
351
352 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
353
354 static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
355                                             enum wl_prio_t prio,
356                                             enum wl_type_t type)
357 {
358         if (!cfqg)
359                 return NULL;
360
361         if (prio == IDLE_WORKLOAD)
362                 return &cfqg->service_tree_idle;
363
364         return &cfqg->service_trees[prio][type];
365 }
366
367 enum cfqq_state_flags {
368         CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
369         CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
370         CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
371         CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
372         CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
373         CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
374         CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
375         CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
376         CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
377         CFQ_CFQQ_FLAG_coop,             /* cfqq is shared */
378         CFQ_CFQQ_FLAG_split_coop,       /* shared cfqq will be split */
379         CFQ_CFQQ_FLAG_deep,             /* sync cfqq experienced large depth */
380         CFQ_CFQQ_FLAG_wait_busy,        /* Waiting for next request */
381 };
382
383 #define CFQ_CFQQ_FNS(name)                                              \
384 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
385 {                                                                       \
386         (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);                   \
387 }                                                                       \
388 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
389 {                                                                       \
390         (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                  \
391 }                                                                       \
392 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
393 {                                                                       \
394         return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;      \
395 }
396
397 CFQ_CFQQ_FNS(on_rr);
398 CFQ_CFQQ_FNS(wait_request);
399 CFQ_CFQQ_FNS(must_dispatch);
400 CFQ_CFQQ_FNS(must_alloc_slice);
401 CFQ_CFQQ_FNS(fifo_expire);
402 CFQ_CFQQ_FNS(idle_window);
403 CFQ_CFQQ_FNS(prio_changed);
404 CFQ_CFQQ_FNS(slice_new);
405 CFQ_CFQQ_FNS(sync);
406 CFQ_CFQQ_FNS(coop);
407 CFQ_CFQQ_FNS(split_coop);
408 CFQ_CFQQ_FNS(deep);
409 CFQ_CFQQ_FNS(wait_busy);
410 #undef CFQ_CFQQ_FNS
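/*
 * For illustration, a single CFQ_CFQQ_FNS(on_rr) invocation above expands
 * to the following three accessors (every other flag gets the same trio):
 *
 *	static inline void cfq_mark_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *	static inline void cfq_clear_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *	static inline int cfq_cfqq_on_rr(const struct cfq_queue *cfqq)
 *	{
 *		return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_on_rr)) != 0;
 *	}
 */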
411
412 #if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
413
414 /* cfqg stats flags */
415 enum cfqg_stats_flags {
416         CFQG_stats_waiting = 0,
417         CFQG_stats_idling,
418         CFQG_stats_empty,
419 };
420
421 #define CFQG_FLAG_FNS(name)                                             \
422 static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)     \
423 {                                                                       \
424         stats->flags |= (1 << CFQG_stats_##name);                       \
425 }                                                                       \
426 static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)    \
427 {                                                                       \
428         stats->flags &= ~(1 << CFQG_stats_##name);                      \
429 }                                                                       \
430 static inline int cfqg_stats_##name(struct cfqg_stats *stats)           \
431 {                                                                       \
432         return (stats->flags & (1 << CFQG_stats_##name)) != 0;          \
433 }                                                                       \
434
435 CFQG_FLAG_FNS(waiting)
436 CFQG_FLAG_FNS(idling)
437 CFQG_FLAG_FNS(empty)
438 #undef CFQG_FLAG_FNS
439
440 /* This should be called with the queue_lock held. */
441 static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
442 {
443         unsigned long long now;
444
445         if (!cfqg_stats_waiting(stats))
446                 return;
447
448         now = sched_clock();
449         if (time_after64(now, stats->start_group_wait_time))
450                 blkg_stat_add(&stats->group_wait_time,
451                               now - stats->start_group_wait_time);
452         cfqg_stats_clear_waiting(stats);
453 }
454
455 /* This should be called with the queue_lock held. */
456 static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
457                                                  struct cfq_group *curr_cfqg)
458 {
459         struct cfqg_stats *stats = &cfqg->stats;
460
461         if (cfqg_stats_waiting(stats))
462                 return;
463         if (cfqg == curr_cfqg)
464                 return;
465         stats->start_group_wait_time = sched_clock();
466         cfqg_stats_mark_waiting(stats);
467 }
468
469 /* This should be called with the queue_lock held. */
470 static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
471 {
472         unsigned long long now;
473
474         if (!cfqg_stats_empty(stats))
475                 return;
476
477         now = sched_clock();
478         if (time_after64(now, stats->start_empty_time))
479                 blkg_stat_add(&stats->empty_time,
480                               now - stats->start_empty_time);
481         cfqg_stats_clear_empty(stats);
482 }
483
484 static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
485 {
486         blkg_stat_add(&cfqg->stats.dequeue, 1);
487 }
488
489 static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
490 {
491         struct cfqg_stats *stats = &cfqg->stats;
492
493         if (blkg_rwstat_sum(&stats->queued))
494                 return;
495
496         /*
497          * group is already marked empty. This can happen if cfqq got new
498          * request in parent group and moved to this group while being added
499          * to service tree. Just ignore the event and move on.
500          */
501         if (cfqg_stats_empty(stats))
502                 return;
503
504         stats->start_empty_time = sched_clock();
505         cfqg_stats_mark_empty(stats);
506 }
507
508 static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
509 {
510         struct cfqg_stats *stats = &cfqg->stats;
511
512         if (cfqg_stats_idling(stats)) {
513                 unsigned long long now = sched_clock();
514
515                 if (time_after64(now, stats->start_idle_time))
516                         blkg_stat_add(&stats->idle_time,
517                                       now - stats->start_idle_time);
518                 cfqg_stats_clear_idling(stats);
519         }
520 }
521
522 static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
523 {
524         struct cfqg_stats *stats = &cfqg->stats;
525
526         BUG_ON(cfqg_stats_idling(stats));
527
528         stats->start_idle_time = sched_clock();
529         cfqg_stats_mark_idling(stats);
530 }
531
532 static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
533 {
534         struct cfqg_stats *stats = &cfqg->stats;
535
536         blkg_stat_add(&stats->avg_queue_size_sum,
537                       blkg_rwstat_sum(&stats->queued));
538         blkg_stat_add(&stats->avg_queue_size_samples, 1);
539         cfqg_stats_update_group_wait_time(stats);
540 }
541
542 #else   /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
543
544 static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
545                                                  struct cfq_group *curr_cfqg) { }
546 static void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
547 static void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
548 static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
549 static void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
550 static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
551 static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
552
553 #endif  /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
554
555 #ifdef CONFIG_CFQ_GROUP_IOSCHED
556
557 static inline struct cfq_group *blkg_to_cfqg(struct blkio_group *blkg)
558 {
559         return blkg_to_pdata(blkg, &blkio_policy_cfq);
560 }
561
562 static inline struct blkio_group *cfqg_to_blkg(struct cfq_group *cfqg)
563 {
564         return pdata_to_blkg(cfqg);
565 }
566
567 static inline void cfqg_get(struct cfq_group *cfqg)
568 {
569         return blkg_get(cfqg_to_blkg(cfqg));
570 }
571
572 static inline void cfqg_put(struct cfq_group *cfqg)
573 {
574         return blkg_put(cfqg_to_blkg(cfqg));
575 }
576
577 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
578         blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
579                         cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
580                         blkg_path(cfqg_to_blkg((cfqq)->cfqg)), ##args)
581
582 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)                          \
583         blk_add_trace_msg((cfqd)->queue, "%s " fmt,                     \
584                         blkg_path(cfqg_to_blkg((cfqg))), ##args)        \
585
586 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
587                                             struct cfq_group *curr_cfqg, int rw)
588 {
589         blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
590         cfqg_stats_end_empty_time(&cfqg->stats);
591         cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
592 }
593
594 static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
595                         unsigned long time, unsigned long unaccounted_time)
596 {
597         blkg_stat_add(&cfqg->stats.time, time);
598 #ifdef CONFIG_DEBUG_BLK_CGROUP
599         blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
600 #endif
601 }
602
603 static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
604 {
605         blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
606 }
607
608 static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
609 {
610         blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
611 }
612
613 static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
614                                               uint64_t bytes, int rw)
615 {
616         blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
617         blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
618         blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
619 }
620
621 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
622                         uint64_t start_time, uint64_t io_start_time, int rw)
623 {
624         struct cfqg_stats *stats = &cfqg->stats;
625         unsigned long long now = sched_clock();
626
627         if (time_after64(now, io_start_time))
628                 blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
629         if (time_after64(io_start_time, start_time))
630                 blkg_rwstat_add(&stats->wait_time, rw,
631                                 io_start_time - start_time);
632 }
633
634 static void cfqg_stats_reset(struct blkio_group *blkg)
635 {
636         struct cfq_group *cfqg = blkg_to_cfqg(blkg);
637         struct cfqg_stats *stats = &cfqg->stats;
638
639         /* queued stats shouldn't be cleared */
640         blkg_rwstat_reset(&stats->service_bytes);
641         blkg_rwstat_reset(&stats->serviced);
642         blkg_rwstat_reset(&stats->merged);
643         blkg_rwstat_reset(&stats->service_time);
644         blkg_rwstat_reset(&stats->wait_time);
645         blkg_stat_reset(&stats->time);
646 #ifdef CONFIG_DEBUG_BLK_CGROUP
647         blkg_stat_reset(&stats->unaccounted_time);
648         blkg_stat_reset(&stats->avg_queue_size_sum);
649         blkg_stat_reset(&stats->avg_queue_size_samples);
650         blkg_stat_reset(&stats->dequeue);
651         blkg_stat_reset(&stats->group_wait_time);
652         blkg_stat_reset(&stats->idle_time);
653         blkg_stat_reset(&stats->empty_time);
654 #endif
655 }
656
657 #else   /* CONFIG_CFQ_GROUP_IOSCHED */
658
659 static inline struct cfq_group *blkg_to_cfqg(struct blkio_group *blkg) { return NULL; }
660 static inline struct blkio_group *cfqg_to_blkg(struct cfq_group *cfqg) { return NULL; }
661 static inline void cfqg_get(struct cfq_group *cfqg) { }
662 static inline void cfqg_put(struct cfq_group *cfqg) { }
663
664 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
665         blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
666 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)          do {} while (0)
667
668 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
669                         struct cfq_group *curr_cfqg, int rw) { }
670 static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
671                         unsigned long time, unsigned long unaccounted_time) { }
672 static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
673 static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
674 static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
675                                               uint64_t bytes, int rw) { }
676 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
677                         uint64_t start_time, uint64_t io_start_time, int rw) { }
678
679 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
680
681 #define cfq_log(cfqd, fmt, args...)     \
682         blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
683
684 /* Traverses through cfq group service trees */
685 #define for_each_cfqg_st(cfqg, i, j, st) \
686         for (i = 0; i <= IDLE_WORKLOAD; i++) \
687                 for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
688                         : &cfqg->service_tree_idle; \
689                         (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
690                         (i == IDLE_WORKLOAD && j == 0); \
691                         j++, st = i < IDLE_WORKLOAD ? \
692                         &cfqg->service_trees[i][j]: NULL) \
693
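/*
 * As a quick sketch of the iteration order: for_each_cfqg_st() visits all
 * seven trees of a group -- service_trees[BE][ASYNC], [BE][SYNC_NOIDLE],
 * [BE][SYNC], the same three for RT, and finally the single
 * service_tree_idle. For example, cfq_init_cfqg_base() below resets every
 * tree with:
 *
 *	for_each_cfqg_st(cfqg, i, j, st)
 *		*st = CFQ_RB_ROOT;
 */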
694 static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
695         struct cfq_ttime *ttime, bool group_idle)
696 {
697         unsigned long slice;
698         if (!sample_valid(ttime->ttime_samples))
699                 return false;
700         if (group_idle)
701                 slice = cfqd->cfq_group_idle;
702         else
703                 slice = cfqd->cfq_slice_idle;
704         return ttime->ttime_mean > slice;
705 }
706
707 static inline bool iops_mode(struct cfq_data *cfqd)
708 {
709         /*
710          * If we are not idling on queues and the drive supports NCQ, requests
711          * execute in parallel and measuring time per queue is usually not
712          * possible unless we drive shallower queue depths, which then becomes
713          * a performance bottleneck. In such cases switch to providing
714          * fairness in terms of number of IOs.
715          */
716         if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
717                 return true;
718         else
719                 return false;
720 }
721
722 static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
723 {
724         if (cfq_class_idle(cfqq))
725                 return IDLE_WORKLOAD;
726         if (cfq_class_rt(cfqq))
727                 return RT_WORKLOAD;
728         return BE_WORKLOAD;
729 }
730
731
732 static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
733 {
734         if (!cfq_cfqq_sync(cfqq))
735                 return ASYNC_WORKLOAD;
736         if (!cfq_cfqq_idle_window(cfqq))
737                 return SYNC_NOIDLE_WORKLOAD;
738         return SYNC_WORKLOAD;
739 }
740
741 static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
742                                         struct cfq_data *cfqd,
743                                         struct cfq_group *cfqg)
744 {
745         if (wl == IDLE_WORKLOAD)
746                 return cfqg->service_tree_idle.count;
747
748         return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
749                 + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
750                 + cfqg->service_trees[wl][SYNC_WORKLOAD].count;
751 }
752
753 static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
754                                         struct cfq_group *cfqg)
755 {
756         return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
757                 + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
758 }
759
760 static void cfq_dispatch_insert(struct request_queue *, struct request *);
761 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
762                                        struct cfq_io_cq *cic, struct bio *bio,
763                                        gfp_t gfp_mask);
764
765 static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
766 {
767         /* cic->icq is the first member, so a %NULL icq converts to a %NULL cic */
768         return container_of(icq, struct cfq_io_cq, icq);
769 }
770
771 static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
772                                                struct io_context *ioc)
773 {
774         if (ioc)
775                 return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
776         return NULL;
777 }
778
779 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
780 {
781         return cic->cfqq[is_sync];
782 }
783
784 static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
785                                 bool is_sync)
786 {
787         cic->cfqq[is_sync] = cfqq;
788 }
789
790 static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
791 {
792         return cic->icq.q->elevator->elevator_data;
793 }
794
795 /*
796  * We regard a request as SYNC if it's either a read or has the SYNC bit
797  * set (in which case it could also be a direct WRITE).
798  */
799 static inline bool cfq_bio_sync(struct bio *bio)
800 {
801         return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
802 }
803
804 /*
805  * Schedule a run of the queue if there are requests pending and nothing
806  * already in the driver that will restart queueing
807  */
808 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
809 {
810         if (cfqd->busy_queues) {
811                 cfq_log(cfqd, "schedule dispatch");
812                 kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
813         }
814 }
815
816 /*
817  * Scale schedule slice based on io priority. Use the sync time slice only
818  * if a queue is marked sync and has sync io queued. A sync queue with async
819  * io only should not get the full sync slice length.
820  */
821 static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
822                                  unsigned short prio)
823 {
824         const int base_slice = cfqd->cfq_slice[sync];
825
826         WARN_ON(prio >= IOPRIO_BE_NR);
827
828         return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
829 }
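/*
 * A rough worked example, assuming the default cfq_slice_sync of HZ/10
 * (~100ms) and CFQ_SLICE_SCALE of 5: the per-step adjustment is
 * base_slice / 5 = 20ms, so an ioprio 0 sync queue gets 100 + 20 * 4 = 180ms,
 * the default ioprio 4 gets exactly 100ms, and ioprio 7 gets
 * 100 + 20 * (4 - 7) = 40ms.
 */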
830
831 static inline int
832 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
833 {
834         return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
835 }
836
837 static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
838 {
839         u64 d = delta << CFQ_SERVICE_SHIFT;
840
841         d = d * CFQ_WEIGHT_DEFAULT;
842         do_div(d, cfqg->weight);
843         return d;
844 }
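/*
 * In other words, the vdisktime charge is inversely proportional to the
 * group's weight: as a sketch, a group configured at twice the default
 * weight is charged half as much vdisktime for the same amount of service,
 * so over time it receives roughly twice the share of the disk.
 */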
845
846 static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
847 {
848         s64 delta = (s64)(vdisktime - min_vdisktime);
849         if (delta > 0)
850                 min_vdisktime = vdisktime;
851
852         return min_vdisktime;
853 }
854
855 static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
856 {
857         s64 delta = (s64)(vdisktime - min_vdisktime);
858         if (delta < 0)
859                 min_vdisktime = vdisktime;
860
861         return min_vdisktime;
862 }
863
864 static void update_min_vdisktime(struct cfq_rb_root *st)
865 {
866         struct cfq_group *cfqg;
867
868         if (st->left) {
869                 cfqg = rb_entry_cfqg(st->left);
870                 st->min_vdisktime = max_vdisktime(st->min_vdisktime,
871                                                   cfqg->vdisktime);
872         }
873 }
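/*
 * Note on the helpers above: comparing through a signed 64-bit delta keeps
 * the ordering correct even if vdisktime were ever to wrap, the same trick
 * time_after() uses for jiffies. update_min_vdisktime() then ratchets
 * st->min_vdisktime forward to the vdisktime of the leftmost (smallest key)
 * group, so the tree's baseline only ever moves forward.
 */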
874
875 /*
876  * get averaged number of queues of RT/BE priority.
877  * The average is updated with a formula that gives more weight to higher
878  * numbers, so that it quickly follows sudden increases and decreases slowly
879  */
880
881 static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
882                                         struct cfq_group *cfqg, bool rt)
883 {
884         unsigned min_q, max_q;
885         unsigned mult  = cfq_hist_divisor - 1;
886         unsigned round = cfq_hist_divisor / 2;
887         unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
888
889         min_q = min(cfqg->busy_queues_avg[rt], busy);
890         max_q = max(cfqg->busy_queues_avg[rt], busy);
891         cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
892                 cfq_hist_divisor;
893         return cfqg->busy_queues_avg[rt];
894 }
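/*
 * A small worked example with cfq_hist_divisor = 4 (mult = 3, round = 2):
 * if the stored average is 2 and 6 queues suddenly become busy, the new
 * value is (3 * 6 + 2 + 2) / 4 = 5, so most of the jump is taken at once;
 * going the other way, from an average of 6 down to 2 busy queues, the
 * same arithmetic also yields 5, so the estimate decays by only one step.
 */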
895
896 static inline unsigned
897 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
898 {
899         struct cfq_rb_root *st = &cfqd->grp_service_tree;
900
901         return cfq_target_latency * cfqg->weight / st->total_weight;
902 }
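/*
 * For example, with the default 300ms cfq_target_latency and three groups
 * of equal weight on the service tree, each group's slice works out to
 * roughly 300 * w / 3w = 100ms of the latency window.
 */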
903
904 static inline unsigned
905 cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
906 {
907         unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
908         if (cfqd->cfq_latency) {
909                 /*
910                  * interested queues (we consider only the ones with the same
911                  * priority class in the cfq group)
912                  */
913                 unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
914                                                 cfq_class_rt(cfqq));
915                 unsigned sync_slice = cfqd->cfq_slice[1];
916                 unsigned expect_latency = sync_slice * iq;
917                 unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
918
919                 if (expect_latency > group_slice) {
920                         unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
921                         /* scale low_slice according to IO priority
922                          * and sync vs async */
923                         unsigned low_slice =
924                                 min(slice, base_low_slice * slice / sync_slice);
925                         /* the adapted slice value is scaled to fit all iqs
926                          * into the target latency */
927                         slice = max(slice * group_slice / expect_latency,
928                                     low_slice);
929                 }
930         }
931         return slice;
932 }
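/*
 * A rough example of the low-latency scaling above: with a 100ms sync
 * slice, a lone group on the service tree (group_slice = 300ms) and
 * iq = 8 busy sync queues of the same class, expect_latency = 800ms
 * exceeds the group slice, so a default-priority queue's slice shrinks to
 * about 100 * 300 / 800 = ~37ms, bounded below by low_slice.
 */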
933
934 static inline void
935 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
936 {
937         unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
938
939         cfqq->slice_start = jiffies;
940         cfqq->slice_end = jiffies + slice;
941         cfqq->allocated_slice = slice;
942         cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
943 }
944
945 /*
946  * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
947  * isn't valid until the first request from the dispatch is activated
948  * and the slice time set.
949  */
950 static inline bool cfq_slice_used(struct cfq_queue *cfqq)
951 {
952         if (cfq_cfqq_slice_new(cfqq))
953                 return false;
954         if (time_before(jiffies, cfqq->slice_end))
955                 return false;
956
957         return true;
958 }
959
960 /*
961  * Lifted from AS - choose which of rq1 and rq2 is best served now.
962  * We choose the request that is closest to the head right now. Distance
963  * behind the head is penalized and only allowed to a certain extent.
964  */
965 static struct request *
966 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
967 {
968         sector_t s1, s2, d1 = 0, d2 = 0;
969         unsigned long back_max;
970 #define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
971 #define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
972         unsigned wrap = 0; /* bit mask: requests behind the disk head? */
973
974         if (rq1 == NULL || rq1 == rq2)
975                 return rq2;
976         if (rq2 == NULL)
977                 return rq1;
978
979         if (rq_is_sync(rq1) != rq_is_sync(rq2))
980                 return rq_is_sync(rq1) ? rq1 : rq2;
981
982         if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
983                 return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
984
985         s1 = blk_rq_pos(rq1);
986         s2 = blk_rq_pos(rq2);
987
988         /*
989          * by definition, 1KiB is 2 sectors
990          */
991         back_max = cfqd->cfq_back_max * 2;
992
993         /*
994          * Strict one way elevator _except_ in the case where we allow
995          * short backward seeks which are biased as twice the cost of a
996          * similar forward seek.
997          */
998         if (s1 >= last)
999                 d1 = s1 - last;
1000         else if (s1 + back_max >= last)
1001                 d1 = (last - s1) * cfqd->cfq_back_penalty;
1002         else
1003                 wrap |= CFQ_RQ1_WRAP;
1004
1005         if (s2 >= last)
1006                 d2 = s2 - last;
1007         else if (s2 + back_max >= last)
1008                 d2 = (last - s2) * cfqd->cfq_back_penalty;
1009         else
1010                 wrap |= CFQ_RQ2_WRAP;
1011
1012         /* Found required data */
1013
1014         /*
1015          * By doing switch() on the bit mask "wrap" we avoid having to
1016          * check two variables for all permutations: --> faster!
1017          */
1018         switch (wrap) {
1019         case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
1020                 if (d1 < d2)
1021                         return rq1;
1022                 else if (d2 < d1)
1023                         return rq2;
1024                 else {
1025                         if (s1 >= s2)
1026                                 return rq1;
1027                         else
1028                                 return rq2;
1029                 }
1030
1031         case CFQ_RQ2_WRAP:
1032                 return rq1;
1033         case CFQ_RQ1_WRAP:
1034                 return rq2;
1035         case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
1036         default:
1037                 /*
1038                  * Since both rqs are wrapped,
1039                  * start with the one that's further behind head
1040                  * (--> only *one* back seek required),
1041                  * since back seek takes more time than forward.
1042                  */
1043                 if (s1 <= s2)
1044                         return rq1;
1045                 else
1046                         return rq2;
1047         }
1048 }
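/*
 * A short worked example of the back-seek penalty, with the default
 * cfq_back_max of 16 MiB (32768 sectors after doubling) and a
 * cfq_back_penalty of 2: if the head is at sector 1000, a request at
 * sector 1200 has d = 200, while a request at sector 850 (150 sectors
 * behind, but well within back_max) is charged d = 150 * 2 = 300, so the
 * forward request wins even though the backward one is physically closer.
 * A request more than back_max behind the head counts as "wrapped" and
 * loses to any request that is not.
 */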
1049
1050 /*
1051  * Below are the helpers for the leftmost-node rbtree cache
1052  */
1053 static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
1054 {
1055         /* Service tree is empty */
1056         if (!root->count)
1057                 return NULL;
1058
1059         if (!root->left)
1060                 root->left = rb_first(&root->rb);
1061
1062         if (root->left)
1063                 return rb_entry(root->left, struct cfq_queue, rb_node);
1064
1065         return NULL;
1066 }
1067
1068 static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
1069 {
1070         if (!root->left)
1071                 root->left = rb_first(&root->rb);
1072
1073         if (root->left)
1074                 return rb_entry_cfqg(root->left);
1075
1076         return NULL;
1077 }
1078
1079 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
1080 {
1081         rb_erase(n, root);
1082         RB_CLEAR_NODE(n);
1083 }
1084
1085 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
1086 {
1087         if (root->left == n)
1088                 root->left = NULL;
1089         rb_erase_init(n, &root->rb);
1090         --root->count;
1091 }
1092
1093 /*
1094  * would be nice to take fifo expire time into account as well
1095  */
1096 static struct request *
1097 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1098                   struct request *last)
1099 {
1100         struct rb_node *rbnext = rb_next(&last->rb_node);
1101         struct rb_node *rbprev = rb_prev(&last->rb_node);
1102         struct request *next = NULL, *prev = NULL;
1103
1104         BUG_ON(RB_EMPTY_NODE(&last->rb_node));
1105
1106         if (rbprev)
1107                 prev = rb_entry_rq(rbprev);
1108
1109         if (rbnext)
1110                 next = rb_entry_rq(rbnext);
1111         else {
1112                 rbnext = rb_first(&cfqq->sort_list);
1113                 if (rbnext && rbnext != &last->rb_node)
1114                         next = rb_entry_rq(rbnext);
1115         }
1116
1117         return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
1118 }
1119
1120 static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
1121                                       struct cfq_queue *cfqq)
1122 {
1123         /*
1124          * just an approximation, should be ok.
1125          */
1126         return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
1127                        cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
1128 }
1129
1130 static inline s64
1131 cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
1132 {
1133         return cfqg->vdisktime - st->min_vdisktime;
1134 }
1135
1136 static void
1137 __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1138 {
1139         struct rb_node **node = &st->rb.rb_node;
1140         struct rb_node *parent = NULL;
1141         struct cfq_group *__cfqg;
1142         s64 key = cfqg_key(st, cfqg);
1143         int left = 1;
1144
1145         while (*node != NULL) {
1146                 parent = *node;
1147                 __cfqg = rb_entry_cfqg(parent);
1148
1149                 if (key < cfqg_key(st, __cfqg))
1150                         node = &parent->rb_left;
1151                 else {
1152                         node = &parent->rb_right;
1153                         left = 0;
1154                 }
1155         }
1156
1157         if (left)
1158                 st->left = &cfqg->rb_node;
1159
1160         rb_link_node(&cfqg->rb_node, parent, node);
1161         rb_insert_color(&cfqg->rb_node, &st->rb);
1162 }
1163
1164 static void
1165 cfq_update_group_weight(struct cfq_group *cfqg)
1166 {
1167         BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1168         if (cfqg->new_weight) {
1169                 cfqg->weight = cfqg->new_weight;
1170                 cfqg->new_weight = 0;
1171         }
1172 }
1173
1174 static void
1175 cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1176 {
1177         BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1178
1179         cfq_update_group_weight(cfqg);
1180         __cfq_group_service_tree_add(st, cfqg);
1181         st->total_weight += cfqg->weight;
1182 }
1183
1184 static void
1185 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
1186 {
1187         struct cfq_rb_root *st = &cfqd->grp_service_tree;
1188         struct cfq_group *__cfqg;
1189         struct rb_node *n;
1190
1191         cfqg->nr_cfqq++;
1192         if (!RB_EMPTY_NODE(&cfqg->rb_node))
1193                 return;
1194
1195         /*
1196          * Currently put the group at the end. Later implement something
1197          * so that groups get less vtime based on their weights, so that a
1198          * group does not lose everything if it was not continuously backlogged.
1199          */
1200         n = rb_last(&st->rb);
1201         if (n) {
1202                 __cfqg = rb_entry_cfqg(n);
1203                 cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
1204         } else
1205                 cfqg->vdisktime = st->min_vdisktime;
1206         cfq_group_service_tree_add(st, cfqg);
1207 }
1208
1209 static void
1210 cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
1211 {
1212         st->total_weight -= cfqg->weight;
1213         if (!RB_EMPTY_NODE(&cfqg->rb_node))
1214                 cfq_rb_erase(&cfqg->rb_node, st);
1215 }
1216
1217 static void
1218 cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
1219 {
1220         struct cfq_rb_root *st = &cfqd->grp_service_tree;
1221
1222         BUG_ON(cfqg->nr_cfqq < 1);
1223         cfqg->nr_cfqq--;
1224
1225         /* If there are other cfq queues under this group, don't delete it */
1226         if (cfqg->nr_cfqq)
1227                 return;
1228
1229         cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
1230         cfq_group_service_tree_del(st, cfqg);
1231         cfqg->saved_workload_slice = 0;
1232         cfqg_stats_update_dequeue(cfqg);
1233 }
1234
1235 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
1236                                                 unsigned int *unaccounted_time)
1237 {
1238         unsigned int slice_used;
1239
1240         /*
1241          * Queue got expired before even a single request completed or
1242          * got expired immediately after first request completion.
1243          */
1244         if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
1245                 /*
1246                  * Also charge the seek time incurred to the group, otherwise
1247                  * if there are multiple queues in the group, each can dispatch
1248                  * a single request on seeky media and cause lots of seek time
1249                  * and group will never know it.
1250                  */
1251                 slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
1252                                         1);
1253         } else {
1254                 slice_used = jiffies - cfqq->slice_start;
1255                 if (slice_used > cfqq->allocated_slice) {
1256                         *unaccounted_time = slice_used - cfqq->allocated_slice;
1257                         slice_used = cfqq->allocated_slice;
1258                 }
1259                 if (time_after(cfqq->slice_start, cfqq->dispatch_start))
1260                         *unaccounted_time += cfqq->slice_start -
1261                                         cfqq->dispatch_start;
1262         }
1263
1264         return slice_used;
1265 }
1266
1267 static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
1268                                 struct cfq_queue *cfqq)
1269 {
1270         struct cfq_rb_root *st = &cfqd->grp_service_tree;
1271         unsigned int used_sl, charge, unaccounted_sl = 0;
1272         int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
1273                         - cfqg->service_tree_idle.count;
1274
1275         BUG_ON(nr_sync < 0);
1276         used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
1277
1278         if (iops_mode(cfqd))
1279                 charge = cfqq->slice_dispatch;
1280         else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
1281                 charge = cfqq->allocated_slice;
1282
1283         /* Can't update vdisktime while group is on service tree */
1284         cfq_group_service_tree_del(st, cfqg);
1285         cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
1286         /* If a new weight was requested, update now, off tree */
1287         cfq_group_service_tree_add(st, cfqg);
1288
1289         /* This group is being expired. Save the context */
1290         if (time_after(cfqd->workload_expires, jiffies)) {
1291                 cfqg->saved_workload_slice = cfqd->workload_expires
1292                                                 - jiffies;
1293                 cfqg->saved_workload = cfqd->serving_type;
1294                 cfqg->saved_serving_prio = cfqd->serving_prio;
1295         } else
1296                 cfqg->saved_workload_slice = 0;
1297
1298         cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
1299                                         st->min_vdisktime);
1300         cfq_log_cfqq(cfqq->cfqd, cfqq,
1301                      "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
1302                      used_sl, cfqq->slice_dispatch, charge,
1303                      iops_mode(cfqd), cfqq->nr_sectors);
1304         cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
1305         cfqg_stats_set_start_empty_time(cfqg);
1306 }
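/*
 * To summarize the charging policy above: normally the group is charged
 * the wall-clock slice the queue actually used; in iops_mode() it is
 * charged the number of requests dispatched instead, and an async queue
 * in a group with no busy sync queues is charged its full allocated
 * slice. Whatever the unit, the charge is weight-scaled through
 * cfq_scale_slice() before being added to the group's vdisktime.
 */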
1307
1308 /**
1309  * cfq_init_cfqg_base - initialize base part of a cfq_group
1310  * @cfqg: cfq_group to initialize
1311  *
1312  * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
1313  * is enabled or not.
1314  */
1315 static void cfq_init_cfqg_base(struct cfq_group *cfqg)
1316 {
1317         struct cfq_rb_root *st;
1318         int i, j;
1319
1320         for_each_cfqg_st(cfqg, i, j, st)
1321                 *st = CFQ_RB_ROOT;
1322         RB_CLEAR_NODE(&cfqg->rb_node);
1323
1324         cfqg->ttime.last_end_request = jiffies;
1325 }
1326
1327 #ifdef CONFIG_CFQ_GROUP_IOSCHED
1328 static void cfq_init_blkio_group(struct blkio_group *blkg)
1329 {
1330         struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1331
1332         cfq_init_cfqg_base(cfqg);
1333         cfqg->weight = blkg->blkcg->cfq_weight;
1334 }
1335
1336 /*
1337  * Search for the cfq group the current task belongs to. The request_queue
1338  * lock must be held.
1339  */
1340 static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
1341                                                 struct blkio_cgroup *blkcg)
1342 {
1343         struct request_queue *q = cfqd->queue;
1344         struct cfq_group *cfqg = NULL;
1345
1346         /* avoid lookup for the common case where there's no blkio cgroup */
1347         if (blkcg == &blkio_root_cgroup) {
1348                 cfqg = cfqd->root_group;
1349         } else {
1350                 struct blkio_group *blkg;
1351
1352                 blkg = blkg_lookup_create(blkcg, q, false);
1353                 if (!IS_ERR(blkg))
1354                         cfqg = blkg_to_cfqg(blkg);
1355         }
1356
1357         return cfqg;
1358 }
1359
1360 static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1361 {
1362         /* Currently, all async queues are mapped to root group */
1363         if (!cfq_cfqq_sync(cfqq))
1364                 cfqg = cfqq->cfqd->root_group;
1365
1366         cfqq->cfqg = cfqg;
1367         /* cfqq reference on cfqg */
1368         cfqg_get(cfqg);
1369 }
1370
1371 static u64 cfqg_prfill_weight_device(struct seq_file *sf,
1372                                      struct blkg_policy_data *pd, int off)
1373 {
1374         struct cfq_group *cfqg = (void *)pd->pdata;
1375
1376         if (!cfqg->dev_weight)
1377                 return 0;
1378         return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
1379 }
1380
1381 static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
1382                                     struct seq_file *sf)
1383 {
1384         blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp),
1385                           cfqg_prfill_weight_device, BLKIO_POLICY_PROP, 0,
1386                           false);
1387         return 0;
1388 }
1389
1390 static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
1391                             struct seq_file *sf)
1392 {
1393         seq_printf(sf, "%u\n", cgroup_to_blkio_cgroup(cgrp)->cfq_weight);
1394         return 0;
1395 }
1396
1397 static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
1398                                   const char *buf)
1399 {
1400         struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
1401         struct blkg_conf_ctx ctx;
1402         struct cfq_group *cfqg;
1403         int ret;
1404
1405         ret = blkg_conf_prep(blkcg, buf, &ctx);
1406         if (ret)
1407                 return ret;
1408
1409         ret = -EINVAL;
1410         cfqg = blkg_to_cfqg(ctx.blkg);
1411         if (cfqg && (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN &&
1412                                 ctx.v <= CFQ_WEIGHT_MAX))) {
1413                 cfqg->dev_weight = ctx.v;
1414                 cfqg->new_weight = cfqg->dev_weight ?: blkcg->cfq_weight;
1415                 ret = 0;
1416         }
1417
1418         blkg_conf_finish(&ctx);
1419         return ret;
1420 }
1421
1422 static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
1423 {
1424         struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
1425         struct blkio_group *blkg;
1426         struct hlist_node *n;
1427
1428         if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
1429                 return -EINVAL;
1430
1431         spin_lock_irq(&blkcg->lock);
1432         blkcg->cfq_weight = (unsigned int)val;
1433
1434         hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
1435                 struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1436
1437                 if (cfqg && !cfqg->dev_weight)
1438                         cfqg->new_weight = blkcg->cfq_weight;
1439         }
1440
1441         spin_unlock_irq(&blkcg->lock);
1442         return 0;
1443 }
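/*
 * A usage sketch for the two knobs above (the paths and the 8:16 device
 * number are examples only, assuming the blkio controller is mounted at
 * /sys/fs/cgroup/blkio and a child cgroup "grp" exists):
 *
 *	# set the per-cgroup default weight (CFQ_WEIGHT_MIN..CFQ_WEIGHT_MAX)
 *	echo 200 > /sys/fs/cgroup/blkio/grp/blkio.weight
 *
 *	# override the weight for one device ("major:minor weight");
 *	# writing "8:16 0" drops the override again
 *	echo "8:16 300" > /sys/fs/cgroup/blkio/grp/blkio.weight_device
 */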
1444
1445 #ifdef CONFIG_DEBUG_BLK_CGROUP
1446 static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
1447                                       struct blkg_policy_data *pd, int off)
1448 {
1449         struct cfq_group *cfqg = (void *)pd->pdata;
1450         u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
1451         u64 v = 0;
1452
1453         if (samples) {
1454                 v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
1455                 do_div(v, samples);
1456         }
1457         __blkg_prfill_u64(sf, pd, v);
1458         return 0;
1459 }
1460
1461 /* print avg_queue_size */
1462 static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
1463                                      struct seq_file *sf)
1464 {
1465         struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
1466
1467         blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
1468                           BLKIO_POLICY_PROP, 0, false);
1469         return 0;
1470 }
1471 #endif  /* CONFIG_DEBUG_BLK_CGROUP */
1472
1473 static struct cftype cfq_blkcg_files[] = {
1474         {
1475                 .name = "weight_device",
1476                 .read_seq_string = cfqg_print_weight_device,
1477                 .write_string = cfqg_set_weight_device,
1478                 .max_write_len = 256,
1479         },
1480         {
1481                 .name = "weight",
1482                 .read_seq_string = cfq_print_weight,
1483                 .write_u64 = cfq_set_weight,
1484         },
1485         {
1486                 .name = "time",
1487                 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1488                                 offsetof(struct cfq_group, stats.time)),
1489                 .read_seq_string = blkcg_print_stat,
1490         },
1491         {
1492                 .name = "sectors",
1493                 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1494                                 offsetof(struct cfq_group, stats.sectors)),
1495                 .read_seq_string = blkcg_print_stat,
1496         },
1497         {
1498                 .name = "io_service_bytes",
1499                 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1500                                 offsetof(struct cfq_group, stats.service_bytes)),
1501                 .read_seq_string = blkcg_print_rwstat,
1502         },
1503         {
1504                 .name = "io_serviced",
1505                 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1506                                 offsetof(struct cfq_group, stats.serviced)),
1507                 .read_seq_string = blkcg_print_rwstat,
1508         },
1509         {
1510                 .name = "io_service_time",
1511                 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1512                                 offsetof(struct cfq_group, stats.service_time)),
1513                 .read_seq_string = blkcg_print_rwstat,
1514         },
1515         {
1516                 .name = "io_wait_time",
1517                 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1518                                 offsetof(struct cfq_group, stats.wait_time)),
1519                 .read_seq_string = blkcg_print_rwstat,
1520         },
1521         {
1522                 .name = "io_merged",
1523                 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1524                                 offsetof(struct cfq_group, stats.merged)),
1525                 .read_seq_string = blkcg_print_rwstat,
1526         },
1527         {
1528                 .name = "io_queued",
1529                 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1530                                 offsetof(struct cfq_group, stats.queued)),
1531                 .read_seq_string = blkcg_print_rwstat,
1532         },
1533 #ifdef CONFIG_DEBUG_BLK_CGROUP
1534         {
1535                 .name = "avg_queue_size",
1536                 .read_seq_string = cfqg_print_avg_queue_size,
1537         },
1538         {
1539                 .name = "group_wait_time",
1540                 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1541                                 offsetof(struct cfq_group, stats.group_wait_time)),
1542                 .read_seq_string = blkcg_print_stat,
1543         },
1544         {
1545                 .name = "idle_time",
1546                 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1547                                 offsetof(struct cfq_group, stats.idle_time)),
1548                 .read_seq_string = blkcg_print_stat,
1549         },
1550         {
1551                 .name = "empty_time",
1552                 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1553                                 offsetof(struct cfq_group, stats.empty_time)),
1554                 .read_seq_string = blkcg_print_stat,
1555         },
1556         {
1557                 .name = "dequeue",
1558                 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1559                                 offsetof(struct cfq_group, stats.dequeue)),
1560                 .read_seq_string = blkcg_print_stat,
1561         },
1562         {
1563                 .name = "unaccounted_time",
1564                 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1565                                 offsetof(struct cfq_group, stats.unaccounted_time)),
1566                 .read_seq_string = blkcg_print_stat,
1567         },
1568 #endif  /* CONFIG_DEBUG_BLK_CGROUP */
1569         { }     /* terminate */
1570 };
1571 #else /* GROUP_IOSCHED */
1572 static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
1573                                                 struct blkio_cgroup *blkcg)
1574 {
1575         return cfqd->root_group;
1576 }
1577
1578 static inline void
1579 cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
1580         cfqq->cfqg = cfqg;
1581 }
1582
1583 #endif /* GROUP_IOSCHED */
1584
1585 /*
1586  * The cfqd->service_trees holds all pending cfq_queue's that have
1587  * requests waiting to be processed. It is sorted in the order that
1588  * we will service the queues.
1589  */
1590 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1591                                  bool add_front)
1592 {
1593         struct rb_node **p, *parent;
1594         struct cfq_queue *__cfqq;
1595         unsigned long rb_key;
1596         struct cfq_rb_root *service_tree;
1597         int left;
1598         int new_cfqq = 1;
1599
1600         service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
1601                                                 cfqq_type(cfqq));
1602         if (cfq_class_idle(cfqq)) {
1603                 rb_key = CFQ_IDLE_DELAY;
1604                 parent = rb_last(&service_tree->rb);
1605                 if (parent && parent != &cfqq->rb_node) {
1606                         __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1607                         rb_key += __cfqq->rb_key;
1608                 } else
1609                         rb_key += jiffies;
1610         } else if (!add_front) {
1611                 /*
1612                  * Get our rb key offset. Subtract any residual slice
1613                  * value carried from last service. A negative resid
1614                  * count indicates slice overrun, and this should position
1615                  * the next service time further away in the tree.
1616                  */
1617                 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
1618                 rb_key -= cfqq->slice_resid;
1619                 cfqq->slice_resid = 0;
1620         } else {
1621                 rb_key = -HZ;
1622                 __cfqq = cfq_rb_first(service_tree);
1623                 rb_key += __cfqq ? __cfqq->rb_key : jiffies;
1624         }
1625
1626         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1627                 new_cfqq = 0;
1628                 /*
1629                  * same position, nothing more to do
1630                  */
1631                 if (rb_key == cfqq->rb_key &&
1632                     cfqq->service_tree == service_tree)
1633                         return;
1634
1635                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1636                 cfqq->service_tree = NULL;
1637         }
1638
1639         left = 1;
1640         parent = NULL;
1641         cfqq->service_tree = service_tree;
1642         p = &service_tree->rb.rb_node;
1643         while (*p) {
1644                 struct rb_node **n;
1645
1646                 parent = *p;
1647                 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1648
1649                 /*
1650                  * sort by key, that represents service time.
1651                  */
1652                 if (time_before(rb_key, __cfqq->rb_key))
1653                         n = &(*p)->rb_left;
1654                 else {
1655                         n = &(*p)->rb_right;
1656                         left = 0;
1657                 }
1658
1659                 p = n;
1660         }
1661
1662         if (left)
1663                 service_tree->left = &cfqq->rb_node;
1664
1665         cfqq->rb_key = rb_key;
1666         rb_link_node(&cfqq->rb_node, parent, p);
1667         rb_insert_color(&cfqq->rb_node, &service_tree->rb);
1668         service_tree->count++;
1669         if (add_front || !new_cfqq)
1670                 return;
1671         cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
1672 }
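/*
 * Worked example for the rb_key above (a sketch for a non-idle-class
 * queue that is not added to the front): the key is
 * cfq_slice_offset(cfqd, cfqq) + jiffies - cfqq->slice_resid.  A queue
 * that overran its previous slice carries a negative slice_resid, so the
 * subtraction pushes its key further into the future and it gets serviced
 * later; leftover slice pulls it earlier.  Queues are kept sorted by
 * rb_key via time_before(), and service_tree->left caches the leftmost,
 * i.e. next-to-run, node.
 */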
1673
1674 static struct cfq_queue *
1675 cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
1676                      sector_t sector, struct rb_node **ret_parent,
1677                      struct rb_node ***rb_link)
1678 {
1679         struct rb_node **p, *parent;
1680         struct cfq_queue *cfqq = NULL;
1681
1682         parent = NULL;
1683         p = &root->rb_node;
1684         while (*p) {
1685                 struct rb_node **n;
1686
1687                 parent = *p;
1688                 cfqq = rb_entry(parent, struct cfq_queue, p_node);
1689
1690                 /*
1691                  * Sort strictly based on sector.  Smallest to the left,
1692                  * largest to the right.
1693                  */
1694                 if (sector > blk_rq_pos(cfqq->next_rq))
1695                         n = &(*p)->rb_right;
1696                 else if (sector < blk_rq_pos(cfqq->next_rq))
1697                         n = &(*p)->rb_left;
1698                 else
1699                         break;
1700                 p = n;
1701                 cfqq = NULL;
1702         }
1703
1704         *ret_parent = parent;
1705         if (rb_link)
1706                 *rb_link = p;
1707         return cfqq;
1708 }
1709
1710 static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1711 {
1712         struct rb_node **p, *parent;
1713         struct cfq_queue *__cfqq;
1714
1715         if (cfqq->p_root) {
1716                 rb_erase(&cfqq->p_node, cfqq->p_root);
1717                 cfqq->p_root = NULL;
1718         }
1719
1720         if (cfq_class_idle(cfqq))
1721                 return;
1722         if (!cfqq->next_rq)
1723                 return;
1724
1725         cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
1726         __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
1727                                       blk_rq_pos(cfqq->next_rq), &parent, &p);
1728         if (!__cfqq) {
1729                 rb_link_node(&cfqq->p_node, parent, p);
1730                 rb_insert_color(&cfqq->p_node, cfqq->p_root);
1731         } else
1732                 cfqq->p_root = NULL;
1733 }
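/*
 * Note on the prio trees: each tree in cfqd->prio_trees[] is keyed by the
 * sector of a queue's next_rq, which lets cfqq_close() below find another
 * queue issuing I/O near cfqd->last_position.  If the lookup above hits
 * an existing queue at the same sector, this queue is simply left out of
 * the tree (p_root is reset to NULL) instead of being inserted as a
 * duplicate.
 */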
1734
1735 /*
1736  * Update cfqq's position in the service tree.
1737  */
1738 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1739 {
1740         /*
1741          * Resorting requires the cfqq to be on the RR list already.
1742          */
1743         if (cfq_cfqq_on_rr(cfqq)) {
1744                 cfq_service_tree_add(cfqd, cfqq, 0);
1745                 cfq_prio_tree_add(cfqd, cfqq);
1746         }
1747 }
1748
1749 /*
1750  * add to the busy list of queues for service, trying to be fair by
1751  * ordering the pending list according to the last request serviced
1752  */
1753 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1754 {
1755         cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
1756         BUG_ON(cfq_cfqq_on_rr(cfqq));
1757         cfq_mark_cfqq_on_rr(cfqq);
1758         cfqd->busy_queues++;
1759         if (cfq_cfqq_sync(cfqq))
1760                 cfqd->busy_sync_queues++;
1761
1762         cfq_resort_rr_list(cfqd, cfqq);
1763 }
1764
1765 /*
1766  * Called when the cfqq no longer has requests pending, remove it from
1767  * the service tree.
1768  */
1769 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1770 {
1771         cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
1772         BUG_ON(!cfq_cfqq_on_rr(cfqq));
1773         cfq_clear_cfqq_on_rr(cfqq);
1774
1775         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1776                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1777                 cfqq->service_tree = NULL;
1778         }
1779         if (cfqq->p_root) {
1780                 rb_erase(&cfqq->p_node, cfqq->p_root);
1781                 cfqq->p_root = NULL;
1782         }
1783
1784         cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
1785         BUG_ON(!cfqd->busy_queues);
1786         cfqd->busy_queues--;
1787         if (cfq_cfqq_sync(cfqq))
1788                 cfqd->busy_sync_queues--;
1789 }
1790
1791 /*
1792  * rb tree support functions
1793  */
1794 static void cfq_del_rq_rb(struct request *rq)
1795 {
1796         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1797         const int sync = rq_is_sync(rq);
1798
1799         BUG_ON(!cfqq->queued[sync]);
1800         cfqq->queued[sync]--;
1801
1802         elv_rb_del(&cfqq->sort_list, rq);
1803
1804         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
1805                 /*
1806                  * Queue will be deleted from service tree when we actually
1807                  * expire it later. Right now just remove it from prio tree
1808                  * as it is empty.
1809                  */
1810                 if (cfqq->p_root) {
1811                         rb_erase(&cfqq->p_node, cfqq->p_root);
1812                         cfqq->p_root = NULL;
1813                 }
1814         }
1815 }
1816
1817 static void cfq_add_rq_rb(struct request *rq)
1818 {
1819         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1820         struct cfq_data *cfqd = cfqq->cfqd;
1821         struct request *prev;
1822
1823         cfqq->queued[rq_is_sync(rq)]++;
1824
1825         elv_rb_add(&cfqq->sort_list, rq);
1826
1827         if (!cfq_cfqq_on_rr(cfqq))
1828                 cfq_add_cfqq_rr(cfqd, cfqq);
1829
1830         /*
1831          * check if this request is a better next-serve candidate
1832          */
1833         prev = cfqq->next_rq;
1834         cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
1835
1836         /*
1837          * adjust priority tree position, if ->next_rq changes
1838          */
1839         if (prev != cfqq->next_rq)
1840                 cfq_prio_tree_add(cfqd, cfqq);
1841
1842         BUG_ON(!cfqq->next_rq);
1843 }
1844
1845 static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
1846 {
1847         elv_rb_del(&cfqq->sort_list, rq);
1848         cfqq->queued[rq_is_sync(rq)]--;
1849         cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
1850         cfq_add_rq_rb(rq);
1851         cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
1852                                  rq->cmd_flags);
1853 }
1854
1855 static struct request *
1856 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
1857 {
1858         struct task_struct *tsk = current;
1859         struct cfq_io_cq *cic;
1860         struct cfq_queue *cfqq;
1861
1862         cic = cfq_cic_lookup(cfqd, tsk->io_context);
1863         if (!cic)
1864                 return NULL;
1865
1866         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1867         if (cfqq) {
1868                 sector_t sector = bio->bi_sector + bio_sectors(bio);
1869
1870                 return elv_rb_find(&cfqq->sort_list, sector);
1871         }
1872
1873         return NULL;
1874 }
1875
1876 static void cfq_activate_request(struct request_queue *q, struct request *rq)
1877 {
1878         struct cfq_data *cfqd = q->elevator->elevator_data;
1879
1880         cfqd->rq_in_driver++;
1881         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
1882                                                 cfqd->rq_in_driver);
1883
1884         cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
1885 }
1886
1887 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
1888 {
1889         struct cfq_data *cfqd = q->elevator->elevator_data;
1890
1891         WARN_ON(!cfqd->rq_in_driver);
1892         cfqd->rq_in_driver--;
1893         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
1894                                                 cfqd->rq_in_driver);
1895 }
1896
1897 static void cfq_remove_request(struct request *rq)
1898 {
1899         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1900
1901         if (cfqq->next_rq == rq)
1902                 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
1903
1904         list_del_init(&rq->queuelist);
1905         cfq_del_rq_rb(rq);
1906
1907         cfqq->cfqd->rq_queued--;
1908         cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
1909         if (rq->cmd_flags & REQ_PRIO) {
1910                 WARN_ON(!cfqq->prio_pending);
1911                 cfqq->prio_pending--;
1912         }
1913 }
1914
1915 static int cfq_merge(struct request_queue *q, struct request **req,
1916                      struct bio *bio)
1917 {
1918         struct cfq_data *cfqd = q->elevator->elevator_data;
1919         struct request *__rq;
1920
1921         __rq = cfq_find_rq_fmerge(cfqd, bio);
1922         if (__rq && elv_rq_merge_ok(__rq, bio)) {
1923                 *req = __rq;
1924                 return ELEVATOR_FRONT_MERGE;
1925         }
1926
1927         return ELEVATOR_NO_MERGE;
1928 }
1929
1930 static void cfq_merged_request(struct request_queue *q, struct request *req,
1931                                int type)
1932 {
1933         if (type == ELEVATOR_FRONT_MERGE) {
1934                 struct cfq_queue *cfqq = RQ_CFQQ(req);
1935
1936                 cfq_reposition_rq_rb(cfqq, req);
1937         }
1938 }
1939
1940 static void cfq_bio_merged(struct request_queue *q, struct request *req,
1941                                 struct bio *bio)
1942 {
1943         cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
1944 }
1945
1946 static void
1947 cfq_merged_requests(struct request_queue *q, struct request *rq,
1948                     struct request *next)
1949 {
1950         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1951         struct cfq_data *cfqd = q->elevator->elevator_data;
1952
1953         /*
1954          * reposition in fifo if next is older than rq
1955          */
1956         if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
1957             time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
1958                 list_move(&rq->queuelist, &next->queuelist);
1959                 rq_set_fifo_time(rq, rq_fifo_time(next));
1960         }
1961
1962         if (cfqq->next_rq == next)
1963                 cfqq->next_rq = rq;
1964         cfq_remove_request(next);
1965         cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
1966
1967         cfqq = RQ_CFQQ(next);
1968         /*
1969          * all requests of this queue have been merged into other queues;
1970          * delete it from the service tree. If it's the active_queue,
1971          * cfq_dispatch_requests() will choose to expire it or to idle
1972          */
1973         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
1974             cfqq != cfqd->active_queue)
1975                 cfq_del_cfqq_rr(cfqd, cfqq);
1976 }
1977
1978 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
1979                            struct bio *bio)
1980 {
1981         struct cfq_data *cfqd = q->elevator->elevator_data;
1982         struct cfq_io_cq *cic;
1983         struct cfq_queue *cfqq;
1984
1985         /*
1986          * Disallow merge of a sync bio into an async request.
1987          */
1988         if (cfq_bio_sync(bio) && !rq_is_sync(rq))
1989                 return false;
1990
1991         /*
1992          * Lookup the cfqq that this bio will be queued with and allow
1993          * merge only if rq is queued there.
1994          */
1995         cic = cfq_cic_lookup(cfqd, current->io_context);
1996         if (!cic)
1997                 return false;
1998
1999         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
2000         return cfqq == RQ_CFQQ(rq);
2001 }
2002
2003 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2004 {
2005         del_timer(&cfqd->idle_slice_timer);
2006         cfqg_stats_update_idle_time(cfqq->cfqg);
2007 }
2008
2009 static void __cfq_set_active_queue(struct cfq_data *cfqd,
2010                                    struct cfq_queue *cfqq)
2011 {
2012         if (cfqq) {
2013                 cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
2014                                 cfqd->serving_prio, cfqd->serving_type);
2015                 cfqg_stats_update_avg_queue_size(cfqq->cfqg);
2016                 cfqq->slice_start = 0;
2017                 cfqq->dispatch_start = jiffies;
2018                 cfqq->allocated_slice = 0;
2019                 cfqq->slice_end = 0;
2020                 cfqq->slice_dispatch = 0;
2021                 cfqq->nr_sectors = 0;
2022
2023                 cfq_clear_cfqq_wait_request(cfqq);
2024                 cfq_clear_cfqq_must_dispatch(cfqq);
2025                 cfq_clear_cfqq_must_alloc_slice(cfqq);
2026                 cfq_clear_cfqq_fifo_expire(cfqq);
2027                 cfq_mark_cfqq_slice_new(cfqq);
2028
2029                 cfq_del_timer(cfqd, cfqq);
2030         }
2031
2032         cfqd->active_queue = cfqq;
2033 }
2034
2035 /*
2036  * current cfqq expired its slice (or was too idle), select new one
2037  */
2038 static void
2039 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2040                     bool timed_out)
2041 {
2042         cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
2043
2044         if (cfq_cfqq_wait_request(cfqq))
2045                 cfq_del_timer(cfqd, cfqq);
2046
2047         cfq_clear_cfqq_wait_request(cfqq);
2048         cfq_clear_cfqq_wait_busy(cfqq);
2049
2050         /*
2051          * If this cfqq is shared between multiple processes, check to
2052          * make sure that those processes are still issuing I/Os within
2053          * the mean seek distance.  If not, it may be time to break the
2054          * queues apart again.
2055          */
2056         if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
2057                 cfq_mark_cfqq_split_coop(cfqq);
2058
2059         /*
2060          * store what was left of this slice, if the queue idled/timed out
2061          */
2062         if (timed_out) {
2063                 if (cfq_cfqq_slice_new(cfqq))
2064                         cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
2065                 else
2066                         cfqq->slice_resid = cfqq->slice_end - jiffies;
2067                 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
2068         }
2069
2070         cfq_group_served(cfqd, cfqq->cfqg, cfqq);
2071
2072         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
2073                 cfq_del_cfqq_rr(cfqd, cfqq);
2074
2075         cfq_resort_rr_list(cfqd, cfqq);
2076
2077         if (cfqq == cfqd->active_queue)
2078                 cfqd->active_queue = NULL;
2079
2080         if (cfqd->active_cic) {
2081                 put_io_context(cfqd->active_cic->icq.ioc);
2082                 cfqd->active_cic = NULL;
2083         }
2084 }
2085
2086 static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
2087 {
2088         struct cfq_queue *cfqq = cfqd->active_queue;
2089
2090         if (cfqq)
2091                 __cfq_slice_expired(cfqd, cfqq, timed_out);
2092 }
2093
2094 /*
2095  * Get next queue for service. Unless we have a queue preemption,
2096  * we'll simply select the first cfqq in the service tree.
2097  */
2098 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
2099 {
2100         struct cfq_rb_root *service_tree =
2101                 service_tree_for(cfqd->serving_group, cfqd->serving_prio,
2102                                         cfqd->serving_type);
2103
2104         if (!cfqd->rq_queued)
2105                 return NULL;
2106
2107         /* There is nothing to dispatch */
2108         if (!service_tree)
2109                 return NULL;
2110         if (RB_EMPTY_ROOT(&service_tree->rb))
2111                 return NULL;
2112         return cfq_rb_first(service_tree);
2113 }
2114
2115 static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
2116 {
2117         struct cfq_group *cfqg;
2118         struct cfq_queue *cfqq;
2119         int i, j;
2120         struct cfq_rb_root *st;
2121
2122         if (!cfqd->rq_queued)
2123                 return NULL;
2124
2125         cfqg = cfq_get_next_cfqg(cfqd);
2126         if (!cfqg)
2127                 return NULL;
2128
2129         for_each_cfqg_st(cfqg, i, j, st)
2130                 if ((cfqq = cfq_rb_first(st)) != NULL)
2131                         return cfqq;
2132         return NULL;
2133 }
2134
2135 /*
2136  * Get and set a new active queue for service.
2137  */
2138 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
2139                                               struct cfq_queue *cfqq)
2140 {
2141         if (!cfqq)
2142                 cfqq = cfq_get_next_queue(cfqd);
2143
2144         __cfq_set_active_queue(cfqd, cfqq);
2145         return cfqq;
2146 }
2147
2148 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
2149                                           struct request *rq)
2150 {
2151         if (blk_rq_pos(rq) >= cfqd->last_position)
2152                 return blk_rq_pos(rq) - cfqd->last_position;
2153         else
2154                 return cfqd->last_position - blk_rq_pos(rq);
2155 }
2156
2157 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2158                                struct request *rq)
2159 {
2160         return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
2161 }
2162
2163 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
2164                                     struct cfq_queue *cur_cfqq)
2165 {
2166         struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
2167         struct rb_node *parent, *node;
2168         struct cfq_queue *__cfqq;
2169         sector_t sector = cfqd->last_position;
2170
2171         if (RB_EMPTY_ROOT(root))
2172                 return NULL;
2173
2174         /*
2175          * First, if we find a request starting at the end of the last
2176          * request, choose it.
2177          */
2178         __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
2179         if (__cfqq)
2180                 return __cfqq;
2181
2182         /*
2183          * If the exact sector wasn't found, the parent of the NULL leaf
2184          * will contain the closest sector.
2185          */
2186         __cfqq = rb_entry(parent, struct cfq_queue, p_node);
2187         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2188                 return __cfqq;
2189
2190         if (blk_rq_pos(__cfqq->next_rq) < sector)
2191                 node = rb_next(&__cfqq->p_node);
2192         else
2193                 node = rb_prev(&__cfqq->p_node);
2194         if (!node)
2195                 return NULL;
2196
2197         __cfqq = rb_entry(node, struct cfq_queue, p_node);
2198         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2199                 return __cfqq;
2200
2201         return NULL;
2202 }
2203
2204 /*
2205  * cfqd - obvious
2206  * cur_cfqq - passed in so that we don't decide that the current queue is
2207  *            closely cooperating with itself.
2208  *
2209  * So, basically we're assuming that cur_cfqq has dispatched at least
2210  * one request, and that cfqd->last_position reflects a position on the disk
2211  * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
2212  * assumption.
2213  */
2214 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
2215                                               struct cfq_queue *cur_cfqq)
2216 {
2217         struct cfq_queue *cfqq;
2218
2219         if (cfq_class_idle(cur_cfqq))
2220                 return NULL;
2221         if (!cfq_cfqq_sync(cur_cfqq))
2222                 return NULL;
2223         if (CFQQ_SEEKY(cur_cfqq))
2224                 return NULL;
2225
2226         /*
2227          * Don't search priority tree if it's the only queue in the group.
2228          */
2229         if (cur_cfqq->cfqg->nr_cfqq == 1)
2230                 return NULL;
2231
2232         /*
2233          * We should notice if some of the queues are cooperating, e.g.
2234          * working closely on the same area of the disk. In that case,
2235          * we can group them together and not waste time idling.
2236          */
2237         cfqq = cfqq_close(cfqd, cur_cfqq);
2238         if (!cfqq)
2239                 return NULL;
2240
2241         /* If new queue belongs to different cfq_group, don't choose it */
2242         if (cur_cfqq->cfqg != cfqq->cfqg)
2243                 return NULL;
2244
2245         /*
2246          * It only makes sense to merge sync queues.
2247          */
2248         if (!cfq_cfqq_sync(cfqq))
2249                 return NULL;
2250         if (CFQQ_SEEKY(cfqq))
2251                 return NULL;
2252
2253         /*
2254          * Do not merge queues of different priority classes
2255          */
2256         if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
2257                 return NULL;
2258
2259         return cfqq;
2260 }
2261
2262 /*
2263  * Determine whether we should enforce idle window for this queue.
2264  */
2265
2266 static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2267 {
2268         enum wl_prio_t prio = cfqq_prio(cfqq);
2269         struct cfq_rb_root *service_tree = cfqq->service_tree;
2270
2271         BUG_ON(!service_tree);
2272         BUG_ON(!service_tree->count);
2273
2274         if (!cfqd->cfq_slice_idle)
2275                 return false;
2276
2277         /* We never do for idle class queues. */
2278         if (prio == IDLE_WORKLOAD)
2279                 return false;
2280
2281         /* We do for queues that were marked with idle window flag. */
2282         if (cfq_cfqq_idle_window(cfqq) &&
2283            !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
2284                 return true;
2285
2286         /*
2287          * Otherwise, we only idle if this is the last queue
2288          * in its service tree.
2289          */
2290         if (service_tree->count == 1 && cfq_cfqq_sync(cfqq) &&
2291            !cfq_io_thinktime_big(cfqd, &service_tree->ttime, false))
2292                 return true;
2293         cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
2294                         service_tree->count);
2295         return false;
2296 }
2297
2298 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
2299 {
2300         struct cfq_queue *cfqq = cfqd->active_queue;
2301         struct cfq_io_cq *cic;
2302         unsigned long sl, group_idle = 0;
2303
2304         /*
2305          * SSD devices without a seek penalty: disable idling. But only do so
2306          * for devices that support queuing, otherwise we still have a problem
2307          * with sync vs async workloads.
2308          */
2309         if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
2310                 return;
2311
2312         WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
2313         WARN_ON(cfq_cfqq_slice_new(cfqq));
2314
2315         /*
2316          * idle is disabled, either manually or by past process history
2317          */
2318         if (!cfq_should_idle(cfqd, cfqq)) {
2319                 /* no queue idling. Check for group idling */
2320                 if (cfqd->cfq_group_idle)
2321                         group_idle = cfqd->cfq_group_idle;
2322                 else
2323                         return;
2324         }
2325
2326         /*
2327          * still active requests from this queue, don't idle
2328          */
2329         if (cfqq->dispatched)
2330                 return;
2331
2332         /*
2333          * task has exited, don't wait
2334          */
2335         cic = cfqd->active_cic;
2336         if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
2337                 return;
2338
2339         /*
2340          * If our average think time is larger than the remaining time
2341          * slice, then don't idle. This avoids overrunning the allotted
2342          * time slice.
2343          */
2344         if (sample_valid(cic->ttime.ttime_samples) &&
2345             (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
2346                 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
2347                              cic->ttime.ttime_mean);
2348                 return;
2349         }
2350
2351         /* There are other queues in the group, don't do group idle */
2352         if (group_idle && cfqq->cfqg->nr_cfqq > 1)
2353                 return;
2354
2355         cfq_mark_cfqq_wait_request(cfqq);
2356
2357         if (group_idle)
2358                 sl = cfqd->cfq_group_idle;
2359         else
2360                 sl = cfqd->cfq_slice_idle;
2361
2362         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
2363         cfqg_stats_set_start_idle_time(cfqq->cfqg);
2364         cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
2365                         group_idle ? 1 : 0);
2366 }
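/*
 * Illustrative arithmetic for the timer armed above: sl is either
 * cfqd->cfq_group_idle or cfqd->cfq_slice_idle, in jiffies, and the timer
 * fires at jiffies + sl.  As a sketch, assuming HZ=1000 and an idle value
 * of 8 jiffies, the scheduler waits roughly 8ms for the active queue (or,
 * for group idle, the group) to issue another request before the slice is
 * given up.  The wait is skipped on non-rotational queuing devices, while
 * requests are still in flight, or when the mean think time already
 * exceeds the remaining slice.
 */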
2367
2368 /*
2369  * Move request from internal lists to the request queue dispatch list.
2370  */
2371 static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
2372 {
2373         struct cfq_data *cfqd = q->elevator->elevator_data;
2374         struct cfq_queue *cfqq = RQ_CFQQ(rq);
2375
2376         cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
2377
2378         cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
2379         cfq_remove_request(rq);
2380         cfqq->dispatched++;
2381         (RQ_CFQG(rq))->dispatched++;
2382         elv_dispatch_sort(q, rq);
2383
2384         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
2385         cfqq->nr_sectors += blk_rq_sectors(rq);
2386         cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
2387 }
2388
2389 /*
2390  * return expired entry, or NULL to just start from scratch in rbtree
2391  */
2392 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
2393 {
2394         struct request *rq = NULL;
2395
2396         if (cfq_cfqq_fifo_expire(cfqq))
2397                 return NULL;
2398
2399         cfq_mark_cfqq_fifo_expire(cfqq);
2400
2401         if (list_empty(&cfqq->fifo))
2402                 return NULL;
2403
2404         rq = rq_entry_fifo(cfqq->fifo.next);
2405         if (time_before(jiffies, rq_fifo_time(rq)))
2406                 rq = NULL;
2407
2408         cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
2409         return rq;
2410 }
2411
2412 static inline int
2413 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2414 {
2415         const int base_rq = cfqd->cfq_slice_async_rq;
2416
2417         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
2418
2419         return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
2420 }
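/*
 * Worked example, assuming cfq_slice_async_rq (and thus base_rq) is 2 and
 * IOPRIO_BE_NR is 8: a best-effort queue at ioprio 0 may have up to
 * 2 * 2 * (8 - 0) = 32 requests per slice, ioprio 4 gets 16, and ioprio 7
 * gets only 4, so lower-priority queues are throttled harder.
 */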
2421
2422 /*
2423  * Must be called with the queue_lock held.
2424  */
2425 static int cfqq_process_refs(struct cfq_queue *cfqq)
2426 {
2427         int process_refs, io_refs;
2428
2429         io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
2430         process_refs = cfqq->ref - io_refs;
2431         BUG_ON(process_refs < 0);
2432         return process_refs;
2433 }
2434
2435 static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
2436 {
2437         int process_refs, new_process_refs;
2438         struct cfq_queue *__cfqq;
2439
2440         /*
2441          * If there are no process references on the new_cfqq, then it is
2442          * unsafe to follow the ->new_cfqq chain as other cfqq's in the
2443          * chain may have dropped their last reference (not just their
2444          * last process reference).
2445          */
2446         if (!cfqq_process_refs(new_cfqq))
2447                 return;
2448
2449         /* Avoid a circular list and skip interim queue merges */
2450         while ((__cfqq = new_cfqq->new_cfqq)) {
2451                 if (__cfqq == cfqq)
2452                         return;
2453                 new_cfqq = __cfqq;
2454         }
2455
2456         process_refs = cfqq_process_refs(cfqq);
2457         new_process_refs = cfqq_process_refs(new_cfqq);
2458         /*
2459          * If the process for the cfqq has gone away, there is no
2460          * sense in merging the queues.
2461          */
2462         if (process_refs == 0 || new_process_refs == 0)
2463                 return;
2464
2465         /*
2466          * Merge in the direction of the lesser amount of work.
2467          */
2468         if (new_process_refs >= process_refs) {
2469                 cfqq->new_cfqq = new_cfqq;
2470                 new_cfqq->ref += process_refs;
2471         } else {
2472                 new_cfqq->new_cfqq = cfqq;
2473                 cfqq->ref += new_process_refs;
2474         }
2475 }
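/*
 * Merge-direction example with made-up reference counts: if
 * cfqq_process_refs(cfqq) == 3 and cfqq_process_refs(new_cfqq) == 1, the
 * else branch above runs, so new_cfqq->new_cfqq points at cfqq and cfqq
 * picks up new_cfqq's single process reference.  The queue with fewer
 * process references is the one scheduled to merge into the other.
 */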
2476
2477 static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
2478                                 struct cfq_group *cfqg, enum wl_prio_t prio)
2479 {
2480         struct cfq_queue *queue;
2481         int i;
2482         bool key_valid = false;
2483         unsigned long lowest_key = 0;
2484         enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
2485
2486         for (i = 0; i <= SYNC_WORKLOAD; ++i) {
2487                 /* select the one with lowest rb_key */
2488                 queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
2489                 if (queue &&
2490                     (!key_valid || time_before(queue->rb_key, lowest_key))) {
2491                         lowest_key = queue->rb_key;
2492                         cur_best = i;
2493                         key_valid = true;
2494                 }
2495         }
2496
2497         return cur_best;
2498 }
2499
2500 static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
2501 {
2502         unsigned slice;
2503         unsigned count;
2504         struct cfq_rb_root *st;
2505         unsigned group_slice;
2506         enum wl_prio_t original_prio = cfqd->serving_prio;
2507
2508         /* Choose next priority. RT > BE > IDLE */
2509         if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
2510                 cfqd->serving_prio = RT_WORKLOAD;
2511         else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
2512                 cfqd->serving_prio = BE_WORKLOAD;
2513         else {
2514                 cfqd->serving_prio = IDLE_WORKLOAD;
2515                 cfqd->workload_expires = jiffies + 1;
2516                 return;
2517         }
2518
2519         if (original_prio != cfqd->serving_prio)
2520                 goto new_workload;
2521
2522         /*
2523          * For RT and BE, we have to choose also the type
2524          * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
2525          * expiration time
2526          */
2527         st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2528         count = st->count;
2529
2530         /*
2531          * check workload expiration, and that we still have other queues ready
2532          */
2533         if (count && !time_after(jiffies, cfqd->workload_expires))
2534                 return;
2535
2536 new_workload:
2537         /* otherwise select new workload type */
2538         cfqd->serving_type =
2539                 cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
2540         st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2541         count = st->count;
2542
2543         /*
2544          * the workload slice is computed as a fraction of target latency
2545          * proportional to the number of queues in that workload, over
2546          * all the queues in the same priority class
2547          */
2548         group_slice = cfq_group_slice(cfqd, cfqg);
2549
2550         slice = group_slice * count /
2551                 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
2552                       cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
2553
2554         if (cfqd->serving_type == ASYNC_WORKLOAD) {
2555                 unsigned int tmp;
2556
2557                 /*
2558                  * Async queues are currently system wide. Just taking
2559                  * the proportion of queues within the same group will lead to a
2560                  * higher async ratio system wide, as the root group generally
2561                  * has a higher weight. A more accurate approach would be to
2562                  * calculate the system wide async/sync ratio.
2563                  */
2564                 tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
2565                 tmp = tmp/cfqd->busy_queues;
2566                 slice = min_t(unsigned, slice, tmp);
2567
2568                 /* async workload slice is scaled down according to
2569                  * the sync/async slice ratio. */
2570                 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
2571         } else
2572                 /* sync workload slice is at least 2 * cfq_slice_idle */
2573                 slice = max(slice, 2 * cfqd->cfq_slice_idle);
2574
2575         slice = max_t(unsigned, slice, CFQ_MIN_TT);
2576         cfq_log(cfqd, "workload slice:%d", slice);
2577         cfqd->workload_expires = jiffies + slice;
2578 }
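/*
 * Slice-sizing sketch for the code above, with purely illustrative
 * numbers: if group_slice works out to 300 jiffies and the chosen sync
 * workload tree holds 2 of the 6 busy queues counted for this priority
 * class, the workload slice is 300 * 2 / 6 = 100 jiffies.  An async
 * workload would additionally be capped by the busy-async proportion
 * computed in the branch above and scaled by the async vs. sync slice
 * settings, a sync workload is floored at twice cfq_slice_idle, and
 * everything is floored at CFQ_MIN_TT.
 */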
2579
2580 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2581 {
2582         struct cfq_rb_root *st = &cfqd->grp_service_tree;
2583         struct cfq_group *cfqg;
2584
2585         if (RB_EMPTY_ROOT(&st->rb))
2586                 return NULL;
2587         cfqg = cfq_rb_first_group(st);
2588         update_min_vdisktime(st);
2589         return cfqg;
2590 }
2591
2592 static void cfq_choose_cfqg(struct cfq_data *cfqd)
2593 {
2594         struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
2595
2596         cfqd->serving_group = cfqg;
2597
2598         /* Restore the workload type data */
2599         if (cfqg->saved_workload_slice) {
2600                 cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
2601                 cfqd->serving_type = cfqg->saved_workload;
2602                 cfqd->serving_prio = cfqg->saved_serving_prio;
2603         } else
2604                 cfqd->workload_expires = jiffies - 1;
2605
2606         choose_service_tree(cfqd, cfqg);
2607 }
2608
2609 /*
2610  * Select a queue for service. If we have a current active queue,
2611  * check whether to continue servicing it, or retrieve and set a new one.
2612  */
2613 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
2614 {
2615         struct cfq_queue *cfqq, *new_cfqq = NULL;
2616
2617         cfqq = cfqd->active_queue;
2618         if (!cfqq)
2619                 goto new_queue;
2620
2621         if (!cfqd->rq_queued)
2622                 return NULL;
2623
2624         /*
2625          * We were waiting for group to get backlogged. Expire the queue
2626          */
2627         if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
2628                 goto expire;
2629
2630         /*
2631          * The active queue has run out of time, expire it and select new.
2632          */
2633         if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
2634                 /*
2635                  * If the slice had not expired at the completion of the last
2636                  * request, we might not have turned on the wait_busy flag. Don't
2637                  * expire the queue yet; allow the group to get backlogged.
2638                  *
2639                  * The very fact that we have used up the slice means we have
2640                  * been idling all along on this queue, so it should be OK to
2641                  * wait for this request to complete.
2642                  */
2643                 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
2644                     && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2645                         cfqq = NULL;
2646                         goto keep_queue;
2647                 } else
2648                         goto check_group_idle;
2649         }
2650
2651         /*
2652          * The active queue has requests and isn't expired, allow it to
2653          * dispatch.
2654          */
2655         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
2656                 goto keep_queue;
2657
2658         /*
2659          * If another queue has a request waiting within our mean seek
2660          * distance, let it run.  The expire code will check for close
2661          * cooperators and put the close queue at the front of the service
2662          * tree.  If possible, merge the expiring queue with the new cfqq.
2663          */
2664         new_cfqq = cfq_close_cooperator(cfqd, cfqq);
2665         if (new_cfqq) {
2666                 if (!cfqq->new_cfqq)
2667                         cfq_setup_merge(cfqq, new_cfqq);
2668                 goto expire;
2669         }
2670
2671         /*
2672          * No requests pending. If the active queue still has requests in
2673          * flight or is idling for a new request, allow either of these
2674          * conditions to happen (or time out) before selecting a new queue.
2675          */
2676         if (timer_pending(&cfqd->idle_slice_timer)) {
2677                 cfqq = NULL;
2678                 goto keep_queue;
2679         }
2680
2681         /*
2682          * This is a deep seek queue, but the device is much faster than
2683          * the queue can deliver; don't idle
2684          */
2685         if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
2686             (cfq_cfqq_slice_new(cfqq) ||
2687             (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
2688                 cfq_clear_cfqq_deep(cfqq);
2689                 cfq_clear_cfqq_idle_window(cfqq);
2690         }
2691
2692         if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2693                 cfqq = NULL;
2694                 goto keep_queue;
2695         }
2696
2697         /*
2698          * If group idle is enabled and there are requests dispatched from
2699          * this group, wait for requests to complete.
2700          */
2701 check_group_idle:
2702         if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
2703             cfqq->cfqg->dispatched &&
2704             !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
2705                 cfqq = NULL;
2706                 goto keep_queue;
2707         }
2708
2709 expire:
2710         cfq_slice_expired(cfqd, 0);
2711 new_queue:
2712         /*
2713          * Current queue expired. Check if we have to switch to a new
2714          * service tree
2715          */
2716         if (!new_cfqq)
2717                 cfq_choose_cfqg(cfqd);
2718
2719         cfqq = cfq_set_active_queue(cfqd, new_cfqq);
2720 keep_queue:
2721         return cfqq;
2722 }
2723
2724 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
2725 {
2726         int dispatched = 0;
2727
2728         while (cfqq->next_rq) {
2729                 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
2730                 dispatched++;
2731         }
2732
2733         BUG_ON(!list_empty(&cfqq->fifo));
2734
2735         /* By default cfqq is not expired if it is empty. Do it explicitly */
2736         __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
2737         return dispatched;
2738 }
2739
2740 /*
2741  * Drain our current requests. Used for barriers and when switching
2742  * io schedulers on-the-fly.
2743  */
2744 static int cfq_forced_dispatch(struct cfq_data *cfqd)
2745 {
2746         struct cfq_queue *cfqq;
2747         int dispatched = 0;
2748
2749         /* Expire the timeslice of the current active queue first */
2750         cfq_slice_expired(cfqd, 0);
2751         while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
2752                 __cfq_set_active_queue(cfqd, cfqq);
2753                 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
2754         }
2755
2756         BUG_ON(cfqd->busy_queues);
2757
2758         cfq_log(cfqd, "forced_dispatch=%d", dispatched);
2759         return dispatched;
2760 }
2761
2762 static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
2763         struct cfq_queue *cfqq)
2764 {
2765         /* the queue hasn't finished any request, can't estimate */
2766         if (cfq_cfqq_slice_new(cfqq))
2767                 return true;
2768         if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
2769                 cfqq->slice_end))
2770                 return true;
2771
2772         return false;
2773 }
2774
2775 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2776 {
2777         unsigned int max_dispatch;
2778
2779         /*
2780          * Drain async requests before we start sync IO
2781          */
2782         if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
2783                 return false;
2784
2785         /*
2786          * If this is an async queue and we have sync IO in flight, let it wait
2787          */
2788         if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
2789                 return false;
2790
2791         max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
2792         if (cfq_class_idle(cfqq))
2793                 max_dispatch = 1;
2794
2795         /*
2796          * Does this cfqq already have too much IO in flight?
2797          */
2798         if (cfqq->dispatched >= max_dispatch) {
2799                 bool promote_sync = false;
2800                 /*
2801                  * idle queue must always only have a single IO in flight
2802                  */
2803                 if (cfq_class_idle(cfqq))
2804                         return false;
2805
2806                 /*
2807                  * If there is only one sync queue,
2808                  * we can ignore the async queues here and give the sync
2809                  * queue no dispatch limit. The reason is that a sync queue can
2810                  * preempt an async queue, so limiting the sync queue doesn't
2811                  * make sense. This is useful for the aiostress test.
2812                  */
2813                 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
2814                         promote_sync = true;
2815
2816                 /*
2817                  * We have other queues, don't allow more IO from this one
2818                  */
2819                 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
2820                                 !promote_sync)
2821                         return false;
2822
2823                 /*
2824                  * Sole queue user, no limit
2825                  */
2826                 if (cfqd->busy_queues == 1 || promote_sync)
2827                         max_dispatch = -1;
2828                 else
2829                         /*
2830                          * Normally we start throttling cfqq when cfq_quantum/2
2831                          * requests have been dispatched. But we can drive
2832                          * deeper queue depths at the beginning of slice
2833                          * deeper queue depths at the beginning of the slice,
2834                          * subject to the upper limit of cfq_quantum.
2835                          */
2836         }
2837
2838         /*
2839          * Async queues must wait a bit before being allowed dispatch.
2840          * We also ramp up the dispatch depth gradually for async IO,
2841          * based on the last sync IO we serviced
2842          */
2843         if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
2844                 unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
2845                 unsigned int depth;
2846
2847                 depth = last_sync / cfqd->cfq_slice[1];
2848                 if (!depth && !cfqq->dispatched)
2849                         depth = 1;
2850                 if (depth < max_dispatch)
2851                         max_dispatch = depth;
2852         }
2853
2854         /*
2855          * If we're below the current max, allow a dispatch
2856          */
2857         return cfqq->dispatched < max_dispatch;
2858 }
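/*
 * Ramp-up sketch for the async throttle above, with assumed numbers: with
 * cfq_quantum at 8, max_dispatch starts at 8 / 2 = 4.  For an async queue
 * with cfq_latency enabled, if the last delayed sync completion was 250
 * jiffies ago and the sync slice (cfq_slice[1]) is 100 jiffies, then
 * depth = 250 / 100 = 2, so the queue may keep at most 2 requests in
 * flight even though the quantum would otherwise allow more; a queue that
 * has dispatched nothing yet still gets a depth of 1.
 */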
2859
2860 /*
2861  * Dispatch a request from cfqq, moving them to the request queue
2862  * dispatch list.
2863  */
2864 static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2865 {
2866         struct request *rq;
2867
2868         BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
2869
2870         if (!cfq_may_dispatch(cfqd, cfqq))
2871                 return false;
2872
2873         /*
2874          * follow expired path, else get first next available
2875          */
2876         rq = cfq_check_fifo(cfqq);
2877         if (!rq)
2878                 rq = cfqq->next_rq;
2879
2880         /*
2881          * insert request into driver dispatch list
2882          */
2883         cfq_dispatch_insert(cfqd->queue, rq);
2884
2885         if (!cfqd->active_cic) {
2886                 struct cfq_io_cq *cic = RQ_CIC(rq);
2887
2888                 atomic_long_inc(&cic->icq.ioc->refcount);
2889                 cfqd->active_cic = cic;
2890         }
2891
2892         return true;
2893 }
2894
2895 /*
2896  * Find the cfqq that we need to service and move a request from that to the
2897  * dispatch list
2898  */
2899 static int cfq_dispatch_requests(struct request_queue *q, int force)
2900 {
2901         struct cfq_data *cfqd = q->elevator->elevator_data;
2902         struct cfq_queue *cfqq;
2903
2904         if (!cfqd->busy_queues)
2905                 return 0;
2906
2907         if (unlikely(force))
2908                 return cfq_forced_dispatch(cfqd);
2909
2910         cfqq = cfq_select_queue(cfqd);
2911         if (!cfqq)
2912                 return 0;
2913
2914         /*
2915          * Dispatch a request from this cfqq, if it is allowed
2916          */
2917         if (!cfq_dispatch_request(cfqd, cfqq))
2918                 return 0;
2919
2920         cfqq->slice_dispatch++;
2921         cfq_clear_cfqq_must_dispatch(cfqq);
2922
2923         /*
2924          * expire an async queue immediately if it has used up its slice. idle
2925          * queues always expire after one dispatch round.
2926          */
2927         if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
2928             cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
2929             cfq_class_idle(cfqq))) {
2930                 cfqq->slice_end = jiffies + 1;
2931                 cfq_slice_expired(cfqd, 0);
2932         }
2933
2934         cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2935         return 1;
2936 }
2937
2938 /*
2939  * task holds one reference to the queue, dropped when task exits. each rq
2940  * in-flight on this queue also holds a reference, dropped when rq is freed.
2941  *
2942  * Each cfq queue took a reference on the parent group. Drop it now.
2943  * queue lock must be held here.
2944  */
2945 static void cfq_put_queue(struct cfq_queue *cfqq)
2946 {
2947         struct cfq_data *cfqd = cfqq->cfqd;
2948         struct cfq_group *cfqg;
2949
2950         BUG_ON(cfqq->ref <= 0);
2951
2952         cfqq->ref--;
2953         if (cfqq->ref)
2954                 return;
2955
2956         cfq_log_cfqq(cfqd, cfqq, "put_queue");
2957         BUG_ON(rb_first(&cfqq->sort_list));
2958         BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
2959         cfqg = cfqq->cfqg;
2960
2961         if (unlikely(cfqd->active_queue == cfqq)) {
2962                 __cfq_slice_expired(cfqd, cfqq, 0);
2963                 cfq_schedule_dispatch(cfqd);
2964         }
2965
2966         BUG_ON(cfq_cfqq_on_rr(cfqq));
2967         kmem_cache_free(cfq_pool, cfqq);
2968         cfqg_put(cfqg);
2969 }
2970
2971 static void cfq_put_cooperator(struct cfq_queue *cfqq)
2972 {
2973         struct cfq_queue *__cfqq, *next;
2974
2975         /*
2976          * If this queue was scheduled to merge with another queue, be
2977          * sure to drop the reference taken on that queue (and others in
2978          * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
2979          */
2980         __cfqq = cfqq->new_cfqq;
2981         while (__cfqq) {
2982                 if (__cfqq == cfqq) {
2983                         WARN(1, "cfqq->new_cfqq loop detected\n");
2984                         break;
2985                 }
2986                 next = __cfqq->new_cfqq;
2987                 cfq_put_queue(__cfqq);
2988                 __cfqq = next;
2989         }
2990 }
2991
2992 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2993 {
2994         if (unlikely(cfqq == cfqd->active_queue)) {
2995                 __cfq_slice_expired(cfqd, cfqq, 0);
2996                 cfq_schedule_dispatch(cfqd);
2997         }
2998
2999         cfq_put_cooperator(cfqq);
3000
3001         cfq_put_queue(cfqq);
3002 }
3003
3004 static void cfq_init_icq(struct io_cq *icq)
3005 {
3006         struct cfq_io_cq *cic = icq_to_cic(icq);
3007
3008         cic->ttime.last_end_request = jiffies;
3009 }
3010
3011 static void cfq_exit_icq(struct io_cq *icq)
3012 {
3013         struct cfq_io_cq *cic = icq_to_cic(icq);
3014         struct cfq_data *cfqd = cic_to_cfqd(cic);
3015
3016         if (cic->cfqq[BLK_RW_ASYNC]) {
3017                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
3018                 cic->cfqq[BLK_RW_ASYNC] = NULL;
3019         }
3020
3021         if (cic->cfqq[BLK_RW_SYNC]) {
3022                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
3023                 cic->cfqq[BLK_RW_SYNC] = NULL;
3024         }
3025 }
3026
3027 static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
3028 {
3029         struct task_struct *tsk = current;
3030         int ioprio_class;
3031
3032         if (!cfq_cfqq_prio_changed(cfqq))
3033                 return;
3034
3035         ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3036         switch (ioprio_class) {
3037         default:
3038                 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
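                     /* fall through: treat an unknown class like IOPRIO_CLASS_NONE */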
3039         case IOPRIO_CLASS_NONE:
3040                 /*
3041                  * no prio set, inherit CPU scheduling settings
3042                  */
3043                 cfqq->ioprio = task_nice_ioprio(tsk);
3044                 cfqq->ioprio_class = task_nice_ioclass(tsk);
3045                 break;
3046         case IOPRIO_CLASS_RT:
3047                 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3048                 cfqq->ioprio_class = IOPRIO_CLASS_RT;
3049                 break;
3050         case IOPRIO_CLASS_BE:
3051                 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3052                 cfqq->ioprio_class = IOPRIO_CLASS_BE;
3053                 break;
3054         case IOPRIO_CLASS_IDLE:
3055                 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
3056                 cfqq->ioprio = 7;
3057                 cfq_clear_cfqq_idle_window(cfqq);
3058                 break;
3059         }
3060
3061         /*
3062          * keep track of original prio settings in case we have to temporarily
3063          * elevate the priority of this queue
3064          */
3065         cfqq->org_ioprio = cfqq->ioprio;
3066         cfq_clear_cfqq_prio_changed(cfqq);
3067 }
3068
3069 static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
3070 {
3071         int ioprio = cic->icq.ioc->ioprio;
3072         struct cfq_data *cfqd = cic_to_cfqd(cic);
3073         struct cfq_queue *cfqq;
3074
3075         /*
3076          * Check whether ioprio has changed.  The condition may trigger
3077          * spuriously on a newly created cic but there's no harm.
3078          */
3079         if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
3080                 return;
3081
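             /*
              * Async queues are shared per priority level, so switch the cic to
              * the queue matching the new ioprio right away.  The sync queue is
              * private to the process; it is only flagged here and its priority
              * is re-derived lazily by cfq_init_prio_data().
              */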
3082         cfqq = cic->cfqq[BLK_RW_ASYNC];
3083         if (cfqq) {
3084                 struct cfq_queue *new_cfqq;
3085                 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
3086                                          GFP_ATOMIC);
3087                 if (new_cfqq) {
3088                         cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
3089                         cfq_put_queue(cfqq);
3090                 }
3091         }
3092
3093         cfqq = cic->cfqq[BLK_RW_SYNC];
3094         if (cfqq)
3095                 cfq_mark_cfqq_prio_changed(cfqq);
3096
3097         cic->ioprio = ioprio;
3098 }
3099
3100 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3101                           pid_t pid, bool is_sync)
3102 {
3103         RB_CLEAR_NODE(&cfqq->rb_node);
3104         RB_CLEAR_NODE(&cfqq->p_node);
3105         INIT_LIST_HEAD(&cfqq->fifo);
3106
3107         cfqq->ref = 0;
3108         cfqq->cfqd = cfqd;
3109
3110         cfq_mark_cfqq_prio_changed(cfqq);
3111
3112         if (is_sync) {
3113                 if (!cfq_class_idle(cfqq))
3114                         cfq_mark_cfqq_idle_window(cfqq);
3115                 cfq_mark_cfqq_sync(cfqq);
3116         }
3117         cfqq->pid = pid;
3118 }
3119
3120 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3121 static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
3122 {
3123         struct cfq_data *cfqd = cic_to_cfqd(cic);
3124         struct cfq_queue *sync_cfqq;
3125         uint64_t id;
3126
3127         rcu_read_lock();
3128         id = bio_blkio_cgroup(bio)->id;
3129         rcu_read_unlock();
3130
3131         /*
3132          * Check whether blkcg has changed.  The condition may trigger
3133          * spuriously on a newly created cic but there's no harm.
3134          */
3135         if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
3136                 return;
3137
3138         sync_cfqq = cic_to_cfqq(cic, 1);
3139         if (sync_cfqq) {
3140                 /*
3141                  * Drop reference to sync queue. A new sync queue will be
3142                  * assigned in the new group upon arrival of a fresh request.
3143                  */
3144                 cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
3145                 cic_set_cfqq(cic, NULL, 1);
3146                 cfq_put_queue(sync_cfqq);
3147         }
3148
3149         cic->blkcg_id = id;
3150 }
3151 #else
3152 static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
3153 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
3154
3155 static struct cfq_queue *
3156 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3157                      struct bio *bio, gfp_t gfp_mask)
3158 {
3159         struct blkio_cgroup *blkcg;
3160         struct cfq_queue *cfqq, *new_cfqq = NULL;
3161         struct cfq_group *cfqg;
3162
3163 retry:
3164         rcu_read_lock();
3165
3166         blkcg = bio_blkio_cgroup(bio);
3167         cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
3168         cfqq = cic_to_cfqq(cic, is_sync);
3169
3170         /*
3171          * Always try a new alloc if we fell back to the OOM cfqq
3172          * originally, since it should just be a temporary situation.
3173          */
3174         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3175                 cfqq = NULL;
3176                 if (new_cfqq) {
3177                         cfqq = new_cfqq;
3178                         new_cfqq = NULL;
3179                 } else if (gfp_mask & __GFP_WAIT) {
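                             /*
                              * A blocking allocation has to drop both the rcu
                              * read lock and the queue lock, so revalidate the
                              * lookup via the retry label once it returns.
                              */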
3180                         rcu_read_unlock();
3181                         spin_unlock_irq(cfqd->queue->queue_lock);
3182                         new_cfqq = kmem_cache_alloc_node(cfq_pool,
3183                                         gfp_mask | __GFP_ZERO,
3184                                         cfqd->queue->node);
3185                         spin_lock_irq(cfqd->queue->queue_lock);
3186                         if (new_cfqq)
3187                                 goto retry;
3188                 } else {
3189                         cfqq = kmem_cache_alloc_node(cfq_pool,
3190                                         gfp_mask | __GFP_ZERO,
3191                                         cfqd->queue->node);
3192                 }
3193
3194                 if (cfqq) {
3195                         cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
3196                         cfq_init_prio_data(cfqq, cic);
3197                         cfq_link_cfqq_cfqg(cfqq, cfqg);
3198                         cfq_log_cfqq(cfqd, cfqq, "alloced");
3199                 } else
3200                         cfqq = &cfqd->oom_cfqq;
3201         }
3202
3203         if (new_cfqq)
3204                 kmem_cache_free(cfq_pool, new_cfqq);
3205
3206         rcu_read_unlock();
3207         return cfqq;
3208 }
3209
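     /*
      * Async queues are shared per (class, priority) level rather than per
      * process: async_cfqq[0][] holds the RT levels, async_cfqq[1][] the BE
      * levels, CLASS_NONE maps to BE at the default priority, and all idle
      * class async IO shares the single async_idle_cfqq.
      */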
3210 static struct cfq_queue **
3211 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
3212 {
3213         switch (ioprio_class) {
3214         case IOPRIO_CLASS_RT:
3215                 return &cfqd->async_cfqq[0][ioprio];
3216         case IOPRIO_CLASS_NONE:
3217                 ioprio = IOPRIO_NORM;
3218                 /* fall through */
3219         case IOPRIO_CLASS_BE:
3220                 return &cfqd->async_cfqq[1][ioprio];
3221         case IOPRIO_CLASS_IDLE:
3222                 return &cfqd->async_idle_cfqq;
3223         default:
3224                 BUG();
3225         }
3226 }
3227
3228 static struct cfq_queue *
3229 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3230               struct bio *bio, gfp_t gfp_mask)
3231 {
3232         const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3233         const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3234         struct cfq_queue **async_cfqq = NULL;
3235         struct cfq_queue *cfqq = NULL;
3236
3237         if (!is_sync) {
3238                 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
3239                 cfqq = *async_cfqq;
3240         }
3241
3242         if (!cfqq)
3243                 cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
3244
3245         /*
3246          * pin the queue now that it's allocated, scheduler exit will prune it
3247          */
3248         if (!is_sync && !(*async_cfqq)) {
3249                 cfqq->ref++;
3250                 *async_cfqq = cfqq;
3251         }
3252
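             /* take the reference that is handed back to the caller */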
3253         cfqq->ref++;
3254         return cfqq;
3255 }
3256
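     /*
      * Think time (the gap between one request completing and the next one
      * arriving, capped at twice the idle slice) is kept as a decaying
      * average: each update keeps 7/8 of the old value and folds in the new
      * sample, scaled by 256 for fixed-point precision, so ttime_mean comes
      * out in jiffies.
      */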
3257 static void
3258 __cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
3259 {
3260         unsigned long elapsed = jiffies - ttime->last_end_request;
3261         elapsed = min(elapsed, 2UL * slice_idle);
3262
3263         ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
3264         ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
3265         ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
3266 }
3267
3268 static void
3269 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3270                         struct cfq_io_cq *cic)
3271 {
3272         if (cfq_cfqq_sync(cfqq)) {
3273                 __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
3274                 __cfq_update_io_thinktime(&cfqq->service_tree->ttime,
3275                         cfqd->cfq_slice_idle);
3276         }
3277 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3278         __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
3279 #endif
3280 }
3281
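     /*
      * seek_history is a 32-bit shift register of per-request "seeky" bits:
      * on non-rotational devices a request counts as seeky when it is small
      * (below CFQQ_SECT_THR_NONROT), on rotational devices when the seek
      * distance exceeds CFQQ_SEEK_THR.  CFQQ_SEEKY() then flags the queue
      * once more than 1/8 of the tracked requests were seeky.
      */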
3282 static void
3283 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3284                        struct request *rq)
3285 {
3286         sector_t sdist = 0;
3287         sector_t n_sec = blk_rq_sectors(rq);
3288         if (cfqq->last_request_pos) {
3289                 if (cfqq->last_request_pos < blk_rq_pos(rq))
3290                         sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3291                 else
3292                         sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3293         }
3294
3295         cfqq->seek_history <<= 1;
3296         if (blk_queue_nonrot(cfqd->queue))
3297                 cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3298         else
3299                 cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
3300 }
3301
3302 /*
3303  * Disable idle window if the process thinks too long or seeks so much that
3304  * it doesn't matter
3305  */
3306 static void
3307 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3308                        struct cfq_io_cq *cic)
3309 {
3310         int old_idle, enable_idle;
3311
3312         /*
3313          * Don't idle for async or idle io prio class
3314          */
3315         if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3316                 return;
3317
3318         enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
3319
3320         if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3321                 cfq_mark_cfqq_deep(cfqq);
3322
3323         if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
3324                 enable_idle = 0;
3325         else if (!atomic_read(&cic->icq.ioc->active_ref) ||
3326                  !cfqd->cfq_slice_idle ||
3327                  (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
3328                 enable_idle = 0;
3329         else if (sample_valid(cic->ttime.ttime_samples)) {
3330                 if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
3331                         enable_idle = 0;
3332                 else
3333                         enable_idle = 1;
3334         }
3335
3336         if (old_idle != enable_idle) {
3337                 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3338                 if (enable_idle)
3339                         cfq_mark_cfqq_idle_window(cfqq);
3340                 else
3341                         cfq_clear_cfqq_idle_window(cfqq);
3342         }
3343 }
3344
3345 /*
3346  * Check if new_cfqq should preempt the currently active queue. Returns
3347  * false for no (or if we aren't sure); returning true will cause a preempt.
3348  */
3349 static bool
3350 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3351                    struct request *rq)
3352 {
3353         struct cfq_queue *cfqq;
3354
3355         cfqq = cfqd->active_queue;
3356         if (!cfqq)
3357                 return false;
3358
3359         if (cfq_class_idle(new_cfqq))
3360                 return false;
3361
3362         if (cfq_class_idle(cfqq))
3363                 return true;
3364
3365         /*
3366          * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3367          */
3368         if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3369                 return false;
3370
3371         /*
3372          * if the new request is sync, but the currently running queue is
3373          * not, let the sync request have priority.
3374          */
3375         if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
3376                 return true;
3377
3378         if (new_cfqq->cfqg != cfqq->cfqg)
3379                 return false;
3380
3381         if (cfq_slice_used(cfqq))
3382                 return true;
3383
3384         /* Allow preemption only if we are idling on sync-noidle tree */
3385         if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
3386             cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3387             new_cfqq->service_tree->count == 2 &&
3388             RB_EMPTY_ROOT(&cfqq->sort_list))
3389                 return true;
3390
3391         /*
3392          * So both queues are sync. Let the new request get disk time if
3393          * it's a metadata request and the current queue is doing regular IO.
3394          */
3395         if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
3396                 return true;
3397
3398         /*
3399          * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
3400          */
3401         if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3402                 return true;
3403
3404         /* The active queue is empty and we shouldn't idle on it, so allow preempt */
3405         if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
3406                 return true;
3407
3408         if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
3409                 return false;
3410
3411         /*
3412          * if this request is as good as one we would expect from the
3413          * current cfqq, let it preempt
3414          */
3415         if (cfq_rq_close(cfqd, cfqq, rq))
3416                 return true;
3417
3418         return false;
3419 }
3420
3421 /*
3422  * cfqq preempts the active queue. if we allowed preempt with no slice left,
3423  * let it have half of its nominal slice.
3424  */
3425 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3426 {
3427         enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
3428
3429         cfq_log_cfqq(cfqd, cfqq, "preempt");
3430         cfq_slice_expired(cfqd, 1);
3431
3432         /*
3433          * workload type is changed, don't save slice, otherwise preempt
3434          * doesn't happen
3435          */
3436         if (old_type != cfqq_type(cfqq))
3437                 cfqq->cfqg->saved_workload_slice = 0;
3438
3439         /*
3440          * Put the new queue at the front of the current list,
3441          * so we know that it will be selected next.
3442          */
3443         BUG_ON(!cfq_cfqq_on_rr(cfqq));
3444
3445         cfq_service_tree_add(cfqd, cfqq, 1);
3446
3447         cfqq->slice_end = 0;
3448         cfq_mark_cfqq_slice_new(cfqq);
3449 }
3450
3451 /*
3452  * Called when a new fs request (rq) is added (to cfqq). Check if there's
3453  * something we should do about it
3454  */
3455 static void
3456 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3457                 struct request *rq)
3458 {
3459         struct cfq_io_cq *cic = RQ_CIC(rq);
3460
3461         cfqd->rq_queued++;
3462         if (rq->cmd_flags & REQ_PRIO)
3463                 cfqq->prio_pending++;
3464
3465         cfq_update_io_thinktime(cfqd, cfqq, cic);
3466         cfq_update_io_seektime(cfqd, cfqq, rq);
3467         cfq_update_idle_window(cfqd, cfqq, cic);
3468
3469         cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
3470
3471         if (cfqq == cfqd->active_queue) {
3472                 /*
3473                  * Remember that we saw a request from this process, but
3474                  * don't start queuing just yet. Otherwise we risk seeing lots
3475                  * of tiny requests, because we disrupt the normal plugging
3476                  * and merging. If the request is already larger than a single
3477                  * page, let it rip immediately. For that case we assume that
3478                  * merging is already done. Ditto for a busy system that
3479                  * has other work pending: don't risk delaying until the
3480                  * idle timer unplugs to continue working.
3481                  */
3482                 if (cfq_cfqq_wait_request(cfqq)) {
3483                         if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3484                             cfqd->busy_queues > 1) {
3485                                 cfq_del_timer(cfqd, cfqq);
3486                                 cfq_clear_cfqq_wait_request(cfqq);
3487                                 __blk_run_queue(cfqd->queue);
3488                         } else {
3489                                 cfqg_stats_update_idle_time(cfqq->cfqg);
3490                                 cfq_mark_cfqq_must_dispatch(cfqq);
3491                         }
3492                 }
3493         } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
3494                 /*
3495                  * not the active queue - expire current slice if it is
3496                  * idle and has expired its mean thinktime, or this new queue
3497                  * has some old slice time left and is of higher priority or
3498                  * this new queue is RT and the current one is BE
3499                  */
3500                 cfq_preempt_queue(cfqd, cfqq);
3501                 __blk_run_queue(cfqd->queue);
3502         }
3503 }
3504
3505 static void cfq_insert_request(struct request_queue *q, struct request *rq)
3506 {
3507         struct cfq_data *cfqd = q->elevator->elevator_data;
3508         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3509
3510         cfq_log_cfqq(cfqd, cfqq, "insert_request");
3511         cfq_init_prio_data(cfqq, RQ_CIC(rq));
3512
3513         rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
3514         list_add_tail(&rq->queuelist, &cfqq->fifo);
3515         cfq_add_rq_rb(rq);
3516         cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
3517                                  rq->cmd_flags);
3518         cfq_rq_enqueued(cfqd, cfqq, rq);
3519 }
3520
3521 /*
3522  * Update hw_tag based on peak queue depth over 50 samples under
3523  * sufficient load.
3524  */
3525 static void cfq_update_hw_tag(struct cfq_data *cfqd)
3526 {
3527         struct cfq_queue *cfqq = cfqd->active_queue;
3528
3529         if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3530                 cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
3531
3532         if (cfqd->hw_tag == 1)
3533                 return;
3534
3535         if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
3536             cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
3537                 return;
3538
3539         /*
3540          * If the active queue doesn't have enough requests and can idle, cfq
3541          * might not dispatch sufficient requests to hardware. Don't zero
3542          * hw_tag in this case.
3543          */
3544         if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3545             cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
3546             CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
3547                 return;
3548
3549         if (cfqd->hw_tag_samples++ < 50)
3550                 return;
3551
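             /*
              * A sustained depth at or above the threshold suggests the device
              * does its own command queueing.
              */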
3552         if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
3553                 cfqd->hw_tag = 1;
3554         else
3555                 cfqd->hw_tag = 0;
3556 }
3557
3558 static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3559 {
3560         struct cfq_io_cq *cic = cfqd->active_cic;
3561
3562         /* If the queue already has requests, don't wait */
3563         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3564                 return false;
3565
3566         /* If there are other queues in the group, don't wait */
3567         if (cfqq->cfqg->nr_cfqq > 1)
3568                 return false;
3569
3570         /* the only queue in the group, but think time is big */
3571         if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
3572                 return false;
3573
3574         if (cfq_slice_used(cfqq))
3575                 return true;
3576
3577         /* if slice left is less than think time, wait busy */
3578         if (cic && sample_valid(cic->ttime.ttime_samples)
3579             && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
3580                 return true;
3581
3582         /*
3583          * If think time is less than a jiffy, then ttime_mean=0 and the
3584          * check above will not be true. It might happen that the slice has
3585          * not expired yet but will expire soon (4-5 ns) during select_queue().
3586          * To cover the case where think time is less than a jiffy, mark the
3587          * queue wait busy if only 1 jiffy is left in the slice.
3588          */
3589         if (cfqq->slice_end - jiffies == 1)
3590                 return true;
3591
3592         return false;
3593 }
3594
3595 static void cfq_completed_request(struct request_queue *q, struct request *rq)
3596 {
3597         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3598         struct cfq_data *cfqd = cfqq->cfqd;
3599         const int sync = rq_is_sync(rq);
3600         unsigned long now;
3601
3602         now = jiffies;
3603         cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
3604                      !!(rq->cmd_flags & REQ_NOIDLE));
3605
3606         cfq_update_hw_tag(cfqd);
3607
3608         WARN_ON(!cfqd->rq_in_driver);
3609         WARN_ON(!cfqq->dispatched);
3610         cfqd->rq_in_driver--;
3611         cfqq->dispatched--;
3612         (RQ_CFQG(rq))->dispatched--;
3613         cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
3614                                      rq_io_start_time_ns(rq), rq->cmd_flags);
3615
3616         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
3617
3618         if (sync) {
3619                 struct cfq_rb_root *service_tree;
3620
3621                 RQ_CIC(rq)->ttime.last_end_request = now;
3622
3623                 if (cfq_cfqq_on_rr(cfqq))
3624                         service_tree = cfqq->service_tree;
3625                 else
3626                         service_tree = service_tree_for(cfqq->cfqg,
3627                                 cfqq_prio(cfqq), cfqq_type(cfqq));
3628                 service_tree->ttime.last_end_request = now;
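                     /*
                      * If completing this sync request took at least its fifo
                      * period, note the time; a recently delayed sync request is
                      * used to limit how much async depth we allow.
                      */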
3629                 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
3630                         cfqd->last_delayed_sync = now;
3631         }
3632
3633 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3634         cfqq->cfqg->ttime.last_end_request = now;
3635 #endif
3636
3637         /*
3638          * If this is the active queue, check if it needs to be expired,
3639          * or if we want to idle in case it has no pending requests.
3640          */
3641         if (cfqd->active_queue == cfqq) {
3642                 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
3643
3644                 if (cfq_cfqq_slice_new(cfqq)) {
3645                         cfq_set_prio_slice(cfqd, cfqq);
3646                         cfq_clear_cfqq_slice_new(cfqq);
3647                 }
3648
3649                 /*
3650                  * Should we wait for the next request to come in before we
3651                  * expire the queue?
3652                  */
3653                 if (cfq_should_wait_busy(cfqd, cfqq)) {
3654                         unsigned long extend_sl = cfqd->cfq_slice_idle;
3655                         if (!cfqd->cfq_slice_idle)
3656                                 extend_sl = cfqd->cfq_group_idle;
3657                         cfqq->slice_end = jiffies + extend_sl;
3658                         cfq_mark_cfqq_wait_busy(cfqq);
3659                         cfq_log_cfqq(cfqd, cfqq, "will busy wait");
3660                 }
3661
3662                 /*
3663                  * Idling is not enabled on:
3664                  * - expired queues
3665                  * - idle-priority queues
3666                  * - async queues
3667                  * - queues with still some requests queued
3668                  * - when there is a close cooperator
3669                  */
3670                 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
3671                         cfq_slice_expired(cfqd, 1);
3672                 else if (sync && cfqq_empty &&
3673                          !cfq_close_cooperator(cfqd, cfqq)) {
3674                         cfq_arm_slice_timer(cfqd);
3675                 }
3676         }
3677
3678         if (!cfqd->rq_in_driver)
3679                 cfq_schedule_dispatch(cfqd);
3680 }
3681
3682 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
3683 {
3684         if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3685                 cfq_mark_cfqq_must_alloc_slice(cfqq);
3686                 return ELV_MQUEUE_MUST;
3687         }
3688
3689         return ELV_MQUEUE_MAY;
3690 }
3691
3692 static int cfq_may_queue(struct request_queue *q, int rw)
3693 {
3694         struct cfq_data *cfqd = q->elevator->elevator_data;
3695         struct task_struct *tsk = current;
3696         struct cfq_io_cq *cic;
3697         struct cfq_queue *cfqq;
3698
3699         /*
3700          * don't force setup of a queue from here, as a call to may_queue
3701          * does not necessarily imply that a request actually will be queued.
3702          * so just look up a possibly existing queue, or return 'may queue'
3703          * if that fails
3704          */
3705         cic = cfq_cic_lookup(cfqd, tsk->io_context);
3706         if (!cic)
3707                 return ELV_MQUEUE_MAY;
3708
3709         cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
3710         if (cfqq) {
3711                 cfq_init_prio_data(cfqq, cic);
3712
3713                 return __cfq_may_queue(cfqq);
3714         }
3715
3716         return ELV_MQUEUE_MAY;
3717 }
3718
3719 /*
3720  * queue lock held here
3721  */
3722 static void cfq_put_request(struct request *rq)
3723 {
3724         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3725
3726         if (cfqq) {
3727                 const int rw = rq_data_dir(rq);
3728
3729                 BUG_ON(!cfqq->allocated[rw]);
3730                 cfqq->allocated[rw]--;
3731
3732                 /* Put down rq reference on cfqg */
3733                 cfqg_put(RQ_CFQG(rq));
3734                 rq->elv.priv[0] = NULL;
3735                 rq->elv.priv[1] = NULL;
3736
3737                 cfq_put_queue(cfqq);
3738         }
3739 }
3740
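     /*
      * Hand the io context over to the closely cooperating queue
      * (cfqq->new_cfqq), mark it as a cooperator and drop this process's
      * reference to the old queue.
      */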
3741 static struct cfq_queue *
3742 cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
3743                 struct cfq_queue *cfqq)
3744 {
3745         cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
3746         cic_set_cfqq(cic, cfqq->new_cfqq, 1);
3747         cfq_mark_cfqq_coop(cfqq->new_cfqq);
3748         cfq_put_queue(cfqq);
3749         return cic_to_cfqq(cic, 1);
3750 }
3751
3752 /*
3753  * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
3754  * was the last process referring to said cfqq.
3755  */
3756 static struct cfq_queue *
3757 split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
3758 {
3759         if (cfqq_process_refs(cfqq) == 1) {
3760                 cfqq->pid = current->pid;
3761                 cfq_clear_cfqq_coop(cfqq);
3762                 cfq_clear_cfqq_split_coop(cfqq);
3763                 return cfqq;
3764         }
3765
3766         cic_set_cfqq(cic, NULL, 1);
3767
3768         cfq_put_cooperator(cfqq);
3769
3770         cfq_put_queue(cfqq);
3771         return NULL;
3772 }
3773 /*
3774  * Allocate cfq data structures associated with this request.
3775  */
3776 static int
3777 cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
3778                 gfp_t gfp_mask)
3779 {
3780         struct cfq_data *cfqd = q->elevator->elevator_data;
3781         struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
3782         const int rw = rq_data_dir(rq);
3783         const bool is_sync = rq_is_sync(rq);
3784         struct cfq_queue *cfqq;
3785
3786         might_sleep_if(gfp_mask & __GFP_WAIT);
3787
3788         spin_lock_irq(q->queue_lock);
3789
3790         check_ioprio_changed(cic, bio);
3791         check_blkcg_changed(cic, bio);
3792 new_queue:
3793         cfqq = cic_to_cfqq(cic, is_sync);
3794         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3795                 cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
3796                 cic_set_cfqq(cic, cfqq, is_sync);
3797         } else {
3798                 /*
3799                  * If the queue was seeky for too long, break it apart.
3800                  */
3801                 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
3802                         cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
3803                         cfqq = split_cfqq(cic, cfqq);
3804                         if (!cfqq)
3805                                 goto new_queue;
3806                 }
3807
3808                 /*
3809                  * Check to see if this queue is scheduled to merge with
3810                  * another, closely cooperating queue.  The merging of
3811                  * queues happens here as it must be done in process context.
3812                  * The reference on new_cfqq was taken in merge_cfqqs.
3813                  */
3814                 if (cfqq->new_cfqq)
3815                         cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
3816         }
3817
3818         cfqq->allocated[rw]++;
3819
3820         cfqq->ref++;
3821         cfqg_get(cfqq->cfqg);
3822         rq->elv.priv[0] = cfqq;
3823         rq->elv.priv[1] = cfqq->cfqg;
3824         spin_unlock_irq(q->queue_lock);
3825         return 0;
3826 }
3827
3828 static void cfq_kick_queue(struct work_struct *work)
3829 {
3830         struct cfq_data *cfqd =
3831                 container_of(work, struct cfq_data, unplug_work);
3832         struct request_queue *q = cfqd->queue;
3833
3834         spin_lock_irq(q->queue_lock);
3835         __blk_run_queue(cfqd->queue);
3836         spin_unlock_irq(q->queue_lock);
3837 }
3838
3839 /*
3840  * Timer running if the active_queue is currently idling inside its time slice
3841  */
3842 static void cfq_idle_slice_timer(unsigned long data)
3843 {
3844         struct cfq_data *cfqd = (struct cfq_data *) data;
3845         struct cfq_queue *cfqq;
3846         unsigned long flags;
3847         int timed_out = 1;
3848
3849         cfq_log(cfqd, "idle timer fired");
3850
3851         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3852
3853         cfqq = cfqd->active_queue;
3854         if (cfqq) {
3855                 timed_out = 0;
3856
3857                 /*
3858                  * We saw a request before the queue expired, let it through
3859                  */
3860                 if (cfq_cfqq_must_dispatch(cfqq))
3861                         goto out_kick;
3862
3863                 /*
3864                  * expired
3865                  */
3866                 if (cfq_slice_used(cfqq))
3867                         goto expire;
3868
3869                 /*
3870                  * only expire and reinvoke request handler, if there are
3871                  * other queues with pending requests
3872                  */
3873                 if (!cfqd->busy_queues)
3874                         goto out_cont;
3875
3876                 /*
3877                  * not expired and it has a request pending, let it dispatch
3878                  */
3879                 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3880                         goto out_kick;
3881
3882                 /*
3883                  * Queue depth (deep) flag is reset only when idling didn't succeed
3884                  */
3885                 cfq_clear_cfqq_deep(cfqq);
3886         }
3887 expire:
3888         cfq_slice_expired(cfqd, timed_out);
3889 out_kick:
3890         cfq_schedule_dispatch(cfqd);
3891 out_cont:
3892         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3893 }
3894
3895 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
3896 {
3897         del_timer_sync(&cfqd->idle_slice_timer);
3898         cancel_work_sync(&cfqd->unplug_work);
3899 }
3900
3901 static void cfq_put_async_queues(struct cfq_data *cfqd)
3902 {
3903         int i;
3904
3905         for (i = 0; i < IOPRIO_BE_NR; i++) {
3906                 if (cfqd->async_cfqq[0][i])
3907                         cfq_put_queue(cfqd->async_cfqq[0][i]);
3908                 if (cfqd->async_cfqq[1][i])
3909                         cfq_put_queue(cfqd->async_cfqq[1][i]);
3910         }
3911
3912         if (cfqd->async_idle_cfqq)
3913                 cfq_put_queue(cfqd->async_idle_cfqq);
3914 }
3915
3916 static void cfq_exit_queue(struct elevator_queue *e)
3917 {
3918         struct cfq_data *cfqd = e->elevator_data;
3919         struct request_queue *q = cfqd->queue;
3920
3921         cfq_shutdown_timer_wq(cfqd);
3922
3923         spin_lock_irq(q->queue_lock);
3924
3925         if (cfqd->active_queue)
3926                 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
3927
3928         cfq_put_async_queues(cfqd);
3929
3930         spin_unlock_irq(q->queue_lock);
3931
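             /*
              * Expiring the active queue above may have rescheduled the unplug
              * work, so shut the timer and work down once more.
              */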
3932         cfq_shutdown_timer_wq(cfqd);
3933
3934 #ifndef CONFIG_CFQ_GROUP_IOSCHED
3935         kfree(cfqd->root_group);
3936 #endif
3937         update_root_blkg_pd(q, BLKIO_POLICY_PROP);
3938         kfree(cfqd);
3939 }
3940
3941 static int cfq_init_queue(struct request_queue *q)
3942 {
3943         struct cfq_data *cfqd;
3944         struct blkio_group *blkg __maybe_unused;
3945         int i;
3946
3947         cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
3948         if (!cfqd)
3949                 return -ENOMEM;
3950
3951         cfqd->queue = q;
3952         q->elevator->elevator_data = cfqd;
3953
3954         /* Init root service tree */
3955         cfqd->grp_service_tree = CFQ_RB_ROOT;
3956
3957         /* Init root group and prefer root group over other groups by default */
3958 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3959         rcu_read_lock();
3960         spin_lock_irq(q->queue_lock);
3961
3962         blkg = blkg_lookup_create(&blkio_root_cgroup, q, true);
3963         if (!IS_ERR(blkg))
3964                 cfqd->root_group = blkg_to_cfqg(blkg);
3965
3966         spin_unlock_irq(q->queue_lock);
3967         rcu_read_unlock();
3968 #else
3969         cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
3970                                         GFP_KERNEL, cfqd->queue->node);
3971         if (cfqd->root_group)
3972                 cfq_init_cfqg_base(cfqd->root_group);
3973 #endif
3974         if (!cfqd->root_group) {
3975                 kfree(cfqd);
3976                 return -ENOMEM;
3977         }
3978
3979         cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
3980
3981         /*
3982          * Not strictly needed (since RB_ROOT just clears the node and we
3983          * zeroed cfqd on alloc), but better to be safe in case someone decides
3984          * to add magic to the rb code
3985          */
3986         for (i = 0; i < CFQ_PRIO_LISTS; i++)
3987                 cfqd->prio_trees[i] = RB_ROOT;
3988
3989         /*
3990          * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
3991          * Grab a permanent reference to it, so that the normal code flow
3992          * will not attempt to free it.  oom_cfqq is linked to root_group
3993          * but shouldn't hold a reference as it'll never be unlinked.  Lose
3994          * the reference from linking right away.
3995          */
3996         cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
3997         cfqd->oom_cfqq.ref++;
3998
3999         spin_lock_irq(q->queue_lock);
4000         cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
4001         cfqg_put(cfqd->root_group);
4002         spin_unlock_irq(q->queue_lock);
4003
4004         init_timer(&cfqd->idle_slice_timer);
4005         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
4006         cfqd->idle_slice_timer.data = (unsigned long) cfqd;
4007
4008         INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
4009
4010         cfqd->cfq_quantum = cfq_quantum;
4011         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
4012         cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
4013         cfqd->cfq_back_max = cfq_back_max;
4014         cfqd->cfq_back_penalty = cfq_back_penalty;
4015         cfqd->cfq_slice[0] = cfq_slice_async;
4016         cfqd->cfq_slice[1] = cfq_slice_sync;
4017         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
4018         cfqd->cfq_slice_idle = cfq_slice_idle;
4019         cfqd->cfq_group_idle = cfq_group_idle;
4020         cfqd->cfq_latency = 1;
4021         cfqd->hw_tag = -1;
4022         /*
4023          * we optimistically start assuming sync ops weren't delayed in the
4024          * last second, in order to have larger depth for async operations.
4025          */
4026         cfqd->last_delayed_sync = jiffies - HZ;
4027         return 0;
4028 }
4029
4030 /*
4031  * sysfs parts below -->
4032  */
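     /*
      * These tunables are exposed per device, typically under
      * /sys/block/<dev>/queue/iosched/ (e.g. "echo 0 > .../slice_idle" to
      * disable idling).  Attributes declared with __CONV are shown and stored
      * in milliseconds and converted to jiffies internally.
      */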
4033 static ssize_t
4034 cfq_var_show(unsigned int var, char *page)
4035 {
4036         return sprintf(page, "%d\n", var);
4037 }
4038
4039 static ssize_t
4040 cfq_var_store(unsigned int *var, const char *page, size_t count)
4041 {
4042         char *p = (char *) page;
4043
4044         *var = simple_strtoul(p, &p, 10);
4045         return count;
4046 }
4047
4048 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
4049 static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
4050 {                                                                       \
4051         struct cfq_data *cfqd = e->elevator_data;                       \
4052         unsigned int __data = __VAR;                                    \
4053         if (__CONV)                                                     \
4054                 __data = jiffies_to_msecs(__data);                      \
4055         return cfq_var_show(__data, (page));                            \
4056 }
4057 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
4058 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
4059 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
4060 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4061 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
4062 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
4063 SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
4064 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4065 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4066 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
4067 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
4068 #undef SHOW_FUNCTION
4069
4070 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
4071 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
4072 {                                                                       \
4073         struct cfq_data *cfqd = e->elevator_data;                       \
4074         unsigned int __data;                                            \
4075         int ret = cfq_var_store(&__data, (page), count);                \
4076         if (__data < (MIN))                                             \
4077                 __data = (MIN);                                         \
4078         else if (__data > (MAX))                                        \
4079                 __data = (MAX);                                         \
4080         if (__CONV)                                                     \
4081                 *(__PTR) = msecs_to_jiffies(__data);                    \
4082         else                                                            \
4083                 *(__PTR) = __data;                                      \
4084         return ret;                                                     \
4085 }
4086 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
4087 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4088                 UINT_MAX, 1);
4089 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4090                 UINT_MAX, 1);
4091 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
4092 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4093                 UINT_MAX, 0);
4094 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
4095 STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
4096 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4097 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
4098 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4099                 UINT_MAX, 0);
4100 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
4101 #undef STORE_FUNCTION
4102
4103 #define CFQ_ATTR(name) \
4104         __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
4105
4106 static struct elv_fs_entry cfq_attrs[] = {
4107         CFQ_ATTR(quantum),
4108         CFQ_ATTR(fifo_expire_sync),
4109         CFQ_ATTR(fifo_expire_async),
4110         CFQ_ATTR(back_seek_max),
4111         CFQ_ATTR(back_seek_penalty),
4112         CFQ_ATTR(slice_sync),
4113         CFQ_ATTR(slice_async),
4114         CFQ_ATTR(slice_async_rq),
4115         CFQ_ATTR(slice_idle),
4116         CFQ_ATTR(group_idle),
4117         CFQ_ATTR(low_latency),
4118         __ATTR_NULL
4119 };
4120
4121 static struct elevator_type iosched_cfq = {
4122         .ops = {
4123                 .elevator_merge_fn =            cfq_merge,
4124                 .elevator_merged_fn =           cfq_merged_request,
4125                 .elevator_merge_req_fn =        cfq_merged_requests,
4126                 .elevator_allow_merge_fn =      cfq_allow_merge,
4127                 .elevator_bio_merged_fn =       cfq_bio_merged,
4128                 .elevator_dispatch_fn =         cfq_dispatch_requests,
4129                 .elevator_add_req_fn =          cfq_insert_request,
4130                 .elevator_activate_req_fn =     cfq_activate_request,
4131                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
4132                 .elevator_completed_req_fn =    cfq_completed_request,
4133                 .elevator_former_req_fn =       elv_rb_former_request,
4134                 .elevator_latter_req_fn =       elv_rb_latter_request,
4135                 .elevator_init_icq_fn =         cfq_init_icq,
4136                 .elevator_exit_icq_fn =         cfq_exit_icq,
4137                 .elevator_set_req_fn =          cfq_set_request,
4138                 .elevator_put_req_fn =          cfq_put_request,
4139                 .elevator_may_queue_fn =        cfq_may_queue,
4140                 .elevator_init_fn =             cfq_init_queue,
4141                 .elevator_exit_fn =             cfq_exit_queue,
4142         },
4143         .icq_size       =       sizeof(struct cfq_io_cq),
4144         .icq_align      =       __alignof__(struct cfq_io_cq),
4145         .elevator_attrs =       cfq_attrs,
4146         .elevator_name  =       "cfq",
4147         .elevator_owner =       THIS_MODULE,
4148 };
4149
4150 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4151 static struct blkio_policy_type blkio_policy_cfq = {
4152         .ops = {
4153                 .blkio_init_group_fn =          cfq_init_blkio_group,
4154                 .blkio_reset_group_stats_fn =   cfqg_stats_reset,
4155         },
4156         .plid = BLKIO_POLICY_PROP,
4157         .pdata_size = sizeof(struct cfq_group),
4158         .cftypes = cfq_blkcg_files,
4159 };
4160 #endif
4161
4162 static int __init cfq_init(void)
4163 {
4164         int ret;
4165
4166         /*
4167          * could be 0 on HZ < 1000 setups
4168          */
4169         if (!cfq_slice_async)
4170                 cfq_slice_async = 1;
4171         if (!cfq_slice_idle)
4172                 cfq_slice_idle = 1;
4173
4174 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4175         if (!cfq_group_idle)
4176                 cfq_group_idle = 1;
4177 #else
4178                 cfq_group_idle = 0;
4179 #endif
4180         cfq_pool = KMEM_CACHE(cfq_queue, 0);
4181         if (!cfq_pool)
4182                 return -ENOMEM;
4183
4184         ret = elv_register(&iosched_cfq);
4185         if (ret) {
4186                 kmem_cache_destroy(cfq_pool);
4187                 return ret;
4188         }
4189
4190 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4191         blkio_policy_register(&blkio_policy_cfq);
4192 #endif
4193         return 0;
4194 }
4195
4196 static void __exit cfq_exit(void)
4197 {
4198 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4199         blkio_policy_unregister(&blkio_policy_cfq);
4200 #endif
4201         elv_unregister(&iosched_cfq);
4202         kmem_cache_destroy(cfq_pool);
4203 }
4204
4205 module_init(cfq_init);
4206 module_exit(cfq_exit);
4207
4208 MODULE_AUTHOR("Jens Axboe");
4209 MODULE_LICENSE("GPL");
4210 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");