]> git.karo-electronics.de Git - karo-tx-linux.git/blob - block/cfq-iosched.c
blkcg: cfq doesn't need per-cpu dispatch stats
[karo-tx-linux.git] / block / cfq-iosched.c
1 /*
2  *  CFQ, or complete fairness queueing, disk scheduler.
3  *
4  *  Based on ideas from a previously unfinished io
5  *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
6  *
7  *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8  */
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/blkdev.h>
12 #include <linux/elevator.h>
13 #include <linux/jiffies.h>
14 #include <linux/rbtree.h>
15 #include <linux/ioprio.h>
16 #include <linux/blktrace_api.h>
17 #include "blk.h"
18 #include "blk-cgroup.h"
19
20 static struct blkio_policy_type blkio_policy_cfq;
21
22 /*
23  * tunables
24  */
25 /* max queue in one round of service */
26 static const int cfq_quantum = 8;
27 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
28 /* maximum backwards seek, in KiB */
29 static const int cfq_back_max = 16 * 1024;
30 /* penalty of a backwards seek */
31 static const int cfq_back_penalty = 2;
32 static const int cfq_slice_sync = HZ / 10;
33 static int cfq_slice_async = HZ / 25;
34 static const int cfq_slice_async_rq = 2;
35 static int cfq_slice_idle = HZ / 125;
36 static int cfq_group_idle = HZ / 125;
37 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
38 static const int cfq_hist_divisor = 4;
39
40 /*
41  * offset from end of service tree
42  */
43 #define CFQ_IDLE_DELAY          (HZ / 5)
44
45 /*
46  * below this threshold, we consider thinktime immediate
47  */
48 #define CFQ_MIN_TT              (2)
49
50 #define CFQ_SLICE_SCALE         (5)
51 #define CFQ_HW_QUEUE_MIN        (5)
52 #define CFQ_SERVICE_SHIFT       12
53
54 #define CFQQ_SEEK_THR           (sector_t)(8 * 100)
55 #define CFQQ_CLOSE_THR          (sector_t)(8 * 1024)
56 #define CFQQ_SECT_THR_NONROT    (sector_t)(2 * 32)
57 #define CFQQ_SEEKY(cfqq)        (hweight32(cfqq->seek_history) > 32/8)
58
59 #define RQ_CIC(rq)              icq_to_cic((rq)->elv.icq)
60 #define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elv.priv[0])
61 #define RQ_CFQG(rq)             (struct cfq_group *) ((rq)->elv.priv[1])
62
63 static struct kmem_cache *cfq_pool;
64
65 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
66 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
67 #define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
68
69 #define sample_valid(samples)   ((samples) > 80)
70 #define rb_entry_cfqg(node)     rb_entry((node), struct cfq_group, rb_node)
71
72 struct cfq_ttime {
73         unsigned long last_end_request;
74
75         unsigned long ttime_total;
76         unsigned long ttime_samples;
77         unsigned long ttime_mean;
78 };
79
80 /*
81  * Most of our rbtree usage is for sorting with min extraction, so
82  * if we cache the leftmost node we don't have to walk down the tree
83  * to find it. Idea borrowed from Ingo Molnars CFS scheduler. We should
84  * move this into the elevator for the rq sorting as well.
85  */
86 struct cfq_rb_root {
87         struct rb_root rb;
88         struct rb_node *left;
89         unsigned count;
90         unsigned total_weight;
91         u64 min_vdisktime;
92         struct cfq_ttime ttime;
93 };
94 #define CFQ_RB_ROOT     (struct cfq_rb_root) { .rb = RB_ROOT, \
95                         .ttime = {.last_end_request = jiffies,},}
96
97 /*
98  * Per process-grouping structure
99  */
100 struct cfq_queue {
101         /* reference count */
102         int ref;
103         /* various state flags, see below */
104         unsigned int flags;
105         /* parent cfq_data */
106         struct cfq_data *cfqd;
107         /* service_tree member */
108         struct rb_node rb_node;
109         /* service_tree key */
110         unsigned long rb_key;
111         /* prio tree member */
112         struct rb_node p_node;
113         /* prio tree root we belong to, if any */
114         struct rb_root *p_root;
115         /* sorted list of pending requests */
116         struct rb_root sort_list;
117         /* if fifo isn't expired, next request to serve */
118         struct request *next_rq;
119         /* requests queued in sort_list */
120         int queued[2];
121         /* currently allocated requests */
122         int allocated[2];
123         /* fifo list of requests in sort_list */
124         struct list_head fifo;
125
126         /* time when queue got scheduled in to dispatch first request. */
127         unsigned long dispatch_start;
128         unsigned int allocated_slice;
129         unsigned int slice_dispatch;
130         /* time when first request from queue completed and slice started. */
131         unsigned long slice_start;
132         unsigned long slice_end;
133         long slice_resid;
134
135         /* pending priority requests */
136         int prio_pending;
137         /* number of requests that are on the dispatch list or inside driver */
138         int dispatched;
139
140         /* io prio of this group */
141         unsigned short ioprio, org_ioprio;
142         unsigned short ioprio_class;
143
144         pid_t pid;
145
146         u32 seek_history;
147         sector_t last_request_pos;
148
149         struct cfq_rb_root *service_tree;
150         struct cfq_queue *new_cfqq;
151         struct cfq_group *cfqg;
152         /* Number of sectors dispatched from queue in single dispatch round */
153         unsigned long nr_sectors;
154 };
155
156 /*
157  * First index in the service_trees.
158  * IDLE is handled separately, so it has negative index
159  */
160 enum wl_prio_t {
161         BE_WORKLOAD = 0,
162         RT_WORKLOAD = 1,
163         IDLE_WORKLOAD = 2,
164         CFQ_PRIO_NR,
165 };
166
167 /*
168  * Second index in the service_trees.
169  */
170 enum wl_type_t {
171         ASYNC_WORKLOAD = 0,
172         SYNC_NOIDLE_WORKLOAD = 1,
173         SYNC_WORKLOAD = 2
174 };
175
176 /* This is per cgroup per device grouping structure */
177 struct cfq_group {
178         /* group service_tree member */
179         struct rb_node rb_node;
180
181         /* group service_tree key */
182         u64 vdisktime;
183         unsigned int weight;
184         unsigned int new_weight;
185         bool needs_update;
186
187         /* number of cfqq currently on this group */
188         int nr_cfqq;
189
190         /*
191          * Per group busy queues average. Useful for workload slice calc. We
192          * create the array for each prio class but at run time it is used
193          * only for RT and BE class and slot for IDLE class remains unused.
194          * This is primarily done to avoid confusion and a gcc warning.
195          */
196         unsigned int busy_queues_avg[CFQ_PRIO_NR];
197         /*
198          * rr lists of queues with requests. We maintain service trees for
199          * RT and BE classes. These trees are subdivided in subclasses
200          * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
201          * class there is no subclassification and all the cfq queues go on
202          * a single tree service_tree_idle.
203          * Counts are embedded in the cfq_rb_root
204          */
205         struct cfq_rb_root service_trees[2][3];
206         struct cfq_rb_root service_tree_idle;
207
208         unsigned long saved_workload_slice;
209         enum wl_type_t saved_workload;
210         enum wl_prio_t saved_serving_prio;
211
212         /* number of requests that are on the dispatch list or inside driver */
213         int dispatched;
214         struct cfq_ttime ttime;
215 };
216
217 struct cfq_io_cq {
218         struct io_cq            icq;            /* must be the first member */
219         struct cfq_queue        *cfqq[2];
220         struct cfq_ttime        ttime;
221         int                     ioprio;         /* the current ioprio */
222 #ifdef CONFIG_CFQ_GROUP_IOSCHED
223         uint64_t                blkcg_id;       /* the current blkcg ID */
224 #endif
225 };
226
227 /*
228  * Per block device queue structure
229  */
230 struct cfq_data {
231         struct request_queue *queue;
232         /* Root service tree for cfq_groups */
233         struct cfq_rb_root grp_service_tree;
234         struct cfq_group *root_group;
235
236         /*
237          * The priority currently being served
238          */
239         enum wl_prio_t serving_prio;
240         enum wl_type_t serving_type;
241         unsigned long workload_expires;
242         struct cfq_group *serving_group;
243
244         /*
245          * Each priority tree is sorted by next_request position.  These
246          * trees are used when determining if two or more queues are
247          * interleaving requests (see cfq_close_cooperator).
248          */
249         struct rb_root prio_trees[CFQ_PRIO_LISTS];
250
251         unsigned int busy_queues;
252         unsigned int busy_sync_queues;
253
254         int rq_in_driver;
255         int rq_in_flight[2];
256
257         /*
258          * queue-depth detection
259          */
260         int rq_queued;
261         int hw_tag;
262         /*
263          * hw_tag can be
264          * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
265          *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
266          *  0 => no NCQ
267          */
268         int hw_tag_est_depth;
269         unsigned int hw_tag_samples;
270
271         /*
272          * idle window management
273          */
274         struct timer_list idle_slice_timer;
275         struct work_struct unplug_work;
276
277         struct cfq_queue *active_queue;
278         struct cfq_io_cq *active_cic;
279
280         /*
281          * async queue for each priority case
282          */
283         struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
284         struct cfq_queue *async_idle_cfqq;
285
286         sector_t last_position;
287
288         /*
289          * tunables, see top of file
290          */
291         unsigned int cfq_quantum;
292         unsigned int cfq_fifo_expire[2];
293         unsigned int cfq_back_penalty;
294         unsigned int cfq_back_max;
295         unsigned int cfq_slice[2];
296         unsigned int cfq_slice_async_rq;
297         unsigned int cfq_slice_idle;
298         unsigned int cfq_group_idle;
299         unsigned int cfq_latency;
300
301         /*
302          * Fallback dummy cfqq for extreme OOM conditions
303          */
304         struct cfq_queue oom_cfqq;
305
306         unsigned long last_delayed_sync;
307 };
308
309 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
310
311 static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
312                                             enum wl_prio_t prio,
313                                             enum wl_type_t type)
314 {
315         if (!cfqg)
316                 return NULL;
317
318         if (prio == IDLE_WORKLOAD)
319                 return &cfqg->service_tree_idle;
320
321         return &cfqg->service_trees[prio][type];
322 }
323
324 enum cfqq_state_flags {
325         CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
326         CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
327         CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
328         CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
329         CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
330         CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
331         CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
332         CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
333         CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
334         CFQ_CFQQ_FLAG_coop,             /* cfqq is shared */
335         CFQ_CFQQ_FLAG_split_coop,       /* shared cfqq will be splitted */
336         CFQ_CFQQ_FLAG_deep,             /* sync cfqq experienced large depth */
337         CFQ_CFQQ_FLAG_wait_busy,        /* Waiting for next request */
338 };
339
340 #define CFQ_CFQQ_FNS(name)                                              \
341 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
342 {                                                                       \
343         (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);                   \
344 }                                                                       \
345 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
346 {                                                                       \
347         (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                  \
348 }                                                                       \
349 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
350 {                                                                       \
351         return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;      \
352 }
353
354 CFQ_CFQQ_FNS(on_rr);
355 CFQ_CFQQ_FNS(wait_request);
356 CFQ_CFQQ_FNS(must_dispatch);
357 CFQ_CFQQ_FNS(must_alloc_slice);
358 CFQ_CFQQ_FNS(fifo_expire);
359 CFQ_CFQQ_FNS(idle_window);
360 CFQ_CFQQ_FNS(prio_changed);
361 CFQ_CFQQ_FNS(slice_new);
362 CFQ_CFQQ_FNS(sync);
363 CFQ_CFQQ_FNS(coop);
364 CFQ_CFQQ_FNS(split_coop);
365 CFQ_CFQQ_FNS(deep);
366 CFQ_CFQQ_FNS(wait_busy);
367 #undef CFQ_CFQQ_FNS
368
369 #if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
370
371 /* blkg state flags */
372 enum blkg_state_flags {
373         BLKG_waiting = 0,
374         BLKG_idling,
375         BLKG_empty,
376 };
377
378 #define BLKG_FLAG_FNS(name)                                             \
379 static inline void blkio_mark_blkg_##name(                              \
380                 struct blkio_group_stats *stats)                        \
381 {                                                                       \
382         stats->flags |= (1 << BLKG_##name);                             \
383 }                                                                       \
384 static inline void blkio_clear_blkg_##name(                             \
385                 struct blkio_group_stats *stats)                        \
386 {                                                                       \
387         stats->flags &= ~(1 << BLKG_##name);                            \
388 }                                                                       \
389 static inline int blkio_blkg_##name(struct blkio_group_stats *stats)    \
390 {                                                                       \
391         return (stats->flags & (1 << BLKG_##name)) != 0;                \
392 }                                                                       \
393
394 BLKG_FLAG_FNS(waiting)
395 BLKG_FLAG_FNS(idling)
396 BLKG_FLAG_FNS(empty)
397 #undef BLKG_FLAG_FNS
398
399 /* This should be called with the queue_lock held. */
400 static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
401 {
402         unsigned long long now;
403
404         if (!blkio_blkg_waiting(stats))
405                 return;
406
407         now = sched_clock();
408         if (time_after64(now, stats->start_group_wait_time))
409                 blkg_stat_add(&stats->group_wait_time,
410                               now - stats->start_group_wait_time);
411         blkio_clear_blkg_waiting(stats);
412 }
413
414 /* This should be called with the queue_lock held. */
415 static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
416                                             struct blkio_policy_type *pol,
417                                             struct blkio_group *curr_blkg)
418 {
419         struct blkg_policy_data *pd = blkg->pd[pol->plid];
420
421         if (blkio_blkg_waiting(&pd->stats))
422                 return;
423         if (blkg == curr_blkg)
424                 return;
425         pd->stats.start_group_wait_time = sched_clock();
426         blkio_mark_blkg_waiting(&pd->stats);
427 }
428
429 /* This should be called with the queue_lock held. */
430 static void blkio_end_empty_time(struct blkio_group_stats *stats)
431 {
432         unsigned long long now;
433
434         if (!blkio_blkg_empty(stats))
435                 return;
436
437         now = sched_clock();
438         if (time_after64(now, stats->start_empty_time))
439                 blkg_stat_add(&stats->empty_time,
440                               now - stats->start_empty_time);
441         blkio_clear_blkg_empty(stats);
442 }
443
444 static void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
445                                              struct blkio_policy_type *pol,
446                                              unsigned long dequeue)
447 {
448         struct blkg_policy_data *pd = blkg->pd[pol->plid];
449
450         lockdep_assert_held(blkg->q->queue_lock);
451
452         blkg_stat_add(&pd->stats.dequeue, dequeue);
453 }
454
455 static void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg,
456                                              struct blkio_policy_type *pol)
457 {
458         struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
459
460         lockdep_assert_held(blkg->q->queue_lock);
461
462         if (blkg_rwstat_sum(&stats->queued))
463                 return;
464
465         /*
466          * group is already marked empty. This can happen if cfqq got new
467          * request in parent group and moved to this group while being added
468          * to service tree. Just ignore the event and move on.
469          */
470         if (blkio_blkg_empty(stats))
471                 return;
472
473         stats->start_empty_time = sched_clock();
474         blkio_mark_blkg_empty(stats);
475 }
476
477 static void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg,
478                                                struct blkio_policy_type *pol)
479 {
480         struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
481
482         lockdep_assert_held(blkg->q->queue_lock);
483
484         if (blkio_blkg_idling(stats)) {
485                 unsigned long long now = sched_clock();
486
487                 if (time_after64(now, stats->start_idle_time))
488                         blkg_stat_add(&stats->idle_time,
489                                       now - stats->start_idle_time);
490                 blkio_clear_blkg_idling(stats);
491         }
492 }
493
494 static void cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
495                                                    struct blkio_policy_type *pol)
496 {
497         struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
498
499         lockdep_assert_held(blkg->q->queue_lock);
500         BUG_ON(blkio_blkg_idling(stats));
501
502         stats->start_idle_time = sched_clock();
503         blkio_mark_blkg_idling(stats);
504 }
505
506 static void cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
507                                                     struct blkio_policy_type *pol)
508 {
509         struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
510
511         lockdep_assert_held(blkg->q->queue_lock);
512
513         blkg_stat_add(&stats->avg_queue_size_sum,
514                       blkg_rwstat_sum(&stats->queued));
515         blkg_stat_add(&stats->avg_queue_size_samples, 1);
516         blkio_update_group_wait_time(stats);
517 }
518
519 #else   /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
520
521 static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
522                                             struct blkio_policy_type *pol,
523                                             struct blkio_group *curr_blkg) { }
524 static void blkio_end_empty_time(struct blkio_group_stats *stats) { }
525 static void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
526                                              struct blkio_policy_type *pol,
527                                              unsigned long dequeue) { }
528 static void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg,
529                                              struct blkio_policy_type *pol) { }
530 static void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg,
531                                                struct blkio_policy_type *pol) { }
532 static void cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
533                                                    struct blkio_policy_type *pol) { }
534 static void cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
535                                                     struct blkio_policy_type *pol) { }
536
537 #endif  /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
538
539 #ifdef CONFIG_CFQ_GROUP_IOSCHED
540
541 static inline struct cfq_group *blkg_to_cfqg(struct blkio_group *blkg)
542 {
543         return blkg_to_pdata(blkg, &blkio_policy_cfq);
544 }
545
546 static inline struct blkio_group *cfqg_to_blkg(struct cfq_group *cfqg)
547 {
548         return pdata_to_blkg(cfqg);
549 }
550
551 static inline void cfqg_get(struct cfq_group *cfqg)
552 {
553         return blkg_get(cfqg_to_blkg(cfqg));
554 }
555
556 static inline void cfqg_put(struct cfq_group *cfqg)
557 {
558         return blkg_put(cfqg_to_blkg(cfqg));
559 }
560
561 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
562         blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
563                         cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
564                         blkg_path(cfqg_to_blkg((cfqq)->cfqg)), ##args)
565
566 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)                          \
567         blk_add_trace_msg((cfqd)->queue, "%s " fmt,                     \
568                         blkg_path(cfqg_to_blkg((cfqg))), ##args)        \
569
570 static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
571                         struct blkio_policy_type *pol,
572                         struct blkio_group *curr_blkg,
573                         bool direction, bool sync)
574 {
575         struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
576         int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
577
578         lockdep_assert_held(blkg->q->queue_lock);
579
580         blkg_rwstat_add(&stats->queued, rw, 1);
581         blkio_end_empty_time(stats);
582         blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
583 }
584
585 static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
586                         struct blkio_policy_type *pol, unsigned long time,
587                         unsigned long unaccounted_time)
588 {
589         struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
590
591         lockdep_assert_held(blkg->q->queue_lock);
592
593         blkg_stat_add(&stats->time, time);
594 #ifdef CONFIG_DEBUG_BLK_CGROUP
595         blkg_stat_add(&stats->unaccounted_time, unaccounted_time);
596 #endif
597 }
598
599 static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
600                         struct blkio_policy_type *pol, bool direction,
601                         bool sync)
602 {
603         struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
604         int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
605
606         lockdep_assert_held(blkg->q->queue_lock);
607
608         blkg_rwstat_add(&stats->queued, rw, -1);
609 }
610
611 static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
612                         struct blkio_policy_type *pol, bool direction,
613                         bool sync)
614 {
615         struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
616         int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
617
618         lockdep_assert_held(blkg->q->queue_lock);
619
620         blkg_rwstat_add(&stats->merged, rw, 1);
621 }
622
623 static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
624                         struct blkio_policy_type *pol, uint64_t bytes,
625                         bool direction, bool sync)
626 {
627         struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
628         int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
629
630         blkg_stat_add(&stats->sectors, bytes >> 9);
631         blkg_rwstat_add(&stats->serviced, rw, 1);
632         blkg_rwstat_add(&stats->service_bytes, rw, bytes);
633 }
634
635 static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg,
636                         struct blkio_policy_type *pol, uint64_t start_time,
637                         uint64_t io_start_time, bool direction, bool sync)
638 {
639         struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
640         unsigned long long now = sched_clock();
641         int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
642
643         lockdep_assert_held(blkg->q->queue_lock);
644
645         if (time_after64(now, io_start_time))
646                 blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
647         if (time_after64(io_start_time, start_time))
648                 blkg_rwstat_add(&stats->wait_time, rw,
649                                 io_start_time - start_time);
650 }
651
652 #else   /* CONFIG_CFQ_GROUP_IOSCHED */
653
654 static inline struct cfq_group *blkg_to_cfqg(struct blkio_group *blkg) { return NULL; }
655 static inline struct blkio_group *cfqg_to_blkg(struct cfq_group *cfqg) { return NULL; }
656 static inline void cfqg_get(struct cfq_group *cfqg) { }
657 static inline void cfqg_put(struct cfq_group *cfqg) { }
658
659 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
660         blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
661 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)          do {} while (0)
662
663 static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
664                         struct blkio_policy_type *pol,
665                         struct blkio_group *curr_blkg, bool direction,
666                         bool sync) { }
667 static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
668                         struct blkio_policy_type *pol, unsigned long time,
669                         unsigned long unaccounted_time) { }
670 static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
671                         struct blkio_policy_type *pol, bool direction,
672                         bool sync) { }
673 static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
674                         struct blkio_policy_type *pol, bool direction,
675                         bool sync) { }
676 static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
677                         struct blkio_policy_type *pol, uint64_t bytes,
678                         bool direction, bool sync) { }
679 static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg,
680                         struct blkio_policy_type *pol, uint64_t start_time,
681                         uint64_t io_start_time, bool direction, bool sync) { }
682
683 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
684
685 #define cfq_log(cfqd, fmt, args...)     \
686         blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
687
688 /* Traverses through cfq group service trees */
689 #define for_each_cfqg_st(cfqg, i, j, st) \
690         for (i = 0; i <= IDLE_WORKLOAD; i++) \
691                 for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
692                         : &cfqg->service_tree_idle; \
693                         (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
694                         (i == IDLE_WORKLOAD && j == 0); \
695                         j++, st = i < IDLE_WORKLOAD ? \
696                         &cfqg->service_trees[i][j]: NULL) \
697
698 static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
699         struct cfq_ttime *ttime, bool group_idle)
700 {
701         unsigned long slice;
702         if (!sample_valid(ttime->ttime_samples))
703                 return false;
704         if (group_idle)
705                 slice = cfqd->cfq_group_idle;
706         else
707                 slice = cfqd->cfq_slice_idle;
708         return ttime->ttime_mean > slice;
709 }
710
711 static inline bool iops_mode(struct cfq_data *cfqd)
712 {
713         /*
714          * If we are not idling on queues and it is a NCQ drive, parallel
715          * execution of requests is on and measuring time is not possible
716          * in most of the cases until and unless we drive shallower queue
717          * depths and that becomes a performance bottleneck. In such cases
718          * switch to start providing fairness in terms of number of IOs.
719          */
720         if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
721                 return true;
722         else
723                 return false;
724 }
725
726 static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
727 {
728         if (cfq_class_idle(cfqq))
729                 return IDLE_WORKLOAD;
730         if (cfq_class_rt(cfqq))
731                 return RT_WORKLOAD;
732         return BE_WORKLOAD;
733 }
734
735
736 static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
737 {
738         if (!cfq_cfqq_sync(cfqq))
739                 return ASYNC_WORKLOAD;
740         if (!cfq_cfqq_idle_window(cfqq))
741                 return SYNC_NOIDLE_WORKLOAD;
742         return SYNC_WORKLOAD;
743 }
744
745 static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
746                                         struct cfq_data *cfqd,
747                                         struct cfq_group *cfqg)
748 {
749         if (wl == IDLE_WORKLOAD)
750                 return cfqg->service_tree_idle.count;
751
752         return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
753                 + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
754                 + cfqg->service_trees[wl][SYNC_WORKLOAD].count;
755 }
756
757 static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
758                                         struct cfq_group *cfqg)
759 {
760         return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
761                 + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
762 }
763
764 static void cfq_dispatch_insert(struct request_queue *, struct request *);
765 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
766                                        struct cfq_io_cq *cic, struct bio *bio,
767                                        gfp_t gfp_mask);
768
769 static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
770 {
771         /* cic->icq is the first member, %NULL will convert to %NULL */
772         return container_of(icq, struct cfq_io_cq, icq);
773 }
774
775 static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
776                                                struct io_context *ioc)
777 {
778         if (ioc)
779                 return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
780         return NULL;
781 }
782
783 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
784 {
785         return cic->cfqq[is_sync];
786 }
787
788 static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
789                                 bool is_sync)
790 {
791         cic->cfqq[is_sync] = cfqq;
792 }
793
794 static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
795 {
796         return cic->icq.q->elevator->elevator_data;
797 }
798
799 /*
800  * We regard a request as SYNC, if it's either a read or has the SYNC bit
801  * set (in which case it could also be direct WRITE).
802  */
803 static inline bool cfq_bio_sync(struct bio *bio)
804 {
805         return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
806 }
807
808 /*
809  * scheduler run of queue, if there are requests pending and no one in the
810  * driver that will restart queueing
811  */
812 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
813 {
814         if (cfqd->busy_queues) {
815                 cfq_log(cfqd, "schedule dispatch");
816                 kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
817         }
818 }
819
820 /*
821  * Scale schedule slice based on io priority. Use the sync time slice only
822  * if a queue is marked sync and has sync io queued. A sync queue with async
823  * io only, should not get full sync slice length.
824  */
825 static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
826                                  unsigned short prio)
827 {
828         const int base_slice = cfqd->cfq_slice[sync];
829
830         WARN_ON(prio >= IOPRIO_BE_NR);
831
832         return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
833 }
834
835 static inline int
836 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
837 {
838         return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
839 }
840
841 static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
842 {
843         u64 d = delta << CFQ_SERVICE_SHIFT;
844
845         d = d * BLKIO_WEIGHT_DEFAULT;
846         do_div(d, cfqg->weight);
847         return d;
848 }
849
850 static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
851 {
852         s64 delta = (s64)(vdisktime - min_vdisktime);
853         if (delta > 0)
854                 min_vdisktime = vdisktime;
855
856         return min_vdisktime;
857 }
858
859 static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
860 {
861         s64 delta = (s64)(vdisktime - min_vdisktime);
862         if (delta < 0)
863                 min_vdisktime = vdisktime;
864
865         return min_vdisktime;
866 }
867
868 static void update_min_vdisktime(struct cfq_rb_root *st)
869 {
870         struct cfq_group *cfqg;
871
872         if (st->left) {
873                 cfqg = rb_entry_cfqg(st->left);
874                 st->min_vdisktime = max_vdisktime(st->min_vdisktime,
875                                                   cfqg->vdisktime);
876         }
877 }
878
879 /*
880  * get averaged number of queues of RT/BE priority.
881  * average is updated, with a formula that gives more weight to higher numbers,
882  * to quickly follows sudden increases and decrease slowly
883  */
884
885 static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
886                                         struct cfq_group *cfqg, bool rt)
887 {
888         unsigned min_q, max_q;
889         unsigned mult  = cfq_hist_divisor - 1;
890         unsigned round = cfq_hist_divisor / 2;
891         unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
892
893         min_q = min(cfqg->busy_queues_avg[rt], busy);
894         max_q = max(cfqg->busy_queues_avg[rt], busy);
895         cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
896                 cfq_hist_divisor;
897         return cfqg->busy_queues_avg[rt];
898 }
899
900 static inline unsigned
901 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
902 {
903         struct cfq_rb_root *st = &cfqd->grp_service_tree;
904
905         return cfq_target_latency * cfqg->weight / st->total_weight;
906 }
907
908 static inline unsigned
909 cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
910 {
911         unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
912         if (cfqd->cfq_latency) {
913                 /*
914                  * interested queues (we consider only the ones with the same
915                  * priority class in the cfq group)
916                  */
917                 unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
918                                                 cfq_class_rt(cfqq));
919                 unsigned sync_slice = cfqd->cfq_slice[1];
920                 unsigned expect_latency = sync_slice * iq;
921                 unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
922
923                 if (expect_latency > group_slice) {
924                         unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
925                         /* scale low_slice according to IO priority
926                          * and sync vs async */
927                         unsigned low_slice =
928                                 min(slice, base_low_slice * slice / sync_slice);
929                         /* the adapted slice value is scaled to fit all iqs
930                          * into the target latency */
931                         slice = max(slice * group_slice / expect_latency,
932                                     low_slice);
933                 }
934         }
935         return slice;
936 }
937
938 static inline void
939 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
940 {
941         unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
942
943         cfqq->slice_start = jiffies;
944         cfqq->slice_end = jiffies + slice;
945         cfqq->allocated_slice = slice;
946         cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
947 }
948
949 /*
950  * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
951  * isn't valid until the first request from the dispatch is activated
952  * and the slice time set.
953  */
954 static inline bool cfq_slice_used(struct cfq_queue *cfqq)
955 {
956         if (cfq_cfqq_slice_new(cfqq))
957                 return false;
958         if (time_before(jiffies, cfqq->slice_end))
959                 return false;
960
961         return true;
962 }
963
964 /*
965  * Lifted from AS - choose which of rq1 and rq2 that is best served now.
966  * We choose the request that is closest to the head right now. Distance
967  * behind the head is penalized and only allowed to a certain extent.
968  */
969 static struct request *
970 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
971 {
972         sector_t s1, s2, d1 = 0, d2 = 0;
973         unsigned long back_max;
974 #define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
975 #define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
976         unsigned wrap = 0; /* bit mask: requests behind the disk head? */
977
978         if (rq1 == NULL || rq1 == rq2)
979                 return rq2;
980         if (rq2 == NULL)
981                 return rq1;
982
983         if (rq_is_sync(rq1) != rq_is_sync(rq2))
984                 return rq_is_sync(rq1) ? rq1 : rq2;
985
986         if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
987                 return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
988
989         s1 = blk_rq_pos(rq1);
990         s2 = blk_rq_pos(rq2);
991
992         /*
993          * by definition, 1KiB is 2 sectors
994          */
995         back_max = cfqd->cfq_back_max * 2;
996
997         /*
998          * Strict one way elevator _except_ in the case where we allow
999          * short backward seeks which are biased as twice the cost of a
1000          * similar forward seek.
1001          */
1002         if (s1 >= last)
1003                 d1 = s1 - last;
1004         else if (s1 + back_max >= last)
1005                 d1 = (last - s1) * cfqd->cfq_back_penalty;
1006         else
1007                 wrap |= CFQ_RQ1_WRAP;
1008
1009         if (s2 >= last)
1010                 d2 = s2 - last;
1011         else if (s2 + back_max >= last)
1012                 d2 = (last - s2) * cfqd->cfq_back_penalty;
1013         else
1014                 wrap |= CFQ_RQ2_WRAP;
1015
1016         /* Found required data */
1017
1018         /*
1019          * By doing switch() on the bit mask "wrap" we avoid having to
1020          * check two variables for all permutations: --> faster!
1021          */
1022         switch (wrap) {
1023         case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
1024                 if (d1 < d2)
1025                         return rq1;
1026                 else if (d2 < d1)
1027                         return rq2;
1028                 else {
1029                         if (s1 >= s2)
1030                                 return rq1;
1031                         else
1032                                 return rq2;
1033                 }
1034
1035         case CFQ_RQ2_WRAP:
1036                 return rq1;
1037         case CFQ_RQ1_WRAP:
1038                 return rq2;
1039         case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
1040         default:
1041                 /*
1042                  * Since both rqs are wrapped,
1043                  * start with the one that's further behind head
1044                  * (--> only *one* back seek required),
1045                  * since back seek takes more time than forward.
1046                  */
1047                 if (s1 <= s2)
1048                         return rq1;
1049                 else
1050                         return rq2;
1051         }
1052 }
1053
1054 /*
1055  * The below is leftmost cache rbtree addon
1056  */
1057 static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
1058 {
1059         /* Service tree is empty */
1060         if (!root->count)
1061                 return NULL;
1062
1063         if (!root->left)
1064                 root->left = rb_first(&root->rb);
1065
1066         if (root->left)
1067                 return rb_entry(root->left, struct cfq_queue, rb_node);
1068
1069         return NULL;
1070 }
1071
1072 static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
1073 {
1074         if (!root->left)
1075                 root->left = rb_first(&root->rb);
1076
1077         if (root->left)
1078                 return rb_entry_cfqg(root->left);
1079
1080         return NULL;
1081 }
1082
1083 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
1084 {
1085         rb_erase(n, root);
1086         RB_CLEAR_NODE(n);
1087 }
1088
1089 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
1090 {
1091         if (root->left == n)
1092                 root->left = NULL;
1093         rb_erase_init(n, &root->rb);
1094         --root->count;
1095 }
1096
1097 /*
1098  * would be nice to take fifo expire time into account as well
1099  */
1100 static struct request *
1101 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1102                   struct request *last)
1103 {
1104         struct rb_node *rbnext = rb_next(&last->rb_node);
1105         struct rb_node *rbprev = rb_prev(&last->rb_node);
1106         struct request *next = NULL, *prev = NULL;
1107
1108         BUG_ON(RB_EMPTY_NODE(&last->rb_node));
1109
1110         if (rbprev)
1111                 prev = rb_entry_rq(rbprev);
1112
1113         if (rbnext)
1114                 next = rb_entry_rq(rbnext);
1115         else {
1116                 rbnext = rb_first(&cfqq->sort_list);
1117                 if (rbnext && rbnext != &last->rb_node)
1118                         next = rb_entry_rq(rbnext);
1119         }
1120
1121         return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
1122 }
1123
1124 static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
1125                                       struct cfq_queue *cfqq)
1126 {
1127         /*
1128          * just an approximation, should be ok.
1129          */
1130         return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
1131                        cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
1132 }
1133
1134 static inline s64
1135 cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
1136 {
1137         return cfqg->vdisktime - st->min_vdisktime;
1138 }
1139
1140 static void
1141 __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1142 {
1143         struct rb_node **node = &st->rb.rb_node;
1144         struct rb_node *parent = NULL;
1145         struct cfq_group *__cfqg;
1146         s64 key = cfqg_key(st, cfqg);
1147         int left = 1;
1148
1149         while (*node != NULL) {
1150                 parent = *node;
1151                 __cfqg = rb_entry_cfqg(parent);
1152
1153                 if (key < cfqg_key(st, __cfqg))
1154                         node = &parent->rb_left;
1155                 else {
1156                         node = &parent->rb_right;
1157                         left = 0;
1158                 }
1159         }
1160
1161         if (left)
1162                 st->left = &cfqg->rb_node;
1163
1164         rb_link_node(&cfqg->rb_node, parent, node);
1165         rb_insert_color(&cfqg->rb_node, &st->rb);
1166 }
1167
1168 static void
1169 cfq_update_group_weight(struct cfq_group *cfqg)
1170 {
1171         BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1172         if (cfqg->needs_update) {
1173                 cfqg->weight = cfqg->new_weight;
1174                 cfqg->needs_update = false;
1175         }
1176 }
1177
1178 static void
1179 cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1180 {
1181         BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1182
1183         cfq_update_group_weight(cfqg);
1184         __cfq_group_service_tree_add(st, cfqg);
1185         st->total_weight += cfqg->weight;
1186 }
1187
1188 static void
1189 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
1190 {
1191         struct cfq_rb_root *st = &cfqd->grp_service_tree;
1192         struct cfq_group *__cfqg;
1193         struct rb_node *n;
1194
1195         cfqg->nr_cfqq++;
1196         if (!RB_EMPTY_NODE(&cfqg->rb_node))
1197                 return;
1198
1199         /*
1200          * Currently put the group at the end. Later implement something
1201          * so that groups get lesser vtime based on their weights, so that
1202          * if group does not loose all if it was not continuously backlogged.
1203          */
1204         n = rb_last(&st->rb);
1205         if (n) {
1206                 __cfqg = rb_entry_cfqg(n);
1207                 cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
1208         } else
1209                 cfqg->vdisktime = st->min_vdisktime;
1210         cfq_group_service_tree_add(st, cfqg);
1211 }
1212
1213 static void
1214 cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
1215 {
1216         st->total_weight -= cfqg->weight;
1217         if (!RB_EMPTY_NODE(&cfqg->rb_node))
1218                 cfq_rb_erase(&cfqg->rb_node, st);
1219 }
1220
1221 static void
1222 cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
1223 {
1224         struct cfq_rb_root *st = &cfqd->grp_service_tree;
1225
1226         BUG_ON(cfqg->nr_cfqq < 1);
1227         cfqg->nr_cfqq--;
1228
1229         /* If there are other cfq queues under this group, don't delete it */
1230         if (cfqg->nr_cfqq)
1231                 return;
1232
1233         cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
1234         cfq_group_service_tree_del(st, cfqg);
1235         cfqg->saved_workload_slice = 0;
1236         cfq_blkiocg_update_dequeue_stats(cfqg_to_blkg(cfqg),
1237                                          &blkio_policy_cfq, 1);
1238 }
1239
1240 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
1241                                                 unsigned int *unaccounted_time)
1242 {
1243         unsigned int slice_used;
1244
1245         /*
1246          * Queue got expired before even a single request completed or
1247          * got expired immediately after first request completion.
1248          */
1249         if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
1250                 /*
1251                  * Also charge the seek time incurred to the group, otherwise
1252                  * if there are mutiple queues in the group, each can dispatch
1253                  * a single request on seeky media and cause lots of seek time
1254                  * and group will never know it.
1255                  */
1256                 slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
1257                                         1);
1258         } else {
1259                 slice_used = jiffies - cfqq->slice_start;
1260                 if (slice_used > cfqq->allocated_slice) {
1261                         *unaccounted_time = slice_used - cfqq->allocated_slice;
1262                         slice_used = cfqq->allocated_slice;
1263                 }
1264                 if (time_after(cfqq->slice_start, cfqq->dispatch_start))
1265                         *unaccounted_time += cfqq->slice_start -
1266                                         cfqq->dispatch_start;
1267         }
1268
1269         return slice_used;
1270 }
1271
1272 static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
1273                                 struct cfq_queue *cfqq)
1274 {
1275         struct cfq_rb_root *st = &cfqd->grp_service_tree;
1276         unsigned int used_sl, charge, unaccounted_sl = 0;
1277         int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
1278                         - cfqg->service_tree_idle.count;
1279
1280         BUG_ON(nr_sync < 0);
1281         used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
1282
1283         if (iops_mode(cfqd))
1284                 charge = cfqq->slice_dispatch;
1285         else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
1286                 charge = cfqq->allocated_slice;
1287
1288         /* Can't update vdisktime while group is on service tree */
1289         cfq_group_service_tree_del(st, cfqg);
1290         cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
1291         /* If a new weight was requested, update now, off tree */
1292         cfq_group_service_tree_add(st, cfqg);
1293
1294         /* This group is being expired. Save the context */
1295         if (time_after(cfqd->workload_expires, jiffies)) {
1296                 cfqg->saved_workload_slice = cfqd->workload_expires
1297                                                 - jiffies;
1298                 cfqg->saved_workload = cfqd->serving_type;
1299                 cfqg->saved_serving_prio = cfqd->serving_prio;
1300         } else
1301                 cfqg->saved_workload_slice = 0;
1302
1303         cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
1304                                         st->min_vdisktime);
1305         cfq_log_cfqq(cfqq->cfqd, cfqq,
1306                      "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
1307                      used_sl, cfqq->slice_dispatch, charge,
1308                      iops_mode(cfqd), cfqq->nr_sectors);
1309         cfq_blkiocg_update_timeslice_used(cfqg_to_blkg(cfqg), &blkio_policy_cfq,
1310                                           used_sl, unaccounted_sl);
1311         cfq_blkiocg_set_start_empty_time(cfqg_to_blkg(cfqg), &blkio_policy_cfq);
1312 }
1313
1314 /**
1315  * cfq_init_cfqg_base - initialize base part of a cfq_group
1316  * @cfqg: cfq_group to initialize
1317  *
1318  * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
1319  * is enabled or not.
1320  */
1321 static void cfq_init_cfqg_base(struct cfq_group *cfqg)
1322 {
1323         struct cfq_rb_root *st;
1324         int i, j;
1325
1326         for_each_cfqg_st(cfqg, i, j, st)
1327                 *st = CFQ_RB_ROOT;
1328         RB_CLEAR_NODE(&cfqg->rb_node);
1329
1330         cfqg->ttime.last_end_request = jiffies;
1331 }
1332
1333 #ifdef CONFIG_CFQ_GROUP_IOSCHED
1334 static void cfq_update_blkio_group_weight(struct blkio_group *blkg,
1335                                           unsigned int weight)
1336 {
1337         struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1338
1339         cfqg->new_weight = weight;
1340         cfqg->needs_update = true;
1341 }
1342
1343 static void cfq_init_blkio_group(struct blkio_group *blkg)
1344 {
1345         struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1346
1347         cfq_init_cfqg_base(cfqg);
1348         cfqg->weight = blkg->blkcg->weight;
1349 }
1350
1351 /*
1352  * Search for the cfq group current task belongs to. request_queue lock must
1353  * be held.
1354  */
1355 static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
1356                                                 struct blkio_cgroup *blkcg)
1357 {
1358         struct request_queue *q = cfqd->queue;
1359         struct cfq_group *cfqg = NULL;
1360
1361         /* avoid lookup for the common case where there's no blkio cgroup */
1362         if (blkcg == &blkio_root_cgroup) {
1363                 cfqg = cfqd->root_group;
1364         } else {
1365                 struct blkio_group *blkg;
1366
1367                 blkg = blkg_lookup_create(blkcg, q, false);
1368                 if (!IS_ERR(blkg))
1369                         cfqg = blkg_to_cfqg(blkg);
1370         }
1371
1372         return cfqg;
1373 }
1374
1375 static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1376 {
1377         /* Currently, all async queues are mapped to root group */
1378         if (!cfq_cfqq_sync(cfqq))
1379                 cfqg = cfqq->cfqd->root_group;
1380
1381         cfqq->cfqg = cfqg;
1382         /* cfqq reference on cfqg */
1383         cfqg_get(cfqg);
1384 }
1385
1386 static u64 blkg_prfill_weight_device(struct seq_file *sf,
1387                                      struct blkg_policy_data *pd, int off)
1388 {
1389         if (!pd->conf.weight)
1390                 return 0;
1391         return __blkg_prfill_u64(sf, pd, pd->conf.weight);
1392 }
1393
1394 static int blkcg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
1395                                      struct seq_file *sf)
1396 {
1397         blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp),
1398                           blkg_prfill_weight_device, BLKIO_POLICY_PROP, 0,
1399                           false);
1400         return 0;
1401 }
1402
1403 static int blkcg_print_weight(struct cgroup *cgrp, struct cftype *cft,
1404                               struct seq_file *sf)
1405 {
1406         seq_printf(sf, "%u\n", cgroup_to_blkio_cgroup(cgrp)->weight);
1407         return 0;
1408 }
1409
1410 static int blkcg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
1411                                    const char *buf)
1412 {
1413         struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
1414         struct blkg_policy_data *pd;
1415         struct blkg_conf_ctx ctx;
1416         int ret;
1417
1418         ret = blkg_conf_prep(blkcg, buf, &ctx);
1419         if (ret)
1420                 return ret;
1421
1422         ret = -EINVAL;
1423         pd = ctx.blkg->pd[BLKIO_POLICY_PROP];
1424         if (pd && (!ctx.v || (ctx.v >= BLKIO_WEIGHT_MIN &&
1425                               ctx.v <= BLKIO_WEIGHT_MAX))) {
1426                 pd->conf.weight = ctx.v;
1427                 cfq_update_blkio_group_weight(ctx.blkg, ctx.v ?: blkcg->weight);
1428                 ret = 0;
1429         }
1430
1431         blkg_conf_finish(&ctx);
1432         return ret;
1433 }
1434
1435 static int blkcg_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
1436 {
1437         struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
1438         struct blkio_group *blkg;
1439         struct hlist_node *n;
1440
1441         if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
1442                 return -EINVAL;
1443
1444         spin_lock_irq(&blkcg->lock);
1445         blkcg->weight = (unsigned int)val;
1446
1447         hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
1448                 struct blkg_policy_data *pd = blkg->pd[BLKIO_POLICY_PROP];
1449
1450                 if (pd && !pd->conf.weight)
1451                         cfq_update_blkio_group_weight(blkg, blkcg->weight);
1452         }
1453
1454         spin_unlock_irq(&blkcg->lock);
1455         return 0;
1456 }
1457
1458 #ifdef CONFIG_DEBUG_BLK_CGROUP
1459 static u64 blkg_prfill_avg_queue_size(struct seq_file *sf,
1460                                       struct blkg_policy_data *pd, int off)
1461 {
1462         u64 samples = blkg_stat_read(&pd->stats.avg_queue_size_samples);
1463         u64 v = 0;
1464
1465         if (samples) {
1466                 v = blkg_stat_read(&pd->stats.avg_queue_size_sum);
1467                 do_div(v, samples);
1468         }
1469         __blkg_prfill_u64(sf, pd, v);
1470         return 0;
1471 }
1472
1473 /* print avg_queue_size */
1474 static int blkcg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
1475                                       struct seq_file *sf)
1476 {
1477         struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
1478
1479         blkcg_print_blkgs(sf, blkcg, blkg_prfill_avg_queue_size,
1480                           BLKIO_POLICY_PROP, 0, false);
1481         return 0;
1482 }
1483 #endif  /* CONFIG_DEBUG_BLK_CGROUP */
1484
1485 static struct cftype cfq_blkcg_files[] = {
1486         {
1487                 .name = "weight_device",
1488                 .read_seq_string = blkcg_print_weight_device,
1489                 .write_string = blkcg_set_weight_device,
1490                 .max_write_len = 256,
1491         },
1492         {
1493                 .name = "weight",
1494                 .read_seq_string = blkcg_print_weight,
1495                 .write_u64 = blkcg_set_weight,
1496         },
1497         {
1498                 .name = "time",
1499                 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1500                                 offsetof(struct blkio_group_stats, time)),
1501                 .read_seq_string = blkcg_print_stat,
1502         },
1503         {
1504                 .name = "sectors",
1505                 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1506                                 offsetof(struct blkio_group_stats, sectors)),
1507                 .read_seq_string = blkcg_print_stat,
1508         },
1509         {
1510                 .name = "io_service_bytes",
1511                 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1512                                 offsetof(struct blkio_group_stats, service_bytes)),
1513                 .read_seq_string = blkcg_print_rwstat,
1514         },
1515         {
1516                 .name = "io_serviced",
1517                 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1518                                 offsetof(struct blkio_group_stats, serviced)),
1519                 .read_seq_string = blkcg_print_rwstat,
1520         },
1521         {
1522                 .name = "io_service_time",
1523                 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1524                                 offsetof(struct blkio_group_stats, service_time)),
1525                 .read_seq_string = blkcg_print_rwstat,
1526         },
1527         {
1528                 .name = "io_wait_time",
1529                 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1530                                 offsetof(struct blkio_group_stats, wait_time)),
1531                 .read_seq_string = blkcg_print_rwstat,
1532         },
1533         {
1534                 .name = "io_merged",
1535                 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1536                                 offsetof(struct blkio_group_stats, merged)),
1537                 .read_seq_string = blkcg_print_rwstat,
1538         },
1539         {
1540                 .name = "io_queued",
1541                 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1542                                 offsetof(struct blkio_group_stats, queued)),
1543                 .read_seq_string = blkcg_print_rwstat,
1544         },
1545 #ifdef CONFIG_DEBUG_BLK_CGROUP
1546         {
1547                 .name = "avg_queue_size",
1548                 .read_seq_string = blkcg_print_avg_queue_size,
1549         },
1550         {
1551                 .name = "group_wait_time",
1552                 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1553                                 offsetof(struct blkio_group_stats, group_wait_time)),
1554                 .read_seq_string = blkcg_print_stat,
1555         },
1556         {
1557                 .name = "idle_time",
1558                 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1559                                 offsetof(struct blkio_group_stats, idle_time)),
1560                 .read_seq_string = blkcg_print_stat,
1561         },
1562         {
1563                 .name = "empty_time",
1564                 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1565                                 offsetof(struct blkio_group_stats, empty_time)),
1566                 .read_seq_string = blkcg_print_stat,
1567         },
1568         {
1569                 .name = "dequeue",
1570                 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1571                                 offsetof(struct blkio_group_stats, dequeue)),
1572                 .read_seq_string = blkcg_print_stat,
1573         },
1574         {
1575                 .name = "unaccounted_time",
1576                 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1577                                 offsetof(struct blkio_group_stats, unaccounted_time)),
1578                 .read_seq_string = blkcg_print_stat,
1579         },
1580 #endif  /* CONFIG_DEBUG_BLK_CGROUP */
1581         { }     /* terminate */
1582 };
1583 #else /* GROUP_IOSCHED */
1584 static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
1585                                                 struct blkio_cgroup *blkcg)
1586 {
1587         return cfqd->root_group;
1588 }
1589
1590 static inline void
1591 cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
1592         cfqq->cfqg = cfqg;
1593 }
1594
1595 #endif /* GROUP_IOSCHED */
1596
1597 /*
1598  * The cfqd->service_trees holds all pending cfq_queue's that have
1599  * requests waiting to be processed. It is sorted in the order that
1600  * we will service the queues.
1601  */
1602 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1603                                  bool add_front)
1604 {
1605         struct rb_node **p, *parent;
1606         struct cfq_queue *__cfqq;
1607         unsigned long rb_key;
1608         struct cfq_rb_root *service_tree;
1609         int left;
1610         int new_cfqq = 1;
1611
1612         service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
1613                                                 cfqq_type(cfqq));
1614         if (cfq_class_idle(cfqq)) {
1615                 rb_key = CFQ_IDLE_DELAY;
1616                 parent = rb_last(&service_tree->rb);
1617                 if (parent && parent != &cfqq->rb_node) {
1618                         __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1619                         rb_key += __cfqq->rb_key;
1620                 } else
1621                         rb_key += jiffies;
1622         } else if (!add_front) {
1623                 /*
1624                  * Get our rb key offset. Subtract any residual slice
1625                  * value carried from last service. A negative resid
1626                  * count indicates slice overrun, and this should position
1627                  * the next service time further away in the tree.
1628                  */
1629                 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
1630                 rb_key -= cfqq->slice_resid;
1631                 cfqq->slice_resid = 0;
1632         } else {
1633                 rb_key = -HZ;
1634                 __cfqq = cfq_rb_first(service_tree);
1635                 rb_key += __cfqq ? __cfqq->rb_key : jiffies;
1636         }
1637
1638         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1639                 new_cfqq = 0;
1640                 /*
1641                  * same position, nothing more to do
1642                  */
1643                 if (rb_key == cfqq->rb_key &&
1644                     cfqq->service_tree == service_tree)
1645                         return;
1646
1647                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1648                 cfqq->service_tree = NULL;
1649         }
1650
1651         left = 1;
1652         parent = NULL;
1653         cfqq->service_tree = service_tree;
1654         p = &service_tree->rb.rb_node;
1655         while (*p) {
1656                 struct rb_node **n;
1657
1658                 parent = *p;
1659                 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1660
1661                 /*
1662                  * sort by key, that represents service time.
1663                  */
1664                 if (time_before(rb_key, __cfqq->rb_key))
1665                         n = &(*p)->rb_left;
1666                 else {
1667                         n = &(*p)->rb_right;
1668                         left = 0;
1669                 }
1670
1671                 p = n;
1672         }
1673
1674         if (left)
1675                 service_tree->left = &cfqq->rb_node;
1676
1677         cfqq->rb_key = rb_key;
1678         rb_link_node(&cfqq->rb_node, parent, p);
1679         rb_insert_color(&cfqq->rb_node, &service_tree->rb);
1680         service_tree->count++;
1681         if (add_front || !new_cfqq)
1682                 return;
1683         cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
1684 }
1685
1686 static struct cfq_queue *
1687 cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
1688                      sector_t sector, struct rb_node **ret_parent,
1689                      struct rb_node ***rb_link)
1690 {
1691         struct rb_node **p, *parent;
1692         struct cfq_queue *cfqq = NULL;
1693
1694         parent = NULL;
1695         p = &root->rb_node;
1696         while (*p) {
1697                 struct rb_node **n;
1698
1699                 parent = *p;
1700                 cfqq = rb_entry(parent, struct cfq_queue, p_node);
1701
1702                 /*
1703                  * Sort strictly based on sector.  Smallest to the left,
1704                  * largest to the right.
1705                  */
1706                 if (sector > blk_rq_pos(cfqq->next_rq))
1707                         n = &(*p)->rb_right;
1708                 else if (sector < blk_rq_pos(cfqq->next_rq))
1709                         n = &(*p)->rb_left;
1710                 else
1711                         break;
1712                 p = n;
1713                 cfqq = NULL;
1714         }
1715
1716         *ret_parent = parent;
1717         if (rb_link)
1718                 *rb_link = p;
1719         return cfqq;
1720 }
1721
1722 static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1723 {
1724         struct rb_node **p, *parent;
1725         struct cfq_queue *__cfqq;
1726
1727         if (cfqq->p_root) {
1728                 rb_erase(&cfqq->p_node, cfqq->p_root);
1729                 cfqq->p_root = NULL;
1730         }
1731
1732         if (cfq_class_idle(cfqq))
1733                 return;
1734         if (!cfqq->next_rq)
1735                 return;
1736
1737         cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
1738         __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
1739                                       blk_rq_pos(cfqq->next_rq), &parent, &p);
1740         if (!__cfqq) {
1741                 rb_link_node(&cfqq->p_node, parent, p);
1742                 rb_insert_color(&cfqq->p_node, cfqq->p_root);
1743         } else
1744                 cfqq->p_root = NULL;
1745 }
1746
1747 /*
1748  * Update cfqq's position in the service tree.
1749  */
1750 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1751 {
1752         /*
1753          * Resorting requires the cfqq to be on the RR list already.
1754          */
1755         if (cfq_cfqq_on_rr(cfqq)) {
1756                 cfq_service_tree_add(cfqd, cfqq, 0);
1757                 cfq_prio_tree_add(cfqd, cfqq);
1758         }
1759 }
1760
1761 /*
1762  * add to busy list of queues for service, trying to be fair in ordering
1763  * the pending list according to last request service
1764  */
1765 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1766 {
1767         cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
1768         BUG_ON(cfq_cfqq_on_rr(cfqq));
1769         cfq_mark_cfqq_on_rr(cfqq);
1770         cfqd->busy_queues++;
1771         if (cfq_cfqq_sync(cfqq))
1772                 cfqd->busy_sync_queues++;
1773
1774         cfq_resort_rr_list(cfqd, cfqq);
1775 }
1776
1777 /*
1778  * Called when the cfqq no longer has requests pending, remove it from
1779  * the service tree.
1780  */
1781 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1782 {
1783         cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
1784         BUG_ON(!cfq_cfqq_on_rr(cfqq));
1785         cfq_clear_cfqq_on_rr(cfqq);
1786
1787         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1788                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1789                 cfqq->service_tree = NULL;
1790         }
1791         if (cfqq->p_root) {
1792                 rb_erase(&cfqq->p_node, cfqq->p_root);
1793                 cfqq->p_root = NULL;
1794         }
1795
1796         cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
1797         BUG_ON(!cfqd->busy_queues);
1798         cfqd->busy_queues--;
1799         if (cfq_cfqq_sync(cfqq))
1800                 cfqd->busy_sync_queues--;
1801 }
1802
1803 /*
1804  * rb tree support functions
1805  */
1806 static void cfq_del_rq_rb(struct request *rq)
1807 {
1808         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1809         const int sync = rq_is_sync(rq);
1810
1811         BUG_ON(!cfqq->queued[sync]);
1812         cfqq->queued[sync]--;
1813
1814         elv_rb_del(&cfqq->sort_list, rq);
1815
1816         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
1817                 /*
1818                  * Queue will be deleted from service tree when we actually
1819                  * expire it later. Right now just remove it from prio tree
1820                  * as it is empty.
1821                  */
1822                 if (cfqq->p_root) {
1823                         rb_erase(&cfqq->p_node, cfqq->p_root);
1824                         cfqq->p_root = NULL;
1825                 }
1826         }
1827 }
1828
1829 static void cfq_add_rq_rb(struct request *rq)
1830 {
1831         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1832         struct cfq_data *cfqd = cfqq->cfqd;
1833         struct request *prev;
1834
1835         cfqq->queued[rq_is_sync(rq)]++;
1836
1837         elv_rb_add(&cfqq->sort_list, rq);
1838
1839         if (!cfq_cfqq_on_rr(cfqq))
1840                 cfq_add_cfqq_rr(cfqd, cfqq);
1841
1842         /*
1843          * check if this request is a better next-serve candidate
1844          */
1845         prev = cfqq->next_rq;
1846         cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
1847
1848         /*
1849          * adjust priority tree position, if ->next_rq changes
1850          */
1851         if (prev != cfqq->next_rq)
1852                 cfq_prio_tree_add(cfqd, cfqq);
1853
1854         BUG_ON(!cfqq->next_rq);
1855 }
1856
1857 static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
1858 {
1859         elv_rb_del(&cfqq->sort_list, rq);
1860         cfqq->queued[rq_is_sync(rq)]--;
1861         cfq_blkiocg_update_io_remove_stats(cfqg_to_blkg(RQ_CFQG(rq)),
1862                                            &blkio_policy_cfq, rq_data_dir(rq),
1863                                            rq_is_sync(rq));
1864         cfq_add_rq_rb(rq);
1865         cfq_blkiocg_update_io_add_stats(cfqg_to_blkg(RQ_CFQG(rq)),
1866                                         &blkio_policy_cfq,
1867                                         cfqg_to_blkg(cfqq->cfqd->serving_group),
1868                                         rq_data_dir(rq), rq_is_sync(rq));
1869 }
1870
1871 static struct request *
1872 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
1873 {
1874         struct task_struct *tsk = current;
1875         struct cfq_io_cq *cic;
1876         struct cfq_queue *cfqq;
1877
1878         cic = cfq_cic_lookup(cfqd, tsk->io_context);
1879         if (!cic)
1880                 return NULL;
1881
1882         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1883         if (cfqq) {
1884                 sector_t sector = bio->bi_sector + bio_sectors(bio);
1885
1886                 return elv_rb_find(&cfqq->sort_list, sector);
1887         }
1888
1889         return NULL;
1890 }
1891
1892 static void cfq_activate_request(struct request_queue *q, struct request *rq)
1893 {
1894         struct cfq_data *cfqd = q->elevator->elevator_data;
1895
1896         cfqd->rq_in_driver++;
1897         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
1898                                                 cfqd->rq_in_driver);
1899
1900         cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
1901 }
1902
1903 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
1904 {
1905         struct cfq_data *cfqd = q->elevator->elevator_data;
1906
1907         WARN_ON(!cfqd->rq_in_driver);
1908         cfqd->rq_in_driver--;
1909         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
1910                                                 cfqd->rq_in_driver);
1911 }
1912
1913 static void cfq_remove_request(struct request *rq)
1914 {
1915         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1916
1917         if (cfqq->next_rq == rq)
1918                 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
1919
1920         list_del_init(&rq->queuelist);
1921         cfq_del_rq_rb(rq);
1922
1923         cfqq->cfqd->rq_queued--;
1924         cfq_blkiocg_update_io_remove_stats(cfqg_to_blkg(RQ_CFQG(rq)),
1925                                            &blkio_policy_cfq, rq_data_dir(rq),
1926                                            rq_is_sync(rq));
1927         if (rq->cmd_flags & REQ_PRIO) {
1928                 WARN_ON(!cfqq->prio_pending);
1929                 cfqq->prio_pending--;
1930         }
1931 }
1932
1933 static int cfq_merge(struct request_queue *q, struct request **req,
1934                      struct bio *bio)
1935 {
1936         struct cfq_data *cfqd = q->elevator->elevator_data;
1937         struct request *__rq;
1938
1939         __rq = cfq_find_rq_fmerge(cfqd, bio);
1940         if (__rq && elv_rq_merge_ok(__rq, bio)) {
1941                 *req = __rq;
1942                 return ELEVATOR_FRONT_MERGE;
1943         }
1944
1945         return ELEVATOR_NO_MERGE;
1946 }
1947
1948 static void cfq_merged_request(struct request_queue *q, struct request *req,
1949                                int type)
1950 {
1951         if (type == ELEVATOR_FRONT_MERGE) {
1952                 struct cfq_queue *cfqq = RQ_CFQQ(req);
1953
1954                 cfq_reposition_rq_rb(cfqq, req);
1955         }
1956 }
1957
1958 static void cfq_bio_merged(struct request_queue *q, struct request *req,
1959                                 struct bio *bio)
1960 {
1961         cfq_blkiocg_update_io_merged_stats(cfqg_to_blkg(RQ_CFQG(req)),
1962                                            &blkio_policy_cfq, bio_data_dir(bio),
1963                                            cfq_bio_sync(bio));
1964 }
1965
1966 static void
1967 cfq_merged_requests(struct request_queue *q, struct request *rq,
1968                     struct request *next)
1969 {
1970         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1971         struct cfq_data *cfqd = q->elevator->elevator_data;
1972
1973         /*
1974          * reposition in fifo if next is older than rq
1975          */
1976         if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
1977             time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
1978                 list_move(&rq->queuelist, &next->queuelist);
1979                 rq_set_fifo_time(rq, rq_fifo_time(next));
1980         }
1981
1982         if (cfqq->next_rq == next)
1983                 cfqq->next_rq = rq;
1984         cfq_remove_request(next);
1985         cfq_blkiocg_update_io_merged_stats(cfqg_to_blkg(RQ_CFQG(rq)),
1986                                            &blkio_policy_cfq, rq_data_dir(next),
1987                                            rq_is_sync(next));
1988
1989         cfqq = RQ_CFQQ(next);
1990         /*
1991          * all requests of this queue are merged to other queues, delete it
1992          * from the service tree. If it's the active_queue,
1993          * cfq_dispatch_requests() will choose to expire it or do idle
1994          */
1995         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
1996             cfqq != cfqd->active_queue)
1997                 cfq_del_cfqq_rr(cfqd, cfqq);
1998 }
1999
2000 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
2001                            struct bio *bio)
2002 {
2003         struct cfq_data *cfqd = q->elevator->elevator_data;
2004         struct cfq_io_cq *cic;
2005         struct cfq_queue *cfqq;
2006
2007         /*
2008          * Disallow merge of a sync bio into an async request.
2009          */
2010         if (cfq_bio_sync(bio) && !rq_is_sync(rq))
2011                 return false;
2012
2013         /*
2014          * Lookup the cfqq that this bio will be queued with and allow
2015          * merge only if rq is queued there.
2016          */
2017         cic = cfq_cic_lookup(cfqd, current->io_context);
2018         if (!cic)
2019                 return false;
2020
2021         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
2022         return cfqq == RQ_CFQQ(rq);
2023 }
2024
2025 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2026 {
2027         del_timer(&cfqd->idle_slice_timer);
2028         cfq_blkiocg_update_idle_time_stats(cfqg_to_blkg(cfqq->cfqg),
2029                                            &blkio_policy_cfq);
2030 }
2031
2032 static void __cfq_set_active_queue(struct cfq_data *cfqd,
2033                                    struct cfq_queue *cfqq)
2034 {
2035         if (cfqq) {
2036                 cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
2037                                 cfqd->serving_prio, cfqd->serving_type);
2038                 cfq_blkiocg_update_avg_queue_size_stats(cfqg_to_blkg(cfqq->cfqg),
2039                                                         &blkio_policy_cfq);
2040                 cfqq->slice_start = 0;
2041                 cfqq->dispatch_start = jiffies;
2042                 cfqq->allocated_slice = 0;
2043                 cfqq->slice_end = 0;
2044                 cfqq->slice_dispatch = 0;
2045                 cfqq->nr_sectors = 0;
2046
2047                 cfq_clear_cfqq_wait_request(cfqq);
2048                 cfq_clear_cfqq_must_dispatch(cfqq);
2049                 cfq_clear_cfqq_must_alloc_slice(cfqq);
2050                 cfq_clear_cfqq_fifo_expire(cfqq);
2051                 cfq_mark_cfqq_slice_new(cfqq);
2052
2053                 cfq_del_timer(cfqd, cfqq);
2054         }
2055
2056         cfqd->active_queue = cfqq;
2057 }
2058
2059 /*
2060  * current cfqq expired its slice (or was too idle), select new one
2061  */
2062 static void
2063 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2064                     bool timed_out)
2065 {
2066         cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
2067
2068         if (cfq_cfqq_wait_request(cfqq))
2069                 cfq_del_timer(cfqd, cfqq);
2070
2071         cfq_clear_cfqq_wait_request(cfqq);
2072         cfq_clear_cfqq_wait_busy(cfqq);
2073
2074         /*
2075          * If this cfqq is shared between multiple processes, check to
2076          * make sure that those processes are still issuing I/Os within
2077          * the mean seek distance.  If not, it may be time to break the
2078          * queues apart again.
2079          */
2080         if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
2081                 cfq_mark_cfqq_split_coop(cfqq);
2082
2083         /*
2084          * store what was left of this slice, if the queue idled/timed out
2085          */
2086         if (timed_out) {
2087                 if (cfq_cfqq_slice_new(cfqq))
2088                         cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
2089                 else
2090                         cfqq->slice_resid = cfqq->slice_end - jiffies;
2091                 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
2092         }
2093
2094         cfq_group_served(cfqd, cfqq->cfqg, cfqq);
2095
2096         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
2097                 cfq_del_cfqq_rr(cfqd, cfqq);
2098
2099         cfq_resort_rr_list(cfqd, cfqq);
2100
2101         if (cfqq == cfqd->active_queue)
2102                 cfqd->active_queue = NULL;
2103
2104         if (cfqd->active_cic) {
2105                 put_io_context(cfqd->active_cic->icq.ioc);
2106                 cfqd->active_cic = NULL;
2107         }
2108 }
2109
2110 static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
2111 {
2112         struct cfq_queue *cfqq = cfqd->active_queue;
2113
2114         if (cfqq)
2115                 __cfq_slice_expired(cfqd, cfqq, timed_out);
2116 }
2117
2118 /*
2119  * Get next queue for service. Unless we have a queue preemption,
2120  * we'll simply select the first cfqq in the service tree.
2121  */
2122 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
2123 {
2124         struct cfq_rb_root *service_tree =
2125                 service_tree_for(cfqd->serving_group, cfqd->serving_prio,
2126                                         cfqd->serving_type);
2127
2128         if (!cfqd->rq_queued)
2129                 return NULL;
2130
2131         /* There is nothing to dispatch */
2132         if (!service_tree)
2133                 return NULL;
2134         if (RB_EMPTY_ROOT(&service_tree->rb))
2135                 return NULL;
2136         return cfq_rb_first(service_tree);
2137 }
2138
2139 static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
2140 {
2141         struct cfq_group *cfqg;
2142         struct cfq_queue *cfqq;
2143         int i, j;
2144         struct cfq_rb_root *st;
2145
2146         if (!cfqd->rq_queued)
2147                 return NULL;
2148
2149         cfqg = cfq_get_next_cfqg(cfqd);
2150         if (!cfqg)
2151                 return NULL;
2152
2153         for_each_cfqg_st(cfqg, i, j, st)
2154                 if ((cfqq = cfq_rb_first(st)) != NULL)
2155                         return cfqq;
2156         return NULL;
2157 }
2158
2159 /*
2160  * Get and set a new active queue for service.
2161  */
2162 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
2163                                               struct cfq_queue *cfqq)
2164 {
2165         if (!cfqq)
2166                 cfqq = cfq_get_next_queue(cfqd);
2167
2168         __cfq_set_active_queue(cfqd, cfqq);
2169         return cfqq;
2170 }
2171
2172 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
2173                                           struct request *rq)
2174 {
2175         if (blk_rq_pos(rq) >= cfqd->last_position)
2176                 return blk_rq_pos(rq) - cfqd->last_position;
2177         else
2178                 return cfqd->last_position - blk_rq_pos(rq);
2179 }
2180
2181 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2182                                struct request *rq)
2183 {
2184         return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
2185 }
2186
2187 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
2188                                     struct cfq_queue *cur_cfqq)
2189 {
2190         struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
2191         struct rb_node *parent, *node;
2192         struct cfq_queue *__cfqq;
2193         sector_t sector = cfqd->last_position;
2194
2195         if (RB_EMPTY_ROOT(root))
2196                 return NULL;
2197
2198         /*
2199          * First, if we find a request starting at the end of the last
2200          * request, choose it.
2201          */
2202         __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
2203         if (__cfqq)
2204                 return __cfqq;
2205
2206         /*
2207          * If the exact sector wasn't found, the parent of the NULL leaf
2208          * will contain the closest sector.
2209          */
2210         __cfqq = rb_entry(parent, struct cfq_queue, p_node);
2211         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2212                 return __cfqq;
2213
2214         if (blk_rq_pos(__cfqq->next_rq) < sector)
2215                 node = rb_next(&__cfqq->p_node);
2216         else
2217                 node = rb_prev(&__cfqq->p_node);
2218         if (!node)
2219                 return NULL;
2220
2221         __cfqq = rb_entry(node, struct cfq_queue, p_node);
2222         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2223                 return __cfqq;
2224
2225         return NULL;
2226 }
2227
2228 /*
2229  * cfqd - obvious
2230  * cur_cfqq - passed in so that we don't decide that the current queue is
2231  *            closely cooperating with itself.
2232  *
2233  * So, basically we're assuming that that cur_cfqq has dispatched at least
2234  * one request, and that cfqd->last_position reflects a position on the disk
2235  * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
2236  * assumption.
2237  */
2238 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
2239                                               struct cfq_queue *cur_cfqq)
2240 {
2241         struct cfq_queue *cfqq;
2242
2243         if (cfq_class_idle(cur_cfqq))
2244                 return NULL;
2245         if (!cfq_cfqq_sync(cur_cfqq))
2246                 return NULL;
2247         if (CFQQ_SEEKY(cur_cfqq))
2248                 return NULL;
2249
2250         /*
2251          * Don't search priority tree if it's the only queue in the group.
2252          */
2253         if (cur_cfqq->cfqg->nr_cfqq == 1)
2254                 return NULL;
2255
2256         /*
2257          * We should notice if some of the queues are cooperating, eg
2258          * working closely on the same area of the disk. In that case,
2259          * we can group them together and don't waste time idling.
2260          */
2261         cfqq = cfqq_close(cfqd, cur_cfqq);
2262         if (!cfqq)
2263                 return NULL;
2264
2265         /* If new queue belongs to different cfq_group, don't choose it */
2266         if (cur_cfqq->cfqg != cfqq->cfqg)
2267                 return NULL;
2268
2269         /*
2270          * It only makes sense to merge sync queues.
2271          */
2272         if (!cfq_cfqq_sync(cfqq))
2273                 return NULL;
2274         if (CFQQ_SEEKY(cfqq))
2275                 return NULL;
2276
2277         /*
2278          * Do not merge queues of different priority classes
2279          */
2280         if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
2281                 return NULL;
2282
2283         return cfqq;
2284 }
2285
2286 /*
2287  * Determine whether we should enforce idle window for this queue.
2288  */
2289
2290 static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2291 {
2292         enum wl_prio_t prio = cfqq_prio(cfqq);
2293         struct cfq_rb_root *service_tree = cfqq->service_tree;
2294
2295         BUG_ON(!service_tree);
2296         BUG_ON(!service_tree->count);
2297
2298         if (!cfqd->cfq_slice_idle)
2299                 return false;
2300
2301         /* We never do for idle class queues. */
2302         if (prio == IDLE_WORKLOAD)
2303                 return false;
2304
2305         /* We do for queues that were marked with idle window flag. */
2306         if (cfq_cfqq_idle_window(cfqq) &&
2307            !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
2308                 return true;
2309
2310         /*
2311          * Otherwise, we do only if they are the last ones
2312          * in their service tree.
2313          */
2314         if (service_tree->count == 1 && cfq_cfqq_sync(cfqq) &&
2315            !cfq_io_thinktime_big(cfqd, &service_tree->ttime, false))
2316                 return true;
2317         cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
2318                         service_tree->count);
2319         return false;
2320 }
2321
2322 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
2323 {
2324         struct cfq_queue *cfqq = cfqd->active_queue;
2325         struct cfq_io_cq *cic;
2326         unsigned long sl, group_idle = 0;
2327
2328         /*
2329          * SSD device without seek penalty, disable idling. But only do so
2330          * for devices that support queuing, otherwise we still have a problem
2331          * with sync vs async workloads.
2332          */
2333         if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
2334                 return;
2335
2336         WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
2337         WARN_ON(cfq_cfqq_slice_new(cfqq));
2338
2339         /*
2340          * idle is disabled, either manually or by past process history
2341          */
2342         if (!cfq_should_idle(cfqd, cfqq)) {
2343                 /* no queue idling. Check for group idling */
2344                 if (cfqd->cfq_group_idle)
2345                         group_idle = cfqd->cfq_group_idle;
2346                 else
2347                         return;
2348         }
2349
2350         /*
2351          * still active requests from this queue, don't idle
2352          */
2353         if (cfqq->dispatched)
2354                 return;
2355
2356         /*
2357          * task has exited, don't wait
2358          */
2359         cic = cfqd->active_cic;
2360         if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
2361                 return;
2362
2363         /*
2364          * If our average think time is larger than the remaining time
2365          * slice, then don't idle. This avoids overrunning the allotted
2366          * time slice.
2367          */
2368         if (sample_valid(cic->ttime.ttime_samples) &&
2369             (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
2370                 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
2371                              cic->ttime.ttime_mean);
2372                 return;
2373         }
2374
2375         /* There are other queues in the group, don't do group idle */
2376         if (group_idle && cfqq->cfqg->nr_cfqq > 1)
2377                 return;
2378
2379         cfq_mark_cfqq_wait_request(cfqq);
2380
2381         if (group_idle)
2382                 sl = cfqd->cfq_group_idle;
2383         else
2384                 sl = cfqd->cfq_slice_idle;
2385
2386         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
2387         cfq_blkiocg_update_set_idle_time_stats(cfqg_to_blkg(cfqq->cfqg),
2388                                                &blkio_policy_cfq);
2389         cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
2390                         group_idle ? 1 : 0);
2391 }
2392
2393 /*
2394  * Move request from internal lists to the request queue dispatch list.
2395  */
2396 static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
2397 {
2398         struct cfq_data *cfqd = q->elevator->elevator_data;
2399         struct cfq_queue *cfqq = RQ_CFQQ(rq);
2400
2401         cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
2402
2403         cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
2404         cfq_remove_request(rq);
2405         cfqq->dispatched++;
2406         (RQ_CFQG(rq))->dispatched++;
2407         elv_dispatch_sort(q, rq);
2408
2409         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
2410         cfqq->nr_sectors += blk_rq_sectors(rq);
2411         cfq_blkiocg_update_dispatch_stats(cfqg_to_blkg(cfqq->cfqg),
2412                                           &blkio_policy_cfq, blk_rq_bytes(rq),
2413                                           rq_data_dir(rq), rq_is_sync(rq));
2414 }
2415
2416 /*
2417  * return expired entry, or NULL to just start from scratch in rbtree
2418  */
2419 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
2420 {
2421         struct request *rq = NULL;
2422
2423         if (cfq_cfqq_fifo_expire(cfqq))
2424                 return NULL;
2425
2426         cfq_mark_cfqq_fifo_expire(cfqq);
2427
2428         if (list_empty(&cfqq->fifo))
2429                 return NULL;
2430
2431         rq = rq_entry_fifo(cfqq->fifo.next);
2432         if (time_before(jiffies, rq_fifo_time(rq)))
2433                 rq = NULL;
2434
2435         cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
2436         return rq;
2437 }
2438
2439 static inline int
2440 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2441 {
2442         const int base_rq = cfqd->cfq_slice_async_rq;
2443
2444         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
2445
2446         return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
2447 }
2448
2449 /*
2450  * Must be called with the queue_lock held.
2451  */
2452 static int cfqq_process_refs(struct cfq_queue *cfqq)
2453 {
2454         int process_refs, io_refs;
2455
2456         io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
2457         process_refs = cfqq->ref - io_refs;
2458         BUG_ON(process_refs < 0);
2459         return process_refs;
2460 }
2461
2462 static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
2463 {
2464         int process_refs, new_process_refs;
2465         struct cfq_queue *__cfqq;
2466
2467         /*
2468          * If there are no process references on the new_cfqq, then it is
2469          * unsafe to follow the ->new_cfqq chain as other cfqq's in the
2470          * chain may have dropped their last reference (not just their
2471          * last process reference).
2472          */
2473         if (!cfqq_process_refs(new_cfqq))
2474                 return;
2475
2476         /* Avoid a circular list and skip interim queue merges */
2477         while ((__cfqq = new_cfqq->new_cfqq)) {
2478                 if (__cfqq == cfqq)
2479                         return;
2480                 new_cfqq = __cfqq;
2481         }
2482
2483         process_refs = cfqq_process_refs(cfqq);
2484         new_process_refs = cfqq_process_refs(new_cfqq);
2485         /*
2486          * If the process for the cfqq has gone away, there is no
2487          * sense in merging the queues.
2488          */
2489         if (process_refs == 0 || new_process_refs == 0)
2490                 return;
2491
2492         /*
2493          * Merge in the direction of the lesser amount of work.
2494          */
2495         if (new_process_refs >= process_refs) {
2496                 cfqq->new_cfqq = new_cfqq;
2497                 new_cfqq->ref += process_refs;
2498         } else {
2499                 new_cfqq->new_cfqq = cfqq;
2500                 cfqq->ref += new_process_refs;
2501         }
2502 }
2503
2504 static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
2505                                 struct cfq_group *cfqg, enum wl_prio_t prio)
2506 {
2507         struct cfq_queue *queue;
2508         int i;
2509         bool key_valid = false;
2510         unsigned long lowest_key = 0;
2511         enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
2512
2513         for (i = 0; i <= SYNC_WORKLOAD; ++i) {
2514                 /* select the one with lowest rb_key */
2515                 queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
2516                 if (queue &&
2517                     (!key_valid || time_before(queue->rb_key, lowest_key))) {
2518                         lowest_key = queue->rb_key;
2519                         cur_best = i;
2520                         key_valid = true;
2521                 }
2522         }
2523
2524         return cur_best;
2525 }
2526
2527 static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
2528 {
2529         unsigned slice;
2530         unsigned count;
2531         struct cfq_rb_root *st;
2532         unsigned group_slice;
2533         enum wl_prio_t original_prio = cfqd->serving_prio;
2534
2535         /* Choose next priority. RT > BE > IDLE */
2536         if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
2537                 cfqd->serving_prio = RT_WORKLOAD;
2538         else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
2539                 cfqd->serving_prio = BE_WORKLOAD;
2540         else {
2541                 cfqd->serving_prio = IDLE_WORKLOAD;
2542                 cfqd->workload_expires = jiffies + 1;
2543                 return;
2544         }
2545
2546         if (original_prio != cfqd->serving_prio)
2547                 goto new_workload;
2548
2549         /*
2550          * For RT and BE, we have to choose also the type
2551          * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
2552          * expiration time
2553          */
2554         st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2555         count = st->count;
2556
2557         /*
2558          * check workload expiration, and that we still have other queues ready
2559          */
2560         if (count && !time_after(jiffies, cfqd->workload_expires))
2561                 return;
2562
2563 new_workload:
2564         /* otherwise select new workload type */
2565         cfqd->serving_type =
2566                 cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
2567         st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2568         count = st->count;
2569
2570         /*
2571          * the workload slice is computed as a fraction of target latency
2572          * proportional to the number of queues in that workload, over
2573          * all the queues in the same priority class
2574          */
2575         group_slice = cfq_group_slice(cfqd, cfqg);
2576
2577         slice = group_slice * count /
2578                 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
2579                       cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
2580
2581         if (cfqd->serving_type == ASYNC_WORKLOAD) {
2582                 unsigned int tmp;
2583
2584                 /*
2585                  * Async queues are currently system wide. Just taking
2586                  * proportion of queues with-in same group will lead to higher
2587                  * async ratio system wide as generally root group is going
2588                  * to have higher weight. A more accurate thing would be to
2589                  * calculate system wide asnc/sync ratio.
2590                  */
2591                 tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
2592                 tmp = tmp/cfqd->busy_queues;
2593                 slice = min_t(unsigned, slice, tmp);
2594
2595                 /* async workload slice is scaled down according to
2596                  * the sync/async slice ratio. */
2597                 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
2598         } else
2599                 /* sync workload slice is at least 2 * cfq_slice_idle */
2600                 slice = max(slice, 2 * cfqd->cfq_slice_idle);
2601
2602         slice = max_t(unsigned, slice, CFQ_MIN_TT);
2603         cfq_log(cfqd, "workload slice:%d", slice);
2604         cfqd->workload_expires = jiffies + slice;
2605 }
2606
2607 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2608 {
2609         struct cfq_rb_root *st = &cfqd->grp_service_tree;
2610         struct cfq_group *cfqg;
2611
2612         if (RB_EMPTY_ROOT(&st->rb))
2613                 return NULL;
2614         cfqg = cfq_rb_first_group(st);
2615         update_min_vdisktime(st);
2616         return cfqg;
2617 }
2618
2619 static void cfq_choose_cfqg(struct cfq_data *cfqd)
2620 {
2621         struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
2622
2623         cfqd->serving_group = cfqg;
2624
2625         /* Restore the workload type data */
2626         if (cfqg->saved_workload_slice) {
2627                 cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
2628                 cfqd->serving_type = cfqg->saved_workload;
2629                 cfqd->serving_prio = cfqg->saved_serving_prio;
2630         } else
2631                 cfqd->workload_expires = jiffies - 1;
2632
2633         choose_service_tree(cfqd, cfqg);
2634 }
2635
2636 /*
2637  * Select a queue for service. If we have a current active queue,
2638  * check whether to continue servicing it, or retrieve and set a new one.
2639  */
2640 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
2641 {
2642         struct cfq_queue *cfqq, *new_cfqq = NULL;
2643
2644         cfqq = cfqd->active_queue;
2645         if (!cfqq)
2646                 goto new_queue;
2647
2648         if (!cfqd->rq_queued)
2649                 return NULL;
2650
2651         /*
2652          * We were waiting for group to get backlogged. Expire the queue
2653          */
2654         if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
2655                 goto expire;
2656
2657         /*
2658          * The active queue has run out of time, expire it and select new.
2659          */
2660         if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
2661                 /*
2662                  * If slice had not expired at the completion of last request
2663                  * we might not have turned on wait_busy flag. Don't expire
2664                  * the queue yet. Allow the group to get backlogged.
2665                  *
2666                  * The very fact that we have used the slice, that means we
2667                  * have been idling all along on this queue and it should be
2668                  * ok to wait for this request to complete.
2669                  */
2670                 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
2671                     && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2672                         cfqq = NULL;
2673                         goto keep_queue;
2674                 } else
2675                         goto check_group_idle;
2676         }
2677
2678         /*
2679          * The active queue has requests and isn't expired, allow it to
2680          * dispatch.
2681          */
2682         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
2683                 goto keep_queue;
2684
2685         /*
2686          * If another queue has a request waiting within our mean seek
2687          * distance, let it run.  The expire code will check for close
2688          * cooperators and put the close queue at the front of the service
2689          * tree.  If possible, merge the expiring queue with the new cfqq.
2690          */
2691         new_cfqq = cfq_close_cooperator(cfqd, cfqq);
2692         if (new_cfqq) {
2693                 if (!cfqq->new_cfqq)
2694                         cfq_setup_merge(cfqq, new_cfqq);
2695                 goto expire;
2696         }
2697
2698         /*
2699          * No requests pending. If the active queue still has requests in
2700          * flight or is idling for a new request, allow either of these
2701          * conditions to happen (or time out) before selecting a new queue.
2702          */
2703         if (timer_pending(&cfqd->idle_slice_timer)) {
2704                 cfqq = NULL;
2705                 goto keep_queue;
2706         }
2707
2708         /*
2709          * This is a deep seek queue, but the device is much faster than
2710          * the queue can deliver, don't idle
2711          **/
2712         if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
2713             (cfq_cfqq_slice_new(cfqq) ||
2714             (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
2715                 cfq_clear_cfqq_deep(cfqq);
2716                 cfq_clear_cfqq_idle_window(cfqq);
2717         }
2718
2719         if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2720                 cfqq = NULL;
2721                 goto keep_queue;
2722         }
2723
2724         /*
2725          * If group idle is enabled and there are requests dispatched from
2726          * this group, wait for requests to complete.
2727          */
2728 check_group_idle:
2729         if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
2730             cfqq->cfqg->dispatched &&
2731             !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
2732                 cfqq = NULL;
2733                 goto keep_queue;
2734         }
2735
2736 expire:
2737         cfq_slice_expired(cfqd, 0);
2738 new_queue:
2739         /*
2740          * Current queue expired. Check if we have to switch to a new
2741          * service tree
2742          */
2743         if (!new_cfqq)
2744                 cfq_choose_cfqg(cfqd);
2745
2746         cfqq = cfq_set_active_queue(cfqd, new_cfqq);
2747 keep_queue:
2748         return cfqq;
2749 }
2750
2751 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
2752 {
2753         int dispatched = 0;
2754
2755         while (cfqq->next_rq) {
2756                 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
2757                 dispatched++;
2758         }
2759
2760         BUG_ON(!list_empty(&cfqq->fifo));
2761
2762         /* By default cfqq is not expired if it is empty. Do it explicitly */
2763         __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
2764         return dispatched;
2765 }
2766
2767 /*
2768  * Drain our current requests. Used for barriers and when switching
2769  * io schedulers on-the-fly.
2770  */
2771 static int cfq_forced_dispatch(struct cfq_data *cfqd)
2772 {
2773         struct cfq_queue *cfqq;
2774         int dispatched = 0;
2775
2776         /* Expire the timeslice of the current active queue first */
2777         cfq_slice_expired(cfqd, 0);
2778         while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
2779                 __cfq_set_active_queue(cfqd, cfqq);
2780                 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
2781         }
2782
2783         BUG_ON(cfqd->busy_queues);
2784
2785         cfq_log(cfqd, "forced_dispatch=%d", dispatched);
2786         return dispatched;
2787 }
2788
2789 static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
2790         struct cfq_queue *cfqq)
2791 {
2792         /* the queue hasn't finished any request, can't estimate */
2793         if (cfq_cfqq_slice_new(cfqq))
2794                 return true;
2795         if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
2796                 cfqq->slice_end))
2797                 return true;
2798
2799         return false;
2800 }
2801
2802 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2803 {
2804         unsigned int max_dispatch;
2805
2806         /*
2807          * Drain async requests before we start sync IO
2808          */
2809         if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
2810                 return false;
2811
2812         /*
2813          * If this is an async queue and we have sync IO in flight, let it wait
2814          */
2815         if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
2816                 return false;
2817
2818         max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
2819         if (cfq_class_idle(cfqq))
2820                 max_dispatch = 1;
2821
2822         /*
2823          * Does this cfqq already have too much IO in flight?
2824          */
2825         if (cfqq->dispatched >= max_dispatch) {
2826                 bool promote_sync = false;
2827                 /*
2828                  * idle queue must always only have a single IO in flight
2829                  */
2830                 if (cfq_class_idle(cfqq))
2831                         return false;
2832
2833                 /*
2834                  * If there is only one sync queue
2835                  * we can ignore async queue here and give the sync
2836                  * queue no dispatch limit. The reason is a sync queue can
2837                  * preempt async queue, limiting the sync queue doesn't make
2838                  * sense. This is useful for aiostress test.
2839                  */
2840                 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
2841                         promote_sync = true;
2842
2843                 /*
2844                  * We have other queues, don't allow more IO from this one
2845                  */
2846                 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
2847                                 !promote_sync)
2848                         return false;
2849
2850                 /*
2851                  * Sole queue user, no limit
2852                  */
2853                 if (cfqd->busy_queues == 1 || promote_sync)
2854                         max_dispatch = -1;
2855                 else
2856                         /*
2857                          * Normally we start throttling cfqq when cfq_quantum/2
2858                          * requests have been dispatched. But we can drive
2859                          * deeper queue depths at the beginning of slice
2860                          * subjected to upper limit of cfq_quantum.
2861                          * */
2862                         max_dispatch = cfqd->cfq_quantum;
2863         }
2864
2865         /*
2866          * Async queues must wait a bit before being allowed dispatch.
2867          * We also ramp up the dispatch depth gradually for async IO,
2868          * based on the last sync IO we serviced
2869          */
2870         if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
2871                 unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
2872                 unsigned int depth;
2873
2874                 depth = last_sync / cfqd->cfq_slice[1];
2875                 if (!depth && !cfqq->dispatched)
2876                         depth = 1;
2877                 if (depth < max_dispatch)
2878                         max_dispatch = depth;
2879         }
2880
2881         /*
2882          * If we're below the current max, allow a dispatch
2883          */
2884         return cfqq->dispatched < max_dispatch;
2885 }
2886
2887 /*
2888  * Dispatch a request from cfqq, moving them to the request queue
2889  * dispatch list.
2890  */
2891 static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2892 {
2893         struct request *rq;
2894
2895         BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
2896
2897         if (!cfq_may_dispatch(cfqd, cfqq))
2898                 return false;
2899
2900         /*
2901          * follow expired path, else get first next available
2902          */
2903         rq = cfq_check_fifo(cfqq);
2904         if (!rq)
2905                 rq = cfqq->next_rq;
2906
2907         /*
2908          * insert request into driver dispatch list
2909          */
2910         cfq_dispatch_insert(cfqd->queue, rq);
2911
2912         if (!cfqd->active_cic) {
2913                 struct cfq_io_cq *cic = RQ_CIC(rq);
2914
2915                 atomic_long_inc(&cic->icq.ioc->refcount);
2916                 cfqd->active_cic = cic;
2917         }
2918
2919         return true;
2920 }
2921
2922 /*
2923  * Find the cfqq that we need to service and move a request from that to the
2924  * dispatch list
2925  */
2926 static int cfq_dispatch_requests(struct request_queue *q, int force)
2927 {
2928         struct cfq_data *cfqd = q->elevator->elevator_data;
2929         struct cfq_queue *cfqq;
2930
2931         if (!cfqd->busy_queues)
2932                 return 0;
2933
2934         if (unlikely(force))
2935                 return cfq_forced_dispatch(cfqd);
2936
2937         cfqq = cfq_select_queue(cfqd);
2938         if (!cfqq)
2939                 return 0;
2940
2941         /*
2942          * Dispatch a request from this cfqq, if it is allowed
2943          */
2944         if (!cfq_dispatch_request(cfqd, cfqq))
2945                 return 0;
2946
2947         cfqq->slice_dispatch++;
2948         cfq_clear_cfqq_must_dispatch(cfqq);
2949
2950         /*
2951          * expire an async queue immediately if it has used up its slice. idle
2952          * queue always expire after 1 dispatch round.
2953          */
2954         if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
2955             cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
2956             cfq_class_idle(cfqq))) {
2957                 cfqq->slice_end = jiffies + 1;
2958                 cfq_slice_expired(cfqd, 0);
2959         }
2960
2961         cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2962         return 1;
2963 }
2964
2965 /*
2966  * task holds one reference to the queue, dropped when task exits. each rq
2967  * in-flight on this queue also holds a reference, dropped when rq is freed.
2968  *
2969  * Each cfq queue took a reference on the parent group. Drop it now.
2970  * queue lock must be held here.
2971  */
2972 static void cfq_put_queue(struct cfq_queue *cfqq)
2973 {
2974         struct cfq_data *cfqd = cfqq->cfqd;
2975         struct cfq_group *cfqg;
2976
2977         BUG_ON(cfqq->ref <= 0);
2978
2979         cfqq->ref--;
2980         if (cfqq->ref)
2981                 return;
2982
2983         cfq_log_cfqq(cfqd, cfqq, "put_queue");
2984         BUG_ON(rb_first(&cfqq->sort_list));
2985         BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
2986         cfqg = cfqq->cfqg;
2987
2988         if (unlikely(cfqd->active_queue == cfqq)) {
2989                 __cfq_slice_expired(cfqd, cfqq, 0);
2990                 cfq_schedule_dispatch(cfqd);
2991         }
2992
2993         BUG_ON(cfq_cfqq_on_rr(cfqq));
2994         kmem_cache_free(cfq_pool, cfqq);
2995         cfqg_put(cfqg);
2996 }
2997
2998 static void cfq_put_cooperator(struct cfq_queue *cfqq)
2999 {
3000         struct cfq_queue *__cfqq, *next;
3001
3002         /*
3003          * If this queue was scheduled to merge with another queue, be
3004          * sure to drop the reference taken on that queue (and others in
3005          * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
3006          */
3007         __cfqq = cfqq->new_cfqq;
3008         while (__cfqq) {
3009                 if (__cfqq == cfqq) {
3010                         WARN(1, "cfqq->new_cfqq loop detected\n");
3011                         break;
3012                 }
3013                 next = __cfqq->new_cfqq;
3014                 cfq_put_queue(__cfqq);
3015                 __cfqq = next;
3016         }
3017 }
3018
3019 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3020 {
3021         if (unlikely(cfqq == cfqd->active_queue)) {
3022                 __cfq_slice_expired(cfqd, cfqq, 0);
3023                 cfq_schedule_dispatch(cfqd);
3024         }
3025
3026         cfq_put_cooperator(cfqq);
3027
3028         cfq_put_queue(cfqq);
3029 }
3030
3031 static void cfq_init_icq(struct io_cq *icq)
3032 {
3033         struct cfq_io_cq *cic = icq_to_cic(icq);
3034
3035         cic->ttime.last_end_request = jiffies;
3036 }
3037
3038 static void cfq_exit_icq(struct io_cq *icq)
3039 {
3040         struct cfq_io_cq *cic = icq_to_cic(icq);
3041         struct cfq_data *cfqd = cic_to_cfqd(cic);
3042
3043         if (cic->cfqq[BLK_RW_ASYNC]) {
3044                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
3045                 cic->cfqq[BLK_RW_ASYNC] = NULL;
3046         }
3047
3048         if (cic->cfqq[BLK_RW_SYNC]) {
3049                 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
3050                 cic->cfqq[BLK_RW_SYNC] = NULL;
3051         }
3052 }
3053
3054 static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
3055 {
3056         struct task_struct *tsk = current;
3057         int ioprio_class;
3058
3059         if (!cfq_cfqq_prio_changed(cfqq))
3060                 return;
3061
3062         ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3063         switch (ioprio_class) {
3064         default:
3065                 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
3066         case IOPRIO_CLASS_NONE:
3067                 /*
3068                  * no prio set, inherit CPU scheduling settings
3069                  */
3070                 cfqq->ioprio = task_nice_ioprio(tsk);
3071                 cfqq->ioprio_class = task_nice_ioclass(tsk);
3072                 break;
3073         case IOPRIO_CLASS_RT:
3074                 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3075                 cfqq->ioprio_class = IOPRIO_CLASS_RT;
3076                 break;
3077         case IOPRIO_CLASS_BE:
3078                 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3079                 cfqq->ioprio_class = IOPRIO_CLASS_BE;
3080                 break;
3081         case IOPRIO_CLASS_IDLE:
3082                 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
3083                 cfqq->ioprio = 7;
3084                 cfq_clear_cfqq_idle_window(cfqq);
3085                 break;
3086         }
3087
3088         /*
3089          * keep track of original prio settings in case we have to temporarily
3090          * elevate the priority of this queue
3091          */
3092         cfqq->org_ioprio = cfqq->ioprio;
3093         cfq_clear_cfqq_prio_changed(cfqq);
3094 }
3095
3096 static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
3097 {
3098         int ioprio = cic->icq.ioc->ioprio;
3099         struct cfq_data *cfqd = cic_to_cfqd(cic);
3100         struct cfq_queue *cfqq;
3101
3102         /*
3103          * Check whether ioprio has changed.  The condition may trigger
3104          * spuriously on a newly created cic but there's no harm.
3105          */
3106         if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
3107                 return;
3108
3109         cfqq = cic->cfqq[BLK_RW_ASYNC];
3110         if (cfqq) {
3111                 struct cfq_queue *new_cfqq;
3112                 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
3113                                          GFP_ATOMIC);
3114                 if (new_cfqq) {
3115                         cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
3116                         cfq_put_queue(cfqq);
3117                 }
3118         }
3119
3120         cfqq = cic->cfqq[BLK_RW_SYNC];
3121         if (cfqq)
3122                 cfq_mark_cfqq_prio_changed(cfqq);
3123
3124         cic->ioprio = ioprio;
3125 }
3126
3127 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3128                           pid_t pid, bool is_sync)
3129 {
3130         RB_CLEAR_NODE(&cfqq->rb_node);
3131         RB_CLEAR_NODE(&cfqq->p_node);
3132         INIT_LIST_HEAD(&cfqq->fifo);
3133
3134         cfqq->ref = 0;
3135         cfqq->cfqd = cfqd;
3136
3137         cfq_mark_cfqq_prio_changed(cfqq);
3138
3139         if (is_sync) {
3140                 if (!cfq_class_idle(cfqq))
3141                         cfq_mark_cfqq_idle_window(cfqq);
3142                 cfq_mark_cfqq_sync(cfqq);
3143         }
3144         cfqq->pid = pid;
3145 }
3146
3147 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3148 static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
3149 {
3150         struct cfq_data *cfqd = cic_to_cfqd(cic);
3151         struct cfq_queue *sync_cfqq;
3152         uint64_t id;
3153
3154         rcu_read_lock();
3155         id = bio_blkio_cgroup(bio)->id;
3156         rcu_read_unlock();
3157
3158         /*
3159          * Check whether blkcg has changed.  The condition may trigger
3160          * spuriously on a newly created cic but there's no harm.
3161          */
3162         if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
3163                 return;
3164
3165         sync_cfqq = cic_to_cfqq(cic, 1);
3166         if (sync_cfqq) {
3167                 /*
3168                  * Drop reference to sync queue. A new sync queue will be
3169                  * assigned in new group upon arrival of a fresh request.
3170                  */
3171                 cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
3172                 cic_set_cfqq(cic, NULL, 1);
3173                 cfq_put_queue(sync_cfqq);
3174         }
3175
3176         cic->blkcg_id = id;
3177 }
3178 #else
3179 static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
3180 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
3181
3182 static struct cfq_queue *
3183 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3184                      struct bio *bio, gfp_t gfp_mask)
3185 {
3186         struct blkio_cgroup *blkcg;
3187         struct cfq_queue *cfqq, *new_cfqq = NULL;
3188         struct cfq_group *cfqg;
3189
3190 retry:
3191         rcu_read_lock();
3192
3193         blkcg = bio_blkio_cgroup(bio);
3194         cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
3195         cfqq = cic_to_cfqq(cic, is_sync);
3196
3197         /*
3198          * Always try a new alloc if we fell back to the OOM cfqq
3199          * originally, since it should just be a temporary situation.
3200          */
3201         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3202                 cfqq = NULL;
3203                 if (new_cfqq) {
3204                         cfqq = new_cfqq;
3205                         new_cfqq = NULL;
3206                 } else if (gfp_mask & __GFP_WAIT) {
3207                         rcu_read_unlock();
3208                         spin_unlock_irq(cfqd->queue->queue_lock);
3209                         new_cfqq = kmem_cache_alloc_node(cfq_pool,
3210                                         gfp_mask | __GFP_ZERO,
3211                                         cfqd->queue->node);
3212                         spin_lock_irq(cfqd->queue->queue_lock);
3213                         if (new_cfqq)
3214                                 goto retry;
3215                 } else {
3216                         cfqq = kmem_cache_alloc_node(cfq_pool,
3217                                         gfp_mask | __GFP_ZERO,
3218                                         cfqd->queue->node);
3219                 }
3220
3221                 if (cfqq) {
3222                         cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
3223                         cfq_init_prio_data(cfqq, cic);
3224                         cfq_link_cfqq_cfqg(cfqq, cfqg);
3225                         cfq_log_cfqq(cfqd, cfqq, "alloced");
3226                 } else
3227                         cfqq = &cfqd->oom_cfqq;
3228         }
3229
3230         if (new_cfqq)
3231                 kmem_cache_free(cfq_pool, new_cfqq);
3232
3233         rcu_read_unlock();
3234         return cfqq;
3235 }
3236
3237 static struct cfq_queue **
3238 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
3239 {
3240         switch (ioprio_class) {
3241         case IOPRIO_CLASS_RT:
3242                 return &cfqd->async_cfqq[0][ioprio];
3243         case IOPRIO_CLASS_NONE:
3244                 ioprio = IOPRIO_NORM;
3245                 /* fall through */
3246         case IOPRIO_CLASS_BE:
3247                 return &cfqd->async_cfqq[1][ioprio];
3248         case IOPRIO_CLASS_IDLE:
3249                 return &cfqd->async_idle_cfqq;
3250         default:
3251                 BUG();
3252         }
3253 }
3254
3255 static struct cfq_queue *
3256 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3257               struct bio *bio, gfp_t gfp_mask)
3258 {
3259         const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3260         const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3261         struct cfq_queue **async_cfqq = NULL;
3262         struct cfq_queue *cfqq = NULL;
3263
3264         if (!is_sync) {
3265                 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
3266                 cfqq = *async_cfqq;
3267         }
3268
3269         if (!cfqq)
3270                 cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
3271
3272         /*
3273          * pin the queue now that it's allocated, scheduler exit will prune it
3274          */
3275         if (!is_sync && !(*async_cfqq)) {
3276                 cfqq->ref++;
3277                 *async_cfqq = cfqq;
3278         }
3279
3280         cfqq->ref++;
3281         return cfqq;
3282 }
3283
3284 static void
3285 __cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
3286 {
3287         unsigned long elapsed = jiffies - ttime->last_end_request;
3288         elapsed = min(elapsed, 2UL * slice_idle);
3289
3290         ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
3291         ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
3292         ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
3293 }
3294
3295 static void
3296 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3297                         struct cfq_io_cq *cic)
3298 {
3299         if (cfq_cfqq_sync(cfqq)) {
3300                 __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
3301                 __cfq_update_io_thinktime(&cfqq->service_tree->ttime,
3302                         cfqd->cfq_slice_idle);
3303         }
3304 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3305         __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
3306 #endif
3307 }
3308
3309 static void
3310 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3311                        struct request *rq)
3312 {
3313         sector_t sdist = 0;
3314         sector_t n_sec = blk_rq_sectors(rq);
3315         if (cfqq->last_request_pos) {
3316                 if (cfqq->last_request_pos < blk_rq_pos(rq))
3317                         sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3318                 else
3319                         sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3320         }
3321
3322         cfqq->seek_history <<= 1;
3323         if (blk_queue_nonrot(cfqd->queue))
3324                 cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3325         else
3326                 cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
3327 }
3328
3329 /*
3330  * Disable idle window if the process thinks too long or seeks so much that
3331  * it doesn't matter
3332  */
3333 static void
3334 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3335                        struct cfq_io_cq *cic)
3336 {
3337         int old_idle, enable_idle;
3338
3339         /*
3340          * Don't idle for async or idle io prio class
3341          */
3342         if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3343                 return;
3344
3345         enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
3346
3347         if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3348                 cfq_mark_cfqq_deep(cfqq);
3349
3350         if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
3351                 enable_idle = 0;
3352         else if (!atomic_read(&cic->icq.ioc->active_ref) ||
3353                  !cfqd->cfq_slice_idle ||
3354                  (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
3355                 enable_idle = 0;
3356         else if (sample_valid(cic->ttime.ttime_samples)) {
3357                 if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
3358                         enable_idle = 0;
3359                 else
3360                         enable_idle = 1;
3361         }
3362
3363         if (old_idle != enable_idle) {
3364                 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3365                 if (enable_idle)
3366                         cfq_mark_cfqq_idle_window(cfqq);
3367                 else
3368                         cfq_clear_cfqq_idle_window(cfqq);
3369         }
3370 }
3371
3372 /*
3373  * Check if new_cfqq should preempt the currently active queue. Return 0 for
3374  * no or if we aren't sure, a 1 will cause a preempt.
3375  */
3376 static bool
3377 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3378                    struct request *rq)
3379 {
3380         struct cfq_queue *cfqq;
3381
3382         cfqq = cfqd->active_queue;
3383         if (!cfqq)
3384                 return false;
3385
3386         if (cfq_class_idle(new_cfqq))
3387                 return false;
3388
3389         if (cfq_class_idle(cfqq))
3390                 return true;
3391
3392         /*
3393          * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3394          */
3395         if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3396                 return false;
3397
3398         /*
3399          * if the new request is sync, but the currently running queue is
3400          * not, let the sync request have priority.
3401          */
3402         if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
3403                 return true;
3404
3405         if (new_cfqq->cfqg != cfqq->cfqg)
3406                 return false;
3407
3408         if (cfq_slice_used(cfqq))
3409                 return true;
3410
3411         /* Allow preemption only if we are idling on sync-noidle tree */
3412         if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
3413             cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3414             new_cfqq->service_tree->count == 2 &&
3415             RB_EMPTY_ROOT(&cfqq->sort_list))
3416                 return true;
3417
3418         /*
3419          * So both queues are sync. Let the new request get disk time if
3420          * it's a metadata request and the current queue is doing regular IO.
3421          */
3422         if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
3423                 return true;
3424
3425         /*
3426          * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
3427          */
3428         if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3429                 return true;
3430
3431         /* An idle queue should not be idle now for some reason */
3432         if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
3433                 return true;
3434
3435         if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
3436                 return false;
3437
3438         /*
3439          * if this request is as-good as one we would expect from the
3440          * current cfqq, let it preempt
3441          */
3442         if (cfq_rq_close(cfqd, cfqq, rq))
3443                 return true;
3444
3445         return false;
3446 }
3447
3448 /*
3449  * cfqq preempts the active queue. if we allowed preempt with no slice left,
3450  * let it have half of its nominal slice.
3451  */
3452 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3453 {
3454         enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
3455
3456         cfq_log_cfqq(cfqd, cfqq, "preempt");
3457         cfq_slice_expired(cfqd, 1);
3458
3459         /*
3460          * workload type is changed, don't save slice, otherwise preempt
3461          * doesn't happen
3462          */
3463         if (old_type != cfqq_type(cfqq))
3464                 cfqq->cfqg->saved_workload_slice = 0;
3465
3466         /*
3467          * Put the new queue at the front of the of the current list,
3468          * so we know that it will be selected next.
3469          */
3470         BUG_ON(!cfq_cfqq_on_rr(cfqq));
3471
3472         cfq_service_tree_add(cfqd, cfqq, 1);
3473
3474         cfqq->slice_end = 0;
3475         cfq_mark_cfqq_slice_new(cfqq);
3476 }
3477
3478 /*
3479  * Called when a new fs request (rq) is added (to cfqq). Check if there's
3480  * something we should do about it
3481  */
3482 static void
3483 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3484                 struct request *rq)
3485 {
3486         struct cfq_io_cq *cic = RQ_CIC(rq);
3487
3488         cfqd->rq_queued++;
3489         if (rq->cmd_flags & REQ_PRIO)
3490                 cfqq->prio_pending++;
3491
3492         cfq_update_io_thinktime(cfqd, cfqq, cic);
3493         cfq_update_io_seektime(cfqd, cfqq, rq);
3494         cfq_update_idle_window(cfqd, cfqq, cic);
3495
3496         cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
3497
3498         if (cfqq == cfqd->active_queue) {
3499                 /*
3500                  * Remember that we saw a request from this process, but
3501                  * don't start queuing just yet. Otherwise we risk seeing lots
3502                  * of tiny requests, because we disrupt the normal plugging
3503                  * and merging. If the request is already larger than a single
3504                  * page, let it rip immediately. For that case we assume that
3505                  * merging is already done. Ditto for a busy system that
3506                  * has other work pending, don't risk delaying until the
3507                  * idle timer unplug to continue working.
3508                  */
3509                 if (cfq_cfqq_wait_request(cfqq)) {
3510                         if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3511                             cfqd->busy_queues > 1) {
3512                                 cfq_del_timer(cfqd, cfqq);
3513                                 cfq_clear_cfqq_wait_request(cfqq);
3514                                 __blk_run_queue(cfqd->queue);
3515                         } else {
3516                                 cfq_blkiocg_update_idle_time_stats(
3517                                                 cfqg_to_blkg(cfqq->cfqg),
3518                                                 &blkio_policy_cfq);
3519                                 cfq_mark_cfqq_must_dispatch(cfqq);
3520                         }
3521                 }
3522         } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
3523                 /*
3524                  * not the active queue - expire current slice if it is
3525                  * idle and has expired it's mean thinktime or this new queue
3526                  * has some old slice time left and is of higher priority or
3527                  * this new queue is RT and the current one is BE
3528                  */
3529                 cfq_preempt_queue(cfqd, cfqq);
3530                 __blk_run_queue(cfqd->queue);
3531         }
3532 }
3533
3534 static void cfq_insert_request(struct request_queue *q, struct request *rq)
3535 {
3536         struct cfq_data *cfqd = q->elevator->elevator_data;
3537         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3538
3539         cfq_log_cfqq(cfqd, cfqq, "insert_request");
3540         cfq_init_prio_data(cfqq, RQ_CIC(rq));
3541
3542         rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
3543         list_add_tail(&rq->queuelist, &cfqq->fifo);
3544         cfq_add_rq_rb(rq);
3545         cfq_blkiocg_update_io_add_stats(cfqg_to_blkg(RQ_CFQG(rq)),
3546                                         &blkio_policy_cfq,
3547                                         cfqg_to_blkg(cfqd->serving_group),
3548                                         rq_data_dir(rq), rq_is_sync(rq));
3549         cfq_rq_enqueued(cfqd, cfqq, rq);
3550 }
3551
3552 /*
3553  * Update hw_tag based on peak queue depth over 50 samples under
3554  * sufficient load.
3555  */
3556 static void cfq_update_hw_tag(struct cfq_data *cfqd)
3557 {
3558         struct cfq_queue *cfqq = cfqd->active_queue;
3559
3560         if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3561                 cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
3562
3563         if (cfqd->hw_tag == 1)
3564                 return;
3565
3566         if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
3567             cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
3568                 return;
3569
3570         /*
3571          * If active queue hasn't enough requests and can idle, cfq might not
3572          * dispatch sufficient requests to hardware. Don't zero hw_tag in this
3573          * case
3574          */
3575         if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3576             cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
3577             CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
3578                 return;
3579
3580         if (cfqd->hw_tag_samples++ < 50)
3581                 return;
3582
3583         if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
3584                 cfqd->hw_tag = 1;
3585         else
3586                 cfqd->hw_tag = 0;
3587 }
3588
3589 static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3590 {
3591         struct cfq_io_cq *cic = cfqd->active_cic;
3592
3593         /* If the queue already has requests, don't wait */
3594         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3595                 return false;
3596
3597         /* If there are other queues in the group, don't wait */
3598         if (cfqq->cfqg->nr_cfqq > 1)
3599                 return false;
3600
3601         /* the only queue in the group, but think time is big */
3602         if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
3603                 return false;
3604
3605         if (cfq_slice_used(cfqq))
3606                 return true;
3607
3608         /* if slice left is less than think time, wait busy */
3609         if (cic && sample_valid(cic->ttime.ttime_samples)
3610             && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
3611                 return true;
3612
3613         /*
3614          * If think times is less than a jiffy than ttime_mean=0 and above
3615          * will not be true. It might happen that slice has not expired yet
3616          * but will expire soon (4-5 ns) during select_queue(). To cover the
3617          * case where think time is less than a jiffy, mark the queue wait
3618          * busy if only 1 jiffy is left in the slice.
3619          */
3620         if (cfqq->slice_end - jiffies == 1)
3621                 return true;
3622
3623         return false;
3624 }
3625
3626 static void cfq_completed_request(struct request_queue *q, struct request *rq)
3627 {
3628         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3629         struct cfq_data *cfqd = cfqq->cfqd;
3630         const int sync = rq_is_sync(rq);
3631         unsigned long now;
3632
3633         now = jiffies;
3634         cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
3635                      !!(rq->cmd_flags & REQ_NOIDLE));
3636
3637         cfq_update_hw_tag(cfqd);
3638
3639         WARN_ON(!cfqd->rq_in_driver);
3640         WARN_ON(!cfqq->dispatched);
3641         cfqd->rq_in_driver--;
3642         cfqq->dispatched--;
3643         (RQ_CFQG(rq))->dispatched--;
3644         cfq_blkiocg_update_completion_stats(cfqg_to_blkg(cfqq->cfqg),
3645                         &blkio_policy_cfq, rq_start_time_ns(rq),
3646                         rq_io_start_time_ns(rq), rq_data_dir(rq),
3647                         rq_is_sync(rq));
3648
3649         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
3650
3651         if (sync) {
3652                 struct cfq_rb_root *service_tree;
3653
3654                 RQ_CIC(rq)->ttime.last_end_request = now;
3655
3656                 if (cfq_cfqq_on_rr(cfqq))
3657                         service_tree = cfqq->service_tree;
3658                 else
3659                         service_tree = service_tree_for(cfqq->cfqg,
3660                                 cfqq_prio(cfqq), cfqq_type(cfqq));
3661                 service_tree->ttime.last_end_request = now;
3662                 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
3663                         cfqd->last_delayed_sync = now;
3664         }
3665
3666 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3667         cfqq->cfqg->ttime.last_end_request = now;
3668 #endif
3669
3670         /*
3671          * If this is the active queue, check if it needs to be expired,
3672          * or if we want to idle in case it has no pending requests.
3673          */
3674         if (cfqd->active_queue == cfqq) {
3675                 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
3676
3677                 if (cfq_cfqq_slice_new(cfqq)) {
3678                         cfq_set_prio_slice(cfqd, cfqq);
3679                         cfq_clear_cfqq_slice_new(cfqq);
3680                 }
3681
3682                 /*
3683                  * Should we wait for next request to come in before we expire
3684                  * the queue.
3685                  */
3686                 if (cfq_should_wait_busy(cfqd, cfqq)) {
3687                         unsigned long extend_sl = cfqd->cfq_slice_idle;
3688                         if (!cfqd->cfq_slice_idle)
3689                                 extend_sl = cfqd->cfq_group_idle;
3690                         cfqq->slice_end = jiffies + extend_sl;
3691                         cfq_mark_cfqq_wait_busy(cfqq);
3692                         cfq_log_cfqq(cfqd, cfqq, "will busy wait");
3693                 }
3694
3695                 /*
3696                  * Idling is not enabled on:
3697                  * - expired queues
3698                  * - idle-priority queues
3699                  * - async queues
3700                  * - queues with still some requests queued
3701                  * - when there is a close cooperator
3702                  */
3703                 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
3704                         cfq_slice_expired(cfqd, 1);
3705                 else if (sync && cfqq_empty &&
3706                          !cfq_close_cooperator(cfqd, cfqq)) {
3707                         cfq_arm_slice_timer(cfqd);
3708                 }
3709         }
3710
3711         if (!cfqd->rq_in_driver)
3712                 cfq_schedule_dispatch(cfqd);
3713 }
3714
3715 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
3716 {
3717         if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3718                 cfq_mark_cfqq_must_alloc_slice(cfqq);
3719                 return ELV_MQUEUE_MUST;
3720         }
3721
3722         return ELV_MQUEUE_MAY;
3723 }
3724
3725 static int cfq_may_queue(struct request_queue *q, int rw)
3726 {
3727         struct cfq_data *cfqd = q->elevator->elevator_data;
3728         struct task_struct *tsk = current;
3729         struct cfq_io_cq *cic;
3730         struct cfq_queue *cfqq;
3731
3732         /*
3733          * don't force setup of a queue from here, as a call to may_queue
3734          * does not necessarily imply that a request actually will be queued.
3735          * so just lookup a possibly existing queue, or return 'may queue'
3736          * if that fails
3737          */
3738         cic = cfq_cic_lookup(cfqd, tsk->io_context);
3739         if (!cic)
3740                 return ELV_MQUEUE_MAY;
3741
3742         cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
3743         if (cfqq) {
3744                 cfq_init_prio_data(cfqq, cic);
3745
3746                 return __cfq_may_queue(cfqq);
3747         }
3748
3749         return ELV_MQUEUE_MAY;
3750 }
3751
3752 /*
3753  * queue lock held here
3754  */
3755 static void cfq_put_request(struct request *rq)
3756 {
3757         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3758
3759         if (cfqq) {
3760                 const int rw = rq_data_dir(rq);
3761
3762                 BUG_ON(!cfqq->allocated[rw]);
3763                 cfqq->allocated[rw]--;
3764
3765                 /* Put down rq reference on cfqg */
3766                 cfqg_put(RQ_CFQG(rq));
3767                 rq->elv.priv[0] = NULL;
3768                 rq->elv.priv[1] = NULL;
3769
3770                 cfq_put_queue(cfqq);
3771         }
3772 }
3773
3774 static struct cfq_queue *
3775 cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
3776                 struct cfq_queue *cfqq)
3777 {
3778         cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
3779         cic_set_cfqq(cic, cfqq->new_cfqq, 1);
3780         cfq_mark_cfqq_coop(cfqq->new_cfqq);
3781         cfq_put_queue(cfqq);
3782         return cic_to_cfqq(cic, 1);
3783 }
3784
3785 /*
3786  * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
3787  * was the last process referring to said cfqq.
3788  */
3789 static struct cfq_queue *
3790 split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
3791 {
3792         if (cfqq_process_refs(cfqq) == 1) {
3793                 cfqq->pid = current->pid;
3794                 cfq_clear_cfqq_coop(cfqq);
3795                 cfq_clear_cfqq_split_coop(cfqq);
3796                 return cfqq;
3797         }
3798
3799         cic_set_cfqq(cic, NULL, 1);
3800
3801         cfq_put_cooperator(cfqq);
3802
3803         cfq_put_queue(cfqq);
3804         return NULL;
3805 }
3806 /*
3807  * Allocate cfq data structures associated with this request.
3808  */
3809 static int
3810 cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
3811                 gfp_t gfp_mask)
3812 {
3813         struct cfq_data *cfqd = q->elevator->elevator_data;
3814         struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
3815         const int rw = rq_data_dir(rq);
3816         const bool is_sync = rq_is_sync(rq);
3817         struct cfq_queue *cfqq;
3818
3819         might_sleep_if(gfp_mask & __GFP_WAIT);
3820
3821         spin_lock_irq(q->queue_lock);
3822
3823         check_ioprio_changed(cic, bio);
3824         check_blkcg_changed(cic, bio);
3825 new_queue:
3826         cfqq = cic_to_cfqq(cic, is_sync);
3827         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3828                 cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
3829                 cic_set_cfqq(cic, cfqq, is_sync);
3830         } else {
3831                 /*
3832                  * If the queue was seeky for too long, break it apart.
3833                  */
3834                 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
3835                         cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
3836                         cfqq = split_cfqq(cic, cfqq);
3837                         if (!cfqq)
3838                                 goto new_queue;
3839                 }
3840
3841                 /*
3842                  * Check to see if this queue is scheduled to merge with
3843                  * another, closely cooperating queue.  The merging of
3844                  * queues happens here as it must be done in process context.
3845                  * The reference on new_cfqq was taken in merge_cfqqs.
3846                  */
3847                 if (cfqq->new_cfqq)
3848                         cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
3849         }
3850
3851         cfqq->allocated[rw]++;
3852
3853         cfqq->ref++;
3854         cfqg_get(cfqq->cfqg);
3855         rq->elv.priv[0] = cfqq;
3856         rq->elv.priv[1] = cfqq->cfqg;
3857         spin_unlock_irq(q->queue_lock);
3858         return 0;
3859 }
3860
3861 static void cfq_kick_queue(struct work_struct *work)
3862 {
3863         struct cfq_data *cfqd =
3864                 container_of(work, struct cfq_data, unplug_work);
3865         struct request_queue *q = cfqd->queue;
3866
3867         spin_lock_irq(q->queue_lock);
3868         __blk_run_queue(cfqd->queue);
3869         spin_unlock_irq(q->queue_lock);
3870 }
3871
3872 /*
3873  * Timer running if the active_queue is currently idling inside its time slice
3874  */
3875 static void cfq_idle_slice_timer(unsigned long data)
3876 {
3877         struct cfq_data *cfqd = (struct cfq_data *) data;
3878         struct cfq_queue *cfqq;
3879         unsigned long flags;
3880         int timed_out = 1;
3881
3882         cfq_log(cfqd, "idle timer fired");
3883
3884         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3885
3886         cfqq = cfqd->active_queue;
3887         if (cfqq) {
3888                 timed_out = 0;
3889
3890                 /*
3891                  * We saw a request before the queue expired, let it through
3892                  */
3893                 if (cfq_cfqq_must_dispatch(cfqq))
3894                         goto out_kick;
3895
3896                 /*
3897                  * expired
3898                  */
3899                 if (cfq_slice_used(cfqq))
3900                         goto expire;
3901
3902                 /*
3903                  * only expire and reinvoke request handler, if there are
3904                  * other queues with pending requests
3905                  */
3906                 if (!cfqd->busy_queues)
3907                         goto out_cont;
3908
3909                 /*
3910                  * not expired and it has a request pending, let it dispatch
3911                  */
3912                 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3913                         goto out_kick;
3914
3915                 /*
3916                  * Queue depth flag is reset only when the idle didn't succeed
3917                  */
3918                 cfq_clear_cfqq_deep(cfqq);
3919         }
3920 expire:
3921         cfq_slice_expired(cfqd, timed_out);
3922 out_kick:
3923         cfq_schedule_dispatch(cfqd);
3924 out_cont:
3925         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3926 }
3927
3928 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
3929 {
3930         del_timer_sync(&cfqd->idle_slice_timer);
3931         cancel_work_sync(&cfqd->unplug_work);
3932 }
3933
3934 static void cfq_put_async_queues(struct cfq_data *cfqd)
3935 {
3936         int i;
3937
3938         for (i = 0; i < IOPRIO_BE_NR; i++) {
3939                 if (cfqd->async_cfqq[0][i])
3940                         cfq_put_queue(cfqd->async_cfqq[0][i]);
3941                 if (cfqd->async_cfqq[1][i])
3942                         cfq_put_queue(cfqd->async_cfqq[1][i]);
3943         }
3944
3945         if (cfqd->async_idle_cfqq)
3946                 cfq_put_queue(cfqd->async_idle_cfqq);
3947 }
3948
3949 static void cfq_exit_queue(struct elevator_queue *e)
3950 {
3951         struct cfq_data *cfqd = e->elevator_data;
3952         struct request_queue *q = cfqd->queue;
3953
3954         cfq_shutdown_timer_wq(cfqd);
3955
3956         spin_lock_irq(q->queue_lock);
3957
3958         if (cfqd->active_queue)
3959                 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
3960
3961         cfq_put_async_queues(cfqd);
3962
3963         spin_unlock_irq(q->queue_lock);
3964
3965         cfq_shutdown_timer_wq(cfqd);
3966
3967 #ifndef CONFIG_CFQ_GROUP_IOSCHED
3968         kfree(cfqd->root_group);
3969 #endif
3970         update_root_blkg_pd(q, BLKIO_POLICY_PROP);
3971         kfree(cfqd);
3972 }
3973
3974 static int cfq_init_queue(struct request_queue *q)
3975 {
3976         struct cfq_data *cfqd;
3977         struct blkio_group *blkg __maybe_unused;
3978         int i;
3979
3980         cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
3981         if (!cfqd)
3982                 return -ENOMEM;
3983
3984         cfqd->queue = q;
3985         q->elevator->elevator_data = cfqd;
3986
3987         /* Init root service tree */
3988         cfqd->grp_service_tree = CFQ_RB_ROOT;
3989
3990         /* Init root group and prefer root group over other groups by default */
3991 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3992         rcu_read_lock();
3993         spin_lock_irq(q->queue_lock);
3994
3995         blkg = blkg_lookup_create(&blkio_root_cgroup, q, true);
3996         if (!IS_ERR(blkg))
3997                 cfqd->root_group = blkg_to_cfqg(blkg);
3998
3999         spin_unlock_irq(q->queue_lock);
4000         rcu_read_unlock();
4001 #else
4002         cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
4003                                         GFP_KERNEL, cfqd->queue->node);
4004         if (cfqd->root_group)
4005                 cfq_init_cfqg_base(cfqd->root_group);
4006 #endif
4007         if (!cfqd->root_group) {
4008                 kfree(cfqd);
4009                 return -ENOMEM;
4010         }
4011
4012         cfqd->root_group->weight = 2*BLKIO_WEIGHT_DEFAULT;
4013
4014         /*
4015          * Not strictly needed (since RB_ROOT just clears the node and we
4016          * zeroed cfqd on alloc), but better be safe in case someone decides
4017          * to add magic to the rb code
4018          */
4019         for (i = 0; i < CFQ_PRIO_LISTS; i++)
4020                 cfqd->prio_trees[i] = RB_ROOT;
4021
4022         /*
4023          * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
4024          * Grab a permanent reference to it, so that the normal code flow
4025          * will not attempt to free it.  oom_cfqq is linked to root_group
4026          * but shouldn't hold a reference as it'll never be unlinked.  Lose
4027          * the reference from linking right away.
4028          */
4029         cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
4030         cfqd->oom_cfqq.ref++;
4031
4032         spin_lock_irq(q->queue_lock);
4033         cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
4034         cfqg_put(cfqd->root_group);
4035         spin_unlock_irq(q->queue_lock);
4036
4037         init_timer(&cfqd->idle_slice_timer);
4038         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
4039         cfqd->idle_slice_timer.data = (unsigned long) cfqd;
4040
4041         INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
4042
4043         cfqd->cfq_quantum = cfq_quantum;
4044         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
4045         cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
4046         cfqd->cfq_back_max = cfq_back_max;
4047         cfqd->cfq_back_penalty = cfq_back_penalty;
4048         cfqd->cfq_slice[0] = cfq_slice_async;
4049         cfqd->cfq_slice[1] = cfq_slice_sync;
4050         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
4051         cfqd->cfq_slice_idle = cfq_slice_idle;
4052         cfqd->cfq_group_idle = cfq_group_idle;
4053         cfqd->cfq_latency = 1;
4054         cfqd->hw_tag = -1;
4055         /*
4056          * we optimistically start assuming sync ops weren't delayed in last
4057          * second, in order to have larger depth for async operations.
4058          */
4059         cfqd->last_delayed_sync = jiffies - HZ;
4060         return 0;
4061 }
4062
4063 /*
4064  * sysfs parts below -->
4065  */
4066 static ssize_t
4067 cfq_var_show(unsigned int var, char *page)
4068 {
4069         return sprintf(page, "%d\n", var);
4070 }
4071
4072 static ssize_t
4073 cfq_var_store(unsigned int *var, const char *page, size_t count)
4074 {
4075         char *p = (char *) page;
4076
4077         *var = simple_strtoul(p, &p, 10);
4078         return count;
4079 }
4080
4081 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
4082 static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
4083 {                                                                       \
4084         struct cfq_data *cfqd = e->elevator_data;                       \
4085         unsigned int __data = __VAR;                                    \
4086         if (__CONV)                                                     \
4087                 __data = jiffies_to_msecs(__data);                      \
4088         return cfq_var_show(__data, (page));                            \
4089 }
4090 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
4091 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
4092 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
4093 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4094 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
4095 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
4096 SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
4097 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4098 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4099 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
4100 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
4101 #undef SHOW_FUNCTION
4102
4103 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
4104 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
4105 {                                                                       \
4106         struct cfq_data *cfqd = e->elevator_data;                       \
4107         unsigned int __data;                                            \
4108         int ret = cfq_var_store(&__data, (page), count);                \
4109         if (__data < (MIN))                                             \
4110                 __data = (MIN);                                         \
4111         else if (__data > (MAX))                                        \
4112                 __data = (MAX);                                         \
4113         if (__CONV)                                                     \
4114                 *(__PTR) = msecs_to_jiffies(__data);                    \
4115         else                                                            \
4116                 *(__PTR) = __data;                                      \
4117         return ret;                                                     \
4118 }
4119 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
4120 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4121                 UINT_MAX, 1);
4122 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4123                 UINT_MAX, 1);
4124 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
4125 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4126                 UINT_MAX, 0);
4127 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
4128 STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
4129 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4130 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
4131 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4132                 UINT_MAX, 0);
4133 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
4134 #undef STORE_FUNCTION
4135
4136 #define CFQ_ATTR(name) \
4137         __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
4138
4139 static struct elv_fs_entry cfq_attrs[] = {
4140         CFQ_ATTR(quantum),
4141         CFQ_ATTR(fifo_expire_sync),
4142         CFQ_ATTR(fifo_expire_async),
4143         CFQ_ATTR(back_seek_max),
4144         CFQ_ATTR(back_seek_penalty),
4145         CFQ_ATTR(slice_sync),
4146         CFQ_ATTR(slice_async),
4147         CFQ_ATTR(slice_async_rq),
4148         CFQ_ATTR(slice_idle),
4149         CFQ_ATTR(group_idle),
4150         CFQ_ATTR(low_latency),
4151         __ATTR_NULL
4152 };
4153
4154 static struct elevator_type iosched_cfq = {
4155         .ops = {
4156                 .elevator_merge_fn =            cfq_merge,
4157                 .elevator_merged_fn =           cfq_merged_request,
4158                 .elevator_merge_req_fn =        cfq_merged_requests,
4159                 .elevator_allow_merge_fn =      cfq_allow_merge,
4160                 .elevator_bio_merged_fn =       cfq_bio_merged,
4161                 .elevator_dispatch_fn =         cfq_dispatch_requests,
4162                 .elevator_add_req_fn =          cfq_insert_request,
4163                 .elevator_activate_req_fn =     cfq_activate_request,
4164                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
4165                 .elevator_completed_req_fn =    cfq_completed_request,
4166                 .elevator_former_req_fn =       elv_rb_former_request,
4167                 .elevator_latter_req_fn =       elv_rb_latter_request,
4168                 .elevator_init_icq_fn =         cfq_init_icq,
4169                 .elevator_exit_icq_fn =         cfq_exit_icq,
4170                 .elevator_set_req_fn =          cfq_set_request,
4171                 .elevator_put_req_fn =          cfq_put_request,
4172                 .elevator_may_queue_fn =        cfq_may_queue,
4173                 .elevator_init_fn =             cfq_init_queue,
4174                 .elevator_exit_fn =             cfq_exit_queue,
4175         },
4176         .icq_size       =       sizeof(struct cfq_io_cq),
4177         .icq_align      =       __alignof__(struct cfq_io_cq),
4178         .elevator_attrs =       cfq_attrs,
4179         .elevator_name  =       "cfq",
4180         .elevator_owner =       THIS_MODULE,
4181 };
4182
4183 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4184 static struct blkio_policy_type blkio_policy_cfq = {
4185         .ops = {
4186                 .blkio_init_group_fn =          cfq_init_blkio_group,
4187         },
4188         .plid = BLKIO_POLICY_PROP,
4189         .pdata_size = sizeof(struct cfq_group),
4190         .cftypes = cfq_blkcg_files,
4191 };
4192 #endif
4193
4194 static int __init cfq_init(void)
4195 {
4196         int ret;
4197
4198         /*
4199          * could be 0 on HZ < 1000 setups
4200          */
4201         if (!cfq_slice_async)
4202                 cfq_slice_async = 1;
4203         if (!cfq_slice_idle)
4204                 cfq_slice_idle = 1;
4205
4206 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4207         if (!cfq_group_idle)
4208                 cfq_group_idle = 1;
4209 #else
4210                 cfq_group_idle = 0;
4211 #endif
4212         cfq_pool = KMEM_CACHE(cfq_queue, 0);
4213         if (!cfq_pool)
4214                 return -ENOMEM;
4215
4216         ret = elv_register(&iosched_cfq);
4217         if (ret) {
4218                 kmem_cache_destroy(cfq_pool);
4219                 return ret;
4220         }
4221
4222 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4223         blkio_policy_register(&blkio_policy_cfq);
4224 #endif
4225         return 0;
4226 }
4227
4228 static void __exit cfq_exit(void)
4229 {
4230 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4231         blkio_policy_unregister(&blkio_policy_cfq);
4232 #endif
4233         elv_unregister(&iosched_cfq);
4234         kmem_cache_destroy(cfq_pool);
4235 }
4236
4237 module_init(cfq_init);
4238 module_exit(cfq_exit);
4239
4240 MODULE_AUTHOR("Jens Axboe");
4241 MODULE_LICENSE("GPL");
4242 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");