blk-throttle: respect 0 bps/iops settings for io.low
/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
#include "blk.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a slice and after that slice is renewed */
#define DFL_THROTL_SLICE_HD (HZ / 10)
#define DFL_THROTL_SLICE_SSD (HZ / 50)
#define MAX_THROTL_SLICE (HZ)
#define DFL_IDLE_THRESHOLD_SSD (1000L) /* 1 ms */
#define DFL_IDLE_THRESHOLD_HD (100L * 1000) /* 100 ms */
#define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
/* default latency target is 0, i.e. guarantee IO latency by default */
#define DFL_LATENCY_TARGET (0)
#define MIN_THROTL_BPS (320 * 1024)
#define MIN_THROTL_IOPS (10)
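
/*
 * Worked example for the defaults above (illustrative, units as
 * annotated): a throttle slice is HZ/10 = 100ms on rotational disks and
 * HZ/50 = 20ms on SSDs, capped at MAX_THROTL_SLICE = 1s.  The idle
 * thresholds are in microseconds: 1ms for SSDs, 100ms for disks, with
 * idle time tracking capped at 5s.
 */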

#define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT)

static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;

/*
 * To implement hierarchical throttling, throtl_grps form a tree and bios
 * are dispatched upwards level by level until they reach the top and get
 * issued.  When dispatching bios from the children and local group at each
 * level, if the bios are dispatched into a single bio_list, there's a risk
 * of a local or child group which can queue many bios at once filling up
 * the list starving others.
 *
 * To avoid such starvation, dispatched bios are queued separately
 * according to where they came from.  When they are again dispatched to
 * the parent, they're popped in round-robin order so that no single source
 * hogs the dispatch window.
 *
 * throtl_qnode is used to keep the queued bios separated by their sources.
 * Bios are queued to throtl_qnode which in turn is queued to
 * throtl_service_queue and then dispatched in round-robin order.
 *
 * It's also used to track the reference counts on blkg's.  A qnode always
 * belongs to a throtl_grp and gets queued on itself or the parent, so
 * incrementing the reference of the associated throtl_grp when a qnode is
 * queued and decrementing when dequeued is enough to keep the whole blkg
 * tree pinned while bios are in flight.
 */
struct throtl_qnode {
        struct list_head        node;           /* service_queue->queued[] */
        struct bio_list         bios;           /* queued bios */
        struct throtl_grp       *tg;            /* tg this qnode belongs to */
};
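
/*
 * Round-robin illustration (a hypothetical sketch, not part of the
 * original source): if a parent's queued[READ] list holds qnodes A and
 * B, each pop takes one bio from the head qnode and then rotates that
 * qnode to the tail, so consecutive pops alternate A, B, A, B...  A
 * qnode drained empty is unlinked instead, and the reference it held on
 * its throtl_grp is dropped (or handed to the caller via @tg_to_put).
 */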

struct throtl_service_queue {
        struct throtl_service_queue *parent_sq; /* the parent service_queue */

        /*
         * Bios queued directly to this service_queue or dispatched from
         * children throtl_grp's.
         */
        struct list_head        queued[2];      /* throtl_qnode [READ/WRITE] */
        unsigned int            nr_queued[2];   /* number of queued bios */

        /*
         * RB tree of active children throtl_grp's, which are sorted by
         * their ->disptime.
         */
        struct rb_root          pending_tree;   /* RB tree of active tgs */
        struct rb_node          *first_pending; /* first node in the tree */
        unsigned int            nr_pending;     /* # queued in the tree */
        unsigned long           first_pending_disptime; /* disptime of the first tg */
        struct timer_list       pending_timer;  /* fires on first_pending_disptime */
};

enum tg_state_flags {
        THROTL_TG_PENDING       = 1 << 0,       /* on parent's pending tree */
        THROTL_TG_WAS_EMPTY     = 1 << 1,       /* bio_lists[] became non-empty */
};

#define rb_entry_tg(node)       rb_entry((node), struct throtl_grp, rb_node)

enum {
        LIMIT_LOW,
        LIMIT_MAX,
        LIMIT_CNT,
};

struct throtl_grp {
        /* must be the first member */
        struct blkg_policy_data pd;

        /* active throtl group service_queue member */
        struct rb_node rb_node;

        /* throtl_data this group belongs to */
        struct throtl_data *td;

        /* this group's service queue */
        struct throtl_service_queue service_queue;

        /*
         * qnode_on_self is used when bios are directly queued to this
         * throtl_grp so that local bios compete fairly with bios
         * dispatched from children.  qnode_on_parent is used when bios are
         * dispatched from this throtl_grp into its parent and will compete
         * with the sibling qnode_on_parents and the parent's
         * qnode_on_self.
         */
        struct throtl_qnode qnode_on_self[2];
        struct throtl_qnode qnode_on_parent[2];

        /*
         * Dispatch time in jiffies. This is the estimated time when group
         * will unthrottle and is ready to dispatch more bio. It is used as
         * key to sort active groups in service tree.
         */
        unsigned long disptime;

        unsigned int flags;

        /* are there any throtl rules between this group and td? */
        bool has_rules[2];

        /* internally used bytes per second rate limits */
        uint64_t bps[2][LIMIT_CNT];
        /* user configured bps limits */
        uint64_t bps_conf[2][LIMIT_CNT];

        /* internally used IOPS limits */
        unsigned int iops[2][LIMIT_CNT];
        /* user configured IOPS limits */
        unsigned int iops_conf[2][LIMIT_CNT];
        /* Number of bytes dispatched in current slice */
        uint64_t bytes_disp[2];
        /* Number of bio's dispatched in current slice */
        unsigned int io_disp[2];

        unsigned long last_low_overflow_time[2];

        uint64_t last_bytes_disp[2];
        unsigned int last_io_disp[2];

        unsigned long last_check_time;

        unsigned long latency_target; /* us */
        unsigned long latency_target_conf; /* us */
        /* When did we start a new slice */
        unsigned long slice_start[2];
        unsigned long slice_end[2];

        unsigned long last_finish_time; /* ns / 1024 */
        unsigned long checked_last_finish_time; /* ns / 1024 */
        unsigned long avg_idletime; /* ns / 1024 */
        unsigned long idletime_threshold; /* us */
        unsigned long idletime_threshold_conf; /* us */

        unsigned int bio_cnt; /* total bios */
        unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
        unsigned long bio_cnt_reset_time;
};

/* We measure latency for request size from <= 4k to >= 1M */
#define LATENCY_BUCKET_SIZE 9

struct latency_bucket {
        unsigned long total_latency; /* ns / 1024 */
        int samples;
};

struct avg_latency_bucket {
        unsigned long latency; /* ns / 1024 */
        bool valid;
};

struct throtl_data
{
        /* service tree for active throtl groups */
        struct throtl_service_queue service_queue;

        struct request_queue *queue;

        /* Total number of queued bios on READ and WRITE lists */
        unsigned int nr_queued[2];

        unsigned int throtl_slice;

        /* Work for dispatching throttled bios */
        struct work_struct dispatch_work;
        unsigned int limit_index;
        bool limit_valid[LIMIT_CNT];

        unsigned long dft_idletime_threshold; /* us */

        unsigned long low_upgrade_time;
        unsigned long low_downgrade_time;

        unsigned int scale;

        struct latency_bucket tmp_buckets[LATENCY_BUCKET_SIZE];
        struct avg_latency_bucket avg_buckets[LATENCY_BUCKET_SIZE];
        struct latency_bucket __percpu *latency_buckets;
        unsigned long last_calculate_time;

        bool track_bio_latency;
};

static void throtl_pending_timer_fn(unsigned long arg);

static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
        return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
        return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
        return pd_to_blkg(&tg->pd);
}

/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
 * embedded in throtl_data, %NULL is returned.
 */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
        if (sq && sq->parent_sq)
                return container_of(sq, struct throtl_grp, service_queue);
        else
                return NULL;
}

/**
 * sq_to_td - return throtl_data the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 * Determine the associated throtl_data accordingly and return it.
 */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
        struct throtl_grp *tg = sq_to_tg(sq);

        if (tg)
                return tg->td;
        else
                return container_of(sq, struct throtl_data, service_queue);
}

/*
 * cgroup's limit in LIMIT_MAX is scaled if low limit is set. This scale is to
 * make the IO dispatch more smooth.
 * Scale up: linearly scale up according to elapsed time since upgrade. For
 *           every throtl_slice, the limit scales up by 1/2 of the .low limit
 *           till the limit hits the .max limit.
 * Scale down: exponentially scale down if a cgroup doesn't hit its .low limit
 */
static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
{
        /* arbitrary value to avoid too big scale */
        if (td->scale < 4096 && time_after_eq(jiffies,
            td->low_upgrade_time + td->scale * td->throtl_slice))
                td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;

        return low + (low >> 1) * td->scale;
}
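
/*
 * Worked example (illustrative numbers): with .low = 100MB/s, one
 * throtl_slice after an upgrade td->scale becomes 1 and the adjusted
 * limit is 100 + 50 * 1 = 150MB/s; after four slices it is
 * 100 + 50 * 4 = 300MB/s, growing linearly until the caller clamps it
 * to the configured .max.
 */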

static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
{
        struct blkcg_gq *blkg = tg_to_blkg(tg);
        struct throtl_data *td;
        uint64_t ret;

        if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
                return U64_MAX;

        td = tg->td;
        ret = tg->bps[rw][td->limit_index];
        if (ret == 0 && td->limit_index == LIMIT_LOW) {
                /* intermediate node or iops isn't 0 */
                if (!list_empty(&blkg->blkcg->css.children) ||
                    tg->iops[rw][td->limit_index])
                        return U64_MAX;
                else
                        return MIN_THROTL_BPS;
        }

        if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
            tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
                uint64_t adjusted;

                adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
                ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
        }
        return ret;
}
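
/*
 * Note how 0 bps/iops settings are respected for io.low: while running
 * under LIMIT_LOW, a leaf group with bps=0 is clamped to MIN_THROTL_BPS
 * only if its iops setting is also 0; otherwise bps=0 means "no bps low
 * limit" and U64_MAX is returned so only the iops rule throttles.
 * Intermediate nodes are never clamped.
 */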

static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
{
        struct blkcg_gq *blkg = tg_to_blkg(tg);
        struct throtl_data *td;
        unsigned int ret;

        if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
                return UINT_MAX;

        td = tg->td;
        ret = tg->iops[rw][td->limit_index];
        if (ret == 0 && td->limit_index == LIMIT_LOW) {
                /* intermediate node or bps isn't 0 */
                if (!list_empty(&blkg->blkcg->css.children) ||
                    tg->bps[rw][td->limit_index])
                        return UINT_MAX;
                else
                        return MIN_THROTL_IOPS;
        }

        if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
            tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
                uint64_t adjusted;

                adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
                if (adjusted > UINT_MAX)
                        adjusted = UINT_MAX;
                ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
        }
        return ret;
}

#define request_bucket_index(sectors) \
        clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)
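
/*
 * Bucket example (illustrative, 512-byte sectors): a 4k request is 8
 * sectors, order_base_2(8) - 3 = 0, bucket 0; a 1M request is 2048
 * sectors, order_base_2(2048) - 3 = 8, the last of the 9 buckets.
 * Sizes outside that range are clamped to the first/last bucket.
 */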

/**
 * throtl_log - log debug message via blktrace
 * @sq: the service_queue being reported
 * @fmt: printf format string
 * @args: printf args
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
 */
#define throtl_log(sq, fmt, args...)    do {                            \
        struct throtl_grp *__tg = sq_to_tg((sq));                       \
        struct throtl_data *__td = sq_to_td((sq));                      \
                                                                        \
        (void)__td;                                                     \
        if (likely(!blk_trace_note_message_enabled(__td->queue)))       \
                break;                                                  \
        if ((__tg)) {                                                   \
                char __pbuf[128];                                       \
                                                                        \
                blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf));    \
                blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \
        } else {                                                        \
                blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);  \
        }                                                               \
} while (0)

static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
        INIT_LIST_HEAD(&qn->node);
        bio_list_init(&qn->bios);
        qn->tg = tg;
}

/**
 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 * @bio: bio being added
 * @qn: qnode to add bio to
 * @queued: the service_queue->queued[] list @qn belongs to
 *
 * Add @bio to @qn and put @qn on @queued if it's not already on.
 * @qn->tg's reference count is bumped when @qn is activated.  See the
 * comment on top of throtl_qnode definition for details.
 */
static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
                                 struct list_head *queued)
{
        bio_list_add(&qn->bios, bio);
        if (list_empty(&qn->node)) {
                list_add_tail(&qn->node, queued);
                blkg_get(tg_to_blkg(qn->tg));
        }
}

/**
 * throtl_peek_queued - peek the first bio on a qnode list
 * @queued: the qnode list to peek
 */
static struct bio *throtl_peek_queued(struct list_head *queued)
{
        struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
        struct bio *bio;

        if (list_empty(queued))
                return NULL;

        bio = bio_list_peek(&qn->bios);
        WARN_ON_ONCE(!bio);
        return bio;
}

/**
 * throtl_pop_queued - pop the first bio from a qnode list
 * @queued: the qnode list to pop a bio from
 * @tg_to_put: optional out argument for throtl_grp to put
 *
 * Pop the first bio from the qnode list @queued.  After popping, the first
 * qnode is removed from @queued if empty or moved to the end of @queued so
 * that the popping order is round-robin.
 *
 * When the first qnode is removed, its associated throtl_grp should be put
 * too.  If @tg_to_put is NULL, this function automatically puts it;
 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 * responsible for putting it.
 */
static struct bio *throtl_pop_queued(struct list_head *queued,
                                     struct throtl_grp **tg_to_put)
{
        struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
        struct bio *bio;

        if (list_empty(queued))
                return NULL;

        bio = bio_list_pop(&qn->bios);
        WARN_ON_ONCE(!bio);

        if (bio_list_empty(&qn->bios)) {
                list_del_init(&qn->node);
                if (tg_to_put)
                        *tg_to_put = qn->tg;
                else
                        blkg_put(tg_to_blkg(qn->tg));
        } else {
                list_move_tail(&qn->node, queued);
        }

        return bio;
}

/* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
        INIT_LIST_HEAD(&sq->queued[0]);
        INIT_LIST_HEAD(&sq->queued[1]);
        sq->pending_tree = RB_ROOT;
        setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
                    (unsigned long)sq);
}

static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
{
        struct throtl_grp *tg;
        int rw;

        tg = kzalloc_node(sizeof(*tg), gfp, node);
        if (!tg)
                return NULL;

        throtl_service_queue_init(&tg->service_queue);

        for (rw = READ; rw <= WRITE; rw++) {
                throtl_qnode_init(&tg->qnode_on_self[rw], tg);
                throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
        }

        RB_CLEAR_NODE(&tg->rb_node);
        tg->bps[READ][LIMIT_MAX] = U64_MAX;
        tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
        tg->iops[READ][LIMIT_MAX] = UINT_MAX;
        tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
        tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
        tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
        tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
        tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
        /* LIMIT_LOW will have default value 0 */

        tg->latency_target = DFL_LATENCY_TARGET;
        tg->latency_target_conf = DFL_LATENCY_TARGET;

        return &tg->pd;
}

static void throtl_pd_init(struct blkg_policy_data *pd)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        struct blkcg_gq *blkg = tg_to_blkg(tg);
        struct throtl_data *td = blkg->q->td;
        struct throtl_service_queue *sq = &tg->service_queue;

        /*
         * If on the default hierarchy, we switch to properly hierarchical
         * behavior where limits on a given throtl_grp are applied to the
         * whole subtree rather than just the group itself.  e.g. If 16M
         * read_bps limit is set on the root group, the whole system can't
         * exceed 16M for the device.
         *
         * If not on the default hierarchy, the broken flat hierarchy
         * behavior is retained where all throtl_grps are treated as if
         * they're all separate root groups right below throtl_data.
         * Limits of a group don't interact with limits of other groups
         * regardless of the position of the group in the hierarchy.
         */
        sq->parent_sq = &td->service_queue;
        if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
                sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
        tg->td = td;

        tg->idletime_threshold = td->dft_idletime_threshold;
        tg->idletime_threshold_conf = td->dft_idletime_threshold;
}

/*
 * Set has_rules[] if @tg or any of its parents have limits configured.
 * This doesn't require walking up to the top of the hierarchy as the
 * parent's has_rules[] is guaranteed to be correct.
 */
static void tg_update_has_rules(struct throtl_grp *tg)
{
        struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
        struct throtl_data *td = tg->td;
        int rw;

        for (rw = READ; rw <= WRITE; rw++)
                tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
                        (td->limit_valid[td->limit_index] &&
                         (tg_bps_limit(tg, rw) != U64_MAX ||
                          tg_iops_limit(tg, rw) != UINT_MAX));
}

static void throtl_pd_online(struct blkg_policy_data *pd)
{
        struct throtl_grp *tg = pd_to_tg(pd);

        /*
         * We don't want new groups to escape the limits of their ancestors.
         * Update has_rules[] after a new group is brought online.
         */
        tg_update_has_rules(tg);
}

static void blk_throtl_update_limit_valid(struct throtl_data *td)
{
        struct cgroup_subsys_state *pos_css;
        struct blkcg_gq *blkg;
        bool low_valid = false;

        rcu_read_lock();
        blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
                struct throtl_grp *tg = blkg_to_tg(blkg);

                if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
                    tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
                        low_valid = true;
        }
        rcu_read_unlock();

        td->limit_valid[LIMIT_LOW] = low_valid;
}

static void throtl_upgrade_state(struct throtl_data *td);
static void throtl_pd_offline(struct blkg_policy_data *pd)
{
        struct throtl_grp *tg = pd_to_tg(pd);

        tg->bps[READ][LIMIT_LOW] = 0;
        tg->bps[WRITE][LIMIT_LOW] = 0;
        tg->iops[READ][LIMIT_LOW] = 0;
        tg->iops[WRITE][LIMIT_LOW] = 0;

        blk_throtl_update_limit_valid(tg->td);

        if (!tg->td->limit_valid[tg->td->limit_index])
                throtl_upgrade_state(tg->td);
}

static void throtl_pd_free(struct blkg_policy_data *pd)
{
        struct throtl_grp *tg = pd_to_tg(pd);

        del_timer_sync(&tg->service_queue.pending_timer);
        kfree(tg);
}

static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
        /* Service tree is empty */
        if (!parent_sq->nr_pending)
                return NULL;

        if (!parent_sq->first_pending)
                parent_sq->first_pending = rb_first(&parent_sq->pending_tree);

        if (parent_sq->first_pending)
                return rb_entry_tg(parent_sq->first_pending);

        return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
        rb_erase(n, root);
        RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n,
                            struct throtl_service_queue *parent_sq)
{
        if (parent_sq->first_pending == n)
                parent_sq->first_pending = NULL;
        rb_erase_init(n, &parent_sq->pending_tree);
        --parent_sq->nr_pending;
}

static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
        struct throtl_grp *tg;

        tg = throtl_rb_first(parent_sq);
        if (!tg)
                return;

        parent_sq->first_pending_disptime = tg->disptime;
}

static void tg_service_queue_add(struct throtl_grp *tg)
{
        struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
        struct rb_node **node = &parent_sq->pending_tree.rb_node;
        struct rb_node *parent = NULL;
        struct throtl_grp *__tg;
        unsigned long key = tg->disptime;
        int left = 1;

        while (*node != NULL) {
                parent = *node;
                __tg = rb_entry_tg(parent);

                if (time_before(key, __tg->disptime))
                        node = &parent->rb_left;
                else {
                        node = &parent->rb_right;
                        left = 0;
                }
        }

        if (left)
                parent_sq->first_pending = &tg->rb_node;

        rb_link_node(&tg->rb_node, parent, node);
        rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
}

static void __throtl_enqueue_tg(struct throtl_grp *tg)
{
        tg_service_queue_add(tg);
        tg->flags |= THROTL_TG_PENDING;
        tg->service_queue.parent_sq->nr_pending++;
}

static void throtl_enqueue_tg(struct throtl_grp *tg)
{
        if (!(tg->flags & THROTL_TG_PENDING))
                __throtl_enqueue_tg(tg);
}

static void __throtl_dequeue_tg(struct throtl_grp *tg)
{
        throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
        tg->flags &= ~THROTL_TG_PENDING;
}

static void throtl_dequeue_tg(struct throtl_grp *tg)
{
        if (tg->flags & THROTL_TG_PENDING)
                __throtl_dequeue_tg(tg);
}

/* Call with queue lock held */
static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
                                          unsigned long expires)
{
        unsigned long max_expire = jiffies + 8 * sq_to_tg(sq)->td->throtl_slice;

        /*
         * Since we are adjusting the throttle limit dynamically, the sleep
         * time calculated according to previous limit might be invalid. It's
         * possible the cgroup sleep time is very long and no other cgroups
         * have IO running so notify the limit changes. Make sure the cgroup
         * doesn't sleep too long to avoid a missed notification.
         */
        if (time_after(expires, max_expire))
                expires = max_expire;
        mod_timer(&sq->pending_timer, expires);
        throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
                   expires - jiffies, jiffies);
}

/**
 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 * @sq: the service_queue to schedule dispatch for
 * @force: force scheduling
 *
 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 * dispatch time of the first pending child.  Returns %true if either the
 * timer is armed or there's no pending child left.  %false if the current
 * dispatch window is still open and the caller should continue
 * dispatching.
 *
 * If @force is %true, the dispatch timer is always scheduled and this
 * function is guaranteed to return %true.  This is to be used when the
 * caller can't dispatch itself and needs to invoke pending_timer
 * unconditionally.  Note that forced scheduling is likely to induce a short
 * delay before dispatch starts even if @sq->first_pending_disptime is not
 * in the future and thus shouldn't be used in hot paths.
 */
static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
                                          bool force)
{
        /* any pending children left? */
        if (!sq->nr_pending)
                return true;

        update_min_dispatch_time(sq);

        /* is the next dispatch time in the future? */
        if (force || time_after(sq->first_pending_disptime, jiffies)) {
                throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
                return true;
        }

        /* tell the caller to continue dispatching */
        return false;
}

static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
                bool rw, unsigned long start)
{
        tg->bytes_disp[rw] = 0;
        tg->io_disp[rw] = 0;

        /*
         * Previous slice has expired. We must have trimmed it after last
         * bio dispatch. That means since start of last slice, we never used
         * that bandwidth. Do try to make use of that bandwidth while giving
         * credit.
         */
        if (time_after_eq(start, tg->slice_start[rw]))
                tg->slice_start[rw] = start;

        tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
        throtl_log(&tg->service_queue,
                   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
                   rw == READ ? 'R' : 'W', tg->slice_start[rw],
                   tg->slice_end[rw], jiffies);
}

static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
{
        tg->bytes_disp[rw] = 0;
        tg->io_disp[rw] = 0;
        tg->slice_start[rw] = jiffies;
        tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
        throtl_log(&tg->service_queue,
                   "[%c] new slice start=%lu end=%lu jiffies=%lu",
                   rw == READ ? 'R' : 'W', tg->slice_start[rw],
                   tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
                                        unsigned long jiffy_end)
{
        tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
                                       unsigned long jiffy_end)
{
        tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
        throtl_log(&tg->service_queue,
                   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
                   rw == READ ? 'R' : 'W', tg->slice_start[rw],
                   tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
        if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
                return false;

        return true;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
        unsigned long nr_slices, time_elapsed, io_trim;
        u64 bytes_trim, tmp;

        BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

        /*
         * If bps are unlimited (-1), then time slices don't get
         * renewed. Don't try to trim the slice if the slice is used up.
         * A new slice will start when appropriate.
         */
        if (throtl_slice_used(tg, rw))
                return;

        /*
         * A bio has been dispatched. Also adjust slice_end. It might happen
         * that initially cgroup limit was very low resulting in high
         * slice_end, but later limit was bumped up and bio was dispatched
         * sooner, then we need to reduce slice_end. A high bogus slice_end
         * is bad because it does not allow new slice to start.
         */

        throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);

        time_elapsed = jiffies - tg->slice_start[rw];

        nr_slices = time_elapsed / tg->td->throtl_slice;

        if (!nr_slices)
                return;
        tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
        do_div(tmp, HZ);
        bytes_trim = tmp;

        io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
                HZ;

        if (!bytes_trim && !io_trim)
                return;

        if (tg->bytes_disp[rw] >= bytes_trim)
                tg->bytes_disp[rw] -= bytes_trim;
        else
                tg->bytes_disp[rw] = 0;

        if (tg->io_disp[rw] >= io_trim)
                tg->io_disp[rw] -= io_trim;
        else
                tg->io_disp[rw] = 0;

        tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;

        throtl_log(&tg->service_queue,
                   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
                   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
                   tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
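
/*
 * Trim example (illustrative, HZ=1000 and throtl_slice=100 jiffies):
 * at a 1MB/s bps limit with 250 jiffies elapsed, nr_slices = 2 and
 * bytes_trim = 1MB * 100 * 2 / 1000 ~ 205KB, so roughly 205KB of the
 * charged budget is forgiven and slice_start advances by 200 jiffies,
 * keeping only the partial third slice accounted.
 */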

static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
                                  unsigned long *wait)
{
        bool rw = bio_data_dir(bio);
        unsigned int io_allowed;
        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
        u64 tmp;

        jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

        /* Slice has just started. Consider one slice interval */
        if (!jiffy_elapsed)
                jiffy_elapsed_rnd = tg->td->throtl_slice;

        jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);

        /*
         * jiffy_elapsed_rnd should not be a big value as minimum iops can be
         * 1 then at max jiffy elapsed should be equivalent of 1 second as we
         * will allow dispatch after 1 second and after that slice should
         * have been trimmed.
         */

        tmp = (u64)tg_iops_limit(tg, rw) * jiffy_elapsed_rnd;
        do_div(tmp, HZ);

        if (tmp > UINT_MAX)
                io_allowed = UINT_MAX;
        else
                io_allowed = tmp;

        if (tg->io_disp[rw] + 1 <= io_allowed) {
                if (wait)
                        *wait = 0;
                return true;
        }

        /* Calc approx time to dispatch */
        jiffy_wait = ((tg->io_disp[rw] + 1) * HZ) / tg_iops_limit(tg, rw) + 1;

        if (jiffy_wait > jiffy_elapsed)
                jiffy_wait = jiffy_wait - jiffy_elapsed;
        else
                jiffy_wait = 1;

        if (wait)
                *wait = jiffy_wait;
        return false;
}
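
/*
 * IOPS check example (illustrative, HZ=1000, throtl_slice=100): at a
 * 100 iops limit with 100 jiffies elapsed,
 * io_allowed = 100 * 100 / 1000 = 10, so an 11th bio in the slice must
 * wait (11 * 1000) / 100 + 1 - 100 = 11 jiffies.
 */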

static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
                                 unsigned long *wait)
{
        bool rw = bio_data_dir(bio);
        u64 bytes_allowed, extra_bytes, tmp;
        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

        jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

        /* Slice has just started. Consider one slice interval */
        if (!jiffy_elapsed)
                jiffy_elapsed_rnd = tg->td->throtl_slice;

        jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);

        tmp = tg_bps_limit(tg, rw) * jiffy_elapsed_rnd;
        do_div(tmp, HZ);
        bytes_allowed = tmp;

        if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
                if (wait)
                        *wait = 0;
                return true;
        }

        /* Calc approx time to dispatch */
        extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
        jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));

        if (!jiffy_wait)
                jiffy_wait = 1;

        /*
         * This wait time is without taking into consideration the rounding
         * up we did. Add that time also.
         */
        jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
        if (wait)
                *wait = jiffy_wait;
        return false;
}
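
/*
 * BPS check example (illustrative, HZ=1000, throtl_slice=100): at a
 * 1MB/s limit with 100 jiffies elapsed, bytes_allowed ~ 102KB; a 64KB
 * bio on top of 64KB already dispatched overflows by ~26KB, so the
 * caller is told to wait extra_bytes * HZ / bps ~ 25 jiffies.
 */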

/*
 * Returns whether one can dispatch a bio or not. Also returns approx number
 * of jiffies to wait before this bio is within the IO rate and can be
 * dispatched.
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
                            unsigned long *wait)
{
        bool rw = bio_data_dir(bio);
        unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

        /*
         * Currently whole state machine of group depends on first bio
         * queued in the group bio list. So one should not be calling
         * this function with a different bio if there are other bios
         * queued.
         */
        BUG_ON(tg->service_queue.nr_queued[rw] &&
               bio != throtl_peek_queued(&tg->service_queue.queued[rw]));

        /* If tg->bps = -1, then BW is unlimited */
        if (tg_bps_limit(tg, rw) == U64_MAX &&
            tg_iops_limit(tg, rw) == UINT_MAX) {
                if (wait)
                        *wait = 0;
                return true;
        }

        /*
         * If previous slice expired, start a new one otherwise renew/extend
         * existing slice to make sure it is at least throtl_slice interval
         * long since now. New slice is started only for empty throttle group.
         * If there is queued bio, that means there should be an active
         * slice and it should be extended instead.
         */
        if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
                throtl_start_new_slice(tg, rw);
        else {
                if (time_before(tg->slice_end[rw],
                    jiffies + tg->td->throtl_slice))
                        throtl_extend_slice(tg, rw,
                                jiffies + tg->td->throtl_slice);
        }

        if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
            tg_with_in_iops_limit(tg, bio, &iops_wait)) {
                if (wait)
                        *wait = 0;
                return true;
        }

        max_wait = max(bps_wait, iops_wait);

        if (wait)
                *wait = max_wait;

        if (time_before(tg->slice_end[rw], jiffies + max_wait))
                throtl_extend_slice(tg, rw, jiffies + max_wait);

        return false;
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
        bool rw = bio_data_dir(bio);

        /* Charge the bio to the group */
        tg->bytes_disp[rw] += bio->bi_iter.bi_size;
        tg->io_disp[rw]++;
        tg->last_bytes_disp[rw] += bio->bi_iter.bi_size;
        tg->last_io_disp[rw]++;

        /*
         * BIO_THROTTLED is used to prevent the same bio from being
         * throttled more than once, as a throttled bio will go through
         * blk-throtl a second time when it eventually gets issued.  Set it
         * when a bio is being charged to a tg.
         */
        if (!bio_flagged(bio, BIO_THROTTLED))
                bio_set_flag(bio, BIO_THROTTLED);
}

/**
 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 * @bio: bio to add
 * @qn: qnode to use
 * @tg: the target throtl_grp
 *
 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
 * tg->qnode_on_self[] is used.
 */
static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
                              struct throtl_grp *tg)
{
        struct throtl_service_queue *sq = &tg->service_queue;
        bool rw = bio_data_dir(bio);

        if (!qn)
                qn = &tg->qnode_on_self[rw];

        /*
         * If @tg doesn't currently have any bios queued in the same
         * direction, queueing @bio can change when @tg should be
         * dispatched.  Mark that @tg was empty.  This is automatically
         * cleared on the next tg_update_disptime().
         */
        if (!sq->nr_queued[rw])
                tg->flags |= THROTL_TG_WAS_EMPTY;

        throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);

        sq->nr_queued[rw]++;
        throtl_enqueue_tg(tg);
}

static void tg_update_disptime(struct throtl_grp *tg)
{
        struct throtl_service_queue *sq = &tg->service_queue;
        unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
        struct bio *bio;

        bio = throtl_peek_queued(&sq->queued[READ]);
        if (bio)
                tg_may_dispatch(tg, bio, &read_wait);

        bio = throtl_peek_queued(&sq->queued[WRITE]);
        if (bio)
                tg_may_dispatch(tg, bio, &write_wait);

        min_wait = min(read_wait, write_wait);
        disptime = jiffies + min_wait;

        /* Update dispatch time */
        throtl_dequeue_tg(tg);
        tg->disptime = disptime;
        throtl_enqueue_tg(tg);

        /* see throtl_add_bio_tg() */
        tg->flags &= ~THROTL_TG_WAS_EMPTY;
}

static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
                                        struct throtl_grp *parent_tg, bool rw)
{
        if (throtl_slice_used(parent_tg, rw)) {
                throtl_start_new_slice_with_credit(parent_tg, rw,
                                child_tg->slice_start[rw]);
        }
}

static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
{
        struct throtl_service_queue *sq = &tg->service_queue;
        struct throtl_service_queue *parent_sq = sq->parent_sq;
        struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
        struct throtl_grp *tg_to_put = NULL;
        struct bio *bio;

        /*
         * @bio is being transferred from @tg to @parent_sq.  Popping a bio
         * from @tg may put its reference and @parent_sq might end up
         * getting released prematurely.  Remember the tg to put and put it
         * after @bio is transferred to @parent_sq.
         */
        bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
        sq->nr_queued[rw]--;

        throtl_charge_bio(tg, bio);

        /*
         * If our parent is another tg, we just need to transfer @bio to
         * the parent using throtl_add_bio_tg().  If our parent is
         * @td->service_queue, @bio is ready to be issued.  Put it on its
         * bio_lists[] and decrease total number queued.  The caller is
         * responsible for issuing these bios.
         */
        if (parent_tg) {
                throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
                start_parent_slice_with_credit(tg, parent_tg, rw);
        } else {
                throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
                                     &parent_sq->queued[rw]);
                BUG_ON(tg->td->nr_queued[rw] <= 0);
                tg->td->nr_queued[rw]--;
        }

        throtl_trim_slice(tg, rw);

        if (tg_to_put)
                blkg_put(tg_to_blkg(tg_to_put));
}

static int throtl_dispatch_tg(struct throtl_grp *tg)
{
        struct throtl_service_queue *sq = &tg->service_queue;
        unsigned int nr_reads = 0, nr_writes = 0;
        unsigned int max_nr_reads = throtl_grp_quantum * 3 / 4;
        unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
        struct bio *bio;

        /* Try to dispatch 75% READS and 25% WRITES */

        while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
               tg_may_dispatch(tg, bio, NULL)) {

                tg_dispatch_one_bio(tg, bio_data_dir(bio));
                nr_reads++;

                if (nr_reads >= max_nr_reads)
                        break;
        }

        while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
               tg_may_dispatch(tg, bio, NULL)) {

                tg_dispatch_one_bio(tg, bio_data_dir(bio));
                nr_writes++;

                if (nr_writes >= max_nr_writes)
                        break;
        }

        return nr_reads + nr_writes;
}
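
/*
 * With the default throtl_grp_quantum of 8, max_nr_reads = 8 * 3 / 4 = 6
 * and max_nr_writes = 8 - 6 = 2, which is the 75%/25% read/write split
 * the comment above describes.
 */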

static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
        unsigned int nr_disp = 0;

        while (1) {
                struct throtl_grp *tg = throtl_rb_first(parent_sq);
                struct throtl_service_queue *sq;

                if (!tg)
                        break;

                if (time_before(jiffies, tg->disptime))
                        break;

                throtl_dequeue_tg(tg);

                nr_disp += throtl_dispatch_tg(tg);

                sq = &tg->service_queue;
                if (sq->nr_queued[0] || sq->nr_queued[1])
                        tg_update_disptime(tg);

                if (nr_disp >= throtl_quantum)
                        break;
        }

        return nr_disp;
}

static bool throtl_can_upgrade(struct throtl_data *td,
        struct throtl_grp *this_tg);
/**
 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
 * @arg: the throtl_service_queue being serviced
 *
 * This timer is armed when a child throtl_grp with active bio's becomes
 * pending and queued on the service_queue's pending_tree and expires when
 * the first child throtl_grp should be dispatched.  This function
 * dispatches bio's from the children throtl_grps to the parent
 * service_queue.
 *
 * If the parent's parent is another throtl_grp, dispatching is propagated
 * by either arming its pending_timer or repeating dispatch directly.  If
 * the top-level service_tree is reached, throtl_data->dispatch_work is
 * kicked so that the ready bio's are issued.
 */
static void throtl_pending_timer_fn(unsigned long arg)
{
        struct throtl_service_queue *sq = (void *)arg;
        struct throtl_grp *tg = sq_to_tg(sq);
        struct throtl_data *td = sq_to_td(sq);
        struct request_queue *q = td->queue;
        struct throtl_service_queue *parent_sq;
        bool dispatched;
        int ret;

        spin_lock_irq(q->queue_lock);
        if (throtl_can_upgrade(td, NULL))
                throtl_upgrade_state(td);

again:
        parent_sq = sq->parent_sq;
        dispatched = false;

        while (true) {
                throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
                           sq->nr_queued[READ] + sq->nr_queued[WRITE],
                           sq->nr_queued[READ], sq->nr_queued[WRITE]);

                ret = throtl_select_dispatch(sq);
                if (ret) {
                        throtl_log(sq, "bios disp=%u", ret);
                        dispatched = true;
                }

                if (throtl_schedule_next_dispatch(sq, false))
                        break;

                /* this dispatch window is still open, relax and repeat */
                spin_unlock_irq(q->queue_lock);
                cpu_relax();
                spin_lock_irq(q->queue_lock);
        }

        if (!dispatched)
                goto out_unlock;

        if (parent_sq) {
                /* @parent_sq is another throtl_grp, propagate dispatch */
                if (tg->flags & THROTL_TG_WAS_EMPTY) {
                        tg_update_disptime(tg);
                        if (!throtl_schedule_next_dispatch(parent_sq, false)) {
                                /* window is already open, repeat dispatching */
                                sq = parent_sq;
                                tg = sq_to_tg(sq);
                                goto again;
                        }
                }
        } else {
                /* reached the top-level, queue issuing */
                queue_work(kthrotld_workqueue, &td->dispatch_work);
        }
out_unlock:
        spin_unlock_irq(q->queue_lock);
}

/**
 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
 * @work: work item being executed
 *
 * This function is queued for execution when bio's reach the bio_lists[]
 * of throtl_data->service_queue.  Those bio's are ready and issued by this
 * function.
 */
static void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
        struct throtl_data *td = container_of(work, struct throtl_data,
                                              dispatch_work);
        struct throtl_service_queue *td_sq = &td->service_queue;
        struct request_queue *q = td->queue;
        struct bio_list bio_list_on_stack;
        struct bio *bio;
        struct blk_plug plug;
        int rw;

        bio_list_init(&bio_list_on_stack);

        spin_lock_irq(q->queue_lock);
        for (rw = READ; rw <= WRITE; rw++)
                while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
                        bio_list_add(&bio_list_on_stack, bio);
        spin_unlock_irq(q->queue_lock);

        if (!bio_list_empty(&bio_list_on_stack)) {
                blk_start_plug(&plug);
                while ((bio = bio_list_pop(&bio_list_on_stack)))
                        generic_make_request(bio);
                blk_finish_plug(&plug);
        }
}

static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
                              int off)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        u64 v = *(u64 *)((void *)tg + off);

        if (v == U64_MAX)
                return 0;
        return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
                               int off)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        unsigned int v = *(unsigned int *)((void *)tg + off);

        if (v == UINT_MAX)
                return 0;
        return __blkg_prfill_u64(sf, pd, v);
}

static int tg_print_conf_u64(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
                          &blkcg_policy_throtl, seq_cft(sf)->private, false);
        return 0;
}

static int tg_print_conf_uint(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
                          &blkcg_policy_throtl, seq_cft(sf)->private, false);
        return 0;
}

static void tg_conf_updated(struct throtl_grp *tg, bool global)
{
        struct throtl_service_queue *sq = &tg->service_queue;
        struct cgroup_subsys_state *pos_css;
        struct blkcg_gq *blkg;

        throtl_log(&tg->service_queue,
                   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
                   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
                   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));

        /*
         * Update has_rules[] flags for the updated tg's subtree.  A tg is
         * considered to have rules if either the tg itself or any of its
         * ancestors has rules.  This identifies groups without any
         * restrictions in the whole hierarchy and allows them to bypass
         * blk-throttle.
         */
        blkg_for_each_descendant_pre(blkg, pos_css,
                        global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
                struct throtl_grp *this_tg = blkg_to_tg(blkg);
                struct throtl_grp *parent_tg;

                tg_update_has_rules(this_tg);
                /* ignore root/second level */
                if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
                    !blkg->parent->parent)
                        continue;
                parent_tg = blkg_to_tg(blkg->parent);
                /*
                 * make sure all children have a lower idle time threshold
                 * and a higher latency target than their parent
                 */
                this_tg->idletime_threshold = min(this_tg->idletime_threshold,
                                parent_tg->idletime_threshold);
                this_tg->latency_target = max(this_tg->latency_target,
                                parent_tg->latency_target);
        }

        /*
         * We're already holding queue_lock and know @tg is valid.  Let's
         * apply the new config directly.
         *
         * Restart the slices for both READ and WRITES. It might happen
         * that a group's limits are dropped suddenly and we don't want to
         * account recently dispatched IO with the new low rate.
         */
        throtl_start_new_slice(tg, 0);
        throtl_start_new_slice(tg, 1);

        if (tg->flags & THROTL_TG_PENDING) {
                tg_update_disptime(tg);
                throtl_schedule_next_dispatch(sq->parent_sq, true);
        }
}

static ssize_t tg_set_conf(struct kernfs_open_file *of,
                           char *buf, size_t nbytes, loff_t off, bool is_u64)
{
        struct blkcg *blkcg = css_to_blkcg(of_css(of));
        struct blkg_conf_ctx ctx;
        struct throtl_grp *tg;
        int ret;
        u64 v;

        ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
        if (ret)
                return ret;

        ret = -EINVAL;
        if (sscanf(ctx.body, "%llu", &v) != 1)
                goto out_finish;
        if (!v)
                v = U64_MAX;

        tg = blkg_to_tg(ctx.blkg);

        if (is_u64)
                *(u64 *)((void *)tg + of_cft(of)->private) = v;
        else
                *(unsigned int *)((void *)tg + of_cft(of)->private) = v;

        tg_conf_updated(tg, false);
        ret = 0;
out_finish:
        blkg_conf_finish(&ctx);
        return ret ?: nbytes;
}

static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
                               char *buf, size_t nbytes, loff_t off)
{
        return tg_set_conf(of, buf, nbytes, off, true);
}

static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
                                char *buf, size_t nbytes, loff_t off)
{
        return tg_set_conf(of, buf, nbytes, off, false);
}
1471
1472 static struct cftype throtl_legacy_files[] = {
1473         {
1474                 .name = "throttle.read_bps_device",
1475                 .private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
1476                 .seq_show = tg_print_conf_u64,
1477                 .write = tg_set_conf_u64,
1478         },
1479         {
1480                 .name = "throttle.write_bps_device",
1481                 .private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
1482                 .seq_show = tg_print_conf_u64,
1483                 .write = tg_set_conf_u64,
1484         },
1485         {
1486                 .name = "throttle.read_iops_device",
1487                 .private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
1488                 .seq_show = tg_print_conf_uint,
1489                 .write = tg_set_conf_uint,
1490         },
1491         {
1492                 .name = "throttle.write_iops_device",
1493                 .private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
1494                 .seq_show = tg_print_conf_uint,
1495                 .write = tg_set_conf_uint,
1496         },
1497         {
1498                 .name = "throttle.io_service_bytes",
1499                 .private = (unsigned long)&blkcg_policy_throtl,
1500                 .seq_show = blkg_print_stat_bytes,
1501         },
1502         {
1503                 .name = "throttle.io_serviced",
1504                 .private = (unsigned long)&blkcg_policy_throtl,
1505                 .seq_show = blkg_print_stat_ios,
1506         },
1507         { }     /* terminate */
1508 };
1509
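/*
 * A hedged userspace sketch of driving the legacy files above; the
 * cgroup path and the 8:0 device numbers are assumptions for the
 * example:
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* limit reads on device 8:0 to 1 MiB/s for this cgroup */
	const char *path =
		"/sys/fs/cgroup/blkio/mygrp/blkio.throttle.read_bps_device";
	const char *conf = "8:0 1048576\n";
	int fd = open(path, O_WRONLY);
	int ret = 0;

	if (fd < 0)
		return 1;
	if (write(fd, conf, strlen(conf)) < 0)
		ret = 1;
	/* writing "8:0 0" would clear the limit: tg_set_conf() maps 0 to unlimited */
	close(fd);
	return ret;
}
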
1510 static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
1511                          int off)
1512 {
1513         struct throtl_grp *tg = pd_to_tg(pd);
1514         const char *dname = blkg_dev_name(pd->blkg);
1515         char bufs[4][21] = { "max", "max", "max", "max" };
1516         u64 bps_dft;
1517         unsigned int iops_dft;
1518         char idle_time[26] = "";
1519         char latency_time[26] = "";
1520
1521         if (!dname)
1522                 return 0;
1523
1524         if (off == LIMIT_LOW) {
1525                 bps_dft = 0;
1526                 iops_dft = 0;
1527         } else {
1528                 bps_dft = U64_MAX;
1529                 iops_dft = UINT_MAX;
1530         }
1531
1532         if (tg->bps_conf[READ][off] == bps_dft &&
1533             tg->bps_conf[WRITE][off] == bps_dft &&
1534             tg->iops_conf[READ][off] == iops_dft &&
1535             tg->iops_conf[WRITE][off] == iops_dft &&
1536             (off != LIMIT_LOW ||
1537              (tg->idletime_threshold_conf == tg->td->dft_idletime_threshold &&
1538               tg->latency_target_conf == DFL_LATENCY_TARGET)))
1539                 return 0;
1540
1541         if (tg->bps_conf[READ][off] != U64_MAX)
1542                 snprintf(bufs[0], sizeof(bufs[0]), "%llu",
1543                         tg->bps_conf[READ][off]);
1544         if (tg->bps_conf[WRITE][off] != U64_MAX)
1545                 snprintf(bufs[1], sizeof(bufs[1]), "%llu",
1546                         tg->bps_conf[WRITE][off]);
1547         if (tg->iops_conf[READ][off] != UINT_MAX)
1548                 snprintf(bufs[2], sizeof(bufs[2]), "%u",
1549                         tg->iops_conf[READ][off]);
1550         if (tg->iops_conf[WRITE][off] != UINT_MAX)
1551                 snprintf(bufs[3], sizeof(bufs[3]), "%u",
1552                         tg->iops_conf[WRITE][off]);
1553         if (off == LIMIT_LOW) {
1554                 if (tg->idletime_threshold_conf == ULONG_MAX)
1555                         strcpy(idle_time, " idle=max");
1556                 else
1557                         snprintf(idle_time, sizeof(idle_time), " idle=%lu",
1558                                 tg->idletime_threshold_conf);
1559
1560                 if (tg->latency_target_conf == ULONG_MAX)
1561                         strcpy(latency_time, " latency=max");
1562                 else
1563                         snprintf(latency_time, sizeof(latency_time),
1564                                 " latency=%lu", tg->latency_target_conf);
1565         }
1566
1567         seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
1568                    dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
1569                    latency_time);
1570         return 0;
1571 }
1572
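/*
 * For example, a group on device 8:0 with only a 1 MiB/s read low limit
 * configured would be rendered by tg_prfill_limit() roughly as (values
 * illustrative):
 *
 *   8:0 rbps=1048576 wbps=max riops=max wiops=max idle=1000 latency=0
 *
 * where "max" marks unconfigured limits and the idle/latency tokens
 * only appear for the LIMIT_LOW file.
 */
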
1573 static int tg_print_limit(struct seq_file *sf, void *v)
1574 {
1575         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
1576                           &blkcg_policy_throtl, seq_cft(sf)->private, false);
1577         return 0;
1578 }
1579
1580 static ssize_t tg_set_limit(struct kernfs_open_file *of,
1581                           char *buf, size_t nbytes, loff_t off)
1582 {
1583         struct blkcg *blkcg = css_to_blkcg(of_css(of));
1584         struct blkg_conf_ctx ctx;
1585         struct throtl_grp *tg;
1586         u64 v[4];
1587         unsigned long idle_time;
1588         unsigned long latency_time;
1589         int ret;
1590         int index = of_cft(of)->private;
1591
1592         ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1593         if (ret)
1594                 return ret;
1595
1596         tg = blkg_to_tg(ctx.blkg);
1597
1598         v[0] = tg->bps_conf[READ][index];
1599         v[1] = tg->bps_conf[WRITE][index];
1600         v[2] = tg->iops_conf[READ][index];
1601         v[3] = tg->iops_conf[WRITE][index];
1602
1603         idle_time = tg->idletime_threshold_conf;
1604         latency_time = tg->latency_target_conf;
1605         while (true) {
1606                 char tok[27];   /* wiops=18446744073709551615 */
1607                 char *p;
1608                 u64 val = U64_MAX;
1609                 int len;
1610
1611                 if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
1612                         break;
1613                 if (tok[0] == '\0')
1614                         break;
1615                 ctx.body += len;
1616
1617                 ret = -EINVAL;
1618                 p = tok;
1619                 strsep(&p, "=");
1620                 if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
1621                         goto out_finish;
1622
1623                 ret = -ERANGE;
1624                 if (!val)
1625                         goto out_finish;
1626
1627                 ret = -EINVAL;
1628                 if (!strcmp(tok, "rbps"))
1629                         v[0] = val;
1630                 else if (!strcmp(tok, "wbps"))
1631                         v[1] = val;
1632                 else if (!strcmp(tok, "riops"))
1633                         v[2] = min_t(u64, val, UINT_MAX);
1634                 else if (!strcmp(tok, "wiops"))
1635                         v[3] = min_t(u64, val, UINT_MAX);
1636                 else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
1637                         idle_time = val;
1638                 else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
1639                         latency_time = val;
1640                 else
1641                         goto out_finish;
1642         }
1643
1644         tg->bps_conf[READ][index] = v[0];
1645         tg->bps_conf[WRITE][index] = v[1];
1646         tg->iops_conf[READ][index] = v[2];
1647         tg->iops_conf[WRITE][index] = v[3];
1648
1649         if (index == LIMIT_MAX) {
1650                 tg->bps[READ][index] = v[0];
1651                 tg->bps[WRITE][index] = v[1];
1652                 tg->iops[READ][index] = v[2];
1653                 tg->iops[WRITE][index] = v[3];
1654         }
1655         tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
1656                 tg->bps_conf[READ][LIMIT_MAX]);
1657         tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
1658                 tg->bps_conf[WRITE][LIMIT_MAX]);
1659         tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
1660                 tg->iops_conf[READ][LIMIT_MAX]);
1661         tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
1662                 tg->iops_conf[WRITE][LIMIT_MAX]);
1663
1664         if (index == LIMIT_LOW) {
1665                 blk_throtl_update_limit_valid(tg->td);
1666                 if (tg->td->limit_valid[LIMIT_LOW])
1667                         tg->td->limit_index = LIMIT_LOW;
1668                 tg->idletime_threshold_conf = idle_time;
1669                 tg->idletime_threshold = tg->idletime_threshold_conf;
1670                 tg->latency_target_conf = latency_time;
1671                 tg->latency_target = tg->latency_target_conf;
1672         }
1673         tg_conf_updated(tg, index == LIMIT_LOW &&
1674                 tg->td->limit_valid[LIMIT_LOW]);
1675         ret = 0;
1676 out_finish:
1677         blkg_conf_finish(&ctx);
1678         return ret ?: nbytes;
1679 }
1680
1681 static struct cftype throtl_files[] = {
1682 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
1683         {
1684                 .name = "low",
1685                 .flags = CFTYPE_NOT_ON_ROOT,
1686                 .seq_show = tg_print_limit,
1687                 .write = tg_set_limit,
1688                 .private = LIMIT_LOW,
1689         },
1690 #endif
1691         {
1692                 .name = "max",
1693                 .flags = CFTYPE_NOT_ON_ROOT,
1694                 .seq_show = tg_print_limit,
1695                 .write = tg_set_limit,
1696                 .private = LIMIT_MAX,
1697         },
1698         { }     /* terminate */
1699 };
1700
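/*
 * On the unified hierarchy these files appear as io.low and io.max.
 * A hedged example of the line format tg_set_limit() accepts (the 8:0
 * device numbers are assumptions):
 *
 *   "8:0 rbps=1048576 wbps=max idle=1000 latency=10"  -> written to io.low
 *   "8:0 riops=600 wiops=max"                         -> written to io.max
 *
 * Tokens may appear in any order, "max" clears a limit, and an explicit
 * 0 is rejected with -ERANGE in this version.
 */
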
1701 static void throtl_shutdown_wq(struct request_queue *q)
1702 {
1703         struct throtl_data *td = q->td;
1704
1705         cancel_work_sync(&td->dispatch_work);
1706 }
1707
1708 static struct blkcg_policy blkcg_policy_throtl = {
1709         .dfl_cftypes            = throtl_files,
1710         .legacy_cftypes         = throtl_legacy_files,
1711
1712         .pd_alloc_fn            = throtl_pd_alloc,
1713         .pd_init_fn             = throtl_pd_init,
1714         .pd_online_fn           = throtl_pd_online,
1715         .pd_offline_fn          = throtl_pd_offline,
1716         .pd_free_fn             = throtl_pd_free,
1717 };
1718
1719 static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
1720 {
1721         unsigned long rtime = jiffies, wtime = jiffies;
1722
1723         if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
1724                 rtime = tg->last_low_overflow_time[READ];
1725         if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
1726                 wtime = tg->last_low_overflow_time[WRITE];
1727         return min(rtime, wtime);
1728 }
1729
1730 /* tg should not be an intermediate node */
1731 static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
1732 {
1733         struct throtl_service_queue *parent_sq;
1734         struct throtl_grp *parent = tg;
1735         unsigned long ret = __tg_last_low_overflow_time(tg);
1736
1737         while (true) {
1738                 parent_sq = parent->service_queue.parent_sq;
1739                 parent = sq_to_tg(parent_sq);
1740                 if (!parent)
1741                         break;
1742
1743                 /*
1744                  * A parent without a low limit always counts as reaching
1745                  * it, so its overflow time is useless for its children
1746                  */
1747                 if (!parent->bps[READ][LIMIT_LOW] &&
1748                     !parent->iops[READ][LIMIT_LOW] &&
1749                     !parent->bps[WRITE][LIMIT_LOW] &&
1750                     !parent->iops[WRITE][LIMIT_LOW])
1751                         continue;
1752                 if (time_after(__tg_last_low_overflow_time(parent), ret))
1753                         ret = __tg_last_low_overflow_time(parent);
1754         }
1755         return ret;
1756 }
1757
1758 static bool throtl_tg_is_idle(struct throtl_grp *tg)
1759 {
1760         /*
1761          * cgroup is idle if:
1762          * - the single idle period is too long: longer than a fixed cap
1763          *   (in case the user configured too big a threshold) or 4 slices
1764          * - the average think time is above the threshold
1765          * - IO latency is mostly below the latency target
1766          */
1767         unsigned long time = jiffies_to_usecs(4 * tg->td->throtl_slice);
1768         bool ret;
1769
1770         time = min_t(unsigned long, MAX_IDLE_TIME, time);
1771         ret = (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
1772                tg->avg_idletime > tg->idletime_threshold ||
1773                (tg->latency_target && tg->bio_cnt &&
1774                 tg->bad_bio_cnt * 5 < tg->bio_cnt);
1775         throtl_log(&tg->service_queue,
1776                 "avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
1777                 tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
1778                 tg->bio_cnt, ret, tg->td->scale);
1779         return ret;
1780 }
1781
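/*
 * The latency clause above treats the target as met while fewer than
 * 20% of bios exceed it (bad_bio_cnt * 5 < bio_cnt): for example, 30
 * bad bios out of 200 still count toward idleness, 50 out of 200 do not.
 */
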
1782 static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
1783 {
1784         struct throtl_service_queue *sq = &tg->service_queue;
1785         bool read_limit, write_limit;
1786
1787         /*
1788          * if the cgroup reaches its low limit (a low limit of 0 is always
1789          * considered reached), it's ok to upgrade to the next limit
1790          */
1791         read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
1792         write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
1793         if (!read_limit && !write_limit)
1794                 return true;
1795         if (read_limit && sq->nr_queued[READ] &&
1796             (!write_limit || sq->nr_queued[WRITE]))
1797                 return true;
1798         if (write_limit && sq->nr_queued[WRITE] &&
1799             (!read_limit || sq->nr_queued[READ]))
1800                 return true;
1801
1802         if (time_after_eq(jiffies,
1803                 tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
1804             throtl_tg_is_idle(tg))
1805                 return true;
1806         return false;
1807 }
1808
1809 static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
1810 {
1811         while (true) {
1812                 if (throtl_tg_can_upgrade(tg))
1813                         return true;
1814                 tg = sq_to_tg(tg->service_queue.parent_sq);
1815                 if (!tg || !tg_to_blkg(tg)->parent)
1816                         return false;
1817         }
1818         return false;   /* not reachable; the loop above always returns */
1819 }
1820
1821 static bool throtl_can_upgrade(struct throtl_data *td,
1822         struct throtl_grp *this_tg)
1823 {
1824         struct cgroup_subsys_state *pos_css;
1825         struct blkcg_gq *blkg;
1826
1827         if (td->limit_index != LIMIT_LOW)
1828                 return false;
1829
1830         if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
1831                 return false;
1832
1833         rcu_read_lock();
1834         blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1835                 struct throtl_grp *tg = blkg_to_tg(blkg);
1836
1837                 if (tg == this_tg)
1838                         continue;
1839                 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1840                         continue;
1841                 if (!throtl_hierarchy_can_upgrade(tg)) {
1842                         rcu_read_unlock();
1843                         return false;
1844                 }
1845         }
1846         rcu_read_unlock();
1847         return true;
1848 }
1849
1850 static void throtl_upgrade_check(struct throtl_grp *tg)
1851 {
1852         unsigned long now = jiffies;
1853
1854         if (tg->td->limit_index != LIMIT_LOW)
1855                 return;
1856
1857         if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1858                 return;
1859
1860         tg->last_check_time = now;
1861
1862         if (!time_after_eq(now,
1863              __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
1864                 return;
1865
1866         if (throtl_can_upgrade(tg->td, NULL))
1867                 throtl_upgrade_state(tg->td);
1868 }
1869
1870 static void throtl_upgrade_state(struct throtl_data *td)
1871 {
1872         struct cgroup_subsys_state *pos_css;
1873         struct blkcg_gq *blkg;
1874
1875         throtl_log(&td->service_queue, "upgrade to max");
1876         td->limit_index = LIMIT_MAX;
1877         td->low_upgrade_time = jiffies;
1878         td->scale = 0;
1879         rcu_read_lock();
1880         blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1881                 struct throtl_grp *tg = blkg_to_tg(blkg);
1882                 struct throtl_service_queue *sq = &tg->service_queue;
1883
1884                 tg->disptime = jiffies - 1;
1885                 throtl_select_dispatch(sq);
1886                 throtl_schedule_next_dispatch(sq, false);
1887         }
1888         rcu_read_unlock();
1889         throtl_select_dispatch(&td->service_queue);
1890         throtl_schedule_next_dispatch(&td->service_queue, false);
1891         queue_work(kthrotld_workqueue, &td->dispatch_work);
1892 }
1893
1894 static void throtl_downgrade_state(struct throtl_data *td, int new)
1895 {
1896         td->scale /= 2;
1897
1898         throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
1899         if (td->scale) {
1900                 td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
1901                 return;
1902         }
1903
1904         td->limit_index = new;
1905         td->low_downgrade_time = jiffies;
1906 }
1907
1908 static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
1909 {
1910         struct throtl_data *td = tg->td;
1911         unsigned long now = jiffies;
1912
1913         /*
1914          * If the cgroup stays below its low limit, consider downgrading
1915          * so that other cgroups are throttled
1916          */
1917         if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
1918             time_after_eq(now, tg_last_low_overflow_time(tg) +
1919                                         td->throtl_slice) &&
1920             (!throtl_tg_is_idle(tg) ||
1921              !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
1922                 return true;
1923         return false;
1924 }
1925
1926 static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
1927 {
1928         while (true) {
1929                 if (!throtl_tg_can_downgrade(tg))
1930                         return false;
1931                 tg = sq_to_tg(tg->service_queue.parent_sq);
1932                 if (!tg || !tg_to_blkg(tg)->parent)
1933                         break;
1934         }
1935         return true;
1936 }
1937
1938 static void throtl_downgrade_check(struct throtl_grp *tg)
1939 {
1940         uint64_t bps;
1941         unsigned int iops;
1942         unsigned long elapsed_time;
1943         unsigned long now = jiffies;
1944
1945         if (tg->td->limit_index != LIMIT_MAX ||
1946             !tg->td->limit_valid[LIMIT_LOW])
1947                 return;
1948         if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1949                 return;
1950         if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1951                 return;
1952
1953         elapsed_time = now - tg->last_check_time;
1954         tg->last_check_time = now;
1955
1956         if (time_before(now, tg_last_low_overflow_time(tg) +
1957                         tg->td->throtl_slice))
1958                 return;
1959
1960         if (tg->bps[READ][LIMIT_LOW]) {
1961                 bps = tg->last_bytes_disp[READ] * HZ;
1962                 do_div(bps, elapsed_time);
1963                 if (bps >= tg->bps[READ][LIMIT_LOW])
1964                         tg->last_low_overflow_time[READ] = now;
1965         }
1966
1967         if (tg->bps[WRITE][LIMIT_LOW]) {
1968                 bps = tg->last_bytes_disp[WRITE] * HZ;
1969                 do_div(bps, elapsed_time);
1970                 if (bps >= tg->bps[WRITE][LIMIT_LOW])
1971                         tg->last_low_overflow_time[WRITE] = now;
1972         }
1973
1974         if (tg->iops[READ][LIMIT_LOW]) {
1975                 iops = tg->last_io_disp[READ] * HZ / elapsed_time;
1976                 if (iops >= tg->iops[READ][LIMIT_LOW])
1977                         tg->last_low_overflow_time[READ] = now;
1978         }
1979
1980         if (tg->iops[WRITE][LIMIT_LOW]) {
1981                 iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
1982                 if (iops >= tg->iops[WRITE][LIMIT_LOW])
1983                         tg->last_low_overflow_time[WRITE] = now;
1984         }
1985
1986         /*
1987          * If the whole hierarchy stays below its low limits, downgrade
1988          * so that other cgroups are throttled
1989          */
1990         if (throtl_hierarchy_can_downgrade(tg))
1991                 throtl_downgrade_state(tg->td, LIMIT_LOW);
1992
1993         tg->last_bytes_disp[READ] = 0;
1994         tg->last_bytes_disp[WRITE] = 0;
1995         tg->last_io_disp[READ] = 0;
1996         tg->last_io_disp[WRITE] = 0;
1997 }
1998
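/*
 * The dispatched-rate checks above scale byte/IO counts to a per-second
 * rate with jiffies arithmetic.  A minimal userspace sketch of the same
 * calculation; DEMO_HZ and the sample numbers are assumptions:
 */
#include <stdio.h>

#define DEMO_HZ 250	/* jiffies per second, configuration dependent */

int main(void)
{
	unsigned long long bytes_disp = 524288;	/* bytes since last check */
	unsigned long elapsed_jiffies = 50;	/* 200 ms at HZ=250 */
	unsigned long long bps = bytes_disp * DEMO_HZ / elapsed_jiffies;

	/* 524288 * 250 / 50 = 2621440 bytes/sec, i.e. 2.5 MiB/s */
	printf("rate = %llu bytes/sec\n", bps);
	return 0;
}
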
1999 static void blk_throtl_update_idletime(struct throtl_grp *tg)
2000 {
2001         unsigned long now = ktime_get_ns() >> 10;
2002         unsigned long last_finish_time = tg->last_finish_time;
2003
2004         if (now <= last_finish_time || last_finish_time == 0 ||
2005             last_finish_time == tg->checked_last_finish_time)
2006                 return;
2007
2008         tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
2009         tg->checked_last_finish_time = last_finish_time;
2010 }
2011
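/*
 * The average think time above is an exponentially weighted moving
 * average with a 7/8 decay.  A standalone sketch of the same arithmetic
 * on made-up samples:
 */
#include <stdio.h>

int main(void)
{
	unsigned long avg = 0;
	unsigned long samples[] = { 800, 1200, 400, 4000 };	/* usecs, invented */
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		/* new_avg = (7 * old_avg + sample) / 8, as in the kernel */
		avg = (avg * 7 + samples[i]) >> 3;
		printf("sample=%lu avg=%lu\n", samples[i], avg);
	}
	return 0;
}
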
2012 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2013 static void throtl_update_latency_buckets(struct throtl_data *td)
2014 {
2015         struct avg_latency_bucket avg_latency[LATENCY_BUCKET_SIZE];
2016         int i, cpu;
2017         unsigned long last_latency = 0;
2018         unsigned long latency;
2019
2020         if (!blk_queue_nonrot(td->queue))
2021                 return;
2022         if (time_before(jiffies, td->last_calculate_time + HZ))
2023                 return;
2024         td->last_calculate_time = jiffies;
2025
2026         memset(avg_latency, 0, sizeof(avg_latency));
2027         for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2028                 struct latency_bucket *tmp = &td->tmp_buckets[i];
2029
2030                 for_each_possible_cpu(cpu) {
2031                         struct latency_bucket *bucket;
2032
2033                         /* this isn't race free, but ok in practice */
2034                         bucket = per_cpu_ptr(td->latency_buckets, cpu);
2035                         tmp->total_latency += bucket[i].total_latency;
2036                         tmp->samples += bucket[i].samples;
2037                         bucket[i].total_latency = 0;
2038                         bucket[i].samples = 0;
2039                 }
2040
2041                 if (tmp->samples >= 32) {
2042                         int samples = tmp->samples;
2043
2044                         latency = tmp->total_latency;
2045
2046                         tmp->total_latency = 0;
2047                         tmp->samples = 0;
2048                         latency /= samples;
2049                         if (latency == 0)
2050                                 continue;
2051                         avg_latency[i].latency = latency;
2052                 }
2053         }
2054
2055         for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2056                 if (!avg_latency[i].latency) {
2057                         if (td->avg_buckets[i].latency < last_latency)
2058                                 td->avg_buckets[i].latency = last_latency;
2059                         continue;
2060                 }
2061
2062                 if (!td->avg_buckets[i].valid)
2063                         latency = avg_latency[i].latency;
2064                 else
2065                         latency = (td->avg_buckets[i].latency * 7 +
2066                                 avg_latency[i].latency) >> 3;
2067
2068                 td->avg_buckets[i].latency = max(latency, last_latency);
2069                 td->avg_buckets[i].valid = true;
2070                 last_latency = td->avg_buckets[i].latency;
2071         }
2072
2073         for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
2074                 throtl_log(&td->service_queue,
2075                         "Latency bucket %d: latency=%ld, valid=%d", i,
2076                         td->avg_buckets[i].latency, td->avg_buckets[i].valid);
2077 }
2078 #else
2079 static inline void throtl_update_latency_buckets(struct throtl_data *td)
2080 {
2081 }
2082 #endif
2083
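/*
 * Per-size-bucket latencies above are smoothed with the same 7/8 EWMA
 * and then forced to be monotonically non-decreasing across buckets, so
 * a larger request is never expected to complete faster than a smaller
 * one.  A sketch of just the monotonic clamp (numbers invented):
 */
#include <stdio.h>

int main(void)
{
	unsigned long avg[4] = { 80, 70, 0, 200 };	/* usecs per bucket */
	unsigned long last = 0;
	int i;

	for (i = 0; i < 4; i++) {
		if (avg[i] < last)	/* clamp, as for td->avg_buckets[] */
			avg[i] = last;
		last = avg[i];
		printf("bucket %d: %lu usec\n", i, avg[i]);
	}
	return 0;
}
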
2084 static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
2085 {
2086 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2087         int ret;
2088
2089         ret = bio_associate_current(bio);
2090         if (ret == 0 || ret == -EBUSY)
2091                 bio->bi_cg_private = tg;
2092         blk_stat_set_issue(&bio->bi_issue_stat, bio_sectors(bio));
2093 #else
2094         bio_associate_current(bio);
2095 #endif
2096 }
2097
2098 bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
2099                     struct bio *bio)
2100 {
2101         struct throtl_qnode *qn = NULL;
2102         struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
2103         struct throtl_service_queue *sq;
2104         bool rw = bio_data_dir(bio);
2105         bool throttled = false;
2106         struct throtl_data *td = tg->td;
2107
2108         WARN_ON_ONCE(!rcu_read_lock_held());
2109
2110         /* see throtl_charge_bio() */
2111         if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
2112                 goto out;
2113
2114         spin_lock_irq(q->queue_lock);
2115
2116         throtl_update_latency_buckets(td);
2117
2118         if (unlikely(blk_queue_bypass(q)))
2119                 goto out_unlock;
2120
2121         blk_throtl_assoc_bio(tg, bio);
2122         blk_throtl_update_idletime(tg);
2123
2124         sq = &tg->service_queue;
2125
2126 again:
2127         while (true) {
2128                 if (tg->last_low_overflow_time[rw] == 0)
2129                         tg->last_low_overflow_time[rw] = jiffies;
2130                 throtl_downgrade_check(tg);
2131                 throtl_upgrade_check(tg);
2132                 /* throtl is FIFO - if bios are already queued, this one must queue too */
2133                 if (sq->nr_queued[rw])
2134                         break;
2135
2136                 /* if above limits, break to queue */
2137                 if (!tg_may_dispatch(tg, bio, NULL)) {
2138                         tg->last_low_overflow_time[rw] = jiffies;
2139                         if (throtl_can_upgrade(td, tg)) {
2140                                 throtl_upgrade_state(td);
2141                                 goto again;
2142                         }
2143                         break;
2144                 }
2145
2146                 /* within limits, let's charge and dispatch directly */
2147                 throtl_charge_bio(tg, bio);
2148
2149                 /*
2150                  * We need to trim the slice even when bios are not being
2151                  * queued, otherwise a bio might not be queued for a long
2152                  * time, the slice keeps on extending, and trim is not
2153                  * called for a long time. If limits are then reduced
2154                  * suddenly, we take into account all the IO dispatched so
2155                  * far at the new low rate and newly queued IO gets a really
2156                  * long dispatch time.
2157                  *
2158                  * So keep on trimming the slice even if no bio is queued.
2159                  */
2160                 throtl_trim_slice(tg, rw);
2161
2162                 /*
2163                  * @bio passed through this layer without being throttled.
2164                  * Climb up the ladder.  If we're already at the top, it
2165                  * can be executed directly.
2166                  */
2167                 qn = &tg->qnode_on_parent[rw];
2168                 sq = sq->parent_sq;
2169                 tg = sq_to_tg(sq);
2170                 if (!tg)
2171                         goto out_unlock;
2172         }
2173
2174         /* out-of-limit, queue to @tg */
2175         throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
2176                    rw == READ ? 'R' : 'W',
2177                    tg->bytes_disp[rw], bio->bi_iter.bi_size,
2178                    tg_bps_limit(tg, rw),
2179                    tg->io_disp[rw], tg_iops_limit(tg, rw),
2180                    sq->nr_queued[READ], sq->nr_queued[WRITE]);
2181
2182         tg->last_low_overflow_time[rw] = jiffies;
2183
2184         td->nr_queued[rw]++;
2185         throtl_add_bio_tg(bio, qn, tg);
2186         throttled = true;
2187
2188         /*
2189          * Update @tg's dispatch time and force schedule dispatch if @tg
2190          * was empty before @bio.  The forced scheduling isn't likely to
2191          * cause undue delay as @bio is likely to be dispatched directly if
2192          * its @tg's disptime is not in the future.
2193          */
2194         if (tg->flags & THROTL_TG_WAS_EMPTY) {
2195                 tg_update_disptime(tg);
2196                 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
2197         }
2198
2199 out_unlock:
2200         spin_unlock_irq(q->queue_lock);
2201 out:
2202         /*
2203          * As multiple blk-throtls may stack in the same issue path, we
2204          * don't want bios to leave with the flag set.  Clear the flag if
2205          * the bio is being issued.
2206          */
2207         if (!throttled)
2208                 bio_clear_flag(bio, BIO_THROTTLED);
2209
2210 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2211         if (throttled || !td->track_bio_latency)
2212                 bio->bi_issue_stat.stat |= SKIP_LATENCY;
2213 #endif
2214         return throttled;
2215 }
2216
2217 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2218 static void throtl_track_latency(struct throtl_data *td, sector_t size,
2219         int op, unsigned long time)
2220 {
2221         struct latency_bucket *latency;
2222         int index;
2223
2224         if (!td || td->limit_index != LIMIT_LOW || op != REQ_OP_READ ||
2225             !blk_queue_nonrot(td->queue))
2226                 return;
2227
2228         index = request_bucket_index(size);
2229
2230         latency = get_cpu_ptr(td->latency_buckets);
2231         latency[index].total_latency += time;
2232         latency[index].samples++;
2233         put_cpu_ptr(td->latency_buckets);
2234 }
2235
2236 void blk_throtl_stat_add(struct request *rq, u64 time_ns)
2237 {
2238         struct request_queue *q = rq->q;
2239         struct throtl_data *td = q->td;
2240
2241         throtl_track_latency(td, blk_stat_size(&rq->issue_stat),
2242                 req_op(rq), time_ns >> 10);
2243 }
2244
2245 void blk_throtl_bio_endio(struct bio *bio)
2246 {
2247         struct throtl_grp *tg;
2248         u64 finish_time_ns;
2249         unsigned long finish_time;
2250         unsigned long start_time;
2251         unsigned long lat;
2252
2253         tg = bio->bi_cg_private;
2254         if (!tg)
2255                 return;
2256         bio->bi_cg_private = NULL;
2257
2258         finish_time_ns = ktime_get_ns();
2259         tg->last_finish_time = finish_time_ns >> 10;
2260
2261         start_time = blk_stat_time(&bio->bi_issue_stat) >> 10;
2262         finish_time = __blk_stat_time(finish_time_ns) >> 10;
2263         if (!start_time || finish_time <= start_time)
2264                 return;
2265
2266         lat = finish_time - start_time;
2267         /* this is only for bio-based drivers */
2268         if (!(bio->bi_issue_stat.stat & SKIP_LATENCY))
2269                 throtl_track_latency(tg->td, blk_stat_size(&bio->bi_issue_stat),
2270                         bio_op(bio), lat);
2271
2272         if (tg->latency_target) {
2273                 int bucket;
2274                 unsigned int threshold;
2275
2276                 bucket = request_bucket_index(
2277                         blk_stat_size(&bio->bi_issue_stat));
2278                 threshold = tg->td->avg_buckets[bucket].latency +
2279                         tg->latency_target;
2280                 if (lat > threshold)
2281                         tg->bad_bio_cnt++;
2282                 /*
2283                  * Not race free, so the count can be slightly off, which
2284                  * means cgroups may be throttled slightly inaccurately
2285                  */
2286                 tg->bio_cnt++;
2287         }
2288
2289         if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
2290                 tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
2291                 tg->bio_cnt /= 2;
2292                 tg->bad_bio_cnt /= 2;
2293         }
2294 }
2295 #endif
2296
2297 /*
2298  * Dispatch all bios from all children tg's queued on @parent_sq.  On
2299  * return, @parent_sq is guaranteed to not have any active children tg's
2300  * and all bios from previously active tg's are on @parent_sq->queued[].
2301  */
2302 static void tg_drain_bios(struct throtl_service_queue *parent_sq)
2303 {
2304         struct throtl_grp *tg;
2305
2306         while ((tg = throtl_rb_first(parent_sq))) {
2307                 struct throtl_service_queue *sq = &tg->service_queue;
2308                 struct bio *bio;
2309
2310                 throtl_dequeue_tg(tg);
2311
2312                 while ((bio = throtl_peek_queued(&sq->queued[READ])))
2313                         tg_dispatch_one_bio(tg, bio_data_dir(bio));
2314                 while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
2315                         tg_dispatch_one_bio(tg, bio_data_dir(bio));
2316         }
2317 }
2318
2319 /**
2320  * blk_throtl_drain - drain throttled bios
2321  * @q: request_queue to drain throttled bios for
2322  *
2323  * Dispatch all currently throttled bios on @q through ->make_request_fn().
2324  */
2325 void blk_throtl_drain(struct request_queue *q)
2326         __releases(q->queue_lock) __acquires(q->queue_lock)
2327 {
2328         struct throtl_data *td = q->td;
2329         struct blkcg_gq *blkg;
2330         struct cgroup_subsys_state *pos_css;
2331         struct bio *bio;
2332         int rw;
2333
2334         queue_lockdep_assert_held(q);
2335         rcu_read_lock();
2336
2337         /*
2338          * Drain each tg while doing post-order walk on the blkg tree, so
2339          * that all bios are propagated to td->service_queue.  It'd be
2340          * better to walk service_queue tree directly but blkg walk is
2341          * easier.
2342          */
2343         blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
2344                 tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
2345
2346         /* finally, transfer bios from top-level tg's into the td */
2347         tg_drain_bios(&td->service_queue);
2348
2349         rcu_read_unlock();
2350         spin_unlock_irq(q->queue_lock);
2351
2352         /* all bios now should be in td->service_queue, issue them */
2353         for (rw = READ; rw <= WRITE; rw++)
2354                 while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
2355                                                 NULL)))
2356                         generic_make_request(bio);
2357
2358         spin_lock_irq(q->queue_lock);
2359 }
2360
2361 int blk_throtl_init(struct request_queue *q)
2362 {
2363         struct throtl_data *td;
2364         int ret;
2365
2366         td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
2367         if (!td)
2368                 return -ENOMEM;
2369         td->latency_buckets = __alloc_percpu(sizeof(struct latency_bucket) *
2370                 LATENCY_BUCKET_SIZE, __alignof__(u64));
2371         if (!td->latency_buckets) {
2372                 kfree(td);
2373                 return -ENOMEM;
2374         }
2375
2376         INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
2377         throtl_service_queue_init(&td->service_queue);
2378
2379         q->td = td;
2380         td->queue = q;
2381
2382         td->limit_valid[LIMIT_MAX] = true;
2383         td->limit_index = LIMIT_MAX;
2384         td->low_upgrade_time = jiffies;
2385         td->low_downgrade_time = jiffies;
2386
2387         /* activate policy */
2388         ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
2389         if (ret) {
2390                 free_percpu(td->latency_buckets);
2391                 kfree(td);
2392         }
2393         return ret;
2394 }
2395
2396 void blk_throtl_exit(struct request_queue *q)
2397 {
2398         BUG_ON(!q->td);
2399         throtl_shutdown_wq(q);
2400         blkcg_deactivate_policy(q, &blkcg_policy_throtl);
2401         free_percpu(q->td->latency_buckets);
2402         kfree(q->td);
2403 }
2404
2405 void blk_throtl_register_queue(struct request_queue *q)
2406 {
2407         struct throtl_data *td;
2408         struct cgroup_subsys_state *pos_css;
2409         struct blkcg_gq *blkg;
2410
2411         td = q->td;
2412         BUG_ON(!td);
2413
2414         if (blk_queue_nonrot(q)) {
2415                 td->throtl_slice = DFL_THROTL_SLICE_SSD;
2416                 td->dft_idletime_threshold = DFL_IDLE_THRESHOLD_SSD;
2417         } else {
2418                 td->throtl_slice = DFL_THROTL_SLICE_HD;
2419                 td->dft_idletime_threshold = DFL_IDLE_THRESHOLD_HD;
2420         }
2421 #ifndef CONFIG_BLK_DEV_THROTTLING_LOW
2422         /* if no low limit, use previous default */
2423         td->throtl_slice = DFL_THROTL_SLICE_HD;
2424 #endif
2425
2426         td->track_bio_latency = !q->mq_ops && !q->request_fn;
2427         if (!td->track_bio_latency)
2428                 blk_stat_enable_accounting(q);
2429
2430         /*
2431          * some tg's are created before the queue is fully initialized, e.g.
2432          * the nonrot flag isn't set yet
2433          */
2434         rcu_read_lock();
2435         blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
2436                 struct throtl_grp *tg = blkg_to_tg(blkg);
2437
2438                 tg->idletime_threshold = td->dft_idletime_threshold;
2439                 tg->idletime_threshold_conf = td->dft_idletime_threshold;
2440         }
2441         rcu_read_unlock();
2442 }
2443
2444 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2445 ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
2446 {
2447         if (!q->td)
2448                 return -EINVAL;
2449         return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
2450 }
2451
2452 ssize_t blk_throtl_sample_time_store(struct request_queue *q,
2453         const char *page, size_t count)
2454 {
2455         unsigned long v;
2456         unsigned long t;
2457
2458         if (!q->td)
2459                 return -EINVAL;
2460         if (kstrtoul(page, 10, &v))
2461                 return -EINVAL;
2462         t = msecs_to_jiffies(v);
2463         if (t == 0 || t > MAX_THROTL_SLICE)
2464                 return -EINVAL;
2465         q->td->throtl_slice = t;
2466         return count;
2467 }
2468 #endif
2469
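/*
 * A hedged userspace sketch of tuning the sample time through the sysfs
 * file backed by the two helpers above; the sda device name is an
 * assumption:
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/block/sda/queue/throttle_sample_time";
	const char *ms = "100\n";	/* milliseconds; must be > 0 and <= 1000 */
	int fd = open(path, O_WRONLY);
	int ret = 0;

	if (fd < 0)
		return 1;
	if (write(fd, ms, strlen(ms)) < 0)
		ret = 1;
	close(fd);
	return ret;
}
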
2470 static int __init throtl_init(void)
2471 {
2472         kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
2473         if (!kthrotld_workqueue)
2474                 panic("Failed to create kthrotld\n");
2475
2476         return blkcg_policy_register(&blkcg_policy_throtl);
2477 }
2478
2479 module_init(throtl_init);