/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/*
 * Throttling is performed over a 100ms slice and after that the slice is
 * renewed.
 */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */

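/*
 * For illustration (hypothetical numbers): with HZ=1000, throtl_slice is
 * 100 jiffies, so a group capped at bps=1048576 (1MB/s) may dispatch about
 * bps * throtl_slice / HZ = 104857 bytes per slice before it has to wait
 * for the slice to be renewed.
 */
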
/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
static void throtl_schedule_delayed_work(struct throtl_data *td,
				unsigned long delay);

struct throtl_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned int count;
	unsigned long min_disptime;
};

#define THROTL_RB_ROOT	(struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
			.count = 0, .min_disptime = 0}

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

struct throtl_grp {
	/* List of throtl groups on the request queue */
	struct hlist_node tg_node;

	/* active throtl group service_tree member */
	struct rb_node rb_node;

	/*
	 * Dispatch time in jiffies. This is the estimated time when the
	 * group will unthrottle and is ready to dispatch more bios. It is
	 * used as a key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	struct blkio_group blkg;
	atomic_t ref;
	unsigned int flags;

	/* Two lists for READ and WRITE */
	struct bio_list bio_lists[2];

	/* Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bios dispatched in current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Some throttle limits got updated for the group */
	bool limits_changed;

	struct rcu_head rcu_head;
};

struct throtl_data {
	/* List of throtl groups */
	struct hlist_head tg_list;

	/* service tree for active throtl groups */
	struct throtl_rb_root tg_service_tree;

	struct throtl_grp *root_tg;
	struct request_queue *queue;

	/* Total number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* number of total undestroyed groups */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct delayed_work throtl_work;

	bool limits_changed;
};

enum tg_state_flags {
	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
};

#define THROTL_TG_FNS(name)						\
static inline void throtl_mark_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags |= (1 << THROTL_TG_FLAG_##name);			\
}									\
static inline void throtl_clear_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags &= ~(1 << THROTL_TG_FLAG_##name);			\
}									\
static inline int throtl_tg_##name(const struct throtl_grp *tg)	\
{									\
	return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0;	\
}

THROTL_TG_FNS(on_rr);

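/*
 * For reference, the single invocation above expands to three helpers
 * (a sketch of the preprocessor output):
 *
 *	static inline void throtl_mark_tg_on_rr(struct throtl_grp *tg)
 *	{
 *		(tg)->flags |= (1 << THROTL_TG_FLAG_on_rr);
 *	}
 *	static inline void throtl_clear_tg_on_rr(struct throtl_grp *tg)
 *	{
 *		(tg)->flags &= ~(1 << THROTL_TG_FLAG_on_rr);
 *	}
 *	static inline int throtl_tg_on_rr(const struct throtl_grp *tg)
 *	{
 *		return ((tg)->flags & (1 << THROTL_TG_FLAG_on_rr)) != 0;
 *	}
 */
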
#define throtl_log_tg(td, tg, fmt, args...)				\
	blk_add_trace_msg((td)->queue, "throtl %s " fmt,		\
				blkg_path(&(tg)->blkg), ##args)

#define throtl_log(td, fmt, args...)	\
	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)

static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
{
	if (blkg)
		return container_of(blkg, struct throtl_grp, blkg);

	return NULL;
}

static inline unsigned int total_nr_queued(struct throtl_data *td)
{
	return td->nr_queued[0] + td->nr_queued[1];
}

static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
{
	atomic_inc(&tg->ref);
	return tg;
}

static void throtl_free_tg(struct rcu_head *head)
{
	struct throtl_grp *tg;

	tg = container_of(head, struct throtl_grp, rcu_head);
	free_percpu(tg->blkg.stats_cpu);
	kfree(tg);
}

static void throtl_put_tg(struct throtl_grp *tg)
{
	BUG_ON(atomic_read(&tg->ref) <= 0);
	if (!atomic_dec_and_test(&tg->ref))
		return;

	/*
	 * A group is freed in rcu manner. But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu allows access to only
	 * values local to groups like group stats and group rate limits.
	 */
	call_rcu(&tg->rcu_head, throtl_free_tg);
}

static void throtl_init_group(struct throtl_grp *tg)
{
	INIT_HLIST_NODE(&tg->tg_node);
	RB_CLEAR_NODE(&tg->rb_node);
	bio_list_init(&tg->bio_lists[0]);
	bio_list_init(&tg->bio_lists[1]);
	tg->limits_changed = false;

	/* Practically unlimited BW */
	tg->bps[0] = tg->bps[1] = -1;
	tg->iops[0] = tg->iops[1] = -1;

	/*
	 * Take the initial reference that will be released on destroy.
	 * This can be thought of as a joint reference by cgroup and
	 * request queue which will be dropped by either request queue
	 * exit or cgroup deletion path, depending on who is exiting first.
	 */
	atomic_set(&tg->ref, 1);
}

/* Should be called with rcu read lock held (needed for blkcg) */
static void
throtl_add_group_to_td_list(struct throtl_data *td, struct throtl_grp *tg)
{
	hlist_add_head(&tg->tg_node, &td->tg_list);
	td->nr_undestroyed_grps++;
}

static void
__throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
{
	struct backing_dev_info *bdi = &td->queue->backing_dev_info;
	unsigned int major, minor;

	if (!tg || tg->blkg.dev)
		return;

	/*
	 * Fill in device details for a group which might not have been
	 * filled at group creation time, as the queue was being instantiated
	 * and the driver had not attached a device yet.
	 */
	if (bdi->dev && dev_name(bdi->dev)) {
		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
		tg->blkg.dev = MKDEV(major, minor);
	}
}

/*
 * Should be called without queue lock held. Here the queue lock will be
 * taken rarely. It will be taken only once during the lifetime of a group,
 * if need be.
 */
static void
throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
{
	if (!tg || tg->blkg.dev)
		return;

	spin_lock_irq(td->queue->queue_lock);
	__throtl_tg_fill_dev_details(td, tg);
	spin_unlock_irq(td->queue->queue_lock);
}

static void throtl_init_add_tg_lists(struct throtl_data *td,
			struct throtl_grp *tg, struct blkio_cgroup *blkcg)
{
	__throtl_tg_fill_dev_details(td, tg);

	/* Add group onto cgroup list */
	blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
				tg->blkg.dev, BLKIO_POLICY_THROTL);

	tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
	tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
	tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
	tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);

	throtl_add_group_to_td_list(td, tg);
}

/* Should be called without queue lock and outside of rcu period */
static struct throtl_grp *throtl_alloc_tg(struct throtl_data *td)
{
	struct throtl_grp *tg = NULL;
	int ret;

	tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
	if (!tg)
		return NULL;

	ret = blkio_alloc_blkg_stats(&tg->blkg);
	if (ret) {
		kfree(tg);
		return NULL;
	}

	throtl_init_group(tg);
	return tg;
}

static struct throtl_grp *
throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
{
	struct throtl_grp *tg = NULL;
	void *key = td;

	/*
	 * This is the common case when there are no blkio cgroups.
	 * Avoid lookup in this case.
	 */
	if (blkcg == &blkio_root_cgroup)
		tg = td->root_tg;
	else
		tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));

	__throtl_tg_fill_dev_details(td, tg);
	return tg;
}

/*
 * This function returns with queue lock unlocked in case of error, like
 * request queue is no more.
 */
static struct throtl_grp *throtl_get_tg(struct throtl_data *td)
{
	struct throtl_grp *tg = NULL, *__tg = NULL;
	struct blkio_cgroup *blkcg;
	struct request_queue *q = td->queue;

	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);
	tg = throtl_find_tg(td, blkcg);
	if (tg) {
		rcu_read_unlock();
		return tg;
	}

	/*
	 * Need to allocate a group. Allocation of a group also needs
	 * allocation of per cpu stats which, in turn, takes a mutex() and
	 * can block. Hence we need to drop the rcu lock and queue_lock
	 * before we call alloc.
	 */
	rcu_read_unlock();
	spin_unlock_irq(q->queue_lock);

	tg = throtl_alloc_tg(td);
	/*
	 * We might have slept in group allocation. Make sure queue is not
	 * dead.
	 */
	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
		if (tg)
			kfree(tg);

		return ERR_PTR(-ENODEV);
	}

	/* Group allocated and queue is still alive. take the lock */
	spin_lock_irq(q->queue_lock);

	/*
	 * Initialize the new group. After sleeping, read the blkcg again.
	 */
	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);

	/*
	 * If some other thread already allocated the group while we were
	 * not holding the queue lock, free up the group.
	 */
	__tg = throtl_find_tg(td, blkcg);

	if (__tg) {
		kfree(tg);
		rcu_read_unlock();
		return __tg;
	}

	/* Group allocation failed. Account the IO to root group */
	if (!tg) {
		tg = td->root_tg;
		rcu_read_unlock();
		return tg;
	}

	throtl_init_add_tg_lists(td, tg, blkcg);
	rcu_read_unlock();
	return tg;
}

static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_tg(root->left);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}

static void update_min_dispatch_time(struct throtl_rb_root *st)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(st);
	if (!tg)
		return;

	st->min_disptime = tg->disptime;
}

static void
tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &st->rb);
}

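/*
 * Example (hypothetical disptimes): with groups queued at disptime 100,
 * 200 and 300 jiffies, st->left caches the node with key 100, so
 * throtl_rb_first() can find the next group to dispatch without walking
 * down the tree. Inserting a group with key 150 links it right of 100 and
 * left of 200, and st->left is not updated because the walk went right at
 * least once.
 */
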
static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	tg_service_tree_add(st, tg);
	throtl_mark_tg_on_rr(tg);
	st->count++;
}

static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (!throtl_tg_on_rr(tg))
		__throtl_enqueue_tg(td, tg);
}

static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
	throtl_clear_tg_on_rr(tg);
}

static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (throtl_tg_on_rr(tg))
		__throtl_dequeue_tg(td, tg);
}

static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	/*
	 * If there are more bios pending, schedule more work.
	 */
	if (!total_nr_queued(td))
		return;

	BUG_ON(!st->count);

	update_min_dispatch_time(st);

	if (time_before_eq(st->min_disptime, jiffies))
		throtl_schedule_delayed_work(td, 0);
	else
		throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
}

static inline void
throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool
throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return false;

	return true;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void
throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then the time slice doesn't get
	 * renewed. Don't try to trim the slice if the slice is used. A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(td, tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially the cgroup limit was very low, resulting in a high
	 * slice_end, but later the limit was bumped up and the bio was
	 * dispatched sooner; then we need to reduce slice_end. A high bogus
	 * slice_end is bad because it does not allow a new slice to start.
	 */
	throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg->bps[rw] * throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
			" start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
			tg->slice_start[rw], tg->slice_end[rw], jiffies);
}

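/*
 * Worked example (hypothetical numbers, HZ=1000): a group with bps=102400
 * has dispatched bytes_disp=30720 and its slice started 200 jiffies ago.
 * Then nr_slices = 200/100 = 2 and bytes_trim = 102400 * 100 * 2 / 1000 =
 * 20480, so bytes_disp drops to 10240 and slice_start moves forward by
 * 200 jiffies, charging the group only for the overrun it has actually
 * accumulated.
 */
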
static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value as minimum iops can be
	 * 1; then at max jiffy elapsed should be equivalent of 1 second as we
	 * will allow dispatch after 1 second and after that the slice should
	 * have been trimmed.
	 */
	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return false;
}

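/*
 * Example (hypothetical numbers, HZ=1000): with iops=100 and a slice that
 * started 50 jiffies ago, jiffy_elapsed_rnd rounds up to 100, so
 * io_allowed = 100 * 100 / 1000 = 10. The 11th bio in this slice must
 * wait roughly (11 * 1000)/100 + 1 - 50 = 61 jiffies.
 */
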
static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return false;
}

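/*
 * Example (hypothetical numbers, HZ=1000): with bps=102400, a slice 100
 * jiffies old allows 102400 * 100 / 1000 = 10240 bytes. If
 * bytes_disp=8192 and a 4096-byte bio arrives, extra_bytes =
 * 8192 + 4096 - 10240 = 2048 and jiffy_wait = 2048 * 1000 / 102400 = 20
 * jiffies before the bio fits in the budget.
 */
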
static bool tg_no_rule_group(struct throtl_grp *tg, bool rw)
{
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
		return true;

	return false;
}

/*
 * Returns whether one can dispatch a bio or not. Also returns approx number
 * of jiffies to wait before this bio is within the IO rate and can be
 * dispatched.
 */
static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
				struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently the whole state machine of the group depends on the first
	 * bio queued in the group bio list. So one should not be calling
	 * this function with a different bio if there are other bios
	 * queued.
	 */
	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return true;
	}

	/*
	 * If the previous slice expired, start a new one, otherwise
	 * renew/extend the existing slice to make sure it is at least
	 * throtl_slice interval long since now.
	 */
	if (throtl_slice_used(td, tg, rw))
		throtl_start_new_slice(td, tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
	    && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return true;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(td, tg, rw, jiffies + max_wait);

	return false;
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	bool sync = rw_is_sync(bio->bi_rw);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_size;
	tg->io_disp[rw]++;

	blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
}

static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
			struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	bio_list_add(&tg->bio_lists[rw], bio);
	/* Take a bio reference on tg */
	throtl_ref_get_tg(tg);
	tg->nr_queued[rw]++;
	td->nr_queued[rw]++;
	throtl_enqueue_tg(td, tg);
}

static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
{
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
		tg_may_dispatch(td, tg, bio, &read_wait);

	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
		tg_may_dispatch(td, tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(td, tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(td, tg);
}

static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
				bool rw, struct bio_list *bl)
{
	struct bio *bio;

	bio = bio_list_pop(&tg->bio_lists[rw]);
	tg->nr_queued[rw]--;
	/* Drop bio reference on tg */
	throtl_put_tg(tg);

	BUG_ON(td->nr_queued[rw] <= 0);
	td->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);
	bio_list_add(bl, bio);
	bio->bi_rw |= REQ_THROTTLED;

	throtl_trim_slice(td, tg, rw);
}

static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
				struct bio_list *bl)
{
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = bio_list_peek(&tg->bio_lists[READ]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}

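/*
 * Example: with throtl_grp_quantum = 8, max_nr_reads is 8*3/4 = 6 and
 * max_nr_writes is 8 - 6 = 2, so a group with budget in both directions
 * dispatches at most 6 reads and 2 writes per visit.
 */
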
static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
{
	unsigned int nr_disp = 0;
	struct throtl_grp *tg;
	struct throtl_rb_root *st = &td->tg_service_tree;

	while (1) {
		tg = throtl_rb_first(st);

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(td, tg);

		nr_disp += throtl_dispatch_tg(td, tg, bl);

		if (tg->nr_queued[0] || tg->nr_queued[1]) {
			tg_update_disptime(td, tg);
			throtl_enqueue_tg(td, tg);
		}

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}

static void throtl_process_limit_change(struct throtl_data *td)
{
	struct throtl_grp *tg;
	struct hlist_node *pos, *n;

	if (!td->limits_changed)
		return;

	xchg(&td->limits_changed, false);

	throtl_log(td, "limits changed");

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		if (!tg->limits_changed)
			continue;

		if (!xchg(&tg->limits_changed, false))
			continue;

		throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
			" riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
			tg->iops[READ], tg->iops[WRITE]);

		/*
		 * Restart the slices for both READ and WRITES. It
		 * might happen that a group's limits were dropped
		 * suddenly and we don't want to account recently
		 * dispatched IO with the new low rate.
		 */
		throtl_start_new_slice(td, tg, 0);
		throtl_start_new_slice(td, tg, 1);

		if (throtl_tg_on_rr(tg))
			tg_update_disptime(td, tg);
	}
}

/* Dispatch throttled bios. Should be called without queue lock held. */
static int throtl_dispatch(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	unsigned int nr_disp = 0;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;

	spin_lock_irq(q->queue_lock);

	throtl_process_limit_change(td);

	if (!total_nr_queued(td))
		goto out;

	bio_list_init(&bio_list_on_stack);

	throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
			total_nr_queued(td), td->nr_queued[READ],
			td->nr_queued[WRITE]);

	nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);

	if (nr_disp)
		throtl_log(td, "bios disp=%u", nr_disp);

	throtl_schedule_next_dispatch(td);
out:
	spin_unlock_irq(q->queue_lock);

	/*
	 * If we dispatched some requests, unplug the queue to make sure
	 * immediate dispatch.
	 */
	if (nr_disp) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
	return nr_disp;
}

/* Work function to dispatch throttled bios */
static void blk_throtl_work(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					throtl_work.work);
	struct request_queue *q = td->queue;

	throtl_dispatch(q);
}

/* Call with queue lock held */
static void
throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
{
	struct delayed_work *dwork = &td->throtl_work;

	/* schedule work if limits changed even if no bio is queued */
	if (total_nr_queued(td) || td->limits_changed) {
		/*
		 * We might have a work scheduled to be executed in future.
		 * Cancel that and schedule a new one.
		 */
		__cancel_delayed_work(dwork);
		queue_delayed_work(kthrotld_workqueue, dwork, delay);
		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
				delay, jiffies);
	}
}

static void
throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	/* Something wrong if we are trying to remove same group twice */
	BUG_ON(hlist_unhashed(&tg->tg_node));

	hlist_del_init(&tg->tg_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	throtl_put_tg(tg);
	td->nr_undestroyed_grps--;
}

static void throtl_release_tgs(struct throtl_data *td)
{
	struct hlist_node *pos, *n;
	struct throtl_grp *tg;

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		/*
		 * If cgroup removal path got to blk_group first and removed
		 * it from cgroup list, then it will take care of destroying
		 * the group too.
		 */
		if (!blkiocg_del_blkio_group(&tg->blkg))
			throtl_destroy_tg(td, tg);
	}
}

static void throtl_td_free(struct throtl_data *td)
{
	kfree(td);
}

/*
 * Blk cgroup controller notification saying that blkio_group object is being
 * delinked as associated cgroup object is going away. That also means that
 * no new IO will come in this group. So get rid of this group as soon as
 * any pending IO in the group is finished.
 *
 * This function is called under rcu_read_lock(). key is the rcu protected
 * pointer. That means "key" is a valid throtl_data pointer as long as we
 * are holding the rcu read lock.
 *
 * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
 * it should not be NULL as even if the queue was going away, the cgroup
 * deletion path got to it first.
 */
void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
{
	unsigned long flags;
	struct throtl_data *td = key;

	spin_lock_irqsave(td->queue->queue_lock, flags);
	throtl_destroy_tg(td, tg_of_blkg(blkg));
	spin_unlock_irqrestore(td->queue->queue_lock, flags);
}

static void throtl_update_blkio_group_common(struct throtl_data *td,
				struct throtl_grp *tg)
{
	xchg(&tg->limits_changed, true);
	xchg(&td->limits_changed, true);
	/* Schedule a work now to process the limit change */
	throtl_schedule_delayed_work(td, 0);
}

/*
 * For all update functions, key should be a valid pointer because these
 * update functions are called under blkcg_lock, which means blkg is valid
 * and in turn key is valid. The queue exit path cannot race because of
 * blkcg_lock.
 *
 * Cannot take the queue lock in update functions, as taking the queue lock
 * under blkcg_lock is not allowed. Under other paths we take blkcg_lock
 * under queue_lock.
 */
static void throtl_update_blkio_group_read_bps(void *key,
				struct blkio_group *blkg, u64 read_bps)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->bps[READ] = read_bps;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_update_blkio_group_write_bps(void *key,
				struct blkio_group *blkg, u64 write_bps)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->bps[WRITE] = write_bps;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_update_blkio_group_read_iops(void *key,
				struct blkio_group *blkg, unsigned int read_iops)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->iops[READ] = read_iops;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_update_blkio_group_write_iops(void *key,
				struct blkio_group *blkg, unsigned int write_iops)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->iops[WRITE] = write_iops;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_delayed_work_sync(&td->throtl_work);
}

static struct blkio_policy_type blkio_policy_throtl = {
	.ops = {
		.blkio_unlink_group_fn = throtl_unlink_blkio_group,
		.blkio_update_group_read_bps_fn =
					throtl_update_blkio_group_read_bps,
		.blkio_update_group_write_bps_fn =
					throtl_update_blkio_group_write_bps,
		.blkio_update_group_read_iops_fn =
					throtl_update_blkio_group_read_iops,
		.blkio_update_group_write_iops_fn =
					throtl_update_blkio_group_write_iops,
	},
	.plid = BLKIO_POLICY_THROTL,
};

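/*
 * These callbacks are what connect the cgroup interface to the throttling
 * state. For example, writing "8:16 1048576" to
 * blkio.throttle.read_bps_device reaches
 * throtl_update_blkio_group_read_bps() via the blkio cgroup code, which
 * updates tg->bps[READ] and schedules a dispatch so the new limit takes
 * effect immediately.
 */
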
int blk_throtl_bio(struct request_queue *q, struct bio **biop)
{
	struct throtl_data *td = q->td;
	struct throtl_grp *tg;
	struct bio *bio = *biop;
	bool rw = bio_data_dir(bio), update_disptime = true;
	struct blkio_cgroup *blkcg;

	if (bio->bi_rw & REQ_THROTTLED) {
		bio->bi_rw &= ~REQ_THROTTLED;
		return 0;
	}

	/*
	 * A throtl_grp pointer retrieved under rcu can be used to access
	 * basic fields like stats and io rates. If a group has no rules,
	 * just update the dispatch stats in lockless manner and return.
	 */
	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);
	tg = throtl_find_tg(td, blkcg);
	if (tg) {
		throtl_tg_fill_dev_details(td, tg);

		if (tg_no_rule_group(tg, rw)) {
			blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
					rw, rw_is_sync(bio->bi_rw));
			rcu_read_unlock();
			return 0;
		}
	}
	rcu_read_unlock();

	/*
	 * Either the group has not been allocated yet or it is not an
	 * unlimited IO group.
	 */
	spin_lock_irq(q->queue_lock);
	tg = throtl_get_tg(td);

	if (IS_ERR(tg)) {
		if (PTR_ERR(tg) == -ENODEV) {
			/*
			 * Queue is gone. No queue lock held here.
			 */
			return -ENODEV;
		}
	}

	if (tg->nr_queued[rw]) {
		/*
		 * There is already another bio queued in same dir. No
		 * need to update dispatch time.
		 */
		update_disptime = false;
		goto queue_bio;
	}

	/* Bio is within the rate limit of the group */
	if (tg_may_dispatch(td, tg, bio, NULL)) {
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim the slice even when bios are not being
		 * queued, otherwise it might happen that a bio is not queued
		 * for a long time and the slice keeps on extending and trim
		 * is not called for a long time. Now if limits are reduced
		 * suddenly we take into account all the IO dispatched so far
		 * at the new low rate and newly queued IO gets a really long
		 * dispatch time.
		 *
		 * So keep on trimming the slice even if the bio is not queued.
		 */
		throtl_trim_slice(td, tg, rw);
		goto out;
	}

queue_bio:
	throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
			" iodisp=%u iops=%u queued=%d/%d",
			rw == READ ? 'R' : 'W',
			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
			tg->io_disp[rw], tg->iops[rw],
			tg->nr_queued[READ], tg->nr_queued[WRITE]);

	throtl_add_bio_tg(q->td, tg, bio);
	*biop = NULL;

	if (update_disptime) {
		tg_update_disptime(td, tg);
		throtl_schedule_next_dispatch(td);
	}

out:
	spin_unlock_irq(q->queue_lock);
	return 0;
}

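/*
 * Note: blk_throtl_bio() is invoked from the generic_make_request() path.
 * A return of 0 with *biop left intact tells the caller to submit the bio
 * now; *biop set to NULL means the bio was queued and will be dispatched
 * later by the worker.
 */
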
int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	struct throtl_grp *tg;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_HLIST_HEAD(&td->tg_list);
	td->tg_service_tree = THROTL_RB_ROOT;
	td->limits_changed = false;
	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);

	/* Alloc and init root group. */
	td->queue = q;
	tg = throtl_alloc_tg(td);

	if (!tg) {
		kfree(td);
		return -ENOMEM;
	}

	td->root_tg = tg;

	rcu_read_lock();
	throtl_init_add_tg_lists(td, tg, &blkio_root_cgroup);
	rcu_read_unlock();

	/* Attach throtl data to request queue */
	q->td = td;
	return 0;
}

void blk_throtl_exit(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	bool wait = false;

	BUG_ON(!td);

	throtl_shutdown_wq(q);

	spin_lock_irq(q->queue_lock);
	throtl_release_tgs(td);

	/* If there are other groups */
	if (td->nr_undestroyed_grps > 0)
		wait = true;

	spin_unlock_irq(q->queue_lock);

	/*
	 * Wait for tg->blkg->key accessors to exit their grace periods.
	 * Do this wait only if there are other undestroyed groups out
	 * there (other than root group). This can happen if cgroup deletion
	 * path claimed the responsibility of cleaning up a group before
	 * queue cleanup code got to the group.
	 *
	 * Do not call synchronize_rcu() unconditionally as there are drivers
	 * which create/delete request queues hundreds of times during
	 * scan/boot and synchronize_rcu() can take significant time and slow
	 * down boot.
	 */
	if (wait)
		synchronize_rcu();

	/*
	 * Just being safe: if, after the previous flush, somebody updated
	 * limits through cgroup and another work got queued, cancel it.
	 */
	throtl_shutdown_wq(q);
	throtl_td_free(td);
}

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	blkio_policy_register(&blkio_policy_throtl);
	return 0;
}

module_init(throtl_init);