/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"
/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;
/* Throttling is performed over a 100ms slice and after that the slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */
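/*
 * For a rough feel of the numbers (illustration only, not from the original
 * source): with HZ == 100 a slice is HZ/10 == 10 jiffies, with HZ == 1000 it
 * is 100 jiffies -- either way 100ms of wall-clock time per slice.
 */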
static struct blkio_policy_type blkio_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
static void throtl_schedule_delayed_work(struct throtl_data *td,
					 unsigned long delay);
struct throtl_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned int count;
	unsigned long min_disptime;
};

#define THROTL_RB_ROOT	(struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
			.count = 0, .min_disptime = 0}
#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
struct throtl_grp {
	/* active throtl group service_tree member */
	struct rb_node rb_node;

	/*
	 * Dispatch time in jiffies. This is the estimated time when the group
	 * will unthrottle and is ready to dispatch more bios. It is used as
	 * the key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* Two lists for READ and WRITE */
	struct bio_list bio_lists[2];

	/* Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bios dispatched in current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Some throttle limits got updated for the group */
	bool limits_changed;
};
struct throtl_data {
	/* service tree for active throtl groups */
	struct throtl_rb_root tg_service_tree;

	struct throtl_grp *root_tg;
	struct request_queue *queue;

	/* Total number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/*
	 * number of total undestroyed groups
	 */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct delayed_work throtl_work;

	bool limits_changed;
};
static inline struct throtl_grp *blkg_to_tg(struct blkio_group *blkg)
{
	return blkg_to_pdata(blkg, &blkio_policy_throtl);
}

static inline struct blkio_group *tg_to_blkg(struct throtl_grp *tg)
{
	return pdata_to_blkg(tg);
}
enum tg_state_flags {
	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
};
#define THROTL_TG_FNS(name)						\
static inline void throtl_mark_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags |= (1 << THROTL_TG_FLAG_##name);			\
}									\
static inline void throtl_clear_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags &= ~(1 << THROTL_TG_FLAG_##name);			\
}									\
static inline int throtl_tg_##name(const struct throtl_grp *tg)	\
{									\
	return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0;	\
}

THROTL_TG_FNS(on_rr);
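/*
 * For illustration: the THROTL_TG_FNS(on_rr) invocation above expands to
 * three helpers that manipulate the THROTL_TG_FLAG_on_rr bit in tg->flags:
 *
 *	throtl_mark_tg_on_rr(tg);	// set the bit: tg is on the service tree
 *	throtl_clear_tg_on_rr(tg);	// clear the bit
 *	throtl_tg_on_rr(tg);		// test the bit, returns 0 or 1
 */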
#define throtl_log_tg(td, tg, fmt, args...)				\
	blk_add_trace_msg((td)->queue, "throtl %s " fmt,		\
			  blkg_path(tg_to_blkg(tg)), ##args);

#define throtl_log(td, fmt, args...)	\
	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
static inline unsigned int total_nr_queued(struct throtl_data *td)
{
	return td->nr_queued[0] + td->nr_queued[1];
}
static void throtl_init_blkio_group(struct blkio_group *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);

	RB_CLEAR_NODE(&tg->rb_node);
	bio_list_init(&tg->bio_lists[0]);
	bio_list_init(&tg->bio_lists[1]);
	tg->limits_changed = false;

	/* -1 means the limit is not set, i.e. unlimited */
	tg->bps[READ] = -1;
	tg->bps[WRITE] = -1;
	tg->iops[READ] = -1;
	tg->iops[WRITE] = -1;
}
static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
					   struct blkio_cgroup *blkcg)
{
	/*
	 * This is the common case when there are no blkio cgroups.
	 * Avoid lookup in this case
	 */
	if (blkcg == &blkio_root_cgroup)
		return td->root_tg;

	return blkg_to_tg(blkg_lookup(blkcg, td->queue));
}
static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
						  struct blkio_cgroup *blkcg)
{
	struct request_queue *q = td->queue;
	struct throtl_grp *tg = NULL;

	/*
	 * This is the common case when there are no blkio cgroups.
	 * Avoid lookup in this case
	 */
	if (blkcg == &blkio_root_cgroup) {
		tg = td->root_tg;
	} else {
		struct blkio_group *blkg;

		blkg = blkg_lookup_create(blkcg, q, false);

		/* if %NULL and @q is alive, fall back to root_tg */
		if (!IS_ERR(blkg))
			tg = blkg_to_tg(blkg);
		else if (!blk_queue_dead(q))
			tg = td->root_tg;
	}

	return tg;
}
static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_tg(root->left);

	return NULL;
}
static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}
static void update_min_dispatch_time(struct throtl_rb_root *st)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(st);
	if (!tg)
		return;

	st->min_disptime = tg->disptime;
}
static void
tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &st->rb);
}
static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	tg_service_tree_add(st, tg);
	throtl_mark_tg_on_rr(tg);
	st->count++;
}

static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (!throtl_tg_on_rr(tg))
		__throtl_enqueue_tg(td, tg);
}

static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
	throtl_clear_tg_on_rr(tg);
}

static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (throtl_tg_on_rr(tg))
		__throtl_dequeue_tg(td, tg);
}
static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	/*
	 * If there are no more bios pending, there is nothing to schedule.
	 */
	if (!total_nr_queued(td))
		return;

	update_min_dispatch_time(st);

	if (time_before_eq(st->min_disptime, jiffies))
		throtl_schedule_delayed_work(td, 0);
	else
		throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
}
static inline void
throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}
static inline void throtl_set_slice_end(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}
/* Determine if the previously allocated or extended slice is complete or not */
static bool
throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return false;

	return true;
}
/* Trim the used slices and adjust slice start accordingly */
static inline void
throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps is unlimited (-1), the time slice doesn't get renewed.
	 * Don't try to trim the slice if the slice is used up. A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(td, tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially the cgroup limit was very low, resulting in a high
	 * slice_end, but later the limit was bumped up and the bio was
	 * dispatched sooner; then we need to reduce slice_end. A high bogus
	 * slice_end is bad because it does not allow a new slice to start.
	 */
	throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg->bps[rw] * throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
			" start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
			tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
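/*
 * A worked example of the trim math above (illustrative numbers, not from
 * the original source): assume HZ == 100, so throtl_slice == 10 jiffies,
 * with bps == 1048576 (1 MiB/s) and iops == 100. If 35 jiffies have elapsed
 * since slice_start, then nr_slices = 35 / 10 = 3, and:
 *
 *	bytes_trim = 1048576 * 10 * 3 / 100 = 314572 bytes
 *	io_trim    = (100 * 10 * 3) / 100   = 30 ios
 *
 * Those amounts are subtracted from bytes_disp/io_disp and slice_start
 * moves forward by 3 * 10 = 30 jiffies, so only the partial current slice
 * remains accounted.
 */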
static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value, as the minimum iops
	 * can be 1. Then at most jiffy_elapsed should be equivalent to 1
	 * second, as we will allow dispatch after 1 second and after that
	 * the slice should have been trimmed.
	 */
	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return 0;
}
static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time doesn't account for the slice rounding up we did
	 * above. Add that time in as well.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return 0;
}
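/*
 * Worked example for the bps wait calculation above (illustrative numbers,
 * not from the original source): HZ == 100, bps == 1048576 (1 MiB/s),
 * jiffy_elapsed == 7, so jiffy_elapsed_rnd rounds up to 10 and
 * bytes_allowed = 1048576 * 10 / 100 = 104857. With bytes_disp == 100000
 * and a 65536-byte bio, extra_bytes = 165536 - 104857 = 60679, giving
 * jiffy_wait = 60679 * 100 / 1048576 = 5 jiffies, plus the 3 jiffies of
 * rounding, i.e. *wait = 8 jiffies.
 */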
static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
		return 1;

	return 0;
}
/*
 * Returns whether one can dispatch a bio or not. Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
				struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently the whole state machine of the group depends on the
	 * first bio queued in the group's bio list. So one should not be
	 * calling this function with a different bio if there are other
	 * bios queued.
	 */
	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/*
	 * If the previous slice expired, start a new one, otherwise
	 * renew/extend the existing slice to make sure it is at least
	 * throtl_slice long since now.
	 */
	if (throtl_slice_used(td, tg, rw))
		throtl_start_new_slice(td, tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
	    && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return 1;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(td, tg, rw, jiffies + max_wait);

	return 0;
}
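/*
 * For illustration: if the bps check asks for a 5-jiffy wait and the iops
 * check asks for 12, the bio must satisfy both limits, so max_wait == 12
 * and the slice is extended to cover jiffies + 12, keeping the estimate
 * valid within the current slice.
 */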
static void throtl_update_dispatch_stats(struct blkio_group *blkg, u64 bytes,
					 int rw)
{
	struct blkg_policy_data *pd = blkg->pd[BLKIO_POLICY_THROTL];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/* If per cpu stats are not allocated yet, don't do any accounting. */
	if (pd->stats_cpu == NULL)
		return;

	/*
	 * Disabling interrupts provides mutual exclusion between two
	 * writes on the same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);

	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);

	local_irq_restore(flags);
}
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_size;
	tg->io_disp[rw]++;

	throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
}
static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
			struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	bio_list_add(&tg->bio_lists[rw], bio);
	/* Take a bio reference on tg */
	blkg_get(tg_to_blkg(tg));
	tg->nr_queued[rw]++;
	td->nr_queued[rw]++;
	throtl_enqueue_tg(td, tg);
}
static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
{
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
		tg_may_dispatch(td, tg, bio, &read_wait);

	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
		tg_may_dispatch(td, tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(td, tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(td, tg);
}
static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
				bool rw, struct bio_list *bl)
{
	struct bio *bio;

	bio = bio_list_pop(&tg->bio_lists[rw]);
	tg->nr_queued[rw]--;
	/* Drop bio reference on blkg */
	blkg_put(tg_to_blkg(tg));

	BUG_ON(td->nr_queued[rw] <= 0);
	td->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);
	bio_list_add(bl, bio);
	bio->bi_rw |= REQ_THROTTLED;

	throtl_trim_slice(td, tg, rw);
}
static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
				struct bio_list *bl)
{
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = bio_list_peek(&tg->bio_lists[READ]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}
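/*
 * With the default throtl_grp_quantum of 8, the split above works out to
 * max_nr_reads = 8 * 3 / 4 = 6 and max_nr_writes = 8 - 6 = 2, i.e. at most
 * 6 reads and 2 writes per group per dispatch round.
 */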
static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
{
	unsigned int nr_disp = 0;
	struct throtl_grp *tg;
	struct throtl_rb_root *st = &td->tg_service_tree;

	while (1) {
		tg = throtl_rb_first(st);

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(td, tg);

		nr_disp += throtl_dispatch_tg(td, tg, bl);

		if (tg->nr_queued[0] || tg->nr_queued[1]) {
			tg_update_disptime(td, tg);
			throtl_enqueue_tg(td, tg);
		}

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}
static void throtl_process_limit_change(struct throtl_data *td)
{
	struct request_queue *q = td->queue;
	struct blkio_group *blkg, *n;

	if (!td->limits_changed)
		return;

	xchg(&td->limits_changed, false);

	throtl_log(td, "limits changed");

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (!tg->limits_changed)
			continue;

		if (!xchg(&tg->limits_changed, false))
			continue;

		throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
			" riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
			tg->iops[READ], tg->iops[WRITE]);

		/*
		 * Restart the slices for both READ and WRITE. It might
		 * happen that a group's limits are dropped suddenly and
		 * we don't want to account recently dispatched IO with
		 * the new low rate.
		 */
		throtl_start_new_slice(td, tg, 0);
		throtl_start_new_slice(td, tg, 1);

		if (throtl_tg_on_rr(tg))
			tg_update_disptime(td, tg);
	}
}
/* Dispatch throttled bios. Should be called without queue lock held. */
static int throtl_dispatch(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	unsigned int nr_disp = 0;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;

	spin_lock_irq(q->queue_lock);

	throtl_process_limit_change(td);

	if (!total_nr_queued(td))
		goto out;

	bio_list_init(&bio_list_on_stack);

	throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
			total_nr_queued(td), td->nr_queued[READ],
			td->nr_queued[WRITE]);

	nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);

	if (nr_disp)
		throtl_log(td, "bios disp=%u", nr_disp);

	throtl_schedule_next_dispatch(td);
out:
	spin_unlock_irq(q->queue_lock);

	/*
	 * If we dispatched some requests, unplug the queue to make sure
	 * they get issued immediately.
	 */
	if (nr_disp) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
	return nr_disp;
}
void blk_throtl_work(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					throtl_work.work);
	struct request_queue *q = td->queue;

	throtl_dispatch(q);
}
/* Call with queue lock held */
static void
throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
{
	struct delayed_work *dwork = &td->throtl_work;

	/* schedule work if limits changed even if no bio is queued */
	if (total_nr_queued(td) || td->limits_changed) {
		/*
		 * We might have a work scheduled to be executed in future.
		 * Cancel that and schedule a new one.
		 */
		__cancel_delayed_work(dwork);
		queue_delayed_work(kthrotld_workqueue, dwork, delay);
		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
				delay, jiffies);
	}
}
/*
 * Cannot take the queue lock in the update functions, as taking the queue
 * lock under blkcg_lock is not allowed. On other paths we take blkcg_lock
 * under the queue lock.
 */
static void throtl_update_blkio_group_common(struct throtl_data *td,
					     struct throtl_grp *tg)
{
	xchg(&tg->limits_changed, true);
	xchg(&td->limits_changed, true);
	/* Schedule a work now to process the limit change */
	throtl_schedule_delayed_work(td, 0);
}
static u64 blkg_prfill_cpu_rwstat(struct seq_file *sf,
				  struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat rwstat = { }, tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

		tmp = blkg_rwstat_read((void *)sc + off);
		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			rwstat.cnt[i] += tmp.cnt[i];
	}

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
/* print per-cpu blkg_rwstat specified by BLKCG_STAT_PRIV() */
static int blkcg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
				  struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_rwstat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), true);
	return 0;
}
static u64 blkg_prfill_conf_u64(struct seq_file *sf,
				struct blkg_policy_data *pd, int off)
{
	u64 v = *(u64 *)((void *)&pd->conf + off);

	if (!v)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}
static int blkcg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
				struct seq_file *sf)
{
	blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp),
			  blkg_prfill_conf_u64, BLKIO_POLICY_THROTL,
			  cft->private, false);
	return 0;
}
static void throtl_update_blkio_group_read_bps(struct blkio_group *blkg,
					       u64 read_bps)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);

	tg->bps[READ] = read_bps;
	throtl_update_blkio_group_common(blkg->q->td, tg);
}

static void throtl_update_blkio_group_write_bps(struct blkio_group *blkg,
						u64 write_bps)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);

	tg->bps[WRITE] = write_bps;
	throtl_update_blkio_group_common(blkg->q->td, tg);
}

static void throtl_update_blkio_group_read_iops(struct blkio_group *blkg,
						u64 read_iops)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);

	tg->iops[READ] = read_iops;
	throtl_update_blkio_group_common(blkg->q->td, tg);
}

static void throtl_update_blkio_group_write_iops(struct blkio_group *blkg,
						 u64 write_iops)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);

	tg->iops[WRITE] = write_iops;
	throtl_update_blkio_group_common(blkg->q->td, tg);
}
static int blkcg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
			      const char *buf,
			      void (*update)(struct blkio_group *, u64))
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	struct blkg_policy_data *pd;
	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	pd = ctx.blkg->pd[BLKIO_POLICY_THROTL];
	if (pd) {
		*(u64 *)((void *)&pd->conf + cft->private) = ctx.v;
		update(ctx.blkg, ctx.v ?: -1);
		ret = 0;
	}

	blkg_conf_finish(&ctx);
	return ret;
}
static int blkcg_set_conf_bps_r(struct cgroup *cgrp, struct cftype *cft,
				const char *buf)
{
	return blkcg_set_conf_u64(cgrp, cft, buf,
				  throtl_update_blkio_group_read_bps);
}

static int blkcg_set_conf_bps_w(struct cgroup *cgrp, struct cftype *cft,
				const char *buf)
{
	return blkcg_set_conf_u64(cgrp, cft, buf,
				  throtl_update_blkio_group_write_bps);
}

static int blkcg_set_conf_iops_r(struct cgroup *cgrp, struct cftype *cft,
				 const char *buf)
{
	return blkcg_set_conf_u64(cgrp, cft, buf,
				  throtl_update_blkio_group_read_iops);
}

static int blkcg_set_conf_iops_w(struct cgroup *cgrp, struct cftype *cft,
				 const char *buf)
{
	return blkcg_set_conf_u64(cgrp, cft, buf,
				  throtl_update_blkio_group_write_iops);
}
static struct cftype throtl_files[] = {
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct blkio_group_conf, bps[READ]),
		.read_seq_string = blkcg_print_conf_u64,
		.write_string = blkcg_set_conf_bps_r,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = offsetof(struct blkio_group_conf, bps[WRITE]),
		.read_seq_string = blkcg_print_conf_u64,
		.write_string = blkcg_set_conf_bps_w,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = offsetof(struct blkio_group_conf, iops[READ]),
		.read_seq_string = blkcg_print_conf_u64,
		.write_string = blkcg_set_conf_iops_r,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = offsetof(struct blkio_group_conf, iops[WRITE]),
		.read_seq_string = blkcg_print_conf_u64,
		.write_string = blkcg_set_conf_iops_w,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL,
				offsetof(struct blkio_group_stats_cpu,
					 service_bytes)),
		.read_seq_string = blkcg_print_cpu_rwstat,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL,
				offsetof(struct blkio_group_stats_cpu,
					 serviced)),
		.read_seq_string = blkcg_print_cpu_rwstat,
	},
	{ }	/* terminate */
};
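/*
 * Example usage from userspace (illustrative; assumes the blkio cgroup
 * hierarchy is mounted at /cgroup). The files above appear with the "blkio."
 * subsystem prefix; limits are written as "<major>:<minor> <value>":
 *
 *	# limit reads from device 8:16 to 1 MiB/s
 *	echo "8:16 1048576" > /cgroup/blkio.throttle.read_bps_device
 *	# read back the configured limit and the per-device stats
 *	cat /cgroup/blkio.throttle.read_bps_device
 *	cat /cgroup/blkio.throttle.io_service_bytes
 */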
static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_delayed_work_sync(&td->throtl_work);
}
static struct blkio_policy_type blkio_policy_throtl = {
	.ops = {
		.blkio_init_group_fn = throtl_init_blkio_group,
	},
	.plid = BLKIO_POLICY_THROTL,
	.pdata_size = sizeof(struct throtl_grp),
	.cftypes = throtl_files,
};
bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
	struct throtl_data *td = q->td;
	struct throtl_grp *tg;
	bool rw = bio_data_dir(bio), update_disptime = true;
	struct blkio_cgroup *blkcg;
	bool throttled = false;

	if (bio->bi_rw & REQ_THROTTLED) {
		bio->bi_rw &= ~REQ_THROTTLED;
		goto out;
	}

	/* bio_associate_current() needs ioc, try creating */
	create_io_context(GFP_ATOMIC, q->node);

	/*
	 * A throtl_grp pointer retrieved under rcu can be used to access
	 * basic fields like stats and io rates. If a group has no rules,
	 * just update the dispatch stats in a lockless manner and return.
	 */
	rcu_read_lock();
	blkcg = bio_blkio_cgroup(bio);
	tg = throtl_lookup_tg(td, blkcg);
	if (tg) {
		if (tg_no_rule_group(tg, rw)) {
			throtl_update_dispatch_stats(tg_to_blkg(tg),
						     bio->bi_size, bio->bi_rw);
			goto out_unlock_rcu;
		}
	}

	/*
	 * Either the group has not been allocated yet or it is not an
	 * unlimited IO group
	 */
	spin_lock_irq(q->queue_lock);
	tg = throtl_lookup_create_tg(td, blkcg);
	if (unlikely(!tg))
		goto out_unlock;

	if (tg->nr_queued[rw]) {
		/*
		 * There is already another bio queued in the same direction.
		 * No need to update dispatch time.
		 */
		update_disptime = false;
		goto queue_bio;
	}

	/* Bio is within the rate limit of the group */
	if (tg_may_dispatch(td, tg, bio, NULL)) {
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim the slice even when bios are not being
		 * queued, otherwise it might happen that a bio is not queued
		 * for a long time and the slice keeps on extending and trim
		 * is not called for a long time. Now if limits are reduced
		 * suddenly we take into account all the IO dispatched so far
		 * at the new low rate and newly queued IO gets a really long
		 * dispatch time.
		 *
		 * So keep on trimming the slice even if a bio is not queued.
		 */
		throtl_trim_slice(td, tg, rw);
		goto out_unlock;
	}

queue_bio:
	throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
			" iodisp=%u iops=%u queued=%d/%d",
			rw == READ ? 'R' : 'W',
			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
			tg->io_disp[rw], tg->iops[rw],
			tg->nr_queued[READ], tg->nr_queued[WRITE]);

	bio_associate_current(bio);
	throtl_add_bio_tg(q->td, tg, bio);
	throttled = true;

	if (update_disptime) {
		tg_update_disptime(td, tg);
		throtl_schedule_next_dispatch(td);
	}

out_unlock:
	spin_unlock_irq(q->queue_lock);
out_unlock_rcu:
	rcu_read_unlock();
out:
	return throttled;
}
/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct throtl_data *td = q->td;
	struct throtl_rb_root *st = &td->tg_service_tree;
	struct throtl_grp *tg;
	struct bio_list bl;
	struct bio *bio;

	WARN_ON_ONCE(!queue_is_locked(q));

	bio_list_init(&bl);

	while ((tg = throtl_rb_first(st))) {
		throtl_dequeue_tg(td, tg);

		while ((bio = bio_list_peek(&tg->bio_lists[READ])))
			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
		while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
	}
	spin_unlock_irq(q->queue_lock);

	while ((bio = bio_list_pop(&bl)))
		generic_make_request(bio);

	spin_lock_irq(q->queue_lock);
}
int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	struct blkio_group *blkg;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	td->tg_service_tree = THROTL_RB_ROOT;
	td->limits_changed = false;
	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);

	q->td = td;
	td->queue = q;

	/* alloc and init root group. */
	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	blkg = blkg_lookup_create(&blkio_root_cgroup, q, true);
	if (!IS_ERR(blkg))
		td->root_tg = blkg_to_tg(blkg);

	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	if (!td->root_tg) {
		kfree(td);
		return -ENOMEM;
	}
	return 0;
}
void blk_throtl_exit(struct request_queue *q)
{
	BUG_ON(!q->td);
	throtl_shutdown_wq(q);
	kfree(q->td);
}
static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	blkio_policy_register(&blkio_policy_throtl);
	return 0;
}

module_init(throtl_init);