/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "blk-cgroup.h"
#include <linux/genhd.h>

#define MAX_KEY_LEN 100
static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);
static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)
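
/*
 * Illustrative sketch, not part of the original file: cft->private packs
 * the owning policy into the high 16 bits and the per-policy file id into
 * the low 16 bits, so a cgroup file is declared and later decoded like so:
 *
 *	.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
 *				BLKIO_PROP_weight_device),
 *
 *	plid = BLKIOFILE_POLICY(cft->private);	(-> BLKIO_POLICY_PROP)
 *	attr = BLKIOFILE_ATTR(cft->private);	(-> BLKIO_PROP_weight_device)
 */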
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}
static inline bool cftype_blkg_same_policy(struct cftype *cft,
					   struct blkio_group *blkg)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);

	if (blkg->plid == plid)
		return 1;

	return 0;
}
/* Determines if policy node matches cgroup file being accessed */
static inline bool pn_matches_cftype(struct cftype *cft,
				     struct blkio_policy_node *pn)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	return (plid == pn->plid && fileid == pn->fileid);
}
/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}
/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
			 enum blkio_policy_id plid, int fileid)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
			return pn;
	}

	return NULL;
}
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg, weight);
	}
}
static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
					  int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg, bps);
	}
}
/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}
/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics if the value is already zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}
/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}
/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * The group is already marked empty. This can happen if cfqq got a
	 * new request in the parent group and moved to this group while
	 * being added to the service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
		struct blkio_group *curr_blkg, bool direction,
		bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->sectors += bytes >> 9;
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
			sync);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
			direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
				    bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
			sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			     struct blkio_group *blkg, void *key, dev_t dev,
			     enum blkio_policy_id plid)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	blkg->plid = plid;
	spin_unlock_irqrestore(&blkcg->lock, flags);
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);
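
/*
 * Illustrative sketch, not part of the original file: an IO policy calls
 * blkiocg_add_blkio_group() when it sets up its per-cgroup group. The
 * cfqd/cfqg names below are assumed examples in the spirit of
 * cfq-iosched.c, not verbatim from it:
 *
 *	blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
 *				MKDEV(major, minor), BLKIO_POLICY_PROP);
 *
 * The opaque key (cfqd here) is what blkiocg_lookup_group() matches on
 * later under rcu_read_lock().
 */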
static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}
/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that the blkio_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (css) {
		blkcg = container_of(css, struct blkio_cgroup, css);
		spin_lock_irqsave(&blkcg->lock, flags);
		if (!hlist_unhashed(&blkg->blkcg_node)) {
			__blkiocg_del_blkio_group(blkg);
			ret = 0;
		}
		spin_unlock_irqrestore(&blkcg->lock, flags);
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);
	}
	spin_unlock_irq(&blkcg->lock);
	return 0;
}
static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
			       int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}
static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}
/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
	if (type == BLKIO_STAT_SECTORS)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.sectors, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
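
/*
 * Illustrative sketch, not part of the original file: for a stat array
 * such as BLKIO_STAT_SERVICE_BYTES on device 8:16, the keys filled above
 * appear in the cgroup file roughly as (values invented for the example):
 *
 *	8:16 Read 131072
 *	8:16 Write 32768
 *	8:16 Sync 98304
 *	8:16 Async 65536
 *	8:16 Total 163840
 *
 * Note that only Read + Write feed disk_total.
 */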
static int blkio_check_dev_num(dev_t dev)
{
	int part = 0;
	struct gendisk *disk;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		return -ENODEV;

	return 0;
}
static int blkio_policy_parse_and_set(char *buf,
	struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
{
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	int ret, i = 0;
	unsigned long major, minor, temp;
	dev_t dev;
	u64 bps;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;
		s[i++] = p;
		/* Prevent the user from inputting too many things */
		if (i == 3)
			break;
	}
	if (i != 2)
		return -EINVAL;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		return -EINVAL;
	minor_s = s[0];
	if (!minor_s)
		return -EINVAL;

	ret = strict_strtoul(major_s, 10, &major);
	if (ret)
		return -EINVAL;
	ret = strict_strtoul(minor_s, 10, &minor);
	if (ret)
		return -EINVAL;

	dev = MKDEV(major, minor);
	ret = blkio_check_dev_num(dev);
	if (ret)
		return ret;
	newpn->dev = dev;

	if (s[1] == NULL)
		return -EINVAL;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		ret = strict_strtoul(s[1], 10, &temp);
		if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		    temp > BLKIO_WEIGHT_MAX)
			return -EINVAL;
		newpn->plid = plid;
		newpn->fileid = fileid;
		newpn->val.weight = temp;
		break;
	case BLKIO_POLICY_THROTL:
		ret = strict_strtoull(s[1], 10, &bps);
		if (ret)
			return -EINVAL;
		newpn->plid = plid;
		newpn->fileid = fileid;
		newpn->val.bps = bps;
		break;
	default:
		BUG();
	}

	return 0;
}
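
/*
 * Illustrative sketch, not part of the original file: the buffers this
 * parser accepts are "<major>:<minor> <value>" pairs, for example:
 *
 *	"8:16 500"	weight rule for device 8:16 (BLKIO_POLICY_PROP)
 *	"8:16 1048576"	bytes-per-second rule (BLKIO_POLICY_THROTL)
 *	"8:16 0"	a value of 0 deletes the rule, see
 *			blkio_delete_rule_command() below
 */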
unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device);
	if (pn)
		return pn->val.weight;
	else
		return blkcg->weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);
uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device);
	if (pn)
		return pn->val.bps;
	else
		return -1;
}
uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device);
	if (pn)
		return pn->val.bps;
	else
		return -1;
}
/* Checks whether the user asked for deleting a policy rule */
static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->val.weight == 0)
			return 1;
		break;
	case BLKIO_POLICY_THROTL:
		if (pn->val.bps == 0)
			return 1;
		break;
	default:
		BUG();
	}

	return 0;
}
static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
				     struct blkio_policy_node *newpn)
{
	switch (oldpn->plid) {
	case BLKIO_POLICY_PROP:
		oldpn->val.weight = newpn->val.weight;
		break;
	case BLKIO_POLICY_THROTL:
		oldpn->val.bps = newpn->val.bps;
		break;
	default:
		BUG();
	}
}
/*
 * Some rules/values in blkg have changed. Propagate those to respective
 * policies.
 */
static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, struct blkio_policy_node *pn)
{
	unsigned int weight;
	u64 bps;

	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		weight = pn->val.weight ? pn->val.weight :
				blkcg->weight;
		blkio_update_group_weight(blkg, weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			bps = pn->val.bps ? pn->val.bps : (-1);
			blkio_update_group_bps(blkg, bps, pn->fileid);
			break;
		}
		break;
	default:
		BUG();
	}
}
/*
 * A policy node rule has been updated. Propagate this update to all the
 * block groups which might be affected by this update.
 */
static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
				struct blkio_policy_node *pn)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (pn->dev != blkg->dev || pn->plid != blkg->plid)
			continue;
		blkio_update_blkg_policy(blkcg, blkg, pn);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
}
static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
			      const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	int keep_newpn = 0;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
	if (!pn) {
		if (!blkio_delete_rule_command(newpn)) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (blkio_delete_rule_command(newpn)) {
		blkio_policy_delete_node(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	blkio_update_policy_rule(pn, newpn);

update_io_group:
	blkio_update_policy_node_blkg(blkcg, newpn);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}
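
/*
 * Illustrative usage sketch, not part of the original file; the mount
 * point /cgroup/blkio and group name grp1 are assumptions:
 *
 *	# echo "8:16 300" > /cgroup/blkio/grp1/blkio.weight_device
 *	# echo "8:16 0" > /cgroup/blkio/grp1/blkio.weight_device
 *
 * The first write inserts a policy node for device 8:16; the second,
 * with value 0, removes it again via blkio_delete_rule_command().
 */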
static void
blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->fileid == BLKIO_PROP_weight_device)
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.weight);
		break;
	case BLKIO_POLICY_THROTL:
		if (pn->fileid == BLKIO_THROTL_read_bps_device)
			seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.bps);
		else if (pn->fileid == BLKIO_THROTL_write_bps_device)
			seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.bps);
		break;
	default:
		BUG();
	}
}
/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_policy_node_files(struct cftype *cft,
			struct blkio_cgroup *blkcg, struct seq_file *m)
{
	struct blkio_policy_node *pn;

	if (!list_empty(&blkcg->policy_list)) {
		spin_lock_irq(&blkcg->lock);
		list_for_each_entry(pn, &blkcg->policy_list, node) {
			if (!pn_matches_cftype(cft, pn))
				continue;
			blkio_print_policy_node(m, pn);
		}
		spin_unlock_irq(&blkcg->lock);
	}
}
static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
		struct cftype *cft, struct cgroup_map_cb *cb, enum stat_type type,
		bool show_total)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->dev) {
			if (!cftype_blkg_same_policy(cft, blkg))
				continue;
			spin_lock_irq(&blkg->stats_lock);
			cgroup_total += blkio_get_stat(blkg, cb, blkg->dev,
						type);
			spin_unlock_irq(&blkg->stats_lock);
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);
	rcu_read_unlock();
	return 0;
}
/* All map-kind cgroup files are serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SECTORS, 0);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_BYTES, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICED, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_MERGED, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_AVG_QUEUE_SIZE, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_GROUP_WAIT_TIME, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_BYTES, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICED, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev,
				BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
		if (pn)
			continue;

		blkio_update_group_weight(blkg, blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}
struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
#endif
};
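
/*
 * Illustrative usage sketch, not part of the original file; paths are
 * assumptions:
 *
 *	# echo "8:16 1048576" > /cgroup/blkio/grp1/blkio.throttle.read_bps_device
 *	# cat /cgroup/blkio/grp1/blkio.throttle.io_service_bytes
 *
 * The first command caps reads from device 8:16 at 1MB/s for that cgroup;
 * the second dumps the per-device map produced by blkiocg_file_read_map().
 */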
static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
					blkcg_node);
		key = rcu_dereference(blkg->key);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as the associated cgroup
		 * is going away. Let all the IO controlling policies know
		 * about this event. Currently this is a static call to one
		 * IO controlling policy. Once we have more policies in place,
		 * we need some dynamic registration of callback function.
		 */
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list)
			blkiop->ops.blkio_unlink_group_fn(key, blkg);
		spin_unlock(&blkio_list_lock);
	} while (1);

	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	/* Currently we do not support a hierarchy deeper than two levels (0,1) */
	if (parent != cgroup->top_cgroup)
		return ERR_PTR(-EINVAL);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}
/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
			      struct cgroup *cgroup, struct task_struct *tsk,
			      bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}
static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
			   struct cgroup *prev, struct task_struct *tsk,
			   bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
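
/*
 * Illustrative sketch, not part of the original file: an IO scheduler
 * plugs into this framework by registering a blkio_policy_type at init
 * time. The example_* callback names are assumptions:
 *
 *	static struct blkio_policy_type blkio_policy_example = {
 *		.ops = {
 *			.blkio_unlink_group_fn = example_unlink_blkio_group,
 *			.blkio_update_group_weight_fn =
 *					example_update_group_weight,
 *		},
 *		.plid = BLKIO_POLICY_PROP,
 *	};
 *
 *	blkio_policy_register(&blkio_policy_example);
 *	...
 *	blkio_policy_unregister(&blkio_policy_example);
 */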
static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");