blkio: Changes to IO controller additional stats patches
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 4b686ad08eaaad5384f7d0c6535b5b4154679c5f..6797df50882172a5f73755b9af06d2af447592c5 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
 #include <linux/kdev_t.h>
 #include <linux/module.h>
 #include <linux/err.h>
+#include <linux/blkdev.h>
 #include "blk-cgroup.h"
 
+#define MAX_KEY_LEN 100
+
 static DEFINE_SPINLOCK(blkio_list_lock);
 static LIST_HEAD(blkio_list);
 
@@ -55,13 +58,74 @@ struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
 }
 EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
 
-void blkiocg_update_blkio_group_stats(struct blkio_group *blkg,
-                       unsigned long time, unsigned long sectors)
+void blkio_group_init(struct blkio_group *blkg)
+{
+       spin_lock_init(&blkg->stats_lock);
+}
+EXPORT_SYMBOL_GPL(blkio_group_init);
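
blkio_group_init() only initializes the new per-group stats_lock; the owning I/O scheduler is expected to call it while setting up its group, before linking the group into the cgroup. A minimal sketch of such a caller (the cfq_group and cfq_setup_group names here are hypothetical, for illustration only):

struct cfq_group {			/* hypothetical scheduler-private type */
	struct blkio_group blkg;	/* group tracked by blk-cgroup */
	/* ... scheduler fields ... */
};

static void cfq_setup_group(struct cfq_group *cfqg, struct blkio_cgroup *blkcg,
				void *key, dev_t dev)
{
	blkio_group_init(&cfqg->blkg);	/* initializes blkg->stats_lock */
	blkiocg_add_blkio_group(blkcg, &cfqg->blkg, key, dev);
}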
+
+/*
+ * Add to the appropriate stat variable depending on the request type.
+ * This should be called with the blkg->stats_lock held.
+ */
+static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
+                               bool sync)
+{
+       if (direction)
+               stat[BLKIO_STAT_WRITE] += add;
+       else
+               stat[BLKIO_STAT_READ] += add;
+       if (sync)
+               stat[BLKIO_STAT_SYNC] += add;
+       else
+               stat[BLKIO_STAT_ASYNC] += add;
+}
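
A worked example of the bucketing (illustrative only, not part of the patch): accounting one synchronous write in the "serviced" array bumps the WRITE and SYNC buckets and leaves READ and ASYNC untouched.

static void blkio_add_stat_example(struct blkio_group_stats *stats)
{
	/* caller must hold blkg->stats_lock, as noted above */
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1,
			true /* direction == write */, true /* sync */);
}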
+
+void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&blkg->stats_lock, flags);
+       blkg->stats.time += time;
+       spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
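
A sketch of the intended caller (not part of this patch): when the scheduler expires a queue, it charges the group for the slice it actually consumed. slice_start is a hypothetical per-queue timestamp in jiffies.

static void charge_used_slice(struct blkio_group *blkg,
				unsigned long slice_start)
{
	unsigned long used = jiffies - slice_start;

	blkiocg_update_timeslice_used(blkg, used);
}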
+
+void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
+                               uint64_t bytes, bool direction, bool sync)
+{
+       struct blkio_group_stats *stats;
+       unsigned long flags;
+
+       spin_lock_irqsave(&blkg->stats_lock, flags);
+       stats = &blkg->stats;
+       stats->sectors += bytes >> 9;
+       blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
+                       sync);
+       blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
+                       direction, sync);
+       spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
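
A sketch of the dispatch-side caller (not part of this patch): on dispatch, the request's byte count, direction and sync flag feed the io_serviced/io_service_bytes counters. blk_rq_bytes(), rq_data_dir() and rq_is_sync() are existing block-layer helpers; the function name is hypothetical.

static void account_dispatch(struct blkio_group *blkg, struct request *rq)
{
	blkiocg_update_dispatch_stats(blkg, blk_rq_bytes(rq),
					rq_data_dir(rq), rq_is_sync(rq));
}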
+
+void blkiocg_update_completion_stats(struct blkio_group *blkg,
+       uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
 {
-       blkg->time += time;
-       blkg->sectors += sectors;
+       struct blkio_group_stats *stats;
+       unsigned long flags;
+       unsigned long long now = sched_clock();
+
+       spin_lock_irqsave(&blkg->stats_lock, flags);
+       stats = &blkg->stats;
+       if (time_after64(now, io_start_time))
+               blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
+                               now - io_start_time, direction, sync);
+       if (time_after64(io_start_time, start_time))
+               blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
+                               io_start_time - start_time, direction, sync);
+       spin_unlock_irqrestore(&blkg->stats_lock, flags);
 }
-EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_stats);
+EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
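
A sketch of the completion-side caller (not part of this patch): the scheduler records sched_clock() timestamps when the request is queued and when it is dispatched, then hands both to this hook so that service time (completion minus dispatch) and wait time (dispatch minus queue) can be derived. queue_ts and dispatch_ts are hypothetical values stashed by the scheduler.

static void account_completion(struct blkio_group *blkg, struct request *rq,
				u64 queue_ts, u64 dispatch_ts)
{
	blkiocg_update_completion_stats(blkg, queue_ts, dispatch_ts,
					rq_data_dir(rq), rq_is_sync(rq));
}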
 
 void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
                        struct blkio_group *blkg, void *key, dev_t dev)
@@ -171,13 +235,107 @@ blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
        return 0;
 }
 
-#define SHOW_FUNCTION_PER_GROUP(__VAR)                                 \
+static int
+blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
+{
+       struct blkio_cgroup *blkcg;
+       struct blkio_group *blkg;
+       struct hlist_node *n;
+       struct blkio_group_stats *stats;
+
+       blkcg = cgroup_to_blkio_cgroup(cgroup);
+       spin_lock_irq(&blkcg->lock);
+       hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+               spin_lock(&blkg->stats_lock);
+               stats = &blkg->stats;
+               memset(stats, 0, sizeof(struct blkio_group_stats));
+               spin_unlock(&blkg->stats_lock);
+       }
+       spin_unlock_irq(&blkcg->lock);
+       return 0;
+}
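
From userspace, writing any integer to blkio.reset_stats zeroes every per-device counter of that cgroup. A minimal illustration (the mount point and cgroup name are assumptions; adjust them to the local setup):

#include <stdio.h>

int main(void)
{
	/* hypothetical path: blkio controller mounted at /cgroup/blkio */
	FILE *f = fopen("/cgroup/blkio/test1/blkio.reset_stats", "w");

	if (!f)
		return 1;
	fprintf(f, "1\n");
	fclose(f);
	return 0;
}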
+
+static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
+                               int chars_left, bool diskname_only)
+{
+       snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
+       chars_left -= strlen(str);
+       if (chars_left <= 0) {
+               printk(KERN_WARNING
+                       "Possibly incorrect cgroup stat display format");
+               return;
+       }
+       if (diskname_only)
+               return;
+       switch (type) {
+       case BLKIO_STAT_READ:
+               strlcat(str, " Read", chars_left);
+               break;
+       case BLKIO_STAT_WRITE:
+               strlcat(str, " Write", chars_left);
+               break;
+       case BLKIO_STAT_SYNC:
+               strlcat(str, " Sync", chars_left);
+               break;
+       case BLKIO_STAT_ASYNC:
+               strlcat(str, " Async", chars_left);
+               break;
+       case BLKIO_STAT_TOTAL:
+               strlcat(str, " Total", chars_left);
+               break;
+       default:
+               strlcat(str, " Invalid", chars_left);
+       }
+}
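
A worked example of the resulting keys (illustrative only): for device 8:16, the read bucket is reported under "8:16 Read", while diskname_only callers get just "8:16".

static void key_name_example(void)
{
	char key_str[MAX_KEY_LEN];

	/* yields "8:16 Read" */
	blkio_get_key_name(BLKIO_STAT_READ, MKDEV(8, 16), key_str,
				MAX_KEY_LEN, false);
	/* yields just "8:16" */
	blkio_get_key_name(0, MKDEV(8, 16), key_str, MAX_KEY_LEN, true);
}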
+
+static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
+                               struct cgroup_map_cb *cb, dev_t dev)
+{
+       blkio_get_key_name(0, dev, str, chars_left, true);
+       cb->fill(cb, str, val);
+       return val;
+}
+
+/* This should be called with blkg->stats_lock held */
+static uint64_t blkio_get_stat(struct blkio_group *blkg,
+               struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
+{
+       uint64_t disk_total;
+       char key_str[MAX_KEY_LEN];
+       enum stat_sub_type sub_type;
+
+       if (type == BLKIO_STAT_TIME)
+               return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+                                       blkg->stats.time, cb, dev);
+       if (type == BLKIO_STAT_SECTORS)
+               return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+                                       blkg->stats.sectors, cb, dev);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+       if (type == BLKIO_STAT_DEQUEUE)
+               return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+                                       blkg->stats.dequeue, cb, dev);
+#endif
+
+       for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
+                       sub_type++) {
+               blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
+               cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
+       }
+       disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
+                       blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
+       blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
+       cb->fill(cb, key_str, disk_total);
+       return disk_total;
+}
+
+#define SHOW_FUNCTION_PER_GROUP(__VAR, type, show_total)               \
 static int blkiocg_##__VAR##_read(struct cgroup *cgroup,               \
-                       struct cftype *cftype, struct seq_file *m)      \
+               struct cftype *cftype, struct cgroup_map_cb *cb)        \
 {                                                                      \
        struct blkio_cgroup *blkcg;                                     \
        struct blkio_group *blkg;                                       \
        struct hlist_node *n;                                           \
+       uint64_t cgroup_total = 0;                                      \
                                                                        \
        if (!cgroup_lock_live_group(cgroup))                            \
                return -ENODEV;                                         \
@@ -185,29 +343,38 @@ static int blkiocg_##__VAR##_read(struct cgroup *cgroup,          \
        blkcg = cgroup_to_blkio_cgroup(cgroup);                         \
        rcu_read_lock();                                                \
        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
-               if (blkg->dev)                                          \
-                       seq_printf(m, "%u:%u %lu\n", MAJOR(blkg->dev),  \
-                                MINOR(blkg->dev), blkg->__VAR);        \
+               if (blkg->dev) {                                        \
+                       spin_lock_irq(&blkg->stats_lock);               \
+                       cgroup_total += blkio_get_stat(blkg, cb,        \
+                                               blkg->dev, type);       \
+                       spin_unlock_irq(&blkg->stats_lock);             \
+               }                                                       \
        }                                                               \
+       if (show_total)                                                 \
+               cb->fill(cb, "Total", cgroup_total);                    \
        rcu_read_unlock();                                              \
        cgroup_unlock();                                                \
        return 0;                                                       \
 }
 
-SHOW_FUNCTION_PER_GROUP(time);
-SHOW_FUNCTION_PER_GROUP(sectors);
+SHOW_FUNCTION_PER_GROUP(time, BLKIO_STAT_TIME, 0);
+SHOW_FUNCTION_PER_GROUP(sectors, BLKIO_STAT_SECTORS, 0);
+SHOW_FUNCTION_PER_GROUP(io_service_bytes, BLKIO_STAT_SERVICE_BYTES, 1);
+SHOW_FUNCTION_PER_GROUP(io_serviced, BLKIO_STAT_SERVICED, 1);
+SHOW_FUNCTION_PER_GROUP(io_service_time, BLKIO_STAT_SERVICE_TIME, 1);
+SHOW_FUNCTION_PER_GROUP(io_wait_time, BLKIO_STAT_WAIT_TIME, 1);
 #ifdef CONFIG_DEBUG_BLK_CGROUP
-SHOW_FUNCTION_PER_GROUP(dequeue);
+SHOW_FUNCTION_PER_GROUP(dequeue, BLKIO_STAT_DEQUEUE, 0);
 #endif
 #undef SHOW_FUNCTION_PER_GROUP
 
 #ifdef CONFIG_DEBUG_BLK_CGROUP
-void blkiocg_update_blkio_group_dequeue_stats(struct blkio_group *blkg,
+void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
                        unsigned long dequeue)
 {
-       blkg->dequeue += dequeue;
+       blkg->stats.dequeue += dequeue;
 }
-EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_dequeue_stats);
+EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
 #endif
 
 struct cftype blkio_files[] = {
@@ -218,16 +385,36 @@ struct cftype blkio_files[] = {
        },
        {
                .name = "time",
-               .read_seq_string = blkiocg_time_read,
+               .read_map = blkiocg_time_read,
        },
        {
                .name = "sectors",
-               .read_seq_string = blkiocg_sectors_read,
+               .read_map = blkiocg_sectors_read,
+       },
+       {
+               .name = "io_service_bytes",
+               .read_map = blkiocg_io_service_bytes_read,
+       },
+       {
+               .name = "io_serviced",
+               .read_map = blkiocg_io_serviced_read,
+       },
+       {
+               .name = "io_service_time",
+               .read_map = blkiocg_io_service_time_read,
+       },
+       {
+               .name = "io_wait_time",
+               .read_map = blkiocg_io_wait_time_read,
+       },
+       {
+               .name = "reset_stats",
+               .write_u64 = blkiocg_reset_stats,
        },
 #ifdef CONFIG_DEBUG_BLK_CGROUP
        {
                .name = "dequeue",
-               .read_seq_string = blkiocg_dequeue_read,
+               .read_map = blkiocg_dequeue_read,
        },
 #endif
 };
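
For completeness, a userspace illustration of consuming the new read_map files (not part of the patch): each per-device stat is emitted as "major:minor <bucket> <value>" lines, followed by a per-device Total and, for the io_* files, a cgroup-wide Total line. The path below is an assumption.

#include <stdio.h>

int main(void)
{
	char line[256];
	/* hypothetical path: blkio controller mounted at /cgroup/blkio */
	FILE *f = fopen("/cgroup/blkio/test1/blkio.io_service_bytes", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "8:16 Read <bytes>" */
	fclose(f);
	return 0;
}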