block/blk-stat.c
/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

#define BLK_RQ_STAT_BATCH       64

struct blk_queue_stats {
        struct list_head callbacks;
        spinlock_t lock;
        bool enable_accounting;
};

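/* Reset a single stat bucket so new samples can be accumulated from scratch. */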
static void blk_stat_init(struct blk_rq_stat *stat)
{
        stat->min = -1ULL;
        stat->max = stat->nr_samples = stat->mean = 0;
        stat->batch = stat->nr_batch = 0;
}

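/*
 * Fold the pending batch into the running mean. Samples are first summed
 * into ->batch/->nr_batch and only merged into ->mean here, so the 64-bit
 * division is only needed when a batch is flushed, not on every sample.
 */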
static void blk_stat_flush_batch(struct blk_rq_stat *stat)
{
        const s32 nr_batch = READ_ONCE(stat->nr_batch);
        const s32 nr_samples = READ_ONCE(stat->nr_samples);

        if (!nr_batch)
                return;
        if (!nr_samples)
                stat->mean = div64_s64(stat->batch, nr_batch);
        else {
                stat->mean = div64_s64((stat->mean * nr_samples) +
                                        stat->batch,
                                        nr_batch + nr_samples);
        }

        stat->nr_samples += nr_batch;
        stat->nr_batch = stat->batch = 0;
}

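/*
 * Merge the samples accumulated in @src into @dst, combining min, max and
 * a weighted mean. @src is flushed first so its batched samples are included.
 */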
static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
        blk_stat_flush_batch(src);

        if (!src->nr_samples)
                return;

        dst->min = min(dst->min, src->min);
        dst->max = max(dst->max, src->max);

        if (!dst->nr_samples)
                dst->mean = src->mean;
        else {
                dst->mean = div64_s64((src->mean * src->nr_samples) +
                                        (dst->mean * dst->nr_samples),
                                        dst->nr_samples + src->nr_samples);
        }
        dst->nr_samples += src->nr_samples;
}

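/*
 * Account one sample in a per-cpu bucket. The value is added to the current
 * batch, which is flushed first if adding it would overflow ->batch or would
 * bring ->nr_batch up to BLK_RQ_STAT_BATCH.
 */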
static void __blk_stat_add(struct blk_rq_stat *stat, u64 value)
{
        stat->min = min(stat->min, value);
        stat->max = max(stat->max, value);

        if (stat->batch + value < stat->batch ||
            stat->nr_batch + 1 == BLK_RQ_STAT_BATCH)
                blk_stat_flush_batch(stat);

        stat->batch += value;
        stat->nr_batch++;
}

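/*
 * Called on request completion: compute how long the request has been
 * outstanding, hand that value to the throttling code, and feed it to every
 * active callback registered on the queue, in the bucket chosen by the
 * callback's bucket_fn(). A negative bucket index skips the sample.
 */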
void blk_stat_add(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct blk_stat_callback *cb;
        struct blk_rq_stat *stat;
        int bucket;
        s64 now, value;

        now = __blk_stat_time(ktime_to_ns(ktime_get()));
        if (now < blk_stat_time(&rq->issue_stat))
                return;

        value = now - blk_stat_time(&rq->issue_stat);

        blk_throtl_stat_add(rq, value);

        rcu_read_lock();
        list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
                if (!blk_stat_is_active(cb))
                        continue;

                bucket = cb->bucket_fn(rq);
                if (bucket < 0)
                        continue;

                stat = &get_cpu_ptr(cb->cpu_stat)[bucket];
                __blk_stat_add(stat, value);
                put_cpu_ptr(cb->cpu_stat);
        }
        rcu_read_unlock();
}

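/*
 * Timer callback: collapse the per-cpu buckets of a blk_stat_callback into
 * cb->stat[], reset the per-cpu copies, and hand the aggregated buckets to
 * the owner via cb->timer_fn().
 */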
static void blk_stat_timer_fn(unsigned long data)
{
        struct blk_stat_callback *cb = (void *)data;
        unsigned int bucket;
        int cpu;

        for (bucket = 0; bucket < cb->buckets; bucket++)
                blk_stat_init(&cb->stat[bucket]);

        for_each_online_cpu(cpu) {
                struct blk_rq_stat *cpu_stat;

                cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
                for (bucket = 0; bucket < cb->buckets; bucket++) {
                        blk_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
                        blk_stat_init(&cpu_stat[bucket]);
                }
        }

        cb->timer_fn(cb);
}

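/*
 * Allocate a callback with @buckets aggregated stat buckets plus per-cpu
 * working copies. @timer_fn is invoked from the timer with the aggregated
 * stats, @bucket_fn maps a request to a bucket index (negative to skip the
 * sample), and @data is opaque to this code. Returns NULL if any allocation
 * fails.
 */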
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
                        int (*bucket_fn)(const struct request *),
                        unsigned int buckets, void *data)
{
        struct blk_stat_callback *cb;

        cb = kmalloc(sizeof(*cb), GFP_KERNEL);
        if (!cb)
                return NULL;

        cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
                                 GFP_KERNEL);
        if (!cb->stat) {
                kfree(cb);
                return NULL;
        }
        cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
                                      __alignof__(struct blk_rq_stat));
        if (!cb->cpu_stat) {
                kfree(cb->stat);
                kfree(cb);
                return NULL;
        }

        cb->timer_fn = timer_fn;
        cb->bucket_fn = bucket_fn;
        cb->data = data;
        cb->buckets = buckets;
        setup_timer(&cb->timer, blk_stat_timer_fn, (unsigned long)cb);

        return cb;
}
EXPORT_SYMBOL_GPL(blk_stat_alloc_callback);

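/*
 * Register @cb on @q: reset its per-cpu buckets for all possible CPUs, add
 * it to the queue's callback list under the stats lock, and set
 * QUEUE_FLAG_STATS so completions are accounted.
 */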
void blk_stat_add_callback(struct request_queue *q,
                           struct blk_stat_callback *cb)
{
        unsigned int bucket;
        int cpu;

        for_each_possible_cpu(cpu) {
                struct blk_rq_stat *cpu_stat;

                cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
                for (bucket = 0; bucket < cb->buckets; bucket++)
                        blk_stat_init(&cpu_stat[bucket]);
        }

        spin_lock(&q->stats->lock);
        list_add_tail_rcu(&cb->list, &q->stats->callbacks);
        set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
        spin_unlock(&q->stats->lock);
}
EXPORT_SYMBOL_GPL(blk_stat_add_callback);

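/*
 * Unregister @cb from @q. QUEUE_FLAG_STATS is cleared only if no other
 * callbacks remain and accounting has not been enabled permanently. The
 * callback's timer is stopped synchronously before returning.
 */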
void blk_stat_remove_callback(struct request_queue *q,
                              struct blk_stat_callback *cb)
{
        spin_lock(&q->stats->lock);
        list_del_rcu(&cb->list);
        if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
                clear_bit(QUEUE_FLAG_STATS, &q->queue_flags);
        spin_unlock(&q->stats->lock);

        del_timer_sync(&cb->timer);
}
EXPORT_SYMBOL_GPL(blk_stat_remove_callback);

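/* RCU callback that actually frees the per-cpu buckets and the callback. */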
static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
        struct blk_stat_callback *cb;

        cb = container_of(head, struct blk_stat_callback, rcu);
        free_percpu(cb->cpu_stat);
        kfree(cb->stat);
        kfree(cb);
}

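/*
 * Free @cb (may be NULL) once an RCU grace period has elapsed, so that
 * readers still walking the callback list can finish safely first.
 */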
void blk_stat_free_callback(struct blk_stat_callback *cb)
{
        if (cb)
                call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}
EXPORT_SYMBOL_GPL(blk_stat_free_callback);

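/*
 * Permanently enable stats accounting for @q: once set, QUEUE_FLAG_STATS is
 * no longer cleared when the last callback is removed.
 */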
void blk_stat_enable_accounting(struct request_queue *q)
{
        spin_lock(&q->stats->lock);
        q->stats->enable_accounting = true;
        set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
        spin_unlock(&q->stats->lock);
}

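/* Allocate and initialize the per-queue stats bookkeeping structure. */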
struct blk_queue_stats *blk_alloc_queue_stats(void)
{
        struct blk_queue_stats *stats;

        stats = kmalloc(sizeof(*stats), GFP_KERNEL);
        if (!stats)
                return NULL;

        INIT_LIST_HEAD(&stats->callbacks);
        spin_lock_init(&stats->lock);
        stats->enable_accounting = false;

        return stats;
}

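/*
 * Free the per-queue stats structure. All callbacks should have been removed
 * by now; the WARN_ON catches anything still on the list.
 */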
void blk_free_queue_stats(struct blk_queue_stats *stats)
{
        if (!stats)
                return;

        WARN_ON(!list_empty(&stats->callbacks));

        kfree(stats);
}