/* block/blk-stat.c (from the karo-tx-linux tree) */
1 /*
2  * Block stat tracking code
3  *
4  * Copyright (C) 2016 Jens Axboe
5  */
6 #include <linux/kernel.h>
7 #include <linux/rculist.h>
8 #include <linux/blk-mq.h>
9
10 #include "blk-stat.h"
11 #include "blk-mq.h"
12 #include "blk.h"
13
14 #define BLK_RQ_STAT_BATCH       64
15
/* Per-queue container for registered stat callbacks. */
struct blk_queue_stats {
	struct list_head callbacks;	/* blk_stat_callback list; read under RCU */
	spinlock_t lock;		/* serializes list mutation and flag updates */
	bool enable_accounting;		/* keep QUEUE_FLAG_STATS set even with no callbacks */
};
21
22 static void blk_stat_init(struct blk_rq_stat *stat)
23 {
24         stat->min = -1ULL;
25         stat->max = stat->nr_samples = stat->mean = 0;
26         stat->batch = stat->nr_batch = 0;
27 }
28
/*
 * Fold the pending batch (a plain sum of recent samples in stat->batch)
 * into the running mean and sample count.  Batching defers the 64-bit
 * division until BLK_RQ_STAT_BATCH samples accumulate or overflow looms.
 */
static void blk_stat_flush_batch(struct blk_rq_stat *stat)
{
	/*
	 * Snapshot both counters once so the arithmetic below works on a
	 * consistent pair rather than values the compiler may reload.
	 */
	const s32 nr_batch = READ_ONCE(stat->nr_batch);
	const s32 nr_samples = READ_ONCE(stat->nr_samples);

	if (!nr_batch)
		return;
	if (!nr_samples)
		/* No prior samples: the mean is just the batch average. */
		stat->mean = div64_s64(stat->batch, nr_batch);
	else {
		/* Combine old mean and new batch, weighted by sample count. */
		stat->mean = div64_s64((stat->mean * nr_samples) +
					stat->batch,
					nr_batch + nr_samples);
	}

	stat->nr_samples += nr_batch;
	stat->nr_batch = stat->batch = 0;
}
47
/*
 * Merge the statistics from @src into @dst.  @src's pending batch is
 * flushed first so its mean/count are current; @src is not reset here
 * (callers re-init it themselves, see blk_stat_timer_fn()).
 */
static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	blk_stat_flush_batch(src);

	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	if (!dst->nr_samples)
		dst->mean = src->mean;
	else {
		/* Sample-count-weighted average of the two means. */
		dst->mean = div64_s64((src->mean * src->nr_samples) +
					(dst->mean * dst->nr_samples),
					dst->nr_samples + src->nr_samples);
	}
	dst->nr_samples += src->nr_samples;
}
67
/*
 * Record one sample in @stat.  Samples accumulate in a batch sum and are
 * folded into the running mean lazily (see blk_stat_flush_batch()) to
 * amortize the cost of the 64-bit division.
 */
static void __blk_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);

	/*
	 * Flush before the batch sum would wrap ("sum + value < sum"
	 * detects wraparound) or once the batch reaches capacity.
	 */
	if (stat->batch + value < stat->batch ||
	    stat->nr_batch + 1 == BLK_RQ_STAT_BATCH)
		blk_stat_flush_batch(stat);

	stat->batch += value;
	stat->nr_batch++;
}
80
/*
 * Account the issue-to-now time of @rq with every active stat callback
 * registered on its queue, and with blk-throttle.
 */
void blk_stat_add(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket;
	s64 now, value;

	now = __blk_stat_time(ktime_to_ns(ktime_get()));
	/* Guard against a bogus/future issue time; never record a negative. */
	if (now < blk_stat_time(&rq->issue_stat))
		return;

	/* Elapsed time since issue, in __blk_stat_time() units. */
	value = now - blk_stat_time(&rq->issue_stat);

	blk_throtl_stat_add(rq, value);

	/* The callback list is RCU-protected; writers hold q->stats->lock. */
	rcu_read_lock();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (blk_stat_is_active(cb)) {
			bucket = cb->bucket_fn(rq);
			/* A negative bucket means "don't account this rq". */
			if (bucket < 0)
				continue;
			stat = &this_cpu_ptr(cb->cpu_stat)[bucket];
			__blk_stat_add(stat, value);
		}
	}
	rcu_read_unlock();
}
109
/*
 * Timer handler: collapse the per-cpu bucket counters into cb->stat and
 * hand the aggregate to the owner's timer_fn.  Per-cpu counters are reset
 * so the next window starts fresh.
 */
static void blk_stat_timer_fn(unsigned long data)
{
	struct blk_stat_callback *cb = (void *)data;
	unsigned int bucket;
	int cpu;

	/* Start each aggregate bucket from a clean state. */
	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_stat_init(&cb->stat[bucket]);

	/*
	 * NOTE(review): only online CPUs are summed here, while
	 * blk_stat_add_callback() initializes all possible CPUs; samples on
	 * a CPU that goes offline mid-window appear to be skipped — confirm
	 * this is acceptable for the users of this API.
	 */
	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}
131
132 struct blk_stat_callback *
133 blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
134                         int (*bucket_fn)(const struct request *),
135                         unsigned int buckets, void *data)
136 {
137         struct blk_stat_callback *cb;
138
139         cb = kmalloc(sizeof(*cb), GFP_KERNEL);
140         if (!cb)
141                 return NULL;
142
143         cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
144                                  GFP_KERNEL);
145         if (!cb->stat) {
146                 kfree(cb);
147                 return NULL;
148         }
149         cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
150                                       __alignof__(struct blk_rq_stat));
151         if (!cb->cpu_stat) {
152                 kfree(cb->stat);
153                 kfree(cb);
154                 return NULL;
155         }
156
157         cb->timer_fn = timer_fn;
158         cb->bucket_fn = bucket_fn;
159         cb->data = data;
160         cb->buckets = buckets;
161         setup_timer(&cb->timer, blk_stat_timer_fn, (unsigned long)cb);
162
163         return cb;
164 }
165 EXPORT_SYMBOL_GPL(blk_stat_alloc_callback);
166
/*
 * Register @cb on @q so it starts receiving completion samples.  All
 * possible CPUs are (re)initialized first so counts from any previous
 * registration cannot leak into the new window.
 */
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_stat_init(&cpu_stat[bucket]);
	}

	/* Publish to the RCU readers in blk_stat_add() and enable stats. */
	spin_lock(&q->stats->lock);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);
}
186 EXPORT_SYMBOL_GPL(blk_stat_add_callback);
187
/*
 * Unregister @cb from @q.  On return the timer handler is guaranteed not
 * to be running, but RCU readers may still see @cb, which is why freeing
 * must go through blk_stat_free_callback() (call_rcu based).
 */
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	spin_lock(&q->stats->lock);
	list_del_rcu(&cb->list);
	/* Stop accounting only if nothing else still needs the stats. */
	if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
		clear_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);

	del_timer_sync(&cb->timer);
}
199 EXPORT_SYMBOL_GPL(blk_stat_remove_callback);
200
/* RCU callback: actually release a stat callback's memory. */
static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}
210
211 void blk_stat_free_callback(struct blk_stat_callback *cb)
212 {
213         if (cb)
214                 call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
215 }
216 EXPORT_SYMBOL_GPL(blk_stat_free_callback);
217
/*
 * Force stats gathering on for @q even with no callbacks registered.
 * Once set, enable_accounting keeps QUEUE_FLAG_STATS from being cleared
 * by blk_stat_remove_callback().
 */
void blk_stat_enable_accounting(struct request_queue *q)
{
	spin_lock(&q->stats->lock);
	q->stats->enable_accounting = true;
	set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);
}
225
226 struct blk_queue_stats *blk_alloc_queue_stats(void)
227 {
228         struct blk_queue_stats *stats;
229
230         stats = kmalloc(sizeof(*stats), GFP_KERNEL);
231         if (!stats)
232                 return NULL;
233
234         INIT_LIST_HEAD(&stats->callbacks);
235         spin_lock_init(&stats->lock);
236         stats->enable_accounting = false;
237
238         return stats;
239 }
240
241 void blk_free_queue_stats(struct blk_queue_stats *stats)
242 {
243         if (!stats)
244                 return;
245
246         WARN_ON(!list_empty(&stats->callbacks));
247
248         kfree(stats);
249 }