/*
 * arch/s390/kernel/cache.c
 *
 * Extract CPU cache information and expose it via sysfs.
 *
 *    Copyright IBM Corp. 2012
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */
7
8 #include <linux/notifier.h>
9 #include <linux/seq_file.h>
10 #include <linux/init.h>
11 #include <linux/list.h>
12 #include <linux/slab.h>
13 #include <linux/cpu.h>
14 #include <asm/facility.h>
15
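/*
 * The cache topology is extracted once at boot and assumed to be the
 * same for all CPUs: a single global list of struct cache describes
 * every cache level/type.  Per CPU, struct cache_dir represents the
 * sysfs "cache" directory and struct cache_index_dir one "index<N>"
 * subdirectory below it; the index directories are kept on a singly
 * linked list so they can be torn down on CPU hot unplug.
 */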
struct cache {
	unsigned long size;
	unsigned int line_size;
	unsigned int associativity;
	unsigned int nr_sets;
	unsigned int level   : 3;
	unsigned int type    : 2;
	unsigned int private : 1;
	struct list_head list;
};

struct cache_dir {
	struct kobject *kobj;
	struct cache_index_dir *index;
};

struct cache_index_dir {
	struct kobject kobj;
	int cpu;
	struct cache *cache;
	struct cache_index_dir *next;
};

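/*
 * These enums mirror the ECAG encodings: the scope and type values
 * reported per level in the topology summary, the attribute indication
 * (what to extract) and the type indication (which half of a separate
 * cache to query; unified caches use the same value as instruction
 * caches, namely 0).
 */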
enum {
	CACHE_SCOPE_NOTEXISTS,
	CACHE_SCOPE_PRIVATE,
	CACHE_SCOPE_SHARED,
	CACHE_SCOPE_RESERVED,
};

enum {
	CACHE_TYPE_SEPARATE,
	CACHE_TYPE_DATA,
	CACHE_TYPE_INSTRUCTION,
	CACHE_TYPE_UNIFIED,
};

enum {
	EXTRACT_TOPOLOGY,
	EXTRACT_LINE_SIZE,
	EXTRACT_SIZE,
	EXTRACT_ASSOCIATIVITY,
};

enum {
	CACHE_TI_UNIFIED = 0,
	CACHE_TI_INSTRUCTION = 0,
	CACHE_TI_DATA,
};

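/*
 * The topology summary is a single 8-byte value with one cache_info
 * byte per cache level.  s390 is big endian and gcc allocates bit
 * fields starting at the most significant bit, so the anonymous 4-bit
 * member skips each byte's reserved high nibble.
 */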
struct cache_info {
	unsigned char       : 4;
	unsigned char scope : 2;
	unsigned char type  : 2;
};

#define CACHE_MAX_LEVEL 8

union cache_topology {
	struct cache_info ci[CACHE_MAX_LEVEL];
	unsigned long long raw;
};

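/* Indexed by struct cache::type; see the type - 1 mapping in cache_add(). */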
static const char * const cache_type_string[] = {
	"Data",
	"Instruction",
	"Unified",
};

static struct cache_dir *cache_dir_cpu[NR_CPUS];
static LIST_HEAD(cache_list);

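/*
 * Called from the /proc/cpuinfo code (show_cpuinfo()) to add one line
 * per cache.  Sample output (field values are illustrative, not taken
 * from real hardware):
 *
 *   cache0          : level=1 type=Data scope=Private size=64K line_size=256 associativity=4
 */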
void show_cacheinfo(struct seq_file *m)
{
	struct cache *cache;
	int index = 0;

	list_for_each_entry(cache, &cache_list, list) {
		seq_printf(m, "cache%-11d: ", index);
		seq_printf(m, "level=%d ", cache->level);
		seq_printf(m, "type=%s ", cache_type_string[cache->type]);
		seq_printf(m, "scope=%s ", cache->private ? "Private" : "Shared");
		seq_printf(m, "size=%luK ", cache->size >> 10);
		seq_printf(m, "line_size=%u ", cache->line_size);
		seq_printf(m, "associativity=%d", cache->associativity);
		seq_puts(m, "\n");
		index++;
	}
}

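/*
 * Wrapper for the EXTRACT CACHE ATTRIBUTE (ECAG) instruction.  The
 * attribute selector is passed in the low byte of the second-operand
 * address: a 4-bit attribute indication (ai), a 3-bit level indication
 * (li) and a 1-bit type indication (ti), hence ai << 4 | li << 1 | ti.
 * The instruction is emitted via .insn, presumably so the file also
 * builds with assemblers that do not know the ecag mnemonic.
 */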
static inline unsigned long ecag(int ai, int li, int ti)
{
	unsigned long cmd, val;

	cmd = ai << 4 | li << 1 | ti;
	asm volatile(".insn     rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */
		     : "=d" (val) : "a" (cmd));
	return val;
}

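/*
 * Add one cache to cache_list, querying ECAG for its attributes.
 * ECAG levels are zero based while sysfs levels start at 1, hence
 * level + 1; type - 1 turns CACHE_TYPE_DATA/INSTRUCTION/UNIFIED into
 * an index into cache_type_string[].  The number of sets follows from
 * size == nr_sets * associativity * line_size.
 */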
static int __init cache_add(int level, int private, int type)
{
	struct cache *cache;
	int ti;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (!cache)
		return -ENOMEM;
	ti = type == CACHE_TYPE_DATA ? CACHE_TI_DATA : CACHE_TI_UNIFIED;
	cache->size = ecag(EXTRACT_SIZE, level, ti);
	cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
	cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
	cache->nr_sets = cache->size / cache->associativity;
	cache->nr_sets /= cache->line_size;
	cache->private = private;
	cache->level = level + 1;
	cache->type = type - 1;
	list_add_tail(&cache->list, &cache_list);
	return 0;
}

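/*
 * Build the global cache_list from the ECAG topology summary.  The walk
 * stops at the first non-existing (or reserved) level; a separate cache
 * contributes both a data and an instruction entry.  On allocation
 * failure the whole list is torn down again.
 */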
static void __init cache_build_info(void)
{
	struct cache *cache, *next;
	union cache_topology ct;
	int level, private, rc;

	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
	for (level = 0; level < CACHE_MAX_LEVEL; level++) {
		switch (ct.ci[level].scope) {
		case CACHE_SCOPE_NOTEXISTS:
		case CACHE_SCOPE_RESERVED:
			return;
		case CACHE_SCOPE_SHARED:
			private = 0;
			break;
		case CACHE_SCOPE_PRIVATE:
			private = 1;
			break;
		}
		if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
			rc  = cache_add(level, private, CACHE_TYPE_DATA);
			rc |= cache_add(level, private, CACHE_TYPE_INSTRUCTION);
		} else {
			rc = cache_add(level, private, ct.ci[level].type);
		}
		if (rc)
			goto error;
	}
	return;
error:
	list_for_each_entry_safe(cache, next, &cache_list, list) {
		list_del(&cache->list);
		kfree(cache);
	}
}

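/*
 * Create the "cache" directory below the CPU's sysfs device directory,
 * i.e. /sys/devices/system/cpu/cpu<N>/cache.  kobject_put(NULL) is a
 * no-op, so the shared error exit is safe even before the kobject has
 * been created.
 */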
static struct cache_dir *__cpuinit cache_create_cache_dir(int cpu)
{
	struct cache_dir *cache_dir;
	struct kobject *kobj = NULL;
	struct device *dev;

	dev = get_cpu_device(cpu);
	if (!dev)
		goto out;
	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto out;
	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto out;
	cache_dir->kobj = kobj;
	cache_dir_cpu[cpu] = cache_dir;
	return cache_dir;
out:
	kobject_put(kobj);
	return NULL;
}

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *kobj)
{
	return container_of(kobj, struct cache_index_dir, kobj);
}

static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);
	kfree(index);
}

static ssize_t cache_index_show(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);
	return kobj_attr->show(kobj, kobj_attr, buf);
}

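/*
 * DEFINE_CACHE_ATTR(foo, ...) expands to a show function
 * cache_foo_show() and a read-only attribute cache_foo_attr.  The
 * attribute names match the cacheinfo sysfs interface known from other
 * architectures (size, type, level, coherency_line_size, ...).
 */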
#define DEFINE_CACHE_ATTR(_name, _format, _value)			\
static ssize_t cache_##_name##_show(struct kobject *kobj,		\
				    struct kobj_attribute *attr,	\
				    char *buf)				\
{									\
	struct cache_index_dir *index;					\
									\
	index = kobj_to_cache_index_dir(kobj);				\
	return sprintf(buf, _format, _value);				\
}									\
static struct kobj_attribute cache_##_name##_attr =			\
	__ATTR(_name, 0444, cache_##_name##_show, NULL);

DEFINE_CACHE_ATTR(size, "%luK\n", index->cache->size >> 10);
DEFINE_CACHE_ATTR(coherency_line_size, "%u\n", index->cache->line_size);
DEFINE_CACHE_ATTR(number_of_sets, "%u\n", index->cache->nr_sets);
DEFINE_CACHE_ATTR(ways_of_associativity, "%u\n", index->cache->associativity);
DEFINE_CACHE_ATTR(type, "%s\n", cache_type_string[index->cache->type]);
DEFINE_CACHE_ATTR(level, "%d\n", index->cache->level);

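/*
 * Only private (CPU-local) caches get index directories, so both the
 * mask and the list representation contain just the owning CPU.
 * PAGE_SIZE - 2 leaves room for the appended newline and the
 * terminating NUL.
 */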
static ssize_t shared_cpu_map_func(struct kobject *kobj, int type, char *buf)
{
	struct cache_index_dir *index;
	int len;

	index = kobj_to_cache_index_dir(kobj);
	len = type ?
		cpulist_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)) :
		cpumask_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu));
	len += sprintf(&buf[len], "\n");
	return len;
}

static ssize_t shared_cpu_map_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return shared_cpu_map_func(kobj, 0, buf);
}
static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

static ssize_t shared_cpu_list_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return shared_cpu_map_func(kobj, 1, buf);
}
static struct kobj_attribute cache_shared_cpu_list_attr =
	__ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);

static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_size_attr.attr,
	&cache_number_of_sets_attr.attr,
	&cache_ways_of_associativity_attr.attr,
	&cache_level_attr.attr,
	&cache_coherency_line_size_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	&cache_shared_cpu_list_attr.attr,
	NULL,
};

static const struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.sysfs_ops = &cache_index_ops,
	.release = cache_index_release,
	.default_attrs = cache_index_default_attrs,
};

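/*
 * Create one "index<N>" directory and link it into the per-CPU chain.
 * The kobject is embedded in cache_index_dir, so once it has been
 * initialized the object must be freed through cache_index_release().
 */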
static int __cpuinit cache_create_index_dir(struct cache_dir *cache_dir,
					    struct cache *cache, int index,
					    int cpu)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		return -ENOMEM;
	index_dir->cache = cache;
	index_dir->cpu = cpu;
	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc)
		goto out;
	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;
	return 0;
out:
	/*
	 * The kobject is already initialized, so it must be released via
	 * kobject_put() (which ends up in cache_index_release()) instead
	 * of a plain kfree(), which would leak the kobject name.
	 */
	kobject_put(&index_dir->kobj);
	return rc;
}

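/*
 * cache_list is ordered by level, and the code relies on private
 * caches preceding shared ones, so the walk stops at the first shared
 * cache: shared caches show up in /proc/cpuinfo only, not in sysfs.
 * On error the hotplug path cleans up via cache_remove_cpu().
 */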
static int __cpuinit cache_add_cpu(int cpu)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int rc, index = 0;

	if (list_empty(&cache_list))
		return 0;
	cache_dir = cache_create_cache_dir(cpu);
	if (!cache_dir)
		return -ENOMEM;
	list_for_each_entry(cache, &cache_list, list) {
		if (!cache->private)
			break;
		rc = cache_create_index_dir(cache_dir, cache, index, cpu);
		if (rc)
			return rc;
		index++;
	}
	return 0;
}

static void __cpuinit cache_remove_cpu(int cpu)
{
	struct cache_index_dir *index, *next;
	struct cache_dir *cache_dir;

	cache_dir = cache_dir_cpu[cpu];
	if (!cache_dir)
		return;
	index = cache_dir->index;
	while (index) {
		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
	kobject_put(cache_dir->kobj);
	kfree(cache_dir);
	cache_dir_cpu[cpu] = NULL;
}

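/*
 * CPU hotplug callback: populate the cache directories when a CPU
 * comes online, remove them when it dies.  Masking out
 * CPU_TASKS_FROZEN makes suspend/resume transitions take the same
 * paths as ordinary hotplug.
 */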
static int __cpuinit cache_hotplug(struct notifier_block *nfb,
				   unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;
	int rc = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		rc = cache_add_cpu(cpu);
		if (rc)
			cache_remove_cpu(cpu);
		break;
	case CPU_DEAD:
		cache_remove_cpu(cpu);
		break;
	}
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}

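/*
 * Facility 34 is the general-instructions-extension facility, which
 * provides ECAG; without it there is no cache information to extract
 * and the initcall bails out silently.
 */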
static int __init cache_init(void)
{
	int cpu;

	if (!test_facility(34))
		return 0;
	cache_build_info();
	for_each_online_cpu(cpu)
		cache_add_cpu(cpu);
	hotcpu_notifier(cache_hotplug, 0);
	return 0;
}
device_initcall(cache_init);