/*
 * Extract CPU cache information and expose them via sysfs.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */
8 #include <linux/notifier.h>
9 #include <linux/seq_file.h>
10 #include <linux/init.h>
11 #include <linux/list.h>
12 #include <linux/slab.h>
13 #include <linux/cpu.h>
14 #include <asm/facility.h>
/*
 * NOTE(review): the lines below retain stray source line numbers and are
 * missing their enclosing "struct"/"enum"/"union" headers and closing
 * braces -- the extraction dropped lines.  Comments describe only what the
 * visible fragments and their uses elsewhere in this file show.
 */
/* Fields of a per-level cache descriptor (enclosing struct header not visible). */
18 unsigned int line_size;	/* cache line size in bytes (see cache_add/show_cacheinfo) */
19 unsigned int associativity;	/* ways of associativity (from ecag) */
21 unsigned int level : 3;	/* cache level, stored 1-based (cache_add: level + 1) */
22 unsigned int type : 2;	/* index into cache_type_string[] (cache_add: type - 1) */
23 unsigned int private : 1;	/* 1 = private to one CPU, 0 = shared (show_cacheinfo) */
24 struct list_head list;	/* linkage on the global cache_list */
/* Member of the per-CPU cache_dir (presumably; struct header not visible). */
29 struct cache_index_dir *index;	/* head of the CPU's "indexN" directory list */
/* Bookkeeping for one sysfs "indexN" directory of one cache on one CPU. */
32 struct cache_index_dir {
/* NOTE(review): kobj/cpu/cache members referenced elsewhere are not visible here. */
36 struct cache_index_dir *next;	/* next index dir of the same CPU (singly linked) */
/* Cache scope codes from the ECAG topology word (enum header not visible). */
40 CACHE_SCOPE_NOTEXISTS,
/* Cache type codes (enum header not visible). */
49 CACHE_TYPE_INSTRUCTION,
/* ECAG attribute-indication codes (enum header not visible). */
57 EXTRACT_ASSOCIATIVITY,
/* ECAG type-indication codes (enum header not visible). */
62 CACHE_TI_INSTRUCTION = 0,
/* Bitfields of one topology entry (presumably struct cache_info; header not visible). */
68 unsigned char scope : 2;	/* one of the CACHE_SCOPE_* values */
69 unsigned char type : 2;	/* one of the CACHE_TYPE_* values */
/* Maximum number of cache levels described by the topology word. */
72 #define CACHE_MAX_LEVEL 8
/* Topology word: per-level descriptors overlaid on the raw ECAG result. */
74 union cache_topology {
75 struct cache_info ci[CACHE_MAX_LEVEL];
76 unsigned long long raw;
/* Human-readable names indexed by the stored (type - 1) value; initializer body not visible. */
79 static const char * const cache_type_string[] = {
/* Per-CPU pointer to the sysfs "cache" directory bookkeeping (set in cache_create_cache_dir). */
85 static struct cache_dir *cache_dir_cpu[NR_CPUS];
/* Global list of all detected caches, filled by cache_add(). */
86 static LIST_HEAD(cache_list);
/*
 * show_cacheinfo - emit one descriptive line per detected cache into a
 * seq_file ("cacheN: level=... type=... scope=... size=...K line_size=...
 * associativity=...").
 * NOTE(review): the function braces, the declarations of "cache"/"index",
 * and the per-iteration index increment were dropped by the extraction.
 */
88 void show_cacheinfo(struct seq_file *m)
93 list_for_each_entry(cache, &cache_list, list) {
94 seq_printf(m, "cache%-11d: ", index);
95 seq_printf(m, "level=%d ", cache->level);
96 seq_printf(m, "type=%s ", cache_type_string[cache->type]);
97 seq_printf(m, "scope=%s ", cache->private ? "Private" : "Shared");
98 seq_printf(m, "size=%luK ", cache->size >> 10);	/* size is kept in bytes */
99 seq_printf(m, "line_size=%u ", cache->line_size);
100 seq_printf(m, "associativity=%d", cache->associativity);
/*
 * ecag - execute the EXTRACT CACHE ATTRIBUTE instruction.
 * @ai: attribute indication (an EXTRACT_* value)
 * @li: level indication (cache level)
 * @ti: type indication (a CACHE_TI_* value)
 *
 * Packs ai/li/ti into the command word consumed by ECAG and returns the
 * extracted attribute.  NOTE(review): the "return val;" and the closing
 * brace were dropped by the extraction.
 */
106 static inline unsigned long ecag(int ai, int li, int ti)
108 unsigned long cmd, val;
110 cmd = ai << 4 | li << 1 | ti;
111 asm volatile(".insn rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */
112 : "=d" (val) : "a" (cmd));
/*
 * cache_add - query one cache's attributes via ECAG and append a descriptor
 * to the global cache_list.
 * @level:   0-based cache level passed to ecag()
 * @private: 1 if the cache is private to a CPU
 * @type:    CACHE_TYPE_* value; stored 1-off as an index into cache_type_string[]
 *
 * NOTE(review): the kzalloc failure check, the return statement and the
 * closing brace were dropped by the extraction.
 */
116 static int __init cache_add(int level, int private, int type)
121 cache = kzalloc(sizeof(*cache), GFP_KERNEL);
/* Instruction caches are queried with the data/unified TI code per type. */
124 ti = type == CACHE_TYPE_DATA ? CACHE_TI_DATA : CACHE_TI_UNIFIED;
125 cache->size = ecag(EXTRACT_SIZE, level, ti);
126 cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
127 cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
/* nr_sets = size / (associativity * line_size) */
128 cache->nr_sets = cache->size / cache->associativity;
129 cache->nr_sets /= cache->line_size;
130 cache->private = private;
131 cache->level = level + 1;	/* exposed levels are 1-based */
132 cache->type = type - 1;	/* matches cache_type_string[] indexing */
133 list_add_tail(&cache->list, &cache_list);
/*
 * cache_build_info - walk the ECAG topology word and register every
 * existing cache level on the global cache_list.  Separate I/D caches at a
 * level produce two entries.  On error the list is torn down again.
 * NOTE(review): the switch's break/fall-through lines, the per-case
 * private-flag assignments, the error checks and the kfree in the cleanup
 * loop were dropped by the extraction.
 */
137 static void __init cache_build_info(void)
139 struct cache *cache, *next;
140 union cache_topology ct;
141 int level, private, rc;
143 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
144 for (level = 0; level < CACHE_MAX_LEVEL; level++) {
145 switch (ct.ci[level].scope) {
146 case CACHE_SCOPE_NOTEXISTS:
147 case CACHE_SCOPE_RESERVED:
148 case CACHE_SCOPE_SHARED:
152 case CACHE_SCOPE_PRIVATE:
/* A "separate" cache is registered as distinct data + instruction caches. */
156 if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
157 rc = cache_add(level, private, CACHE_TYPE_DATA);
158 rc |= cache_add(level, private, CACHE_TYPE_INSTRUCTION);
160 rc = cache_add(level, private, ct.ci[level].type);
/* Error path: unlink (and presumably free) everything added so far. */
167 list_for_each_entry_safe(cache, next, &cache_list, list) {
168 list_del(&cache->list);
/*
 * cache_create_cache_dir - create the per-CPU sysfs "cache" kobject under
 * the CPU device and allocate its bookkeeping structure.
 * Returns the new cache_dir (error paths/returns not visible -- the NULL
 * checks after get_cpu_device/kobject_create_and_add/kzalloc were dropped
 * by the extraction).
 */
173 static struct cache_dir *__cpuinit cache_create_cache_dir(int cpu)
175 struct cache_dir *cache_dir;
176 struct kobject *kobj = NULL;
179 dev = get_cpu_device(cpu);
182 kobj = kobject_create_and_add("cache", &dev->kobj);
185 cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
188 cache_dir->kobj = kobj;
189 cache_dir_cpu[cpu] = cache_dir;	/* remember for cache_remove_cpu() */
/* Map an embedded kobject back to its containing cache_index_dir. */
196 static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *kobj)
198 return container_of(kobj, struct cache_index_dir, kobj);
/*
 * kobject release callback for an index directory.
 * NOTE(review): the kfree(index) that presumably follows was dropped by the
 * extraction.
 */
201 static void cache_index_release(struct kobject *kobj)
203 struct cache_index_dir *index;
205 index = kobj_to_cache_index_dir(kobj);
/* Generic sysfs show dispatcher: forward to the kobj_attribute's ->show(). */
209 static ssize_t cache_index_show(struct kobject *kobj,
210 struct attribute *attr, char *buf)
212 struct kobj_attribute *kobj_attr;
214 kobj_attr = container_of(attr, struct kobj_attribute, attr);
215 return kobj_attr->show(kobj, kobj_attr, buf);
/*
 * DEFINE_CACHE_ATTR - generate a read-only sysfs attribute that formats one
 * field of the index directory's cache descriptor.
 * NOTE(review): some continuation lines of the macro body were dropped by
 * the extraction.
 */
218 #define DEFINE_CACHE_ATTR(_name, _format, _value) \
219 static ssize_t cache_##_name##_show(struct kobject *kobj, \
220 struct kobj_attribute *attr, \
223 struct cache_index_dir *index; \
225 index = kobj_to_cache_index_dir(kobj); \
226 return sprintf(buf, _format, _value); \
228 static struct kobj_attribute cache_##_name##_attr = \
229 __ATTR(_name, 0444, cache_##_name##_show, NULL);
/* The standard cacheinfo attribute set, mirroring show_cacheinfo()'s fields. */
231 DEFINE_CACHE_ATTR(size, "%luK\n", index->cache->size >> 10);
232 DEFINE_CACHE_ATTR(coherency_line_size, "%u\n", index->cache->line_size);
233 DEFINE_CACHE_ATTR(number_of_sets, "%u\n", index->cache->nr_sets);
234 DEFINE_CACHE_ATTR(ways_of_associativity, "%u\n", index->cache->associativity);
235 DEFINE_CACHE_ATTR(type, "%s\n", cache_type_string[index->cache->type]);
236 DEFINE_CACHE_ATTR(level, "%d\n", index->cache->level);
/*
 * shared_cpu_map_func - format the set of CPUs sharing this cache.
 * @type: 1 = CPU list format ("0-3"), 0 = bitmask format.
 * Only the owning CPU's mask is printed (cpumask_of(index->cpu)).
 * NOTE(review): the ternary's condition line ("len = type ?", presumably)
 * and the return were dropped by the extraction.
 */
238 static ssize_t shared_cpu_map_func(struct kobject *kobj, int type, char *buf)
240 struct cache_index_dir *index;
243 index = kobj_to_cache_index_dir(kobj);
245 cpulist_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)) :
246 cpumask_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu));
247 len += sprintf(&buf[len], "\n");	/* terminate with a newline */
/* sysfs show: shared_cpu_map (bitmask form). */
251 static ssize_t shared_cpu_map_show(struct kobject *kobj,
252 struct kobj_attribute *attr, char *buf)
254 return shared_cpu_map_func(kobj, 0, buf);
256 static struct kobj_attribute cache_shared_cpu_map_attr =
257 __ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
/* sysfs show: shared_cpu_list (list form). */
259 static ssize_t shared_cpu_list_show(struct kobject *kobj,
260 struct kobj_attribute *attr, char *buf)
262 return shared_cpu_map_func(kobj, 1, buf);
264 static struct kobj_attribute cache_shared_cpu_list_attr =
265 __ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);
/*
 * Default attributes created for every indexN directory.
 * NOTE(review): the terminating NULL entry and the closing "};" lines of
 * these initializers were dropped by the extraction.
 */
267 static struct attribute *cache_index_default_attrs[] = {
268 &cache_type_attr.attr,
269 &cache_size_attr.attr,
270 &cache_number_of_sets_attr.attr,
271 &cache_ways_of_associativity_attr.attr,
272 &cache_level_attr.attr,
273 &cache_coherency_line_size_attr.attr,
274 &cache_shared_cpu_map_attr.attr,
275 &cache_shared_cpu_list_attr.attr,
/* Read-only sysfs ops: dispatch via cache_index_show(); no store callback. */
279 static const struct sysfs_ops cache_index_ops = {
280 .show = cache_index_show,
/* kobj_type tying together ops, release and default attributes. */
283 static struct kobj_type cache_index_type = {
284 .sysfs_ops = &cache_index_ops,
285 .release = cache_index_release,
286 .default_attrs = cache_index_default_attrs,
/*
 * cache_create_index_dir - create one sysfs "index%d" kobject for @cache
 * under @cache_dir and push it on the CPU's index list.
 * NOTE(review): allocation/kobject error checks and the return statement
 * were dropped by the extraction.
 */
289 static int __cpuinit cache_create_index_dir(struct cache_dir *cache_dir,
290 struct cache *cache, int index,
293 struct cache_index_dir *index_dir;
296 index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
299 index_dir->cache = cache;
300 index_dir->cpu = cpu;
301 rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
302 cache_dir->kobj, "index%d", index);
/* Link at list head; cache_remove_cpu() walks this list to tear down. */
305 index_dir->next = cache_dir->index;
306 cache_dir->index = index_dir;
/*
 * cache_add_cpu - populate the sysfs cache directory for one CPU: create
 * the "cache" dir, then an indexN entry per cache on the global list.
 * Does nothing when no caches were detected.
 * NOTE(review): the error checks, the index counter handling and the
 * return were dropped by the extraction; presumably only private caches
 * get index dirs, but the condition line is not visible.
 */
313 static int __cpuinit cache_add_cpu(int cpu)
315 struct cache_dir *cache_dir;
319 if (list_empty(&cache_list))
321 cache_dir = cache_create_cache_dir(cpu);
324 list_for_each_entry(cache, &cache_list, list) {
327 rc = cache_create_index_dir(cache_dir, cache, index, cpu);
/*
 * cache_remove_cpu - tear down a CPU's sysfs cache hierarchy: drop every
 * indexN kobject (released via cache_index_release), then the "cache"
 * directory kobject, and clear the per-CPU slot.
 * NOTE(review): the NULL check, the list-walk loop construct around the
 * kobject_put, and the kfree(cache_dir) presumably preceding the slot
 * reset were dropped by the extraction.
 */
335 static void __cpuinit cache_remove_cpu(int cpu)
337 struct cache_index_dir *index, *next;
338 struct cache_dir *cache_dir;
340 cache_dir = cache_dir_cpu[cpu];
343 index = cache_dir->index;
346 kobject_put(&index->kobj);
349 kobject_put(cache_dir->kobj);
351 cache_dir_cpu[cpu] = NULL;
/*
 * cache_hotplug - CPU hotplug notifier: create the sysfs entries when a CPU
 * comes online (removing them again if that fails), and remove them when it
 * goes away.  NOTE(review): the case labels (presumably CPU_ONLINE and
 * CPU_DEAD) and breaks were dropped by the extraction.
 */
354 static int __cpuinit cache_hotplug(struct notifier_block *nfb,
355 unsigned long action, void *hcpu)
357 int cpu = (long)hcpu;
/* Treat frozen (suspend/resume) transitions like their normal counterparts. */
360 switch (action & ~CPU_TASKS_FROZEN) {
362 rc = cache_add_cpu(cpu);
364 cache_remove_cpu(cpu);
367 cache_remove_cpu(cpu);
370 return rc ? NOTIFY_BAD : NOTIFY_OK;
/*
 * cache_init - initcall entry point.  Bails out unless facility 34
 * (the ECAG instruction) is available, sets up sysfs entries for all
 * online CPUs and registers the hotplug notifier.
 * NOTE(review): the early-return, the presumable cache_build_info() call,
 * the per-CPU cache_add_cpu() call in the loop body, and the return were
 * dropped by the extraction.
 */
373 static int __init cache_init(void)
377 if (!test_facility(34))
380 for_each_online_cpu(cpu)
382 hotcpu_notifier(cache_hotplug, 0);
/* Run after core device init so the CPU devices exist. */
385 device_initcall(cache_init);