 * Routines to identify caches on Intel CPU.
 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <linux/smp.h>
#include <asm/amd_nb.h>

	unsigned char descriptor;

#define MB(x)	((x) * 1024)
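/*
 * Note: the sizes in cache_table below are kept in KB, so MB(x) just
 * converts megabytes to kilobytes (e.g. MB(2) == 2048).
 */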
/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */

static const struct _cache_table __cpuinitconst cache_table[] =
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0e, LVL_1_DATA, 24 },	/* 6-way set assoc, 64 byte line size */
	{ 0x21, LVL_2, 256 },	/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, MB(2) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, MB(4) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },	/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, MB(1) },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, MB(2) },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, MB(4) },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, MB(8) },	/* 8-way set assoc, 64 byte line size */
	{ 0x48, LVL_2, MB(3) },	/* 12-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, MB(6) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, MB(12) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, MB(16) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, MB(6) },	/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x80, LVL_2, 512 },	/* 8-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, MB(1) },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, MB(2) },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3, MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3, MB(2) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3, MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3, MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3, MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3, MB(2) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3, MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3, MB(8) },	/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3, MB(2) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3, MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3, MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0xea, LVL_3, MB(12) },	/* 24-way set assoc, 64 byte line size */
	{ 0xeb, LVL_3, MB(18) },	/* 24-way set assoc, 64 byte line size */
	{ 0xec, LVL_3, MB(24) },	/* 24-way set assoc, 64 byte line size */
	CACHE_TYPE_UNIFIED = 3

union _cpuid4_leaf_eax {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;

union _cpuid4_leaf_ebx {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;

union _cpuid4_leaf_ecx {
		unsigned int		number_of_sets:32;

struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	struct amd_northbridge *nb;

struct _cpuid4_info {
	struct _cpuid4_info_regs base;
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);

unsigned short			num_cache_leaves;
/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc. that is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
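/*
 * The bitfield unions that follow mirror how amd_cpuid4() below unpacks
 * the 32-bit L1/L2/L3 descriptor registers returned by cpuid(0x80000005)
 * and cpuid(0x80000006): line size, lines per tag, associativity and
 * size are packed fields of a single register value.
 */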
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned size_in_kb:8;

		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned size_in_kb:16;

		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned size_encoded:14;
static const unsigned short __cpuinitconst assocs[] = {
	[0xf] = 0xffff /* fully associative - no way to show this currently */

static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		     union _cpuid4_leaf_ebx *ebx,
		     union _cpuid4_leaf_ecx *ecx)
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l1_cache *l1 = &l1d;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

		assoc = assocs[l1->assoc];
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;

		assoc = assocs[l2.assoc];
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);

		assoc = assocs[l3.assoc];
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
			size_in_kb = size_in_kb >> 1;

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;

	eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
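	/*
	 * Worked example of the encoding above (illustrative only): a
	 * 512 KB, 16-way cache with 64-byte lines is reported as
	 * coherency_line_size = 63, ways_of_associativity = 15 and
	 * number_of_sets = 512*1024/64/16 - 1 = 511, matching the
	 * "value minus one" convention of the real CPUID leaf 4.
	 */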
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,

#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)
 * L3 cache descriptors
static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
	struct amd_l3_cache *l3 = &nb->l3_cache;
	unsigned int sc0, sc1, sc2, sc3;

	pci_read_config_dword(nb->misc, 0x1C4, &val);

	/* calculate subcache sizes */
	l3->subcaches[0] = sc0 = !(val & BIT(0));
	l3->subcaches[1] = sc1 = !(val & BIT(4));
	if (boot_cpu_data.x86 == 0x15) {
		l3->subcaches[0] = sc0 += !(val & BIT(1));
		l3->subcaches[1] = sc1 += !(val & BIT(5));

	l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9));
	l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));

	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
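	/*
	 * Illustrative note: the largest per-subcache count above is scaled
	 * by 1024 indices; e.g. with all four subcaches enabled on a
	 * pre-family-0x15 part (sc0..sc3 == 1) this gives
	 * (1 << 10) - 1 = 1023 as the highest disableable index.
	 */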
static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
	/* only for L3, and not in virtualized environments */

	node = amd_get_nb_id(smp_processor_id());
	this_leaf->nb = node_to_amd_nb(node);
	if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
		amd_calc_l3_indices(this_leaf->nb);

 * check whether a slot used for disabling an L3 index is occupied.
 * @l3: L3 cache descriptor
 * @slot: slot number (0..1)
 * @returns: the disabled index if used or negative value if slot free.
int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
	unsigned int reg = 0;

	pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);

	/* check whether this slot is activated already */
	if (reg & (3UL << 30))

static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))

	index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
		return sprintf(buf, "%d\n", index);

	return sprintf(buf, "FREE\n");

#define SHOW_CACHE_DISABLE(slot)					\
show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf,	\
	return show_cache_disable(this_leaf, buf, slot);		\
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)
static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
				 unsigned slot, unsigned long idx)
	 * disable index in all 4 subcaches
	for (i = 0; i < 4; i++) {
		u32 reg = idx | (i << 20);

		if (!nb->l3_cache.subcaches[i])

		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);

		 * We need to WBINVD on a core on the node containing the L3
		 * cache which indices we disable therefore a simple wbinvd()

		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
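		/*
		 * Note: the two writes to the same 0x1BC/0x1C0 slot register
		 * bracket the cache flush described above; the second write
		 * is the one that finally commits the disable value for this
		 * subcache.
		 */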
 * disable an L3 cache index by using a disable-slot
 * @l3: L3 cache descriptor
 * @cpu: A CPU on the node containing the L3 cache
 * @slot: slot number (0..1)
 * @index: index to disable
 * @return: 0 on success, error status on failure
int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
	/* check if @slot is already used or the index is already disabled */
	ret = amd_get_l3_disable_slot(nb, slot);

	if (index > nb->l3_cache.indices)

	/* check whether the other slot has disabled the same index already */
	if (index == amd_get_l3_disable_slot(nb, !slot))

	amd_l3_disable_index(nb, cpu, slot, index);

static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
				   const char *buf, size_t count,
	unsigned long val = 0;

	if (!capable(CAP_SYS_ADMIN))

	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))

	cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));

	if (strict_strtoul(buf, 10, &val) < 0)

	err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
		pr_warning("L3 slot %d in use/index already disabled!\n",

#define STORE_CACHE_DISABLE(slot)					\
store_cache_disable_##slot(struct _cpuid4_info *this_leaf,		\
			   const char *buf, size_t count,		\
	return store_cache_disable(this_leaf, buf, count, slot);	\
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
		show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
		show_cache_disable_1, store_cache_disable_1);
show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))

	return sprintf(buf, "%x\n", amd_get_subcaches(cpu));

store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
	if (!capable(CAP_SYS_ADMIN))

	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))

	if (strict_strtoul(buf, 16, &val) < 0)

	if (amd_set_subcaches(cpu, val))

static struct _cache_attr subcaches =
	__ATTR(subcaches, 0644, show_subcaches, store_subcaches);

#define amd_init_l3_cache(x, y)
#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */
__cpuinit cpuid4_cache_lookup_regs(int index,
				   struct _cpuid4_info_regs *this_leaf)
	union _cpuid4_leaf_eax	eax;
	union _cpuid4_leaf_ebx	ebx;
	union _cpuid4_leaf_ecx	ecx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
			cpuid_count(0x8000001d, index, &eax.full,
				    &ebx.full, &ecx.full, &edx);
			amd_cpuid4(index, &eax, &ebx, &ecx);
		amd_init_l3_cache(this_leaf, index);
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets          + 1) *
			  (ebx.split.coherency_line_size     + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity   + 1);
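	/*
	 * Sanity example for the size formula: 512 sets * 64-byte lines *
	 * 1 line partition * 16 ways = 524288 bytes = 512 KB.
	 */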
static int __cpuinit find_num_cache_leaves(struct cpuinfo_x86 *c)
	unsigned int		eax, ebx, ecx, edx, op;
	union _cpuid4_leaf_eax	cache_eax;

	if (c->x86_vendor == X86_VENDOR_AMD)

		/* Do cpuid(op) loop to find out num_cache_leaves */
		cpuid_count(op, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
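	/*
	 * In short: cache leaves (the same ones queried by
	 * cpuid4_cache_lookup_regs()) are walked until one reports
	 * CACHE_TYPE_NULL, and the resulting count is what the callers
	 * store in num_cache_leaves.
	 */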
void __cpuinit init_amd_cacheinfo(struct cpuinfo_x86 *c)
	if (cpu_has_topoext) {
		num_cache_leaves = find_num_cache_leaves(c);
	} else if (c->extended_cpuid_level >= 0x80000006) {
		if (cpuid_edx(0x80000006) & 0xf000)
			num_cache_leaves = 4;
			num_cache_leaves = 3;

unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
	unsigned int cpu = c->cpu_index;

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves(c);

		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf = {};

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);

			switch (this_leaf.eax.split.level) {
				if (this_leaf.eax.split.type == CACHE_TYPE_DATA)
					new_l1d = this_leaf.size/1024;
				else if (this_leaf.eax.split.type == CACHE_TYPE_INST)
					new_l1i = this_leaf.size/1024;
				new_l2 = this_leaf.size/1024;
				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
				index_msb = get_count_order(num_threads_sharing);
				l2_id = c->apicid & ~((1 << index_msb) - 1);
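				/*
				 * Example of the masking above: with
				 * num_threads_sharing == 2, index_msb == 1,
				 * so the low APIC-id bit is cleared and both
				 * hyperthreads sharing the L2 compute the
				 * same l2_id.
				 */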
				new_l3 = this_leaf.size/1024;
				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
				index_msb = get_count_order(num_threads_sharing);
				l3_id = c->apicid & ~((1 << index_msb) - 1);

	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;

		if (num_cache_leaves != 0 && c->x86 == 15)

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;
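		/*
		 * The low byte of cpuid(2)'s EAX says how many times the
		 * leaf must be executed to retrieve all descriptors; each
		 * pass then yields up to 15 one-byte descriptors across
		 * EAX..EDX (byte 0 is the count itself, and any register
		 * with bit 31 set is skipped below).
		 */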
		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)

						switch (cache_table[k].cache_type) {
							l1i += cache_table[k].size;
							l1d += cache_table[k].size;
							l2 += cache_table[k].size;
							l3 += cache_table[k].size;
							trace += cache_table[k].size;

		per_cpu(cpu_llc_id, cpu) = l2_id;

		per_cpu(cpu_llc_id, cpu) = l3_id;

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))

static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
	struct _cpuid4_info *this_leaf;

	if (cpu_has_topoext) {
		unsigned int apicid, nshared, first, last;

		if (!per_cpu(ici_cpuid4_info, cpu))

		this_leaf = CPUID4_INFO_IDX(cpu, index);
		nshared = this_leaf->base.eax.split.num_threads_sharing + 1;
		apicid = cpu_data(cpu).apicid;
		first = apicid - (apicid % nshared);
		last = first + nshared - 1;
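		/*
		 * Worked example: nshared == 4 and apicid == 6 give
		 * first = 6 - (6 % 4) = 4 and last = 7, i.e. the leaf is
		 * treated as shared by the APIC-id range [4, 7].
		 */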
		for_each_online_cpu(i) {
			apicid = cpu_data(i).apicid;
			if ((apicid < first) || (apicid > last))

			if (!per_cpu(ici_cpuid4_info, i))

			this_leaf = CPUID4_INFO_IDX(i, index);

			for_each_online_cpu(sibling) {
				apicid = cpu_data(sibling).apicid;
				if ((apicid < first) || (apicid > last))

				set_bit(sibling, this_leaf->shared_cpu_map);

	} else if (index == 3) {
		for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
			if (!per_cpu(ici_cpuid4_info, i))

			this_leaf = CPUID4_INFO_IDX(i, index);
			for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
				if (!cpu_online(sibling))

				set_bit(sibling, this_leaf->shared_cpu_map);

static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (cache_shared_amd_cpu_map_setup(cpu, index))

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));

	index_msb = get_count_order(num_threads_sharing);
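	/*
	 * As in init_intel_cacheinfo() above: CPUs whose APIC ids agree in
	 * all bits above index_msb are taken to share this cache leaf, and
	 * each such online CPU is added to shared_cpu_map below.
	 */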
	for_each_online_cpu(i) {
		if (cpu_data(i).apicid >> index_msb ==
		    c->apicid >> index_msb) {
				    to_cpumask(this_leaf->shared_cpu_map));
			if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
					CPUID4_INFO_IDX(i, index);
				cpumask_set_cpu(cpu, to_cpumask(
					sibling_leaf->shared_cpu_map));

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
	struct _cpuid4_info	*this_leaf, *sibling_leaf;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpumask_clear_cpu(cpu,
				  to_cpumask(sibling_leaf->shared_cpu_map));

static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)

static void __cpuinit free_cache_attributes(unsigned int cpu)
	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(ici_cpuid4_info, cpu));
	per_cpu(ici_cpuid4_info, cpu) = NULL;

static void __cpuinit get_cpu_leaves(void *_retval)
	int j, *retval = _retval, cpu = smp_processor_id();

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);

		*retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
		if (unlikely(*retval < 0)) {
			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);

		cache_shared_cpu_map_setup(cpu, j);

static int __cpuinit detect_cache_attributes(unsigned int cpu)
	if (num_cache_leaves == 0)

	per_cpu(ici_cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(ici_cpuid4_info, cpu) == NULL)

	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
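	/*
	 * get_cpu_leaves() is run on the target CPU via the call above
	 * because CPUID describes the caches of whichever CPU executes it.
	 */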
		kfree(per_cpu(ici_cpuid4_info, cpu));
		per_cpu(ici_cpuid4_info, cpu) = NULL;

#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);

struct _index_kobject {
	unsigned short index;

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
#define INDEX_KOBJECT_PTR(x, y)		(&((per_cpu(ici_index_kobject, x))[y]))

#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \

show_one_plus(level, base.eax.split.level, 0);
show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);
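/*
 * The "+ 1" offsets above undo the "value minus one" encoding of the
 * CPUID fields, so sysfs reports the actual line size, partition, way
 * and set counts.
 */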
static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
	return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);

static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
		const struct cpumask *mask;

		mask = to_cpumask(this_leaf->shared_cpu_map);
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);
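		/*
		 * len-2 leaves room for a trailing newline and NUL
		 * terminator within the one-page sysfs buffer.
		 */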
static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
	return show_shared_cpu_map_func(leaf, 0, buf);

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
	return show_shared_cpu_map_func(leaf, 1, buf);

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
	switch (this_leaf->base.eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
		return sprintf(buf, "Unknown\n");

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

static struct attribute *default_attrs[] = {
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&shared_cpu_map.attr,
	&shared_cpu_list.attr,

#ifdef CONFIG_AMD_NB
static struct attribute ** __cpuinit amd_l3_attrs(void)
	static struct attribute **attrs;

	n = ARRAY_SIZE(default_attrs);

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))

	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))

	attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
		return attrs = default_attrs;

	for (n = 0; default_attrs[n]; n++)
		attrs[n] = default_attrs[n];

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
		attrs[n++] = &cache_disable_0.attr;
		attrs[n++] = &cache_disable_1.attr;

	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		attrs[n++] = &subcaches.attr;

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);

		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, this_leaf->cpu) :

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);

	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count, this_leaf->cpu) :

static const struct sysfs_ops sysfs_ops = {

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,

static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
	kfree(per_cpu(ici_cache_kobject, cpu));
	kfree(per_cpu(ici_index_kobject, cpu));
	per_cpu(ici_cache_kobject, cpu) = NULL;
	per_cpu(ici_index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
	if (num_cache_leaves == 0)

	err = detect_cache_attributes(cpu);

	/* Allocate all required memory */
	per_cpu(ici_cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))

	per_cpu(ici_index_kobject, cpu) = kzalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))

	cpuid4_cache_sysfs_exit(cpu);

static DECLARE_BITMAP(cache_dev_map, NR_CPUS);

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct device *dev)
	unsigned int cpu = dev->id;
	struct _index_kobject *this_object;
	struct _cpuid4_info *this_leaf;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))

	retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &dev->kobj, "%s", "cache");
		cpuid4_cache_sysfs_exit(cpu);

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;
		this_leaf = CPUID4_INFO_IDX(cpu, i);

		ktype_cache.default_attrs = default_attrs;
#ifdef CONFIG_AMD_NB
		if (this_leaf->base.nb)
			ktype_cache.default_attrs = amd_l3_attrs();

		retval = kobject_init_and_add(&(this_object->kobj),
					      per_cpu(ici_cache_kobject, cpu),
		if (unlikely(retval)) {
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(ici_cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);

		kobject_uevent(&(this_object->kobj), KOBJ_ADD);

	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

	kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);

static void __cpuinit cache_remove_dev(struct device *dev)
	unsigned int cpu = dev->id;

	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(ici_cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);

static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
	unsigned int cpu = (unsigned long)hcpu;

	dev = get_cpu_device(cpu);
	case CPU_ONLINE_FROZEN:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(dev);

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,

static int __init cache_sysfs_init(void)
	if (num_cache_leaves == 0)

	for_each_online_cpu(i) {
		struct device *dev = get_cpu_device(i);

		err = cache_add_dev(dev);

	register_hotcpu_notifier(&cacheinfo_cpu_notifier);

device_initcall(cache_sysfs_init);