/*
 *  (c) 2005, 2006 Advanced Micro Devices, Inc.
 *  Your use of this code is subject to the terms and conditions of the
 *  GNU general public license version 2. See "COPYING" or
 *  http://www.gnu.org/licenses/gpl.html
 *
 *  Written by Jacob Shin - AMD, Inc.
 *
 *  Support : jacob.shin@amd.com
 *
 *  - added support for AMD Family 0x10 processors
 *
 *  All MC4_MISCi registers are shared between multi-cores
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
#include <linux/sysdev.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#define PFX               "mce_threshold: "
#define VERSION           "version 1.1.1"

#define NR_BANKS          6
#define NR_BLOCKS         9
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400
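
/*
 * The MASK_* values decode a threshold MISC register: the high half
 * carries the valid, counter-present and locked flags, the APIC LVT
 * offset, the interrupt type, the overflow flag and the 12-bit error
 * count, while MASK_BLKPTR_LO in the low half locates additional
 * blocks in the extended MSR space starting at MCG_XBLK_ADDR.
 */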
struct threshold_block {
	unsigned int		block;
	unsigned int		bank;
	unsigned int		cpu;
	u32			address;
	u16			interrupt_enable;
	u16			threshold_limit;
	struct kobject		kobj;
	struct list_head	miscj;
};
/* defaults used early on boot */
static struct threshold_block threshold_defaults = {
	.interrupt_enable	= 0,
	.threshold_limit	= THRESHOLD_MAX,
};
struct threshold_bank {
	struct kobject		*kobj;
	struct threshold_block	*blocks;
	cpumask_var_t		cpus;
};
static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);
#ifdef CONFIG_SMP
static unsigned char shared_bank[NR_BANKS] = {
	0, 0, 0, 0, 1
};
#endif
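
/*
 * Only bank 4 is flagged shared, matching the note above that all
 * MC4_MISCi registers are shared between multi-cores: the first core
 * programs and owns that bank, siblings only get sysfs symlinks.
 */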
static DEFINE_PER_CPU(unsigned char, bank_map);	/* see which banks are on */

static void amd_threshold_interrupt(void);
struct thresh_restart {
	struct threshold_block	*b;
	int			reset;
	u16			old_limit;
};
/* must be called with correct cpu affinity */
/* Called via smp_call_function_single() */
static void threshold_restart_bank(void *_tr)
{
	struct thresh_restart *tr = _tr;
	u32 mci_misc_hi, mci_misc_lo;

	rdmsr(tr->b->address, mci_misc_lo, mci_misc_hi);

	if (tr->b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
		tr->reset = 1;	/* limit cannot be lower than err count */
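
	/*
	 * The counter starts at THRESHOLD_MAX - threshold_limit and counts
	 * up, so the overflow bit (and the APIC interrupt) fires once
	 * threshold_limit further errors have been accumulated.
	 */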
	if (tr->reset) {		/* reset err count and overflow bit */
		mci_misc_hi =
		    (mci_misc_hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
		    (THRESHOLD_MAX - tr->b->threshold_limit);
	} else if (tr->old_limit) {	/* change limit w/o reset */
		int new_count = (mci_misc_hi & THRESHOLD_MAX) +
		    (tr->old_limit - tr->b->threshold_limit);

		mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) |
		    (new_count & THRESHOLD_MAX);
	}

	tr->b->interrupt_enable ?
	    (mci_misc_hi = (mci_misc_hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) :
	    (mci_misc_hi &= ~MASK_INT_TYPE_HI);

	mci_misc_hi |= MASK_COUNT_EN_HI;
	wrmsr(tr->b->address, mci_misc_lo, mci_misc_hi);
}
/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
	unsigned int cpu = smp_processor_id();
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block;
	struct thresh_restart tr;
	u8 lvt_off;
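
	/*
	 * Walk each bank's chain of threshold blocks: block 0 sits at
	 * MSR_IA32_MC0_MISC + bank * 4, block 1 is located through the
	 * BLKPTR field relative to MCG_XBLK_ADDR, and later blocks are
	 * contiguous after that.
	 */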
	for (bank = 0; bank < NR_BANKS; ++bank) {
		for (block = 0; block < NR_BLOCKS; ++block) {
			if (block == 0)
				address = MSR_IA32_MC0_MISC + bank * 4;
			else if (block == 1) {
				address = (low & MASK_BLKPTR_LO) >> 21;
				if (!address)
					break;
				address += MCG_XBLK_ADDR;
			} else
				++address;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI)) {
				if (block)
					continue;
				else
					break;
			}

			if (!(high & MASK_CNTP_HI) ||
			    (high & MASK_LOCKED_HI))
				continue;

			if (!block)
				per_cpu(bank_map, cpu) |= (1 << bank);
#ifdef CONFIG_SMP
			if (shared_bank[bank] && c->cpu_core_id)
				break;
#endif
			lvt_off = setup_APIC_eilvt_mce(THRESHOLD_APIC_VECTOR,
						       APIC_EILVT_MSG_FIX, 0);

			high &= ~MASK_LVTOFF_HI;
			high |= lvt_off << 20;
			wrmsr(address, low, high);

			threshold_defaults.address = address;
			tr.b = &threshold_defaults;
			tr.reset = 0;
			tr.old_limit = 0;
			threshold_restart_bank(&tr);

			mce_threshold_vector = amd_threshold_interrupt;
		}
	}
}
/*
 * APIC Interrupt Handler
 */

/*
 * threshold interrupt handler will service THRESHOLD_APIC_VECTOR.
 * the interrupt goes off when error_count reaches threshold_limit.
 * the handler will simply log mcelog w/ software defined bank number.
 */
static void amd_threshold_interrupt(void)
{
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block;
	struct mce m;

	mce_setup(&m);

	/* assume first bank caused it */
	for (bank = 0; bank < NR_BANKS; ++bank) {
		if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
			continue;
		for (block = 0; block < NR_BLOCKS; ++block) {
			if (block == 0) {
				address = MSR_IA32_MC0_MISC + bank * 4;
			} else if (block == 1) {
				address = (low & MASK_BLKPTR_LO) >> 21;
				if (!address)
					break;
				address += MCG_XBLK_ADDR;
			} else {
				++address;
			}
			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI)) {
				if (block)
					continue;
				else
					break;
			}

			if (!(high & MASK_CNTP_HI) ||
			    (high & MASK_LOCKED_HI))
				continue;

			/*
			 * Log the machine check that caused the threshold
			 * event.
			 */
			machine_check_poll(MCP_TIMESTAMP,
					   &__get_cpu_var(mce_poll_banks));

			if (high & MASK_OVERFLOW_HI) {
				rdmsrl(address, m.misc);
				rdmsrl(MSR_IA32_MC0_STATUS + bank * 4,
				       m.status);
				m.bank = K8_MCE_THRESHOLD_BASE
				       + bank * NR_BLOCKS
				       + block;
				mce_log(&m);
				return;
			}
		}
	}
}
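
/*
 * Note that only the first block found with its overflow bit set gets
 * logged: the scan returns as soon as one MCE record has been queued,
 * under a software-defined bank number above K8_MCE_THRESHOLD_BASE.
 */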
/*
 * Sysfs Interface
 */

struct threshold_attr {
	struct attribute attr;
	ssize_t (*show) (struct threshold_block *, char *);
	ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};
#define SHOW_FIELDS(name)						\
static ssize_t show_ ## name(struct threshold_block *b, char *buf)	\
{									\
	return sprintf(buf, "%lx\n", (unsigned long) b->name);		\
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)
static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (strict_strtoul(buf, 0, &new) < 0)
		return -EINVAL;

	b->interrupt_enable = !!new;

	tr.b		= b;
	tr.reset	= 0;
	tr.old_limit	= 0;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}
static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (strict_strtoul(buf, 0, &new) < 0)
		return -EINVAL;

	if (new > THRESHOLD_MAX)
		new = THRESHOLD_MAX;
	if (new < 1)
		new = 1;

	tr.old_limit = b->threshold_limit;
	b->threshold_limit = new;
	tr.b = b;
	tr.reset = 0;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}
struct threshold_block_cross_cpu {
	struct threshold_block	*tb;
	long			retval;
};
static void local_error_count_handler(void *_tbcc)
{
	struct threshold_block_cross_cpu *tbcc = _tbcc;
	struct threshold_block *b = tbcc->tb;
	u32 low, high;

	rdmsr(b->address, low, high);
	tbcc->retval = (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit);
}
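
/*
 * rdmsr() only reads the local core's registers, so error_count
 * accesses are bounced to the owning CPU via smp_call_function_single();
 * the handler above converts the raw count back into errors-seen
 * relative to the programmed starting value.
 */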
static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
	struct threshold_block_cross_cpu tbcc = { .tb = b, };

	smp_call_function_single(b->cpu, local_error_count_handler, &tbcc, 1);
	return sprintf(buf, "%lx\n", tbcc.retval);
}
static ssize_t store_error_count(struct threshold_block *b,
				 const char *buf, size_t count)
{
	struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 };

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
	return 1;
}
#define RW_ATTR(val)							\
static struct threshold_attr val = {					\
	.attr	= {.name = __stringify(val), .mode = 0644 },		\
	.show	= show_## val,						\
	.store	= store_## val,						\
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);
RW_ATTR(error_count);
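
/*
 * Each threshold block directory therefore exposes three 0644 files:
 * interrupt_enable, threshold_limit and error_count.
 */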
static struct attribute *default_attrs[] = {
	&interrupt_enable.attr,
	&threshold_limit.attr,
	&error_count.attr,
	NULL
};
#define to_block(k)	container_of(k, struct threshold_block, kobj)
#define to_attr(a)	container_of(a, struct threshold_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->show ? a->show(b, buf) : -EIO;

	return ret;
}
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->store ? a->store(b, buf, count) : -EIO;

	return ret;
}
static const struct sysfs_ops threshold_ops = {
	.show			= show,
	.store			= store,
};

static struct kobj_type threshold_ktype = {
	.sysfs_ops		= &threshold_ops,
	.default_attrs		= default_attrs,
};
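
/*
 * Every threshold_block embeds a kobject of this type, so reads and
 * writes on its sysfs files arrive at show()/store() above, which
 * recover the block with container_of() and dispatch through the
 * matching threshold_attr.
 */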
static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
					       unsigned int bank,
					       unsigned int block,
					       u32 address)
{
	struct threshold_block *b = NULL;
	u32 low, high;
	int err;

	if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
		return 0;

	if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
		return 0;

	if (!(high & MASK_VALID_HI)) {
		if (block)
			goto recurse;
		else
			return 0;
	}

	if (!(high & MASK_CNTP_HI) ||
	    (high & MASK_LOCKED_HI))
		goto recurse;

	b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	b->block		= block;
	b->bank			= bank;
	b->cpu			= cpu;
	b->address		= address;
	b->interrupt_enable	= 0;
	b->threshold_limit	= THRESHOLD_MAX;

	INIT_LIST_HEAD(&b->miscj);
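
	/*
	 * The first block discovered becomes the bank's ->blocks head;
	 * every later block is chained onto that head's miscj list.
	 */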
	if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
		list_add(&b->miscj,
			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
	} else {
		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
	}

	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
				   per_cpu(threshold_banks, cpu)[bank]->kobj,
				   "misc%i", block);
	if (err)
		goto out_free;
recurse:
	if (!block) {
		address = (low & MASK_BLKPTR_LO) >> 21;
		if (!address)
			return 0;
		address += MCG_XBLK_ADDR;
	} else {
		++address;
	}

	err = allocate_threshold_blocks(cpu, bank, ++block, address);
	if (err)
		goto out_free;

	if (b)
		kobject_uevent(&b->kobj, KOBJ_ADD);

	return err;

out_free:
	if (b) {
		kobject_put(&b->kobj);
		list_del(&b->miscj);
		kfree(b);
	}
	return err;
}
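
/*
 * The recursion above creates one kobject per block, chases the chain
 * to block + 1, and emits the KOBJ_ADD uevent only after the children
 * have been set up; the error path unwinds with kobject_put().
 */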
static __cpuinit long
local_allocate_threshold_blocks(int cpu, unsigned int bank)
{
	return allocate_threshold_blocks(cpu, bank, 0,
					 MSR_IA32_MC0_MISC + bank * 4);
}
/* symlinks sibling shared banks to first core.  first core owns dir/files. */
static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
	int i, err = 0;
	struct threshold_bank *b = NULL;
	char name[32];
#ifdef CONFIG_SMP
	struct cpuinfo_x86 *c = &cpu_data(cpu);
#endif

	sprintf(name, "threshold_bank%i", bank);

#ifdef CONFIG_SMP
	if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {	/* symlink */
		i = cpumask_first(c->llc_shared_map);

		/* first core not up yet */
		if (cpu_data(i).cpu_core_id)
			goto out;

		/* already linked */
		if (per_cpu(threshold_banks, cpu)[bank])
			goto out;

		b = per_cpu(threshold_banks, i)[bank];
		if (!b)
			goto out;

		err = sysfs_create_link(&per_cpu(mce_dev, cpu).kobj,
					b->kobj, name);
		if (err)
			goto out;

		cpumask_copy(b->cpus, c->llc_shared_map);
		per_cpu(threshold_banks, cpu)[bank] = b;

		goto out;
	}
#endif
	b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
	if (!b) {
		err = -ENOMEM;
		goto out;
	}
	if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
		kfree(b);
		err = -ENOMEM;
		goto out;
	}

	b->kobj = kobject_create_and_add(name, &per_cpu(mce_dev, cpu).kobj);
	if (!b->kobj)
		goto out_free;

#ifndef CONFIG_SMP
	cpumask_setall(b->cpus);
#else
	cpumask_copy(b->cpus, c->llc_shared_map);
#endif

	per_cpu(threshold_banks, cpu)[bank] = b;

	err = local_allocate_threshold_blocks(cpu, bank);
	if (err)
		goto out_free;

	for_each_cpu(i, b->cpus) {
		if (i == cpu)
			continue;

		err = sysfs_create_link(&per_cpu(mce_dev, i).kobj,
					b->kobj, name);
		if (err)
			goto out;

		per_cpu(threshold_banks, i)[bank] = b;
	}

	goto out;

out_free:
	per_cpu(threshold_banks, cpu)[bank] = NULL;
	free_cpumask_var(b->cpus);
	kfree(b);
out:
	return err;
}
/* create dir/files for all valid threshold banks */
static __cpuinit int threshold_create_device(unsigned int cpu)
{
	unsigned int bank;
	int err = 0;

	for (bank = 0; bank < NR_BANKS; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		err = threshold_create_bank(cpu, bank);
		if (err)
			goto out;
	}
out:
	return err;
}
/*
 * let's be hotplug friendly.
 * in case of multiple core processors, the first core always takes ownership
 * of shared sysfs dir/files, and rest of the cores will be symlinked to it.
 */
static void deallocate_threshold_block(unsigned int cpu,
				       unsigned int bank)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

	if (!head)
		return;

	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
		kobject_put(&pos->kobj);
		list_del(&pos->miscj);
		kfree(pos);
	}

	kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}
static void threshold_remove_bank(unsigned int cpu, int bank)
{
	struct threshold_bank *b;
	char name[32];
	int i = 0;

	b = per_cpu(threshold_banks, cpu)[bank];
	if (!b)
		return;
	if (!b->blocks)
		goto free_out;

	sprintf(name, "threshold_bank%i", bank);

#ifdef CONFIG_SMP
	/* sibling symlink */
	if (shared_bank[bank] && b->blocks->cpu != cpu) {
		sysfs_remove_link(&per_cpu(mce_dev, cpu).kobj, name);
		per_cpu(threshold_banks, cpu)[bank] = NULL;

		return;
	}
#endif

	/* remove all sibling symlinks before unregistering */
	for_each_cpu(i, b->cpus) {
		if (i == cpu)
			continue;

		sysfs_remove_link(&per_cpu(mce_dev, i).kobj, name);
		per_cpu(threshold_banks, i)[bank] = NULL;
	}
	deallocate_threshold_block(cpu, bank);

free_out:
	kobject_del(b->kobj);
	kobject_put(b->kobj);
	free_cpumask_var(b->cpus);
	kfree(b);
	per_cpu(threshold_banks, cpu)[bank] = NULL;
}
static void threshold_remove_device(unsigned int cpu)
{
	unsigned int bank;

	for (bank = 0; bank < NR_BANKS; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		threshold_remove_bank(cpu, bank);
	}
}
/* get notified when a cpu comes on/off */
static void __cpuinit
amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
{
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		threshold_create_device(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		threshold_remove_device(cpu);
		break;
	default:
		break;
	}
}
static __init int threshold_init_device(void)
{
	unsigned lcpu = 0;

	/* to hit CPUs online before the notifier is up */
	for_each_online_cpu(lcpu) {
		int err = threshold_create_device(lcpu);

		if (err)
			return err;
	}
	threshold_cpu_callback = amd_64_threshold_cpu_callback;

	return 0;
}
device_initcall(threshold_init_device);