1 #include "amd64_edac.h"
2 #include <asm/amd_nb.h>
4 static struct edac_pci_ctl_info *amd64_ctl_pci;
6 static int report_gart_errors;
7 module_param(report_gart_errors, int, 0644);
10 * Set by command line parameter. If BIOS has enabled the ECC, this override is
11 * cleared to prevent re-enabling the hardware by this driver.
13 static int ecc_enable_override;
14 module_param(ecc_enable_override, int, 0644);
16 static struct msr __percpu *msrs;
19 * count successfully initialized driver instances for setup_pci_device()
21 static atomic_t drv_instances = ATOMIC_INIT(0);
23 /* Per-node driver instances */
24 static struct mem_ctl_info **mcis;
25 static struct ecc_settings **ecc_stngs;
28 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
29 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
32 *FIXME: Produce a better mapping/linearisation.
35 u32 scrubval; /* bit pattern for scrub rate */
36 u32 bandwidth; /* bandwidth consumed (bytes/sec) */
38 { 0x01, 1600000000UL},
60 { 0x00, 0UL}, /* scrubbing off */
63 static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
64 u32 *val, const char *func)
68 err = pci_read_config_dword(pdev, offset, val);
70 amd64_warn("%s: error reading F%dx%03x.\n",
71 func, PCI_FUNC(pdev->devfn), offset);
76 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
77 u32 val, const char *func)
81 err = pci_write_config_dword(pdev, offset, val);
83 amd64_warn("%s: error writing to F%dx%03x.\n",
84 func, PCI_FUNC(pdev->devfn), offset);
91 * Depending on the family, F2 DCT reads need special handling:
93 * K8: has a single DCT only
95 * F10h: each DCT has its own set of regs
99 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
102 static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
108 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
111 static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
114 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
118 * Select DCT to which PCI cfg accesses are routed
120 static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
124 amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, ®);
127 amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
130 static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
135 if (addr >= 0x140 && addr <= 0x1a0) {
140 f15h_select_dct(pvt, dct);
142 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
146 * Memory scrubber control interface. For K8, memory scrubbing is handled by
147 * hardware and can involve L2 cache, dcache as well as the main memory. With
148 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
151 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
152 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
153 * bytes/sec for the setting.
155 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
156 * other archs, we might not have access to the caches directly.
160 * scan the scrub rate mapping table for a close or matching bandwidth value to
161 * issue. If requested is too big, then use last maximum value found.
163 static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
169 * map the configured rate (new_bw) to a value specific to the AMD64
170 * memory controller and apply to register. Search for the first
171 * bandwidth entry that is greater or equal than the setting requested
172 * and program that. If at last entry, turn off DRAM scrubbing.
174 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
176 * skip scrub rates which aren't recommended
177 * (see F10 BKDG, F3x58)
179 if (scrubrates[i].scrubval < min_rate)
182 if (scrubrates[i].bandwidth <= new_bw)
186 * if no suitable bandwidth found, turn off DRAM scrubbing
187 * entirely by falling back to the last element in the
192 scrubval = scrubrates[i].scrubval;
194 pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);
197 return scrubrates[i].bandwidth;
202 static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
204 struct amd64_pvt *pvt = mci->pvt_info;
205 u32 min_scrubrate = 0x5;
207 if (boot_cpu_data.x86 == 0xf)
210 /* F15h Erratum #505 */
211 if (boot_cpu_data.x86 == 0x15)
212 f15h_select_dct(pvt, 0);
214 return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
217 static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
219 struct amd64_pvt *pvt = mci->pvt_info;
221 int i, retval = -EINVAL;
223 /* F15h Erratum #505 */
224 if (boot_cpu_data.x86 == 0x15)
225 f15h_select_dct(pvt, 0);
227 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
229 scrubval = scrubval & 0x001F;
231 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
232 if (scrubrates[i].scrubval == scrubval) {
233 retval = scrubrates[i].bandwidth;
241 * returns true if the SysAddr given by sys_addr matches the
242 * DRAM base/limit associated with node_id
244 static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
249 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
250 * all ones if the most significant implemented address bit is 1.
251 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
252 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
253 * Application Programming.
255 addr = sys_addr & 0x000000ffffffffffull;
257 return ((addr >= get_dram_base(pvt, nid)) &&
258 (addr <= get_dram_limit(pvt, nid)));
262 * Attempt to map a SysAddr to a node. On success, return a pointer to the
263 * mem_ctl_info structure for the node that the SysAddr maps to.
265 * On failure, return NULL.
267 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
270 struct amd64_pvt *pvt;
275 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
276 * 3.4.4.2) registers to map the SysAddr to a node ID.
281 * The value of this field should be the same for all DRAM Base
282 * registers. Therefore we arbitrarily choose to read it from the
283 * register for node 0.
285 intlv_en = dram_intlv_en(pvt, 0);
288 for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
289 if (amd64_base_limit_match(pvt, sys_addr, node_id))
295 if (unlikely((intlv_en != 0x01) &&
296 (intlv_en != 0x03) &&
297 (intlv_en != 0x07))) {
298 amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
302 bits = (((u32) sys_addr) >> 12) & intlv_en;
304 for (node_id = 0; ; ) {
305 if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
306 break; /* intlv_sel field matches */
308 if (++node_id >= DRAM_RANGES)
312 /* sanity test for sys_addr */
313 if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
314 amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address"
315 "range for node %d with node interleaving enabled.\n",
316 __func__, sys_addr, node_id);
321 return edac_mc_find((int)node_id);
324 debugf2("sys_addr 0x%lx doesn't match any node\n",
325 (unsigned long)sys_addr);
331 * compute the CS base address of the @csrow on the DRAM controller @dct.
332 * For details see F2x[5C:40] in the processor's BKDG
334 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
335 u64 *base, u64 *mask)
337 u64 csbase, csmask, base_bits, mask_bits;
340 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
341 csbase = pvt->csels[dct].csbases[csrow];
342 csmask = pvt->csels[dct].csmasks[csrow];
343 base_bits = GENMASK(21, 31) | GENMASK(9, 15);
344 mask_bits = GENMASK(21, 29) | GENMASK(9, 15);
347 csbase = pvt->csels[dct].csbases[csrow];
348 csmask = pvt->csels[dct].csmasks[csrow >> 1];
351 if (boot_cpu_data.x86 == 0x15)
352 base_bits = mask_bits = GENMASK(19,30) | GENMASK(5,13);
354 base_bits = mask_bits = GENMASK(19,28) | GENMASK(5,13);
357 *base = (csbase & base_bits) << addr_shift;
360 /* poke holes for the csmask */
361 *mask &= ~(mask_bits << addr_shift);
363 *mask |= (csmask & mask_bits) << addr_shift;
366 #define for_each_chip_select(i, dct, pvt) \
367 for (i = 0; i < pvt->csels[dct].b_cnt; i++)
369 #define chip_select_base(i, dct, pvt) \
370 pvt->csels[dct].csbases[i]
372 #define for_each_chip_select_mask(i, dct, pvt) \
373 for (i = 0; i < pvt->csels[dct].m_cnt; i++)
376 * @input_addr is an InputAddr associated with the node given by mci. Return the
377 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
379 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
381 struct amd64_pvt *pvt;
387 for_each_chip_select(csrow, 0, pvt) {
388 if (!csrow_enabled(csrow, 0, pvt))
391 get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
395 if ((input_addr & mask) == (base & mask)) {
396 debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
397 (unsigned long)input_addr, csrow,
403 debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
404 (unsigned long)input_addr, pvt->mc_node_id);
410 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
411 * for the node represented by mci. Info is passed back in *hole_base,
412 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
413 * info is invalid. Info may be invalid for either of the following reasons:
415 * - The revision of the node is not E or greater. In this case, the DRAM Hole
416 * Address Register does not exist.
418 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
419 * indicating that its contents are not valid.
421 * The values passed back in *hole_base, *hole_offset, and *hole_size are
422 * complete 32-bit values despite the fact that the bitfields in the DHAR
423 * only represent bits 31-24 of the base and offset values.
425 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
426 u64 *hole_offset, u64 *hole_size)
428 struct amd64_pvt *pvt = mci->pvt_info;
431 /* only revE and later have the DRAM Hole Address Register */
432 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
433 debugf1(" revision %d for node %d does not support DHAR\n",
434 pvt->ext_model, pvt->mc_node_id);
438 /* valid for Fam10h and above */
439 if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
440 debugf1(" Dram Memory Hoisting is DISABLED on this system\n");
444 if (!dhar_valid(pvt)) {
445 debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
450 /* This node has Memory Hoisting */
452 /* +------------------+--------------------+--------------------+-----
453 * | memory | DRAM hole | relocated |
454 * | [0, (x - 1)] | [x, 0xffffffff] | addresses from |
456 * | | | [0x100000000, |
457 * | | | (0x100000000+ |
458 * | | | (0xffffffff-x))] |
459 * +------------------+--------------------+--------------------+-----
461 * Above is a diagram of physical memory showing the DRAM hole and the
462 * relocated addresses from the DRAM hole. As shown, the DRAM hole
463 * starts at address x (the base address) and extends through address
464 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
465 * addresses in the hole so that they start at 0x100000000.
468 base = dhar_base(pvt);
471 *hole_size = (0x1ull << 32) - base;
473 if (boot_cpu_data.x86 > 0xf)
474 *hole_offset = f10_dhar_offset(pvt);
476 *hole_offset = k8_dhar_offset(pvt);
478 debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
479 pvt->mc_node_id, (unsigned long)*hole_base,
480 (unsigned long)*hole_offset, (unsigned long)*hole_size);
484 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
487 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
488 * assumed that sys_addr maps to the node given by mci.
490 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
491 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
492 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
493 * then it is also involved in translating a SysAddr to a DramAddr. Sections
494 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
495 * These parts of the documentation are unclear. I interpret them as follows:
497 * When node n receives a SysAddr, it processes the SysAddr as follows:
499 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
500 * Limit registers for node n. If the SysAddr is not within the range
501 * specified by the base and limit values, then node n ignores the Sysaddr
502 * (since it does not map to node n). Otherwise continue to step 2 below.
504 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
505 * disabled so skip to step 3 below. Otherwise see if the SysAddr is within
506 * the range of relocated addresses (starting at 0x100000000) from the DRAM
507 * hole. If not, skip to step 3 below. Else get the value of the
508 * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
509 * offset defined by this value from the SysAddr.
511 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
512 * Base register for node n. To obtain the DramAddr, subtract the base
513 * address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
515 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
517 struct amd64_pvt *pvt = mci->pvt_info;
518 u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
521 dram_base = get_dram_base(pvt, pvt->mc_node_id);
523 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
526 if ((sys_addr >= (1ull << 32)) &&
527 (sys_addr < ((1ull << 32) + hole_size))) {
528 /* use DHAR to translate SysAddr to DramAddr */
529 dram_addr = sys_addr - hole_offset;
531 debugf2("using DHAR to translate SysAddr 0x%lx to "
533 (unsigned long)sys_addr,
534 (unsigned long)dram_addr);
541 * Translate the SysAddr to a DramAddr as shown near the start of
542 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
543 * only deals with 40-bit values. Therefore we discard bits 63-40 of
544 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
545 * discard are all 1s. Otherwise the bits we discard are all 0s. See
546 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
547 * Programmer's Manual Volume 1 Application Programming.
549 dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;
551 debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
552 "DramAddr 0x%lx\n", (unsigned long)sys_addr,
553 (unsigned long)dram_addr);
558 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
559 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
560 * for node interleaving.
562 static int num_node_interleave_bits(unsigned intlv_en)
564 static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
567 BUG_ON(intlv_en > 7);
568 n = intlv_shift_table[intlv_en];
572 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
573 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
575 struct amd64_pvt *pvt;
582 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
583 * concerning translating a DramAddr to an InputAddr.
585 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
586 input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
589 debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
590 intlv_shift, (unsigned long)dram_addr,
591 (unsigned long)input_addr);
597 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
598 * assumed that @sys_addr maps to the node given by mci.
600 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
605 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
607 debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
608 (unsigned long)sys_addr, (unsigned long)input_addr);
615 * @input_addr is an InputAddr associated with the node represented by mci.
616 * Translate @input_addr to a DramAddr and return the result.
618 static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
620 struct amd64_pvt *pvt;
621 unsigned node_id, intlv_shift;
626 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
627 * shows how to translate a DramAddr to an InputAddr. Here we reverse
628 * this procedure. When translating from a DramAddr to an InputAddr, the
629 * bits used for node interleaving are discarded. Here we recover these
630 * bits from the IntlvSel field of the DRAM Limit register (section
631 * 3.4.4.2) for the node that input_addr is associated with.
634 node_id = pvt->mc_node_id;
638 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
639 if (intlv_shift == 0) {
640 debugf1(" InputAddr 0x%lx translates to DramAddr of "
641 "same value\n", (unsigned long)input_addr);
646 bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) +
647 (input_addr & 0xfff);
649 intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
650 dram_addr = bits + (intlv_sel << 12);
652 debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
653 "(%d node interleave bits)\n", (unsigned long)input_addr,
654 (unsigned long)dram_addr, intlv_shift);
660 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
661 * @dram_addr to a SysAddr.
663 static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
665 struct amd64_pvt *pvt = mci->pvt_info;
666 u64 hole_base, hole_offset, hole_size, base, sys_addr;
669 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
672 if ((dram_addr >= hole_base) &&
673 (dram_addr < (hole_base + hole_size))) {
674 sys_addr = dram_addr + hole_offset;
676 debugf1("using DHAR to translate DramAddr 0x%lx to "
677 "SysAddr 0x%lx\n", (unsigned long)dram_addr,
678 (unsigned long)sys_addr);
684 base = get_dram_base(pvt, pvt->mc_node_id);
685 sys_addr = dram_addr + base;
688 * The sys_addr we have computed up to this point is a 40-bit value
689 * because the k8 deals with 40-bit values. However, the value we are
690 * supposed to return is a full 64-bit physical address. The AMD
691 * x86-64 architecture specifies that the most significant implemented
692 * address bit through bit 63 of a physical address must be either all
693 * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
694 * 64-bit value below. See section 3.4.2 of AMD publication 24592:
695 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
698 sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
700 debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
701 pvt->mc_node_id, (unsigned long)dram_addr,
702 (unsigned long)sys_addr);
708 * @input_addr is an InputAddr associated with the node given by mci. Translate
709 * @input_addr to a SysAddr.
711 static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
714 return dram_addr_to_sys_addr(mci,
715 input_addr_to_dram_addr(mci, input_addr));
718 /* Map the Error address to a PAGE and PAGE OFFSET. */
719 static inline void error_address_to_page_and_offset(u64 error_address,
720 u32 *page, u32 *offset)
722 *page = (u32) (error_address >> PAGE_SHIFT);
723 *offset = ((u32) error_address) & ~PAGE_MASK;
727 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
728 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
729 * of a node that detected an ECC memory error. mci represents the node that
730 * the error address maps to (possibly different from the node that detected
731 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
734 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
738 csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
741 amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
742 "address 0x%lx\n", (unsigned long)sys_addr);
746 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
749 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
752 static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
755 unsigned long edac_cap = EDAC_FLAG_NONE;
757 bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
761 if (pvt->dclr0 & BIT(bit))
762 edac_cap = EDAC_FLAG_SECDED;
767 static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);
769 static void amd64_dump_dramcfg_low(u32 dclr, int chan)
771 debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
773 debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
774 (dclr & BIT(16)) ? "un" : "",
775 (dclr & BIT(19)) ? "yes" : "no");
777 debugf1(" PAR/ERR parity: %s\n",
778 (dclr & BIT(8)) ? "enabled" : "disabled");
780 if (boot_cpu_data.x86 == 0x10)
781 debugf1(" DCT 128bit mode width: %s\n",
782 (dclr & BIT(11)) ? "128b" : "64b");
784 debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
785 (dclr & BIT(12)) ? "yes" : "no",
786 (dclr & BIT(13)) ? "yes" : "no",
787 (dclr & BIT(14)) ? "yes" : "no",
788 (dclr & BIT(15)) ? "yes" : "no");
791 /* Display and decode various NB registers for debug purposes. */
792 static void dump_misc_regs(struct amd64_pvt *pvt)
794 debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
796 debugf1(" NB two channel DRAM capable: %s\n",
797 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
799 debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n",
800 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
801 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
803 amd64_dump_dramcfg_low(pvt->dclr0, 0);
805 debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
807 debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
809 pvt->dhar, dhar_base(pvt),
810 (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
811 : f10_dhar_offset(pvt));
813 debugf1(" DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
815 amd64_debug_display_dimm_sizes(pvt, 0);
817 /* everything below this point is Fam10h and above */
818 if (boot_cpu_data.x86 == 0xf)
821 amd64_debug_display_dimm_sizes(pvt, 1);
823 amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
825 /* Only if NOT ganged does dclr1 have valid info */
826 if (!dct_ganging_enabled(pvt))
827 amd64_dump_dramcfg_low(pvt->dclr1, 1);
831 * see BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
833 static void prep_chip_selects(struct amd64_pvt *pvt)
835 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
836 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
837 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
839 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
840 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
845 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
847 static void read_dct_base_mask(struct amd64_pvt *pvt)
851 prep_chip_selects(pvt);
853 for_each_chip_select(cs, 0, pvt) {
854 int reg0 = DCSB0 + (cs * 4);
855 int reg1 = DCSB1 + (cs * 4);
856 u32 *base0 = &pvt->csels[0].csbases[cs];
857 u32 *base1 = &pvt->csels[1].csbases[cs];
859 if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
860 debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
863 if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
866 if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
867 debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
871 for_each_chip_select_mask(cs, 0, pvt) {
872 int reg0 = DCSM0 + (cs * 4);
873 int reg1 = DCSM1 + (cs * 4);
874 u32 *mask0 = &pvt->csels[0].csmasks[cs];
875 u32 *mask1 = &pvt->csels[1].csmasks[cs];
877 if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
878 debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
881 if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
884 if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
885 debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
890 static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
894 /* F15h supports only DDR3 */
895 if (boot_cpu_data.x86 >= 0x15)
896 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
897 else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) {
898 if (pvt->dchr0 & DDR3_MODE)
899 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
901 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
903 type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
906 amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);
911 /* Get the number of DCT channels the memory controller is using. */
912 static int k8_early_channel_count(struct amd64_pvt *pvt)
916 if (pvt->ext_model >= K8_REV_F)
917 /* RevF (NPT) and later */
918 flag = pvt->dclr0 & WIDTH_128;
920 /* RevE and earlier */
921 flag = pvt->dclr0 & REVE_WIDTH_128;
926 return (flag) ? 2 : 1;
929 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
930 static u64 get_error_address(struct mce *m)
932 struct cpuinfo_x86 *c = &boot_cpu_data;
942 addr = m->addr & GENMASK(start_bit, end_bit);
945 * Erratum 637 workaround
947 if (c->x86 == 0x15) {
948 struct amd64_pvt *pvt;
949 u64 cc6_base, tmp_addr;
951 u8 mce_nid, intlv_en;
953 if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7)
956 mce_nid = amd_get_nb_id(m->extcpu);
957 pvt = mcis[mce_nid]->pvt_info;
959 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
960 intlv_en = tmp >> 21 & 0x7;
962 /* add [47:27] + 3 trailing bits */
963 cc6_base = (tmp & GENMASK(0, 20)) << 3;
965 /* reverse and add DramIntlvEn */
966 cc6_base |= intlv_en ^ 0x7;
972 return cc6_base | (addr & GENMASK(0, 23));
974 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
977 tmp_addr = (addr & GENMASK(12, 23)) << __fls(intlv_en + 1);
979 /* OR DramIntlvSel into bits [14:12] */
980 tmp_addr |= (tmp & GENMASK(21, 23)) >> 9;
982 /* add remaining [11:0] bits from original MC4_ADDR */
983 tmp_addr |= addr & GENMASK(0, 11);
985 return cc6_base | tmp_addr;
991 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
993 struct cpuinfo_x86 *c = &boot_cpu_data;
994 int off = range << 3;
996 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
997 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
1002 if (!dram_rw(pvt, range))
1005 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
1006 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
1008 /* Factor in CC6 save area by reading dst node's limit reg */
1009 if (c->x86 == 0x15) {
1010 struct pci_dev *f1 = NULL;
1011 u8 nid = dram_dst_node(pvt, range);
1014 f1 = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x18 + nid, 1));
1018 amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
1020 pvt->ranges[range].lim.lo &= GENMASK(0, 15);
1022 /* {[39:27],111b} */
1023 pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
1025 pvt->ranges[range].lim.hi &= GENMASK(0, 7);
1028 pvt->ranges[range].lim.hi |= llim >> 13;
1034 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1037 struct mem_ctl_info *src_mci;
1038 struct amd64_pvt *pvt = mci->pvt_info;
1042 error_address_to_page_and_offset(sys_addr, &page, &offset);
1045 * Find out which node the error address belongs to. This may be
1046 * different from the node that detected the error.
1048 src_mci = find_mc_by_sys_addr(mci, sys_addr);
1050 amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1051 (unsigned long)sys_addr);
1052 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1053 page, offset, syndrome,
1056 "failed to map error addr to a node",
1061 /* Now map the sys_addr to a CSROW */
1062 csrow = sys_addr_to_csrow(src_mci, sys_addr);
1064 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1065 page, offset, syndrome,
1068 "failed to map error addr to a csrow",
1073 /* CHIPKILL enabled */
1074 if (pvt->nbcfg & NBCFG_CHIPKILL) {
1075 channel = get_channel_from_ecc_syndrome(mci, syndrome);
1078 * Syndrome didn't map, so we don't know which of the
1079 * 2 DIMMs is in error. So we need to ID 'both' of them
1082 amd64_mc_warn(src_mci, "unknown syndrome 0x%04x - "
1083 "possible error reporting race\n",
1085 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1086 page, offset, syndrome,
1089 "unknown syndrome - possible error reporting race",
1095 * non-chipkill ecc mode
1097 * The k8 documentation is unclear about how to determine the
1098 * channel number when using non-chipkill memory. This method
1099 * was obtained from email communication with someone at AMD.
1100 * (Wish the email was placed in this comment - norsk)
1102 channel = ((sys_addr & BIT(3)) != 0);
1105 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, src_mci,
1106 page, offset, syndrome,
1108 EDAC_MOD_STR, "", NULL);
1111 static int ddr2_cs_size(unsigned i, bool dct_width)
1117 else if (!(i & 0x1))
1120 shift = (i + 1) >> 1;
1122 return 128 << (shift + !!dct_width);
1125 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1128 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1130 if (pvt->ext_model >= K8_REV_F) {
1131 WARN_ON(cs_mode > 11);
1132 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1134 else if (pvt->ext_model >= K8_REV_D) {
1136 WARN_ON(cs_mode > 10);
1139 * the below calculation, besides trying to win an obfuscated C
1140 * contest, maps cs_mode values to DIMM chip select sizes. The
1143 * cs_mode CS size (mb)
1144 * ======= ============
1157 * Basically, it calculates a value with which to shift the
1158 * smallest CS size of 32MB.
1160 * ddr[23]_cs_size have a similar purpose.
1162 diff = cs_mode/3 + (unsigned)(cs_mode > 5);
1164 return 32 << (cs_mode - diff);
1167 WARN_ON(cs_mode > 6);
1168 return 32 << cs_mode;
1173 * Get the number of DCT channels in use.
1176 * number of Memory Channels in operation
1178 * contents of the DCL0_LOW register
1180 static int f1x_early_channel_count(struct amd64_pvt *pvt)
1182 int i, j, channels = 0;
1184 /* On F10h, if we are in 128 bit mode, then we are using 2 channels */
1185 if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128))
1189 * Need to check if in unganged mode: In such, there are 2 channels,
1190 * but they are not in 128 bit mode and thus the above 'dclr0' status
1193 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1194 * their CSEnable bit on. If so, then SINGLE DIMM case.
1196 debugf0("Data width is not 128 bits - need more decoding\n");
1199 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1200 * is more than just one DIMM present in unganged mode. Need to check
1201 * both controllers since DIMMs can be placed in either one.
1203 for (i = 0; i < 2; i++) {
1204 u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
1206 for (j = 0; j < 4; j++) {
1207 if (DBAM_DIMM(j, dbam) > 0) {
1217 amd64_info("MCT channel count: %d\n", channels);
1222 static int ddr3_cs_size(unsigned i, bool dct_width)
1227 if (i == 0 || i == 3 || i == 4)
1233 else if (!(i & 0x1))
1236 shift = (i + 1) >> 1;
1239 cs_size = (128 * (1 << !!dct_width)) << shift;
1244 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1247 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1249 WARN_ON(cs_mode > 11);
1251 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1252 return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
1254 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1258 * F15h supports only 64bit DCT interfaces
1260 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1263 WARN_ON(cs_mode > 12);
1265 return ddr3_cs_size(cs_mode, false);
1268 static void read_dram_ctl_register(struct amd64_pvt *pvt)
1271 if (boot_cpu_data.x86 == 0xf)
1274 if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
1275 debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1276 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
1278 debugf0(" DCTs operate in %s mode.\n",
1279 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
1281 if (!dct_ganging_enabled(pvt))
1282 debugf0(" Address range split per DCT: %s\n",
1283 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1285 debugf0(" data interleave for ECC: %s, "
1286 "DRAM cleared since last warm reset: %s\n",
1287 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1288 (dct_memory_cleared(pvt) ? "yes" : "no"));
1290 debugf0(" channel interleave: %s, "
1291 "interleave bits selector: 0x%x\n",
1292 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1293 dct_sel_interleave_addr(pvt));
1296 amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
1300 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1301 * Interleaving Modes.
1303 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1304 bool hi_range_sel, u8 intlv_en)
1306 u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
1308 if (dct_ganging_enabled(pvt))
1312 return dct_sel_high;
1315 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1317 if (dct_interleave_enabled(pvt)) {
1318 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1320 /* return DCT select function: 0=DCT0, 1=DCT1 */
1322 return sys_addr >> 6 & 1;
1324 if (intlv_addr & 0x2) {
1325 u8 shift = intlv_addr & 0x1 ? 9 : 6;
1326 u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
1328 return ((sys_addr >> shift) & 1) ^ temp;
1331 return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
1334 if (dct_high_range_enabled(pvt))
1335 return ~dct_sel_high & 1;
1340 /* Convert the sys_addr to the normalized DCT address */
1341 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range,
1342 u64 sys_addr, bool hi_rng,
1343 u32 dct_sel_base_addr)
1346 u64 dram_base = get_dram_base(pvt, range);
1347 u64 hole_off = f10_dhar_offset(pvt);
1348 u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
1353 * base address of high range is below 4Gb
1354 * (bits [47:27] at [31:11])
1355 * DRAM address space on this DCT is hoisted above 4Gb &&
1358 * remove hole offset from sys_addr
1360 * remove high range offset from sys_addr
1362 if ((!(dct_sel_base_addr >> 16) ||
1363 dct_sel_base_addr < dhar_base(pvt)) &&
1365 (sys_addr >= BIT_64(32)))
1366 chan_off = hole_off;
1368 chan_off = dct_sel_base_off;
1372 * we have a valid hole &&
1377 * remove dram base to normalize to DCT address
1379 if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
1380 chan_off = hole_off;
1382 chan_off = dram_base;
1385 return (sys_addr & GENMASK(6,47)) - (chan_off & GENMASK(23,47));
1389 * checks if the csrow passed in is marked as SPARED, if so returns the new
1392 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
1396 if (online_spare_swap_done(pvt, dct) &&
1397 csrow == online_spare_bad_dramcs(pvt, dct)) {
1399 for_each_chip_select(tmp_cs, dct, pvt) {
1400 if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
1410 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1411 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1414 * -EINVAL: NOT FOUND
1415 * 0..csrow = Chip-Select Row
1417 static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
1419 struct mem_ctl_info *mci;
1420 struct amd64_pvt *pvt;
1421 u64 cs_base, cs_mask;
1422 int cs_found = -EINVAL;
1429 pvt = mci->pvt_info;
1431 debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct);
1433 for_each_chip_select(csrow, dct, pvt) {
1434 if (!csrow_enabled(csrow, dct, pvt))
1437 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1439 debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1440 csrow, cs_base, cs_mask);
1444 debugf1(" (InputAddr & ~CSMask)=0x%llx "
1445 "(CSBase & ~CSMask)=0x%llx\n",
1446 (in_addr & cs_mask), (cs_base & cs_mask));
1448 if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
1449 cs_found = f10_process_possible_spare(pvt, dct, csrow);
1451 debugf1(" MATCH csrow=%d\n", cs_found);
1459 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
1460 * swapped with a region located at the bottom of memory so that the GPU can use
1461 * the interleaved region and thus two channels.
1463 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
1465 u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
1467 if (boot_cpu_data.x86 == 0x10) {
1468 /* only revC3 and revE have that feature */
1469 if (boot_cpu_data.x86_model < 4 ||
1470 (boot_cpu_data.x86_model < 0xa &&
1471 boot_cpu_data.x86_mask < 3))
1475 amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);
1477 if (!(swap_reg & 0x1))
1480 swap_base = (swap_reg >> 3) & 0x7f;
1481 swap_limit = (swap_reg >> 11) & 0x7f;
1482 rgn_size = (swap_reg >> 20) & 0x7f;
1483 tmp_addr = sys_addr >> 27;
1485 if (!(sys_addr >> 34) &&
1486 (((tmp_addr >= swap_base) &&
1487 (tmp_addr <= swap_limit)) ||
1488 (tmp_addr < rgn_size)))
1489 return sys_addr ^ (u64)swap_base << 27;
1494 /* For a given @dram_range, check if @sys_addr falls within it. */
1495 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1496 u64 sys_addr, int *nid, int *chan_sel)
1498 int cs_found = -EINVAL;
1502 bool high_range = false;
1504 u8 node_id = dram_dst_node(pvt, range);
1505 u8 intlv_en = dram_intlv_en(pvt, range);
1506 u32 intlv_sel = dram_intlv_sel(pvt, range);
1508 debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1509 range, sys_addr, get_dram_limit(pvt, range));
1511 if (dhar_valid(pvt) &&
1512 dhar_base(pvt) <= sys_addr &&
1513 sys_addr < BIT_64(32)) {
1514 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1519 if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1522 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
1524 dct_sel_base = dct_sel_baseaddr(pvt);
1527 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1528 * select between DCT0 and DCT1.
1530 if (dct_high_range_enabled(pvt) &&
1531 !dct_ganging_enabled(pvt) &&
1532 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1535 channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
1537 chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
1538 high_range, dct_sel_base);
1540 /* Remove node interleaving, see F1x120 */
1542 chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
1543 (chan_addr & 0xfff);
1545 /* remove channel interleave */
1546 if (dct_interleave_enabled(pvt) &&
1547 !dct_high_range_enabled(pvt) &&
1548 !dct_ganging_enabled(pvt)) {
1550 if (dct_sel_interleave_addr(pvt) != 1) {
1551 if (dct_sel_interleave_addr(pvt) == 0x3)
1553 chan_addr = ((chan_addr >> 10) << 9) |
1554 (chan_addr & 0x1ff);
1556 /* A[6] or hash 6 */
1557 chan_addr = ((chan_addr >> 7) << 6) |
1561 chan_addr = ((chan_addr >> 13) << 12) |
1562 (chan_addr & 0xfff);
1565 debugf1(" Normalized DCT addr: 0x%llx\n", chan_addr);
1567 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
1569 if (cs_found >= 0) {
1571 *chan_sel = channel;
1576 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
1577 int *node, int *chan_sel)
1579 int cs_found = -EINVAL;
1582 for (range = 0; range < DRAM_RANGES; range++) {
1584 if (!dram_rw(pvt, range))
1587 if ((get_dram_base(pvt, range) <= sys_addr) &&
1588 (get_dram_limit(pvt, range) >= sys_addr)) {
1590 cs_found = f1x_match_to_this_node(pvt, range,
1601 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
1602 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
1604 * The @sys_addr is usually an error address received from the hardware
1607 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1610 struct amd64_pvt *pvt = mci->pvt_info;
1612 int nid, csrow, chan = 0;
1614 error_address_to_page_and_offset(sys_addr, &page, &offset);
1616 csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
1619 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1620 page, offset, syndrome,
1623 "failed to map error addr to a csrow",
1629 * We need the syndromes for channel detection only when we're
1630 * ganged. Otherwise @chan should already contain the channel at
1633 if (dct_ganging_enabled(pvt))
1634 chan = get_channel_from_ecc_syndrome(mci, syndrome);
1636 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1637 page, offset, syndrome,
1639 EDAC_MOD_STR, "", NULL);
1643 * debug routine to display the memory sizes of all logical DIMMs and its
1646 static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
1648 int dimm, size0, size1, factor = 0;
1649 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
1650 u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
1652 if (boot_cpu_data.x86 == 0xf) {
1653 if (pvt->dclr0 & WIDTH_128)
1656 /* K8 families < revF not supported yet */
1657 if (pvt->ext_model < K8_REV_F)
1663 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
1664 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
1665 : pvt->csels[0].csbases;
1667 debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);
1669 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1671 /* Dump memory sizes for DIMM and its CSROWs */
1672 for (dimm = 0; dimm < 4; dimm++) {
1675 if (dcsb[dimm*2] & DCSB_CS_ENABLE)
1676 size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
1677 DBAM_DIMM(dimm, dbam));
1680 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
1681 size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
1682 DBAM_DIMM(dimm, dbam));
1684 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
1685 dimm * 2, size0 << factor,
1686 dimm * 2 + 1, size1 << factor);
1690 static struct amd64_family_type amd64_family_types[] = {
1693 .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
1694 .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
1696 .early_channel_count = k8_early_channel_count,
1697 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
1698 .dbam_to_cs = k8_dbam_to_chip_select,
1699 .read_dct_pci_cfg = k8_read_dct_pci_cfg,
1704 .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
1705 .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
1707 .early_channel_count = f1x_early_channel_count,
1708 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1709 .dbam_to_cs = f10_dbam_to_chip_select,
1710 .read_dct_pci_cfg = f10_read_dct_pci_cfg,
1715 .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
1716 .f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
1718 .early_channel_count = f1x_early_channel_count,
1719 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1720 .dbam_to_cs = f15_dbam_to_chip_select,
1721 .read_dct_pci_cfg = f15_read_dct_pci_cfg,
1726 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1727 unsigned int device,
1728 struct pci_dev *related)
1730 struct pci_dev *dev = NULL;
1732 dev = pci_get_device(vendor, device, dev);
1734 if ((dev->bus->number == related->bus->number) &&
1735 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1737 dev = pci_get_device(vendor, device, dev);
1744 * These are tables of eigenvectors (one per line) which can be used for the
1745 * construction of the syndrome tables. The modified syndrome search algorithm
1746 * uses those to find the symbol in error and thus the DIMM.
1748 * Algorithm courtesy of Ross LaFetra from AMD.
1750 static u16 x4_vectors[] = {
1751 0x2f57, 0x1afe, 0x66cc, 0xdd88,
1752 0x11eb, 0x3396, 0x7f4c, 0xeac8,
1753 0x0001, 0x0002, 0x0004, 0x0008,
1754 0x1013, 0x3032, 0x4044, 0x8088,
1755 0x106b, 0x30d6, 0x70fc, 0xe0a8,
1756 0x4857, 0xc4fe, 0x13cc, 0x3288,
1757 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
1758 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
1759 0x15c1, 0x2a42, 0x89ac, 0x4758,
1760 0x2b03, 0x1602, 0x4f0c, 0xca08,
1761 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
1762 0x8ba7, 0x465e, 0x244c, 0x1cc8,
1763 0x2b87, 0x164e, 0x642c, 0xdc18,
1764 0x40b9, 0x80de, 0x1094, 0x20e8,
1765 0x27db, 0x1eb6, 0x9dac, 0x7b58,
1766 0x11c1, 0x2242, 0x84ac, 0x4c58,
1767 0x1be5, 0x2d7a, 0x5e34, 0xa718,
1768 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
1769 0x4c97, 0xc87e, 0x11fc, 0x33a8,
1770 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
1771 0x16b3, 0x3d62, 0x4f34, 0x8518,
1772 0x1e2f, 0x391a, 0x5cac, 0xf858,
1773 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
1774 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
1775 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
1776 0x4397, 0xc27e, 0x17fc, 0x3ea8,
1777 0x1617, 0x3d3e, 0x6464, 0xb8b8,
1778 0x23ff, 0x12aa, 0xab6c, 0x56d8,
1779 0x2dfb, 0x1ba6, 0x913c, 0x7328,
1780 0x185d, 0x2ca6, 0x7914, 0x9e28,
1781 0x171b, 0x3e36, 0x7d7c, 0xebe8,
1782 0x4199, 0x82ee, 0x19f4, 0x2e58,
1783 0x4807, 0xc40e, 0x130c, 0x3208,
1784 0x1905, 0x2e0a, 0x5804, 0xac08,
1785 0x213f, 0x132a, 0xadfc, 0x5ba8,
1786 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
1789 static u16 x8_vectors[] = {
1790 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
1791 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
1792 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
1793 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
1794 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
1795 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
1796 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
1797 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
1798 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
1799 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
1800 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
1801 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
1802 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
1803 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
1804 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
1805 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
1806 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
1807 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
1808 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
1811 static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs,
1814 unsigned int i, err_sym;
1816 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
1818 unsigned v_idx = err_sym * v_dim;
1819 unsigned v_end = (err_sym + 1) * v_dim;
1821 /* walk over all 16 bits of the syndrome */
1822 for (i = 1; i < (1U << 16); i <<= 1) {
1824 /* if bit is set in that eigenvector... */
1825 if (v_idx < v_end && vectors[v_idx] & i) {
1826 u16 ev_comp = vectors[v_idx++];
1828 /* ... and bit set in the modified syndrome, */
1838 /* can't get to zero, move to next symbol */
1843 debugf0("syndrome(%x) not found\n", syndrome);
1847 static int map_err_sym_to_channel(int err_sym, int sym_size)
1860 return err_sym >> 4;
1866 /* imaginary bits not in a DIMM */
1868 WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
1880 return err_sym >> 3;
1886 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
1888 struct amd64_pvt *pvt = mci->pvt_info;
1891 if (pvt->ecc_sym_sz == 8)
1892 err_sym = decode_syndrome(syndrome, x8_vectors,
1893 ARRAY_SIZE(x8_vectors),
1895 else if (pvt->ecc_sym_sz == 4)
1896 err_sym = decode_syndrome(syndrome, x4_vectors,
1897 ARRAY_SIZE(x4_vectors),
1900 amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
1904 return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
1908 * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
1909 * ADDRESS and process.
1911 static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
1913 struct amd64_pvt *pvt = mci->pvt_info;
1917 /* Ensure that the Error Address is VALID */
1918 if (!(m->status & MCI_STATUS_ADDRV)) {
1919 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
1920 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1924 "HW has no ERROR_ADDRESS available",
1929 sys_addr = get_error_address(m);
1930 syndrome = extract_syndrome(m->status);
1932 amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);
1934 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, syndrome);
1937 /* Handle any Un-correctable Errors (UEs) */
1938 static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
1940 struct mem_ctl_info *log_mci, *src_mci = NULL;
1947 if (!(m->status & MCI_STATUS_ADDRV)) {
1948 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
1949 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
1953 "HW has no ERROR_ADDRESS available",
1958 sys_addr = get_error_address(m);
1959 error_address_to_page_and_offset(sys_addr, &page, &offset);
1962 * Find out which node the error address belongs to. This may be
1963 * different from the node that detected the error.
1965 src_mci = find_mc_by_sys_addr(mci, sys_addr);
1967 amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
1968 (unsigned long)sys_addr);
1969 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
1973 "ERROR ADDRESS NOT mapped to a MC", NULL);
1979 csrow = sys_addr_to_csrow(log_mci, sys_addr);
1981 amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
1982 (unsigned long)sys_addr);
1983 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
1987 "ERROR ADDRESS NOT mapped to CS",
1990 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
1993 EDAC_MOD_STR, "", NULL);
1997 static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
2000 u16 ec = EC(m->status);
2001 u8 xec = XEC(m->status, 0x1f);
2002 u8 ecc_type = (m->status >> 45) & 0x3;
2004 /* Bail early out if this was an 'observed' error */
2005 if (PP(ec) == NBSL_PP_OBS)
2008 /* Do only ECC errors */
2009 if (xec && xec != F10_NBSL_EXT_ERR_ECC)
2013 amd64_handle_ce(mci, m);
2014 else if (ecc_type == 1)
2015 amd64_handle_ue(mci, m);
2018 void amd64_decode_bus_error(int node_id, struct mce *m)
2020 __amd64_decode_bus_error(mcis[node_id], m);
2024 * Use pvt->F2 which contains the F2 CPU PCI device to get the related
2025 * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
2027 static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
2029 /* Reserve the ADDRESS MAP Device */
2030 pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
2032 amd64_err("error address map device not found: "
2033 "vendor %x device 0x%x (broken BIOS?)\n",
2034 PCI_VENDOR_ID_AMD, f1_id);
2038 /* Reserve the MISC Device */
2039 pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
2041 pci_dev_put(pvt->F1);
2044 amd64_err("error F3 device not found: "
2045 "vendor %x device 0x%x (broken BIOS?)\n",
2046 PCI_VENDOR_ID_AMD, f3_id);
2050 debugf1("F1: %s\n", pci_name(pvt->F1));
2051 debugf1("F2: %s\n", pci_name(pvt->F2));
2052 debugf1("F3: %s\n", pci_name(pvt->F3));
2057 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
2059 pci_dev_put(pvt->F1);
2060 pci_dev_put(pvt->F3);
2064 * Retrieve the hardware registers of the memory controller (this includes the
2065 * 'Address Map' and 'Misc' device regs)
2067 static void read_mc_regs(struct amd64_pvt *pvt)
2069 struct cpuinfo_x86 *c = &boot_cpu_data;
2075 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2076 * those are Read-As-Zero
2078 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2079 debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem);
2081 /* check first whether TOP_MEM2 is enabled */
2082 rdmsrl(MSR_K8_SYSCFG, msr_val);
2083 if (msr_val & (1U << 21)) {
2084 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2085 debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2087 debugf0(" TOP_MEM2 disabled.\n");
2089 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2091 read_dram_ctl_register(pvt);
2093 for (range = 0; range < DRAM_RANGES; range++) {
2096 /* read settings for this DRAM range */
2097 read_dram_base_limit_regs(pvt, range);
2099 rw = dram_rw(pvt, range);
2103 debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2105 get_dram_base(pvt, range),
2106 get_dram_limit(pvt, range));
2108 debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2109 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2110 (rw & 0x1) ? "R" : "-",
2111 (rw & 0x2) ? "W" : "-",
2112 dram_intlv_sel(pvt, range),
2113 dram_dst_node(pvt, range));
2116 read_dct_base_mask(pvt);
2118 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
2119 amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
2121 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2123 amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0);
2124 amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0);
2126 if (!dct_ganging_enabled(pvt)) {
2127 amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1);
2128 amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1);
2131 pvt->ecc_sym_sz = 4;
2133 if (c->x86 >= 0x10) {
2134 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2135 amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
2137 /* F10h, revD and later can do x8 ECC too */
2138 if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25))
2139 pvt->ecc_sym_sz = 8;
2141 dump_misc_regs(pvt);
2145 * NOTE: CPU Revision Dependent code
2148 * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2149 * k8 private pointer to -->
2150 * DRAM Bank Address mapping register
2152 * DCL register where dual_channel_active is
2154 * The DBAM register consists of 4 sets of 4 bits each definitions:
2157 * 0-3 CSROWs 0 and 1
2158 * 4-7 CSROWs 2 and 3
2159 * 8-11 CSROWs 4 and 5
2160 * 12-15 CSROWs 6 and 7
2162 * Values range from: 0 to 15
2163 * The meaning of the values depends on CPU revision and dual-channel state,
2164 * see relevant BKDG more info.
2166 * The memory controller provides for total of only 8 CSROWs in its current
2167 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2168 * single channel or two (2) DIMMs in dual channel mode.
2170 * The following code logic collapses the various tables for CSROW based on CPU
2174 * The number of PAGE_SIZE pages on the specified CSROW number it
2178 static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
2180 u32 cs_mode, nr_pages;
2181 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
2184 * The math on this doesn't look right on the surface because x/2*4 can
2185 * be simplified to x*2 but this expression makes use of the fact that
2186 * it is integral math where 1/2=0. This intermediate value becomes the
2187 * number of bits to shift the DBAM register to extract the proper CSROW
2190 cs_mode = (dbam >> ((csrow_nr / 2) * 4)) & 0xF;
2192 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
2194 debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
2195 debugf0(" nr_pages/channel= %u channel-count = %d\n",
2196 nr_pages, pvt->channel_count);
2202 * Initialize the array of csrow attribute instances, based on the values
2203 * from pci config hardware registers.
2205 static int init_csrows(struct mem_ctl_info *mci)
2207 struct csrow_info *csrow;
2208 struct amd64_pvt *pvt = mci->pvt_info;
2211 int i, j, empty = 1;
2212 enum mem_type mtype;
2213 enum edac_type edac_mode;
2216 amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
2220 debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2221 pvt->mc_node_id, val,
2222 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2224 for_each_chip_select(i, 0, pvt) {
2225 csrow = &mci->csrows[i];
2227 if (!csrow_enabled(i, 0, pvt) && !csrow_enabled(i, 1, pvt)) {
2228 debugf1("----CSROW %d EMPTY for node %d\n", i,
2233 debugf1("----CSROW %d VALID for MC node %d\n",
2234 i, pvt->mc_node_id);
2237 if (csrow_enabled(i, 0, pvt))
2238 nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
2239 if (csrow_enabled(i, 1, pvt))
2240 nr_pages += amd64_csrow_nr_pages(pvt, 1, i);
2242 get_cs_base_and_mask(pvt, i, 0, &base, &mask);
2243 /* 8 bytes of resolution */
2245 mtype = amd64_determine_memory_type(pvt, i);
2247 debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i);
2248 debugf1(" nr_pages: %u\n", nr_pages * pvt->channel_count);
2251 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
2253 if (pvt->nbcfg & NBCFG_ECC_ENABLE)
2254 edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
2255 EDAC_S4ECD4ED : EDAC_SECDED;
2257 edac_mode = EDAC_NONE;
2259 for (j = 0; j < pvt->channel_count; j++) {
2260 csrow->channels[j].dimm->mtype = mtype;
2261 csrow->channels[j].dimm->edac_mode = edac_mode;
2262 csrow->channels[j].dimm->nr_pages = nr_pages;
2269 /* get all cores on this DCT */
2270 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
2274 for_each_online_cpu(cpu)
2275 if (amd_get_nb_id(cpu) == nid)
2276 cpumask_set_cpu(cpu, mask);
2279 /* check MCG_CTL on all the cpus on this node */
2280 static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
2286 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2287 amd64_warn("%s: Error allocating mask\n", __func__);
2291 get_cpus_on_this_dct_cpumask(mask, nid);
2293 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
2295 for_each_cpu(cpu, mask) {
2296 struct msr *reg = per_cpu_ptr(msrs, cpu);
2297 nbe = reg->l & MSR_MCGCTL_NBE;
2299 debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2301 (nbe ? "enabled" : "disabled"));
2309 free_cpumask_var(mask);
static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
{
	cpumask_var_t cmask;
	int cpu;

	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
		amd64_warn("%s: error allocating mask\n", __func__);
		return -ENOMEM;
	}

	get_cpus_on_this_dct_cpumask(cmask, nid);

	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, cmask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);

		if (on) {
			if (reg->l & MSR_MCGCTL_NBE)
				s->flags.nb_mce_enable = 1;

			reg->l |= MSR_MCGCTL_NBE;
		} else {
			/*
			 * Turn off NB MCE reporting only when it was off before
			 */
			if (!s->flags.nb_mce_enable)
				reg->l &= ~MSR_MCGCTL_NBE;
		}
	}
	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	free_cpumask_var(cmask);

	return 0;
}
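/*
 * toggle_ecc_err_reporting() is a node-wide read-modify-write of MCG_CTL:
 * rdmsr_on_cpus() fills the pre-allocated per-cpu 'msrs' staging array, the
 * loop edits the cached values, and wrmsr_on_cpus() writes them back in one
 * pass.
 */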
static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
				       struct pci_dev *F3)
{
	bool ret = true;
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (toggle_ecc_err_reporting(s, nid, ON)) {
		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
		return false;
	}

	amd64_read_pci_cfg(F3, NBCTL, &value);

	s->old_nbctl   = value & mask;
	s->nbctl_valid = true;

	value |= mask;
	amd64_write_pci_cfg(F3, NBCTL, value);

	amd64_read_pci_cfg(F3, NBCFG, &value);

	debugf0("1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		nid, value, !!(value & NBCFG_ECC_ENABLE));

	if (!(value & NBCFG_ECC_ENABLE)) {
		amd64_warn("DRAM ECC disabled on this node, enabling...\n");

		s->flags.nb_ecc_prev = 0;

		/* Attempt to turn on DRAM ECC Enable */
		value |= NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);

		amd64_read_pci_cfg(F3, NBCFG, &value);

		if (!(value & NBCFG_ECC_ENABLE)) {
			amd64_warn("Hardware rejected DRAM ECC enable, "
				   "check memory DIMM configuration.\n");
			ret = false;
		} else {
			amd64_info("Hardware accepted DRAM ECC Enable\n");
		}
	} else {
		s->flags.nb_ecc_prev = 1;
	}

	debugf0("2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		nid, value, !!(value & NBCFG_ECC_ENABLE));

	return ret;
}
static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
					struct pci_dev *F3)
{
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (!s->nbctl_valid)
		return;

	amd64_read_pci_cfg(F3, NBCTL, &value);
	value &= ~mask;
	value |= s->old_nbctl;

	amd64_write_pci_cfg(F3, NBCTL, value);

	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
	if (!s->flags.nb_ecc_prev) {
		amd64_read_pci_cfg(F3, NBCFG, &value);
		value &= ~NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);
	}

	/* restore the NB Enable MCGCTL bit */
	if (toggle_ecc_err_reporting(s, nid, OFF))
		amd64_warn("Error restoring NB MCGCTL settings!\n");
}
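/*
 * Teardown order note: NB MCE reporting in MCG_CTL was the first thing
 * enable_ecc_error_reporting() touched and is the last thing restored here;
 * the NBCTL and NBCFG writes are undone first.
 */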
/*
 * EDAC requires that the BIOS have ECC enabled before
 * taking over the processing of ECC errors. A command line
 * option allows force-enabling hardware ECC later in
 * enable_ecc_error_reporting().
 */
static const char *ecc_msg =
	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
	" Either enable ECC checking or force module loading by setting "
	"'ecc_enable_override'.\n"
	" (Note that use of the override may cause unknown side effects.)\n";
static bool ecc_enabled(struct pci_dev *F3, u8 nid)
{
	u32 value;
	u8 ecc_en = 0;
	bool nb_mce_en = false;

	amd64_read_pci_cfg(F3, NBCFG, &value);

	ecc_en = !!(value & NBCFG_ECC_ENABLE);
	amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));

	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
	if (!nb_mce_en)
		amd64_notice("NB MCE bank disabled, set MSR "
			     "0x%08x[4] on node %d to enable.\n",
			     MSR_IA32_MCG_CTL, nid);

	if (!ecc_en || !nb_mce_en) {
		amd64_notice("%s", ecc_msg);
		return false;
	}
	return true;
}
struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
					  ARRAY_SIZE(amd64_inj_attrs) +
					  1];

struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };

static void set_mc_sysfs_attrs(struct mem_ctl_info *mci)
{
	unsigned int i = 0, j = 0;

	for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
		sysfs_attrs[i] = amd64_dbg_attrs[i];

	if (boot_cpu_data.x86 >= 0x10)
		for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
			sysfs_attrs[i] = amd64_inj_attrs[j];

	sysfs_attrs[i] = terminator;

	mci->mc_driver_sysfs_attributes = sysfs_attrs;
}
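/*
 * The "+ 1" slot in sysfs_attrs[] holds the terminator entry
 * (.attr.name == NULL) that marks the end of the list for the EDAC core;
 * the injection attributes are appended only on family 0x10 or newer.
 */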
static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
				 struct amd64_family_type *fam)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->nbcap & NBCAP_SECDED)
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

	if (pvt->nbcap & NBCAP_CHIPKILL)
		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;

	mci->edac_cap		= amd64_determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->mod_ver		= EDAC_AMD64_VERSION;
	mci->ctl_name		= fam->ctl_name;
	mci->dev_name		= pci_name(pvt->F2);
	mci->ctl_page_to_phys	= NULL;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
	mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
}
/*
 * returns a pointer to the family descriptor on success, NULL otherwise.
 */
static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
{
	u8 fam = boot_cpu_data.x86;
	struct amd64_family_type *fam_type = NULL;

	switch (fam) {
	case 0xf:
		fam_type	= &amd64_family_types[K8_CPUS];
		pvt->ops	= &amd64_family_types[K8_CPUS].ops;
		break;
	case 0x10:
		fam_type	= &amd64_family_types[F10_CPUS];
		pvt->ops	= &amd64_family_types[F10_CPUS].ops;
		break;
	case 0x15:
		fam_type	= &amd64_family_types[F15_CPUS];
		pvt->ops	= &amd64_family_types[F15_CPUS].ops;
		break;
	default:
		amd64_err("Unsupported family!\n");
		return NULL;
	}

	pvt->ext_model = boot_cpu_data.x86_model >> 4;

	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
		   (fam == 0xf ?
			(pvt->ext_model >= K8_REV_F ? "revF or later "
						    : "revE or earlier ")
			: ""), pvt->mc_node_id);

	return fam_type;
}
static int amd64_init_one_instance(struct pci_dev *F2)
{
	struct amd64_pvt *pvt = NULL;
	struct amd64_family_type *fam_type = NULL;
	struct mem_ctl_info *mci = NULL;
	struct edac_mc_layer layers[2];
	int err = 0, ret;
	u8 nid = get_node_id(F2);

	ret = -ENOMEM;
	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_ret;

	pvt->mc_node_id = nid;
	pvt->F2 = F2;

	ret = -EINVAL;
	fam_type = amd64_per_family_init(pvt);
	if (!fam_type)
		goto err_free;

	ret = -ENODEV;
	err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
	if (err)
		goto err_free;

	read_mc_regs(pvt);

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure.
	 */
	ret = -EINVAL;
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		goto err_siblings;

	ret = -ENOMEM;
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = pvt->csels[0].b_cnt;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = pvt->channel_count;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		goto err_siblings;

	mci->pvt_info = pvt;
	mci->dev = &pvt->F2->dev;
	setup_mci_misc_attrs(mci, fam_type);

	if (init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	set_mc_sysfs_attrs(mci);

	ret = -ENODEV;
	if (edac_mc_add_mc(mci)) {
		debugf1("failed edac_mc_add_mc()\n");
		goto err_add_mc;
	}

	/* register stuff with EDAC MCE */
	if (report_gart_errors)
		amd_report_gart_errors(true);

	amd_register_ecc_decoder(amd64_decode_bus_error);

	mcis[nid] = mci;
	atomic_inc(&drv_instances);
	return 0;

err_add_mc:
	edac_mc_free(mci);
err_siblings:
	free_mc_sibling_devs(pvt);
err_free:
	kfree(pvt);
err_ret:
	return ret;
}
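/*
 * Geometry sketch: with, say, 8 chip selects (csels[0].b_cnt == 8) and a
 * channel_count of 2, edac_mc_alloc() lays out an 8x2 grid of dimm structs,
 * which init_csrows() then fills in per channel.
 */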
static int __devinit amd64_probe_one_instance(struct pci_dev *pdev,
					      const struct pci_device_id *mc_type)
{
	u8 nid = get_node_id(pdev);
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s;
	int ret = 0;

	ret = pci_enable_device(pdev);
	if (ret < 0) {
		debugf0("ret=%d\n", ret);
		return -EIO;
	}

	ret = -ENOMEM;
	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
	if (!s)
		goto err_out;

	ecc_stngs[nid] = s;

	if (!ecc_enabled(F3, nid)) {
		ret = -ENODEV;
		if (!ecc_enable_override)
			goto err_enable;

		amd64_warn("Forcing ECC on!\n");

		if (!enable_ecc_error_reporting(s, nid, F3))
			goto err_enable;
	}

	ret = amd64_init_one_instance(pdev);
	if (ret < 0) {
		amd64_err("Error probing instance: %d\n", nid);
		restore_ecc_error_reporting(s, nid, F3);
	}
	return ret;

err_enable:
	kfree(s);
	ecc_stngs[nid] = NULL;
err_out:
	return ret;
}
static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u8 nid = get_node_id(pdev);
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s = ecc_stngs[nid];

	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	restore_ecc_error_reporting(s, nid, F3);

	free_mc_sibling_devs(pvt);

	/* unregister from EDAC MCE */
	amd_report_gart_errors(false);
	amd_unregister_ecc_decoder(amd64_decode_bus_error);

	kfree(ecc_stngs[nid]);
	ecc_stngs[nid] = NULL;

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;
	mcis[nid] = NULL;

	kfree(pvt);
	edac_mc_free(mci);
}
/*
 * This table is part of the interface for loading drivers for PCI devices. The
 * PCI core identifies what devices are on a system during boot, and then
 * queries this table to see whether this driver handles a given device.
 */
static DEFINE_PCI_DEVICE_TABLE(amd64_pci_table) = {
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_10H_NB_DRAM,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_15H_NB_F2,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
	},
	{0, }
};
MODULE_DEVICE_TABLE(pci, amd64_pci_table);
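/*
 * The all-zero {0, } entry is the sentinel that terminates the table; any
 * new device ID must be added above it.
 */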
static struct pci_driver amd64_pci_driver = {
	.name		= EDAC_MOD_STR,
	.probe		= amd64_probe_one_instance,
	.remove		= __devexit_p(amd64_remove_one_instance),
	.id_table	= amd64_pci_table,
};
static void setup_pci_device(void)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	if (amd64_ctl_pci)
		return;

	mci = mcis[0];
	if (!mci)
		return;

	pvt = mci->pvt_info;
	amd64_ctl_pci =
		edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);

	if (!amd64_ctl_pci) {
		pr_warning("%s(): Unable to create PCI control\n",
			   __func__);

		pr_warning("%s(): PCI error report via EDAC not set\n",
			   __func__);
	}
}
static int __init amd64_edac_init(void)
{
	int err = -ENODEV;

	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);

	opstate_init();

	if (amd_cache_northbridges() < 0)
		goto err_ret;

	err = -ENOMEM;
	mcis	  = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
	ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
	if (!(mcis && ecc_stngs))
		goto err_free;

	msrs = msrs_alloc();
	if (!msrs)
		goto err_free;

	err = pci_register_driver(&amd64_pci_driver);
	if (err)
		goto err_pci;

	err = -ENODEV;
	if (!atomic_read(&drv_instances))
		goto err_no_instances;

	setup_pci_device();
	return 0;

err_no_instances:
	pci_unregister_driver(&amd64_pci_driver);
err_pci:
	msrs_free(msrs);
	msrs = NULL;
err_free:
	kfree(mcis);
	mcis = NULL;
	kfree(ecc_stngs);
	ecc_stngs = NULL;
err_ret:
	return err;
}
static void __exit amd64_edac_exit(void)
{
	if (amd64_ctl_pci)
		edac_pci_release_generic_ctl(amd64_ctl_pci);

	pci_unregister_driver(&amd64_pci_driver);

	kfree(ecc_stngs);
	ecc_stngs = NULL;

	kfree(mcis);
	mcis = NULL;

	msrs_free(msrs);
	msrs = NULL;
}
module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
	      "Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		   EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");