1 #include "amd64_edac.h"
2 #include <asm/amd_nb.h>
/* EDAC PCI control instance; registration not visible in this chunk. */
4 static struct edac_pci_ctl_info *pci_ctl;
/* Module parameter: enable reporting of GART errors — TODO confirm exact use; not visible here. */
6 static int report_gart_errors;
7 module_param(report_gart_errors, int, 0644);
10 * Set by command line parameter. If BIOS has enabled the ECC, this override is
11 * cleared to prevent re-enabling the hardware by this driver.
13 static int ecc_enable_override;
14 module_param(ecc_enable_override, int, 0644);
/* Per-CPU MSR buffer; allocation and use are outside this chunk. */
16 static struct msr __percpu *msrs;
19 * count successfully initialized driver instances for setup_pci_device()
21 static atomic_t drv_instances = ATOMIC_INIT(0);
23 /* Per-node driver instances */
24 static struct mem_ctl_info **mcis;
25 static struct ecc_settings **ecc_stngs;
28 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
29 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
32 *FIXME: Produce a better mapping/linearisation.
34 static const struct scrubrate {
35 u32 scrubval; /* bit pattern for scrub rate */
36 u32 bandwidth; /* bandwidth consumed (bytes/sec) */
/* NOTE(review): the table entries between scrubval 0x01 and 0x00 are elided in this extract. */
38 { 0x01, 1600000000UL},
60 { 0x00, 0UL}, /* scrubbing off */
/*
 * Read a 32-bit PCI config dword at @offset of @pdev into *@val.
 * @func names the caller and is used only for the warning message.
 */
63 int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
64 u32 *val, const char *func)
68 err = pci_read_config_dword(pdev, offset, val);
/* On failure, log the PCI function number and the config-space offset. */
70 amd64_warn("%s: error reading F%dx%03x.\n",
71 func, PCI_FUNC(pdev->devfn), offset);
/*
 * Write the 32-bit @val to PCI config space of @pdev at @offset.
 * @func names the caller and is used only for the warning message.
 */
76 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
77 u32 val, const char *func)
81 err = pci_write_config_dword(pdev, offset, val);
/* On failure, log the PCI function number and the config-space offset. */
83 amd64_warn("%s: error writing to F%dx%03x.\n",
84 func, PCI_FUNC(pdev->devfn), offset);
90 * Select DCT to which PCI cfg accesses are routed
92 static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
96 amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, ®);
97 reg &= (pvt->model == 0x30) ? ~3 : ~1;
99 amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
104 * Depending on the family, F2 DCT reads need special handling:
106 * K8: has a single DCT only and no address offsets >= 0x100
108 * F10h: each DCT has its own set of regs
112 * F16h: has only 1 DCT
114 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
/* Read a DCT register, routing the access to the requested DCT per family. */
116 static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
117 int offset, u32 *val)
121 if (dct || offset >= 0x100)
128 * Note: If ganging is enabled, barring the regs
129 * F2x[1,0]98 and F2x[1,0]9C; reads to F2x1xx
130 * return 0. (cf. Section 2.8.1 F10h BKDG)
132 if (dct_ganging_enabled(pvt))
141 * F15h: F2x1xx addresses do not map explicitly to DCT1.
142 * We should select which DCT we access using F1x10C[DctCfgSel]
/* Model 0x30 uses a 2-bit select; encode DCT1 as 3 there. */
144 dct = (dct && pvt->model == 0x30) ? 3 : dct;
145 f15h_select_dct(pvt, dct);
146 return amd64_read_pci_cfg(pvt->F2, offset, val);
160 * Memory scrubber control interface. For K8, memory scrubbing is handled by
161 * hardware and can involve L2 cache, dcache as well as the main memory. With
162 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
165 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
166 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
167 * bytes/sec for the setting.
169 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
170 * other archs, we might not have access to the caches directly.
174 * scan the scrub rate mapping table for a close or matching bandwidth value to
175 * issue. If requested is too big, then use last maximum value found.
177 static int __set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
183 * map the configured rate (new_bw) to a value specific to the AMD64
184 * memory controller and apply to register. Search for the first
185 * bandwidth entry that is greater or equal than the setting requested
186 * and program that. If at last entry, turn off DRAM scrubbing.
188 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
189 * by falling back to the last element in scrubrates[].
/* Walk scrubrates[] descending by bandwidth; stop at the last (off) entry. */
191 for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
193 * skip scrub rates which aren't recommended
194 * (see F10 BKDG, F3x58)
196 if (scrubrates[i].scrubval < min_rate)
199 if (scrubrates[i].bandwidth <= new_bw)
/* Program the chosen scrubval into the low 5 bits of F3x58 (SCRCTRL). */
203 scrubval = scrubrates[i].scrubval;
205 pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);
/* Report back the bandwidth that was actually programmed. */
208 return scrubrates[i].bandwidth;
/* EDAC callback: set the DRAM scrub rate to approximately @bw bytes/sec. */
213 static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
215 struct amd64_pvt *pvt = mci->pvt_info;
216 u32 min_scrubrate = 0x5;
/* Erratum workaround: F15h models < 0x10 must access the scrub regs via DCT0. */
222 if (pvt->fam == 0x15 && pvt->model < 0x10)
223 f15h_select_dct(pvt, 0);
225 return __set_scrub_rate(pvt->F3, bw, min_scrubrate);
/*
 * EDAC callback: read back the currently programmed scrub rate.
 * Returns the bandwidth in bytes/sec, or -EINVAL if the register
 * holds an unknown scrubval encoding.
 */
228 static int get_scrub_rate(struct mem_ctl_info *mci)
230 struct amd64_pvt *pvt = mci->pvt_info;
232 int i, retval = -EINVAL;
/* Same DCT0 routing workaround as in set_scrub_rate(). */
235 if (pvt->fam == 0x15 && pvt->model < 0x10)
236 f15h_select_dct(pvt, 0);
238 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
/* Scrub rate encoding lives in the low 5 bits of SCRCTRL. */
240 scrubval = scrubval & 0x001F;
242 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
243 if (scrubrates[i].scrubval == scrubval) {
244 retval = scrubrates[i].bandwidth;
252 * returns true if the SysAddr given by sys_addr matches the
253 * DRAM base/limit associated with node_id
255 static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
259 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
260 * all ones if the most significant implemented address bit is 1.
261 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
262 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
263 * Application Programming.
/* Mask to the low 40 bits, then do an inclusive base/limit range check. */
265 addr = sys_addr & 0x000000ffffffffffull;
267 return ((addr >= get_dram_base(pvt, nid)) &&
268 (addr <= get_dram_limit(pvt, nid)));
272 * Attempt to map a SysAddr to a node. On success, return a pointer to the
273 * mem_ctl_info structure for the node that the SysAddr maps to.
275 * On failure, return NULL.
277 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
280 struct amd64_pvt *pvt;
285 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
286 * 3.4.4.2) registers to map the SysAddr to a node ID.
291 * The value of this field should be the same for all DRAM Base
292 * registers. Therefore we arbitrarily choose to read it from the
293 * register for node 0.
295 intlv_en = dram_intlv_en(pvt, 0);
/* No interleaving: scan each node's base/limit pair directly. */
298 for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
299 if (base_limit_match(pvt, sys_addr, node_id))
/* Only 2-, 4- or 8-node interleave encodings are architecturally valid. */
305 if (unlikely((intlv_en != 0x01) &&
306 (intlv_en != 0x03) &&
307 (intlv_en != 0x07))) {
308 amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
/* Interleave selector bits come from SysAddr[14:12]. */
312 bits = (((u32) sys_addr) >> 12) & intlv_en;
314 for (node_id = 0; ; ) {
315 if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
316 break; /* intlv_sel field matches */
318 if (++node_id >= DRAM_RANGES)
322 /* sanity test for sys_addr */
323 if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
/* NOTE(review): the split string literal concatenates "address" + "range"
 * with no separating space, producing "addressrange" at runtime — a
 * doc-only pass cannot change runtime strings; fix in a code change. */
324 amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address"
325 "range for node %d with node interleaving enabled.\n",
326 __func__, sys_addr, node_id);
331 return edac_mc_find((int)node_id);
334 edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
335 (unsigned long)sys_addr);
341 * compute the CS base address of the @csrow on the DRAM controller @dct.
342 * For details see F2x[5C:40] in the processor's BKDG
344 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
345 u64 *base, u64 *mask)
347 u64 csbase, csmask, base_bits, mask_bits;
/* K8 pre-revF: one mask per csrow; fixed bit layouts, shift of 4. */
350 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
351 csbase = pvt->csels[dct].csbases[csrow];
352 csmask = pvt->csels[dct].csmasks[csrow];
353 base_bits = GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
354 mask_bits = GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
358 * F16h and F15h, models 30h and later need two addr_shift values:
359 * 8 for high and 6 for low (cf. F16h BKDG).
361 } else if (pvt->fam == 0x16 ||
362 (pvt->fam == 0x15 && pvt->model >= 0x30)) {
363 csbase = pvt->csels[dct].csbases[csrow];
364 csmask = pvt->csels[dct].csmasks[csrow >> 1];
/* Low field shifted by 6, high field by 8, per the two-shift scheme above. */
366 *base = (csbase & GENMASK_ULL(15, 5)) << 6;
367 *base |= (csbase & GENMASK_ULL(30, 19)) << 8;
370 /* poke holes for the csmask */
371 *mask &= ~((GENMASK_ULL(15, 5) << 6) |
372 (GENMASK_ULL(30, 19) << 8));
374 *mask |= (csmask & GENMASK_ULL(15, 5)) << 6;
375 *mask |= (csmask & GENMASK_ULL(30, 19)) << 8;
/* Remaining families: masks are shared by csrow pairs (csrow >> 1). */
379 csbase = pvt->csels[dct].csbases[csrow];
380 csmask = pvt->csels[dct].csmasks[csrow >> 1];
383 if (pvt->fam == 0x15)
384 base_bits = mask_bits =
385 GENMASK_ULL(30,19) | GENMASK_ULL(13,5);
387 base_bits = mask_bits =
388 GENMASK_ULL(28,19) | GENMASK_ULL(13,5);
391 *base = (csbase & base_bits) << addr_shift;
394 /* poke holes for the csmask */
395 *mask &= ~(mask_bits << addr_shift);
397 *mask |= (csmask & mask_bits) << addr_shift;
/* Iterate @i over every chip-select base register of DCT @dct. */
400 #define for_each_chip_select(i, dct, pvt) \
401 for (i = 0; i < pvt->csels[dct].b_cnt; i++)
/* Raw CS base register value for csrow @i on DCT @dct. */
403 #define chip_select_base(i, dct, pvt) \
404 pvt->csels[dct].csbases[i]
/* Iterate @i over every chip-select mask register of DCT @dct. */
406 #define for_each_chip_select_mask(i, dct, pvt) \
407 for (i = 0; i < pvt->csels[dct].m_cnt; i++)
410 * @input_addr is an InputAddr associated with the node given by mci. Return the
411 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
413 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
415 struct amd64_pvt *pvt;
/* Test each enabled csrow on DCT0: a csrow claims the address when the
 * masked InputAddr equals the masked CS base. */
421 for_each_chip_select(csrow, 0, pvt) {
422 if (!csrow_enabled(csrow, 0, pvt))
425 get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
429 if ((input_addr & mask) == (base & mask)) {
430 edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
431 (unsigned long)input_addr, csrow,
437 edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
438 (unsigned long)input_addr, pvt->mc_node_id);
444 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
445 * for the node represented by mci. Info is passed back in *hole_base,
446 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
447 * info is invalid. Info may be invalid for either of the following reasons:
449 * - The revision of the node is not E or greater. In this case, the DRAM Hole
450 * Address Register does not exist.
452 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
453 * indicating that its contents are not valid.
455 * The values passed back in *hole_base, *hole_offset, and *hole_size are
456 * complete 32-bit values despite the fact that the bitfields in the DHAR
457 * only represent bits 31-24 of the base and offset values.
459 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
460 u64 *hole_offset, u64 *hole_size)
462 struct amd64_pvt *pvt = mci->pvt_info;
464 /* only revE and later have the DRAM Hole Address Register */
465 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
466 edac_dbg(1, " revision %d for node %d does not support DHAR\n",
467 pvt->ext_model, pvt->mc_node_id);
471 /* valid for Fam10h and above */
472 if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
473 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this system\n");
477 if (!dhar_valid(pvt)) {
478 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this node %d\n",
483 /* This node has Memory Hoisting */
485 /* +------------------+--------------------+--------------------+-----
486 * | memory | DRAM hole | relocated |
487 * | [0, (x - 1)] | [x, 0xffffffff] | addresses from |
489 * | | | [0x100000000, |
490 * | | | (0x100000000+ |
491 * | | | (0xffffffff-x))] |
492 * +------------------+--------------------+--------------------+-----
494 * Above is a diagram of physical memory showing the DRAM hole and the
495 * relocated addresses from the DRAM hole. As shown, the DRAM hole
496 * starts at address x (the base address) and extends through address
497 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
498 * addresses in the hole so that they start at 0x100000000.
/* Hole spans [base, 4 GiB); offset encoding differs between K8 and F10h+. */
501 *hole_base = dhar_base(pvt);
502 *hole_size = (1ULL << 32) - *hole_base;
504 *hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
505 : k8_dhar_offset(pvt);
507 edac_dbg(1, " DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
508 pvt->mc_node_id, (unsigned long)*hole_base,
509 (unsigned long)*hole_offset, (unsigned long)*hole_size);
513 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
516 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
517 * assumed that sys_addr maps to the node given by mci.
519 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
520 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
521 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
522 * then it is also involved in translating a SysAddr to a DramAddr. Sections
523 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
524 * These parts of the documentation are unclear. I interpret them as follows:
526 * When node n receives a SysAddr, it processes the SysAddr as follows:
528 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
529 * Limit registers for node n. If the SysAddr is not within the range
530 * specified by the base and limit values, then node n ignores the Sysaddr
531 * (since it does not map to node n). Otherwise continue to step 2 below.
533 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
534 * disabled so skip to step 3 below. Otherwise see if the SysAddr is within
535 * the range of relocated addresses (starting at 0x100000000) from the DRAM
536 * hole. If not, skip to step 3 below. Else get the value of the
537 * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
538 * offset defined by this value from the SysAddr.
540 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
541 * Base register for node n. To obtain the DramAddr, subtract the base
542 * address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
544 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
546 struct amd64_pvt *pvt = mci->pvt_info;
547 u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
550 dram_base = get_dram_base(pvt, pvt->mc_node_id);
552 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
/* Step 2: address lies in the relocated-hole window above 4 GiB. */
555 if ((sys_addr >= (1ULL << 32)) &&
556 (sys_addr < ((1ULL << 32) + hole_size))) {
557 /* use DHAR to translate SysAddr to DramAddr */
558 dram_addr = sys_addr - hole_offset;
560 edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
561 (unsigned long)sys_addr,
562 (unsigned long)dram_addr);
569 * Translate the SysAddr to a DramAddr as shown near the start of
570 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
571 * only deals with 40-bit values. Therefore we discard bits 63-40 of
572 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
573 * discard are all 1s. Otherwise the bits we discard are all 0s. See
574 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
575 * Programmer's Manual Volume 1 Application Programming.
577 dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;
579 edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
580 (unsigned long)sys_addr, (unsigned long)dram_addr);
/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
 * for node interleaving: 0, 1, 2 or 3 for 1-, 2-, 4- or 8-node interleave.
 * Only the encodings 0x0, 0x1, 0x3 and 0x7 are valid; anything above 7 is
 * a programming error and trips BUG_ON.
 *
 * This revision restores the local declaration and return statement that
 * had been truncated out of the extracted block.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
	int n;

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];
	return n;
}
599 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
600 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
602 struct amd64_pvt *pvt;
609 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
610 * concerning translating a DramAddr to an InputAddr.
/* Remove the node-interleave bits by right-shifting bits [35:12]. */
612 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
613 input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
616 edac_dbg(2, " Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
617 intlv_shift, (unsigned long)dram_addr,
618 (unsigned long)input_addr);
624 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
625 * assumed that @sys_addr maps to the node given by mci.
627 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
/* Two-step translation: SysAddr -> DramAddr -> InputAddr. */
632 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
/* NOTE(review): "SysAdddr" typo lives in the runtime debug string below;
 * a doc-only pass must not alter runtime strings. */
634 edac_dbg(2, "SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
635 (unsigned long)sys_addr, (unsigned long)input_addr);
640 /* Map the Error address to a PAGE and PAGE OFFSET. */
641 static inline void error_address_to_page_and_offset(u64 error_address,
642 struct err_info *err)
/* Page number = address >> PAGE_SHIFT; offset = low bits within the page. */
644 err->page = (u32) (error_address >> PAGE_SHIFT);
645 err->offset = ((u32) error_address) & ~PAGE_MASK;
649 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
650 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
651 * of a node that detected an ECC memory error. mci represents the node that
652 * the error address maps to (possibly different from the node that detected
653 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
656 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
/* Chain the SysAddr->InputAddr and InputAddr->csrow translations. */
660 csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
663 amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
664 "address 0x%lx\n", (unsigned long)sys_addr);
/* Forward declaration; definition lies outside this chunk. */
668 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
671 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
674 static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
677 unsigned long edac_cap = EDAC_FLAG_NONE;
/* The DCLR bit indicating ECC capability differs pre/post K8 revF. */
679 bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
683 if (pvt->dclr0 & BIT(bit))
684 edac_cap = EDAC_FLAG_SECDED;
/* Forward declaration; definition lies outside this chunk. */
689 static void debug_display_dimm_sizes(struct amd64_pvt *, u8);
/* Decode and pretty-print a DRAM Configuration Low (DCLR) register value. */
691 static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
693 edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
695 edac_dbg(1, " DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
696 (dclr & BIT(16)) ? "un" : "",
697 (dclr & BIT(19)) ? "yes" : "no");
699 edac_dbg(1, " PAR/ERR parity: %s\n",
700 (dclr & BIT(8)) ? "enabled" : "disabled");
/* Bit 11 (128-bit DCT width) is only meaningful on family 0x10. */
702 if (pvt->fam == 0x10)
703 edac_dbg(1, " DCT 128bit mode width: %s\n",
704 (dclr & BIT(11)) ? "128b" : "64b");
706 edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
707 (dclr & BIT(12)) ? "yes" : "no",
708 (dclr & BIT(13)) ? "yes" : "no",
709 (dclr & BIT(14)) ? "yes" : "no",
710 (dclr & BIT(15)) ? "yes" : "no");
713 /* Display and decode various NB registers for debug purposes. */
714 static void dump_misc_regs(struct amd64_pvt *pvt)
716 edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
718 edac_dbg(1, " NB two channel DRAM capable: %s\n",
719 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
721 edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
722 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
723 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
725 debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
727 edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
729 edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
730 pvt->dhar, dhar_base(pvt),
731 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
732 : f10_dhar_offset(pvt));
734 edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
736 debug_display_dimm_sizes(pvt, 0);
738 /* everything below this point is Fam10h and above */
742 debug_display_dimm_sizes(pvt, 1);
744 amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
746 /* Only if NOT ganged does dclr1 have valid info */
747 if (!dct_ganging_enabled(pvt))
748 debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
752 * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
/* Set per-family counts of chip-select base and mask registers per DCT. */
754 static void prep_chip_selects(struct amd64_pvt *pvt)
756 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
757 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
758 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
759 } else if (pvt->fam == 0x15 && pvt->model >= 0x30) {
760 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
761 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
/* Default: 8 bases, 4 masks (masks shared by csrow pairs). */
763 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
764 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
769 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
771 static void read_dct_base_mask(struct amd64_pvt *pvt)
775 prep_chip_selects(pvt);
/* Read every chip-select base register of both DCTs into pvt->csels[]. */
777 for_each_chip_select(cs, 0, pvt) {
778 int reg0 = DCSB0 + (cs * 4);
779 int reg1 = DCSB1 + (cs * 4);
780 u32 *base0 = &pvt->csels[0].csbases[cs];
781 u32 *base1 = &pvt->csels[1].csbases[cs];
783 if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
784 edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
790 if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
791 edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
792 cs, *base1, (pvt->fam == 0x10) ? reg1
/* Likewise read every chip-select mask register of both DCTs. */
796 for_each_chip_select_mask(cs, 0, pvt) {
797 int reg0 = DCSM0 + (cs * 4);
798 int reg1 = DCSM1 + (cs * 4);
799 u32 *mask0 = &pvt->csels[0].csmasks[cs];
800 u32 *mask1 = &pvt->csels[1].csmasks[cs];
802 if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
803 edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
809 if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
810 edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
811 cs, *mask1, (pvt->fam == 0x10) ? reg1
/* Classify the DIMM memory type (DDR/DDR2/DDR3, registered or not) from DCLR/DCHR. */
816 static enum mem_type determine_memory_type(struct amd64_pvt *pvt, int cs)
820 /* F15h supports only DDR3 */
821 if (pvt->fam >= 0x15)
822 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
823 else if (pvt->fam == 0x10 || pvt->ext_model >= K8_REV_F) {
/* F10h / K8 revF+: DCHR DDR3 mode bit selects DDR3 vs DDR2 decoding. */
824 if (pvt->dchr0 & DDR3_MODE)
825 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
827 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
/* Pre-revF K8: DDR1; bit 18 distinguishes unbuffered from registered. */
829 type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
832 amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);
837 /* Get the number of DCT channels the memory controller is using. */
838 static int k8_early_channel_count(struct amd64_pvt *pvt)
/* 128-bit DCT width implies both channels are in use; the flag bit
 * moved between revE and revF. */
842 if (pvt->ext_model >= K8_REV_F)
843 /* RevF (NPT) and later */
844 flag = pvt->dclr0 & WIDTH_128;
846 /* RevE and earlier */
847 flag = pvt->dclr0 & REVE_WIDTH_128;
852 return (flag) ? 2 : 1;
855 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
856 static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
/* K8 uses a narrower valid-bit window of MC4_ADDR than F10h+. */
862 if (pvt->fam == 0xf) {
867 addr = m->addr & GENMASK_ULL(end_bit, start_bit);
870 * Erratum 637 workaround
/* F15h: error addresses inside the CC6 save area must be re-translated
 * using the destination node's DRAM_LOCAL_NODE registers. */
872 if (pvt->fam == 0x15) {
873 struct amd64_pvt *pvt;
874 u64 cc6_base, tmp_addr;
/* Addresses with bits [47:24] == 0x00fdf7 identify the CC6 region. */
879 if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
882 mce_nid = amd_get_nb_id(m->extcpu);
883 pvt = mcis[mce_nid]->pvt_info;
885 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
886 intlv_en = tmp >> 21 & 0x7;
888 /* add [47:27] + 3 trailing bits */
889 cc6_base = (tmp & GENMASK_ULL(20, 0)) << 3;
891 /* reverse and add DramIntlvEn */
892 cc6_base |= intlv_en ^ 0x7;
/* No interleaving: low 24 bits carry over unchanged. */
898 return cc6_base | (addr & GENMASK_ULL(23, 0));
900 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
903 tmp_addr = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);
905 /* OR DramIntlvSel into bits [14:12] */
906 tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;
908 /* add remaining [11:0] bits from original MC4_ADDR */
909 tmp_addr |= addr & GENMASK_ULL(11, 0);
911 return cc6_base | tmp_addr;
/*
 * Find the PCI device with the given @vendor/@device IDs that sits at the
 * same domain/bus/slot as @related (i.e. another function of that device).
 */
917 static struct pci_dev *pci_get_related_function(unsigned int vendor,
919 struct pci_dev *related)
921 struct pci_dev *dev = NULL;
923 while ((dev = pci_get_device(vendor, device, dev))) {
924 if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
925 (dev->bus->number == related->bus->number) &&
926 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
/* Populate pvt->ranges[range] from the DRAM base/limit register pair. */
933 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
935 struct amd_northbridge *nb;
936 struct pci_dev *f1 = NULL;
937 unsigned int pci_func;
/* Register pairs are spaced 8 bytes apart per range. */
938 int off = range << 3;
941 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
942 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
/* Ranges without read/write enable bits are unused — nothing more to do. */
947 if (!dram_rw(pvt, range))
950 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
951 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
953 /* F15h: factor in CC6 save area by reading dst node's limit reg */
954 if (pvt->fam != 0x15)
957 nb = node_to_amd_nb(dram_dst_node(pvt, range));
961 pci_func = (pvt->model == 0x30) ? PCI_DEVICE_ID_AMD_15H_M30H_NB_F1
962 : PCI_DEVICE_ID_AMD_15H_NB_F1;
964 f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
968 amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
/* Splice the destination node's local limit into the range's limit fields. */
970 pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
973 pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
975 pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
978 pvt->ranges[range].lim.hi |= llim >> 13;
/*
 * K8 flavor of mapping an error SysAddr to node/csrow/channel,
 * filling in @err (page/offset, src_mci, csrow, channel, err_code).
 */
983 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
984 struct err_info *err)
986 struct amd64_pvt *pvt = mci->pvt_info;
988 error_address_to_page_and_offset(sys_addr, err);
991 * Find out which node the error address belongs to. This may be
992 * different from the node that detected the error.
994 err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
996 amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
997 (unsigned long)sys_addr);
998 err->err_code = ERR_NODE;
1002 /* Now map the sys_addr to a CSROW */
1003 err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
1004 if (err->csrow < 0) {
1005 err->err_code = ERR_CSROW;
1009 /* CHIPKILL enabled */
1010 if (pvt->nbcfg & NBCFG_CHIPKILL) {
/* With ChipKill, the ECC syndrome identifies the failing channel. */
1011 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
1012 if (err->channel < 0) {
1014 * Syndrome didn't map, so we don't know which of the
1015 * 2 DIMMs is in error. So we need to ID 'both' of them
1018 amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
1019 "possible error reporting race\n",
1021 err->err_code = ERR_CHANNEL;
1026 * non-chipkill ecc mode
1028 * The k8 documentation is unclear about how to determine the
1029 * channel number when using non-chipkill memory. This method
1030 * was obtained from email communication with someone at AMD.
1031 * (Wish the email was placed in this comment - norsk)
1033 err->channel = ((sys_addr & BIT(3)) != 0);
/*
 * Map a DBAM cs_mode encoding @i to a DDR2 chip-select size in MB.
 * @dct_width: true when the DCT is in 128-bit mode, which doubles the size.
 *
 * Encodings 0..2 shift directly; larger even encodings shift by i/2 and
 * odd ones by (i+1)/2, starting from a 128 MB base.
 *
 * This revision restores the interior lines (shift declaration and the
 * first two branches) that had been truncated out of the extracted block.
 */
static int ddr2_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;

	if (i <= 2)
		shift = i;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	return 128 << (shift + !!dct_width);
}
/* K8: translate a DBAM cs_mode value into a chip-select size (MB), by revision. */
1051 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1054 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
/* RevF+: DDR2 sizing; 128-bit width doubles the result. */
1056 if (pvt->ext_model >= K8_REV_F) {
1057 WARN_ON(cs_mode > 11);
1058 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1060 else if (pvt->ext_model >= K8_REV_D) {
1062 WARN_ON(cs_mode > 10);
1065 * the below calculation, besides trying to win an obfuscated C
1066 * contest, maps cs_mode values to DIMM chip select sizes. The
1069 * cs_mode CS size (mb)
1070 * ======= ============
1083 * Basically, it calculates a value with which to shift the
1084 * smallest CS size of 32MB.
1086 * ddr[23]_cs_size have a similar purpose.
1088 diff = cs_mode/3 + (unsigned)(cs_mode > 5);
1090 return 32 << (cs_mode - diff);
/* Pre-revD: plain power-of-two scaling from 32 MB. */
1093 WARN_ON(cs_mode > 6);
1094 return 32 << cs_mode;
1099 * Get the number of DCT channels in use.
1102 * number of Memory Channels in operation
1104 * contents of the DCL0_LOW register
1106 static int f1x_early_channel_count(struct amd64_pvt *pvt)
1108 int i, j, channels = 0;
1110 /* On F10h, if we are in 128 bit mode, then we are using 2 channels */
1111 if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
1115 * Need to check if in unganged mode: In such, there are 2 channels,
1116 * but they are not in 128 bit mode and thus the above 'dclr0' status
1119 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1120 * their CSEnable bit on. If so, then SINGLE DIMM case.
1122 edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
1125 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1126 * is more than just one DIMM present in unganged mode. Need to check
1127 * both controllers since DIMMs can be placed in either one.
1129 for (i = 0; i < 2; i++) {
1130 u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
/* A non-zero DBAM field means a DIMM is populated in that slot. */
1132 for (j = 0; j < 4; j++) {
1133 if (DBAM_DIMM(j, dbam) > 0) {
1143 amd64_info("MCT channel count: %d\n", channels);
/*
 * Map a DBAM cs_mode encoding @i to a DDR3 chip-select size in MB.
 * @dct_width: true when the DCT is in 128-bit mode, which doubles the size.
 * Encodings 0, 3 and 4 are reserved and yield -1.
 *
 * This revision restores the interior branches (reserved encodings, the
 * i == 12 special case, and the final return) that had been truncated
 * out of the extracted block.
 */
static int ddr3_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i == 0 || i == 3 || i == 4)
		cs_size = -1;
	else if (i <= 2)
		shift = i;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = (128 * (1 << !!dct_width)) << shift;

	return cs_size;
}
/* F10h: translate DBAM cs_mode to CS size, choosing DDR3 vs DDR2 sizing. */
1170 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1173 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1175 WARN_ON(cs_mode > 11);
/* Either DCT running in DDR3 mode selects the DDR3 size table. */
1177 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1178 return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
1180 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1184 * F15h supports only 64bit DCT interfaces
1186 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1189 WARN_ON(cs_mode > 12);
/* Always 64-bit wide on F15h, hence dct_width == false. */
1191 return ddr3_cs_size(cs_mode, false);
1195 * F16h and F15h model 30h have only limited cs_modes.
1197 static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1200 WARN_ON(cs_mode > 12);
/* These cs_mode encodings are not supported on F16h / F15h M30h. */
1202 if (cs_mode == 6 || cs_mode == 8 ||
1203 cs_mode == 9 || cs_mode == 12)
1206 return ddr3_cs_size(cs_mode, false);
/* Read and debug-decode the DCT select low/high registers (F2x110/F2x114). */
1209 static void read_dram_ctl_register(struct amd64_pvt *pvt)
/* K8 has no DCT select registers. */
1212 if (pvt->fam == 0xf)
1215 if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
1216 edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1217 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
1219 edac_dbg(0, " DCTs operate in %s mode\n",
1220 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
1222 if (!dct_ganging_enabled(pvt))
1223 edac_dbg(0, " Address range split per DCT: %s\n",
1224 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1226 edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
1227 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1228 (dct_memory_cleared(pvt) ? "yes" : "no"));
1230 edac_dbg(0, " channel interleave: %s, "
1231 "interleave bits selector: 0x%x\n",
1232 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1233 dct_sel_interleave_addr(pvt));
1236 amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
1240 * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
1241 * 2.10.12 Memory Interleaving Modes).
1243 static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1244 u8 intlv_en, int num_dcts_intlv,
/* No interleaving: the caller-provided dct_sel is the channel. */
1251 return (u8)(dct_sel);
/* Two-DCT interleave: SysAddr[9:8] selects DCT 0 or 3. */
1253 if (num_dcts_intlv == 2) {
1254 select = (sys_addr >> 8) & 0x3;
1255 channel = select ? 0x3 : 0;
1256 } else if (num_dcts_intlv == 4) {
/* Four-DCT interleave: the select-bit position depends on DctSelIntLvAddr. */
1257 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1258 switch (intlv_addr) {
1260 channel = (sys_addr >> 8) & 0x3;
1263 channel = (sys_addr >> 9) & 0x3;
1271 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1272 * Interleaving Modes.
1274 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1275 bool hi_range_sel, u8 intlv_en)
1277 u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
/* Ganged DCTs act as a single channel. */
1279 if (dct_ganging_enabled(pvt))
/* High-range accesses go to the DCT named by DctSelHi. */
1283 return dct_sel_high;
1286 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1288 if (dct_interleave_enabled(pvt)) {
1289 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1291 /* return DCT select function: 0=DCT0, 1=DCT1 */
1293 return sys_addr >> 6 & 1;
/* Hash modes: parity of SysAddr[20:16] XORed with the select bit. */
1295 if (intlv_addr & 0x2) {
1296 u8 shift = intlv_addr & 0x1 ? 9 : 6;
1297 u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
1299 return ((sys_addr >> shift) & 1) ^ temp;
/* Plain interleave: select bit sits above the node-interleave bits. */
1302 return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
1305 if (dct_high_range_enabled(pvt))
1306 return ~dct_sel_high & 1;
1311 /* Convert the sys_addr to the normalized DCT address */
1312 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
1313 u64 sys_addr, bool hi_rng,
1314 u32 dct_sel_base_addr)
1317 u64 dram_base = get_dram_base(pvt, range);
1318 u64 hole_off = f10_dhar_offset(pvt);
1319 u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
1324 * base address of high range is below 4Gb
1325 * (bits [47:27] at [31:11])
1326 * DRAM address space on this DCT is hoisted above 4Gb &&
1329 * remove hole offset from sys_addr
1331 * remove high range offset from sys_addr
/* High-range access: pick the hole offset when hoisting applies,
 * otherwise the DctSelBase offset. */
1333 if ((!(dct_sel_base_addr >> 16) ||
1334 dct_sel_base_addr < dhar_base(pvt)) &&
1336 (sys_addr >= BIT_64(32)))
1337 chan_off = hole_off;
1339 chan_off = dct_sel_base_off;
1343 * we have a valid hole &&
1348 * remove dram base to normalize to DCT address
/* Low-range access: hole offset when hoisted above 4 GiB, else DRAM base. */
1350 if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
1351 chan_off = hole_off;
1353 chan_off = dram_base;
1356 return (sys_addr & GENMASK_ULL(47,6)) - (chan_off & GENMASK_ULL(47,23));
1360 * checks if the csrow passed in is marked as SPARED, if so returns the new
/*
 * NOTE(review): the body searching for the spare chip select and the return
 * statements are elided in this extract — the visible logic only shows the
 * "swap already done and this is the bad csrow" guard and the scan loop.
 */
1363 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
1367 if (online_spare_swap_done(pvt, dct) &&
1368 csrow == online_spare_bad_dramcs(pvt, dct)) {
/* Look for the chip select whose base has the spare bit (bit 1) set. */
1370 for_each_chip_select(tmp_cs, dct, pvt) {
1371 if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
1381 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1382 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1385 * -EINVAL: NOT FOUND
1386 * 0..csrow = Chip-Select Row
/* NOTE(review): the nid->mci lookup and loop-exit lines are elided here. */
1388 static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
1390 struct mem_ctl_info *mci;
1391 struct amd64_pvt *pvt;
1392 u64 cs_base, cs_mask;
1393 int cs_found = -EINVAL;
1400 pvt = mci->pvt_info;
1402 edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
1404 for_each_chip_select(csrow, dct, pvt) {
1405 if (!csrow_enabled(csrow, dct, pvt))
1408 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1410 edac_dbg(1, " CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1411 csrow, cs_base, cs_mask);
1415 edac_dbg(1, " (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
1416 (in_addr & cs_mask), (cs_base & cs_mask));
/* Masked compare: address matches this chip select's decode window. */
1418 if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
1419 if (pvt->fam == 0x15 && pvt->model >= 0x30) {
/* Pre-F15h M30h: account for a possible online-spare swap. */
1423 cs_found = f10_process_possible_spare(pvt, dct, csrow);
1425 edac_dbg(1, " MATCH csrow=%d\n", cs_found);
1433 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
1434 * swapped with a region located at the bottom of memory so that the GPU can use
1435 * the interleaved region and thus two channels.
/* NOTE(review): early 'return sys_addr' lines for the no-swap cases are
 * elided in this extract. */
1437 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
1439 u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
1441 if (pvt->fam == 0x10) {
1442 /* only revC3 and revE have that feature */
1443 if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
1447 amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
/* Bit 0: interleave-region swap enable. */
1449 if (!(swap_reg & 0x1))
/* Base/limit/size are 128MB-granular (compared at sys_addr >> 27). */
1452 swap_base = (swap_reg >> 3) & 0x7f;
1453 swap_limit = (swap_reg >> 11) & 0x7f;
1454 rgn_size = (swap_reg >> 20) & 0x7f;
1455 tmp_addr = sys_addr >> 27;
/* Swap applies below 16G (sys_addr >> 34 == 0) when the address falls
 * inside the swapped window or the bottom-of-memory region. */
1457 if (!(sys_addr >> 34) &&
1458 (((tmp_addr >= swap_base) &&
1459 (tmp_addr <= swap_limit)) ||
1460 (tmp_addr < rgn_size)))
1461 return sys_addr ^ (u64)swap_base << 27;
1466 /* For a given @dram_range, check if @sys_addr falls within it. */
/*
 * Map @sys_addr within @range to a channel (*chan_sel) and chip-select row
 * (return value; negative on failure). NOTE(review): several error-return and
 * brace lines are elided in this extract.
 */
1467 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1468 u64 sys_addr, int *chan_sel)
1470 int cs_found = -EINVAL;
1474 bool high_range = false;
1476 u8 node_id = dram_dst_node(pvt, range);
1477 u8 intlv_en = dram_intlv_en(pvt, range);
1478 u32 intlv_sel = dram_intlv_sel(pvt, range);
1480 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1481 range, sys_addr, get_dram_limit(pvt, range));
/* Addresses inside the DRAM hole should never reach us. */
1483 if (dhar_valid(pvt) &&
1484 dhar_base(pvt) <= sys_addr &&
1485 sys_addr < BIT_64(32)) {
1486 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
/* Node interleaving: the address's interleave bits must match this
 * range's IntlvSel. */
1491 if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1494 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
1496 dct_sel_base = dct_sel_baseaddr(pvt);
1499 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1500 * select between DCT0 and DCT1.
1502 if (dct_high_range_enabled(pvt) &&
1503 !dct_ganging_enabled(pvt) &&
1504 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1507 channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
1509 chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
1510 high_range, dct_sel_base);
1512 /* Remove node interleaving, see F1x120 */
1514 chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
1515 (chan_addr & 0xfff);
1517 /* remove channel interleave */
1518 if (dct_interleave_enabled(pvt) &&
1519 !dct_high_range_enabled(pvt) &&
1520 !dct_ganging_enabled(pvt)) {
1522 if (dct_sel_interleave_addr(pvt) != 1) {
1523 if (dct_sel_interleave_addr(pvt) == 0x3)
/* hash 9 */
1525 chan_addr = ((chan_addr >> 10) << 9) |
1526 (chan_addr & 0x1ff);
1528 /* A[6] or hash 6 */
1529 chan_addr = ((chan_addr >> 7) << 6) |
/* A[12] */
1533 chan_addr = ((chan_addr >> 13) << 12) |
1534 (chan_addr & 0xfff);
1537 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
1539 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
1542 *chan_sel = channel;
/*
 * F15h M30h variant of f1x_match_to_this_node(): map @sys_addr within @range
 * to a channel and chip-select row, honoring the DRAM controller base/limit
 * registers (F1x200/F1x204) and up to 4-way DCT interleaving.
 * NOTE(review): several error-return, brace and 'goto' lines are elided in
 * this extract.
 */
1547 static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1548 u64 sys_addr, int *chan_sel)
1550 int cs_found = -EINVAL;
1551 int num_dcts_intlv = 0;
1552 u64 chan_addr, chan_offset;
1553 u64 dct_base, dct_limit;
1554 u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
1555 u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
1557 u64 dhar_offset = f10_dhar_offset(pvt);
1558 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1559 u8 node_id = dram_dst_node(pvt, range);
1560 u8 intlv_en = dram_intlv_en(pvt, range);
1562 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
1563 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
/* DRAM controller base register: bit 3 = offset enable, bits 6:4 = DctSel. */
1565 dct_offset_en = (u8) ((dct_cont_base_reg >> 3) & BIT(0));
1566 dct_sel = (u8) ((dct_cont_base_reg >> 4) & 0x7);
1568 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1569 range, sys_addr, get_dram_limit(pvt, range));
1571 if (!(get_dram_base(pvt, range) <= sys_addr) &&
1572 !(get_dram_limit(pvt, range) >= sys_addr))
/* Addresses inside the DRAM hole should never reach us. */
1575 if (dhar_valid(pvt) &&
1576 dhar_base(pvt) <= sys_addr &&
1577 sys_addr < BIT_64(32)) {
1578 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1583 /* Verify sys_addr is within DCT Range. */
1584 dct_base = (u64) dct_sel_baseaddr(pvt);
1585 dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
/* Base/limit are 128MB-granular, hence the >> 27 on sys_addr. */
1587 if (!(dct_cont_base_reg & BIT(0)) &&
1588 !(dct_base <= (sys_addr >> 27) &&
1589 dct_limit >= (sys_addr >> 27)))
1592 /* Verify number of dct's that participate in channel interleaving. */
1593 num_dcts_intlv = (int) hweight8(intlv_en);
1595 if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
1598 channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
1599 num_dcts_intlv, dct_sel);
1601 /* Verify we stay within the MAX number of channels allowed */
/* Legacy MMIO hole enable, bit 1 of the DRAM controller base reg. */
1605 leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
1607 /* Get normalized DCT addr */
1608 if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
1609 chan_offset = dhar_offset;
1611 chan_offset = dct_base << 27;
1613 chan_addr = sys_addr - chan_offset;
1615 /* remove channel interleave */
1616 if (num_dcts_intlv == 2) {
1617 if (intlv_addr == 0x4)
1618 chan_addr = ((chan_addr >> 9) << 8) |
1620 else if (intlv_addr == 0x5)
1621 chan_addr = ((chan_addr >> 10) << 9) |
1622 (chan_addr & 0x1ff);
1626 } else if (num_dcts_intlv == 4) {
1627 if (intlv_addr == 0x4)
1628 chan_addr = ((chan_addr >> 10) << 8) |
1630 else if (intlv_addr == 0x5)
1631 chan_addr = ((chan_addr >> 11) << 9) |
1632 (chan_addr & 0x1ff);
/* Add the per-channel high offset (F1x1[CC:C0], 128MB units). */
1637 if (dct_offset_en) {
1638 amd64_read_pci_cfg(pvt->F1,
1639 DRAM_CONT_HIGH_OFF + (int) channel * 4,
1641 chan_addr += (u64) ((tmp >> 11) & 0xfff) << 27;
/* Route subsequent F2 accesses to the matched DCT. */
1644 f15h_select_dct(pvt, channel);
1646 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
1650 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
1651 * there is support for 4 DCT's, but only 2 are currently functional.
1652 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
1653 * pvt->csels[1]. So we need to use '1' here to get correct info.
1654 * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
1656 alias_channel = (channel == 3) ? 1 : channel;
1658 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
1661 *chan_sel = alias_channel;
/*
 * Walk all DRAM ranges and delegate to the family-appropriate matcher; return
 * the chip-select row (negative if no range decodes @sys_addr).
 * NOTE(review): parameter lines and the break-out-of-loop/return lines are
 * elided in this extract.
 */
1666 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
1670 int cs_found = -EINVAL;
1673 for (range = 0; range < DRAM_RANGES; range++) {
/* Skip ranges with neither read nor write enabled. */
1674 if (!dram_rw(pvt, range))
/* F15h M30h has its own decode path (DCT base/limit registers). */
1677 if (pvt->fam == 0x15 && pvt->model >= 0x30)
1678 cs_found = f15_m30h_match_to_this_node(pvt, range,
1682 else if ((get_dram_base(pvt, range) <= sys_addr) &&
1683 (get_dram_limit(pvt, range) >= sys_addr)) {
1684 cs_found = f1x_match_to_this_node(pvt, range,
1685 sys_addr, chan_sel);
1694 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
1695 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
1697 * The @sys_addr is usually an error address received from the hardware
/* Results are recorded in @err (csrow, channel, page/offset, err_code). */
1700 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1701 struct err_info *err)
1703 struct amd64_pvt *pvt = mci->pvt_info;
1705 error_address_to_page_and_offset(sys_addr, err);
1707 err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
1708 if (err->csrow < 0) {
1709 err->err_code = ERR_CSROW;
1714 * We need the syndromes for channel detection only when we're
1715 * ganged. Otherwise @chan should already contain the channel at
1718 if (dct_ganging_enabled(pvt))
1719 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
1723 * debug routine to display the memory sizes of all logical DIMMs and its
/* NOTE(review): the 'ctrl = 0' fallback for pre-revF K8 and some brace lines
 * are elided in this extract. */
1726 static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
1728 int dimm, size0, size1;
1729 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
1730 u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
1732 if (pvt->fam == 0xf) {
1733 /* K8 families < revF not supported yet */
1734 if (pvt->ext_model < K8_REV_F)
/* F10h: in ganged mode DCT1 mirrors DCT0, so always use DCT0's regs. */
1740 if (pvt->fam == 0x10) {
1741 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
1743 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
1744 pvt->csels[1].csbases :
1745 pvt->csels[0].csbases;
1748 dcsb = pvt->csels[1].csbases;
1750 edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
1753 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1755 /* Dump memory sizes for DIMM and its CSROWs */
1756 for (dimm = 0; dimm < 4; dimm++) {
/* Each DIMM owns a pair of chip selects: 2*dimm and 2*dimm+1. */
1759 if (dcsb[dimm*2] & DCSB_CS_ENABLE)
1760 size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
1761 DBAM_DIMM(dimm, dbam));
1764 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
1765 size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
1766 DBAM_DIMM(dimm, dbam));
1768 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
1770 dimm * 2 + 1, size1);
/*
 * Per-family descriptors: PCI IDs of the sibling F1/F3 devices plus the
 * family-specific decode callbacks. Indexed by the K8_CPUS/F10_CPUS/...
 * enum (entry names elided in this extract).
 */
1774 static struct amd64_family_type family_types[] = {
1777 .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
1778 .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
1780 .early_channel_count = k8_early_channel_count,
1781 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
1782 .dbam_to_cs = k8_dbam_to_chip_select,
1787 .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
1788 .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
1790 .early_channel_count = f1x_early_channel_count,
1791 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1792 .dbam_to_cs = f10_dbam_to_chip_select,
1797 .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
1798 .f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
1800 .early_channel_count = f1x_early_channel_count,
1801 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1802 .dbam_to_cs = f15_dbam_to_chip_select,
1806 .ctl_name = "F15h_M30h",
1807 .f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
1808 .f3_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F3,
1810 .early_channel_count = f1x_early_channel_count,
1811 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1812 .dbam_to_cs = f16_dbam_to_chip_select,
1817 .f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
1818 .f3_id = PCI_DEVICE_ID_AMD_16H_NB_F3,
1820 .early_channel_count = f1x_early_channel_count,
1821 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1822 .dbam_to_cs = f16_dbam_to_chip_select,
1826 .ctl_name = "F16h_M30h",
1827 .f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
1828 .f3_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F3,
1830 .early_channel_count = f1x_early_channel_count,
1831 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1832 .dbam_to_cs = f16_dbam_to_chip_select,
1838 * These are tables of eigenvectors (one per line) which can be used for the
1839 * construction of the syndrome tables. The modified syndrome search algorithm
1840 * uses those to find the symbol in error and thus the DIMM.
1842 * Algorithm courtesy of Ross LaFetra from AMD.
/* x4 ECC symbol size: four 16-bit eigenvectors per error symbol. */
1844 static const u16 x4_vectors[] = {
1845 0x2f57, 0x1afe, 0x66cc, 0xdd88,
1846 0x11eb, 0x3396, 0x7f4c, 0xeac8,
1847 0x0001, 0x0002, 0x0004, 0x0008,
1848 0x1013, 0x3032, 0x4044, 0x8088,
1849 0x106b, 0x30d6, 0x70fc, 0xe0a8,
1850 0x4857, 0xc4fe, 0x13cc, 0x3288,
1851 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
1852 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
1853 0x15c1, 0x2a42, 0x89ac, 0x4758,
1854 0x2b03, 0x1602, 0x4f0c, 0xca08,
1855 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
1856 0x8ba7, 0x465e, 0x244c, 0x1cc8,
1857 0x2b87, 0x164e, 0x642c, 0xdc18,
1858 0x40b9, 0x80de, 0x1094, 0x20e8,
1859 0x27db, 0x1eb6, 0x9dac, 0x7b58,
1860 0x11c1, 0x2242, 0x84ac, 0x4c58,
1861 0x1be5, 0x2d7a, 0x5e34, 0xa718,
1862 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
1863 0x4c97, 0xc87e, 0x11fc, 0x33a8,
1864 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
1865 0x16b3, 0x3d62, 0x4f34, 0x8518,
1866 0x1e2f, 0x391a, 0x5cac, 0xf858,
1867 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
1868 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
1869 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
1870 0x4397, 0xc27e, 0x17fc, 0x3ea8,
1871 0x1617, 0x3d3e, 0x6464, 0xb8b8,
1872 0x23ff, 0x12aa, 0xab6c, 0x56d8,
1873 0x2dfb, 0x1ba6, 0x913c, 0x7328,
1874 0x185d, 0x2ca6, 0x7914, 0x9e28,
1875 0x171b, 0x3e36, 0x7d7c, 0xebe8,
1876 0x4199, 0x82ee, 0x19f4, 0x2e58,
1877 0x4807, 0xc40e, 0x130c, 0x3208,
1878 0x1905, 0x2e0a, 0x5804, 0xac08,
1879 0x213f, 0x132a, 0xadfc, 0x5ba8,
1880 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
/* x8 ECC symbol size: eight 16-bit eigenvectors per error symbol. */
1883 static const u16 x8_vectors[] = {
1884 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
1885 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
1886 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
1887 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
1888 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
1889 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
1890 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
1891 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
1892 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
1893 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
1894 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
1895 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
1896 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
1897 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
1898 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
1899 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
1900 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
1901 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
1902 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
/*
 * Search the eigenvector table for the error symbol matching @syndrome.
 * Returns the symbol index, or a negative value when no symbol reduces the
 * syndrome to zero. NOTE(review): the syndrome-reduction statements inside
 * the inner loop and the success return are elided in this extract.
 */
1905 static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
1908 unsigned int i, err_sym;
/* One candidate symbol per group of v_dim vectors. */
1910 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
1912 unsigned v_idx = err_sym * v_dim;
1913 unsigned v_end = (err_sym + 1) * v_dim;
1915 /* walk over all 16 bits of the syndrome */
1916 for (i = 1; i < (1U << 16); i <<= 1) {
1918 /* if bit is set in that eigenvector... */
1919 if (v_idx < v_end && vectors[v_idx] & i) {
1920 u16 ev_comp = vectors[v_idx++];
1922 /* ... and bit set in the modified syndrome, */
1932 /* can't get to zero, move to next symbol */
1937 edac_dbg(0, "syndrome(%x) not found\n", syndrome);
/*
 * Translate a decoded error symbol into a channel number, for x4 and x8
 * symbol sizes. NOTE(review): the switch on @sym_size and most case bodies
 * are elided in this extract — only the two returns and the invalid-symbol
 * warning are visible.
 */
1941 static int map_err_sym_to_channel(int err_sym, int sym_size)
1954 return err_sym >> 4;
1960 /* imaginary bits not in a DIMM */
1962 WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
1974 return err_sym >> 3;
/*
 * Decode @syndrome with the table matching the configured ECC symbol size
 * (x4 or x8) and map the resulting error symbol to a channel.
 */
1980 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
1982 struct amd64_pvt *pvt = mci->pvt_info;
1985 if (pvt->ecc_sym_sz == 8)
1986 err_sym = decode_syndrome(syndrome, x8_vectors,
1987 ARRAY_SIZE(x8_vectors),
1989 else if (pvt->ecc_sym_sz == 4)
1990 err_sym = decode_syndrome(syndrome, x4_vectors,
1991 ARRAY_SIZE(x4_vectors),
/* Anything other than 4 or 8 means our register parsing went wrong. */
1994 amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
1998 return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
/*
 * Report a decoded bus error to the EDAC core, translating @err->err_code
 * into a human-readable string. ecc_type 2 = corrected, 1 = uncorrected.
 * NOTE(review): the 'case' labels of the switch are elided in this extract.
 */
2001 static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
2004 enum hw_event_mc_err_type err_type;
2008 err_type = HW_EVENT_ERR_CORRECTED;
2009 else if (ecc_type == 1)
2010 err_type = HW_EVENT_ERR_UNCORRECTED;
2012 WARN(1, "Something is rotten in the state of Denmark.\n");
2016 switch (err->err_code) {
2021 string = "Failed to map error addr to a node";
2024 string = "Failed to map error addr to a csrow";
2027 string = "unknown syndrome - possible error reporting race";
2030 string = "WTF error";
2034 edac_mc_handle_error(err_type, mci, 1,
2035 err->page, err->offset, err->syndrome,
2036 err->csrow, err->channel, -1,
/*
 * MCE decoder callback: filter out non-ECC and observed-only errors, map the
 * error address to csrow/channel and hand the result to the EDAC core.
 */
2040 static inline void decode_bus_error(int node_id, struct mce *m)
2042 struct mem_ctl_info *mci = mcis[node_id];
2043 struct amd64_pvt *pvt = mci->pvt_info;
/* MCi_STATUS bits 46:45 encode the ECC type (1=UC, 2=C). */
2044 u8 ecc_type = (m->status >> 45) & 0x3;
2045 u8 xec = XEC(m->status, 0x1f);
2046 u16 ec = EC(m->status);
2048 struct err_info err;
2050 /* Bail out early if this was an 'observed' error */
2051 if (PP(ec) == NBSL_PP_OBS)
2054 /* Do only ECC errors */
2055 if (xec && xec != F10_NBSL_EXT_ERR_ECC)
2058 memset(&err, 0, sizeof(err));
2060 sys_addr = get_error_address(pvt, m);
2063 err.syndrome = extract_syndrome(m->status);
2065 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
2067 __log_bus_error(mci, &err, ecc_type);
2071 * Use pvt->F2 which contains the F2 CPU PCI device to get the related
2072 * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
2074 static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
2076 /* Reserve the ADDRESS MAP Device */
2077 pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
2079 amd64_err("error address map device not found: "
2080 "vendor %x device 0x%x (broken BIOS?)\n",
2081 PCI_VENDOR_ID_AMD, f1_id);
2085 /* Reserve the MISC Device */
2086 pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
/* F3 lookup failed: drop the reference we took on F1 before bailing. */
2088 pci_dev_put(pvt->F1);
2091 amd64_err("error F3 device not found: "
2092 "vendor %x device 0x%x (broken BIOS?)\n",
2093 PCI_VENDOR_ID_AMD, f3_id);
2097 edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2098 edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
2099 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
/* Release the references taken on F1/F3 in reserve_mc_sibling_devs(). */
2104 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
2106 pci_dev_put(pvt->F1);
2107 pci_dev_put(pvt->F3);
2111 * Retrieve the hardware registers of the memory controller (this includes the
2112 * 'Address Map' and 'Misc' device regs)
2114 static void read_mc_regs(struct amd64_pvt *pvt)
2121 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2122 * those are Read-As-Zero
2124 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2125 edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
2127 /* check first whether TOP_MEM2 is enabled */
2128 rdmsrl(MSR_K8_SYSCFG, msr_val);
/* SYSCFG bit 21 = MtrrTom2En. */
2129 if (msr_val & (1U << 21)) {
2130 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2131 edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2133 edac_dbg(0, " TOP_MEM2 disabled\n");
2135 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2137 read_dram_ctl_register(pvt);
2139 for (range = 0; range < DRAM_RANGES; range++) {
2142 /* read settings for this DRAM range */
2143 read_dram_base_limit_regs(pvt, range);
2145 rw = dram_rw(pvt, range);
2149 edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2151 get_dram_base(pvt, range),
2152 get_dram_limit(pvt, range));
2154 edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2155 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2156 (rw & 0x1) ? "R" : "-",
2157 (rw & 0x2) ? "W" : "-",
2158 dram_intlv_sel(pvt, range),
2159 dram_dst_node(pvt, range));
2162 read_dct_base_mask(pvt);
2164 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
2165 amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
2167 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2169 amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
2170 amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
/* DCT1 has its own DCLR/DCHR only when the DCTs are not ganged. */
2172 if (!dct_ganging_enabled(pvt)) {
2173 amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
2174 amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
/* Default ECC symbol size; may be raised to 8 below. */
2177 pvt->ecc_sym_sz = 4;
2179 if (pvt->fam >= 0x10) {
2180 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2181 /* F16h has only DCT0, so no need to read dbam1 */
2182 if (pvt->fam != 0x16)
2183 amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
2185 /* F10h, revD and later can do x8 ECC too */
2186 if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
2187 pvt->ecc_sym_sz = 8;
2189 dump_misc_regs(pvt);
2193 * NOTE: CPU Revision Dependent code
2196 * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2197 * k8 private pointer to -->
2198 * DRAM Bank Address mapping register
2200 * DCL register where dual_channel_active is
2202 * The DBAM register consists of 4 sets of 4 bits each definitions:
2205 * 0-3 CSROWs 0 and 1
2206 * 4-7 CSROWs 2 and 3
2207 * 8-11 CSROWs 4 and 5
2208 * 12-15 CSROWs 6 and 7
2210 * Values range from: 0 to 15
2211 * The meaning of the values depends on CPU revision and dual-channel state,
2212 * see relevant BKDG more info.
2214 * The memory controller provides for total of only 8 CSROWs in its current
2215 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2216 * single channel or two (2) DIMMs in dual channel mode.
2218 * The following code logic collapses the various tables for CSROW based on CPU
2222 * The number of PAGE_SIZE pages on the specified CSROW number it
2226 static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
2228 u32 cs_mode, nr_pages;
2229 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
2233 * The math on this doesn't look right on the surface because x/2*4 can
2234 * be simplified to x*2 but this expression makes use of the fact that
2235 * it is integral math where 1/2=0. This intermediate value becomes the
2236 * number of bits to shift the DBAM register to extract the proper CSROW
2239 cs_mode = DBAM_DIMM(csrow_nr / 2, dbam);
/* dbam_to_cs() yields MB; convert to PAGE_SIZE pages. */
2241 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
2243 edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
2244 csrow_nr, dct, cs_mode);
2245 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
2251 * Initialize the array of csrow attribute instances, based on the values
2252 * from pci config hardware registers.
/* Returns non-zero ("empty") when no enabled csrow was found. */
2254 static int init_csrows(struct mem_ctl_info *mci)
2256 struct amd64_pvt *pvt = mci->pvt_info;
2257 struct csrow_info *csrow;
2258 struct dimm_info *dimm;
2259 enum edac_type edac_mode;
2260 enum mem_type mtype;
2261 int i, j, empty = 1;
2265 amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
2269 edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2270 pvt->mc_node_id, val,
2271 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2274 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
2276 for_each_chip_select(i, 0, pvt) {
2277 bool row_dct0 = !!csrow_enabled(i, 0, pvt);
2278 bool row_dct1 = false;
2280 if (pvt->fam != 0xf)
2281 row_dct1 = !!csrow_enabled(i, 1, pvt);
/* Skip csrows with no enabled chip select on either DCT. */
2283 if (!row_dct0 && !row_dct1)
2286 csrow = mci->csrows[i];
2289 edac_dbg(1, "MC node: %d, csrow: %d\n",
2290 pvt->mc_node_id, i);
2293 nr_pages = get_csrow_nr_pages(pvt, 0, i);
2294 csrow->channels[0]->dimm->nr_pages = nr_pages;
2297 /* K8 has only one DCT */
2298 if (pvt->fam != 0xf && row_dct1) {
2299 int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
2301 csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
2302 nr_pages += row_dct1_pages;
2305 mtype = determine_memory_type(pvt, i);
2307 edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
2310 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
2312 if (pvt->nbcfg & NBCFG_ECC_ENABLE)
2313 edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
2314 EDAC_S4ECD4ED : EDAC_SECDED;
2316 edac_mode = EDAC_NONE;
2318 for (j = 0; j < pvt->channel_count; j++) {
2319 dimm = csrow->channels[j]->dimm;
2320 dimm->mtype = mtype;
2321 dimm->edac_mode = edac_mode;
2328 /* get all cores on this DCT */
/* Fill @mask with all online CPUs whose northbridge ID equals @nid. */
2329 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
2333 for_each_online_cpu(cpu)
2334 if (amd_get_nb_id(cpu) == nid)
2335 cpumask_set_cpu(cpu, mask)
2338 /* check MCG_CTL on all the cpus on this node */
/*
 * Returns true only if the NB MCE bank (MCG_CTL[NBE]) is enabled on every
 * core of node @nid. NOTE(review): the accumulation of 'ret' across cores
 * and the final return are elided in this extract.
 */
2339 static bool nb_mce_bank_enabled_on_node(u16 nid)
2345 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2346 amd64_warn("%s: Error allocating mask\n", __func__);
2350 get_cpus_on_this_dct_cpumask(mask, nid);
/* Read MCG_CTL on every core of the node into the global msrs array. */
2352 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
2354 for_each_cpu(cpu, mask) {
2355 struct msr *reg = per_cpu_ptr(msrs, cpu);
2356 nbe = reg->l & MSR_MCGCTL_NBE;
2358 edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2360 (nbe ? "enabled" : "disabled"));
2368 free_cpumask_var(mask);
/*
 * Set or clear MCG_CTL[NBE] on all cores of node @nid. When turning it on,
 * remember whether it was already on (s->flags.nb_mce_enable) so that the
 * OFF path only clears the bit if we were the ones who set it.
 */
2372 static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
2374 cpumask_var_t cmask;
2377 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
2378 amd64_warn("%s: error allocating mask\n", __func__);
2382 get_cpus_on_this_dct_cpumask(cmask, nid);
2384 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2386 for_each_cpu(cpu, cmask) {
2388 struct msr *reg = per_cpu_ptr(msrs, cpu);
2391 if (reg->l & MSR_MCGCTL_NBE)
2392 s->flags.nb_mce_enable = 1;
2394 reg->l |= MSR_MCGCTL_NBE;
2397 * Turn off NB MCE reporting only when it was off before
2399 if (!s->flags.nb_mce_enable)
2400 reg->l &= ~MSR_MCGCTL_NBE;
/* Write the (possibly modified) values back on all cores. */
2403 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2405 free_cpumask_var(cmask);
/*
 * Force-enable ECC error reporting: turn on the NB MCE bank, enable UECC/CECC
 * reporting in NBCTL (saving the previous bits for restore) and, if the BIOS
 * left DRAM ECC off, attempt to switch it on in NBCFG.
 */
2410 static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
2414 u32 value, mask = 0x3; /* UECC/CECC enable */
2416 if (toggle_ecc_err_reporting(s, nid, ON)) {
2417 amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
2421 amd64_read_pci_cfg(F3, NBCTL, &value);
/* Remember the original UECC/CECC bits so they can be restored later. */
2423 s->old_nbctl = value & mask;
2424 s->nbctl_valid = true;
2427 amd64_write_pci_cfg(F3, NBCTL, value);
2429 amd64_read_pci_cfg(F3, NBCFG, &value);
2431 edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2432 nid, value, !!(value & NBCFG_ECC_ENABLE));
2434 if (!(value & NBCFG_ECC_ENABLE)) {
2435 amd64_warn("DRAM ECC disabled on this node, enabling...\n");
2437 s->flags.nb_ecc_prev = 0;
2439 /* Attempt to turn on DRAM ECC Enable */
2440 value |= NBCFG_ECC_ENABLE;
2441 amd64_write_pci_cfg(F3, NBCFG, value);
/* Read back: the hardware may refuse the enable. */
2443 amd64_read_pci_cfg(F3, NBCFG, &value);
2445 if (!(value & NBCFG_ECC_ENABLE)) {
2446 amd64_warn("Hardware rejected DRAM ECC enable,"
2447 "check memory DIMM configuration.\n")
2450 amd64_info("Hardware accepted DRAM ECC Enable\n");
2453 s->flags.nb_ecc_prev = 1;
2456 edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2457 nid, value, !!(value & NBCFG_ECC_ENABLE));
/*
 * Undo enable_ecc_error_reporting(): restore the saved NBCTL bits, turn DRAM
 * ECC back off if we were the ones who enabled it, and restore MCG_CTL[NBE].
 */
2462 static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
2465 u32 value, mask = 0x3; /* UECC/CECC enable */
/* Nothing to restore if enable_ecc_error_reporting() never ran. */
2468 if (!s->nbctl_valid)
2471 amd64_read_pci_cfg(F3, NBCTL, &value);
2473 value |= s->old_nbctl;
2475 amd64_write_pci_cfg(F3, NBCTL, value);
2477 /* restore previous BIOS DRAM ECC "off" setting we force-enabled */
2478 if (!s->flags.nb_ecc_prev) {
2479 amd64_read_pci_cfg(F3, NBCFG, &value);
2480 value &= ~NBCFG_ECC_ENABLE;
2481 amd64_write_pci_cfg(F3, NBCFG, value);
2484 /* restore the NB Enable MCGCTL bit */
2485 if (toggle_ecc_err_reporting(s, nid, OFF))
2486 amd64_warn("Error restoring NB MCGCTL settings!\n");
2490 * EDAC requires that the BIOS have ECC enabled before
2491 * taking over the processing of ECC errors. A command line
2492 * option allows to force-enable hardware ECC later in
2493 * enable_ecc_error_reporting().
2495 static const char *ecc_msg =
2496 "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
2497 " Either enable ECC checking or force module loading by setting "
2498 "'ecc_enable_override'.\n"
2499 " (Note that use of the override may cause unknown side effects.)\n";
/*
 * Check that DRAM ECC (NBCFG) and the NB MCE bank are both enabled on node
 * @nid; log what is missing and return false otherwise.
 */
2501 static bool ecc_enabled(struct pci_dev *F3, u16 nid)
2505 bool nb_mce_en = false;
2507 amd64_read_pci_cfg(F3, NBCFG, &value);
2509 ecc_en = !!(value & NBCFG_ECC_ENABLE);
2510 amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
2512 nb_mce_en = nb_mce_bank_enabled_on_node(nid);
2514 amd64_notice("NB MCE bank disabled, set MSR "
2515 "0x%08x[4] on node %d to enable.\n",
2516 MSR_IA32_MCG_CTL, nid);
2518 if (!ecc_en || !nb_mce_en) {
2519 amd64_notice("%s", ecc_msg);
/*
 * Create the driver's sysfs debug files; on F10h+ also create the error
 * injection files. NOTE(review): the early-return-on-error lines are elided
 * in this extract.
 */
2525 static int set_mc_sysfs_attrs(struct mem_ctl_info *mci)
2527 struct amd64_pvt *pvt = mci->pvt_info;
2530 rc = amd64_create_sysfs_dbg_files(mci);
2534 if (pvt->fam >= 0x10) {
2535 rc = amd64_create_sysfs_inject_files(mci);
/* Tear down the sysfs files created by set_mc_sysfs_attrs(). */
2543 static void del_mc_sysfs_attrs(struct mem_ctl_info *mci)
2545 struct amd64_pvt *pvt = mci->pvt_info;
2547 amd64_remove_sysfs_dbg_files(mci);
2549 if (pvt->fam >= 0x10)
2550 amd64_remove_sysfs_inject_files(mci);
/*
 * Populate the generic mem_ctl_info fields: supported memory/EDAC
 * capabilities (derived from NBCAP), identification strings and the scrubber
 * callbacks.
 */
2553 static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
2554 struct amd64_family_type *fam)
2556 struct amd64_pvt *pvt = mci->pvt_info;
2558 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
2559 mci->edac_ctl_cap = EDAC_FLAG_NONE;
2561 if (pvt->nbcap & NBCAP_SECDED)
2562 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
2564 if (pvt->nbcap & NBCAP_CHIPKILL)
2565 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
2567 mci->edac_cap = determine_edac_cap(pvt);
2568 mci->mod_name = EDAC_MOD_STR;
2569 mci->mod_ver = EDAC_AMD64_VERSION;
2570 mci->ctl_name = fam->ctl_name;
2571 mci->dev_name = pci_name(pvt->F2);
2572 mci->ctl_page_to_phys = NULL;
2574 /* memory scrubber interface */
2575 mci->set_sdram_scrub_rate = set_scrub_rate;
2576 mci->get_sdram_scrub_rate = get_scrub_rate;
2580 * returns a pointer to the family descriptor on success, NULL otherwise.
/* NOTE(review): the 'switch (pvt->fam)' line and 'case 0x..' labels are
 * elided in this extract. */
2582 static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
2584 struct amd64_family_type *fam_type = NULL;
/* Cache CPU identification from boot_cpu_data for later decode decisions. */
2586 pvt->ext_model = boot_cpu_data.x86_model >> 4;
2587 pvt->stepping = boot_cpu_data.x86_mask;
2588 pvt->model = boot_cpu_data.x86_model;
2589 pvt->fam = boot_cpu_data.x86;
2593 fam_type = &family_types[K8_CPUS];
2594 pvt->ops = &family_types[K8_CPUS].ops;
2598 fam_type = &family_types[F10_CPUS];
2599 pvt->ops = &family_types[F10_CPUS].ops;
2603 if (pvt->model == 0x30) {
2604 fam_type = &family_types[F15_M30H_CPUS];
2605 pvt->ops = &family_types[F15_M30H_CPUS].ops;
2609 fam_type = &family_types[F15_CPUS];
2610 pvt->ops = &family_types[F15_CPUS].ops;
2614 if (pvt->model == 0x30) {
2615 fam_type = &family_types[F16_M30H_CPUS];
2616 pvt->ops = &family_types[F16_M30H_CPUS].ops;
2619 fam_type = &family_types[F16_CPUS];
2620 pvt->ops = &family_types[F16_CPUS].ops;
2624 amd64_err("Unsupported family!\n");
/* Only K8 reports the revF/revE distinction in the banner. */
2628 amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
2630 (pvt->ext_model >= K8_REV_F ? "revF or later "
2631 : "revE or earlier ")
2632 : ""), pvt->mc_node_id);
/*
 * Allocate and register one EDAC memory-controller instance for the node
 * owning the F2 PCI device: reserve sibling devices, read the hardware
 * registers, build the csrow tables and hook into the MCE decode chain.
 * NOTE(review): several error-path labels and 'goto' lines are elided in
 * this extract.
 */
2636 static int init_one_instance(struct pci_dev *F2)
2638 struct amd64_pvt *pvt = NULL;
2639 struct amd64_family_type *fam_type = NULL;
2640 struct mem_ctl_info *mci = NULL;
2641 struct edac_mc_layer layers[2];
2643 u16 nid = amd_get_node_id(F2);
2646 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
2650 pvt->mc_node_id = nid;
2654 fam_type = per_family_init(pvt);
2659 err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
2666 * We need to determine how many memory channels there are. Then use
2667 * that information for calculating the size of the dynamic instance
2668 * tables in the 'mci' structure.
2671 pvt->channel_count = pvt->ops->early_channel_count(pvt);
2672 if (pvt->channel_count < 0)
/* Layer 0: chip-select rows; layer 1: channels. */
2676 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
2677 layers[0].size = pvt->csels[0].b_cnt;
2678 layers[0].is_virt_csrow = true;
2679 layers[1].type = EDAC_MC_LAYER_CHANNEL;
2682 * Always allocate two channels since we can have setups with DIMMs on
2683 * only one channel. Also, this simplifies handling later for the price
2684 * of a couple of KBs tops.
2687 layers[1].is_virt_csrow = false;
2689 mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
2693 mci->pvt_info = pvt;
2694 mci->pdev = &pvt->F2->dev;
2696 setup_mci_misc_attrs(mci, fam_type);
2698 if (init_csrows(mci))
2699 mci->edac_cap = EDAC_FLAG_NONE;
2702 if (edac_mc_add_mc(mci)) {
2703 edac_dbg(1, "failed edac_mc_add_mc()\n");
2706 if (set_mc_sysfs_attrs(mci)) {
2707 edac_dbg(1, "failed edac_mc_add_mc()\n");
2711 /* register stuff with EDAC MCE */
2712 if (report_gart_errors)
2713 amd_report_gart_errors(true);
2715 amd_register_ecc_decoder(decode_bus_error);
2719 atomic_inc(&drv_instances);
/* Error paths: unwind in reverse order of setup. */
2724 edac_mc_del_mc(mci->pdev);
2729 free_mc_sibling_devs(pvt);
2738 static int probe_one_instance(struct pci_dev *pdev,
2739 const struct pci_device_id *mc_type)
2741 u16 nid = amd_get_node_id(pdev);
2742 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2743 struct ecc_settings *s;
2746 ret = pci_enable_device(pdev);
2748 edac_dbg(0, "ret=%d\n", ret);
2753 s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
2759 if (!ecc_enabled(F3, nid)) {
2762 if (!ecc_enable_override)
2765 amd64_warn("Forcing ECC on!\n");
2767 if (!enable_ecc_error_reporting(s, nid, F3))
2771 ret = init_one_instance(pdev);
2773 amd64_err("Error probing instance: %d\n", nid);
2774 restore_ecc_error_reporting(s, nid, F3);
2781 ecc_stngs[nid] = NULL;
2787 static void remove_one_instance(struct pci_dev *pdev)
2789 struct mem_ctl_info *mci;
2790 struct amd64_pvt *pvt;
2791 u16 nid = amd_get_node_id(pdev);
2792 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2793 struct ecc_settings *s = ecc_stngs[nid];
2795 mci = find_mci_by_dev(&pdev->dev);
2798 del_mc_sysfs_attrs(mci);
2799 /* Remove from EDAC CORE tracking list */
2800 mci = edac_mc_del_mc(&pdev->dev);
2804 pvt = mci->pvt_info;
2806 restore_ecc_error_reporting(s, nid, F3);
2808 free_mc_sibling_devs(pvt);
2810 /* unregister from EDAC MCE */
2811 amd_report_gart_errors(false);
2812 amd_unregister_ecc_decoder(decode_bus_error);
2814 kfree(ecc_stngs[nid]);
2815 ecc_stngs[nid] = NULL;
2817 /* Free the EDAC CORE resources */
2818 mci->pvt_info = NULL;
2826 * This table is part of the interface for loading drivers for PCI devices. The
2827 * PCI core identifies what devices are on a system during boot, and then
2828 * inquiry this table to see if this driver is for a given device found.
2830 static const struct pci_device_id amd64_pci_table[] = {
2832 .vendor = PCI_VENDOR_ID_AMD,
2833 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
2834 .subvendor = PCI_ANY_ID,
2835 .subdevice = PCI_ANY_ID,
2840 .vendor = PCI_VENDOR_ID_AMD,
2841 .device = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
2842 .subvendor = PCI_ANY_ID,
2843 .subdevice = PCI_ANY_ID,
2848 .vendor = PCI_VENDOR_ID_AMD,
2849 .device = PCI_DEVICE_ID_AMD_15H_NB_F2,
2850 .subvendor = PCI_ANY_ID,
2851 .subdevice = PCI_ANY_ID,
2856 .vendor = PCI_VENDOR_ID_AMD,
2857 .device = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
2858 .subvendor = PCI_ANY_ID,
2859 .subdevice = PCI_ANY_ID,
2864 .vendor = PCI_VENDOR_ID_AMD,
2865 .device = PCI_DEVICE_ID_AMD_16H_NB_F2,
2866 .subvendor = PCI_ANY_ID,
2867 .subdevice = PCI_ANY_ID,
2872 .vendor = PCI_VENDOR_ID_AMD,
2873 .device = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
2874 .subvendor = PCI_ANY_ID,
2875 .subdevice = PCI_ANY_ID,
2882 MODULE_DEVICE_TABLE(pci, amd64_pci_table);
2884 static struct pci_driver amd64_pci_driver = {
2885 .name = EDAC_MOD_STR,
2886 .probe = probe_one_instance,
2887 .remove = remove_one_instance,
2888 .id_table = amd64_pci_table,
2891 static void setup_pci_device(void)
2893 struct mem_ctl_info *mci;
2894 struct amd64_pvt *pvt;
2903 pvt = mci->pvt_info;
2904 pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
2906 pr_warn("%s(): Unable to create PCI control\n", __func__);
2907 pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
2911 static int __init amd64_edac_init(void)
2915 printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
2919 if (amd_cache_northbridges() < 0)
2923 mcis = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
2924 ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
2925 if (!(mcis && ecc_stngs))
2928 msrs = msrs_alloc();
2932 err = pci_register_driver(&amd64_pci_driver);
2937 if (!atomic_read(&drv_instances))
2938 goto err_no_instances;
2944 pci_unregister_driver(&amd64_pci_driver);
2961 static void __exit amd64_edac_exit(void)
2964 edac_pci_release_generic_ctl(pci_ctl);
2966 pci_unregister_driver(&amd64_pci_driver);
2978 module_init(amd64_edac_init);
2979 module_exit(amd64_edac_exit);
2981 MODULE_LICENSE("GPL");
2982 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
2983 "Dave Peterson, Thayne Harbaugh");
2984 MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
2985 EDAC_AMD64_VERSION);
2987 module_param(edac_op_state, int, 0444);
2988 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");