/*
 * Driver for Pondicherry2 memory controller.
 *
 * Copyright (c) 2016, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * [Derived from sb_edac.c]
 *
 * Translation of system physical addresses to DIMM addresses
 * is a two stage process:
 *
 * First the Pondicherry 2 memory controller handles slice and channel interleaving
 * in "sys2pmi()". This is (almost) completely common between platforms.
 *
 * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
 * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/bitmap.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/processor.h>

#include "edac_module.h"
#include "pnd2_edac.h"

#define APL_NUM_CHANNELS	4
#define DNV_NUM_CHANNELS	2
#define DNV_MAX_DIMMS		2 /* Max DIMMs per channel */
enum type {
	APL,
	DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
};

struct pnd2_pvt {
	int dimm_geom[APL_NUM_CHANNELS];
};
/*
 * System address space is divided into multiple regions with
 * different interleave rules in each. The as0/as1 regions
 * have no interleaving at all. The as2 region is interleaved
 * between two channels. The mot region is magic and may overlap
 * other regions, with its interleave rules taking precedence.
 * Addresses not in any of these regions are interleaved across
 * all enabled channels.
 */
static struct region {
	u64 base;
	u64 limit;
	char *name;
} mot, as0, as1, as2;

static struct dunit_ops {
	char *name;
	enum type type;
	int pmiaddr_shift;
	int pmiidx_shift;
	int channels;
	int dimms_per_channel;
	int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
	int (*get_registers)(void);
	int (*check_ecc)(void);
	void (*mk_region)(char *name, struct region *rp, void *asym);
	void (*get_dimm_config)(struct mem_ctl_info *mci);
	int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg);
} *ops;

static struct mem_ctl_info *pnd2_mci;

#define PND2_MSG_SIZE	256

#define pnd2_printk(level, fmt, arg...)	\
	edac_printk(level, "pnd2", fmt, ##arg)

#define pnd2_mc_printk(mci, level, fmt, arg...)	\
	edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)

#define MOT_CHAN_INTLV_BIT_1SLC_2CH	12
#define MOT_CHAN_INTLV_BIT_2SLC_2CH	13
#define SELECTOR_DISABLED	(-1)
#define _4GB			(1ul << 32)

#define PMI_ADDRESS_WIDTH	31
#define PND_MAX_PHYS_BIT	39

#define APL_ASYMSHIFT		28
#define DNV_ASYMSHIFT		31
#define CH_HASH_MASK_LSB	6
#define SLICE_HASH_MASK_LSB	6
#define MOT_SLC_INTLV_BIT	12
#define LOG2_PMI_ADDR_GRANULARITY	5
#define MOT_SHIFT		24

#define GET_BITFIELD(v, lo, hi)	(((v) & GENMASK_ULL(hi, lo)) >> (lo))
#define U64_LSHIFT(val, s)	((u64)(val) << (s))
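
/*
 * Example: GET_BITFIELD(0xabcd, 4, 7) masks bits 7..4 (0x00c0) and
 * shifts down, yielding 0xc; U64_LSHIFT(3, 20) widens to u64 before
 * shifting, yielding 0x300000, so shifts past bit 31 cannot overflow.
 */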

#ifdef CONFIG_X86_INTEL_SBI_APL
#include "linux/platform_data/sbi_apl.h"
static int sbi_send(int port, int off, int op, u32 *data)
{
	struct sbi_apl_message sbi_arg;
	int ret, read = 0;

	memset(&sbi_arg, 0, sizeof(sbi_arg));

	if (op == 0 || op == 4 || op == 6)
		read = 1;
	else
		sbi_arg.data = *data;

	sbi_arg.port_address = port;
	sbi_arg.register_offset = off;
	ret = sbi_apl_commit(&sbi_arg);
	if (ret || sbi_arg.status)
		edac_dbg(2, "sbi_send status=%d ret=%d data=%x\n",
			 sbi_arg.status, ret, sbi_arg.data);

	if (!ret)
		ret = sbi_arg.status;

	if (ret == 0 && read)
		*data = sbi_arg.data;

	return ret;
}
#else
static int sbi_send(int port, int off, int op, u32 *data)
{
	return -EUNATCH;
}
#endif

static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	int ret = 0;

	edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);

	ret = sbi_send(port, off + 4, op, (u32 *)(data + 4));
	ret |= sbi_send(port, off, op, (u32 *)data);
	pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
		    sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);

	return ret;
}

static u64 get_mem_ctrl_hub_base_addr(void)
{
	struct b_cr_mchbar_lo_pci lo;
	struct b_cr_mchbar_hi_pci hi;
	struct pci_dev *pdev;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);

	pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
	pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);

	if (!lo.enable) {
		edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
		return 0;
	}

	return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
}

static u64 get_sideband_reg_base_addr(void)
{
	struct pci_dev *pdev;
	u32 hi, lo;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);

	pci_read_config_dword(pdev, 0x10, &lo);
	pci_read_config_dword(pdev, 0x14, &hi);

	return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
}

static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	struct pci_dev *pdev;
	char *base;
	u64 addr;

	if (op == 4) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
		if (!pdev)
			return -ENODEV;
		pci_read_config_dword(pdev, off, data);
		pci_dev_put(pdev);
	} else {
		/* MMIO via memory controller hub base address */
		if (op == 0 && port == 0x4c) {
			addr = get_mem_ctrl_hub_base_addr();
		} else {
			/* MMIO via sideband register base address */
			addr = get_sideband_reg_base_addr();
			addr += (port << 16);
		}

		base = ioremap((resource_size_t)addr, 0x10000);
		if (!base)
			return -ENODEV;
		if (sz == 8)
			*(u32 *)(data + 4) = *(u32 *)(base + off + 4);
		*(u32 *)data = *(u32 *)(base + off);
		iounmap(base);
	}

	edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
		 (sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);

	return 0;
}

#define RD_REGP(regp, regname, port)	\
	ops->rd_reg(port,		\
		    regname##_offset,	\
		    regname##_r_opcode,	\
		    regp, sizeof(struct regname),	\
		    #regname)

#define RD_REG(regp, regname)		\
	ops->rd_reg(regname ## _port,	\
		    regname##_offset,	\
		    regname##_r_opcode,	\
		    regp, sizeof(struct regname),	\
		    #regname)

static u64 top_lm, top_hm;
static bool two_slices;
static bool two_channels; /* Both PMI channels in one slice enabled */

static u8 sym_chan_mask;
static u8 asym_chan_mask;
static u8 chan_mask;

static int slice_selector = -1;
static int chan_selector = -1;
static u64 slice_hash_mask;
static u64 chan_hash_mask;

static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
{
	rp->base = base;
	rp->limit = limit;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
}

static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
{
	if (mask == 0) {
		pr_info(FW_BUG "MOT mask cannot be zero\n");
		return;
	}
	if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
		pr_info(FW_BUG "MOT mask not power of two\n");
		return;
	}
	if (base & ~mask) {
		pr_info(FW_BUG "MOT region base/mask alignment error\n");
		return;
	}
	rp->base = base;
	rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
}
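
/*
 * Example with made-up register values: mask = GENMASK_ULL(39, 28)
 * and base = 0x10000000 describe the region [0x10000000, 0x1fffffff];
 * the limit is (base | ~mask) clipped to the 40-bit physical space.
 */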

static bool in_region(struct region *rp, u64 addr)
{
	return rp->base <= addr && addr <= rp->limit;
}

static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
{
	int mask = 0;

	if (!p->slice_0_mem_disabled)
		mask |= p->sym_slice0_channel_enabled;

	if (!p->slice_1_disabled)
		mask |= p->sym_slice1_channel_enabled << 2;

	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
		mask &= 0x5;

	return mask;
}

static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
			 struct b_cr_asym_mem_region0_mchbar *as0,
			 struct b_cr_asym_mem_region1_mchbar *as1,
			 struct b_cr_asym_2way_mem_region_mchbar *as2way)
{
	const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
	int mask = 0;

	if (as2way->asym_2way_interleave_enable)
		mask = intlv[as2way->asym_2way_intlv_mode];
	if (as0->slice0_asym_enable)
		mask |= (1 << as0->slice0_asym_channel_select);
	if (as1->slice1_asym_enable)
		mask |= (4 << as1->slice1_asym_channel_select);
	if (p->slice_0_mem_disabled)
		mask &= 0xc;
	if (p->slice_1_disabled)
		mask &= 0x3;
	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
		mask &= 0x5;

	return mask;
}

static struct b_cr_tolud_pci tolud;
static struct b_cr_touud_lo_pci touud_lo;
static struct b_cr_touud_hi_pci touud_hi;
static struct b_cr_asym_mem_region0_mchbar asym0;
static struct b_cr_asym_mem_region1_mchbar asym1;
static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
static struct b_cr_mot_out_base_mchbar mot_base;
static struct b_cr_mot_out_mask_mchbar mot_mask;
static struct b_cr_slice_channel_hash chash;

/* Apollo Lake dunit */
/*
 * Validated on board with just two DIMMs in the [0] and [2] positions
 * in this array. The other port numbers match the documentation, but
 * caution advised.
 */
static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];

/* Denverton dunit */
static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
static struct d_cr_dsch dsch;
static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
static struct d_cr_drp drp[DNV_NUM_CHANNELS];
static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];

static void apl_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region0_mchbar *a = asym;

	mk_region(name, rp,
		  U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
		  U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
		  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
}

static void dnv_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region_denverton *a = asym;

	mk_region(name, rp,
		  U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
		  U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
		  GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
}

static int apl_get_registers(void)
{
	int ret = -ENODEV;
	int i;

	if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
		return -ENODEV;

	/*
	 * RD_REGP() will fail for unpopulated or non-existent
	 * DIMM slots. Return success if we find at least one DIMM.
	 */
	for (i = 0; i < APL_NUM_CHANNELS; i++)
		if (!RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
			ret = 0;

	return ret;
}

static int dnv_get_registers(void)
{
	int i;

	if (RD_REG(&dsch, d_cr_dsch))
		return -ENODEV;

	for (i = 0; i < DNV_NUM_CHANNELS; i++)
		if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
		    RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
		    RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
		    RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
		    RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
		    RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
		    RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
		    RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
			return -ENODEV;

	return 0;
}

/*
 * Read all the h/w config registers once here (they don't
 * change at run time). Figure out which address ranges have
 * which interleave characteristics.
 */
static int get_registers(void)
{
	const int intlv[] = { 10, 11, 12, 12 };

	if (RD_REG(&tolud, b_cr_tolud_pci) ||
	    RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
	    RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
	    RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
	    RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
	    RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
	    RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
	    RD_REG(&chash, b_cr_slice_channel_hash))
		return -ENODEV;

	if (ops->get_registers())
		return -ENODEV;

	if (ops->type == DNV) {
		/* PMI channel idx (always 0) for asymmetric region */
		asym0.slice0_asym_channel_select = 0;
		asym1.slice1_asym_channel_select = 0;
		/* PMI channel bitmap (always 1) for symmetric region */
		chash.sym_slice0_channel_enabled = 0x1;
		chash.sym_slice1_channel_enabled = 0x1;
	}

	if (asym0.slice0_asym_enable)
		ops->mk_region("as0", &as0, &asym0);

	if (asym1.slice1_asym_enable)
		ops->mk_region("as1", &as1, &asym1);

	if (asym_2way.asym_2way_interleave_enable) {
		mk_region("as2way", &as2,
			  U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
			  U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
			  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
	}

	if (mot_base.imr_en) {
		mk_region_mask("mot", &mot,
			       U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
			       U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
	}

	top_lm = U64_LSHIFT(tolud.tolud, 20);
	top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);

	two_slices = !chash.slice_1_disabled &&
		     !chash.slice_0_mem_disabled &&
		     (chash.sym_slice0_channel_enabled != 0) &&
		     (chash.sym_slice1_channel_enabled != 0);
	two_channels = !chash.ch_1_disabled &&
		       !chash.enable_pmi_dual_data_mode &&
		       ((chash.sym_slice0_channel_enabled == 3) ||
			(chash.sym_slice1_channel_enabled == 3));

	sym_chan_mask = gen_sym_mask(&chash);
	asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
	chan_mask = sym_chan_mask | asym_chan_mask;

	if (two_slices && !two_channels) {
		if (chash.hvm_mode)
			slice_selector = 29;
		else
			slice_selector = intlv[chash.interleave_mode];
	} else if (!two_slices && two_channels) {
		if (chash.hvm_mode)
			chan_selector = 29;
		else
			chan_selector = intlv[chash.interleave_mode];
	} else if (two_slices && two_channels) {
		if (chash.hvm_mode) {
			slice_selector = 29;
			chan_selector = 30;
		} else {
			slice_selector = intlv[chash.interleave_mode];
			chan_selector = intlv[chash.interleave_mode] + 1;
		}
	}

	if (two_slices) {
		slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
		if (!two_channels)
			slice_hash_mask |= BIT_ULL(slice_selector);
	}

	if (two_channels) {
		chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
		if (!two_slices)
			chan_hash_mask |= BIT_ULL(chan_selector);
	}

	return 0;
}

/* Get a contiguous memory address (remove the MMIO gap) */
static u64 remove_mmio_gap(u64 sys)
{
	return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
}
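
/*
 * Example: with top_lm (TOLUD) = 2GB, system address 0x100000000 (4GB)
 * becomes 4GB - (4GB - 2GB) = 2GB, so DRAM remapped above the MMIO
 * hole lines up contiguously behind low memory.
 */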

/* Squeeze out one address bit, shift upper part down to fill gap */
static void remove_addr_bit(u64 *addr, int bitidx)
{
	u64 mask;

	if (bitidx == -1)
		return;

	mask = (1ull << bitidx) - 1;
	*addr = ((*addr >> 1) & ~mask) | (*addr & mask);
}
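
/*
 * Example: removing bit 2 from 0b101101 keeps bits 1:0, shifts bits
 * 5:3 down one place and yields 0b10101; the interleave selector bit
 * disappears from the per-channel address.
 */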

/* XOR all the bits from addr specified in mask */
static int hash_by_mask(u64 addr, u64 mask)
{
	u64 result = addr & mask;

	result = (result >> 32) ^ result;
	result = (result >> 16) ^ result;
	result = (result >> 8) ^ result;
	result = (result >> 4) ^ result;
	result = (result >> 2) ^ result;
	result = (result >> 1) ^ result;

	return (int)result & 1;
}
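
/*
 * Example: hash_by_mask(0x00ff, 0x0f0f) folds bits 3:0 (0x000f), four
 * ones, so the parity is 0; the parity of the masked address bits is
 * what picks the slice or channel.
 */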

/*
 * First stage decode. Take the system address and figure out which
 * second stage will deal with it based on interleave modes.
 */
static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
{
	u64 contig_addr, contig_base, contig_offset, contig_base_adj;
	int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
				MOT_CHAN_INTLV_BIT_1SLC_2CH;
	int slice_intlv_bit_rm = SELECTOR_DISABLED;
	int chan_intlv_bit_rm = SELECTOR_DISABLED;
	/* Determine if address is in the MOT region. */
	bool mot_hit = in_region(&mot, addr);
	/* Calculate the number of symmetric regions enabled. */
	int sym_channels = hweight8(sym_chan_mask);

	/*
	 * The amount we need to shift the asym base can be determined by the
	 * number of enabled symmetric channels.
	 * NOTE: This can only work because symmetric memory is not supposed
	 * to do a 3-way interleave.
	 */
	int sym_chan_shift = sym_channels >> 1;
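
	/*
	 * Example: two symmetric channels give sym_channels = 2 and
	 * sym_chan_shift = 1 (each channel holds half the range); four
	 * give shift 2. A 3-way split would not reduce to a shift.
	 */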

	/* Give up if address is out of range, or in MMIO gap */
	if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
	    (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
		snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
		return -EINVAL;
	}

	/* Get a contiguous memory address (remove the MMIO gap) */
	contig_addr = remove_mmio_gap(addr);

	if (in_region(&as0, addr)) {
		*pmiidx = asym0.slice0_asym_channel_select;

		contig_base = remove_mmio_gap(as0.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
				  ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as1, addr)) {
		*pmiidx = 2u + asym1.slice1_asym_channel_select;

		contig_base = remove_mmio_gap(as1.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
				  ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
		bool channel1;

		mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
		*pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
		channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
			hash_by_mask(contig_addr, chan_hash_mask);
		*pmiidx |= (u32)channel1;

		contig_base = remove_mmio_gap(as2.base);
		chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
		contig_offset = contig_addr - contig_base;
		remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
		contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
	} else {
		/* Otherwise we're in normal, boring symmetric mode. */
		*pmiidx = 0u;

		if (two_slices) {
			bool slice1;

			if (mot_hit) {
				slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
				slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
			} else {
				slice_intlv_bit_rm = slice_selector;
				slice1 = hash_by_mask(addr, slice_hash_mask);
			}

			*pmiidx = (u32)slice1 << 1;
		}

		if (two_channels) {
			bool channel1;

			mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
					MOT_CHAN_INTLV_BIT_1SLC_2CH;

			if (mot_hit) {
				chan_intlv_bit_rm = mot_intlv_bit;
				channel1 = (addr >> mot_intlv_bit) & 1;
			} else {
				chan_intlv_bit_rm = chan_selector;
				channel1 = hash_by_mask(contig_addr, chan_hash_mask);
			}

			*pmiidx |= (u32)channel1;
		}
	}

	/* Remove the chan_selector bit first */
	remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
	/* Remove the slice bit (we remove it second because it must be lower) */
	remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
	*pmiaddr = contig_addr;

	return 0;
}

/* Translate PMI address to memory (rank, row, bank, column) */
#define C(n) (0x10 | (n))	/* column */
#define B(n) (0x20 | (n))	/* bank */
#define R(n) (0x40 | (n))	/* row */
#define RS   (0x80)		/* rank */
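
/*
 * Example: a bits[] entry of C(3) (= 0x13) maps that PMI address bit
 * to column-address bit 3; the high nibble encodes the field type,
 * the low nibble the bit index within the field.
 */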

static struct dimm_geometry {
	u8 addrdec;
	u8 dden;
	u8 dwid;
	u8 rowbits, colbits;
	u16 bits[PMI_ADDRESS_WIDTH];
} dimms[] = {
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			0, 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, C(11), R(12), R(13),
			R(14), R(15), 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			0, 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, C(11), R(12), R(13),
			R(14), R(15), 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
			0, 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, C(11), R(12), R(13),
			R(14), R(15), 0, 0
		}
	}
};

static int bank_hash(u64 pmiaddr, int idx, int shft)
{
	int bhash = 0;

	switch (idx) {
	case 0:
		bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
		break;
	case 1:
		bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
		bhash ^= ((pmiaddr >> 22) & 1) << 1;
		break;
	case 2:
		bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
		break;
	}

	return bhash;
}

static int rank_hash(u64 pmiaddr)
{
	return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
}
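
/*
 * Example: rank_hash(0x10400) XORs PMI address bits 16 and 10, both 1
 * here, and returns 0; with rank hashing enabled this parity flips
 * the decoded rank-select bit.
 */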

/* Second stage decode. Compute rank, bank, row & column. */
static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg)
{
	struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
	struct pnd2_pvt *pvt = mci->pvt_info;
	int g = pvt->dimm_geom[pmiidx];
	struct dimm_geometry *d = &dimms[g];
	int column = 0, bank = 0, row = 0, rank = 0;
	int i, idx, type, skiprs = 0;

	for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
		int bit = (pmiaddr >> i) & 1;

		if (i + skiprs >= PMI_ADDRESS_WIDTH) {
			snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
			return -EINVAL;
		}

		type = d->bits[i + skiprs] & ~0xf;
		idx = d->bits[i + skiprs] & 0xf;

		/*
		 * On single rank DIMMs ignore the rank select bit
		 * and shift remainder of "bits[]" down one place.
		 */
		if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
			skiprs = 1;
			type = d->bits[i + skiprs] & ~0xf;
			idx = d->bits[i + skiprs] & 0xf;
		}

		switch (type) {
		case C(0):
			column |= (bit << idx);
			break;
		case B(0):
			bank |= (bit << idx);
			if (cr_drp0->bahen)
				bank ^= bank_hash(pmiaddr, idx, d->addrdec);
			break;
		case R(0):
			row |= (bit << idx);
			break;
		case RS:
			rank = bit;
			if (cr_drp0->rsien)
				rank ^= rank_hash(pmiaddr);
			break;
		default:
			if (bit) {
				snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
				return -EINVAL;
			}
			break;
		}
	}

	daddr->col = column;
	daddr->bank = bank;
	daddr->row = row;
	daddr->rank = rank;
	daddr->dimm = 0;

	return 0;
}

/* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))
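
/*
 * Example: dnv_get_bit(0x40, 6, 2) plucks bit 6 (here 1) and returns
 * it at bit position 2, i.e. 4; the dmap registers supply the "in"
 * positions and the DRAM geometry fixes the "out" positions.
 */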

static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg)
{
	/* Rank 0 or 1 */
	daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
	/* Rank 2 or 3 */
	daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);

	/*
	 * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
	 * flip them if DIMM1 is larger than DIMM0.
	 */
	daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;

	daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
	if (dsch.ddr4en)
		daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
	if (dmap1[pmiidx].bxor) {
		if (dsch.ddr4en) {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
			if (dsch.chan_width == 0)
				/* 64/72 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				/* 32/40 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
		} else {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
			if (dsch.chan_width == 0)
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
		}
	}

	daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
	if (dmap4[pmiidx].row14 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
	if (dmap4[pmiidx].row15 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
	if (dmap4[pmiidx].row16 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
	if (dmap4[pmiidx].row17 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);

	daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
	if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
		daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);

	return 0;
}

static int check_channel(int ch)
{
	if (drp0[ch].dramtype != 0) {
		pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
		return 1;
	} else if (drp0[ch].eccen == 0) {
		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
		return 1;
	}
	return 0;
}

static int apl_check_ecc_active(void)
{
	int i, ret = 0;

	/* Check dramtype and ECC mode for each present DIMM */
	for (i = 0; i < APL_NUM_CHANNELS; i++)
		if (chan_mask & BIT(i))
			ret += check_channel(i);
	return ret ? -EINVAL : 0;
}

#define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)

static int check_unit(int ch)
{
	struct d_cr_drp *d = &drp[ch];

	if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
		return 1;
	}
	return 0;
}

static int dnv_check_ecc_active(void)
{
	int i, ret = 0;

	for (i = 0; i < DNV_NUM_CHANNELS; i++)
		ret += check_unit(i);
	return ret ? -EINVAL : 0;
}

static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
				 struct dram_addr *daddr, char *msg)
{
	u64 pmiaddr;
	u32 pmiidx;
	int ret;

	ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
	if (ret)
		return ret;

	pmiaddr >>= ops->pmiaddr_shift;
	/* pmi channel idx to dimm channel idx */
	pmiidx >>= ops->pmiidx_shift;
	daddr->chan = pmiidx;

	ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
	if (ret)
		return ret;

	edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
		 addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);

	return 0;
}

static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
				  struct dram_addr *daddr)
{
	enum hw_event_mc_err_type tp_event;
	char *optype, msg[PND2_MSG_SIZE];
	bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
	bool overflow = m->status & MCI_STATUS_OVER;
	bool uc_err = m->status & MCI_STATUS_UC;
	bool recov = m->status & MCI_STATUS_S;
	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
	u32 mscod = GET_BITFIELD(m->status, 16, 31);
	u32 errcode = GET_BITFIELD(m->status, 0, 15);
	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
	int rc;

	tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
			    HW_EVENT_ERR_CORRECTED;

	/*
	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
	 * memory errors should fit in this mask:
	 *	000f 0000 1mmm cccc (binary)
	 * where:
	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
	 *	    won't be shown
	 *	mmm = error type
	 *	cccc = channel
	 * If the mask doesn't match, report an error to the parsing logic
	 */
	if (!((errcode & 0xef80) == 0x80)) {
		optype = "Can't parse: it is not a mem";
	} else {
		switch (optypenum) {
		case 0:
			optype = "generic undef request error";
			break;
		case 1:
			optype = "memory read error";
			break;
		case 2:
			optype = "memory write error";
			break;
		case 3:
			optype = "addr/cmd error";
			break;
		case 4:
			optype = "memory scrubbing error";
			break;
		default:
			optype = "reserved";
			break;
		}
	}

	/* Only decode errors with a valid address (ADDRV) */
	if (!(m->status & MCI_STATUS_ADDRV))
		return;

	rc = get_memory_error_data(mci, m->addr, daddr, msg);
	if (rc)
		goto address_error;

	snprintf(msg, sizeof(msg),
		 "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
		 overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
		 errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);

	edac_dbg(0, "%s\n", msg);

	/* Call the helper to output message */
	edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
			     m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);

	return;

address_error:
	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
}

static void apl_get_dimm_config(struct mem_ctl_info *mci)
{
	struct pnd2_pvt *pvt = mci->pvt_info;
	struct dimm_info *dimm;
	struct d_cr_drp0 *d;
	u64 capacity;
	int i, g;

	for (i = 0; i < APL_NUM_CHANNELS; i++) {
		if (!(chan_mask & BIT(i)))
			continue;

		dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0);
		if (!dimm) {
			edac_dbg(0, "No allocated DIMM for channel %d\n", i);
			continue;
		}

		d = &drp0[i];
		for (g = 0; g < ARRAY_SIZE(dimms); g++)
			if (dimms[g].addrdec == d->addrdec &&
			    dimms[g].dden == d->dden &&
			    dimms[g].dwid == d->dwid)
				break;

		if (g == ARRAY_SIZE(dimms)) {
			edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
			continue;
		}

		pvt->dimm_geom[i] = g;
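		/*
		 * capacity is in 8-byte units (one 64-bit access per
		 * rank/bank/row/column address), so ">> (20 - 3)"
		 * (divide by 2^20, multiply by 2^3) converts it to MiB.
		 */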
		capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
			   (1ul << dimms[g].colbits);
		edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
		dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
		dimm->grain = 32;
		dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
		dimm->mtype = MEM_DDR3;
		dimm->edac_mode = EDAC_SECDED;
		snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
	}
}

static const int dnv_dtypes[] = {
	DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
};

static void dnv_get_dimm_config(struct mem_ctl_info *mci)
{
	int i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
	struct dimm_info *dimm;
	struct d_cr_drp *d;
	u64 capacity;

	if (dsch.ddr4en) {
		memtype = MEM_DDR4;
		banks = 16;
		colbits = 10;
	} else {
		memtype = MEM_DDR3;
		banks = 8;
	}

	for (i = 0; i < DNV_NUM_CHANNELS; i++) {
		if (dmap4[i].row14 == 31)
			rowbits = 14;
		else if (dmap4[i].row15 == 31)
			rowbits = 15;
		else if (dmap4[i].row16 == 31)
			rowbits = 16;
		else if (dmap4[i].row17 == 31)
			rowbits = 17;
		else
			rowbits = 18;

		if (memtype == MEM_DDR3) {
			if (dmap1[i].ca11 != 0x3f)
				colbits = 12;
			else
				colbits = 10;
		}

		d = &drp[i];
		/* DIMM0 is present if rank0 and/or rank1 is enabled */
		ranks_of_dimm[0] = d->rken0 + d->rken1;
		/* DIMM1 is present if rank2 and/or rank3 is enabled */
		ranks_of_dimm[1] = d->rken2 + d->rken3;

		for (j = 0; j < DNV_MAX_DIMMS; j++) {
			if (!ranks_of_dimm[j])
				continue;

			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
			if (!dimm) {
				edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
				continue;
			}

			capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
			edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
			dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
			dimm->grain = 32;
			dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1];
			dimm->mtype = memtype;
			dimm->edac_mode = EDAC_SECDED;
			snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
		}
	}
}

static int pnd2_register_mci(struct mem_ctl_info **ppmci)
{
	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;
	struct pnd2_pvt *pvt;
	int rc;

	rc = ops->check_ecc();
	if (rc < 0)
		return rc;

	/* Allocate a new MC control structure */
	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = ops->channels;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = ops->dimms_per_channel;
	layers[1].is_virt_csrow = true;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
	if (!mci)
		return -ENOMEM;

	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));

	mci->mod_name = "pnd2_edac.c";
	mci->dev_name = ops->name;
	mci->ctl_name = "Pondicherry2";

	/* Get dimm basic config and the memory layout */
	ops->get_dimm_config(mci);

	if (edac_mc_add_mc(mci)) {
		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
		edac_mc_free(mci);
		return -EINVAL;
	}

	*ppmci = mci;

	return 0;
}

static void pnd2_unregister_mci(struct mem_ctl_info *mci)
{
	if (unlikely(!mci || !mci->pvt_info)) {
		pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
		return;
	}

	/* Remove MC sysfs nodes */
	edac_mc_del_mc(NULL);
	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
	edac_mc_free(mci);
}

/*
 * Callback function registered with core kernel mce code.
 * Called once for each logged error.
 */
static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
{
	struct mce *mce = (struct mce *)data;
	struct mem_ctl_info *mci;
	struct dram_addr daddr;
	char *type;

	if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
		return NOTIFY_DONE;

	mci = pnd2_mci;
	if (!mci)
		return NOTIFY_DONE;

	/*
	 * Just let mcelog handle it if the error is
	 * outside the memory controller. A memory error
	 * is indicated by bit 7 = 1 and bits 8-11,13-15 = 0;
	 * bit 12 has a special meaning.
	 */
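	/*
	 * Example: a status low word of 0x009f (0b1001_1111) passes the
	 * check below: bit 7 set, bits 8-11 and 13-15 clear, mmm = 001
	 * (memory read), cccc = 1111 (unknown channel) - the same
	 * encoding the debugfs hook at the bottom of this file injects.
	 */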
	if ((mce->status & 0xefff) >> 7 != 1)
		return NOTIFY_DONE;

	if (mce->mcgstatus & MCG_STATUS_MCIP)
		type = "Exception";
	else
		type = "Event";

	pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
	pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
		       mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
	pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
	pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
	pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
	pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
		       mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);

	pnd2_mce_output_error(mci, mce, &daddr);

	/* Advise mcelog that the error was handled */
	return NOTIFY_STOP;
}

static struct notifier_block pnd2_mce_dec = {
	.notifier_call = pnd2_mce_check_error,
};

#ifdef CONFIG_EDAC_DEBUG
/*
 * Write an address to this file to exercise the address decode
 * logic in this driver.
 */
static u64 pnd2_fake_addr;
#define PND2_BLOB_SIZE 1024
static char pnd2_result[PND2_BLOB_SIZE];
static struct dentry *pnd2_test;
static struct debugfs_blob_wrapper pnd2_blob = {
	.data = pnd2_result,
	.size = 0
};

static int debugfs_u64_set(void *data, u64 val)
{
	struct dram_addr daddr;
	struct mce m;

	*(u64 *)data = val;
	m.mcgstatus = 0;
	/* ADDRV + MemRd + Unknown channel */
	m.status = MCI_STATUS_ADDRV + 0x9f;
	m.addr = val;
	pnd2_mce_output_error(pnd2_mci, &m, &daddr);
	snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
		 "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
		 m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
	pnd2_blob.size = strlen(pnd2_blob.data);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");

static void setup_pnd2_debug(void)
{
	pnd2_test = edac_debugfs_create_dir("pnd2_test");
	edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
				 &pnd2_fake_addr, &fops_u64_wo);
	debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
}

static void teardown_pnd2_debug(void)
{
	debugfs_remove_recursive(pnd2_test);
}
#else
static void setup_pnd2_debug(void) {}
static void teardown_pnd2_debug(void) {}
#endif /* CONFIG_EDAC_DEBUG */

static int pnd2_probe(void)
{
	int rc;

	edac_dbg(2, "\n");
	rc = get_registers();
	if (rc)
		return rc;

	return pnd2_register_mci(&pnd2_mci);
}

static void pnd2_remove(void)
{
	edac_dbg(0, "\n");
	pnd2_unregister_mci(pnd2_mci);
}

static struct dunit_ops apl_ops = {
	.name			= "pnd2/apl",
	.type			= APL,
	.pmiaddr_shift		= LOG2_PMI_ADDR_GRANULARITY,
	.pmiidx_shift		= 0,
	.channels		= APL_NUM_CHANNELS,
	.dimms_per_channel	= 1,
	.rd_reg			= apl_rd_reg,
	.get_registers		= apl_get_registers,
	.check_ecc		= apl_check_ecc_active,
	.mk_region		= apl_mk_region,
	.get_dimm_config	= apl_get_dimm_config,
	.pmi2mem		= apl_pmi2mem,
};

static struct dunit_ops dnv_ops = {
	.name			= "pnd2/dnv",
	.type			= DNV,
	.pmiaddr_shift		= 0,
	.pmiidx_shift		= 1,
	.channels		= DNV_NUM_CHANNELS,
	.dimms_per_channel	= 2,
	.rd_reg			= dnv_rd_reg,
	.get_registers		= dnv_get_registers,
	.check_ecc		= dnv_check_ecc_active,
	.mk_region		= dnv_mk_region,
	.get_dimm_config	= dnv_get_dimm_config,
	.pmi2mem		= dnv_pmi2mem,
};

static const struct x86_cpu_id pnd2_cpuids[] = {
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops },
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);

static int __init pnd2_init(void)
{
	const struct x86_cpu_id *id;
	int rc;

	edac_dbg(2, "\n");

	id = x86_match_cpu(pnd2_cpuids);
	if (!id)
		return -ENODEV;

	ops = (struct dunit_ops *)id->driver_data;

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	rc = pnd2_probe();
	if (rc < 0) {
		pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
		return rc;
	}

	if (!pnd2_mci)
		return -ENODEV;

	mce_register_decode_chain(&pnd2_mce_dec);
	setup_pnd2_debug();

	return 0;
}

static void __exit pnd2_exit(void)
{
	edac_dbg(2, "\n");
	teardown_pnd2_debug();
	mce_unregister_decode_chain(&pnd2_mce_dec);
	pnd2_remove();
}

module_init(pnd2_init);
module_exit(pnd2_exit);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tony Luck");
MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");