2 * Driver for Pondicherry2 memory controller.
4 * Copyright (c) 2016, Intel Corporation.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * [Derived from sb_edac.c]
17 * Translation of system physical addresses to DIMM addresses
18 * is a two-stage process:
20 * First, the Pondicherry 2 memory controller handles slice and channel interleaving
21 * in "sys2pmi()". This is (almost) completely common between platforms.
23 * Then a platform-specific dunit (DIMM unit) completes the process to provide DIMM,
24 * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
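 *
 * In this driver that flow is: get_memory_error_data() calls sys2pmi() for
 * stage one, then hands the resulting PMI index/address to the platform's
 * dunit_ops->pmi2mem() (apl_pmi2mem() or dnv_pmi2mem()) for stage two.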
27 #include <linux/module.h>
28 #include <linux/init.h>
29 #include <linux/pci.h>
30 #include <linux/pci_ids.h>
31 #include <linux/slab.h>
32 #include <linux/delay.h>
33 #include <linux/edac.h>
34 #include <linux/mmzone.h>
35 #include <linux/smp.h>
36 #include <linux/bitmap.h>
37 #include <linux/math64.h>
38 #include <linux/mod_devicetable.h>
39 #include <asm/cpu_device_id.h>
40 #include <asm/intel-family.h>
41 #include <asm/processor.h>
45 #include "edac_module.h"
46 #include "pnd2_edac.h"
48 #define APL_NUM_CHANNELS 4
49 #define DNV_NUM_CHANNELS 2
50 #define DNV_MAX_DIMMS 2 /* Max DIMMs per channel */
54 DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
67 int dimm_geom[APL_NUM_CHANNELS];
72 * System address space is divided into multiple regions with
73 * different interleave rules in each. The as0/as1 regions
74 * have no interleaving at all. The as2 region is interleaved
75 * between two channels. The mot region is magic and may overlap
76 * other regions, with its interleave rules taking precedence.
77 * Addresses not in any of these regions are interleaved across all enabled channels.
80 static struct region {
86 static struct dunit_ops {
92 int dimms_per_channel;
93 int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
94 int (*get_registers)(void);
95 int (*check_ecc)(void);
96 void (*mk_region)(char *name, struct region *rp, void *asym);
97 void (*get_dimm_config)(struct mem_ctl_info *mci);
98 int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
99 struct dram_addr *daddr, char *msg);
102 static struct mem_ctl_info *pnd2_mci;
104 #define PND2_MSG_SIZE 256
107 #define pnd2_printk(level, fmt, arg...) \
108 edac_printk(level, "pnd2", fmt, ##arg)
110 #define pnd2_mc_printk(mci, level, fmt, arg...) \
111 edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)
113 #define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
114 #define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
115 #define SELECTOR_DISABLED (-1)
116 #define _4GB (1ul << 32)
118 #define PMI_ADDRESS_WIDTH 31
119 #define PND_MAX_PHYS_BIT 39
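/* sys2pmi() treats addresses at or above 1ull << PND_MAX_PHYS_BIT (512 GB) as non-DRAM */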
121 #define APL_ASYMSHIFT 28
122 #define DNV_ASYMSHIFT 31
123 #define CH_HASH_MASK_LSB 6
124 #define SLICE_HASH_MASK_LSB 6
125 #define MOT_SLC_INTLV_BIT 12
126 #define LOG2_PMI_ADDR_GRANULARITY 5
129 #define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo))
130 #define U64_LSHIFT(val, s) ((u64)(val) << (s))
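/*
 * For example, GET_BITFIELD(0xABCD, 4, 7) == 0xC and
 * U64_LSHIFT(0x3, 32) == 0x300000000ull.
 */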
132 #ifdef CONFIG_X86_INTEL_SBI_APL
133 #include "linux/platform_data/sbi_apl.h"
134 int sbi_send(int port, int off, int op, u32 *data)
136 struct sbi_apl_message sbi_arg;
139 memset(&sbi_arg, 0, sizeof(sbi_arg));
141 if (op == 0 || op == 4 || op == 6)
144 sbi_arg.data = *data;
147 sbi_arg.port_address = port;
148 sbi_arg.register_offset = off;
149 ret = sbi_apl_commit(&sbi_arg);
150 if (ret || sbi_arg.status)
151 edac_dbg(2, "sbi_send status=%d ret=%d data=%x\n",
152 sbi_arg.status, ret, sbi_arg.data);
155 ret = sbi_arg.status;
157 if (ret == 0 && read)
158 *data = sbi_arg.data;
163 int sbi_send(int port, int off, int op, u32 *data)
169 static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
173 edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
176 ret = sbi_send(port, off + 4, op, (u32 *)(data + 4));
178 ret = sbi_send(port, off, op, (u32 *)data);
179 pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
180 sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
187 static u64 get_mem_ctrl_hub_base_addr(void)
189 struct b_cr_mchbar_lo_pci lo;
190 struct b_cr_mchbar_hi_pci hi;
191 struct pci_dev *pdev;
193 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
195 pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
196 pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
203 edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
207 return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
210 static u64 get_sideband_reg_base_addr(void)
212 struct pci_dev *pdev;
215 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
217 pci_read_config_dword(pdev, 0x10, &lo);
218 pci_read_config_dword(pdev, 0x14, &hi);
220 return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
226 static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
228 struct pci_dev *pdev;
233 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
237 pci_read_config_dword(pdev, off, data);
240 /* MMIO via memory controller hub base address */
241 if (op == 0 && port == 0x4c) {
242 addr = get_mem_ctrl_hub_base_addr();
246 /* MMIO via sideband register base address */
247 addr = get_sideband_reg_base_addr();
250 addr += (port << 16);
253 base = ioremap((resource_size_t)addr, 0x10000);
258 *(u32 *)(data + 4) = *(u32 *)(base + off + 4);
259 *(u32 *)data = *(u32 *)(base + off);
264 edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
265 (sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);
270 #define RD_REGP(regp, regname, port) \
273 regname##_r_opcode, \
274 regp, sizeof(struct regname), \
277 #define RD_REG(regp, regname) \
278 ops->rd_reg(regname ## _port, \
280 regname##_r_opcode, \
281 regp, sizeof(struct regname), \
284 static u64 top_lm, top_hm;
285 static bool two_slices;
286 static bool two_channels; /* Both PMI channels in one slice enabled */
288 static u8 sym_chan_mask;
289 static u8 asym_chan_mask;
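/* Channel masks have one bit per PMI channel: bits 0-1 for slice 0 channels 0-1, bits 2-3 for slice 1 */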
292 static int slice_selector = -1;
293 static int chan_selector = -1;
294 static u64 slice_hash_mask;
295 static u64 chan_hash_mask;
297 static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
302 edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
305 static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
308 pr_info(FW_BUG "MOT mask cannot be zero\n");
311 if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
312 pr_info(FW_BUG "MOT mask not power of two\n");
316 pr_info(FW_BUG "MOT region base/mask alignment error\n");
320 rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
322 edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
325 static bool in_region(struct region *rp, u64 addr)
330 return rp->base <= addr && addr <= rp->limit;
333 static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
337 if (!p->slice_0_mem_disabled)
338 mask |= p->sym_slice0_channel_enabled;
340 if (!p->slice_1_disabled)
341 mask |= p->sym_slice1_channel_enabled << 2;
343 if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
349 static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
350 struct b_cr_asym_mem_region0_mchbar *as0,
351 struct b_cr_asym_mem_region1_mchbar *as1,
352 struct b_cr_asym_2way_mem_region_mchbar *as2way)
354 const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
357 if (as2way->asym_2way_interleave_enable)
358 mask = intlv[as2way->asym_2way_intlv_mode];
359 if (as0->slice0_asym_enable)
360 mask |= (1 << as0->slice0_asym_channel_select);
361 if (as1->slice1_asym_enable)
362 mask |= (4 << as1->slice1_asym_channel_select);
363 if (p->slice_0_mem_disabled)
365 if (p->slice_1_disabled)
367 if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
373 static struct b_cr_tolud_pci tolud;
374 static struct b_cr_touud_lo_pci touud_lo;
375 static struct b_cr_touud_hi_pci touud_hi;
376 static struct b_cr_asym_mem_region0_mchbar asym0;
377 static struct b_cr_asym_mem_region1_mchbar asym1;
378 static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
379 static struct b_cr_mot_out_base_mchbar mot_base;
380 static struct b_cr_mot_out_mask_mchbar mot_mask;
381 static struct b_cr_slice_channel_hash chash;
383 /* Apollo Lake dunit */
385 * Validated on a board with just two DIMMs in the [0] and [2] positions
386 * in this array. The other port numbers match the documentation, but caution is advised.
389 static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
390 static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];
392 /* Denverton dunit */
393 static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
394 static struct d_cr_dsch dsch;
395 static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
396 static struct d_cr_drp drp[DNV_NUM_CHANNELS];
397 static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
398 static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
399 static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
400 static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
401 static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
402 static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];
404 static void apl_mk_region(char *name, struct region *rp, void *asym)
406 struct b_cr_asym_mem_region0_mchbar *a = asym;
409 U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
410 U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
411 GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
414 static void dnv_mk_region(char *name, struct region *rp, void *asym)
416 struct b_cr_asym_mem_region_denverton *a = asym;
419 U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
420 U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
421 GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
424 static int apl_get_registers(void)
428 if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
431 for (i = 0; i < APL_NUM_CHANNELS; i++)
432 if (RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
438 static int dnv_get_registers(void)
442 if (RD_REG(&dsch, d_cr_dsch))
445 for (i = 0; i < DNV_NUM_CHANNELS; i++)
446 if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
447 RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
448 RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
449 RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
450 RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
451 RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
452 RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
453 RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
460 * Read all the h/w config registers once here (they don't
461 * change at run time). Figure out which address ranges have
462 * which interleave characteristics.
464 static int get_registers(void)
466 const int intlv[] = { 10, 11, 12, 12 };
468 if (RD_REG(&tolud, b_cr_tolud_pci) ||
469 RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
470 RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
471 RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
472 RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
473 RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
474 RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
475 RD_REG(&chash, b_cr_slice_channel_hash))
478 if (ops->get_registers())
481 if (ops->type == DNV) {
482 /* PMI channel idx (always 0) for asymmetric region */
483 asym0.slice0_asym_channel_select = 0;
484 asym1.slice1_asym_channel_select = 0;
485 /* PMI channel bitmap (always 1) for symmetric region */
486 chash.sym_slice0_channel_enabled = 0x1;
487 chash.sym_slice1_channel_enabled = 0x1;
490 if (asym0.slice0_asym_enable)
491 ops->mk_region("as0", &as0, &asym0);
493 if (asym1.slice1_asym_enable)
494 ops->mk_region("as1", &as1, &asym1);
496 if (asym_2way.asym_2way_interleave_enable) {
497 mk_region("as2way", &as2,
498 U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
499 U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
500 GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
503 if (mot_base.imr_en) {
504 mk_region_mask("mot", &mot,
505 U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
506 U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
509 top_lm = U64_LSHIFT(tolud.tolud, 20);
510 top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);
512 two_slices = !chash.slice_1_disabled &&
513 !chash.slice_0_mem_disabled &&
514 (chash.sym_slice0_channel_enabled != 0) &&
515 (chash.sym_slice1_channel_enabled != 0);
516 two_channels = !chash.ch_1_disabled &&
517 !chash.enable_pmi_dual_data_mode &&
518 ((chash.sym_slice0_channel_enabled == 3) ||
519 (chash.sym_slice1_channel_enabled == 3));
521 sym_chan_mask = gen_sym_mask(&chash);
522 asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
523 chan_mask = sym_chan_mask | asym_chan_mask;
525 if (two_slices && !two_channels) {
529 slice_selector = intlv[chash.interleave_mode];
530 } else if (!two_slices && two_channels) {
534 chan_selector = intlv[chash.interleave_mode];
535 } else if (two_slices && two_channels) {
536 if (chash.hvm_mode) {
540 slice_selector = intlv[chash.interleave_mode];
541 chan_selector = intlv[chash.interleave_mode] + 1;
547 slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
549 slice_hash_mask |= BIT_ULL(slice_selector);
554 chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
556 chan_hash_mask |= BIT_ULL(chan_selector);
562 /* Get a contiguous memory address (remove the MMIO gap) */
563 static u64 remove_mmio_gap(u64 sys)
565 return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
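/*
 * For example, with top_lm (TOLUD) at 2 GB the MMIO hole spans [2 GB, 4 GB),
 * so a 5 GB system address maps to 5 GB - (4 GB - 2 GB) = 3 GB of contiguous DRAM.
 */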
568 /* Squeeze out one address bit, shift upper part down to fill gap */
569 static void remove_addr_bit(u64 *addr, int bitidx)
576 mask = (1ull << bitidx) - 1;
577 *addr = ((*addr >> 1) & ~mask) | (*addr & mask);
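/* For example, removing bit 2 from 0b101101 yields 0b10101: bits below bit 2 stay put, higher bits shift down by one */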
580 /* XOR all the bits from addr specified in mask */
581 static int hash_by_mask(u64 addr, u64 mask)
583 u64 result = addr & mask;
585 result = (result >> 32) ^ result;
586 result = (result >> 16) ^ result;
587 result = (result >> 8) ^ result;
588 result = (result >> 4) ^ result;
589 result = (result >> 2) ^ result;
590 result = (result >> 1) ^ result;
592 return (int)result & 1;
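/* i.e. the parity of the bits selected by mask; e.g. hash_by_mask(0b1101, 0b0111) == 0 since two selected bits are set */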
596 * First stage decode. Take the system address and figure out which
597 * second stage will deal with it based on interleave modes.
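 * On success *pmiidx carries the slice number in bit 1 and the PMI channel
 * in bit 0, and *pmiaddr is the address with the interleave bit(s) removed.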
599 static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
601 u64 contig_addr, contig_base, contig_offset, contig_base_adj;
602 int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
603 MOT_CHAN_INTLV_BIT_1SLC_2CH;
604 int slice_intlv_bit_rm = SELECTOR_DISABLED;
605 int chan_intlv_bit_rm = SELECTOR_DISABLED;
606 /* Determine if address is in the MOT region. */
607 bool mot_hit = in_region(&mot, addr);
608 /* Calculate the number of symmetric channels enabled. */
609 int sym_channels = hweight8(sym_chan_mask);
612 * The amount we need to shift the asym base can be determined by the
613 * number of enabled symmetric channels.
614 * NOTE: This can only work because symmetric memory is not supposed
615 * to do a 3-way interleave.
617 int sym_chan_shift = sym_channels >> 1;
619 /* Give up if address is out of range, or in MMIO gap */
620 if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
621 (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
622 snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
626 /* Get a contiguous memory address (remove the MMIO gap) */
627 contig_addr = remove_mmio_gap(addr);
629 if (in_region(&as0, addr)) {
630 *pmiidx = asym0.slice0_asym_channel_select;
632 contig_base = remove_mmio_gap(as0.base);
633 contig_offset = contig_addr - contig_base;
634 contig_base_adj = (contig_base >> sym_chan_shift) *
635 ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
636 contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
637 } else if (in_region(&as1, addr)) {
638 *pmiidx = 2u + asym1.slice1_asym_channel_select;
640 contig_base = remove_mmio_gap(as1.base);
641 contig_offset = contig_addr - contig_base;
642 contig_base_adj = (contig_base >> sym_chan_shift) *
643 ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
644 contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
645 } else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
648 mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
649 *pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
650 channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
651 hash_by_mask(contig_addr, chan_hash_mask);
652 *pmiidx |= (u32)channel1;
654 contig_base = remove_mmio_gap(as2.base);
655 chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
656 contig_offset = contig_addr - contig_base;
657 remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
658 contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
660 /* Otherwise we're in normal, boring symmetric mode. */
667 slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
668 slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
670 slice_intlv_bit_rm = slice_selector;
671 slice1 = hash_by_mask(addr, slice_hash_mask);
674 *pmiidx = (u32)slice1 << 1;
680 mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
681 MOT_CHAN_INTLV_BIT_1SLC_2CH;
684 chan_intlv_bit_rm = mot_intlv_bit;
685 channel1 = (addr >> mot_intlv_bit) & 1;
687 chan_intlv_bit_rm = chan_selector;
688 channel1 = hash_by_mask(contig_addr, chan_hash_mask);
691 *pmiidx |= (u32)channel1;
695 /* Remove the chan_selector bit first */
696 remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
697 /* Remove the slice bit (we remove it second because it must be the lower one) */
698 remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
699 *pmiaddr = contig_addr;
704 /* Translate PMI address to memory (rank, row, bank, column) */
705 #define C(n) (0x10 | (n)) /* column */
706 #define B(n) (0x20 | (n)) /* bank */
707 #define R(n) (0x40 | (n)) /* row */
708 #define RS (0x80) /* rank */
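/* High nibble = field type, low nibble = bit index within the field (apl_pmi2mem() splits them with ~0xf and 0xf); e.g. C(3) == 0x13 is column-address bit 3 */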
724 static struct dimm_geometry {
729 u16 bits[PMI_ADDRESS_WIDTH];
732 .addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
733 .rowbits = 15, .colbits = 10,
735 C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
736 R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
737 R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
742 .addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
743 .rowbits = 16, .colbits = 10,
745 C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
746 R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
747 R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
752 .addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
753 .rowbits = 16, .colbits = 10,
755 C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
756 R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
757 R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
762 .addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
763 .rowbits = 16, .colbits = 11,
765 C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
766 R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
767 R(10), C(7), C(8), C(9), R(11), RS, C(11), R(12), R(13),
772 .addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
773 .rowbits = 15, .colbits = 10,
775 C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
776 R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
777 R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
782 .addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
783 .rowbits = 16, .colbits = 10,
785 C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
786 R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
787 R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
792 .addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
793 .rowbits = 16, .colbits = 10,
795 C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
796 R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
797 R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
802 .addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
803 .rowbits = 16, .colbits = 11,
805 C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
806 R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
807 R(9), R(10), C(8), C(9), R(11), RS, C(11), R(12), R(13),
812 .addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
813 .rowbits = 15, .colbits = 10,
815 C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
816 B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
817 R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
822 .addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
823 .rowbits = 16, .colbits = 10,
825 C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
826 B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
827 R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
832 .addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
833 .rowbits = 16, .colbits = 10,
835 C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
836 B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
837 R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
842 .addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
843 .rowbits = 16, .colbits = 11,
845 C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
846 B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
847 R(8), R(9), R(10), C(9), R(11), RS, C(11), R(12), R(13),
853 static int bank_hash(u64 pmiaddr, int idx, int shft)
859 bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
862 bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
863 bhash ^= ((pmiaddr >> 22) & 1) << 1;
866 bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
873 static int rank_hash(u64 pmiaddr)
875 return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
878 /* Second stage decode. Compute rank, bank, row & column. */
879 static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
880 struct dram_addr *daddr, char *msg)
882 struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
883 struct pnd2_pvt *pvt = mci->pvt_info;
884 int g = pvt->dimm_geom[pmiidx];
885 struct dimm_geometry *d = &dimms[g];
886 int column = 0, bank = 0, row = 0, rank = 0;
887 int i, idx, type, skiprs = 0;
889 for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
890 int bit = (pmiaddr >> i) & 1;
892 if (i + skiprs >= PMI_ADDRESS_WIDTH) {
893 snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
897 type = d->bits[i + skiprs] & ~0xf;
898 idx = d->bits[i + skiprs] & 0xf;
901 * On single-rank DIMMs ignore the rank select bit
902 * and shift the remainder of "bits[]" down one place.
904 if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
906 type = d->bits[i + skiprs] & ~0xf;
907 idx = d->bits[i + skiprs] & 0xf;
912 column |= (bit << idx);
915 bank |= (bit << idx);
917 bank ^= bank_hash(pmiaddr, idx, d->addrdec);
925 rank ^= rank_hash(pmiaddr);
929 snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
946 /* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
947 #define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))
949 static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
950 struct dram_addr *daddr, char *msg)
953 daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
955 daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);
958 * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
959 * flip them if DIMM1 is larger than DIMM0.
961 daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;
963 daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
964 daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
965 daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
967 daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
968 if (dmap1[pmiidx].bxor) {
970 daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
971 daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
972 if (dsch.chan_width == 0)
973 /* 64/72 bit dram channel width */
974 daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
976 /* 32/40 bit dram channel width */
977 daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
978 daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
980 daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
981 daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
982 if (dsch.chan_width == 0)
983 daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
985 daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
989 daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
990 daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
991 daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
992 daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
993 daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
994 daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
995 daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
996 daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
997 daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
998 daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
999 daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
1000 daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
1001 daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
1002 daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
1003 if (dmap4[pmiidx].row14 != 31)
1004 daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
1005 if (dmap4[pmiidx].row15 != 31)
1006 daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
1007 if (dmap4[pmiidx].row16 != 31)
1008 daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
1009 if (dmap4[pmiidx].row17 != 31)
1010 daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);
1012 daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
1013 daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
1014 daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
1015 daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
1016 daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
1017 daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
1018 daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
1019 if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
1020 daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);
1025 static int check_channel(int ch)
1027 if (drp0[ch].dramtype != 0) {
1028 pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
1030 } else if (drp0[ch].eccen == 0) {
1031 pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1037 static int apl_check_ecc_active(void)
1041 /* Check dramtype and ECC mode for each present DIMM */
1042 for (i = 0; i < APL_NUM_CHANNELS; i++)
1043 if (chan_mask & BIT(i))
1044 ret += check_channel(i);
1045 return ret ? -EINVAL : 0;
1048 #define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)
1050 static int check_unit(int ch)
1052 struct d_cr_drp *d = &drp[ch];
1054 if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
1055 pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1061 static int dnv_check_ecc_active(void)
1065 for (i = 0; i < DNV_NUM_CHANNELS; i++)
1066 ret += check_unit(i);
1067 return ret ? -EINVAL : 0;
1070 static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
1071 struct dram_addr *daddr, char *msg)
1077 ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
1081 pmiaddr >>= ops->pmiaddr_shift;
1082 /* pmi channel idx to dimm channel idx */
1083 pmiidx >>= ops->pmiidx_shift;
1084 daddr->chan = pmiidx;
1086 ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
1090 edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
1091 addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);
1096 static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
1097 struct dram_addr *daddr)
1099 enum hw_event_mc_err_type tp_event;
1100 char *optype, msg[PND2_MSG_SIZE];
1101 bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
1102 bool overflow = m->status & MCI_STATUS_OVER;
1103 bool uc_err = m->status & MCI_STATUS_UC;
1104 bool recov = m->status & MCI_STATUS_S;
1105 u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
1106 u32 mscod = GET_BITFIELD(m->status, 16, 31);
1107 u32 errcode = GET_BITFIELD(m->status, 0, 15);
1108 u32 optypenum = GET_BITFIELD(m->status, 4, 6);
1111 tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
1112 HW_EVENT_ERR_CORRECTED;
1115 * According to Table 15-9 of the Intel Architecture spec vol 3A,
1116 * memory errors should fit in this mask:
1117 * 000f 0000 1mmm cccc (binary)
1119 * f = Correction Report Filtering Bit. If 1, subsequent errors won't be shown.
1123 * If the mask doesn't match, report an error to the parsing logic
1125 if (!((errcode & 0xef80) == 0x80)) {
1126 optype = "Can't parse: it is not a mem";
1128 switch (optypenum) {
1130 optype = "generic undef request error";
1133 optype = "memory read error";
1136 optype = "memory write error";
1139 optype = "addr/cmd error";
1142 optype = "memory scrubbing error";
1145 optype = "reserved";
1150 /* Only decode errors with a valid address (ADDRV) */
1151 if (!(m->status & MCI_STATUS_ADDRV))
1154 rc = get_memory_error_data(mci, m->addr, daddr, msg);
1158 snprintf(msg, sizeof(msg),
1159 "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
1160 overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
1161 errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);
1163 edac_dbg(0, "%s\n", msg);
1165 /* Call the helper to output message */
1166 edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
1167 m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);
1172 edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
1175 static void apl_get_dimm_config(struct mem_ctl_info *mci)
1177 struct pnd2_pvt *pvt = mci->pvt_info;
1178 struct dimm_info *dimm;
1179 struct d_cr_drp0 *d;
1183 for (i = 0; i < APL_NUM_CHANNELS; i++) {
1184 if (!(chan_mask & BIT(i)))
1187 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0);
1189 edac_dbg(0, "No allocated DIMM for channel %d\n", i);
1194 for (g = 0; g < ARRAY_SIZE(dimms); g++)
1195 if (dimms[g].addrdec == d->addrdec &&
1196 dimms[g].dden == d->dden &&
1197 dimms[g].dwid == d->dwid)
1200 if (g == ARRAY_SIZE(dimms)) {
1201 edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
1205 pvt->dimm_geom[i] = g;
1206 capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
1207 (1ul << dimms[g].colbits);
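/*
 * capacity counts 8 banks x rows x columns per enabled rank; each column
 * location holds 8 bytes on the 64-bit channel, so ">> (20 - 3)" below
 * converts locations to MiB.
 */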
1208 edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
1209 dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
1211 dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
1212 dimm->mtype = MEM_DDR3;
1213 dimm->edac_mode = EDAC_SECDED;
1214 snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
1218 static const int dnv_dtypes[] = {
1219 DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
1222 static void dnv_get_dimm_config(struct mem_ctl_info *mci)
1224 int i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
1225 struct dimm_info *dimm;
1238 for (i = 0; i < DNV_NUM_CHANNELS; i++) {
1239 if (dmap4[i].row14 == 31)
1241 else if (dmap4[i].row15 == 31)
1243 else if (dmap4[i].row16 == 31)
1245 else if (dmap4[i].row17 == 31)
1250 if (memtype == MEM_DDR3) {
1251 if (dmap1[i].ca11 != 0x3f)
1258 /* DIMM0 is present if rank0 and/or rank1 is enabled */
1259 ranks_of_dimm[0] = d->rken0 + d->rken1;
1260 /* DIMM1 is present if rank2 and/or rank3 is enabled */
1261 ranks_of_dimm[1] = d->rken2 + d->rken3;
1263 for (j = 0; j < DNV_MAX_DIMMS; j++) {
1264 if (!ranks_of_dimm[j])
1267 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
1269 edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
1273 capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
1274 edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
1275 dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
1277 dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1];
1278 dimm->mtype = memtype;
1279 dimm->edac_mode = EDAC_SECDED;
1280 snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
1285 static int pnd2_register_mci(struct mem_ctl_info **ppmci)
1287 struct edac_mc_layer layers[2];
1288 struct mem_ctl_info *mci;
1289 struct pnd2_pvt *pvt;
1292 rc = ops->check_ecc();
1296 /* Allocate a new MC control structure */
1297 layers[0].type = EDAC_MC_LAYER_CHANNEL;
1298 layers[0].size = ops->channels;
1299 layers[0].is_virt_csrow = false;
1300 layers[1].type = EDAC_MC_LAYER_SLOT;
1301 layers[1].size = ops->dimms_per_channel;
1302 layers[1].is_virt_csrow = true;
1303 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1307 pvt = mci->pvt_info;
1308 memset(pvt, 0, sizeof(*pvt));
1310 mci->mod_name = "pnd2_edac.c";
1311 mci->dev_name = ops->name;
1312 mci->ctl_name = "Pondicherry2";
1314 /* Get dimm basic config and the memory layout */
1315 ops->get_dimm_config(mci);
1317 if (edac_mc_add_mc(mci)) {
1318 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1328 static void pnd2_unregister_mci(struct mem_ctl_info *mci)
1330 if (unlikely(!mci || !mci->pvt_info)) {
1331 pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
1335 /* Remove MC sysfs nodes */
1336 edac_mc_del_mc(NULL);
1337 edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
1342 * Callback function registered with core kernel mce code.
1343 * Called once for each logged error.
1345 static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
1347 struct mce *mce = (struct mce *)data;
1348 struct mem_ctl_info *mci;
1349 struct dram_addr daddr;
1352 if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
1360 * Just let mcelog handle it if the error is
1361 * outside the memory controller. A memory error
1362 * is indicated by bit 7 = 1 and bits 8-11, 13-15 = 0.
1363 * Bit 12 has a special meaning.
1365 if ((mce->status & 0xefff) >> 7 != 1)
1368 if (mce->mcgstatus & MCG_STATUS_MCIP)
1373 pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
1374 pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
1375 mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
1376 pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
1377 pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
1378 pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
1379 pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
1380 mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);
1382 pnd2_mce_output_error(mci, mce, &daddr);
1384 /* Advise mcelog that the error was handled */
1388 static struct notifier_block pnd2_mce_dec = {
1389 .notifier_call = pnd2_mce_check_error,
1392 #ifdef CONFIG_EDAC_DEBUG
1394 * Write an address to this file to exercise the address decode
1395 * logic in this driver.
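 *
 * A typical session (the debugfs mount point may vary):
 *   echo 0x12345678 > /sys/kernel/debug/edac/pnd2_test/pnd2_debug_addr
 *   cat /sys/kernel/debug/edac/pnd2_test/pnd2_debug_results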
1397 static u64 pnd2_fake_addr;
1398 #define PND2_BLOB_SIZE 1024
1399 static char pnd2_result[PND2_BLOB_SIZE];
1400 static struct dentry *pnd2_test;
1401 static struct debugfs_blob_wrapper pnd2_blob = {
1402 .data = pnd2_result,
1406 static int debugfs_u64_set(void *data, u64 val)
1408 struct dram_addr daddr;
1413 /* ADDRV + MemRd + Unknown channel */
1414 m.status = MCI_STATUS_ADDRV + 0x9f;
1416 pnd2_mce_output_error(pnd2_mci, &m, &daddr);
1417 snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
1418 "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
1419 m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
1420 pnd2_blob.size = strlen(pnd2_blob.data);
1424 DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
1426 static void setup_pnd2_debug(void)
1428 pnd2_test = edac_debugfs_create_dir("pnd2_test");
1429 edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
1430 &pnd2_fake_addr, &fops_u64_wo);
1431 debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
1434 static void teardown_pnd2_debug(void)
1436 debugfs_remove_recursive(pnd2_test);
1439 static void setup_pnd2_debug(void) {}
1440 static void teardown_pnd2_debug(void) {}
1441 #endif /* CONFIG_EDAC_DEBUG */
1444 static int pnd2_probe(void)
1449 rc = get_registers();
1453 return pnd2_register_mci(&pnd2_mci);
1456 static void pnd2_remove(void)
1459 pnd2_unregister_mci(pnd2_mci);
1462 static struct dunit_ops apl_ops = {
1465 .pmiaddr_shift = LOG2_PMI_ADDR_GRANULARITY,
1467 .channels = APL_NUM_CHANNELS,
1468 .dimms_per_channel = 1,
1469 .rd_reg = apl_rd_reg,
1470 .get_registers = apl_get_registers,
1471 .check_ecc = apl_check_ecc_active,
1472 .mk_region = apl_mk_region,
1473 .get_dimm_config = apl_get_dimm_config,
1474 .pmi2mem = apl_pmi2mem,
1477 static struct dunit_ops dnv_ops = {
1482 .channels = DNV_NUM_CHANNELS,
1483 .dimms_per_channel = 2,
1484 .rd_reg = dnv_rd_reg,
1485 .get_registers = dnv_get_registers,
1486 .check_ecc = dnv_check_ecc_active,
1487 .mk_region = dnv_mk_region,
1488 .get_dimm_config = dnv_get_dimm_config,
1489 .pmi2mem = dnv_pmi2mem,
1492 static const struct x86_cpu_id pnd2_cpuids[] = {
1493 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
1494 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops },
1497 MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
1499 static int __init pnd2_init(void)
1501 const struct x86_cpu_id *id;
1506 id = x86_match_cpu(pnd2_cpuids);
1510 ops = (struct dunit_ops *)id->driver_data;
1512 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1517 pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
1524 mce_register_decode_chain(&pnd2_mce_dec);
1530 static void __exit pnd2_exit(void)
1533 teardown_pnd2_debug();
1534 mce_unregister_decode_chain(&pnd2_mce_dec);
1538 module_init(pnd2_init);
1539 module_exit(pnd2_exit);
1541 module_param(edac_op_state, int, 0444);
1542 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
1544 MODULE_LICENSE("GPL v2");
1545 MODULE_AUTHOR("Tony Luck");
1546 MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");