1 /* Intel i7 core/Nehalem Memory Controller kernel module
3 * This driver supports the memory controllers found on the Intel
4 * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx,
5 * Xeon 55xx and Xeon 56xx, also known as Nehalem, Nehalem-EP, Lynnfield
6 * and Westmere-EP.
8 * This file may be distributed under the terms of the
9 * GNU General Public License version 2 only.
11 * Copyright (c) 2009-2010 by:
12 * Mauro Carvalho Chehab <mchehab@redhat.com>
14 * Red Hat Inc. http://www.redhat.com
16 * Forked and adapted from the i5400_edac driver
18 * Based on the following public Intel datasheets:
19 * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor
20 * Datasheet, Volume 2:
21 * http://download.intel.com/design/processor/datashts/320835.pdf
22 * Intel Xeon Processor 5500 Series Datasheet Volume 2
23 * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
25 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
28 #include <linux/module.h>
29 #include <linux/init.h>
30 #include <linux/pci.h>
31 #include <linux/pci_ids.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/edac.h>
35 #include <linux/mmzone.h>
36 #include <linux/edac_mce.h>
37 #include <linux/smp.h>
38 #include <asm/processor.h>
40 #include "edac_core.h"
43 static LIST_HEAD(i7core_edac_list);
44 static DEFINE_MUTEX(i7core_edac_lock);
47 static int use_pci_fixup;
48 module_param(use_pci_fixup, int, 0444);
49 MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to probe for hidden devices");
51 * This is used for Nehalem-EP and Nehalem-EX devices, where the non-core
52 * registers start at bus 255, and are not reported by BIOS.
53 * We currently handle devices with up to 2 sockets. To support systems with
54 * more sockets over QPI (Quick Path Interconnect), increase this number.
56 #define MAX_SOCKET_BUSES 2
60 * Alter this version for the module when modifications are made
62 #define I7CORE_REVISION " Ver: 1.0.0 " __DATE__
63 #define EDAC_MOD_STR "i7core_edac"
68 #define i7core_printk(level, fmt, arg...) \
69 edac_printk(level, "i7core", fmt, ##arg)
71 #define i7core_mc_printk(mci, level, fmt, arg...) \
72 edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg)
75 * i7core Memory Controller Registers
78 /* OFFSETS for Device 0 Function 0 */
80 #define MC_CFG_CONTROL 0x90
82 /* OFFSETS for Device 3 Function 0 */
84 #define MC_CONTROL 0x48
85 #define MC_STATUS 0x4c
86 #define MC_MAX_DOD 0x64
89 * OFFSETS for Device 3 Function 4, as indicated in the Xeon 5500 datasheet:
90 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
93 #define MC_TEST_ERR_RCV1 0x60
94 #define DIMM2_COR_ERR(r) ((r) & 0x7fff)
96 #define MC_TEST_ERR_RCV0 0x64
97 #define DIMM1_COR_ERR(r) (((r) >> 16) & 0x7fff)
98 #define DIMM0_COR_ERR(r) ((r) & 0x7fff)
100 /* OFFSETS for Device 3 Function 2, as indicated in the Xeon 5500 datasheet */
101 #define MC_COR_ECC_CNT_0 0x80
102 #define MC_COR_ECC_CNT_1 0x84
103 #define MC_COR_ECC_CNT_2 0x88
104 #define MC_COR_ECC_CNT_3 0x8c
105 #define MC_COR_ECC_CNT_4 0x90
106 #define MC_COR_ECC_CNT_5 0x94
108 #define DIMM_TOP_COR_ERR(r) (((r) >> 16) & 0x7fff)
109 #define DIMM_BOT_COR_ERR(r) ((r) & 0x7fff)
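/*
 * Illustrative decode (made-up value, not from the datasheet): a raw
 * MC_COR_ECC_CNT_n reading of 0x0003000a yields DIMM_TOP_COR_ERR == 3 and
 * DIMM_BOT_COR_ERR == 10, i.e. two independent 15-bit correctable-error
 * counters packed into one 32-bit register.
 */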
112 /* OFFSETS for Devices 4,5 and 6 Function 0 */
114 #define MC_CHANNEL_DIMM_INIT_PARAMS 0x58
115 #define THREE_DIMMS_PRESENT (1 << 24)
116 #define SINGLE_QUAD_RANK_PRESENT (1 << 23)
117 #define QUAD_RANK_PRESENT (1 << 22)
118 #define REGISTERED_DIMM (1 << 15)
120 #define MC_CHANNEL_MAPPER 0x60
121 #define RDLCH(r, ch) ((((r) >> (3 + ((ch) * 6))) & 0x07) - 1)
122 #define WRLCH(r, ch) ((((r) >> ((ch) * 6)) & 0x07) - 1)
124 #define MC_CHANNEL_RANK_PRESENT 0x7c
125 #define RANK_PRESENT_MASK 0xffff
127 #define MC_CHANNEL_ADDR_MATCH 0xf0
128 #define MC_CHANNEL_ERROR_MASK 0xf8
129 #define MC_CHANNEL_ERROR_INJECT 0xfc
130 #define INJECT_ADDR_PARITY 0x10
131 #define INJECT_ECC 0x08
132 #define MASK_CACHELINE 0x06
133 #define MASK_FULL_CACHELINE 0x06
134 #define MASK_MSB32_CACHELINE 0x04
135 #define MASK_LSB32_CACHELINE 0x02
136 #define NO_MASK_CACHELINE 0x00
137 #define REPEAT_EN 0x01
139 /* OFFSETS for Devices 4,5 and 6 Function 1 */
141 #define MC_DOD_CH_DIMM0 0x48
142 #define MC_DOD_CH_DIMM1 0x4c
143 #define MC_DOD_CH_DIMM2 0x50
144 #define RANKOFFSET_MASK ((1 << 12) | (1 << 11) | (1 << 10))
145 #define RANKOFFSET(x) (((x) & RANKOFFSET_MASK) >> 10)
146 #define DIMM_PRESENT_MASK (1 << 9)
147 #define DIMM_PRESENT(x) (((x) & DIMM_PRESENT_MASK) >> 9)
148 #define MC_DOD_NUMBANK_MASK ((1 << 8) | (1 << 7))
149 #define MC_DOD_NUMBANK(x) (((x) & MC_DOD_NUMBANK_MASK) >> 7)
150 #define MC_DOD_NUMRANK_MASK ((1 << 6) | (1 << 5))
151 #define MC_DOD_NUMRANK(x) (((x) & MC_DOD_NUMRANK_MASK) >> 5)
152 #define MC_DOD_NUMROW_MASK ((1 << 4) | (1 << 3) | (1 << 2))
153 #define MC_DOD_NUMROW(x) (((x) & MC_DOD_NUMROW_MASK) >> 2)
154 #define MC_DOD_NUMCOL_MASK 3
155 #define MC_DOD_NUMCOL(x) ((x) & MC_DOD_NUMCOL_MASK)
157 #define MC_RANK_PRESENT 0x7c
159 #define MC_SAG_CH_0 0x80
160 #define MC_SAG_CH_1 0x84
161 #define MC_SAG_CH_2 0x88
162 #define MC_SAG_CH_3 0x8c
163 #define MC_SAG_CH_4 0x90
164 #define MC_SAG_CH_5 0x94
165 #define MC_SAG_CH_6 0x98
166 #define MC_SAG_CH_7 0x9c
168 #define MC_RIR_LIMIT_CH_0 0x40
169 #define MC_RIR_LIMIT_CH_1 0x44
170 #define MC_RIR_LIMIT_CH_2 0x48
171 #define MC_RIR_LIMIT_CH_3 0x4C
172 #define MC_RIR_LIMIT_CH_4 0x50
173 #define MC_RIR_LIMIT_CH_5 0x54
174 #define MC_RIR_LIMIT_CH_6 0x58
175 #define MC_RIR_LIMIT_CH_7 0x5C
176 #define MC_RIR_LIMIT_MASK ((1 << 10) - 1)
178 #define MC_RIR_WAY_CH 0x80
179 #define MC_RIR_WAY_OFFSET_MASK (((1 << 14) - 1) & ~0x7)
180 #define MC_RIR_WAY_RANK_MASK 0x7
187 #define MAX_DIMMS 3 /* Max DIMMS per channel */
188 #define MAX_MCR_FUNC 4
189 #define MAX_CHAN_FUNC 3
199 struct i7core_inject {
206 /* Error address mask */
207 int channel, dimm, rank, bank, page, col;
210 struct i7core_channel {
215 struct pci_id_descr {
222 struct pci_id_table {
223 const struct pci_id_descr *descr;
228 struct list_head list;
230 struct pci_dev **pdev;
232 struct mem_ctl_info *mci;
236 struct pci_dev *pci_noncore;
237 struct pci_dev *pci_mcr[MAX_MCR_FUNC + 1];
238 struct pci_dev *pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1];
240 struct i7core_dev *i7core_dev;
242 struct i7core_info info;
243 struct i7core_inject inject;
244 struct i7core_channel channel[NUM_CHANS];
246 int channels; /* Number of active channels */
248 int ce_count_available;
249 int csrow_map[NUM_CHANS][MAX_DIMMS];
251 /* ECC corrected error counts per udimm */
252 unsigned long udimm_ce_count[MAX_DIMMS];
253 int udimm_last_ce_count[MAX_DIMMS];
254 /* ECC corrected error counts per rdimm */
255 unsigned long rdimm_ce_count[NUM_CHANS][MAX_DIMMS];
256 int rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS];
258 unsigned int is_registered;
261 struct edac_mce edac_mce;
263 /* Fifo double buffers */
264 struct mce mce_entry[MCE_LOG_LEN];
265 struct mce mce_outentry[MCE_LOG_LEN];
267 /* Fifo in/out counters */
268 unsigned mce_in, mce_out;
270 /* Counts errors that could not be queued (FIFO overrun) */
271 unsigned mce_overrun;
273 /* Struct to control EDAC polling */
274 struct edac_pci_ctl_info *i7core_pci;
277 #define PCI_DESCR(device, function, device_id) \
279 .func = (function), \
280 .dev_id = (device_id)
282 static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
283 /* Memory controller */
284 { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) },
285 { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) },
286 /* Exists only for RDIMM */
287 { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1 },
288 { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },
291 { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) },
292 { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) },
293 { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) },
294 { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC) },
297 { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) },
298 { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) },
299 { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) },
300 { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC) },
303 { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) },
304 { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
305 { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
306 { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) },
308 /* Generic Non-core registers */
310 * This is the PCI device on i7core and on Xeon 35xx (8086:2c41).
311 * On Xeon 55xx, however, it has a different id (8086:2c40), so the
312 * probing code needs to test for the other address in case this
313 * one fails.
315 { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE) },
319 static const struct pci_id_descr pci_dev_descr_lynnfield[] = {
320 { PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR) },
321 { PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD) },
322 { PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST) },
324 { PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL) },
325 { PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR) },
326 { PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK) },
327 { PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC) },
329 { PCI_DESCR( 5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL) },
330 { PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) },
331 { PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) },
332 { PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC) },
335 * This PCI device has an alternate address on some
336 * processors, like the Core i7 860.
338 { PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE) },
341 static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
342 /* Memory controller */
343 { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2) },
344 { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2) },
345 /* Exists only for RDIMM */
346 { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2), .optional = 1 },
347 { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2) },
350 { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2) },
351 { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2) },
352 { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2) },
353 { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2) },
356 { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2) },
357 { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2) },
358 { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2) },
359 { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2) },
362 { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2) },
363 { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) },
364 { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) },
365 { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2) },
367 /* Generic Non-core registers */
368 { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2) },
372 #define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
373 static const struct pci_id_table pci_dev_table[] = {
374 PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem),
375 PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield),
376 PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere),
380 * pci_device_id table for which devices we are looking for
382 static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
383 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
384 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
385 {0,} /* 0 terminated list. */
388 /****************************************************************************
389 Ancillary status routines
390 ****************************************************************************/
392 /* MC_CONTROL bits */
393 #define CH_ACTIVE(pvt, ch) ((pvt)->info.mc_control & (1 << (8 + (ch))))
394 #define ECCx8(pvt) ((pvt)->info.mc_control & (1 << 1))
397 #define ECC_ENABLED(pvt) ((pvt)->info.mc_status & (1 << 4))
398 #define CH_DISABLED(pvt, ch) ((pvt)->info.mc_status & (1 << (ch)))
400 /* MC_MAX_DOD read functions */
401 static inline int numdimms(u32 dimms)
403 return (dimms & 0x3) + 1;
406 static inline int numrank(u32 rank)
408 static int ranks[4] = { 1, 2, 4, -EINVAL };
410 return ranks[rank & 0x3];
413 static inline int numbank(u32 bank)
415 static int banks[4] = { 4, 8, 16, -EINVAL };
417 return banks[bank & 0x3];
420 static inline int numrow(u32 row)
422 static int rows[8] = {
423 1 << 12, 1 << 13, 1 << 14, 1 << 15,
424 1 << 16, -EINVAL, -EINVAL, -EINVAL,
427 return rows[row & 0x7];
430 static inline int numcol(u32 col)
432 static int cols[4] = {
433 1 << 10, 1 << 11, 1 << 12, -EINVAL,
435 return cols[col & 0x3];
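/*
 * Worked example (illustrative value, not from any datasheet): a dimm_dod
 * word of 0x2a9 decodes, using the helpers above, as
 *
 *	DIMM_PRESENT(0x2a9)            == 1       (bit 9)
 *	numbank(MC_DOD_NUMBANK(0x2a9)) == 8       (bits 8-7 = 01)
 *	numrank(MC_DOD_NUMRANK(0x2a9)) == 2       (bits 6-5 = 01)
 *	numrow(MC_DOD_NUMROW(0x2a9))   == 1 << 14 (bits 4-2 = 010)
 *	numcol(MC_DOD_NUMCOL(0x2a9))   == 1 << 11 (bits 1-0 = 01)
 *
 * which get_dimm_config() below turns into
 * (2^14 * 2^11 * 8 * 2) >> (20 - 3) = 4096 MB for that DIMM.
 */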
438 static struct i7core_dev *get_i7core_dev(u8 socket)
440 struct i7core_dev *i7core_dev;
442 list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
443 if (i7core_dev->socket == socket)
450 static struct i7core_dev *alloc_i7core_dev(u8 socket,
451 const struct pci_id_table *table)
453 struct i7core_dev *i7core_dev;
455 i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);
459 i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * table->n_devs,
461 if (!i7core_dev->pdev) {
466 i7core_dev->socket = socket;
467 i7core_dev->n_devs = table->n_devs;
468 list_add_tail(&i7core_dev->list, &i7core_edac_list);
473 static void free_i7core_dev(struct i7core_dev *i7core_dev)
475 list_del(&i7core_dev->list);
476 kfree(i7core_dev->pdev);
480 /****************************************************************************
481 Memory check routines
482 ****************************************************************************/
483 static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
486 struct i7core_dev *i7core_dev = get_i7core_dev(socket);
492 for (i = 0; i < i7core_dev->n_devs; i++) {
493 if (!i7core_dev->pdev[i])
496 if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot &&
497 PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) {
498 return i7core_dev->pdev[i];
506 * i7core_get_active_channels() - gets the number of channels and csrows
507 * @socket: Quick Path Interconnect socket
508 * @channels: Number of channels that will be returned
509 * @csrows: Number of csrows found
511 * Since the EDAC core needs to know in advance the number of available
512 * channels and csrows in order to allocate memory for them, two similar
513 * steps are needed. The first step, implemented by this function,
514 * counts the number of csrows/channels present on one socket;
515 * this is then used to properly size the mci components.
517 * Note that none of the currently available datasheets explain, or even
518 * mention, how csrows are seen by the memory controller, so a fake
519 * csrow description has to be made up:
520 * this driver maps one DIMM to one csrow.
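/*
 * For instance (values picked for illustration), a socket with two active
 * channels carrying 2 and 1 DIMMs respectively ends up with *channels = 2
 * and *csrows = 3, and get_dimm_config() later fills csrow_map as:
 * [ch0][dimm0] = 0, [ch0][dimm1] = 1, [ch1][dimm0] = 2.
 */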
522 static int i7core_get_active_channels(const u8 socket, unsigned *channels,
525 struct pci_dev *pdev = NULL;
532 pdev = get_pdev_slot_func(socket, 3, 0);
534 i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
539 /* Device 3 function 0 reads */
540 pci_read_config_dword(pdev, MC_STATUS, &status);
541 pci_read_config_dword(pdev, MC_CONTROL, &control);
543 for (i = 0; i < NUM_CHANS; i++) {
545 /* Check if the channel is active */
546 if (!(control & (1 << (8 + i))))
549 /* Check if the channel is disabled */
550 if (status & (1 << i))
553 pdev = get_pdev_slot_func(socket, i + 4, 1);
555 i7core_printk(KERN_ERR, "Couldn't find socket %d "
560 /* Devices 4-6 function 1 */
561 pci_read_config_dword(pdev,
562 MC_DOD_CH_DIMM0, &dimm_dod[0]);
563 pci_read_config_dword(pdev,
564 MC_DOD_CH_DIMM1, &dimm_dod[1]);
565 pci_read_config_dword(pdev,
566 MC_DOD_CH_DIMM2, &dimm_dod[2]);
570 for (j = 0; j < 3; j++) {
571 if (!DIMM_PRESENT(dimm_dod[j]))
577 debugf0("Number of active channels on socket %d: %d\n",
583 static int get_dimm_config(const struct mem_ctl_info *mci, int *csrow)
585 struct i7core_pvt *pvt = mci->pvt_info;
586 struct csrow_info *csr;
587 struct pci_dev *pdev;
589 unsigned long last_page = 0;
593 /* Get data from the MC register, function 0 */
594 pdev = pvt->pci_mcr[0];
598 /* Device 3 function 0 reads */
599 pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control);
600 pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status);
601 pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
602 pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);
604 debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
605 pvt->i7core_dev->socket, pvt->info.mc_control, pvt->info.mc_status,
606 pvt->info.max_dod, pvt->info.ch_map);
608 if (ECC_ENABLED(pvt)) {
609 debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
611 mode = EDAC_S8ECD8ED;
613 mode = EDAC_S4ECD4ED;
615 debugf0("ECC disabled\n");
619 /* FIXME: need to handle the error codes */
620 debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked "
622 numdimms(pvt->info.max_dod),
623 numrank(pvt->info.max_dod >> 2),
624 numbank(pvt->info.max_dod >> 4),
625 numrow(pvt->info.max_dod >> 6),
626 numcol(pvt->info.max_dod >> 9));
628 for (i = 0; i < NUM_CHANS; i++) {
629 u32 data, dimm_dod[3], value[8];
631 if (!pvt->pci_ch[i][0])
634 if (!CH_ACTIVE(pvt, i)) {
635 debugf0("Channel %i is not active\n", i);
638 if (CH_DISABLED(pvt, i)) {
639 debugf0("Channel %i is disabled\n", i);
643 /* Devices 4-6 function 0 */
644 pci_read_config_dword(pvt->pci_ch[i][0],
645 MC_CHANNEL_DIMM_INIT_PARAMS, &data);
647 pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ?
650 if (data & REGISTERED_DIMM)
655 if (data & THREE_DIMMS_PRESENT)
656 pvt->channel[i].dimms = 3;
657 else if (data & SINGLE_QUAD_RANK_PRESENT)
658 pvt->channel[i].dimms = 1;
660 pvt->channel[i].dimms = 2;
663 /* Devices 4-6 function 1 */
664 pci_read_config_dword(pvt->pci_ch[i][1],
665 MC_DOD_CH_DIMM0, &dimm_dod[0]);
666 pci_read_config_dword(pvt->pci_ch[i][1],
667 MC_DOD_CH_DIMM1, &dimm_dod[1]);
668 pci_read_config_dword(pvt->pci_ch[i][1],
669 MC_DOD_CH_DIMM2, &dimm_dod[2]);
671 debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
672 "%d ranks, %cDIMMs\n",
674 RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
676 pvt->channel[i].ranks,
677 (data & REGISTERED_DIMM) ? 'R' : 'U');
679 for (j = 0; j < 3; j++) {
680 u32 banks, ranks, rows, cols;
683 if (!DIMM_PRESENT(dimm_dod[j]))
686 banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
687 ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
688 rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
689 cols = numcol(MC_DOD_NUMCOL(dimm_dod[j]));
691 /* DDR3 has 8 I/O banks */
692 size = (rows * cols * banks * ranks) >> (20 - 3);
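/*
 * e.g. 2^14 rows * 2^11 cols * 8 banks * 2 ranks >> (20 - 3) = 4096 MB;
 * the "- 3" appears to fold in 8 bytes per location (64-bit wide data
 * path) - our reading of the formula, not stated in the datasheet.
 */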
694 pvt->channel[i].dimms++;
696 debugf0("\tdimm %d %d Mb offset: %x, "
697 "bank: %d, rank: %d, row: %#x, col: %#x\n",
699 RANKOFFSET(dimm_dod[j]),
700 banks, ranks, rows, cols);
702 npages = MiB_TO_PAGES(size);
704 csr = &mci->csrows[*csrow];
705 csr->first_page = last_page + 1;
707 csr->last_page = last_page;
708 csr->nr_pages = npages;
712 csr->csrow_idx = *csrow;
713 csr->nr_channels = 1;
715 csr->channels[0].chan_idx = i;
716 csr->channels[0].ce_count = 0;
718 pvt->csrow_map[i][j] = *csrow;
728 csr->dtype = DEV_X16;
731 csr->dtype = DEV_UNKNOWN;
734 csr->edac_mode = mode;
740 pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
741 pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]);
742 pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]);
743 pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]);
744 pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]);
745 pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
746 pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
747 pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
748 debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
749 for (j = 0; j < 8; j++)
750 debugf1("\t\t%#x\t%#x\t%#x\n",
751 (value[j] >> 27) & 0x1,
752 (value[j] >> 24) & 0x7,
753 (value[j] & ((1 << 24) - 1)));
759 /****************************************************************************
760 Error insertion routines
761 ****************************************************************************/
763 /* The i7core has independent error injection features per channel.
764 However, to keep the code simpler, we don't allow enabling error injection
765 on more than one channel.
766 Also, since a change to an inject parameter only takes effect at enable time,
767 we disable error injection on every write call to the sysfs nodes that
768 control the error code injection.
770 static int disable_inject(const struct mem_ctl_info *mci)
772 struct i7core_pvt *pvt = mci->pvt_info;
774 pvt->inject.enable = 0;
776 if (!pvt->pci_ch[pvt->inject.channel][0])
779 pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0],
780 MC_CHANNEL_ERROR_INJECT, 0);
786 * i7core inject inject.section
788 * accept and store error injection inject.section value
789 * bit 0 - refers to the lower 32-byte half cacheline
790 * bit 1 - refers to the upper 32-byte half cacheline
792 static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci,
793 const char *data, size_t count)
795 struct i7core_pvt *pvt = mci->pvt_info;
799 if (pvt->inject.enable)
802 rc = strict_strtoul(data, 10, &value);
803 if ((rc < 0) || (value > 3))
806 pvt->inject.section = (u32) value;
810 static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci,
813 struct i7core_pvt *pvt = mci->pvt_info;
814 return sprintf(data, "0x%08x\n", pvt->inject.section);
820 * accept and store error injection inject.type value
821 * bit 0 - repeat enable - Enable error repetition
822 * bit 1 - inject ECC error
823 * bit 2 - inject parity error
825 static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci,
826 const char *data, size_t count)
828 struct i7core_pvt *pvt = mci->pvt_info;
832 if (pvt->inject.enable)
835 rc = strict_strtoul(data, 10, &value);
836 if ((rc < 0) || (value > 7))
839 pvt->inject.type = (u32) value;
843 static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci,
846 struct i7core_pvt *pvt = mci->pvt_info;
847 return sprintf(data, "0x%08x\n", pvt->inject.type);
851 * i7core_inject_eccmask_store()
853 * The type of error (UE/CE) will depend on the inject.eccmask value:
854 * Any bits set to a 1 will flip the corresponding ECC bit
855 * Correctable errors can be injected by flipping 1 bit or the bits within
856 * a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
857 * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an
858 * uncorrectable error to be injected.
860 static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci,
861 const char *data, size_t count)
863 struct i7core_pvt *pvt = mci->pvt_info;
867 if (pvt->inject.enable)
870 rc = strict_strtoul(data, 10, &value);
874 pvt->inject.eccmask = (u32) value;
878 static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci,
881 struct i7core_pvt *pvt = mci->pvt_info;
882 return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
888 * i7core_addrmatch
889 *
890 * The address-match sysfs nodes declared below each set one field of the
891 * inject address mask: channel, dimm, rank, bank, page and col. Writing
892 * "any" removes that field from the matching criteria, while writing a
893 * number restricts error injection to addresses with that value.
896 #define DECLARE_ADDR_MATCH(param, limit) \
897 static ssize_t i7core_inject_store_##param( \
898 struct mem_ctl_info *mci, \
899 const char *data, size_t count) \
901 struct i7core_pvt *pvt; \
905 debugf1("%s()\n", __func__); \
906 pvt = mci->pvt_info; \
908 if (pvt->inject.enable) \
909 disable_inject(mci); \
911 if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))\
914 rc = strict_strtoul(data, 10, &value); \
915 if ((rc < 0) || (value >= limit)) \
919 pvt->inject.param = value; \
924 static ssize_t i7core_inject_show_##param( \
925 struct mem_ctl_info *mci, \
928 struct i7core_pvt *pvt; \
930 pvt = mci->pvt_info; \
931 debugf1("%s() pvt=%p\n", __func__, pvt); \
932 if (pvt->inject.param < 0) \
933 return sprintf(data, "any\n"); \
935 return sprintf(data, "%d\n", pvt->inject.param);\
938 #define ATTR_ADDR_MATCH(param) \
942 .mode = (S_IRUGO | S_IWUSR) \
944 .show = i7core_inject_show_##param, \
945 .store = i7core_inject_store_##param, \
948 DECLARE_ADDR_MATCH(channel, 3);
949 DECLARE_ADDR_MATCH(dimm, 3);
950 DECLARE_ADDR_MATCH(rank, 4);
951 DECLARE_ADDR_MATCH(bank, 32);
952 DECLARE_ADDR_MATCH(page, 0x10000);
953 DECLARE_ADDR_MATCH(col, 0x4000);
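/*
 * For reference, DECLARE_ADDR_MATCH(dimm, 3) expands to an
 * i7core_inject_store_dimm()/i7core_inject_show_dimm() pair: the store
 * accepts "any" (match every dimm) or a number below 3, and the show
 * prints "any" whenever the stored value is negative.
 */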
955 static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
960 debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n",
961 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
964 for (count = 0; count < 10; count++) {
967 pci_write_config_dword(dev, where, val);
968 pci_read_config_dword(dev, where, &read);
974 i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x "
975 "write=%08x. Read=%08x\n",
976 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
983 * This routine prepares the Memory Controller for error injection.
984 * The error will be injected when some process tries to write to the
985 * memory that matches the given criteria.
986 * The criteria can be set in terms of a mask where dimm, rank, bank, page
987 * and col can be specified.
988 * A -1 value for any of the mask items makes the MCU ignore
989 * that matching criterion for error injection.
991 * Note that the error only happens after a write operation to
992 * memory that matches the condition. If REPEAT_EN is not enabled in the
993 * inject mask, just one error is produced. Otherwise, errors repeat
994 * until the inject mask is cleared.
996 * FIXME: This routine assumes that the MAXNUMDIMMS value of MC_MAX_DOD
997 * is reliable enough to check whether the MC is using all
998 * three channels. However, this is not clear in the datasheet.
1000 static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci,
1001 const char *data, size_t count)
1003 struct i7core_pvt *pvt = mci->pvt_info;
1009 if (!pvt->pci_ch[pvt->inject.channel][0])
1012 rc = strict_strtoul(data, 10, &enable);
1017 pvt->inject.enable = 1;
1019 disable_inject(mci);
1023 /* Sets pvt->inject.dimm mask */
1024 if (pvt->inject.dimm < 0)
1027 if (pvt->channel[pvt->inject.channel].dimms > 2)
1028 mask |= (pvt->inject.dimm & 0x3LL) << 35;
1030 mask |= (pvt->inject.dimm & 0x1LL) << 36;
1033 /* Sets pvt->inject.rank mask */
1034 if (pvt->inject.rank < 0)
1037 if (pvt->channel[pvt->inject.channel].dimms > 2)
1038 mask |= (pvt->inject.rank & 0x1LL) << 34;
1040 mask |= (pvt->inject.rank & 0x3LL) << 34;
1043 /* Sets pvt->inject.bank mask */
1044 if (pvt->inject.bank < 0)
1047 mask |= (pvt->inject.bank & 0x1fLL) << 30;
1049 /* Sets pvt->inject.page mask */
1050 if (pvt->inject.page < 0)
1053 mask |= (pvt->inject.page & 0xffff) << 14;
1055 /* Sets pvt->inject.column mask */
1056 if (pvt->inject.col < 0)
1059 mask |= (pvt->inject.col & 0x3fff);
1062 * bit    0: REPEAT_EN
1063 * bits 1-2: MASK_HALF_CACHELINE
1064 * bit    3: INJECT_ECC
1065 * bit    4: INJECT_ADDR_PARITY
1068 injectmask = (pvt->inject.type & 1) |
1069 (pvt->inject.section & 0x3) << 1 |
1070 (pvt->inject.type & 0x6) << (3 - 1);
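/*
 * e.g. inject.type == 3 (repeat + ECC) and inject.section == 3 (both
 * cacheline halves) gives injectmask == 1 | (3 << 1) | (2 << 2) == 0xf.
 */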
1072 /* Unlock writes to registers - this register is write only */
1073 pci_write_config_dword(pvt->pci_noncore,
1074 MC_CFG_CONTROL, 0x2);
1076 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
1077 MC_CHANNEL_ADDR_MATCH, mask);
1078 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
1079 MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L);
1081 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
1082 MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask);
1084 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
1085 MC_CHANNEL_ERROR_INJECT, injectmask);
1088 * This is something undocumented, based on my tests
1089 * Without writing 8 to this register, errors aren't injected. Not sure
1092 pci_write_config_dword(pvt->pci_noncore,
1095 debugf0("Error inject addr match 0x%016llx, ecc 0x%08x,"
1097 mask, pvt->inject.eccmask, injectmask);
1103 static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
1106 struct i7core_pvt *pvt = mci->pvt_info;
1109 if (!pvt->pci_ch[pvt->inject.channel][0])
1112 pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
1113 MC_CHANNEL_ERROR_INJECT, &injectmask);
1115 debugf0("Inject error read: 0x%018x\n", injectmask);
1117 if (injectmask & 0x0c)
1118 pvt->inject.enable = 1;
1120 return sprintf(data, "%d\n", pvt->inject.enable);
1123 #define DECLARE_COUNTER(param) \
1124 static ssize_t i7core_show_counter_##param( \
1125 struct mem_ctl_info *mci, \
1128 struct i7core_pvt *pvt = mci->pvt_info; \
1130 debugf1("%s() \n", __func__); \
1131 if (!pvt->ce_count_available || (pvt->is_registered)) \
1132 return sprintf(data, "data unavailable\n"); \
1133 return sprintf(data, "%lu\n", \
1134 pvt->udimm_ce_count[param]); \
1137 #define ATTR_COUNTER(param) \
1140 .name = __stringify(udimm##param), \
1141 .mode = (S_IRUGO | S_IWUSR) \
1143 .show = i7core_show_counter_##param \
1154 static const struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = {
1155 ATTR_ADDR_MATCH(channel),
1156 ATTR_ADDR_MATCH(dimm),
1157 ATTR_ADDR_MATCH(rank),
1158 ATTR_ADDR_MATCH(bank),
1159 ATTR_ADDR_MATCH(page),
1160 ATTR_ADDR_MATCH(col),
1161 { } /* End of list */
1164 static const struct mcidev_sysfs_group i7core_inject_addrmatch = {
1165 .name = "inject_addrmatch",
1166 .mcidev_attr = i7core_addrmatch_attrs,
1169 static const struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
1173 { .attr = { .name = NULL } }
1176 static const struct mcidev_sysfs_group i7core_udimm_counters = {
1177 .name = "all_channel_counts",
1178 .mcidev_attr = i7core_udimm_counters_attrs,
1181 static const struct mcidev_sysfs_attribute i7core_sysfs_rdimm_attrs[] = {
1184 .name = "inject_section",
1185 .mode = (S_IRUGO | S_IWUSR)
1187 .show = i7core_inject_section_show,
1188 .store = i7core_inject_section_store,
1191 .name = "inject_type",
1192 .mode = (S_IRUGO | S_IWUSR)
1194 .show = i7core_inject_type_show,
1195 .store = i7core_inject_type_store,
1198 .name = "inject_eccmask",
1199 .mode = (S_IRUGO | S_IWUSR)
1201 .show = i7core_inject_eccmask_show,
1202 .store = i7core_inject_eccmask_store,
1204 .grp = &i7core_inject_addrmatch,
1207 .name = "inject_enable",
1208 .mode = (S_IRUGO | S_IWUSR)
1210 .show = i7core_inject_enable_show,
1211 .store = i7core_inject_enable_store,
1213 { } /* End of list */
1216 static const struct mcidev_sysfs_attribute i7core_sysfs_udimm_attrs[] = {
1219 .name = "inject_section",
1220 .mode = (S_IRUGO | S_IWUSR)
1222 .show = i7core_inject_section_show,
1223 .store = i7core_inject_section_store,
1226 .name = "inject_type",
1227 .mode = (S_IRUGO | S_IWUSR)
1229 .show = i7core_inject_type_show,
1230 .store = i7core_inject_type_store,
1233 .name = "inject_eccmask",
1234 .mode = (S_IRUGO | S_IWUSR)
1236 .show = i7core_inject_eccmask_show,
1237 .store = i7core_inject_eccmask_store,
1239 .grp = &i7core_inject_addrmatch,
1242 .name = "inject_enable",
1243 .mode = (S_IRUGO | S_IWUSR)
1245 .show = i7core_inject_enable_show,
1246 .store = i7core_inject_enable_store,
1248 .grp = &i7core_udimm_counters,
1250 { } /* End of list */
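/*
 * Sketch of the resulting sysfs usage (the mc0 path below is the usual
 * EDAC sysfs location and is assumed here; adjust for the actual memory
 * controller instance):
 *
 *	cd /sys/devices/system/edac/mc/mc0
 *	echo 2 > inject_type		# bit 1: inject an ECC error
 *	echo any > inject_addrmatch/dimm
 *	echo 1 > inject_enable		# arm the injection logic
 */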
1253 /****************************************************************************
1254 Device initialization routines: put/get, init/exit
1255 ****************************************************************************/
1258 * i7core_put_all_devices 'put' all the devices that we have
1259 * reserved via 'get'
1261 static void i7core_put_devices(struct i7core_dev *i7core_dev)
1265 debugf0(__FILE__ ": %s()\n", __func__);
1266 for (i = 0; i < i7core_dev->n_devs; i++) {
1267 struct pci_dev *pdev = i7core_dev->pdev[i];
1270 debugf0("Removing dev %02x:%02x.%d\n",
1272 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1277 static void i7core_put_all_devices(void)
1279 struct i7core_dev *i7core_dev, *tmp;
1281 list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) {
1282 i7core_put_devices(i7core_dev);
1283 free_i7core_dev(i7core_dev);
1287 static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table)
1289 struct pci_dev *pdev = NULL;
1293 * On Xeon 55xx, the Intel QuickPath Arch Generic Non-core PCI buses
1294 * aren't announced by ACPI, so we need to use a legacy scan probing
1297 while (table && table->descr) {
1298 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, table->descr[0].dev_id, NULL);
1299 if (unlikely(!pdev)) {
1300 for (i = 0; i < MAX_SOCKET_BUSES; i++)
1301 pcibios_scan_specific_bus(255-i);
1308 static unsigned i7core_pci_lastbus(void)
1310 int last_bus = 0, bus;
1311 struct pci_bus *b = NULL;
1313 while ((b = pci_find_next_bus(b)) != NULL) {
1315 debugf0("Found bus %d\n", bus);
1320 debugf0("Last bus %d\n", last_bus);
1326 * i7core_get_all_devices Find and perform 'get' operation on the MCH's
1327 * device/functions we want to reference for this driver
1329 * Need to 'get' the devices listed in the pci_dev_descr tables
1331 static int i7core_get_onedevice(struct pci_dev **prev,
1332 const struct pci_id_table *table,
1333 const unsigned devno,
1334 const unsigned last_bus)
1336 struct i7core_dev *i7core_dev;
1337 const struct pci_id_descr *dev_descr = &table->descr[devno];
1339 struct pci_dev *pdev = NULL;
1343 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1344 dev_descr->dev_id, *prev);
1347 * On Xeon 55xx, the Intel QuickPath Arch Generic Non-core regs
1348 * are at address 8086:2c40 instead of 8086:2c41, so we need
1349 * to probe for the alternate address in case of failure
1351 if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
1352 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1353 PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);
1355 if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
1356 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1357 PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
1366 if (dev_descr->optional)
1372 i7core_printk(KERN_INFO,
1373 "Device not found: dev %02x.%d PCI ID %04x:%04x\n",
1374 dev_descr->dev, dev_descr->func,
1375 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1377 /* End of list, leave */
1380 bus = pdev->bus->number;
1382 socket = last_bus - bus;
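/*
 * e.g. with last_bus == 255 and the non-core devices of two sockets
 * sitting on buses 255 and 254 (see MAX_SOCKET_BUSES), this maps
 * bus 255 to socket 0 and bus 254 to socket 1.
 */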
1384 i7core_dev = get_i7core_dev(socket);
1386 i7core_dev = alloc_i7core_dev(socket, table);
1393 if (i7core_dev->pdev[devno]) {
1394 i7core_printk(KERN_ERR,
1395 "Duplicated device for "
1396 "dev %02x:%02x.%d PCI ID %04x:%04x\n",
1397 bus, dev_descr->dev, dev_descr->func,
1398 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1403 i7core_dev->pdev[devno] = pdev;
1406 if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
1407 PCI_FUNC(pdev->devfn) != dev_descr->func)) {
1408 i7core_printk(KERN_ERR,
1409 "Device PCI ID %04x:%04x "
1410 "has dev %02x:%02x.%d instead of dev %02x:%02x.%d\n",
1411 PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
1412 bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
1413 bus, dev_descr->dev, dev_descr->func);
1417 /* Be sure that the device is enabled */
1418 if (unlikely(pci_enable_device(pdev) < 0)) {
1419 i7core_printk(KERN_ERR,
1421 "dev %02x:%02x.%d PCI ID %04x:%04x\n",
1422 bus, dev_descr->dev, dev_descr->func,
1423 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1427 debugf0("Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
1428 socket, bus, dev_descr->dev,
1430 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1437 static int i7core_get_all_devices(void)
1439 int i, j, rc, last_bus;
1440 struct pci_dev *pdev = NULL;
1441 const struct pci_id_table *table;
1443 last_bus = i7core_pci_lastbus();
1445 for (j = 0; j < ARRAY_SIZE(pci_dev_table); j++) {
1446 table = &pci_dev_table[j];
1447 for (i = 0; i < table->n_devs; i++) {
1450 rc = i7core_get_onedevice(&pdev, table, i,
1457 i7core_put_all_devices();
1467 static int mci_bind_devs(struct mem_ctl_info *mci,
1468 struct i7core_dev *i7core_dev)
1470 struct i7core_pvt *pvt = mci->pvt_info;
1471 struct pci_dev *pdev;
1474 /* Associates i7core_dev and mci for future usage */
1475 pvt->i7core_dev = i7core_dev;
1476 i7core_dev->mci = mci;
1478 pvt->is_registered = 0;
1479 for (i = 0; i < i7core_dev->n_devs; i++) {
1480 pdev = i7core_dev->pdev[i];
1484 func = PCI_FUNC(pdev->devfn);
1485 slot = PCI_SLOT(pdev->devfn);
1487 if (unlikely(func > MAX_MCR_FUNC))
1489 pvt->pci_mcr[func] = pdev;
1490 } else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) {
1491 if (unlikely(func > MAX_CHAN_FUNC))
1493 pvt->pci_ch[slot - 4][func] = pdev;
1494 } else if (!slot && !func)
1495 pvt->pci_noncore = pdev;
1499 debugf0("Associated fn %d.%d, dev = %p, socket %d\n",
1500 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
1501 pdev, i7core_dev->socket);
1503 if (PCI_SLOT(pdev->devfn) == 3 &&
1504 PCI_FUNC(pdev->devfn) == 2)
1505 pvt->is_registered = 1;
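/*
 * e.g. dev 03.1 (MC_TAD) lands in pvt->pci_mcr[1], while dev 05.2
 * (channel 1 RANK registers) lands in pvt->pci_ch[1][2].
 */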
1511 i7core_printk(KERN_ERR, "Device %d, function %d "
1512 "is out of the expected range\n",
1517 /****************************************************************************
1518 Error check routines
1519 ****************************************************************************/
1520 static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
1526 struct i7core_pvt *pvt = mci->pvt_info;
1527 int row = pvt->csrow_map[chan][dimm], i;
1529 for (i = 0; i < add; i++) {
1530 msg = kasprintf(GFP_KERNEL, "Corrected error "
1531 "(Socket=%d channel=%d dimm=%d)",
1532 pvt->i7core_dev->socket, chan, dimm);
1534 edac_mc_handle_fbd_ce(mci, row, 0, msg);
1539 static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
1545 struct i7core_pvt *pvt = mci->pvt_info;
1546 int add0 = 0, add1 = 0, add2 = 0;
1547 /* Updates CE counters if it is not the first time here */
1548 if (pvt->ce_count_available) {
1549 /* Updates CE counters */
1551 add2 = new2 - pvt->rdimm_last_ce_count[chan][2];
1552 add1 = new1 - pvt->rdimm_last_ce_count[chan][1];
1553 add0 = new0 - pvt->rdimm_last_ce_count[chan][0];
1557 pvt->rdimm_ce_count[chan][2] += add2;
1561 pvt->rdimm_ce_count[chan][1] += add1;
1565 pvt->rdimm_ce_count[chan][0] += add0;
1567 pvt->ce_count_available = 1;
1569 /* Store the new values */
1570 pvt->rdimm_last_ce_count[chan][2] = new2;
1571 pvt->rdimm_last_ce_count[chan][1] = new1;
1572 pvt->rdimm_last_ce_count[chan][0] = new0;
1574 /* Update the EDAC core */
1576 i7core_rdimm_update_csrow(mci, chan, 0, add0);
1578 i7core_rdimm_update_csrow(mci, chan, 1, add1);
1580 i7core_rdimm_update_csrow(mci, chan, 2, add2);
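/*
 * Example of the delta logic above (made-up counts): if dimm0's counter
 * read 5 on the previous poll and reads 9 now, add0 == 4 and four
 * corrected errors are reported to the EDAC core; the hardware counters
 * themselves are not reset here.
 */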
1584 static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
1586 struct i7core_pvt *pvt = mci->pvt_info;
1588 int i, new0, new1, new2;
1590 /* Read DEV 3: FUN 2: MC_COR_ECC_CNT regs directly */
1591 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_0,
1593 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_1,
1595 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_2,
1597 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_3,
1599 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_4,
1601 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5,
1603 for (i = 0 ; i < 3; i++) {
1604 debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
1605 (i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
1606 /* If the channel has 3 DIMMs */
1607 if (pvt->channel[i].dimms > 2) {
1608 new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
1609 new1 = DIMM_TOP_COR_ERR(rcv[i][0]);
1610 new2 = DIMM_BOT_COR_ERR(rcv[i][1]);
1612 new0 = DIMM_TOP_COR_ERR(rcv[i][0]) +
1613 DIMM_BOT_COR_ERR(rcv[i][0]);
1614 new1 = DIMM_TOP_COR_ERR(rcv[i][1]) +
1615 DIMM_BOT_COR_ERR(rcv[i][1]);
1619 i7core_rdimm_update_ce_count(mci, i, new0, new1, new2);
1623 /* This function is based on the device 3 function 4 registers as described in:
1624 * Intel Xeon Processor 5500 Series Datasheet Volume 2
1625 * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
1626 * also available at:
1627 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
1629 static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
1631 struct i7core_pvt *pvt = mci->pvt_info;
1633 int new0, new1, new2;
1635 if (!pvt->pci_mcr[4]) {
1636 debugf0("%s MCR registers not found\n", __func__);
1640 /* Corrected test errors */
1641 pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV1, &rcv1);
1642 pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV0, &rcv0);
1644 /* Store the new values */
1645 new2 = DIMM2_COR_ERR(rcv1);
1646 new1 = DIMM1_COR_ERR(rcv0);
1647 new0 = DIMM0_COR_ERR(rcv0);
1649 /* Updates CE counters if it is not the first time here */
1650 if (pvt->ce_count_available) {
1651 /* Updates CE counters */
1652 int add0, add1, add2;
1654 add2 = new2 - pvt->udimm_last_ce_count[2];
1655 add1 = new1 - pvt->udimm_last_ce_count[1];
1656 add0 = new0 - pvt->udimm_last_ce_count[0];
1660 pvt->udimm_ce_count[2] += add2;
1664 pvt->udimm_ce_count[1] += add1;
1668 pvt->udimm_ce_count[0] += add0;
1670 if (add0 | add1 | add2)
1671 i7core_printk(KERN_ERR, "New Corrected error(s): "
1672 "dimm0: +%d, dimm1: +%d, dimm2 +%d\n",
1675 pvt->ce_count_available = 1;
1677 /* Store the new values */
1678 pvt->udimm_last_ce_count[2] = new2;
1679 pvt->udimm_last_ce_count[1] = new1;
1680 pvt->udimm_last_ce_count[0] = new0;
1684 * According to tables E-11 and E-12 of chapter E.3.3 of the Intel 64 and
1685 * IA-32 Architectures Software Developer’s Manual Volume 3B,
1686 * Nehalem is defined as family 0x06, model 0x1a.
1688 * The MCA registers used here are the following ones:
1689 * struct mce field MCA Register
1690 * m->status MSR_IA32_MC8_STATUS
1691 * m->addr MSR_IA32_MC8_ADDR
1692 * m->misc MSR_IA32_MC8_MISC
1693 * In the case of Nehalem, the error information is encoded in .status and .misc
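/*
 * Decoding example for the fields extracted below (made-up MCE): with
 * m->misc == 0x0000000000050000, dimm = (0x5 >> 0) & 0x3 = 1 and
 * channel = (0x5 >> 2) & 0x3 = 1, while the upper 32 bits of m->misc
 * (zero here) carry the syndrome.
 */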
1696 static void i7core_mce_output_error(struct mem_ctl_info *mci,
1697 const struct mce *m)
1699 struct i7core_pvt *pvt = mci->pvt_info;
1700 char *type, *optype, *err, *msg;
1701 unsigned long error = m->status & 0x1ff0000l;
1702 u32 optypenum = (m->status >> 4) & 0x07;
1703 u32 core_err_cnt = (m->status >> 38) & 0x7fff;
1704 u32 dimm = (m->misc >> 16) & 0x3;
1705 u32 channel = (m->misc >> 18) & 0x3;
1706 u32 syndrome = m->misc >> 32;
1707 u32 errnum = find_first_bit(&error, 32);
1710 if (m->mcgstatus & 1)
1715 switch (optypenum) {
1717 optype = "generic undef request";
1720 optype = "read error";
1723 optype = "write error";
1726 optype = "addr/cmd error";
1729 optype = "scrubbing error";
1732 optype = "reserved";
1738 err = "read ECC error";
1741 err = "RAS ECC error";
1744 err = "write parity error";
1747 err = "redundacy loss";
1753 err = "memory range error";
1756 err = "RTID out of range";
1759 err = "address parity error";
1762 err = "byte enable parity error";
1768 /* FIXME: should convert addr into bank and rank information */
1769 msg = kasprintf(GFP_ATOMIC,
1770 "%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, "
1771 "syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n",
1772 type, (long long) m->addr, m->cpu, dimm, channel,
1773 syndrome, core_err_cnt, (long long)m->status,
1774 (long long)m->misc, optype, err);
1778 csrow = pvt->csrow_map[channel][dimm];
1780 /* Call the helper to output message */
1781 if (m->mcgstatus & 1)
1782 edac_mc_handle_fbd_ue(mci, csrow, 0,
1783 0 /* FIXME: should be channel here */, msg);
1784 else if (!pvt->is_registered)
1785 edac_mc_handle_fbd_ce(mci, csrow,
1786 0 /* FIXME: should be channel here */, msg);
1792 * i7core_check_error Retrieve and process errors reported by the
1793 * hardware. Called by the Core module.
1795 static void i7core_check_error(struct mem_ctl_info *mci)
1797 struct i7core_pvt *pvt = mci->pvt_info;
1803 * MCE first step: Copy all mce errors into a temporary buffer
1804 * We use double buffering here to reduce the risk of losing an error.
1808 count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
1811 goto check_ce_error;
1813 m = pvt->mce_outentry;
1814 if (pvt->mce_in + count > MCE_LOG_LEN) {
1815 unsigned l = MCE_LOG_LEN - pvt->mce_in;
1817 memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
1823 memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
1825 pvt->mce_in += count;
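/*
 * Ring arithmetic example: with MCE_LOG_LEN == 32, mce_in == 30 and
 * mce_out == 2, count == (2 + 32 - 30) % 32 == 4, drained in two
 * memcpy() chunks (entries 30-31, then 0-1) because the buffer wraps.
 */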
1828 if (pvt->mce_overrun) {
1829 i7core_printk(KERN_ERR, "Lost %d memory errors\n",
1832 pvt->mce_overrun = 0;
1836 * MCE second step: parse errors and display
1838 for (i = 0; i < count; i++)
1839 i7core_mce_output_error(mci, &pvt->mce_outentry[i]);
1842 * Now, let's increment CE error counts
1845 if (!pvt->is_registered)
1846 i7core_udimm_check_mc_ecc_err(mci);
1848 i7core_rdimm_check_mc_ecc_err(mci);
1852 * i7core_mce_check_error Replicates the mcelog routine to get errors:
1853 * it simply queues the mcelog errors and
1854 * returns. The error itself should be handled later
1855 * by i7core_check_error.
1856 * WARNING: As this routine should be called at NMI time, extra care should
1857 * be taken to avoid deadlocks, and to be as fast as possible.
1859 static int i7core_mce_check_error(void *priv, struct mce *mce)
1861 struct mem_ctl_info *mci = priv;
1862 struct i7core_pvt *pvt = mci->pvt_info;
1865 * Just let mcelog handle it if the error is
1866 * outside the memory controller
1868 if (((mce->status & 0xffff) >> 7) != 1)
1871 /* Bank 8 registers are the only ones that we know how to handle */
1876 /* Only handle if it is the right mc controller */
1877 if (cpu_data(mce->cpu).phys_proc_id != pvt->i7core_dev->socket)
1882 if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
1888 /* Copy the memory error to the ring buffer */
1889 memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
1891 pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;
1893 /* Handle fatal errors immediately */
1894 if (mce->mcgstatus & 1)
1895 i7core_check_error(mci);
1897 /* Advise mcelog that the error was handled */
1901 static void i7core_pci_ctl_create(struct i7core_pvt *pvt)
1903 pvt->i7core_pci = edac_pci_create_generic_ctl(
1904 &pvt->i7core_dev->pdev[0]->dev,
1906 if (unlikely(!pvt->i7core_pci))
1907 pr_warn("Unable to setup PCI error report via EDAC\n");
1910 static void i7core_pci_ctl_release(struct i7core_pvt *pvt)
1912 if (likely(pvt->i7core_pci))
1913 edac_pci_release_generic_ctl(pvt->i7core_pci);
1915 i7core_printk(KERN_ERR,
1916 "Couldn't find mem_ctl_info for socket %d\n",
1917 pvt->i7core_dev->socket);
1918 pvt->i7core_pci = NULL;
1921 static int i7core_register_mci(struct i7core_dev *i7core_dev,
1922 const int num_channels, const int num_csrows)
1924 struct mem_ctl_info *mci;
1925 struct i7core_pvt *pvt;
1929 /* allocate a new MC control structure */
1930 mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels,
1931 i7core_dev->socket);
1935 debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
1936 __func__, mci, &i7core_dev->pdev[0]->dev);
1938 pvt = mci->pvt_info;
1939 memset(pvt, 0, sizeof(*pvt));
1942 * FIXME: how to handle RDDR3 at MCI level? It is possible to have
1943 * mixed RDDR3/UDDR3 with Nehalem, provided that they are on different channels.
1946 mci->mtype_cap = MEM_FLAG_DDR3;
1947 mci->edac_ctl_cap = EDAC_FLAG_NONE;
1948 mci->edac_cap = EDAC_FLAG_NONE;
1949 mci->mod_name = "i7core_edac.c";
1950 mci->mod_ver = I7CORE_REVISION;
1951 mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d",
1952 i7core_dev->socket);
1953 mci->dev_name = pci_name(i7core_dev->pdev[0]);
1954 mci->ctl_page_to_phys = NULL;
1956 /* Store pci devices at mci for faster access */
1957 rc = mci_bind_devs(mci, i7core_dev);
1958 if (unlikely(rc < 0))
1961 if (pvt->is_registered)
1962 mci->mc_driver_sysfs_attributes = i7core_sysfs_rdimm_attrs;
1964 mci->mc_driver_sysfs_attributes = i7core_sysfs_udimm_attrs;
1966 /* Get dimm basic config */
1967 get_dimm_config(mci, &csrow);
1968 /* record ptr to the generic device */
1969 mci->dev = &i7core_dev->pdev[0]->dev;
1970 /* Set the function pointer to an actual operation function */
1971 mci->edac_check = i7core_check_error;
1973 /* add this new MC control structure to EDAC's list of MCs */
1974 if (unlikely(edac_mc_add_mc(mci))) {
1975 debugf0("MC: " __FILE__
1976 ": %s(): failed edac_mc_add_mc()\n", __func__);
1977 /* FIXME: perhaps some code should go here that disables error
1978 * reporting if we just enabled it
1985 /* Default error mask is any memory */
1986 pvt->inject.channel = 0;
1987 pvt->inject.dimm = -1;
1988 pvt->inject.rank = -1;
1989 pvt->inject.bank = -1;
1990 pvt->inject.page = -1;
1991 pvt->inject.col = -1;
1993 /* allocating generic PCI control info */
1994 i7core_pci_ctl_create(pvt);
1996 /* Register with edac_mce in order to receive memory errors */
1997 pvt->edac_mce.priv = mci;
1998 pvt->edac_mce.check_error = i7core_mce_check_error;
1999 rc = edac_mce_register(&pvt->edac_mce);
2000 if (unlikely(rc < 0)) {
2001 debugf0("MC: " __FILE__
2002 ": %s(): failed edac_mce_register()\n", __func__);
2009 i7core_pci_ctl_release(pvt);
2010 edac_mc_del_mc(mci->dev);
2012 kfree(mci->ctl_name);
2018 * i7core_probe Probe for ONE instance of the device to see if it is present.
2021 * 0 for FOUND a device
2022 * < 0 for error code
2025 static int __devinit i7core_probe(struct pci_dev *pdev,
2026 const struct pci_device_id *id)
2029 struct i7core_dev *i7core_dev;
2031 /* get the pci devices we want to reserve for our use */
2032 mutex_lock(&i7core_edac_lock);
2035 * All memory controllers are allocated at the first pass.
2037 if (unlikely(probed >= 1)) {
2038 mutex_unlock(&i7core_edac_lock);
2043 rc = i7core_get_all_devices();
2044 if (unlikely(rc < 0))
2047 list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
2051 /* Check the number of active and not disabled channels */
2052 rc = i7core_get_active_channels(i7core_dev->socket,
2053 &channels, &csrows);
2054 if (unlikely(rc < 0))
2057 rc = i7core_register_mci(i7core_dev, channels, csrows);
2058 if (unlikely(rc < 0))
2062 i7core_printk(KERN_INFO, "Driver loaded.\n");
2064 mutex_unlock(&i7core_edac_lock);
2068 i7core_put_all_devices();
2070 mutex_unlock(&i7core_edac_lock);
2075 * i7core_remove destructor for one instance of device
2078 static void __devexit i7core_remove(struct pci_dev *pdev)
2080 struct mem_ctl_info *mci;
2081 struct i7core_dev *i7core_dev;
2082 struct i7core_pvt *pvt;
2084 debugf0(__FILE__ ": %s()\n", __func__);
2087 * we have a problem here: the pdev value used for removal will be wrong,
2088 * since it will point to the X58 register used to detect that the machine
2089 * is a Nehalem or newer design. However, due to the way several PCI
2090 * devices are grouped together to provide MC functionality, we need
2091 * to use a different method for releasing the devices
2094 mutex_lock(&i7core_edac_lock);
2096 if (unlikely(!probed)) {
2097 mutex_unlock(&i7core_edac_lock);
2101 list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
2102 mci = i7core_dev->mci;
2103 if (unlikely(!mci || !mci->pvt_info)) {
2104 debugf0("MC: " __FILE__ ": %s(): dev = %p\n",
2105 __func__, &i7core_dev->pdev[0]->dev);
2107 i7core_printk(KERN_ERR,
2108 "Couldn't find mci hanler\n");
2110 pvt = mci->pvt_info;
2112 debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
2113 __func__, mci, &i7core_dev->pdev[0]->dev);
2115 /* Disable MCE NMI handler */
2116 edac_mce_unregister(&pvt->edac_mce);
2118 /* Disable EDAC polling */
2119 i7core_pci_ctl_release(pvt);
2121 /* Remove MC sysfs nodes */
2122 edac_mc_del_mc(mci->dev);
2124 debugf1("%s: free mci struct\n", mci->ctl_name);
2125 kfree(mci->ctl_name);
2130 /* Release PCI resources */
2131 i7core_put_all_devices();
2135 mutex_unlock(&i7core_edac_lock);
2138 MODULE_DEVICE_TABLE(pci, i7core_pci_tbl);
2141 * i7core_driver pci_driver structure for this module
2144 static struct pci_driver i7core_driver = {
2145 .name = "i7core_edac",
2146 .probe = i7core_probe,
2147 .remove = __devexit_p(i7core_remove),
2148 .id_table = i7core_pci_tbl,
2152 * i7core_init Module entry function
2153 * Try to initialize this module for its devices
2155 static int __init i7core_init(void)
2159 debugf2("MC: " __FILE__ ": %s()\n", __func__);
2161 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
2165 i7core_xeon_pci_fixup(pci_dev_table);
2167 pci_rc = pci_register_driver(&i7core_driver);
2172 i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
2179 * i7core_exit() Module exit function
2180 * Unregister the driver
2182 static void __exit i7core_exit(void)
2184 debugf2("MC: " __FILE__ ": %s()\n", __func__);
2185 pci_unregister_driver(&i7core_driver);
2188 module_init(i7core_init);
2189 module_exit(i7core_exit);
2191 MODULE_LICENSE("GPL");
2192 MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
2193 MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
2194 MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - "
2197 module_param(edac_op_state, int, 0444);
2198 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");