1 /* Intel i7 core/Nehalem Memory Controller kernel module
3 * This driver supports the memory controllers found on the Intel
4 * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx,
5 * Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield
8 * This file may be distributed under the terms of the
9 * GNU General Public License version 2 only.
11 * Copyright (c) 2009-2010 by:
12 * Mauro Carvalho Chehab <mchehab@redhat.com>
14 * Red Hat Inc. http://www.redhat.com
16 * Forked and adapted from the i5400_edac driver
18 * Based on the following public Intel datasheets:
19 * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor
20 * Datasheet, Volume 2:
21 * http://download.intel.com/design/processor/datashts/320835.pdf
22 * Intel Xeon Processor 5500 Series Datasheet Volume 2
23 * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
25 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
28 #include <linux/module.h>
29 #include <linux/init.h>
30 #include <linux/pci.h>
31 #include <linux/pci_ids.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/edac.h>
35 #include <linux/mmzone.h>
36 #include <linux/edac_mce.h>
37 #include <linux/smp.h>
38 #include <asm/processor.h>
40 #include "edac_core.h"
/* All probed sockets, guarded by i7core_edac_lock */
static LIST_HEAD(i7core_edac_list);
static DEFINE_MUTEX(i7core_edac_lock);

/* If set, do a legacy bus scan to find non-core devices hidden by BIOS */
static int use_pci_fixup;
module_param(use_pci_fixup, int, 0444);
MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
/*
 * This is used for Nehalem-EP and Nehalem-EX devices, where the non-core
 * registers start at bus 255, and are not reported by BIOS.
 * We currently find devices with only 2 sockets. In order to support more QPI
 * Quick Path Interconnect, just increment this number.
 */
#define MAX_SOCKET_BUSES	2

/*
 * Alter this version for the module when modifications are made
 */
#define I7CORE_REVISION    " Ver: 1.0.0"
#define EDAC_MOD_STR      "i7core_edac"
/* Log wrappers that tag every message with the "i7core" driver name */
#define i7core_printk(level, fmt, arg...)			\
	edac_printk(level, "i7core", fmt, ##arg)

#define i7core_mc_printk(mci, level, fmt, arg...)		\
	edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg)
/*
 * i7core Memory Controller Registers
 */

	/* OFFSETS for Device 0 Function 0 */

#define MC_CFG_CONTROL	0x90
  #define MC_CFG_UNLOCK		0x02
  #define MC_CFG_LOCK		0x00

	/* OFFSETS for Device 3 Function 0 */

#define MC_CONTROL	0x48
#define MC_STATUS	0x4c
#define MC_MAX_DOD	0x64

/*
 * OFFSETS for Device 3 Function 4, as indicated on Xeon 5500 datasheet:
 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
 */

#define MC_TEST_ERR_RCV1	0x60
  #define DIMM2_COR_ERR(r)			((r) & 0x7fff)

#define MC_TEST_ERR_RCV0	0x64
  #define DIMM1_COR_ERR(r)		(((r) >> 16) & 0x7fff)
  #define DIMM0_COR_ERR(r)		((r) & 0x7fff)

/* OFFSETS for Device 3 Function 2, as indicated on Xeon 5500 datasheet */
#define MC_SSRCONTROL		0x48
  #define SSR_MODE_DISABLE	0x00
  #define SSR_MODE_ENABLE	0x01
  #define SSR_MODE_MASK		0x03

#define MC_SCRUB_CONTROL	0x4c
  #define STARTSCRUB		(1 << 24)

/* Per-DIMM corrected-error counter registers (two DIMMs per dword) */
#define MC_COR_ECC_CNT_0	0x80
#define MC_COR_ECC_CNT_1	0x84
#define MC_COR_ECC_CNT_2	0x88
#define MC_COR_ECC_CNT_3	0x8c
#define MC_COR_ECC_CNT_4	0x90
#define MC_COR_ECC_CNT_5	0x94

#define DIMM_TOP_COR_ERR(r)			(((r) >> 16) & 0x7fff)
#define DIMM_BOT_COR_ERR(r)			((r) & 0x7fff)

	/* OFFSETS for Devices 4,5 and 6 Function 0 */

#define MC_CHANNEL_DIMM_INIT_PARAMS 0x58
  #define THREE_DIMMS_PRESENT		(1 << 24)
  #define SINGLE_QUAD_RANK_PRESENT	(1 << 23)
  #define QUAD_RANK_PRESENT		(1 << 22)
  #define REGISTERED_DIMM		(1 << 15)

#define MC_CHANNEL_MAPPER	0x60
  #define RDLCH(r, ch)		((((r) >> (3 + (ch * 6))) & 0x07) - 1)
  #define WRLCH(r, ch)		((((r) >> (ch * 6)) & 0x07) - 1)

#define MC_CHANNEL_RANK_PRESENT 0x7c
  #define RANK_PRESENT_MASK		0xffff

/* Error-injection address match / mask / control registers */
#define MC_CHANNEL_ADDR_MATCH	0xf0
#define MC_CHANNEL_ERROR_MASK	0xf8
#define MC_CHANNEL_ERROR_INJECT	0xfc
  #define INJECT_ADDR_PARITY	0x10
  #define INJECT_ECC		0x08
  #define MASK_CACHELINE	0x06
  #define MASK_FULL_CACHELINE	0x06
  #define MASK_MSB32_CACHELINE	0x04
  #define MASK_LSB32_CACHELINE	0x02
  #define NO_MASK_CACHELINE	0x00
  #define REPEAT_EN		0x01
	/* OFFSETS for Devices 4,5 and 6 Function 1 */

/* DIMM Organization Data registers, one per DIMM slot */
#define MC_DOD_CH_DIMM0		0x48
#define MC_DOD_CH_DIMM1		0x4c
#define MC_DOD_CH_DIMM2		0x50
  #define RANKOFFSET_MASK	((1 << 12) | (1 << 11) | (1 << 10))
  #define RANKOFFSET(x)		((x & RANKOFFSET_MASK) >> 10)
  #define DIMM_PRESENT_MASK	(1 << 9)
  #define DIMM_PRESENT(x)	(((x) & DIMM_PRESENT_MASK) >> 9)
  #define MC_DOD_NUMBANK_MASK		((1 << 8) | (1 << 7))
  #define MC_DOD_NUMBANK(x)		(((x) & MC_DOD_NUMBANK_MASK) >> 7)
  #define MC_DOD_NUMRANK_MASK		((1 << 6) | (1 << 5))
  #define MC_DOD_NUMRANK(x)		(((x) & MC_DOD_NUMRANK_MASK) >> 5)
  #define MC_DOD_NUMROW_MASK		((1 << 4) | (1 << 3) | (1 << 2))
  #define MC_DOD_NUMROW(x)		(((x) & MC_DOD_NUMROW_MASK) >> 2)
  #define MC_DOD_NUMCOL_MASK		3
  #define MC_DOD_NUMCOL(x)		((x) & MC_DOD_NUMCOL_MASK)

#define MC_RANK_PRESENT		0x7c

/* MC_SAG_CH_*: one register per channel slot (decoded in get_dimm_config) */
#define MC_SAG_CH_0	0x80
#define MC_SAG_CH_1	0x84
#define MC_SAG_CH_2	0x88
#define MC_SAG_CH_3	0x8c
#define MC_SAG_CH_4	0x90
#define MC_SAG_CH_5	0x94
#define MC_SAG_CH_6	0x98
#define MC_SAG_CH_7	0x9c

/* Rank Interleave Range limit registers, one per channel slot */
#define MC_RIR_LIMIT_CH_0	0x40
#define MC_RIR_LIMIT_CH_1	0x44
#define MC_RIR_LIMIT_CH_2	0x48
#define MC_RIR_LIMIT_CH_3	0x4C
#define MC_RIR_LIMIT_CH_4	0x50
#define MC_RIR_LIMIT_CH_5	0x54
#define MC_RIR_LIMIT_CH_6	0x58
#define MC_RIR_LIMIT_CH_7	0x5C
#define MC_RIR_LIMIT_MASK	((1 << 10) - 1)

#define MC_RIR_WAY_CH		0x80
  #define MC_RIR_WAY_OFFSET_MASK	(((1 << 14) - 1) & ~0x7)
  #define MC_RIR_WAY_RANK_MASK		0x7

#define MAX_DIMMS		3		/* Max DIMMS per channel */
#define MAX_MCR_FUNC		4
#define MAX_CHAN_FUNC		3
/* Error-injection criteria set via sysfs; a value < 0 means "match any" */
struct i7core_inject {

	/* Error address mask */
	int channel, dimm, rank, bank, page, col;

/* Per-channel state, filled at probe time from the DIMM init registers */
struct i7core_channel {

/* One PCI device the driver must 'get': function number and device id */
struct pci_id_descr {

/* A whole platform's device list plus its length */
struct pci_id_table {
	const struct pci_id_descr	*descr;

	struct list_head	list;
	struct pci_dev		**pdev;

	struct mem_ctl_info	*mci;

	struct pci_dev		*pci_noncore;
	struct pci_dev		*pci_mcr[MAX_MCR_FUNC + 1];
	struct pci_dev		*pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1];

	struct i7core_dev	*i7core_dev;

	struct i7core_info	info;
	struct i7core_inject	inject;
	struct i7core_channel	channel[NUM_CHANS];

	int		ce_count_available;
	/* Maps (channel, dimm) to the fake csrow assigned in get_dimm_config */
	int		csrow_map[NUM_CHANS][MAX_DIMMS];

	/* ECC corrected errors counts per udimm */
	unsigned long	udimm_ce_count[MAX_DIMMS];
	int		udimm_last_ce_count[MAX_DIMMS];
	/* ECC corrected errors counts per rdimm */
	unsigned long	rdimm_ce_count[NUM_CHANS][MAX_DIMMS];
	int		rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS];

	unsigned int	is_registered;

	struct edac_mce		edac_mce;

	/* Fifo double buffers */
	struct mce		mce_entry[MCE_LOG_LEN];
	struct mce		mce_outentry[MCE_LOG_LEN];

	/* Fifo in/out counters */
	unsigned		mce_in, mce_out;

	/* Count indicator to show errors not got */
	unsigned		mce_overrun;

	/* Struct to control EDAC polling */
	struct edac_pci_ctl_info *i7core_pci;
/* Shorthand initializer for a pci_id_descr entry */
#define PCI_DESCR(device, function, device_id)	\
	.func = (function),			\
	.dev_id = (device_id)
/* Devices to grab on i7core/Nehalem platforms */
static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
		/* Memory controller */
	{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR)     },
	{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD)  },
			/* Exists only for RDIMM */
	{ PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1  },
	{ PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },

		/* Channel 0 */
	{ PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) },
	{ PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) },
	{ PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) },
	{ PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC)   },

		/* Channel 1 */
	{ PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) },
	{ PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) },
	{ PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) },
	{ PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC)   },

		/* Channel 2 */
	{ PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) },
	{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
	{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
	{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC)   },

		/* Generic Non-core registers */
	/*
	 * This is the PCI device on i7core and on Xeon 35xx (8086:2c41)
	 * On Xeon 55xx, however, it has a different id (8086:2c40). So,
	 * the probing code needs to test for the other address in case of
	 * failure of this one
	 */
	{ PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE)  },
/* Devices to grab on Lynnfield platforms (only two channels) */
static const struct pci_id_descr pci_dev_descr_lynnfield[] = {
	{ PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR)         },
	{ PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD)      },
	{ PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST)     },

	{ PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL) },
	{ PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR) },
	{ PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK) },
	{ PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC)   },

	{ PCI_DESCR( 5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL) },
	{ PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) },
	{ PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) },
	{ PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC)   },

	/*
	 * This is the PCI device has an alternate address on some
	 * processors like Core i7 860
	 */
	{ PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE)     },
/* Devices to grab on Westmere platforms (REV2 device ids) */
static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
		/* Memory controller */
	{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2)     },
	{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2)  },
			/* Exists only for RDIMM */
	{ PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2), .optional = 1  },
	{ PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2) },

		/* Channel 0 */
	{ PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2) },
	{ PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2) },
	{ PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2) },
	{ PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2)   },

		/* Channel 1 */
	{ PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2) },
	{ PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2) },
	{ PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2) },
	{ PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2)   },

		/* Channel 2 */
	{ PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2) },
	{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) },
	{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) },
	{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2)   },

		/* Generic Non-core registers */
	{ PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2)  },
/* Pair a device list with its length; probed in order until one matches */
#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
static const struct pci_id_table pci_dev_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem),
	PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield),
	PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere),
	{0,}			/* 0 terminated list. */
/*
 *	pci_device_id table for which devices we are looking for
 */
static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
	{0,}			/* 0 terminated list. */
/****************************************************************************
			Ancillary status routines
 ****************************************************************************/

	/* MC_CONTROL bits */
#define CH_ACTIVE(pvt, ch)	((pvt)->info.mc_control & (1 << (8 + ch)))
#define ECCx8(pvt)		((pvt)->info.mc_control & (1 << 1))

	/* MC_STATUS bits */
#define ECC_ENABLED(pvt)	((pvt)->info.mc_status & (1 << 4))
#define CH_DISABLED(pvt, ch)	((pvt)->info.mc_status & (1 << ch))
/* MC_MAX_DOD read functions */

/* Decode the two-bit DIMM-count field: encodings 0..3 mean 1..4 DIMMs */
static inline int numdimms(u32 dimms)
{
	return 1 + (dimms & 0x3);
}
/* Decode the two-bit rank field; the fourth encoding is reserved */
static inline int numrank(u32 rank)
{
	static const int rank_lut[] = { 1, 2, 4, -EINVAL };

	return rank_lut[rank & 0x3];
}
/* Decode the two-bit bank field; the fourth encoding is reserved */
static inline int numbank(u32 bank)
{
	static const int bank_lut[] = { 4, 8, 16, -EINVAL };

	return bank_lut[bank & 0x3];
}
/* Decode the three-bit row field into a row count (2^12 .. 2^16) */
static inline int numrow(u32 row)
{
	static const int row_lut[] = {
		1 << 12, 1 << 13, 1 << 14, 1 << 15,
		1 << 16, -EINVAL, -EINVAL, -EINVAL,
	};

	return row_lut[row & 0x7];
}
/*
 * Decode the two-bit column field into a column count (2^10 .. 2^12).
 * The fourth encoding is reserved.
 *
 * The index is masked with 0x3, so only four encodings are reachable;
 * size the table accordingly (it was previously declared with eight
 * slots but only four initializers).
 */
static inline int numcol(u32 col)
{
	static const int cols[4] = {
		1 << 10, 1 << 11, 1 << 12, -EINVAL,
	};
	return cols[col & 0x3];
}
/* Find the i7core_dev registered for @socket in i7core_edac_list */
static struct i7core_dev *get_i7core_dev(u8 socket)
	struct i7core_dev *i7core_dev;

	list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
		if (i7core_dev->socket == socket)
/*
 * Allocate a new i7core_dev for @socket, with a pdev array sized for
 * the @table device list, and append it to i7core_edac_list.
 * NOTE(review): list manipulation suggests the caller serializes via
 * i7core_edac_lock - confirm against callers.
 */
static struct i7core_dev *alloc_i7core_dev(u8 socket,
					   const struct pci_id_table *table)
	struct i7core_dev *i7core_dev;

	i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);

	i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * table->n_devs,
	if (!i7core_dev->pdev) {

	i7core_dev->socket = socket;
	i7core_dev->n_devs = table->n_devs;
	list_add_tail(&i7core_dev->list, &i7core_edac_list);
/* Unlink @i7core_dev from the global list and release its pdev array */
static void free_i7core_dev(struct i7core_dev *i7core_dev)
	list_del(&i7core_dev->list);
	kfree(i7core_dev->pdev);
/****************************************************************************
			Memory check routines
 ****************************************************************************/

/* Return the pci_dev of @socket matching PCI @slot/@func, if grabbed */
static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,

	struct i7core_dev *i7core_dev = get_i7core_dev(socket);

	for (i = 0; i < i7core_dev->n_devs; i++) {
		/* Optional devices may not have been found; skip the holes */
		if (!i7core_dev->pdev[i])

		if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot &&
		    PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) {
			return i7core_dev->pdev[i];
/**
 * i7core_get_active_channels() - gets the number of channels and csrows
 * @socket:	Quick Path Interconnect socket
 * @channels:	Number of channels that will be returned
 * @csrows:	Number of csrows found
 *
 * Since EDAC core needs to know in advance the number of available channels
 * and csrows, in order to allocate memory for csrows/channels, it is needed
 * to run two similar steps. At the first step, implemented on this function,
 * it checks the number of csrows/channels present at one socket.
 * this is used in order to properly allocate the size of mci components.
 *
 * It should be noticed that none of the current available datasheets explain
 * or even mention how csrows are seen by the memory controller. So, we need
 * to add a fake description for csrows.
 * So, this driver is attributing one DIMM memory for one csrow.
 */
static int i7core_get_active_channels(const u8 socket, unsigned *channels,

	struct pci_dev *pdev = NULL;

	pdev = get_pdev_slot_func(socket, 3, 0);
		i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",

	/* Device 3 function 0 reads */
	pci_read_config_dword(pdev, MC_STATUS, &status);
	pci_read_config_dword(pdev, MC_CONTROL, &control);

	for (i = 0; i < NUM_CHANS; i++) {

		/* Check if the channel is active */
		if (!(control & (1 << (8 + i))))

		/* Check if the channel is disabled */
		if (status & (1 << i))

		pdev = get_pdev_slot_func(socket, i + 4, 1);
			i7core_printk(KERN_ERR, "Couldn't find socket %d "

		/* Devices 4-6 function 1 */
		pci_read_config_dword(pdev,
				MC_DOD_CH_DIMM0, &dimm_dod[0]);
		pci_read_config_dword(pdev,
				MC_DOD_CH_DIMM1, &dimm_dod[1]);
		pci_read_config_dword(pdev,
				MC_DOD_CH_DIMM2, &dimm_dod[2]);

		/* Each present DIMM becomes one fake csrow */
		for (j = 0; j < 3; j++) {
			if (!DIMM_PRESENT(dimm_dod[j]))

	debugf0("Number of active channels on socket %d: %d\n",
/*
 * Read the MC and per-channel DIMM Organization registers, fill in the
 * per-channel state in pvt and describe each present DIMM as one fake
 * csrow of the mci (see i7core_get_active_channels() for the rationale).
 */
static int get_dimm_config(const struct mem_ctl_info *mci)
	struct i7core_pvt *pvt = mci->pvt_info;
	struct csrow_info *csr;
	struct pci_dev *pdev;
	unsigned long last_page = 0;

	/* Get data from the MC register, function 0 */
	pdev = pvt->pci_mcr[0];

	/* Device 3 function 0 reads */
	pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control);
	pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status);
	pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
	pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);

	debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
		pvt->i7core_dev->socket, pvt->info.mc_control, pvt->info.mc_status,
		pvt->info.max_dod, pvt->info.ch_map);

	if (ECC_ENABLED(pvt)) {
		debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
		/* ECCx8 selects between x8 and x4 single-device correction */
			mode = EDAC_S8ECD8ED;
			mode = EDAC_S4ECD4ED;
		debugf0("ECC disabled\n");

	/* FIXME: need to handle the error codes */
	debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked "
		numdimms(pvt->info.max_dod),
		numrank(pvt->info.max_dod >> 2),
		numbank(pvt->info.max_dod >> 4),
		numrow(pvt->info.max_dod >> 6),
		numcol(pvt->info.max_dod >> 9));

	for (i = 0; i < NUM_CHANS; i++) {
		u32 data, dimm_dod[3], value[8];

		/* Skip channels whose devices were not grabbed */
		if (!pvt->pci_ch[i][0])

		if (!CH_ACTIVE(pvt, i)) {
			debugf0("Channel %i is not active\n", i);
		if (CH_DISABLED(pvt, i)) {
			debugf0("Channel %i is disabled\n", i);

		/* Devices 4-6 function 0 */
		pci_read_config_dword(pvt->pci_ch[i][0],
				MC_CHANNEL_DIMM_INIT_PARAMS, &data);

		pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ?

		if (data & REGISTERED_DIMM)

		if (data & THREE_DIMMS_PRESENT)
			pvt->channel[i].dimms = 3;
		else if (data & SINGLE_QUAD_RANK_PRESENT)
			pvt->channel[i].dimms = 1;
			pvt->channel[i].dimms = 2;

		/* Devices 4-6 function 1 */
		pci_read_config_dword(pvt->pci_ch[i][1],
				MC_DOD_CH_DIMM0, &dimm_dod[0]);
		pci_read_config_dword(pvt->pci_ch[i][1],
				MC_DOD_CH_DIMM1, &dimm_dod[1]);
		pci_read_config_dword(pvt->pci_ch[i][1],
				MC_DOD_CH_DIMM2, &dimm_dod[2]);

		debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
			"%d ranks, %cDIMMs\n",
			RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
			pvt->channel[i].ranks,
			(data & REGISTERED_DIMM) ? 'R' : 'U');

		for (j = 0; j < 3; j++) {
			u32 banks, ranks, rows, cols;

			if (!DIMM_PRESENT(dimm_dod[j]))

			banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
			ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
			rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
			cols = numcol(MC_DOD_NUMCOL(dimm_dod[j]));

			/* DDR3 has 8 I/O banks */
			size = (rows * cols * banks * ranks) >> (20 - 3);

			pvt->channel[i].dimms++;

			debugf0("\tdimm %d %d Mb offset: %x, "
				"bank: %d, rank: %d, row: %#x, col: %#x\n",
				RANKOFFSET(dimm_dod[j]),
				banks, ranks, rows, cols);

			npages = MiB_TO_PAGES(size);

			/* One fake csrow per present DIMM */
			csr = &mci->csrows[csrow];
			csr->first_page = last_page + 1;
			csr->last_page = last_page;
			csr->nr_pages = npages;

			csr->csrow_idx = csrow;
			csr->nr_channels = 1;

			csr->channels[0].chan_idx = i;
			csr->channels[0].ce_count = 0;

			pvt->csrow_map[i][j] = csrow;

				csr->dtype = DEV_X16;
				csr->dtype = DEV_UNKNOWN;
			csr->edac_mode = mode;

		/* Dump the SAG decode windows for this channel */
		pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
		pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]);
		pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]);
		pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]);
		pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]);
		pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
		pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
		pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
		debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
		for (j = 0; j < 8; j++)
			debugf1("\t\t%#x\t%#x\t%#x\n",
				(value[j] >> 27) & 0x1,
				(value[j] >> 24) & 0x7,
				(value[j] & ((1 << 24) - 1)));
/****************************************************************************
			Error insertion routines
 ****************************************************************************/

/* The i7core has independent error injection features per channel.
   However, to have a simpler code, we don't allow enabling error injection
   on more than one channel.
   Also, since a change at an inject parameter will be applied only at enable,
   we're disabling error injection on all write calls to the sysfs nodes that
   controls the error code injection.
 */
static int disable_inject(const struct mem_ctl_info *mci)
	struct i7core_pvt *pvt = mci->pvt_info;

	pvt->inject.enable = 0;

	/* Nothing to clear if the channel's devices were not grabbed */
	if (!pvt->pci_ch[pvt->inject.channel][0])

	pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0],
				MC_CHANNEL_ERROR_INJECT, 0);
/*
 * i7core inject inject.section
 *
 *	accept and store error injection inject.section value
 *	bit 0 - refers to the lower 32-byte half cacheline
 *	bit 1 - refers to the upper 32-byte half cacheline
 */
static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci,
					   const char *data, size_t count)
	struct i7core_pvt *pvt = mci->pvt_info;

	/* Any parameter change disarms a pending injection */
	if (pvt->inject.enable)

	rc = strict_strtoul(data, 10, &value);
	if ((rc < 0) || (value > 3))

	pvt->inject.section = (u32) value;

/* Show the currently configured section mask */
static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci,
	struct i7core_pvt *pvt = mci->pvt_info;
	return sprintf(data, "0x%08x\n", pvt->inject.section);
/*
 *	accept and store error injection inject.type value
 *	bit 0 - repeat enable - Enable error repetition
 *	bit 1 - inject ECC error
 *	bit 2 - inject parity error
 */
static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci,
					const char *data, size_t count)
	struct i7core_pvt *pvt = mci->pvt_info;

	/* Any parameter change disarms a pending injection */
	if (pvt->inject.enable)

	rc = strict_strtoul(data, 10, &value);
	if ((rc < 0) || (value > 7))

	pvt->inject.type = (u32) value;

/* Show the currently configured injection type */
static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci,
	struct i7core_pvt *pvt = mci->pvt_info;
	return sprintf(data, "0x%08x\n", pvt->inject.type);
/*
 * i7core_inject_inject.eccmask_store
 *
 * The type of error (UE/CE) will depend on the inject.eccmask value:
 *   Any bits set to a 1 will flip the corresponding ECC bit
 *   Correctable errors can be injected by flipping 1 bit or the bits within
 *   a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
 *   23:16 and 31:24). Flipping bits in two symbol pairs will cause an
 *   uncorrectable error to be injected.
 */
static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci,
					const char *data, size_t count)
	struct i7core_pvt *pvt = mci->pvt_info;

	/* Any parameter change disarms a pending injection */
	if (pvt->inject.enable)

	rc = strict_strtoul(data, 10, &value);

	pvt->inject.eccmask = (u32) value;

/* Show the currently configured ECC flip mask */
static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci,
	struct i7core_pvt *pvt = mci->pvt_info;
	return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
/*
 * The type of error (UE/CE) will depend on the inject.eccmask value:
 *   Any bits set to a 1 will flip the corresponding ECC bit
 *   Correctable errors can be injected by flipping 1 bit or the bits within
 *   a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
 *   23:16 and 31:24). Flipping bits in two symbol pairs will cause an
 *   uncorrectable error to be injected.
 */

/*
 * Generate store/show pairs for one address-match criterion.  Writing
 * the literal "any" selects wildcard matching (shown back as "any");
 * otherwise the value must be a decimal number below @limit.
 */
#define DECLARE_ADDR_MATCH(param, limit)			\
static ssize_t i7core_inject_store_##param(			\
		struct mem_ctl_info *mci,			\
		const char *data, size_t count)			\
	struct i7core_pvt *pvt;					\
								\
	debugf1("%s()\n", __func__);				\
	pvt = mci->pvt_info;					\
								\
	if (pvt->inject.enable)					\
		disable_inject(mci);				\
								\
	if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))\
	rc = strict_strtoul(data, 10, &value);			\
	if ((rc < 0) || (value >= limit))			\
	pvt->inject.param = value;				\
								\
static ssize_t i7core_inject_show_##param(			\
		struct mem_ctl_info *mci,			\
	struct i7core_pvt *pvt;					\
								\
	pvt = mci->pvt_info;					\
	debugf1("%s() pvt=%p\n", __func__, pvt);		\
	if (pvt->inject.param < 0)				\
		return sprintf(data, "any\n");			\
	return sprintf(data, "%d\n", pvt->inject.param);\

/* sysfs attribute initializer wiring the generated store/show pair */
#define ATTR_ADDR_MATCH(param)				\
		.mode = (S_IRUGO | S_IWUSR)		\
	.show  = i7core_inject_show_##param,		\
	.store = i7core_inject_store_##param,		\

DECLARE_ADDR_MATCH(channel, 3);
DECLARE_ADDR_MATCH(dimm, 3);
DECLARE_ADDR_MATCH(rank, 4);
DECLARE_ADDR_MATCH(bank, 32);
DECLARE_ADDR_MATCH(page, 0x10000);
DECLARE_ADDR_MATCH(col, 0x4000);
/*
 * Write @val to config register @where of @dev and read it back to
 * verify, retrying up to 10 times; logs an error if it never sticks.
 */
static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
	debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n",
		dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),

	for (count = 0; count < 10; count++) {
		pci_write_config_dword(dev, where, val);
		pci_read_config_dword(dev, where, &read);

	i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x "
		"write=%08x. Read=%08x\n",
		dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
/*
 * This routine prepares the Memory Controller for error injection.
 * The error will be injected when some process tries to write to the
 * memory that matches the given criteria.
 * The criteria can be set in terms of a mask where dimm, rank, bank, page
 * and col can be specified.
 * A -1 value for any of the mask items will make the MCU to ignore
 * that matching criteria for error injection.
 *
 * It should be noticed that the error will only happen after a write operation
 * on a memory that matches the condition. if REPEAT_EN is not enabled at
 * inject mask, then it will produce just one error. Otherwise, it will repeat
 * until the injectmask would be cleaned.
 *
 * FIXME: This routine assumes that MAXNUMDIMMS value of MC_MAX_DOD
 *    is reliable enough to check if the MC is using the
 *    three channels. However, this is not clear at the datasheet.
 */
static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci,
				       const char *data, size_t count)
	struct i7core_pvt *pvt = mci->pvt_info;

	if (!pvt->pci_ch[pvt->inject.channel][0])

	rc = strict_strtoul(data, 10, &enable);

		pvt->inject.enable = 1;
		disable_inject(mci);

	/* Sets pvt->inject.dimm mask */
	if (pvt->inject.dimm < 0)
		/* Field width depends on how many DIMMs the channel has */
		if (pvt->channel[pvt->inject.channel].dimms > 2)
			mask |= (pvt->inject.dimm & 0x3LL) << 35;
			mask |= (pvt->inject.dimm & 0x1LL) << 36;

	/* Sets pvt->inject.rank mask */
	if (pvt->inject.rank < 0)
		if (pvt->channel[pvt->inject.channel].dimms > 2)
			mask |= (pvt->inject.rank & 0x1LL) << 34;
			mask |= (pvt->inject.rank & 0x3LL) << 34;

	/* Sets pvt->inject.bank mask */
	if (pvt->inject.bank < 0)
		/*
		 * NOTE(review): 0x15LL keeps only bits 0, 2 and 4, yet the
		 * sysfs node accepts bank values up to 31 - 0x1fLL looks
		 * intended; confirm against the datasheet before changing.
		 */
		mask |= (pvt->inject.bank & 0x15LL) << 30;

	/* Sets pvt->inject.page mask */
	if (pvt->inject.page < 0)
		mask |= (pvt->inject.page & 0xffff) << 14;

	/* Sets pvt->inject.column mask */
	if (pvt->inject.col < 0)
		mask |= (pvt->inject.col & 0x3fff);

	/*
	 * bits 1-2: MASK_HALF_CACHELINE
	 * bit    4: INJECT_ADDR_PARITY
	 */
	injectmask = (pvt->inject.type & 1) |
		     (pvt->inject.section & 0x3) << 1 |
		     (pvt->inject.type & 0x6) << (3 - 1);

	/* Unlock writes to registers - this register is write only */
	pci_write_config_dword(pvt->pci_noncore,
			       MC_CFG_CONTROL, 0x2);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ADDR_MATCH, mask);
	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_INJECT, injectmask);

	/*
	 * This is something undocumented, based on my tests
	 * Without writing 8 to this register, errors aren't injected. Not sure
	 */
	pci_write_config_dword(pvt->pci_noncore,

	debugf0("Error inject addr match 0x%016llx, ecc 0x%08x,"
		mask, pvt->inject.eccmask, injectmask);
/*
 * Report whether injection is armed; re-reads the hardware register so
 * the flag reflects an injection enabled before the driver loaded.
 */
static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
	struct i7core_pvt *pvt = mci->pvt_info;

	if (!pvt->pci_ch[pvt->inject.channel][0])

	pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_INJECT, &injectmask);

	debugf0("Inject error read: 0x%018x\n", injectmask);

	/* ECC or parity injection bits set means injection is armed */
	if (injectmask & 0x0c)
		pvt->inject.enable = 1;

	return sprintf(data, "%d\n", pvt->inject.enable);
/*
 * Generate a sysfs show routine for one per-UDIMM corrected-error
 * counter; counters are only meaningful for unregistered DIMMs.
 */
#define DECLARE_COUNTER(param)	\
static ssize_t i7core_show_counter_##param(			\
	struct mem_ctl_info *mci,				\
	struct i7core_pvt *pvt = mci->pvt_info;			\
								\
	debugf1("%s() \n", __func__);				\
	if (!pvt->ce_count_available || (pvt->is_registered))	\
		return sprintf(data, "data unavailable\n");	\
	return sprintf(data, "%lu\n",				\
			pvt->udimm_ce_count[param]);		\

/* Read-only sysfs attribute initializer for one UDIMM counter */
#define ATTR_COUNTER(param)		\
		.name = __stringify(udimm##param),	\
		.mode = (S_IRUGO | S_IWUSR)		\
	.show  = i7core_show_counter_##param		\
/* Address-match criteria exposed under the inject_addrmatch group */
static const struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = {
	ATTR_ADDR_MATCH(channel),
	ATTR_ADDR_MATCH(dimm),
	ATTR_ADDR_MATCH(rank),
	ATTR_ADDR_MATCH(bank),
	ATTR_ADDR_MATCH(page),
	ATTR_ADDR_MATCH(col),
	{ } /* End of list */

static const struct mcidev_sysfs_group i7core_inject_addrmatch = {
	.name  = "inject_addrmatch",
	.mcidev_attr = i7core_addrmatch_attrs,
/* Per-UDIMM corrected-error counters, grouped under all_channel_counts */
static const struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
	{ .attr = { .name = NULL } }

static const struct mcidev_sysfs_group i7core_udimm_counters = {
	.name = "all_channel_counts",
	.mcidev_attr = i7core_udimm_counters_attrs,
/* sysfs attributes for registered-DIMM MCs (no per-UDIMM counters) */
static const struct mcidev_sysfs_attribute i7core_sysfs_rdimm_attrs[] = {
			.name = "inject_section",
			.mode = (S_IRUGO | S_IWUSR)
		.show  = i7core_inject_section_show,
		.store = i7core_inject_section_store,
			.name = "inject_type",
			.mode = (S_IRUGO | S_IWUSR)
		.show  = i7core_inject_type_show,
		.store = i7core_inject_type_store,
			.name = "inject_eccmask",
			.mode = (S_IRUGO | S_IWUSR)
		.show  = i7core_inject_eccmask_show,
		.store = i7core_inject_eccmask_store,
		.grp = &i7core_inject_addrmatch,
			.name = "inject_enable",
			.mode = (S_IRUGO | S_IWUSR)
		.show  = i7core_inject_enable_show,
		.store = i7core_inject_enable_store,
	{ }	/* End of list */
/* sysfs attributes for unregistered-DIMM MCs (adds the counters group) */
static const struct mcidev_sysfs_attribute i7core_sysfs_udimm_attrs[] = {
			.name = "inject_section",
			.mode = (S_IRUGO | S_IWUSR)
		.show  = i7core_inject_section_show,
		.store = i7core_inject_section_store,
			.name = "inject_type",
			.mode = (S_IRUGO | S_IWUSR)
		.show  = i7core_inject_type_show,
		.store = i7core_inject_type_store,
			.name = "inject_eccmask",
			.mode = (S_IRUGO | S_IWUSR)
		.show  = i7core_inject_eccmask_show,
		.store = i7core_inject_eccmask_store,
		.grp = &i7core_inject_addrmatch,
			.name = "inject_enable",
			.mode = (S_IRUGO | S_IWUSR)
		.show  = i7core_inject_enable_show,
		.store = i7core_inject_enable_store,
		.grp = &i7core_udimm_counters,
	{ }	/* End of list */
/****************************************************************************
	Device initialization routines: put/get, init/exit
 ****************************************************************************/

/*
 *	i7core_put_all_devices	'put' all the devices that we have
 *				reserved via 'get'
 */
static void i7core_put_devices(struct i7core_dev *i7core_dev)

	debugf0(__FILE__ ": %s()\n", __func__);
	for (i = 0; i < i7core_dev->n_devs; i++) {
		struct pci_dev *pdev = i7core_dev->pdev[i];

		debugf0("Removing dev %02x:%02x.%d\n",
			PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1287 static void i7core_put_all_devices(void)
1289 struct i7core_dev *i7core_dev, *tmp;
1291 list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) {
1292 i7core_put_devices(i7core_dev);
1293 free_i7core_dev(i7core_dev);
/*
 * Force a legacy scan of the hidden non-core buses so their devices get
 * enumerated before we try to 'get' them.
 */
static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table)
	struct pci_dev *pdev = NULL;
	/*
	 * On Xeon 55xx, the Intel Quick Path Arch Generic Non-core pci buses
	 * aren't announced by acpi. So, we need to use a legacy scan probing
	 */
	while (table && table->descr) {
		/* Probe only the first id of each table: if it is absent the
		 * whole non-core bus range is probably still unscanned. */
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, table->descr[0].dev_id, NULL);
		if (unlikely(!pdev)) {
			/* Hidden buses live at 255, 254, ... downwards */
			for (i = 0; i < MAX_SOCKET_BUSES; i++)
				pcibios_scan_specific_bus(255-i);
1318 static unsigned i7core_pci_lastbus(void)
1320 int last_bus = 0, bus;
1321 struct pci_bus *b = NULL;
1323 while ((b = pci_find_next_bus(b)) != NULL) {
1325 debugf0("Found bus %d\n", bus);
1330 debugf0("Last bus %d\n", last_bus);
1336 * i7core_get_all_devices Find and perform 'get' operation on the MCH's
1337 * device/functions we want to reference for this driver
1339 * Need to 'get' device 16 func 1 and func 2
/*
 * i7core_get_onedevice - take a reference on one PCI function of a socket
 * @prev:	last device returned for this id (resume point for
 *		pci_get_device); updated so the caller can iterate sockets
 * @table:	id table describing the devices of one CPU family
 * @devno:	index into @table->descr of the device to look up
 * @last_bus:	highest PCI bus number; used to derive the socket number
 *
 * Looks the device up (including known alternate ids), stores it into the
 * per-socket i7core_dev, and sanity-checks slot/function placement.
 */
static int i7core_get_onedevice(struct pci_dev **prev,
				const struct pci_id_table *table,
				const unsigned devno,
				const unsigned last_bus)
	struct i7core_dev *i7core_dev;
	const struct pci_id_descr *dev_descr = &table->descr[devno];
	struct pci_dev *pdev = NULL;
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
			      dev_descr->dev_id, *prev);
	/*
	 * On Xeon 55xx, the Intel Quickpath Arch Generic Non-core regs
	 * is at addr 8086:2c40, instead of 8086:2c41. So, we need
	 * to probe for the alternate address in case of failure
	 */
	if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);
	/* Lynnfield has an equivalent alternate non-core device id */
	if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
	/* Devices marked optional may legitimately be absent */
	if (dev_descr->optional)
	i7core_printk(KERN_INFO,
		"Device not found: dev %02x.%d PCI ID %04x:%04x\n",
		dev_descr->dev, dev_descr->func,
		PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
	/* End of list, leave */
	/* Socket number is inferred from the distance to the last bus */
	bus = pdev->bus->number;
	socket = last_bus - bus;
	i7core_dev = get_i7core_dev(socket);
		/* First device seen on this socket: allocate its descriptor */
		i7core_dev = alloc_i7core_dev(socket, table);
	if (i7core_dev->pdev[devno]) {
		i7core_printk(KERN_ERR,
			"Duplicated device for "
			"dev %02x:%02x.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
	i7core_dev->pdev[devno] = pdev;
	/* Sanity check: the device must sit where the id table says */
	if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
		     PCI_FUNC(pdev->devfn) != dev_descr->func)) {
		i7core_printk(KERN_ERR,
			"Device PCI ID %04x:%04x "
			"has dev %02x:%02x.%d instead of dev %02x:%02x.%d\n",
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
			bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			bus, dev_descr->dev, dev_descr->func);
	/* Be sure that the device is enabled */
	if (unlikely(pci_enable_device(pdev) < 0)) {
		i7core_printk(KERN_ERR,
			"dev %02x:%02x.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
	debugf0("Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
		socket, bus, dev_descr->dev,
		PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
	/*
	 * As stated on drivers/pci/search.c, the reference count for
	 * @from is always decremented if it is not %NULL. So, as we need
	 * to get all devices up to null, we need to do a get for the device
	 */
/*
 * i7core_get_all_devices - reserve every MC PCI function of every socket.
 * On any failure, all references taken so far are dropped again.
 */
static int i7core_get_all_devices(void)
	int i, rc, last_bus;
	struct pci_dev *pdev = NULL;
	const struct pci_id_table *table = pci_dev_table;
	/* Needed to map a bus number back to a socket */
	last_bus = i7core_pci_lastbus();
	while (table && table->descr) {
		for (i = 0; i < table->n_devs; i++) {
			rc = i7core_get_onedevice(&pdev, table, i,
			/* Unwind: release everything reserved so far */
			i7core_put_all_devices();
/*
 * mci_bind_devs - cache the socket's PCI devices inside the mci private
 * data for fast access, sorted by slot/function:
 *   slot 3        -> pvt->pci_mcr[func]      (memory controller regs)
 *   slots 4..4+N  -> pvt->pci_ch[ch][func]   (per-channel regs)
 *   slot 0 func 0 -> pvt->pci_noncore
 * Also detects registered DIMMs: dev 3 func 2 only exists on RDIMM setups.
 */
static int mci_bind_devs(struct mem_ctl_info *mci,
			 struct i7core_dev *i7core_dev)
	struct i7core_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	pvt->is_registered = 0;
	for (i = 0; i < i7core_dev->n_devs; i++) {
		pdev = i7core_dev->pdev[i];
		func = PCI_FUNC(pdev->devfn);
		slot = PCI_SLOT(pdev->devfn);
		if (unlikely(func > MAX_MCR_FUNC))
			pvt->pci_mcr[func] = pdev;
		} else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) {
			if (unlikely(func > MAX_CHAN_FUNC))
			pvt->pci_ch[slot - 4][func] = pdev;
		} else if (!slot && !func)
			pvt->pci_noncore = pdev;
		debugf0("Associated fn %d.%d, dev = %p, socket %d\n",
			PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			pdev, i7core_dev->socket);
		/* Presence of dev 3.2 means registered (RDIMM) memory */
		if (PCI_SLOT(pdev->devfn) == 3 &&
		    PCI_FUNC(pdev->devfn) == 2)
			pvt->is_registered = 1;
	i7core_printk(KERN_ERR, "Device %d, function %d "
		"is out of the expected range\n",
1530 /****************************************************************************
1531 Error check routines
1532 ****************************************************************************/
/*
 * i7core_rdimm_update_csrow - report @add newly-seen corrected errors on
 * (chan, dimm) to the EDAC core, one edac_mc_handle_fbd_ce() call each.
 */
static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
	struct i7core_pvt *pvt = mci->pvt_info;
	/* Map (channel, dimm) to the EDAC csrow computed at probe time */
	int row = pvt->csrow_map[chan][dimm], i;
	for (i = 0; i < add; i++) {
		msg = kasprintf(GFP_KERNEL, "Corrected error "
				"(Socket=%d channel=%d dimm=%d)",
				pvt->i7core_dev->socket, chan, dimm);
		edac_mc_handle_fbd_ce(mci, row, 0, msg);
/*
 * i7core_rdimm_update_ce_count - compute the delta between the hardware
 * CE counters read now (@new0..@new2, one per DIMM of @chan) and the
 * values cached from the previous poll, then feed the deltas to EDAC.
 * The very first call only seeds the cache.
 */
static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
	struct i7core_pvt *pvt = mci->pvt_info;
	int add0 = 0, add1 = 0, add2 = 0;
	/* Updates CE counters if it is not the first time here */
	if (pvt->ce_count_available) {
		/* Updates CE counters */
		add2 = new2 - pvt->rdimm_last_ce_count[chan][2];
		add1 = new1 - pvt->rdimm_last_ce_count[chan][1];
		add0 = new0 - pvt->rdimm_last_ce_count[chan][0];
		/* NOTE(review): hardware counters wrap; the negative-delta
		 * correction appears elided here — confirm upstream. */
		pvt->rdimm_ce_count[chan][2] += add2;
		pvt->rdimm_ce_count[chan][1] += add1;
		pvt->rdimm_ce_count[chan][0] += add0;
		pvt->ce_count_available = 1;
	/* Store the new values */
	pvt->rdimm_last_ce_count[chan][2] = new2;
	pvt->rdimm_last_ce_count[chan][1] = new1;
	pvt->rdimm_last_ce_count[chan][0] = new0;
	/* Update the EDAC core with one event per newly-seen error */
	i7core_rdimm_update_csrow(mci, chan, 0, add0);
	i7core_rdimm_update_csrow(mci, chan, 1, add1);
	i7core_rdimm_update_csrow(mci, chan, 2, add2);
/*
 * i7core_rdimm_check_mc_ecc_err - poll the six MC_COR_ECC_CNT registers
 * (two per channel) and fold the per-DIMM corrected error counts into
 * the channel statistics.
 */
static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
	struct i7core_pvt *pvt = mci->pvt_info;
	int i, new0, new1, new2;
	/*Read DEV 3: FUN 2: MC_COR_ECC_CNT regs directly*/
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_0,
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_1,
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_2,
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_3,
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_4,
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5,
	for (i = 0 ; i < 3; i++) {
		debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
			(i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
		/*if the channel has 3 dimms*/
		if (pvt->channel[i].dimms > 2) {
			/* 3 DIMMs: each counter half maps to one DIMM */
			new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
			new1 = DIMM_TOP_COR_ERR(rcv[i][0]);
			new2 = DIMM_BOT_COR_ERR(rcv[i][1]);
			/* 1 or 2 DIMMs: both halves of a register belong to
			 * the same DIMM, so sum them */
			new0 = DIMM_TOP_COR_ERR(rcv[i][0]) +
				DIMM_BOT_COR_ERR(rcv[i][0]);
			new1 = DIMM_TOP_COR_ERR(rcv[i][1]) +
				DIMM_BOT_COR_ERR(rcv[i][1]);
		i7core_rdimm_update_ce_count(mci, i, new0, new1, new2);
1636 /* This function is based on the device 3 function 4 registers as described on:
1637 * Intel Xeon Processor 5500 Series Datasheet Volume 2
1638 * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
1639 * also available at:
1640 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
/*
 * i7core_udimm_check_mc_ecc_err - poll the UDIMM corrected-error counters
 * (MC_TEST_ERR_RCV0/1 on dev 3 func 4) and accumulate the per-DIMM deltas
 * since the previous poll.  First call only seeds the cached values.
 */
static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
	struct i7core_pvt *pvt = mci->pvt_info;
	int new0, new1, new2;
	if (!pvt->pci_mcr[4]) {
		debugf0("%s MCR registers not found\n", __func__);
	/* Corrected test errors */
	pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV1, &rcv1);
	pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV0, &rcv0);
	/* Store the new values */
	new2 = DIMM2_COR_ERR(rcv1);
	new1 = DIMM1_COR_ERR(rcv0);
	new0 = DIMM0_COR_ERR(rcv0);
	/* Updates CE counters if it is not the first time here */
	if (pvt->ce_count_available) {
		/* Updates CE counters */
		int add0, add1, add2;
		add2 = new2 - pvt->udimm_last_ce_count[2];
		add1 = new1 - pvt->udimm_last_ce_count[1];
		add0 = new0 - pvt->udimm_last_ce_count[0];
		/* NOTE(review): hardware counters wrap; the negative-delta
		 * correction appears elided here — confirm upstream. */
		pvt->udimm_ce_count[2] += add2;
		pvt->udimm_ce_count[1] += add1;
		pvt->udimm_ce_count[0] += add0;
		/* Log only when at least one counter moved */
		if (add0 | add1 | add2)
			i7core_printk(KERN_ERR, "New Corrected error(s): "
				      "dimm0: +%d, dimm1: +%d, dimm2 +%d\n",
		pvt->ce_count_available = 1;
	/* Store the new values */
	pvt->udimm_last_ce_count[2] = new2;
	pvt->udimm_last_ce_count[1] = new1;
	pvt->udimm_last_ce_count[0] = new0;
1697 * According with tables E-11 and E-12 of chapter E.3.3 of Intel 64 and IA-32
1698 * Architectures Software Developer’s Manual Volume 3B.
1699 * Nehalem are defined as family 0x06, model 0x1a
1701 * The MCA registers used here are the following ones:
1702 * struct mce field MCA Register
1703 * m->status MSR_IA32_MC8_STATUS
1704 * m->addr MSR_IA32_MC8_ADDR
1705 * m->misc MSR_IA32_MC8_MISC
1706 * In the case of Nehalem, the error information is masked at .status and .misc
/*
 * i7core_mce_output_error - decode one machine-check record (@m) from MC
 * bank 8 and hand it to the EDAC core as either an uncorrected (fatal)
 * or corrected event.  Field layouts follow the Nehalem MCA register
 * documentation referenced above.
 */
static void i7core_mce_output_error(struct mem_ctl_info *mci,
				    const struct mce *m)
	struct i7core_pvt *pvt = mci->pvt_info;
	char *type, *optype, *err, *msg;
	/* Model-specific error bits live at status[24:16] */
	unsigned long error = m->status & 0x1ff0000l;
	u32 optypenum = (m->status >> 4) & 0x07;
	u32 core_err_cnt = (m->status >> 38) & 0x7fff;
	/* DIMM and channel are encoded in the MISC register */
	u32 dimm = (m->misc >> 16) & 0x3;
	u32 channel = (m->misc >> 18) & 0x3;
	u32 syndrome = m->misc >> 32;
	u32 errnum = find_first_bit(&error, 32);
	/* MCG_STATUS bit 0 set => uncorrected/fatal event */
	if (m->mcgstatus & 1)
	switch (optypenum) {
		optype = "generic undef request";
		optype = "read error";
		optype = "write error";
		optype = "addr/cmd error";
		optype = "scrubbing error";
		optype = "reserved";
		/* Per-bit error decode of status[24:16] */
		err = "read ECC error";
		err = "RAS ECC error";
		err = "write parity error";
		err = "redundacy loss";	/* sic: upstream typo for "redundancy" */
		err = "memory range error";
		err = "RTID out of range";
		err = "address parity error";
		err = "byte enable parity error";
	/* FIXME: should convert addr into bank and rank information */
	msg = kasprintf(GFP_ATOMIC,
		"%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, "
		"syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n",
		type, (long long) m->addr, m->cpu, dimm, channel,
		syndrome, core_err_cnt, (long long)m->status,
		(long long)m->misc, optype, err);
	csrow = pvt->csrow_map[channel][dimm];
	/* Call the helper to output message */
	if (m->mcgstatus & 1)
		edac_mc_handle_fbd_ue(mci, csrow, 0,
				      0 /* FIXME: should be channel here */, msg);
	else if (!pvt->is_registered)
		/* RDIMM CEs are counted via the MC registers instead */
		edac_mc_handle_fbd_ce(mci, csrow,
				      0 /* FIXME: should be channel here */, msg);
1805 * i7core_check_error Retrieve and process errors reported by the
1806 * hardware. Called by the Core module.
/*
 * i7core_check_error - EDAC poll callback: drain the MCE ring buffer
 * filled by i7core_mce_check_error() into a private copy, decode each
 * entry, then refresh the corrected-error counters.
 */
static void i7core_check_error(struct mem_ctl_info *mci)
	struct i7core_pvt *pvt = mci->pvt_info;
	/*
	 * MCE first step: Copy all mce errors into a temporary buffer
	 * We use a double buffering here, to reduce the risk of
	 * losing entries racing the NMI-time producer.
	 */
	count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
		goto check_ce_error;
	m = pvt->mce_outentry;
	/* Ring wrap: copy the tail chunk first, then the head chunk */
	if (pvt->mce_in + count > MCE_LOG_LEN) {
		unsigned l = MCE_LOG_LEN - pvt->mce_in;
		memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
	memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
	pvt->mce_in += count;
	/* Report any entries the producer dropped while the ring was full */
	if (pvt->mce_overrun) {
		i7core_printk(KERN_ERR, "Lost %d memory errors\n",
		pvt->mce_overrun = 0;
	/*
	 * MCE second step: parse errors and display
	 */
	for (i = 0; i < count; i++)
		i7core_mce_output_error(mci, &pvt->mce_outentry[i]);
	/*
	 * Now, let's increment CE error counts
	 */
	if (!pvt->is_registered)
		i7core_udimm_check_mc_ecc_err(mci);
		i7core_rdimm_check_mc_ecc_err(mci);
1865 * i7core_mce_check_error Replicates mcelog routine to get errors
1866 * This routine simply queues mcelog errors, and
1867 * return. The error itself should be handled later
1868 * by i7core_check_error.
1869 * WARNING: As this routine should be called at NMI time, extra care should
1870 * be taken to avoid deadlocks, and to be as fast as possible.
/*
 * i7core_mce_check_error - edac_mce callback, runs at NMI time: filter
 * the record and enqueue it on the per-mci ring buffer for later
 * decoding by i7core_check_error().  Must stay lock-free and fast.
 */
static int i7core_mce_check_error(void *priv, struct mce *mce)
	struct mem_ctl_info *mci = priv;
	struct i7core_pvt *pvt = mci->pvt_info;
	/*
	 * Just let mcelog handle it if the error is
	 * outside the memory controller
	 */
	if (((mce->status & 0xffff) >> 7) != 1)
	/* Bank 8 registers are the only ones that we know how to handle */
	/* Only handle if it is the right mc controller */
	if (cpu_data(mce->cpu).phys_proc_id != pvt->i7core_dev->socket)
	/* Ring full: count the loss instead of blocking at NMI time */
	if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
	/* Copy memory error at the ringbuffer */
	memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
	pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;
	/* Handle fatal errors immediately */
	if (mce->mcgstatus & 1)
		i7core_check_error(mci);
	/* Advise mcelog that the errors were handled */
1915 * set_sdram_scrub_rate This routine sets byte/sec bandwidth scrub rate
1916 * to hardware according to SCRUBINTERVAL formula
1917 * found in datasheet.
/*
 * set_sdram_scrub_rate - program the patrol scrub interval.
 * @new_bw is the requested bandwidth in bytes/sec; it is converted to a
 * SCRUBINTERVAL register value (cache_line_size * dclk / bandwidth).
 */
static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
	struct i7core_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	const u32 cache_line_size = 64;
	const u32 freq_dclk = 800*1000000;	/* 800 MHz DCLK assumed */
	/* Get data from the MC register, function 2 */
	pdev = pvt->pci_mcr[2];
	pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &dw_scrub);
	/* Prepare to disable patrol scrub */
	dw_scrub &= ~STARTSCRUB;
	/* Stop the patrol scrub engine */
	write_and_test(pdev, MC_SCRUB_CONTROL, dw_scrub & ~0x00ffffff);
	/* Get current status of scrub rate and set bit to disable */
	pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
	dw_ssr &= ~SSR_MODE_MASK;
	dw_ssr |= SSR_MODE_DISABLE;
	/*
	 * Translate the desired scrub rate to a register value and
	 * program the corresponding register value.
	 */
	dw_scrub = 0x00ffffff & (cache_line_size * freq_dclk / new_bw);
	/* Start the patrol scrub engine */
	pci_write_config_dword(pdev, MC_SCRUB_CONTROL,
			       STARTSCRUB | dw_scrub);
	/* Get current status of scrub rate and set bit to enable */
	pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
	dw_ssr &= ~SSR_MODE_MASK;
	dw_ssr |= SSR_MODE_ENABLE;
	/* Disable or enable scrubbing */
	pci_write_config_dword(pdev, MC_SSRCONTROL, dw_ssr);
 * get_sdram_scrub_rate	This routine converts the current scrub rate value
 *			into byte/sec bandwidth according to
 *			the SCRUBINTERVAL formula found in the datasheet.
static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
	struct i7core_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	const u32 cache_line_size = 64;
	const u32 freq_dclk = 800*1000000;	/* 800 MHz DCLK assumed */
	/* Get data from the MC register, function 2 */
	pdev = pvt->pci_mcr[2];
	/* Get current scrub control data */
	pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &scrubval);
	/* Mask highest 8-bits to 0 */
	scrubval &= 0x00ffffff;
	/* NOTE(review): a scrubval == 0 (scrub disabled) guard appears to be
	 * elided here; the division below would otherwise trap — confirm. */
	/* Calculate scrub rate value into byte/sec bandwidth */
	return 0xffffffff & (cache_line_size * freq_dclk / (u64) scrubval);
/*
 * enable_sdram_scrub_setting - unlock MC config writes and hook the
 * scrub-rate get/set operations into the mci.
 */
static void enable_sdram_scrub_setting(struct mem_ctl_info *mci)
	struct i7core_pvt *pvt = mci->pvt_info;
	/* Unlock writes to pci registers */
	pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
	pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
			       pci_lock | MC_CFG_UNLOCK);
	mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
	mci->get_sdram_scrub_rate = get_sdram_scrub_rate;
/*
 * disable_sdram_scrub_setting - re-lock MC config register writes
 * (counterpart of enable_sdram_scrub_setting, called on teardown).
 */
static void disable_sdram_scrub_setting(struct mem_ctl_info *mci)
	struct i7core_pvt *pvt = mci->pvt_info;
	/* Lock writes to pci registers */
	pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
	pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
			       pci_lock | MC_CFG_LOCK);
2024 static void i7core_pci_ctl_create(struct i7core_pvt *pvt)
2026 pvt->i7core_pci = edac_pci_create_generic_ctl(
2027 &pvt->i7core_dev->pdev[0]->dev,
2029 if (unlikely(!pvt->i7core_pci))
2030 pr_warn("Unable to setup PCI error report via EDAC\n");
/*
 * i7core_pci_ctl_release - release the EDAC PCI control allocated by
 * i7core_pci_ctl_create() and clear the pointer; complains if it was
 * never created.
 */
static void i7core_pci_ctl_release(struct i7core_pvt *pvt)
	if (likely(pvt->i7core_pci))
		edac_pci_release_generic_ctl(pvt->i7core_pci);
		i7core_printk(KERN_ERR,
			      "Couldn't find mem_ctl_info for socket %d\n",
			      pvt->i7core_dev->socket);
	pvt->i7core_pci = NULL;
/*
 * i7core_unregister_mci - tear down one socket's mci in reverse order of
 * registration: scrub setting, MCE hook, PCI ctl, sysfs, then the mci
 * itself.
 */
static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
	struct mem_ctl_info *mci = i7core_dev->mci;
	struct i7core_pvt *pvt;
	/* Guard against teardown of a socket that never registered */
	if (unlikely(!mci || !mci->pvt_info)) {
		debugf0("MC: " __FILE__ ": %s(): dev = %p\n",
			__func__, &i7core_dev->pdev[0]->dev);
		i7core_printk(KERN_ERR, "Couldn't find mci handler\n");
	pvt = mci->pvt_info;
	debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
		__func__, mci, &i7core_dev->pdev[0]->dev);
	/* Disable scrubrate setting */
	disable_sdram_scrub_setting(mci);
	/* Disable MCE NMI handler */
	edac_mce_unregister(&pvt->edac_mce);
	/* Disable EDAC polling */
	i7core_pci_ctl_release(pvt);
	/* Remove MC sysfs nodes */
	edac_mc_del_mc(mci->dev);
	debugf1("%s: free mci struct\n", mci->ctl_name);
	kfree(mci->ctl_name);
	i7core_dev->mci = NULL;
/*
 * i7core_register_mci - allocate, populate and register the mem_ctl_info
 * for one socket, then hook up PCI error reporting and the MCE callback.
 * Returns 0 on success, negative errno otherwise; the error path unwinds
 * in reverse order of setup.
 */
static int i7core_register_mci(struct i7core_dev *i7core_dev)
	struct mem_ctl_info *mci;
	struct i7core_pvt *pvt;
	int rc, channels, csrows;
	/* Check the number of active and not disabled channels */
	rc = i7core_get_active_channels(i7core_dev->socket, &channels, &csrows);
	if (unlikely(rc < 0))
	/* allocate a new MC control structure */
	mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, i7core_dev->socket);
	debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
		__func__, mci, &i7core_dev->pdev[0]->dev);
	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));
	/* Associates i7core_dev and mci for future usage */
	pvt->i7core_dev = i7core_dev;
	i7core_dev->mci = mci;
	/*
	 * FIXME: how to handle RDDR3 at MCI level? It is possible to have
	 * Mixed RDDR3/UDDR3 with Nehalem, provided that they are on different
	 * channels.
	 */
	mci->mtype_cap = MEM_FLAG_DDR3;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "i7core_edac.c";
	mci->mod_ver = I7CORE_REVISION;
	mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d",
				  i7core_dev->socket);
	mci->dev_name = pci_name(i7core_dev->pdev[0]);
	mci->ctl_page_to_phys = NULL;
	/* Store pci devices at mci for faster access */
	rc = mci_bind_devs(mci, i7core_dev);
	if (unlikely(rc < 0))
	/* RDIMM and UDIMM setups expose different sysfs attribute sets */
	if (pvt->is_registered)
		mci->mc_driver_sysfs_attributes = i7core_sysfs_rdimm_attrs;
		mci->mc_driver_sysfs_attributes = i7core_sysfs_udimm_attrs;
	/* Get dimm basic config */
	get_dimm_config(mci);
	/* record ptr to the generic device */
	mci->dev = &i7core_dev->pdev[0]->dev;
	/* Set the function pointer to an actual operation function */
	mci->edac_check = i7core_check_error;
	/* Enable scrubrate setting */
	enable_sdram_scrub_setting(mci);
	/* add this new MC control structure to EDAC's list of MCs */
	if (unlikely(edac_mc_add_mc(mci))) {
		debugf0("MC: " __FILE__
			": %s(): failed edac_mc_add_mc()\n", __func__);
		/* FIXME: perhaps some code should go here that disables error
		 * reporting if we just enabled it
		 */
	/* Default error mask is any memory */
	pvt->inject.channel = 0;
	pvt->inject.dimm = -1;
	pvt->inject.rank = -1;
	pvt->inject.bank = -1;
	pvt->inject.page = -1;
	pvt->inject.col = -1;
	/* allocating generic PCI control info */
	i7core_pci_ctl_create(pvt);
	/* Registers on edac_mce in order to receive memory errors */
	pvt->edac_mce.priv = mci;
	pvt->edac_mce.check_error = i7core_mce_check_error;
	rc = edac_mce_register(&pvt->edac_mce);
	if (unlikely(rc < 0)) {
		debugf0("MC: " __FILE__
			": %s(): failed edac_mce_register()\n", __func__);
	/* Error unwind: release in reverse order of setup */
	i7core_pci_ctl_release(pvt);
	edac_mc_del_mc(mci->dev);
	kfree(mci->ctl_name);
	i7core_dev->mci = NULL;
2187 * i7core_probe Probe for ONE instance of device to see if it is
2190 * 0 for FOUND a device
2191 * < 0 for error code
/*
 * i7core_probe - PCI probe entry point.  The first successful probe
 * reserves ALL sockets' devices and registers one mci per socket; later
 * probes are no-ops (guarded by 'probed').  Serialized by
 * i7core_edac_lock.
 */
static int __devinit i7core_probe(struct pci_dev *pdev,
				  const struct pci_device_id *id)
	struct i7core_dev *i7core_dev;
	/* get the pci devices we want to reserve for our use */
	mutex_lock(&i7core_edac_lock);
	/*
	 * All memory controllers are allocated at the first pass.
	 */
	if (unlikely(probed >= 1)) {
		mutex_unlock(&i7core_edac_lock);
	rc = i7core_get_all_devices();
	if (unlikely(rc < 0))
	list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
		rc = i7core_register_mci(i7core_dev);
		if (unlikely(rc < 0))
	i7core_printk(KERN_INFO, "Driver loaded.\n");
	mutex_unlock(&i7core_edac_lock);
	/* Error path: unregister what was registered, drop all devices */
	list_for_each_entry(i7core_dev, &i7core_edac_list, list)
		i7core_unregister_mci(i7core_dev);
	i7core_put_all_devices();
	mutex_unlock(&i7core_edac_lock);
2238 * i7core_remove destructor for one instance of device
/*
 * i7core_remove - PCI remove entry point: unregister every socket's mci
 * and release all reserved PCI devices, ignoring the @pdev argument (see
 * comment below).  Serialized by i7core_edac_lock.
 */
static void __devexit i7core_remove(struct pci_dev *pdev)
	struct i7core_dev *i7core_dev;
	debugf0(__FILE__ ": %s()\n", __func__);
	/*
	 * we have a trouble here: pdev value for removal will be wrong, since
	 * it will point to the X58 register used to detect that the machine
	 * is a Nehalem or upper design. However, due to the way several PCI
	 * devices are grouped together to provide MC functionality, we need
	 * to use a different method for releasing the devices
	 */
	mutex_lock(&i7core_edac_lock);
	/* Nothing to do if probe never completed */
	if (unlikely(!probed)) {
		mutex_unlock(&i7core_edac_lock);
	list_for_each_entry(i7core_dev, &i7core_edac_list, list)
		i7core_unregister_mci(i7core_dev);
	/* Release PCI resources */
	i7core_put_all_devices();
	mutex_unlock(&i7core_edac_lock);
/* Export the id table so udev/modprobe can autoload the module */
MODULE_DEVICE_TABLE(pci, i7core_pci_tbl);
/*
 * i7core_driver pci_driver structure for this module
 */
static struct pci_driver i7core_driver = {
	.name = "i7core_edac",
	.probe = i7core_probe,
	.remove = __devexit_p(i7core_remove),
	.id_table = i7core_pci_tbl,
2287 * i7core_init Module entry function
2288 * Try to initialize this module for its devices
/*
 * i7core_init - module entry point: apply the hidden-bus fixup, then
 * register the PCI driver.  Returns the pci_register_driver() result.
 */
static int __init i7core_init(void)
	debugf2("MC: " __FILE__ ": %s()\n", __func__);
	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	i7core_xeon_pci_fixup(pci_dev_table);
	pci_rc = pci_register_driver(&i7core_driver);
	i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
2314 * i7core_exit() Module exit function
2315 * Unregister the driver
/* Module exit point: just unregister the PCI driver; per-socket teardown
 * happens through i7core_remove(). */
static void __exit i7core_exit(void)
	debugf2("MC: " __FILE__ ": %s()\n", __func__);
	pci_unregister_driver(&i7core_driver);
module_init(i7core_init);
module_exit(i7core_exit);

/* Module metadata and the edac_op_state parameter (poll vs NMI) */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - "
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");