]> git.karo-electronics.de Git - karo-tx-linux.git/blobdiff - drivers/edac/i7core_edac.c
x86: Add x86_init platform override to fix up NUMA core numbering
[karo-tx-linux.git] / drivers / edac / i7core_edac.c
index 6ae7795dea86d381cad9c0a375672656b79ac0ff..70ad8923f1d75d79a26c479e17e725499091a340 100644 (file)
 #include <linux/pci_ids.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
+#include <linux/dmi.h>
 #include <linux/edac.h>
 #include <linux/mmzone.h>
 #include <linux/smp.h>
 #include <asm/mce.h>
 #include <asm/processor.h>
+#include <asm/div64.h>
 
 #include "edac_core.h"
 
@@ -107,6 +109,7 @@ MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
 
 #define MC_SCRUB_CONTROL       0x4c
   #define STARTSCRUB           (1 << 24)
+  #define SCRUBINTERVAL_MASK    0xffffff	/* scrub interval: bits 23:0 (STARTSCRUB is bit 24) */
 
 #define MC_COR_ECC_CNT_0       0x80
 #define MC_COR_ECC_CNT_1       0x84
@@ -275,6 +278,9 @@ struct i7core_pvt {
        /* Count indicator to show errors not got */
        unsigned                mce_overrun;
 
+       /* DCLK Frequency used for computing scrub rate */
+       int                     dclk_freq;
+
        /* Struct to control EDAC polling */
        struct edac_pci_ctl_info *i7core_pci;
 };
@@ -740,6 +746,10 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
 
                        csr->edac_mode = mode;
                        csr->mtype = mtype;
+                       snprintf(csr->channels[0].label,
+                                       sizeof(csr->channels[0].label),
+                                       "CPU#%uChannel#%u_DIMM#%u",
+                                       pvt->i7core_dev->socket, i, j);
 
                        csrow++;
                }
@@ -1952,6 +1962,112 @@ static struct notifier_block i7_mce_dec = {
        .notifier_call  = i7core_mce_check_error,
 };
 
+/*
+ * Raw layout of an SMBIOS "Memory Device" (DMI type 17) record, used by
+ * decode_dclk() to read per-DIMM clock speeds out of the DMI tables.
+ * The field order and widths must match the SMBIOS record byte-for-byte,
+ * hence the packed attribute.
+ */
+struct memdev_dmi_entry {
+       u8 type;
+       u8 length;	/* record length; gates which trailing fields exist */
+       u16 handle;
+       u16 phys_mem_array_handle;
+       u16 mem_err_info_handle;
+       u16 total_width;
+       u16 data_width;
+       u16 size;	/* 0 is treated as "no DIMM present" by decode_dclk() */
+       u8 form;
+       u8 device_set;
+       u8 device_locator;
+       u8 bank_locator;
+       u8 memory_type;
+       u16 type_detail;
+       u16 speed;	/* rated DIMM speed; used here as MHz */
+       u8 manufacturer;
+       u8 serial_number;
+       u8 asset_tag;
+       u8 part_number;
+       u8 attributes;
+       u32 extended_size;
+       u16 conf_mem_clk_speed;	/* configured clock speed; preferred over 'speed' */
+} __attribute__((__packed__));
+
+
+/*
+ * Decode the DRAM Clock Frequency, be paranoid, make sure that all
+ * memory devices show the same speed, and if they don't then consider
+ * all speeds to be invalid.
+ */
+/*
+ * dmi_walk() callback, invoked once per DMI record.
+ *
+ * @dh:         header of the current DMI record.
+ * @_dclk_freq: pointer to the int accumulator shared across calls:
+ *                0  - no valid speed seen yet (initial state),
+ *                >0 - common speed (in MHz) of all DIMMs seen so far,
+ *                -1 - speeds were missing or inconsistent; give up.
+ */
+static void decode_dclk(const struct dmi_header *dh, void *_dclk_freq)
+{
+       int *dclk_freq = _dclk_freq;
+       u16 dmi_mem_clk_speed;
+
+       /* A previous record already marked the result invalid - nothing to do */
+       if (*dclk_freq == -1)
+               return;
+
+       if (dh->type == DMI_ENTRY_MEM_DEVICE) {
+               struct memdev_dmi_entry *memdev_dmi_entry =
+                       (struct memdev_dmi_entry *)dh;
+               /*
+                * Byte offsets of the speed fields within the record.  A
+                * record only contains a field if its 'length' extends past
+                * that field's offset (presumably because SMBIOS records
+                * grew over spec revisions - TODO confirm against the spec).
+                */
+               unsigned long conf_mem_clk_speed_offset =
+                       (unsigned long)&memdev_dmi_entry->conf_mem_clk_speed -
+                       (unsigned long)&memdev_dmi_entry->type;
+               unsigned long speed_offset =
+                       (unsigned long)&memdev_dmi_entry->speed -
+                       (unsigned long)&memdev_dmi_entry->type;
+
+               /* Check that a DIMM is present */
+               if (memdev_dmi_entry->size == 0)
+                       return;
+
+               /*
+                * Pick the configured speed if it's available, otherwise
+                * pick the DIMM speed, or we don't have a speed.
+                */
+               if (memdev_dmi_entry->length > conf_mem_clk_speed_offset) {
+                       dmi_mem_clk_speed =
+                               memdev_dmi_entry->conf_mem_clk_speed;
+               } else if (memdev_dmi_entry->length > speed_offset) {
+                       dmi_mem_clk_speed = memdev_dmi_entry->speed;
+               } else {
+                       /* Record carries neither speed field - result unusable */
+                       *dclk_freq = -1;
+                       return;
+               }
+
+               if (*dclk_freq == 0) {
+                       /* First pass, speed was 0 */
+                       if (dmi_mem_clk_speed > 0) {
+                               /* Set speed if a valid speed is read */
+                               *dclk_freq = dmi_mem_clk_speed;
+                       } else {
+                               /* Otherwise we don't have a valid speed */
+                               *dclk_freq = -1;
+                       }
+               } else if (*dclk_freq > 0 &&
+                          *dclk_freq != dmi_mem_clk_speed) {
+                       /*
+                        * If we have a speed, check that all DIMMS are the same
+                        * speed, otherwise set the speed as invalid.
+                        */
+                       *dclk_freq = -1;
+               }
+       }
+}
+
+/*
+ * The default DCLK frequency is used as a fallback if we
+ * fail to find anything reliable in the DMI. The value
+ * is taken straight from the datasheet.
+ */
+#define DEFAULT_DCLK_FREQ 800
+
+/*
+ * Determine the DRAM clock frequency (in MHz) by walking the DMI tables.
+ *
+ * Returns the speed common to all populated DIMMs, or DEFAULT_DCLK_FREQ
+ * when decode_dclk() found nothing usable: 0 (no DIMM reported a speed,
+ * or dmi_walk() itself failed and left the accumulator untouched) and
+ * -1 (missing or inconsistent speeds) both fall through to the default.
+ */
+static int get_dclk_freq(void)
+{
+       int dclk_freq = 0;
+
+       /* Return value deliberately ignored; dclk_freq stays 0 on failure */
+       dmi_walk(decode_dclk, (void *)&dclk_freq);
+
+       if (dclk_freq < 1)
+               return DEFAULT_DCLK_FREQ;
+
+       return dclk_freq;
+}
+
 /*
  * set_sdram_scrub_rate                This routine sets byte/sec bandwidth scrub rate
  *                             to hardware according to SCRUBINTERVAL formula
@@ -1961,8 +2077,6 @@ static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
 {
        struct i7core_pvt *pvt = mci->pvt_info;
        struct pci_dev *pdev;
-       const u32 cache_line_size = 64;
-       const u32 freq_dclk = 800*1000000;
        u32 dw_scrub;
        u32 dw_ssr;
 
@@ -1977,18 +2091,29 @@ static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
                /* Prepare to disable patrol scrub */
                dw_scrub &= ~STARTSCRUB;
                /* Stop the patrol scrub engine */
-               write_and_test(pdev, MC_SCRUB_CONTROL, dw_scrub & ~0x00ffffff);
+               write_and_test(pdev, MC_SCRUB_CONTROL,
+                              dw_scrub & ~SCRUBINTERVAL_MASK);
 
                /* Get current status of scrub rate and set bit to disable */
                pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
                dw_ssr &= ~SSR_MODE_MASK;
                dw_ssr |= SSR_MODE_DISABLE;
        } else {
+               const int cache_line_size = 64;
+               const u32 freq_dclk_mhz = pvt->dclk_freq;
+               unsigned long long scrub_interval;
                /*
                 * Translate the desired scrub rate to a register value and
-                * program the cooresponding register value.
+                * program the corresponding register value.
                 */
-               dw_scrub = 0x00ffffff & (cache_line_size * freq_dclk / new_bw);
+               scrub_interval = (unsigned long long)freq_dclk_mhz *
+                       cache_line_size * 1000000;
+               do_div(scrub_interval, new_bw);
+
+               if (!scrub_interval || scrub_interval > SCRUBINTERVAL_MASK)
+                       return -EINVAL;
+
+               dw_scrub = SCRUBINTERVAL_MASK & scrub_interval;
 
                /* Start the patrol scrub engine */
                pci_write_config_dword(pdev, MC_SCRUB_CONTROL,
@@ -2015,7 +2140,8 @@ static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
        struct i7core_pvt *pvt = mci->pvt_info;
        struct pci_dev *pdev;
        const u32 cache_line_size = 64;
-       const u32 freq_dclk = 800*1000000;
+       const u32 freq_dclk_mhz = pvt->dclk_freq;
+       unsigned long long scrub_rate;
        u32 scrubval;
 
        /* Get data from the MC register, function 2 */
@@ -2027,12 +2153,15 @@ static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
        pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &scrubval);
 
        /* Mask highest 8-bits to 0 */
-       scrubval &=  0x00ffffff;
+       scrubval &=  SCRUBINTERVAL_MASK;
        if (!scrubval)
                return 0;
 
        /* Calculate scrub rate value into byte/sec bandwidth */
-       return 0xffffffff & (cache_line_size * freq_dclk / (u64) scrubval);
+       scrub_rate =  (unsigned long long)freq_dclk_mhz *
+               1000000 * cache_line_size;
+       do_div(scrub_rate, scrubval);
+       return (int)scrub_rate;
 }
 
 static void enable_sdram_scrub_setting(struct mem_ctl_info *mci)
@@ -2204,6 +2333,9 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
        /* allocating generic PCI control info */
        i7core_pci_ctl_create(pvt);
 
+       /* DCLK for scrub rate setting */
+       pvt->dclk_freq = get_dclk_freq();
+
        atomic_notifier_chain_register(&x86_mce_decoder_chain, &i7_mce_dec);
 
        return 0;