diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 97c84ebe3f244c43cfaea5f9b12863463cb72f89..6a58256dce9f79560a90adf380de6d87344b3490 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -52,7 +52,7 @@ static unsigned long reset_value[NUM_VIRT_COUNTERS];
 #define IBS_FETCH_ENABLE               (1ULL<<48)
 #define IBS_FETCH_CNT_MASK             0xFFFF0000ULL
 
-/*IbsOpCtl bits */
+/* IbsOpCtl bits */
 #define IBS_OP_CNT_CTL                 (1ULL<<19)
 #define IBS_OP_VAL                     (1ULL<<18)
 #define IBS_OP_ENABLE                  (1ULL<<17)
@@ -72,6 +72,7 @@ struct op_ibs_config {
 };
 
 static struct op_ibs_config ibs_config;
+static u64 ibs_op_ctl;
 
 /*
  * IBS cpuid feature detection
@@ -84,8 +85,16 @@ static struct op_ibs_config ibs_config;
  * bit 0 is used to indicate the existence of IBS.
  */
 #define IBS_CAPS_AVAIL                 (1LL<<0)
+#define IBS_CAPS_RDWROPCNT             (1LL<<3)
 #define IBS_CAPS_OPCNT                 (1LL<<4)
 
+/*
+ * IBS randomization macros
+ */
+#define IBS_RANDOM_BITS                        12
+#define IBS_RANDOM_MASK                        ((1ULL << IBS_RANDOM_BITS) - 1)
+#define IBS_RANDOM_MAXCNT_OFFSET       (1ULL << (IBS_RANDOM_BITS - 5))
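+/*
+ * IBS_RANDOM_MASK covers the low 12 bits (0xFFF) written into
+ * IbsOpCurCnt.  IBS_RANDOM_MAXCNT_OFFSET is 1 << 7 = 128, which is
+ * half of the 12-bit random range (2048 ops) expressed in the 16-op
+ * units of the IbsOpMaxCnt field (programmed as max_cnt_op >> 4).
+ */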
+
 static u32 get_ibs_caps(void)
 {
        u32 ibs_caps;
@@ -109,19 +118,6 @@ static u32 get_ibs_caps(void)
 
 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
 
-static void op_mux_fill_in_addresses(struct op_msrs * const msrs)
-{
-       int i;
-
-       for (i = 0; i < NUM_VIRT_COUNTERS; i++) {
-               int hw_counter = op_x86_virt_to_phys(i);
-               if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
-                       msrs->multiplex[i].addr = MSR_K7_PERFCTR0 + hw_counter;
-               else
-                       msrs->multiplex[i].addr = 0;
-       }
-}
-
 static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
                               struct op_msrs const * const msrs)
 {
@@ -131,7 +127,7 @@ static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
        /* enable active counters */
        for (i = 0; i < NUM_COUNTERS; ++i) {
                int virt = op_x86_phys_to_virt(i);
-               if (!counter_config[virt].enabled)
+               if (!reset_value[virt])
                        continue;
                rdmsrl(msrs->controls[i].addr, val);
                val &= model->reserved;
@@ -140,10 +136,6 @@ static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
        }
 }
 
-#else
-
-static inline void op_mux_fill_in_addresses(struct op_msrs * const msrs) { }
-
 #endif
 
 /* functions for op_amd_spec */
@@ -155,18 +147,12 @@ static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
        for (i = 0; i < NUM_COUNTERS; i++) {
                if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
                        msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
-               else
-                       msrs->counters[i].addr = 0;
        }
 
        for (i = 0; i < NUM_CONTROLS; i++) {
                if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
                        msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
-               else
-                       msrs->controls[i].addr = 0;
        }
-
-       op_mux_fill_in_addresses(msrs);
 }
 
 static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
@@ -177,7 +163,8 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
 
        /* setup reset_value */
        for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
-               if (counter_config[i].enabled)
+               if (counter_config[i].enabled
+                   && msrs->counters[op_x86_virt_to_phys(i)].addr)
                        reset_value[i] = counter_config[i].count;
                else
                        reset_value[i] = 0;
@@ -185,9 +172,18 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
 
        /* clear all counters */
        for (i = 0; i < NUM_CONTROLS; ++i) {
-               if (unlikely(!msrs->controls[i].addr))
+               if (unlikely(!msrs->controls[i].addr)) {
+                       if (counter_config[i].enabled && !smp_processor_id())
+                               /*
+                                * the counter is already reserved; this
+                                * happens on all cpus, report only for cpu #0
+                                */
+                               op_x86_warn_reserved(i);
                        continue;
+               }
                rdmsrl(msrs->controls[i].addr, val);
+               if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
+                       op_x86_warn_in_use(i);
                val &= model->reserved;
                wrmsrl(msrs->controls[i].addr, val);
        }
@@ -202,9 +198,7 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
        /* enable active counters */
        for (i = 0; i < NUM_COUNTERS; ++i) {
                int virt = op_x86_phys_to_virt(i);
-               if (!counter_config[virt].enabled)
-                       continue;
-               if (!msrs->counters[i].addr)
+               if (!reset_value[virt])
                        continue;
 
                /* setup counter registers */
@@ -241,6 +235,38 @@ static unsigned int lfsr_random(void)
        return lfsr_value;
 }
 
+/*
+ * IBS software randomization
+ *
+ * The IBS periodic op counter is randomized in software. The lower 12
+ * bits of the 20-bit counter are randomized: IbsOpCurCnt is
+ * initialized with a 12-bit random value.
+ */
+static inline u64 op_amd_randomize_ibs_op(u64 val)
+{
+       unsigned int random = lfsr_random();
+
+       if (!(ibs_caps & IBS_CAPS_RDWROPCNT))
+               /*
+                * Work around hw where IbsOpCurCnt is not writable:
+                *
+                * Randomize the lower 8 bits of the 16-bit
+                * IbsOpMaxCnt [15:0] value by adding a signed offset
+                * in the range of -128 to +127 to the maximum count
+                * (IbsOpMaxCnt).
+                *
+                * To avoid over- or underflows and to protect the
+                * upper bits starting at bit 16, the initial value
+                * for IbsOpMaxCnt must fit in the range from 0x0081
+                * to 0xff80 (0x0081 - 0x80 = 1, 0xff80 + 0x7f = 0xffff).
+                */
+               val += (s8)(random >> 4);
+       else
+               val |= (u64)(random & IBS_RANDOM_MASK) << 32;
+
+       return val;
+}
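+/*
+ * Example: for random = 0xabc with IBS_CAPS_RDWROPCNT set, the low
+ * 12 bits of IbsOpCurCnt become 0xabc; without IBS_CAPS_RDWROPCNT
+ * the same value instead adds (s8)(0xabc >> 4) = (s8)0xab = -85 to
+ * the IbsOpMaxCnt field.
+ */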
+
 static inline void
 op_amd_handle_ibs(struct pt_regs * const regs,
                  struct op_msrs const * const msrs)
@@ -290,8 +316,7 @@ op_amd_handle_ibs(struct pt_regs * const regs,
                        oprofile_write_commit(&entry);
 
                        /* reenable the IRQ */
-                       ctl &= ~IBS_OP_VAL & 0xFFFFFFFF;
-                       ctl |= IBS_OP_ENABLE;
+                       ctl = op_amd_randomize_ibs_op(ibs_op_ctl);
                        wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
                }
        }
@@ -312,10 +337,27 @@ static inline void op_amd_start_ibs(void)
        }
 
        if (ibs_config.op_enabled) {
-               val = (ibs_config.max_cnt_op >> 4) & 0xFFFF;
+               ibs_op_ctl = ibs_config.max_cnt_op >> 4;
+               if (!(ibs_caps & IBS_CAPS_RDWROPCNT)) {
+                       /*
+                        * IbsOpCurCnt not supported.  See
+                        * op_amd_randomize_ibs_op() for details.
+                        */
+                       ibs_op_ctl = clamp(ibs_op_ctl, 0x0081ULL, 0xFF80ULL);
+               } else {
+                       /*
+                        * The start value (IbsOpCurCnt) is randomized
+                        * with a positive offset; compensate by adding
+                        * half of the randomized range to the max
+                        * count. Also avoid underflows.
+                        */
+                       ibs_op_ctl = min(ibs_op_ctl + IBS_RANDOM_MAXCNT_OFFSET,
+                                        0xFFFFULL);
+               }
                if (ibs_caps & IBS_CAPS_OPCNT && ibs_config.dispatched_ops)
-                       val |= IBS_OP_CNT_CTL;
-               val |= IBS_OP_ENABLE;
+                       ibs_op_ctl |= IBS_OP_CNT_CTL;
+               ibs_op_ctl |= IBS_OP_ENABLE;
+               val = op_amd_randomize_ibs_op(ibs_op_ctl);
                wrmsrl(MSR_AMD64_IBSOPCTL, val);
        }
 }