Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author    Linus Torvalds <torvalds@linux-foundation.org>
          Sun, 5 May 2013 18:37:16 +0000 (11:37 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Sun, 5 May 2013 18:37:16 +0000 (11:37 -0700)
Pull perf fixes from Ingo Molnar:
 "Misc fixes plus a small hw-enablement patch for Intel IB model 58
  uncore events"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/intel/lbr: Demand proper privileges for PERF_SAMPLE_BRANCH_KERNEL
  perf/x86/intel/lbr: Fix LBR filter
  perf/x86: Blacklist all MEM_*_RETIRED events for Ivy Bridge
  perf: Fix vmalloc ring buffer pages handling
  perf/x86/intel: Fix unintended variable name reuse
  perf/x86/intel: Add support for IvyBridge model 58 Uncore
  perf/x86/intel: Fix typo in perf_event_intel_uncore.c
  x86: Eliminate irq_mis_count counted in arch_irq_stat

arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_uncore.c

index ffd6050a1de44dd69ec419e01c3dc8d15363dfcd,4a0a462d5e991ef8be9f119a80613aafddef698d..f60d41ff9a97fba11bf28808a65ed775e1605666
@@@ -81,7 -81,6 +81,7 @@@ static struct event_constraint intel_ne
  static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
  {
        INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
 +      INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
        EVENT_EXTRA_END
  };
  
@@@ -109,8 -108,6 +109,8 @@@ static struct event_constraint intel_sn
        INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
        INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
 +      INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
 +      INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
        EVENT_CONSTRAINT_END
  };
  
@@@ -128,10 -125,15 +128,15 @@@ static struct event_constraint intel_iv
        INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
        INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
-       INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
-       INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
-       INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
-       INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /*  MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
+       /*
+        * Errata BV98 -- MEM_*_RETIRED events can leak between counters of SMT
+        * siblings; disable these events because they can corrupt unrelated
+        * counters.
+        */
+       INTEL_EVENT_CONSTRAINT(0xd0, 0x0), /* MEM_UOPS_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xd1, 0x0), /* MEM_LOAD_UOPS_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xd2, 0x0), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xd3, 0x0), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
        EVENT_CONSTRAINT_END
  };
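
The empty counter masks above are the whole trick behind the BV98 blacklist: constraint
matching still hits these events, but the scheduler then finds no counter bit to place
them on, so the event can never go live and corrupt an SMT sibling's counts. A
stand-alone sketch of that matching logic (struct layout and names are illustrative
stand-ins, not the kernel's types):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the kernel's constraint fields. */
    struct constraint {
        uint64_t code;    /* event code to match           */
        uint64_t cmask;   /* which config bits participate */
        uint64_t idxmsk;  /* bitmask of counters allowed   */
    };

    /* First-match lookup, shaped like x86_get_event_constraints(). */
    static const struct constraint *find_constraint(const struct constraint *tbl,
                                                    int n, uint64_t config)
    {
        for (int i = 0; i < n; i++)
            if ((config & tbl[i].cmask) == tbl[i].code)
                return &tbl[i];
        return NULL;
    }

    int main(void)
    {
        /* 0xd1 = MEM_LOAD_UOPS_RETIRED; idxmsk 0x0 is the blacklist. */
        const struct constraint tbl[] = { { 0xd1, 0xff, 0x0 } };
        const struct constraint *c = find_constraint(tbl, 1, 0x01d1);

        if (c && !c->idxmsk)
            printf("event matched, but no counter may hold it\n");
        return 0;
    }
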
  
@@@ -139,7 -141,6 +144,7 @@@ static struct extra_reg intel_westmere_
  {
        INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
        INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
 +      INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
        EVENT_EXTRA_END
  };
  
@@@ -159,8 -160,6 +164,8 @@@ static struct event_constraint intel_ge
  static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
        INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
        INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
 +      INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
        EVENT_EXTRA_END
  };
  
@@@ -170,21 -169,6 +175,21 @@@ static struct extra_reg intel_snbep_ext
        EVENT_EXTRA_END
  };
  
 +EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
 +EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
 +EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
 +
 +struct attribute *nhm_events_attrs[] = {
 +      EVENT_PTR(mem_ld_nhm),
 +      NULL,
 +};
 +
 +struct attribute *snb_events_attrs[] = {
 +      EVENT_PTR(mem_ld_snb),
 +      EVENT_PTR(mem_st_snb),
 +      NULL,
 +};
 +
  static u64 intel_pmu_event_map(int hw_event)
  {
        return intel_perfmon_event_map[hw_event];
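
The EVENT_ATTR_STR() strings above become sysfs event aliases once x86_pmu.cpu_events
is wired up later in this diff, so tools can resolve mem-loads/mem-stores without
hard-coding model-specific encodings. A minimal user-space sketch that reads one back
(standard sysfs path; error handling trimmed):

    #include <stdio.h>

    int main(void)
    {
        /* Aliases exported via x86_pmu.cpu_events land under this path. */
        const char *path =
            "/sys/bus/event_source/devices/cpu/events/mem-loads";
        char buf[128];
        FILE *f = fopen(path, "r");

        if (f && fgets(buf, sizeof(buf), f))
            printf("mem-loads = %s", buf); /* e.g. event=0xcd,umask=0x1,ldlat=3 */
        if (f)
            fclose(f);
        return 0;
    }
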
@@@ -1419,11 -1403,8 +1424,11 @@@ x86_get_event_constraints(struct cpu_hw
  
        if (x86_pmu.event_constraints) {
                for_each_event_constraint(c, x86_pmu.event_constraints) {
 -                      if ((event->hw.config & c->cmask) == c->code)
 +                      if ((event->hw.config & c->cmask) == c->code) {
 +                              /* hw.flags zeroed at initialization */
 +                              event->hw.flags |= c->flags;
                                return c;
 +                      }
                }
        }
  
@@@ -1468,7 -1449,6 +1473,7 @@@ intel_put_shared_regs_event_constraints
  static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
                                        struct perf_event *event)
  {
 +      event->hw.flags = 0;
        intel_put_shared_regs_event_constraints(cpuc, event);
  }
  
@@@ -1792,8 -1772,6 +1797,8 @@@ static void intel_pmu_flush_branch_stac
  
  PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
  
 +PMU_FORMAT_ATTR(ldlat, "config1:0-15");
 +
  static struct attribute *intel_arch3_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_cmask.attr,
  
        &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
 +      &format_attr_ldlat.attr, /* PEBS load latency */
        NULL,
  };
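
The new ldlat format attribute exposes config1 bits 0-15 as the PEBS load-latency
threshold, matching the ldlat=3 defaults in the event strings above. A hedged sketch of
programming the Sandy Bridge mem-loads event through the raw interface; the sample_type
and precise_ip choices here are one plausible setup, not something this patch mandates:

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size           = sizeof(attr);
        attr.type           = PERF_TYPE_RAW;
        attr.config         = 0x1cd; /* event=0xcd, umask=0x1: mem-loads */
        attr.config1        = 3;     /* ldlat threshold, in core cycles  */
        attr.sample_period  = 1000;
        attr.sample_type    = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
        attr.precise_ip     = 2;     /* request PEBS sampling            */
        attr.exclude_kernel = 1;

        /* perf_event_open() has no glibc wrapper; invoke it directly. */
        long fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);

        return fd < 0 ? 1 : 0;
    }
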
  
@@@ -2065,8 -2042,6 +2070,8 @@@ __init int intel_pmu_init(void
                x86_pmu.enable_all = intel_pmu_nhm_enable_all;
                x86_pmu.extra_regs = intel_nehalem_extra_regs;
  
 +              x86_pmu.cpu_events = nhm_events_attrs;
 +
                /* UOPS_ISSUED.STALLED_CYCLES */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
                x86_pmu.extra_regs = intel_westmere_extra_regs;
                x86_pmu.er_flags |= ERF_HAS_RSP_1;
  
 +              x86_pmu.cpu_events = nhm_events_attrs;
 +
                /* UOPS_ISSUED.STALLED_CYCLES */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
                x86_pmu.er_flags |= ERF_HAS_RSP_1;
                x86_pmu.er_flags |= ERF_NO_HT_SHARING;
  
 +              x86_pmu.cpu_events = snb_events_attrs;
 +
                /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
                x86_pmu.er_flags |= ERF_HAS_RSP_1;
                x86_pmu.er_flags |= ERF_NO_HT_SHARING;
  
 +              x86_pmu.cpu_events = snb_events_attrs;
 +
                /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
index d0f9e5aa2151ba320986546fe024c2995f9d0b24,45f6d1336d3fa6d03e4e8a5aec9f572f8a0227bb..52441a2af5380d1d44bbee24001b81ea63d53362
@@@ -17,9 -17,6 +17,9 @@@ static struct event_constraint constrai
  static struct event_constraint constraint_empty =
        EVENT_CONSTRAINT(0, 0, 0);
  
 +#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
 +                              ((1ULL << (n)) - 1)))
 +
  DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
  DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
  DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
@@@ -34,13 -31,9 +34,13 @@@ DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_
  DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
  DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
  DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
 +DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
  DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
 +DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
  DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
 +DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
  DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
 +DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
  DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
  DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
  DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
@@@ -117,21 -110,6 +117,21 @@@ static void uncore_put_constraint(struc
        reg1->alloc = 0;
  }
  
 +static u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
 +{
 +      struct intel_uncore_extra_reg *er;
 +      unsigned long flags;
 +      u64 config;
 +
 +      er = &box->shared_regs[idx];
 +
 +      raw_spin_lock_irqsave(&er->lock, flags);
 +      config = er->config;
 +      raw_spin_unlock_irqrestore(&er->lock, flags);
 +
 +      return config;
 +}
 +
  /* Sandy Bridge-EP uncore support */
  static struct intel_uncore_type snbep_uncore_cbox;
  static struct intel_uncore_type snbep_uncore_pcu;
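
__BITS_VALUE(), hoisted here from the Nehalem-EX section further down, extracts field i
of width n from a packed word; the new cbox code uses it to keep five 6-bit filter
reference counts in one atomic_t, and the PCU code four 8-bit ones. A stand-alone
demonstration of the packing arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    /* Copied from the diff; typeof is a GNU extension. */
    #define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
                                    ((1ULL << (n)) - 1)))

    int main(void)
    {
        uint64_t ref = 0;          /* five 6-bit refcounts, fields 0..4 */

        ref += 1ULL << (2 * 6);    /* take a reference on field 2 */
        ref += 1ULL << (2 * 6);    /* ... and another             */
        ref += 1ULL << (4 * 6);    /* one reference on field 4    */

        printf("f0=%u f2=%u f4=%u\n",
               (unsigned)__BITS_VALUE(ref, 0, 6),
               (unsigned)__BITS_VALUE(ref, 2, 6),
               (unsigned)__BITS_VALUE(ref, 4, 6)); /* f0=0 f2=2 f4=1 */
        return 0;
    }
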
@@@ -227,7 -205,7 +227,7 @@@ static void snbep_uncore_msr_enable_eve
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
  
        if (reg1->idx != EXTRA_REG_NONE)
 -              wrmsrl(reg1->reg, reg1->config);
 +              wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
  
        wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
  }
@@@ -248,6 -226,29 +248,6 @@@ static void snbep_uncore_msr_init_box(s
                wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
  }
  
 -static int snbep_uncore_hw_config(struct intel_uncore_box *box, struct perf_event *event)
 -{
 -      struct hw_perf_event *hwc = &event->hw;
 -      struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
 -
 -      if (box->pmu->type == &snbep_uncore_cbox) {
 -              reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
 -                      SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
 -              reg1->config = event->attr.config1 &
 -                      SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK;
 -      } else {
 -              if (box->pmu->type == &snbep_uncore_pcu) {
 -                      reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
 -                      reg1->config = event->attr.config1 & SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK;
 -              } else {
 -                      return 0;
 -              }
 -      }
 -      reg1->idx = 0;
 -
 -      return 0;
 -}
 -
  static struct attribute *snbep_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
@@@ -344,16 -345,16 +344,16 @@@ static struct attribute_group snbep_unc
        .attrs = snbep_uncore_qpi_formats_attr,
  };
  
 +#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                    \
 +      .init_box       = snbep_uncore_msr_init_box,            \
 +      .disable_box    = snbep_uncore_msr_disable_box,         \
 +      .enable_box     = snbep_uncore_msr_enable_box,          \
 +      .disable_event  = snbep_uncore_msr_disable_event,       \
 +      .enable_event   = snbep_uncore_msr_enable_event,        \
 +      .read_counter   = uncore_msr_read_counter
 +
  static struct intel_uncore_ops snbep_uncore_msr_ops = {
 -      .init_box       = snbep_uncore_msr_init_box,
 -      .disable_box    = snbep_uncore_msr_disable_box,
 -      .enable_box     = snbep_uncore_msr_enable_box,
 -      .disable_event  = snbep_uncore_msr_disable_event,
 -      .enable_event   = snbep_uncore_msr_enable_event,
 -      .read_counter   = uncore_msr_read_counter,
 -      .get_constraint = uncore_get_constraint,
 -      .put_constraint = uncore_put_constraint,
 -      .hw_config      = snbep_uncore_hw_config,
 +      SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
  };
  
  static struct intel_uncore_ops snbep_uncore_pci_ops = {
@@@ -371,7 -372,6 +371,7 @@@ static struct event_constraint snbep_un
        UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
 +      UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
@@@ -421,14 -421,6 +421,14 @@@ static struct event_constraint snbep_un
        UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
 +      UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
 +      UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
 +      UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
 +      UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
 +      UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
 +      UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
 +      UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
 +      UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
 +      UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
 +      UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
        EVENT_CONSTRAINT_END
  };
  
@@@ -456,145 -446,6 +456,145 @@@ static struct intel_uncore_type snbep_u
        .format_group   = &snbep_uncore_ubox_format_group,
  };
  
 +static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
 +      SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
 +                                SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
 +      EVENT_EXTRA_END
 +};
 +
 +static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
 +{
 +      struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
 +      struct intel_uncore_extra_reg *er = &box->shared_regs[0];
 +      int i;
 +
 +      if (uncore_box_is_fake(box))
 +              return;
 +
 +      for (i = 0; i < 5; i++) {
 +              if (reg1->alloc & (0x1 << i))
 +                      atomic_sub(1 << (i * 6), &er->ref);
 +      }
 +      reg1->alloc = 0;
 +}
 +
 +static struct event_constraint *
 +__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
 +                          u64 (*cbox_filter_mask)(int fields))
 +{
 +      struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
 +      struct intel_uncore_extra_reg *er = &box->shared_regs[0];
 +      int i, alloc = 0;
 +      unsigned long flags;
 +      u64 mask;
 +
 +      if (reg1->idx == EXTRA_REG_NONE)
 +              return NULL;
 +
 +      raw_spin_lock_irqsave(&er->lock, flags);
 +      for (i = 0; i < 5; i++) {
 +              if (!(reg1->idx & (0x1 << i)))
 +                      continue;
 +              if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
 +                      continue;
 +
 +              mask = cbox_filter_mask(0x1 << i);
 +              if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
 +                  !((reg1->config ^ er->config) & mask)) {
 +                      atomic_add(1 << (i * 6), &er->ref);
 +                      er->config &= ~mask;
 +                      er->config |= reg1->config & mask;
 +                      alloc |= (0x1 << i);
 +              } else {
 +                      break;
 +              }
 +      }
 +      raw_spin_unlock_irqrestore(&er->lock, flags);
 +      if (i < 5)
 +              goto fail;
 +
 +      if (!uncore_box_is_fake(box))
 +              reg1->alloc |= alloc;
 +
 +      return NULL;
 +fail:
 +      for (; i >= 0; i--) {
 +              if (alloc & (0x1 << i))
 +                      atomic_sub(1 << (i * 6), &er->ref);
 +      }
 +      return &constraint_empty;
 +}
 +
 +static u64 snbep_cbox_filter_mask(int fields)
 +{
 +      u64 mask = 0;
 +
 +      if (fields & 0x1)
 +              mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
 +      if (fields & 0x2)
 +              mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
 +      if (fields & 0x4)
 +              mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
 +      if (fields & 0x8)
 +              mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
 +
 +      return mask;
 +}
 +
 +static struct event_constraint *
 +snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
 +{
 +      return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
 +}
 +
 +static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
 +{
 +      struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
 +      struct extra_reg *er;
 +      int idx = 0;
 +
 +      for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
 +              if (er->event != (event->hw.config & er->config_mask))
 +                      continue;
 +              idx |= er->idx;
 +      }
 +
 +      if (idx) {
 +              reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
 +                      SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
 +              reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
 +              reg1->idx = idx;
 +      }
 +      return 0;
 +}
 +
 +static struct intel_uncore_ops snbep_uncore_cbox_ops = {
 +      SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
 +      .hw_config              = snbep_cbox_hw_config,
 +      .get_constraint         = snbep_cbox_get_constraint,
 +      .put_constraint         = snbep_cbox_put_constraint,
 +};
 +
  static struct intel_uncore_type snbep_uncore_cbox = {
        .name                   = "cbox",
        .num_counters           = 4,
        .msr_offset             = SNBEP_CBO_MSR_OFFSET,
        .num_shared_regs        = 1,
        .constraints            = snbep_uncore_cbox_constraints,
 -      .ops                    = &snbep_uncore_msr_ops,
 +      .ops                    = &snbep_uncore_cbox_ops,
        .format_group           = &snbep_uncore_cbox_format_group,
  };
  
 +static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
 +{
 +      struct hw_perf_event *hwc = &event->hw;
 +      struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
 +      u64 config = reg1->config;
 +
 +      if (new_idx > reg1->idx)
 +              config <<= 8 * (new_idx - reg1->idx);
 +      else
 +              config >>= 8 * (reg1->idx - new_idx);
 +
 +      if (modify) {
 +              hwc->config += new_idx - reg1->idx;
 +              reg1->config = config;
 +              reg1->idx = new_idx;
 +      }
 +      return config;
 +}
 +
 +static struct event_constraint *
 +snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
 +{
 +      struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
 +      struct intel_uncore_extra_reg *er = &box->shared_regs[0];
 +      unsigned long flags;
 +      int idx = reg1->idx;
 +      u64 mask, config1 = reg1->config;
 +      bool ok = false;
 +
 +      if (reg1->idx == EXTRA_REG_NONE ||
 +          (!uncore_box_is_fake(box) && reg1->alloc))
 +              return NULL;
 +again:
 +      mask = 0xff << (idx * 8);
 +      raw_spin_lock_irqsave(&er->lock, flags);
 +      if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
 +          !((config1 ^ er->config) & mask)) {
 +              atomic_add(1 << (idx * 8), &er->ref);
 +              er->config &= ~mask;
 +              er->config |= config1 & mask;
 +              ok = true;
 +      }
 +      raw_spin_unlock_irqrestore(&er->lock, flags);
 +
 +      if (!ok) {
 +              idx = (idx + 1) % 4;
 +              if (idx != reg1->idx) {
 +                      config1 = snbep_pcu_alter_er(event, idx, false);
 +                      goto again;
 +              }
 +              return &constraint_empty;
 +      }
 +
 +      if (!uncore_box_is_fake(box)) {
 +              if (idx != reg1->idx)
 +                      snbep_pcu_alter_er(event, idx, true);
 +              reg1->alloc = 1;
 +      }
 +      return NULL;
 +}
 +
 +static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
 +{
 +      struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
 +      struct intel_uncore_extra_reg *er = &box->shared_regs[0];
 +
 +      if (uncore_box_is_fake(box) || !reg1->alloc)
 +              return;
 +
 +      atomic_sub(1 << (reg1->idx * 8), &er->ref);
 +      reg1->alloc = 0;
 +}
 +
 +static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
 +{
 +      struct hw_perf_event *hwc = &event->hw;
 +      struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
 +      int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
 +
 +      if (ev_sel >= 0xb && ev_sel <= 0xe) {
 +              reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
 +              reg1->idx = ev_sel - 0xb;
 +              reg1->config = event->attr.config1 & (0xff << reg1->idx);
 +      }
 +      return 0;
 +}
 +
 +static struct intel_uncore_ops snbep_uncore_pcu_ops = {
 +      SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
 +      .hw_config              = snbep_pcu_hw_config,
 +      .get_constraint         = snbep_pcu_get_constraint,
 +      .put_constraint         = snbep_pcu_put_constraint,
 +};
 +
  static struct intel_uncore_type snbep_uncore_pcu = {
        .name                   = "pcu",
        .num_counters           = 4,
        .event_mask             = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCU_MSR_PMON_BOX_CTL,
        .num_shared_regs        = 1,
 -      .ops                    = &snbep_uncore_msr_ops,
 +      .ops                    = &snbep_uncore_pcu_ops,
        .format_group           = &snbep_uncore_pcu_format_group,
  };
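
snbep_pcu_hw_config() above maps event selects 0xb-0xe onto filter bands 0-3, each an
8-bit field of config1, and snbep_pcu_alter_er() migrates an event to a free band by
shifting its field eight bits per step while get_constraint retries. A sketch of just
that re-indexing arithmetic (function name is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Shift an 8-bit band value from band idx to band new_idx,
     * mirroring the arithmetic in snbep_pcu_alter_er(). */
    static uint64_t move_band(uint64_t config, int idx, int new_idx)
    {
        if (new_idx > idx)
            return config << (8 * (new_idx - idx));
        return config >> (8 * (idx - new_idx));
    }

    int main(void)
    {
        uint64_t config1 = 0x7fULL << 8; /* threshold 0x7f in band 1 */

        /* Band 1 conflicts with another event; retry on band 2. */
        config1 = move_band(config1, 1, 2);
        printf("band2 = 0x%llx\n",
               (unsigned long long)((config1 >> 16) & 0xff)); /* 0x7f */
        return 0;
    }
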
  
@@@ -787,63 -544,55 +787,63 @@@ static struct intel_uncore_type snbep_u
        SNBEP_UNCORE_PCI_COMMON_INIT(),
  };
  
 +enum {
 +      SNBEP_PCI_UNCORE_HA,
 +      SNBEP_PCI_UNCORE_IMC,
 +      SNBEP_PCI_UNCORE_QPI,
 +      SNBEP_PCI_UNCORE_R2PCIE,
 +      SNBEP_PCI_UNCORE_R3QPI,
 +};
 +
  static struct intel_uncore_type *snbep_pci_uncores[] = {
 -      &snbep_uncore_ha,
 -      &snbep_uncore_imc,
 -      &snbep_uncore_qpi,
 -      &snbep_uncore_r2pcie,
 -      &snbep_uncore_r3qpi,
 +      [SNBEP_PCI_UNCORE_HA]           = &snbep_uncore_ha,
 +      [SNBEP_PCI_UNCORE_IMC]          = &snbep_uncore_imc,
 +      [SNBEP_PCI_UNCORE_QPI]          = &snbep_uncore_qpi,
 +      [SNBEP_PCI_UNCORE_R2PCIE]       = &snbep_uncore_r2pcie,
 +      [SNBEP_PCI_UNCORE_R3QPI]        = &snbep_uncore_r3qpi,
        NULL,
  };
  
  static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
        { /* Home Agent */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
 -              .driver_data = (unsigned long)&snbep_uncore_ha,
 +              .driver_data = SNBEP_PCI_UNCORE_HA,
        },
        { /* MC Channel 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
 -              .driver_data = (unsigned long)&snbep_uncore_imc,
 +              .driver_data = SNBEP_PCI_UNCORE_IMC,
        },
        { /* MC Channel 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
 -              .driver_data = (unsigned long)&snbep_uncore_imc,
 +              .driver_data = SNBEP_PCI_UNCORE_IMC,
        },
        { /* MC Channel 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
 -              .driver_data = (unsigned long)&snbep_uncore_imc,
 +              .driver_data = SNBEP_PCI_UNCORE_IMC,
        },
        { /* MC Channel 3 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
 -              .driver_data = (unsigned long)&snbep_uncore_imc,
 +              .driver_data = SNBEP_PCI_UNCORE_IMC,
        },
        { /* QPI Port 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
 -              .driver_data = (unsigned long)&snbep_uncore_qpi,
 +              .driver_data = SNBEP_PCI_UNCORE_QPI,
        },
        { /* QPI Port 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
 -              .driver_data = (unsigned long)&snbep_uncore_qpi,
 +              .driver_data = SNBEP_PCI_UNCORE_QPI,
        },
 -      { /* P2PCIe */
 +      { /* R2PCIe */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
 -              .driver_data = (unsigned long)&snbep_uncore_r2pcie,
 +              .driver_data = SNBEP_PCI_UNCORE_R2PCIE,
        },
        { /* R3QPI Link 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
 -              .driver_data = (unsigned long)&snbep_uncore_r3qpi,
 +              .driver_data = SNBEP_PCI_UNCORE_R3QPI,
        },
        { /* R3QPI Link 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
 -              .driver_data = (unsigned long)&snbep_uncore_r3qpi,
 +              .driver_data = SNBEP_PCI_UNCORE_R3QPI,
        },
        { /* end: all zeroes */ }
  };
@@@ -856,7 -605,7 +856,7 @@@ static struct pci_driver snbep_uncore_p
  /*
   * build pci bus to socket mapping
   */
 -static int snbep_pci2phy_map_init(void)
 +static int snbep_pci2phy_map_init(int devid)
  {
        struct pci_dev *ubox_dev = NULL;
        int i, bus, nodeid;
  
        while (1) {
                /* find the UBOX device */
 -              ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
 -                                      PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX,
 -                                      ubox_dev);
 +              ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
                if (!ubox_dev)
                        break;
                bus = ubox_dev->bus->number;
                                break;
                        }
                }
 -      };
 +      }
  
        if (ubox_dev)
                pci_dev_put(ubox_dev);
  }
  /* end of Sandy Bridge-EP uncore support */
  
 +/* IvyTown uncore support */
 +static void ivt_uncore_msr_init_box(struct intel_uncore_box *box)
 +{
 +      unsigned msr = uncore_msr_box_ctl(box);
 +      if (msr)
 +              wrmsrl(msr, IVT_PMON_BOX_CTL_INT);
 +}
 +
 +static void ivt_uncore_pci_init_box(struct intel_uncore_box *box)
 +{
 +      struct pci_dev *pdev = box->pci_dev;
 +
 +      pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVT_PMON_BOX_CTL_INT);
 +}
 +
 +#define IVT_UNCORE_MSR_OPS_COMMON_INIT()                      \
 +      .init_box       = ivt_uncore_msr_init_box,              \
 +      .disable_box    = snbep_uncore_msr_disable_box,         \
 +      .enable_box     = snbep_uncore_msr_enable_box,          \
 +      .disable_event  = snbep_uncore_msr_disable_event,       \
 +      .enable_event   = snbep_uncore_msr_enable_event,        \
 +      .read_counter   = uncore_msr_read_counter
 +
 +static struct intel_uncore_ops ivt_uncore_msr_ops = {
 +      IVT_UNCORE_MSR_OPS_COMMON_INIT(),
 +};
 +
 +static struct intel_uncore_ops ivt_uncore_pci_ops = {
 +      .init_box       = ivt_uncore_pci_init_box,
 +      .disable_box    = snbep_uncore_pci_disable_box,
 +      .enable_box     = snbep_uncore_pci_enable_box,
 +      .disable_event  = snbep_uncore_pci_disable_event,
 +      .enable_event   = snbep_uncore_pci_enable_event,
 +      .read_counter   = snbep_uncore_pci_read_counter,
 +};
 +
 +#define IVT_UNCORE_PCI_COMMON_INIT()                          \
 +      .perf_ctr       = SNBEP_PCI_PMON_CTR0,                  \
 +      .event_ctl      = SNBEP_PCI_PMON_CTL0,                  \
 +      .event_mask     = IVT_PMON_RAW_EVENT_MASK,              \
 +      .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,               \
 +      .ops            = &ivt_uncore_pci_ops,                  \
 +      .format_group   = &ivt_uncore_format_group
 +
 +static struct attribute *ivt_uncore_formats_attr[] = {
 +      &format_attr_event.attr,
 +      &format_attr_umask.attr,
 +      &format_attr_edge.attr,
 +      &format_attr_inv.attr,
 +      &format_attr_thresh8.attr,
 +      NULL,
 +};
 +
 +static struct attribute *ivt_uncore_ubox_formats_attr[] = {
 +      &format_attr_event.attr,
 +      &format_attr_umask.attr,
 +      &format_attr_edge.attr,
 +      &format_attr_inv.attr,
 +      &format_attr_thresh5.attr,
 +      NULL,
 +};
 +
 +static struct attribute *ivt_uncore_cbox_formats_attr[] = {
 +      &format_attr_event.attr,
 +      &format_attr_umask.attr,
 +      &format_attr_edge.attr,
 +      &format_attr_tid_en.attr,
 +      &format_attr_thresh8.attr,
 +      &format_attr_filter_tid.attr,
 +      &format_attr_filter_link.attr,
 +      &format_attr_filter_state2.attr,
 +      &format_attr_filter_nid2.attr,
 +      &format_attr_filter_opc2.attr,
 +      NULL,
 +};
 +
 +static struct attribute *ivt_uncore_pcu_formats_attr[] = {
 +      &format_attr_event_ext.attr,
 +      &format_attr_occ_sel.attr,
 +      &format_attr_edge.attr,
 +      &format_attr_thresh5.attr,
 +      &format_attr_occ_invert.attr,
 +      &format_attr_occ_edge.attr,
 +      &format_attr_filter_band0.attr,
 +      &format_attr_filter_band1.attr,
 +      &format_attr_filter_band2.attr,
 +      &format_attr_filter_band3.attr,
 +      NULL,
 +};
 +
 +static struct attribute *ivt_uncore_qpi_formats_attr[] = {
 +      &format_attr_event_ext.attr,
 +      &format_attr_umask.attr,
 +      &format_attr_edge.attr,
 +      &format_attr_thresh8.attr,
 +      NULL,
 +};
 +
 +static struct attribute_group ivt_uncore_format_group = {
 +      .name = "format",
 +      .attrs = ivt_uncore_formats_attr,
 +};
 +
 +static struct attribute_group ivt_uncore_ubox_format_group = {
 +      .name = "format",
 +      .attrs = ivt_uncore_ubox_formats_attr,
 +};
 +
 +static struct attribute_group ivt_uncore_cbox_format_group = {
 +      .name = "format",
 +      .attrs = ivt_uncore_cbox_formats_attr,
 +};
 +
 +static struct attribute_group ivt_uncore_pcu_format_group = {
 +      .name = "format",
 +      .attrs = ivt_uncore_pcu_formats_attr,
 +};
 +
 +static struct attribute_group ivt_uncore_qpi_format_group = {
 +      .name = "format",
 +      .attrs = ivt_uncore_qpi_formats_attr,
 +};
 +
 +static struct intel_uncore_type ivt_uncore_ubox = {
 +      .name           = "ubox",
 +      .num_counters   = 2,
 +      .num_boxes      = 1,
 +      .perf_ctr_bits  = 44,
 +      .fixed_ctr_bits = 48,
 +      .perf_ctr       = SNBEP_U_MSR_PMON_CTR0,
 +      .event_ctl      = SNBEP_U_MSR_PMON_CTL0,
 +      .event_mask     = IVT_U_MSR_PMON_RAW_EVENT_MASK,
 +      .fixed_ctr      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
 +      .fixed_ctl      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
 +      .ops            = &ivt_uncore_msr_ops,
 +      .format_group   = &ivt_uncore_ubox_format_group,
 +};
 +
 +static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
 +      SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
 +                                SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
 +      SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
 +      EVENT_EXTRA_END
 +};
 +
 +static u64 ivt_cbox_filter_mask(int fields)
 +{
 +      u64 mask = 0;
 +
 +      if (fields & 0x1)
 +              mask |= IVT_CB0_MSR_PMON_BOX_FILTER_TID;
 +      if (fields & 0x2)
 +              mask |= IVT_CB0_MSR_PMON_BOX_FILTER_LINK;
 +      if (fields & 0x4)
 +              mask |= IVT_CB0_MSR_PMON_BOX_FILTER_STATE;
 +      if (fields & 0x8)
 +              mask |= IVT_CB0_MSR_PMON_BOX_FILTER_NID;
 +      if (fields & 0x10)
 +              mask |= IVT_CB0_MSR_PMON_BOX_FILTER_OPC;
 +
 +      return mask;
 +}
 +
 +static struct event_constraint *
 +ivt_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
 +{
 +      return __snbep_cbox_get_constraint(box, event, ivt_cbox_filter_mask);
 +}
 +
 +static int ivt_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
 +{
 +      struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
 +      struct extra_reg *er;
 +      int idx = 0;
 +
 +      for (er = ivt_uncore_cbox_extra_regs; er->msr; er++) {
 +              if (er->event != (event->hw.config & er->config_mask))
 +                      continue;
 +              idx |= er->idx;
 +      }
 +
 +      if (idx) {
 +              reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
 +                      SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
 +              reg1->config = event->attr.config1 & ivt_cbox_filter_mask(idx);
 +              reg1->idx = idx;
 +      }
 +      return 0;
 +}
 +
 +static void ivt_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
 +{
 +      struct hw_perf_event *hwc = &event->hw;
 +      struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
 +
 +      if (reg1->idx != EXTRA_REG_NONE) {
 +              u64 filter = uncore_shared_reg_config(box, 0);
 +              wrmsrl(reg1->reg, filter & 0xffffffff);
 +              wrmsrl(reg1->reg + 6, filter >> 32);
 +      }
 +
 +      wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
 +}
 +
 +static struct intel_uncore_ops ivt_uncore_cbox_ops = {
 +      .init_box               = ivt_uncore_msr_init_box,
 +      .disable_box            = snbep_uncore_msr_disable_box,
 +      .enable_box             = snbep_uncore_msr_enable_box,
 +      .disable_event          = snbep_uncore_msr_disable_event,
 +      .enable_event           = ivt_cbox_enable_event,
 +      .read_counter           = uncore_msr_read_counter,
 +      .hw_config              = ivt_cbox_hw_config,
 +      .get_constraint         = ivt_cbox_get_constraint,
 +      .put_constraint         = snbep_cbox_put_constraint,
 +};
 +
 +static struct intel_uncore_type ivt_uncore_cbox = {
 +      .name                   = "cbox",
 +      .num_counters           = 4,
 +      .num_boxes              = 15,
 +      .perf_ctr_bits          = 44,
 +      .event_ctl              = SNBEP_C0_MSR_PMON_CTL0,
 +      .perf_ctr               = SNBEP_C0_MSR_PMON_CTR0,
 +      .event_mask             = IVT_CBO_MSR_PMON_RAW_EVENT_MASK,
 +      .box_ctl                = SNBEP_C0_MSR_PMON_BOX_CTL,
 +      .msr_offset             = SNBEP_CBO_MSR_OFFSET,
 +      .num_shared_regs        = 1,
 +      .constraints            = snbep_uncore_cbox_constraints,
 +      .ops                    = &ivt_uncore_cbox_ops,
 +      .format_group           = &ivt_uncore_cbox_format_group,
 +};
 +
 +static struct intel_uncore_ops ivt_uncore_pcu_ops = {
 +      IVT_UNCORE_MSR_OPS_COMMON_INIT(),
 +      .hw_config              = snbep_pcu_hw_config,
 +      .get_constraint         = snbep_pcu_get_constraint,
 +      .put_constraint         = snbep_pcu_put_constraint,
 +};
 +
 +static struct intel_uncore_type ivt_uncore_pcu = {
 +      .name                   = "pcu",
 +      .num_counters           = 4,
 +      .num_boxes              = 1,
 +      .perf_ctr_bits          = 48,
 +      .perf_ctr               = SNBEP_PCU_MSR_PMON_CTR0,
 +      .event_ctl              = SNBEP_PCU_MSR_PMON_CTL0,
 +      .event_mask             = IVT_PCU_MSR_PMON_RAW_EVENT_MASK,
 +      .box_ctl                = SNBEP_PCU_MSR_PMON_BOX_CTL,
 +      .num_shared_regs        = 1,
 +      .ops                    = &ivt_uncore_pcu_ops,
 +      .format_group           = &ivt_uncore_pcu_format_group,
 +};
 +
 +static struct intel_uncore_type *ivt_msr_uncores[] = {
 +      &ivt_uncore_ubox,
 +      &ivt_uncore_cbox,
 +      &ivt_uncore_pcu,
 +      NULL,
 +};
 +
 +static struct intel_uncore_type ivt_uncore_ha = {
 +      .name           = "ha",
 +      .num_counters   = 4,
 +      .num_boxes      = 2,
 +      .perf_ctr_bits  = 48,
 +      IVT_UNCORE_PCI_COMMON_INIT(),
 +};
 +
 +static struct intel_uncore_type ivt_uncore_imc = {
 +      .name           = "imc",
 +      .num_counters   = 4,
 +      .num_boxes      = 8,
 +      .perf_ctr_bits  = 48,
 +      .fixed_ctr_bits = 48,
 +      .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
 +      .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
 +      IVT_UNCORE_PCI_COMMON_INIT(),
 +};
 +
 +static struct intel_uncore_type ivt_uncore_qpi = {
 +      .name           = "qpi",
 +      .num_counters   = 4,
 +      .num_boxes      = 3,
 +      .perf_ctr_bits  = 48,
 +      .perf_ctr       = SNBEP_PCI_PMON_CTR0,
 +      .event_ctl      = SNBEP_PCI_PMON_CTL0,
 +      .event_mask     = IVT_QPI_PCI_PMON_RAW_EVENT_MASK,
 +      .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,
 +      .ops            = &ivt_uncore_pci_ops,
 +      .format_group   = &ivt_uncore_qpi_format_group,
 +};
 +
 +static struct intel_uncore_type ivt_uncore_r2pcie = {
 +      .name           = "r2pcie",
 +      .num_counters   = 4,
 +      .num_boxes      = 1,
 +      .perf_ctr_bits  = 44,
 +      .constraints    = snbep_uncore_r2pcie_constraints,
 +      IVT_UNCORE_PCI_COMMON_INIT(),
 +};
 +
 +static struct intel_uncore_type ivt_uncore_r3qpi = {
 +      .name           = "r3qpi",
 +      .num_counters   = 3,
 +      .num_boxes      = 2,
 +      .perf_ctr_bits  = 44,
 +      .constraints    = snbep_uncore_r3qpi_constraints,
 +      IVT_UNCORE_PCI_COMMON_INIT(),
 +};
 +
 +enum {
 +      IVT_PCI_UNCORE_HA,
 +      IVT_PCI_UNCORE_IMC,
 +      IVT_PCI_UNCORE_QPI,
 +      IVT_PCI_UNCORE_R2PCIE,
 +      IVT_PCI_UNCORE_R3QPI,
 +};
 +
 +static struct intel_uncore_type *ivt_pci_uncores[] = {
 +      [IVT_PCI_UNCORE_HA]     = &ivt_uncore_ha,
 +      [IVT_PCI_UNCORE_IMC]    = &ivt_uncore_imc,
 +      [IVT_PCI_UNCORE_QPI]    = &ivt_uncore_qpi,
 +      [IVT_PCI_UNCORE_R2PCIE] = &ivt_uncore_r2pcie,
 +      [IVT_PCI_UNCORE_R3QPI]  = &ivt_uncore_r3qpi,
 +      NULL,
 +};
 +
 +static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = {
 +      { /* Home Agent 0 */
 +              PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
 +              .driver_data = IVT_PCI_UNCORE_HA,
 +      },
 +      { /* Home Agent 1 */
 +              PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
 +              .driver_data = IVT_PCI_UNCORE_HA,
 +      },
 +      { /* MC0 Channel 0 */
 +              PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
 +              .driver_data = IVT_PCI_UNCORE_IMC,
 +      },
 +      { /* MC0 Channel 1 */
 +              PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
 +              .driver_data = IVT_PCI_UNCORE_IMC,
 +      },
 +      { /* MC0 Channel 3 */
 +              PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
 +              .driver_data = IVT_PCI_UNCORE_IMC,
 +      },
 +      { /* MC0 Channel 4 */
 +              PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
 +              .driver_data = IVT_PCI_UNCORE_IMC,
 +      },
 +      { /* MC1 Channel 0 */
 +              PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
 +              .driver_data = IVT_PCI_UNCORE_IMC,
 +      },
 +      { /* MC1 Channel 1 */
 +              PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
 +              .driver_data = IVT_PCI_UNCORE_IMC,
 +      },
 +      { /* MC1 Channel 3 */
 +              PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
 +              .driver_data = IVT_PCI_UNCORE_IMC,
 +      },
 +      { /* MC1 Channel 4 */
 +              PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
 +              .driver_data = IVT_PCI_UNCORE_IMC,
 +      },
 +      { /* QPI0 Port 0 */
 +              PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
 +              .driver_data = IVT_PCI_UNCORE_QPI,
 +      },
 +      { /* QPI0 Port 1 */
 +              PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
 +              .driver_data = IVT_PCI_UNCORE_QPI,
 +      },
 +      { /* QPI1 Port 2 */
 +              PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
 +              .driver_data = IVT_PCI_UNCORE_QPI,
 +      },
 +      { /* R2PCIe */
 +              PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
 +              .driver_data = IVT_PCI_UNCORE_R2PCIE,
 +      },
 +      { /* R3QPI0 Link 0 */
 +              PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
 +              .driver_data = IVT_PCI_UNCORE_R3QPI,
 +      },
 +      { /* R3QPI0 Link 1 */
 +              PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
 +              .driver_data = IVT_PCI_UNCORE_R3QPI,
 +      },
 +      { /* R3QPI1 Link 2 */
 +              PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
 +              .driver_data = IVT_PCI_UNCORE_R3QPI,
 +      },
 +      { /* end: all zeroes */ }
 +};
 +
 +static struct pci_driver ivt_uncore_pci_driver = {
 +      .name           = "ivt_uncore",
 +      .id_table       = ivt_uncore_pci_ids,
 +};
 +/* end of IvyTown uncore support */
 +
  /* Sandy Bridge uncore support */
  static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
  {
@@@ -1491,6 -808,9 +1491,6 @@@ static struct intel_uncore_type *nhm_ms
  /* end of Nehalem uncore support */
  
  /* Nehalem-EX uncore support */
 -#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
 -                              ((1ULL << (n)) - 1)))
 -
  DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
  DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
  DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
@@@ -1841,7 -1161,7 +1841,7 @@@ static struct extra_reg nhmex_uncore_mb
  };
  
  /* Nehalem-EX or Westmere-EX ? */
 -bool uncore_nhmex;
 +static bool uncore_nhmex;
  
  static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
  {
@@@ -1919,7 -1239,7 +1919,7 @@@ static void nhmex_mbox_put_shared_reg(s
        atomic_sub(1 << (idx * 8), &er->ref);
  }
  
 -u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
 +static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
  {
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
@@@ -2234,7 -1554,7 +2234,7 @@@ static struct intel_uncore_type nhmex_u
        .format_group           = &nhmex_uncore_mbox_format_group,
  };
  
 -void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
 +static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
  {
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
@@@ -2404,6 -1724,21 +2404,6 @@@ static int nhmex_rbox_hw_config(struct 
        return 0;
  }
  
 -static u64 nhmex_rbox_shared_reg_config(struct intel_uncore_box *box, int idx)
 -{
 -      struct intel_uncore_extra_reg *er;
 -      unsigned long flags;
 -      u64 config;
 -
 -      er = &box->shared_regs[idx];
 -
 -      raw_spin_lock_irqsave(&er->lock, flags);
 -      config = er->config;
 -      raw_spin_unlock_irqrestore(&er->lock, flags);
 -
 -      return config;
 -}
 -
  static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
  {
        struct hw_perf_event *hwc = &event->hw;
        case 2:
        case 3:
                wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
 -                      nhmex_rbox_shared_reg_config(box, 2 + (idx / 6) * 5));
 +                      uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
                break;
        case 4:
                wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
@@@ -2950,7 -2285,7 +2950,7 @@@ out
        return ret;
  }
  
 -int uncore_pmu_event_init(struct perf_event *event)
 +static int uncore_pmu_event_init(struct perf_event *event)
  {
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
@@@ -3093,7 -2428,7 +3093,7 @@@ static void __init uncore_types_exit(st
  static int __init uncore_type_init(struct intel_uncore_type *type)
  {
        struct intel_uncore_pmu *pmus;
-       struct attribute_group *events_group;
+       struct attribute_group *attr_group;
        struct attribute **attrs;
        int i, j;
  
  
        type->unconstrainted = (struct event_constraint)
                __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
 -                              0, type->num_counters, 0);
 +                              0, type->num_counters, 0, 0);
  
        for (i = 0; i < type->num_boxes; i++) {
                pmus[i].func_id = -1;
                while (type->event_descs[i].attr.attr.name)
                        i++;
  
-               events_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
-                                       sizeof(*events_group), GFP_KERNEL);
-               if (!events_group)
+               attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
+                                       sizeof(*attr_group), GFP_KERNEL);
+               if (!attr_group)
                        goto fail;
  
-               attrs = (struct attribute **)(events_group + 1);
-               events_group->name = "events";
-               events_group->attrs = attrs;
+               attrs = (struct attribute **)(attr_group + 1);
+               attr_group->name = "events";
+               attr_group->attrs = attrs;
  
                for (j = 0; j < i; j++)
                        attrs[j] = &type->event_descs[j].attr.attr;
  
-               type->events_group = events_group;
+               type->events_group = attr_group;
        }
  
        type->pmu_group = &uncore_pmu_attr_group;
@@@ -3221,8 -2556,6 +3221,8 @@@ static void uncore_pci_remove(struct pc
        if (WARN_ON_ONCE(phys_id != box->phys_id))
                return;
  
 +      pci_set_drvdata(pdev, NULL);
 +
        raw_spin_lock(&uncore_box_lock);
        list_del(&box->list);
        raw_spin_unlock(&uncore_box_lock);
  static int uncore_pci_probe(struct pci_dev *pdev,
                            const struct pci_device_id *id)
  {
 -      struct intel_uncore_type *type;
 -
 -      type = (struct intel_uncore_type *)id->driver_data;
 -
 -      return uncore_pci_add(type, pdev);
 +      return uncore_pci_add(pci_uncores[id->driver_data], pdev);
  }
  
  static int __init uncore_pci_init(void)
  
        switch (boot_cpu_data.x86_model) {
        case 45: /* Sandy Bridge-EP */
 -              ret = snbep_pci2phy_map_init();
 +              ret = snbep_pci2phy_map_init(0x3ce0);
                if (ret)
                        return ret;
                pci_uncores = snbep_pci_uncores;
                uncore_pci_driver = &snbep_uncore_pci_driver;
                break;
 +      case 62: /* IvyTown */
 +              ret = snbep_pci2phy_map_init(0x0e1e);
 +              if (ret)
 +                      return ret;
 +              pci_uncores = ivt_pci_uncores;
 +              uncore_pci_driver = &ivt_uncore_pci_driver;
 +              break;
        default:
                return 0;
        }
@@@ -3292,21 -2622,6 +3292,21 @@@ static void __init uncore_pci_exit(void
        }
  }
  
 +/* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */
 +static LIST_HEAD(boxes_to_free);
 +
 +static void __cpuinit uncore_kfree_boxes(void)
 +{
 +      struct intel_uncore_box *box;
 +
 +      while (!list_empty(&boxes_to_free)) {
 +              box = list_entry(boxes_to_free.next,
 +                               struct intel_uncore_box, list);
 +              list_del(&box->list);
 +              kfree(box);
 +      }
 +}
 +
  static void __cpuinit uncore_cpu_dying(int cpu)
  {
        struct intel_uncore_type *type;
                        box = *per_cpu_ptr(pmu->box, cpu);
                        *per_cpu_ptr(pmu->box, cpu) = NULL;
                        if (box && atomic_dec_and_test(&box->refcnt))
 -                              kfree(box);
 +                              list_add(&box->list, &boxes_to_free);
                }
        }
  }
@@@ -3351,11 -2666,8 +3351,11 @@@ static int __cpuinit uncore_cpu_startin
                                if (exist && exist->phys_id == phys_id) {
                                        atomic_inc(&exist->refcnt);
                                        *per_cpu_ptr(pmu->box, cpu) = exist;
 -                                      kfree(box);
 -                                      box = NULL;
 +                                      if (box) {
 +                                              list_add(&box->list,
 +                                                       &boxes_to_free);
 +                                              box = NULL;
 +                                      }
                                        break;
                                }
                        }
@@@ -3494,10 -2806,6 +3494,10 @@@ static in
        case CPU_DYING:
                uncore_cpu_dying(cpu);
                break;
 +      case CPU_ONLINE:
 +      case CPU_DEAD:
 +              uncore_kfree_boxes();
 +              break;
        default:
                break;
        }
@@@ -3545,11 -2853,12 +3545,12 @@@ static int __init uncore_cpu_init(void
                msr_uncores = nhm_msr_uncores;
                break;
        case 42: /* Sandy Bridge */
+       case 58: /* Ivy Bridge */
                if (snb_uncore_cbox.num_boxes > max_cores)
                        snb_uncore_cbox.num_boxes = max_cores;
                msr_uncores = snb_msr_uncores;
                break;
-       case 45: /* Sandy Birdge-EP */
+       case 45: /* Sandy Bridge-EP */
                if (snbep_uncore_cbox.num_boxes > max_cores)
                        snbep_uncore_cbox.num_boxes = max_cores;
                msr_uncores = snbep_msr_uncores;
                        nhmex_uncore_cbox.num_boxes = max_cores;
                msr_uncores = nhmex_msr_uncores;
                break;
 +      case 62: /* IvyTown */
 +              if (ivt_uncore_cbox.num_boxes > max_cores)
 +                      ivt_uncore_cbox.num_boxes = max_cores;
 +              msr_uncores = ivt_msr_uncores;
 +              break;
 +
        default:
                return 0;
        }
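
Earlier in this diff, uncore_pci_probe() stops casting id->driver_data to a type
pointer and instead treats it as an index into whichever pci_uncores[] table
uncore_pci_init() selected for the running CPU model, which is what lets Sandy
Bridge-EP and IvyTown share the probe path. A minimal sketch of that dispatch pattern
(types heavily simplified):

    #include <stdio.h>

    struct uncore_type { const char *name; };

    static struct uncore_type ha  = { "ha"  };
    static struct uncore_type imc = { "imc" };

    /* Both platform tables use the same index layout (HA=0, IMC=1, ...),
     * so one id-table entry works against either. */
    static struct uncore_type *snbep_uncores[] = { &ha, &imc };
    static struct uncore_type *ivt_uncores[]   = { &ha, &imc };

    static struct uncore_type **pci_uncores;   /* picked once at init */

    static void probe(unsigned long driver_data)
    {
        printf("probing %s\n", pci_uncores[driver_data]->name);
    }

    int main(void)
    {
        pci_uncores = snbep_uncores; /* CPU model 45, Sandy Bridge-EP */
        probe(0);                    /* -> "probing ha"               */
        pci_uncores = ivt_uncores;   /* CPU model 62, IvyTown         */
        probe(1);                    /* -> "probing imc"              */
        return 0;
    }
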