/*
 * Source: arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
 * (karo-tx-linux tree, gitweb export; branch merge "Merge branch 'linus'
 * into perf/core, to fix conflicts")
 */
/* SandyBridge-EP/IvyTown uncore support */
#include "perf_event_intel_uncore.h"


/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL     (1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS     (1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ          (1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN       (1 << 16)
/* Box init value: reset control + counters and enable the freeze feature */
#define SNBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
                                         SNBEP_PMON_BOX_CTL_RST_CTRS | \
                                         SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK      0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK       0x0000ff00
#define SNBEP_PMON_CTL_RST              (1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET         (1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT       (1 << 21)
#define SNBEP_PMON_CTL_EN               (1 << 22)
#define SNBEP_PMON_CTL_INVERT           (1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK       0xff000000
/* Bits a user may set via the raw event config for generic SNB-EP boxes */
#define SNBEP_PMON_RAW_EVENT_MASK       (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                         SNBEP_PMON_CTL_UMASK_MASK | \
                                         SNBEP_PMON_CTL_EDGE_DET | \
                                         SNBEP_PMON_CTL_INVERT | \
                                         SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control (threshold field is only 5 bits wide here) */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK         0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK         \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PMON_CTL_UMASK_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_PMON_CTL_INVERT | \
                                 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN               (1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK       (SNBEP_PMON_RAW_EVENT_MASK | \
                                                 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK     0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK       0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT       (1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET     (1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_PMON_CTL_EV_SEL_EXT | \
                                 SNBEP_PMON_CTL_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
                                (SNBEP_PMON_RAW_EVENT_MASK | \
                                 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register (offsets in PCI config space) */
#define SNBEP_PCI_PMON_BOX_CTL                  0xf4
#define SNBEP_PCI_PMON_CTL0                     0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0                     0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0        0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1        0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH       0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL         0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR         0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0          0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1          0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0           0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1           0x23c

/* SNB-EP Ubox register (MSR addresses) */
#define SNBEP_U_MSR_PMON_CTR0                   0xc16
#define SNBEP_U_MSR_PMON_CTL0                   0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL         0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR         0xc09

/* SNB-EP Cbo register (box 0; other boxes at SNBEP_CBO_MSR_OFFSET strides) */
#define SNBEP_C0_MSR_PMON_CTR0                  0xd16
#define SNBEP_C0_MSR_PMON_CTL0                  0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL               0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER            0xd14
#define SNBEP_CBO_MSR_OFFSET                    0x20

/* Cbo filter MSR sub-fields */
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID       0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID       0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE     0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC       0xff800000

/*
 * Extra-register table entry tying event encoding (e), the config bits that
 * must match (m) and the filter-field index bitmap (i) to the Cbo filter MSR.
 */
#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {    \
        .event = (e),                           \
        .msr = SNBEP_C0_MSR_PMON_BOX_FILTER,    \
        .config_mask = (m),                     \
        .idx = (i)                              \
}

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0                 0xc36
#define SNBEP_PCU_MSR_PMON_CTL0                 0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL              0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER           0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK      0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR               0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR               0x3fd

/* IVBEP event control (IvyTown: no FRZ_EN in the box init value) */
#define IVBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
                                         SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK               (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                         SNBEP_PMON_CTL_UMASK_MASK | \
                                         SNBEP_PMON_CTL_EDGE_DET | \
                                         SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL             0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL             (1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL           (1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PMON_CTL_UMASK_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK               (IVBEP_PMON_RAW_EVENT_MASK | \
                                                 SNBEP_CBO_PMON_CTL_TID_EN)

/* IVBEP Cbo filter MSR sub-fields (64-bit layout, wider than SNB-EP's) */
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID               (0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID               (0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC               (0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6                (0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC                (0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_IOSC      (0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST         (1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK                \
                                (IVBEP_PMON_RAW_EVENT_MASK | \
                                 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PMON_CTL_EV_SEL_EXT | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
                                (IVBEP_PMON_RAW_EVENT_MASK | \
                                 SNBEP_PMON_CTL_EV_SEL_EXT)

/* Extract field i of width n bits from x (fields packed LSB-first) */
#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
                                ((1ULL << (n)) - 1)))

/*
 * sysfs "format" attributes: each maps a user-visible field name to a
 * bit range of perf_event_attr::config/config1/config2.
 */
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
207
208 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
209 {
210         struct pci_dev *pdev = box->pci_dev;
211         int box_ctl = uncore_pci_box_ctl(box);
212         u32 config = 0;
213
214         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
215                 config |= SNBEP_PMON_BOX_CTL_FRZ;
216                 pci_write_config_dword(pdev, box_ctl, config);
217         }
218 }
219
220 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
221 {
222         struct pci_dev *pdev = box->pci_dev;
223         int box_ctl = uncore_pci_box_ctl(box);
224         u32 config = 0;
225
226         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
227                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
228                 pci_write_config_dword(pdev, box_ctl, config);
229         }
230 }
231
232 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
233 {
234         struct pci_dev *pdev = box->pci_dev;
235         struct hw_perf_event *hwc = &event->hw;
236
237         pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
238 }
239
240 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
241 {
242         struct pci_dev *pdev = box->pci_dev;
243         struct hw_perf_event *hwc = &event->hw;
244
245         pci_write_config_dword(pdev, hwc->config_base, hwc->config);
246 }
247
/*
 * Read a 64-bit PMON counter from PCI config space as two 32-bit reads
 * (low dword first, then high dword), assembled in place into 'count'.
 *
 * NOTE(review): the two halves are not read atomically, so a carry out
 * of the low dword between the reads can yield a transiently wrong
 * value — presumably tolerated by the generic uncore layer; confirm.
 */
static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;
        u64 count = 0;

        /* fill the low and high 32-bit halves of 'count' directly */
        pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
        pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

        return count;
}
259
260 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
261 {
262         struct pci_dev *pdev = box->pci_dev;
263
264         pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
265 }
266
267 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
268 {
269         u64 config;
270         unsigned msr;
271
272         msr = uncore_msr_box_ctl(box);
273         if (msr) {
274                 rdmsrl(msr, config);
275                 config |= SNBEP_PMON_BOX_CTL_FRZ;
276                 wrmsrl(msr, config);
277         }
278 }
279
280 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
281 {
282         u64 config;
283         unsigned msr;
284
285         msr = uncore_msr_box_ctl(box);
286         if (msr) {
287                 rdmsrl(msr, config);
288                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
289                 wrmsrl(msr, config);
290         }
291 }
292
/*
 * Program and start one MSR-based counter.  If the event uses an extra
 * (filter) register, write the box's shared filter value (shared reg 0)
 * first, then write the event config with the enable bit set.
 */
static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

        if (reg1->idx != EXTRA_REG_NONE)
                wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

        wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
303
304 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
305                                         struct perf_event *event)
306 {
307         struct hw_perf_event *hwc = &event->hw;
308
309         wrmsrl(hwc->config_base, hwc->config);
310 }
311
312 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
313 {
314         unsigned msr = uncore_msr_box_ctl(box);
315
316         if (msr)
317                 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
318 }
319
/* Generic box format attributes (8-bit threshold). */
static struct attribute *snbep_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        NULL,
};

/* Ubox format attributes: threshold field is only 5 bits. */
static struct attribute *snbep_uncore_ubox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh5.attr,
        NULL,
};

/* Cbox format attributes: adds tid_en plus the filter MSR sub-fields. */
static struct attribute *snbep_uncore_cbox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_tid_en.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        &format_attr_filter_tid.attr,
        &format_attr_filter_nid.attr,
        &format_attr_filter_state.attr,
        &format_attr_filter_opc.attr,
        NULL,
};

/* PCU format attributes: occupancy sub-events and four filter bands. */
static struct attribute *snbep_uncore_pcu_formats_attr[] = {
        &format_attr_event_ext.attr,
        &format_attr_occ_sel.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh5.attr,
        &format_attr_occ_invert.attr,
        &format_attr_occ_edge.attr,
        &format_attr_filter_band0.attr,
        &format_attr_filter_band1.attr,
        &format_attr_filter_band2.attr,
        &format_attr_filter_band3.attr,
        NULL,
};

/* QPI format attributes: packet match/mask fields in config1/config2. */
static struct attribute *snbep_uncore_qpi_formats_attr[] = {
        &format_attr_event_ext.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        &format_attr_match_rds.attr,
        &format_attr_match_rnid30.attr,
        &format_attr_match_rnid4.attr,
        &format_attr_match_dnid.attr,
        &format_attr_match_mc.attr,
        &format_attr_match_opc.attr,
        &format_attr_match_vnw.attr,
        &format_attr_match0.attr,
        &format_attr_match1.attr,
        &format_attr_mask_rds.attr,
        &format_attr_mask_rnid30.attr,
        &format_attr_mask_rnid4.attr,
        &format_attr_mask_dnid.attr,
        &format_attr_mask_mc.attr,
        &format_attr_mask_opc.attr,
        &format_attr_mask_vnw.attr,
        &format_attr_mask0.attr,
        &format_attr_mask1.attr,
        NULL,
};
393
/* Named event aliases exported under sysfs events/ for the IMC boxes. */
static struct uncore_event_desc snbep_uncore_imc_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
        INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
        INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
        { /* end: all zeroes */ },
};

/* Named event aliases for the QPI boxes (0x1xx encodings use ev_sel_ext). */
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
        INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
        INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
        INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
        { /* end: all zeroes */ },
};

/* sysfs "format" attribute groups, one per box flavor. */
static struct attribute_group snbep_uncore_format_group = {
        .name = "format",
        .attrs = snbep_uncore_formats_attr,
};

static struct attribute_group snbep_uncore_ubox_format_group = {
        .name = "format",
        .attrs = snbep_uncore_ubox_formats_attr,
};

static struct attribute_group snbep_uncore_cbox_format_group = {
        .name = "format",
        .attrs = snbep_uncore_cbox_formats_attr,
};

static struct attribute_group snbep_uncore_pcu_format_group = {
        .name = "format",
        .attrs = snbep_uncore_pcu_formats_attr,
};

static struct attribute_group snbep_uncore_qpi_format_group = {
        .name = "format",
        .attrs = snbep_uncore_qpi_formats_attr,
};
433
/* Shared ops initializer for all MSR-based SNB-EP boxes. */
#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                      \
        .init_box       = snbep_uncore_msr_init_box,            \
        .disable_box    = snbep_uncore_msr_disable_box,         \
        .enable_box     = snbep_uncore_msr_enable_box,          \
        .disable_event  = snbep_uncore_msr_disable_event,       \
        .enable_event   = snbep_uncore_msr_enable_event,        \
        .read_counter   = uncore_msr_read_counter

static struct intel_uncore_ops snbep_uncore_msr_ops = {
        SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
445
446 #define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()                      \
447         .init_box       = snbep_uncore_pci_init_box,            \
448         .disable_box    = snbep_uncore_pci_disable_box,         \
449         .enable_box     = snbep_uncore_pci_enable_box,          \
450         .disable_event  = snbep_uncore_pci_disable_event,       \
451         .read_counter   = snbep_uncore_pci_read_counter
452
453 static struct intel_uncore_ops snbep_uncore_pci_ops = {
454         SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
455         .enable_event   = snbep_uncore_pci_enable_event,        \
456 };
457
/* Per-event counter constraints for the Cbox (bitmask = usable counters). */
static struct event_constraint snbep_uncore_cbox_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
        /* 0xe overlaps other constraint masks, so needs the OVERLAP variant */
        EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
        UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
        EVENT_CONSTRAINT_END
};

/* Counter constraints for the R2PCIe box. */
static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        EVENT_CONSTRAINT_END
};

/* Counter constraints for the R3QPI box. */
static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
        EVENT_CONSTRAINT_END
};
533
/* Ubox: single box, two generic counters plus a fixed uncore-clock counter. */
static struct intel_uncore_type snbep_uncore_ubox = {
        .name           = "ubox",
        .num_counters   = 2,
        .num_boxes      = 1,
        .perf_ctr_bits  = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr       = SNBEP_U_MSR_PMON_CTR0,
        .event_ctl      = SNBEP_U_MSR_PMON_CTL0,
        .event_mask     = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
        .fixed_ctr      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
        .fixed_ctl      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
        .ops            = &snbep_uncore_msr_ops,
        .format_group   = &snbep_uncore_ubox_format_group,
};

/*
 * Map Cbox event encodings to the filter-field index bitmap (idx) used by
 * snbep_cbox_hw_config()/snbep_cbox_filter_mask(); config_mask selects the
 * event config bits that must match for a table entry to apply.
 */
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
                                  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
        EVENT_EXTRA_END
};
577
/*
 * Release the filter-MSR sub-fields this event allocated in
 * __snbep_cbox_get_constraint(): er->ref packs a 6-bit refcount per
 * field, so drop one reference for every bit set in reg1->alloc, then
 * clear the allocation record.
 */
static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct intel_uncore_extra_reg *er = &box->shared_regs[0];
        int i;

        /* fake boxes (scheduler validation runs) never took real references */
        if (uncore_box_is_fake(box))
                return;

        for (i = 0; i < 5; i++) {
                if (reg1->alloc & (0x1 << i))
                        atomic_sub(1 << (i * 6), &er->ref);
        }
        reg1->alloc = 0;
}
593
/*
 * Try to claim every filter-MSR sub-field this event needs (bits of
 * reg1->idx).  Each field has a 6-bit refcount packed into er->ref; a
 * field can be shared only if the bits it covers in the shared filter
 * config already match this event's value.  Returns NULL on success,
 * or &uncore_constraint_empty if any field is held with a conflicting
 * value (after rolling back the references taken so far).
 */
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
                            u64 (*cbox_filter_mask)(int fields))
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct intel_uncore_extra_reg *er = &box->shared_regs[0];
        int i, alloc = 0;
        unsigned long flags;
        u64 mask;

        if (reg1->idx == EXTRA_REG_NONE)
                return NULL;

        raw_spin_lock_irqsave(&er->lock, flags);
        for (i = 0; i < 5; i++) {
                if (!(reg1->idx & (0x1 << i)))
                        continue;
                /* real boxes keep fields claimed by a previous get_constraint */
                if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
                        continue;

                mask = cbox_filter_mask(0x1 << i);
                /* free (refcount 0) or already programmed to the same value */
                if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
                    !((reg1->config ^ er->config) & mask)) {
                        atomic_add(1 << (i * 6), &er->ref);
                        er->config &= ~mask;
                        er->config |= reg1->config & mask;
                        alloc |= (0x1 << i);
                } else {
                        break;
                }
        }
        raw_spin_unlock_irqrestore(&er->lock, flags);
        if (i < 5)
                goto fail;

        /* remember what we took so put_constraint() can release it */
        if (!uncore_box_is_fake(box))
                reg1->alloc |= alloc;

        return NULL;
fail:
        /* undo the references acquired in this call */
        for (; i >= 0; i--) {
                if (alloc & (0x1 << i))
                        atomic_sub(1 << (i * 6), &er->ref);
        }
        return &uncore_constraint_empty;
}
640
641 static u64 snbep_cbox_filter_mask(int fields)
642 {
643         u64 mask = 0;
644
645         if (fields & 0x1)
646                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
647         if (fields & 0x2)
648                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
649         if (fields & 0x4)
650                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
651         if (fields & 0x8)
652                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
653
654         return mask;
655 }
656
/* SNB-EP Cbox get_constraint: shared logic with the SNB-EP filter layout. */
static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}
662
/*
 * Work out the extra (filter) register usage for a Cbox event: scan the
 * extra_regs table for entries whose masked event config matches, OR
 * together their filter-field indices, and record the per-box filter
 * MSR address and the relevant config1 bits in the event's extra_reg.
 * Always returns 0.
 */
static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct extra_reg *er;
        int idx = 0;

        for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
                if (er->event != (event->hw.config & er->config_mask))
                        continue;
                idx |= er->idx;
        }

        if (idx) {
                /* each Cbox's filter MSR is SNBEP_CBO_MSR_OFFSET apart */
                reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
                        SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
                reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
                reg1->idx = idx;
        }
        return 0;
}
683
/* Cbox ops: common MSR ops plus filter-register constraint handling. */
static struct intel_uncore_ops snbep_uncore_cbox_ops = {
        SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
        .hw_config              = snbep_cbox_hw_config,
        .get_constraint         = snbep_cbox_get_constraint,
        .put_constraint         = snbep_cbox_put_constraint,
};

/* Cbox: 8 boxes, 4 counters each, one shared (filter) register per box. */
static struct intel_uncore_type snbep_uncore_cbox = {
        .name                   = "cbox",
        .num_counters           = 4,
        .num_boxes              = 8,
        .perf_ctr_bits          = 44,
        .event_ctl              = SNBEP_C0_MSR_PMON_CTL0,
        .perf_ctr               = SNBEP_C0_MSR_PMON_CTR0,
        .event_mask             = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_C0_MSR_PMON_BOX_CTL,
        .msr_offset             = SNBEP_CBO_MSR_OFFSET,
        .num_shared_regs        = 1,
        .constraints            = snbep_uncore_cbox_constraints,
        .ops                    = &snbep_uncore_cbox_ops,
        .format_group           = &snbep_uncore_cbox_format_group,
};
706
/*
 * Compute the PCU filter value for moving an event's 8-bit band from
 * byte slot reg1->idx to byte slot new_idx of the filter register.
 * The event select in hwc->config is adjusted by the slot delta
 * (presumably the band events use consecutive event codes — confirm
 * against the PCU event list).  When 'modify' is false only the
 * shifted filter value is computed and returned, nothing is changed.
 */
static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
        u64 config = reg1->config;

        if (new_idx > reg1->idx)
                config <<= 8 * (new_idx - reg1->idx);
        else
                config >>= 8 * (reg1->idx - new_idx);

        if (modify) {
                hwc->config += new_idx - reg1->idx;
                reg1->config = config;
                reg1->idx = new_idx;
        }
        return config;
}
725
/*
 * Allocate one byte lane of the shared PCU filter register for @event.
 *
 * The four filter bands live in a single shared register; er->ref holds
 * an 8-bit reference count per band.  A band can be shared when the
 * requested filter value matches what is already programmed.  If the
 * preferred band is unusable, the other three are tried in turn, with
 * the event's filter value relocated via snbep_pcu_alter_er().
 *
 * Returns NULL on success, &uncore_constraint_empty when no band fits.
 */
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	/* No filter needed, or a real box already holds its allocation. */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	/* Band is free, or already programmed with an identical value. */
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/* Rotate through the four bands before giving up. */
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		/* Commit the band change and remember we hold a reference. */
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}
767
768 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
769 {
770         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
771         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
772
773         if (uncore_box_is_fake(box) || !reg1->alloc)
774                 return;
775
776         atomic_sub(1 << (reg1->idx * 8), &er->ref);
777         reg1->alloc = 0;
778 }
779
780 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
781 {
782         struct hw_perf_event *hwc = &event->hw;
783         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
784         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
785
786         if (ev_sel >= 0xb && ev_sel <= 0xe) {
787                 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
788                 reg1->idx = ev_sel - 0xb;
789                 reg1->config = event->attr.config1 & (0xff << reg1->idx);
790         }
791         return 0;
792 }
793
/* SNB-EP PCU callbacks: common MSR ops plus shared-filter management. */
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* SNB-EP power control unit PMON: one box, 4 x 48-bit counters, shared filter. */
static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

/* All SNB-EP MSR-based uncore PMUs; NULL-terminated. */
static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};
821
822 void snbep_uncore_cpu_init(void)
823 {
824         if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
825                 snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
826         uncore_msr_uncores = snbep_msr_uncores;
827 }
828
/* Indices into uncore_extra_pci_dev[] for the per-port QPI filter devices. */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
};
833
834 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
835 {
836         struct hw_perf_event *hwc = &event->hw;
837         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
838         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
839
840         if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
841                 reg1->idx = 0;
842                 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
843                 reg1->config = event->attr.config1;
844                 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
845                 reg2->config = event->attr.config2;
846         }
847         return 0;
848 }
849
/*
 * Enable a QPI event.  If the event uses the packet match/mask filter,
 * program it first through the port's companion filter PCI device
 * (stashed in uncore_extra_pci_dev[] at probe time).
 */
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		/* pmu_idx (port number) selects PORT0 vs PORT1 filter dev. */
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[box->phys_id][idx];
		WARN_ON_ONCE(!filter_pdev);
		if (filter_pdev) {
			/* 64-bit match/mask values go out as two dwords each. */
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	/* Filter is in place (if any); now enable the counter itself. */
	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
875
/* QPI callbacks: common PCI ops plus filter programming and generic
 * shared-register constraint management. */
static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

/* Register layout shared by all SNB-EP PCI-based PMON boxes. */
#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group
891
/* Home Agent PMON. */
static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Memory controller channel PMON: one box per channel, plus a fixed (DCLK) counter. */
static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* QPI port PMON: one box per port, with shared match/mask filter registers. */
static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};


/* R2PCIe ring-to-PCIe interface PMON. */
static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* R3QPI ring-to-QPI interface PMON: one box per link. */
static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Indices into snbep_pci_uncores[], referenced from the PCI id table. */
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

/* All SNB-EP PCI-based uncore PMUs; NULL-terminated. */
static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
962
/* PCI devices hosting the SNB-EP uncore PMON blocks and filter registers. */
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};
1021
/*
 * Build the PCI-bus to physical-socket (node) mapping.
 *
 * Walks every Ubox device (PCI id @devid), reads its local Node ID
 * (config offset 0x40) and the Node ID mapping register (offset 0x54),
 * and records which node number owns the bus the Ubox sits on.  Buses
 * that carry no Ubox inherit the mapping of the next higher bus that
 * has one.
 *
 * Returns 0 on success or a negative errno from the PCI config reads.
 */
static int snbep_pci2phy_map_init(int devid)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/* get the Node ID of the local register */
		err = pci_read_config_dword(ubox_dev, 0x40, &config);
		if (err)
			break;
		/* NOTE(review): config is used unmasked; assumes the register
		 * holds only the node id — confirm against the datasheet. */
		nodeid = config;
		/* get the Node ID mapping */
		err = pci_read_config_dword(ubox_dev, 0x54, &config);
		if (err)
			break;
		/*
		 * every three bits in the Node ID mapping register maps
		 * to a particular node.
		 */
		for (i = 0; i < 8; i++) {
			if (nodeid == ((config >> (3 * i)) & 0x7)) {
				uncore_pcibus_to_physid[bus] = i;
				break;
			}
		}
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		i = -1;
		for (bus = 255; bus >= 0; bus--) {
			if (uncore_pcibus_to_physid[bus] >= 0)
				i = uncore_pcibus_to_physid[bus];
			else
				uncore_pcibus_to_physid[bus] = i;
		}
	}

	/* Drop the reference still held when the reads above failed. */
	if (ubox_dev)
		pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}
1078
1079 int snbep_uncore_pci_init(void)
1080 {
1081         int ret = snbep_pci2phy_map_init(0x3ce0);
1082         if (ret)
1083                 return ret;
1084         uncore_pci_uncores = snbep_pci_uncores;
1085         uncore_pci_driver = &snbep_uncore_pci_driver;
1086         return 0;
1087 }
1088 /* end of Sandy Bridge-EP uncore support */
1089
1090 /* IvyTown uncore support */
1091 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1092 {
1093         unsigned msr = uncore_msr_box_ctl(box);
1094         if (msr)
1095                 wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1096 }
1097
/* Reset and freeze-enable an IVT PCI-based box via its box control register. */
static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}
1104
/* Common MSR box callbacks for IVT: IVT init, SNB-EP enable/disable/read. */
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

/* Common PCI box callbacks for IVT: IVT init, SNB-EP everything else. */
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* Register layout shared by all IVT PCI-based PMON boxes. */
#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,			\
	.format_group	= &ivbep_uncore_format_group
1133
/* Generic IVT PMON format: event/umask/edge/inv with an 8-bit threshold. */
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* Ubox format: same as generic but only a 5-bit threshold field. */
static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

/* C-box format: adds tid_en and the tid/link/state/nid/opc filter fields. */
static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	NULL,
};

/* PCU format: occupancy select/invert/edge plus the four filter bands. */
static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
1179
/* QPI format: extended event field plus the packet match/mask sub-fields. */
static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

/* sysfs "format" groups exposing the attribute lists above. */
static struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
1230
/* IVT Ubox PMON: 2 x 44-bit counters plus a 48-bit fixed (UCLK) counter. */
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};

/*
 * Map event/umask encodings to the C-box filter fields they need.
 * The third argument is the idx bitmask consumed by
 * ivbep_cbox_filter_mask() (0x1 tid, 0x2 link, 0x4 state, 0x8 nid,
 * 0x10 opc).
 */
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
1286
1287 static u64 ivbep_cbox_filter_mask(int fields)
1288 {
1289         u64 mask = 0;
1290
1291         if (fields & 0x1)
1292                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1293         if (fields & 0x2)
1294                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1295         if (fields & 0x4)
1296                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1297         if (fields & 0x8)
1298                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1299         if (fields & 0x10)
1300                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1301
1302         return mask;
1303 }
1304
/* Shared C-box constraint logic, parameterized with the IVT filter layout. */
static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}
1310
1311 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1312 {
1313         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1314         struct extra_reg *er;
1315         int idx = 0;
1316
1317         for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1318                 if (er->event != (event->hw.config & er->config_mask))
1319                         continue;
1320                 idx |= er->idx;
1321         }
1322
1323         if (idx) {
1324                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1325                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1326                 reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1327                 reg1->idx = idx;
1328         }
1329         return 0;
1330 }
1331
/*
 * Program the box's shared filter (if this event uses one), then enable
 * the counter.
 */
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		/*
		 * The upper filter half lives in a second MSR at +6 from
		 * the first — presumably the IVT filter1 register offset;
		 * NOTE(review): confirm against the uncore manual.
		 */
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1345
/* IVT C-box callbacks: split-MSR filter write plus filter/constraint hooks. */
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/*
 * IVT C-box PMON: up to 15 boxes (clamped to the actual core count in
 * ivbep_uncore_cpu_init()), 4 x 44-bit counters, one shared filter.
 */
static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};

/* IVT PCU callbacks: reuse the SNB-EP filter band management. */
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* IVT power control unit PMON. */
static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};

/* All IVT MSR-based uncore PMUs; NULL-terminated. */
static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};
1401
1402 void ivbep_uncore_cpu_init(void)
1403 {
1404         if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1405                 ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1406         uncore_msr_uncores = ivbep_msr_uncores;
1407 }
1408
/* IVT Home Agent PMON: two boxes on IVT (vs one on SNB-EP). */
static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IVT memory controller channel PMON: up to 8 channels, plus fixed counter. */
static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* registers in IRP boxes are not properly aligned */
/* Per-counter control and counter register offsets, indexed by hwc->idx. */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1432
/* Enable an IRP event via its table-looked-up control register offset. */
static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
			       hwc->config | SNBEP_PMON_CTL_EN);
}
1441
/* Disable an IRP event by rewriting its control register without the EN bit. */
static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
}
1449
/*
 * Read a 64-bit IRP counter as two 32-bit config-space reads.
 * Assembling the value through (u32 *)&count relies on little-endian
 * layout, which is fine for x86-only code.
 */
static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}
1461
/* IRP callbacks: custom enable/disable/read due to the unaligned registers. */
static struct intel_uncore_ops ivbep_uncore_irp_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= ivbep_uncore_irp_read_counter,
};

/* IVT IRP PMON; note: no perf_ctr/event_ctl here — offsets come from
 * the ivbep_uncore_irp_ctls/ctrs tables instead. */
static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};
1481
/*
 * QPI box ops: reuses the SNB-EP QPI event/config helpers (which handle the
 * extended match/mask registers) and the generic shared-register constraint
 * hooks for the QPI filter registers.
 */
static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};
1493
/*
 * QPI link layer uncore PMU: three boxes (one per QPI port), four 48-bit
 * counters each, plus one shared register used for the QPI match/mask
 * filter (hence .num_shared_regs and the constraint hooks in the ops).
 */
static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};
1507
/*
 * R2PCIe (ring-to-PCIe) uncore PMU: one box, four 44-bit counters, with
 * SNB-EP counter-placement constraints. Common register layout comes from
 * IVBEP_UNCORE_PCI_COMMON_INIT().
 */
static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1516
/*
 * R3QPI (ring-to-QPI) uncore PMU: two boxes, three 44-bit counters each,
 * with SNB-EP counter-placement constraints. Common register layout comes
 * from IVBEP_UNCORE_PCI_COMMON_INIT().
 */
static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1525
/* Indices into ivbep_pci_uncores[], also encoded in pci_device_id driver_data. */
enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};
1534
/* All IvyTown PCI-attached uncore PMU types, indexed by the enum above. */
static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};
1544
/*
 * PCI device IDs for the IvyTown uncore PMU units. driver_data encodes
 * (uncore type index, box index); the two filter devices at the end are
 * extra devices used only for the QPI port filter registers.
 */
static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1630
/*
 * PCI driver stub for the IvyTown uncore devices; only the ID table is
 * filled in here. NOTE(review): .probe/.remove are presumably installed by
 * the shared uncore PCI core before registration — not set in this file's
 * visible portion; confirm against perf_event_intel_uncore.c.
 */
static struct pci_driver ivbep_uncore_pci_driver = {
	.name		= "ivbep_uncore",
	.id_table	= ivbep_uncore_pci_ids,
};
1635
1636 int ivbep_uncore_pci_init(void)
1637 {
1638         int ret = snbep_pci2phy_map_init(0x0e1e);
1639         if (ret)
1640                 return ret;
1641         uncore_pci_uncores = ivbep_pci_uncores;
1642         uncore_pci_driver = &ivbep_uncore_pci_driver;
1643         return 0;
1644 }
1645 /* end of IvyTown uncore support */