perf/x86/uncore: add hrtimer to SNB uncore IMC PMU
arch/x86/kernel/cpu/perf_event_intel_uncore.c
1 #include "perf_event_intel_uncore.h"
2
3 static struct intel_uncore_type *empty_uncore[] = { NULL, };
4 static struct intel_uncore_type **msr_uncores = empty_uncore;
5 static struct intel_uncore_type **pci_uncores = empty_uncore;
6 /* pci bus to socket mapping */
7 static int pcibus_to_physid[256] = { [0 ... 255] = -1, };
8
9 static struct pci_dev *extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];
10
11 static DEFINE_RAW_SPINLOCK(uncore_box_lock);
12
13 /* mask of cpus that collect uncore events */
14 static cpumask_t uncore_cpu_mask;
15
16 /* constraint for the fixed counter */
17 static struct event_constraint constraint_fixed =
18         EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
19 static struct event_constraint constraint_empty =
20         EVENT_CONSTRAINT(0, 0, 0);
21
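/* Extract the i-th n-bit field of x; used to unpack per-field reference counts. */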
22 #define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
23                                 ((1ULL << (n)) - 1)))
24
25 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
26 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
27 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
28 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
29 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
30 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
31 DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
32 DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
33 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
34 DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
35 DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
36 DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
37 DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
38 DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
39 DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
40 DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
41 DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
42 DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
43 DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
44 DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
45 DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
46 DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
47 DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
48 DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
49 DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
50 DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
51 DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
52 DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
53 DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
54 DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
55 DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
56 DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
57 DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
58 DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
59 DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
60 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
61 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
62 DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
63 DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
64 DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
65 DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
66 DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
67 DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
68
69 static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
70 static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
71 static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
72 static void uncore_pmu_event_read(struct perf_event *event);
73
74 static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
75 {
76         return container_of(event->pmu, struct intel_uncore_pmu, pmu);
77 }
78
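/*
 * Return the uncore box that serves @cpu for this pmu.  The per-cpu pointer
 * acts as a cache; on a miss, walk the pmu's box list under uncore_box_lock,
 * match on the physical package id and take a reference on the box found.
 */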
79 static struct intel_uncore_box *
80 uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
81 {
82         struct intel_uncore_box *box;
83
84         box = *per_cpu_ptr(pmu->box, cpu);
85         if (box)
86                 return box;
87
88         raw_spin_lock(&uncore_box_lock);
89         list_for_each_entry(box, &pmu->box_list, list) {
90                 if (box->phys_id == topology_physical_package_id(cpu)) {
91                         atomic_inc(&box->refcnt);
92                         *per_cpu_ptr(pmu->box, cpu) = box;
93                         break;
94                 }
95         }
96         raw_spin_unlock(&uncore_box_lock);
97
98         return *per_cpu_ptr(pmu->box, cpu);
99 }
100
101 static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
102 {
103         /*
104          * The perf core schedules events per cpu, but uncore events are
105          * collected by one of the cpus inside a physical package.
106          */
107         return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
108 }
109
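/* Generic read for MSR-based counters; event_base holds the counter MSR. */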
110 static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
111 {
112         u64 count;
113
114         rdmsrl(event->hw.event_base, count);
115
116         return count;
117 }
118
119 /*
120  * generic get constraint function for shared match/mask registers.
121  */
122 static struct event_constraint *
123 uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
124 {
125         struct intel_uncore_extra_reg *er;
126         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
127         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
128         unsigned long flags;
129         bool ok = false;
130
131         /*
132          * reg->alloc can be set due to existing state, so for a fake box we
133          * need to ignore it; otherwise we might fail to allocate proper
134          * fake state for this extra reg constraint.
135          */
136         if (reg1->idx == EXTRA_REG_NONE ||
137             (!uncore_box_is_fake(box) && reg1->alloc))
138                 return NULL;
139
140         er = &box->shared_regs[reg1->idx];
141         raw_spin_lock_irqsave(&er->lock, flags);
142         if (!atomic_read(&er->ref) ||
143             (er->config1 == reg1->config && er->config2 == reg2->config)) {
144                 atomic_inc(&er->ref);
145                 er->config1 = reg1->config;
146                 er->config2 = reg2->config;
147                 ok = true;
148         }
149         raw_spin_unlock_irqrestore(&er->lock, flags);
150
151         if (ok) {
152                 if (!uncore_box_is_fake(box))
153                         reg1->alloc = 1;
154                 return NULL;
155         }
156
157         return &constraint_empty;
158 }
159
160 static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
161 {
162         struct intel_uncore_extra_reg *er;
163         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
164
165         /*
166          * Only put the constraint if the extra reg was actually allocated.
167          * This also takes care of events which do not use an extra shared reg.
168          *
169          * Also, if this is a fake box we shouldn't touch any event state
170          * (reg->alloc) and we don't care about leaving inconsistent box
171          * state either since it will be thrown out.
172          */
173         if (uncore_box_is_fake(box) || !reg1->alloc)
174                 return;
175
176         er = &box->shared_regs[reg1->idx];
177         atomic_dec(&er->ref);
178         reg1->alloc = 0;
179 }
180
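/* Snapshot the current value of a shared (filter) register under its lock. */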
181 static u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
182 {
183         struct intel_uncore_extra_reg *er;
184         unsigned long flags;
185         u64 config;
186
187         er = &box->shared_regs[idx];
188
189         raw_spin_lock_irqsave(&er->lock, flags);
190         config = er->config;
191         raw_spin_unlock_irqrestore(&er->lock, flags);
192
193         return config;
194 }
195
196 /* Sandy Bridge-EP uncore support */
197 static struct intel_uncore_type snbep_uncore_cbox;
198 static struct intel_uncore_type snbep_uncore_pcu;
199
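/*
 * The disable/enable box helpers below freeze and unfreeze all counters in a
 * PCI uncore box by toggling the FRZ bit in its box control register.
 */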
200 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
201 {
202         struct pci_dev *pdev = box->pci_dev;
203         int box_ctl = uncore_pci_box_ctl(box);
204         u32 config = 0;
205
206         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
207                 config |= SNBEP_PMON_BOX_CTL_FRZ;
208                 pci_write_config_dword(pdev, box_ctl, config);
209         }
210 }
211
212 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
213 {
214         struct pci_dev *pdev = box->pci_dev;
215         int box_ctl = uncore_pci_box_ctl(box);
216         u32 config = 0;
217
218         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
219                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
220                 pci_write_config_dword(pdev, box_ctl, config);
221         }
222 }
223
224 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
225 {
226         struct pci_dev *pdev = box->pci_dev;
227         struct hw_perf_event *hwc = &event->hw;
228
229         pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
230 }
231
232 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
233 {
234         struct pci_dev *pdev = box->pci_dev;
235         struct hw_perf_event *hwc = &event->hw;
236
237         pci_write_config_dword(pdev, hwc->config_base, hwc->config);
238 }
239
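/* Counters behind PCI config space are read as two 32-bit accesses. */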
240 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
241 {
242         struct pci_dev *pdev = box->pci_dev;
243         struct hw_perf_event *hwc = &event->hw;
244         u64 count = 0;
245
246         pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
247         pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
248
249         return count;
250 }
251
252 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
253 {
254         struct pci_dev *pdev = box->pci_dev;
255
256         pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
257 }
258
259 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
260 {
261         u64 config;
262         unsigned msr;
263
264         msr = uncore_msr_box_ctl(box);
265         if (msr) {
266                 rdmsrl(msr, config);
267                 config |= SNBEP_PMON_BOX_CTL_FRZ;
268                 wrmsrl(msr, config);
269         }
270 }
271
272 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
273 {
274         u64 config;
275         unsigned msr;
276
277         msr = uncore_msr_box_ctl(box);
278         if (msr) {
279                 rdmsrl(msr, config);
280                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
281                 wrmsrl(msr, config);
282         }
283 }
284
285 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
286 {
287         struct hw_perf_event *hwc = &event->hw;
288         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
289
290         if (reg1->idx != EXTRA_REG_NONE)
291                 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
292
293         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
294 }
295
296 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
297                                         struct perf_event *event)
298 {
299         struct hw_perf_event *hwc = &event->hw;
300
301         wrmsrl(hwc->config_base, hwc->config);
302 }
303
304 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
305 {
306         unsigned msr = uncore_msr_box_ctl(box);
307
308         if (msr)
309                 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
310 }
311
312 static struct attribute *snbep_uncore_formats_attr[] = {
313         &format_attr_event.attr,
314         &format_attr_umask.attr,
315         &format_attr_edge.attr,
316         &format_attr_inv.attr,
317         &format_attr_thresh8.attr,
318         NULL,
319 };
320
321 static struct attribute *snbep_uncore_ubox_formats_attr[] = {
322         &format_attr_event.attr,
323         &format_attr_umask.attr,
324         &format_attr_edge.attr,
325         &format_attr_inv.attr,
326         &format_attr_thresh5.attr,
327         NULL,
328 };
329
330 static struct attribute *snbep_uncore_cbox_formats_attr[] = {
331         &format_attr_event.attr,
332         &format_attr_umask.attr,
333         &format_attr_edge.attr,
334         &format_attr_tid_en.attr,
335         &format_attr_inv.attr,
336         &format_attr_thresh8.attr,
337         &format_attr_filter_tid.attr,
338         &format_attr_filter_nid.attr,
339         &format_attr_filter_state.attr,
340         &format_attr_filter_opc.attr,
341         NULL,
342 };
343
344 static struct attribute *snbep_uncore_pcu_formats_attr[] = {
345         &format_attr_event_ext.attr,
346         &format_attr_occ_sel.attr,
347         &format_attr_edge.attr,
348         &format_attr_inv.attr,
349         &format_attr_thresh5.attr,
350         &format_attr_occ_invert.attr,
351         &format_attr_occ_edge.attr,
352         &format_attr_filter_band0.attr,
353         &format_attr_filter_band1.attr,
354         &format_attr_filter_band2.attr,
355         &format_attr_filter_band3.attr,
356         NULL,
357 };
358
359 static struct attribute *snbep_uncore_qpi_formats_attr[] = {
360         &format_attr_event_ext.attr,
361         &format_attr_umask.attr,
362         &format_attr_edge.attr,
363         &format_attr_inv.attr,
364         &format_attr_thresh8.attr,
365         &format_attr_match_rds.attr,
366         &format_attr_match_rnid30.attr,
367         &format_attr_match_rnid4.attr,
368         &format_attr_match_dnid.attr,
369         &format_attr_match_mc.attr,
370         &format_attr_match_opc.attr,
371         &format_attr_match_vnw.attr,
372         &format_attr_match0.attr,
373         &format_attr_match1.attr,
374         &format_attr_mask_rds.attr,
375         &format_attr_mask_rnid30.attr,
376         &format_attr_mask_rnid4.attr,
377         &format_attr_mask_dnid.attr,
378         &format_attr_mask_mc.attr,
379         &format_attr_mask_opc.attr,
380         &format_attr_mask_vnw.attr,
381         &format_attr_mask0.attr,
382         &format_attr_mask1.attr,
383         NULL,
384 };
385
386 static struct uncore_event_desc snbep_uncore_imc_events[] = {
387         INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
388         INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
389         INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
390         { /* end: all zeroes */ },
391 };
392
393 static struct uncore_event_desc snbep_uncore_qpi_events[] = {
394         INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
395         INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
396         INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
397         INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
398         { /* end: all zeroes */ },
399 };
400
401 static struct attribute_group snbep_uncore_format_group = {
402         .name = "format",
403         .attrs = snbep_uncore_formats_attr,
404 };
405
406 static struct attribute_group snbep_uncore_ubox_format_group = {
407         .name = "format",
408         .attrs = snbep_uncore_ubox_formats_attr,
409 };
410
411 static struct attribute_group snbep_uncore_cbox_format_group = {
412         .name = "format",
413         .attrs = snbep_uncore_cbox_formats_attr,
414 };
415
416 static struct attribute_group snbep_uncore_pcu_format_group = {
417         .name = "format",
418         .attrs = snbep_uncore_pcu_formats_attr,
419 };
420
421 static struct attribute_group snbep_uncore_qpi_format_group = {
422         .name = "format",
423         .attrs = snbep_uncore_qpi_formats_attr,
424 };
425
426 #define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                      \
427         .init_box       = snbep_uncore_msr_init_box,            \
428         .disable_box    = snbep_uncore_msr_disable_box,         \
429         .enable_box     = snbep_uncore_msr_enable_box,          \
430         .disable_event  = snbep_uncore_msr_disable_event,       \
431         .enable_event   = snbep_uncore_msr_enable_event,        \
432         .read_counter   = uncore_msr_read_counter
433
434 static struct intel_uncore_ops snbep_uncore_msr_ops = {
435         SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
436 };
437
438 #define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()                      \
439         .init_box       = snbep_uncore_pci_init_box,            \
440         .disable_box    = snbep_uncore_pci_disable_box,         \
441         .enable_box     = snbep_uncore_pci_enable_box,          \
442         .disable_event  = snbep_uncore_pci_disable_event,       \
443         .read_counter   = snbep_uncore_pci_read_counter
444
445 static struct intel_uncore_ops snbep_uncore_pci_ops = {
446         SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
447         .enable_event   = snbep_uncore_pci_enable_event,
448 };
449
450 static struct event_constraint snbep_uncore_cbox_constraints[] = {
451         UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
452         UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
453         UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
454         UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
455         UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
456         UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
457         UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
458         UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
459         UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
460         UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
461         UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
462         UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
463         UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
464         EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
465         UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
466         UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
467         UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
468         UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
469         UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
470         UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
471         UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
472         UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
473         UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
474         UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
475         UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
476         UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
477         EVENT_CONSTRAINT_END
478 };
479
480 static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
481         UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
482         UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
483         UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
484         UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
485         UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
486         UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
487         UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
488         UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
489         UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
490         UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
491         EVENT_CONSTRAINT_END
492 };
493
494 static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
495         UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
496         UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
497         UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
498         UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
499         UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
500         UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
501         UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
502         UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
503         UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
504         UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
505         UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
506         UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
507         UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
508         UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
509         UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
510         UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
511         UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
512         UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
513         UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
514         UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
515         UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
516         UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
517         UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
518         UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
519         UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
520         UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
521         UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
522         UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
523         EVENT_CONSTRAINT_END
524 };
525
526 static struct intel_uncore_type snbep_uncore_ubox = {
527         .name           = "ubox",
528         .num_counters   = 2,
529         .num_boxes      = 1,
530         .perf_ctr_bits  = 44,
531         .fixed_ctr_bits = 48,
532         .perf_ctr       = SNBEP_U_MSR_PMON_CTR0,
533         .event_ctl      = SNBEP_U_MSR_PMON_CTL0,
534         .event_mask     = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
535         .fixed_ctr      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
536         .fixed_ctl      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
537         .ops            = &snbep_uncore_msr_ops,
538         .format_group   = &snbep_uncore_ubox_format_group,
539 };
540
541 static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
542         SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
543                                   SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
544         SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
545         SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
546         SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
547         SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
548         SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
549         SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
550         SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
551         SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
552         SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
553         SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
554         SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
555         SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
556         SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
557         SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
558         SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
559         SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
560         SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
561         SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
562         SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
563         SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
564         EVENT_EXTRA_END
565 };
566
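/*
 * The Cbox filter register is shared by several filter fields (tid, nid,
 * state, opc, ...).  A 6-bit reference count per field is packed into
 * er->ref, and reg1->alloc records which fields this event currently owns,
 * so put_constraint can drop exactly the references get_constraint took.
 */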
567 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
568 {
569         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
570         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
571         int i;
572
573         if (uncore_box_is_fake(box))
574                 return;
575
576         for (i = 0; i < 5; i++) {
577                 if (reg1->alloc & (0x1 << i))
578                         atomic_sub(1 << (i * 6), &er->ref);
579         }
580         reg1->alloc = 0;
581 }
582
583 static struct event_constraint *
584 __snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
585                             u64 (*cbox_filter_mask)(int fields))
586 {
587         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
588         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
589         int i, alloc = 0;
590         unsigned long flags;
591         u64 mask;
592
593         if (reg1->idx == EXTRA_REG_NONE)
594                 return NULL;
595
596         raw_spin_lock_irqsave(&er->lock, flags);
597         for (i = 0; i < 5; i++) {
598                 if (!(reg1->idx & (0x1 << i)))
599                         continue;
600                 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
601                         continue;
602
603                 mask = cbox_filter_mask(0x1 << i);
604                 if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
605                     !((reg1->config ^ er->config) & mask)) {
606                         atomic_add(1 << (i * 6), &er->ref);
607                         er->config &= ~mask;
608                         er->config |= reg1->config & mask;
609                         alloc |= (0x1 << i);
610                 } else {
611                         break;
612                 }
613         }
614         raw_spin_unlock_irqrestore(&er->lock, flags);
615         if (i < 5)
616                 goto fail;
617
618         if (!uncore_box_is_fake(box))
619                 reg1->alloc |= alloc;
620
621         return NULL;
622 fail:
623         for (; i >= 0; i--) {
624                 if (alloc & (0x1 << i))
625                         atomic_sub(1 << (i * 6), &er->ref);
626         }
627         return &constraint_empty;
628 }
629
630 static u64 snbep_cbox_filter_mask(int fields)
631 {
632         u64 mask = 0;
633
634         if (fields & 0x1)
635                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
636         if (fields & 0x2)
637                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
638         if (fields & 0x4)
639                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
640         if (fields & 0x8)
641                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
642
643         return mask;
644 }
645
646 static struct event_constraint *
647 snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
648 {
649         return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
650 }
651
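/*
 * Look up which filter fields the event code needs, and pre-compute the
 * per-Cbox filter MSR address and the filter value taken from config1.
 */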
652 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
653 {
654         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
655         struct extra_reg *er;
656         int idx = 0;
657
658         for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
659                 if (er->event != (event->hw.config & er->config_mask))
660                         continue;
661                 idx |= er->idx;
662         }
663
664         if (idx) {
665                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
666                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
667                 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
668                 reg1->idx = idx;
669         }
670         return 0;
671 }
672
673 static struct intel_uncore_ops snbep_uncore_cbox_ops = {
674         SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
675         .hw_config              = snbep_cbox_hw_config,
676         .get_constraint         = snbep_cbox_get_constraint,
677         .put_constraint         = snbep_cbox_put_constraint,
678 };
679
680 static struct intel_uncore_type snbep_uncore_cbox = {
681         .name                   = "cbox",
682         .num_counters           = 4,
683         .num_boxes              = 8,
684         .perf_ctr_bits          = 44,
685         .event_ctl              = SNBEP_C0_MSR_PMON_CTL0,
686         .perf_ctr               = SNBEP_C0_MSR_PMON_CTR0,
687         .event_mask             = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
688         .box_ctl                = SNBEP_C0_MSR_PMON_BOX_CTL,
689         .msr_offset             = SNBEP_CBO_MSR_OFFSET,
690         .num_shared_regs        = 1,
691         .constraints            = snbep_uncore_cbox_constraints,
692         .ops                    = &snbep_uncore_cbox_ops,
693         .format_group           = &snbep_uncore_cbox_format_group,
694 };
695
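/*
 * The PCU filter register holds four 8-bit occupancy band thresholds.
 * snbep_pcu_alter_er() moves an event's band value to another byte slot;
 * get_constraint retries the remaining slots when the preferred one is
 * already taken with a conflicting value.
 */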
696 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
697 {
698         struct hw_perf_event *hwc = &event->hw;
699         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
700         u64 config = reg1->config;
701
702         if (new_idx > reg1->idx)
703                 config <<= 8 * (new_idx - reg1->idx);
704         else
705                 config >>= 8 * (reg1->idx - new_idx);
706
707         if (modify) {
708                 hwc->config += new_idx - reg1->idx;
709                 reg1->config = config;
710                 reg1->idx = new_idx;
711         }
712         return config;
713 }
714
715 static struct event_constraint *
716 snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
717 {
718         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
719         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
720         unsigned long flags;
721         int idx = reg1->idx;
722         u64 mask, config1 = reg1->config;
723         bool ok = false;
724
725         if (reg1->idx == EXTRA_REG_NONE ||
726             (!uncore_box_is_fake(box) && reg1->alloc))
727                 return NULL;
728 again:
729         mask = 0xffULL << (idx * 8);
730         raw_spin_lock_irqsave(&er->lock, flags);
731         if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
732             !((config1 ^ er->config) & mask)) {
733                 atomic_add(1 << (idx * 8), &er->ref);
734                 er->config &= ~mask;
735                 er->config |= config1 & mask;
736                 ok = true;
737         }
738         raw_spin_unlock_irqrestore(&er->lock, flags);
739
740         if (!ok) {
741                 idx = (idx + 1) % 4;
742                 if (idx != reg1->idx) {
743                         config1 = snbep_pcu_alter_er(event, idx, false);
744                         goto again;
745                 }
746                 return &constraint_empty;
747         }
748
749         if (!uncore_box_is_fake(box)) {
750                 if (idx != reg1->idx)
751                         snbep_pcu_alter_er(event, idx, true);
752                 reg1->alloc = 1;
753         }
754         return NULL;
755 }
756
757 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
758 {
759         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
760         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
761
762         if (uncore_box_is_fake(box) || !reg1->alloc)
763                 return;
764
765         atomic_sub(1 << (reg1->idx * 8), &er->ref);
766         reg1->alloc = 0;
767 }
768
769 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
770 {
771         struct hw_perf_event *hwc = &event->hw;
772         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
773         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
774
775         if (ev_sel >= 0xb && ev_sel <= 0xe) {
776                 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
777                 reg1->idx = ev_sel - 0xb;
778                 reg1->config = event->attr.config1 & (0xffULL << (reg1->idx * 8));
779         }
780         return 0;
781 }
782
783 static struct intel_uncore_ops snbep_uncore_pcu_ops = {
784         SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
785         .hw_config              = snbep_pcu_hw_config,
786         .get_constraint         = snbep_pcu_get_constraint,
787         .put_constraint         = snbep_pcu_put_constraint,
788 };
789
790 static struct intel_uncore_type snbep_uncore_pcu = {
791         .name                   = "pcu",
792         .num_counters           = 4,
793         .num_boxes              = 1,
794         .perf_ctr_bits          = 48,
795         .perf_ctr               = SNBEP_PCU_MSR_PMON_CTR0,
796         .event_ctl              = SNBEP_PCU_MSR_PMON_CTL0,
797         .event_mask             = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
798         .box_ctl                = SNBEP_PCU_MSR_PMON_BOX_CTL,
799         .num_shared_regs        = 1,
800         .ops                    = &snbep_uncore_pcu_ops,
801         .format_group           = &snbep_uncore_pcu_format_group,
802 };
803
804 static struct intel_uncore_type *snbep_msr_uncores[] = {
805         &snbep_uncore_ubox,
806         &snbep_uncore_cbox,
807         &snbep_uncore_pcu,
808         NULL,
809 };
810
811 enum {
812         SNBEP_PCI_QPI_PORT0_FILTER,
813         SNBEP_PCI_QPI_PORT1_FILTER,
814 };
815
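/*
 * QPI event 0x38 uses the match/mask registers that live in a separate
 * "filter" PCI function: hw_config stashes the match value (config1) in
 * extra_reg and the mask value (config2) in branch_reg, and enable_event
 * writes them to the companion device found in extra_pci_dev[].
 */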
816 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
817 {
818         struct hw_perf_event *hwc = &event->hw;
819         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
820         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
821
822         if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
823                 reg1->idx = 0;
824                 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
825                 reg1->config = event->attr.config1;
826                 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
827                 reg2->config = event->attr.config2;
828         }
829         return 0;
830 }
831
832 static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
833 {
834         struct pci_dev *pdev = box->pci_dev;
835         struct hw_perf_event *hwc = &event->hw;
836         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
837         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
838
839         if (reg1->idx != EXTRA_REG_NONE) {
840                 int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
841                 struct pci_dev *filter_pdev = extra_pci_dev[box->phys_id][idx];
842                 WARN_ON_ONCE(!filter_pdev);
843                 if (filter_pdev) {
844                         pci_write_config_dword(filter_pdev, reg1->reg,
845                                                 (u32)reg1->config);
846                         pci_write_config_dword(filter_pdev, reg1->reg + 4,
847                                                 (u32)(reg1->config >> 32));
848                         pci_write_config_dword(filter_pdev, reg2->reg,
849                                                 (u32)reg2->config);
850                         pci_write_config_dword(filter_pdev, reg2->reg + 4,
851                                                 (u32)(reg2->config >> 32));
852                 }
853         }
854
855         pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
856 }
857
858 static struct intel_uncore_ops snbep_uncore_qpi_ops = {
859         SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
860         .enable_event           = snbep_qpi_enable_event,
861         .hw_config              = snbep_qpi_hw_config,
862         .get_constraint         = uncore_get_constraint,
863         .put_constraint         = uncore_put_constraint,
864 };
865
866 #define SNBEP_UNCORE_PCI_COMMON_INIT()                          \
867         .perf_ctr       = SNBEP_PCI_PMON_CTR0,                  \
868         .event_ctl      = SNBEP_PCI_PMON_CTL0,                  \
869         .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,            \
870         .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,               \
871         .ops            = &snbep_uncore_pci_ops,                \
872         .format_group   = &snbep_uncore_format_group
873
874 static struct intel_uncore_type snbep_uncore_ha = {
875         .name           = "ha",
876         .num_counters   = 4,
877         .num_boxes      = 1,
878         .perf_ctr_bits  = 48,
879         SNBEP_UNCORE_PCI_COMMON_INIT(),
880 };
881
882 static struct intel_uncore_type snbep_uncore_imc = {
883         .name           = "imc",
884         .num_counters   = 4,
885         .num_boxes      = 4,
886         .perf_ctr_bits  = 48,
887         .fixed_ctr_bits = 48,
888         .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
889         .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
890         .event_descs    = snbep_uncore_imc_events,
891         SNBEP_UNCORE_PCI_COMMON_INIT(),
892 };
893
894 static struct intel_uncore_type snbep_uncore_qpi = {
895         .name                   = "qpi",
896         .num_counters           = 4,
897         .num_boxes              = 2,
898         .perf_ctr_bits          = 48,
899         .perf_ctr               = SNBEP_PCI_PMON_CTR0,
900         .event_ctl              = SNBEP_PCI_PMON_CTL0,
901         .event_mask             = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
902         .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
903         .num_shared_regs        = 1,
904         .ops                    = &snbep_uncore_qpi_ops,
905         .event_descs            = snbep_uncore_qpi_events,
906         .format_group           = &snbep_uncore_qpi_format_group,
907 };
908
909
910 static struct intel_uncore_type snbep_uncore_r2pcie = {
911         .name           = "r2pcie",
912         .num_counters   = 4,
913         .num_boxes      = 1,
914         .perf_ctr_bits  = 44,
915         .constraints    = snbep_uncore_r2pcie_constraints,
916         SNBEP_UNCORE_PCI_COMMON_INIT(),
917 };
918
919 static struct intel_uncore_type snbep_uncore_r3qpi = {
920         .name           = "r3qpi",
921         .num_counters   = 3,
922         .num_boxes      = 2,
923         .perf_ctr_bits  = 44,
924         .constraints    = snbep_uncore_r3qpi_constraints,
925         SNBEP_UNCORE_PCI_COMMON_INIT(),
926 };
927
928 enum {
929         SNBEP_PCI_UNCORE_HA,
930         SNBEP_PCI_UNCORE_IMC,
931         SNBEP_PCI_UNCORE_QPI,
932         SNBEP_PCI_UNCORE_R2PCIE,
933         SNBEP_PCI_UNCORE_R3QPI,
934 };
935
936 static struct intel_uncore_type *snbep_pci_uncores[] = {
937         [SNBEP_PCI_UNCORE_HA]           = &snbep_uncore_ha,
938         [SNBEP_PCI_UNCORE_IMC]          = &snbep_uncore_imc,
939         [SNBEP_PCI_UNCORE_QPI]          = &snbep_uncore_qpi,
940         [SNBEP_PCI_UNCORE_R2PCIE]       = &snbep_uncore_r2pcie,
941         [SNBEP_PCI_UNCORE_R3QPI]        = &snbep_uncore_r3qpi,
942         NULL,
943 };
944
945 static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
946         { /* Home Agent */
947                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
948                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
949         },
950         { /* MC Channel 0 */
951                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
952                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
953         },
954         { /* MC Channel 1 */
955                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
956                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
957         },
958         { /* MC Channel 2 */
959                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
960                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
961         },
962         { /* MC Channel 3 */
963                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
964                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
965         },
966         { /* QPI Port 0 */
967                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
968                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
969         },
970         { /* QPI Port 1 */
971                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
972                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
973         },
974         { /* R2PCIe */
975                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
976                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
977         },
978         { /* R3QPI Link 0 */
979                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
980                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
981         },
982         { /* R3QPI Link 1 */
983                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
984                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
985         },
986         { /* QPI Port 0 filter  */
987                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
988                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
989                                                    SNBEP_PCI_QPI_PORT0_FILTER),
990         },
991         { /* QPI Port 1 filter */
992                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
993                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
994                                                    SNBEP_PCI_QPI_PORT1_FILTER),
995         },
996         { /* end: all zeroes */ }
997 };
998
999 static struct pci_driver snbep_uncore_pci_driver = {
1000         .name           = "snbep_uncore",
1001         .id_table       = snbep_uncore_pci_ids,
1002 };
1003
1004 /*
1005  * build pci bus to socket mapping
1006  */
1007 static int snbep_pci2phy_map_init(int devid)
1008 {
1009         struct pci_dev *ubox_dev = NULL;
1010         int i, bus, nodeid;
1011         int err = 0;
1012         u32 config = 0;
1013
1014         while (1) {
1015                 /* find the UBOX device */
1016                 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
1017                 if (!ubox_dev)
1018                         break;
1019                 bus = ubox_dev->bus->number;
1020                 /* get the Node ID of the local register */
1021                 err = pci_read_config_dword(ubox_dev, 0x40, &config);
1022                 if (err)
1023                         break;
1024                 nodeid = config;
1025                 /* get the Node ID mapping */
1026                 err = pci_read_config_dword(ubox_dev, 0x54, &config);
1027                 if (err)
1028                         break;
1029                 /*
1030                  * each three-bit field in the Node ID mapping register maps
1031                  * to a particular node.
1032                  */
1033                 for (i = 0; i < 8; i++) {
1034                         if (nodeid == ((config >> (3 * i)) & 0x7)) {
1035                                 pcibus_to_physid[bus] = i;
1036                                 break;
1037                         }
1038                 }
1039         }
1040
1041         if (!err) {
1042                 /*
1043                  * For a PCI bus with no UBOX device, reuse the mapping of
1044                  * the next higher-numbered bus that has one.
1045                  */
1046                 i = -1;
1047                 for (bus = 255; bus >= 0; bus--) {
1048                         if (pcibus_to_physid[bus] >= 0)
1049                                 i = pcibus_to_physid[bus];
1050                         else
1051                                 pcibus_to_physid[bus] = i;
1052                 }
1053         }
1054
1055         if (ubox_dev)
1056                 pci_dev_put(ubox_dev);
1057
1058         return err ? pcibios_err_to_errno(err) : 0;
1059 }
1060 /* end of Sandy Bridge-EP uncore support */
1061
1062 /* IvyTown uncore support */
1063 static void ivt_uncore_msr_init_box(struct intel_uncore_box *box)
1064 {
1065         unsigned msr = uncore_msr_box_ctl(box);
1066         if (msr)
1067                 wrmsrl(msr, IVT_PMON_BOX_CTL_INT);
1068 }
1069
1070 static void ivt_uncore_pci_init_box(struct intel_uncore_box *box)
1071 {
1072         struct pci_dev *pdev = box->pci_dev;
1073
1074         pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVT_PMON_BOX_CTL_INT);
1075 }
1076
1077 #define IVT_UNCORE_MSR_OPS_COMMON_INIT()                        \
1078         .init_box       = ivt_uncore_msr_init_box,              \
1079         .disable_box    = snbep_uncore_msr_disable_box,         \
1080         .enable_box     = snbep_uncore_msr_enable_box,          \
1081         .disable_event  = snbep_uncore_msr_disable_event,       \
1082         .enable_event   = snbep_uncore_msr_enable_event,        \
1083         .read_counter   = uncore_msr_read_counter
1084
1085 static struct intel_uncore_ops ivt_uncore_msr_ops = {
1086         IVT_UNCORE_MSR_OPS_COMMON_INIT(),
1087 };
1088
1089 static struct intel_uncore_ops ivt_uncore_pci_ops = {
1090         .init_box       = ivt_uncore_pci_init_box,
1091         .disable_box    = snbep_uncore_pci_disable_box,
1092         .enable_box     = snbep_uncore_pci_enable_box,
1093         .disable_event  = snbep_uncore_pci_disable_event,
1094         .enable_event   = snbep_uncore_pci_enable_event,
1095         .read_counter   = snbep_uncore_pci_read_counter,
1096 };
1097
1098 #define IVT_UNCORE_PCI_COMMON_INIT()                            \
1099         .perf_ctr       = SNBEP_PCI_PMON_CTR0,                  \
1100         .event_ctl      = SNBEP_PCI_PMON_CTL0,                  \
1101         .event_mask     = IVT_PMON_RAW_EVENT_MASK,              \
1102         .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,               \
1103         .ops            = &ivt_uncore_pci_ops,                  \
1104         .format_group   = &ivt_uncore_format_group
1105
1106 static struct attribute *ivt_uncore_formats_attr[] = {
1107         &format_attr_event.attr,
1108         &format_attr_umask.attr,
1109         &format_attr_edge.attr,
1110         &format_attr_inv.attr,
1111         &format_attr_thresh8.attr,
1112         NULL,
1113 };
1114
1115 static struct attribute *ivt_uncore_ubox_formats_attr[] = {
1116         &format_attr_event.attr,
1117         &format_attr_umask.attr,
1118         &format_attr_edge.attr,
1119         &format_attr_inv.attr,
1120         &format_attr_thresh5.attr,
1121         NULL,
1122 };
1123
1124 static struct attribute *ivt_uncore_cbox_formats_attr[] = {
1125         &format_attr_event.attr,
1126         &format_attr_umask.attr,
1127         &format_attr_edge.attr,
1128         &format_attr_tid_en.attr,
1129         &format_attr_thresh8.attr,
1130         &format_attr_filter_tid.attr,
1131         &format_attr_filter_link.attr,
1132         &format_attr_filter_state2.attr,
1133         &format_attr_filter_nid2.attr,
1134         &format_attr_filter_opc2.attr,
1135         NULL,
1136 };
1137
1138 static struct attribute *ivt_uncore_pcu_formats_attr[] = {
1139         &format_attr_event_ext.attr,
1140         &format_attr_occ_sel.attr,
1141         &format_attr_edge.attr,
1142         &format_attr_thresh5.attr,
1143         &format_attr_occ_invert.attr,
1144         &format_attr_occ_edge.attr,
1145         &format_attr_filter_band0.attr,
1146         &format_attr_filter_band1.attr,
1147         &format_attr_filter_band2.attr,
1148         &format_attr_filter_band3.attr,
1149         NULL,
1150 };
1151
1152 static struct attribute *ivt_uncore_qpi_formats_attr[] = {
1153         &format_attr_event_ext.attr,
1154         &format_attr_umask.attr,
1155         &format_attr_edge.attr,
1156         &format_attr_thresh8.attr,
1157         &format_attr_match_rds.attr,
1158         &format_attr_match_rnid30.attr,
1159         &format_attr_match_rnid4.attr,
1160         &format_attr_match_dnid.attr,
1161         &format_attr_match_mc.attr,
1162         &format_attr_match_opc.attr,
1163         &format_attr_match_vnw.attr,
1164         &format_attr_match0.attr,
1165         &format_attr_match1.attr,
1166         &format_attr_mask_rds.attr,
1167         &format_attr_mask_rnid30.attr,
1168         &format_attr_mask_rnid4.attr,
1169         &format_attr_mask_dnid.attr,
1170         &format_attr_mask_mc.attr,
1171         &format_attr_mask_opc.attr,
1172         &format_attr_mask_vnw.attr,
1173         &format_attr_mask0.attr,
1174         &format_attr_mask1.attr,
1175         NULL,
1176 };
1177
1178 static struct attribute_group ivt_uncore_format_group = {
1179         .name = "format",
1180         .attrs = ivt_uncore_formats_attr,
1181 };
1182
1183 static struct attribute_group ivt_uncore_ubox_format_group = {
1184         .name = "format",
1185         .attrs = ivt_uncore_ubox_formats_attr,
1186 };
1187
1188 static struct attribute_group ivt_uncore_cbox_format_group = {
1189         .name = "format",
1190         .attrs = ivt_uncore_cbox_formats_attr,
1191 };
1192
1193 static struct attribute_group ivt_uncore_pcu_format_group = {
1194         .name = "format",
1195         .attrs = ivt_uncore_pcu_formats_attr,
1196 };
1197
1198 static struct attribute_group ivt_uncore_qpi_format_group = {
1199         .name = "format",
1200         .attrs = ivt_uncore_qpi_formats_attr,
1201 };
1202
1203 static struct intel_uncore_type ivt_uncore_ubox = {
1204         .name           = "ubox",
1205         .num_counters   = 2,
1206         .num_boxes      = 1,
1207         .perf_ctr_bits  = 44,
1208         .fixed_ctr_bits = 48,
1209         .perf_ctr       = SNBEP_U_MSR_PMON_CTR0,
1210         .event_ctl      = SNBEP_U_MSR_PMON_CTL0,
1211         .event_mask     = IVT_U_MSR_PMON_RAW_EVENT_MASK,
1212         .fixed_ctr      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
1213         .fixed_ctl      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
1214         .ops            = &ivt_uncore_msr_ops,
1215         .format_group   = &ivt_uncore_ubox_format_group,
1216 };
1217
1218 static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
1219         SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1220                                   SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1221         SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1222         SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1223         SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1224         SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1225         SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1226         SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1227         SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1228         SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1229         SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1230         SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1231         SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1232         SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1233         SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1234         SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1235         SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1236         SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1237         SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1238         SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1239         SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1240         SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1241         SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1242         SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1243         SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1244         SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1245         SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1246         SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1247         SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1248         SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1249         SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1250         SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1251         EVENT_EXTRA_END
1252 };
1253
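/*
 * IvyTown Cboxes add a link filter field; map the requested field bits to
 * the corresponding bits of the box filter register.
 */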
1254 static u64 ivt_cbox_filter_mask(int fields)
1255 {
1256         u64 mask = 0;
1257
1258         if (fields & 0x1)
1259                 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_TID;
1260         if (fields & 0x2)
1261                 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_LINK;
1262         if (fields & 0x4)
1263                 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_STATE;
1264         if (fields & 0x8)
1265                 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_NID;
1266         if (fields & 0x10)
1267                 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_OPC;
1268
1269         return mask;
1270 }
1271
1272 static struct event_constraint *
1273 ivt_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1274 {
1275         return __snbep_cbox_get_constraint(box, event, ivt_cbox_filter_mask);
1276 }
1277
1278 static int ivt_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1279 {
1280         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1281         struct extra_reg *er;
1282         int idx = 0;
1283
1284         for (er = ivt_uncore_cbox_extra_regs; er->msr; er++) {
1285                 if (er->event != (event->hw.config & er->config_mask))
1286                         continue;
1287                 idx |= er->idx;
1288         }
1289
1290         if (idx) {
1291                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1292                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1293                 reg1->config = event->attr.config1 & ivt_cbox_filter_mask(idx);
1294                 reg1->idx = idx;
1295         }
1296         return 0;
1297 }
1298
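/*
 * The IvyTown Cbox filter value is 64 bits wide and split across two MSRs,
 * so both halves are written before the event is enabled.
 */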
1299 static void ivt_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1300 {
1301         struct hw_perf_event *hwc = &event->hw;
1302         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1303
1304         if (reg1->idx != EXTRA_REG_NONE) {
1305                 u64 filter = uncore_shared_reg_config(box, 0);
1306                 wrmsrl(reg1->reg, filter & 0xffffffff);
1307                 wrmsrl(reg1->reg + 6, filter >> 32);
1308         }
1309
1310         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1311 }
1312
1313 static struct intel_uncore_ops ivt_uncore_cbox_ops = {
1314         .init_box               = ivt_uncore_msr_init_box,
1315         .disable_box            = snbep_uncore_msr_disable_box,
1316         .enable_box             = snbep_uncore_msr_enable_box,
1317         .disable_event          = snbep_uncore_msr_disable_event,
1318         .enable_event           = ivt_cbox_enable_event,
1319         .read_counter           = uncore_msr_read_counter,
1320         .hw_config              = ivt_cbox_hw_config,
1321         .get_constraint         = ivt_cbox_get_constraint,
1322         .put_constraint         = snbep_cbox_put_constraint,
1323 };
1324
1325 static struct intel_uncore_type ivt_uncore_cbox = {
1326         .name                   = "cbox",
1327         .num_counters           = 4,
1328         .num_boxes              = 15,
1329         .perf_ctr_bits          = 44,
1330         .event_ctl              = SNBEP_C0_MSR_PMON_CTL0,
1331         .perf_ctr               = SNBEP_C0_MSR_PMON_CTR0,
1332         .event_mask             = IVT_CBO_MSR_PMON_RAW_EVENT_MASK,
1333         .box_ctl                = SNBEP_C0_MSR_PMON_BOX_CTL,
1334         .msr_offset             = SNBEP_CBO_MSR_OFFSET,
1335         .num_shared_regs        = 1,
1336         .constraints            = snbep_uncore_cbox_constraints,
1337         .ops                    = &ivt_uncore_cbox_ops,
1338         .format_group           = &ivt_uncore_cbox_format_group,
1339 };
1340
1341 static struct intel_uncore_ops ivt_uncore_pcu_ops = {
1342         IVT_UNCORE_MSR_OPS_COMMON_INIT(),
1343         .hw_config              = snbep_pcu_hw_config,
1344         .get_constraint         = snbep_pcu_get_constraint,
1345         .put_constraint         = snbep_pcu_put_constraint,
1346 };
1347
1348 static struct intel_uncore_type ivt_uncore_pcu = {
1349         .name                   = "pcu",
1350         .num_counters           = 4,
1351         .num_boxes              = 1,
1352         .perf_ctr_bits          = 48,
1353         .perf_ctr               = SNBEP_PCU_MSR_PMON_CTR0,
1354         .event_ctl              = SNBEP_PCU_MSR_PMON_CTL0,
1355         .event_mask             = IVT_PCU_MSR_PMON_RAW_EVENT_MASK,
1356         .box_ctl                = SNBEP_PCU_MSR_PMON_BOX_CTL,
1357         .num_shared_regs        = 1,
1358         .ops                    = &ivt_uncore_pcu_ops,
1359         .format_group           = &ivt_uncore_pcu_format_group,
1360 };
1361
1362 static struct intel_uncore_type *ivt_msr_uncores[] = {
1363         &ivt_uncore_ubox,
1364         &ivt_uncore_cbox,
1365         &ivt_uncore_pcu,
1366         NULL,
1367 };
1368
1369 static struct intel_uncore_type ivt_uncore_ha = {
1370         .name           = "ha",
1371         .num_counters   = 4,
1372         .num_boxes      = 2,
1373         .perf_ctr_bits  = 48,
1374         IVT_UNCORE_PCI_COMMON_INIT(),
1375 };
1376
1377 static struct intel_uncore_type ivt_uncore_imc = {
1378         .name           = "imc",
1379         .num_counters   = 4,
1380         .num_boxes      = 8,
1381         .perf_ctr_bits  = 48,
1382         .fixed_ctr_bits = 48,
1383         .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1384         .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1385         IVT_UNCORE_PCI_COMMON_INIT(),
1386 };
1387
1388 /* registers in IRP boxes are not properly aligned */
1389 static unsigned ivt_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
1390 static unsigned ivt_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
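/*
 * Because the offsets above are irregular, the IRP enable/disable/read
 * helpers below index these tables with hwc->idx rather than computing
 * "base + idx * stride"; e.g. counter 2 is read at config offset 0xb8
 * while its control register lives at 0xe0.
 */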
1391
1392 static void ivt_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1393 {
1394         struct pci_dev *pdev = box->pci_dev;
1395         struct hw_perf_event *hwc = &event->hw;
1396
1397         pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx],
1398                                hwc->config | SNBEP_PMON_CTL_EN);
1399 }
1400
1401 static void ivt_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1402 {
1403         struct pci_dev *pdev = box->pci_dev;
1404         struct hw_perf_event *hwc = &event->hw;
1405
1406         pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx], hwc->config);
1407 }
1408
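/*
 * Assemble the counter value from two 32-bit config-space reads: the
 * first read fills the low dword of "count", the second fills the high
 * dword.  Only the low 48 bits are meaningful, as perf_ctr_bits is 48
 * for the IRP box.
 */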
1409 static u64 ivt_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1410 {
1411         struct pci_dev *pdev = box->pci_dev;
1412         struct hw_perf_event *hwc = &event->hw;
1413         u64 count = 0;
1414
1415         pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1416         pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1417
1418         return count;
1419 }
1420
1421 static struct intel_uncore_ops ivt_uncore_irp_ops = {
1422         .init_box       = ivt_uncore_pci_init_box,
1423         .disable_box    = snbep_uncore_pci_disable_box,
1424         .enable_box     = snbep_uncore_pci_enable_box,
1425         .disable_event  = ivt_uncore_irp_disable_event,
1426         .enable_event   = ivt_uncore_irp_enable_event,
1427         .read_counter   = ivt_uncore_irp_read_counter,
1428 };
1429
1430 static struct intel_uncore_type ivt_uncore_irp = {
1431         .name                   = "irp",
1432         .num_counters           = 4,
1433         .num_boxes              = 1,
1434         .perf_ctr_bits          = 48,
1435         .event_mask             = IVT_PMON_RAW_EVENT_MASK,
1436         .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
1437         .ops                    = &ivt_uncore_irp_ops,
1438         .format_group           = &ivt_uncore_format_group,
1439 };
1440
1441 static struct intel_uncore_ops ivt_uncore_qpi_ops = {
1442         .init_box       = ivt_uncore_pci_init_box,
1443         .disable_box    = snbep_uncore_pci_disable_box,
1444         .enable_box     = snbep_uncore_pci_enable_box,
1445         .disable_event  = snbep_uncore_pci_disable_event,
1446         .enable_event   = snbep_qpi_enable_event,
1447         .read_counter   = snbep_uncore_pci_read_counter,
1448         .hw_config      = snbep_qpi_hw_config,
1449         .get_constraint = uncore_get_constraint,
1450         .put_constraint = uncore_put_constraint,
1451 };
1452
1453 static struct intel_uncore_type ivt_uncore_qpi = {
1454         .name                   = "qpi",
1455         .num_counters           = 4,
1456         .num_boxes              = 3,
1457         .perf_ctr_bits          = 48,
1458         .perf_ctr               = SNBEP_PCI_PMON_CTR0,
1459         .event_ctl              = SNBEP_PCI_PMON_CTL0,
1460         .event_mask             = IVT_QPI_PCI_PMON_RAW_EVENT_MASK,
1461         .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
1462         .num_shared_regs        = 1,
1463         .ops                    = &ivt_uncore_qpi_ops,
1464         .format_group           = &ivt_uncore_qpi_format_group,
1465 };
1466
1467 static struct intel_uncore_type ivt_uncore_r2pcie = {
1468         .name           = "r2pcie",
1469         .num_counters   = 4,
1470         .num_boxes      = 1,
1471         .perf_ctr_bits  = 44,
1472         .constraints    = snbep_uncore_r2pcie_constraints,
1473         IVT_UNCORE_PCI_COMMON_INIT(),
1474 };
1475
1476 static struct intel_uncore_type ivt_uncore_r3qpi = {
1477         .name           = "r3qpi",
1478         .num_counters   = 3,
1479         .num_boxes      = 2,
1480         .perf_ctr_bits  = 44,
1481         .constraints    = snbep_uncore_r3qpi_constraints,
1482         IVT_UNCORE_PCI_COMMON_INIT(),
1483 };
1484
1485 enum {
1486         IVT_PCI_UNCORE_HA,
1487         IVT_PCI_UNCORE_IMC,
1488         IVT_PCI_UNCORE_IRP,
1489         IVT_PCI_UNCORE_QPI,
1490         IVT_PCI_UNCORE_R2PCIE,
1491         IVT_PCI_UNCORE_R3QPI,
1492 };
1493
1494 static struct intel_uncore_type *ivt_pci_uncores[] = {
1495         [IVT_PCI_UNCORE_HA]     = &ivt_uncore_ha,
1496         [IVT_PCI_UNCORE_IMC]    = &ivt_uncore_imc,
1497         [IVT_PCI_UNCORE_IRP]    = &ivt_uncore_irp,
1498         [IVT_PCI_UNCORE_QPI]    = &ivt_uncore_qpi,
1499         [IVT_PCI_UNCORE_R2PCIE] = &ivt_uncore_r2pcie,
1500         [IVT_PCI_UNCORE_R3QPI]  = &ivt_uncore_r3qpi,
1501         NULL,
1502 };
1503
1504 static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = {
1505         { /* Home Agent 0 */
1506                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1507                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 0),
1508         },
1509         { /* Home Agent 1 */
1510                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1511                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 1),
1512         },
1513         { /* MC0 Channel 0 */
1514                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
1515                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 0),
1516         },
1517         { /* MC0 Channel 1 */
1518                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
1519                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 1),
1520         },
1521         { /* MC0 Channel 3 */
1522                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
1523                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 2),
1524         },
1525         { /* MC0 Channel 4 */
1526                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
1527                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 3),
1528         },
1529         { /* MC1 Channel 0 */
1530                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
1531                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 4),
1532         },
1533         { /* MC1 Channel 1 */
1534                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
1535                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 5),
1536         },
1537         { /* MC1 Channel 3 */
1538                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
1539                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 6),
1540         },
1541         { /* MC1 Channel 4 */
1542                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
1543                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 7),
1544         },
1545         { /* IRP */
1546                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
1547                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IRP, 0),
1548         },
1549         { /* QPI0 Port 0 */
1550                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
1551                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 0),
1552         },
1553         { /* QPI0 Port 1 */
1554                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
1555                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 1),
1556         },
1557         { /* QPI1 Port 2 */
1558                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
1559                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 2),
1560         },
1561         { /* R2PCIe */
1562                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
1563                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R2PCIE, 0),
1564         },
1565         { /* R3QPI0 Link 0 */
1566                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
1567                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 0),
1568         },
1569         { /* R3QPI0 Link 1 */
1570                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
1571                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 1),
1572         },
1573         { /* R3QPI1 Link 2 */
1574                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
1575                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 2),
1576         },
1577         { /* QPI Port 0 filter  */
1578                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
1579                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1580                                                    SNBEP_PCI_QPI_PORT0_FILTER),
1581         },
1582         { /* QPI Port 0 filter  */
1583                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
1584                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1585                                                    SNBEP_PCI_QPI_PORT1_FILTER),
1586         },
1587         { /* end: all zeroes */ }
1588 };
1589
1590 static struct pci_driver ivt_uncore_pci_driver = {
1591         .name           = "ivt_uncore",
1592         .id_table       = ivt_uncore_pci_ids,
1593 };
1594 /* end of IvyTown uncore support */
1595
1596 /* Sandy Bridge uncore support */
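/*
 * General-purpose counters get their full event select written along
 * with the enable bit; the fixed counter has no event select field, so
 * only the enable bit is written to its control MSR.
 */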
1597 static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1598 {
1599         struct hw_perf_event *hwc = &event->hw;
1600
1601         if (hwc->idx < UNCORE_PMC_IDX_FIXED)
1602                 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
1603         else
1604                 wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
1605 }
1606
1607 static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1608 {
1609         wrmsrl(event->hw.config_base, 0);
1610 }
1611
1612 static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
1613 {
1614         if (box->pmu->pmu_idx == 0) {
1615                 wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
1616                         SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
1617         }
1618 }
1619
1620 static struct uncore_event_desc snb_uncore_events[] = {
1621         INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
1622         { /* end: all zeroes */ },
1623 };
1624
1625 static struct attribute *snb_uncore_formats_attr[] = {
1626         &format_attr_event.attr,
1627         &format_attr_umask.attr,
1628         &format_attr_edge.attr,
1629         &format_attr_inv.attr,
1630         &format_attr_cmask5.attr,
1631         NULL,
1632 };
1633
1634 static struct attribute_group snb_uncore_format_group = {
1635         .name           = "format",
1636         .attrs          = snb_uncore_formats_attr,
1637 };
1638
1639 static struct intel_uncore_ops snb_uncore_msr_ops = {
1640         .init_box       = snb_uncore_msr_init_box,
1641         .disable_event  = snb_uncore_msr_disable_event,
1642         .enable_event   = snb_uncore_msr_enable_event,
1643         .read_counter   = uncore_msr_read_counter,
1644 };
1645
1646 static struct event_constraint snb_uncore_cbox_constraints[] = {
1647         UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
1648         UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
1649         EVENT_CONSTRAINT_END
1650 };
1651
1652 static struct intel_uncore_type snb_uncore_cbox = {
1653         .name           = "cbox",
1654         .num_counters   = 2,
1655         .num_boxes      = 4,
1656         .perf_ctr_bits  = 44,
1657         .fixed_ctr_bits = 48,
1658         .perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
1659         .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
1660         .fixed_ctr      = SNB_UNC_FIXED_CTR,
1661         .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
1662         .single_fixed   = 1,
1663         .event_mask     = SNB_UNC_RAW_EVENT_MASK,
1664         .msr_offset     = SNB_UNC_CBO_MSR_OFFSET,
1665         .constraints    = snb_uncore_cbox_constraints,
1666         .ops            = &snb_uncore_msr_ops,
1667         .format_group   = &snb_uncore_format_group,
1668         .event_descs    = snb_uncore_events,
1669 };
1670
1671 static struct intel_uncore_type *snb_msr_uncores[] = {
1672         &snb_uncore_cbox,
1673         NULL,
1674 };
1675
1676 enum {
1677         SNB_PCI_UNCORE_IMC,
1678 };
1679
1680 static struct uncore_event_desc snb_uncore_imc_events[] = {
1681         INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
1682         INTEL_UNCORE_EVENT_DESC(data_reads.scale, "64"),
1683         INTEL_UNCORE_EVENT_DESC(data_reads.unit, "bytes"),
1684
1685         INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
1686         INTEL_UNCORE_EVENT_DESC(data_writes.scale, "64"),
1687         INTEL_UNCORE_EVENT_DESC(data_writes.unit, "bytes"),
1688
1689         { /* end: all zeroes */ },
1690 };
1691
1692 #define SNB_UNCORE_PCI_IMC_EVENT_MASK           0xff
1693 #define SNB_UNCORE_PCI_IMC_BAR_OFFSET           0x48
1694
1695 /* page size multiple covering all config regs */
1696 #define SNB_UNCORE_PCI_IMC_MAP_SIZE             0x6000
1697
1698 #define SNB_UNCORE_PCI_IMC_DATA_READS           0x1
1699 #define SNB_UNCORE_PCI_IMC_DATA_READS_BASE      0x5050
1700 #define SNB_UNCORE_PCI_IMC_DATA_WRITES          0x2
1701 #define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE     0x5054
1702 #define SNB_UNCORE_PCI_IMC_CTR_BASE             SNB_UNCORE_PCI_IMC_DATA_READS_BASE
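/*
 * These are free-running 32-bit counters exposed through the MMIO
 * window mapped in snb_uncore_imc_init_box() below: event=0x01 reads
 * the register at SNB_UNCORE_PCI_IMC_DATA_READS_BASE and event=0x02
 * the one at SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE.  From user space the
 * aliases defined above are the usual way in, e.g. (assuming the PMU
 * is registered as "uncore_imc"):
 *
 *   perf stat -a -e uncore_imc/data_reads/,uncore_imc/data_writes/ sleep 1
 *
 * with the 64-byte scale and "bytes" unit coming from
 * snb_uncore_imc_events.
 */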
1703
1704 static struct attribute *snb_uncore_imc_formats_attr[] = {
1705         &format_attr_event.attr,
1706         NULL,
1707 };
1708
1709 static struct attribute_group snb_uncore_imc_format_group = {
1710         .name = "format",
1711         .attrs = snb_uncore_imc_formats_attr,
1712 };
1713
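/*
 * Map the IMC counter window: read the BAR from config space (plus the
 * high dword on 64-bit physical address builds), truncate it to a page
 * boundary and ioremap() SNB_UNCORE_PCI_IMC_MAP_SIZE bytes so that all
 * of the counter offsets above fall inside the mapping.
 */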
1714 static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
1715 {
1716         struct pci_dev *pdev = box->pci_dev;
1717         u32 addr_lo, addr_hi;
1718         resource_size_t addr;
1719
1720         pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET, &addr_lo);
1721         addr = addr_lo;
1722
1723 #ifdef CONFIG_PHYS_ADDR_T_64BIT
1724         pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET+4, &addr_hi);
1725         addr = ((resource_size_t)addr_hi << 32) | addr_lo;
1726 #endif
1727
1728         addr &= ~(PAGE_SIZE - 1);
1729
1730         box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
1731         box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
1732 }
1733
1734 static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
1735 {}
1736
1737 static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
1738 {}
1739
1740 static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1741 {}
1742
1743 static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1744 {}
1745
1746 static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1747 {
1748         struct hw_perf_event *hwc = &event->hw;
1749
1750         return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
1751 }
1752
1753 /*
1754  * Custom event_init() function because we define our own fixed,
1755  * free-running counters, so we do not want to conflict with generic
1756  * uncore logic. This also simplifies processing.
1757  */
1758 static int snb_uncore_imc_event_init(struct perf_event *event)
1759 {
1760         struct intel_uncore_pmu *pmu;
1761         struct intel_uncore_box *box;
1762         struct hw_perf_event *hwc = &event->hw;
1763         u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
1764         int idx, base;
1765
1766         if (event->attr.type != event->pmu->type)
1767                 return -ENOENT;
1768
1769         pmu = uncore_event_to_pmu(event);
1770         /* no device found for this pmu */
1771         if (pmu->func_id < 0)
1772                 return -ENOENT;
1773
1774         /* Sampling not supported yet */
1775         if (hwc->sample_period)
1776                 return -EINVAL;
1777
1778         /* unsupported modes and filters */
1779         if (event->attr.exclude_user   ||
1780             event->attr.exclude_kernel ||
1781             event->attr.exclude_hv     ||
1782             event->attr.exclude_idle   ||
1783             event->attr.exclude_host   ||
1784             event->attr.exclude_guest  ||
1785             event->attr.sample_period) /* no sampling */
1786                 return -EINVAL;
1787
1788         /*
1789          * Place all uncore events for a particular physical package
1790          * onto a single cpu
1791          */
1792         if (event->cpu < 0)
1793                 return -EINVAL;
1794
1795         /* check only supported bits are set */
1796         if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
1797                 return -EINVAL;
1798
1799         box = uncore_pmu_to_box(pmu, event->cpu);
1800         if (!box || box->cpu < 0)
1801                 return -EINVAL;
1802
1803         event->cpu = box->cpu;
1804
1805         event->hw.idx = -1;
1806         event->hw.last_tag = ~0ULL;
1807         event->hw.extra_reg.idx = EXTRA_REG_NONE;
1808         event->hw.branch_reg.idx = EXTRA_REG_NONE;
1809         /*
1810          * check event is known (whitelist, determines counter)
1811          */
1812         switch (cfg) {
1813         case SNB_UNCORE_PCI_IMC_DATA_READS:
1814                 base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
1815                 idx = UNCORE_PMC_IDX_FIXED;
1816                 break;
1817         case SNB_UNCORE_PCI_IMC_DATA_WRITES:
1818                 base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
1819                 idx = UNCORE_PMC_IDX_FIXED + 1;
1820                 break;
1821         default:
1822                 return -EINVAL;
1823         }
1824
1825         /* must be done before validate_group */
1826         event->hw.event_base = base;
1827         event->hw.config = cfg;
1828         event->hw.idx = idx;
1829
1830         /* no group validation needed, we have free running counters */
1831
1832         return 0;
1833 }
1834
1835 static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1836 {
1837         return 0;
1838 }
1839
1840 static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
1841 {
1842         struct intel_uncore_box *box = uncore_event_to_box(event);
1843         u64 count;
1844
1845         if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
1846                 return;
1847
1848         event->hw.state = 0;
1849         box->n_active++;
1850
1851         list_add_tail(&event->active_entry, &box->active_list);
1852
1853         count = snb_uncore_imc_read_counter(box, event);
1854         local64_set(&event->hw.prev_count, count);
1855
1856         if (box->n_active == 1)
1857                 uncore_pmu_start_hrtimer(box);
1858 }
1859
1860 static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
1861 {
1862         struct intel_uncore_box *box = uncore_event_to_box(event);
1863         struct hw_perf_event *hwc = &event->hw;
1864
1865         if (!(hwc->state & PERF_HES_STOPPED)) {
1866                 box->n_active--;
1867
1868                 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
1869                 hwc->state |= PERF_HES_STOPPED;
1870
1871                 list_del(&event->active_entry);
1872
1873                 if (box->n_active == 0)
1874                         uncore_pmu_cancel_hrtimer(box);
1875         }
1876
1877         if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
1878                 /*
1879                  * Drain the remaining delta count out of an event
1880                  * that we are disabling:
1881                  */
1882                 uncore_perf_event_update(box, event);
1883                 hwc->state |= PERF_HES_UPTODATE;
1884         }
1885 }
1886
1887 static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
1888 {
1889         struct intel_uncore_box *box = uncore_event_to_box(event);
1890         struct hw_perf_event *hwc = &event->hw;
1891
1892         if (!box)
1893                 return -ENODEV;
1894
1895         hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
1896         if (!(flags & PERF_EF_START))
1897                 hwc->state |= PERF_HES_ARCH;
1898
1899         snb_uncore_imc_event_start(event, 0);
1900
1901         box->n_events++;
1902
1903         return 0;
1904 }
1905
1906 static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
1907 {
1908         struct intel_uncore_box *box = uncore_event_to_box(event);
1909         int i;
1910
1911         snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);
1912
1913         for (i = 0; i < box->n_events; i++) {
1914                 if (event == box->event_list[i]) {
1915                         --box->n_events;
1916                         break;
1917                 }
1918         }
1919 }
1920
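/*
 * Minimal bus -> physical package mapping for client parts: these are
 * single-socket, so whichever bus the IMC device is found on simply
 * maps to package id 0.
 */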
1921 static int snb_pci2phy_map_init(int devid)
1922 {
1923         struct pci_dev *dev = NULL;
1924         int bus;
1925
1926         dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
1927         if (!dev)
1928                 return -ENOTTY;
1929
1930         bus = dev->bus->number;
1931
1932         pcibus_to_physid[bus] = 0;
1933
1934         pci_dev_put(dev);
1935
1936         return 0;
1937 }
1938
1939 static struct pmu snb_uncore_imc_pmu = {
1940         .task_ctx_nr    = perf_invalid_context,
1941         .event_init     = snb_uncore_imc_event_init,
1942         .add            = snb_uncore_imc_event_add,
1943         .del            = snb_uncore_imc_event_del,
1944         .start          = snb_uncore_imc_event_start,
1945         .stop           = snb_uncore_imc_event_stop,
1946         .read           = uncore_pmu_event_read,
1947 };
1948
1949 static struct intel_uncore_ops snb_uncore_imc_ops = {
1950         .init_box       = snb_uncore_imc_init_box,
1951         .enable_box     = snb_uncore_imc_enable_box,
1952         .disable_box    = snb_uncore_imc_disable_box,
1953         .disable_event  = snb_uncore_imc_disable_event,
1954         .enable_event   = snb_uncore_imc_enable_event,
1955         .hw_config      = snb_uncore_imc_hw_config,
1956         .read_counter   = snb_uncore_imc_read_counter,
1957 };
1958
1959 static struct intel_uncore_type snb_uncore_imc = {
1960         .name           = "imc",
1961         .num_counters   = 2,
1962         .num_boxes      = 1,
1963         .fixed_ctr_bits = 32,
1964         .fixed_ctr      = SNB_UNCORE_PCI_IMC_CTR_BASE,
1965         .event_descs    = snb_uncore_imc_events,
1966         .format_group   = &snb_uncore_imc_format_group,
1967         .perf_ctr       = SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
1968         .event_mask     = SNB_UNCORE_PCI_IMC_EVENT_MASK,
1969         .ops            = &snb_uncore_imc_ops,
1970         .pmu            = &snb_uncore_imc_pmu,
1971 };
1972
1973 static struct intel_uncore_type *snb_pci_uncores[] = {
1974         [SNB_PCI_UNCORE_IMC]    = &snb_uncore_imc,
1975         NULL,
1976 };
1977
1978 static DEFINE_PCI_DEVICE_TABLE(snb_uncore_pci_ids) = {
1979         { /* IMC */
1980                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
1981                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1982         },
1983 };
1984
1985 static DEFINE_PCI_DEVICE_TABLE(ivb_uncore_pci_ids) = {
1986         { /* IMC */
1987                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
1988                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1989         },
1990 };
1991
1992 static DEFINE_PCI_DEVICE_TABLE(hsw_uncore_pci_ids) = {
1993         { /* IMC */
1994                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
1995                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1996         },
1997 };
1998
1999 static struct pci_driver snb_uncore_pci_driver = {
2000         .name           = "snb_uncore",
2001         .id_table       = snb_uncore_pci_ids,
2002 };
2003
2004 static struct pci_driver ivb_uncore_pci_driver = {
2005         .name           = "ivb_uncore",
2006         .id_table       = ivb_uncore_pci_ids,
2007 };
2008
2009 static struct pci_driver hsw_uncore_pci_driver = {
2010         .name           = "hsw_uncore",
2011         .id_table       = hsw_uncore_pci_ids,
2012 };
2013
2014 /* end of Sandy Bridge uncore support */
2015
2016 /* Nehalem uncore support */
2017 static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
2018 {
2019         wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
2020 }
2021
2022 static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
2023 {
2024         wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
2025 }
2026
2027 static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2028 {
2029         struct hw_perf_event *hwc = &event->hw;
2030
2031         if (hwc->idx < UNCORE_PMC_IDX_FIXED)
2032                 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
2033         else
2034                 wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
2035 }
2036
2037 static struct attribute *nhm_uncore_formats_attr[] = {
2038         &format_attr_event.attr,
2039         &format_attr_umask.attr,
2040         &format_attr_edge.attr,
2041         &format_attr_inv.attr,
2042         &format_attr_cmask8.attr,
2043         NULL,
2044 };
2045
2046 static struct attribute_group nhm_uncore_format_group = {
2047         .name = "format",
2048         .attrs = nhm_uncore_formats_attr,
2049 };
2050
2051 static struct uncore_event_desc nhm_uncore_events[] = {
2052         INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
2053         INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
2054         INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
2055         INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
2056         INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
2057         INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
2058         INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
2059         INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
2060         INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
2061         { /* end: all zeroes */ },
2062 };
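/*
 * These aliases appear under sysfs, so -- assuming the empty .name
 * below makes the PMU register as plain "uncore" -- they can be used
 * directly, e.g.:
 *
 *   perf stat -a -e uncore/qmc_normal_reads_any/,uncore/qmc_writes_full_any/ sleep 1
 */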
2063
2064 static struct intel_uncore_ops nhm_uncore_msr_ops = {
2065         .disable_box    = nhm_uncore_msr_disable_box,
2066         .enable_box     = nhm_uncore_msr_enable_box,
2067         .disable_event  = snb_uncore_msr_disable_event,
2068         .enable_event   = nhm_uncore_msr_enable_event,
2069         .read_counter   = uncore_msr_read_counter,
2070 };
2071
2072 static struct intel_uncore_type nhm_uncore = {
2073         .name           = "",
2074         .num_counters   = 8,
2075         .num_boxes      = 1,
2076         .perf_ctr_bits  = 48,
2077         .fixed_ctr_bits = 48,
2078         .event_ctl      = NHM_UNC_PERFEVTSEL0,
2079         .perf_ctr       = NHM_UNC_UNCORE_PMC0,
2080         .fixed_ctr      = NHM_UNC_FIXED_CTR,
2081         .fixed_ctl      = NHM_UNC_FIXED_CTR_CTRL,
2082         .event_mask     = NHM_UNC_RAW_EVENT_MASK,
2083         .event_descs    = nhm_uncore_events,
2084         .ops            = &nhm_uncore_msr_ops,
2085         .format_group   = &nhm_uncore_format_group,
2086 };
2087
2088 static struct intel_uncore_type *nhm_msr_uncores[] = {
2089         &nhm_uncore,
2090         NULL,
2091 };
2092 /* end of Nehalem uncore support */
2093
2094 /* Nehalem-EX uncore support */
2095 DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
2096 DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
2097 DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
2098 DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
2099
2100 static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
2101 {
2102         wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
2103 }
2104
2105 static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
2106 {
2107         unsigned msr = uncore_msr_box_ctl(box);
2108         u64 config;
2109
2110         if (msr) {
2111                 rdmsrl(msr, config);
2112                 config &= ~((1ULL << uncore_num_counters(box)) - 1);
2113                 /* WBox has a fixed counter */
2114                 if (uncore_msr_fixed_ctl(box))
2115                         config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
2116                 wrmsrl(msr, config);
2117         }
2118 }
2119
2120 static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
2121 {
2122         unsigned msr = uncore_msr_box_ctl(box);
2123         u64 config;
2124
2125         if (msr) {
2126                 rdmsrl(msr, config);
2127                 config |= (1ULL << uncore_num_counters(box)) - 1;
2128                 /* WBox has a fixed counter */
2129                 if (uncore_msr_fixed_ctl(box))
2130                         config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
2131                 wrmsrl(msr, config);
2132         }
2133 }
2134
2135 static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
2136 {
2137         wrmsrl(event->hw.config_base, 0);
2138 }
2139
2140 static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2141 {
2142         struct hw_perf_event *hwc = &event->hw;
2143
2144         if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
2145                 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
2146         else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
2147                 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
2148         else
2149                 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
2150 }
2151
2152 #define NHMEX_UNCORE_OPS_COMMON_INIT()                          \
2153         .init_box       = nhmex_uncore_msr_init_box,            \
2154         .disable_box    = nhmex_uncore_msr_disable_box,         \
2155         .enable_box     = nhmex_uncore_msr_enable_box,          \
2156         .disable_event  = nhmex_uncore_msr_disable_event,       \
2157         .read_counter   = uncore_msr_read_counter
2158
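/*
 * .enable_event is deliberately left out of the common initializer:
 * each NHM-EX box supplies its own enable routine because the enable
 * bit lives in different places (bit 0 vs bit 22 of the control
 * register) and some boxes also have to program match/mask registers
 * on enable.
 */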
2159 static struct intel_uncore_ops nhmex_uncore_ops = {
2160         NHMEX_UNCORE_OPS_COMMON_INIT(),
2161         .enable_event   = nhmex_uncore_msr_enable_event,
2162 };
2163
2164 static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
2165         &format_attr_event.attr,
2166         &format_attr_edge.attr,
2167         NULL,
2168 };
2169
2170 static struct attribute_group nhmex_uncore_ubox_format_group = {
2171         .name           = "format",
2172         .attrs          = nhmex_uncore_ubox_formats_attr,
2173 };
2174
2175 static struct intel_uncore_type nhmex_uncore_ubox = {
2176         .name           = "ubox",
2177         .num_counters   = 1,
2178         .num_boxes      = 1,
2179         .perf_ctr_bits  = 48,
2180         .event_ctl      = NHMEX_U_MSR_PMON_EV_SEL,
2181         .perf_ctr       = NHMEX_U_MSR_PMON_CTR,
2182         .event_mask     = NHMEX_U_PMON_RAW_EVENT_MASK,
2183         .box_ctl        = NHMEX_U_MSR_PMON_GLOBAL_CTL,
2184         .ops            = &nhmex_uncore_ops,
2185         .format_group   = &nhmex_uncore_ubox_format_group
2186 };
2187
2188 static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
2189         &format_attr_event.attr,
2190         &format_attr_umask.attr,
2191         &format_attr_edge.attr,
2192         &format_attr_inv.attr,
2193         &format_attr_thresh8.attr,
2194         NULL,
2195 };
2196
2197 static struct attribute_group nhmex_uncore_cbox_format_group = {
2198         .name = "format",
2199         .attrs = nhmex_uncore_cbox_formats_attr,
2200 };
2201
2202 /* MSR offset for each instance of cbox */
2203 static unsigned nhmex_cbox_msr_offsets[] = {
2204         0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
2205 };
2206
2207 static struct intel_uncore_type nhmex_uncore_cbox = {
2208         .name                   = "cbox",
2209         .num_counters           = 6,
2210         .num_boxes              = 10,
2211         .perf_ctr_bits          = 48,
2212         .event_ctl              = NHMEX_C0_MSR_PMON_EV_SEL0,
2213         .perf_ctr               = NHMEX_C0_MSR_PMON_CTR0,
2214         .event_mask             = NHMEX_PMON_RAW_EVENT_MASK,
2215         .box_ctl                = NHMEX_C0_MSR_PMON_GLOBAL_CTL,
2216         .msr_offsets            = nhmex_cbox_msr_offsets,
2217         .pair_ctr_ctl           = 1,
2218         .ops                    = &nhmex_uncore_ops,
2219         .format_group           = &nhmex_uncore_cbox_format_group
2220 };
2221
2222 static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
2223         INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
2224         { /* end: all zeroes */ },
2225 };
2226
2227 static struct intel_uncore_type nhmex_uncore_wbox = {
2228         .name                   = "wbox",
2229         .num_counters           = 4,
2230         .num_boxes              = 1,
2231         .perf_ctr_bits          = 48,
2232         .event_ctl              = NHMEX_W_MSR_PMON_CNT0,
2233         .perf_ctr               = NHMEX_W_MSR_PMON_EVT_SEL0,
2234         .fixed_ctr              = NHMEX_W_MSR_PMON_FIXED_CTR,
2235         .fixed_ctl              = NHMEX_W_MSR_PMON_FIXED_CTL,
2236         .event_mask             = NHMEX_PMON_RAW_EVENT_MASK,
2237         .box_ctl                = NHMEX_W_MSR_GLOBAL_CTL,
2238         .pair_ctr_ctl           = 1,
2239         .event_descs            = nhmex_uncore_wbox_events,
2240         .ops                    = &nhmex_uncore_ops,
2241         .format_group           = &nhmex_uncore_cbox_format_group
2242 };
2243
2244 static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2245 {
2246         struct hw_perf_event *hwc = &event->hw;
2247         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2248         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2249         int ctr, ev_sel;
2250
2251         ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
2252                 NHMEX_B_PMON_CTR_SHIFT;
2253         ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
2254                   NHMEX_B_PMON_CTL_EV_SEL_SHIFT;
2255
2256         /* events that do not use the match/mask registers */
2257         if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
2258             (ctr == 2 && ev_sel != 0x4) || ctr == 3)
2259                 return 0;
2260
2261         if (box->pmu->pmu_idx == 0)
2262                 reg1->reg = NHMEX_B0_MSR_MATCH;
2263         else
2264                 reg1->reg = NHMEX_B1_MSR_MATCH;
2265         reg1->idx = 0;
2266         reg1->config = event->attr.config1;
2267         reg2->config = event->attr.config2;
2268         return 0;
2269 }
2270
2271 static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2272 {
2273         struct hw_perf_event *hwc = &event->hw;
2274         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2275         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2276
2277         if (reg1->idx != EXTRA_REG_NONE) {
2278                 wrmsrl(reg1->reg, reg1->config);
2279                 wrmsrl(reg1->reg + 1, reg2->config);
2280         }
2281         wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
2282                 (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
2283 }
2284
2285 /*
2286  * The Bbox has 4 counters, but each counter monitors different events.
2287  * Use bits 6-7 in the event config to select the counter.
2288  */
2289 static struct event_constraint nhmex_uncore_bbox_constraints[] = {
2290         EVENT_CONSTRAINT(0, 1, 0xc0),
2291         EVENT_CONSTRAINT(0x40, 2, 0xc0),
2292         EVENT_CONSTRAINT(0x80, 4, 0xc0),
2293         EVENT_CONSTRAINT(0xc0, 8, 0xc0),
2294         EVENT_CONSTRAINT_END,
2295 };
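/*
 * Worked example: an event specified with counter=2 (the "counter"
 * format attribute, config bits 6-7) has (config & 0xc0) == 0x80,
 * which the table above constrains to counter mask 0x4, i.e. physical
 * counter 2.
 */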
2296
2297 static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
2298         &format_attr_event5.attr,
2299         &format_attr_counter.attr,
2300         &format_attr_match.attr,
2301         &format_attr_mask.attr,
2302         NULL,
2303 };
2304
2305 static struct attribute_group nhmex_uncore_bbox_format_group = {
2306         .name = "format",
2307         .attrs = nhmex_uncore_bbox_formats_attr,
2308 };
2309
2310 static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
2311         NHMEX_UNCORE_OPS_COMMON_INIT(),
2312         .enable_event           = nhmex_bbox_msr_enable_event,
2313         .hw_config              = nhmex_bbox_hw_config,
2314         .get_constraint         = uncore_get_constraint,
2315         .put_constraint         = uncore_put_constraint,
2316 };
2317
2318 static struct intel_uncore_type nhmex_uncore_bbox = {
2319         .name                   = "bbox",
2320         .num_counters           = 4,
2321         .num_boxes              = 2,
2322         .perf_ctr_bits          = 48,
2323         .event_ctl              = NHMEX_B0_MSR_PMON_CTL0,
2324         .perf_ctr               = NHMEX_B0_MSR_PMON_CTR0,
2325         .event_mask             = NHMEX_B_PMON_RAW_EVENT_MASK,
2326         .box_ctl                = NHMEX_B0_MSR_PMON_GLOBAL_CTL,
2327         .msr_offset             = NHMEX_B_MSR_OFFSET,
2328         .pair_ctr_ctl           = 1,
2329         .num_shared_regs        = 1,
2330         .constraints            = nhmex_uncore_bbox_constraints,
2331         .ops                    = &nhmex_uncore_bbox_ops,
2332         .format_group           = &nhmex_uncore_bbox_format_group
2333 };
2334
2335 static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2336 {
2337         struct hw_perf_event *hwc = &event->hw;
2338         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2339         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2340
2341         /* only the TO_R_PROG_EV event uses the match/mask register */
2342         if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
2343             NHMEX_S_EVENT_TO_R_PROG_EV)
2344                 return 0;
2345
2346         if (box->pmu->pmu_idx == 0)
2347                 reg1->reg = NHMEX_S0_MSR_MM_CFG;
2348         else
2349                 reg1->reg = NHMEX_S1_MSR_MM_CFG;
2350         reg1->idx = 0;
2351         reg1->config = event->attr.config1;
2352         reg2->config = event->attr.config2;
2353         return 0;
2354 }
2355
2356 static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2357 {
2358         struct hw_perf_event *hwc = &event->hw;
2359         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2360         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2361
2362         if (reg1->idx != EXTRA_REG_NONE) {
2363                 wrmsrl(reg1->reg, 0);
2364                 wrmsrl(reg1->reg + 1, reg1->config);
2365                 wrmsrl(reg1->reg + 2, reg2->config);
2366                 wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
2367         }
2368         wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
2369 }
2370
2371 static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
2372         &format_attr_event.attr,
2373         &format_attr_umask.attr,
2374         &format_attr_edge.attr,
2375         &format_attr_inv.attr,
2376         &format_attr_thresh8.attr,
2377         &format_attr_match.attr,
2378         &format_attr_mask.attr,
2379         NULL,
2380 };
2381
2382 static struct attribute_group nhmex_uncore_sbox_format_group = {
2383         .name                   = "format",
2384         .attrs                  = nhmex_uncore_sbox_formats_attr,
2385 };
2386
2387 static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
2388         NHMEX_UNCORE_OPS_COMMON_INIT(),
2389         .enable_event           = nhmex_sbox_msr_enable_event,
2390         .hw_config              = nhmex_sbox_hw_config,
2391         .get_constraint         = uncore_get_constraint,
2392         .put_constraint         = uncore_put_constraint,
2393 };
2394
2395 static struct intel_uncore_type nhmex_uncore_sbox = {
2396         .name                   = "sbox",
2397         .num_counters           = 4,
2398         .num_boxes              = 2,
2399         .perf_ctr_bits          = 48,
2400         .event_ctl              = NHMEX_S0_MSR_PMON_CTL0,
2401         .perf_ctr               = NHMEX_S0_MSR_PMON_CTR0,
2402         .event_mask             = NHMEX_PMON_RAW_EVENT_MASK,
2403         .box_ctl                = NHMEX_S0_MSR_PMON_GLOBAL_CTL,
2404         .msr_offset             = NHMEX_S_MSR_OFFSET,
2405         .pair_ctr_ctl           = 1,
2406         .num_shared_regs        = 1,
2407         .ops                    = &nhmex_uncore_sbox_ops,
2408         .format_group           = &nhmex_uncore_sbox_format_group
2409 };
2410
2411 enum {
2412         EXTRA_REG_NHMEX_M_FILTER,
2413         EXTRA_REG_NHMEX_M_DSP,
2414         EXTRA_REG_NHMEX_M_ISS,
2415         EXTRA_REG_NHMEX_M_MAP,
2416         EXTRA_REG_NHMEX_M_MSC_THR,
2417         EXTRA_REG_NHMEX_M_PGT,
2418         EXTRA_REG_NHMEX_M_PLD,
2419         EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
2420 };
2421
2422 static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
2423         MBOX_INC_SEL_EXTAR_REG(0x0, DSP),
2424         MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR),
2425         MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR),
2426         MBOX_INC_SEL_EXTAR_REG(0x9, ISS),
2427         /* event 0xa uses two extra registers */
2428         MBOX_INC_SEL_EXTAR_REG(0xa, ISS),
2429         MBOX_INC_SEL_EXTAR_REG(0xa, PLD),
2430         MBOX_INC_SEL_EXTAR_REG(0xb, PLD),
2431         /* events 0xd ~ 0x10 use the same extra register */
2432         MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC),
2433         MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC),
2434         MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC),
2435         MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC),
2436         MBOX_INC_SEL_EXTAR_REG(0x16, PGT),
2437         MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
2438         MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
2439         MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
2440         MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
2441         EVENT_EXTRA_END
2442 };
2443
2444 /* Nehalem-EX or Westmere-EX? */
2445 static bool uncore_nhmex;
2446
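/*
 * Reference counting for the shared registers below: ordinary extra
 * registers use er->ref as a plain counter, while the ZDP_CTL_FVC
 * register packs four 8-bit counts into one atomic_t, one byte per FVC
 * field.  __BITS_VALUE(atomic_read(&er->ref), idx, 8) extracts the
 * count for field idx and atomic_add(1 << (idx * 8), &er->ref) bumps
 * it.
 */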
2447 static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
2448 {
2449         struct intel_uncore_extra_reg *er;
2450         unsigned long flags;
2451         bool ret = false;
2452         u64 mask;
2453
2454         if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2455                 er = &box->shared_regs[idx];
2456                 raw_spin_lock_irqsave(&er->lock, flags);
2457                 if (!atomic_read(&er->ref) || er->config == config) {
2458                         atomic_inc(&er->ref);
2459                         er->config = config;
2460                         ret = true;
2461                 }
2462                 raw_spin_unlock_irqrestore(&er->lock, flags);
2463
2464                 return ret;
2465         }
2466         /*
2467          * The ZDP_CTL_FVC MSR has 4 fields which are used to control
2468          * events 0xd ~ 0x10. Besides these 4 fields, there are additional
2469          * fields which are shared.
2470          */
2471         idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2472         if (WARN_ON_ONCE(idx >= 4))
2473                 return false;
2474
2475         /* mask of the shared fields */
2476         if (uncore_nhmex)
2477                 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
2478         else
2479                 mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
2480         er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2481
2482         raw_spin_lock_irqsave(&er->lock, flags);
2483         /* add mask of the non-shared field if it's in use */
2484         if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
2485                 if (uncore_nhmex)
2486                         mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2487                 else
2488                         mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2489         }
2490
2491         if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
2492                 atomic_add(1 << (idx * 8), &er->ref);
2493                 if (uncore_nhmex)
2494                         mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
2495                                 NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2496                 else
2497                         mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
2498                                 WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2499                 er->config &= ~mask;
2500                 er->config |= (config & mask);
2501                 ret = true;
2502         }
2503         raw_spin_unlock_irqrestore(&er->lock, flags);
2504
2505         return ret;
2506 }
2507
2508 static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
2509 {
2510         struct intel_uncore_extra_reg *er;
2511
2512         if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2513                 er = &box->shared_regs[idx];
2514                 atomic_dec(&er->ref);
2515                 return;
2516         }
2517
2518         idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2519         er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2520         atomic_sub(1 << (idx * 8), &er->ref);
2521 }
2522
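/*
 * Helper for migrating an event between the four equivalent FVC
 * fields: each field is 3 bits wide in ZDP_CTL_FVC, so moving from
 * field orig_idx to new_idx shifts the non-shared bits by 3 * |delta|
 * and, when "modify" is set, adjusts the inc_sel part of the main
 * event selector by the same delta so the event keeps selecting the
 * matching sub-event.
 */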
2523 static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
2524 {
2525         struct hw_perf_event *hwc = &event->hw;
2526         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2527         u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
2528         u64 config = reg1->config;
2529
2530         /* get the non-shared control bits and shift them */
2531         idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2532         if (uncore_nhmex)
2533                 config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2534         else
2535                 config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2536         if (new_idx > orig_idx) {
2537                 idx = new_idx - orig_idx;
2538                 config <<= 3 * idx;
2539         } else {
2540                 idx = orig_idx - new_idx;
2541                 config >>= 3 * idx;
2542         }
2543
2544         /* add the shared control bits back */
2545         if (uncore_nhmex)
2546                 config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2547         else
2548                 config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2549         config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2550         if (modify) {
2551                 /* adjust the main event selector */
2552                 if (new_idx > orig_idx)
2553                         hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
2554                 else
2555                         hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
2556                 reg1->config = config;
2557                 reg1->idx = ~0xff | new_idx;
2558         }
2559         return config;
2560 }
2561
2562 static struct event_constraint *
2563 nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2564 {
2565         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2566         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2567         int i, idx[2], alloc = 0;
2568         u64 config1 = reg1->config;
2569
2570         idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
2571         idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
2572 again:
2573         for (i = 0; i < 2; i++) {
2574                 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
2575                         idx[i] = 0xff;
2576
2577                 if (idx[i] == 0xff)
2578                         continue;
2579
2580                 if (!nhmex_mbox_get_shared_reg(box, idx[i],
2581                                 __BITS_VALUE(config1, i, 32)))
2582                         goto fail;
2583                 alloc |= (0x1 << i);
2584         }
2585
2586         /* for the match/mask registers */
2587         if (reg2->idx != EXTRA_REG_NONE &&
2588             (uncore_box_is_fake(box) || !reg2->alloc) &&
2589             !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
2590                 goto fail;
2591
2592         /*
2593          * If it's a fake box -- as per validate_{group,event}() -- we
2594          * shouldn't touch event state, and we can avoid doing so
2595          * since both will only call get_event_constraints() once
2596          * on each event; this avoids the need for reg->alloc.
2597          */
2598         if (!uncore_box_is_fake(box)) {
2599                 if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
2600                         nhmex_mbox_alter_er(event, idx[0], true);
2601                 reg1->alloc |= alloc;
2602                 if (reg2->idx != EXTRA_REG_NONE)
2603                         reg2->alloc = 1;
2604         }
2605         return NULL;
2606 fail:
2607         if (idx[0] != 0xff && !(alloc & 0x1) &&
2608             idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2609                 /*
2610                  * events 0xd ~ 0x10 are functionally identical, but are
2611                  * controlled by different fields in the ZDP_CTL_FVC
2612                  * register. If we failed to take one field, try the
2613                  * other 3 choices.
2614                  */
2615                 BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
2616                 idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2617                 idx[0] = (idx[0] + 1) % 4;
2618                 idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2619                 if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
2620                         config1 = nhmex_mbox_alter_er(event, idx[0], false);
2621                         goto again;
2622                 }
2623         }
2624
2625         if (alloc & 0x1)
2626                 nhmex_mbox_put_shared_reg(box, idx[0]);
2627         if (alloc & 0x2)
2628                 nhmex_mbox_put_shared_reg(box, idx[1]);
2629         return &constraint_empty;
2630 }
2631
2632 static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
2633 {
2634         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2635         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2636
2637         if (uncore_box_is_fake(box))
2638                 return;
2639
2640         if (reg1->alloc & 0x1)
2641                 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
2642         if (reg1->alloc & 0x2)
2643                 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
2644         reg1->alloc = 0;
2645
2646         if (reg2->alloc) {
2647                 nhmex_mbox_put_shared_reg(box, reg2->idx);
2648                 reg2->alloc = 0;
2649         }
2650 }
2651
2652 static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
2653 {
2654         if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
2655                 return er->idx;
2656         return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
2657 }
2658
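/*
 * An mbox event may need up to two extra MSRs, so hw_config() packs
 * two 8-bit extra-register indices into reg1->idx and two 16-bit MSR
 * addresses into reg1->reg (slot 0 in the low bits, slot 1 in the high
 * bits); enable_event() later unpacks them with __BITS_VALUE().
 */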
2659 static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2660 {
2661         struct intel_uncore_type *type = box->pmu->type;
2662         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2663         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2664         struct extra_reg *er;
2665         unsigned msr;
2666         int reg_idx = 0;
2667         /*
2668          * The mbox events may require at most 2 extra MSRs. But only
2669          * the lower 32 bits in these MSRs are significant, so we can use
2670          * config1 to pass two MSRs' config.
2671          */
2672         for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
2673                 if (er->event != (event->hw.config & er->config_mask))
2674                         continue;
2675                 if (event->attr.config1 & ~er->valid_mask)
2676                         return -EINVAL;
2677
2678                 msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
2679                 if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
2680                         return -EINVAL;
2681
2682                 /* always use bits 32~63 to pass the PLD config */
2683                 if (er->idx == EXTRA_REG_NHMEX_M_PLD)
2684                         reg_idx = 1;
2685                 else if (WARN_ON_ONCE(reg_idx > 0))
2686                         return -EINVAL;
2687
2688                 reg1->idx &= ~(0xff << (reg_idx * 8));
2689                 reg1->reg &= ~(0xffff << (reg_idx * 16));
2690                 reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
2691                 reg1->reg |= msr << (reg_idx * 16);
2692                 reg1->config = event->attr.config1;
2693                 reg_idx++;
2694         }
2695         /*
2696          * The mbox only provides the ability to perform address matching
2697          * for the PLD events.
2698          */
2699         if (reg_idx == 2) {
2700                 reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
2701                 if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
2702                         reg2->config = event->attr.config2;
2703                 else
2704                         reg2->config = ~0ULL;
2705                 if (box->pmu->pmu_idx == 0)
2706                         reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
2707                 else
2708                         reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
2709         }
2710         return 0;
2711 }
2712
2713 static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
2714 {
2715         struct intel_uncore_extra_reg *er;
2716         unsigned long flags;
2717         u64 config;
2718
2719         if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
2720                 return box->shared_regs[idx].config;
2721
2722         er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2723         raw_spin_lock_irqsave(&er->lock, flags);
2724         config = er->config;
2725         raw_spin_unlock_irqrestore(&er->lock, flags);
2726         return config;
2727 }
2728
2729 static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2730 {
2731         struct hw_perf_event *hwc = &event->hw;
2732         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2733         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2734         int idx;
2735
2736         idx = __BITS_VALUE(reg1->idx, 0, 8);
2737         if (idx != 0xff)
2738                 wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
2739                         nhmex_mbox_shared_reg_config(box, idx));
2740         idx = __BITS_VALUE(reg1->idx, 1, 8);
2741         if (idx != 0xff)
2742                 wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
2743                         nhmex_mbox_shared_reg_config(box, idx));
2744
2745         if (reg2->idx != EXTRA_REG_NONE) {
2746                 wrmsrl(reg2->reg, 0);
2747                 if (reg2->config != ~0ULL) {
2748                         wrmsrl(reg2->reg + 1,
2749                                 reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
2750                         wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
2751                                 (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
2752                         wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
2753                 }
2754         }
2755
2756         wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
2757 }
2758
2759 DEFINE_UNCORE_FORMAT_ATTR(count_mode,           count_mode,     "config:2-3");
2760 DEFINE_UNCORE_FORMAT_ATTR(storage_mode,         storage_mode,   "config:4-5");
2761 DEFINE_UNCORE_FORMAT_ATTR(wrap_mode,            wrap_mode,      "config:6");
2762 DEFINE_UNCORE_FORMAT_ATTR(flag_mode,            flag_mode,      "config:7");
2763 DEFINE_UNCORE_FORMAT_ATTR(inc_sel,              inc_sel,        "config:9-13");
2764 DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel,         set_flag_sel,   "config:19-21");
2765 DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en,        filter_cfg_en,  "config2:63");
2766 DEFINE_UNCORE_FORMAT_ATTR(filter_match,         filter_match,   "config2:0-33");
2767 DEFINE_UNCORE_FORMAT_ATTR(filter_mask,          filter_mask,    "config2:34-61");
2768 DEFINE_UNCORE_FORMAT_ATTR(dsp,                  dsp,            "config1:0-31");
2769 DEFINE_UNCORE_FORMAT_ATTR(thr,                  thr,            "config1:0-31");
2770 DEFINE_UNCORE_FORMAT_ATTR(fvc,                  fvc,            "config1:0-31");
2771 DEFINE_UNCORE_FORMAT_ATTR(pgt,                  pgt,            "config1:0-31");
2772 DEFINE_UNCORE_FORMAT_ATTR(map,                  map,            "config1:0-31");
2773 DEFINE_UNCORE_FORMAT_ATTR(iss,                  iss,            "config1:0-31");
2774 DEFINE_UNCORE_FORMAT_ATTR(pld,                  pld,            "config1:32-63");
2775
2776 static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
2777         &format_attr_count_mode.attr,
2778         &format_attr_storage_mode.attr,
2779         &format_attr_wrap_mode.attr,
2780         &format_attr_flag_mode.attr,
2781         &format_attr_inc_sel.attr,
2782         &format_attr_set_flag_sel.attr,
2783         &format_attr_filter_cfg_en.attr,
2784         &format_attr_filter_match.attr,
2785         &format_attr_filter_mask.attr,
2786         &format_attr_dsp.attr,
2787         &format_attr_thr.attr,
2788         &format_attr_fvc.attr,
2789         &format_attr_pgt.attr,
2790         &format_attr_map.attr,
2791         &format_attr_iss.attr,
2792         &format_attr_pld.attr,
2793         NULL,
2794 };
2795
2796 static struct attribute_group nhmex_uncore_mbox_format_group = {
2797         .name           = "format",
2798         .attrs          = nhmex_uncore_mbox_formats_attr,
2799 };
2800
2801 static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
2802         INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
2803         INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
2804         { /* end: all zeroes */ },
2805 };
2806
2807 static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
2808         INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
2809         INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
2810         { /* end: all zeroes */ },
2811 };
2812
2813 static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
2814         NHMEX_UNCORE_OPS_COMMON_INIT(),
2815         .enable_event   = nhmex_mbox_msr_enable_event,
2816         .hw_config      = nhmex_mbox_hw_config,
2817         .get_constraint = nhmex_mbox_get_constraint,
2818         .put_constraint = nhmex_mbox_put_constraint,
2819 };
2820
2821 static struct intel_uncore_type nhmex_uncore_mbox = {
2822         .name                   = "mbox",
2823         .num_counters           = 6,
2824         .num_boxes              = 2,
2825         .perf_ctr_bits          = 48,
2826         .event_ctl              = NHMEX_M0_MSR_PMU_CTL0,
2827         .perf_ctr               = NHMEX_M0_MSR_PMU_CNT0,
2828         .event_mask             = NHMEX_M_PMON_RAW_EVENT_MASK,
2829         .box_ctl                = NHMEX_M0_MSR_GLOBAL_CTL,
2830         .msr_offset             = NHMEX_M_MSR_OFFSET,
2831         .pair_ctr_ctl           = 1,
2832         .num_shared_regs        = 8,
2833         .event_descs            = nhmex_uncore_mbox_events,
2834         .ops                    = &nhmex_uncore_mbox_ops,
2835         .format_group           = &nhmex_uncore_mbox_format_group,
2836 };
2837
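/*
 * Switch an rbox event over to its paired alternative: paired events
 * differ only in the low bit of the event selector, so flip that bit
 * and, for the two events that share one extra register (idx 2 and 3
 * within a set), move the config between the two 8-bit fields.
 */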
2838 static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
2839 {
2840         struct hw_perf_event *hwc = &event->hw;
2841         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2842
2843         /* adjust the main event selector and extra register index */
2844         if (reg1->idx % 2) {
2845                 reg1->idx--;
2846                 hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2847         } else {
2848                 reg1->idx++;
2849                 hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2850         }
2851
2852         /* adjust extra register config */
2853         switch (reg1->idx % 6) {
2854         case 2:
2855                 /* shift bits 8~15 down into bits 0~7 */
2856                 reg1->config >>= 8;
2857                 break;
2858         case 3:
2859                 /* shift bits 0~7 up into bits 8~15 */
2860                 reg1->config <<= 8;
2861                 break;
2862         }
2863 }
2864
2865 /*
2866  * Each rbox has 4 event sets which monitor QPI ports 0~3 or 4~7.
2867  * An event set consists of 6 events; the 3rd and 4th events in
2868  * an event set use the same extra register, so an event set uses
2869  * 5 extra registers.
2870  */
2871 static struct event_constraint *
2872 nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2873 {
2874         struct hw_perf_event *hwc = &event->hw;
2875         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2876         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2877         struct intel_uncore_extra_reg *er;
2878         unsigned long flags;
2879         int idx, er_idx;
2880         u64 config1;
2881         bool ok = false;
2882
2883         if (!uncore_box_is_fake(box) && reg1->alloc)
2884                 return NULL;
2885
2886         idx = reg1->idx % 6;
2887         config1 = reg1->config;
2888 again:
2889         er_idx = idx;
2890         /* the 3rd and 4th events use the same extra register */
2891         if (er_idx > 2)
2892                 er_idx--;
2893         er_idx += (reg1->idx / 6) * 5;
2894
2895         er = &box->shared_regs[er_idx];
2896         raw_spin_lock_irqsave(&er->lock, flags);
2897         if (idx < 2) {
2898                 if (!atomic_read(&er->ref) || er->config == reg1->config) {
2899                         atomic_inc(&er->ref);
2900                         er->config = reg1->config;
2901                         ok = true;
2902                 }
2903         } else if (idx == 2 || idx == 3) {
2904                 /*
2905                  * these two events use different fields of an extra register,
2906                  * bits 0~7 and bits 8~15 respectively.
2907                  */
2908                 u64 mask = 0xff << ((idx - 2) * 8);
2909                 if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
2910                                 !((er->config ^ config1) & mask)) {
2911                         atomic_add(1 << ((idx - 2) * 8), &er->ref);
2912                         er->config &= ~mask;
2913                         er->config |= config1 & mask;
2914                         ok = true;
2915                 }
2916         } else {
2917                 if (!atomic_read(&er->ref) ||
2918                                 (er->config == (hwc->config >> 32) &&
2919                                  er->config1 == reg1->config &&
2920                                  er->config2 == reg2->config)) {
2921                         atomic_inc(&er->ref);
2922                         er->config = (hwc->config >> 32);
2923                         er->config1 = reg1->config;
2924                         er->config2 = reg2->config;
2925                         ok = true;
2926                 }
2927         }
2928         raw_spin_unlock_irqrestore(&er->lock, flags);
2929
2930         if (!ok) {
2931                 /*
2932                  * The Rbox events are always in pairs. The paired
2933                  * events are functionally identical, but use different
2934                  * extra registers. If we failed to take an extra
2935                  * register, try the alternative.
2936                  */
2937                 if (idx % 2)
2938                         idx--;
2939                 else
2940                         idx++;
2941                 if (idx != reg1->idx % 6) {
2942                         if (idx == 2)
2943                                 config1 >>= 8;
2944                         else if (idx == 3)
2945                                 config1 <<= 8;
2946                         goto again;
2947                 }
2948         } else {
2949                 if (!uncore_box_is_fake(box)) {
2950                         if (idx != reg1->idx % 6)
2951                                 nhmex_rbox_alter_er(box, event);
2952                         reg1->alloc = 1;
2953                 }
2954                 return NULL;
2955         }
2956         return &constraint_empty;
2957 }
2958
2959 static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
2960 {
2961         struct intel_uncore_extra_reg *er;
2962         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2963         int idx, er_idx;
2964
2965         if (uncore_box_is_fake(box) || !reg1->alloc)
2966                 return;
2967
2968         idx = reg1->idx % 6;
2969         er_idx = idx;
2970         if (er_idx > 2)
2971                 er_idx--;
2972         er_idx += (reg1->idx / 6) * 5;
2973
2974         er = &box->shared_regs[er_idx];
2975         if (idx == 2 || idx == 3)
2976                 atomic_sub(1 << ((idx - 2) * 8), &er->ref);
2977         else
2978                 atomic_dec(&er->ref);
2979
2980         reg1->alloc = 0;
2981 }
2982
2983 static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2984 {
2985         struct hw_perf_event *hwc = &event->hw;
2986         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2987         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2988         int idx;
2989
2990         idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
2991                 NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2992         if (idx >= 0x18)
2993                 return -EINVAL;
2994
2995         reg1->idx = idx;
2996         reg1->config = event->attr.config1;
2997
2998         switch (idx % 6) {
2999         case 4:
3000         case 5:
3001                 hwc->config |= event->attr.config & (~0ULL << 32);
3002                 reg2->config = event->attr.config2;
3003                 break;
3004         }
3005         return 0;
3006 }
3007
3008 static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
3009 {
3010         struct hw_perf_event *hwc = &event->hw;
3011         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
3012         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
3013         int idx, port;
3014
3015         idx = reg1->idx;
3016         port = idx / 6 + box->pmu->pmu_idx * 4;
3017
3018         switch (idx % 6) {
3019         case 0:
3020                 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
3021                 break;
3022         case 1:
3023                 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
3024                 break;
3025         case 2:
3026         case 3:
3027                 wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
3028                         uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
3029                 break;
3030         case 4:
3031                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
3032                         hwc->config >> 32);
3033                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
3034                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
3035                 break;
3036         case 5:
3037                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
3038                         hwc->config >> 32);
3039                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
3040                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
3041                 break;
3042         }
3043
3044         wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
3045                 (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
3046 }
3047
3048 DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
3049 DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
3050 DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
3051 DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
3052 DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
3053
3054 static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
3055         &format_attr_event5.attr,
3056         &format_attr_xbr_mm_cfg.attr,
3057         &format_attr_xbr_match.attr,
3058         &format_attr_xbr_mask.attr,
3059         &format_attr_qlx_cfg.attr,
3060         &format_attr_iperf_cfg.attr,
3061         NULL,
3062 };
3063
3064 static struct attribute_group nhmex_uncore_rbox_format_group = {
3065         .name = "format",
3066         .attrs = nhmex_uncore_rbox_formats_attr,
3067 };
3068
3069 static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
3070         INTEL_UNCORE_EVENT_DESC(qpi0_flit_send,         "event=0x0,iperf_cfg=0x80000000"),
3071         INTEL_UNCORE_EVENT_DESC(qpi1_filt_send,         "event=0x6,iperf_cfg=0x80000000"),
3072         INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt,         "event=0x0,iperf_cfg=0x40000000"),
3073         INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt,         "event=0x6,iperf_cfg=0x40000000"),
3074         INTEL_UNCORE_EVENT_DESC(qpi0_date_response,     "event=0x0,iperf_cfg=0xc4"),
3075         INTEL_UNCORE_EVENT_DESC(qpi1_date_response,     "event=0x6,iperf_cfg=0xc4"),
3076         { /* end: all zeroes */ },
3077 };
3078
3079 static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
3080         NHMEX_UNCORE_OPS_COMMON_INIT(),
3081         .enable_event           = nhmex_rbox_msr_enable_event,
3082         .hw_config              = nhmex_rbox_hw_config,
3083         .get_constraint         = nhmex_rbox_get_constraint,
3084         .put_constraint         = nhmex_rbox_put_constraint,
3085 };
3086
3087 static struct intel_uncore_type nhmex_uncore_rbox = {
3088         .name                   = "rbox",
3089         .num_counters           = 8,
3090         .num_boxes              = 2,
3091         .perf_ctr_bits          = 48,
3092         .event_ctl              = NHMEX_R_MSR_PMON_CTL0,
3093         .perf_ctr               = NHMEX_R_MSR_PMON_CNT0,
3094         .event_mask             = NHMEX_R_PMON_RAW_EVENT_MASK,
3095         .box_ctl                = NHMEX_R_MSR_GLOBAL_CTL,
3096         .msr_offset             = NHMEX_R_MSR_OFFSET,
3097         .pair_ctr_ctl           = 1,
3098         .num_shared_regs        = 20,
3099         .event_descs            = nhmex_uncore_rbox_events,
3100         .ops                    = &nhmex_uncore_rbox_ops,
3101         .format_group           = &nhmex_uncore_rbox_format_group
3102 };
3103
3104 static struct intel_uncore_type *nhmex_msr_uncores[] = {
3105         &nhmex_uncore_ubox,
3106         &nhmex_uncore_cbox,
3107         &nhmex_uncore_bbox,
3108         &nhmex_uncore_sbox,
3109         &nhmex_uncore_mbox,
3110         &nhmex_uncore_rbox,
3111         &nhmex_uncore_wbox,
3112         NULL,
3113 };
3114 /* end of Nehalem-EX uncore support */
3115
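/*
 * Bind an event to the counter at @idx: bump the tag for that slot and
 * record the control/counter MSRs (or the fixed counter pair) in the
 * hw_perf_event so that start/stop/read can use them directly.
 */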
3116 static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
3117 {
3118         struct hw_perf_event *hwc = &event->hw;
3119
3120         hwc->idx = idx;
3121         hwc->last_tag = ++box->tags[idx];
3122
3123         if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
3124                 hwc->event_base = uncore_fixed_ctr(box);
3125                 hwc->config_base = uncore_fixed_ctl(box);
3126                 return;
3127         }
3128
3129         hwc->config_base = uncore_event_ctl(box, hwc->idx);
3130         hwc->event_base  = uncore_perf_ctr(box, hwc->idx);
3131 }
3132
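/*
 * Fold the counter delta into event->count.  prev_count is re-read in
 * a loop because the hrtimer handler may update it concurrently.  The
 * shift trick discards the unused high bits of the N-bit counter so a
 * single wrap-around still yields the correct delta; e.g. for a 48-bit
 * counter (shift = 16):
 *
 *      prev = 0xffffffffffff, new = 0x5
 *      delta = ((new << 16) - (prev << 16)) >> 16 = 6
 *
 * i.e. the counter advanced six steps across the wrap.
 */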
3133 static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
3134 {
3135         u64 prev_count, new_count, delta;
3136         int shift;
3137
3138         if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
3139                 shift = 64 - uncore_fixed_ctr_bits(box);
3140         else
3141                 shift = 64 - uncore_perf_ctr_bits(box);
3142
3143         /* the hrtimer might modify the previous event value */
3144 again:
3145         prev_count = local64_read(&event->hw.prev_count);
3146         new_count = uncore_read_counter(box, event);
3147         if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
3148                 goto again;
3149
3150         delta = (new_count << shift) - (prev_count << shift);
3151         delta >>= shift;
3152
3153         local64_add(delta, &event->count);
3154 }
3155
3156 /*
3157  * The overflow interrupt is unavailable for SandyBridge-EP and broken
3158  * for SandyBridge, so we use an hrtimer to periodically poll the counter
3159  * to avoid overflow.
3160  */
3161 static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
3162 {
3163         struct intel_uncore_box *box;
3164         struct perf_event *event;
3165         unsigned long flags;
3166         int bit;
3167
3168         box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
3169         if (!box->n_active || box->cpu != smp_processor_id())
3170                 return HRTIMER_NORESTART;
3171         /*
3172          * disable local interrupts to prevent uncore_pmu_event_start/stop
3173          * from interrupting the update process
3174          */
3175         local_irq_save(flags);
3176
3177         /*
3178          * handle boxes with an active event list as opposed to active
3179          * counters
3180          */
3181         list_for_each_entry(event, &box->active_list, active_entry) {
3182                 uncore_perf_event_update(box, event);
3183         }
3184
3185         for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
3186                 uncore_perf_event_update(box, box->events[bit]);
3187
3188         local_irq_restore(flags);
3189
3190         hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
3191         return HRTIMER_RESTART;
3192 }
3193
3194 static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
3195 {
3196         __hrtimer_start_range_ns(&box->hrtimer,
3197                         ns_to_ktime(box->hrtimer_duration), 0,
3198                         HRTIMER_MODE_REL_PINNED, 0);
3199 }
3200
3201 static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
3202 {
3203         hrtimer_cancel(&box->hrtimer);
3204 }
3205
3206 static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
3207 {
3208         hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3209         box->hrtimer.function = uncore_pmu_hrtimer;
3210 }
3211
3212 static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
3213 {
3214         struct intel_uncore_box *box;
3215         int i, size;
3216
3217         size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
3218
3219         box = kzalloc_node(size, GFP_KERNEL, node);
3220         if (!box)
3221                 return NULL;
3222
3223         for (i = 0; i < type->num_shared_regs; i++)
3224                 raw_spin_lock_init(&box->shared_regs[i].lock);
3225
3226         uncore_pmu_init_hrtimer(box);
3227         atomic_set(&box->refcnt, 1);
3228         box->cpu = -1;
3229         box->phys_id = -1;
3230
3231         /* set default hrtimer timeout */
3232         box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;
3233
3234         INIT_LIST_HEAD(&box->active_list);
3235
3236         return box;
3237 }
3238
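/*
 * Collect the leader (and, when @dogrp is set, its active siblings)
 * into box->event_list.  The list is bounded by the number of generic
 * counters, plus one if the box has a fixed counter; returns the new
 * event count or -EINVAL if the box is already full.
 */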
3239 static int
3240 uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
3241 {
3242         struct perf_event *event;
3243         int n, max_count;
3244
3245         max_count = box->pmu->type->num_counters;
3246         if (box->pmu->type->fixed_ctl)
3247                 max_count++;
3248
3249         if (box->n_events >= max_count)
3250                 return -EINVAL;
3251
3252         n = box->n_events;
3253         box->event_list[n] = leader;
3254         n++;
3255         if (!dogrp)
3256                 return n;
3257
3258         list_for_each_entry(event, &leader->sibling_list, group_entry) {
3259                 if (event->state <= PERF_EVENT_STATE_OFF)
3260                         continue;
3261
3262                 if (n >= max_count)
3263                         return -EINVAL;
3264
3265                 box->event_list[n] = event;
3266                 n++;
3267         }
3268         return n;
3269 }
3270
3271 static struct event_constraint *
3272 uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
3273 {
3274         struct intel_uncore_type *type = box->pmu->type;
3275         struct event_constraint *c;
3276
3277         if (type->ops->get_constraint) {
3278                 c = type->ops->get_constraint(box, event);
3279                 if (c)
3280                         return c;
3281         }
3282
3283         if (event->attr.config == UNCORE_FIXED_EVENT)
3284                 return &constraint_fixed;
3285
3286         if (type->constraints) {
3287                 for_each_event_constraint(c, type->constraints) {
3288                         if ((event->hw.config & c->cmask) == c->code)
3289                                 return c;
3290                 }
3291         }
3292
3293         return &type->unconstrainted;
3294 }
3295
3296 static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
3297 {
3298         if (box->pmu->type->ops->put_constraint)
3299                 box->pmu->type->ops->put_constraint(box, event);
3300 }
3301
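/*
 * Schedule @n collected events onto counters.  Compute each event's
 * constraint, then try the fast path of keeping events on the counters
 * they already occupy; if any event cannot keep its counter, fall back
 * to perf_assign_events() for a full constraint-aware assignment.
 * Constraints are released again on failure or when no assignment
 * array was supplied (validation only).
 */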
3302 static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
3303 {
3304         unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
3305         struct event_constraint *c;
3306         int i, wmin, wmax, ret = 0;
3307         struct hw_perf_event *hwc;
3308
3309         bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
3310
3311         for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
3312                 hwc = &box->event_list[i]->hw;
3313                 c = uncore_get_event_constraint(box, box->event_list[i]);
3314                 hwc->constraint = c;
3315                 wmin = min(wmin, c->weight);
3316                 wmax = max(wmax, c->weight);
3317         }
3318
3319         /* fastpath, try to reuse previous register */
3320         for (i = 0; i < n; i++) {
3321                 hwc = &box->event_list[i]->hw;
3322                 c = hwc->constraint;
3323
3324                 /* never assigned */
3325                 if (hwc->idx == -1)
3326                         break;
3327
3328                 /* constraint still honored */
3329                 if (!test_bit(hwc->idx, c->idxmsk))
3330                         break;
3331
3332                 /* not already used */
3333                 if (test_bit(hwc->idx, used_mask))
3334                         break;
3335
3336                 __set_bit(hwc->idx, used_mask);
3337                 if (assign)
3338                         assign[i] = hwc->idx;
3339         }
3340         /* slow path */
3341         if (i != n)
3342                 ret = perf_assign_events(box->event_list, n,
3343                                          wmin, wmax, assign);
3344
3345         if (!assign || ret) {
3346                 for (i = 0; i < n; i++)
3347                         uncore_put_event_constraint(box, box->event_list[i]);
3348         }
3349         return ret ? -EINVAL : 0;
3350 }
3351
3352 static void uncore_pmu_event_start(struct perf_event *event, int flags)
3353 {
3354         struct intel_uncore_box *box = uncore_event_to_box(event);
3355         int idx = event->hw.idx;
3356
3357         if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
3358                 return;
3359
3360         if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
3361                 return;
3362
3363         event->hw.state = 0;
3364         box->events[idx] = event;
3365         box->n_active++;
3366         __set_bit(idx, box->active_mask);
3367
3368         local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
3369         uncore_enable_event(box, event);
3370
3371         if (box->n_active == 1) {
3372                 uncore_enable_box(box);
3373                 uncore_pmu_start_hrtimer(box);
3374         }
3375 }
3376
3377 static void uncore_pmu_event_stop(struct perf_event *event, int flags)
3378 {
3379         struct intel_uncore_box *box = uncore_event_to_box(event);
3380         struct hw_perf_event *hwc = &event->hw;
3381
3382         if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
3383                 uncore_disable_event(box, event);
3384                 box->n_active--;
3385                 box->events[hwc->idx] = NULL;
3386                 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
3387                 hwc->state |= PERF_HES_STOPPED;
3388
3389                 if (box->n_active == 0) {
3390                         uncore_disable_box(box);
3391                         uncore_pmu_cancel_hrtimer(box);
3392                 }
3393         }
3394
3395         if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
3396                 /*
3397                  * Drain the remaining delta count out of an event
3398                  * that we are disabling:
3399                  */
3400                 uncore_perf_event_update(box, event);
3401                 hwc->state |= PERF_HES_UPTODATE;
3402         }
3403 }
3404
3405 static int uncore_pmu_event_add(struct perf_event *event, int flags)
3406 {
3407         struct intel_uncore_box *box = uncore_event_to_box(event);
3408         struct hw_perf_event *hwc = &event->hw;
3409         int assign[UNCORE_PMC_IDX_MAX];
3410         int i, n, ret;
3411
3412         if (!box)
3413                 return -ENODEV;
3414
3415         ret = n = uncore_collect_events(box, event, false);
3416         if (ret < 0)
3417                 return ret;
3418
3419         hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
3420         if (!(flags & PERF_EF_START))
3421                 hwc->state |= PERF_HES_ARCH;
3422
3423         ret = uncore_assign_events(box, assign, n);
3424         if (ret)
3425                 return ret;
3426
3427         /* save events moving to new counters */
3428         for (i = 0; i < box->n_events; i++) {
3429                 event = box->event_list[i];
3430                 hwc = &event->hw;
3431
3432                 if (hwc->idx == assign[i] &&
3433                         hwc->last_tag == box->tags[assign[i]])
3434                         continue;
3435                 /*
3436                  * Ensure we don't accidentally enable a stopped
3437                  * counter simply because we rescheduled.
3438                  */
3439                 if (hwc->state & PERF_HES_STOPPED)
3440                         hwc->state |= PERF_HES_ARCH;
3441
3442                 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
3443         }
3444
3445         /* reprogram moved events into new counters */
3446         for (i = 0; i < n; i++) {
3447                 event = box->event_list[i];
3448                 hwc = &event->hw;
3449
3450                 if (hwc->idx != assign[i] ||
3451                         hwc->last_tag != box->tags[assign[i]])
3452                         uncore_assign_hw_event(box, event, assign[i]);
3453                 else if (i < box->n_events)
3454                         continue;
3455
3456                 if (hwc->state & PERF_HES_ARCH)
3457                         continue;
3458
3459                 uncore_pmu_event_start(event, 0);
3460         }
3461         box->n_events = n;
3462
3463         return 0;
3464 }
3465
3466 static void uncore_pmu_event_del(struct perf_event *event, int flags)
3467 {
3468         struct intel_uncore_box *box = uncore_event_to_box(event);
3469         int i;
3470
3471         uncore_pmu_event_stop(event, PERF_EF_UPDATE);
3472
3473         for (i = 0; i < box->n_events; i++) {
3474                 if (event == box->event_list[i]) {
3475                         uncore_put_event_constraint(box, event);
3476
3477                         while (++i < box->n_events)
3478                                 box->event_list[i - 1] = box->event_list[i];
3479
3480                         --box->n_events;
3481                         break;
3482                 }
3483         }
3484
3485         event->hw.idx = -1;
3486         event->hw.last_tag = ~0ULL;
3487 }
3488
3489 static void uncore_pmu_event_read(struct perf_event *event)
3490 {
3491         struct intel_uncore_box *box = uncore_event_to_box(event);
3492         uncore_perf_event_update(box, event);
3493 }
3494
3495 /*
3496  * validation ensures the group can be loaded onto the
3497  * PMU if it was the only group available.
3498  */
3499 static int uncore_validate_group(struct intel_uncore_pmu *pmu,
3500                                 struct perf_event *event)
3501 {
3502         struct perf_event *leader = event->group_leader;
3503         struct intel_uncore_box *fake_box;
3504         int ret = -EINVAL, n;
3505
3506         fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
3507         if (!fake_box)
3508                 return -ENOMEM;
3509
3510         fake_box->pmu = pmu;
3511         /*
3512          * the event is not yet connected with its
3513          * siblings therefore we must first collect
3514          * existing siblings, then add the new event
3515          * before we can simulate the scheduling
3516          */
3517         n = uncore_collect_events(fake_box, leader, true);
3518         if (n < 0)
3519                 goto out;
3520
3521         fake_box->n_events = n;
3522         n = uncore_collect_events(fake_box, event, false);
3523         if (n < 0)
3524                 goto out;
3525
3526         fake_box->n_events = n;
3527
3528         ret = uncore_assign_events(fake_box, NULL, n);
3529 out:
3530         kfree(fake_box);
3531         return ret;
3532 }
3533
3534 static int uncore_pmu_event_init(struct perf_event *event)
3535 {
3536         struct intel_uncore_pmu *pmu;
3537         struct intel_uncore_box *box;
3538         struct hw_perf_event *hwc = &event->hw;
3539         int ret;
3540
3541         if (event->attr.type != event->pmu->type)
3542                 return -ENOENT;
3543
3544         pmu = uncore_event_to_pmu(event);
3545         /* no device found for this pmu */
3546         if (pmu->func_id < 0)
3547                 return -ENOENT;
3548
3549         /*
3550          * The uncore PMU always measures at all privilege levels,
3551          * so it doesn't make sense to specify any exclude bits.
3552          */
3553         if (event->attr.exclude_user || event->attr.exclude_kernel ||
3554                         event->attr.exclude_hv || event->attr.exclude_idle)
3555                 return -EINVAL;
3556
3557         /* Sampling not supported yet */
3558         if (hwc->sample_period)
3559                 return -EINVAL;
3560
3561         /*
3562          * Place all uncore events for a particular physical package
3563          * onto a single cpu
3564          */
3565         if (event->cpu < 0)
3566                 return -EINVAL;
3567         box = uncore_pmu_to_box(pmu, event->cpu);
3568         if (!box || box->cpu < 0)
3569                 return -EINVAL;
3570         event->cpu = box->cpu;
3571
3572         event->hw.idx = -1;
3573         event->hw.last_tag = ~0ULL;
3574         event->hw.extra_reg.idx = EXTRA_REG_NONE;
3575         event->hw.branch_reg.idx = EXTRA_REG_NONE;
3576
3577         if (event->attr.config == UNCORE_FIXED_EVENT) {
3578                 /* no fixed counter */
3579                 if (!pmu->type->fixed_ctl)
3580                         return -EINVAL;
3581                 /*
3582                  * if there is only one fixed counter, only the first pmu
3583                  * can access the fixed counter
3584                  */
3585                 if (pmu->type->single_fixed && pmu->pmu_idx > 0)
3586                         return -EINVAL;
3587
3588                 /* fixed counters have event field hardcoded to zero */
3589                 hwc->config = 0ULL;
3590         } else {
3591                 hwc->config = event->attr.config & pmu->type->event_mask;
3592                 if (pmu->type->ops->hw_config) {
3593                         ret = pmu->type->ops->hw_config(box, event);
3594                         if (ret)
3595                                 return ret;
3596                 }
3597         }
3598
3599         if (event->group_leader != event)
3600                 ret = uncore_validate_group(pmu, event);
3601         else
3602                 ret = 0;
3603
3604         return ret;
3605 }
3606
3607 static ssize_t uncore_get_attr_cpumask(struct device *dev,
3608                                 struct device_attribute *attr, char *buf)
3609 {
3610         int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &uncore_cpu_mask);
3611
3612         buf[n++] = '\n';
3613         buf[n] = '\0';
3614         return n;
3615 }
3616
3617 static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);
3618
3619 static struct attribute *uncore_pmu_attrs[] = {
3620         &dev_attr_cpumask.attr,
3621         NULL,
3622 };
3623
3624 static struct attribute_group uncore_pmu_attr_group = {
3625         .attrs = uncore_pmu_attrs,
3626 };
3627
3628 static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
3629 {
3630         int ret;
3631
3632         if (!pmu->type->pmu) {
3633                 pmu->pmu = (struct pmu) {
3634                         .attr_groups    = pmu->type->attr_groups,
3635                         .task_ctx_nr    = perf_invalid_context,
3636                         .event_init     = uncore_pmu_event_init,
3637                         .add            = uncore_pmu_event_add,
3638                         .del            = uncore_pmu_event_del,
3639                         .start          = uncore_pmu_event_start,
3640                         .stop           = uncore_pmu_event_stop,
3641                         .read           = uncore_pmu_event_read,
3642                 };
3643         } else {
3644                 pmu->pmu = *pmu->type->pmu;
3645                 pmu->pmu.attr_groups = pmu->type->attr_groups;
3646         }
3647
3648         if (pmu->type->num_boxes == 1) {
3649                 if (strlen(pmu->type->name) > 0)
3650                         sprintf(pmu->name, "uncore_%s", pmu->type->name);
3651                 else
3652                         sprintf(pmu->name, "uncore");
3653         } else {
3654                 sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
3655                         pmu->pmu_idx);
3656         }
3657
3658         ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
3659         return ret;
3660 }
3661
3662 static void __init uncore_type_exit(struct intel_uncore_type *type)
3663 {
3664         int i;
3665
3666         for (i = 0; i < type->num_boxes; i++)
3667                 free_percpu(type->pmus[i].box);
3668         kfree(type->pmus);
3669         type->pmus = NULL;
3670         kfree(type->events_group);
3671         type->events_group = NULL;
3672 }
3673
3674 static void __init uncore_types_exit(struct intel_uncore_type **types)
3675 {
3676         int i;
3677         for (i = 0; types[i]; i++)
3678                 uncore_type_exit(types[i]);
3679 }
3680
3681 static int __init uncore_type_init(struct intel_uncore_type *type)
3682 {
3683         struct intel_uncore_pmu *pmus;
3684         struct attribute_group *attr_group;
3685         struct attribute **attrs;
3686         int i, j;
3687
3688         pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
3689         if (!pmus)
3690                 return -ENOMEM;
3691
3692         type->unconstrainted = (struct event_constraint)
3693                 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
3694                                 0, type->num_counters, 0, 0);
3695
3696         for (i = 0; i < type->num_boxes; i++) {
3697                 pmus[i].func_id = -1;
3698                 pmus[i].pmu_idx = i;
3699                 pmus[i].type = type;
3700                 INIT_LIST_HEAD(&pmus[i].box_list);
3701                 pmus[i].box = alloc_percpu(struct intel_uncore_box *);
3702                 if (!pmus[i].box)
3703                         goto fail;
3704         }
3705
3706         if (type->event_descs) {
3707                 i = 0;
3708                 while (type->event_descs[i].attr.attr.name)
3709                         i++;
3710
3711                 attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
3712                                         sizeof(*attr_group), GFP_KERNEL);
3713                 if (!attr_group)
3714                         goto fail;
3715
3716                 attrs = (struct attribute **)(attr_group + 1);
3717                 attr_group->name = "events";
3718                 attr_group->attrs = attrs;
3719
3720                 for (j = 0; j < i; j++)
3721                         attrs[j] = &type->event_descs[j].attr.attr;
3722
3723                 type->events_group = attr_group;
3724         }
3725
3726         type->pmu_group = &uncore_pmu_attr_group;
3727         type->pmus = pmus;
3728         return 0;
3729 fail:
3730         uncore_type_exit(type);
3731         return -ENOMEM;
3732 }
3733
3734 static int __init uncore_types_init(struct intel_uncore_type **types)
3735 {
3736         int i, ret;
3737
3738         for (i = 0; types[i]; i++) {
3739                 ret = uncore_type_init(types[i]);
3740                 if (ret)
3741                         goto fail;
3742         }
3743         return 0;
3744 fail:
3745         while (--i >= 0)
3746                 uncore_type_exit(types[i]);
3747         return ret;
3748 }
3749
3750 static struct pci_driver *uncore_pci_driver;
3751 static bool pcidrv_registered;
3752
3753 /*
3754  * add a pci uncore device
3755  */
3756 static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3757 {
3758         struct intel_uncore_pmu *pmu;
3759         struct intel_uncore_box *box;
3760         struct intel_uncore_type *type;
3761         int phys_id;
3762
3763         phys_id = pcibus_to_physid[pdev->bus->number];
3764         if (phys_id < 0)
3765                 return -ENODEV;
3766
3767         if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
3768                 extra_pci_dev[phys_id][UNCORE_PCI_DEV_IDX(id->driver_data)] = pdev;
3769                 pci_set_drvdata(pdev, NULL);
3770                 return 0;
3771         }
3772
3773         type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
3774         box = uncore_alloc_box(type, NUMA_NO_NODE);
3775         if (!box)
3776                 return -ENOMEM;
3777
3778         /*
3779          * for a performance monitoring unit with multiple boxes,
3780          * each box has a different function id.
3781          */
3782         pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
3783         if (pmu->func_id < 0)
3784                 pmu->func_id = pdev->devfn;
3785         else
3786                 WARN_ON_ONCE(pmu->func_id != pdev->devfn);
3787
3788         box->phys_id = phys_id;
3789         box->pci_dev = pdev;
3790         box->pmu = pmu;
3791         uncore_box_init(box);
3792         pci_set_drvdata(pdev, box);
3793
3794         raw_spin_lock(&uncore_box_lock);
3795         list_add_tail(&box->list, &pmu->box_list);
3796         raw_spin_unlock(&uncore_box_lock);
3797
3798         return 0;
3799 }
3800
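/*
 * Remove a pci uncore device.  A device without drvdata is one of the
 * auxiliary devices stashed in extra_pci_dev[]; just clear its slot.
 * Otherwise unlink the box from its pmu, drop the per-cpu references
 * and free it.
 */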
3801 static void uncore_pci_remove(struct pci_dev *pdev)
3802 {
3803         struct intel_uncore_box *box;
3804         struct intel_uncore_pmu *pmu;
3805         int i, cpu, phys_id = pcibus_to_physid[pdev->bus->number];
3806
3807         box = pci_get_drvdata(pdev);
3808         if (!box) {
3809                 for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
3810                         if (extra_pci_dev[phys_id][i] == pdev) {
3811                                 extra_pci_dev[phys_id][i] = NULL;
3812                                 break;
3813                         }
3814                 }
3815                 WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
3816                 return;
3817         }
3818
3819         pmu = box->pmu;
3820         if (WARN_ON_ONCE(phys_id != box->phys_id))
3821                 return;
3822
3823         pci_set_drvdata(pdev, NULL);
3824
3825         raw_spin_lock(&uncore_box_lock);
3826         list_del(&box->list);
3827         raw_spin_unlock(&uncore_box_lock);
3828
3829         for_each_possible_cpu(cpu) {
3830                 if (*per_cpu_ptr(pmu->box, cpu) == box) {
3831                         *per_cpu_ptr(pmu->box, cpu) = NULL;
3832                         atomic_dec(&box->refcnt);
3833                 }
3834         }
3835
3836         WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
3837         kfree(box);
3838 }
3839
3840 static int __init uncore_pci_init(void)
3841 {
3842         int ret;
3843
3844         switch (boot_cpu_data.x86_model) {
3845         case 45: /* Sandy Bridge-EP */
3846                 ret = snbep_pci2phy_map_init(0x3ce0);
3847                 if (ret)
3848                         return ret;
3849                 pci_uncores = snbep_pci_uncores;
3850                 uncore_pci_driver = &snbep_uncore_pci_driver;
3851                 break;
3852         case 62: /* IvyTown */
3853                 ret = snbep_pci2phy_map_init(0x0e1e);
3854                 if (ret)
3855                         return ret;
3856                 pci_uncores = ivt_pci_uncores;
3857                 uncore_pci_driver = &ivt_uncore_pci_driver;
3858                 break;
3859         case 42: /* Sandy Bridge */
3860                 ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_SNB_IMC);
3861                 if (ret)
3862                         return ret;
3863                 pci_uncores = snb_pci_uncores;
3864                 uncore_pci_driver = &snb_uncore_pci_driver;
3865                 break;
3866         case 58: /* Ivy Bridge */
3867                 ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_IVB_IMC);
3868                 if (ret)
3869                         return ret;
3870                 pci_uncores = snb_pci_uncores;
3871                 uncore_pci_driver = &ivb_uncore_pci_driver;
3872                 break;
3873         case 60: /* Haswell */
3874         case 69: /* Haswell Celeron */
3875                 ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_HSW_IMC);
3876                 if (ret)
3877                         return ret;
3878                 pci_uncores = snb_pci_uncores;
3879                 uncore_pci_driver = &hsw_uncore_pci_driver;
3880                 break;
3881         default:
3882                 return 0;
3883         }
3884
3885         ret = uncore_types_init(pci_uncores);
3886         if (ret)
3887                 return ret;
3888
3889         uncore_pci_driver->probe = uncore_pci_probe;
3890         uncore_pci_driver->remove = uncore_pci_remove;
3891
3892         ret = pci_register_driver(uncore_pci_driver);
3893         if (ret == 0)
3894                 pcidrv_registered = true;
3895         else
3896                 uncore_types_exit(pci_uncores);
3897
3898         return ret;
3899 }
3900
3901 static void __init uncore_pci_exit(void)
3902 {
3903         if (pcidrv_registered) {
3904                 pcidrv_registered = false;
3905                 pci_unregister_driver(uncore_pci_driver);
3906                 uncore_types_exit(pci_uncores);
3907         }
3908 }
3909
3910 /* CPU hotplug/unplug is serialized by the cpu_add_remove_lock mutex */
3911 static LIST_HEAD(boxes_to_free);
3912
3913 static void uncore_kfree_boxes(void)
3914 {
3915         struct intel_uncore_box *box;
3916
3917         while (!list_empty(&boxes_to_free)) {
3918                 box = list_entry(boxes_to_free.next,
3919                                  struct intel_uncore_box, list);
3920                 list_del(&box->list);
3921                 kfree(box);
3922         }
3923 }
3924
3925 static void uncore_cpu_dying(int cpu)
3926 {
3927         struct intel_uncore_type *type;
3928         struct intel_uncore_pmu *pmu;
3929         struct intel_uncore_box *box;
3930         int i, j;
3931
3932         for (i = 0; msr_uncores[i]; i++) {
3933                 type = msr_uncores[i];
3934                 for (j = 0; j < type->num_boxes; j++) {
3935                         pmu = &type->pmus[j];
3936                         box = *per_cpu_ptr(pmu->box, cpu);
3937                         *per_cpu_ptr(pmu->box, cpu) = NULL;
3938                         if (box && atomic_dec_and_test(&box->refcnt))
3939                                 list_add(&box->list, &boxes_to_free);
3940                 }
3941         }
3942 }
3943
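/*
 * Make the uncore boxes usable on an onlining cpu: if another online
 * cpu in the same physical package already owns a box, share it (and
 * queue the freshly allocated one for freeing); otherwise adopt the
 * box prepared for this cpu and initialize it.
 */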
3944 static int uncore_cpu_starting(int cpu)
3945 {
3946         struct intel_uncore_type *type;
3947         struct intel_uncore_pmu *pmu;
3948         struct intel_uncore_box *box, *exist;
3949         int i, j, k, phys_id;
3950
3951         phys_id = topology_physical_package_id(cpu);
3952
3953         for (i = 0; msr_uncores[i]; i++) {
3954                 type = msr_uncores[i];
3955                 for (j = 0; j < type->num_boxes; j++) {
3956                         pmu = &type->pmus[j];
3957                         box = *per_cpu_ptr(pmu->box, cpu);
3958                         /* called by uncore_cpu_init? */
3959                         if (box && box->phys_id >= 0) {
3960                                 uncore_box_init(box);
3961                                 continue;
3962                         }
3963
3964                         for_each_online_cpu(k) {
3965                                 exist = *per_cpu_ptr(pmu->box, k);
3966                                 if (exist && exist->phys_id == phys_id) {
3967                                         atomic_inc(&exist->refcnt);
3968                                         *per_cpu_ptr(pmu->box, cpu) = exist;
3969                                         if (box) {
3970                                                 list_add(&box->list,
3971                                                          &boxes_to_free);
3972                                                 box = NULL;
3973                                         }
3974                                         break;
3975                                 }
3976                         }
3977
3978                         if (box) {
3979                                 box->phys_id = phys_id;
3980                                 uncore_box_init(box);
3981                         }
3982                 }
3983         }
3984         return 0;
3985 }
3986
3987 static int uncore_cpu_prepare(int cpu, int phys_id)
3988 {
3989         struct intel_uncore_type *type;
3990         struct intel_uncore_pmu *pmu;
3991         struct intel_uncore_box *box;
3992         int i, j;
3993
3994         for (i = 0; msr_uncores[i]; i++) {
3995                 type = msr_uncores[i];
3996                 for (j = 0; j < type->num_boxes; j++) {
3997                         pmu = &type->pmus[j];
3998                         if (pmu->func_id < 0)
3999                                 pmu->func_id = j;
4000
4001                         box = uncore_alloc_box(type, cpu_to_node(cpu));
4002                         if (!box)
4003                                 return -ENOMEM;
4004
4005                         box->pmu = pmu;
4006                         box->phys_id = phys_id;
4007                         *per_cpu_ptr(pmu->box, cpu) = box;
4008                 }
4009         }
4010         return 0;
4011 }
4012
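/*
 * Hand the uncore PMUs over from @old_cpu to @new_cpu.  With a
 * negative old_cpu the boxes are simply claimed by new_cpu; otherwise
 * the perf context is migrated, or the boxes are orphaned when no
 * replacement cpu exists.
 */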
4013 static void
4014 uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
4015 {
4016         struct intel_uncore_type *type;
4017         struct intel_uncore_pmu *pmu;
4018         struct intel_uncore_box *box;
4019         int i, j;
4020
4021         for (i = 0; uncores[i]; i++) {
4022                 type = uncores[i];
4023                 for (j = 0; j < type->num_boxes; j++) {
4024                         pmu = &type->pmus[j];
4025                         if (old_cpu < 0)
4026                                 box = uncore_pmu_to_box(pmu, new_cpu);
4027                         else
4028                                 box = uncore_pmu_to_box(pmu, old_cpu);
4029                         if (!box)
4030                                 continue;
4031
4032                         if (old_cpu < 0) {
4033                                 WARN_ON_ONCE(box->cpu != -1);
4034                                 box->cpu = new_cpu;
4035                                 continue;
4036                         }
4037
4038                         WARN_ON_ONCE(box->cpu != old_cpu);
4039                         if (new_cpu >= 0) {
4040                                 uncore_pmu_cancel_hrtimer(box);
4041                                 perf_pmu_migrate_context(&pmu->pmu,
4042                                                 old_cpu, new_cpu);
4043                                 box->cpu = new_cpu;
4044                         } else {
4045                                 box->cpu = -1;
4046                         }
4047                 }
4048         }
4049 }
4050
4051 static void uncore_event_exit_cpu(int cpu)
4052 {
4053         int i, phys_id, target;
4054
4055         /* nothing to do unless the exiting cpu collects uncore events */
4056         if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
4057                 return;
4058
4059         /* find a new cpu to collect uncore events */
4060         phys_id = topology_physical_package_id(cpu);
4061         target = -1;
4062         for_each_online_cpu(i) {
4063                 if (i == cpu)
4064                         continue;
4065                 if (phys_id == topology_physical_package_id(i)) {
4066                         target = i;
4067                         break;
4068                 }
4069         }
4070
4071         /* migrate uncore events to the new cpu */
4072         if (target >= 0)
4073                 cpumask_set_cpu(target, &uncore_cpu_mask);
4074
4075         uncore_change_context(msr_uncores, cpu, target);
4076         uncore_change_context(pci_uncores, cpu, target);
4077 }
4078
4079 static void uncore_event_init_cpu(int cpu)
4080 {
4081         int i, phys_id;
4082
4083         phys_id = topology_physical_package_id(cpu);
4084         for_each_cpu(i, &uncore_cpu_mask) {
4085                 if (phys_id == topology_physical_package_id(i))
4086                         return;
4087         }
4088
4089         cpumask_set_cpu(cpu, &uncore_cpu_mask);
4090
4091         uncore_change_context(msr_uncores, -1, cpu);
4092         uncore_change_context(pci_uncores, -1, cpu);
4093 }
4094
4095 static int uncore_cpu_notifier(struct notifier_block *self,
4096                                unsigned long action, void *hcpu)
4097 {
4098         unsigned int cpu = (long)hcpu;
4099
4100         /* allocate/free data structure for uncore box */
4101         switch (action & ~CPU_TASKS_FROZEN) {
4102         case CPU_UP_PREPARE:
4103                 uncore_cpu_prepare(cpu, -1);
4104                 break;
4105         case CPU_STARTING:
4106                 uncore_cpu_starting(cpu);
4107                 break;
4108         case CPU_UP_CANCELED:
4109         case CPU_DYING:
4110                 uncore_cpu_dying(cpu);
4111                 break;
4112         case CPU_ONLINE:
4113         case CPU_DEAD:
4114                 uncore_kfree_boxes();
4115                 break;
4116         default:
4117                 break;
4118         }
4119
4120         /* select the cpu that collects uncore events */
4121         switch (action & ~CPU_TASKS_FROZEN) {
4122         case CPU_DOWN_FAILED:
4123         case CPU_STARTING:
4124                 uncore_event_init_cpu(cpu);
4125                 break;
4126         case CPU_DOWN_PREPARE:
4127                 uncore_event_exit_cpu(cpu);
4128                 break;
4129         default:
4130                 break;
4131         }
4132
4133         return NOTIFY_OK;
4134 }
4135
4136 static struct notifier_block uncore_cpu_nb = {
4137         .notifier_call  = uncore_cpu_notifier,
4138         /*
4139          * to migrate uncore events, our notifier should be executed
4140          * before perf core's notifier.
4141          */
4142         .priority       = CPU_PRI_PERF + 1,
4143 };
4144
4145 static void __init uncore_cpu_setup(void *dummy)
4146 {
4147         uncore_cpu_starting(smp_processor_id());
4148 }
4149
4150 static int __init uncore_cpu_init(void)
4151 {
4152         int ret, max_cores;
4153
4154         max_cores = boot_cpu_data.x86_max_cores;
4155         switch (boot_cpu_data.x86_model) {
4156         case 26: /* Nehalem */
4157         case 30:
4158         case 37: /* Westmere */
4159         case 44:
4160                 msr_uncores = nhm_msr_uncores;
4161                 break;
4162         case 42: /* Sandy Bridge */
4163         case 58: /* Ivy Bridge */
4164                 if (snb_uncore_cbox.num_boxes > max_cores)
4165                         snb_uncore_cbox.num_boxes = max_cores;
4166                 msr_uncores = snb_msr_uncores;
4167                 break;
4168         case 45: /* Sandy Bridge-EP */
4169                 if (snbep_uncore_cbox.num_boxes > max_cores)
4170                         snbep_uncore_cbox.num_boxes = max_cores;
4171                 msr_uncores = snbep_msr_uncores;
4172                 break;
4173         case 46: /* Nehalem-EX */
4174                 uncore_nhmex = true;
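                /* fall through: Westmere-EX reuses the NHM-EX uncore code */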
4175         case 47: /* Westmere-EX aka. Xeon E7 */
4176                 if (!uncore_nhmex)
4177                         nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
4178                 if (nhmex_uncore_cbox.num_boxes > max_cores)
4179                         nhmex_uncore_cbox.num_boxes = max_cores;
4180                 msr_uncores = nhmex_msr_uncores;
4181                 break;
4182         case 62: /* IvyTown */
4183                 if (ivt_uncore_cbox.num_boxes > max_cores)
4184                         ivt_uncore_cbox.num_boxes = max_cores;
4185                 msr_uncores = ivt_msr_uncores;
4186                 break;
4187
4188         default:
4189                 return 0;
4190         }
4191
4192         ret = uncore_types_init(msr_uncores);
4193         if (ret)
4194                 return ret;
4195
4196         return 0;
4197 }
4198
4199 static int __init uncore_pmus_register(void)
4200 {
4201         struct intel_uncore_pmu *pmu;
4202         struct intel_uncore_type *type;
4203         int i, j;
4204
4205         for (i = 0; msr_uncores[i]; i++) {
4206                 type = msr_uncores[i];
4207                 for (j = 0; j < type->num_boxes; j++) {
4208                         pmu = &type->pmus[j];
4209                         uncore_pmu_register(pmu);
4210                 }
4211         }
4212
4213         for (i = 0; pci_uncores[i]; i++) {
4214                 type = pci_uncores[i];
4215                 for (j = 0; j < type->num_boxes; j++) {
4216                         pmu = &type->pmus[j];
4217                         uncore_pmu_register(pmu);
4218                 }
4219         }
4220
4221         return 0;
4222 }
4223
4224 static void uncore_cpumask_init(void)
4225 {
4226         int cpu;
4227
4228         /*
4229          * only invoke once, from the msr or pci init code
4230          */
4231         if (!cpumask_empty(&uncore_cpu_mask))
4232                 return;
4233
4234         get_online_cpus();
4235
4236         for_each_online_cpu(cpu) {
4237                 int i, phys_id = topology_physical_package_id(cpu);
4238
4239                 for_each_cpu(i, &uncore_cpu_mask) {
4240                         if (phys_id == topology_physical_package_id(i)) {
4241                                 phys_id = -1;
4242                                 break;
4243                         }
4244                 }
4245                 if (phys_id < 0)
4246                         continue;
4247
4248                 uncore_cpu_prepare(cpu, phys_id);
4249                 uncore_event_init_cpu(cpu);
4250         }
4251         on_each_cpu(uncore_cpu_setup, NULL, 1);
4252
4253         register_cpu_notifier(&uncore_cpu_nb);
4254
4255         put_online_cpus();
4256 }
4257
4258
4259 static int __init intel_uncore_init(void)
4260 {
4261         int ret;
4262
4263         if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
4264                 return -ENODEV;
4265
4266         if (cpu_has_hypervisor)
4267                 return -ENODEV;
4268
4269         ret = uncore_pci_init();
4270         if (ret)
4271                 goto fail;
4272         ret = uncore_cpu_init();
4273         if (ret) {
4274                 uncore_pci_exit();
4275                 goto fail;
4276         }
4277         uncore_cpumask_init();
4278
4279         uncore_pmus_register();
4280         return 0;
4281 fail:
4282         return ret;
4283 }
4284 device_initcall(intel_uncore_init);