1 #include "perf_event_intel_uncore.h"
2
3 static struct intel_uncore_type *empty_uncore[] = { NULL, };
4 static struct intel_uncore_type **msr_uncores = empty_uncore;
5 static struct intel_uncore_type **pci_uncores = empty_uncore;
6 /* pci bus to socket mapping */
7 static int pcibus_to_physid[256] = { [0 ... 255] = -1, };
8
9 static struct pci_dev *extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];
10
11 static DEFINE_RAW_SPINLOCK(uncore_box_lock);
12
13 /* mask of cpus that collect uncore events */
14 static cpumask_t uncore_cpu_mask;
15
16 /* constraint for the fixed counter */
17 static struct event_constraint constraint_fixed =
18         EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
19 static struct event_constraint constraint_empty =
20         EVENT_CONSTRAINT(0, 0, 0);
21
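/*
 * Extract the i-th n-bit wide field from x, e.g. one 6-bit reference count
 * slice out of a packed shared-register refcount (see the Cbox/PCU
 * constraint code below).
 */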
22 #define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
23                                 ((1ULL << (n)) - 1)))
24
25 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
26 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
27 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
28 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
29 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
30 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
31 DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
32 DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
33 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
34 DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
35 DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
36 DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
37 DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
38 DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
39 DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
40 DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
41 DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
42 DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
43 DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
44 DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
45 DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
46 DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
47 DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
48 DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
49 DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
50 DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
51 DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
52 DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
53 DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
54 DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
55 DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
56 DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
57 DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
58 DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
59 DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
60 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
61 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
62 DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
63 DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
64 DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
65 DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
66 DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
67 DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
68
69 static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
70 static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
71 static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
72 static void uncore_pmu_event_read(struct perf_event *event);
73
74 static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
75 {
76         return container_of(event->pmu, struct intel_uncore_pmu, pmu);
77 }
78
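/*
 * Return the box serving this pmu/cpu pair.  The per-cpu pointer acts as a
 * cache; on a miss, look the box up by physical package id under
 * uncore_box_lock and cache it for later lookups.
 */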
79 static struct intel_uncore_box *
80 uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
81 {
82         struct intel_uncore_box *box;
83
84         box = *per_cpu_ptr(pmu->box, cpu);
85         if (box)
86                 return box;
87
88         raw_spin_lock(&uncore_box_lock);
89         list_for_each_entry(box, &pmu->box_list, list) {
90                 if (box->phys_id == topology_physical_package_id(cpu)) {
91                         atomic_inc(&box->refcnt);
92                         *per_cpu_ptr(pmu->box, cpu) = box;
93                         break;
94                 }
95         }
96         raw_spin_unlock(&uncore_box_lock);
97
98         return *per_cpu_ptr(pmu->box, cpu);
99 }
100
101 static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
102 {
103         /*
104          * The perf core schedules events on the basis of CPU; uncore events
105          * are collected by one of the CPUs inside a physical package.
106          */
107         return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
108 }
109
110 static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
111 {
112         u64 count;
113
114         rdmsrl(event->hw.event_base, count);
115
116         return count;
117 }
118
119 /*
120  * generic get constraint function for shared match/mask registers.
121  */
122 static struct event_constraint *
123 uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
124 {
125         struct intel_uncore_extra_reg *er;
126         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
127         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
128         unsigned long flags;
129         bool ok = false;
130
131         /*
132          * reg->alloc can be set due to existing state, so for a fake box we
133          * need to ignore this, otherwise we might fail to allocate proper
134          * fake state for this extra reg constraint.
135          */
136         if (reg1->idx == EXTRA_REG_NONE ||
137             (!uncore_box_is_fake(box) && reg1->alloc))
138                 return NULL;
139
140         er = &box->shared_regs[reg1->idx];
141         raw_spin_lock_irqsave(&er->lock, flags);
142         if (!atomic_read(&er->ref) ||
143             (er->config1 == reg1->config && er->config2 == reg2->config)) {
144                 atomic_inc(&er->ref);
145                 er->config1 = reg1->config;
146                 er->config2 = reg2->config;
147                 ok = true;
148         }
149         raw_spin_unlock_irqrestore(&er->lock, flags);
150
151         if (ok) {
152                 if (!uncore_box_is_fake(box))
153                         reg1->alloc = 1;
154                 return NULL;
155         }
156
157         return &constraint_empty;
158 }
159
160 static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
161 {
162         struct intel_uncore_extra_reg *er;
163         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
164
165         /*
166          * Only put the constraint if the extra reg was actually allocated. This
167          * also takes care of events which do not use an extra shared reg.
168          *
169          * Also, if this is a fake box we shouldn't touch any event state
170          * (reg->alloc) and we don't care about leaving inconsistent box
171          * state either since it will be thrown out.
172          */
173         if (uncore_box_is_fake(box) || !reg1->alloc)
174                 return;
175
176         er = &box->shared_regs[reg1->idx];
177         atomic_dec(&er->ref);
178         reg1->alloc = 0;
179 }
180
181 static u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
182 {
183         struct intel_uncore_extra_reg *er;
184         unsigned long flags;
185         u64 config;
186
187         er = &box->shared_regs[idx];
188
189         raw_spin_lock_irqsave(&er->lock, flags);
190         config = er->config;
191         raw_spin_unlock_irqrestore(&er->lock, flags);
192
193         return config;
194 }
195
196 /* Sandy Bridge-EP uncore support */
197 static struct intel_uncore_type snbep_uncore_cbox;
198 static struct intel_uncore_type snbep_uncore_pcu;
199
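/*
 * Boxes are stopped and restarted by toggling the FRZ (freeze) bit in the
 * per-box control register; disable_box sets it, enable_box clears it.
 */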
200 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
201 {
202         struct pci_dev *pdev = box->pci_dev;
203         int box_ctl = uncore_pci_box_ctl(box);
204         u32 config = 0;
205
206         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
207                 config |= SNBEP_PMON_BOX_CTL_FRZ;
208                 pci_write_config_dword(pdev, box_ctl, config);
209         }
210 }
211
212 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
213 {
214         struct pci_dev *pdev = box->pci_dev;
215         int box_ctl = uncore_pci_box_ctl(box);
216         u32 config = 0;
217
218         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
219                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
220                 pci_write_config_dword(pdev, box_ctl, config);
221         }
222 }
223
224 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
225 {
226         struct pci_dev *pdev = box->pci_dev;
227         struct hw_perf_event *hwc = &event->hw;
228
229         pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
230 }
231
232 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
233 {
234         struct pci_dev *pdev = box->pci_dev;
235         struct hw_perf_event *hwc = &event->hw;
236
237         pci_write_config_dword(pdev, hwc->config_base, hwc->config);
238 }
239
240 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
241 {
242         struct pci_dev *pdev = box->pci_dev;
243         struct hw_perf_event *hwc = &event->hw;
244         u64 count = 0;
245
246         pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
247         pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
248
249         return count;
250 }
251
252 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
253 {
254         struct pci_dev *pdev = box->pci_dev;
255
256         pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
257 }
258
259 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
260 {
261         u64 config;
262         unsigned msr;
263
264         msr = uncore_msr_box_ctl(box);
265         if (msr) {
266                 rdmsrl(msr, config);
267                 config |= SNBEP_PMON_BOX_CTL_FRZ;
268                 wrmsrl(msr, config);
269         }
270 }
271
272 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
273 {
274         u64 config;
275         unsigned msr;
276
277         msr = uncore_msr_box_ctl(box);
278         if (msr) {
279                 rdmsrl(msr, config);
280                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
281                 wrmsrl(msr, config);
282         }
283 }
284
285 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
286 {
287         struct hw_perf_event *hwc = &event->hw;
288         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
289
290         if (reg1->idx != EXTRA_REG_NONE)
291                 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
292
293         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
294 }
295
296 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
297                                         struct perf_event *event)
298 {
299         struct hw_perf_event *hwc = &event->hw;
300
301         wrmsrl(hwc->config_base, hwc->config);
302 }
303
304 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
305 {
306         unsigned msr = uncore_msr_box_ctl(box);
307
308         if (msr)
309                 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
310 }
311
312 static struct attribute *snbep_uncore_formats_attr[] = {
313         &format_attr_event.attr,
314         &format_attr_umask.attr,
315         &format_attr_edge.attr,
316         &format_attr_inv.attr,
317         &format_attr_thresh8.attr,
318         NULL,
319 };
320
321 static struct attribute *snbep_uncore_ubox_formats_attr[] = {
322         &format_attr_event.attr,
323         &format_attr_umask.attr,
324         &format_attr_edge.attr,
325         &format_attr_inv.attr,
326         &format_attr_thresh5.attr,
327         NULL,
328 };
329
330 static struct attribute *snbep_uncore_cbox_formats_attr[] = {
331         &format_attr_event.attr,
332         &format_attr_umask.attr,
333         &format_attr_edge.attr,
334         &format_attr_tid_en.attr,
335         &format_attr_inv.attr,
336         &format_attr_thresh8.attr,
337         &format_attr_filter_tid.attr,
338         &format_attr_filter_nid.attr,
339         &format_attr_filter_state.attr,
340         &format_attr_filter_opc.attr,
341         NULL,
342 };
343
344 static struct attribute *snbep_uncore_pcu_formats_attr[] = {
345         &format_attr_event_ext.attr,
346         &format_attr_occ_sel.attr,
347         &format_attr_edge.attr,
348         &format_attr_inv.attr,
349         &format_attr_thresh5.attr,
350         &format_attr_occ_invert.attr,
351         &format_attr_occ_edge.attr,
352         &format_attr_filter_band0.attr,
353         &format_attr_filter_band1.attr,
354         &format_attr_filter_band2.attr,
355         &format_attr_filter_band3.attr,
356         NULL,
357 };
358
359 static struct attribute *snbep_uncore_qpi_formats_attr[] = {
360         &format_attr_event_ext.attr,
361         &format_attr_umask.attr,
362         &format_attr_edge.attr,
363         &format_attr_inv.attr,
364         &format_attr_thresh8.attr,
365         &format_attr_match_rds.attr,
366         &format_attr_match_rnid30.attr,
367         &format_attr_match_rnid4.attr,
368         &format_attr_match_dnid.attr,
369         &format_attr_match_mc.attr,
370         &format_attr_match_opc.attr,
371         &format_attr_match_vnw.attr,
372         &format_attr_match0.attr,
373         &format_attr_match1.attr,
374         &format_attr_mask_rds.attr,
375         &format_attr_mask_rnid30.attr,
376         &format_attr_mask_rnid4.attr,
377         &format_attr_mask_dnid.attr,
378         &format_attr_mask_mc.attr,
379         &format_attr_mask_opc.attr,
380         &format_attr_mask_vnw.attr,
381         &format_attr_mask0.attr,
382         &format_attr_mask1.attr,
383         NULL,
384 };
385
386 static struct uncore_event_desc snbep_uncore_imc_events[] = {
387         INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
388         INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
389         INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
390         { /* end: all zeroes */ },
391 };
392
393 static struct uncore_event_desc snbep_uncore_qpi_events[] = {
394         INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
395         INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
396         INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
397         INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
398         { /* end: all zeroes */ },
399 };
400
401 static struct attribute_group snbep_uncore_format_group = {
402         .name = "format",
403         .attrs = snbep_uncore_formats_attr,
404 };
405
406 static struct attribute_group snbep_uncore_ubox_format_group = {
407         .name = "format",
408         .attrs = snbep_uncore_ubox_formats_attr,
409 };
410
411 static struct attribute_group snbep_uncore_cbox_format_group = {
412         .name = "format",
413         .attrs = snbep_uncore_cbox_formats_attr,
414 };
415
416 static struct attribute_group snbep_uncore_pcu_format_group = {
417         .name = "format",
418         .attrs = snbep_uncore_pcu_formats_attr,
419 };
420
421 static struct attribute_group snbep_uncore_qpi_format_group = {
422         .name = "format",
423         .attrs = snbep_uncore_qpi_formats_attr,
424 };
425
426 #define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                      \
427         .init_box       = snbep_uncore_msr_init_box,            \
428         .disable_box    = snbep_uncore_msr_disable_box,         \
429         .enable_box     = snbep_uncore_msr_enable_box,          \
430         .disable_event  = snbep_uncore_msr_disable_event,       \
431         .enable_event   = snbep_uncore_msr_enable_event,        \
432         .read_counter   = uncore_msr_read_counter
433
434 static struct intel_uncore_ops snbep_uncore_msr_ops = {
435         SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
436 };
437
438 #define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()                      \
439         .init_box       = snbep_uncore_pci_init_box,            \
440         .disable_box    = snbep_uncore_pci_disable_box,         \
441         .enable_box     = snbep_uncore_pci_enable_box,          \
442         .disable_event  = snbep_uncore_pci_disable_event,       \
443         .read_counter   = snbep_uncore_pci_read_counter
444
445 static struct intel_uncore_ops snbep_uncore_pci_ops = {
446         SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
447         .enable_event   = snbep_uncore_pci_enable_event,
448 };
449
450 static struct event_constraint snbep_uncore_cbox_constraints[] = {
451         UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
452         UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
453         UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
454         UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
455         UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
456         UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
457         UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
458         UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
459         UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
460         UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
461         UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
462         UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
463         UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
464         EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
465         UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
466         UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
467         UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
468         UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
469         UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
470         UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
471         UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
472         UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
473         UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
474         UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
475         UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
476         UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
477         EVENT_CONSTRAINT_END
478 };
479
480 static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
481         UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
482         UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
483         UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
484         UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
485         UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
486         UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
487         UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
488         UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
489         UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
490         UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
491         EVENT_CONSTRAINT_END
492 };
493
494 static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
495         UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
496         UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
497         UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
498         UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
499         UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
500         UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
501         UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
502         UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
503         UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
504         UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
505         UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
506         UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
507         UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
508         UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
509         UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
510         UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
511         UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
512         UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
513         UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
514         UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
515         UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
516         UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
517         UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
518         UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
519         UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
520         UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
521         UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
522         UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
523         EVENT_CONSTRAINT_END
524 };
525
526 static struct intel_uncore_type snbep_uncore_ubox = {
527         .name           = "ubox",
528         .num_counters   = 2,
529         .num_boxes      = 1,
530         .perf_ctr_bits  = 44,
531         .fixed_ctr_bits = 48,
532         .perf_ctr       = SNBEP_U_MSR_PMON_CTR0,
533         .event_ctl      = SNBEP_U_MSR_PMON_CTL0,
534         .event_mask     = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
535         .fixed_ctr      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
536         .fixed_ctl      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
537         .ops            = &snbep_uncore_msr_ops,
538         .format_group   = &snbep_uncore_ubox_format_group,
539 };
540
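/*
 * Events that implicitly need one or more Cbox filter fields.  The third
 * argument is a bitmask of the required fields and ends up in reg1->idx
 * via snbep_cbox_hw_config().
 */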
541 static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
542         SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
543                                   SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
544         SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
545         SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
546         SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
547         SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
548         SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
549         SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
550         SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
551         SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
552         SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
553         SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
554         SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
555         SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
556         SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
557         SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
558         SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
559         SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
560         SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
561         SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
562         SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
563         SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
564         SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
565         SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
566         SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
567         EVENT_EXTRA_END
568 };
569
570 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
571 {
572         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
573         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
574         int i;
575
576         if (uncore_box_is_fake(box))
577                 return;
578
579         for (i = 0; i < 5; i++) {
580                 if (reg1->alloc & (0x1 << i))
581                         atomic_sub(1 << (i * 6), &er->ref);
582         }
583         reg1->alloc = 0;
584 }
585
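/*
 * The Cbox filter MSR is shared by all counters of a box.  reg1->idx is a
 * bitmask of up to five filter fields the event needs, and er->ref packs a
 * 6-bit reference count per field.  A field can be claimed if it is unused
 * or already programmed with the same value; otherwise the event gets the
 * empty constraint and has to wait.
 */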
586 static struct event_constraint *
587 __snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
588                             u64 (*cbox_filter_mask)(int fields))
589 {
590         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
591         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
592         int i, alloc = 0;
593         unsigned long flags;
594         u64 mask;
595
596         if (reg1->idx == EXTRA_REG_NONE)
597                 return NULL;
598
599         raw_spin_lock_irqsave(&er->lock, flags);
600         for (i = 0; i < 5; i++) {
601                 if (!(reg1->idx & (0x1 << i)))
602                         continue;
603                 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
604                         continue;
605
606                 mask = cbox_filter_mask(0x1 << i);
607                 if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
608                     !((reg1->config ^ er->config) & mask)) {
609                         atomic_add(1 << (i * 6), &er->ref);
610                         er->config &= ~mask;
611                         er->config |= reg1->config & mask;
612                         alloc |= (0x1 << i);
613                 } else {
614                         break;
615                 }
616         }
617         raw_spin_unlock_irqrestore(&er->lock, flags);
618         if (i < 5)
619                 goto fail;
620
621         if (!uncore_box_is_fake(box))
622                 reg1->alloc |= alloc;
623
624         return NULL;
625 fail:
626         for (; i >= 0; i--) {
627                 if (alloc & (0x1 << i))
628                         atomic_sub(1 << (i * 6), &er->ref);
629         }
630         return &constraint_empty;
631 }
632
633 static u64 snbep_cbox_filter_mask(int fields)
634 {
635         u64 mask = 0;
636
637         if (fields & 0x1)
638                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
639         if (fields & 0x2)
640                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
641         if (fields & 0x4)
642                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
643         if (fields & 0x8)
644                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
645
646         return mask;
647 }
648
649 static struct event_constraint *
650 snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
651 {
652         return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
653 }
654
655 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
656 {
657         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
658         struct extra_reg *er;
659         int idx = 0;
660
661         for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
662                 if (er->event != (event->hw.config & er->config_mask))
663                         continue;
664                 idx |= er->idx;
665         }
666
667         if (idx) {
668                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
669                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
670                 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
671                 reg1->idx = idx;
672         }
673         return 0;
674 }
675
676 static struct intel_uncore_ops snbep_uncore_cbox_ops = {
677         SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
678         .hw_config              = snbep_cbox_hw_config,
679         .get_constraint         = snbep_cbox_get_constraint,
680         .put_constraint         = snbep_cbox_put_constraint,
681 };
682
683 static struct intel_uncore_type snbep_uncore_cbox = {
684         .name                   = "cbox",
685         .num_counters           = 4,
686         .num_boxes              = 8,
687         .perf_ctr_bits          = 44,
688         .event_ctl              = SNBEP_C0_MSR_PMON_CTL0,
689         .perf_ctr               = SNBEP_C0_MSR_PMON_CTR0,
690         .event_mask             = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
691         .box_ctl                = SNBEP_C0_MSR_PMON_BOX_CTL,
692         .msr_offset             = SNBEP_CBO_MSR_OFFSET,
693         .num_shared_regs        = 1,
694         .constraints            = snbep_uncore_cbox_constraints,
695         .ops                    = &snbep_uncore_cbox_ops,
696         .format_group           = &snbep_uncore_cbox_format_group,
697 };
698
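/*
 * Each PCU frequency band filter occupies one byte of the filter register.
 * Shift the event's band value from its current byte slot (reg1->idx) to
 * new_idx; with 'modify' set, also move the event itself to the new slot.
 */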
699 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
700 {
701         struct hw_perf_event *hwc = &event->hw;
702         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
703         u64 config = reg1->config;
704
705         if (new_idx > reg1->idx)
706                 config <<= 8 * (new_idx - reg1->idx);
707         else
708                 config >>= 8 * (reg1->idx - new_idx);
709
710         if (modify) {
711                 hwc->config += new_idx - reg1->idx;
712                 reg1->config = config;
713                 reg1->idx = new_idx;
714         }
715         return config;
716 }
717
718 static struct event_constraint *
719 snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
720 {
721         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
722         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
723         unsigned long flags;
724         int idx = reg1->idx;
725         u64 mask, config1 = reg1->config;
726         bool ok = false;
727
728         if (reg1->idx == EXTRA_REG_NONE ||
729             (!uncore_box_is_fake(box) && reg1->alloc))
730                 return NULL;
731 again:
732         mask = 0xffULL << (idx * 8);
733         raw_spin_lock_irqsave(&er->lock, flags);
734         if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
735             !((config1 ^ er->config) & mask)) {
736                 atomic_add(1 << (idx * 8), &er->ref);
737                 er->config &= ~mask;
738                 er->config |= config1 & mask;
739                 ok = true;
740         }
741         raw_spin_unlock_irqrestore(&er->lock, flags);
742
743         if (!ok) {
744                 idx = (idx + 1) % 4;
745                 if (idx != reg1->idx) {
746                         config1 = snbep_pcu_alter_er(event, idx, false);
747                         goto again;
748                 }
749                 return &constraint_empty;
750         }
751
752         if (!uncore_box_is_fake(box)) {
753                 if (idx != reg1->idx)
754                         snbep_pcu_alter_er(event, idx, true);
755                 reg1->alloc = 1;
756         }
757         return NULL;
758 }
759
760 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
761 {
762         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
763         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
764
765         if (uncore_box_is_fake(box) || !reg1->alloc)
766                 return;
767
768         atomic_sub(1 << (reg1->idx * 8), &er->ref);
769         reg1->alloc = 0;
770 }
771
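/*
 * Event selects 0xb-0xe are the frequency band events; they take their band
 * threshold from the PCU filter register, one byte of config1 per band.
 */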
772 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
773 {
774         struct hw_perf_event *hwc = &event->hw;
775         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
776         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
777
778         if (ev_sel >= 0xb && ev_sel <= 0xe) {
779                 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
780                 reg1->idx = ev_sel - 0xb;
781                 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
782         }
783         return 0;
784 }
785
786 static struct intel_uncore_ops snbep_uncore_pcu_ops = {
787         SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
788         .hw_config              = snbep_pcu_hw_config,
789         .get_constraint         = snbep_pcu_get_constraint,
790         .put_constraint         = snbep_pcu_put_constraint,
791 };
792
793 static struct intel_uncore_type snbep_uncore_pcu = {
794         .name                   = "pcu",
795         .num_counters           = 4,
796         .num_boxes              = 1,
797         .perf_ctr_bits          = 48,
798         .perf_ctr               = SNBEP_PCU_MSR_PMON_CTR0,
799         .event_ctl              = SNBEP_PCU_MSR_PMON_CTL0,
800         .event_mask             = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
801         .box_ctl                = SNBEP_PCU_MSR_PMON_BOX_CTL,
802         .num_shared_regs        = 1,
803         .ops                    = &snbep_uncore_pcu_ops,
804         .format_group           = &snbep_uncore_pcu_format_group,
805 };
806
807 static struct intel_uncore_type *snbep_msr_uncores[] = {
808         &snbep_uncore_ubox,
809         &snbep_uncore_cbox,
810         &snbep_uncore_pcu,
811         NULL,
812 };
813
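/* Index of the per-port QPI filter device in extra_pci_dev[phys_id][]. */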
814 enum {
815         SNBEP_PCI_QPI_PORT0_FILTER,
816         SNBEP_PCI_QPI_PORT1_FILTER,
817 };
818
819 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
820 {
821         struct hw_perf_event *hwc = &event->hw;
822         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
823         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
824
825         if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
826                 reg1->idx = 0;
827                 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
828                 reg1->config = event->attr.config1;
829                 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
830                 reg2->config = event->attr.config2;
831         }
832         return 0;
833 }
834
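/*
 * The QPI match/mask registers live in a separate "filter" PCI device (see
 * extra_pci_dev[]), so program them there before enabling the event on the
 * counting device.
 */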
835 static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
836 {
837         struct pci_dev *pdev = box->pci_dev;
838         struct hw_perf_event *hwc = &event->hw;
839         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
840         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
841
842         if (reg1->idx != EXTRA_REG_NONE) {
843                 int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
844                 struct pci_dev *filter_pdev = extra_pci_dev[box->phys_id][idx];
845                 WARN_ON_ONCE(!filter_pdev);
846                 if (filter_pdev) {
847                         pci_write_config_dword(filter_pdev, reg1->reg,
848                                                 (u32)reg1->config);
849                         pci_write_config_dword(filter_pdev, reg1->reg + 4,
850                                                 (u32)(reg1->config >> 32));
851                         pci_write_config_dword(filter_pdev, reg2->reg,
852                                                 (u32)reg2->config);
853                         pci_write_config_dword(filter_pdev, reg2->reg + 4,
854                                                 (u32)(reg2->config >> 32));
855                 }
856         }
857
858         pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
859 }
860
861 static struct intel_uncore_ops snbep_uncore_qpi_ops = {
862         SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
863         .enable_event           = snbep_qpi_enable_event,
864         .hw_config              = snbep_qpi_hw_config,
865         .get_constraint         = uncore_get_constraint,
866         .put_constraint         = uncore_put_constraint,
867 };
868
869 #define SNBEP_UNCORE_PCI_COMMON_INIT()                          \
870         .perf_ctr       = SNBEP_PCI_PMON_CTR0,                  \
871         .event_ctl      = SNBEP_PCI_PMON_CTL0,                  \
872         .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,            \
873         .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,               \
874         .ops            = &snbep_uncore_pci_ops,                \
875         .format_group   = &snbep_uncore_format_group
876
877 static struct intel_uncore_type snbep_uncore_ha = {
878         .name           = "ha",
879         .num_counters   = 4,
880         .num_boxes      = 1,
881         .perf_ctr_bits  = 48,
882         SNBEP_UNCORE_PCI_COMMON_INIT(),
883 };
884
885 static struct intel_uncore_type snbep_uncore_imc = {
886         .name           = "imc",
887         .num_counters   = 4,
888         .num_boxes      = 4,
889         .perf_ctr_bits  = 48,
890         .fixed_ctr_bits = 48,
891         .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
892         .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
893         .event_descs    = snbep_uncore_imc_events,
894         SNBEP_UNCORE_PCI_COMMON_INIT(),
895 };
896
897 static struct intel_uncore_type snbep_uncore_qpi = {
898         .name                   = "qpi",
899         .num_counters           = 4,
900         .num_boxes              = 2,
901         .perf_ctr_bits          = 48,
902         .perf_ctr               = SNBEP_PCI_PMON_CTR0,
903         .event_ctl              = SNBEP_PCI_PMON_CTL0,
904         .event_mask             = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
905         .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
906         .num_shared_regs        = 1,
907         .ops                    = &snbep_uncore_qpi_ops,
908         .event_descs            = snbep_uncore_qpi_events,
909         .format_group           = &snbep_uncore_qpi_format_group,
910 };
911
912
913 static struct intel_uncore_type snbep_uncore_r2pcie = {
914         .name           = "r2pcie",
915         .num_counters   = 4,
916         .num_boxes      = 1,
917         .perf_ctr_bits  = 44,
918         .constraints    = snbep_uncore_r2pcie_constraints,
919         SNBEP_UNCORE_PCI_COMMON_INIT(),
920 };
921
922 static struct intel_uncore_type snbep_uncore_r3qpi = {
923         .name           = "r3qpi",
924         .num_counters   = 3,
925         .num_boxes      = 2,
926         .perf_ctr_bits  = 44,
927         .constraints    = snbep_uncore_r3qpi_constraints,
928         SNBEP_UNCORE_PCI_COMMON_INIT(),
929 };
930
931 enum {
932         SNBEP_PCI_UNCORE_HA,
933         SNBEP_PCI_UNCORE_IMC,
934         SNBEP_PCI_UNCORE_QPI,
935         SNBEP_PCI_UNCORE_R2PCIE,
936         SNBEP_PCI_UNCORE_R3QPI,
937 };
938
939 static struct intel_uncore_type *snbep_pci_uncores[] = {
940         [SNBEP_PCI_UNCORE_HA]           = &snbep_uncore_ha,
941         [SNBEP_PCI_UNCORE_IMC]          = &snbep_uncore_imc,
942         [SNBEP_PCI_UNCORE_QPI]          = &snbep_uncore_qpi,
943         [SNBEP_PCI_UNCORE_R2PCIE]       = &snbep_uncore_r2pcie,
944         [SNBEP_PCI_UNCORE_R3QPI]        = &snbep_uncore_r3qpi,
945         NULL,
946 };
947
948 static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
949         { /* Home Agent */
950                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
951                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
952         },
953         { /* MC Channel 0 */
954                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
955                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
956         },
957         { /* MC Channel 1 */
958                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
959                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
960         },
961         { /* MC Channel 2 */
962                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
963                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
964         },
965         { /* MC Channel 3 */
966                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
967                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
968         },
969         { /* QPI Port 0 */
970                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
971                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
972         },
973         { /* QPI Port 1 */
974                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
975                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
976         },
977         { /* R2PCIe */
978                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
979                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
980         },
981         { /* R3QPI Link 0 */
982                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
983                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
984         },
985         { /* R3QPI Link 1 */
986                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
987                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
988         },
989         { /* QPI Port 0 filter  */
990                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
991                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
992                                                    SNBEP_PCI_QPI_PORT0_FILTER),
993         },
994         { /* QPI Port 0 filter  */
995                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
996                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
997                                                    SNBEP_PCI_QPI_PORT1_FILTER),
998         },
999         { /* end: all zeroes */ }
1000 };
1001
1002 static struct pci_driver snbep_uncore_pci_driver = {
1003         .name           = "snbep_uncore",
1004         .id_table       = snbep_uncore_pci_ids,
1005 };
1006
1007 /*
1008  * build pci bus to socket mapping
1009  */
1010 static int snbep_pci2phy_map_init(int devid)
1011 {
1012         struct pci_dev *ubox_dev = NULL;
1013         int i, bus, nodeid;
1014         int err = 0;
1015         u32 config = 0;
1016
1017         while (1) {
1018                 /* find the UBOX device */
1019                 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
1020                 if (!ubox_dev)
1021                         break;
1022                 bus = ubox_dev->bus->number;
1023                 /* get the Node ID of the local register */
1024                 err = pci_read_config_dword(ubox_dev, 0x40, &config);
1025                 if (err)
1026                         break;
1027                 nodeid = config;
1028                 /* get the Node ID mapping */
1029                 err = pci_read_config_dword(ubox_dev, 0x54, &config);
1030                 if (err)
1031                         break;
1032                 /*
1033                  * each group of three bits in the Node ID mapping register
1034                  * maps to a particular node.
1035                  */
1036                 for (i = 0; i < 8; i++) {
1037                         if (nodeid == ((config >> (3 * i)) & 0x7)) {
1038                                 pcibus_to_physid[bus] = i;
1039                                 break;
1040                         }
1041                 }
1042         }
1043
1044         if (!err) {
1045                 /*
1046                  * For PCI buses with no UBOX device, find the next bus
1047                  * that has a UBOX device and use its mapping.
1048                  */
1049                 i = -1;
1050                 for (bus = 255; bus >= 0; bus--) {
1051                         if (pcibus_to_physid[bus] >= 0)
1052                                 i = pcibus_to_physid[bus];
1053                         else
1054                                 pcibus_to_physid[bus] = i;
1055                 }
1056         }
1057
1058         if (ubox_dev)
1059                 pci_dev_put(ubox_dev);
1060
1061         return err ? pcibios_err_to_errno(err) : 0;
1062 }
1063 /* end of Sandy Bridge-EP uncore support */
1064
1065 /* IvyTown uncore support */
1066 static void ivt_uncore_msr_init_box(struct intel_uncore_box *box)
1067 {
1068         unsigned msr = uncore_msr_box_ctl(box);
1069         if (msr)
1070                 wrmsrl(msr, IVT_PMON_BOX_CTL_INT);
1071 }
1072
1073 static void ivt_uncore_pci_init_box(struct intel_uncore_box *box)
1074 {
1075         struct pci_dev *pdev = box->pci_dev;
1076
1077         pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVT_PMON_BOX_CTL_INT);
1078 }
1079
1080 #define IVT_UNCORE_MSR_OPS_COMMON_INIT()                        \
1081         .init_box       = ivt_uncore_msr_init_box,              \
1082         .disable_box    = snbep_uncore_msr_disable_box,         \
1083         .enable_box     = snbep_uncore_msr_enable_box,          \
1084         .disable_event  = snbep_uncore_msr_disable_event,       \
1085         .enable_event   = snbep_uncore_msr_enable_event,        \
1086         .read_counter   = uncore_msr_read_counter
1087
1088 static struct intel_uncore_ops ivt_uncore_msr_ops = {
1089         IVT_UNCORE_MSR_OPS_COMMON_INIT(),
1090 };
1091
1092 static struct intel_uncore_ops ivt_uncore_pci_ops = {
1093         .init_box       = ivt_uncore_pci_init_box,
1094         .disable_box    = snbep_uncore_pci_disable_box,
1095         .enable_box     = snbep_uncore_pci_enable_box,
1096         .disable_event  = snbep_uncore_pci_disable_event,
1097         .enable_event   = snbep_uncore_pci_enable_event,
1098         .read_counter   = snbep_uncore_pci_read_counter,
1099 };
1100
1101 #define IVT_UNCORE_PCI_COMMON_INIT()                            \
1102         .perf_ctr       = SNBEP_PCI_PMON_CTR0,                  \
1103         .event_ctl      = SNBEP_PCI_PMON_CTL0,                  \
1104         .event_mask     = IVT_PMON_RAW_EVENT_MASK,              \
1105         .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,               \
1106         .ops            = &ivt_uncore_pci_ops,                  \
1107         .format_group   = &ivt_uncore_format_group
1108
1109 static struct attribute *ivt_uncore_formats_attr[] = {
1110         &format_attr_event.attr,
1111         &format_attr_umask.attr,
1112         &format_attr_edge.attr,
1113         &format_attr_inv.attr,
1114         &format_attr_thresh8.attr,
1115         NULL,
1116 };
1117
1118 static struct attribute *ivt_uncore_ubox_formats_attr[] = {
1119         &format_attr_event.attr,
1120         &format_attr_umask.attr,
1121         &format_attr_edge.attr,
1122         &format_attr_inv.attr,
1123         &format_attr_thresh5.attr,
1124         NULL,
1125 };
1126
1127 static struct attribute *ivt_uncore_cbox_formats_attr[] = {
1128         &format_attr_event.attr,
1129         &format_attr_umask.attr,
1130         &format_attr_edge.attr,
1131         &format_attr_tid_en.attr,
1132         &format_attr_thresh8.attr,
1133         &format_attr_filter_tid.attr,
1134         &format_attr_filter_link.attr,
1135         &format_attr_filter_state2.attr,
1136         &format_attr_filter_nid2.attr,
1137         &format_attr_filter_opc2.attr,
1138         NULL,
1139 };
1140
1141 static struct attribute *ivt_uncore_pcu_formats_attr[] = {
1142         &format_attr_event_ext.attr,
1143         &format_attr_occ_sel.attr,
1144         &format_attr_edge.attr,
1145         &format_attr_thresh5.attr,
1146         &format_attr_occ_invert.attr,
1147         &format_attr_occ_edge.attr,
1148         &format_attr_filter_band0.attr,
1149         &format_attr_filter_band1.attr,
1150         &format_attr_filter_band2.attr,
1151         &format_attr_filter_band3.attr,
1152         NULL,
1153 };
1154
1155 static struct attribute *ivt_uncore_qpi_formats_attr[] = {
1156         &format_attr_event_ext.attr,
1157         &format_attr_umask.attr,
1158         &format_attr_edge.attr,
1159         &format_attr_thresh8.attr,
1160         &format_attr_match_rds.attr,
1161         &format_attr_match_rnid30.attr,
1162         &format_attr_match_rnid4.attr,
1163         &format_attr_match_dnid.attr,
1164         &format_attr_match_mc.attr,
1165         &format_attr_match_opc.attr,
1166         &format_attr_match_vnw.attr,
1167         &format_attr_match0.attr,
1168         &format_attr_match1.attr,
1169         &format_attr_mask_rds.attr,
1170         &format_attr_mask_rnid30.attr,
1171         &format_attr_mask_rnid4.attr,
1172         &format_attr_mask_dnid.attr,
1173         &format_attr_mask_mc.attr,
1174         &format_attr_mask_opc.attr,
1175         &format_attr_mask_vnw.attr,
1176         &format_attr_mask0.attr,
1177         &format_attr_mask1.attr,
1178         NULL,
1179 };
1180
1181 static struct attribute_group ivt_uncore_format_group = {
1182         .name = "format",
1183         .attrs = ivt_uncore_formats_attr,
1184 };
1185
1186 static struct attribute_group ivt_uncore_ubox_format_group = {
1187         .name = "format",
1188         .attrs = ivt_uncore_ubox_formats_attr,
1189 };
1190
1191 static struct attribute_group ivt_uncore_cbox_format_group = {
1192         .name = "format",
1193         .attrs = ivt_uncore_cbox_formats_attr,
1194 };
1195
1196 static struct attribute_group ivt_uncore_pcu_format_group = {
1197         .name = "format",
1198         .attrs = ivt_uncore_pcu_formats_attr,
1199 };
1200
1201 static struct attribute_group ivt_uncore_qpi_format_group = {
1202         .name = "format",
1203         .attrs = ivt_uncore_qpi_formats_attr,
1204 };
1205
1206 static struct intel_uncore_type ivt_uncore_ubox = {
1207         .name           = "ubox",
1208         .num_counters   = 2,
1209         .num_boxes      = 1,
1210         .perf_ctr_bits  = 44,
1211         .fixed_ctr_bits = 48,
1212         .perf_ctr       = SNBEP_U_MSR_PMON_CTR0,
1213         .event_ctl      = SNBEP_U_MSR_PMON_CTL0,
1214         .event_mask     = IVT_U_MSR_PMON_RAW_EVENT_MASK,
1215         .fixed_ctr      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
1216         .fixed_ctl      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
1217         .ops            = &ivt_uncore_msr_ops,
1218         .format_group   = &ivt_uncore_ubox_format_group,
1219 };
1220
1221 static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
1222         SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1223                                   SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1224         SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1225         SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1226         SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1227         SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
1228         SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1229         SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
1230         SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1231         SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
1232         SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1233         SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
1234         SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1235         SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1236         SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1237         SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1238         SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1239         SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1240         SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1241         SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1242         SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1243         SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1244         SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1245         SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1246         SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1247         SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1248         SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1249         SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1250         SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1251         SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1252         SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1253         SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1254         SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1255         SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1256         SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1257         SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1258         SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1259         EVENT_EXTRA_END
1260 };
1261
1262 static u64 ivt_cbox_filter_mask(int fields)
1263 {
1264         u64 mask = 0;
1265
1266         if (fields & 0x1)
1267                 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_TID;
1268         if (fields & 0x2)
1269                 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_LINK;
1270         if (fields & 0x4)
1271                 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_STATE;
1272         if (fields & 0x8)
1273                 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_NID;
1274         if (fields & 0x10)
1275                 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_OPC;
1276
1277         return mask;
1278 }
1279
1280 static struct event_constraint *
1281 ivt_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1282 {
1283         return __snbep_cbox_get_constraint(box, event, ivt_cbox_filter_mask);
1284 }
1285
1286 static int ivt_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1287 {
1288         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1289         struct extra_reg *er;
1290         int idx = 0;
1291
1292         for (er = ivt_uncore_cbox_extra_regs; er->msr; er++) {
1293                 if (er->event != (event->hw.config & er->config_mask))
1294                         continue;
1295                 idx |= er->idx;
1296         }
1297
1298         if (idx) {
1299                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1300                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1301                 reg1->config = event->attr.config1 & ivt_cbox_filter_mask(idx);
1302                 reg1->idx = idx;
1303         }
1304         return 0;
1305 }
1306
1307 static void ivt_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1308 {
1309         struct hw_perf_event *hwc = &event->hw;
1310         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1311
1312         if (reg1->idx != EXTRA_REG_NONE) {
1313                 u64 filter = uncore_shared_reg_config(box, 0);
1314                 wrmsrl(reg1->reg, filter & 0xffffffff);
1315                 wrmsrl(reg1->reg + 6, filter >> 32);
1316         }
1317
1318         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1319 }
1320
1321 static struct intel_uncore_ops ivt_uncore_cbox_ops = {
1322         .init_box               = ivt_uncore_msr_init_box,
1323         .disable_box            = snbep_uncore_msr_disable_box,
1324         .enable_box             = snbep_uncore_msr_enable_box,
1325         .disable_event          = snbep_uncore_msr_disable_event,
1326         .enable_event           = ivt_cbox_enable_event,
1327         .read_counter           = uncore_msr_read_counter,
1328         .hw_config              = ivt_cbox_hw_config,
1329         .get_constraint         = ivt_cbox_get_constraint,
1330         .put_constraint         = snbep_cbox_put_constraint,
1331 };
1332
1333 static struct intel_uncore_type ivt_uncore_cbox = {
1334         .name                   = "cbox",
1335         .num_counters           = 4,
1336         .num_boxes              = 15,
1337         .perf_ctr_bits          = 44,
1338         .event_ctl              = SNBEP_C0_MSR_PMON_CTL0,
1339         .perf_ctr               = SNBEP_C0_MSR_PMON_CTR0,
1340         .event_mask             = IVT_CBO_MSR_PMON_RAW_EVENT_MASK,
1341         .box_ctl                = SNBEP_C0_MSR_PMON_BOX_CTL,
1342         .msr_offset             = SNBEP_CBO_MSR_OFFSET,
1343         .num_shared_regs        = 1,
1344         .constraints            = snbep_uncore_cbox_constraints,
1345         .ops                    = &ivt_uncore_cbox_ops,
1346         .format_group           = &ivt_uncore_cbox_format_group,
1347 };
1348
1349 static struct intel_uncore_ops ivt_uncore_pcu_ops = {
1350         IVT_UNCORE_MSR_OPS_COMMON_INIT(),
1351         .hw_config              = snbep_pcu_hw_config,
1352         .get_constraint         = snbep_pcu_get_constraint,
1353         .put_constraint         = snbep_pcu_put_constraint,
1354 };
1355
1356 static struct intel_uncore_type ivt_uncore_pcu = {
1357         .name                   = "pcu",
1358         .num_counters           = 4,
1359         .num_boxes              = 1,
1360         .perf_ctr_bits          = 48,
1361         .perf_ctr               = SNBEP_PCU_MSR_PMON_CTR0,
1362         .event_ctl              = SNBEP_PCU_MSR_PMON_CTL0,
1363         .event_mask             = IVT_PCU_MSR_PMON_RAW_EVENT_MASK,
1364         .box_ctl                = SNBEP_PCU_MSR_PMON_BOX_CTL,
1365         .num_shared_regs        = 1,
1366         .ops                    = &ivt_uncore_pcu_ops,
1367         .format_group           = &ivt_uncore_pcu_format_group,
1368 };
1369
1370 static struct intel_uncore_type *ivt_msr_uncores[] = {
1371         &ivt_uncore_ubox,
1372         &ivt_uncore_cbox,
1373         &ivt_uncore_pcu,
1374         NULL,
1375 };
1376
1377 static struct intel_uncore_type ivt_uncore_ha = {
1378         .name           = "ha",
1379         .num_counters   = 4,
1380         .num_boxes      = 2,
1381         .perf_ctr_bits  = 48,
1382         IVT_UNCORE_PCI_COMMON_INIT(),
1383 };
1384
1385 static struct intel_uncore_type ivt_uncore_imc = {
1386         .name           = "imc",
1387         .num_counters   = 4,
1388         .num_boxes      = 8,
1389         .perf_ctr_bits  = 48,
1390         .fixed_ctr_bits = 48,
1391         .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1392         .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1393         IVT_UNCORE_PCI_COMMON_INIT(),
1394 };
1395
1396 /* registers in IRP boxes are not properly aligned */
1397 static unsigned ivt_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
1398 static unsigned ivt_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1399
1400 static void ivt_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1401 {
1402         struct pci_dev *pdev = box->pci_dev;
1403         struct hw_perf_event *hwc = &event->hw;
1404
1405         pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx],
1406                                hwc->config | SNBEP_PMON_CTL_EN);
1407 }
1408
1409 static void ivt_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1410 {
1411         struct pci_dev *pdev = box->pci_dev;
1412         struct hw_perf_event *hwc = &event->hw;
1413
1414         pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx], hwc->config);
1415 }
1416
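/*
 * A counter value is assembled from two 32-bit PCI config reads: the low
 * dword at the counter offset and the high dword at that offset + 4.
 */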
1417 static u64 ivt_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1418 {
1419         struct pci_dev *pdev = box->pci_dev;
1420         struct hw_perf_event *hwc = &event->hw;
1421         u64 count = 0;
1422
1423         pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1424         pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1425
1426         return count;
1427 }
1428
1429 static struct intel_uncore_ops ivt_uncore_irp_ops = {
1430         .init_box       = ivt_uncore_pci_init_box,
1431         .disable_box    = snbep_uncore_pci_disable_box,
1432         .enable_box     = snbep_uncore_pci_enable_box,
1433         .disable_event  = ivt_uncore_irp_disable_event,
1434         .enable_event   = ivt_uncore_irp_enable_event,
1435         .read_counter   = ivt_uncore_irp_read_counter,
1436 };
1437
1438 static struct intel_uncore_type ivt_uncore_irp = {
1439         .name                   = "irp",
1440         .num_counters           = 4,
1441         .num_boxes              = 1,
1442         .perf_ctr_bits          = 48,
1443         .event_mask             = IVT_PMON_RAW_EVENT_MASK,
1444         .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
1445         .ops                    = &ivt_uncore_irp_ops,
1446         .format_group           = &ivt_uncore_format_group,
1447 };
1448
1449 static struct intel_uncore_ops ivt_uncore_qpi_ops = {
1450         .init_box       = ivt_uncore_pci_init_box,
1451         .disable_box    = snbep_uncore_pci_disable_box,
1452         .enable_box     = snbep_uncore_pci_enable_box,
1453         .disable_event  = snbep_uncore_pci_disable_event,
1454         .enable_event   = snbep_qpi_enable_event,
1455         .read_counter   = snbep_uncore_pci_read_counter,
1456         .hw_config      = snbep_qpi_hw_config,
1457         .get_constraint = uncore_get_constraint,
1458         .put_constraint = uncore_put_constraint,
1459 };
1460
1461 static struct intel_uncore_type ivt_uncore_qpi = {
1462         .name                   = "qpi",
1463         .num_counters           = 4,
1464         .num_boxes              = 3,
1465         .perf_ctr_bits          = 48,
1466         .perf_ctr               = SNBEP_PCI_PMON_CTR0,
1467         .event_ctl              = SNBEP_PCI_PMON_CTL0,
1468         .event_mask             = IVT_QPI_PCI_PMON_RAW_EVENT_MASK,
1469         .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
1470         .num_shared_regs        = 1,
1471         .ops                    = &ivt_uncore_qpi_ops,
1472         .format_group           = &ivt_uncore_qpi_format_group,
1473 };
1474
1475 static struct intel_uncore_type ivt_uncore_r2pcie = {
1476         .name           = "r2pcie",
1477         .num_counters   = 4,
1478         .num_boxes      = 1,
1479         .perf_ctr_bits  = 44,
1480         .constraints    = snbep_uncore_r2pcie_constraints,
1481         IVT_UNCORE_PCI_COMMON_INIT(),
1482 };
1483
1484 static struct intel_uncore_type ivt_uncore_r3qpi = {
1485         .name           = "r3qpi",
1486         .num_counters   = 3,
1487         .num_boxes      = 2,
1488         .perf_ctr_bits  = 44,
1489         .constraints    = snbep_uncore_r3qpi_constraints,
1490         IVT_UNCORE_PCI_COMMON_INIT(),
1491 };
1492
1493 enum {
1494         IVT_PCI_UNCORE_HA,
1495         IVT_PCI_UNCORE_IMC,
1496         IVT_PCI_UNCORE_IRP,
1497         IVT_PCI_UNCORE_QPI,
1498         IVT_PCI_UNCORE_R2PCIE,
1499         IVT_PCI_UNCORE_R3QPI,
1500 };
1501
1502 static struct intel_uncore_type *ivt_pci_uncores[] = {
1503         [IVT_PCI_UNCORE_HA]     = &ivt_uncore_ha,
1504         [IVT_PCI_UNCORE_IMC]    = &ivt_uncore_imc,
1505         [IVT_PCI_UNCORE_IRP]    = &ivt_uncore_irp,
1506         [IVT_PCI_UNCORE_QPI]    = &ivt_uncore_qpi,
1507         [IVT_PCI_UNCORE_R2PCIE] = &ivt_uncore_r2pcie,
1508         [IVT_PCI_UNCORE_R3QPI]  = &ivt_uncore_r3qpi,
1509         NULL,
1510 };
1511
1512 static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = {
1513         { /* Home Agent 0 */
1514                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1515                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 0),
1516         },
1517         { /* Home Agent 1 */
1518                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1519                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 1),
1520         },
1521         { /* MC0 Channel 0 */
1522                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
1523                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 0),
1524         },
1525         { /* MC0 Channel 1 */
1526                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
1527                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 1),
1528         },
1529         { /* MC0 Channel 3 */
1530                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
1531                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 2),
1532         },
1533         { /* MC0 Channel 4 */
1534                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
1535                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 3),
1536         },
1537         { /* MC1 Channel 0 */
1538                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
1539                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 4),
1540         },
1541         { /* MC1 Channel 1 */
1542                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
1543                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 5),
1544         },
1545         { /* MC1 Channel 3 */
1546                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
1547                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 6),
1548         },
1549         { /* MC1 Channel 4 */
1550                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
1551                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 7),
1552         },
1553         { /* IRP */
1554                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
1555                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IRP, 0),
1556         },
1557         { /* QPI0 Port 0 */
1558                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
1559                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 0),
1560         },
1561         { /* QPI0 Port 1 */
1562                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
1563                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 1),
1564         },
1565         { /* QPI1 Port 2 */
1566                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
1567                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 2),
1568         },
1569         { /* R2PCIe */
1570                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
1571                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R2PCIE, 0),
1572         },
1573         { /* R3QPI0 Link 0 */
1574                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
1575                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 0),
1576         },
1577         { /* R3QPI0 Link 1 */
1578                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
1579                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 1),
1580         },
1581         { /* R3QPI1 Link 2 */
1582                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
1583                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 2),
1584         },
1585         { /* QPI Port 0 filter  */
1586                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
1587                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1588                                                    SNBEP_PCI_QPI_PORT0_FILTER),
1589         },
1590         { /* QPI Port 1 filter  */
1591                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
1592                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1593                                                    SNBEP_PCI_QPI_PORT1_FILTER),
1594         },
1595         { /* end: all zeroes */ }
1596 };
1597
1598 static struct pci_driver ivt_uncore_pci_driver = {
1599         .name           = "ivt_uncore",
1600         .id_table       = ivt_uncore_pci_ids,
1601 };
1602 /* end of IvyTown uncore support */
1603
1604 /* Sandy Bridge uncore support */
1605 static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1606 {
1607         struct hw_perf_event *hwc = &event->hw;
1608
1609         if (hwc->idx < UNCORE_PMC_IDX_FIXED)
1610                 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
1611         else
1612                 wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
1613 }
1614
1615 static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1616 {
1617         wrmsrl(event->hw.config_base, 0);
1618 }
1619
1620 static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
1621 {
1622         if (box->pmu->pmu_idx == 0) {
1623                 wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
1624                         SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
1625         }
1626 }
1627
1628 static struct uncore_event_desc snb_uncore_events[] = {
1629         INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
1630         { /* end: all zeroes */ },
1631 };
1632
1633 static struct attribute *snb_uncore_formats_attr[] = {
1634         &format_attr_event.attr,
1635         &format_attr_umask.attr,
1636         &format_attr_edge.attr,
1637         &format_attr_inv.attr,
1638         &format_attr_cmask5.attr,
1639         NULL,
1640 };
1641
1642 static struct attribute_group snb_uncore_format_group = {
1643         .name           = "format",
1644         .attrs          = snb_uncore_formats_attr,
1645 };
1646
1647 static struct intel_uncore_ops snb_uncore_msr_ops = {
1648         .init_box       = snb_uncore_msr_init_box,
1649         .disable_event  = snb_uncore_msr_disable_event,
1650         .enable_event   = snb_uncore_msr_enable_event,
1651         .read_counter   = uncore_msr_read_counter,
1652 };
1653
1654 static struct event_constraint snb_uncore_cbox_constraints[] = {
1655         UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
1656         UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
1657         EVENT_CONSTRAINT_END
1658 };
1659
1660 static struct intel_uncore_type snb_uncore_cbox = {
1661         .name           = "cbox",
1662         .num_counters   = 2,
1663         .num_boxes      = 4,
1664         .perf_ctr_bits  = 44,
1665         .fixed_ctr_bits = 48,
1666         .perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
1667         .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
1668         .fixed_ctr      = SNB_UNC_FIXED_CTR,
1669         .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
1670         .single_fixed   = 1,
1671         .event_mask     = SNB_UNC_RAW_EVENT_MASK,
1672         .msr_offset     = SNB_UNC_CBO_MSR_OFFSET,
1673         .constraints    = snb_uncore_cbox_constraints,
1674         .ops            = &snb_uncore_msr_ops,
1675         .format_group   = &snb_uncore_format_group,
1676         .event_descs    = snb_uncore_events,
1677 };
1678
1679 static struct intel_uncore_type *snb_msr_uncores[] = {
1680         &snb_uncore_cbox,
1681         NULL,
1682 };
1683
1684 enum {
1685         SNB_PCI_UNCORE_IMC,
1686 };
1687
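/*
 * The 6.103515625e-5 scale below equals 64 / 2^20: each raw count
 * (presumably one 64-byte cache line) is converted to MiB when multiplied
 * by the scale.
 */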
1688 static struct uncore_event_desc snb_uncore_imc_events[] = {
1689         INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
1690         INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
1691         INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),
1692
1693         INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
1694         INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
1695         INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),
1696
1697         { /* end: all zeroes */ },
1698 };
1699
1700 #define SNB_UNCORE_PCI_IMC_EVENT_MASK           0xff
1701 #define SNB_UNCORE_PCI_IMC_BAR_OFFSET           0x48
1702
1703 /* page size multiple covering all config regs */
1704 #define SNB_UNCORE_PCI_IMC_MAP_SIZE             0x6000
1705
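/* Each free-running counter lives at a fixed offset inside the mapped region. */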
1706 #define SNB_UNCORE_PCI_IMC_DATA_READS           0x1
1707 #define SNB_UNCORE_PCI_IMC_DATA_READS_BASE      0x5050
1708 #define SNB_UNCORE_PCI_IMC_DATA_WRITES          0x2
1709 #define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE     0x5054
1710 #define SNB_UNCORE_PCI_IMC_CTR_BASE             SNB_UNCORE_PCI_IMC_DATA_READS_BASE
1711
1712 static struct attribute *snb_uncore_imc_formats_attr[] = {
1713         &format_attr_event.attr,
1714         NULL,
1715 };
1716
1717 static struct attribute_group snb_uncore_imc_format_group = {
1718         .name = "format",
1719         .attrs = snb_uncore_imc_formats_attr,
1720 };
1721
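/*
 * The client IMC exposes its counters through an MMIO region: the base
 * address is read from PCI config space at offset 0x48, the low (flag)
 * bits are masked off down to a page boundary, and the region is
 * ioremap()ed for direct counter reads.
 */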
1722 static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
1723 {
1724         struct pci_dev *pdev = box->pci_dev;
1725         int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
1726         resource_size_t addr;
1727         u32 pci_dword;
1728
1729         pci_read_config_dword(pdev, where, &pci_dword);
1730         addr = pci_dword;
1731
1732 #ifdef CONFIG_PHYS_ADDR_T_64BIT
1733         pci_read_config_dword(pdev, where + 4, &pci_dword);
1734         addr |= ((resource_size_t)pci_dword << 32);
1735 #endif
1736
1737         addr &= ~(PAGE_SIZE - 1);
1738
1739         box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
1740         box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
1741 }
1742
1743 static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
1744 {}
1745
1746 static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
1747 {}
1748
1749 static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1750 {}
1751
1752 static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1753 {}
1754
1755 static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1756 {
1757         struct hw_perf_event *hwc = &event->hw;
1758
1759         return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
1760 }
1761
1762 /*
1763  * Custom event_init() function because we define our own fixed,
1764  * free-running counters, so we do not want to conflict with the generic
1765  * uncore logic. This also simplifies processing.
1766  */
1767 static int snb_uncore_imc_event_init(struct perf_event *event)
1768 {
1769         struct intel_uncore_pmu *pmu;
1770         struct intel_uncore_box *box;
1771         struct hw_perf_event *hwc = &event->hw;
1772         u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
1773         int idx, base;
1774
1775         if (event->attr.type != event->pmu->type)
1776                 return -ENOENT;
1777
1778         pmu = uncore_event_to_pmu(event);
1779         /* no device found for this pmu */
1780         if (pmu->func_id < 0)
1781                 return -ENOENT;
1782
1783         /* Sampling not supported yet */
1784         if (hwc->sample_period)
1785                 return -EINVAL;
1786
1787         /* unsupported modes and filters */
1788         if (event->attr.exclude_user   ||
1789             event->attr.exclude_kernel ||
1790             event->attr.exclude_hv     ||
1791             event->attr.exclude_idle   ||
1792             event->attr.exclude_host   ||
1793             event->attr.exclude_guest  ||
1794             event->attr.sample_period) /* no sampling */
1795                 return -EINVAL;
1796
1797         /*
1798          * Place all uncore events for a particular physical package
1799          * onto a single cpu
1800          */
1801         if (event->cpu < 0)
1802                 return -EINVAL;
1803
1804         /* check only supported bits are set */
1805         if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
1806                 return -EINVAL;
1807
1808         box = uncore_pmu_to_box(pmu, event->cpu);
1809         if (!box || box->cpu < 0)
1810                 return -EINVAL;
1811
1812         event->cpu = box->cpu;
1813
1814         event->hw.idx = -1;
1815         event->hw.last_tag = ~0ULL;
1816         event->hw.extra_reg.idx = EXTRA_REG_NONE;
1817         event->hw.branch_reg.idx = EXTRA_REG_NONE;
1818         /*
1819          * check event is known (whitelist, determines counter)
1820          */
1821         switch (cfg) {
1822         case SNB_UNCORE_PCI_IMC_DATA_READS:
1823                 base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
1824                 idx = UNCORE_PMC_IDX_FIXED;
1825                 break;
1826         case SNB_UNCORE_PCI_IMC_DATA_WRITES:
1827                 base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
1828                 idx = UNCORE_PMC_IDX_FIXED + 1;
1829                 break;
1830         default:
1831                 return -EINVAL;
1832         }
1833
1834         /* must be done before validate_group */
1835         event->hw.event_base = base;
1836         event->hw.config = cfg;
1837         event->hw.idx = idx;
1838
1839         /* no group validation needed, we have free running counters */
1840
1841         return 0;
1842 }
1843
1844 static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1845 {
1846         return 0;
1847 }
1848
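/*
 * These are free-running counters with no overflow interrupt, so when the
 * first event in the box starts we arm an hrtimer that periodically folds
 * the 32-bit hardware counts into the 64-bit perf counts so that
 * wraparounds are not lost.
 */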
1849 static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
1850 {
1851         struct intel_uncore_box *box = uncore_event_to_box(event);
1852         u64 count;
1853
1854         if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
1855                 return;
1856
1857         event->hw.state = 0;
1858         box->n_active++;
1859
1860         list_add_tail(&event->active_entry, &box->active_list);
1861
1862         count = snb_uncore_imc_read_counter(box, event);
1863         local64_set(&event->hw.prev_count, count);
1864
1865         if (box->n_active == 1)
1866                 uncore_pmu_start_hrtimer(box);
1867 }
1868
1869 static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
1870 {
1871         struct intel_uncore_box *box = uncore_event_to_box(event);
1872         struct hw_perf_event *hwc = &event->hw;
1873
1874         if (!(hwc->state & PERF_HES_STOPPED)) {
1875                 box->n_active--;
1876
1877                 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
1878                 hwc->state |= PERF_HES_STOPPED;
1879
1880                 list_del(&event->active_entry);
1881
1882                 if (box->n_active == 0)
1883                         uncore_pmu_cancel_hrtimer(box);
1884         }
1885
1886         if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
1887                 /*
1888                  * Drain the remaining delta count out of an event
1889                  * that we are disabling:
1890                  */
1891                 uncore_perf_event_update(box, event);
1892                 hwc->state |= PERF_HES_UPTODATE;
1893         }
1894 }
1895
1896 static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
1897 {
1898         struct intel_uncore_box *box = uncore_event_to_box(event);
1899         struct hw_perf_event *hwc = &event->hw;
1900
1901         if (!box)
1902                 return -ENODEV;
1903
1904         hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
1905         if (!(flags & PERF_EF_START))
1906                 hwc->state |= PERF_HES_ARCH;
1907
1908         snb_uncore_imc_event_start(event, 0);
1909
1910         box->n_events++;
1911
1912         return 0;
1913 }
1914
1915 static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
1916 {
1917         struct intel_uncore_box *box = uncore_event_to_box(event);
1918         int i;
1919
1920         snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);
1921
1922         for (i = 0; i < box->n_events; i++) {
1923                 if (event == box->event_list[i]) {
1924                         --box->n_events;
1925                         break;
1926                 }
1927         }
1928 }
1929
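/*
 * Client parts are single-package, so the bus that holds the IMC device
 * simply maps to physical package id 0.
 */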
1930 static int snb_pci2phy_map_init(int devid)
1931 {
1932         struct pci_dev *dev = NULL;
1933         int bus;
1934
1935         dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
1936         if (!dev)
1937                 return -ENOTTY;
1938
1939         bus = dev->bus->number;
1940
1941         pcibus_to_physid[bus] = 0;
1942
1943         pci_dev_put(dev);
1944
1945         return 0;
1946 }
1947
1948 static struct pmu snb_uncore_imc_pmu = {
1949         .task_ctx_nr    = perf_invalid_context,
1950         .event_init     = snb_uncore_imc_event_init,
1951         .add            = snb_uncore_imc_event_add,
1952         .del            = snb_uncore_imc_event_del,
1953         .start          = snb_uncore_imc_event_start,
1954         .stop           = snb_uncore_imc_event_stop,
1955         .read           = uncore_pmu_event_read,
1956 };
1957
1958 static struct intel_uncore_ops snb_uncore_imc_ops = {
1959         .init_box       = snb_uncore_imc_init_box,
1960         .enable_box     = snb_uncore_imc_enable_box,
1961         .disable_box    = snb_uncore_imc_disable_box,
1962         .disable_event  = snb_uncore_imc_disable_event,
1963         .enable_event   = snb_uncore_imc_enable_event,
1964         .hw_config      = snb_uncore_imc_hw_config,
1965         .read_counter   = snb_uncore_imc_read_counter,
1966 };
1967
1968 static struct intel_uncore_type snb_uncore_imc = {
1969         .name           = "imc",
1970         .num_counters   = 2,
1971         .num_boxes      = 1,
1972         .fixed_ctr_bits = 32,
1973         .fixed_ctr      = SNB_UNCORE_PCI_IMC_CTR_BASE,
1974         .event_descs    = snb_uncore_imc_events,
1975         .format_group   = &snb_uncore_imc_format_group,
1976         .perf_ctr       = SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
1977         .event_mask     = SNB_UNCORE_PCI_IMC_EVENT_MASK,
1978         .ops            = &snb_uncore_imc_ops,
1979         .pmu            = &snb_uncore_imc_pmu,
1980 };
1981
1982 static struct intel_uncore_type *snb_pci_uncores[] = {
1983         [SNB_PCI_UNCORE_IMC]    = &snb_uncore_imc,
1984         NULL,
1985 };
1986
1987 static DEFINE_PCI_DEVICE_TABLE(snb_uncore_pci_ids) = {
1988         { /* IMC */
1989                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
1990                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1991         },
        { /* end: all zeroes */ },
1992 };
1993
1994 static DEFINE_PCI_DEVICE_TABLE(ivb_uncore_pci_ids) = {
1995         { /* IMC */
1996                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
1997                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1998         },
        { /* end: all zeroes */ },
1999 };
2000
2001 static DEFINE_PCI_DEVICE_TABLE(hsw_uncore_pci_ids) = {
2002         { /* IMC */
2003                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
2004                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
2005         },
        { /* end: all zeroes */ },
2006 };
2007
2008 static struct pci_driver snb_uncore_pci_driver = {
2009         .name           = "snb_uncore",
2010         .id_table       = snb_uncore_pci_ids,
2011 };
2012
2013 static struct pci_driver ivb_uncore_pci_driver = {
2014         .name           = "ivb_uncore",
2015         .id_table       = ivb_uncore_pci_ids,
2016 };
2017
2018 static struct pci_driver hsw_uncore_pci_driver = {
2019         .name           = "hsw_uncore",
2020         .id_table       = hsw_uncore_pci_ids,
2021 };
2022
2023 /* end of Sandy Bridge uncore support */
2024
2025 /* Nehalem uncore support */
2026 static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
2027 {
2028         wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
2029 }
2030
2031 static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
2032 {
2033         wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
2034 }
2035
2036 static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2037 {
2038         struct hw_perf_event *hwc = &event->hw;
2039
2040         if (hwc->idx < UNCORE_PMC_IDX_FIXED)
2041                 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
2042         else
2043                 wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
2044 }
2045
2046 static struct attribute *nhm_uncore_formats_attr[] = {
2047         &format_attr_event.attr,
2048         &format_attr_umask.attr,
2049         &format_attr_edge.attr,
2050         &format_attr_inv.attr,
2051         &format_attr_cmask8.attr,
2052         NULL,
2053 };
2054
2055 static struct attribute_group nhm_uncore_format_group = {
2056         .name = "format",
2057         .attrs = nhm_uncore_formats_attr,
2058 };
2059
2060 static struct uncore_event_desc nhm_uncore_events[] = {
2061         INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
2062         INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
2063         INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
2064         INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
2065         INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
2066         INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
2067         INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
2068         INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
2069         INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
2070         { /* end: all zeroes */ },
2071 };
2072
2073 static struct intel_uncore_ops nhm_uncore_msr_ops = {
2074         .disable_box    = nhm_uncore_msr_disable_box,
2075         .enable_box     = nhm_uncore_msr_enable_box,
2076         .disable_event  = snb_uncore_msr_disable_event,
2077         .enable_event   = nhm_uncore_msr_enable_event,
2078         .read_counter   = uncore_msr_read_counter,
2079 };
2080
2081 static struct intel_uncore_type nhm_uncore = {
2082         .name           = "",
2083         .num_counters   = 8,
2084         .num_boxes      = 1,
2085         .perf_ctr_bits  = 48,
2086         .fixed_ctr_bits = 48,
2087         .event_ctl      = NHM_UNC_PERFEVTSEL0,
2088         .perf_ctr       = NHM_UNC_UNCORE_PMC0,
2089         .fixed_ctr      = NHM_UNC_FIXED_CTR,
2090         .fixed_ctl      = NHM_UNC_FIXED_CTR_CTRL,
2091         .event_mask     = NHM_UNC_RAW_EVENT_MASK,
2092         .event_descs    = nhm_uncore_events,
2093         .ops            = &nhm_uncore_msr_ops,
2094         .format_group   = &nhm_uncore_format_group,
2095 };
2096
2097 static struct intel_uncore_type *nhm_msr_uncores[] = {
2098         &nhm_uncore,
2099         NULL,
2100 };
2101 /* end of Nehalem uncore support */
2102
2103 /* Nehalem-EX uncore support */
2104 DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
2105 DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
2106 DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
2107 DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
2108
2109 static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
2110 {
2111         wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
2112 }
2113
2114 static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
2115 {
2116         unsigned msr = uncore_msr_box_ctl(box);
2117         u64 config;
2118
2119         if (msr) {
2120                 rdmsrl(msr, config);
2121                 config &= ~((1ULL << uncore_num_counters(box)) - 1);
2122                 /* WBox has a fixed counter */
2123                 if (uncore_msr_fixed_ctl(box))
2124                         config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
2125                 wrmsrl(msr, config);
2126         }
2127 }
2128
2129 static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
2130 {
2131         unsigned msr = uncore_msr_box_ctl(box);
2132         u64 config;
2133
2134         if (msr) {
2135                 rdmsrl(msr, config);
2136                 config |= (1ULL << uncore_num_counters(box)) - 1;
2137                 /* WBox has a fixed counter */
2138                 if (uncore_msr_fixed_ctl(box))
2139                         config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
2140                 wrmsrl(msr, config);
2141         }
2142 }
2143
2144 static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
2145 {
2146         wrmsrl(event->hw.config_base, 0);
2147 }
2148
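/*
 * Different NHM-EX boxes use different enable bits in the control MSR:
 * fixed counters use bit 0, and for the others the choice between bit 0
 * and bit 22 depends on whether bit 0 is already claimed by the box's
 * raw event mask.
 */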
2149 static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2150 {
2151         struct hw_perf_event *hwc = &event->hw;
2152
2153         if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
2154                 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
2155         else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
2156                 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
2157         else
2158                 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
2159 }
2160
2161 #define NHMEX_UNCORE_OPS_COMMON_INIT()                          \
2162         .init_box       = nhmex_uncore_msr_init_box,            \
2163         .disable_box    = nhmex_uncore_msr_disable_box,         \
2164         .enable_box     = nhmex_uncore_msr_enable_box,          \
2165         .disable_event  = nhmex_uncore_msr_disable_event,       \
2166         .read_counter   = uncore_msr_read_counter
2167
2168 static struct intel_uncore_ops nhmex_uncore_ops = {
2169         NHMEX_UNCORE_OPS_COMMON_INIT(),
2170         .enable_event   = nhmex_uncore_msr_enable_event,
2171 };
2172
2173 static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
2174         &format_attr_event.attr,
2175         &format_attr_edge.attr,
2176         NULL,
2177 };
2178
2179 static struct attribute_group nhmex_uncore_ubox_format_group = {
2180         .name           = "format",
2181         .attrs          = nhmex_uncore_ubox_formats_attr,
2182 };
2183
2184 static struct intel_uncore_type nhmex_uncore_ubox = {
2185         .name           = "ubox",
2186         .num_counters   = 1,
2187         .num_boxes      = 1,
2188         .perf_ctr_bits  = 48,
2189         .event_ctl      = NHMEX_U_MSR_PMON_EV_SEL,
2190         .perf_ctr       = NHMEX_U_MSR_PMON_CTR,
2191         .event_mask     = NHMEX_U_PMON_RAW_EVENT_MASK,
2192         .box_ctl        = NHMEX_U_MSR_PMON_GLOBAL_CTL,
2193         .ops            = &nhmex_uncore_ops,
2194         .format_group   = &nhmex_uncore_ubox_format_group
2195 };
2196
2197 static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
2198         &format_attr_event.attr,
2199         &format_attr_umask.attr,
2200         &format_attr_edge.attr,
2201         &format_attr_inv.attr,
2202         &format_attr_thresh8.attr,
2203         NULL,
2204 };
2205
2206 static struct attribute_group nhmex_uncore_cbox_format_group = {
2207         .name = "format",
2208         .attrs = nhmex_uncore_cbox_formats_attr,
2209 };
2210
2211 /* msr offset for each instance of cbox */
2212 static unsigned nhmex_cbox_msr_offsets[] = {
2213         0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
2214 };
2215
2216 static struct intel_uncore_type nhmex_uncore_cbox = {
2217         .name                   = "cbox",
2218         .num_counters           = 6,
2219         .num_boxes              = 10,
2220         .perf_ctr_bits          = 48,
2221         .event_ctl              = NHMEX_C0_MSR_PMON_EV_SEL0,
2222         .perf_ctr               = NHMEX_C0_MSR_PMON_CTR0,
2223         .event_mask             = NHMEX_PMON_RAW_EVENT_MASK,
2224         .box_ctl                = NHMEX_C0_MSR_PMON_GLOBAL_CTL,
2225         .msr_offsets            = nhmex_cbox_msr_offsets,
2226         .pair_ctr_ctl           = 1,
2227         .ops                    = &nhmex_uncore_ops,
2228         .format_group           = &nhmex_uncore_cbox_format_group
2229 };
2230
2231 static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
2232         INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
2233         { /* end: all zeroes */ },
2234 };
2235
2236 static struct intel_uncore_type nhmex_uncore_wbox = {
2237         .name                   = "wbox",
2238         .num_counters           = 4,
2239         .num_boxes              = 1,
2240         .perf_ctr_bits          = 48,
2241         .event_ctl              = NHMEX_W_MSR_PMON_CNT0,
2242         .perf_ctr               = NHMEX_W_MSR_PMON_EVT_SEL0,
2243         .fixed_ctr              = NHMEX_W_MSR_PMON_FIXED_CTR,
2244         .fixed_ctl              = NHMEX_W_MSR_PMON_FIXED_CTL,
2245         .event_mask             = NHMEX_PMON_RAW_EVENT_MASK,
2246         .box_ctl                = NHMEX_W_MSR_GLOBAL_CTL,
2247         .pair_ctr_ctl           = 1,
2248         .event_descs            = nhmex_uncore_wbox_events,
2249         .ops                    = &nhmex_uncore_ops,
2250         .format_group           = &nhmex_uncore_cbox_format_group
2251 };
2252
2253 static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2254 {
2255         struct hw_perf_event *hwc = &event->hw;
2256         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2257         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2258         int ctr, ev_sel;
2259
2260         ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
2261                 NHMEX_B_PMON_CTR_SHIFT;
2262         ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
2263                   NHMEX_B_PMON_CTL_EV_SEL_SHIFT;
2264
2265         /* events that do not use the match/mask registers */
2266         if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
2267             (ctr == 2 && ev_sel != 0x4) || ctr == 3)
2268                 return 0;
2269
2270         if (box->pmu->pmu_idx == 0)
2271                 reg1->reg = NHMEX_B0_MSR_MATCH;
2272         else
2273                 reg1->reg = NHMEX_B1_MSR_MATCH;
2274         reg1->idx = 0;
2275         reg1->config = event->attr.config1;
2276         reg2->config = event->attr.config2;
2277         return 0;
2278 }
2279
2280 static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2281 {
2282         struct hw_perf_event *hwc = &event->hw;
2283         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2284         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2285
2286         if (reg1->idx != EXTRA_REG_NONE) {
2287                 wrmsrl(reg1->reg, reg1->config);
2288                 wrmsrl(reg1->reg + 1, reg2->config);
2289         }
2290         wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
2291                 (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
2292 }
2293
2294 /*
2295  * The Bbox has 4 counters, but each counter monitors different events.
2296  * Use bits 6-7 in the event config to select the counter.
2297  */
2298 static struct event_constraint nhmex_uncore_bbox_constraints[] = {
2299         EVENT_CONSTRAINT(0, 1, 0xc0),
2300         EVENT_CONSTRAINT(0x40, 2, 0xc0),
2301         EVENT_CONSTRAINT(0x80, 4, 0xc0),
2302         EVENT_CONSTRAINT(0xc0, 8, 0xc0),
2303         EVENT_CONSTRAINT_END,
2304 };
2305
2306 static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
2307         &format_attr_event5.attr,
2308         &format_attr_counter.attr,
2309         &format_attr_match.attr,
2310         &format_attr_mask.attr,
2311         NULL,
2312 };
2313
2314 static struct attribute_group nhmex_uncore_bbox_format_group = {
2315         .name = "format",
2316         .attrs = nhmex_uncore_bbox_formats_attr,
2317 };
2318
2319 static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
2320         NHMEX_UNCORE_OPS_COMMON_INIT(),
2321         .enable_event           = nhmex_bbox_msr_enable_event,
2322         .hw_config              = nhmex_bbox_hw_config,
2323         .get_constraint         = uncore_get_constraint,
2324         .put_constraint         = uncore_put_constraint,
2325 };
2326
2327 static struct intel_uncore_type nhmex_uncore_bbox = {
2328         .name                   = "bbox",
2329         .num_counters           = 4,
2330         .num_boxes              = 2,
2331         .perf_ctr_bits          = 48,
2332         .event_ctl              = NHMEX_B0_MSR_PMON_CTL0,
2333         .perf_ctr               = NHMEX_B0_MSR_PMON_CTR0,
2334         .event_mask             = NHMEX_B_PMON_RAW_EVENT_MASK,
2335         .box_ctl                = NHMEX_B0_MSR_PMON_GLOBAL_CTL,
2336         .msr_offset             = NHMEX_B_MSR_OFFSET,
2337         .pair_ctr_ctl           = 1,
2338         .num_shared_regs        = 1,
2339         .constraints            = nhmex_uncore_bbox_constraints,
2340         .ops                    = &nhmex_uncore_bbox_ops,
2341         .format_group           = &nhmex_uncore_bbox_format_group
2342 };
2343
2344 static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2345 {
2346         struct hw_perf_event *hwc = &event->hw;
2347         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2348         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2349
2350         /* only TO_R_PROG_EV event uses the match/mask register */
2351         if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
2352             NHMEX_S_EVENT_TO_R_PROG_EV)
2353                 return 0;
2354
2355         if (box->pmu->pmu_idx == 0)
2356                 reg1->reg = NHMEX_S0_MSR_MM_CFG;
2357         else
2358                 reg1->reg = NHMEX_S1_MSR_MM_CFG;
2359         reg1->idx = 0;
2360         reg1->config = event->attr.config1;
2361         reg2->config = event->attr.config2;
2362         return 0;
2363 }
2364
2365 static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2366 {
2367         struct hw_perf_event *hwc = &event->hw;
2368         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2369         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2370
2371         if (reg1->idx != EXTRA_REG_NONE) {
2372                 wrmsrl(reg1->reg, 0);
2373                 wrmsrl(reg1->reg + 1, reg1->config);
2374                 wrmsrl(reg1->reg + 2, reg2->config);
2375                 wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
2376         }
2377         wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
2378 }
2379
2380 static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
2381         &format_attr_event.attr,
2382         &format_attr_umask.attr,
2383         &format_attr_edge.attr,
2384         &format_attr_inv.attr,
2385         &format_attr_thresh8.attr,
2386         &format_attr_match.attr,
2387         &format_attr_mask.attr,
2388         NULL,
2389 };
2390
2391 static struct attribute_group nhmex_uncore_sbox_format_group = {
2392         .name                   = "format",
2393         .attrs                  = nhmex_uncore_sbox_formats_attr,
2394 };
2395
2396 static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
2397         NHMEX_UNCORE_OPS_COMMON_INIT(),
2398         .enable_event           = nhmex_sbox_msr_enable_event,
2399         .hw_config              = nhmex_sbox_hw_config,
2400         .get_constraint         = uncore_get_constraint,
2401         .put_constraint         = uncore_put_constraint,
2402 };
2403
2404 static struct intel_uncore_type nhmex_uncore_sbox = {
2405         .name                   = "sbox",
2406         .num_counters           = 4,
2407         .num_boxes              = 2,
2408         .perf_ctr_bits          = 48,
2409         .event_ctl              = NHMEX_S0_MSR_PMON_CTL0,
2410         .perf_ctr               = NHMEX_S0_MSR_PMON_CTR0,
2411         .event_mask             = NHMEX_PMON_RAW_EVENT_MASK,
2412         .box_ctl                = NHMEX_S0_MSR_PMON_GLOBAL_CTL,
2413         .msr_offset             = NHMEX_S_MSR_OFFSET,
2414         .pair_ctr_ctl           = 1,
2415         .num_shared_regs        = 1,
2416         .ops                    = &nhmex_uncore_sbox_ops,
2417         .format_group           = &nhmex_uncore_sbox_format_group
2418 };
2419
2420 enum {
2421         EXTRA_REG_NHMEX_M_FILTER,
2422         EXTRA_REG_NHMEX_M_DSP,
2423         EXTRA_REG_NHMEX_M_ISS,
2424         EXTRA_REG_NHMEX_M_MAP,
2425         EXTRA_REG_NHMEX_M_MSC_THR,
2426         EXTRA_REG_NHMEX_M_PGT,
2427         EXTRA_REG_NHMEX_M_PLD,
2428         EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
2429 };
2430
2431 static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
2432         MBOX_INC_SEL_EXTAR_REG(0x0, DSP),
2433         MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR),
2434         MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR),
2435         MBOX_INC_SEL_EXTAR_REG(0x9, ISS),
2436         /* event 0xa uses two extra registers */
2437         MBOX_INC_SEL_EXTAR_REG(0xa, ISS),
2438         MBOX_INC_SEL_EXTAR_REG(0xa, PLD),
2439         MBOX_INC_SEL_EXTAR_REG(0xb, PLD),
2440         /* events 0xd ~ 0x10 use the same extra register */
2441         MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC),
2442         MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC),
2443         MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC),
2444         MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC),
2445         MBOX_INC_SEL_EXTAR_REG(0x16, PGT),
2446         MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
2447         MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
2448         MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
2449         MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
2450         EVENT_EXTRA_END
2451 };
2452
2453 /* Nehalem-EX or Westmere-EX? */
2454 static bool uncore_nhmex;
2455
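/*
 * For the shared ZDP_CTL_FVC register the reference count in er->ref is
 * packed per sub-field, 8 bits each (see __BITS_VALUE and the
 * atomic_add(1 << (idx * 8), ...) below), so the four FVC fields can be
 * claimed and released independently.
 */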
2456 static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
2457 {
2458         struct intel_uncore_extra_reg *er;
2459         unsigned long flags;
2460         bool ret = false;
2461         u64 mask;
2462
2463         if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2464                 er = &box->shared_regs[idx];
2465                 raw_spin_lock_irqsave(&er->lock, flags);
2466                 if (!atomic_read(&er->ref) || er->config == config) {
2467                         atomic_inc(&er->ref);
2468                         er->config = config;
2469                         ret = true;
2470                 }
2471                 raw_spin_unlock_irqrestore(&er->lock, flags);
2472
2473                 return ret;
2474         }
2475         /*
2476          * The ZDP_CTL_FVC MSR has 4 fields which are used to control
2477          * events 0xd ~ 0x10. Besides these 4 fields, there are additional
2478          * fields which are shared.
2479          */
2480         idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2481         if (WARN_ON_ONCE(idx >= 4))
2482                 return false;
2483
2484         /* mask of the shared fields */
2485         if (uncore_nhmex)
2486                 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
2487         else
2488                 mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
2489         er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2490
2491         raw_spin_lock_irqsave(&er->lock, flags);
2492         /* add mask of the non-shared field if it's in use */
2493         if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
2494                 if (uncore_nhmex)
2495                         mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2496                 else
2497                         mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2498         }
2499
2500         if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
2501                 atomic_add(1 << (idx * 8), &er->ref);
2502                 if (uncore_nhmex)
2503                         mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
2504                                 NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2505                 else
2506                         mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
2507                                 WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2508                 er->config &= ~mask;
2509                 er->config |= (config & mask);
2510                 ret = true;
2511         }
2512         raw_spin_unlock_irqrestore(&er->lock, flags);
2513
2514         return ret;
2515 }
2516
2517 static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
2518 {
2519         struct intel_uncore_extra_reg *er;
2520
2521         if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2522                 er = &box->shared_regs[idx];
2523                 atomic_dec(&er->ref);
2524                 return;
2525         }
2526
2527         idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2528         er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2529         atomic_sub(1 << (idx * 8), &er->ref);
2530 }
2531
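/*
 * Re-target an event from one ZDP_CTL_FVC sub-field to another: the
 * non-shared control bits are shifted by 3 bits per field step, and the
 * event's inc_sel in the main control register is moved by the same
 * number of steps.
 */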
2532 static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
2533 {
2534         struct hw_perf_event *hwc = &event->hw;
2535         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2536         u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
2537         u64 config = reg1->config;
2538
2539         /* get the non-shared control bits and shift them */
2540         idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2541         if (uncore_nhmex)
2542                 config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2543         else
2544                 config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2545         if (new_idx > orig_idx) {
2546                 idx = new_idx - orig_idx;
2547                 config <<= 3 * idx;
2548         } else {
2549                 idx = orig_idx - new_idx;
2550                 config >>= 3 * idx;
2551         }
2552
2553         /* add the shared control bits back */
2554         if (uncore_nhmex)
2555                 config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2556         else
2557                 config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2558         config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2559         if (modify) {
2560                 /* adjust the main event selector */
2561                 if (new_idx > orig_idx)
2562                         hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
2563                 else
2564                         hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
2565                 reg1->config = config;
2566                 reg1->idx = ~0xff | new_idx;
2567         }
2568         return config;
2569 }
2570
2571 static struct event_constraint *
2572 nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2573 {
2574         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2575         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2576         int i, idx[2], alloc = 0;
2577         u64 config1 = reg1->config;
2578
2579         idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
2580         idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
2581 again:
2582         for (i = 0; i < 2; i++) {
2583                 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
2584                         idx[i] = 0xff;
2585
2586                 if (idx[i] == 0xff)
2587                         continue;
2588
2589                 if (!nhmex_mbox_get_shared_reg(box, idx[i],
2590                                 __BITS_VALUE(config1, i, 32)))
2591                         goto fail;
2592                 alloc |= (0x1 << i);
2593         }
2594
2595         /* for the match/mask registers */
2596         if (reg2->idx != EXTRA_REG_NONE &&
2597             (uncore_box_is_fake(box) || !reg2->alloc) &&
2598             !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
2599                 goto fail;
2600
2601         /*
2602          * If it's a fake box -- as per validate_{group,event}() we
2603          * shouldn't touch event state and we can avoid doing so
2604          * since both will only call get_event_constraints() once
2605          * on each event, this avoids the need for reg->alloc.
2606          */
2607         if (!uncore_box_is_fake(box)) {
2608                 if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
2609                         nhmex_mbox_alter_er(event, idx[0], true);
2610                 reg1->alloc |= alloc;
2611                 if (reg2->idx != EXTRA_REG_NONE)
2612                         reg2->alloc = 1;
2613         }
2614         return NULL;
2615 fail:
2616         if (idx[0] != 0xff && !(alloc & 0x1) &&
2617             idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2618                 /*
2619                  * events 0xd ~ 0x10 are functionally identical, but are
2620                  * controlled by different fields in the ZDP_CTL_FVC
2621                  * register. If we failed to take one field, try the
2622                  * remaining 3 choices.
2623                  */
2624                 BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
2625                 idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2626                 idx[0] = (idx[0] + 1) % 4;
2627                 idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2628                 if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
2629                         config1 = nhmex_mbox_alter_er(event, idx[0], false);
2630                         goto again;
2631                 }
2632         }
2633
2634         if (alloc & 0x1)
2635                 nhmex_mbox_put_shared_reg(box, idx[0]);
2636         if (alloc & 0x2)
2637                 nhmex_mbox_put_shared_reg(box, idx[1]);
2638         return &constraint_empty;
2639 }
2640
2641 static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
2642 {
2643         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2644         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2645
2646         if (uncore_box_is_fake(box))
2647                 return;
2648
2649         if (reg1->alloc & 0x1)
2650                 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
2651         if (reg1->alloc & 0x2)
2652                 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
2653         reg1->alloc = 0;
2654
2655         if (reg2->alloc) {
2656                 nhmex_mbox_put_shared_reg(box, reg2->idx);
2657                 reg2->alloc = 0;
2658         }
2659 }
2660
2661 static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
2662 {
2663         if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
2664                 return er->idx;
2665         return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
2666 }
2667
2668 static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2669 {
2670         struct intel_uncore_type *type = box->pmu->type;
2671         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2672         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2673         struct extra_reg *er;
2674         unsigned msr;
2675         int reg_idx = 0;
2676         /*
2677          * The mbox events may require at most 2 extra MSRs. But only
2678          * the lower 32 bits in these MSRs are significant, so we can use
2679          * config1 to pass both MSRs' configs.
2680          */
2681         for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
2682                 if (er->event != (event->hw.config & er->config_mask))
2683                         continue;
2684                 if (event->attr.config1 & ~er->valid_mask)
2685                         return -EINVAL;
2686
2687                 msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
2688                 if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
2689                         return -EINVAL;
2690
2691                 /* always use bits 32~63 to pass the PLD config */
2692                 if (er->idx == EXTRA_REG_NHMEX_M_PLD)
2693                         reg_idx = 1;
2694                 else if (WARN_ON_ONCE(reg_idx > 0))
2695                         return -EINVAL;
2696
2697                 reg1->idx &= ~(0xff << (reg_idx * 8));
2698                 reg1->reg &= ~(0xffff << (reg_idx * 16));
2699                 reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
2700                 reg1->reg |= msr << (reg_idx * 16);
2701                 reg1->config = event->attr.config1;
2702                 reg_idx++;
2703         }
2704         /*
2705          * The mbox only provides the ability to perform address matching
2706          * for the PLD events.
2707          */
2708         if (reg_idx == 2) {
2709                 reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
2710                 if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
2711                         reg2->config = event->attr.config2;
2712                 else
2713                         reg2->config = ~0ULL;
2714                 if (box->pmu->pmu_idx == 0)
2715                         reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
2716                 else
2717                         reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
2718         }
2719         return 0;
2720 }
2721
2722 static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
2723 {
2724         struct intel_uncore_extra_reg *er;
2725         unsigned long flags;
2726         u64 config;
2727
2728         if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
2729                 return box->shared_regs[idx].config;
2730
2731         er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2732         raw_spin_lock_irqsave(&er->lock, flags);
2733         config = er->config;
2734         raw_spin_unlock_irqrestore(&er->lock, flags);
2735         return config;
2736 }
2737
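/*
 * reg1->reg packs up to two extra-register MSR addresses (16 bits each)
 * and reg1->idx packs their indices (8 bits each), as set up in
 * nhmex_mbox_hw_config(); write whichever of the two is in use, then the
 * optional address match/mask filter, then the event control register.
 */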
2738 static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2739 {
2740         struct hw_perf_event *hwc = &event->hw;
2741         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2742         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2743         int idx;
2744
2745         idx = __BITS_VALUE(reg1->idx, 0, 8);
2746         if (idx != 0xff)
2747                 wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
2748                         nhmex_mbox_shared_reg_config(box, idx));
2749         idx = __BITS_VALUE(reg1->idx, 1, 8);
2750         if (idx != 0xff)
2751                 wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
2752                         nhmex_mbox_shared_reg_config(box, idx));
2753
2754         if (reg2->idx != EXTRA_REG_NONE) {
2755                 wrmsrl(reg2->reg, 0);
2756                 if (reg2->config != ~0ULL) {
2757                         wrmsrl(reg2->reg + 1,
2758                                 reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
2759                         wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
2760                                 (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
2761                         wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
2762                 }
2763         }
2764
2765         wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
2766 }
2767
2768 DEFINE_UNCORE_FORMAT_ATTR(count_mode,           count_mode,     "config:2-3");
2769 DEFINE_UNCORE_FORMAT_ATTR(storage_mode,         storage_mode,   "config:4-5");
2770 DEFINE_UNCORE_FORMAT_ATTR(wrap_mode,            wrap_mode,      "config:6");
2771 DEFINE_UNCORE_FORMAT_ATTR(flag_mode,            flag_mode,      "config:7");
2772 DEFINE_UNCORE_FORMAT_ATTR(inc_sel,              inc_sel,        "config:9-13");
2773 DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel,         set_flag_sel,   "config:19-21");
2774 DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en,        filter_cfg_en,  "config2:63");
2775 DEFINE_UNCORE_FORMAT_ATTR(filter_match,         filter_match,   "config2:0-33");
2776 DEFINE_UNCORE_FORMAT_ATTR(filter_mask,          filter_mask,    "config2:34-61");
2777 DEFINE_UNCORE_FORMAT_ATTR(dsp,                  dsp,            "config1:0-31");
2778 DEFINE_UNCORE_FORMAT_ATTR(thr,                  thr,            "config1:0-31");
2779 DEFINE_UNCORE_FORMAT_ATTR(fvc,                  fvc,            "config1:0-31");
2780 DEFINE_UNCORE_FORMAT_ATTR(pgt,                  pgt,            "config1:0-31");
2781 DEFINE_UNCORE_FORMAT_ATTR(map,                  map,            "config1:0-31");
2782 DEFINE_UNCORE_FORMAT_ATTR(iss,                  iss,            "config1:0-31");
2783 DEFINE_UNCORE_FORMAT_ATTR(pld,                  pld,            "config1:32-63");
2784
2785 static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
2786         &format_attr_count_mode.attr,
2787         &format_attr_storage_mode.attr,
2788         &format_attr_wrap_mode.attr,
2789         &format_attr_flag_mode.attr,
2790         &format_attr_inc_sel.attr,
2791         &format_attr_set_flag_sel.attr,
2792         &format_attr_filter_cfg_en.attr,
2793         &format_attr_filter_match.attr,
2794         &format_attr_filter_mask.attr,
2795         &format_attr_dsp.attr,
2796         &format_attr_thr.attr,
2797         &format_attr_fvc.attr,
2798         &format_attr_pgt.attr,
2799         &format_attr_map.attr,
2800         &format_attr_iss.attr,
2801         &format_attr_pld.attr,
2802         NULL,
2803 };
2804
2805 static struct attribute_group nhmex_uncore_mbox_format_group = {
2806         .name           = "format",
2807         .attrs          = nhmex_uncore_mbox_formats_attr,
2808 };
2809
2810 static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
2811         INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
2812         INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
2813         { /* end: all zeroes */ },
2814 };
2815
2816 static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
2817         INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
2818         INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
2819         { /* end: all zeroes */ },
2820 };
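/*
 * The event descriptions above are exported under the PMU's "events"
 * sysfs directory and can be requested by name, e.g. (assuming perf tool
 * support and that an uncore_mbox_0 PMU is present on the running system):
 *
 *   perf stat -a -e uncore_mbox_0/bbox_cmds_read/ -- sleep 1
 */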
2821
2822 static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
2823         NHMEX_UNCORE_OPS_COMMON_INIT(),
2824         .enable_event   = nhmex_mbox_msr_enable_event,
2825         .hw_config      = nhmex_mbox_hw_config,
2826         .get_constraint = nhmex_mbox_get_constraint,
2827         .put_constraint = nhmex_mbox_put_constraint,
2828 };
2829
2830 static struct intel_uncore_type nhmex_uncore_mbox = {
2831         .name                   = "mbox",
2832         .num_counters           = 6,
2833         .num_boxes              = 2,
2834         .perf_ctr_bits          = 48,
2835         .event_ctl              = NHMEX_M0_MSR_PMU_CTL0,
2836         .perf_ctr               = NHMEX_M0_MSR_PMU_CNT0,
2837         .event_mask             = NHMEX_M_PMON_RAW_EVENT_MASK,
2838         .box_ctl                = NHMEX_M0_MSR_GLOBAL_CTL,
2839         .msr_offset             = NHMEX_M_MSR_OFFSET,
2840         .pair_ctr_ctl           = 1,
2841         .num_shared_regs        = 8,
2842         .event_descs            = nhmex_uncore_mbox_events,
2843         .ops                    = &nhmex_uncore_mbox_ops,
2844         .format_group           = &nhmex_uncore_mbox_format_group,
2845 };
2846
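/*
 * Switch an rbox event to its paired alternative: move the index and the
 * event selector by one, and for events 2/3 move the extra register
 * payload between the low and the high byte.
 */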
2847 static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
2848 {
2849         struct hw_perf_event *hwc = &event->hw;
2850         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2851
2852         /* adjust the main event selector and extra register index */
2853         if (reg1->idx % 2) {
2854                 reg1->idx--;
2855                 hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2856         } else {
2857                 reg1->idx++;
2858                 hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2859         }
2860
2861         /* adjust extra register config */
2862         switch (reg1->idx % 6) {
2863         case 2:
2864                 /* shift the 8~15 bits to the 0~7 bits */
2865                 reg1->config >>= 8;
2866                 break;
2867         case 3:
2868                 /* shift the 0~7 bits to the 8~15 bits */
2869                 reg1->config <<= 8;
2870                 break;
2871         }
2872 }
2873
2874 /*
2875  * Each rbox has 4 event sets, which monitor QPI ports 0~3 or 4~7.
2876  * An event set consists of 6 events; the 3rd and 4th events in
2877  * an event set use the same extra register, so an event set uses
2878  * 5 extra registers.
2879  */
2880 static struct event_constraint *
2881 nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2882 {
2883         struct hw_perf_event *hwc = &event->hw;
2884         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2885         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2886         struct intel_uncore_extra_reg *er;
2887         unsigned long flags;
2888         int idx, er_idx;
2889         u64 config1;
2890         bool ok = false;
2891
2892         if (!uncore_box_is_fake(box) && reg1->alloc)
2893                 return NULL;
2894
2895         idx = reg1->idx % 6;
2896         config1 = reg1->config;
2897 again:
2898         er_idx = idx;
2899         /* the 3rd and 4th events use the same extra register */
2900         if (er_idx > 2)
2901                 er_idx--;
2902         er_idx += (reg1->idx / 6) * 5;
2903
2904         er = &box->shared_regs[er_idx];
2905         raw_spin_lock_irqsave(&er->lock, flags);
2906         if (idx < 2) {
2907                 if (!atomic_read(&er->ref) || er->config == reg1->config) {
2908                         atomic_inc(&er->ref);
2909                         er->config = reg1->config;
2910                         ok = true;
2911                 }
2912         } else if (idx == 2 || idx == 3) {
2913                 /*
2914                  * these two events use different fields of an extra register:
2915                  * bits 0~7 and bits 8~15, respectively.
2916                  */
2917                 u64 mask = 0xff << ((idx - 2) * 8);
2918                 if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
2919                                 !((er->config ^ config1) & mask)) {
2920                         atomic_add(1 << ((idx - 2) * 8), &er->ref);
2921                         er->config &= ~mask;
2922                         er->config |= config1 & mask;
2923                         ok = true;
2924                 }
2925         } else {
2926                 if (!atomic_read(&er->ref) ||
2927                                 (er->config == (hwc->config >> 32) &&
2928                                  er->config1 == reg1->config &&
2929                                  er->config2 == reg2->config)) {
2930                         atomic_inc(&er->ref);
2931                         er->config = (hwc->config >> 32);
2932                         er->config1 = reg1->config;
2933                         er->config2 = reg2->config;
2934                         ok = true;
2935                 }
2936         }
2937         raw_spin_unlock_irqrestore(&er->lock, flags);
2938
2939         if (!ok) {
2940                 /*
2941                  * The Rbox events always come in pairs. The paired
2942                  * events are functionally identical, but use different
2943                  * extra registers. If we fail to take an extra
2944                  * register, try the alternative.
2945                  */
2946                 if (idx % 2)
2947                         idx--;
2948                 else
2949                         idx++;
2950                 if (idx != reg1->idx % 6) {
2951                         if (idx == 2)
2952                                 config1 >>= 8;
2953                         else if (idx == 3)
2954                                 config1 <<= 8;
2955                         goto again;
2956                 }
2957         } else {
2958                 if (!uncore_box_is_fake(box)) {
2959                         if (idx != reg1->idx % 6)
2960                                 nhmex_rbox_alter_er(box, event);
2961                         reg1->alloc = 1;
2962                 }
2963                 return NULL;
2964         }
2965         return &constraint_empty;
2966 }
2967
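/*
 * Drop the reference on the shared extra register taken by
 * nhmex_rbox_get_constraint().  Events 2 and 3 of a set share one
 * register and keep per-byte reference counts; the others use a plain
 * counter.
 */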
2968 static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
2969 {
2970         struct intel_uncore_extra_reg *er;
2971         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2972         int idx, er_idx;
2973
2974         if (uncore_box_is_fake(box) || !reg1->alloc)
2975                 return;
2976
2977         idx = reg1->idx % 6;
2978         er_idx = idx;
2979         if (er_idx > 2)
2980                 er_idx--;
2981         er_idx += (reg1->idx / 6) * 5;
2982
2983         er = &box->shared_regs[er_idx];
2984         if (idx == 2 || idx == 3)
2985                 atomic_sub(1 << ((idx - 2) * 8), &er->ref);
2986         else
2987                 atomic_dec(&er->ref);
2988
2989         reg1->alloc = 0;
2990 }
2991
2992 static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2993 {
2994         struct hw_perf_event *hwc = &event->hw;
2995         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2996         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2997         int idx;
2998
2999         idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
3000                 NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
3001         if (idx >= 0x18)
3002                 return -EINVAL;
3003
3004         reg1->idx = idx;
3005         reg1->config = event->attr.config1;
3006
3007         switch (idx % 6) {
3008         case 4:
3009         case 5:
3010                 hwc->config |= event->attr.config & (~0ULL << 32);
3011                 reg2->config = event->attr.config2;
3012                 break;
3013         }
3014         return 0;
3015 }
3016
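/*
 * Program the per-port IPERF, QLX or XBR registers selected by the
 * event's index, then enable the counter with only the event-select
 * bits of the config.
 */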
3017 static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
3018 {
3019         struct hw_perf_event *hwc = &event->hw;
3020         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
3021         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
3022         int idx, port;
3023
3024         idx = reg1->idx;
3025         port = idx / 6 + box->pmu->pmu_idx * 4;
3026
3027         switch (idx % 6) {
3028         case 0:
3029                 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
3030                 break;
3031         case 1:
3032                 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
3033                 break;
3034         case 2:
3035         case 3:
3036                 wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
3037                         uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
3038                 break;
3039         case 4:
3040                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
3041                         hwc->config >> 32);
3042                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
3043                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
3044                 break;
3045         case 5:
3046                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
3047                         hwc->config >> 32);
3048                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
3049                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
3050                 break;
3051         }
3052
3053         wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
3054                 (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
3055 }
3056
3057 DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
3058 DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
3059 DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
3060 DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
3061 DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
3062
3063 static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
3064         &format_attr_event5.attr,
3065         &format_attr_xbr_mm_cfg.attr,
3066         &format_attr_xbr_match.attr,
3067         &format_attr_xbr_mask.attr,
3068         &format_attr_qlx_cfg.attr,
3069         &format_attr_iperf_cfg.attr,
3070         NULL,
3071 };
3072
3073 static struct attribute_group nhmex_uncore_rbox_format_group = {
3074         .name = "format",
3075         .attrs = nhmex_uncore_rbox_formats_attr,
3076 };
3077
3078 static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
3079         INTEL_UNCORE_EVENT_DESC(qpi0_flit_send,         "event=0x0,iperf_cfg=0x80000000"),
3080         INTEL_UNCORE_EVENT_DESC(qpi1_filt_send,         "event=0x6,iperf_cfg=0x80000000"),
3081         INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt,         "event=0x0,iperf_cfg=0x40000000"),
3082         INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt,         "event=0x6,iperf_cfg=0x40000000"),
3083         INTEL_UNCORE_EVENT_DESC(qpi0_date_response,     "event=0x0,iperf_cfg=0xc4"),
3084         INTEL_UNCORE_EVENT_DESC(qpi1_date_response,     "event=0x6,iperf_cfg=0xc4"),
3085         { /* end: all zeroes */ },
3086 };
3087
3088 static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
3089         NHMEX_UNCORE_OPS_COMMON_INIT(),
3090         .enable_event           = nhmex_rbox_msr_enable_event,
3091         .hw_config              = nhmex_rbox_hw_config,
3092         .get_constraint         = nhmex_rbox_get_constraint,
3093         .put_constraint         = nhmex_rbox_put_constraint,
3094 };
3095
3096 static struct intel_uncore_type nhmex_uncore_rbox = {
3097         .name                   = "rbox",
3098         .num_counters           = 8,
3099         .num_boxes              = 2,
3100         .perf_ctr_bits          = 48,
3101         .event_ctl              = NHMEX_R_MSR_PMON_CTL0,
3102         .perf_ctr               = NHMEX_R_MSR_PMON_CNT0,
3103         .event_mask             = NHMEX_R_PMON_RAW_EVENT_MASK,
3104         .box_ctl                = NHMEX_R_MSR_GLOBAL_CTL,
3105         .msr_offset             = NHMEX_R_MSR_OFFSET,
3106         .pair_ctr_ctl           = 1,
3107         .num_shared_regs        = 20,
3108         .event_descs            = nhmex_uncore_rbox_events,
3109         .ops                    = &nhmex_uncore_rbox_ops,
3110         .format_group           = &nhmex_uncore_rbox_format_group
3111 };
3112
3113 static struct intel_uncore_type *nhmex_msr_uncores[] = {
3114         &nhmex_uncore_ubox,
3115         &nhmex_uncore_cbox,
3116         &nhmex_uncore_bbox,
3117         &nhmex_uncore_sbox,
3118         &nhmex_uncore_mbox,
3119         &nhmex_uncore_rbox,
3120         &nhmex_uncore_wbox,
3121         NULL,
3122 };
3123 /* end of Nehalem-EX uncore support */
3124
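/*
 * Bind an event to a counter: record the counter index, bump the tag so
 * that reprogramming can be detected later, and compute the control and
 * counter registers (or the fixed counter pair).
 */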
3125 static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
3126 {
3127         struct hw_perf_event *hwc = &event->hw;
3128
3129         hwc->idx = idx;
3130         hwc->last_tag = ++box->tags[idx];
3131
3132         if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
3133                 hwc->event_base = uncore_fixed_ctr(box);
3134                 hwc->config_base = uncore_fixed_ctl(box);
3135                 return;
3136         }
3137
3138         hwc->config_base = uncore_event_ctl(box, hwc->idx);
3139         hwc->event_base  = uncore_perf_ctr(box, hwc->idx);
3140 }
3141
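/*
 * Fold the current hardware count into the perf event.  The counter is
 * narrower than 64 bits, so the raw values are shifted up and back down
 * to sign-extend the delta across counter wrap-around.
 */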
3142 static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
3143 {
3144         u64 prev_count, new_count, delta;
3145         int shift;
3146
3147         if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
3148                 shift = 64 - uncore_fixed_ctr_bits(box);
3149         else
3150                 shift = 64 - uncore_perf_ctr_bits(box);
3151
3152         /* the hrtimer might modify the previous event value */
3153 again:
3154         prev_count = local64_read(&event->hw.prev_count);
3155         new_count = uncore_read_counter(box, event);
3156         if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
3157                 goto again;
3158
3159         delta = (new_count << shift) - (prev_count << shift);
3160         delta >>= shift;
3161
3162         local64_add(delta, &event->count);
3163 }
3164
3165 /*
3166  * The overflow interrupt is unavailable for SandyBridge-EP and broken
3167  * for SandyBridge, so we use an hrtimer to periodically poll the counters
3168  * before they can overflow.
3169  */
3170 static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
3171 {
3172         struct intel_uncore_box *box;
3173         struct perf_event *event;
3174         unsigned long flags;
3175         int bit;
3176
3177         box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
3178         if (!box->n_active || box->cpu != smp_processor_id())
3179                 return HRTIMER_NORESTART;
3180         /*
3181          * disable local interrupts to prevent uncore_pmu_event_start/stop
3182          * from interrupting the update process
3183          */
3184         local_irq_save(flags);
3185
3186         /*
3187          * handle boxes with an active event list as opposed to active
3188          * counters
3189          */
3190         list_for_each_entry(event, &box->active_list, active_entry) {
3191                 uncore_perf_event_update(box, event);
3192         }
3193
3194         for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
3195                 uncore_perf_event_update(box, box->events[bit]);
3196
3197         local_irq_restore(flags);
3198
3199         hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
3200         return HRTIMER_RESTART;
3201 }
3202
3203 static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
3204 {
3205         __hrtimer_start_range_ns(&box->hrtimer,
3206                         ns_to_ktime(box->hrtimer_duration), 0,
3207                         HRTIMER_MODE_REL_PINNED, 0);
3208 }
3209
3210 static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
3211 {
3212         hrtimer_cancel(&box->hrtimer);
3213 }
3214
3215 static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
3216 {
3217         hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3218         box->hrtimer.function = uncore_pmu_hrtimer;
3219 }
3220
3221 static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
3222 {
3223         struct intel_uncore_box *box;
3224         int i, size;
3225
3226         size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
3227
3228         box = kzalloc_node(size, GFP_KERNEL, node);
3229         if (!box)
3230                 return NULL;
3231
3232         for (i = 0; i < type->num_shared_regs; i++)
3233                 raw_spin_lock_init(&box->shared_regs[i].lock);
3234
3235         uncore_pmu_init_hrtimer(box);
3236         atomic_set(&box->refcnt, 1);
3237         box->cpu = -1;
3238         box->phys_id = -1;
3239
3240         /* set default hrtimer timeout */
3241         box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;
3242
3243         INIT_LIST_HEAD(&box->active_list);
3244
3245         return box;
3246 }
3247
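/*
 * Append the leader (and, if dogrp is set, its active siblings) to the
 * box's event list, failing if that would exceed the number of available
 * counters.  Returns the new number of collected events.
 */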
3248 static int
3249 uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
3250 {
3251         struct perf_event *event;
3252         int n, max_count;
3253
3254         max_count = box->pmu->type->num_counters;
3255         if (box->pmu->type->fixed_ctl)
3256                 max_count++;
3257
3258         if (box->n_events >= max_count)
3259                 return -EINVAL;
3260
3261         n = box->n_events;
3262         box->event_list[n] = leader;
3263         n++;
3264         if (!dogrp)
3265                 return n;
3266
3267         list_for_each_entry(event, &leader->sibling_list, group_entry) {
3268                 if (event->state <= PERF_EVENT_STATE_OFF)
3269                         continue;
3270
3271                 if (n >= max_count)
3272                         return -EINVAL;
3273
3274                 box->event_list[n] = event;
3275                 n++;
3276         }
3277         return n;
3278 }
3279
3280 static struct event_constraint *
3281 uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
3282 {
3283         struct intel_uncore_type *type = box->pmu->type;
3284         struct event_constraint *c;
3285
3286         if (type->ops->get_constraint) {
3287                 c = type->ops->get_constraint(box, event);
3288                 if (c)
3289                         return c;
3290         }
3291
3292         if (event->attr.config == UNCORE_FIXED_EVENT)
3293                 return &constraint_fixed;
3294
3295         if (type->constraints) {
3296                 for_each_event_constraint(c, type->constraints) {
3297                         if ((event->hw.config & c->cmask) == c->code)
3298                                 return c;
3299                 }
3300         }
3301
3302         return &type->unconstrainted;
3303 }
3304
3305 static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
3306 {
3307         if (box->pmu->type->ops->put_constraint)
3308                 box->pmu->type->ops->put_constraint(box, event);
3309 }
3310
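/*
 * Compute a constraint for every collected event and assign counters:
 * first try to reuse each event's previous counter, then fall back to
 * the generic perf_assign_events() scheduler.  Constraints are released
 * again on failure or when called without an assignment array.
 */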
3311 static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
3312 {
3313         unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
3314         struct event_constraint *c;
3315         int i, wmin, wmax, ret = 0;
3316         struct hw_perf_event *hwc;
3317
3318         bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
3319
3320         for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
3321                 hwc = &box->event_list[i]->hw;
3322                 c = uncore_get_event_constraint(box, box->event_list[i]);
3323                 hwc->constraint = c;
3324                 wmin = min(wmin, c->weight);
3325                 wmax = max(wmax, c->weight);
3326         }
3327
3328         /* fastpath, try to reuse previous register */
3329         for (i = 0; i < n; i++) {
3330                 hwc = &box->event_list[i]->hw;
3331                 c = hwc->constraint;
3332
3333                 /* never assigned */
3334                 if (hwc->idx == -1)
3335                         break;
3336
3337                 /* constraint still honored */
3338                 if (!test_bit(hwc->idx, c->idxmsk))
3339                         break;
3340
3341                 /* not already used */
3342                 if (test_bit(hwc->idx, used_mask))
3343                         break;
3344
3345                 __set_bit(hwc->idx, used_mask);
3346                 if (assign)
3347                         assign[i] = hwc->idx;
3348         }
3349         /* slow path */
3350         if (i != n)
3351                 ret = perf_assign_events(box->event_list, n,
3352                                          wmin, wmax, assign);
3353
3354         if (!assign || ret) {
3355                 for (i = 0; i < n; i++)
3356                         uncore_put_event_constraint(box, box->event_list[i]);
3357         }
3358         return ret ? -EINVAL : 0;
3359 }
3360
3361 static void uncore_pmu_event_start(struct perf_event *event, int flags)
3362 {
3363         struct intel_uncore_box *box = uncore_event_to_box(event);
3364         int idx = event->hw.idx;
3365
3366         if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
3367                 return;
3368
3369         if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
3370                 return;
3371
3372         event->hw.state = 0;
3373         box->events[idx] = event;
3374         box->n_active++;
3375         __set_bit(idx, box->active_mask);
3376
3377         local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
3378         uncore_enable_event(box, event);
3379
3380         if (box->n_active == 1) {
3381                 uncore_enable_box(box);
3382                 uncore_pmu_start_hrtimer(box);
3383         }
3384 }
3385
3386 static void uncore_pmu_event_stop(struct perf_event *event, int flags)
3387 {
3388         struct intel_uncore_box *box = uncore_event_to_box(event);
3389         struct hw_perf_event *hwc = &event->hw;
3390
3391         if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
3392                 uncore_disable_event(box, event);
3393                 box->n_active--;
3394                 box->events[hwc->idx] = NULL;
3395                 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
3396                 hwc->state |= PERF_HES_STOPPED;
3397
3398                 if (box->n_active == 0) {
3399                         uncore_disable_box(box);
3400                         uncore_pmu_cancel_hrtimer(box);
3401                 }
3402         }
3403
3404         if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
3405                 /*
3406                  * Drain the remaining delta count out of an event
3407                  * that we are disabling:
3408                  */
3409                 uncore_perf_event_update(box, event);
3410                 hwc->state |= PERF_HES_UPTODATE;
3411         }
3412 }
3413
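/*
 * pmu::add callback: collect the new event into the box, reschedule all
 * events onto counters, stop the events that moved and (re)start
 * everything that is not explicitly kept stopped.
 */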
3414 static int uncore_pmu_event_add(struct perf_event *event, int flags)
3415 {
3416         struct intel_uncore_box *box = uncore_event_to_box(event);
3417         struct hw_perf_event *hwc = &event->hw;
3418         int assign[UNCORE_PMC_IDX_MAX];
3419         int i, n, ret;
3420
3421         if (!box)
3422                 return -ENODEV;
3423
3424         ret = n = uncore_collect_events(box, event, false);
3425         if (ret < 0)
3426                 return ret;
3427
3428         hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
3429         if (!(flags & PERF_EF_START))
3430                 hwc->state |= PERF_HES_ARCH;
3431
3432         ret = uncore_assign_events(box, assign, n);
3433         if (ret)
3434                 return ret;
3435
3436         /* save events moving to new counters */
3437         for (i = 0; i < box->n_events; i++) {
3438                 event = box->event_list[i];
3439                 hwc = &event->hw;
3440
3441                 if (hwc->idx == assign[i] &&
3442                         hwc->last_tag == box->tags[assign[i]])
3443                         continue;
3444                 /*
3445                  * Ensure we don't accidentally enable a stopped
3446                  * counter simply because we rescheduled.
3447                  */
3448                 if (hwc->state & PERF_HES_STOPPED)
3449                         hwc->state |= PERF_HES_ARCH;
3450
3451                 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
3452         }
3453
3454         /* reprogram moved events into new counters */
3455         for (i = 0; i < n; i++) {
3456                 event = box->event_list[i];
3457                 hwc = &event->hw;
3458
3459                 if (hwc->idx != assign[i] ||
3460                         hwc->last_tag != box->tags[assign[i]])
3461                         uncore_assign_hw_event(box, event, assign[i]);
3462                 else if (i < box->n_events)
3463                         continue;
3464
3465                 if (hwc->state & PERF_HES_ARCH)
3466                         continue;
3467
3468                 uncore_pmu_event_start(event, 0);
3469         }
3470         box->n_events = n;
3471
3472         return 0;
3473 }
3474
3475 static void uncore_pmu_event_del(struct perf_event *event, int flags)
3476 {
3477         struct intel_uncore_box *box = uncore_event_to_box(event);
3478         int i;
3479
3480         uncore_pmu_event_stop(event, PERF_EF_UPDATE);
3481
3482         for (i = 0; i < box->n_events; i++) {
3483                 if (event == box->event_list[i]) {
3484                         uncore_put_event_constraint(box, event);
3485
3486                         while (++i < box->n_events)
3487                                 box->event_list[i - 1] = box->event_list[i];
3488
3489                         --box->n_events;
3490                         break;
3491                 }
3492         }
3493
3494         event->hw.idx = -1;
3495         event->hw.last_tag = ~0ULL;
3496 }
3497
3498 static void uncore_pmu_event_read(struct perf_event *event)
3499 {
3500         struct intel_uncore_box *box = uncore_event_to_box(event);
3501         uncore_perf_event_update(box, event);
3502 }
3503
3504 /*
3505  * validation ensures the group can be loaded onto the
3506  * PMU if it were the only group available.
3507  */
3508 static int uncore_validate_group(struct intel_uncore_pmu *pmu,
3509                                 struct perf_event *event)
3510 {
3511         struct perf_event *leader = event->group_leader;
3512         struct intel_uncore_box *fake_box;
3513         int ret = -EINVAL, n;
3514
3515         fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
3516         if (!fake_box)
3517                 return -ENOMEM;
3518
3519         fake_box->pmu = pmu;
3520         /*
3521          * the event is not yet connected with its
3522          * siblings; therefore we must first collect the
3523          * existing siblings, then add the new event
3524          * before we can simulate the scheduling
3525          */
3526         n = uncore_collect_events(fake_box, leader, true);
3527         if (n < 0)
3528                 goto out;
3529
3530         fake_box->n_events = n;
3531         n = uncore_collect_events(fake_box, event, false);
3532         if (n < 0)
3533                 goto out;
3534
3535         fake_box->n_events = n;
3536
3537         ret = uncore_assign_events(fake_box, NULL, n);
3538 out:
3539         kfree(fake_box);
3540         return ret;
3541 }
3542
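/*
 * pmu::event_init callback: reject unsupported attributes (sampling,
 * exclude bits, per-task events), route the event to the cpu that
 * collects uncore events for its package, and let the box type validate
 * the configuration and, for groups, the scheduling.
 */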
3543 static int uncore_pmu_event_init(struct perf_event *event)
3544 {
3545         struct intel_uncore_pmu *pmu;
3546         struct intel_uncore_box *box;
3547         struct hw_perf_event *hwc = &event->hw;
3548         int ret;
3549
3550         if (event->attr.type != event->pmu->type)
3551                 return -ENOENT;
3552
3553         pmu = uncore_event_to_pmu(event);
3554         /* no device found for this pmu */
3555         if (pmu->func_id < 0)
3556                 return -ENOENT;
3557
3558         /*
3559          * The uncore PMU always measures at all privilege levels,
3560          * so it doesn't make sense to specify any exclude bits.
3561          */
3562         if (event->attr.exclude_user || event->attr.exclude_kernel ||
3563                         event->attr.exclude_hv || event->attr.exclude_idle)
3564                 return -EINVAL;
3565
3566         /* Sampling not supported yet */
3567         if (hwc->sample_period)
3568                 return -EINVAL;
3569
3570         /*
3571          * Place all uncore events for a particular physical package
3572          * onto a single cpu
3573          */
3574         if (event->cpu < 0)
3575                 return -EINVAL;
3576         box = uncore_pmu_to_box(pmu, event->cpu);
3577         if (!box || box->cpu < 0)
3578                 return -EINVAL;
3579         event->cpu = box->cpu;
3580
3581         event->hw.idx = -1;
3582         event->hw.last_tag = ~0ULL;
3583         event->hw.extra_reg.idx = EXTRA_REG_NONE;
3584         event->hw.branch_reg.idx = EXTRA_REG_NONE;
3585
3586         if (event->attr.config == UNCORE_FIXED_EVENT) {
3587                 /* no fixed counter */
3588                 if (!pmu->type->fixed_ctl)
3589                         return -EINVAL;
3590                 /*
3591                  * if there is only one fixed counter, only the first pmu
3592                  * can access the fixed counter
3593                  */
3594                 if (pmu->type->single_fixed && pmu->pmu_idx > 0)
3595                         return -EINVAL;
3596
3597                 /* fixed counters have event field hardcoded to zero */
3598                 hwc->config = 0ULL;
3599         } else {
3600                 hwc->config = event->attr.config & pmu->type->event_mask;
3601                 if (pmu->type->ops->hw_config) {
3602                         ret = pmu->type->ops->hw_config(box, event);
3603                         if (ret)
3604                                 return ret;
3605                 }
3606         }
3607
3608         if (event->group_leader != event)
3609                 ret = uncore_validate_group(pmu, event);
3610         else
3611                 ret = 0;
3612
3613         return ret;
3614 }
3615
3616 static ssize_t uncore_get_attr_cpumask(struct device *dev,
3617                                 struct device_attribute *attr, char *buf)
3618 {
3619         int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &uncore_cpu_mask);
3620
3621         buf[n++] = '\n';
3622         buf[n] = '\0';
3623         return n;
3624 }
3625
3626 static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);
3627
3628 static struct attribute *uncore_pmu_attrs[] = {
3629         &dev_attr_cpumask.attr,
3630         NULL,
3631 };
3632
3633 static struct attribute_group uncore_pmu_attr_group = {
3634         .attrs = uncore_pmu_attrs,
3635 };
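/*
 * The "cpumask" attribute tells user space which cpu to open uncore
 * events on, e.g. (assuming an uncore_cbox_0 PMU exists on the system):
 *
 *   cat /sys/bus/event_source/devices/uncore_cbox_0/cpumask
 */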
3636
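/*
 * Register one logical PMU ("uncore_<type>" or "uncore_<type>_<idx>")
 * with the perf core, using either the generic callbacks above or a
 * type-specific struct pmu template.
 */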
3637 static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
3638 {
3639         int ret;
3640
3641         if (!pmu->type->pmu) {
3642                 pmu->pmu = (struct pmu) {
3643                         .attr_groups    = pmu->type->attr_groups,
3644                         .task_ctx_nr    = perf_invalid_context,
3645                         .event_init     = uncore_pmu_event_init,
3646                         .add            = uncore_pmu_event_add,
3647                         .del            = uncore_pmu_event_del,
3648                         .start          = uncore_pmu_event_start,
3649                         .stop           = uncore_pmu_event_stop,
3650                         .read           = uncore_pmu_event_read,
3651                 };
3652         } else {
3653                 pmu->pmu = *pmu->type->pmu;
3654                 pmu->pmu.attr_groups = pmu->type->attr_groups;
3655         }
3656
3657         if (pmu->type->num_boxes == 1) {
3658                 if (strlen(pmu->type->name) > 0)
3659                         sprintf(pmu->name, "uncore_%s", pmu->type->name);
3660                 else
3661                         sprintf(pmu->name, "uncore");
3662         } else {
3663                 sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
3664                         pmu->pmu_idx);
3665         }
3666
3667         ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
3668         return ret;
3669 }
3670
3671 static void __init uncore_type_exit(struct intel_uncore_type *type)
3672 {
3673         int i;
3674
3675         for (i = 0; i < type->num_boxes; i++)
3676                 free_percpu(type->pmus[i].box);
3677         kfree(type->pmus);
3678         type->pmus = NULL;
3679         kfree(type->events_group);
3680         type->events_group = NULL;
3681 }
3682
3683 static void __init uncore_types_exit(struct intel_uncore_type **types)
3684 {
3685         int i;
3686         for (i = 0; types[i]; i++)
3687                 uncore_type_exit(types[i]);
3688 }
3689
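/*
 * Allocate the per-box pmu structures and per-cpu box pointers for one
 * uncore type and, if the type has event descriptions, build its
 * "events" sysfs attribute group.
 */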
3690 static int __init uncore_type_init(struct intel_uncore_type *type)
3691 {
3692         struct intel_uncore_pmu *pmus;
3693         struct attribute_group *attr_group;
3694         struct attribute **attrs;
3695         int i, j;
3696
3697         pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
3698         if (!pmus)
3699                 return -ENOMEM;
3700
3701         type->unconstrainted = (struct event_constraint)
3702                 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
3703                                 0, type->num_counters, 0, 0);
3704
3705         for (i = 0; i < type->num_boxes; i++) {
3706                 pmus[i].func_id = -1;
3707                 pmus[i].pmu_idx = i;
3708                 pmus[i].type = type;
3709                 INIT_LIST_HEAD(&pmus[i].box_list);
3710                 pmus[i].box = alloc_percpu(struct intel_uncore_box *);
3711                 if (!pmus[i].box)
3712                         goto fail;
3713         }
3714
3715         if (type->event_descs) {
3716                 i = 0;
3717                 while (type->event_descs[i].attr.attr.name)
3718                         i++;
3719
3720                 attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
3721                                         sizeof(*attr_group), GFP_KERNEL);
3722                 if (!attr_group)
3723                         goto fail;
3724
3725                 attrs = (struct attribute **)(attr_group + 1);
3726                 attr_group->name = "events";
3727                 attr_group->attrs = attrs;
3728
3729                 for (j = 0; j < i; j++)
3730                         attrs[j] = &type->event_descs[j].attr.attr;
3731
3732                 type->events_group = attr_group;
3733         }
3734
3735         type->pmu_group = &uncore_pmu_attr_group;
3736         type->pmus = pmus;
3737         return 0;
3738 fail:
3739         uncore_type_exit(type);
3740         return -ENOMEM;
3741 }
3742
3743 static int __init uncore_types_init(struct intel_uncore_type **types)
3744 {
3745         int i, ret;
3746
3747         for (i = 0; types[i]; i++) {
3748                 ret = uncore_type_init(types[i]);
3749                 if (ret)
3750                         goto fail;
3751         }
3752         return 0;
3753 fail:
3754         while (--i >= 0)
3755                 uncore_type_exit(types[i]);
3756         return ret;
3757 }
3758
3759 static struct pci_driver *uncore_pci_driver;
3760 static bool pcidrv_registered;
3761
3762 /*
3763  * add a pci uncore device
3764  */
3765 static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3766 {
3767         struct intel_uncore_pmu *pmu;
3768         struct intel_uncore_box *box;
3769         struct intel_uncore_type *type;
3770         int phys_id;
3771
3772         phys_id = pcibus_to_physid[pdev->bus->number];
3773         if (phys_id < 0)
3774                 return -ENODEV;
3775
3776         if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
3777                 extra_pci_dev[phys_id][UNCORE_PCI_DEV_IDX(id->driver_data)] = pdev;
3778                 pci_set_drvdata(pdev, NULL);
3779                 return 0;
3780         }
3781
3782         type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
3783         box = uncore_alloc_box(type, NUMA_NO_NODE);
3784         if (!box)
3785                 return -ENOMEM;
3786
3787         /*
3788          * for a performance monitoring unit with multiple boxes,
3789          * each box has a different function id.
3790          */
3791         pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
3792         if (pmu->func_id < 0)
3793                 pmu->func_id = pdev->devfn;
3794         else
3795                 WARN_ON_ONCE(pmu->func_id != pdev->devfn);
3796
3797         box->phys_id = phys_id;
3798         box->pci_dev = pdev;
3799         box->pmu = pmu;
3800         uncore_box_init(box);
3801         pci_set_drvdata(pdev, box);
3802
3803         raw_spin_lock(&uncore_box_lock);
3804         list_add_tail(&box->list, &pmu->box_list);
3805         raw_spin_unlock(&uncore_box_lock);
3806
3807         return 0;
3808 }
3809
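/*
 * Tear down a pci uncore device: extra devices are simply dropped from
 * the lookup table; regular boxes are unlinked from their pmu, detached
 * from every cpu and freed.
 */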
3810 static void uncore_pci_remove(struct pci_dev *pdev)
3811 {
3812         struct intel_uncore_box *box;
3813         struct intel_uncore_pmu *pmu;
3814         int i, cpu, phys_id = pcibus_to_physid[pdev->bus->number];
3815
3816         box = pci_get_drvdata(pdev);
3817         if (!box) {
3818                 for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
3819                         if (extra_pci_dev[phys_id][i] == pdev) {
3820                                 extra_pci_dev[phys_id][i] = NULL;
3821                                 break;
3822                         }
3823                 }
3824                 WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
3825                 return;
3826         }
3827
3828         pmu = box->pmu;
3829         if (WARN_ON_ONCE(phys_id != box->phys_id))
3830                 return;
3831
3832         pci_set_drvdata(pdev, NULL);
3833
3834         raw_spin_lock(&uncore_box_lock);
3835         list_del(&box->list);
3836         raw_spin_unlock(&uncore_box_lock);
3837
3838         for_each_possible_cpu(cpu) {
3839                 if (*per_cpu_ptr(pmu->box, cpu) == box) {
3840                         *per_cpu_ptr(pmu->box, cpu) = NULL;
3841                         atomic_dec(&box->refcnt);
3842                 }
3843         }
3844
3845         WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
3846         kfree(box);
3847 }
3848
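/*
 * Select the pci uncore support matching the boot cpu model, build the
 * bus-to-socket mapping and register the pci driver.
 */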
3849 static int __init uncore_pci_init(void)
3850 {
3851         int ret;
3852
3853         switch (boot_cpu_data.x86_model) {
3854         case 45: /* Sandy Bridge-EP */
3855                 ret = snbep_pci2phy_map_init(0x3ce0);
3856                 if (ret)
3857                         return ret;
3858                 pci_uncores = snbep_pci_uncores;
3859                 uncore_pci_driver = &snbep_uncore_pci_driver;
3860                 break;
3861         case 62: /* IvyTown */
3862                 ret = snbep_pci2phy_map_init(0x0e1e);
3863                 if (ret)
3864                         return ret;
3865                 pci_uncores = ivt_pci_uncores;
3866                 uncore_pci_driver = &ivt_uncore_pci_driver;
3867                 break;
3868         case 42: /* Sandy Bridge */
3869                 ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_SNB_IMC);
3870                 if (ret)
3871                         return ret;
3872                 pci_uncores = snb_pci_uncores;
3873                 uncore_pci_driver = &snb_uncore_pci_driver;
3874                 break;
3875         case 58: /* Ivy Bridge */
3876                 ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_IVB_IMC);
3877                 if (ret)
3878                         return ret;
3879                 pci_uncores = snb_pci_uncores;
3880                 uncore_pci_driver = &ivb_uncore_pci_driver;
3881                 break;
3882         case 60: /* Haswell */
3883         case 69: /* Haswell Celeron */
3884                 ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_HSW_IMC);
3885                 if (ret)
3886                         return ret;
3887                 pci_uncores = snb_pci_uncores;
3888                 uncore_pci_driver = &hsw_uncore_pci_driver;
3889                 break;
3890         default:
3891                 return 0;
3892         }
3893
3894         ret = uncore_types_init(pci_uncores);
3895         if (ret)
3896                 return ret;
3897
3898         uncore_pci_driver->probe = uncore_pci_probe;
3899         uncore_pci_driver->remove = uncore_pci_remove;
3900
3901         ret = pci_register_driver(uncore_pci_driver);
3902         if (ret == 0)
3903                 pcidrv_registered = true;
3904         else
3905                 uncore_types_exit(pci_uncores);
3906
3907         return ret;
3908 }
3909
3910 static void __init uncore_pci_exit(void)
3911 {
3912         if (pcidrv_registered) {
3913                 pcidrv_registered = false;
3914                 pci_unregister_driver(uncore_pci_driver);
3915                 uncore_types_exit(pci_uncores);
3916         }
3917 }
3918
3919 /* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */
3920 static LIST_HEAD(boxes_to_free);
3921
3922 static void uncore_kfree_boxes(void)
3923 {
3924         struct intel_uncore_box *box;
3925
3926         while (!list_empty(&boxes_to_free)) {
3927                 box = list_entry(boxes_to_free.next,
3928                                  struct intel_uncore_box, list);
3929                 list_del(&box->list);
3930                 kfree(box);
3931         }
3932 }
3933
3934 static void uncore_cpu_dying(int cpu)
3935 {
3936         struct intel_uncore_type *type;
3937         struct intel_uncore_pmu *pmu;
3938         struct intel_uncore_box *box;
3939         int i, j;
3940
3941         for (i = 0; msr_uncores[i]; i++) {
3942                 type = msr_uncores[i];
3943                 for (j = 0; j < type->num_boxes; j++) {
3944                         pmu = &type->pmus[j];
3945                         box = *per_cpu_ptr(pmu->box, cpu);
3946                         *per_cpu_ptr(pmu->box, cpu) = NULL;
3947                         if (box && atomic_dec_and_test(&box->refcnt))
3948                                 list_add(&box->list, &boxes_to_free);
3949                 }
3950         }
3951 }
3952
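/*
 * A cpu is coming up: either attach it to an existing box of its
 * physical package or initialize the box that was allocated for it in
 * the prepare stage.
 */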
3953 static int uncore_cpu_starting(int cpu)
3954 {
3955         struct intel_uncore_type *type;
3956         struct intel_uncore_pmu *pmu;
3957         struct intel_uncore_box *box, *exist;
3958         int i, j, k, phys_id;
3959
3960         phys_id = topology_physical_package_id(cpu);
3961
3962         for (i = 0; msr_uncores[i]; i++) {
3963                 type = msr_uncores[i];
3964                 for (j = 0; j < type->num_boxes; j++) {
3965                         pmu = &type->pmus[j];
3966                         box = *per_cpu_ptr(pmu->box, cpu);
3967                         /* called by uncore_cpu_init? */
3968                         if (box && box->phys_id >= 0) {
3969                                 uncore_box_init(box);
3970                                 continue;
3971                         }
3972
3973                         for_each_online_cpu(k) {
3974                                 exist = *per_cpu_ptr(pmu->box, k);
3975                                 if (exist && exist->phys_id == phys_id) {
3976                                         atomic_inc(&exist->refcnt);
3977                                         *per_cpu_ptr(pmu->box, cpu) = exist;
3978                                         if (box) {
3979                                                 list_add(&box->list,
3980                                                          &boxes_to_free);
3981                                                 box = NULL;
3982                                         }
3983                                         break;
3984                                 }
3985                         }
3986
3987                         if (box) {
3988                                 box->phys_id = phys_id;
3989                                 uncore_box_init(box);
3990                         }
3991                 }
3992         }
3993         return 0;
3994 }
3995
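/*
 * Allocate an msr box for every uncore pmu on behalf of the incoming
 * cpu; the boxes are attached, or discarded in favour of an existing
 * package box, in uncore_cpu_starting().
 */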
3996 static int uncore_cpu_prepare(int cpu, int phys_id)
3997 {
3998         struct intel_uncore_type *type;
3999         struct intel_uncore_pmu *pmu;
4000         struct intel_uncore_box *box;
4001         int i, j;
4002
4003         for (i = 0; msr_uncores[i]; i++) {
4004                 type = msr_uncores[i];
4005                 for (j = 0; j < type->num_boxes; j++) {
4006                         pmu = &type->pmus[j];
4007                         if (pmu->func_id < 0)
4008                                 pmu->func_id = j;
4009
4010                         box = uncore_alloc_box(type, cpu_to_node(cpu));
4011                         if (!box)
4012                                 return -ENOMEM;
4013
4014                         box->pmu = pmu;
4015                         box->phys_id = phys_id;
4016                         *per_cpu_ptr(pmu->box, cpu) = box;
4017                 }
4018         }
4019         return 0;
4020 }
4021
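/*
 * Move the uncore event context of every listed pmu from old_cpu to
 * new_cpu, or simply claim/release the boxes when one of the two is -1.
 */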
4022 static void
4023 uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
4024 {
4025         struct intel_uncore_type *type;
4026         struct intel_uncore_pmu *pmu;
4027         struct intel_uncore_box *box;
4028         int i, j;
4029
4030         for (i = 0; uncores[i]; i++) {
4031                 type = uncores[i];
4032                 for (j = 0; j < type->num_boxes; j++) {
4033                         pmu = &type->pmus[j];
4034                         if (old_cpu < 0)
4035                                 box = uncore_pmu_to_box(pmu, new_cpu);
4036                         else
4037                                 box = uncore_pmu_to_box(pmu, old_cpu);
4038                         if (!box)
4039                                 continue;
4040
4041                         if (old_cpu < 0) {
4042                                 WARN_ON_ONCE(box->cpu != -1);
4043                                 box->cpu = new_cpu;
4044                                 continue;
4045                         }
4046
4047                         WARN_ON_ONCE(box->cpu != old_cpu);
4048                         if (new_cpu >= 0) {
4049                                 uncore_pmu_cancel_hrtimer(box);
4050                                 perf_pmu_migrate_context(&pmu->pmu,
4051                                                 old_cpu, new_cpu);
4052                                 box->cpu = new_cpu;
4053                         } else {
4054                                 box->cpu = -1;
4055                         }
4056                 }
4057         }
4058 }
4059
4060 static void uncore_event_exit_cpu(int cpu)
4061 {
4062         int i, phys_id, target;
4063
4064         /* if exiting cpu is used for collecting uncore events */
4065         if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
4066                 return;
4067
4068         /* find a new cpu to collect uncore events */
4069         phys_id = topology_physical_package_id(cpu);
4070         target = -1;
4071         for_each_online_cpu(i) {
4072                 if (i == cpu)
4073                         continue;
4074                 if (phys_id == topology_physical_package_id(i)) {
4075                         target = i;
4076                         break;
4077                 }
4078         }
4079
4080         /* migrate uncore events to the new cpu */
4081         if (target >= 0)
4082                 cpumask_set_cpu(target, &uncore_cpu_mask);
4083
4084         uncore_change_context(msr_uncores, cpu, target);
4085         uncore_change_context(pci_uncores, cpu, target);
4086 }
4087
4088 static void uncore_event_init_cpu(int cpu)
4089 {
4090         int i, phys_id;
4091
4092         phys_id = topology_physical_package_id(cpu);
4093         for_each_cpu(i, &uncore_cpu_mask) {
4094                 if (phys_id == topology_physical_package_id(i))
4095                         return;
4096         }
4097
4098         cpumask_set_cpu(cpu, &uncore_cpu_mask);
4099
4100         uncore_change_context(msr_uncores, -1, cpu);
4101         uncore_change_context(pci_uncores, -1, cpu);
4102 }
4103
4104 static int uncore_cpu_notifier(struct notifier_block *self,
4105                                unsigned long action, void *hcpu)
4106 {
4107         unsigned int cpu = (long)hcpu;
4108
4109         /* allocate/free data structure for uncore box */
4110         switch (action & ~CPU_TASKS_FROZEN) {
4111         case CPU_UP_PREPARE:
4112                 uncore_cpu_prepare(cpu, -1);
4113                 break;
4114         case CPU_STARTING:
4115                 uncore_cpu_starting(cpu);
4116                 break;
4117         case CPU_UP_CANCELED:
4118         case CPU_DYING:
4119                 uncore_cpu_dying(cpu);
4120                 break;
4121         case CPU_ONLINE:
4122         case CPU_DEAD:
4123                 uncore_kfree_boxes();
4124                 break;
4125         default:
4126                 break;
4127         }
4128
4129         /* select the cpu that collects uncore events */
4130         switch (action & ~CPU_TASKS_FROZEN) {
4131         case CPU_DOWN_FAILED:
4132         case CPU_STARTING:
4133                 uncore_event_init_cpu(cpu);
4134                 break;
4135         case CPU_DOWN_PREPARE:
4136                 uncore_event_exit_cpu(cpu);
4137                 break;
4138         default:
4139                 break;
4140         }
4141
4142         return NOTIFY_OK;
4143 }
4144
4145 static struct notifier_block uncore_cpu_nb = {
4146         .notifier_call  = uncore_cpu_notifier,
4147         /*
4148          * to migrate uncore events, our notifier should be executed
4149          * before perf core's notifier.
4150          */
4151         .priority       = CPU_PRI_PERF + 1,
4152 };
4153
4154 static void __init uncore_cpu_setup(void *dummy)
4155 {
4156         uncore_cpu_starting(smp_processor_id());
4157 }
4158
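/*
 * Select the msr-based uncore support matching the boot cpu model and
 * clamp the number of cbox instances to the actual core count.
 */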
4159 static int __init uncore_cpu_init(void)
4160 {
4161         int ret, max_cores;
4162
4163         max_cores = boot_cpu_data.x86_max_cores;
4164         switch (boot_cpu_data.x86_model) {
4165         case 26: /* Nehalem */
4166         case 30:
4167         case 37: /* Westmere */
4168         case 44:
4169                 msr_uncores = nhm_msr_uncores;
4170                 break;
4171         case 42: /* Sandy Bridge */
4172         case 58: /* Ivy Bridge */
4173                 if (snb_uncore_cbox.num_boxes > max_cores)
4174                         snb_uncore_cbox.num_boxes = max_cores;
4175                 msr_uncores = snb_msr_uncores;
4176                 break;
4177         case 45: /* Sandy Bridge-EP */
4178                 if (snbep_uncore_cbox.num_boxes > max_cores)
4179                         snbep_uncore_cbox.num_boxes = max_cores;
4180                 msr_uncores = snbep_msr_uncores;
4181                 break;
4182         case 46: /* Nehalem-EX */
4183                 uncore_nhmex = true;    /* fall through */
4184         case 47: /* Westmere-EX aka. Xeon E7 */
4185                 if (!uncore_nhmex)
4186                         nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
4187                 if (nhmex_uncore_cbox.num_boxes > max_cores)
4188                         nhmex_uncore_cbox.num_boxes = max_cores;
4189                 msr_uncores = nhmex_msr_uncores;
4190                 break;
4191         case 62: /* IvyTown */
4192                 if (ivt_uncore_cbox.num_boxes > max_cores)
4193                         ivt_uncore_cbox.num_boxes = max_cores;
4194                 msr_uncores = ivt_msr_uncores;
4195                 break;
4196
4197         default:
4198                 return 0;
4199         }
4200
4201         ret = uncore_types_init(msr_uncores);
4202         if (ret)
4203                 return ret;
4204
4205         return 0;
4206 }
4207
4208 static int __init uncore_pmus_register(void)
4209 {
4210         struct intel_uncore_pmu *pmu;
4211         struct intel_uncore_type *type;
4212         int i, j;
4213
4214         for (i = 0; msr_uncores[i]; i++) {
4215                 type = msr_uncores[i];
4216                 for (j = 0; j < type->num_boxes; j++) {
4217                         pmu = &type->pmus[j];
4218                         uncore_pmu_register(pmu);
4219                 }
4220         }
4221
4222         for (i = 0; pci_uncores[i]; i++) {
4223                 type = pci_uncores[i];
4224                 for (j = 0; j < type->num_boxes; j++) {
4225                         pmu = &type->pmus[j];
4226                         uncore_pmu_register(pmu);
4227                 }
4228         }
4229
4230         return 0;
4231 }
4232
4233 static void __init uncore_cpumask_init(void)
4234 {
4235         int cpu;
4236
4237         /*
4238          * only invoked once, from either the msr or the pci init code
4239          */
4240         if (!cpumask_empty(&uncore_cpu_mask))
4241                 return;
4242
4243         get_online_cpus();
4244
4245         for_each_online_cpu(cpu) {
4246                 int i, phys_id = topology_physical_package_id(cpu);
4247
4248                 for_each_cpu(i, &uncore_cpu_mask) {
4249                         if (phys_id == topology_physical_package_id(i)) {
4250                                 phys_id = -1;
4251                                 break;
4252                         }
4253                 }
4254                 if (phys_id < 0)
4255                         continue;
4256
4257                 uncore_cpu_prepare(cpu, phys_id);
4258                 uncore_event_init_cpu(cpu);
4259         }
4260         on_each_cpu(uncore_cpu_setup, NULL, 1);
4261
4262         register_cpu_notifier(&uncore_cpu_nb);
4263
4264         put_online_cpus();
4265 }
4266
4267
4268 static int __init intel_uncore_init(void)
4269 {
4270         int ret;
4271
4272         if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
4273                 return -ENODEV;
4274
4275         if (cpu_has_hypervisor)
4276                 return -ENODEV;
4277
4278         ret = uncore_pci_init();
4279         if (ret)
4280                 goto fail;
4281         ret = uncore_cpu_init();
4282         if (ret) {
4283                 uncore_pci_exit();
4284                 goto fail;
4285         }
4286         uncore_cpumask_init();
4287
4288         uncore_pmus_register();
4289         return 0;
4290 fail:
4291         return ret;
4292 }
4293 device_initcall(intel_uncore_init);