#include "perf_event_intel_uncore.h"

static struct intel_uncore_type *empty_uncore[] = { NULL, };
static struct intel_uncore_type **msr_uncores = empty_uncore;
static struct intel_uncore_type **pci_uncores = empty_uncore;
/* pci bus to socket mapping */
static int pcibus_to_physid[256] = { [0 ... 255] = -1, };

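/*
 * Some uncore counters are controlled through registers on companion
 * PCI devices that carry no counters of their own (e.g. the QPI port
 * match/mask filters below); those devices are kept here, per socket.
 */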
static struct pci_dev *extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];

static DEFINE_RAW_SPINLOCK(uncore_box_lock);

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint constraint_fixed =
        EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
static struct event_constraint constraint_empty =
        EVENT_CONSTRAINT(0, 0, 0);

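/*
 * Extract the i-th n-bit wide field of x.  The constraint code below
 * packs several small reference counts into a single atomic_t and uses
 * this to read one count back out.
 */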
#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
                                ((1ULL << (n)) - 1)))

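/*
 * Event format descriptions, exported to user space through sysfs
 * (/sys/bus/event_source/devices/uncore_*/format/) so that tools can
 * parse event strings such as "event=0x04,umask=0x03".
 */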
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");

static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
        u64 count;

        rdmsrl(event->hw.event_base, count);

        return count;
}

/*
 * generic get constraint function for shared match/mask registers.
 */
static struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct intel_uncore_extra_reg *er;
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
        unsigned long flags;
        bool ok = false;

        /*
         * reg->alloc can be set due to existing state, so for fake box we
         * need to ignore this, otherwise we might fail to allocate proper
         * fake state for this extra reg constraint.
         */
        if (reg1->idx == EXTRA_REG_NONE ||
            (!uncore_box_is_fake(box) && reg1->alloc))
                return NULL;

        er = &box->shared_regs[reg1->idx];
        raw_spin_lock_irqsave(&er->lock, flags);
        if (!atomic_read(&er->ref) ||
            (er->config1 == reg1->config && er->config2 == reg2->config)) {
                atomic_inc(&er->ref);
                er->config1 = reg1->config;
                er->config2 = reg2->config;
                ok = true;
        }
        raw_spin_unlock_irqrestore(&er->lock, flags);

        if (ok) {
                if (!uncore_box_is_fake(box))
                        reg1->alloc = 1;
                return NULL;
        }

        return &constraint_empty;
}

static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct intel_uncore_extra_reg *er;
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

        /*
         * Only put constraint if extra reg was actually allocated. Also
         * takes care of events which do not use an extra shared reg.
         *
         * Also, if this is a fake box we shouldn't touch any event state
         * (reg->alloc) and we don't care about leaving inconsistent box
         * state either since it will be thrown out.
         */
        if (uncore_box_is_fake(box) || !reg1->alloc)
                return;

        er = &box->shared_regs[reg1->idx];
        atomic_dec(&er->ref);
        reg1->alloc = 0;
}

static u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
        struct intel_uncore_extra_reg *er;
        unsigned long flags;
        u64 config;

        er = &box->shared_regs[idx];

        raw_spin_lock_irqsave(&er->lock, flags);
        config = er->config;
        raw_spin_unlock_irqrestore(&er->lock, flags);

        return config;
}

/* Sandy Bridge-EP uncore support */
static struct intel_uncore_type snbep_uncore_cbox;
static struct intel_uncore_type snbep_uncore_pcu;

static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;
        int box_ctl = uncore_pci_box_ctl(box);
        u32 config = 0;

        if (!pci_read_config_dword(pdev, box_ctl, &config)) {
                config |= SNBEP_PMON_BOX_CTL_FRZ;
                pci_write_config_dword(pdev, box_ctl, config);
        }
}

static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;
        int box_ctl = uncore_pci_box_ctl(box);
        u32 config = 0;

        if (!pci_read_config_dword(pdev, box_ctl, &config)) {
                config &= ~SNBEP_PMON_BOX_CTL_FRZ;
                pci_write_config_dword(pdev, box_ctl, config);
        }
}

static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;

        pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;

        pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

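/*
 * PCI config space is accessed 32 bits at a time, so the (up to 48-bit
 * wide) counter is assembled from two dword reads.
 */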
static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;
        u64 count = 0;

        pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
        pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

        return count;
}

static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;

        pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
}

static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
        u64 config;
        unsigned msr;

        msr = uncore_msr_box_ctl(box);
        if (msr) {
                rdmsrl(msr, config);
                config |= SNBEP_PMON_BOX_CTL_FRZ;
                wrmsrl(msr, config);
        }
}

static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
        u64 config;
        unsigned msr;

        msr = uncore_msr_box_ctl(box);
        if (msr) {
                rdmsrl(msr, config);
                config &= ~SNBEP_PMON_BOX_CTL_FRZ;
                wrmsrl(msr, config);
        }
}

static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

        if (reg1->idx != EXTRA_REG_NONE)
                wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

        wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
                                        struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        wrmsrl(hwc->config_base, hwc->config);
}

static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
        unsigned msr = uncore_msr_box_ctl(box);

        if (msr)
                wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}

static struct attribute *snbep_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        NULL,
};

static struct attribute *snbep_uncore_ubox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh5.attr,
        NULL,
};

static struct attribute *snbep_uncore_cbox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_tid_en.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        &format_attr_filter_tid.attr,
        &format_attr_filter_nid.attr,
        &format_attr_filter_state.attr,
        &format_attr_filter_opc.attr,
        NULL,
};

static struct attribute *snbep_uncore_pcu_formats_attr[] = {
        &format_attr_event_ext.attr,
        &format_attr_occ_sel.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh5.attr,
        &format_attr_occ_invert.attr,
        &format_attr_occ_edge.attr,
        &format_attr_filter_band0.attr,
        &format_attr_filter_band1.attr,
        &format_attr_filter_band2.attr,
        &format_attr_filter_band3.attr,
        NULL,
};

static struct attribute *snbep_uncore_qpi_formats_attr[] = {
        &format_attr_event_ext.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        &format_attr_match_rds.attr,
        &format_attr_match_rnid30.attr,
        &format_attr_match_rnid4.attr,
        &format_attr_match_dnid.attr,
        &format_attr_match_mc.attr,
        &format_attr_match_opc.attr,
        &format_attr_match_vnw.attr,
        &format_attr_match0.attr,
        &format_attr_match1.attr,
        &format_attr_mask_rds.attr,
        &format_attr_mask_rnid30.attr,
        &format_attr_mask_rnid4.attr,
        &format_attr_mask_dnid.attr,
        &format_attr_mask_mc.attr,
        &format_attr_mask_opc.attr,
        &format_attr_mask_vnw.attr,
        &format_attr_mask0.attr,
        &format_attr_mask1.attr,
        NULL,
};

static struct uncore_event_desc snbep_uncore_imc_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
        INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
        INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
        { /* end: all zeroes */ },
};

static struct uncore_event_desc snbep_uncore_qpi_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
        INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
        INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
        INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
        { /* end: all zeroes */ },
};

static struct attribute_group snbep_uncore_format_group = {
        .name = "format",
        .attrs = snbep_uncore_formats_attr,
};

static struct attribute_group snbep_uncore_ubox_format_group = {
        .name = "format",
        .attrs = snbep_uncore_ubox_formats_attr,
};

static struct attribute_group snbep_uncore_cbox_format_group = {
        .name = "format",
        .attrs = snbep_uncore_cbox_formats_attr,
};

static struct attribute_group snbep_uncore_pcu_format_group = {
        .name = "format",
        .attrs = snbep_uncore_pcu_formats_attr,
};

static struct attribute_group snbep_uncore_qpi_format_group = {
        .name = "format",
        .attrs = snbep_uncore_qpi_formats_attr,
};

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                      \
        .init_box       = snbep_uncore_msr_init_box,            \
        .disable_box    = snbep_uncore_msr_disable_box,         \
        .enable_box     = snbep_uncore_msr_enable_box,          \
        .disable_event  = snbep_uncore_msr_disable_event,       \
        .enable_event   = snbep_uncore_msr_enable_event,        \
        .read_counter   = uncore_msr_read_counter

static struct intel_uncore_ops snbep_uncore_msr_ops = {
        SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()                      \
        .init_box       = snbep_uncore_pci_init_box,            \
        .disable_box    = snbep_uncore_pci_disable_box,         \
        .enable_box     = snbep_uncore_pci_enable_box,          \
        .disable_event  = snbep_uncore_pci_disable_event,       \
        .read_counter   = snbep_uncore_pci_read_counter

static struct intel_uncore_ops snbep_uncore_pci_ops = {
        SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
        .enable_event   = snbep_uncore_pci_enable_event,
};

static struct event_constraint snbep_uncore_cbox_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
        EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
        UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
        EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
        EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snbep_uncore_ubox = {
        .name           = "ubox",
        .num_counters   = 2,
        .num_boxes      = 1,
        .perf_ctr_bits  = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr       = SNBEP_U_MSR_PMON_CTR0,
        .event_ctl      = SNBEP_U_MSR_PMON_CTL0,
        .event_mask     = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
        .fixed_ctr      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
        .fixed_ctl      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
        .ops            = &snbep_uncore_msr_ops,
        .format_group   = &snbep_uncore_ubox_format_group,
};

static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
                                  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
        EVENT_EXTRA_END
};

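/*
 * Each Cbox has one filter register shared by all of its counters.  The
 * filter is split into independent bit-fields (tid/nid/state/opc on
 * SNB-EP), and er->ref packs a 6-bit reference count per field, so
 * events may share the filter as long as the field values they need
 * agree.
 */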
static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct intel_uncore_extra_reg *er = &box->shared_regs[0];
        int i;

        if (uncore_box_is_fake(box))
                return;

        for (i = 0; i < 5; i++) {
                if (reg1->alloc & (0x1 << i))
                        atomic_sub(1 << (i * 6), &er->ref);
        }
        reg1->alloc = 0;
}

static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
                            u64 (*cbox_filter_mask)(int fields))
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct intel_uncore_extra_reg *er = &box->shared_regs[0];
        int i, alloc = 0;
        unsigned long flags;
        u64 mask;

        if (reg1->idx == EXTRA_REG_NONE)
                return NULL;

        raw_spin_lock_irqsave(&er->lock, flags);
        for (i = 0; i < 5; i++) {
                if (!(reg1->idx & (0x1 << i)))
                        continue;
                if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
                        continue;

                mask = cbox_filter_mask(0x1 << i);
                if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
                    !((reg1->config ^ er->config) & mask)) {
                        atomic_add(1 << (i * 6), &er->ref);
                        er->config &= ~mask;
                        er->config |= reg1->config & mask;
                        alloc |= (0x1 << i);
                } else {
                        break;
                }
        }
        raw_spin_unlock_irqrestore(&er->lock, flags);
        if (i < 5)
                goto fail;

        if (!uncore_box_is_fake(box))
                reg1->alloc |= alloc;

        return NULL;
fail:
        for (; i >= 0; i--) {
                if (alloc & (0x1 << i))
                        atomic_sub(1 << (i * 6), &er->ref);
        }
        return &constraint_empty;
}

static u64 snbep_cbox_filter_mask(int fields)
{
        u64 mask = 0;

        if (fields & 0x1)
                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
        if (fields & 0x2)
                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
        if (fields & 0x4)
                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
        if (fields & 0x8)
                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

        return mask;
}

static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}

static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct extra_reg *er;
        int idx = 0;

        for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
                if (er->event != (event->hw.config & er->config_mask))
                        continue;
                idx |= er->idx;
        }

        if (idx) {
                reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
                        SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
                reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
                reg1->idx = idx;
        }
        return 0;
}

static struct intel_uncore_ops snbep_uncore_cbox_ops = {
        SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
        .hw_config              = snbep_cbox_hw_config,
        .get_constraint         = snbep_cbox_get_constraint,
        .put_constraint         = snbep_cbox_put_constraint,
};

static struct intel_uncore_type snbep_uncore_cbox = {
        .name                   = "cbox",
        .num_counters           = 4,
        .num_boxes              = 8,
        .perf_ctr_bits          = 44,
        .event_ctl              = SNBEP_C0_MSR_PMON_CTL0,
        .perf_ctr               = SNBEP_C0_MSR_PMON_CTR0,
        .event_mask             = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_C0_MSR_PMON_BOX_CTL,
        .msr_offset             = SNBEP_CBO_MSR_OFFSET,
        .num_shared_regs        = 1,
        .constraints            = snbep_uncore_cbox_constraints,
        .ops                    = &snbep_uncore_cbox_ops,
        .format_group           = &snbep_uncore_cbox_format_group,
};

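/*
 * The PCU filter MSR holds four 8-bit occupancy band thresholds, one per
 * band event (event selects 0xb-0xe map to bands 0-3).  When the band an
 * event asked for is taken with a different threshold,
 * snbep_pcu_alter_er() re-targets the event to another band by shifting
 * its threshold byte and adjusting the event select by the same delta.
 */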
static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
        u64 config = reg1->config;

        if (new_idx > reg1->idx)
                config <<= 8 * (new_idx - reg1->idx);
        else
                config >>= 8 * (reg1->idx - new_idx);

        if (modify) {
                hwc->config += new_idx - reg1->idx;
                reg1->config = config;
                reg1->idx = new_idx;
        }
        return config;
}

static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct intel_uncore_extra_reg *er = &box->shared_regs[0];
        unsigned long flags;
        int idx = reg1->idx;
        u64 mask, config1 = reg1->config;
        bool ok = false;

        if (reg1->idx == EXTRA_REG_NONE ||
            (!uncore_box_is_fake(box) && reg1->alloc))
                return NULL;
again:
        mask = 0xffULL << (idx * 8);
        raw_spin_lock_irqsave(&er->lock, flags);
        if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
            !((config1 ^ er->config) & mask)) {
                atomic_add(1 << (idx * 8), &er->ref);
                er->config &= ~mask;
                er->config |= config1 & mask;
                ok = true;
        }
        raw_spin_unlock_irqrestore(&er->lock, flags);

        if (!ok) {
                idx = (idx + 1) % 4;
                if (idx != reg1->idx) {
                        config1 = snbep_pcu_alter_er(event, idx, false);
                        goto again;
                }
                return &constraint_empty;
        }

        if (!uncore_box_is_fake(box)) {
                if (idx != reg1->idx)
                        snbep_pcu_alter_er(event, idx, true);
                reg1->alloc = 1;
        }
        return NULL;
}

static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct intel_uncore_extra_reg *er = &box->shared_regs[0];

        if (uncore_box_is_fake(box) || !reg1->alloc)
                return;

        atomic_sub(1 << (reg1->idx * 8), &er->ref);
        reg1->alloc = 0;
}

static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
        int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

        if (ev_sel >= 0xb && ev_sel <= 0xe) {
                reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
                reg1->idx = ev_sel - 0xb;
                reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
        }
        return 0;
}

static struct intel_uncore_ops snbep_uncore_pcu_ops = {
        SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
        .hw_config              = snbep_pcu_hw_config,
        .get_constraint         = snbep_pcu_get_constraint,
        .put_constraint         = snbep_pcu_put_constraint,
};

static struct intel_uncore_type snbep_uncore_pcu = {
        .name                   = "pcu",
        .num_counters           = 4,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCU_MSR_PMON_CTR0,
        .event_ctl              = SNBEP_PCU_MSR_PMON_CTL0,
        .event_mask             = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCU_MSR_PMON_BOX_CTL,
        .num_shared_regs        = 1,
        .ops                    = &snbep_uncore_pcu_ops,
        .format_group           = &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *snbep_msr_uncores[] = {
        &snbep_uncore_ubox,
        &snbep_uncore_cbox,
        &snbep_uncore_pcu,
        NULL,
};

enum {
        SNBEP_PCI_QPI_PORT0_FILTER,
        SNBEP_PCI_QPI_PORT1_FILTER,
};

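/*
 * QPI event 0x38 supports packet match/mask filtering.  The match/mask
 * registers do not live on the counter device itself but on a separate
 * "filter" PCI function, so they are programmed through the matching
 * entry in extra_pci_dev[] at enable time.
 */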
static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
        struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

        if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
                reg1->idx = 0;
                reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
                reg1->config = event->attr.config1;
                reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
                reg2->config = event->attr.config2;
        }
        return 0;
}

static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
        struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

        if (reg1->idx != EXTRA_REG_NONE) {
                int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
                struct pci_dev *filter_pdev = extra_pci_dev[box->phys_id][idx];
                WARN_ON_ONCE(!filter_pdev);
                if (filter_pdev) {
                        pci_write_config_dword(filter_pdev, reg1->reg,
                                                (u32)reg1->config);
                        pci_write_config_dword(filter_pdev, reg1->reg + 4,
                                                (u32)(reg1->config >> 32));
                        pci_write_config_dword(filter_pdev, reg2->reg,
                                                (u32)reg2->config);
                        pci_write_config_dword(filter_pdev, reg2->reg + 4,
                                                (u32)(reg2->config >> 32));
                }
        }

        pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops snbep_uncore_qpi_ops = {
        SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
        .enable_event           = snbep_qpi_enable_event,
        .hw_config              = snbep_qpi_hw_config,
        .get_constraint         = uncore_get_constraint,
        .put_constraint         = uncore_put_constraint,
};

#define SNBEP_UNCORE_PCI_COMMON_INIT()                          \
        .perf_ctr       = SNBEP_PCI_PMON_CTR0,                  \
        .event_ctl      = SNBEP_PCI_PMON_CTL0,                  \
        .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,            \
        .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,               \
        .ops            = &snbep_uncore_pci_ops,                \
        .format_group   = &snbep_uncore_format_group

static struct intel_uncore_type snbep_uncore_ha = {
        .name           = "ha",
        .num_counters   = 4,
        .num_boxes      = 1,
        .perf_ctr_bits  = 48,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_imc = {
        .name           = "imc",
        .num_counters   = 4,
        .num_boxes      = 4,
        .perf_ctr_bits  = 48,
        .fixed_ctr_bits = 48,
        .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
        .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
        .event_descs    = snbep_uncore_imc_events,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_qpi = {
        .name                   = "qpi",
        .num_counters           = 4,
        .num_boxes              = 2,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCI_PMON_CTR0,
        .event_ctl              = SNBEP_PCI_PMON_CTL0,
        .event_mask             = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
        .num_shared_regs        = 1,
        .ops                    = &snbep_uncore_qpi_ops,
        .event_descs            = snbep_uncore_qpi_events,
        .format_group           = &snbep_uncore_qpi_format_group,
};


static struct intel_uncore_type snbep_uncore_r2pcie = {
        .name           = "r2pcie",
        .num_counters   = 4,
        .num_boxes      = 1,
        .perf_ctr_bits  = 44,
        .constraints    = snbep_uncore_r2pcie_constraints,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r3qpi = {
        .name           = "r3qpi",
        .num_counters   = 3,
        .num_boxes      = 2,
        .perf_ctr_bits  = 44,
        .constraints    = snbep_uncore_r3qpi_constraints,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
        SNBEP_PCI_UNCORE_HA,
        SNBEP_PCI_UNCORE_IMC,
        SNBEP_PCI_UNCORE_QPI,
        SNBEP_PCI_UNCORE_R2PCIE,
        SNBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
        [SNBEP_PCI_UNCORE_HA]           = &snbep_uncore_ha,
        [SNBEP_PCI_UNCORE_IMC]          = &snbep_uncore_imc,
        [SNBEP_PCI_UNCORE_QPI]          = &snbep_uncore_qpi,
        [SNBEP_PCI_UNCORE_R2PCIE]       = &snbep_uncore_r2pcie,
        [SNBEP_PCI_UNCORE_R3QPI]        = &snbep_uncore_r3qpi,
        NULL,
};

static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
        { /* Home Agent */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
        },
        { /* MC Channel 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
        },
        { /* MC Channel 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
        },
        { /* MC Channel 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
        },
        { /* MC Channel 3 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
        },
        { /* QPI Port 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
        },
        { /* QPI Port 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
        },
        { /* R2PCIe */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
        },
        { /* R3QPI Link 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
        },
        { /* R3QPI Link 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
        },
        { /* QPI Port 0 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   SNBEP_PCI_QPI_PORT0_FILTER),
        },
        { /* QPI Port 1 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   SNBEP_PCI_QPI_PORT1_FILTER),
        },
        { /* end: all zeroes */ }
};

static struct pci_driver snbep_uncore_pci_driver = {
        .name           = "snbep_uncore",
        .id_table       = snbep_uncore_pci_ids,
};

/*
 * build pci bus to socket mapping
 */
static int snbep_pci2phy_map_init(int devid)
{
        struct pci_dev *ubox_dev = NULL;
        int i, bus, nodeid;
        int err = 0;
        u32 config = 0;

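        /*
         * Walk every UBOX device in the system (one per socket); its bus
         * number identifies the buses belonging to that socket.
         */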
        while (1) {
                /* find the UBOX device */
                ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
                if (!ubox_dev)
                        break;
                bus = ubox_dev->bus->number;
                /* get the Node ID of the local register */
                err = pci_read_config_dword(ubox_dev, 0x40, &config);
                if (err)
                        break;
                nodeid = config;
                /* get the Node ID mapping */
                err = pci_read_config_dword(ubox_dev, 0x54, &config);
                if (err)
                        break;
                /*
                 * every three bits in the Node ID mapping register maps
                 * to a particular node.
                 */
                for (i = 0; i < 8; i++) {
                        if (nodeid == ((config >> (3 * i)) & 0x7)) {
                                pcibus_to_physid[bus] = i;
                                break;
                        }
                }
        }

        if (ubox_dev)
                pci_dev_put(ubox_dev);

        return err ? pcibios_err_to_errno(err) : 0;
}
/* end of Sandy Bridge-EP uncore support */

/* IvyTown uncore support */
static void ivt_uncore_msr_init_box(struct intel_uncore_box *box)
{
        unsigned msr = uncore_msr_box_ctl(box);
        if (msr)
                wrmsrl(msr, IVT_PMON_BOX_CTL_INT);
}

static void ivt_uncore_pci_init_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;

        pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVT_PMON_BOX_CTL_INT);
}

#define IVT_UNCORE_MSR_OPS_COMMON_INIT()                        \
        .init_box       = ivt_uncore_msr_init_box,              \
        .disable_box    = snbep_uncore_msr_disable_box,         \
        .enable_box     = snbep_uncore_msr_enable_box,          \
        .disable_event  = snbep_uncore_msr_disable_event,       \
        .enable_event   = snbep_uncore_msr_enable_event,        \
        .read_counter   = uncore_msr_read_counter

static struct intel_uncore_ops ivt_uncore_msr_ops = {
        IVT_UNCORE_MSR_OPS_COMMON_INIT(),
};

static struct intel_uncore_ops ivt_uncore_pci_ops = {
        .init_box       = ivt_uncore_pci_init_box,
        .disable_box    = snbep_uncore_pci_disable_box,
        .enable_box     = snbep_uncore_pci_enable_box,
        .disable_event  = snbep_uncore_pci_disable_event,
        .enable_event   = snbep_uncore_pci_enable_event,
        .read_counter   = snbep_uncore_pci_read_counter,
};

#define IVT_UNCORE_PCI_COMMON_INIT()                            \
        .perf_ctr       = SNBEP_PCI_PMON_CTR0,                  \
        .event_ctl      = SNBEP_PCI_PMON_CTL0,                  \
        .event_mask     = IVT_PMON_RAW_EVENT_MASK,              \
        .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,               \
        .ops            = &ivt_uncore_pci_ops,                  \
        .format_group   = &ivt_uncore_format_group

static struct attribute *ivt_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        NULL,
};

static struct attribute *ivt_uncore_ubox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh5.attr,
        NULL,
};

static struct attribute *ivt_uncore_cbox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_tid_en.attr,
        &format_attr_thresh8.attr,
        &format_attr_filter_tid.attr,
        &format_attr_filter_link.attr,
        &format_attr_filter_state2.attr,
        &format_attr_filter_nid2.attr,
        &format_attr_filter_opc2.attr,
        NULL,
};

static struct attribute *ivt_uncore_pcu_formats_attr[] = {
        &format_attr_event_ext.attr,
        &format_attr_occ_sel.attr,
        &format_attr_edge.attr,
        &format_attr_thresh5.attr,
        &format_attr_occ_invert.attr,
        &format_attr_occ_edge.attr,
        &format_attr_filter_band0.attr,
        &format_attr_filter_band1.attr,
        &format_attr_filter_band2.attr,
        &format_attr_filter_band3.attr,
        NULL,
};

static struct attribute *ivt_uncore_qpi_formats_attr[] = {
        &format_attr_event_ext.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_thresh8.attr,
        NULL,
};

static struct attribute_group ivt_uncore_format_group = {
        .name = "format",
        .attrs = ivt_uncore_formats_attr,
};

static struct attribute_group ivt_uncore_ubox_format_group = {
        .name = "format",
        .attrs = ivt_uncore_ubox_formats_attr,
};

static struct attribute_group ivt_uncore_cbox_format_group = {
        .name = "format",
        .attrs = ivt_uncore_cbox_formats_attr,
};

static struct attribute_group ivt_uncore_pcu_format_group = {
        .name = "format",
        .attrs = ivt_uncore_pcu_formats_attr,
};

static struct attribute_group ivt_uncore_qpi_format_group = {
        .name = "format",
        .attrs = ivt_uncore_qpi_formats_attr,
};

static struct intel_uncore_type ivt_uncore_ubox = {
        .name           = "ubox",
        .num_counters   = 2,
        .num_boxes      = 1,
        .perf_ctr_bits  = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr       = SNBEP_U_MSR_PMON_CTR0,
        .event_ctl      = SNBEP_U_MSR_PMON_CTL0,
        .event_mask     = IVT_U_MSR_PMON_RAW_EVENT_MASK,
        .fixed_ctr      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
        .fixed_ctl      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
        .ops            = &ivt_uncore_msr_ops,
        .format_group   = &ivt_uncore_ubox_format_group,
};

static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
                                  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
        SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
        EVENT_EXTRA_END
};

static u64 ivt_cbox_filter_mask(int fields)
{
        u64 mask = 0;

        if (fields & 0x1)
                mask |= IVT_CB0_MSR_PMON_BOX_FILTER_TID;
        if (fields & 0x2)
                mask |= IVT_CB0_MSR_PMON_BOX_FILTER_LINK;
        if (fields & 0x4)
                mask |= IVT_CB0_MSR_PMON_BOX_FILTER_STATE;
        if (fields & 0x8)
                mask |= IVT_CB0_MSR_PMON_BOX_FILTER_NID;
        if (fields & 0x10)
                mask |= IVT_CB0_MSR_PMON_BOX_FILTER_OPC;

        return mask;
}

static struct event_constraint *
ivt_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        return __snbep_cbox_get_constraint(box, event, ivt_cbox_filter_mask);
}

static int ivt_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct extra_reg *er;
        int idx = 0;

        for (er = ivt_uncore_cbox_extra_regs; er->msr; er++) {
                if (er->event != (event->hw.config & er->config_mask))
                        continue;
                idx |= er->idx;
        }

        if (idx) {
                reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
                        SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
                reg1->config = event->attr.config1 & ivt_cbox_filter_mask(idx);
                reg1->idx = idx;
        }
        return 0;
}

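/*
 * The IvyTown Cbox filter value is 64 bits wide and lives in two MSRs
 * (FILTER and FILTER1, 6 apart), so the shared-reg value is written out
 * in two 32-bit halves.
 */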
1226 static void ivt_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1227 {
1228         struct hw_perf_event *hwc = &event->hw;
1229         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1230
1231         if (reg1->idx != EXTRA_REG_NONE) {
1232                 u64 filter = uncore_shared_reg_config(box, 0);
1233                 wrmsrl(reg1->reg, filter & 0xffffffff);
1234                 wrmsrl(reg1->reg + 6, filter >> 32);
1235         }
1236
1237         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1238 }
1239
1240 static struct intel_uncore_ops ivt_uncore_cbox_ops = {
1241         .init_box               = ivt_uncore_msr_init_box,
1242         .disable_box            = snbep_uncore_msr_disable_box,
1243         .enable_box             = snbep_uncore_msr_enable_box,
1244         .disable_event          = snbep_uncore_msr_disable_event,
1245         .enable_event           = ivt_cbox_enable_event,
1246         .read_counter           = uncore_msr_read_counter,
1247         .hw_config              = ivt_cbox_hw_config,
1248         .get_constraint         = ivt_cbox_get_constraint,
1249         .put_constraint         = snbep_cbox_put_constraint,
1250 };
1251
1252 static struct intel_uncore_type ivt_uncore_cbox = {
1253         .name                   = "cbox",
1254         .num_counters           = 4,
1255         .num_boxes              = 15,
1256         .perf_ctr_bits          = 44,
1257         .event_ctl              = SNBEP_C0_MSR_PMON_CTL0,
1258         .perf_ctr               = SNBEP_C0_MSR_PMON_CTR0,
1259         .event_mask             = IVT_CBO_MSR_PMON_RAW_EVENT_MASK,
1260         .box_ctl                = SNBEP_C0_MSR_PMON_BOX_CTL,
1261         .msr_offset             = SNBEP_CBO_MSR_OFFSET,
1262         .num_shared_regs        = 1,
1263         .constraints            = snbep_uncore_cbox_constraints,
1264         .ops                    = &ivt_uncore_cbox_ops,
1265         .format_group           = &ivt_uncore_cbox_format_group,
1266 };
1267
1268 static struct intel_uncore_ops ivt_uncore_pcu_ops = {
1269         IVT_UNCORE_MSR_OPS_COMMON_INIT(),
1270         .hw_config              = snbep_pcu_hw_config,
1271         .get_constraint         = snbep_pcu_get_constraint,
1272         .put_constraint         = snbep_pcu_put_constraint,
1273 };
1274
1275 static struct intel_uncore_type ivt_uncore_pcu = {
1276         .name                   = "pcu",
1277         .num_counters           = 4,
1278         .num_boxes              = 1,
1279         .perf_ctr_bits          = 48,
1280         .perf_ctr               = SNBEP_PCU_MSR_PMON_CTR0,
1281         .event_ctl              = SNBEP_PCU_MSR_PMON_CTL0,
1282         .event_mask             = IVT_PCU_MSR_PMON_RAW_EVENT_MASK,
1283         .box_ctl                = SNBEP_PCU_MSR_PMON_BOX_CTL,
1284         .num_shared_regs        = 1,
1285         .ops                    = &ivt_uncore_pcu_ops,
1286         .format_group           = &ivt_uncore_pcu_format_group,
1287 };
1288
1289 static struct intel_uncore_type *ivt_msr_uncores[] = {
1290         &ivt_uncore_ubox,
1291         &ivt_uncore_cbox,
1292         &ivt_uncore_pcu,
1293         NULL,
1294 };
1295
1296 static struct intel_uncore_type ivt_uncore_ha = {
1297         .name           = "ha",
1298         .num_counters   = 4,
1299         .num_boxes      = 2,
1300         .perf_ctr_bits  = 48,
1301         IVT_UNCORE_PCI_COMMON_INIT(),
1302 };
1303
1304 static struct intel_uncore_type ivt_uncore_imc = {
1305         .name           = "imc",
1306         .num_counters   = 4,
1307         .num_boxes      = 8,
1308         .perf_ctr_bits  = 48,
1309         .fixed_ctr_bits = 48,
1310         .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1311         .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1312         IVT_UNCORE_PCI_COMMON_INIT(),
1313 };
1314
1315 static struct intel_uncore_type ivt_uncore_qpi = {
1316         .name           = "qpi",
1317         .num_counters   = 4,
1318         .num_boxes      = 3,
1319         .perf_ctr_bits  = 48,
1320         .perf_ctr       = SNBEP_PCI_PMON_CTR0,
1321         .event_ctl      = SNBEP_PCI_PMON_CTL0,
1322         .event_mask     = IVT_QPI_PCI_PMON_RAW_EVENT_MASK,
1323         .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,
1324         .ops            = &ivt_uncore_pci_ops,
1325         .format_group   = &ivt_uncore_qpi_format_group,
1326 };
1327
1328 static struct intel_uncore_type ivt_uncore_r2pcie = {
1329         .name           = "r2pcie",
1330         .num_counters   = 4,
1331         .num_boxes      = 1,
1332         .perf_ctr_bits  = 44,
1333         .constraints    = snbep_uncore_r2pcie_constraints,
1334         IVT_UNCORE_PCI_COMMON_INIT(),
1335 };
1336
1337 static struct intel_uncore_type ivt_uncore_r3qpi = {
1338         .name           = "r3qpi",
1339         .num_counters   = 3,
1340         .num_boxes      = 2,
1341         .perf_ctr_bits  = 44,
1342         .constraints    = snbep_uncore_r3qpi_constraints,
1343         IVT_UNCORE_PCI_COMMON_INIT(),
1344 };
1345
1346 enum {
1347         IVT_PCI_UNCORE_HA,
1348         IVT_PCI_UNCORE_IMC,
1349         IVT_PCI_UNCORE_QPI,
1350         IVT_PCI_UNCORE_R2PCIE,
1351         IVT_PCI_UNCORE_R3QPI,
1352 };
1353
1354 static struct intel_uncore_type *ivt_pci_uncores[] = {
1355         [IVT_PCI_UNCORE_HA]     = &ivt_uncore_ha,
1356         [IVT_PCI_UNCORE_IMC]    = &ivt_uncore_imc,
1357         [IVT_PCI_UNCORE_QPI]    = &ivt_uncore_qpi,
1358         [IVT_PCI_UNCORE_R2PCIE] = &ivt_uncore_r2pcie,
1359         [IVT_PCI_UNCORE_R3QPI]  = &ivt_uncore_r3qpi,
1360         NULL,
1361 };
1362
1363 static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = {
1364         { /* Home Agent 0 */
1365                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1366                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 0),
1367         },
1368         { /* Home Agent 1 */
1369                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1370                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 1),
1371         },
1372         { /* MC0 Channel 0 */
1373                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
1374                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 0),
1375         },
1376         { /* MC0 Channel 1 */
1377                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
1378                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 1),
1379         },
1380         { /* MC0 Channel 3 */
1381                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
1382                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 2),
1383         },
1384         { /* MC0 Channel 4 */
1385                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
1386                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 3),
1387         },
1388         { /* MC1 Channel 0 */
1389                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
1390                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 4),
1391         },
1392         { /* MC1 Channel 1 */
1393                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
1394                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 5),
1395         },
1396         { /* MC1 Channel 3 */
1397                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
1398                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 6),
1399         },
1400         { /* MC1 Channel 4 */
1401                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
1402                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 7),
1403         },
1404         { /* QPI0 Port 0 */
1405                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
1406                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 0),
1407         },
1408         { /* QPI0 Port 1 */
1409                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
1410                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 1),
1411         },
1412         { /* QPI1 Port 2 */
1413                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
1414                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 2),
1415         },
1416         { /* R2PCIe */
1417                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
1418                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R2PCIE, 0),
1419         },
1420         { /* R3QPI0 Link 0 */
1421                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
1422                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 0),
1423         },
1424         { /* R3QPI0 Link 1 */
1425                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
1426                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 1),
1427         },
1428         { /* R3QPI1 Link 2 */
1429                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
1430                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 2),
1431         },
1432         { /* end: all zeroes */ }
1433 };
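/*
 * Each table entry's driver_data packs two things: the uncore type (an
 * IVT_PCI_UNCORE_* index into ivt_pci_uncores[]) and the box index within
 * that type. UNCORE_PCI_DEV_DATA() does the packing here; the generic
 * uncore PCI probe code unpacks it to route each device to its type.
 */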
1434
1435 static struct pci_driver ivt_uncore_pci_driver = {
1436         .name           = "ivt_uncore",
1437         .id_table       = ivt_uncore_pci_ids,
1438 };
1439 /* end of IvyTown uncore support */
1440
1441 /* Sandy Bridge uncore support */
1442 static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1443 {
1444         struct hw_perf_event *hwc = &event->hw;
1445
1446         if (hwc->idx < UNCORE_PMC_IDX_FIXED)
1447                 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
1448         else
1449                 wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
1450 }
1451
1452 static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1453 {
1454         wrmsrl(event->hw.config_base, 0);
1455 }
1456
1457 static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
1458 {
1459         if (box->pmu->pmu_idx == 0) {
1460                 wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
1461                         SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
1462         }
1463 }
1464
1465 static struct uncore_event_desc snb_uncore_events[] = {
1466         INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
1467         { /* end: all zeroes */ },
1468 };
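/*
 * Sketch of how the alias above reaches userspace (assuming the generic
 * uncore code registers this PMU as "uncore_cbox_<n>" in sysfs; the name
 * is an assumption, not defined in this file):
 *
 *   perf stat -a -e 'uncore_cbox_0/clockticks/' sleep 1
 *   perf stat -a -e 'uncore_cbox_0/event=0xff,umask=0x00/' sleep 1
 *
 * Both forms resolve to the same raw config via the format attributes
 * declared just below.
 */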
1469
1470 static struct attribute *snb_uncore_formats_attr[] = {
1471         &format_attr_event.attr,
1472         &format_attr_umask.attr,
1473         &format_attr_edge.attr,
1474         &format_attr_inv.attr,
1475         &format_attr_cmask5.attr,
1476         NULL,
1477 };
1478
1479 static struct attribute_group snb_uncore_format_group = {
1480         .name           = "format",
1481         .attrs          = snb_uncore_formats_attr,
1482 };
1483
1484 static struct intel_uncore_ops snb_uncore_msr_ops = {
1485         .init_box       = snb_uncore_msr_init_box,
1486         .disable_event  = snb_uncore_msr_disable_event,
1487         .enable_event   = snb_uncore_msr_enable_event,
1488         .read_counter   = uncore_msr_read_counter,
1489 };
1490
1491 static struct event_constraint snb_uncore_cbox_constraints[] = {
1492         UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
1493         UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
1494         EVENT_CONSTRAINT_END
1495 };
1496
1497 static struct intel_uncore_type snb_uncore_cbox = {
1498         .name           = "cbox",
1499         .num_counters   = 2,
1500         .num_boxes      = 4,
1501         .perf_ctr_bits  = 44,
1502         .fixed_ctr_bits = 48,
1503         .perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
1504         .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
1505         .fixed_ctr      = SNB_UNC_FIXED_CTR,
1506         .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
1507         .single_fixed   = 1,
1508         .event_mask     = SNB_UNC_RAW_EVENT_MASK,
1509         .msr_offset     = SNB_UNC_CBO_MSR_OFFSET,
1510         .constraints    = snb_uncore_cbox_constraints,
1511         .ops            = &snb_uncore_msr_ops,
1512         .format_group   = &snb_uncore_format_group,
1513         .event_descs    = snb_uncore_events,
1514 };
1515
1516 static struct intel_uncore_type *snb_msr_uncores[] = {
1517         &snb_uncore_cbox,
1518         NULL,
1519 };
1520 /* end of Sandy Bridge uncore support */
1521
1522 /* Nehalem uncore support */
1523 static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
1524 {
1525         wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
1526 }
1527
1528 static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
1529 {
1530         wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
1531 }
1532
1533 static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1534 {
1535         struct hw_perf_event *hwc = &event->hw;
1536
1537         if (hwc->idx < UNCORE_PMC_IDX_FIXED)
1538                 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
1539         else
1540                 wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
1541 }
1542
1543 static struct attribute *nhm_uncore_formats_attr[] = {
1544         &format_attr_event.attr,
1545         &format_attr_umask.attr,
1546         &format_attr_edge.attr,
1547         &format_attr_inv.attr,
1548         &format_attr_cmask8.attr,
1549         NULL,
1550 };
1551
1552 static struct attribute_group nhm_uncore_format_group = {
1553         .name = "format",
1554         .attrs = nhm_uncore_formats_attr,
1555 };
1556
1557 static struct uncore_event_desc nhm_uncore_events[] = {
1558         INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
1559         INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
1560         INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
1561         INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
1562         INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
1563         INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
1564         INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
1565         INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
1566         INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
1567         { /* end: all zeroes */ },
1568 };
1569
1570 static struct intel_uncore_ops nhm_uncore_msr_ops = {
1571         .disable_box    = nhm_uncore_msr_disable_box,
1572         .enable_box     = nhm_uncore_msr_enable_box,
1573         .disable_event  = snb_uncore_msr_disable_event,
1574         .enable_event   = nhm_uncore_msr_enable_event,
1575         .read_counter   = uncore_msr_read_counter,
1576 };
1577
1578 static struct intel_uncore_type nhm_uncore = {
1579         .name           = "",
1580         .num_counters   = 8,
1581         .num_boxes      = 1,
1582         .perf_ctr_bits  = 48,
1583         .fixed_ctr_bits = 48,
1584         .event_ctl      = NHM_UNC_PERFEVTSEL0,
1585         .perf_ctr       = NHM_UNC_UNCORE_PMC0,
1586         .fixed_ctr      = NHM_UNC_FIXED_CTR,
1587         .fixed_ctl      = NHM_UNC_FIXED_CTR_CTRL,
1588         .event_mask     = NHM_UNC_RAW_EVENT_MASK,
1589         .event_descs    = nhm_uncore_events,
1590         .ops            = &nhm_uncore_msr_ops,
1591         .format_group   = &nhm_uncore_format_group,
1592 };
1593
1594 static struct intel_uncore_type *nhm_msr_uncores[] = {
1595         &nhm_uncore,
1596         NULL,
1597 };
1598 /* end of Nehalem uncore support */
1599
1600 /* Nehalem-EX uncore support */
1601 DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
1602 DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
1603 DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
1604 DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
1605
1606 static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
1607 {
1608         wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
1609 }
1610
1611 static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
1612 {
1613         unsigned msr = uncore_msr_box_ctl(box);
1614         u64 config;
1615
1616         if (msr) {
1617                 rdmsrl(msr, config);
1618                 config &= ~((1ULL << uncore_num_counters(box)) - 1);
1619                 /* WBox has a fixed counter */
1620                 if (uncore_msr_fixed_ctl(box))
1621                         config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
1622                 wrmsrl(msr, config);
1623         }
1624 }
1625
1626 static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
1627 {
1628         unsigned msr = uncore_msr_box_ctl(box);
1629         u64 config;
1630
1631         if (msr) {
1632                 rdmsrl(msr, config);
1633                 config |= (1ULL << uncore_num_counters(box)) - 1;
1634                 /* WBox has a fixed counter */
1635                 if (uncore_msr_fixed_ctl(box))
1636                         config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
1637                 wrmsrl(msr, config);
1638         }
1639 }
1640
1641 static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1642 {
1643         wrmsrl(event->hw.config_base, 0);
1644 }
1645
1646 static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1647 {
1648         struct hw_perf_event *hwc = &event->hw;
1649
1650         if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
1651                 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
1652         else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
1653                 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
1654         else
1655                 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
1656 }
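/*
 * Enable-bit selection above, in short: when bit 0 of the box type's
 * event_mask is a valid config bit (i.e. the hardware uses bit 0 for the
 * event encoding), the enable flag lives in bit 22 of the control register
 * instead, so writing EN_BIT0 there would corrupt the event selection.
 */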
1657
1658 #define NHMEX_UNCORE_OPS_COMMON_INIT()                          \
1659         .init_box       = nhmex_uncore_msr_init_box,            \
1660         .disable_box    = nhmex_uncore_msr_disable_box,         \
1661         .enable_box     = nhmex_uncore_msr_enable_box,          \
1662         .disable_event  = nhmex_uncore_msr_disable_event,       \
1663         .read_counter   = uncore_msr_read_counter
1664
1665 static struct intel_uncore_ops nhmex_uncore_ops = {
1666         NHMEX_UNCORE_OPS_COMMON_INIT(),
1667         .enable_event   = nhmex_uncore_msr_enable_event,
1668 };
1669
1670 static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
1671         &format_attr_event.attr,
1672         &format_attr_edge.attr,
1673         NULL,
1674 };
1675
1676 static struct attribute_group nhmex_uncore_ubox_format_group = {
1677         .name           = "format",
1678         .attrs          = nhmex_uncore_ubox_formats_attr,
1679 };
1680
1681 static struct intel_uncore_type nhmex_uncore_ubox = {
1682         .name           = "ubox",
1683         .num_counters   = 1,
1684         .num_boxes      = 1,
1685         .perf_ctr_bits  = 48,
1686         .event_ctl      = NHMEX_U_MSR_PMON_EV_SEL,
1687         .perf_ctr       = NHMEX_U_MSR_PMON_CTR,
1688         .event_mask     = NHMEX_U_PMON_RAW_EVENT_MASK,
1689         .box_ctl        = NHMEX_U_MSR_PMON_GLOBAL_CTL,
1690         .ops            = &nhmex_uncore_ops,
1691         .format_group   = &nhmex_uncore_ubox_format_group
1692 };
1693
1694 static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
1695         &format_attr_event.attr,
1696         &format_attr_umask.attr,
1697         &format_attr_edge.attr,
1698         &format_attr_inv.attr,
1699         &format_attr_thresh8.attr,
1700         NULL,
1701 };
1702
1703 static struct attribute_group nhmex_uncore_cbox_format_group = {
1704         .name = "format",
1705         .attrs = nhmex_uncore_cbox_formats_attr,
1706 };
1707
1708 /* MSR offset for each instance of cbox */
1709 static unsigned nhmex_cbox_msr_offsets[] = {
1710         0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
1711 };
1712
1713 static struct intel_uncore_type nhmex_uncore_cbox = {
1714         .name                   = "cbox",
1715         .num_counters           = 6,
1716         .num_boxes              = 10,
1717         .perf_ctr_bits          = 48,
1718         .event_ctl              = NHMEX_C0_MSR_PMON_EV_SEL0,
1719         .perf_ctr               = NHMEX_C0_MSR_PMON_CTR0,
1720         .event_mask             = NHMEX_PMON_RAW_EVENT_MASK,
1721         .box_ctl                = NHMEX_C0_MSR_PMON_GLOBAL_CTL,
1722         .msr_offsets            = nhmex_cbox_msr_offsets,
1723         .pair_ctr_ctl           = 1,
1724         .ops                    = &nhmex_uncore_ops,
1725         .format_group           = &nhmex_uncore_cbox_format_group
1726 };
1727
1728 static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
1729         INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
1730         { /* end: all zeroes */ },
1731 };
1732
1733 static struct intel_uncore_type nhmex_uncore_wbox = {
1734         .name                   = "wbox",
1735         .num_counters           = 4,
1736         .num_boxes              = 1,
1737         .perf_ctr_bits          = 48,
1738         .event_ctl              = NHMEX_W_MSR_PMON_CNT0,
1739         .perf_ctr               = NHMEX_W_MSR_PMON_EVT_SEL0,
1740         .fixed_ctr              = NHMEX_W_MSR_PMON_FIXED_CTR,
1741         .fixed_ctl              = NHMEX_W_MSR_PMON_FIXED_CTL,
1742         .event_mask             = NHMEX_PMON_RAW_EVENT_MASK,
1743         .box_ctl                = NHMEX_W_MSR_GLOBAL_CTL,
1744         .pair_ctr_ctl           = 1,
1745         .event_descs            = nhmex_uncore_wbox_events,
1746         .ops                    = &nhmex_uncore_ops,
1747         .format_group           = &nhmex_uncore_cbox_format_group
1748 };
1749
1750 static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1751 {
1752         struct hw_perf_event *hwc = &event->hw;
1753         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1754         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1755         int ctr, ev_sel;
1756
1757         ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
1758                 NHMEX_B_PMON_CTR_SHIFT;
1759         ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
1760                   NHMEX_B_PMON_CTL_EV_SEL_SHIFT;
1761
1762         /* events that do not use the match/mask registers */
1763         if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
1764             (ctr == 2 && ev_sel != 0x4) || ctr == 3)
1765                 return 0;
1766
1767         if (box->pmu->pmu_idx == 0)
1768                 reg1->reg = NHMEX_B0_MSR_MATCH;
1769         else
1770                 reg1->reg = NHMEX_B1_MSR_MATCH;
1771         reg1->idx = 0;
1772         reg1->config = event->attr.config1;
1773         reg2->config = event->attr.config2;
1774         return 0;
1775 }
1776
1777 static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1778 {
1779         struct hw_perf_event *hwc = &event->hw;
1780         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1781         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1782
1783         if (reg1->idx != EXTRA_REG_NONE) {
1784                 wrmsrl(reg1->reg, reg1->config);
1785                 wrmsrl(reg1->reg + 1, reg2->config);
1786         }
1787         wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
1788                 (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
1789 }
1790
1791 /*
1792  * The Bbox has 4 counters, but each counter monitors different events.
1793  * Use bits 6-7 in the event config to select the counter.
1794  */
1795 static struct event_constraint nhmex_uncore_bbox_constraints[] = {
1796         EVENT_CONSTRAINT(0x00, 1, 0xc0),
1797         EVENT_CONSTRAINT(0x40, 2, 0xc0),
1798         EVENT_CONSTRAINT(0x80, 4, 0xc0),
1799         EVENT_CONSTRAINT(0xc0, 8, 0xc0),
1800         EVENT_CONSTRAINT_END,
1801 };
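/*
 * Worked example (illustrative): an event whose config has counter field
 * 0b10 in bits 6-7 gives (config & 0xc0) == 0x80, matching the third
 * constraint above; its counter mask of 4 (== 1 << 2) restricts the event
 * to counter 2.
 */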
1802
1803 static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
1804         &format_attr_event5.attr,
1805         &format_attr_counter.attr,
1806         &format_attr_match.attr,
1807         &format_attr_mask.attr,
1808         NULL,
1809 };
1810
1811 static struct attribute_group nhmex_uncore_bbox_format_group = {
1812         .name = "format",
1813         .attrs = nhmex_uncore_bbox_formats_attr,
1814 };
1815
1816 static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
1817         NHMEX_UNCORE_OPS_COMMON_INIT(),
1818         .enable_event           = nhmex_bbox_msr_enable_event,
1819         .hw_config              = nhmex_bbox_hw_config,
1820         .get_constraint         = uncore_get_constraint,
1821         .put_constraint         = uncore_put_constraint,
1822 };
1823
1824 static struct intel_uncore_type nhmex_uncore_bbox = {
1825         .name                   = "bbox",
1826         .num_counters           = 4,
1827         .num_boxes              = 2,
1828         .perf_ctr_bits          = 48,
1829         .event_ctl              = NHMEX_B0_MSR_PMON_CTL0,
1830         .perf_ctr               = NHMEX_B0_MSR_PMON_CTR0,
1831         .event_mask             = NHMEX_B_PMON_RAW_EVENT_MASK,
1832         .box_ctl                = NHMEX_B0_MSR_PMON_GLOBAL_CTL,
1833         .msr_offset             = NHMEX_B_MSR_OFFSET,
1834         .pair_ctr_ctl           = 1,
1835         .num_shared_regs        = 1,
1836         .constraints            = nhmex_uncore_bbox_constraints,
1837         .ops                    = &nhmex_uncore_bbox_ops,
1838         .format_group           = &nhmex_uncore_bbox_format_group
1839 };
1840
1841 static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1842 {
1843         struct hw_perf_event *hwc = &event->hw;
1844         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1845         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1846
1847         /* only TO_R_PROG_EV event uses the match/mask register */
1848         if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
1849             NHMEX_S_EVENT_TO_R_PROG_EV)
1850                 return 0;
1851
1852         if (box->pmu->pmu_idx == 0)
1853                 reg1->reg = NHMEX_S0_MSR_MM_CFG;
1854         else
1855                 reg1->reg = NHMEX_S1_MSR_MM_CFG;
1856         reg1->idx = 0;
1857         reg1->config = event->attr.config1;
1858         reg2->config = event->attr.config2;
1859         return 0;
1860 }
1861
1862 static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1863 {
1864         struct hw_perf_event *hwc = &event->hw;
1865         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1866         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1867
1868         if (reg1->idx != EXTRA_REG_NONE) {
1869                 wrmsrl(reg1->reg, 0);
1870                 wrmsrl(reg1->reg + 1, reg1->config);
1871                 wrmsrl(reg1->reg + 2, reg2->config);
1872                 wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
1873         }
1874         wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
1875 }
1876
1877 static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
1878         &format_attr_event.attr,
1879         &format_attr_umask.attr,
1880         &format_attr_edge.attr,
1881         &format_attr_inv.attr,
1882         &format_attr_thresh8.attr,
1883         &format_attr_match.attr,
1884         &format_attr_mask.attr,
1885         NULL,
1886 };
1887
1888 static struct attribute_group nhmex_uncore_sbox_format_group = {
1889         .name                   = "format",
1890         .attrs                  = nhmex_uncore_sbox_formats_attr,
1891 };
1892
1893 static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
1894         NHMEX_UNCORE_OPS_COMMON_INIT(),
1895         .enable_event           = nhmex_sbox_msr_enable_event,
1896         .hw_config              = nhmex_sbox_hw_config,
1897         .get_constraint         = uncore_get_constraint,
1898         .put_constraint         = uncore_put_constraint,
1899 };
1900
1901 static struct intel_uncore_type nhmex_uncore_sbox = {
1902         .name                   = "sbox",
1903         .num_counters           = 4,
1904         .num_boxes              = 2,
1905         .perf_ctr_bits          = 48,
1906         .event_ctl              = NHMEX_S0_MSR_PMON_CTL0,
1907         .perf_ctr               = NHMEX_S0_MSR_PMON_CTR0,
1908         .event_mask             = NHMEX_PMON_RAW_EVENT_MASK,
1909         .box_ctl                = NHMEX_S0_MSR_PMON_GLOBAL_CTL,
1910         .msr_offset             = NHMEX_S_MSR_OFFSET,
1911         .pair_ctr_ctl           = 1,
1912         .num_shared_regs        = 1,
1913         .ops                    = &nhmex_uncore_sbox_ops,
1914         .format_group           = &nhmex_uncore_sbox_format_group
1915 };
1916
1917 enum {
1918         EXTRA_REG_NHMEX_M_FILTER,
1919         EXTRA_REG_NHMEX_M_DSP,
1920         EXTRA_REG_NHMEX_M_ISS,
1921         EXTRA_REG_NHMEX_M_MAP,
1922         EXTRA_REG_NHMEX_M_MSC_THR,
1923         EXTRA_REG_NHMEX_M_PGT,
1924         EXTRA_REG_NHMEX_M_PLD,
1925         EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
1926 };
1927
1928 static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
1929         MBOX_INC_SEL_EXTAR_REG(0x0, DSP),
1930         MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR),
1931         MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR),
1932         MBOX_INC_SEL_EXTAR_REG(0x9, ISS),
1933         /* event 0xa uses two extra registers */
1934         MBOX_INC_SEL_EXTAR_REG(0xa, ISS),
1935         MBOX_INC_SEL_EXTAR_REG(0xa, PLD),
1936         MBOX_INC_SEL_EXTAR_REG(0xb, PLD),
1937         /* events 0xd ~ 0x10 use the same extra register */
1938         MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC),
1939         MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC),
1940         MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC),
1941         MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC),
1942         MBOX_INC_SEL_EXTAR_REG(0x16, PGT),
1943         MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
1944         MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
1945         MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
1946         MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
1947         EVENT_EXTRA_END
1948 };
1949
1950 /* Nehalem-EX or Westmere-EX? */
1951 static bool uncore_nhmex;
1952
1953 static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
1954 {
1955         struct intel_uncore_extra_reg *er;
1956         unsigned long flags;
1957         bool ret = false;
1958         u64 mask;
1959
1960         if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
1961                 er = &box->shared_regs[idx];
1962                 raw_spin_lock_irqsave(&er->lock, flags);
1963                 if (!atomic_read(&er->ref) || er->config == config) {
1964                         atomic_inc(&er->ref);
1965                         er->config = config;
1966                         ret = true;
1967                 }
1968                 raw_spin_unlock_irqrestore(&er->lock, flags);
1969
1970                 return ret;
1971         }
1972         /*
1973          * The ZDP_CTL_FVC MSR has 4 fields which are used to control
1974          * events 0xd ~ 0x10. Besides these 4 per-event fields, there are
1975          * additional fields which are shared by all 4 events.
1976          */
1977         idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1978         if (WARN_ON_ONCE(idx >= 4))
1979                 return false;
1980
1981         /* mask of the shared fields */
1982         if (uncore_nhmex)
1983                 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
1984         else
1985                 mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
1986         er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
1987
1988         raw_spin_lock_irqsave(&er->lock, flags);
1989         /* add mask of the non-shared field if it's in use */
1990         if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
1991                 if (uncore_nhmex)
1992                         mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1993                 else
1994                         mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1995         }
1996
1997         if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
1998                 atomic_add(1 << (idx * 8), &er->ref);
1999                 if (uncore_nhmex)
2000                         mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
2001                                 NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2002                 else
2003                         mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
2004                                 WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2005                 er->config &= ~mask;
2006                 er->config |= (config & mask);
2007                 ret = true;
2008         }
2009         raw_spin_unlock_irqrestore(&er->lock, flags);
2010
2011         return ret;
2012 }
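/*
 * The ZDP_CTL_FVC reference count above is packed: er->ref holds four
 * 8-bit counts, one per event 0xd ~ 0x10, so taking field idx is
 * atomic_add(1 << (idx * 8), &er->ref). A sketch of inspecting one
 * sub-count (illustrative only):
 *
 *   int refs = __BITS_VALUE(atomic_read(&er->ref), idx, 8);
 */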
2013
2014 static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
2015 {
2016         struct intel_uncore_extra_reg *er;
2017
2018         if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2019                 er = &box->shared_regs[idx];
2020                 atomic_dec(&er->ref);
2021                 return;
2022         }
2023
2024         idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2025         er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2026         atomic_sub(1 << (idx * 8), &er->ref);
2027 }
2028
2029 static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
2030 {
2031         struct hw_perf_event *hwc = &event->hw;
2032         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2033         u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
2034         u64 config = reg1->config;
2035
2036         /* get the non-shared control bits and shift them */
2037         idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2038         if (uncore_nhmex)
2039                 config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2040         else
2041                 config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2042         if (new_idx > orig_idx) {
2043                 idx = new_idx - orig_idx;
2044                 config <<= 3 * idx;
2045         } else {
2046                 idx = orig_idx - new_idx;
2047                 config >>= 3 * idx;
2048         }
2049
2050         /* add the shared control bits back */
2051         if (uncore_nhmex)
2052                 config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2053         else
2054                 config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2056         if (modify) {
2057                 /* adjust the main event selector */
2058                 if (new_idx > orig_idx)
2059                         hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
2060                 else
2061                         hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
2062                 reg1->config = config;
2063                 reg1->idx = ~0xff | new_idx;
2064         }
2065         return config;
2066 }
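/*
 * Illustrative numbers for the shift above: the code lays the per-event
 * FVC fields out 3 bits apart (hence the 3 * idx scaling), so moving an
 * event from field 1 to field 3 shifts its non-shared bits left by 6.
 */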
2067
2068 static struct event_constraint *
2069 nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2070 {
2071         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2072         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2073         int i, idx[2], alloc = 0;
2074         u64 config1 = reg1->config;
2075
2076         idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
2077         idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
2078 again:
2079         for (i = 0; i < 2; i++) {
2080                 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
2081                         idx[i] = 0xff;
2082
2083                 if (idx[i] == 0xff)
2084                         continue;
2085
2086                 if (!nhmex_mbox_get_shared_reg(box, idx[i],
2087                                 __BITS_VALUE(config1, i, 32)))
2088                         goto fail;
2089                 alloc |= (0x1 << i);
2090         }
2091
2092         /* for the match/mask registers */
2093         if (reg2->idx != EXTRA_REG_NONE &&
2094             (uncore_box_is_fake(box) || !reg2->alloc) &&
2095             !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
2096                 goto fail;
2097
2098         /*
2099          * If it's a fake box -- as per validate_{group,event}() -- we
2100          * shouldn't touch event state; and since both will only call
2101          * get_event_constraints() once on each event, we can avoid the
2102          * need for reg->alloc.
2103          */
2104         if (!uncore_box_is_fake(box)) {
2105                 if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
2106                         nhmex_mbox_alter_er(event, idx[0], true);
2107                 reg1->alloc |= alloc;
2108                 if (reg2->idx != EXTRA_REG_NONE)
2109                         reg2->alloc = 1;
2110         }
2111         return NULL;
2112 fail:
2113         if (idx[0] != 0xff && !(alloc & 0x1) &&
2114             idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2115                 /*
2116                  * events 0xd ~ 0x10 are functionally identical, but are
2117                  * controlled by different fields in the ZDP_CTL_FVC
2118                  * register. If we failed to take one field, try the
2119                  * remaining 3 choices.
2120                  */
2121                 BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
2122                 idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2123                 idx[0] = (idx[0] + 1) % 4;
2124                 idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2125                 if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
2126                         config1 = nhmex_mbox_alter_er(event, idx[0], false);
2127                         goto again;
2128                 }
2129         }
2130
2131         if (alloc & 0x1)
2132                 nhmex_mbox_put_shared_reg(box, idx[0]);
2133         if (alloc & 0x2)
2134                 nhmex_mbox_put_shared_reg(box, idx[1]);
2135         return &constraint_empty;
2136 }
2137
2138 static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
2139 {
2140         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2141         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2142
2143         if (uncore_box_is_fake(box))
2144                 return;
2145
2146         if (reg1->alloc & 0x1)
2147                 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
2148         if (reg1->alloc & 0x2)
2149                 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
2150         reg1->alloc = 0;
2151
2152         if (reg2->alloc) {
2153                 nhmex_mbox_put_shared_reg(box, reg2->idx);
2154                 reg2->alloc = 0;
2155         }
2156 }
2157
2158 static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
2159 {
2160         if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
2161                 return er->idx;
2162         return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
2163 }
2164
2165 static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2166 {
2167         struct intel_uncore_type *type = box->pmu->type;
2168         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2169         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2170         struct extra_reg *er;
2171         unsigned msr;
2172         int reg_idx = 0;
2173         /*
2174          * The mbox events may require at most 2 extra MSRs. But only the
2175          * lower 32 bits in these MSRs are significant, so we can use
2176          * config1 to pass both MSRs' config values.
2177          */
2178         for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
2179                 if (er->event != (event->hw.config & er->config_mask))
2180                         continue;
2181                 if (event->attr.config1 & ~er->valid_mask)
2182                         return -EINVAL;
2183
2184                 msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
2185                 if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
2186                         return -EINVAL;
2187
2188                 /* always use bits 32~63 to pass the PLD config */
2189                 if (er->idx == EXTRA_REG_NHMEX_M_PLD)
2190                         reg_idx = 1;
2191                 else if (WARN_ON_ONCE(reg_idx > 0))
2192                         return -EINVAL;
2193
2194                 reg1->idx &= ~(0xff << (reg_idx * 8));
2195                 reg1->reg &= ~(0xffff << (reg_idx * 16));
2196                 reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
2197                 reg1->reg |= msr << (reg_idx * 16);
2198                 reg1->config = event->attr.config1;
2199                 reg_idx++;
2200         }
2201         /*
2202          * The mbox only provides the ability to perform address matching
2203          * for the PLD events.
2204          */
2205         if (reg_idx == 2) {
2206                 reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
2207                 if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
2208                         reg2->config = event->attr.config2;
2209                 else
2210                         reg2->config = ~0ULL;
2211                 if (box->pmu->pmu_idx == 0)
2212                         reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
2213                 else
2214                         reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
2215         }
2216         return 0;
2217 }
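/*
 * Layout sketch of the packing done above, two sub-fields per register:
 *
 *   reg1->idx: [ idx of 2nd extra reg : 8 ][ idx of 1st extra reg : 8 ]
 *   reg1->reg: [ MSR of 2nd extra reg : 16 ][ MSR of 1st extra reg : 16 ]
 *
 * Slot 1 (idx bits 8-15, reg bits 16-31) is reserved for the PLD register,
 * whose config always travels in bits 32-63 of attr.config1.
 */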
2218
2219 static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
2220 {
2221         struct intel_uncore_extra_reg *er;
2222         unsigned long flags;
2223         u64 config;
2224
2225         if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
2226                 return box->shared_regs[idx].config;
2227
2228         er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2229         raw_spin_lock_irqsave(&er->lock, flags);
2230         config = er->config;
2231         raw_spin_unlock_irqrestore(&er->lock, flags);
2232         return config;
2233 }
2234
2235 static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2236 {
2237         struct hw_perf_event *hwc = &event->hw;
2238         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2239         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2240         int idx;
2241
2242         idx = __BITS_VALUE(reg1->idx, 0, 8);
2243         if (idx != 0xff)
2244                 wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
2245                         nhmex_mbox_shared_reg_config(box, idx));
2246         idx = __BITS_VALUE(reg1->idx, 1, 8);
2247         if (idx != 0xff)
2248                 wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
2249                         nhmex_mbox_shared_reg_config(box, idx));
2250
2251         if (reg2->idx != EXTRA_REG_NONE) {
2252                 wrmsrl(reg2->reg, 0);
2253                 if (reg2->config != ~0ULL) {
2254                         wrmsrl(reg2->reg + 1,
2255                                 reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
2256                         wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
2257                                 (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
2258                         wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
2259                 }
2260         }
2261
2262         wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
2263 }
2264
2265 DEFINE_UNCORE_FORMAT_ATTR(count_mode,           count_mode,     "config:2-3");
2266 DEFINE_UNCORE_FORMAT_ATTR(storage_mode,         storage_mode,   "config:4-5");
2267 DEFINE_UNCORE_FORMAT_ATTR(wrap_mode,            wrap_mode,      "config:6");
2268 DEFINE_UNCORE_FORMAT_ATTR(flag_mode,            flag_mode,      "config:7");
2269 DEFINE_UNCORE_FORMAT_ATTR(inc_sel,              inc_sel,        "config:9-13");
2270 DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel,         set_flag_sel,   "config:19-21");
2271 DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en,        filter_cfg_en,  "config2:63");
2272 DEFINE_UNCORE_FORMAT_ATTR(filter_match,         filter_match,   "config2:0-33");
2273 DEFINE_UNCORE_FORMAT_ATTR(filter_mask,          filter_mask,    "config2:34-61");
2274 DEFINE_UNCORE_FORMAT_ATTR(dsp,                  dsp,            "config1:0-31");
2275 DEFINE_UNCORE_FORMAT_ATTR(thr,                  thr,            "config1:0-31");
2276 DEFINE_UNCORE_FORMAT_ATTR(fvc,                  fvc,            "config1:0-31");
2277 DEFINE_UNCORE_FORMAT_ATTR(pgt,                  pgt,            "config1:0-31");
2278 DEFINE_UNCORE_FORMAT_ATTR(map,                  map,            "config1:0-31");
2279 DEFINE_UNCORE_FORMAT_ATTR(iss,                  iss,            "config1:0-31");
2280 DEFINE_UNCORE_FORMAT_ATTR(pld,                  pld,            "config1:32-63");
2281
2282 static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
2283         &format_attr_count_mode.attr,
2284         &format_attr_storage_mode.attr,
2285         &format_attr_wrap_mode.attr,
2286         &format_attr_flag_mode.attr,
2287         &format_attr_inc_sel.attr,
2288         &format_attr_set_flag_sel.attr,
2289         &format_attr_filter_cfg_en.attr,
2290         &format_attr_filter_match.attr,
2291         &format_attr_filter_mask.attr,
2292         &format_attr_dsp.attr,
2293         &format_attr_thr.attr,
2294         &format_attr_fvc.attr,
2295         &format_attr_pgt.attr,
2296         &format_attr_map.attr,
2297         &format_attr_iss.attr,
2298         &format_attr_pld.attr,
2299         NULL,
2300 };
2301
2302 static struct attribute_group nhmex_uncore_mbox_format_group = {
2303         .name           = "format",
2304         .attrs          = nhmex_uncore_mbox_formats_attr,
2305 };
2306
2307 static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
2308         INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
2309         INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
2310         { /* end: all zeroes */ },
2311 };
2312
2313 static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
2314         INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
2315         INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
2316         { /* end: all zeroes */ },
2317 };
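/*
 * The aliases above map one-to-one onto the raw format fields; e.g.,
 * assuming the PMU shows up as "uncore_mbox_<n>" in sysfs (an assumption,
 * the name is assigned by the generic uncore code):
 *
 *   perf stat -a -e 'uncore_mbox_0/bbox_cmds_read/' sleep 1
 *   perf stat -a -e 'uncore_mbox_0/inc_sel=0xd,fvc=0x2800/' sleep 1
 *
 * are equivalent on Nehalem-EX; Westmere-EX takes its fvc values from
 * wsmex_uncore_mbox_events instead.
 */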
2318
2319 static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
2320         NHMEX_UNCORE_OPS_COMMON_INIT(),
2321         .enable_event   = nhmex_mbox_msr_enable_event,
2322         .hw_config      = nhmex_mbox_hw_config,
2323         .get_constraint = nhmex_mbox_get_constraint,
2324         .put_constraint = nhmex_mbox_put_constraint,
2325 };
2326
2327 static struct intel_uncore_type nhmex_uncore_mbox = {
2328         .name                   = "mbox",
2329         .num_counters           = 6,
2330         .num_boxes              = 2,
2331         .perf_ctr_bits          = 48,
2332         .event_ctl              = NHMEX_M0_MSR_PMU_CTL0,
2333         .perf_ctr               = NHMEX_M0_MSR_PMU_CNT0,
2334         .event_mask             = NHMEX_M_PMON_RAW_EVENT_MASK,
2335         .box_ctl                = NHMEX_M0_MSR_GLOBAL_CTL,
2336         .msr_offset             = NHMEX_M_MSR_OFFSET,
2337         .pair_ctr_ctl           = 1,
2338         .num_shared_regs        = 8,
2339         .event_descs            = nhmex_uncore_mbox_events,
2340         .ops                    = &nhmex_uncore_mbox_ops,
2341         .format_group           = &nhmex_uncore_mbox_format_group,
2342 };
2343
2344 static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
2345 {
2346         struct hw_perf_event *hwc = &event->hw;
2347         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2348
2349         /* adjust the main event selector and extra register index */
2350         if (reg1->idx % 2) {
2351                 reg1->idx--;
2352                 hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2353         } else {
2354                 reg1->idx++;
2355                 hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2356         }
2357
2358         /* adjust extra register config */
2359         switch (reg1->idx % 6) {
2360         case 2:
2361                 /* shift bits 8~15 down to bits 0~7 */
2362                 reg1->config >>= 8;
2363                 break;
2364         case 3:
2365                 /* shift bits 0~7 up to bits 8~15 */
2366                 reg1->config <<= 8;
2367                 break;
2368         }
2369 }
2370
2371 /*
2372  * Each rbox has 4 event sets, which monitor QPI ports 0~3 or 4~7.
2373  * An event set consists of 6 events; the 3rd and 4th events in
2374  * an event set use the same extra register, so an event set uses
2375  * 5 extra registers.
2376  */
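/*
 * Resulting index mapping, with set = reg1->idx / 6 and idx = reg1->idx % 6:
 *
 *   idx:     0  1  2  3  4  5
 *   er_idx:  0  1  2  2  3  4   (each plus set * 5)
 */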
2377 static struct event_constraint *
2378 nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2379 {
2380         struct hw_perf_event *hwc = &event->hw;
2381         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2382         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2383         struct intel_uncore_extra_reg *er;
2384         unsigned long flags;
2385         int idx, er_idx;
2386         u64 config1;
2387         bool ok = false;
2388
2389         if (!uncore_box_is_fake(box) && reg1->alloc)
2390                 return NULL;
2391
2392         idx = reg1->idx % 6;
2393         config1 = reg1->config;
2394 again:
2395         er_idx = idx;
2396         /* the 3rd and 4th events use the same extra register */
2397         if (er_idx > 2)
2398                 er_idx--;
2399         er_idx += (reg1->idx / 6) * 5;
2400
2401         er = &box->shared_regs[er_idx];
2402         raw_spin_lock_irqsave(&er->lock, flags);
2403         if (idx < 2) {
2404                 if (!atomic_read(&er->ref) || er->config == reg1->config) {
2405                         atomic_inc(&er->ref);
2406                         er->config = reg1->config;
2407                         ok = true;
2408                 }
2409         } else if (idx == 2 || idx == 3) {
2410                 /*
2411                  * these two events use different fields in an extra register,
2412                  * bits 0~7 and bits 8~15 respectively.
2413                  */
2414                 u64 mask = 0xff << ((idx - 2) * 8);
2415                 if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
2416                                 !((er->config ^ config1) & mask)) {
2417                         atomic_add(1 << ((idx - 2) * 8), &er->ref);
2418                         er->config &= ~mask;
2419                         er->config |= config1 & mask;
2420                         ok = true;
2421                 }
2422         } else {
2423                 if (!atomic_read(&er->ref) ||
2424                                 (er->config == (hwc->config >> 32) &&
2425                                  er->config1 == reg1->config &&
2426                                  er->config2 == reg2->config)) {
2427                         atomic_inc(&er->ref);
2428                         er->config = (hwc->config >> 32);
2429                         er->config1 = reg1->config;
2430                         er->config2 = reg2->config;
2431                         ok = true;
2432                 }
2433         }
2434         raw_spin_unlock_irqrestore(&er->lock, flags);
2435
2436         if (!ok) {
2437                 /*
2438                  * The Rbox events are always in pairs. The paired
2439          * events are functionally identical, but use different
2440                  * extra registers. If we failed to take an extra
2441                  * register, try the alternative.
2442                  */
2443                 if (idx % 2)
2444                         idx--;
2445                 else
2446                         idx++;
2447                 if (idx != reg1->idx % 6) {
2448                         if (idx == 2)
2449                                 config1 >>= 8;
2450                         else if (idx == 3)
2451                                 config1 <<= 8;
2452                         goto again;
2453                 }
2454         } else {
2455                 if (!uncore_box_is_fake(box)) {
2456                         if (idx != reg1->idx % 6)
2457                                 nhmex_rbox_alter_er(box, event);
2458                         reg1->alloc = 1;
2459                 }
2460                 return NULL;
2461         }
2462         return &constraint_empty;
2463 }
2464
2465 static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
2466 {
2467         struct intel_uncore_extra_reg *er;
2468         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2469         int idx, er_idx;
2470
2471         if (uncore_box_is_fake(box) || !reg1->alloc)
2472                 return;
2473
2474         idx = reg1->idx % 6;
2475         er_idx = idx;
2476         if (er_idx > 2)
2477                 er_idx--;
2478         er_idx += (reg1->idx / 6) * 5;
2479
2480         er = &box->shared_regs[er_idx];
2481         if (idx == 2 || idx == 3)
2482                 atomic_sub(1 << ((idx - 2) * 8), &er->ref);
2483         else
2484                 atomic_dec(&er->ref);
2485
2486         reg1->alloc = 0;
2487 }
2488
2489 static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2490 {
2491         struct hw_perf_event *hwc = &event->hw;
2492         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2493         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2494         int idx;
2495
2496         idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
2497                 NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2498         if (idx >= 0x18)
2499                 return -EINVAL;
2500
2501         reg1->idx = idx;
2502         reg1->config = event->attr.config1;
2503
2504         switch (idx % 6) {
2505         case 4:
2506         case 5:
2507                 hwc->config |= event->attr.config & (~0ULL << 32);
2508                 reg2->config = event->attr.config2;
2509                 break;
2510         }
2511         return 0;
2512 }
2513
2514 static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2515 {
2516         struct hw_perf_event *hwc = &event->hw;
2517         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2518         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2519         int idx, port;
2520
2521         idx = reg1->idx;
2522         port = idx / 6 + box->pmu->pmu_idx * 4;
2523
2524         switch (idx % 6) {
2525         case 0:
2526                 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
2527                 break;
2528         case 1:
2529                 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
2530                 break;
2531         case 2:
2532         case 3:
2533                 wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
2534                         uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
2535                 break;
2536         case 4:
2537                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
2538                         hwc->config >> 32);
2539                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
2540                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
2541                 break;
2542         case 5:
2543                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
2544                         hwc->config >> 32);
2545                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
2546                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
2547                 break;
2548         }
2549
2550         wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
2551                 (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
2552 }
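/*
 * Illustrative port selection for the writes above: with 4 ports per rbox
 * and 6 events per port, an event with reg1->idx == 7 on the second rbox
 * (pmu_idx == 1) lands on port 7 / 6 + 1 * 4 == 5.
 */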
2553
2554 DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
2555 DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
2556 DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
2557 DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
2558 DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
2559
2560 static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
2561         &format_attr_event5.attr,
2562         &format_attr_xbr_mm_cfg.attr,
2563         &format_attr_xbr_match.attr,
2564         &format_attr_xbr_mask.attr,
2565         &format_attr_qlx_cfg.attr,
2566         &format_attr_iperf_cfg.attr,
2567         NULL,
2568 };
2569
2570 static struct attribute_group nhmex_uncore_rbox_format_group = {
2571         .name = "format",
2572         .attrs = nhmex_uncore_rbox_formats_attr,
2573 };
2574
2575 static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
2576         INTEL_UNCORE_EVENT_DESC(qpi0_flit_send,         "event=0x0,iperf_cfg=0x80000000"),
2577         INTEL_UNCORE_EVENT_DESC(qpi1_flit_send,         "event=0x6,iperf_cfg=0x80000000"),
2578         INTEL_UNCORE_EVENT_DESC(qpi0_idle_flit,         "event=0x0,iperf_cfg=0x40000000"),
2579         INTEL_UNCORE_EVENT_DESC(qpi1_idle_flit,         "event=0x6,iperf_cfg=0x40000000"),
2580         INTEL_UNCORE_EVENT_DESC(qpi0_data_response,     "event=0x0,iperf_cfg=0xc4"),
2581         INTEL_UNCORE_EVENT_DESC(qpi1_data_response,     "event=0x6,iperf_cfg=0xc4"),
2582         { /* end: all zeroes */ },
2583 };
2584
2585 static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
2586         NHMEX_UNCORE_OPS_COMMON_INIT(),
2587         .enable_event           = nhmex_rbox_msr_enable_event,
2588         .hw_config              = nhmex_rbox_hw_config,
2589         .get_constraint         = nhmex_rbox_get_constraint,
2590         .put_constraint         = nhmex_rbox_put_constraint,
2591 };
2592
2593 static struct intel_uncore_type nhmex_uncore_rbox = {
2594         .name                   = "rbox",
2595         .num_counters           = 8,
2596         .num_boxes              = 2,
2597         .perf_ctr_bits          = 48,
2598         .event_ctl              = NHMEX_R_MSR_PMON_CTL0,
2599         .perf_ctr               = NHMEX_R_MSR_PMON_CNT0,
2600         .event_mask             = NHMEX_R_PMON_RAW_EVENT_MASK,
2601         .box_ctl                = NHMEX_R_MSR_GLOBAL_CTL,
2602         .msr_offset             = NHMEX_R_MSR_OFFSET,
2603         .pair_ctr_ctl           = 1,
2604         .num_shared_regs        = 20,
2605         .event_descs            = nhmex_uncore_rbox_events,
2606         .ops                    = &nhmex_uncore_rbox_ops,
2607         .format_group           = &nhmex_uncore_rbox_format_group
2608 };
2609
2610 static struct intel_uncore_type *nhmex_msr_uncores[] = {
2611         &nhmex_uncore_ubox,
2612         &nhmex_uncore_cbox,
2613         &nhmex_uncore_bbox,
2614         &nhmex_uncore_sbox,
2615         &nhmex_uncore_mbox,
2616         &nhmex_uncore_rbox,
2617         &nhmex_uncore_wbox,
2618         NULL,
2619 };
2620 /* end of Nehalem-EX uncore support */
2621
2622 static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
2623 {
2624         struct hw_perf_event *hwc = &event->hw;
2625
2626         hwc->idx = idx;
2627         hwc->last_tag = ++box->tags[idx];
2628
2629         if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
2630                 hwc->event_base = uncore_fixed_ctr(box);
2631                 hwc->config_base = uncore_fixed_ctl(box);
2632                 return;
2633         }
2634
2635         hwc->config_base = uncore_event_ctl(box, hwc->idx);
2636         hwc->event_base  = uncore_perf_ctr(box, hwc->idx);
2637 }
2638
2639 static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
2640 {
2641         u64 prev_count, new_count, delta;
2642         int shift;
2643
2644         if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
2645                 shift = 64 - uncore_fixed_ctr_bits(box);
2646         else
2647                 shift = 64 - uncore_perf_ctr_bits(box);
2648
2649         /* the hrtimer might modify the previous event value */
2650 again:
2651         prev_count = local64_read(&event->hw.prev_count);
2652         new_count = uncore_read_counter(box, event);
2653         if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
2654                 goto again;
2655
2656         delta = (new_count << shift) - (prev_count << shift);
2657         delta >>= shift;
2658
2659         local64_add(delta, &event->count);
2660 }
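/*
 * Worked example of the shift trick above, assuming a 48-bit counter
 * (shift == 16): if prev_count == 0xffffffffffff and the counter has
 * wrapped to new_count == 0x5, then (in 64-bit arithmetic)
 *
 *   delta = ((0x5 << 16) - (0xffffffffffff << 16)) >> 16 = 0x6
 *
 * The subtraction in the shifted domain discards bits above the counter
 * width, so the wraparound is accounted for correctly.
 */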
2661
2662 /*
2663  * The overflow interrupt is unavailable for SandyBridge-EP and broken
2664  * for SandyBridge, so we use a hrtimer to periodically poll the
2665  * counters to avoid overflow.
2666  */
2667 static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
2668 {
2669         struct intel_uncore_box *box;
2670         unsigned long flags;
2671         int bit;
2672
2673         box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
2674         if (!box->n_active || box->cpu != smp_processor_id())
2675                 return HRTIMER_NORESTART;
2676         /*
2677          * disable local interrupts to prevent uncore_pmu_event_start/stop
2678          * from interrupting the update process
2679          */
2680         local_irq_save(flags);
2681
2682         for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
2683                 uncore_perf_event_update(box, box->events[bit]);
2684
2685         local_irq_restore(flags);
2686
2687         hrtimer_forward_now(hrtimer, ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL));
2688         return HRTIMER_RESTART;
2689 }
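/*
 * Back-of-the-envelope numbers for why polling suffices (illustrative):
 * a 44-bit counter incrementing at ~3 GHz wraps after 2^44 / 3e9, roughly
 * 5800 seconds, and a 48-bit one after roughly 26 hours. Any value of
 * UNCORE_PMU_HRTIMER_INTERVAL well below half the shortest wrap period
 * keeps the delta computation above unambiguous.
 */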
2690
2691 static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
2692 {
2693         __hrtimer_start_range_ns(&box->hrtimer,
2694                         ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL), 0,
2695                         HRTIMER_MODE_REL_PINNED, 0);
2696 }
2697
2698 static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
2699 {
2700         hrtimer_cancel(&box->hrtimer);
2701 }
2702
2703 static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
2704 {
2705         hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2706         box->hrtimer.function = uncore_pmu_hrtimer;
2707 }
2708
2709 struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu)
2710 {
2711         struct intel_uncore_box *box;
2712         int i, size;
2713
2714         size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
2715
2716         box = kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
2717         if (!box)
2718                 return NULL;
2719
2720         for (i = 0; i < type->num_shared_regs; i++)
2721                 raw_spin_lock_init(&box->shared_regs[i].lock);
2722
2723         uncore_pmu_init_hrtimer(box);
2724         atomic_set(&box->refcnt, 1);
2725         box->cpu = -1;
2726         box->phys_id = -1;
2727
2728         return box;
2729 }
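/*
 * Note: shared_regs is allocated as a flexible tail behind the box itself
 * (see the size computation above), on the NUMA node of the requesting
 * cpu so that counter bookkeeping stays node-local.
 */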
2730
2731 static struct intel_uncore_box *
2732 uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
2733 {
2734         struct intel_uncore_box *box;
2735
2736         box = *per_cpu_ptr(pmu->box, cpu);
2737         if (box)
2738                 return box;
2739
2740         raw_spin_lock(&uncore_box_lock);
2741         list_for_each_entry(box, &pmu->box_list, list) {
2742                 if (box->phys_id == topology_physical_package_id(cpu)) {
2743                         atomic_inc(&box->refcnt);
2744                         *per_cpu_ptr(pmu->box, cpu) = box;
2745                         break;
2746                 }
2747         }
2748         raw_spin_unlock(&uncore_box_lock);
2749
2750         return *per_cpu_ptr(pmu->box, cpu);
2751 }
2752
2753 static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
2754 {
2755         return container_of(event->pmu, struct intel_uncore_pmu, pmu);
2756 }
2757
2758 static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
2759 {
2760         /*
2761          * perf core schedules events per cpu; uncore events are collected
2762          * by one of the cpus inside a physical package.
2763          */
2764         return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
2765 }
2766
2767 static int
2768 uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
2769 {
2770         struct perf_event *event;
2771         int n, max_count;
2772
2773         max_count = box->pmu->type->num_counters;
2774         if (box->pmu->type->fixed_ctl)
2775                 max_count++;
2776
2777         if (box->n_events >= max_count)
2778                 return -EINVAL;
2779
2780         n = box->n_events;
2781         box->event_list[n] = leader;
2782         n++;
2783         if (!dogrp)
2784                 return n;
2785
2786         list_for_each_entry(event, &leader->sibling_list, group_entry) {
2787                 if (event->state <= PERF_EVENT_STATE_OFF)
2788                         continue;
2789
2790                 if (n >= max_count)
2791                         return -EINVAL;
2792
2793                 box->event_list[n] = event;
2794                 n++;
2795         }
2796         return n;
2797 }
2798
2799 static struct event_constraint *
2800 uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
2801 {
2802         struct intel_uncore_type *type = box->pmu->type;
2803         struct event_constraint *c;
2804
2805         if (type->ops->get_constraint) {
2806                 c = type->ops->get_constraint(box, event);
2807                 if (c)
2808                         return c;
2809         }
2810
2811         if (event->attr.config == UNCORE_FIXED_EVENT)
2812                 return &constraint_fixed;
2813
2814         if (type->constraints) {
2815                 for_each_event_constraint(c, type->constraints) {
2816                         if ((event->hw.config & c->cmask) == c->code)
2817                                 return c;
2818                 }
2819         }
2820
2821         return &type->unconstrainted;
2822 }
2823
2824 static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
2825 {
2826         if (box->pmu->type->ops->put_constraint)
2827                 box->pmu->type->ops->put_constraint(box, event);
2828 }
2829
2830 static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
2831 {
2832         unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
2833         struct event_constraint *c;
2834         int i, wmin, wmax, ret = 0;
2835         struct hw_perf_event *hwc;
2836
2837         bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
2838
2839         for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
2840                 hwc = &box->event_list[i]->hw;
2841                 c = uncore_get_event_constraint(box, box->event_list[i]);
2842                 hwc->constraint = c;
2843                 wmin = min(wmin, c->weight);
2844                 wmax = max(wmax, c->weight);
2845         }
2846
2847         /* fastpath, try to reuse previous register */
2848         for (i = 0; i < n; i++) {
2849                 hwc = &box->event_list[i]->hw;
2850                 c = hwc->constraint;
2851
2852                 /* never assigned */
2853                 if (hwc->idx == -1)
2854                         break;
2855
2856                 /* constraint still honored */
2857                 if (!test_bit(hwc->idx, c->idxmsk))
2858                         break;
2859
2860                 /* not already used */
2861                 if (test_bit(hwc->idx, used_mask))
2862                         break;
2863
2864                 __set_bit(hwc->idx, used_mask);
2865                 if (assign)
2866                         assign[i] = hwc->idx;
2867         }
2868         /* slow path */
2869         if (i != n)
2870                 ret = perf_assign_events(box->event_list, n,
2871                                          wmin, wmax, assign);
2872
2873         if (!assign || ret) {
2874                 for (i = 0; i < n; i++)
2875                         uncore_put_event_constraint(box, box->event_list[i]);
2876         }
2877         return ret ? -EINVAL : 0;
2878 }
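
/*
 * Counter assignment is two-phase: the fast path keeps every event on
 * the counter it used last time, as long as its constraint still
 * allows that counter and no two events claim the same one; otherwise
 * perf_assign_events() redoes the schedule in constraint-weight order,
 * placing the most constrained events first.  For example, if event A
 * may only use counter 0 (weight 1) while event B may use counters 0-3
 * (weight 4), A is assigned before B so that B cannot steal counter 0.
 */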
2879
2880 static void uncore_pmu_event_start(struct perf_event *event, int flags)
2881 {
2882         struct intel_uncore_box *box = uncore_event_to_box(event);
2883         int idx = event->hw.idx;
2884
2885         if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
2886                 return;
2887
2888         if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
2889                 return;
2890
2891         event->hw.state = 0;
2892         box->events[idx] = event;
2893         box->n_active++;
2894         __set_bit(idx, box->active_mask);
2895
2896         local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
2897         uncore_enable_event(box, event);
2898
2899         if (box->n_active == 1) {
2900                 uncore_enable_box(box);
2901                 uncore_pmu_start_hrtimer(box);
2902         }
2903 }
2904
2905 static void uncore_pmu_event_stop(struct perf_event *event, int flags)
2906 {
2907         struct intel_uncore_box *box = uncore_event_to_box(event);
2908         struct hw_perf_event *hwc = &event->hw;
2909
2910         if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
2911                 uncore_disable_event(box, event);
2912                 box->n_active--;
2913                 box->events[hwc->idx] = NULL;
2914                 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
2915                 hwc->state |= PERF_HES_STOPPED;
2916
2917                 if (box->n_active == 0) {
2918                         uncore_disable_box(box);
2919                         uncore_pmu_cancel_hrtimer(box);
2920                 }
2921         }
2922
2923         if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
2924                 /*
2925                  * Drain the remaining delta count out of an event
2926                  * that we are disabling:
2927                  */
2928                 uncore_perf_event_update(box, event);
2929                 hwc->state |= PERF_HES_UPTODATE;
2930         }
2931 }
2932
2933 static int uncore_pmu_event_add(struct perf_event *event, int flags)
2934 {
2935         struct intel_uncore_box *box = uncore_event_to_box(event);
2936         struct hw_perf_event *hwc = &event->hw;
2937         int assign[UNCORE_PMC_IDX_MAX];
2938         int i, n, ret;
2939
2940         if (!box)
2941                 return -ENODEV;
2942
2943         ret = n = uncore_collect_events(box, event, false);
2944         if (ret < 0)
2945                 return ret;
2946
2947         hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
2948         if (!(flags & PERF_EF_START))
2949                 hwc->state |= PERF_HES_ARCH;
2950
2951         ret = uncore_assign_events(box, assign, n);
2952         if (ret)
2953                 return ret;
2954
2955         /* save events moving to new counters */
2956         for (i = 0; i < box->n_events; i++) {
2957                 event = box->event_list[i];
2958                 hwc = &event->hw;
2959
2960                 if (hwc->idx == assign[i] &&
2961                         hwc->last_tag == box->tags[assign[i]])
2962                         continue;
2963                 /*
2964                  * Ensure we don't accidentally enable a stopped
2965                  * counter simply because we rescheduled.
2966                  */
2967                 if (hwc->state & PERF_HES_STOPPED)
2968                         hwc->state |= PERF_HES_ARCH;
2969
2970                 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
2971         }
2972
2973         /* reprogram moved events into new counters */
2974         for (i = 0; i < n; i++) {
2975                 event = box->event_list[i];
2976                 hwc = &event->hw;
2977
2978                 if (hwc->idx != assign[i] ||
2979                         hwc->last_tag != box->tags[assign[i]])
2980                         uncore_assign_hw_event(box, event, assign[i]);
2981                 else if (i < box->n_events)
2982                         continue;
2983
2984                 if (hwc->state & PERF_HES_ARCH)
2985                         continue;
2986
2987                 uncore_pmu_event_start(event, 0);
2988         }
2989         box->n_events = n;
2990
2991         return 0;
2992 }
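
/*
 * The hw.state bits follow the core perf contract: PERF_HES_STOPPED
 * means the counter is not counting, PERF_HES_UPTODATE means
 * event->count needs no further update, and PERF_HES_ARCH is an
 * architecture-private bit, used here to remember that a rescheduled
 * event was already stopped and must not be restarted behind the
 * core's back.
 */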
2993
2994 static void uncore_pmu_event_del(struct perf_event *event, int flags)
2995 {
2996         struct intel_uncore_box *box = uncore_event_to_box(event);
2997         int i;
2998
2999         uncore_pmu_event_stop(event, PERF_EF_UPDATE);
3000
3001         for (i = 0; i < box->n_events; i++) {
3002                 if (event == box->event_list[i]) {
3003                         uncore_put_event_constraint(box, event);
3004
3005                         while (++i < box->n_events)
3006                                 box->event_list[i - 1] = box->event_list[i];
3007
3008                         --box->n_events;
3009                         break;
3010                 }
3011         }
3012
3013         event->hw.idx = -1;
3014         event->hw.last_tag = ~0ULL;
3015 }
3016
3017 static void uncore_pmu_event_read(struct perf_event *event)
3018 {
3019         struct intel_uncore_box *box = uncore_event_to_box(event);
3020         uncore_perf_event_update(box, event);
3021 }
3022
3023 /*
3024  * validation ensures the group can be scheduled onto the
3025  * PMU as if it were the only group available.
3026  */
3027 static int uncore_validate_group(struct intel_uncore_pmu *pmu,
3028                                 struct perf_event *event)
3029 {
3030         struct perf_event *leader = event->group_leader;
3031         struct intel_uncore_box *fake_box;
3032         int ret = -EINVAL, n;
3033
3034         fake_box = uncore_alloc_box(pmu->type, smp_processor_id());
3035         if (!fake_box)
3036                 return -ENOMEM;
3037
3038         fake_box->pmu = pmu;
3039         /*
3040          * the event is not yet connected with its siblings,
3041          * so we must first collect the existing siblings and
3042          * then add the new event before we can simulate the
3043          * scheduling
3044          */
3045         n = uncore_collect_events(fake_box, leader, true);
3046         if (n < 0)
3047                 goto out;
3048
3049         fake_box->n_events = n;
3050         n = uncore_collect_events(fake_box, event, false);
3051         if (n < 0)
3052                 goto out;
3053
3054         fake_box->n_events = n;
3055
3056         ret = uncore_assign_events(fake_box, NULL, n);
3057 out:
3058         kfree(fake_box);
3059         return ret;
3060 }
3061
3062 static int uncore_pmu_event_init(struct perf_event *event)
3063 {
3064         struct intel_uncore_pmu *pmu;
3065         struct intel_uncore_box *box;
3066         struct hw_perf_event *hwc = &event->hw;
3067         int ret;
3068
3069         if (event->attr.type != event->pmu->type)
3070                 return -ENOENT;
3071
3072         pmu = uncore_event_to_pmu(event);
3073         /* no device found for this pmu */
3074         if (pmu->func_id < 0)
3075                 return -ENOENT;
3076
3077         /*
3078          * The uncore PMU always measures at all privilege levels,
3079          * so it doesn't make sense to specify any exclude bits.
3080          */
3081         if (event->attr.exclude_user || event->attr.exclude_kernel ||
3082                         event->attr.exclude_hv || event->attr.exclude_idle)
3083                 return -EINVAL;
3084
3085         /* Sampling not supported yet */
3086         if (hwc->sample_period)
3087                 return -EINVAL;
3088
3089         /*
3090          * Place all uncore events for a particular physical package
3091          * onto a single cpu
3092          */
3093         if (event->cpu < 0)
3094                 return -EINVAL;
3095         box = uncore_pmu_to_box(pmu, event->cpu);
3096         if (!box || box->cpu < 0)
3097                 return -EINVAL;
3098         event->cpu = box->cpu;
3099
3100         event->hw.idx = -1;
3101         event->hw.last_tag = ~0ULL;
3102         event->hw.extra_reg.idx = EXTRA_REG_NONE;
3103         event->hw.branch_reg.idx = EXTRA_REG_NONE;
3104
3105         if (event->attr.config == UNCORE_FIXED_EVENT) {
3106                 /* no fixed counter */
3107                 if (!pmu->type->fixed_ctl)
3108                         return -EINVAL;
3109                 /*
3110                  * if there is only one fixed counter, only the first pmu
3111                  * can access the fixed counter
3112                  */
3113                 if (pmu->type->single_fixed && pmu->pmu_idx > 0)
3114                         return -EINVAL;
3115
3116                 /* fixed counters have event field hardcoded to zero */
3117                 hwc->config = 0ULL;
3118         } else {
3119                 hwc->config = event->attr.config & pmu->type->event_mask;
3120                 if (pmu->type->ops->hw_config) {
3121                         ret = pmu->type->ops->hw_config(box, event);
3122                         if (ret)
3123                                 return ret;
3124                 }
3125         }
3126
3127         if (event->group_leader != event)
3128                 ret = uncore_validate_group(pmu, event);
3129         else
3130                 ret = 0;
3131
3132         return ret;
3133 }
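
/*
 * A minimal userspace sketch of counting an uncore event.  The pmu
 * name "uncore_cbox_0" and the pmu_type/config/cpu values are
 * placeholders read from sysfs, not taken from this file:
 *
 *	#include <linux/perf_event.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct perf_event_attr attr = {
 *		// from /sys/bus/event_source/devices/uncore_cbox_0/type
 *		.type   = pmu_type,
 *		.size   = sizeof(attr),
 *		// encoded per the pmu's format/ attributes in sysfs
 *		.config = config,
 *	};
 *	// pid == -1: count on one cpu for all tasks; the cpu must come
 *	// from the pmu's "cpumask" attribute (one cpu per package)
 *	int fd = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
 *
 * event_init() above rejects sampling, exclude bits and cpu < 0,
 * which is why the sketch sets no sample_period and pins a cpu.
 */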
3134
3135 static ssize_t uncore_get_attr_cpumask(struct device *dev,
3136                                 struct device_attribute *attr, char *buf)
3137 {
3138         int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &uncore_cpu_mask);
3139
3140         buf[n++] = '\n';
3141         buf[n] = '\0';
3142         return n;
3143 }
3144
3145 static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);
3146
3147 static struct attribute *uncore_pmu_attrs[] = {
3148         &dev_attr_cpumask.attr,
3149         NULL,
3150 };
3151
3152 static struct attribute_group uncore_pmu_attr_group = {
3153         .attrs = uncore_pmu_attrs,
3154 };
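
/*
 * The "cpumask" attribute exported above tells userspace which single
 * cpu per physical package accepts uncore events; on a hypothetical
 * two-socket machine it might read "0,8".  perf(1) consults it so
 * that system-wide sessions open each uncore event once per package
 * rather than once per cpu.
 */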
3155
3156 static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
3157 {
3158         int ret;
3159
3160         pmu->pmu = (struct pmu) {
3161                 .attr_groups    = pmu->type->attr_groups,
3162                 .task_ctx_nr    = perf_invalid_context,
3163                 .event_init     = uncore_pmu_event_init,
3164                 .add            = uncore_pmu_event_add,
3165                 .del            = uncore_pmu_event_del,
3166                 .start          = uncore_pmu_event_start,
3167                 .stop           = uncore_pmu_event_stop,
3168                 .read           = uncore_pmu_event_read,
3169         };
3170
3171         if (pmu->type->num_boxes == 1) {
3172                 if (strlen(pmu->type->name) > 0)
3173                         sprintf(pmu->name, "uncore_%s", pmu->type->name);
3174                 else
3175                         sprintf(pmu->name, "uncore");
3176         } else {
3177                 sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
3178                         pmu->pmu_idx);
3179         }
3180
3181         ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
3182         return ret;
3183 }
3184
3185 static void __init uncore_type_exit(struct intel_uncore_type *type)
3186 {
3187         int i;
3188
3189         for (i = 0; i < type->num_boxes; i++)
3190                 free_percpu(type->pmus[i].box);
3191         kfree(type->pmus);
3192         type->pmus = NULL;
3193         kfree(type->events_group);
3194         type->events_group = NULL;
3195 }
3196
3197 static void __init uncore_types_exit(struct intel_uncore_type **types)
3198 {
3199         int i;
3200         for (i = 0; types[i]; i++)
3201                 uncore_type_exit(types[i]);
3202 }
3203
3204 static int __init uncore_type_init(struct intel_uncore_type *type)
3205 {
3206         struct intel_uncore_pmu *pmus;
3207         struct attribute_group *attr_group;
3208         struct attribute **attrs;
3209         int i, j;
3210
3211         pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
3212         if (!pmus)
3213                 return -ENOMEM;
3214
3215         type->unconstrainted = (struct event_constraint)
3216                 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
3217                                 0, type->num_counters, 0, 0);
3218
3219         for (i = 0; i < type->num_boxes; i++) {
3220                 pmus[i].func_id = -1;
3221                 pmus[i].pmu_idx = i;
3222                 pmus[i].type = type;
3223                 INIT_LIST_HEAD(&pmus[i].box_list);
3224                 pmus[i].box = alloc_percpu(struct intel_uncore_box *);
3225                 if (!pmus[i].box)
3226                         goto fail;
3227         }
3228
3229         if (type->event_descs) {
3230                 i = 0;
3231                 while (type->event_descs[i].attr.attr.name)
3232                         i++;
3233
3234                 attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
3235                                         sizeof(*attr_group), GFP_KERNEL);
3236                 if (!attr_group)
3237                         goto fail;
3238
3239                 attrs = (struct attribute **)(attr_group + 1);
3240                 attr_group->name = "events";
3241                 attr_group->attrs = attrs;
3242
3243                 for (j = 0; j < i; j++)
3244                         attrs[j] = &type->event_descs[j].attr.attr;
3245
3246                 type->events_group = attr_group;
3247         }
3248
3249         type->pmu_group = &uncore_pmu_attr_group;
3250         type->pmus = pmus;
3251         return 0;
3252 fail:
3253         uncore_type_exit(type);
3254         return -ENOMEM;
3255 }
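
/*
 * Note the single allocation above: the attribute pointer array (i
 * entries plus a NULL terminator) is placed immediately behind the
 * attribute_group, which is why attrs is computed as
 * (struct attribute **)(attr_group + 1).
 */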
3256
3257 static int __init uncore_types_init(struct intel_uncore_type **types)
3258 {
3259         int i, ret;
3260
3261         for (i = 0; types[i]; i++) {
3262                 ret = uncore_type_init(types[i]);
3263                 if (ret)
3264                         goto fail;
3265         }
3266         return 0;
3267 fail:
3268         while (--i >= 0)
3269                 uncore_type_exit(types[i]);
3270         return ret;
3271 }
3272
3273 static struct pci_driver *uncore_pci_driver;
3274 static bool pcidrv_registered;
3275
3276 /*
3277  * add a pci uncore device
3278  */
3279 static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3280 {
3281         struct intel_uncore_pmu *pmu;
3282         struct intel_uncore_box *box;
3283         struct intel_uncore_type *type;
3284         int phys_id;
3285
3286         phys_id = pcibus_to_physid[pdev->bus->number];
3287         if (phys_id < 0)
3288                 return -ENODEV;
3289
3290         if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
3291                 extra_pci_dev[phys_id][UNCORE_PCI_DEV_IDX(id->driver_data)] = pdev;
3292                 pci_set_drvdata(pdev, NULL);
3293                 return 0;
3294         }
3295
3296         type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
3297         box = uncore_alloc_box(type, 0);
3298         if (!box)
3299                 return -ENOMEM;
3300
3301         /*
3302          * for performance monitoring units with multiple boxes,
3303          * each box has a different function id.
3304          */
3305         pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
3306         if (pmu->func_id < 0)
3307                 pmu->func_id = pdev->devfn;
3308         else
3309                 WARN_ON_ONCE(pmu->func_id != pdev->devfn);
3310
3311         box->phys_id = phys_id;
3312         box->pci_dev = pdev;
3313         box->pmu = pmu;
3314         uncore_box_init(box);
3315         pci_set_drvdata(pdev, box);
3316
3317         raw_spin_lock(&uncore_box_lock);
3318         list_add_tail(&box->list, &pmu->box_list);
3319         raw_spin_unlock(&uncore_box_lock);
3320
3321         return 0;
3322 }
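
/*
 * id->driver_data packs the uncore type index and the box (or extra
 * device) index into one word, unpacked by UNCORE_PCI_DEV_TYPE() and
 * UNCORE_PCI_DEV_IDX().  A sketch of how a matching pci_device_id
 * entry is expected to be built, using the packing helper from the
 * companion header:
 *
 *	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, some_device_id),
 *	  .driver_data = UNCORE_PCI_DEV_DATA(type_idx, box_idx), },
 */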
3323
3324 static void uncore_pci_remove(struct pci_dev *pdev)
3325 {
3326         struct intel_uncore_box *box = pci_get_drvdata(pdev);
3327         struct intel_uncore_pmu *pmu;
3328         int i, cpu, phys_id = pcibus_to_physid[pdev->bus->number];
3329
3331         if (!box) {
3332                 for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
3333                         if (extra_pci_dev[phys_id][i] == pdev) {
3334                                 extra_pci_dev[phys_id][i] = NULL;
3335                                 break;
3336                         }
3337                 }
3338                 WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
3339                 return;
3340         }
3341
3342         pmu = box->pmu;
3343         if (WARN_ON_ONCE(phys_id != box->phys_id))
3344                 return;
3345
3346         pci_set_drvdata(pdev, NULL);
3347
3348         raw_spin_lock(&uncore_box_lock);
3349         list_del(&box->list);
3350         raw_spin_unlock(&uncore_box_lock);
3351
3352         for_each_possible_cpu(cpu) {
3353                 if (*per_cpu_ptr(pmu->box, cpu) == box) {
3354                         *per_cpu_ptr(pmu->box, cpu) = NULL;
3355                         atomic_dec(&box->refcnt);
3356                 }
3357         }
3358
3359         WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
3360         kfree(box);
3361 }
3362
3363 static int __init uncore_pci_init(void)
3364 {
3365         int ret;
3366
3367         switch (boot_cpu_data.x86_model) {
3368         case 45: /* Sandy Bridge-EP */
3369                 ret = snbep_pci2phy_map_init(0x3ce0);
3370                 if (ret)
3371                         return ret;
3372                 pci_uncores = snbep_pci_uncores;
3373                 uncore_pci_driver = &snbep_uncore_pci_driver;
3374                 break;
3375         case 62: /* IvyTown */
3376                 ret = snbep_pci2phy_map_init(0x0e1e);
3377                 if (ret)
3378                         return ret;
3379                 pci_uncores = ivt_pci_uncores;
3380                 uncore_pci_driver = &ivt_uncore_pci_driver;
3381                 break;
3382         default:
3383                 return 0;
3384         }
3385
3386         ret = uncore_types_init(pci_uncores);
3387         if (ret)
3388                 return ret;
3389
3390         uncore_pci_driver->probe = uncore_pci_probe;
3391         uncore_pci_driver->remove = uncore_pci_remove;
3392
3393         ret = pci_register_driver(uncore_pci_driver);
3394         if (ret == 0)
3395                 pcidrv_registered = true;
3396         else
3397                 uncore_types_exit(pci_uncores);
3398
3399         return ret;
3400 }
3401
3402 static void __init uncore_pci_exit(void)
3403 {
3404         if (pcidrv_registered) {
3405                 pcidrv_registered = false;
3406                 pci_unregister_driver(uncore_pci_driver);
3407                 uncore_types_exit(pci_uncores);
3408         }
3409 }
3410
3411 /* CPU hotplug/unplug is serialized by the cpu_add_remove_lock mutex */
3412 static LIST_HEAD(boxes_to_free);
3413
3414 static void uncore_kfree_boxes(void)
3415 {
3416         struct intel_uncore_box *box;
3417
3418         while (!list_empty(&boxes_to_free)) {
3419                 box = list_entry(boxes_to_free.next,
3420                                  struct intel_uncore_box, list);
3421                 list_del(&box->list);
3422                 kfree(box);
3423         }
3424 }
3425
3426 static void uncore_cpu_dying(int cpu)
3427 {
3428         struct intel_uncore_type *type;
3429         struct intel_uncore_pmu *pmu;
3430         struct intel_uncore_box *box;
3431         int i, j;
3432
3433         for (i = 0; msr_uncores[i]; i++) {
3434                 type = msr_uncores[i];
3435                 for (j = 0; j < type->num_boxes; j++) {
3436                         pmu = &type->pmus[j];
3437                         box = *per_cpu_ptr(pmu->box, cpu);
3438                         *per_cpu_ptr(pmu->box, cpu) = NULL;
3439                         if (box && atomic_dec_and_test(&box->refcnt))
3440                                 list_add(&box->list, &boxes_to_free);
3441                 }
3442         }
3443 }
3444
3445 static int uncore_cpu_starting(int cpu)
3446 {
3447         struct intel_uncore_type *type;
3448         struct intel_uncore_pmu *pmu;
3449         struct intel_uncore_box *box, *exist;
3450         int i, j, k, phys_id;
3451
3452         phys_id = topology_physical_package_id(cpu);
3453
3454         for (i = 0; msr_uncores[i]; i++) {
3455                 type = msr_uncores[i];
3456                 for (j = 0; j < type->num_boxes; j++) {
3457                         pmu = &type->pmus[j];
3458                         box = *per_cpu_ptr(pmu->box, cpu);
3459                         /* box already set up by uncore_cpu_init()? */
3460                         if (box && box->phys_id >= 0) {
3461                                 uncore_box_init(box);
3462                                 continue;
3463                         }
3464
3465                         for_each_online_cpu(k) {
3466                                 exist = *per_cpu_ptr(pmu->box, k);
3467                                 if (exist && exist->phys_id == phys_id) {
3468                                         atomic_inc(&exist->refcnt);
3469                                         *per_cpu_ptr(pmu->box, cpu) = exist;
3470                                         if (box) {
3471                                                 list_add(&box->list,
3472                                                          &boxes_to_free);
3473                                                 box = NULL;
3474                                         }
3475                                         break;
3476                                 }
3477                         }
3478
3479                         if (box) {
3480                                 box->phys_id = phys_id;
3481                                 uncore_box_init(box);
3482                         }
3483                 }
3484         }
3485         return 0;
3486 }
3487
3488 static int uncore_cpu_prepare(int cpu, int phys_id)
3489 {
3490         struct intel_uncore_type *type;
3491         struct intel_uncore_pmu *pmu;
3492         struct intel_uncore_box *box;
3493         int i, j;
3494
3495         for (i = 0; msr_uncores[i]; i++) {
3496                 type = msr_uncores[i];
3497                 for (j = 0; j < type->num_boxes; j++) {
3498                         pmu = &type->pmus[j];
3499                         if (pmu->func_id < 0)
3500                                 pmu->func_id = j;
3501
3502                         box = uncore_alloc_box(type, cpu);
3503                         if (!box)
3504                                 return -ENOMEM;
3505
3506                         box->pmu = pmu;
3507                         box->phys_id = phys_id;
3508                         *per_cpu_ptr(pmu->box, cpu) = box;
3509                 }
3510         }
3511         return 0;
3512 }
3513
3514 static void
3515 uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
3516 {
3517         struct intel_uncore_type *type;
3518         struct intel_uncore_pmu *pmu;
3519         struct intel_uncore_box *box;
3520         int i, j;
3521
3522         for (i = 0; uncores[i]; i++) {
3523                 type = uncores[i];
3524                 for (j = 0; j < type->num_boxes; j++) {
3525                         pmu = &type->pmus[j];
3526                         if (old_cpu < 0)
3527                                 box = uncore_pmu_to_box(pmu, new_cpu);
3528                         else
3529                                 box = uncore_pmu_to_box(pmu, old_cpu);
3530                         if (!box)
3531                                 continue;
3532
3533                         if (old_cpu < 0) {
3534                                 WARN_ON_ONCE(box->cpu != -1);
3535                                 box->cpu = new_cpu;
3536                                 continue;
3537                         }
3538
3539                         WARN_ON_ONCE(box->cpu != old_cpu);
3540                         if (new_cpu >= 0) {
3541                                 uncore_pmu_cancel_hrtimer(box);
3542                                 perf_pmu_migrate_context(&pmu->pmu,
3543                                                 old_cpu, new_cpu);
3544                                 box->cpu = new_cpu;
3545                         } else {
3546                                 box->cpu = -1;
3547                         }
3548                 }
3549         }
3550 }
3551
3552 static void uncore_event_exit_cpu(int cpu)
3553 {
3554         int i, phys_id, target;
3555
3556         /* nothing to do unless the exiting cpu was collecting uncore events */
3557         if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
3558                 return;
3559
3560         /* find a new cpu to collect uncore events */
3561         phys_id = topology_physical_package_id(cpu);
3562         target = -1;
3563         for_each_online_cpu(i) {
3564                 if (i == cpu)
3565                         continue;
3566                 if (phys_id == topology_physical_package_id(i)) {
3567                         target = i;
3568                         break;
3569                 }
3570         }
3571
3572         /* migrate uncore events to the new cpu */
3573         if (target >= 0)
3574                 cpumask_set_cpu(target, &uncore_cpu_mask);
3575
3576         uncore_change_context(msr_uncores, cpu, target);
3577         uncore_change_context(pci_uncores, cpu, target);
3578 }
3579
3580 static void uncore_event_init_cpu(int cpu)
3581 {
3582         int i, phys_id;
3583
3584         phys_id = topology_physical_package_id(cpu);
3585         for_each_cpu(i, &uncore_cpu_mask) {
3586                 if (phys_id == topology_physical_package_id(i))
3587                         return;
3588         }
3589
3590         cpumask_set_cpu(cpu, &uncore_cpu_mask);
3591
3592         uncore_change_context(msr_uncores, -1, cpu);
3593         uncore_change_context(pci_uncores, -1, cpu);
3594 }
3595
3596 static int uncore_cpu_notifier(struct notifier_block *self,
3597                                unsigned long action, void *hcpu)
3598 {
3599         unsigned int cpu = (long)hcpu;
3600
3601         /* allocate/free data structures for the uncore boxes */
3602         switch (action & ~CPU_TASKS_FROZEN) {
3603         case CPU_UP_PREPARE:
3604                 uncore_cpu_prepare(cpu, -1);
3605                 break;
3606         case CPU_STARTING:
3607                 uncore_cpu_starting(cpu);
3608                 break;
3609         case CPU_UP_CANCELED:
3610         case CPU_DYING:
3611                 uncore_cpu_dying(cpu);
3612                 break;
3613         case CPU_ONLINE:
3614         case CPU_DEAD:
3615                 uncore_kfree_boxes();
3616                 break;
3617         default:
3618                 break;
3619         }
3620
3621         /* select the cpu that collects uncore events */
3622         switch (action & ~CPU_TASKS_FROZEN) {
3623         case CPU_DOWN_FAILED:
3624         case CPU_STARTING:
3625                 uncore_event_init_cpu(cpu);
3626                 break;
3627         case CPU_DOWN_PREPARE:
3628                 uncore_event_exit_cpu(cpu);
3629                 break;
3630         default:
3631                 break;
3632         }
3633
3634         return NOTIFY_OK;
3635 }
3636
3637 static struct notifier_block uncore_cpu_nb = {
3638         .notifier_call  = uncore_cpu_notifier,
3639         /*
3640          * to migrate uncore events, our notifier must run
3641          * before perf core's notifier.
3642          */
3643         .priority       = CPU_PRI_PERF + 1,
3644 };
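
/*
 * Combining the two switches in uncore_cpu_notifier(), the callbacks
 * run in this order over a cpu's lifetime:
 *
 *	online:  CPU_UP_PREPARE   -> uncore_cpu_prepare()     alloc boxes
 *	         CPU_STARTING     -> uncore_cpu_starting()    init/share
 *	                          -> uncore_event_init_cpu()  maybe collect
 *	         CPU_ONLINE       -> uncore_kfree_boxes()     free spares
 *	offline: CPU_DOWN_PREPARE -> uncore_event_exit_cpu()  migrate
 *	         CPU_DYING        -> uncore_cpu_dying()       drop ref
 *	         CPU_DEAD         -> uncore_kfree_boxes()     free unused
 */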
3645
3646 static void __init uncore_cpu_setup(void *dummy)
3647 {
3648         uncore_cpu_starting(smp_processor_id());
3649 }
3650
3651 static int __init uncore_cpu_init(void)
3652 {
3653         int ret, cpu, max_cores;
3654
3655         max_cores = boot_cpu_data.x86_max_cores;
3656         switch (boot_cpu_data.x86_model) {
3657         case 26: /* Nehalem */
3658         case 30:
3659         case 37: /* Westmere */
3660         case 44:
3661                 msr_uncores = nhm_msr_uncores;
3662                 break;
3663         case 42: /* Sandy Bridge */
3664         case 58: /* Ivy Bridge */
3665                 if (snb_uncore_cbox.num_boxes > max_cores)
3666                         snb_uncore_cbox.num_boxes = max_cores;
3667                 msr_uncores = snb_msr_uncores;
3668                 break;
3669         case 45: /* Sandy Bridge-EP */
3670                 if (snbep_uncore_cbox.num_boxes > max_cores)
3671                         snbep_uncore_cbox.num_boxes = max_cores;
3672                 msr_uncores = snbep_msr_uncores;
3673                 break;
3674         case 46: /* Nehalem-EX */
3675                 uncore_nhmex = true;        /* fall through */
3676         case 47: /* Westmere-EX aka. Xeon E7 */
3677                 if (!uncore_nhmex)
3678                         nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
3679                 if (nhmex_uncore_cbox.num_boxes > max_cores)
3680                         nhmex_uncore_cbox.num_boxes = max_cores;
3681                 msr_uncores = nhmex_msr_uncores;
3682                 break;
3683         case 62: /* IvyTown */
3684                 if (ivt_uncore_cbox.num_boxes > max_cores)
3685                         ivt_uncore_cbox.num_boxes = max_cores;
3686                 msr_uncores = ivt_msr_uncores;
3687                 break;
3689         default:
3690                 return 0;
3691         }
3692
3693         ret = uncore_types_init(msr_uncores);
3694         if (ret)
3695                 return ret;
3696
3697         get_online_cpus();
3698
3699         for_each_online_cpu(cpu) {
3700                 int i, phys_id = topology_physical_package_id(cpu);
3701
3702                 for_each_cpu(i, &uncore_cpu_mask) {
3703                         if (phys_id == topology_physical_package_id(i)) {
3704                                 phys_id = -1;
3705                                 break;
3706                         }
3707                 }
3708                 if (phys_id < 0)
3709                         continue;
3710
3711                 uncore_cpu_prepare(cpu, phys_id);
3712                 uncore_event_init_cpu(cpu);
3713         }
3714         on_each_cpu(uncore_cpu_setup, NULL, 1);
3715
3716         register_cpu_notifier(&uncore_cpu_nb);
3717
3718         put_online_cpus();
3719
3720         return 0;
3721 }
3722
3723 static int __init uncore_pmus_register(void)
3724 {
3725         struct intel_uncore_pmu *pmu;
3726         struct intel_uncore_type *type;
3727         int i, j;
3728
3729         for (i = 0; msr_uncores[i]; i++) {
3730                 type = msr_uncores[i];
3731                 for (j = 0; j < type->num_boxes; j++) {
3732                         pmu = &type->pmus[j];
3733                         uncore_pmu_register(pmu);
3734                 }
3735         }
3736
3737         for (i = 0; pci_uncores[i]; i++) {
3738                 type = pci_uncores[i];
3739                 for (j = 0; j < type->num_boxes; j++) {
3740                         pmu = &type->pmus[j];
3741                         uncore_pmu_register(pmu);
3742                 }
3743         }
3744
3745         return 0;
3746 }
3747
3748 static int __init intel_uncore_init(void)
3749 {
3750         int ret;
3751
3752         if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
3753                 return -ENODEV;
3754
3755         if (cpu_has_hypervisor)
3756                 return -ENODEV;
3757
3758         ret = uncore_pci_init();
3759         if (ret)
3760                 goto fail;
3761         ret = uncore_cpu_init();
3762         if (ret) {
3763                 uncore_pci_exit();
3764                 goto fail;
3765         }
3766
3767         uncore_pmus_register();
3768         return 0;
3769 fail:
3770         return ret;
3771 }
3772 device_initcall(intel_uncore_init);